gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
# ccm node
from __future__ import with_statement
import os
import re
import shutil
import signal
import stat
import subprocess
import time
import yaml
from six import print_
from ccmlib import common
from ccmlib.node import Node, NodeError
class DseNode(Node):
    """
    Provides interactions to a DSE node.

    Extends the plain Cassandra ``Node`` with DSE specifics: the
    ``resources/cassandra`` directory layout, the ``dse`` launcher script,
    workload flags (hadoop/solr/spark/cfs) and the optional OpsCenter
    datastax-agent lifecycle.
    """

    def __init__(self, name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save=True, binary_interface=None):
        super(DseNode, self).__init__(name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save, binary_interface)
        self.get_cassandra_version()
        if self.cluster.hasOpscenter():
            self._copy_agent()

    def get_install_cassandra_root(self):
        """Return the bundled Cassandra tree inside the DSE install dir."""
        return os.path.join(self.get_install_dir(), 'resources', 'cassandra')

    def get_node_cassandra_root(self):
        """Return this node's private copy of the Cassandra tree."""
        return os.path.join(self.get_path(), 'resources', 'cassandra')

    def get_conf_dir(self):
        """
        Returns the path to the directory where Cassandra config are located
        """
        return os.path.join(self.get_path(), 'resources', 'cassandra', 'conf')

    def get_tool(self, toolname):
        """Return the full path of a Cassandra bin tool in the install dir."""
        return common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', toolname)

    def get_tool_args(self, toolname):
        """Return the argv prefix to run *toolname* through the dse launcher."""
        return [common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', 'dse'), toolname]

    def get_env(self):
        """Return the environment dict needed to run DSE binaries."""
        return common.make_dse_env(self.get_install_dir(), self.get_path())

    def get_cassandra_version(self):
        """Return the Cassandra version embedded in this DSE install."""
        return common.get_dse_cassandra_version(self.get_install_dir())

    def set_workload(self, workload):
        """Record the DSE workload; 'solr' additionally needs a tomcat server.xml."""
        self.workload = workload
        self._update_config()
        if workload == 'solr':
            self.__generate_server_xml()

    def start(self,
              join_ring=True,
              no_wait=False,
              verbose=False,
              update_pid=True,
              wait_other_notice=False,
              replace_token=None,
              replace_address=None,
              jvm_args=None,
              wait_for_binary_proto=False,
              profile_options=None,
              use_jna=False):
        """
        Start the node. Options includes:
          - join_ring: if false, start the node with -Dcassandra.join_ring=False
          - no_wait: by default, this method returns when the node is started and listening to clients.
            If no_wait=True, the method returns sooner.
          - wait_other_notice: if True, this method returns only when all other live node of the cluster
            have marked this node UP.
          - replace_token: start the node with the -Dcassandra.replace_token option.
          - replace_address: start the node with the -Dcassandra.replace_address option.

        Returns the Popen object of the launched process.
        """
        # BUG FIX: jvm_args previously defaulted to a mutable [] shared across
        # calls; use a None sentinel instead.
        if jvm_args is None:
            jvm_args = []
        if self.is_running():
            raise NodeError("%s is already running" % self.name)
        for itf in list(self.network_interfaces.values()):
            if itf is not None and replace_address is None:
                common.check_socket_available(itf)
        if wait_other_notice:
            marks = [(node, node.mark_log()) for node in list(self.cluster.nodes.values()) if node.is_running()]
        cdir = self.get_install_dir()
        launch_bin = common.join_bin(cdir, 'bin', 'dse')
        # Copy back the dse scripts since profiling may have modified it the previous time
        shutil.copy(launch_bin, self.get_bin_dir())
        launch_bin = common.join_bin(self.get_path(), 'bin', 'dse')
        # If Windows, change entries in .bat file to split conf from binaries
        if common.is_win():
            # NOTE(review): __clean_bat is not defined in this class; presumably
            # provided by Node or elsewhere -- confirm on the Windows path.
            self.__clean_bat()
        if profile_options is not None:
            config = common.get_config()
            if 'yourkit_agent' not in config:
                raise NodeError("Cannot enable profile. You need to set 'yourkit_agent' to the path of your agent in a {0}/config".format(common.get_default_path_display_name()))
            cmd = '-agentpath:%s' % config['yourkit_agent']
            if 'options' in profile_options:
                cmd = cmd + '=' + profile_options['options']
            print_(cmd)
            # Yes, it's fragile as shit
            pattern = r'cassandra_parms="-Dlog4j.configuration=log4j-server.properties -Dlog4j.defaultInitOverride=true'
            common.replace_in_file(launch_bin, pattern, ' ' + pattern + ' ' + cmd + '"')
        # Ensure the (possibly freshly copied) launcher script is executable.
        os.chmod(launch_bin, os.stat(launch_bin).st_mode | stat.S_IEXEC)
        env = common.make_dse_env(self.get_install_dir(), self.get_path())
        if common.is_win():
            self._clean_win_jmx()
        pidfile = os.path.join(self.get_path(), 'cassandra.pid')
        args = [launch_bin, 'cassandra']
        # Translate the configured workload(s) into dse launcher flags.
        if self.workload is not None:
            if 'hadoop' in self.workload:
                args.append('-t')
            if 'solr' in self.workload:
                args.append('-s')
            if 'spark' in self.workload:
                args.append('-k')
            if 'cfs' in self.workload:
                args.append('-c')
        args += ['-p', pidfile, '-Dcassandra.join_ring=%s' % str(join_ring)]
        if replace_token is not None:
            args.append('-Dcassandra.replace_token=%s' % str(replace_token))
        if replace_address is not None:
            args.append('-Dcassandra.replace_address=%s' % str(replace_address))
        if use_jna is False:
            args.append('-Dcassandra.boot_without_jna=true')
        args = args + jvm_args
        process = None
        if common.is_win():
            # clean up any old dirty_pid files from prior runs
            if os.path.isfile(self.get_path() + "/dirty_pid.tmp"):
                os.remove(self.get_path() + "/dirty_pid.tmp")
            process = subprocess.Popen(args, cwd=self.get_bin_dir(), env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            process = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Our modified batch file writes a dirty output with more than just the pid - clean it to get in parity
        # with *nix operation here.
        if common.is_win():
            self.__clean_win_pid()
            self._update_pid(process)
        elif update_pid:
            if no_wait:
                time.sleep(2)  # waiting 2 seconds nevertheless to check for early errors and for the pid to be set
            else:
                # Drain stdout until the launcher backgrounds the server.
                for line in process.stdout:
                    if verbose:
                        print_(line.rstrip('\n'))
            self._update_pid(process)
            if not self.is_running():
                raise NodeError("Error starting node %s" % self.name, process)
        if wait_other_notice:
            for node, mark in marks:
                node.watch_log_for_alive(self, from_mark=mark)
        if wait_for_binary_proto:
            self.wait_for_binary_interface()
        if self.cluster.hasOpscenter():
            self._start_agent()
        return process

    def stop(self, wait=True, wait_other_notice=False, gently=True):
        """Stop the node (and its datastax-agent, when OpsCenter is enabled)."""
        stopped = super(DseNode, self).stop(wait, wait_other_notice, gently)
        if self.cluster.hasOpscenter():
            self._stop_agent()
        return stopped

    def dsetool(self, cmd):
        """Run `dsetool <cmd>` against this node and wait for completion."""
        env = common.make_dse_env(self.get_install_dir(), self.get_path())
        host = self.address()
        dsetool = common.join_bin(self.get_install_dir(), 'bin', 'dsetool')
        args = [dsetool, '-h', host, '-j', str(self.jmx_port)]
        args += cmd.split()
        p = subprocess.Popen(args, env=env)
        p.wait()

    def _run_dse_subcommand(self, subcommand, options):
        """Run `dse [subcommand] <options...>` in the DSE env and wait.

        Shared implementation for dse/hadoop/hive/pig/sqoop/spark below.
        """
        env = common.make_dse_env(self.get_install_dir(), self.get_path())
        # BUG FIX: subprocess env values must be strings; jmx_port may be an int.
        env['JMX_PORT'] = str(self.jmx_port)
        dse_bin = common.join_bin(self.get_install_dir(), 'bin', 'dse')
        args = [dse_bin]
        if subcommand is not None:
            args.append(subcommand)
        args += options
        p = subprocess.Popen(args, env=env)
        p.wait()

    def dse(self, dse_options=None):
        """Run the bare `dse` launcher with the given extra options."""
        self._run_dse_subcommand(None, dse_options or [])

    def hadoop(self, hadoop_options=None):
        """Run `dse hadoop <options...>`."""
        self._run_dse_subcommand('hadoop', hadoop_options or [])

    def hive(self, hive_options=None):
        """Run `dse hive <options...>`."""
        self._run_dse_subcommand('hive', hive_options or [])

    def pig(self, pig_options=None):
        """Run `dse pig <options...>`."""
        self._run_dse_subcommand('pig', pig_options or [])

    def sqoop(self, sqoop_options=None):
        """Run `dse sqoop <options...>`."""
        self._run_dse_subcommand('sqoop', sqoop_options or [])

    def spark(self, spark_options=None):
        """Run `dse spark <options...>`."""
        self._run_dse_subcommand('spark', spark_options or [])

    def import_dse_config_files(self):
        """Copy DSE conf files from the install dir and apply dse.yaml overrides."""
        self._update_config()
        if not os.path.isdir(os.path.join(self.get_path(), 'resources', 'dse', 'conf')):
            os.makedirs(os.path.join(self.get_path(), 'resources', 'dse', 'conf'))
        common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'dse', 'conf'), os.path.join(self.get_path(), 'resources', 'dse', 'conf'))
        self.__update_yaml()

    def copy_config_files(self):
        """Copy each product's conf (plus solr web and tomcat lib/webapps) into the node dir."""
        for product in ['dse', 'cassandra', 'hadoop', 'sqoop', 'hive', 'tomcat', 'spark', 'shark', 'mahout', 'pig', 'solr']:
            src_conf = os.path.join(self.get_install_dir(), 'resources', product, 'conf')
            dst_conf = os.path.join(self.get_path(), 'resources', product, 'conf')
            if not os.path.isdir(src_conf):
                # Product not bundled with this DSE version; skip it entirely.
                continue
            if os.path.isdir(dst_conf):
                common.rmdirs(dst_conf)
            shutil.copytree(src_conf, dst_conf)
            # Some products carry extra directories beside conf/.
            if product == 'solr':
                extra_subdirs = ['web']
            elif product == 'tomcat':
                extra_subdirs = ['lib', 'webapps']
            else:
                extra_subdirs = []
            for sub in extra_subdirs:
                src = os.path.join(self.get_install_dir(), 'resources', product, sub)
                dst = os.path.join(self.get_path(), 'resources', product, sub)
                if os.path.isdir(dst):
                    common.rmdirs(dst)
                shutil.copytree(src, dst)

    def import_bin_files(self):
        """Copy the top-level and Cassandra bin directories into the node dir."""
        os.makedirs(os.path.join(self.get_path(), 'resources', 'cassandra', 'bin'))
        common.copy_directory(os.path.join(self.get_install_dir(), 'bin'), self.get_bin_dir())
        common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'bin'), os.path.join(self.get_path(), 'resources', 'cassandra', 'bin'))

    def __update_yaml(self):
        """Rewrite this node's dse.yaml with the cluster's DSE config overrides.

        An override whose value is None or an empty string removes the key.
        """
        conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml')
        with open(conf_file, 'r') as f:
            # BUG FIX: yaml.load without an explicit Loader can construct
            # arbitrary objects; safe_load is sufficient for config files.
            data = yaml.safe_load(f)
        data['system_key_directory'] = os.path.join(self.get_path(), 'keys')
        full_options = dict(list(self.cluster._dse_config_options.items()))
        for name in full_options:
            value = full_options[name]
            # BUG FIX: the original predicate was
            #   isinstance(value, str) and (value is None or len(value) == 0)
            # which can never be true for None (isinstance filters it out),
            # so None values silently failed to remove keys.
            if value is None or (isinstance(value, str) and len(value) == 0):
                # it is fine to remove a key not there
                data.pop(name, None)
            else:
                data[name] = value
        with open(conf_file, 'w') as f:
            yaml.safe_dump(data, f, default_flow_style=False)

    def _update_log4j(self):
        """Point the extra DSE log4j appenders at files under this node's dir."""
        super(DseNode, self)._update_log4j()
        conf_file = os.path.join(self.get_conf_dir(), common.LOG4J_CONF)
        # (appender letter, path components below the node dir)
        appender_logs = [
            ('V', ('logs', 'solrvalidation.log')),
            ('A', ('logs', 'audit.log')),
            ('B', ('logs', 'audit', 'dropped-events.log')),
        ]
        for appender, parts in appender_logs:
            append_pattern = 'log4j.appender.%s.File=' % appender
            log_file = os.path.join(self.get_path(), *parts)
            if common.is_win():
                # log4j wants forward slashes even on Windows.
                log_file = re.sub("\\\\", "/", log_file)
            common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)

    def __generate_server_xml(self):
        """Generate the tomcat server.xml used by the solr workload."""
        server_xml = os.path.join(self.get_path(), 'resources', 'tomcat', 'conf', 'server.xml')
        if os.path.isfile(server_xml):
            os.remove(server_xml)
        with open(server_xml, 'w+') as f:
            f.write('<Server port="8005" shutdown="SHUTDOWN">\n')
            f.write('  <Service name="Solr">\n')
            f.write('    <Connector port="8983" address="%s" protocol="HTTP/1.1" connectionTimeout="20000" maxThreads = "200" URIEncoding="UTF-8"/>\n' % self.network_interfaces['thrift'][0])
            f.write('    <Engine name="Solr" defaultHost="localhost">\n')
            f.write('      <Host name="localhost" appBase="../solr/web"\n')
            f.write('            unpackWARs="true" autoDeploy="true"\n')
            f.write('            xmlValidation="false" xmlNamespaceAware="false">\n')
            f.write('      </Host>\n')
            f.write('    </Engine>\n')
            f.write('  </Service>\n')
            f.write('</Server>\n')

    def _get_directories(self):
        """Return every directory this node needs created under its path."""
        subdirs = ['data', 'commitlogs', 'saved_caches', 'logs', 'bin', 'keys', 'resources', os.path.join('data', 'hints')]
        return [os.path.join(self.get_path(), d) for d in subdirs]

    def _copy_agent(self):
        """Copy the bundled datastax-agent into the node dir (only once)."""
        agent_source = os.path.join(self.get_install_dir(), 'datastax-agent')
        agent_target = os.path.join(self.get_path(), 'datastax-agent')
        if os.path.exists(agent_source) and not os.path.exists(agent_target):
            shutil.copytree(agent_source, agent_target)

    def _start_agent(self):
        """Write agent config and launch the OpsCenter datastax-agent, if present."""
        agent_dir = os.path.join(self.get_path(), 'datastax-agent')
        if os.path.exists(agent_dir):
            self._write_agent_address_yaml(agent_dir)
            self._write_agent_log4j_properties(agent_dir)
            args = [os.path.join(agent_dir, 'bin', common.platform_binary('datastax-agent'))]
            subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    def _stop_agent(self):
        """Kill the datastax-agent via its pidfile (best effort)."""
        agent_dir = os.path.join(self.get_path(), 'datastax-agent')
        if not os.path.exists(agent_dir):
            return
        pidfile = os.path.join(agent_dir, 'datastax-agent.pid')
        if not os.path.exists(pidfile):
            return
        with open(pidfile, 'r') as f:
            pid = int(f.readline().strip())
        try:
            os.kill(pid, signal.SIGKILL)
        except OSError:
            # The agent already exited; nothing to kill.
            pass
        os.remove(pidfile)

    def _write_agent_address_yaml(self, agent_dir):
        """Write the agent's address.yaml unless one already exists."""
        address_yaml = os.path.join(agent_dir, 'conf', 'address.yaml')
        if os.path.exists(address_yaml):
            return
        (ip, port) = self.network_interfaces['thrift']
        jmx = self.jmx_port
        with open(address_yaml, 'w+') as f:
            f.write('stomp_interface: 127.0.0.1\n')
            f.write('local_interface: %s\n' % ip)
            f.write('agent_rpc_interface: %s\n' % ip)
            f.write('agent_rpc_broadcast_address: %s\n' % ip)
            f.write('cassandra_conf: %s\n' % os.path.join(self.get_path(), 'resources', 'cassandra', 'conf', 'cassandra.yaml'))
            f.write('cassandra_install: %s\n' % self.get_path())
            f.write('cassandra_logs: %s\n' % os.path.join(self.get_path(), 'logs'))
            f.write('thrift_port: %s\n' % port)
            f.write('jmx_port: %s\n' % jmx)

    def _write_agent_log4j_properties(self, agent_dir):
        """(Re)write the agent's log4j.properties with a rolling file appender."""
        log4j_properties = os.path.join(agent_dir, 'conf', 'log4j.properties')
        with open(log4j_properties, 'w+') as f:
            f.write('log4j.rootLogger=INFO,R\n')
            f.write('log4j.logger.org.apache.http=OFF\n')
            f.write('log4j.logger.org.eclipse.jetty.util.log=WARN,R\n')
            f.write('log4j.appender.R=org.apache.log4j.RollingFileAppender\n')
            f.write('log4j.appender.R.maxFileSize=20MB\n')
            f.write('log4j.appender.R.maxBackupIndex=5\n')
            f.write('log4j.appender.R.layout=org.apache.log4j.PatternLayout\n')
            f.write('log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %m%n\n')
            f.write('log4j.appender.R.File=./log/agent.log\n')
|
|
"""The built-in Socket Mode client
* https://api.slack.com/apis/connections/socket
* https://slack.dev/python-slack-sdk/socket-mode/
"""
import logging
from concurrent.futures.thread import ThreadPoolExecutor
from logging import Logger
from queue import Queue
from threading import Lock
from typing import Union, Optional, List, Callable, Dict
from slack_sdk.socket_mode.client import BaseSocketModeClient
from slack_sdk.socket_mode.listeners import (
WebSocketMessageListener,
SocketModeRequestListener,
)
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_sdk.web import WebClient
from .connection import Connection, ConnectionState
from ..interval_runner import IntervalRunner
from ...errors import SlackClientConfigurationError, SlackClientNotConnectedError
from ...proxy_env_variable_loader import load_http_proxy_from_env
class SocketModeClient(BaseSocketModeClient):
    """Threaded Socket Mode client.

    Maintains one WebSocket ``Connection`` at a time plus three background
    ``IntervalRunner`` threads: one that pumps the current session, one that
    monitors/reconnects it, and one that drains the message queue into the
    worker thread pool.
    """

    logger: Logger
    web_client: WebClient
    app_token: str
    wss_uri: Optional[str]
    message_queue: Queue
    message_listeners: List[
        Union[
            WebSocketMessageListener,
            Callable[["BaseSocketModeClient", dict, Optional[str]], None],
        ]
    ]
    socket_mode_request_listeners: List[
        Union[
            SocketModeRequestListener,
            Callable[["BaseSocketModeClient", SocketModeRequest], None],
        ]
    ]
    current_session: Optional[Connection]
    current_session_state: ConnectionState
    current_session_runner: IntervalRunner
    current_app_monitor: IntervalRunner
    current_app_monitor_started: bool
    message_processor: IntervalRunner
    message_workers: ThreadPoolExecutor
    auto_reconnect_enabled: bool
    default_auto_reconnect_enabled: bool
    trace_enabled: bool
    receive_buffer_size: int  # bytes size
    connect_operation_lock: Lock
    on_message_listeners: List[Callable[[str], None]]
    on_error_listeners: List[Callable[[Exception], None]]
    on_close_listeners: List[Callable[[int, Optional[str]], None]]

    def __init__(
        self,
        app_token: str,
        logger: Optional[Logger] = None,
        web_client: Optional[WebClient] = None,
        auto_reconnect_enabled: bool = True,
        trace_enabled: bool = False,
        all_message_trace_enabled: bool = False,
        ping_pong_trace_enabled: bool = False,
        ping_interval: float = 5,
        receive_buffer_size: int = 1024,
        concurrency: int = 10,
        proxy: Optional[str] = None,
        proxy_headers: Optional[Dict[str, str]] = None,
        on_message_listeners: Optional[List[Callable[[str], None]]] = None,
        on_error_listeners: Optional[List[Callable[[Exception], None]]] = None,
        on_close_listeners: Optional[List[Callable[[int, Optional[str]], None]]] = None,
    ):
        """Socket Mode client

        Args:
            app_token: App-level token
            logger: Custom logger
            web_client: Web API client
            auto_reconnect_enabled: True if automatic reconnection is enabled (default: True)
            trace_enabled: True if more detailed debug-logging is enabled (default: False)
            all_message_trace_enabled: True if all message dump in debug logs is enabled (default: False)
            ping_pong_trace_enabled: True if trace logging for all ping-pong communications is enabled (default: False)
            ping_interval: interval for ping-pong with Slack servers (seconds)
            receive_buffer_size: the chunk size of a single socket recv operation (default: 1024)
            concurrency: the size of thread pool (default: 10)
            proxy: the HTTP proxy URL
            proxy_headers: additional HTTP header for proxy connection
            on_message_listeners: listener functions for on_message
            on_error_listeners: listener functions for on_error
            on_close_listeners: listener functions for on_close
        """
        self.app_token = app_token
        self.logger = logger or logging.getLogger(__name__)
        self.web_client = web_client or WebClient()
        # default_auto_reconnect_enabled remembers the configured value;
        # auto_reconnect_enabled is the live flag (cleared by close()).
        self.default_auto_reconnect_enabled = auto_reconnect_enabled
        self.auto_reconnect_enabled = self.default_auto_reconnect_enabled
        self.trace_enabled = trace_enabled
        self.all_message_trace_enabled = all_message_trace_enabled
        self.ping_pong_trace_enabled = ping_pong_trace_enabled
        self.ping_interval = ping_interval
        self.receive_buffer_size = receive_buffer_size
        if self.receive_buffer_size < 16:
            # Frame headers alone need several bytes; tiny buffers can't work.
            raise SlackClientConfigurationError(
                "Too small receive_buffer_size detected."
            )
        self.wss_uri = None
        self.message_queue = Queue()
        self.message_listeners = []
        self.socket_mode_request_listeners = []
        self.current_session = None
        self.current_session_state = ConnectionState()
        # Session pump thread starts immediately; it is a no-op until a
        # session exists (see _run_current_session).
        self.current_session_runner = IntervalRunner(
            self._run_current_session, 0.1
        ).start()
        # The monitor thread is only started on the first connect().
        self.current_app_monitor_started = False
        self.current_app_monitor = IntervalRunner(
            self._monitor_current_session, self.ping_interval
        )
        self.closed = False
        self.connect_operation_lock = Lock()
        # Drains message_queue and dispatches to the worker pool.
        self.message_processor = IntervalRunner(self.process_messages, 0.001).start()
        self.message_workers = ThreadPoolExecutor(max_workers=concurrency)
        self.proxy = proxy
        if self.proxy is None or len(self.proxy.strip()) == 0:
            # Fall back to HTTPS_PROXY-style environment variables.
            env_variable = load_http_proxy_from_env(self.logger)
            if env_variable is not None:
                self.proxy = env_variable
        self.proxy_headers = proxy_headers
        self.on_message_listeners = on_message_listeners or []
        self.on_error_listeners = on_error_listeners or []
        self.on_close_listeners = on_close_listeners or []

    def session_id(self) -> Optional[str]:
        """Return the current session's id, or None before the first connect."""
        if self.current_session is not None:
            return self.current_session.session_id
        return None

    def is_connected(self) -> bool:
        """True when an underlying WebSocket session exists and is active."""
        return self.current_session is not None and self.current_session.is_active()

    def connect(self) -> None:
        """Open a new WebSocket session, then retire the previous one.

        The new connection is established *before* the old one is closed so
        that a reconnect does not leave a gap in message delivery.
        """
        old_session: Optional[Connection] = self.current_session
        old_current_session_state: ConnectionState = self.current_session_state
        if self.wss_uri is None:
            # Ask the Web API (apps.connections.open) for a fresh endpoint.
            self.wss_uri = self.issue_new_wss_url()
        current_session = Connection(
            url=self.wss_uri,
            logger=self.logger,
            ping_interval=self.ping_interval,
            trace_enabled=self.trace_enabled,
            all_message_trace_enabled=self.all_message_trace_enabled,
            ping_pong_trace_enabled=self.ping_pong_trace_enabled,
            receive_buffer_size=self.receive_buffer_size,
            proxy=self.proxy,
            proxy_headers=self.proxy_headers,
            on_message_listener=self._on_message,
            on_error_listener=self._on_error,
            on_close_listener=self._on_close,
        )
        current_session.connect()
        # Terminate the old session's pump loop, then close its socket.
        if old_current_session_state is not None:
            old_current_session_state.terminated = True
        if old_session is not None:
            old_session.close()
        self.current_session = current_session
        self.current_session_state = ConnectionState()
        # Re-arm auto-reconnect (it may have been cleared by close()).
        self.auto_reconnect_enabled = self.default_auto_reconnect_enabled
        if not self.current_app_monitor_started:
            self.current_app_monitor_started = True
            self.current_app_monitor.start()
        self.logger.info(
            f"A new session has been established (session id: {self.session_id()})"
        )

    def disconnect(self) -> None:
        """Close the current session, if any (listeners fire via _on_close)."""
        if self.current_session is not None:
            self.current_session.close()

    def send_message(self, message: str) -> None:
        """Send a raw message over the current session, retrying once if the
        underlying connection was replaced mid-send."""
        if self.logger.level <= logging.DEBUG:
            self.logger.debug(
                f"Sending a message (session id: {self.session_id()}, message: {message})"
            )
        try:
            self.current_session.send(message)
        except SlackClientNotConnectedError as e:
            # We rarely get this exception while replacing the underlying WebSocket connections.
            # We can do one more try here as the self.current_session should be ready now.
            if self.logger.level <= logging.DEBUG:
                self.logger.debug(
                    f"Failed to send a message (session id: {self.session_id()}, error: {e}, message: {message})"
                    " as the underlying connection was replaced. Retrying the same request only one time..."
                )
            # Although acquiring self.connect_operation_lock also for the first method call is the safest way,
            # we avoid synchronizing a lot for better performance. That's why we are doing a retry here.
            with self.connect_operation_lock:
                if self.is_connected():
                    self.current_session.send(message)
                else:
                    self.logger.warning(
                        f"The current session (session id: {self.session_id()}) is no longer active. "
                        "Failed to send a message"
                    )
                    raise e

    def close(self):
        """Shut everything down: session, monitor, processor, worker pool."""
        self.closed = True
        self.auto_reconnect_enabled = False
        self.disconnect()
        if self.current_app_monitor.is_alive():
            self.current_app_monitor.shutdown()
        if self.message_processor.is_alive():
            self.message_processor.shutdown()
        self.message_workers.shutdown()

    def _on_message(self, message: str):
        """Connection callback: enqueue the message and notify listeners."""
        if self.logger.level <= logging.DEBUG:
            self.logger.debug(f"on_message invoked: (message: {message})")
        self.enqueue_message(message)
        for listener in self.on_message_listeners:
            listener(message)

    def _on_error(self, error: Exception):
        """Connection callback: log the error and notify listeners."""
        self.logger.exception(
            f"on_error invoked (session id: {self.session_id()}, "
            f"error: {type(error).__name__}, message: {error})"
        )
        for listener in self.on_error_listeners:
            listener(error)

    def _on_close(self, code: int, reason: Optional[str] = None):
        """Connection callback: optionally reconnect, then notify listeners."""
        if self.logger.level <= logging.DEBUG:
            self.logger.debug(f"on_close invoked (session id: {self.session_id()})")
        if self.auto_reconnect_enabled:
            self.logger.info(
                "Received CLOSE event. Reconnecting... "
                f"(session id: {self.session_id()})"
            )
            self.connect_to_new_endpoint()
        for listener in self.on_close_listeners:
            listener(code, reason)

    def _run_current_session(self):
        """IntervalRunner body: block on the active session until it ends.

        Runs every 0.1s; does nothing while there is no active session.
        """
        if self.current_session is not None and self.current_session.is_active():
            session_id = self.session_id()
            try:
                self.logger.info(
                    "Starting to receive messages from a new connection"
                    f" (session id: {session_id})"
                )
                self.current_session_state.terminated = False
                # Blocks until the session terminates or state.terminated is set.
                self.current_session.run_until_completion(self.current_session_state)
                self.logger.info(
                    "Stopped receiving messages from a connection"
                    f" (session id: {session_id})"
                )
            except Exception as e:
                self.logger.exception(
                    "Failed to start or stop the current session"
                    f" (session id: {session_id}, error: {e})"
                )
            finally:
                pass

    def _monitor_current_session(self):
        """IntervalRunner body: health-check the session and reconnect if dead."""
        if self.current_app_monitor_started:
            try:
                # NOTE(review): check_state() is called before the None check
                # below, so when current_session is None this raises
                # AttributeError and lands in the except branch instead of the
                # reconnect path -- confirm against upstream slack_sdk intent.
                self.current_session.check_state()
                if self.auto_reconnect_enabled and (
                    self.current_session is None or not self.current_session.is_active()
                ):
                    self.logger.info(
                        "The session seems to be already closed. Reconnecting... "
                        f"(session id: {self.session_id()})"
                    )
                    self.connect_to_new_endpoint()
            except Exception as e:
                self.logger.error(
                    "Failed to check the current session or reconnect to the server "
                    f"(session id: {self.session_id()}, error: {type(e).__name__}, message: {e})"
                )
|
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for basic_session_run_hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import threading
import time
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.testing.python.framework import fake_summary_writer
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.summary import summary as summary_lib
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
class MockCheckpointSaverListener(
    basic_session_run_hooks.CheckpointSaverListener):
  """Test double that counts how many times each saver hook point fires."""

  _HOOKS = ('begin', 'before_save', 'after_save', 'end')

  def __init__(self):
    # One integer counter per hook point, exposed as <name>_count attributes.
    for hook_name in self._HOOKS:
      setattr(self, hook_name + '_count', 0)

  def begin(self):
    self.begin_count += 1

  def before_save(self, session, global_step):
    self.before_save_count += 1

  def after_save(self, session, global_step):
    self.after_save_count += 1

  def end(self, session, global_step):
    self.end_count += 1

  def get_counts(self):
    """Return a snapshot of all counters keyed by hook name."""
    return {name: getattr(self, name + '_count') for name in self._HOOKS}
class SecondOrStepTimerTest(test.TestCase):
  """Tests for SecondOrStepTimer's trigger and bookkeeping rules."""

  def test_raise_in_both_secs_and_steps(self):
    # Configuring both trigger kinds at once is ambiguous and rejected.
    with self.assertRaises(ValueError):
      basic_session_run_hooks.SecondOrStepTimer(every_secs=2.0, every_steps=10)

  def test_raise_in_none_secs_and_steps(self):
    # Configuring neither trigger kind is likewise rejected.
    with self.assertRaises(ValueError):
      basic_session_run_hooks.SecondOrStepTimer()

  def test_every_secs(self):
    timer = basic_session_run_hooks.SecondOrStepTimer(every_secs=1.0)
    self.assertTrue(timer.should_trigger_for_step(1))
    timer.update_last_triggered_step(1)
    # Right after triggering, no step fires until a second elapses.
    for step in (1, 2):
      self.assertFalse(timer.should_trigger_for_step(step))
    time.sleep(1.0)
    # The already-triggered step stays quiet; a new step fires.
    self.assertFalse(timer.should_trigger_for_step(1))
    self.assertTrue(timer.should_trigger_for_step(2))

  def test_every_steps(self):
    timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=3)
    self.assertTrue(timer.should_trigger_for_step(1))
    timer.update_last_triggered_step(1)
    # Steps 1..3 are within the 3-step window; step 4 fires again.
    for step in (1, 2, 3):
      self.assertFalse(timer.should_trigger_for_step(step))
    self.assertTrue(timer.should_trigger_for_step(4))

  def test_update_last_triggered_step(self):
    timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=1)
    # The very first update has no previous trigger to measure from.
    secs, steps = timer.update_last_triggered_step(1)
    self.assertEqual(None, secs)
    self.assertEqual(None, steps)
    secs, steps = timer.update_last_triggered_step(5)
    self.assertLess(0, secs)
    self.assertEqual(4, steps)
    secs, steps = timer.update_last_triggered_step(7)
    self.assertLess(0, secs)
    self.assertEqual(2, steps)
class StopAtStepTest(test.TestCase):
  """Tests for StopAtStepHook's last_step / num_steps stopping criteria."""

  def test_raise_in_both_last_step_and_num_steps(self):
    # The two stopping criteria are mutually exclusive.
    with self.assertRaises(ValueError):
      basic_session_run_hooks.StopAtStepHook(num_steps=10, last_step=20)

  def test_stop_based_on_last_step(self):
    hook = basic_session_run_hooks.StopAtStepHook(last_step=10)
    with ops.Graph().as_default():
      step_var = variables.get_or_create_global_step()
      noop = control_flow_ops.no_op()
      hook.begin()
      with session_lib.Session() as sess:
        hooked = monitored_session._HookedSession(sess, [hook])
        # Below last_step: keep running.
        sess.run(state_ops.assign(step_var, 5))
        hooked.run(noop)
        self.assertFalse(hooked.should_stop())
        sess.run(state_ops.assign(step_var, 9))
        hooked.run(noop)
        self.assertFalse(hooked.should_stop())
        # Exactly at last_step: stop.
        sess.run(state_ops.assign(step_var, 10))
        hooked.run(noop)
        self.assertTrue(hooked.should_stop())
        # Past last_step: stops again even after forcing the flag back.
        sess.run(state_ops.assign(step_var, 11))
        hooked._should_stop = False
        hooked.run(noop)
        self.assertTrue(hooked.should_stop())

  def test_stop_based_on_num_step(self):
    hook = basic_session_run_hooks.StopAtStepHook(num_steps=10)
    with ops.Graph().as_default():
      step_var = variables.get_or_create_global_step()
      noop = control_flow_ops.no_op()
      hook.begin()
      with session_lib.Session() as sess:
        hooked = monitored_session._HookedSession(sess, [hook])
        # First observed step is 5; with num_steps=10 the hook keeps running
        # through step 13 and stops once the step reaches 14.
        sess.run(state_ops.assign(step_var, 5))
        hooked.run(noop)
        self.assertFalse(hooked.should_stop())
        sess.run(state_ops.assign(step_var, 13))
        hooked.run(noop)
        self.assertFalse(hooked.should_stop())
        sess.run(state_ops.assign(step_var, 14))
        hooked.run(noop)
        self.assertTrue(hooked.should_stop())
        # Still stops past the threshold, even after forcing the flag back.
        sess.run(state_ops.assign(step_var, 15))
        hooked._should_stop = False
        hooked.run(noop)
        self.assertTrue(hooked.should_stop())
class LoggingTensorHookTest(test.TestCase):
  # Verifies that LoggingTensorHook forwards tensor values to tf_logging.info
  # at the configured cadence (every N steps or every N seconds).

  def setUp(self):
    # Mock out logging calls so we can verify whether correct tensors are being
    # monitored.
    self._actual_log = tf_logging.info
    self.logged_message = None

    def mock_log(*args, **kwargs):
      # Capture the logged args, then delegate to the real logger.
      self.logged_message = args
      self._actual_log(*args, **kwargs)

    tf_logging.info = mock_log

  def tearDown(self):
    # Restore the real tf_logging.info patched in setUp.
    tf_logging.info = self._actual_log

  def test_illegal_args(self):
    # every_n_iter must be a positive integer.
    with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):
      basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=0)
    with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):
      basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=-10)
    # Exactly one of every_n_iter / every_n_secs must be supplied.
    with self.assertRaisesRegexp(ValueError, 'xactly one of'):
      basic_session_run_hooks.LoggingTensorHook(
          tensors=['t'], every_n_iter=5, every_n_secs=5)
    with self.assertRaisesRegexp(ValueError, 'xactly one of'):
      basic_session_run_hooks.LoggingTensorHook(tensors=['t'])

  def test_print_every_n_steps(self):
    with ops.Graph().as_default(), session_lib.Session() as sess:
      t = constant_op.constant(42.0, name='foo')
      train_op = constant_op.constant(3)
      hook = basic_session_run_hooks.LoggingTensorHook(
          tensors=[t.name], every_n_iter=10)
      hook.begin()
      mon_sess = monitored_session._HookedSession(sess, [hook])
      sess.run(variables_lib.global_variables_initializer())
      # The very first run always logs.
      mon_sess.run(train_op)
      self.assertRegexpMatches(str(self.logged_message), t.name)
      # Three full cycles: 9 quiet runs, then the 10th run logs again.
      for j in range(3):
        _ = j
        self.logged_message = ''
        for i in range(9):
          _ = i
          mon_sess.run(train_op)
          # assertNotRegexpMatches is not supported by python 3.1 and later
          self.assertEqual(str(self.logged_message).find(t.name), -1)
        mon_sess.run(train_op)
        self.assertRegexpMatches(str(self.logged_message), t.name)

  def test_print_every_n_secs(self):
    with ops.Graph().as_default(), session_lib.Session() as sess:
      t = constant_op.constant(42.0, name='foo')
      train_op = constant_op.constant(3)
      hook = basic_session_run_hooks.LoggingTensorHook(
          tensors=[t.name], every_n_secs=1.0)
      hook.begin()
      mon_sess = monitored_session._HookedSession(sess, [hook])
      sess.run(variables_lib.global_variables_initializer())
      # The very first run always logs.
      mon_sess.run(train_op)
      self.assertRegexpMatches(str(self.logged_message), t.name)
      # assertNotRegexpMatches is not supported by python 3.1 and later
      # Within the 1-second window: quiet.
      self.logged_message = ''
      mon_sess.run(train_op)
      self.assertEqual(str(self.logged_message).find(t.name), -1)
      # After the window elapses: logs again.
      time.sleep(1.0)
      self.logged_message = ''
      mon_sess.run(train_op)
      self.assertRegexpMatches(str(self.logged_message), t.name)
class CheckpointSaverHookTest(test.TestCase):
  """Tests for CheckpointSaverHook's save_secs / save_steps scheduling."""

  def setUp(self):
    # Fresh checkpoint directory and a graph whose train_op increments the
    # global step by 1 per run.
    self.model_dir = tempfile.mkdtemp()
    self.graph = ops.Graph()
    with self.graph.as_default():
      self.scaffold = monitored_session.Scaffold()
      self.global_step = variables.get_or_create_global_step()
      self.train_op = state_ops.assign_add(self.global_step, 1)

  def tearDown(self):
    shutil.rmtree(self.model_dir, ignore_errors=True)

  def test_raise_when_saver_and_scaffold_both_missing(self):
    with self.assertRaises(ValueError):
      basic_session_run_hooks.CheckpointSaverHook(self.model_dir)

  def test_raise_when_saver_and_scaffold_both_present(self):
    with self.assertRaises(ValueError):
      basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, saver=self.scaffold.saver, scaffold=self.scaffold)

  def test_raise_in_both_secs_and_steps(self):
    with self.assertRaises(ValueError):
      basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, save_secs=10, save_steps=20)

  def test_raise_in_none_secs_and_steps(self):
    with self.assertRaises(ValueError):
      basic_session_run_hooks.CheckpointSaverHook(self.model_dir)

  def test_save_secs_saves_in_first_step(self):
    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, save_secs=2, scaffold=self.scaffold)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        mon_sess.run(self.train_op)
        # The first step is always checkpointed regardless of save_secs.
        self.assertEqual(1,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))

  def test_save_secs_calls_listeners_at_begin_and_end(self):
    with self.graph.as_default():
      listener = MockCheckpointSaverListener()
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir,
          save_secs=2,
          scaffold=self.scaffold,
          listeners=[listener])
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        mon_sess.run(self.train_op)  # hook runs here
        mon_sess.run(self.train_op)  # hook won't run here, so it does at end
        hook.end(sess)  # hook runs here
      self.assertEqual({
          'begin': 1,
          'before_save': 2,
          'after_save': 2,
          'end': 1
      }, listener.get_counts())

  @test.mock.patch('time.time')
  def test_save_secs_saves_periodically(self, mock_time):
    # Let's have a realistic start time
    current_time = 1484695987.209386

    with self.graph.as_default():
      mock_time.return_value = current_time
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, save_secs=2, scaffold=self.scaffold)
      hook.begin()
      self.scaffold.finalize()

      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])

        mock_time.return_value = current_time
        mon_sess.run(self.train_op)  # Saved.

        mock_time.return_value = current_time + 0.5
        mon_sess.run(self.train_op)  # Not saved.

        self.assertEqual(1,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))

        # Simulate 2.5 seconds of sleep.
        mock_time.return_value = current_time + 2.5
        mon_sess.run(self.train_op)  # Saved.

        mock_time.return_value = current_time + 2.6
        mon_sess.run(self.train_op)  # Not saved.

        mock_time.return_value = current_time + 2.7
        mon_sess.run(self.train_op)  # Not saved.

        self.assertEqual(3,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))

        # Simulate 7.5 more seconds of sleep (10 seconds from start).
        mock_time.return_value = current_time + 10
        mon_sess.run(self.train_op)  # Saved.
        self.assertEqual(6,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))

  def test_save_secs_calls_listeners_periodically(self):
    with self.graph.as_default():
      listener = MockCheckpointSaverListener()
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir,
          save_secs=2,
          scaffold=self.scaffold,
          listeners=[listener])
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        mon_sess.run(self.train_op)  # hook runs here
        mon_sess.run(self.train_op)
        time.sleep(2.5)
        mon_sess.run(self.train_op)  # hook runs here
        mon_sess.run(self.train_op)
        mon_sess.run(self.train_op)
        time.sleep(2.5)
        mon_sess.run(self.train_op)  # hook runs here
        mon_sess.run(self.train_op)  # hook won't run here, so it does at end
        hook.end(sess)  # hook runs here
      self.assertEqual({
          'begin': 1,
          'before_save': 4,
          'after_save': 4,
          'end': 1
      }, listener.get_counts())

  def test_save_steps_saves_in_first_step(self):
    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, save_steps=2, scaffold=self.scaffold)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        mon_sess.run(self.train_op)
        self.assertEqual(1,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))

  def test_save_steps_saves_periodically(self):
    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, save_steps=2, scaffold=self.scaffold)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        mon_sess.run(self.train_op)
        mon_sess.run(self.train_op)
        # Not saved
        self.assertEqual(1,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
        mon_sess.run(self.train_op)
        # saved
        self.assertEqual(3,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
        mon_sess.run(self.train_op)
        # Not saved
        self.assertEqual(3,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
        mon_sess.run(self.train_op)
        # saved
        self.assertEqual(5,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))

  def test_save_saves_at_end(self):
    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, save_secs=2, scaffold=self.scaffold)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        mon_sess.run(self.train_op)
        mon_sess.run(self.train_op)
        hook.end(sess)
        # end() must write a final checkpoint with the latest step.
        self.assertEqual(2,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))

  def test_summary_writer_defs(self):
    # Verify the hook writes the meta graph def to the summary writer.
    fake_summary_writer.FakeSummaryWriter.install()
    writer_cache.FileWriterCache.clear()
    summary_writer = writer_cache.FileWriterCache.get(self.model_dir)

    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, save_steps=2, scaffold=self.scaffold)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        mon_sess.run(self.train_op)
      summary_writer.assert_summaries(
          test_case=self,
          expected_logdir=self.model_dir,
          expected_added_meta_graphs=[
              meta_graph.create_meta_graph_def(
                  graph_def=self.graph.as_graph_def(add_shapes=True),
                  saver_def=self.scaffold.saver.saver_def)
          ])

    fake_summary_writer.FakeSummaryWriter.uninstall()
class StepCounterHookTest(test.TestCase):
  """Tests for StepCounterHook's steps/sec summaries."""

  def setUp(self):
    self.log_dir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.log_dir, ignore_errors=True)

  def test_step_counter_every_n_steps(self):
    with ops.Graph().as_default() as g, session_lib.Session() as sess:
      global_step = variables.get_or_create_global_step()
      train_op = state_ops.assign_add(global_step, 1)
      summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)
      hook = basic_session_run_hooks.StepCounterHook(
          summary_writer=summary_writer, every_n_steps=10)
      hook.begin()
      sess.run(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      for _ in range(30):
        # Sleep so elapsed time is non-zero and steps/sec is finite.
        time.sleep(0.01)
        mon_sess.run(train_op)
      hook.end(sess)
      summary_writer.assert_summaries(
          test_case=self,
          expected_logdir=self.log_dir,
          expected_graph=g,
          expected_summaries={})
      # With every_n_steps=10, summaries land at steps 11 and 21.
      self.assertItemsEqual([11, 21], summary_writer.summaries.keys())
      for step in [11, 21]:
        summary_value = summary_writer.summaries[step][0].value[0]
        self.assertEqual('global_step/sec', summary_value.tag)
        self.assertGreater(summary_value.simple_value, 0)

  def test_step_counter_every_n_secs(self):
    with ops.Graph().as_default() as g, session_lib.Session() as sess:
      global_step = variables.get_or_create_global_step()
      train_op = state_ops.assign_add(global_step, 1)
      summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)
      hook = basic_session_run_hooks.StepCounterHook(
          summary_writer=summary_writer, every_n_steps=None, every_n_secs=0.1)
      hook.begin()
      sess.run(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      mon_sess.run(train_op)
      time.sleep(0.2)
      mon_sess.run(train_op)
      time.sleep(0.2)
      mon_sess.run(train_op)
      hook.end(sess)

      summary_writer.assert_summaries(
          test_case=self,
          expected_logdir=self.log_dir,
          expected_graph=g,
          expected_summaries={})
      self.assertTrue(summary_writer.summaries, 'No summaries were created.')
      self.assertItemsEqual([2, 3], summary_writer.summaries.keys())
      for summary in summary_writer.summaries.values():
        summary_value = summary[0].value[0]
        self.assertEqual('global_step/sec', summary_value.tag)
        self.assertGreater(summary_value.simple_value, 0)

  def test_global_step_name(self):
    # The summary tag is derived from the global step variable's name.
    with ops.Graph().as_default() as g, session_lib.Session() as sess:
      with variable_scope.variable_scope('bar'):
        foo_step = variable_scope.get_variable(
            'foo',
            initializer=0,
            trainable=False,
            collections=[
                ops.GraphKeys.GLOBAL_STEP, ops.GraphKeys.GLOBAL_VARIABLES
            ])
      train_op = state_ops.assign_add(foo_step, 1)
      summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)
      hook = basic_session_run_hooks.StepCounterHook(
          summary_writer=summary_writer, every_n_steps=1, every_n_secs=None)

      hook.begin()
      sess.run(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      mon_sess.run(train_op)
      mon_sess.run(train_op)
      hook.end(sess)

      summary_writer.assert_summaries(
          test_case=self,
          expected_logdir=self.log_dir,
          expected_graph=g,
          expected_summaries={})
      self.assertTrue(summary_writer.summaries, 'No summaries were created.')
      self.assertItemsEqual([2], summary_writer.summaries.keys())
      summary_value = summary_writer.summaries[2][0].value[0]
      self.assertEqual('bar/foo/sec', summary_value.tag)
class SummarySaverHookTest(test.TestCase):
  """Tests for SummarySaverHook's save_steps / save_secs scheduling."""

  def setUp(self):
    test.TestCase.setUp(self)

    self.log_dir = 'log/dir'
    self.summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir)

    # 'my_summary' records an incrementing counter; 'my_summary2' is double it.
    var = variables_lib.Variable(0.0)
    tensor = state_ops.assign_add(var, 1.0)
    tensor2 = tensor * 2
    self.summary_op = summary_lib.scalar('my_summary', tensor)
    self.summary_op2 = summary_lib.scalar('my_summary2', tensor2)

    global_step = variables.get_or_create_global_step()
    self.train_op = state_ops.assign_add(global_step, 1)

  def test_raise_when_scaffold_and_summary_op_both_missing(self):
    with self.assertRaises(ValueError):
      basic_session_run_hooks.SummarySaverHook()

  def test_raise_when_scaffold_and_summary_op_both_present(self):
    with self.assertRaises(ValueError):
      basic_session_run_hooks.SummarySaverHook(
          scaffold=monitored_session.Scaffold(), summary_op=self.summary_op)

  def test_raise_in_both_secs_and_steps(self):
    with self.assertRaises(ValueError):
      basic_session_run_hooks.SummarySaverHook(
          save_secs=10, save_steps=20, summary_writer=self.summary_writer)

  def test_raise_in_none_secs_and_steps(self):
    with self.assertRaises(ValueError):
      basic_session_run_hooks.SummarySaverHook(
          save_secs=None, save_steps=None, summary_writer=self.summary_writer)

  def test_save_steps(self):
    hook = basic_session_run_hooks.SummarySaverHook(
        save_steps=8,
        summary_writer=self.summary_writer,
        summary_op=self.summary_op)

    with self.test_session() as sess:
      hook.begin()
      sess.run(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      for _ in range(30):
        mon_sess.run(self.train_op)
      hook.end(sess)

    # Saved at step 1 and then every 8 steps: 1, 9, 17, 25.
    self.summary_writer.assert_summaries(
        test_case=self,
        expected_logdir=self.log_dir,
        expected_summaries={
            1: {
                'my_summary': 1.0
            },
            9: {
                'my_summary': 2.0
            },
            17: {
                'my_summary': 3.0
            },
            25: {
                'my_summary': 4.0
            },
        })

  def test_multiple_summaries(self):
    hook = basic_session_run_hooks.SummarySaverHook(
        save_steps=8,
        summary_writer=self.summary_writer,
        summary_op=[self.summary_op, self.summary_op2])

    with self.test_session() as sess:
      hook.begin()
      sess.run(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      for _ in range(10):
        mon_sess.run(self.train_op)
      hook.end(sess)

    # Both ops are evaluated together on each save.
    self.summary_writer.assert_summaries(
        test_case=self,
        expected_logdir=self.log_dir,
        expected_summaries={
            1: {
                'my_summary': 1.0,
                'my_summary2': 2.0
            },
            9: {
                'my_summary': 2.0,
                'my_summary2': 4.0
            },
        })

  def test_save_secs_saving_once_every_step(self):
    hook = basic_session_run_hooks.SummarySaverHook(
        save_secs=0.5,
        summary_writer=self.summary_writer,
        summary_op=self.summary_op)

    with self.test_session() as sess:
      hook.begin()
      sess.run(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      for _ in range(4):
        mon_sess.run(self.train_op)
        # Sleep longer than save_secs so every step triggers a save.
        time.sleep(0.5)
      hook.end(sess)

    self.summary_writer.assert_summaries(
        test_case=self,
        expected_logdir=self.log_dir,
        expected_summaries={
            1: {
                'my_summary': 1.0
            },
            2: {
                'my_summary': 2.0
            },
            3: {
                'my_summary': 3.0
            },
            4: {
                'my_summary': 4.0
            },
        })

  def test_save_secs_saving_once_every_three_steps(self):
    hook = basic_session_run_hooks.SummarySaverHook(
        save_secs=0.9,
        summary_writer=self.summary_writer,
        summary_op=self.summary_op)

    with self.test_session() as sess:
      hook.begin()
      sess.run(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      for _ in range(8):
        mon_sess.run(self.train_op)
        # 0.3s per step vs save_secs=0.9 -> a save roughly every 3 steps.
        time.sleep(0.3)
      hook.end(sess)

    self.summary_writer.assert_summaries(
        test_case=self,
        expected_logdir=self.log_dir,
        expected_summaries={
            1: {
                'my_summary': 1.0
            },
            4: {
                'my_summary': 2.0
            },
            7: {
                'my_summary': 3.0
            },
        })
class GlobalStepWaiterHookTest(test.TestCase):
  """Tests that GlobalStepWaiterHook blocks until the target step is reached."""

  def test_not_wait_for_step_zero(self):
    with ops.Graph().as_default():
      variables.get_or_create_global_step()
      hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=0)
      hook.begin()
      with session_lib.Session() as sess:
        # Before run should return without waiting gstep increment.
        hook.before_run(
            session_run_hook.SessionRunContext(
                original_args=None, session=sess))

  def test_wait_for_step(self):
    with ops.Graph().as_default():
      gstep = variables.get_or_create_global_step()
      hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=1000)
      hook.begin()
      with session_lib.Session() as sess:
        sess.run(variables_lib.global_variables_initializer())
        # Run before_run on a daemon thread; it should block until the
        # global step reaches 1000.
        waiter = threading.Thread(
            target=hook.before_run,
            args=(session_run_hook.SessionRunContext(
                original_args=None, session=sess),))
        waiter.daemon = True
        waiter.start()
        time.sleep(1.0)
        self.assertTrue(waiter.is_alive())
        sess.run(state_ops.assign(gstep, 500))
        time.sleep(1.0)
        # Still below the target: thread remains blocked.
        self.assertTrue(waiter.is_alive())
        sess.run(state_ops.assign(gstep, 1100))
        time.sleep(1.2)
        # Target exceeded: before_run returns and the thread exits.
        self.assertFalse(waiter.is_alive())
class FinalOpsHookTest(test.TestCase):
  """Tests that FinalOpsHook evaluates final_ops at session end."""

  def test_final_ops_is_scalar_tensor(self):
    with ops.Graph().as_default():
      expected_value = 4
      final_ops = constant_op.constant(expected_value)

      hook = basic_session_run_hooks.FinalOpsHook(final_ops)
      hook.begin()

      with session_lib.Session() as session:
        hook.end(session)
        # end() evaluates final_ops and stores the result.
        self.assertEqual(expected_value,
                         hook.final_ops_values)

  def test_final_ops_is_tensor(self):
    with ops.Graph().as_default():
      expected_values = [1, 6, 3, 5, 2, 4]
      final_ops = constant_op.constant(expected_values)

      hook = basic_session_run_hooks.FinalOpsHook(final_ops)
      hook.begin()

      with session_lib.Session() as session:
        hook.end(session)
        self.assertListEqual(expected_values,
                             hook.final_ops_values.tolist())

  def test_final_ops_with_dictionary(self):
    with ops.Graph().as_default():
      expected_values = [4, -3]
      final_ops = array_ops.placeholder(dtype=dtypes.float32)
      final_ops_feed_dict = {final_ops: expected_values}

      hook = basic_session_run_hooks.FinalOpsHook(
          final_ops, final_ops_feed_dict)
      hook.begin()

      with session_lib.Session() as session:
        hook.end(session)
        self.assertListEqual(expected_values,
                             hook.final_ops_values.tolist())
# Standard TensorFlow test entry point: run every test case in this module.
if __name__ == '__main__':
  test.main()
|
|
import os
import time
from datetime import datetime
import pytest
from pykechain.enums import ServiceEnvironmentVersion, ServiceExecutionStatus, ServiceType
from pykechain.exceptions import APIError, IllegalArgumentError, MultipleFoundError, NotFoundError
# new in 1.13
from pykechain.models import Service
from pykechain.utils import temp_chdir
from tests.classes import TestBetamax
class TestServiceSetup(TestBetamax):
    """Only for test setup, will create a service with a debug script

    :ivar service: service with a debug.py script
    """

    def _create_service(self, name=None):
        """Creates a service with name, and adds a test_upload_script.py (debugging)"""
        # setUp
        new_service = self.project.create_service(
            name=name or "Test upload script to service",
            description="Only used for testing - you can safely remove this",
            environment_version=ServiceEnvironmentVersion.PYTHON_3_8,
        )
        upload_path = os.path.join(
            self.test_assets_dir,
            "tests",
            "files",
            "test_upload_script_to_service",
            "test_upload_script.py",
        )
        # testing: the uploaded script's filename must be reflected on the service.
        new_service.upload(pkg_path=upload_path)
        self.assertEqual(new_service._json_data["script_file_name"], "test_upload_script.py")
        return new_service

    def setUp(self):
        super().setUp()
        # Repository root (two levels up from this file, with forward slashes).
        self.test_assets_dir = os.path.dirname(
            os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")
        )
        self.service = self._create_service()

    def tearDown(self):
        # Remove the service created in setUp before the base class cleanup.
        self.service.delete()
        super().tearDown()
class TestServices(TestBetamax):
    """Tests for retrieving, executing and editing Services in a scope."""

    def _create_service(self, name=None):
        """Creates a service with name, and adds a test_upload_script.py (debugging)"""
        # setUp
        new_service = self.project.create_service(
            name=name or "Test upload script to service",
            description="Only used for testing - you can safely remove this",
        )
        upload_path = os.path.join(
            self.test_assets_dir, "tests", "files", "uploaded", "test_upload_script.py"
        )
        # testing
        new_service.upload(pkg_path=upload_path)
        new_service.refresh()
        self.assertEqual(new_service._json_data["script_file_name"], "test_upload_script.py")
        return new_service

    def setUp(self):
        super().setUp()
        # Repository root (two levels up from this file, with forward slashes).
        self.test_assets_dir = os.path.dirname(
            os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")
        )

    def test_retrieve_services(self):
        self.assertTrue(self.project.services())

    def test_retrieve_services_with_kwargs(self):
        # setUp
        retrieved_services_with_kwargs = self.project.services(
            script_type=ServiceType.PYTHON_SCRIPT
        )

        # testing: the script_type filter must be honoured for every result.
        self.assertTrue(retrieved_services_with_kwargs)
        for service in retrieved_services_with_kwargs:
            self.assertEqual(ServiceType.PYTHON_SCRIPT, service._json_data["script_type"])

    def test_retrieve_service_but_found_multiple(self):
        # A filter matching several services must raise, not pick one.
        with self.assertRaises(MultipleFoundError):
            self.project.service(script_type=ServiceType.PYTHON_SCRIPT)

    def test_retrieve_single_service(self):
        services = self.project.services()
        self.assertTrue(services)
        service_1 = services[0]
        self.assertEqual(self.project.service(pk=service_1.id), service_1)

    def test_retrieve_service_by_name(self):
        service_name = "Service Gears - Successful"
        service = self.project.service(name=service_name)
        self.assertTrue(service)
        self.assertEqual(service.name, service_name)

    def test_properties_of_service(self):
        service_name = "Service Gears - Successful with Package"
        service = self.project.service(name=service_name)

        # All public attributes should be populated after retrieval.
        for key, value in service.__dict__.items():
            if str(key).startswith("_"):
                continue
            # Verified on is an optional variable
            if key == "verified_on":
                continue
            with self.subTest(msg=f"{key}: {value}"):
                self.assertIsNotNone(value)

    @pytest.mark.skipif(
        "os.getenv('TRAVIS', False) or os.getenv('GITHUB_ACTIONS', False)",
        reason="Skipping tests when using Travis or Github Actions, as not Auth can be provided",
    )
    def test_debug_service_execute(self):
        service_name = "Service Gears - Successful"
        service = self.project.service(name=service_name)

        service_execution = service.execute()
        self.assertTrue(service_execution.status in ServiceExecutionStatus.values())
        if service_execution.status in (
            ServiceExecutionStatus.LOADING,
            ServiceExecutionStatus.RUNNING,
        ):
            # sleep 2000 ms to let the execution progress, then re-check status.
            time.sleep(2)
            service_execution.refresh()
            self.assertTrue(service_execution.status in ServiceExecutionStatus.values())

    @pytest.mark.skipif(
        "os.getenv('TRAVIS', False) or os.getenv('GITHUB_ACTIONS', False)",
        reason="Skipping tests when using Travis or Github Actions, as not Auth can be provided",
    )
    def test_service_context(self):
        # Executing with an activity_id must record that activity on the execution.
        some_activity = self.project.activities()[0]
        service = self.project.service(name="Service Gears - Successful")
        service_execution = service.execute(activity_id=some_activity.id)
        self.assertEqual(some_activity.id, service_execution.activity_id)

    def test_update_service(self):
        # setUp
        service_name = "Service Gears - Successful"
        service = self.project.service(name=service_name)
        version_before = str(service.version)
        name_before = service_name
        name_after = "Pykechain needs no debugging"
        description_before = str(service._json_data["description"])
        description_after = "Pykechain is way too good for that"
        version_after = "-latest"

        # testing
        service.edit(name=name_after, description=description_after, version=version_after)
        service.refresh()
        self.assertEqual(service.name, name_after)
        self.assertEqual(service._json_data["description"], description_after)
        self.assertEqual(service.version, version_after)

        # tearDown: restore the original values so other tests see them.
        service.edit(name=name_before, description=description_before, version=version_before)

    # test added due to #847 - providing no inputs overwrites values
    def test_edit_service_clear_values(self):
        # setup
        initial_name = "Service testing editing"
        initial_description = "Description test"
        initial_version = "1.0"
        initial_run_as = "kenode"
        initial_trusted = False
        initial_type = ServiceType.NOTEBOOK
        initial_env = ServiceEnvironmentVersion.PYTHON_3_8_NOTEBOOKS
        # TODO: to be removed in later versions of pykechain, only for temporal compatibility
        compatibility_env = "3.7_notebook"

        self.service = self.project.create_service(name=initial_name)
        self.service.edit(
            name=initial_name,
            description=initial_description,
            version=initial_version,
            type=initial_type,
            environment_version=initial_env,
            run_as=initial_run_as,
            trusted=initial_trusted,
        )

        # Edit without mentioning values, everything should stay the same
        new_name = "Changed service name"
        self.service.edit(name=new_name)

        # testing
        self.assertEqual(self.service.name, new_name)
        self.assertEqual(self.service.description, initial_description)
        self.assertEqual(self.service.version, initial_version)
        self.assertEqual(self.service.run_as, initial_run_as)
        self.assertEqual(self.service.type, initial_type)
        self.assertIn(self.service.environment, (initial_env, compatibility_env))
        self.assertEqual(self.service.trusted, initial_trusted)

        # Edit with clearing the values, name and status cannot be cleared
        self.service.edit(
            name=None,
            description=None,
            version=None,
            type=None,
            environment_version=None,
            run_as=None,
            trusted=None,
        )

        self.assertEqual(self.service.name, new_name)
        self.assertEqual(self.service.description, "")
        self.assertEqual(self.service.version, "")
        self.assertEqual(self.service.type, initial_type)
        self.assertIn(self.service.environment, (initial_env, compatibility_env))
        self.assertEqual(self.service.run_as, initial_run_as)
        self.assertEqual(self.service.trusted, initial_trusted)

        # teardown
        self.service.delete()

    # test added in 3.1
    def test_retrieve_services_with_refs(self):
        # setup
        service_ref = "service-gears-successful"
        service_name = "Service Gears - Successful"
        service = self.project.service(ref=service_ref)

        # testing
        self.assertIsInstance(service, Service)
        self.assertTrue(service.name, service_name)
class TestServicesWithCustomUploadedService(TestServiceSetup):
    """Tests that operate on the service created by TestServiceSetup."""

    def test_update_service_incorrect_name(self):
        # name must be a string, not an int.
        with self.assertRaises(IllegalArgumentError):
            self.service.edit(name=1234)

    def test_update_service_incorrect_description(self):
        # description must be a string, not a bool.
        with self.assertRaises(IllegalArgumentError):
            self.service.edit(description=True)

    def test_update_service_incorrect_version(self):
        # version must be a string, not a list.
        with self.assertRaises(IllegalArgumentError):
            self.service.edit(version=["2.0"])

    def test_service_refresh_from_kechain(self):
        version_after = "-latest"
        self.service.edit(version=version_after)
        # refresh() must pull the server-side change back into the object.
        self.service.refresh()
        self.assertEqual(self.service.version, version_after)

    def test_get_executions_of_service(self):
        self.assertTrue(len(self.service.get_executions()) >= 0)

    def test_create_and_delete_service(self):
        service_name = "Test service creation"
        new_service = self.project.create_service(service_name)
        self.assertTrue(new_service.name, service_name)
        self.assertTrue(new_service)

        # tearDown: after delete, retrieval by pk must fail.
        new_service.delete()
        with self.assertRaisesRegex(NotFoundError, "fit criteria"):
            self.project.service(pk=new_service.id)

    def test_create_service_with_wrong_service_type(self):
        with self.assertRaisesRegex(IllegalArgumentError, "must be an option from enum"):
            self.project.create_service(
                name="This service type does not exist", service_type="RUBY_SCRIPT"
            )

    def test_create_service_with_wrong_environment_version(self):
        with self.assertRaisesRegex(IllegalArgumentError, "must be an option from enum"):
            self.project.create_service(
                name="This env version does not exist", environment_version="0.0"
            )

    def test_save_service_script(self):
        # setUp: save_as should write exactly one file into the target dir.
        with temp_chdir() as target_dir:
            self.service.save_as(target_dir=target_dir)
            self.assertEqual(len(os.listdir(target_dir)), 1)

    def test_upload_script_to_service(self):
        # setUp
        upload_path = os.path.join(
            self.test_assets_dir,
            "tests",
            "files",
            "test_upload_script_to_service",
            "test_upload_script.py",
        )

        # testing
        self.service.upload(pkg_path=upload_path)

        # second upload modified filename
        self.assertRegex(self.service._json_data["script_file_name"], r"test_upload_\w+.py")

    def test_upload_script_to_service_with_wrong_path(self):
        # setUp
        upload_path = os.path.join(
            self.test_assets_dir, "tests", "files", "uploaded", "this_file_does_exists.not"
        )

        # testing
        with self.assertRaisesRegex(OSError, "Could not locate python package to upload in"):
            self.service.upload(pkg_path=upload_path)
# new in 1.13
class TestServiceExecutions(TestServiceSetup):
    """Tests for retrieving and controlling ServiceExecutions."""

    def test_retrieve_service_executions(self):
        self.assertTrue(self.project.service_executions())

    def test_retrieve_service_executions_with_kwargs(self):
        # setUp
        limit = 15
        retrieved_executions_with_kwargs = self.project.service_executions(limit=limit)

        # testing: the limit kwarg caps the number of results.
        self.assertTrue(len(retrieved_executions_with_kwargs) <= limit)

    def test_retrieve_single_service_execution(self):
        service_executions = self.project.service_executions()
        self.assertTrue(service_executions)
        service_execution_1 = service_executions[0]
        self.assertEqual(
            self.project.service_execution(pk=service_execution_1.id), service_execution_1
        )

    def test_retrieve_single_service_execution_but_found_none(self):
        with self.assertRaises(NotFoundError):
            self.project.service_execution(
                username="No service execution as this user does not exist"
            )

    def test_retrieve_single_service_execution_but_found_multiple(self):
        # setUp: run the service twice so two executions exist.
        service_execution = self.service.execute()
        while service_execution.status in [
            ServiceExecutionStatus.LOADING,
            ServiceExecutionStatus.RUNNING,
        ]:
            time.sleep(0.500)  # poll every 500 ms until the first run finishes
            service_execution.refresh()
        self.service.execute()

        # testing
        with self.assertRaises(MultipleFoundError):
            self.project.service_execution(service=self.service.id)

    def test_service_execution_conflict(self):
        # setUp: a running execution blocks a second concurrent execute().
        self.service.execute()

        # testing
        with self.assertRaisesRegex(APIError, "Conflict: Could not execute"):
            self.service.execute()

    def test_properties_of_service_execution(self):
        service_name = "Service Gears - Successful"
        service = self.project.service(name=service_name)

        service_executions = self.project.service_executions(service=service.id, limit=1)
        self.assertTrue(service_executions)

        service_execution = service_executions[0]

        self.assertIsInstance(service_execution.service, Service)
        self.assertIsInstance(service_execution.started_at, datetime)
        self.assertIsInstance(service_execution.finished_at, datetime)

        # All public attributes should be populated after retrieval.
        for key, value in service_execution.__dict__.items():
            if str(key).startswith("_"):
                continue
            # Originating activity is an optional variable
            if key == "activity_id":
                continue
            with self.subTest(msg=f"{key}: {value}"):
                self.assertIsNotNone(value)

    @pytest.mark.skipif(
        "os.getenv('TRAVIS', False) or os.getenv('GITHUB_ACTIONS', False)",
        reason="Skipping tests when using Travis or Github Actions, as not Auth can be provided",
    )
    def test_debug_service_execution_terminate(self):
        service_execution = self.service.execute()
        self.assertEqual(service_execution.status, ServiceExecutionStatus.LOADING)
        time.sleep(2)
        service_execution.refresh()
        self.assertEqual(service_execution.status, ServiceExecutionStatus.RUNNING)
        service_execution.terminate()

        self.assertNotEqual(
            service_execution.status,
            ServiceExecutionStatus.FAILED,
            "The service execution is status 'FAILED', please upload working debugging scripts"
            " before running the tests",
        )

    def test_log_of_service_execution(self):
        # setUp: give the execution time to produce a log, then download it.
        service_execution = self.service.execute()
        time.sleep(5)
        with temp_chdir() as target_dir:
            service_execution.get_log(target_dir=target_dir)
            log_file = os.path.join(target_dir, "log.txt")
            self.assertTrue(log_file)
|
|
from __future__ import print_function
import subprocess,os,shutil,sys
# Ensure the scratch directory for third-party sources (e.g. Cython) exists.
if not os.path.exists('_deps'):
    os.mkdir('_deps')
def InstallPrereqs():
    """Get the requirements for CoolProp.

    Downloads the Cython master branch from GitHub into
    ``_deps/cython-master`` and installs it into every interpreter listed
    in the module-level ``PYTHONVERSIONS``.
    """
    # Collect the source for Cython and put in _deps/cython-master.
    # urllib.urlretrieve moved to urllib.request in Python 3; import the
    # right one so this script runs on both interpreters.
    try:
        from urllib.request import urlretrieve  # Python 3
    except ImportError:
        from urllib import urlretrieve  # Python 2
    import zipfile
    print('getting cython sources')
    urlretrieve('https://github.com/cython/cython/archive/master.zip', filename='master.zip')
    with zipfile.ZipFile('master.zip', 'r') as myzip:
        myzip.extractall(path='_deps')
    # The extracted tree is all we need; drop the archive.
    os.remove('master.zip')
    # Install Cython into every configured Python interpreter.
    for python_install in PYTHONVERSIONS:
        for cwd in ['_deps/cython-master']:
            print(subprocess.check_output([python_install, 'setup.py', 'install'], cwd=cwd))
def PYPI():
    """Build and upload binary wheels plus an sdist to PyPI.

    Assumes a Windows host with 32- and 64-bit Miniconda installs that each
    provide py27/py33/py34 conda environments.
    """
    path_32bit = "c:\\Miniconda32bit\\Scripts\\"
    path_64bit = 'c:\\Miniconda\\Scripts\\'
    envs = ['py27', 'py33', 'py34']
    # Build all the wheels
    for env in envs:
        for path in [path_64bit, path_32bit]:
            # Actually build the wheel now (activate the env, then build+upload
            # in the same shell so the env's python is used).
            subprocess.check_call(path + 'activate ' + env + ' ' + '&& python --version && python setup.py bdist_wheel upload', cwd=os.path.join('wrappers', 'Python'), stdout=sys.stdout, shell=True)
    # If we get this far, all the wheels built
    subprocess.call(['python', 'setup.py', 'sdist', 'upload'], cwd=os.path.join('wrappers', 'Python'))
def Source():
    """Build the Python source distribution into dist_temp/Python."""
    output = subprocess.check_output(
        ['python', 'setup.py', 'sdist', '--dist-dir=../../dist_temp/Python'],
        shell=True, cwd=os.path.join('wrappers', 'Python'))
    print(output)
def DLL_and_Excel():
    """Build a DLL using __stdcall calling convention and collect the Excel
    wrapper files into dist_temp/'Excel and DLL'.

    The doc string was previously placed after the first ``print`` call, so
    it was a no-op string expression rather than the function's docstring.
    """
    print('DLL and Excel')
    subprocess.check_output(['BuildDLL'], shell=True, cwd=os.path.join('wrappers', 'Excel'))
    subprocess.check_output(['BuildDLLx64'], shell=True, cwd=os.path.join('wrappers', 'Excel'))
    # Collect the zip file and parts
    dest = os.path.join('dist_temp', 'Excel and DLL')
    try:
        os.makedirs(dest)
    except os.error:
        pass
    shutil.copy2(os.path.join('CoolProp', 'CoolProp.h'), os.path.join(dest, 'CoolProp.h'))
    for fname in ('CoolProp.dll', 'CoolProp_x64.dll', 'CoolProp.xlam',
                  'CoolProp.xla', 'TestExcel.xlsx', 'README.rst'):
        shutil.copy2(os.path.join('wrappers', 'Excel', fname), os.path.join(dest, fname))
def Octave():
    """Build the Octave wrapper and collect the .oct file plus docs."""
    print('Octave')
    dest = os.path.join('dist_temp', 'Octave')
    try:
        os.makedirs(dest)
        os.makedirs(os.path.join(dest, '3.6.4'))
    except os.error:
        pass
    subprocess.check_output(['OctaveBuilder.bat'], shell=True, cwd=os.path.join('wrappers', 'Octave'))
    shutil.copy2(os.path.join('wrappers', 'Octave', '3.6.4', 'CoolProp.oct'),
                 os.path.join(dest, '3.6.4', 'CoolProp.oct'))
    shutil.copy2(os.path.join('wrappers', 'Octave', 'example.m'), os.path.join(dest, 'example.m'))
    shutil.copy2(os.path.join('wrappers', 'Octave', 'README.rst'), os.path.join(dest, 'README.rst'))
def Csharp():
    """Build the C# wrapper DLL and collect the readme plus archive."""
    print('C#')
    dest = os.path.join('dist_temp', 'C#')
    try:
        os.makedirs(dest)
    except os.error:
        pass
    subprocess.check_output(['BuildCsharpDLL.bat'], shell=True, cwd=os.path.join('wrappers', 'C#'))
    for fname in ('readme.txt', 'Csharp.7z'):
        shutil.copy2(os.path.join('wrappers', 'C#', fname), os.path.join(dest, fname))
def MATLAB():
    """Build 32- and 64-bit MATLAB mex files and collect them."""
    print('MATLAB')
    src = os.path.join('wrappers', 'MATLAB')
    dest = os.path.join('dist_temp', 'MATLAB')
    try:
        os.makedirs(dest)
    except os.error:
        pass
    # 32-bit MATLAB first, then the default (64-bit) install on the PATH
    subprocess.Popen(['C:\\MATLAB_32bit\\bin\\matlab', '-wait', '-nodesktop', '-nosplash',
                      '-nojvm', '-r', '"MATLABBuilder; quit"'],
                     shell=True, cwd=src).wait()
    subprocess.Popen(['matlab', '-nojvm', '-nodesktop', '-nosplash', '-wait',
                      '-r', '"MATLABBuilder; quit"'],
                     shell=True, cwd=src).wait()
    for fname in ('PropsSI.mexw32', 'PropsSI.mexw64', 'Props.mexw32', 'Props.mexw64',
                  'HAProps.mexw32', 'HAProps.mexw64', 'README.rst'):
        shutil.copy2(os.path.join(src, fname), os.path.join(dest, fname))
    # the sample script is renamed on the way out
    shutil.copy2(os.path.join(src, 'MATLAB_sample.m'), os.path.join(dest, 'example.m'))
def Labview():
    """Build the Labview DLL and collect the Labview wrapper files.

    (The unused ``import CoolProp`` / ``version`` local of the original
    was removed -- the version number was never used here.)
    """
    print('Labview')
    dest = os.path.join('dist_temp', 'Labview')
    try:
        os.makedirs(dest)
    except os.error:
        pass
    process = subprocess.Popen(['BuildDLL.bat'], shell=True, cwd=os.path.join('wrappers', 'Labview'))
    process.wait()
    for fname in ('CoolProp.dll', 'CoolProp.llb', 'CoolProp.vi', 'README.rst'):
        shutil.copy2(os.path.join('wrappers', 'Labview', fname), os.path.join(dest, fname))
def EES():
    """Build the EES DLF, package it with InnoSetup, and collect the output.

    (Removed the unused ``import CoolProp`` / ``version`` local.)
    """
    print('EES')
    ees_src = os.path.join('wrappers', 'EES')
    try:
        os.makedirs(os.path.join('dist_temp', 'EES'))
    except os.error:
        pass
    subprocess.check_output('BuildDLF.bat', shell=True, cwd=ees_src)
    # Make an installer using InnoSetup.  Raw string: the original relied on
    # '\P', '\I', '\C' not being recognized escapes, which is fragile.
    subprocess.call([r'C:\Program Files (x86)\Inno Setup 5\Compil32.exe', '/cc', 'BuildInnoInstaller.iss'],
                    cwd=ees_src)
    shutil.copy2(os.path.join(ees_src, 'Output', 'SetupCOOLPROP_EES.exe'),
                 os.path.join('dist_temp', 'EES', 'SetupCOOLPROP_EES.exe'))
    shutil.copy2(os.path.join(ees_src, 'CoolProp.htm'), os.path.join('dist_temp', 'EES', 'CoolProp.htm'))
    shutil.copy2(os.path.join(ees_src, 'README.rst'), os.path.join('dist_temp', 'EES', 'README.rst'))
def Javascript():
    """Build the Javascript wrapper and collect its files.

    (Removed the unused ``import CoolProp`` / ``version`` local.)
    """
    print('Javascript')
    dest = os.path.join('dist_temp', 'Javascript')
    try:
        os.makedirs(dest)
    except os.error:
        pass
    subprocess.check_output('python build.py', shell=True, cwd=os.path.join('wrappers', 'Javascript'))
    for fname in ('index.html', 'coolprop.js', 'README.rst'):
        shutil.copy2(os.path.join('wrappers', 'Javascript', fname), os.path.join(dest, fname))
def Java():
    """Build 32- and 64-bit Java wrapper DLLs, zip the sources, collect files."""
    print('Java')
    import CoolProp
    version = CoolProp.__version__
    java_src = os.path.join('wrappers', 'Java')
    for arch in ('win32', 'x64'):
        try:
            os.makedirs(os.path.join('dist_temp', 'Java', arch))
        except os.error as E:
            print(E)
    subprocess.check_output('build_win32.bat', shell=True, cwd=java_src)
    subprocess.check_output('build_x64.bat', shell=True, cwd=java_src)
    subprocess.check_call(['7z', 'a', '-r', 'dist_temp/Java/sources.zip', 'wrappers/Java/*.java'])
    for arch in ('win32', 'x64'):
        shutil.copy2(os.path.join(java_src, arch, 'CoolProp.dll'),
                     os.path.join('dist_temp', 'Java', arch, 'CoolProp.dll'))
    shutil.copy2(os.path.join(java_src, 'README.rst'), os.path.join('dist_temp', 'Java', 'README.rst'))
def Python():
    """Install CoolProp into the system python, then build Windows installers
    inside each conda environment (both 64- and 32-bit Miniconda)."""
    print('Python')
    py_cwd = os.path.join('wrappers', 'Python')
    subprocess.call(['python', 'setup.py', 'install'], shell=True, cwd=py_cwd)
    scripts_dirs = ['c:\\Miniconda\\Scripts\\', "c:\\Miniconda32bit\\Scripts\\"]
    # Build all the installers
    for env in ('py27', 'py33', 'py34'):
        for scripts in scripts_dirs:
            # Install cython and pip if needed
            subprocess.check_call(scripts + 'conda install -n ' + env + ' Cython pip',
                                  cwd=py_cwd, stdout=sys.stdout, shell=True)
            # Install wheel package using pip
            subprocess.check_call(scripts + 'activate ' + env + ' && pip install wheel && deactivate',
                                  cwd=py_cwd, stdout=sys.stdout, shell=True)
            # Build the installer
            subprocess.check_call([scripts + 'activate', env, '&&', 'python', 'setup.py',
                                   'bdist', '--format=wininst', '--dist-dir=../../dist_temp/Python'],
                                  shell=True, cwd=py_cwd, stdout=sys.stdout)
def Maple():
    """Build the Maple wrapper DLL and collect the sample worksheets."""
    print('Maple')
    dest = os.path.join('dist_temp', 'Maple')
    try:
        os.makedirs(dest)
    except os.error:
        pass
    process = subprocess.check_output(['BuildDLLx64.bat'], shell=True, cwd=os.path.join('wrappers', 'Maple'))
    for fname in ('Analysis of a Refrigeration Cycle with CoolProp.mw',
                  'sample_file.mw', 'CoolProp_x64.dll'):
        shutil.copy2(os.path.join('wrappers', 'Maple', fname), os.path.join(dest, fname))
def Mathematica():
    """Build the Mathematica wrapper DLL and collect the example files."""
    print('Mathematica')
    dest = os.path.join('dist_temp', 'Mathematica')
    try:
        os.makedirs(dest)
    except os.error:
        pass
    process = subprocess.check_output(['BuildDLL.bat'], shell=True, cwd=os.path.join('wrappers', 'Mathematica'))
    for fname in ('README.rst', 'example.nb', 'CoolProp.dll'):
        shutil.copy2(os.path.join('wrappers', 'Mathematica', fname), os.path.join(dest, fname))
def Scilab():
    """Build the Scilab wrapper DLLs and collect them with the sample."""
    print('Scilab')
    dest = os.path.join('dist_temp', 'Scilab')
    try:
        os.makedirs(dest)
    except os.error:
        pass
    process = subprocess.check_output(['BuildDLL.bat'], shell=True, cwd=os.path.join('wrappers', 'Scilab'))
    for fname in ('README.rst', 'sample.sce', 'CoolProp_x64.dll', 'CoolProp.dll'):
        shutil.copy2(os.path.join('wrappers', 'Scilab', fname), os.path.join(dest, fname))
def LibreOffice():
    """Collect the LibreOffice wrapper files (reuses the Excel build's DLL)."""
    print('LibreOffice')
    dest = os.path.join('dist_temp', 'LibreOffice')
    try:
        os.makedirs(dest)
    except os.error:
        pass
    shutil.copy2(os.path.join('wrappers', 'Excel', 'CoolProp.dll'),
                 os.path.join(dest, 'CoolProp.dll'))
    shutil.copy2(os.path.join('wrappers', 'LibreOffice', 'TestLibreOffice.ods'),
                 os.path.join(dest, 'TestLibreOffice.ods'))
def MathCAD():
    """Build the MathCAD and MathCAD Prime wrappers and collect them."""
    print('MathCAD')
    try:
        os.makedirs(os.path.join('dist_temp', 'MathCAD', 'Prime'))
    except os.error:
        pass
    subprocess.check_output(['BuildDLL.bat'], shell=True, cwd=os.path.join('wrappers', 'MathCAD'))
    subprocess.check_output(['BuildDLL.bat'], shell=True, cwd=os.path.join('wrappers', 'MathCAD', 'Prime'))
    for fname in ('CoolPropMathcadWrapper.dll', 'CoolPropFluidProperties.xmcd', 'README.rst'):
        shutil.copy2(os.path.join('wrappers', 'MathCAD', fname),
                     os.path.join('dist_temp', 'MathCAD', fname))
    shutil.copy2(os.path.join('wrappers', 'MathCAD', 'Prime', 'CoolPropMathcadWrapper.dll'),
                 os.path.join('dist_temp', 'MathCAD', 'Prime', 'CoolPropMathcadWrapper.dll'))
    # BUG FIX: the Prime worksheet is a .mcdx file; it was previously copied
    # to a destination named .xmcd, silently changing its extension.
    shutil.copy2(os.path.join('wrappers', 'MathCAD', 'Prime', 'CoolPropFluidProperties.mcdx'),
                 os.path.join('dist_temp', 'MathCAD', 'Prime', 'CoolPropFluidProperties.mcdx'))
    shutil.copy2(os.path.join('wrappers', 'MathCAD', 'Prime', 'README.rst'),
                 os.path.join('dist_temp', 'MathCAD', 'Prime', 'README.rst'))
# def Modelica():
# print('Modelica')
# try:
# os.makedirs(os.path.join('dist_temp','Modelica'))
# except os.error: pass
#
# process = subprocess.Popen(['BuildLIB-VS2008.bat'],shell=True,cwd=os.path.join('wrappers','Modelica')); process.wait()
# process = subprocess.Popen(['BuildLIB-VS2010.bat'],shell=True,cwd=os.path.join('wrappers','Modelica')); process.wait()
#
# shutil.copy2(os.path.join('wrappers','Modelica','README.rst'),os.path.join('dist_temp','Modelica','README.rst'))
# #shutil.copy2(os.path.join('wrappers','Modelica','src_modelica','CoolProp2Modelica.mo'),os.path.join('dist_temp','Modelica','CoolProp2Modelica.mo'))
# shutil.copy2(os.path.join('wrappers','Modelica','src','CoolPropLib.h'),os.path.join('dist_temp','Modelica','CoolPropLib.h'))
# shutil.copytree(os.path.join('wrappers','Modelica','bin','VS2008'),os.path.join('dist_temp','Modelica','VS2008'))
# shutil.copytree(os.path.join('wrappers','Modelica','bin','VS2010'),os.path.join('dist_temp','Modelica','VS2010'))
def UploadSourceForge():
    """Upload README and the versioned release folder to SourceForge via pscp."""
    import CoolProp
    # Rename folder to version number
    try:
        shutil.copytree('dist_temp', CoolProp.__version__)
    # ``WindowsError`` is undefined on non-Windows Python (NameError);
    # OSError covers it on every platform (WindowsError aliases OSError).
    except OSError:
        pass
    call_str = ['pscp', 'README.txt', 'ibell,coolprop@frs.sf.net:/home/pfs/project/c/co/coolprop/CoolProp/']
    print('Calling: ' + ' '.join(call_str))
    subprocess.check_output(call_str, shell=True)
    call_str = ['pscp', '-r', '-v', CoolProp.__version__, 'ibell,coolprop@frs.sf.net:/home/pfs/project/c/co/coolprop/CoolProp/']
    print('Calling: ' + ' '.join(call_str))
    subprocess.check_output(call_str, shell=True)
####### DOCUMENTATION STUFF ################
def Doxygen():
    """Stamp the current CoolProp version into Doxyfile, then run doxygen."""
    import CoolProp
    # Open Doxyfile, and update the version number in the file.
    # ``with`` blocks close the handles the original leaked.
    with open('Doxyfile', 'r') as fp:
        lines = fp.readlines()
    for i in range(len(lines)):
        if lines[i].startswith('PROJECT_NUMBER'):
            lines[i] = lines[i].split('=')[0] + ' = ' + CoolProp.__version__ + '\n'
            break
    with open('Doxyfile', 'w') as fp:
        fp.write(''.join(lines))
    subprocess.check_output(['doxygen', 'Doxyfile'], shell=True)
def RunExamples():
    """Run the documentation example scripts as an integration test."""
    subprocess.check_call(['run_examples.bat'], cwd='Web/examples', shell=True, stdout=sys.stdout)
def BuildDocs():
    """Update release links in the docs index to the current version, then
    rebuild the HTML docs from scratch."""
    import CoolProp
    prefix = 'http://sourceforge.net/projects/coolprop/files/CoolProp/'
    languages = ['Python', 'Modelica', 'Labview', 'MATLAB', 'EES', 'Octave', 'Excel',
                 'C#', 'Java', 'Javascript', 'MathCAD', 'Maple', 'Mathematica', 'Scilab']
    # Inject the revision number into the docs main pages for the link.
    # ``with`` blocks close the handles the original leaked; the pointless
    # ``lines[i][:]`` copy before .replace() was dropped.
    with open('Web/_templates/index.html', 'r') as fp:
        lines = fp.readlines()
    for i in range(len(lines)):
        if (lines[i].find(prefix) > -1
                and any([lines[i].find(a) > -1 for a in languages])):
            oldVersion = lines[i].split(prefix)[1].split('/', 1)[0]
            lines[i] = lines[i].replace(oldVersion, CoolProp.__version__)
    with open('Web/_templates/index.html', 'w') as fp:
        fp.write(''.join(lines))
    shutil.rmtree(os.path.join('Web', '_build'), ignore_errors=True)
    subprocess.check_output(['BuildCPDocs.bat'], shell=True, cwd='Web')
def UploadDocs():
    """Push the built HTML docs to the SourceForge web area via pscp."""
    cmd = ['pscp', '-r', '-v', 'Web/_build/html/*.*',
           'ibell@web.sourceforge.net:/home/groups/coolprop/htdocs']
    print('Calling: ' + ' '.join(cmd))
    subprocess.check_output(cmd, shell=True)
def Superpacks():
    """Archive the source tree and assemble the Windows superpack zip."""
    import CoolProp
    version = CoolProp.__version__
    subprocess.check_call(['git', 'archive', '-o', 'dist_temp/CoolProp-' + version + '-source_code.zip', 'HEAD'])
    ## Windows superpack
    superpack = os.path.join('dist_temp', 'windows_superpack')
    try:
        os.mkdir(superpack)
    # ``WindowsError`` is undefined on non-Windows Python (NameError);
    # OSError covers both platforms.
    except OSError:
        pass
    shutil.copy2('dist_temp/CoolProp-' + version + '-source_code.zip',
                 os.path.join(superpack, 'CoolProp-' + version + '-source_code.zip'))
    # Octave removed for now : 'Octave'
    for folder in ['Excel and DLL', 'Python', 'C#', 'MATLAB', 'EES', 'Labview', 'LibreOffice',
                   'Maple', 'Scilab', 'Mathematica', 'Java', 'Javascript', 'MathCAD']:
        shutil.copytree(os.path.join('dist_temp', folder), os.path.join(superpack, folder))
    subprocess.check_call(['7z', 'a', '-r', 'dist_temp/CoolProp-' + version + '-windows_superpack.zip',
                           'dist_temp/windows_superpack/*.*'])
    shutil.rmtree(superpack)
if __name__=='__main__':
    # InstallPrereqs() #This is optional if you think any of the pre-reqs have been updated
    # NOTE: order matters -- Python() reinstalls CoolProp first so every
    # wrapper/doc step below picks up the fresh version number.
    Python() # This one must be first to ensure that version gets rebuild properly
    ## Always run the examples first. This serves as an integration test of
    ## CoolProp
    RunExamples()
    DLL_and_Excel()
    Source()
    Csharp()
    #~ ###########~ Octave()
    MATLAB()
    Maple()
    Mathematica()
    Scilab()
    EES()
    LibreOffice()
    Javascript()
    Java()
    MathCAD()
    Labview()
    # Packaging and upload only after every wrapper built successfully
    Superpacks()
    PYPI()
    UploadSourceForge()
    # Documentation: doxygen API docs, then the Sphinx site, then upload
    Doxygen()
    BuildDocs()
    UploadDocs()
|
|
"""Implementation of RootOf class and related tools. """
from __future__ import print_function, division
from sympy.core import (S, Expr, Integer, Float, I, Add, Lambda, symbols,
sympify, Rational, Dummy)
from sympy.core.cache import cacheit
from sympy.core.function import AppliedUndef
from sympy.functions.elementary.miscellaneous import root as _root
from sympy.polys.polytools import Poly, PurePoly, factor
from sympy.polys.rationaltools import together
from sympy.polys.polyfuncs import symmetrize, viete
from sympy.polys.rootisolation import (
dup_isolate_complex_roots_sqf,
dup_isolate_real_roots_sqf)
from sympy.polys.polyroots import (
roots_linear, roots_quadratic, roots_binomial,
preprocess_roots, roots)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
GeneratorsNeeded,
PolynomialError,
DomainError)
from sympy.polys.domains import QQ
from mpmath import mpf, mpc, findroot, workprec
from mpmath.libmp.libmpf import prec_to_dps
from sympy.utilities import lambdify, public
from sympy.core.compatibility import range
from sympy.core.decorators import deprecated
from math import log as mathlog
__all__ = ['CRootOf']
def _ispow2(i):
v = mathlog(i, 2)
return v == int(v)
# Module-level caches mapping a (Pure)Poly factor to the list of isolating
# intervals of its real / complex roots; shared by all CRootOf instances.
_reals_cache = {}
_complexes_cache = {}
@public
def rootof(f, x, index=None, radicals=True, expand=True):
    """An indexed root of a univariate polynomial.
    Returns either a ``ComplexRootOf`` object or an explicit
    expression involving radicals.
    Parameters
    ----------
    f : Expr
        Univariate polynomial.
    x : Symbol, optional
        Generator for ``f``.
    index : int or Integer
        Index of the root (may be passed as the second positional
        argument when the generator is omitted).
    radicals : bool
        Return a radical expression if possible.
    expand : bool
        Expand ``f``.
    """
    return CRootOf(f, x, index=index, radicals=radicals, expand=expand)
@public
class RootOf(Expr):
    """Represents a root of a univariate polynomial.
    Base class for roots of different kinds of polynomials.
    Only complex roots are currently supported.
    """
    # Only the defining polynomial is stored; subclasses add their own slots.
    __slots__ = ['poly']
    def __new__(cls, f, x, index=None, radicals=True, expand=True):
        """Construct a new ``CRootOf`` object for ``k``-th root of ``f``."""
        # Delegates to ``rootof``, which returns either a ComplexRootOf
        # instance or an explicit radical expression.
        return rootof(f, x, index=index, radicals=radicals, expand=expand)
@public
class ComplexRootOf(RootOf):
    """Represents an indexed complex root of a polynomial.
    Roots of a univariate polynomial separated into disjoint
    real or complex intervals and indexed in a fixed order.
    Currently only rational coefficients are allowed.
    Can be imported as ``CRootOf``.
    """
    # Position of this root in the fixed ordering (real roots first).
    __slots__ = ['index']
    is_complex = True
    is_number = True
    def __new__(cls, f, x, index=None, radicals=False, expand=True):
        """ Construct an indexed complex root of a polynomial.
        See ``rootof`` for the parameters.
        The default value of ``radicals`` is ``False`` to satisfy
        ``eval(srepr(expr)) == expr``.
        """
        x = sympify(x)
        # Called as CRootOf(f, i): the second argument is the index and the
        # generator must be inferred from ``f``.
        if index is None and x.is_Integer:
            x, index = None, x
        else:
            index = sympify(index)
        if index is not None and index.is_Integer:
            index = int(index)
        else:
            raise ValueError("expected an integer root index, got %s" % index)
        poly = PurePoly(f, x, greedy=False, expand=expand)
        if not poly.is_univariate:
            raise PolynomialError("only univariate polynomials are allowed")
        degree = poly.degree()
        if degree <= 0:
            raise PolynomialError("can't construct CRootOf object for %s" % f)
        # Negative indices count from the end, as with Python sequences.
        if index < -degree or index >= degree:
            raise IndexError("root index out of [%d, %d] range, got %d" %
                             (-degree, degree - 1, index))
        elif index < 0:
            index += degree
        dom = poly.get_domain()
        if not dom.is_Exact:
            poly = poly.to_exact()
        # Linear/quadratic/binomial roots may be expressed in closed form.
        roots = cls._roots_trivial(poly, radicals)
        if roots is not None:
            return roots[index]
        coeff, poly = preprocess_roots(poly)
        dom = poly.get_domain()
        if not dom.is_ZZ:
            raise NotImplementedError("CRootOf is not supported over %s" % dom)
        root = cls._indexed_root(poly, index)
        return coeff * cls._postprocess_root(root, radicals)
    @classmethod
    def _new(cls, poly, index):
        """Construct new ``CRootOf`` object from raw data. """
        obj = Expr.__new__(cls)
        obj.poly = PurePoly(poly)
        obj.index = index
        try:
            # Share any isolation intervals already computed for ``poly``
            # under the freshly wrapped PurePoly key; absent entries are
            # simply not copied.
            _reals_cache[obj.poly] = _reals_cache[poly]
            _complexes_cache[obj.poly] = _complexes_cache[poly]
        except KeyError:
            pass
        return obj
    def _hashable_content(self):
        # Identity is determined by the defining polynomial and root index.
        return (self.poly, self.index)
    @property
    def expr(self):
        # The defining polynomial as an ordinary expression.
        return self.poly.as_expr()
    @property
    def args(self):
        # Rebuildable args: (polynomial expression, index as an Integer).
        return (self.expr, Integer(self.index))
    @property
    def free_symbols(self):
        # CRootOf currently only works with univariate expressions and although
        # the poly attribute is often a PurePoly, sometimes it is a Poly. In
        # either case no free symbols should be reported: the root is a fixed
        # number, not a function of the polynomial's generator.
        return set()
    def _eval_is_real(self):
        """Return ``True`` if the root is real. """
        # Real roots are ordered first, so an index below the number of
        # cached real intervals denotes a real root.
        return self.index < len(_reals_cache[self.poly])
    @classmethod
    def real_roots(cls, poly, radicals=True):
        """Get real roots of a polynomial. """
        # Delegate to _get_roots with the enumerator for real roots only.
        return cls._get_roots("_real_roots", poly, radicals)
    @classmethod
    def all_roots(cls, poly, radicals=True):
        """Get real and complex roots of a polynomial. """
        # Delegate to _get_roots with the enumerator for all roots.
        return cls._get_roots("_all_roots", poly, radicals)
@classmethod
def _get_reals_sqf(cls, factor):
"""Get real root isolating intervals for a square-free factor."""
if factor in _reals_cache:
real_part = _reals_cache[factor]
else:
_reals_cache[factor] = real_part = \
dup_isolate_real_roots_sqf(
factor.rep.rep, factor.rep.dom, blackbox=True)
return real_part
@classmethod
def _get_complexes_sqf(cls, factor):
"""Get complex root isolating intervals for a square-free factor."""
if factor in _complexes_cache:
complex_part = _complexes_cache[factor]
else:
_complexes_cache[factor] = complex_part = \
dup_isolate_complex_roots_sqf(
factor.rep.rep, factor.rep.dom, blackbox=True)
return complex_part
@classmethod
def _get_reals(cls, factors):
"""Compute real root isolating intervals for a list of factors. """
reals = []
for factor, k in factors:
real_part = cls._get_reals_sqf(factor)
reals.extend([(root, factor, k) for root in real_part])
return reals
@classmethod
def _get_complexes(cls, factors):
"""Compute complex root isolating intervals for a list of factors. """
complexes = []
for factor, k in factors:
complex_part = cls._get_complexes_sqf(factor)
complexes.extend([(root, factor, k) for root in complex_part])
return complexes
    @classmethod
    def _reals_sorted(cls, reals):
        """Make real isolating intervals disjoint and sort roots. """
        cache = {}
        # Pairwise refine until all intervals are disjoint, writing the
        # refined intervals back into the list.
        for i, (u, f, k) in enumerate(reals):
            for j, (v, g, m) in enumerate(reals[i + 1:]):
                u, v = u.refine_disjoint(v)
                reals[i + j + 1] = (v, g, m)
            reals[i] = (u, f, k)
        # Disjoint intervals sort unambiguously by their left endpoint.
        reals = sorted(reals, key=lambda r: r[0].a)
        for root, factor, _ in reals:
            if factor in cache:
                cache[factor].append(root)
            else:
                cache[factor] = [root]
        # Publish the sorted intervals per factor to the global cache.
        for factor, roots in cache.items():
            _reals_cache[factor] = roots
        return reals
    @classmethod
    def _separate_imaginary_from_complex(cls, complexes):
        """Split ``complexes`` into (imaginary, non-imaginary) root triples."""
        from sympy.utilities.iterables import sift
        def is_imag(c):
            '''
            return True if all roots are imaginary (ax**2 + b)
            return False if no roots are imaginary
            return None if 2 roots are imaginary (ax**N + b with N a
            power of 2 and leading/trailing coefficients of opposite sign)
            '''
            u, f, k = c
            deg = f.degree()
            if f.length() == 2:
                if deg == 2:
                    return True # both imag
                elif _ispow2(deg):
                    if f.LC()*f.TC() < 0:
                        return None # 2 are imag
            return False # none are imag
        # separate according to the function
        sifted = sift(complexes, lambda c: c[1])
        del complexes
        imag = []
        complexes = []
        for f in sifted:
            isift = sift(sifted[f], lambda c: is_imag(c))
            imag.extend(isift.pop(True, []))
            complexes.extend(isift.pop(False, []))
            mixed = isift.pop(None, [])
            assert not isift
            if not mixed:
                continue
            # For the mixed case, refine the rectangles until only the two
            # imaginary roots (which straddle the y-axis) remain.
            while True:
                # the non-imaginary ones will be on one side or the other
                # of the y-axis
                i = 0
                while i < len(mixed):
                    u, f, k = mixed[i]
                    if u.ax*u.bx > 0:
                        complexes.append(mixed.pop(i))
                    else:
                        i += 1
                if len(mixed) == 2:
                    imag.extend(mixed)
                    break
                # refine
                for i, (u, f, k) in enumerate(mixed):
                    u = u._inner_refine()
                    mixed[i] = u, f, k
        return imag, complexes
    @classmethod
    def _refine_complexes(cls, complexes):
        """return complexes such that no bounding rectangles of non-conjugate
        roots would intersect if slid horizontally or vertically.
        """
        while complexes: # break when all are distinct
            # get the intervals pairwise-disjoint.
            # If rectangles were drawn around the coordinates of the bounding
            # rectangles, no rectangles would intersect after this procedure.
            for i, (u, f, k) in enumerate(complexes):
                for j, (v, g, m) in enumerate(complexes[i + 1:]):
                    u, v = u.refine_disjoint(v)
                    complexes[i + j + 1] = (v, g, m)
                complexes[i] = (u, f, k)
            # Although there are no intersecting rectangles, a given rectangle
            # might intersect another when slid horizontally. We have to refine
            # intervals until this is not true so we can sort the roots
            # unambiguously. Since complex roots come in conjugate pairs, we
            # will always have 2 rectangles above each other but we should not
            # have more than that.
            N = len(complexes)//2 - 1
            # check x (real) parts: there must be N + 1 disjoint x ranges, i.e.
            # the first one must be different from N others
            uu = set([(u.ax, u.bx) for u, _, _ in complexes])
            u = uu.pop()
            if sum([u[1] <= v[0] or v[1] <= u[0] for v in uu]) < N:
                # refine
                for i, (u, f, k) in enumerate(complexes):
                    u = u._inner_refine()
                    complexes[i] = u, f, k
            else:
                # intervals with identical x-values have disjoint y-values or
                # else they would not be disjoint so there is no need for
                # further checks
                break
        return complexes
    @classmethod
    def _complexes_sorted(cls, complexes):
        """Make complex isolating intervals disjoint and sort roots. """
        if not complexes:
            return []
        cache = {}
        # imaginary roots can cause a problem in terms of sorting since
        # their x-intervals will never refine as distinct from others
        # so we handle them separately
        imag, complexes = cls._separate_imaginary_from_complex(complexes)
        complexes = cls._refine_complexes(complexes)
        # sort imaginary roots
        def key(c):
            '''return, for ax**n+b, +/-root(abs(b/a), b) according to the
            apparent sign of the imaginary interval, e.g. if the interval
            were (0, 3) the positive root would be returned.
            '''
            u, f, k = c
            r = _root(abs(f.TC()/f.LC()), f.degree())
            if u.ay < 0 or u.by < 0:
                return -r
            return r
        imag = sorted(imag, key=lambda c: key(c))
        # sort complexes and combine with imag
        if complexes:
            # key is (x1, y1) e.g. (1, 2)x(3, 4) -> (1,3)
            complexes = sorted(complexes, key=lambda c: c[0].a)
            # find insertion point for imaginary: just after the last
            # rectangle that lies entirely left of the y-axis
            for i, c in enumerate(reversed(complexes)):
                if c[0].bx <= 0:
                    break
            i = len(complexes) - i - 1
            if i:
                i += 1
            complexes = complexes[:i] + imag + complexes[i:]
        else:
            complexes = imag
        # update cache: sorted intervals grouped by their factor
        for root, factor, _ in complexes:
            if factor in cache:
                cache[factor].append(root)
            else:
                cache[factor] = [root]
        for factor, roots in cache.items():
            _complexes_cache[factor] = roots
        return complexes
    @classmethod
    def _reals_index(cls, reals, index):
        """
        Map initial real root index to an index in a factor where
        the root belongs.
        """
        i = 0
        for j, (_, factor, k) in enumerate(reals):
            if index < i + k:
                poly, index = factor, 0
                # count earlier roots belonging to the same factor
                for _, factor, _ in reals[:j]:
                    if factor == poly:
                        index += 1
                return poly, index
            else:
                # skip this root's multiplicity
                i += k
    @classmethod
    def _complexes_index(cls, complexes, index):
        """
        Map initial complex root index to an index in a factor where
        the root belongs.
        """
        index, i = index, 0
        for j, (_, factor, k) in enumerate(complexes):
            if index < i + k:
                poly, index = factor, 0
                # count earlier complex roots belonging to the same factor
                for _, factor, _ in complexes[:j]:
                    if factor == poly:
                        index += 1
                # within a factor, complex roots are indexed after its reals
                index += len(_reals_cache[poly])
                return poly, index
            else:
                # skip this root's multiplicity
                i += k
@classmethod
def _count_roots(cls, roots):
"""Count the number of real or complex roots with multiplicities."""
return sum([k for _, _, k in roots])
@classmethod
def _indexed_root(cls, poly, index):
"""Get a root of a composite polynomial by index. """
(_, factors) = poly.factor_list()
reals = cls._get_reals(factors)
reals_count = cls._count_roots(reals)
if index < reals_count:
reals = cls._reals_sorted(reals)
return cls._reals_index(reals, index)
else:
complexes = cls._get_complexes(factors)
complexes = cls._complexes_sorted(complexes)
return cls._complexes_index(complexes, index - reals_count)
@classmethod
def _real_roots(cls, poly):
"""Get real roots of a composite polynomial. """
(_, factors) = poly.factor_list()
reals = cls._get_reals(factors)
reals = cls._reals_sorted(reals)
reals_count = cls._count_roots(reals)
roots = []
for index in range(0, reals_count):
roots.append(cls._reals_index(reals, index))
return roots
@classmethod
def _all_roots(cls, poly):
"""Get real and complex roots of a composite polynomial. """
(_, factors) = poly.factor_list()
reals = cls._get_reals(factors)
reals = cls._reals_sorted(reals)
reals_count = cls._count_roots(reals)
roots = []
for index in range(0, reals_count):
roots.append(cls._reals_index(reals, index))
complexes = cls._get_complexes(factors)
complexes = cls._complexes_sorted(complexes)
complexes_count = cls._count_roots(complexes)
for index in range(0, complexes_count):
roots.append(cls._complexes_index(complexes, index))
return roots
@classmethod
@cacheit
def _roots_trivial(cls, poly, radicals):
"""Compute roots in linear, quadratic and binomial cases. """
if poly.degree() == 1:
return roots_linear(poly)
if not radicals:
return None
if poly.degree() == 2:
return roots_quadratic(poly)
elif poly.length() == 2 and poly.TC():
return roots_binomial(poly)
else:
return None
@classmethod
def _preprocess_roots(cls, poly):
"""Take heroic measures to make ``poly`` compatible with ``CRootOf``."""
dom = poly.get_domain()
if not dom.is_Exact:
poly = poly.to_exact()
coeff, poly = preprocess_roots(poly)
dom = poly.get_domain()
if not dom.is_ZZ:
raise NotImplementedError(
"sorted roots not supported over %s" % dom)
return coeff, poly
@classmethod
def _postprocess_root(cls, root, radicals):
"""Return the root if it is trivial or a ``CRootOf`` object. """
poly, index = root
roots = cls._roots_trivial(poly, radicals)
if roots is not None:
return roots[index]
else:
return cls._new(poly, index)
@classmethod
def _get_roots(cls, method, poly, radicals):
"""Return postprocessed roots of specified kind. """
if not poly.is_univariate:
raise PolynomialError("only univariate polynomials are allowed")
coeff, poly = cls._preprocess_roots(poly)
roots = []
for root in getattr(cls, method)(poly):
roots.append(coeff*cls._postprocess_root(root, radicals))
return roots
def _get_interval(self):
"""Internal function for retrieving isolation interval from cache. """
if self.is_real:
return _reals_cache[self.poly][self.index]
else:
reals_count = len(_reals_cache[self.poly])
return _complexes_cache[self.poly][self.index - reals_count]
def _set_interval(self, interval):
"""Internal function for updating isolation interval in cache. """
if self.is_real:
_reals_cache[self.poly][self.index] = interval
else:
reals_count = len(_reals_cache[self.poly])
_complexes_cache[self.poly][self.index - reals_count] = interval
    def _eval_subs(self, old, new):
        # don't allow subs to change anything: a CRootOf is a fixed number
        # with no free symbols, so substitution cannot affect it
        return self
    def _eval_evalf(self, prec):
        """Evaluate this complex root to the given precision.

        Refines the cached isolation interval until ``findroot`` converges
        to a value verified to lie inside it.
        """
        with workprec(prec):
            g = self.poly.gen
            # lambdify needs a Symbol; substitute a Dummy if the generator
            # is some other kind of expression
            if not g.is_Symbol:
                d = Dummy('x')
                func = lambdify(d, self.expr.subs(g, d))
            else:
                func = lambdify(g, self.expr)
            interval = self._get_interval()
            if not self.is_real:
                # For complex intervals, we need to keep refining until the
                # imaginary interval is disjunct with other roots, that is,
                # until both ends get refined.
                ay = interval.ay
                by = interval.by
                while interval.ay == ay or interval.by == by:
                    interval = interval.refine()
            while True:
                if self.is_real:
                    a = mpf(str(interval.a))
                    b = mpf(str(interval.b))
                    if a == b:
                        root = a
                        break
                    x0 = mpf(str(interval.center))
                else:
                    ax = mpf(str(interval.ax))
                    bx = mpf(str(interval.bx))
                    ay = mpf(str(interval.ay))
                    by = mpf(str(interval.by))
                    if ax == bx and ay == by:
                        # the sign of the imaginary part will be assigned
                        # according to the desired index using the fact that
                        # roots are sorted with negative imag parts coming
                        # before positive (and all imag roots coming after real
                        # roots)
                        deg = self.poly.degree()
                        i = self.index # a positive attribute after creation
                        if (deg - i) % 2:
                            if ay < 0:
                                ay = -ay
                        else:
                            if ay > 0:
                                ay = -ay
                        root = mpc(ax, ay)
                        break
                    x0 = mpc(*map(str, interval.center))
                try:
                    root = findroot(func, x0)
                    # If the (real or complex) root is not in the 'interval',
                    # then keep refining the interval. This happens if findroot
                    # accidentally finds a different root outside of this
                    # interval because our initial estimate 'x0' was not close
                    # enough. It is also possible that the secant method will
                    # get trapped by a max/min in the interval; the root
                    # verification by findroot will raise a ValueError in this
                    # case and the interval will then be tightened -- and
                    # eventually the root will be found.
                    #
                    # It is also possible that findroot will not have any
                    # successful iterations to process (in which case it
                    # will fail to initialize a variable that is tested
                    # after the iterations and raise an UnboundLocalError).
                    if self.is_real:
                        if (a <= root <= b):
                            break
                    elif (ax <= root.real <= bx and ay <= root.imag <= by):
                        break
                except (UnboundLocalError, ValueError):
                    pass
                interval = interval.refine()
        return (Float._new(root.real._mpf_, prec)
                + I*Float._new(root.imag._mpf_, prec))
def eval_rational(self, tol):
    """
    Return a Rational approximation to ``self`` with the tolerance ``tol``.

    Bisection is used because it is very robust and always converges; the
    returned Rational differs from the exact root by at most ``tol``.

    The following example first obtains Rational approximation to 1e-7
    accuracy for all roots of the 4-th order Legendre polynomial, and then
    evaluates it to 5 decimal digits (so all digits will be correct
    including rounding):

    >>> from sympy import S, legendre_poly, Symbol
    >>> x = Symbol("x")
    >>> p = legendre_poly(4, x, polys=True)
    >>> roots = [r.eval_rational(S(1)/10**7) for r in p.real_roots()]
    >>> roots = [str(r.n(5)) for r in roots]
    >>> roots
    ['-0.86114', '-0.33998', '0.33998', '0.86114']
    """
    # Bisection needs a sign change across a real interval, so complex
    # roots cannot be handled by this method.
    if not self.is_real:
        raise NotImplementedError(
            "eval_rational() only works for real polynomials so far")
    # The isolating interval brackets exactly this root; its endpoints
    # (converted to exact Rationals) seed the bisection.
    isolating = self._get_interval()
    lo = Rational(str(isolating.a))
    hi = Rational(str(isolating.b))
    return bisect(lambdify(self.poly.gen, self.expr), lo, hi, tol)
def _eval_Eq(self, other):
    """Decide ``Eq(self, other)`` where possible, returning a SymPy boolean.

    ``other`` equals this root only if it is a finite number, makes the
    defining polynomial vanish, and lies strictly inside this root's
    (sufficiently refined) isolating interval.
    """
    # CRootOf represents a Root, so if other is that root, it should set
    # the expression to zero *and* it should be in the interval of the
    # CRootOf instance. It must also be a number that agrees with the
    # is_real value of the CRootOf instance.
    if type(self) == type(other):
        return sympify(self.__eq__(other))
    # Non-numbers and expressions containing undefined functions can
    # never be decided equal to a root.
    if not (other.is_number and not other.has(AppliedUndef)):
        return S.false
    if not other.is_finite:
        return S.false
    # Substitute `other` for the polynomial's generator: a definite
    # non-zero means `other` is not any root of the polynomial.
    z = self.expr.subs(self.expr.free_symbols.pop(), other).is_zero
    if z is False:  # all roots will make z True but we don't know
        # whether this is the right root if z is True
        return S.false
    # Realness/imaginariness must agree when both sides know theirs.
    o = other.is_real, other.is_imaginary
    s = self.is_real, self.is_imaginary
    if o != s and None not in o and None not in s:
        return S.false
    i = self._get_interval()
    was = i.a, i.b
    need = [True]*2
    # make sure it would be distinct from others
    while any(need):
        i = i.refine()
        a, b = i.a, i.b
        if need[0] and a != was[0]:
            need[0] = False
        if need[1] and b != was[1]:
            need[1] = False
    re, im = other.as_real_imag()
    if not im:
        if self.is_real:
            # NOTE(review): the comprehension reuses the name `i` (the
            # interval); harmless only because this branch returns
            # immediately — worth renaming if this is ever touched.
            a, b = [Rational(str(i)) for i in (a, b)]
            return sympify(a < other and other < b)
        return S.false
    if self.is_real:
        return S.false
    # Complex root: `other` must lie strictly inside the rectangular
    # isolating region (ax, bx) x (ay, by).
    z = r1, r2, i1, i2 = [Rational(str(j)) for j in (
        i.ax, i.bx, i.ay, i.by)]
    return sympify((
        r1 < re and re < r2) and (
        i1 < im and im < i2))
CRootOf = ComplexRootOf  # public short alias for ComplexRootOf
@public
class RootSum(Expr):
    """Represents a sum of all roots of a univariate polynomial. """

    __slots__ = ['poly', 'fun', 'auto']

    def __new__(cls, expr, func=None, x=None, auto=True, quadratic=False):
        """Construct a new ``RootSum`` instance of roots of a polynomial."""
        coeff, poly = cls._transform(expr, x)
        if not poly.is_univariate:
            raise MultivariatePolynomialError(
                "only univariate polynomials are allowed")
        if func is None:
            # Default to the identity map: a plain sum of the roots.
            func = Lambda(poly.gen, poly.gen)
        else:
            try:
                is_func = func.is_Function
            except AttributeError:
                is_func = False
            if is_func and 1 in func.nargs:
                if not isinstance(func, Lambda):
                    func = Lambda(poly.gen, func(poly.gen))
            else:
                raise ValueError(
                    "expected a univariate function, got %s" % func)
        var, expr = func.variables[0], func.expr
        if coeff is not S.One:
            # _transform rescaled the roots by `coeff`; compensate inside
            # the summand so func is applied to the original roots.
            expr = expr.subs(var, coeff*var)
        deg = poly.degree()
        if not expr.has(var):
            # Constant summand: the sum is just `deg` copies of it.
            return deg*expr
        # Split off additive and multiplicative constants so only the
        # variable-dependent core is summed over the roots.
        if expr.is_Add:
            add_const, expr = expr.as_independent(var)
        else:
            add_const = S.Zero
        if expr.is_Mul:
            mul_const, expr = expr.as_independent(var)
        else:
            mul_const = S.One
        func = Lambda(var, expr)
        rational = cls._is_func_rational(poly, func)
        # NOTE: the loop variable deliberately shadows `poly` with each
        # irreducible factor; the original polynomial is no longer needed.
        (_, factors), terms = poly.factor_list(), []
        for poly, k in factors:
            if poly.is_linear:
                # Linear factors have an explicit root: evaluate directly.
                term = func(roots_linear(poly)[0])
            elif quadratic and poly.is_quadratic:
                term = sum(map(func, roots_quadratic(poly)))
            else:
                if not rational or not auto:
                    term = cls._new(poly, func, auto)
                else:
                    term = cls._rational_case(poly, func)
            terms.append(k*term)
        return mul_const*Add(*terms) + deg*add_const

    @classmethod
    def _new(cls, poly, func, auto=True):
        """Construct new raw ``RootSum`` instance. """
        obj = Expr.__new__(cls)
        obj.poly = poly
        obj.fun = func
        obj.auto = auto
        return obj

    @classmethod
    def new(cls, poly, func, auto=True):
        """Construct new ``RootSum`` instance. """
        if not func.expr.has(*func.variables):
            # Constant function: nothing to sum over.
            return func.expr
        rational = cls._is_func_rational(poly, func)
        if not rational or not auto:
            return cls._new(poly, func, auto)
        else:
            return cls._rational_case(poly, func)

    @classmethod
    def _transform(cls, expr, x):
        """Transform an expression to a polynomial. """
        poly = PurePoly(expr, x, greedy=False)
        return preprocess_roots(poly)

    @classmethod
    def _is_func_rational(cls, poly, func):
        """Check if a lambda is a rational function. """
        var, expr = func.variables[0], func.expr
        return expr.is_rational_function(var)

    @classmethod
    def _rational_case(cls, poly, func):
        """Handle the rational function case. """
        roots = symbols('r:%d' % poly.degree())
        var, expr = func.variables[0], func.expr
        # Sum func over symbolic roots r0..r(n-1), then express the
        # coefficients via elementary symmetric polynomials so Viete's
        # formulas can eliminate the symbolic roots.
        f = sum(expr.subs(var, r) for r in roots)
        p, q = together(f).as_numer_denom()
        domain = QQ[roots]
        p = p.expand()
        q = q.expand()
        try:
            p = Poly(p, domain=domain, expand=False)
        except GeneratorsNeeded:
            # Numerator is constant in the polynomial generator.
            p, p_coeff = None, (p,)
        else:
            p_monom, p_coeff = zip(*p.terms())
        try:
            q = Poly(q, domain=domain, expand=False)
        except GeneratorsNeeded:
            q, q_coeff = None, (q,)
        else:
            q_monom, q_coeff = zip(*q.terms())
        # Rewrite all coefficients in terms of symmetric functions of the
        # roots, then substitute the values given by Viete's formulas.
        coeffs, mapping = symmetrize(p_coeff + q_coeff, formal=True)
        formulas, values = viete(poly, roots), []
        for (sym, _), (_, val) in zip(mapping, formulas):
            values.append((sym, val))
        for i, (coeff, _) in enumerate(coeffs):
            coeffs[i] = coeff.subs(values)
        n = len(p_coeff)
        p_coeff = coeffs[:n]
        q_coeff = coeffs[n:]
        # Rebuild numerator and denominator with the root-free coefficients.
        if p is not None:
            p = Poly(dict(zip(p_monom, p_coeff)), *p.gens).as_expr()
        else:
            (p,) = p_coeff
        if q is not None:
            q = Poly(dict(zip(q_monom, q_coeff)), *q.gens).as_expr()
        else:
            (q,) = q_coeff
        return factor(p/q)

    def _hashable_content(self):
        # `auto` is intentionally excluded: it only controls evaluation
        # strategy, not identity.
        return (self.poly, self.fun)

    @property
    def expr(self):
        return self.poly.as_expr()

    @property
    def args(self):
        return (self.expr, self.fun, self.poly.gen)

    @property
    def free_symbols(self):
        return self.poly.free_symbols | self.fun.free_symbols

    @property
    def is_commutative(self):
        return True

    def doit(self, **hints):
        if not hints.get('roots', True):
            return self
        _roots = roots(self.poly, multiple=True)
        if len(_roots) < self.poly.degree():
            # Not all roots could be computed symbolically; stay unevaluated.
            return self
        else:
            return Add(*[self.fun(r) for r in _roots])

    def _eval_evalf(self, prec):
        try:
            _roots = self.poly.nroots(n=prec_to_dps(prec))
        except (DomainError, PolynomialError):
            # Numerical root-finding is not possible for this polynomial;
            # return self unevaluated.
            return self
        else:
            return Add(*[self.fun(r) for r in _roots])

    def _eval_derivative(self, x):
        # d/dx sum(f(r)) == sum((df/dx)(r)) since the roots are constants.
        var, expr = self.fun.args
        func = Lambda(var, expr.diff(x))
        return self.new(self.poly, func, self.auto)
def bisect(f, a, b, tol):
    """
    Implements bisection. This function is used in RootOf.eval_rational() and
    it needs to be robust.

    Parameters
    ==========

    f : callable
        Function whose root is sought; must change sign over ``[a, b]``
        (or have an exact root at an endpoint).
    a, b : number (sympifiable)
        Bracketing interval endpoints with ``f(a)*f(b) <= 0``.
    tol : number
        The returned value is at most ``tol`` from the exact root.

    Examples
    ========

    >>> from sympy import S
    >>> from sympy.polys.rootoftools import bisect
    >>> bisect(lambda x: x**2-1, -10, 0, S(1)/10**2)
    -1025/1024
    >>> bisect(lambda x: x**2-1, -10, 0, S(1)/10**4)
    -131075/131072
    """
    a = sympify(a)
    b = sympify(b)
    fa = f(a)
    fb = f(b)
    # An endpoint that is already an exact root can be returned as-is.
    # (Previously this case fell through to the sign check below and
    # raised ValueError, which contradicts "needs to be robust".)
    if fa == 0:
        return a
    if fb == 0:
        return b
    if fa * fb > 0:
        raise ValueError("bisect: f(a) and f(b) must have opposite signs")
    while (b - a > tol):
        c = (a + b)/2
        fc = f(c)
        if (fc == 0):
            return c  # We need to make sure f(c) is not zero below
        # Keep the half-interval over which f still changes sign.
        if (fa * fc < 0):
            b = c
            fb = fc
        else:
            a = c
            fa = fc
    return (a + b)/2
|
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellsScheduler
"""
import copy
import time
import mock
from oslo_utils import uuidutils
from jacket.compute import block_device
from jacket.compute.cells import filters
from jacket.compute.cells import weights
from jacket.compute.cloud import vm_states
import jacket.compute.conf
from jacket import context
from jacket.db import compute
from jacket.compute import exception
from jacket.objects import compute
from jacket.compute.scheduler import utils as scheduler_utils
from jacket.compute import test
from jacket.tests.compute.unit.cells import fakes
from jacket.tests.compute.unit import fake_block_device
from jacket.tests.compute import uuidsentinel
from jacket.compute import utils
CONF = jacket.compute.conf.CONF
class FakeFilterClass1(filters.BaseCellFilter):
    """No-op cell filter; exists so tests can load filters by dotted path."""
    pass
class FakeFilterClass2(filters.BaseCellFilter):
    """Second no-op cell filter used to verify filter ordering in tests."""
    pass
class FakeWeightClass1(weights.BaseCellWeigher):
    """No-op cell weigher; exists so tests can load weighers by dotted path."""
    def _weigh_object(self, obj, weight_properties):
        # Weight value is irrelevant to these tests; implicitly returns None.
        pass
class FakeWeightClass2(weights.BaseCellWeigher):
    """Second no-op cell weigher used to verify weigher ordering in tests."""
    def _weigh_object(self, obj, weight_properties):
        # Weight value is irrelevant to these tests; implicitly returns None.
        pass
class CellsSchedulerTestCase(test.TestCase):
    """Test case for CellsScheduler class."""

    def setUp(self):
        super(CellsSchedulerTestCase, self).setUp()
        # Start with no filter/weigher classes configured; tests that need
        # them re-initialize the scheduler with their own settings.
        self.flags(scheduler_filter_classes=[], scheduler_weight_classes=[],
                   group='cells')
        self._init_cells_scheduler()

    def _init_cells_scheduler(self):
        # Build the fake cells environment and cache the pieces the tests
        # stub and inspect (message runner, scheduler, cell state, etc.).
        fakes.init(self)
        self.msg_runner = fakes.get_message_runner('api-cell')
        self.scheduler = self.msg_runner.scheduler
        self.state_manager = self.msg_runner.state_manager
        self.my_cell_state = self.state_manager.get_my_state()
        self.ctxt = context.RequestContext('fake', 'fake')
        instance_uuids = []
        for x in range(3):
            instance_uuids.append(uuidutils.generate_uuid())
        self.instance_uuids = instance_uuids
        self.instances = [compute.Instance(uuid=uuid, id=id)
                          for id, uuid in enumerate(instance_uuids)]
        self.request_spec = {
            'num_instances': len(instance_uuids),
            'instance_properties': self.instances[0],
            'instance_type': 'fake_type',
            'image': 'fake_image'}
        self.build_inst_kwargs = {
            'instances': self.instances,
            'image': 'fake_image',
            'filter_properties': {'instance_type': 'fake_type'},
            'security_groups': 'fake_sec_groups',
            'block_device_mapping': 'fake_bdm'}

    def test_create_instances_here(self):
        # Just grab the first instance type
        inst_type = compute.Flavor.get_by_id(self.ctxt, 1)
        image = {'properties': {}}
        instance_uuids = self.instance_uuids
        instance_props = {'id': 'removed',
                          'security_groups': 'removed',
                          'info_cache': 'removed',
                          'name': 'instance-00000001',
                          'hostname': 'meow',
                          'display_name': 'moo',
                          'image_ref': 'fake_image_ref',
                          'user_id': self.ctxt.user_id,
                          # Test these as lists
                          'metadata': {'moo': 'cow'},
                          'system_metadata': {'meow': 'cat'},
                          'flavor': inst_type,
                          'project_id': self.ctxt.project_id}
        call_info = {'uuids': []}
        block_device_mapping = [
            compute.BlockDeviceMapping(context=self.ctxt,
                **fake_block_device.FakeDbBlockDeviceDict(
                    block_device.create_image_bdm('fake_image_ref'),
                    anon=True))
            ]

        def _fake_instance_update_at_top(_ctxt, instance):
            call_info['uuids'].append(instance['uuid'])

        self.stubs.Set(self.msg_runner, 'instance_update_at_top',
                       _fake_instance_update_at_top)
        self.scheduler._create_instances_here(self.ctxt, instance_uuids,
                instance_props, inst_type, image,
                ['default'], block_device_mapping)
        # Every created instance must have been pushed up to the API cell.
        self.assertEqual(instance_uuids, call_info['uuids'])
        for count, instance_uuid in enumerate(instance_uuids):
            instance = compute.instance_get_by_uuid(self.ctxt, instance_uuid)
            meta = utils.instance_meta(instance)
            self.assertEqual('cow', meta['moo'])
            sys_meta = utils.instance_sys_meta(instance)
            self.assertEqual('cat', sys_meta['meow'])
            self.assertEqual('meow', instance['hostname'])
            # display_name gets a per-instance numeric suffix appended.
            self.assertEqual('moo-%d' % (count + 1),
                             instance['display_name'])
            self.assertEqual('fake_image_ref', instance['image_ref'])

    # NOTE(review): patch target 'compute.compute.Instance.update' looks
    # fork-mangled (imports above use 'jacket.objects.compute') — confirm
    # this dotted path actually resolves at runtime.
    @mock.patch('compute.compute.Instance.update')
    def test_create_instances_here_pops_problematic_properties(self,
                                                               mock_update):
        values = {
            'uuid': uuidsentinel.instance,
            'metadata': [],
            'id': 1,
            'name': 'foo',
            'info_cache': 'bar',
            'security_groups': 'not secure',
            'flavor': 'chocolate',
            'pci_requests': 'no thanks',
            'ec2_ids': 'prime',
        }

        @mock.patch.object(self.scheduler.compute_api,
                           'create_db_entry_for_new_instance')
        def test(mock_create_db):
            self.scheduler._create_instances_here(
                self.ctxt, [uuidsentinel.instance], values,
                compute.Flavor(), 'foo', [], [])
        test()
        # NOTE(danms): Make sure that only the expected properties
        # are applied to the instance object. The complex ones that
        # would have been mangled over RPC should be removed.
        mock_update.assert_called_once_with(
            {'uuid': uuidsentinel.instance,
             'metadata': {}})

    def test_build_instances_selects_child_cell(self):
        # Make sure there's no capacity info so we're sure to
        # select a child cell
        our_cell_info = self.state_manager.get_my_state()
        our_cell_info.capacities = {}
        call_info = {'times': 0}
        orig_fn = self.msg_runner.build_instances

        def msg_runner_build_instances(ctxt, target_cell, build_inst_kwargs):
            # This gets called twice. Once for our running it
            # in this cell.. and then it'll get called when the
            # child cell is picked. So, first time.. just run it
            # like normal.
            if not call_info['times']:
                call_info['times'] += 1
                return orig_fn(ctxt, target_cell, build_inst_kwargs)
            call_info['ctxt'] = ctxt
            call_info['target_cell'] = target_cell
            call_info['build_inst_kwargs'] = build_inst_kwargs

        def fake_build_request_spec(ctxt, image, instances):
            request_spec = {
                'num_instances': len(instances),
                'image': image}
            return request_spec

        self.stubs.Set(self.msg_runner, 'build_instances',
                       msg_runner_build_instances)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                       fake_build_request_spec)
        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                                        self.build_inst_kwargs)
        self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.build_inst_kwargs,
                         call_info['build_inst_kwargs'])
        # The second call must have routed the build to a child cell.
        child_cells = self.state_manager.get_child_cells()
        self.assertIn(call_info['target_cell'], child_cells)

    def test_build_instances_selects_current_cell(self):
        # Make sure there's no child cells so that we will be
        # selected
        self.state_manager.child_cells = {}
        call_info = {}
        build_inst_kwargs = copy.deepcopy(self.build_inst_kwargs)

        def fake_create_instances_here(ctxt, instance_uuids,
                instance_properties, instance_type, image, security_groups,
                block_device_mapping):
            call_info['ctxt'] = ctxt
            call_info['instance_uuids'] = instance_uuids
            call_info['instance_properties'] = instance_properties
            call_info['instance_type'] = instance_type
            call_info['image'] = image
            call_info['security_groups'] = security_groups
            call_info['block_device_mapping'] = block_device_mapping
            return self.instances

        def fake_rpc_build_instances(ctxt, **build_inst_kwargs):
            call_info['build_inst_kwargs'] = build_inst_kwargs

        def fake_build_request_spec(ctxt, image, instances):
            request_spec = {
                'num_instances': len(instances),
                'image': image}
            return request_spec

        self.stubs.Set(self.scheduler, '_create_instances_here',
                       fake_create_instances_here)
        self.stubs.Set(self.scheduler.compute_task_api,
                       'build_instances', fake_rpc_build_instances)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                       fake_build_request_spec)
        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                                        build_inst_kwargs)
        # The local cell created the instances and forwarded the build
        # to the compute task API with unmodified kwargs.
        self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
        self.assertEqual(self.build_inst_kwargs['instances'][0]['id'],
                         call_info['instance_properties']['id'])
        self.assertEqual(
            self.build_inst_kwargs['filter_properties']['instance_type'],
            call_info['instance_type'])
        self.assertEqual(self.build_inst_kwargs['image'], call_info['image'])
        self.assertEqual(self.build_inst_kwargs['security_groups'],
                         call_info['security_groups'])
        self.assertEqual(self.build_inst_kwargs['block_device_mapping'],
                         call_info['block_device_mapping'])
        self.assertEqual(build_inst_kwargs,
                         call_info['build_inst_kwargs'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])

    def test_build_instances_retries_when_no_cells_avail(self):
        self.flags(scheduler_retries=7, group='cells')
        call_info = {'num_tries': 0, 'errored_uuids': []}

        def fake_grab_target_cells(filter_properties):
            call_info['num_tries'] += 1
            raise exception.NoCellsAvailable()

        def fake_sleep(_secs):
            # Avoid real delays between scheduling retries.
            return

        def fake_instance_save(inst):
            self.assertEqual(vm_states.ERROR, inst.vm_state)
            call_info['errored_uuids'].append(inst.uuid)

        def fake_build_request_spec(ctxt, image, instances):
            request_spec = {
                'num_instances': len(instances),
                'image': image}
            return request_spec

        self.stubs.Set(self.scheduler, '_grab_target_cells',
                       fake_grab_target_cells)
        self.stubs.Set(time, 'sleep', fake_sleep)
        self.stubs.Set(compute.Instance, 'save', fake_instance_save)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                       fake_build_request_spec)
        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                                        self.build_inst_kwargs)
        # Initial attempt + 7 configured retries.
        self.assertEqual(8, call_info['num_tries'])
        self.assertEqual(self.instance_uuids, call_info['errored_uuids'])

    def test_schedule_method_on_random_exception(self):
        self.flags(scheduler_retries=7, group='cells')
        instances = [compute.Instance(uuid=uuid) for uuid in
                     self.instance_uuids]
        method_kwargs = {
            'image': 'fake_image',
            'instances': instances,
            'filter_properties': {}}
        call_info = {'num_tries': 0,
                     'errored_uuids1': [],
                     'errored_uuids2': []}

        def fake_grab_target_cells(filter_properties):
            call_info['num_tries'] += 1
            # An unexpected (non-NoCellsAvailable) error must not be retried.
            raise test.TestingException()

        def fake_instance_save(inst):
            self.assertEqual(vm_states.ERROR, inst.vm_state)
            call_info['errored_uuids1'].append(inst.uuid)

        def fake_instance_update_at_top(ctxt, instance):
            self.assertEqual(vm_states.ERROR, instance['vm_state'])
            call_info['errored_uuids2'].append(instance['uuid'])

        def fake_build_request_spec(ctxt, image, instances):
            request_spec = {
                'num_instances': len(instances),
                'image': image}
            return request_spec

        self.stubs.Set(self.scheduler, '_grab_target_cells',
                       fake_grab_target_cells)
        self.stubs.Set(compute.Instance, 'save', fake_instance_save)
        self.stubs.Set(self.msg_runner, 'instance_update_at_top',
                       fake_instance_update_at_top)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                       fake_build_request_spec)
        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                                        method_kwargs)
        # Shouldn't retry
        self.assertEqual(1, call_info['num_tries'])
        self.assertEqual(self.instance_uuids, call_info['errored_uuids1'])
        self.assertEqual(self.instance_uuids, call_info['errored_uuids2'])

    def test_filter_schedule_skipping(self):
        # if a filter handles scheduling, short circuit
        def _grab(filter_properties):
            return None
        self.stubs.Set(self.scheduler, '_grab_target_cells', _grab)

        def _test(self, *args):
            raise test.TestingException("shouldn't be called")

        try:
            self.scheduler._schedule_build_to_cells(None, None, None, _test,
                                                    None)
        except test.TestingException:
            self.fail("Scheduling did not properly short circuit")

    def test_cells_filter_args_correct(self):
        # Re-init our fakes with some filters.
        # NOTE(review): this dotted path disagrees with the import paths
        # above ('jacket.tests.compute.unit.cells...') — confirm the filter
        # classes actually resolve from 'compute.tests.unit.cells...'.
        our_path = 'compute.tests.unit.cells.test_cells_scheduler'
        cls_names = [our_path + '.' + 'FakeFilterClass1',
                     our_path + '.' + 'FakeFilterClass2']
        self.flags(scheduler_filter_classes=cls_names, group='cells')
        self._init_cells_scheduler()
        # Make sure there's no child cells so that we will be
        # selected. Makes stubbing easier.
        self.state_manager.child_cells = {}
        call_info = {}

        def fake_create_instances_here(ctxt, instance_uuids,
                instance_properties, instance_type, image, security_groups,
                block_device_mapping):
            call_info['ctxt'] = ctxt
            call_info['instance_uuids'] = instance_uuids
            call_info['instance_properties'] = instance_properties
            call_info['instance_type'] = instance_type
            call_info['image'] = image
            call_info['security_groups'] = security_groups
            call_info['block_device_mapping'] = block_device_mapping

        def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
            call_info['host_sched_kwargs'] = host_sched_kwargs

        def fake_get_filtered_objs(filters, cells, filt_properties):
            call_info['filt_objects'] = filters
            call_info['filt_cells'] = cells
            call_info['filt_props'] = filt_properties
            return cells

        def fake_build_request_spec(ctxt, image, instances):
            request_spec = {
                'num_instances': len(instances),
                'instance_properties': instances[0],
                'image': image,
                'instance_type': 'fake_type'}
            return request_spec

        self.stubs.Set(self.scheduler, '_create_instances_here',
                       fake_create_instances_here)
        self.stubs.Set(self.scheduler.compute_task_api,
                       'build_instances', fake_rpc_build_instances)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                       fake_build_request_spec)
        filter_handler = self.scheduler.filter_handler
        self.stubs.Set(filter_handler, 'get_filtered_objects',
                       fake_get_filtered_objs)
        host_sched_kwargs = {'image': 'fake_image',
                             'instances': self.instances,
                             'filter_properties':
                                 {'instance_type': 'fake_type'},
                             'security_groups': 'fake_sec_groups',
                             'block_device_mapping': 'fake_bdm'}
        self.msg_runner.build_instances(self.ctxt,
                self.my_cell_state, host_sched_kwargs)
        # Our cell was selected.
        self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
        self.assertEqual(self.request_spec['instance_properties']['id'],
                         call_info['instance_properties']['id'])
        self.assertEqual(self.request_spec['instance_type'],
                         call_info['instance_type'])
        self.assertEqual(self.request_spec['image'], call_info['image'])
        self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
        # Filter args are correct
        expected_filt_props = {'context': self.ctxt,
                               'scheduler': self.scheduler,
                               'routing_path': self.my_cell_state.name,
                               'host_sched_kwargs': host_sched_kwargs,
                               'request_spec': self.request_spec,
                               'instance_type': 'fake_type'}
        self.assertEqual(expected_filt_props, call_info['filt_props'])
        self.assertEqual([FakeFilterClass1, FakeFilterClass2],
                         [obj.__class__ for obj in call_info['filt_objects']])
        self.assertEqual([self.my_cell_state], call_info['filt_cells'])

    def test_cells_filter_returning_none(self):
        # Re-init our fakes with some filters.
        our_path = 'compute.tests.unit.cells.test_cells_scheduler'
        cls_names = [our_path + '.' + 'FakeFilterClass1',
                     our_path + '.' + 'FakeFilterClass2']
        self.flags(scheduler_filter_classes=cls_names, group='cells')
        self._init_cells_scheduler()
        # Make sure there's no child cells so that we will be
        # selected. Makes stubbing easier.
        self.state_manager.child_cells = {}
        call_info = {'scheduled': False}

        def fake_create_instances_here(ctxt, request_spec):
            # Should not be called
            call_info['scheduled'] = True

        def fake_get_filtered_objs(filter_classes, cells, filt_properties):
            # Should cause scheduling to be skipped. Means that the
            # filter did it.
            return None

        self.stubs.Set(self.scheduler, '_create_instances_here',
                       fake_create_instances_here)
        filter_handler = self.scheduler.filter_handler
        self.stubs.Set(filter_handler, 'get_filtered_objects',
                       fake_get_filtered_objs)
        self.msg_runner.build_instances(self.ctxt,
                self.my_cell_state, {})
        self.assertFalse(call_info['scheduled'])

    def test_cells_weight_args_correct(self):
        # Re-init our fakes with some filters.
        our_path = 'compute.tests.unit.cells.test_cells_scheduler'
        cls_names = [our_path + '.' + 'FakeWeightClass1',
                     our_path + '.' + 'FakeWeightClass2']
        self.flags(scheduler_weight_classes=cls_names, group='cells')
        self._init_cells_scheduler()
        # Make sure there's no child cells so that we will be
        # selected. Makes stubbing easier.
        self.state_manager.child_cells = {}
        call_info = {}

        def fake_create_instances_here(ctxt, instance_uuids,
                instance_properties, instance_type, image, security_groups,
                block_device_mapping):
            call_info['ctxt'] = ctxt
            call_info['instance_uuids'] = instance_uuids
            call_info['instance_properties'] = instance_properties
            call_info['instance_type'] = instance_type
            call_info['image'] = image
            call_info['security_groups'] = security_groups
            call_info['block_device_mapping'] = block_device_mapping

        def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
            call_info['host_sched_kwargs'] = host_sched_kwargs

        def fake_get_weighed_objs(weighers, cells, filt_properties):
            call_info['weighers'] = weighers
            call_info['weight_cells'] = cells
            call_info['weight_props'] = filt_properties
            return [weights.WeightedCell(cells[0], 0.0)]

        def fake_build_request_spec(ctxt, image, instances):
            request_spec = {
                'num_instances': len(instances),
                'instance_properties': instances[0],
                'image': image,
                'instance_type': 'fake_type'}
            return request_spec

        self.stubs.Set(self.scheduler, '_create_instances_here',
                       fake_create_instances_here)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                       fake_build_request_spec)
        self.stubs.Set(self.scheduler.compute_task_api,
                       'build_instances', fake_rpc_build_instances)
        weight_handler = self.scheduler.weight_handler
        self.stubs.Set(weight_handler, 'get_weighed_objects',
                       fake_get_weighed_objs)
        host_sched_kwargs = {'image': 'fake_image',
                             'instances': self.instances,
                             'filter_properties':
                                 {'instance_type': 'fake_type'},
                             'security_groups': 'fake_sec_groups',
                             'block_device_mapping': 'fake_bdm'}
        self.msg_runner.build_instances(self.ctxt,
                self.my_cell_state, host_sched_kwargs)
        # Our cell was selected.
        self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
        self.assertEqual(self.request_spec['instance_properties']['id'],
                         call_info['instance_properties']['id'])
        self.assertEqual(self.request_spec['instance_type'],
                         call_info['instance_type'])
        self.assertEqual(self.request_spec['image'], call_info['image'])
        self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
        # Weight args are correct
        expected_filt_props = {'context': self.ctxt,
                               'scheduler': self.scheduler,
                               'routing_path': self.my_cell_state.name,
                               'host_sched_kwargs': host_sched_kwargs,
                               'request_spec': self.request_spec,
                               'instance_type': 'fake_type'}
        self.assertEqual(expected_filt_props, call_info['weight_props'])
        self.assertEqual([FakeWeightClass1, FakeWeightClass2],
                         [obj.__class__ for obj in call_info['weighers']])
        self.assertEqual([self.my_cell_state], call_info['weight_cells'])
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
# NOTE: these enum definitions are AutoRest-generated (see the file header);
# hand edits will be lost on regeneration. Member values are the exact wire
# strings expected by the Azure network REST API.
class TransportProtocol(str, Enum):
    udp = "Udp"
    tcp = "Tcp"


class IPAllocationMethod(str, Enum):
    static = "Static"
    dynamic = "Dynamic"


class IPVersion(str, Enum):
    ipv4 = "IPv4"
    ipv6 = "IPv6"


class SecurityRuleProtocol(str, Enum):
    tcp = "Tcp"
    udp = "Udp"
    asterisk = "*"  # wildcard: matches any protocol


class SecurityRuleAccess(str, Enum):
    allow = "Allow"
    deny = "Deny"


class SecurityRuleDirection(str, Enum):
    inbound = "Inbound"
    outbound = "Outbound"


class RouteNextHopType(str, Enum):
    virtual_network_gateway = "VirtualNetworkGateway"
    vnet_local = "VnetLocal"
    internet = "Internet"
    virtual_appliance = "VirtualAppliance"
    none = "None"


# --- Application Gateway ---
class ApplicationGatewayProtocol(str, Enum):
    http = "Http"
    https = "Https"


class ApplicationGatewayCookieBasedAffinity(str, Enum):
    enabled = "Enabled"
    disabled = "Disabled"


class ApplicationGatewayBackendHealthServerHealth(str, Enum):
    unknown = "Unknown"
    up = "Up"
    down = "Down"
    partial = "Partial"
    draining = "Draining"


class ApplicationGatewaySkuName(str, Enum):
    standard_small = "Standard_Small"
    standard_medium = "Standard_Medium"
    standard_large = "Standard_Large"
    waf_medium = "WAF_Medium"
    waf_large = "WAF_Large"


class ApplicationGatewayTier(str, Enum):
    standard = "Standard"
    waf = "WAF"


class ApplicationGatewaySslProtocol(str, Enum):
    tl_sv1_0 = "TLSv1_0"
    tl_sv1_1 = "TLSv1_1"
    tl_sv1_2 = "TLSv1_2"


class ApplicationGatewayRequestRoutingRuleType(str, Enum):
    basic = "Basic"
    path_based_routing = "PathBasedRouting"


class ApplicationGatewayOperationalState(str, Enum):
    stopped = "Stopped"
    starting = "Starting"
    running = "Running"
    stopping = "Stopping"


class ApplicationGatewayFirewallMode(str, Enum):
    detection = "Detection"
    prevention = "Prevention"


# --- ExpressRoute ---
class AuthorizationUseStatus(str, Enum):
    available = "Available"
    in_use = "InUse"


class ExpressRouteCircuitPeeringAdvertisedPublicPrefixState(str, Enum):
    not_configured = "NotConfigured"
    configuring = "Configuring"
    configured = "Configured"
    validation_needed = "ValidationNeeded"


class Access(str, Enum):
    allow = "Allow"
    deny = "Deny"


class ExpressRouteCircuitPeeringType(str, Enum):
    azure_public_peering = "AzurePublicPeering"
    azure_private_peering = "AzurePrivatePeering"
    microsoft_peering = "MicrosoftPeering"


class ExpressRouteCircuitPeeringState(str, Enum):
    disabled = "Disabled"
    enabled = "Enabled"


class ExpressRouteCircuitSkuTier(str, Enum):
    standard = "Standard"
    premium = "Premium"


class ExpressRouteCircuitSkuFamily(str, Enum):
    unlimited_data = "UnlimitedData"
    metered_data = "MeteredData"


class ServiceProviderProvisioningState(str, Enum):
    not_provisioned = "NotProvisioned"
    provisioning = "Provisioning"
    provisioned = "Provisioned"
    deprovisioning = "Deprovisioning"


# --- Load balancer / probes ---
class LoadDistribution(str, Enum):
    default = "Default"
    source_ip = "SourceIP"
    source_ip_protocol = "SourceIPProtocol"


class ProbeProtocol(str, Enum):
    http = "Http"
    tcp = "Tcp"


class NetworkOperationStatus(str, Enum):
    in_progress = "InProgress"
    succeeded = "Succeeded"
    failed = "Failed"


class EffectiveRouteSource(str, Enum):
    unknown = "Unknown"
    user = "User"
    virtual_network_gateway = "VirtualNetworkGateway"
    default = "Default"


class EffectiveRouteState(str, Enum):
    active = "Active"
    invalid = "Invalid"


class ProvisioningState(str, Enum):
    succeeded = "Succeeded"
    updating = "Updating"
    deleting = "Deleting"
    failed = "Failed"


class AssociationType(str, Enum):
    associated = "Associated"
    contains = "Contains"


class Direction(str, Enum):
    inbound = "Inbound"
    outbound = "Outbound"


class Protocol(str, Enum):
    tcp = "TCP"
    udp = "UDP"


class NextHopType(str, Enum):
    internet = "Internet"
    virtual_appliance = "VirtualAppliance"
    virtual_network_gateway = "VirtualNetworkGateway"
    vnet_local = "VnetLocal"
    hyper_net_gateway = "HyperNetGateway"
    none = "None"


# --- Packet capture (Network Watcher) ---
class PcProtocol(str, Enum):
    tcp = "TCP"
    udp = "UDP"
    any = "Any"


class PcStatus(str, Enum):
    not_started = "NotStarted"
    running = "Running"
    stopped = "Stopped"
    error = "Error"
    unknown = "Unknown"


class PcError(str, Enum):
    internal_error = "InternalError"
    agent_stopped = "AgentStopped"
    capture_failed = "CaptureFailed"
    local_file_failed = "LocalFileFailed"
    storage_failed = "StorageFailed"


class Origin(str, Enum):
    local = "Local"
    inbound = "Inbound"
    outbound = "Outbound"


class Severity(str, Enum):
    error = "Error"
    warning = "Warning"


class IssueType(str, Enum):
    unknown = "Unknown"
    agent_stopped = "AgentStopped"
    guest_firewall = "GuestFirewall"
    dns_resolution = "DnsResolution"
    socket_bind = "SocketBind"
    network_security_rule = "NetworkSecurityRule"
    user_defined_route = "UserDefinedRoute"
    port_throttled = "PortThrottled"
    platform = "Platform"


class ConnectionStatus(str, Enum):
    unknown = "Unknown"
    connected = "Connected"
    disconnected = "Disconnected"
    degraded = "Degraded"


# --- Virtual network gateways / VPN ---
class VirtualNetworkPeeringState(str, Enum):
    initiated = "Initiated"
    connected = "Connected"
    disconnected = "Disconnected"


class VirtualNetworkGatewayType(str, Enum):
    vpn = "Vpn"
    express_route = "ExpressRoute"


class VpnType(str, Enum):
    policy_based = "PolicyBased"
    route_based = "RouteBased"


class VirtualNetworkGatewaySkuName(str, Enum):
    basic = "Basic"
    high_performance = "HighPerformance"
    standard = "Standard"
    ultra_performance = "UltraPerformance"
    vpn_gw1 = "VpnGw1"
    vpn_gw2 = "VpnGw2"
    vpn_gw3 = "VpnGw3"


class VirtualNetworkGatewaySkuTier(str, Enum):
    basic = "Basic"
    high_performance = "HighPerformance"
    standard = "Standard"
    ultra_performance = "UltraPerformance"
    vpn_gw1 = "VpnGw1"
    vpn_gw2 = "VpnGw2"
    vpn_gw3 = "VpnGw3"


class BgpPeerState(str, Enum):
    unknown = "Unknown"
    stopped = "Stopped"
    idle = "Idle"
    connecting = "Connecting"
    connected = "Connected"


class ProcessorArchitecture(str, Enum):
    amd64 = "Amd64"
    x86 = "X86"


class VirtualNetworkGatewayConnectionStatus(str, Enum):
    unknown = "Unknown"
    connecting = "Connecting"
    connected = "Connected"
    not_connected = "NotConnected"


class VirtualNetworkGatewayConnectionType(str, Enum):
    ipsec = "IPsec"
    vnet2_vnet = "Vnet2Vnet"
    express_route = "ExpressRoute"
    vpn_client = "VPNClient"


# --- IPsec / IKE policy ---
class IpsecEncryption(str, Enum):
    none = "None"
    des = "DES"
    des3 = "DES3"
    aes128 = "AES128"
    aes192 = "AES192"
    aes256 = "AES256"
    gcmaes128 = "GCMAES128"
    gcmaes192 = "GCMAES192"
    gcmaes256 = "GCMAES256"


class IpsecIntegrity(str, Enum):
    md5 = "MD5"
    sha1 = "SHA1"
    sha256 = "SHA256"
    gcmaes128 = "GCMAES128"
    gcmaes192 = "GCMAES192"
    gcmaes256 = "GCMAES256"


class IkeEncryption(str, Enum):
    des = "DES"
    des3 = "DES3"
    aes128 = "AES128"
    aes192 = "AES192"
    aes256 = "AES256"


class IkeIntegrity(str, Enum):
    md5 = "MD5"
    sha1 = "SHA1"
    sha256 = "SHA256"
    sha384 = "SHA384"


class DhGroup(str, Enum):
    none = "None"
    dh_group1 = "DHGroup1"
    dh_group2 = "DHGroup2"
    dh_group14 = "DHGroup14"
    dh_group2048 = "DHGroup2048"
    ecp256 = "ECP256"
    ecp384 = "ECP384"
    dh_group24 = "DHGroup24"


class PfsGroup(str, Enum):
    none = "None"
    pfs1 = "PFS1"
    pfs2 = "PFS2"
    pfs2048 = "PFS2048"
    ecp256 = "ECP256"
    ecp384 = "ECP384"
    pfs24 = "PFS24"
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from .. import core
from ..framework import Program, default_main_program, Parameter
from ..backward import _rename_arg_
# Size in bytes of one element of each supported variable dtype.
dtype_to_size = {
    core.VarDesc.VarType.FP16: 2,
    core.VarDesc.VarType.FP32: 4,
    core.VarDesc.VarType.FP64: 8,
    core.VarDesc.VarType.INT16: 2,
    core.VarDesc.VarType.INT32: 4,
    core.VarDesc.VarType.INT64: 8,
    core.VarDesc.VarType.BOOL: 1,
    core.VarDesc.VarType.UINT8: 1,
}

# Op types that own a sub-block; they are skipped by the per-op passes and
# their sub-blocks are analyzed separately (see _process_sub_block_pair).
SUB_BLOCK_OPS = [
    "while", "while_grad", "parallel_do", "parallel_do_grad",
    "conditional_block", "conditional_block_grad"
]

# (forward op, backward op) pairs whose sub-blocks are analyzed together.
SUB_BLOCK_PAIR = [("while", "while_grad"), ("parallel_do", "parallel_do_grad"),
                  ("conditional_block", "conditional_block_grad")]

# Toggled by memory_optimize(print_log=...) to trace cache-pool hits.
PRINT_LOG = False
class ControlFlowGraph(object):
    """Liveness analysis over the op sequence of one (sub)block.

    Ops are graph nodes indexed by position; edges connect consecutive ops.
    A backward dataflow pass computes per-op live-in/live-out variable sets,
    which drive two transformations:

      * ``release_memory``  -- insert ``delete_var`` ops to drop dead vars.
      * ``memory_optimize`` -- rename a newly defined var onto a pooled,
        already-allocated dead var of compatible shape/dtype to reuse memory.
    """

    def __init__(self, program, ops, forward_num, skip_opt):
        # program: the Program being rewritten (needed to patch var descs).
        # ops: op descs of one block (forward + backward concatenated).
        # forward_num: ops with index < forward_num belong to the forward pass.
        # skip_opt: var names that must never be reused or deleted.
        self._program = program
        self._ops = ops
        self._forward_num = forward_num
        self._successors = defaultdict(set)
        self._presuccessors = defaultdict(set)
        self._uses = defaultdict(set)
        self._defs = defaultdict(set)
        self._live_in = defaultdict(set)
        self._live_out = defaultdict(set)
        self._skip_opt = skip_opt

    def _add_connections(self, connections):
        """Populates _successors and _presuccessors for two neighbor nodes."""
        for node1, node2 in connections:
            self._add(node1, node2)

    def _add(self, node1, node2):
        """Record a directed edge node1 -> node2."""
        self._successors[node1].add(node2)
        self._presuccessors[node2].add(node1)

    # TODO(panyx0718): We need to have a unified way of building intermediate
    # representation.
    def _build_graph(self):
        """Build a graph based on op sequence.

        The control flow inside one block is linear, so op i is connected
        to op i+1; per-op use/def sets come from the op's arg names.
        """
        self.op_size = len(self._ops)
        op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)]
        self._add_connections(op_node_connections)
        for i in range(self.op_size):
            self._uses[i].update(self._ops[i].input_arg_names())
            self._defs[i].update(self._ops[i].output_arg_names())

    def _update_graph(self, old_name, new_name, begin_idx=0):
        """Rename old_name -> new_name in every per-op set from begin_idx on."""
        for i in range(begin_idx, self.op_size):
            if old_name in self._uses[i]:
                self._uses[i].remove(old_name)
                self._uses[i].add(new_name)
            if old_name in self._defs[i]:
                self._defs[i].remove(old_name)
                self._defs[i].add(new_name)
            if old_name in self._live_in[i]:
                self._live_in[i].remove(old_name)
                # BUG FIX: the renamed var must remain live-in; the original
                # code mistakenly added new_name to _live_out here.
                self._live_in[i].add(new_name)
            if old_name in self._live_out[i]:
                self._live_out[i].remove(old_name)
                self._live_out[i].add(new_name)

    def _reach_fixed_point(self, live_in, live_out):
        """Check if the liveness set has stablized."""
        if len(live_in) != len(self._live_in):
            return False
        if len(live_out) != len(self._live_out):
            return False
        for i in range(self.op_size):
            if (live_in[i] != self._live_in[i] or
                    live_out[i] != self._live_out[i]):
                return False
        return True

    def _dataflow_analyze(self):
        """Standard backward liveness analysis to a fixed point."""
        self._build_graph()
        live_in = defaultdict(set)
        live_out = defaultdict(set)
        # Repeatedly apply liveness updates until the algorithm stablize
        # on a complete set live input vars and live output vars.
        while True:
            for i in reversed(range(self.op_size)):
                # Snapshot before updating so convergence can be detected.
                live_in[i] = set(self._live_in[i])
                live_out[i] = set(self._live_out[i])
                for s in self._successors[i]:
                    self._live_out[i] |= self._live_in[s]
                self._live_in[i] = self._uses[i] | (
                    self._live_out[i] - self._defs[i])
            if self._reach_fixed_point(live_in, live_out):
                break

    def _get_diff(self, a, b):
        """Return (a - b, b - a) as a pair of sets."""
        u = a & b
        return a - u, b - u

    def _has_var(self, block_desc, var_name, is_forward):
        # Backward blocks may reference vars declared in ancestor blocks,
        # hence the recursive lookup.
        if is_forward:
            return block_desc.has_var(str(var_name))
        else:
            return block_desc.has_var_recursive(str(var_name))

    def _find_var(self, block_desc, var_name, is_forward):
        if is_forward:
            return block_desc.find_var(str(var_name))
        else:
            return block_desc.find_var_recursive(str(var_name))

    def _check_var_validity(self, block_desc, x, is_forward):
        """True if var x is a safe candidate for reuse/deletion."""
        if str(x) == "@EMPTY@":
            return False
        if not self._has_var(block_desc, x, is_forward):
            return False
        if self._find_var(block_desc, x, is_forward).persistable():
            return False
        if self._find_var(block_desc, x,
                          is_forward).type() != core.VarDesc.VarType.LOD_TENSOR:
            return False
        if x in self._skip_opt:
            return False
        if not self._find_var(block_desc, x, is_forward).shape():
            return False
        return True

    # TODO(panyx0718): This needs to be less hacky. It seems memory optimization
    # doesn't consider vars copied between cpu and gpu.
    def _update_skip_opt_set(self):
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() == "fill_constant" and op.attr("force_cpu") == True:
                self._skip_opt.update(op.output_arg_names())

    def release_memory(self, skip_opt_set=None):
        """Insert a ``delete_var`` op after the last use of each dead var.

        :param skip_opt_set: extra var names to protect from deletion.
        """
        self._dataflow_analyze()
        self._update_skip_opt_set()
        if skip_opt_set:
            self._skip_opt.update(skip_opt_set)
        fwd_id = 0
        bwd_id = 0
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() in SUB_BLOCK_OPS:
                continue
            block_desc = op.block()
            is_forward = i < self._forward_num
            in_diff, out_diff = self._get_diff(self._live_in[i],
                                               self._live_out[i])
            # List comprehension instead of filter(): on Python 3 a filter
            # object is always truthy, which broke the emptiness check below.
            can_optimize = [
                x for x in in_diff
                if self._check_var_validity(block_desc, x, is_forward)
            ]
            if can_optimize:
                # Offset by the number of delete ops already inserted so the
                # new op lands right after op i in the (growing) block.
                index = i + fwd_id + 1 if is_forward else i - self._forward_num + bwd_id + 1
                delete_op = block_desc._insert_op(index)
                delete_op.set_type("delete_var")
                delete_op.set_input("X", can_optimize)
                if is_forward:
                    fwd_id += 1
                else:
                    bwd_id += 1

    def memory_optimize(self, skip_opt_set=None, level=0):
        """Reuse memory of dead vars for newly defined vars.

        :param skip_opt_set: extra var names to protect from reuse.
        :param level: 0 -> shapes must match exactly; 1 -> a cached var may
            be reused when its total size is at least as large (and the
            batch (-1) dimension appears in both or neither shape).
        """
        from functools import reduce  # py2/py3-compatible home of reduce

        def compare_shape(x_shape, cache_shape, opt_level):
            if opt_level == 0:
                return x_shape == cache_shape
            elif opt_level == 1:
                # The -1 (batch) dim must appear in both shapes or neither.
                if (x_shape[0] == -1) ^ (cache_shape[0] == -1):
                    return False
                x_size = abs(reduce(lambda x, y: x * y, x_shape))
                cache_size = abs(reduce(lambda x, y: x * y, cache_shape))
                if x_size <= cache_size:
                    return True
            else:
                raise ValueError("only support opt_level 0 or 1.")
            return False

        self._dataflow_analyze()
        self._update_skip_opt_set()
        # update skip set to meet users' demand
        if skip_opt_set:
            self._skip_opt.update(skip_opt_set)
        # Pool of (var name, shape) pairs whose memory is free for reuse.
        self.pool = []
        for i in range(self.op_size):
            op = self._ops[i]
            if op.type() in SUB_BLOCK_OPS:
                continue
            block_desc = op.block()
            is_forward = i < self._forward_num
            if self.pool:
                # List comprehensions instead of filter() for py3 correctness
                # (see release_memory).
                defs_can_optimize = [
                    x for x in self._defs[i]
                    if self._check_var_validity(block_desc, x, is_forward)
                ]
                out_pair = [
                    (x, self._find_var(block_desc, x, is_forward).shape())
                    for x in defs_can_optimize
                ]
                for x, x_shape in out_pair:
                    # If x is both in uses and defs, it can not be optimized!
                    if x in self._uses[i]:
                        continue
                    for index, cache_pair in enumerate(self.pool):
                        cache_var = cache_pair[0]
                        cache_shape = cache_pair[1]
                        if not compare_shape(x_shape, cache_shape, level):
                            continue
                        if not self._has_var(block_desc, cache_var, is_forward):
                            continue
                        x_dtype = self._find_var(block_desc, x,
                                                 is_forward).dtype()
                        cache_dtype = self._find_var(block_desc, cache_var,
                                                     is_forward).dtype()
                        # TODO(qijun): actually, we should compare
                        # dtype_to_size[x_dtype] and dtype_to_size[cache_dtype]
                        if x_dtype != cache_dtype:
                            continue
                        if PRINT_LOG:
                            print(("Hit Cache !!!! cache pool index "
                                   "is %d, var name is %s, "
                                   "cached var name is %s, "
                                   "var shape is %s ") % (index, x, cache_var,
                                                          str(cache_shape)))
                        self.pool.pop(index)
                        if x == cache_var:
                            break
                        # Rename the var to the cache var already with
                        # memory allocated in order to reuse the memory.
                        _rename_arg_(self._ops, x, cache_var, begin_idx=i)
                        self._program.block(block_desc.id).var(str(
                            x)).desc = self._find_var(block_desc, cache_var,
                                                      is_forward)
                        self._update_graph(x, cache_var, begin_idx=i)
                        break
            # Vars that die at this op join the reuse pool.
            in_diff, _ = self._get_diff(self._live_in[i], self._live_out[i])
            can_optimize = [
                x for x in in_diff
                if self._check_var_validity(block_desc, x, is_forward)
            ]
            if can_optimize:
                for var_name in can_optimize:
                    self.pool.append((var_name, self._find_var(
                        block_desc, var_name, is_forward).shape()))
def _process_sub_block_pair(pdesc, sub_block_pair):
    """Creates a list of tuple each of which tracks info of a subblock.

    Note: this function doesn't handle nested subblocks yet.
    TODO(panyx0718): assert if case nested subblocks happen.

    :param pdesc: ProgramDesc.
    :param sub_block_pair: A list op pairs. Each op pair is the forward
        op and backward op. The ops in the list are special that they contain
        a subblock of ops.
    :return: A list of tuples, each tuple is (all ops in a subblock pair
        including forward and backward, number of forward ops,
        all output args names of the ops in the subblock pairs).
    """
    ops_list = []
    block_desc = pdesc.block(0)
    op_size = block_desc.op_size()
    for fwd_op, bwd_op in sub_block_pair:
        sub_block_ids = []
        grad_sub_block_ids = []
        sub_block_id_pair = []
        sub_op_dict = {}
        # Scan the global block, recording which owning op holds each
        # sub-block id, separately for forward and backward op types.
        for i in range(op_size):
            op = block_desc.op(i)
            if op.type() == fwd_op:
                sub_block_ids.append(op.attr("sub_block").id)
                sub_op_dict[op.attr("sub_block").id] = op
            elif op.type() == bwd_op:
                grad_sub_block_ids.append(op.attr("sub_block").id)
                sub_op_dict[op.attr("sub_block").id] = op
        # Find fwd_op/bwd_op block pair
        for grad_id in grad_sub_block_ids:
            fwd_id = pdesc.block(grad_id).get_forward_block_idx()
            if fwd_id in sub_block_ids:
                sub_block_id_pair.append((fwd_id, grad_id))
                # Paired forward ids are removed; any leftovers (forward
                # blocks with no gradient block) are handled further below.
                sub_block_ids.remove(fwd_id)
        # Get fwd_op/bwd_op block ops
        for fwd_id, grad_id in sub_block_id_pair:
            # Forward ops first, then backward ops; the forward op count is
            # recorded so callers can split the two phases.
            sub_block_ops = []
            sub_block = pdesc.block(fwd_id)
            block_op_size = sub_block.op_size()
            for i in range(block_op_size):
                sub_block_ops.append(sub_block.op(i))
            grad_sub_block = pdesc.block(grad_id)
            grad_sub_block_op_size = grad_sub_block.op_size()
            for i in range(grad_sub_block_op_size):
                sub_block_ops.append(grad_sub_block.op(i))
            # NOTE: despite the name, this set also collects the owning ops'
            # *input* arg names; all of them must be excluded from
            # optimization in the global block.
            sub_op_output = set()
            sub_op_output.update(sub_op_dict[fwd_id].output_arg_names())
            sub_op_output.update(sub_op_dict[grad_id].output_arg_names())
            sub_op_output.update(sub_op_dict[fwd_id].input_arg_names())
            sub_op_output.update(sub_op_dict[grad_id].input_arg_names())
            ops_list.append((sub_block_ops, block_op_size, sub_op_output))
        # Process rest fwd_op block ops (forward blocks without a pair)
        for fwd_id in sub_block_ids:
            sub_block_ops = []
            sub_block = pdesc.block(fwd_id)
            sub_block_op_size = sub_block.op_size()
            for i in range(sub_block_op_size):
                sub_block_ops.append(sub_block.op(i))
            sub_op_output = set()
            sub_op_output.update(sub_op_dict[fwd_id].output_arg_names())
            sub_op_output.update(sub_op_dict[fwd_id].input_arg_names())
            ops_list.append((sub_block_ops, sub_block_op_size, sub_op_output))
    return ops_list
def _get_cfgs(input_program):
    """Process each block and create ControlFlowGraph for each of them.

    :param input_program: Program object.
    :return: A list of ControlFlowGraph, each corresponds to a block.
    """
    program_desc = input_program.get_desc()
    root_block = program_desc.block(0)
    num_ops = root_block.op_size()
    # Only process one level of nested subblock: one entry per subblock
    # pair, each a (ops, forward_op_count, skip_set) tuple.
    block_entries = _process_sub_block_pair(program_desc, SUB_BLOCK_PAIR)
    # Vars touched by any subblock op must be skipped in the global block.
    global_skip = set()
    for _, _, skip in block_entries:
        global_skip |= skip
    # The global block goes first, ahead of all subblock entries.
    root_ops = [root_block.op(idx) for idx in range(num_ops)]
    block_entries.insert(0, (root_ops, num_ops, global_skip))
    return [
        ControlFlowGraph(input_program, ops, fwd_num, skip)
        for ops, fwd_num, skip in block_entries
    ]
def memory_optimize(input_program, skip_opt_set=None, print_log=False, level=0):
    """Optimize memory by reusing var memory.

    Note: it doesn't not support subblock nested in subblock.

    :param input_program: Input Program
    :param skip_opt_set: vars that must not take part in the optimization.
    :param print_log: whether to print debug log.
    :param level: If level=0, reuse if the shape is completely equal, o
    :return:
    """
    if level not in (0, 1):
        raise ValueError("only support opt_level 0 or 1.")
    global PRINT_LOG
    PRINT_LOG = print_log
    for graph in _get_cfgs(input_program):
        graph.memory_optimize(skip_opt_set=skip_opt_set, level=level)
def release_memory(input_program, skip_opt_set=None):
    """
    Modify the input program and insert :code:`delete_op` to early drop not used
    variables. The modification will be performed inplace.

    Notes: This is an experimental API and could be removed in next few
    releases. Users should not use this API.

    Args:
        input_program(Program): The program will be inserted :code:`delete_op`.
        skip_opt_set(set): vars that must not be deleted.
    """
    for graph in _get_cfgs(input_program):
        graph.release_memory(skip_opt_set=skip_opt_set)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from unittest import TestCase
from pyup.providers.github import Provider
from pyup.requirements import RequirementsBundle
from pyup import errors
from github import GithubException, UnknownObjectException
from mock import Mock, patch, PropertyMock
class ProviderTest(TestCase):
    """Unit tests for the GitHub ``Provider`` backend.

    All GitHub traffic is mocked: ``setUp`` gives the provider a mocked
    bundle and a mocked ``_api`` handle, and ``self.repo`` stands in for a
    PyGithub repository object.
    """

    def setUp(self):
        # Fresh provider and repo mocks for every test.
        self.provider = Provider(bundle=Mock())
        self.provider._api = Mock()
        self.repo = Mock()

    def test_is_same_user(self):
        # Users are compared by their login name.
        this = Mock()
        this.login = "this"
        that = Mock()
        that.login = "that"
        self.assertFalse(Provider.is_same_user(this, that))
        that.login = "this"
        self.assertTrue(Provider.is_same_user(this, that))

    @patch("pyup.providers.github.Github")
    def test_api(self, github_mock):
        prov = Provider(bundle=RequirementsBundle())
        prov._api("foo")
        github_mock.assert_called_once_with("foo", base_url=None, timeout=50, verify=True)

    @patch("pyup.providers.github.Github")
    def test_api_different_host_in_provider_url(self, github_mock):
        url = 'localhost'
        token = 'foo'
        prov = Provider(bundle=RequirementsBundle(), url=url)
        prov._api(token)
        github_mock.assert_called_once_with(token, base_url=url, timeout=50, verify=True)

    @patch("pyup.providers.github.Github")
    def test_api_different_token_new_instance(self, github_mock):
        # Same token twice reuses the client (constructed once); a
        # different token constructs a new client.
        token1, token2 = 'foo', 'foo2'
        prov = Provider(bundle=RequirementsBundle())
        prov._api(token1)
        prov._api(token1)
        github_mock.assert_called_once_with(token1, base_url=None, timeout=50, verify=True)
        prov._api(token2)
        github_mock.assert_called_with(token2, base_url=None, timeout=50, verify=True)

    def test_get_user(self):
        self.provider.get_user("foo")
        self.provider._api().get_user.assert_called_once_with()

    def test_get_repo(self):
        self.provider.get_repo("token", "name")
        self.provider._api().get_repo.assert_called_once_with("name")

    def test_get_default_branch(self):
        self.repo.default_branch = "foo"
        self.assertEqual(
            self.provider.get_default_branch(self.repo),
            "foo"
        )
        # An unknown repo must surface as RepoDoesNotExistError.
        p = PropertyMock(side_effect=UnknownObjectException(data="", status=1))
        type(self.repo).default_branch = p
        with self.assertRaises(errors.RepoDoesNotExistError):
            self.provider.get_default_branch(self.repo)

    def test_get_pull_request_permissions(self):
        user = Mock()
        user.login = "some-dude"
        self.provider.get_pull_request_permissions(user, self.repo)
        self.repo.add_to_collaborators.assert_called_once_with("some-dude")
        # A failed collaborator add maps to NoPermissionError.
        self.repo.add_to_collaborators.side_effect = GithubException(data="", status=1)
        with self.assertRaises(errors.NoPermissionError):
            self.provider.get_pull_request_permissions(user, self.repo)

    def test_iter_git_tree(self):
        mocked_items = [Mock(type="type", path="path")]
        self.repo.get_git_tree().tree = mocked_items
        items = list(self.provider.iter_git_tree(self.repo, "some branch"))
        self.assertEqual(items, [("type", "path")])
        # Unexpected API errors propagate instead of being swallowed.
        self.repo.get_git_tree.side_effect = GithubException(data="", status=999)
        with self.assertRaises(GithubException):
            list(self.provider.iter_git_tree(self.repo, "some branch"))

    def test_get_file(self):
        content, obj = self.provider.get_file(self.repo, "path", "branch")
        self.assertIsNotNone(content)
        self.assertIsNotNone(obj)
        self.repo.get_contents.assert_called_with("path", ref="branch")
        # API errors are mapped to a (None, None) result.
        self.repo.get_contents.side_effect = GithubException(data="", status=1)
        content, obj = self.provider.get_file(self.repo, "path", "branch")
        self.assertIsNone(content)
        self.assertIsNone(obj)

    def test_get_file_placeholder(self):
        # template path (e.g. cookiecutter template): '{' and '}' should not be escaped
        content, obj = self.provider.get_file(self.repo, "{{path}}", "branch")
        self.assertIsNotNone(content)
        self.assertIsNotNone(obj)
        self.repo.get_contents.assert_called_with("{{path}}", ref="branch")

    def test_get_requirement_file(self):
        req = self.provider.get_requirement_file(self.repo, "path", "branch")
        self.assertIsNotNone(req)
        self.provider.bundle.get_requirement_file_class.assert_called_once_with()
        self.assertEqual(self.provider.bundle.get_requirement_file_class().call_count, 1)
        # When the file cannot be fetched, no requirement file is built.
        self.provider.get_file = Mock(return_value = (None, None))
        req = self.provider.get_requirement_file(self.repo, "path", "branch")
        self.assertIsNone(req)

    def test_create_branch(self):
        self.provider.create_branch(self.repo, "base branch", "new branch")
        self.repo.get_git_ref.assert_called_once_with("heads/base branch")
        self.repo.get_git_ref.side_effect = GithubException(data="", status=1)
        with self.assertRaises(errors.BranchExistsError):
            self.provider.create_branch(self.repo, "base branch", "new branch")

    def test_is_empty_branch(self):
        # The branch name must carry the expected prefix.
        with self.assertRaises(AssertionError):
            self.provider.is_empty_branch(self.repo, "master", "foo", prefix="bar")
        # Zero commits between branches means the branch is empty.
        self.repo.compare().total_commits = 0
        self.assertTrue(
            self.provider.is_empty_branch(self.repo, "master", "pyup-foo", prefix="pyup-")
        )
        self.repo.compare().total_commits = 0
        self.assertTrue(
            self.provider.is_empty_branch(self.repo, "master", "pyup/foo", prefix="pyup/")
        )
        self.repo.compare().total_commits = 1
        self.assertFalse(
            self.provider.is_empty_branch(self.repo, "master", "pyup-foo", prefix="pyup-")
        )

    def test_delete_branch(self):
        # Only branches with the expected prefix may be deleted.
        with self.assertRaises(AssertionError):
            self.provider.delete_branch(self.repo, "foo", prefix="bar")
        self.provider.delete_branch(self.repo, "pyup-foo", prefix="pyup-")
        self.repo.get_git_ref.assert_called_once_with("heads/pyup-foo")
        self.provider.delete_branch(self.repo, "pyup/foo", prefix="pyup/")
        self.repo.get_git_ref.assert_called_with("heads/pyup/foo")

    @patch("pyup.providers.github.time")
    def test_create_commit(self, time):
        self.repo.update_file.return_value = {"commit": Mock(), "content": Mock()}
        self.provider.get_committer_data = Mock(return_value = "foo@bar.com")
        self.provider.create_commit("path", "branch", "commit", "content", "sha", self.repo, "com")
        self.assertEqual(self.repo.update_file.call_count, 1)
        self.repo.update_file.side_effect = GithubException(data="", status=1)
        with self.assertRaises(GithubException):
            self.provider.create_commit("path", "branch", "commit", "content", "sha", self.repo,
                                        "com")

    def test_create_and_commit_file(self):
        repo = Mock()
        path, branch, content, commit_message, committer = (
            '/foo.txt',
            'some-branch',
            'content',
            'some-message',
            'johnny'
        )
        committer_data = Mock()
        committer_data.return_value = 'committer-data'
        self.provider.get_committer_data = committer_data
        data = self.provider.create_and_commit_file(
            repo=repo,
            path=path,
            commit_message=commit_message,
            branch=branch,
            content=content,
            committer=committer
        )
        repo.create_file.assert_called_once_with(
            path=path,
            message=commit_message,
            content=content,
            branch=branch,
            committer='committer-data'
        )

    def test_get_committer_data(self):
        committer = Mock()
        committer.email = "foo@bar.com"
        committer.login = "foo"
        data = self.provider.get_committer_data(committer)._identity
        self.assertEqual(data["name"], "foo")
        self.assertEqual(data["email"], "foo@bar.com")
        # No public e-mail: fall back to the primary address from the API.
        committer = Mock()
        committer.email = None
        committer.login = "foo"
        committer.get_emails.return_value = [{"primary": True, "email": "primary@bar.com"},]
        data = self.provider.get_committer_data(committer)._identity
        self.assertEqual(data["name"], "foo")
        self.assertEqual(data["email"], "primary@bar.com")
        # No address available at all raises NoPermissionError.
        committer = Mock()
        committer.email = None
        committer.login = "foo"
        committer.get_emails.return_value = []
        with self.assertRaises(errors.NoPermissionError):
            data = self.provider.get_committer_data(committer)

    def test_get_pull_request_committer(self):
        committ = Mock()
        committ.committer = "foo"
        pr = Mock()
        self.repo.get_pull().get_commits.return_value = [committ]
        data = self.provider.get_pull_request_committer(self.repo, pr)
        self.assertEqual(data, ["foo"])
        # A vanished PR yields an empty committer list.
        self.repo.get_pull.side_effect = UnknownObjectException(data="", status=1)
        data = self.provider.get_pull_request_committer(self.repo, pr)
        self.assertEqual(data, [])

    def test_close_pull_request(self):
        pr = Mock()
        pr.head.ref = "bla"
        self.repo.get_pull.return_value = pr
        # Refuses to close PRs whose head branch lacks the prefix.
        with self.assertRaises(AssertionError):
            self.provider.close_pull_request(self.repo, self.repo, pr, "comment", prefix="pyup-")
        pr.head.ref = "pyup-bla"
        self.provider.close_pull_request(self.repo, self.repo, pr, "comment", prefix="pyup-")
        self.assertEqual(self.repo.get_git_ref().delete.call_count, 1)
        # A vanished PR makes close_pull_request return False.
        self.repo.get_pull.side_effect = UnknownObjectException(data="", status=1)
        data = self.provider.close_pull_request(self.repo, self.repo, Mock(), "comment",
                                                prefix="pyup-")
        self.assertEqual(data, False)

    def test_create_pull_request_with_exceeding_body(self):
        # A body longer than 65536 chars must still produce a PR
        # (presumably shortened by the provider — confirm in Provider).
        body = ''.join(["a" for i in range(0, 65536 + 1)])
        self.provider.create_pull_request(self.repo, "title", body, "master", "new", False, [])
        self.assertEqual(self.provider.bundle.get_pull_request_class.call_count, 1)
        self.assertEqual(self.provider.bundle.get_pull_request_class().call_count, 1)

    def test_create_pull_request(self):
        self.provider.create_pull_request(self.repo, "title", "body", "master", "new", False, [])
        self.assertEqual(self.provider.bundle.get_pull_request_class.call_count, 1)
        self.assertEqual(self.provider.bundle.get_pull_request_class().call_count, 1)
        self.repo.create_pull.side_effect = GithubException(data="", status=1)
        with self.assertRaises(errors.NoPermissionError):
            self.provider.create_pull_request(self.repo, "title", "body", "master", "new", False, [])

    def test_create_pull_request_with_label(self):
        self.provider.create_pull_request(self.repo, "title", "body", "master", "new", "some-label", [])
        self.assertEqual(self.provider.bundle.get_pull_request_class.call_count, 1)
        self.assertEqual(self.provider.bundle.get_pull_request_class().call_count, 1)

    def test_create_pull_request_with_assignees(self):
        self.provider.create_pull_request(self.repo, "title", "body", "master", "new",
                                          None, ["some-assignee"])
        self.assertEqual(self.provider.bundle.get_pull_request_class.call_count, 1)
        self.assertEqual(self.provider.bundle.get_pull_request_class().call_count, 1)
        # Assignees are applied through the issue edit endpoint.
        self.assertEqual(self.repo.get_issue.call_count, 1)
        self.assertEqual(self.repo.get_issue().edit.call_count, 1)

    def test_create_issue(self):
        self.assertIsNot(self.provider.create_issue(self.repo, "title", "body"), False)
        # A 404 returns False; any other error propagates.
        self.repo.create_issue.side_effect = GithubException(data="", status=404)
        self.assertEqual(self.provider.create_issue(self.repo, "title", "body"), False)
        self.repo.create_issue.side_effect = GithubException(data="", status=999)
        with self.assertRaises(GithubException):
            self.assertEqual(self.provider.create_issue(self.repo, "title", "body"), False)

    def test_iter_issues(self):
        self.repo.get_issues.return_value = [Mock(), Mock()]
        issues = list(self.provider.iter_issues(self.repo, Mock()))
        self.assertEqual(len(issues), 2)

    def test_get_or_create_label(self):
        # Label already exists: no creation call.
        self.provider.get_or_create_label(self.repo, "foo-label")
        self.repo.get_label.assert_called_once_with(name="foo-label")
        self.repo.create_label.assert_not_called()

    def test_create_label(self):
        # label does not exist, need to create it
        self.repo.get_label.side_effect = UnknownObjectException(None, None)
        self.provider.get_or_create_label(self.repo, "another-label")
        self.repo.get_label.assert_called_once_with(name="another-label")
        self.repo.create_label.assert_called_once_with(name="another-label", color="1BB0CE")

    def test_create_label_fails(self):
        # label does not exist, need to create it; creation failure yields None
        self.repo.get_label.side_effect = UnknownObjectException(None, None)
        self.repo.create_label.side_effect = GithubException(None, None)
        label = self.provider.get_or_create_label(self.repo, "another-label")
        self.assertIsNone(label)
        self.repo.get_label.assert_called_once_with(name="another-label")
        self.repo.create_label.assert_called_once_with(name="another-label", color="1BB0CE")

    def test_ignore_ssl_should_be_default_false(self):
        provider = Provider(bundle=Mock())
        self.assertFalse(provider.ignore_ssl)

    @patch("pyup.providers.github.Github")
    def test_ignore_ssl(self, github_mock):
        # ignore_ssl=True is passed to the client as verify=False.
        ignore_ssl = True
        provider = Provider(bundle=RequirementsBundle(), ignore_ssl=ignore_ssl)
        provider._api("foo")
        self.assertTrue(provider.ignore_ssl)
        github_mock.assert_called_once_with("foo", base_url=None, timeout=50, verify=(not ignore_ssl))
|
|
# -*- encoding: utf-8 -*-
'''
HubbleStack Nova module for auditing running services.
Supports both blacklisting and whitelisting services. Blacklisted services
must not be running. Whitelisted services must be running.
:maintainer: HubbleStack / basepi
:maturity: 2016.7.0
:platform: All
:requires: SaltStack
This audit module requires yaml data to execute. It will search the local
directory for any .yaml files, and if it finds a top-level 'service' key, it will
use that data.
Sample YAML data, with inline comments:
service:
# Must not be installed
blacklist:
# Unique ID for this set of audits
telnet:
data:
# 'osfinger' grain, for multiplatform support
CentOS Linux-6:
# service name : tag
- 'telnet': 'CIS-2.1.1'
# Catch-all, if no osfinger match was found
'*':
# service name : tag
- 'telnet': 'telnet-bad'
# description/alert/trigger are currently ignored, but may be used in the future
description: 'Telnet is evil'
alert: email
trigger: state
# Must be installed, no version checking (yet)
whitelist:
rsh:
data:
CentOS Linux-7:
- 'rsh': 'CIS-2.1.3'
- 'rsh-server': 'CIS-2.1.4'
'*':
- 'rsh-client': 'CIS-5.1.2'
- 'rsh-redone-client': 'CIS-5.1.2'
- 'rsh-server': 'CIS-5.1.3'
- 'rsh-redone-server': 'CIS-5.1.3'
description: 'RSH is awesome'
alert: email
trigger: state
'''
from __future__ import absolute_import
import logging
import fnmatch
import yaml
import os
import copy
import salt.utils
from distutils.version import LooseVersion
log = logging.getLogger(__name__)
def __virtual__():
    """Load this audit module everywhere except Windows."""
    if not salt.utils.is_windows():
        return True
    return False, 'This audit module only runs on linux'
def audit(data_list, tags, verbose=False, show_profile=False, debug=False):
    '''
    Run the service audits contained in the YAML files processed by __virtual__
    '''
    # Merge all profiles' yaml into a single data structure.
    merged = {}
    for profile, data in data_list:
        if show_profile:
            _merge_yaml(merged, data, profile)
        else:
            _merge_yaml(merged, data)
    tag_map = _get_tags(merged)
    if debug:
        log.debug('service audit __data__:')
        log.debug(merged)
        log.debug('service audit __tags__:')
        log.debug(tag_map)

    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for tag in tag_map:
        if not fnmatch.fnmatch(tag, tags):
            continue
        for tag_data in tag_map[tag]:
            # Controlled checks are reported but not executed.
            if 'control' in tag_data:
                ret['Controlled'].append(tag_data)
                continue
            name = tag_data['name']
            audittype = tag_data['type']
            if audittype == 'blacklist':
                # Blacklisted services must not be running.
                bucket = 'Failure' if __salt__['service.status'](name) else 'Success'
                ret[bucket].append(tag_data)
            elif audittype == 'whitelist':
                # Whitelisted services must be running.
                bucket = 'Success' if __salt__['service.status'](name) else 'Failure'
                ret[bucket].append(tag_data)

    failure = []
    success = []
    controlled = []
    if not verbose:
        # Pull out just the tag and description, de-duplicated but
        # preserving first-seen order.
        seen = set()
        for tag_data in ret['Failure']:
            tag = tag_data['tag']
            description = tag_data.get('description')
            if (tag, description) not in seen:
                failure.append({tag: description})
                seen.add((tag, description))
        seen = set()
        for tag_data in ret['Success']:
            tag = tag_data['tag']
            description = tag_data.get('description')
            if (tag, description) not in seen:
                success.append({tag: description})
                seen.add((tag, description))
        seen = set()
        for tag_data in ret['Controlled']:
            tag = tag_data['tag']
            control_reason = tag_data.get('control', '')
            description = tag_data.get('description')
            if (tag, description, control_reason) not in seen:
                controlled.append({tag: {'description': description,
                                         'control': control_reason}})
                seen.add((tag, description, control_reason))
    else:
        # Format verbose output as single-key dictionaries with tag as key
        failure = [{tag_data['tag']: tag_data} for tag_data in ret['Failure']]
        success = [{tag_data['tag']: tag_data} for tag_data in ret['Success']]
        controlled = [{tag_data['tag']: tag_data} for tag_data in ret['Controlled']]

    ret['Controlled'] = controlled
    ret['Success'] = success
    ret['Failure'] = failure
    # Omit the Controlled section entirely when empty.
    if not ret['Controlled']:
        ret.pop('Controlled')
    return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the service:blacklist and service:whitelist level
'''
if 'service' not in ret:
ret['service'] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in data.get('service', {}):
if topkey not in ret['service']:
ret['service'][topkey] = []
for key, val in data['service'][topkey].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret['service'][topkey].append({key: val})
return ret
def _get_tags(data):
'''
Retrieve all the tags for this distro from the yaml
'''
ret = {}
distro = __grains__.get('osfinger')
for toplist, toplevel in data.get('service', {}).iteritems():
# service:blacklist
for audit_dict in toplevel:
# service:blacklist:0
for audit_id, audit_data in audit_dict.iteritems():
# service:blacklist:0:telnet
tags_dict = audit_data.get('data', {})
# service:blacklist:0:telnet:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', [])
# service:blacklist:0:telnet:data:Debian-8
if isinstance(tags, dict):
# malformed yaml, convert to list of dicts
tmp = []
for name, tag in tags.iteritems():
tmp.append({name: tag})
tags = tmp
for item in tags:
for name, tag in item.iteritems():
if tag not in ret:
ret[tag] = []
formatted_data = {'name': name,
'tag': tag,
'module': 'service',
'type': toplist}
formatted_data.update(audit_data)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
|
|
from __future__ import print_function
import os, sys, inspect
import h5py
import numpy as np
import matplotlib
import random
import math
import multiprocessing
from PIL import Image
from Crypto.Random.random import randint
from functools import partial
# Load the configuration file
import config
# Make this script's own directory importable so sibling modules resolve
# regardless of the caller's working directory.
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
    sys.path.append(cmd_folder)
# Also expose the pycaffe bindings shipped inside the configured Caffe tree.
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], config.caffe_path + "/python")))
if cmd_subfolder not in sys.path:
    sys.path.append(cmd_subfolder)
sys.path.append(config.caffe_path + "/python")
# Ensure correct compilation of Caffe and Pycaffe
if config.library_compile:
    cpus = multiprocessing.cpu_count()
    cwd = os.getcwd()
    os.chdir(config.caffe_path)
    # Abort immediately if either build step fails, propagating the make
    # exit status as this script's exit code.
    result = os.system("make all -j %s" % cpus)
    if result != 0:
        sys.exit(result)
    result = os.system("make pycaffe -j %s" % cpus)
    if result != 0:
        sys.exit(result)
    os.chdir(cwd)
# Import pycaffe
import caffe
from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2
import netconf
# General variables
# Size of a float variable in bytes; all memory estimates below assume
# single-precision (float32) storage.
fsize = 4
def compute_memory_weights(shape_arr):
    """Total weight memory in bytes: sum of each stage's weight entry (index 1)."""
    return sum(stage[1] for stage in shape_arr)
def compute_memory_buffers(shape_arr):
    """Peak convolution-buffer memory in bytes: maximum of each stage's
    buffer entry (index 0); 0 for an empty shape list."""
    peak = 0
    for stage in shape_arr:
        peak = max(peak, stage[0])
    return peak
def compute_memory_blobs(shape_arr):
    """Total blob (feature-map) memory in bytes: for every stage,
    fsize * channel count (index 2) * product of the spatial extents (index 4)."""
    total = 0
    for stage in shape_arr:
        stage_mem = fsize * stage[2]
        for extent in stage[4]:
            stage_mem *= extent
        total += stage_mem
    return total
def update_shape(shape_arr, update):
    """Apply one layer's shape-update rules to the running shape list.

    shape_arr : list of [conv_buffer, weight_mem, channels, [d...], [w...]]
        Running network shape; mutated in place (the new entry is appended).
    update : [f_buf, f_mem, f_chan, [f_d...], [f_w...]]
        Per-field transformation callables.  The two trailing lists are
        applied element-wise to the d/w parameter lists; the last callable
        is reused when the list is shorter than the shape entry.

    Returns the (mutated) shape_arr for convenience.
    """
    last_shape = shape_arr[-1]
    new_shape = [update[0](last_shape[0]), update[1](last_shape[1]), update[2](last_shape[2]),
                 [update[3][min(i, len(update[3]) - 1)](last_shape[3][i]) for i in range(0, len(last_shape[3]))],
                 [update[4][min(i, len(update[4]) - 1)](last_shape[4][i]) for i in range(0, len(last_shape[4]))]]
    shape_arr += [new_shape]
    # Fixed: removed a leftover debug print ("TEST B: ...") that spammed
    # stdout on every layer added.
    return shape_arr
def data_layer(shape):
    """Create a MemoryData input layer of the given blob dimensions and
    return its (data, label) top blob pair."""
    tops = L.MemoryData(dim=shape, ntop=2)
    return tops[0], tops[1]
def conv_relu(run_shape, bottom, num_output, kernel_size=[3], stride=[1], pad=[0], dilation=[1], group=1, weight_std=0.01):
    """Append a Convolution followed by an in-place leaky ReLU
    (negative_slope=0.005).

    Records the layer's buffer/weight memory and the new channel count and
    spatial extents in run_shape, then returns the (conv, relu) layer pair.
    The list-valued defaults are shared across calls; they are read-only here.
    """
    # The convolution buffer and weight memory
    weight_mem = fsize * num_output * run_shape[-1][2]
    conv_buff = fsize * run_shape[-1][2]
    for i in range(0, len(run_shape[-1][4])):
        # kernel_size[min(i, ...)] reuses the last kernel entry for extra axes
        conv_buff *= kernel_size[min(i, len(kernel_size) - 1)]
        conv_buff *= run_shape[-1][4][i]
        weight_mem *= kernel_size[min(i, len(kernel_size) - 1)]
    # Shape update rules
    update = [lambda x: conv_buff, lambda x: weight_mem, lambda x: num_output]
    # [d] parameters are unchanged by a convolution
    update += [[lambda x: x, lambda x: x, lambda x: x]]
    # Spatial extents shrink by (kernel-1)*d per axis; `i=i` binds the loop
    # index at definition time so each lambda keeps its own axis.
    update += [[lambda x, i=i: x - (kernel_size[min(i, len(kernel_size) - 1)] - 1) * (run_shape[-1][3][i]) for i in range(0, len(run_shape[-1][4]))]]
    update_shape(run_shape, update)
    conv = L.Convolution(bottom, kernel_size=kernel_size, stride=stride, dilation=dilation,
                         num_output=num_output, pad=pad, group=group,
                         param=[dict(lr_mult=1), dict(lr_mult=2)],
                         weight_filler=dict(type='gaussian', std=weight_std),
                         bias_filler=dict(type='constant'))
    return conv, L.ReLU(conv, in_place=True, negative_slope=0.005)
def convolution(run_shape, bottom, num_output, kernel_size=[3], stride=[1], pad=[0], dilation=[1], group=1, weight_std=0.01):
    """Append a plain Convolution layer (no activation).

    Same memory accounting and shape bookkeeping as conv_relu; returns
    only the Convolution layer.
    """
    # The convolution buffer and weight memory
    weight_mem = fsize * num_output * run_shape[-1][2]
    conv_buff = fsize * run_shape[-1][2]
    for i in range(0, len(run_shape[-1][4])):
        conv_buff *= kernel_size[min(i, len(kernel_size) - 1)]
        conv_buff *= run_shape[-1][4][i]
        weight_mem *= kernel_size[min(i, len(kernel_size) - 1)]
    # Shape update rules
    update = [lambda x: conv_buff, lambda x: weight_mem, lambda x: num_output]
    # [d] parameters unchanged; spatial extents shrink by (kernel-1)*d per
    # axis (`i=i` fixes the late-binding closure to its own axis).
    update += [[lambda x: x, lambda x: x, lambda x: x]]
    update += [[lambda x, i=i: x - (kernel_size[min(i, len(kernel_size) - 1)] - 1) * (run_shape[-1][3][i]) for i in range(0, len(run_shape[-1][4]))]]
    update_shape(run_shape, update)
    return L.Convolution(bottom, kernel_size=kernel_size, stride=stride, dilation=dilation,
                         num_output=num_output, pad=pad, group=group,
                         param=[dict(lr_mult=1), dict(lr_mult=2)],
                         weight_filler=dict(type='gaussian', std=weight_std),
                         bias_filler=dict(type='constant'))
def max_pool(run_shape, bottom, kernel_size=[2], stride=[2], pad=[0], dilation=[1]):
    """Append a MAX pooling layer and record its shape effects.

    Channel count is preserved; the [d] parameters grow by the pooling
    dilation, and the spatial extents shrink according to the rules below.
    """
    # Shape update rules
    update = [lambda x: 0, lambda x: 0, lambda x: x]
    update += [[lambda x, i=i: x * dilation[min(i, len(dilation) - 1)] for i in range(0, len(run_shape[-1][4]))]]
    # Strictly speaking this update rule is not complete, but should be sufficient for USK
    if dilation[0] == 1 and kernel_size[0] == stride[0]:
        # NOTE(review): '/' here relies on Python 2 integer division for
        # integral extents; under Python 3 it would produce floats — confirm
        # before porting.
        update += [[lambda x, i=i: x / (kernel_size[min(i, len(kernel_size) - 1)]) for i in range(0, len(run_shape[-1][4]))]]
    else:
        update += [[lambda x, i=i: x - (kernel_size[min(i, len(kernel_size) - 1)] - 1) * (run_shape[-1][3][i]) for i in range(0, len(run_shape[-1][4]))]]
    update_shape(run_shape, update)
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=kernel_size, stride=stride, pad=pad, dilation=dilation)
def upconv(run_shape, bottom, num_output_dec, num_output_conv, weight_std=0.01, kernel_size=[2], stride=[2]):
    """Append an upsampling step: a constant-weight, grouped Deconvolution
    (not learned: lr_mult=0, decay_mult=0) followed by a learned 1x1
    Convolution that changes the channel count to num_output_conv.

    Returns the (deconv, conv) layer pair.
    """
    # Shape update rules for the deconvolution: channels become
    # num_output_dec, spatial extents are multiplied by the kernel size.
    update = [lambda x: 0, lambda x: 0, lambda x: num_output_dec]
    update += [[lambda x: x, lambda x: x, lambda x: x]]
    update += [[lambda x, i=i: kernel_size[min(i, len(kernel_size) - 1)] * x for i in range(0, len(run_shape[-1][4]))]]
    update_shape(run_shape, update)
    deconv = L.Deconvolution(bottom, convolution_param=dict(num_output=num_output_dec, kernel_size=kernel_size, stride=stride, pad=[0], dilation=[1], group=num_output_dec,
                                                            weight_filler=dict(type='constant', value=1), bias_term=False),
                             param=dict(lr_mult=0, decay_mult=0))
    # The convolution buffer and weight memory for the following 1x1 conv
    weight_mem = fsize * num_output_conv * num_output_dec
    conv_buff = fsize * run_shape[-1][2]
    for i in range(0, len(run_shape[-1][4])):
        conv_buff *= 2
        conv_buff *= run_shape[-1][4][i]
    # Shape update rules for the 1x1 convolution (extents unchanged)
    update = [lambda x: conv_buff, lambda x: weight_mem, lambda x: num_output_conv]
    update += [[lambda x: x, lambda x: x, lambda x: x]]
    update += [[lambda x, i=i: x for i in range(0, len(run_shape[-1][4]))]]
    update_shape(run_shape, update)
    conv = L.Convolution(deconv, num_output=num_output_conv, kernel_size=[1], stride=[1], pad=[0], dilation=[1], group=1,
                         param=[dict(lr_mult=1), dict(lr_mult=2)],
                         weight_filler=dict(type='gaussian', std=weight_std),
                         bias_filler=dict(type='constant'))
    return deconv, conv
def mergecrop(run_shape, bottom_a, bottom_b):
    """Append a MergeCrop layer joining two blobs along the channel axis.

    The channel count doubles; [d] parameters and spatial extents are
    unchanged.  Returns the MergeCrop layer.
    """
    ndim = len(run_shape[-1][4])
    identity = lambda x: x
    rules = [lambda x: 0, lambda x: 0, lambda x: 2 * x]
    rules.append([identity, identity, identity])
    rules.append([identity for _ in range(ndim)])
    update_shape(run_shape, rules)
    return L.MergeCrop(bottom_a, bottom_b, forward=[1, 1], backward=[1, 1])
def implement_usknet(net, run_shape, fmaps_start, fmaps_end):
    """Build the U-SK network body onto *net* (a caffe.NetSpec with
    net.data already defined).

    run_shape is the running shape list, mutated in place by every layer
    helper.  fmaps_start is the feature-map count of the first convolution;
    fmaps_end the output count of the final 1x1 convolution.  Returns the
    last blob (the input of the loss/prediction layer).
    """
    # Chained blob list to construct the network (forward direction)
    blobs = []
    # All networks start with data
    blobs = blobs + [net.data]
    fmaps = fmaps_start
    if netconf.unet_depth > 0:
        # U-Net downsampling; 2*Convolution+Pooling
        for i in range(0, netconf.unet_depth):
            # weight_std follows He initialization: sqrt(2 / fan_in)
            conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0 / float(run_shape[-1][2] * pow(3, len(run_shape[-1][4])))))
            blobs = blobs + [relu]
            conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0 / float(run_shape[-1][2] * pow(3, len(run_shape[-1][4])))))
            blobs = blobs + [relu]  # This is the blob of interest for mergecrop (index 2 + 3 * i)
            pool = max_pool(run_shape, blobs[-1], kernel_size=netconf.unet_downsampling_strategy[i], stride=netconf.unet_downsampling_strategy[i])
            blobs = blobs + [pool]
            fmaps = netconf.unet_fmap_inc_rule(fmaps)
    # If there is no SK-Net component, fill with 2 convolutions
    if (netconf.unet_depth > 0 and netconf.sknet_conv_depth == 0):
        conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0 / float(run_shape[-1][2] * pow(3, len(run_shape[-1][4])))))
        blobs = blobs + [relu]
        conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0 / float(run_shape[-1][2] * pow(3, len(run_shape[-1][4])))))
        blobs = blobs + [relu]
    # Else use the SK-Net instead
    else:
        for i in range(0, netconf.sknet_conv_depth):
            # TODO: Not implemented yet (fixme) — loop body is a no-op.
            run_shape = run_shape
    if netconf.unet_depth > 0:
        # U-Net upsampling; Upconvolution+MergeCrop+2*Convolution
        for i in range(0, netconf.unet_depth):
            deconv, conv = upconv(run_shape, blobs[-1], fmaps, netconf.unet_fmap_dec_rule(fmaps), kernel_size=netconf.unet_downsampling_strategy[i], stride=netconf.unet_downsampling_strategy[i], weight_std=math.sqrt(2.0 / float(run_shape[-1][2] * pow(3, len(run_shape[-1][4])))))
            blobs = blobs + [conv]
            fmaps = netconf.unet_fmap_dec_rule(fmaps)
            # Here, layer (2 + 3 * i) with reversed i (high to low) is picked
            mergec = mergecrop(run_shape, blobs[-1], blobs[-1 + 3 * (netconf.unet_depth - i)])
            blobs = blobs + [mergec]
            conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0 / float(run_shape[-1][2] * pow(3, len(run_shape[-1][4])))))
            blobs = blobs + [relu]
            conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0 / float(run_shape[-1][2] * pow(3, len(run_shape[-1][4])))))
            blobs = blobs + [relu]
    conv = convolution(run_shape, blobs[-1], fmaps_end, kernel_size=[1], weight_std=math.sqrt(2.0 / float(run_shape[-1][2] * pow(3, len(run_shape[-1][4])))))
    blobs = blobs + [conv]
    # Return the last blob of the network (goes to error objective)
    return blobs[-1]
def caffenet(netmode):
    """Assemble the full network for the given phase and return its
    NetParameter protobuf.

    netmode == caffe_pb2.TEST builds the inference net (data + prediction
    layer); anything else builds the training net with label inputs and
    the loss layer selected by netconf.loss_function.
    """
    # Start Caffe proto net
    net = caffe.NetSpec()
    # Specify input data structures
    if netmode == caffe_pb2.TEST:
        # Output feature-map count matches the loss the net was trained with
        if netconf.loss_function == 'malis':
            fmaps_end = 11
        if netconf.loss_function == 'euclid':
            fmaps_end = 11
        if netconf.loss_function == 'softmax':
            fmaps_end = 2
        net.data, net.datai = data_layer([1, 1, 44, 132, 132])
        # Silence the unused companion blob so Caffe does not complain
        net.silence = L.Silence(net.datai, ntop=0)
        # Shape specs:
        # 00. Convolution buffer size
        # 01. Weight memory size
        # 03. Num. channels
        # 04. [d] parameter running value
        # 05. [w] parameter running value
        run_shape_in = [[0, 0, 1, [1, 1, 1], [44, 132, 132]]]
        run_shape_out = run_shape_in
        last_blob = implement_usknet(net, run_shape_out, 64, fmaps_end)
        # Implement the prediction layer
        if netconf.loss_function == 'malis':
            net.prob = L.Sigmoid(last_blob, ntop=1)
        if netconf.loss_function == 'euclid':
            net.prob = L.Sigmoid(last_blob, ntop=1)
        if netconf.loss_function == 'softmax':
            net.prob = L.Softmax(last_blob, ntop=1)
        for i in range(0, len(run_shape_out)):
            print(run_shape_out[i])
        print("Max. memory requirements: %s B" % (compute_memory_buffers(run_shape_out) + compute_memory_weights(run_shape_out) + compute_memory_blobs(run_shape_out)))
        print("Weight memory: %s B" % compute_memory_weights(run_shape_out))
        print("Max. conv buffer: %s B" % compute_memory_buffers(run_shape_out))
    else:
        # Training mode: inputs depend on the configured loss function
        if netconf.loss_function == 'malis':
            net.data, net.datai = data_layer([1, 1, 44, 132, 132])
            net.label, net.labeli = data_layer([1, 1, 16, 44, 44])
            net.label_affinity, net.label_affinityi = data_layer([1, 11, 16, 44, 44])
            net.affinity_edges, net.affinity_edgesi = data_layer([1, 1, 11, 3])
            net.silence = L.Silence(net.datai, net.labeli, net.label_affinityi, net.affinity_edgesi, ntop=0)
            fmaps_end = 11
        if netconf.loss_function == 'euclid':
            net.data, net.datai = data_layer([1, 1, 44, 132, 132])
            net.label, net.labeli = data_layer([1, 11, 16, 44, 44])
            net.scale, net.scalei = data_layer([1, 11, 16, 44, 44])
            net.silence = L.Silence(net.datai, net.labeli, net.scalei, ntop=0)
            fmaps_end = 11
        if netconf.loss_function == 'softmax':
            net.data, net.datai = data_layer([1, 1, 44, 132, 132])
            # Currently only supports binary classification
            net.label, net.labeli = data_layer([1, 1, 16, 44, 44])
            net.silence = L.Silence(net.datai, net.labeli, ntop=0)
            fmaps_end = 2
        # NOTE(review): the initial weight-memory entry is 1 here but 0 in
        # the TEST branch above — possibly unintentional; confirm.
        run_shape_in = [[0, 1, 1, [1, 1, 1], [44, 132, 132]]]
        run_shape_out = run_shape_in
        # Start the actual network
        last_blob = implement_usknet(net, run_shape_out, 64, fmaps_end)
        for i in range(0, len(run_shape_out)):
            print(run_shape_out[i])
        # The 2* blob term presumably accounts for gradient storage during
        # training — TODO confirm.
        print("Max. memory requirements: %s B" % (compute_memory_buffers(run_shape_out) + compute_memory_weights(run_shape_out) + 2 * compute_memory_blobs(run_shape_out)))
        print("Weight memory: %s B" % compute_memory_weights(run_shape_out))
        print("Max. conv buffer: %s B" % compute_memory_buffers(run_shape_out))
        # Implement the loss
        if netconf.loss_function == 'malis':
            last_blob = L.Sigmoid(last_blob, in_place=True)
            net.loss = L.MalisLoss(last_blob, net.label_affinity, net.label, net.affinity_edges, ntop=0)
        if netconf.loss_function == 'euclid':
            last_blob = L.Sigmoid(last_blob, in_place=True)
            net.loss = L.EuclideanLoss(last_blob, net.label, net.scale, ntop=0)
        if netconf.loss_function == 'softmax':
            net.loss = L.SoftmaxWithLoss(last_blob, net.label, ntop=0)
    # Return the protocol buffer of the generated network
    return net.to_proto()
def make_net():
    """Generate the train and test network prototxt files under net/."""
    for mode, path in ((caffe_pb2.TRAIN, 'net/net_train.prototxt'),
                       (caffe_pb2.TEST, 'net/net_test.prototxt')):
        with open(path, 'w') as f:
            f.write('%s\n' % caffenet(mode))
def make_solver():
    """Write the fixed SGD solver configuration to net/solver.prototxt."""
    settings = [
        'train_net: "net/net_train.prototxt"',
        'base_lr: 0.00001',
        'momentum: 0.99',
        'weight_decay: 0.000005',
        'lr_policy: "inv"',
        'gamma: 0.0001',
        'power: 0.75',
        'max_iter: 100000',
        'snapshot: 2000',
        'snapshot_prefix: "net_"',
        'display: 50',
    ]
    with open('net/solver.prototxt', 'w') as f:
        f.write('\n'.join(settings) + '\n')
# Script entry point: emit the network definitions and the solver config.
make_net()
make_solver()
|
|
import random
from .operators import Operator
from .states import State, Goal, PartialState
from heapq import heappop, heappush
from collections import deque
from .hsp import h_add, h_max
class GoalConditions(PartialState):
    """Partial state whose conditions are stored as a list (ordered,
    possibly with duplicates) rather than a mapping."""

    def __init__(self, conditions):
        self.conditions = conditions

    def cond(self):
        """Return the raw list of (variable, value) conditions."""
        return self.conditions
class UniqueOperator(object):
    """Wrapper giving each action instance a distinct identity, so the
    same operator can appear several times within one partial plan."""

    # Class-wide counter: each new wrapper takes the next index.
    next_index = 0

    def __init__(self, action):
        self.action = action
        self.index = UniqueOperator.next_index
        UniqueOperator.next_index += 1

    def __repr__(self):
        return '%s#%d' % (self.action, self.index)
#def effects(action):
# if isinstance(action, Operator):
# return action.effects
# if isinstance(action, State):
# return action.values # TODO - should include default false values
# if isinstance(action, Goal):
# return {}
# raise ValueError(action)
def achieves(operator, item):
    """Return True iff *operator* establishes the (var, val) pair *item*.

    Operators (plain or uniquely wrapped) achieve it through their effects;
    a State achieves it by already assigning var the value val; a Goal
    never achieves anything.  Raises ValueError for unknown operator kinds.
    """
    var, val = item
    if isinstance(operator, Operator):
        effects = operator.effects
        return var in effects and effects[var] == val
    if isinstance(operator, UniqueOperator):
        effects = operator.action.effects
        return var in effects and effects[var] == val
    if isinstance(operator, State):
        return operator[var] == val
    if isinstance(operator, Goal):
        return False
    raise ValueError(operator)
#return var in effects(action) and effects(action)[var] == val
def deletes(operator, item):
    """Return True iff *operator* clobbers the (var, val) pair *item*,
    i.e. assigns var some value different from val.

    Mirrors achieves() with the comparison inverted; a Goal deletes
    nothing.  Raises ValueError for unknown operator kinds.
    """
    var, val = item
    if isinstance(operator, Operator):
        effects = operator.effects
        return var in effects and effects[var] != val
    if isinstance(operator, UniqueOperator):
        effects = operator.action.effects
        return var in effects and effects[var] != val
    if isinstance(operator, State):
        return operator[var] != val
    if isinstance(operator, Goal):
        return False
    raise ValueError(operator)
def add_constraint(initial_const, constraints):
    """Return *constraints* extended with the ordering pair
    initial_const = (a0, a1), closed under transitivity.

    The input set itself is returned (unchanged, same object) when the
    pair is already present; otherwise a new set is built and returned.
    """
    if initial_const in constraints:
        return constraints
    closure = constraints.copy()
    pending = [initial_const]  # transitive consequences still to add
    while pending:
        first, second = pending.pop()
        closure.add((first, second))
        for left, right in closure:
            # first < second and second < right  =>  first < right
            if left == second and (first, right) not in closure:
                pending.append((first, right))
            # left < first and first < second  =>  left < second
            if right == first and (left, second) not in closure:
                pending.append((left, second))
    return closure
def possible(position, constraint):
    """An ordering pair (x, y) is possible unless its reverse (y, x) is
    already required by *constraint*."""
    first, second = position
    return (second, first) not in constraint
def protect_cl_for_actions(actions, constrs, clink):
    """Yield constraint sets that extend *constrs* and protect the causal
    link clink = (a0, subgoal, a1) against every action in *actions*.

    A threatening action (one that deletes subgoal and is neither endpoint
    of the link) must be ordered before a0 or after a1; both resolutions
    are tried, so several alternative constraint sets may be yielded.
    """
    if actions:
        a = actions[0]
        rem_actions = actions[1:]
        a0, subgoal, a1 = clink
        if a != a0 and a != a1 and deletes(a, subgoal):
            # Demotion: order the threat before the producer a0.
            if possible((a, a0), constrs):
                new_const = add_constraint((a, a0), constrs)
                for c in protect_cl_for_actions(rem_actions, new_const, clink):
                    yield c
            # Promotion: order the threat after the consumer a1.
            if possible((a1, a), constrs):
                new_const = add_constraint((a1, a), constrs)
                for c in protect_cl_for_actions(rem_actions, new_const, clink):
                    yield c
        else:
            # Not a threat: recurse on the remaining actions unchanged.
            for c in protect_cl_for_actions(rem_actions, constrs, clink):
                yield c
    else:
        yield constrs
def protect_all_cls(clinks, act, constrs):
    """Yield constraint sets extending *constrs* that protect all causal
    links in *clinks* against the (newly added) action *act*, ordering it
    before the producer or after the consumer of each threatened link."""
    if clinks:
        a0, cond, a1 = clinks[0]  # select a causal link
        rem_clinks = clinks[1:]  # remaining causal links
        if act != a0 and act != a1 and deletes(act, cond):
            # Demotion: act before the link's producer.
            if possible((act, a0), constrs):
                new_const = add_constraint((act, a0), constrs)
                for c in protect_all_cls(rem_clinks, act, new_const):
                    yield c
            # Promotion: act after the link's consumer.
            if possible((a1, act), constrs):
                new_const = add_constraint((a1, act), constrs)
                for c in protect_all_cls(rem_clinks, act, new_const):
                    yield c
        else:
            # Not a threat to this link: continue with the rest.
            for c in protect_all_cls(rem_clinks, act, constrs):
                yield c
    else:
        yield constrs
# TODO - sorted self.agenda that allows refinement in different order
class PartialPlan(object):
    """A partial-order plan: actions, ordering constraints, open subgoals
    (agenda) and causal links."""

    def __init__(self, actions, constraints, agenda, causal_links):
        """
        * actions is a set of action instances
        * constraints a set of (a0,a1) pairs, representing a0<a1,
          closed under transitivity
        * agenda list of (subgoal,action) pairs to be achieved, where
          subgoal is a (variable,value) pair
        * causal_links is a set of (a0,g,a1) triples,
          where ai are action instances, and g is a (variable,value) pair
        """
        self.actions = actions  # a set of action instances
        self.constraints = constraints  # a set of (a0,a1) pairs
        self.agenda = agenda  # list of (subgoal,action) pairs to be achieved
        self.causal_links = causal_links  # set of (a0,g,a1) triples

    def __str__(self):
        return ("actions: " + str(self.actions) +
                "\nconstraints: " + str(self.constraints) +
                "\nagenda: " + str(self.agenda) +
                "\ncausal_links:" + str(self.causal_links))

    def __repr__(self):
        # Compact summary: sizes of the four plan components.
        return str((len(self.actions), len(self.constraints), len(self.agenda), len(self.causal_links)))

    def extract_plan(self):
        """Topologically sort the actions consistently with the ordering
        constraints (random tie-breaking) and return the unwrapped
        Operator list (initial/goal pseudo-actions are dropped)."""
        #print(self)
        other_acts = set(self.actions)
        sorted_acts = []
        while other_acts:
            # Pick any remaining action with no unsatisfied predecessor.
            a = random.choice([a for a in other_acts if all((a1, a) not in self.constraints for a1 in other_acts)])
            other_acts.remove(a)
            if isinstance(a, UniqueOperator):
                sorted_acts.append(a.action)
        return sorted_acts

    def flaws_heuristic(self, *args):
        # NOTE(review): unimplemented — returns None; do not select as
        # `heuristic` without finishing it.
        #return self.goals_heuristic() + # number of threats
        pass

    def goals_heuristic(self, *args):
        # Number of open subgoals.
        return len(self.agenda)

    def add_heursitic(self, operators):  # TODO - cache this
        # (Name keeps the original spelling; it is referenced below.)
        #print(GoalConditions(self.agenda))
        #return h_add(self.actions[0], GoalConditions([g for g, _ in self.agenda]), operators)
        return h_max(self.actions[0], GoalConditions([g for g, _ in self.agenda]), operators)
        #return h_add(self.actions[0], self.actions[1], operators)

    # Currently selected heuristic.
    #heuristic = flaws_heuristic
    #heuristic = goals_heuristic
    heuristic = add_heursitic  # TODO - add all currently reachable actions?

    def rank(self, operators):
        # f = g + h: real actions so far plus heuristic estimate.
        #return (len(self.actions), len(self.constraints), len(self.agenda), len(self.causal_links))
        return self.length() + self.heuristic(operators)
        #return self.length() + 5*self.heuristic(operators)
        #return self.heuristic(operators)

    def length(self):
        # Number of real actions (excludes initial and goal pseudo-actions).
        return len(self.actions) - 2

    def cost(self):
        # NOTE(review): unimplemented — returns None.
        pass

    def solved(self):
        return self.agenda == []

    def neighbors(self, operators):
        """Yield refinements of this plan for the first agenda entry:
        either reuse an existing action that achieves the subgoal, or
        instantiate a fresh (uniquely wrapped) operator, protecting all
        causal links in both cases."""
        if self.agenda:
            subgoal, act1 = self.agenda[0]
            remaining_agenda = self.agenda[1:]
            # Case 1: an action already in the plan achieves the subgoal.
            for act0 in self.actions:
                if achieves(act0, subgoal) and possible((act0, act1), self.constraints):
                    consts1 = add_constraint((act0, act1), self.constraints)
                    new_clink = (act0, subgoal, act1)
                    new_cls = self.causal_links + [new_clink]
                    for consts2 in protect_cl_for_actions(self.actions, consts1, new_clink):
                        yield PartialPlan(self.actions, consts2, remaining_agenda, new_cls)
            # Case 2: add a new operator instance that achieves the subgoal.
            for a0 in operators:
                if achieves(a0, subgoal):
                    new_a = UniqueOperator(a0)  # This is why these things are wrapped, to allow several of the same actions
                    new_actions = self.actions + [new_a]
                    consts1 = add_constraint((self.actions[0], new_a), self.constraints)
                    consts2 = add_constraint((new_a, act1), consts1)  # Goal constraint automatically derived
                    new_agenda = remaining_agenda + [(pre, new_a) for pre in a0.cond()]  # NOTE - previously was bug that overwrote new_agenda
                    new_clink = (new_a, subgoal, act1)
                    new_cls = self.causal_links + [new_clink]
                    for consts3 in protect_all_cls(self.causal_links, new_a, consts2):
                        for consts4 in protect_cl_for_actions(self.actions, consts3, new_clink):
                            yield PartialPlan(new_actions, consts4, new_agenda, new_cls)
def pop_solve(initial, goal, operators, max_length=float('inf')):
    """Partial-order planner: best-first (or BFS) search over partial plans.

    initial, goal : the start State and Goal pseudo-actions.
    operators : the available Operator instances.
    max_length : prune plans with more than this many real actions.

    Returns (plan, (iterations, expanded)) where plan is a list of
    Operators in a consistent total order, or (None, stats) when the
    search space is exhausted.
    """
    # The empty plan: just initial < goal, with every goal condition open.
    initial_plan = PartialPlan([initial, goal],
                               {(initial, goal)},
                               [(g, goal) for g in goal.cond()],
                               [])
    #operators.sort()
    iterations = 0
    expanded = 1
    if initial_plan.solved():
        return initial_plan.extract_plan(), (iterations, expanded)
    # Toggle: breadth-first queue instead of heuristic best-first heap.
    BFS = False
    if BFS:
        queue = deque([initial_plan])
    else:
        # NOTE(review): (rank, plan) tuples with equal ranks fall back to
        # comparing PartialPlan objects, which raises TypeError on
        # Python 3 — consider adding a monotonic tie-break counter.
        queue = [(initial_plan.rank(operators), initial_plan)]
    while queue:
        #queue = deque(sorted(queue, key=lambda q: q.rank()))
        if BFS:
            plan = queue.popleft()
        else:
            _, plan = heappop(queue)
        #print(len(queue), sorted(queue, key=lambda q: q.rank()))
        # Progress trace printed every iteration.
        print(iterations, expanded, plan.heuristic(operators), plan.length())  #, len(neighbors)
        #print(plan)
        #raw_input()
        iterations += 1
        for new_plan in plan.neighbors(operators):
            if new_plan.length() > max_length:
                continue
            expanded += 1
            # Goal test on generation, not on expansion.
            if new_plan.solved():
                return new_plan.extract_plan(), (iterations, expanded)
            if BFS:
                queue.append(new_plan)
            else:
                heappush(queue, (new_plan.rank(operators), new_plan))
    return None, (iterations, expanded)
|
|
# Copyright (c) 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from pylxd.models import _model as model
from pylxd.tests import testing
class Item(model.Model):
    """A fake model."""

    # Read-only attribute: populated from the server, not client-settable.
    name = model.Attribute(readonly=True)
    # Coerced/validated to int on assignment.
    age = model.Attribute(int)
    data = model.Attribute()

    @property
    def api(self):
        # Endpoint for this item on the (mocked) pylxd API client.
        return self.client.api.items[self.name]
# Used to verify that __attributes__ propagates to subclasses
# (see test_init_sets_attributes_on_child_class).
class ChildItem(Item):
    """A fake model child class."""
class TestAttributeDict:
    """Tests for pylxd.models._model.AttributeDict."""

    def test_from_dict(self):
        """Keys of the source dict become attributes."""
        attrs = model.AttributeDict({"foo": "bar", "baz": "bza"})
        assert attrs.foo == "bar"
        assert attrs.baz == "bza"

    def test_iterable(self):
        """Round-trips back into an equal plain dict."""
        source = {"foo": "bar", "baz": "bza"}
        attrs = model.AttributeDict(source)
        assert dict(attrs) == source
class TestModel(testing.PyLXDTestCase):
    """Tests for pylxd.model.Model."""

    def setUp(self):
        super().setUp()
        # Canonical GET response for the fake item; sync() pulls these
        # values (age=1000) back from the mocked server.
        self.add_rule(
            {
                "json": {
                    "type": "sync",
                    "metadata": {
                        "name": "an-item",
                        "age": 1000,
                        "data": {"key": "val"},
                    },
                },
                "method": "GET",
                "url": r"^http://pylxd.test/1.0/items/an-item",
            }
        )
        # Empty sync responses for put/patch/delete on the same item.
        self.add_rule(
            {
                "json": {"type": "sync", "metadata": {}},
                "method": "put",
                "url": r"^http://pylxd.test/1.0/items/an-item",
            }
        )
        self.add_rule(
            {
                "json": {"type": "sync", "metadata": {}},
                "method": "patch",
                "url": r"^http://pylxd.test/1.0/items/an-item",
            }
        )
        self.add_rule(
            {
                "json": {"type": "sync", "metadata": {}},
                "method": "DELETE",
                "url": r"^http://pylxd.test/1.0/items/an-item",
            }
        )

    def test_init(self):
        """Initial attributes are set."""
        item = Item(self.client, name="an-item", age=15, data={"key": "val"})

        self.assertEqual(self.client, item.client)
        self.assertEqual("an-item", item.name)

    @mock.patch.dict("os.environ", {"PYLXD_WARNINGS": ""})
    @mock.patch("warnings.warn")
    def test_init_warnings_once(self, mock_warn):
        # Default mode: each distinct unknown attribute warns only once.
        with mock.patch("pylxd.models._model._seen_attribute_warnings", new=set()):
            Item(self.client, unknown="some_value")
            mock_warn.assert_called_once_with(mock.ANY)
            Item(self.client, unknown="some_value_as_well")
            mock_warn.assert_called_once_with(mock.ANY)
            Item(self.client, unknown2="some_2nd_value")
            self.assertEqual(len(mock_warn.call_args_list), 2)

    @mock.patch.dict("os.environ", {"PYLXD_WARNINGS": "none"})
    @mock.patch("warnings.warn")
    def test_init_warnings_none(self, mock_warn):
        # "none" suppresses unknown-attribute warnings entirely.
        with mock.patch("pylxd.models._model._seen_attribute_warnings", new=set()):
            Item(self.client, unknown="some_value")
            mock_warn.assert_not_called()

    @mock.patch.dict("os.environ", {"PYLXD_WARNINGS": "always"})
    @mock.patch("warnings.warn")
    def test_init_warnings_always(self, mock_warn):
        # "always" warns on every occurrence, even for repeated attributes.
        with mock.patch("pylxd.models._model._seen_attribute_warnings", new=set()):
            Item(self.client, unknown="some_value")
            mock_warn.assert_called_once_with(mock.ANY)
            Item(self.client, unknown="some_value_as_well")
            self.assertEqual(len(mock_warn.call_args_list), 2)

    @mock.patch.dict("os.environ", {"PYLXD_WARNINGS": "none"})
    def test_init_unknown_attribute(self):
        """Unknown attributes aren't set."""
        item = Item(self.client, name="an-item", nonexistent="SRSLY")

        try:
            item.nonexistent
            self.fail("item.nonexistent did not raise AttributeError")
        except AttributeError:
            pass

    def test_init_sets_attributes_on_child_class(self):
        """Ensure that .__attributes__ is set on a child class."""
        item = Item(self.client)
        child_item = ChildItem(self.client)
        self.assertEqual(len(item.__attributes__), len(child_item.__attributes__))

    def test_unknown_attribute(self):
        """Setting unknown attributes raise an exception."""

        def set_unknown_attribute():
            item = Item(self.client, name="an-item")
            item.nonexistent = "SRSLY"

        self.assertRaises(AttributeError, set_unknown_attribute)

    def test_get_unknown_attribute(self):
        """Setting unknown attributes raise an exception."""

        def get_unknown_attribute():
            item = Item(self.client, name="an-item")
            return item.nonexistent

        self.assertRaises(AttributeError, get_unknown_attribute)

    def test_unset_attribute_sync(self):
        """Reading unavailable attributes calls sync."""
        item = Item(self.client, name="an-item")

        # age was never set locally, so the mocked GET supplies it.
        self.assertEqual(1000, item.age)

    def test_iter(self):
        """Test models can be iterated over."""
        item = Item(self.client, name="an-item")

        self.assertDictEqual(
            {"name": "an-item", "age": 1000, "data": {"key": "val"}}, dict(item)
        )

    def test_sync(self):
        """A sync will update attributes from the server."""
        item = Item(self.client, name="an-item")

        item.sync()

        self.assertEqual(1000, item.age)

    def test_sync_dirty(self):
        """Sync will not overwrite local attribute changes."""
        item = Item(self.client, name="an-item")

        item.age = 250
        item.sync()

        self.assertEqual(250, item.age)

    def test_rollback(self):
        """Rollback resets the object from the server."""
        item = Item(self.client, name="an-item", age=15, data={"key": "val"})

        item.age = 50
        item.rollback()

        self.assertEqual(1000, item.age)
        self.assertFalse(item.dirty)

    def test_int_attribute_validator(self):
        """Age is set properly to be an int."""
        item = Item(self.client)

        item.age = "100"

        self.assertEqual(100, item.age)

    def test_int_attribute_invalid(self):
        """TypeError is raised when data can't be converted to type."""

        def set_string():
            item = Item(self.client)
            item.age = "abc"

        self.assertRaises(ValueError, set_string)

    def test_dirty(self):
        """Changes mark the object as dirty."""
        item = Item(self.client, name="an-item", age=15, data={"key": "val"})

        item.age = 100

        self.assertTrue(item.dirty)

    def test_not_dirty(self):
        """Changes mark the object as dirty."""
        item = Item(self.client, name="an-item", age=15, data={"key": "val"})

        self.assertFalse(item.dirty)

    def test_marshall(self):
        """The object is marshalled into a dict."""
        item = Item(self.client, name="an-item", age=15, data={"key": "val"})

        result = item.marshall()

        # readonly attributes (name) are excluded from the marshalled dict.
        self.assertEqual({"age": 15, "data": {"key": "val"}}, result)

    def test_delete(self):
        """The object is deleted, and client is unset."""
        item = Item(self.client, name="an-item", age=15, data={"key": "val"})

        item.delete()

        self.assertIsNone(item.client)

    def test_save(self):
        """Attributes are written to the server; object is marked clean."""
        item = Item(self.client, name="an-item", age=15, data={"key": "val"})

        item.age = 69
        item.save()

        self.assertFalse(item.dirty)

    def test_put(self):
        item = Item(self.client, name="an-item", age=15, data={"key": "val"})

        item.put({"age": 69})

        # should sync back to 1000
        self.assertEqual(item.age, 1000)

    def test_raw_put(self):
        item = Item(self.client, name="an-item", age=15, data={"key": "val"})
        item.age = 55

        item.raw_put({"age": 69})

        # should sync NOT back to 1000
        self.assertEqual(item.age, 55)

    def test_put_raw_async(self):
        # Async put: a 202 + operation URL, then the operation status and
        # its /wait endpoint for put(..., wait=True) to poll.
        self.add_rule(
            {
                "json": {
                    "type": "async",
                    "metadata": {},
                    "operation": "/1.0/items/123456789",
                },
                "status_code": 202,
                "method": "put",
                "url": r"^http://pylxd.test/1.0/items/an-item$",
            }
        )
        self.add_rule(
            {
                "json": {
                    "status": "Running",
                    "status_code": 103,
                    "type": "sync",
                    "metadata": {
                        "id": "123456789",
                        "secret": "some-long-string-of-digits",
                    },
                },
                "method": "get",
                "url": r"^http://pylxd.test/1.0/operations/123456789$",
            }
        )
        self.add_rule(
            {
                "json": {
                    "type": "sync",
                },
                "method": "get",
                "url": r"^http://pylxd.test/1.0/operations/123456789/wait$",
            }
        )

        item = Item(self.client, name="an-item", age=15, data={"key": "val"})

        item.put({"age": 69}, wait=True)

    def test_patch(self):
        item = Item(self.client, name="an-item", age=15, data={"key": "val"})

        item.patch({"age": 69})

        # should sync back to 1000
        self.assertEqual(item.age, 1000)

    def test_patch_raw_async(self):
        # Async patch: same operation/wait dance as test_put_raw_async.
        self.add_rule(
            {
                "json": {
                    "type": "async",
                    "metadata": {},
                    "operation": "/1.0/items/123456789",
                },
                "status_code": 202,
                "method": "patch",
                "url": r"^http://pylxd.test/1.0/items/an-item$",
            }
        )
        self.add_rule(
            {
                "json": {
                    "status": "Running",
                    "status_code": 103,
                    "type": "sync",
                    "metadata": {
                        "id": "123456789",
                        "secret": "some-long-string-of-digits",
                    },
                },
                "method": "get",
                "url": r"^http://pylxd.test/1.0/operations/123456789$",
            }
        )
        self.add_rule(
            {
                "json": {
                    "type": "sync",
                },
                "method": "get",
                "url": r"^http://pylxd.test/1.0/operations/123456789/wait$",
            }
        )

        item = Item(self.client, name="an-item", age=15, data={"key": "val"})

        item.patch({"age": 69}, wait=True)
|
|
from __future__ import division, absolute_import, print_function
import sys
import re
import os
if sys.version_info[0] < 3:
from ConfigParser import RawConfigParser
else:
from configparser import RawConfigParser
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
           'read_config', 'parse_flags']

# Matches one '${variable}' interpolation token; the group captures the name.
_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')
class FormatError(IOError):
    """Raised when a configuration file cannot be parsed.

    The original message is kept on the ``msg`` attribute and returned
    by ``str()``.
    """

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
class PkgNotFound(IOError):
    """Raised when a package can not be located.

    The lookup failure message is kept on the ``msg`` attribute and
    returned by ``str()``.
    """

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
def parse_flags(line):
    """
    Parse a line from a config file containing compile flags.

    Parameters
    ----------
    line : str
        A single line containing one or more compile flags.

    Returns
    -------
    d : dict
        Dictionary of parsed flags, split into relevant categories.
        These categories are the keys of `d`:

        * 'include_dirs'
        * 'library_dirs'
        * 'libraries'
        * 'macros'
        * 'ignored'
    """
    d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
         'macros': [], 'ignored': []}

    # Prepending ' ' guarantees every flag appears after a ' -' separator,
    # so the split yields the flag bodies without their leading '-', plus
    # one empty leading chunk.
    flags = (' ' + line).split(' -')
    for flag in flags:
        flag = '-' + flag
        # Fixed: the previous `len(flag) > 0` test was always true (flag
        # always starts with '-'), so the empty leading chunk produced a
        # spurious bare '-' entry in d['ignored'].  Require a flag body.
        if len(flag) > 1:
            if flag.startswith('-I'):
                d['include_dirs'].append(flag[2:].strip())
            elif flag.startswith('-L'):
                d['library_dirs'].append(flag[2:].strip())
            elif flag.startswith('-l'):
                d['libraries'].append(flag[2:].strip())
            elif flag.startswith('-D'):
                d['macros'].append(flag[2:].strip())
            else:
                d['ignored'].append(flag)

    return d
def _escape_backslash(val):
return val.replace('\\', '\\\\')
class LibraryInfo(object):
    """
    Object containing build information about a library.

    Parameters
    ----------
    name : str
        The library name.
    description : str
        Description of the library.
    version : str
        Version string.
    sections : dict
        The sections of the configuration file for the library. The keys are
        the section headers, the values the text under each header.
    vars : class instance
        A `VariableSet` instance, which contains ``(name, value)`` pairs for
        variables defined in the configuration file for the library.
    requires : sequence, optional
        The required libraries for the library to be installed.

    Notes
    -----
    All input parameters (except "sections" which is a method) are available as
    attributes of the same name.

    """
    def __init__(self, name, description, version, sections, vars, requires=None):
        self.name = name
        self.description = description
        if requires:
            self.requires = requires
        else:
            self.requires = []
        self.version = version
        self._sections = sections
        self.vars = vars

    def sections(self):
        """
        Return the section headers of the config file.

        Parameters
        ----------
        None

        Returns
        -------
        keys : list of str
            The list of section headers.

        """
        return list(self._sections.keys())

    def cflags(self, section="default"):
        # Interpolate ${var} references, then escape backslashes so the
        # result can be re-parsed.
        val = self.vars.interpolate(self._sections[section]['cflags'])
        return _escape_backslash(val)

    def libs(self, section="default"):
        val = self.vars.interpolate(self._sections[section]['libs'])
        return _escape_backslash(val)

    def __str__(self):
        m = ['Name: %s' % self.name, 'Description: %s' % self.description]
        # Fixed: the branches were inverted — a non-empty requires list
        # printed a bare 'Requires:' while the empty case joined an empty
        # list.  Now the requirements are actually listed when present.
        if self.requires:
            m.append('Requires: %s' % ",".join(self.requires))
        else:
            m.append('Requires:')
        m.append('Version: %s' % self.version)

        return "\n".join(m)
class VariableSet(object):
    """
    Container object for the variables defined in a config file.

    `VariableSet` can be used as a plain dictionary, with the variable names
    as keys.

    Parameters
    ----------
    d : dict
        Dict of items in the "variables" section of the configuration file.
    """

    # Recognizes ``${name}`` placeholders; same pattern as the module-level
    # _VAR constant, inlined here so the class stands on its own.
    _PLACEHOLDER = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')

    def __init__(self, d):
        self._raw_data = dict(d)
        self._re = {}
        self._re_sub = {}
        for name, value in self._raw_data.items():
            self._init_parse_var(name, value)

    def _init_parse(self):
        # (Re)compile the per-variable substitution patterns.
        for name, value in self._raw_data.items():
            self._init_parse_var(name, value)

    def _init_parse_var(self, name, value):
        self._re[name] = re.compile(r'\$\{%s\}' % name)
        self._re_sub[name] = value

    def interpolate(self, value):
        """Expand ``${var}`` placeholders in *value* until a fixed point."""

        def substitute_once(text):
            for name, pattern in self._re.items():
                text = pattern.sub(self._re_sub[name], text)
            return text

        # Keep substituting until no placeholder remains, or a full pass
        # changes nothing (i.e. the remaining placeholders are unknown).
        while self._PLACEHOLDER.search(value):
            expanded = substitute_once(value)
            if expanded == value:
                break
            value = expanded
        return value

    def variables(self):
        """
        Return the list of variable names.

        Returns
        -------
        names : list of str
            The names of all variables in the `VariableSet` instance.
        """
        return list(self._raw_data.keys())

    # Emulate a dict to set/get variables values
    def __getitem__(self, name):
        return self._raw_data[name]

    def __setitem__(self, name, value):
        self._raw_data[name] = value
        self._init_parse_var(name, value)
def parse_meta(config):
    """Extract and validate the [meta] section of *config* as a dict.

    Raises FormatError when the section or any mandatory option is missing;
    'requires' defaults to an empty list.
    """
    if not config.has_section('meta'):
        raise FormatError("No meta section found !")

    meta = dict(config.items('meta'))
    for key in ('name', 'description', 'version'):
        if key not in meta:
            raise FormatError("Option %s (section [meta]) is mandatory, "
                              "but not found" % key)

    meta.setdefault('requires', [])
    return meta
def parse_variables(config):
    """Build a VariableSet from the [variables] section of *config*.

    Raises FormatError when the section is missing.
    """
    if not config.has_section('variables'):
        raise FormatError("No variables section found !")

    return VariableSet(dict(config.items("variables")))
def parse_sections(config):
    """Parse the non-meta, non-variables sections of *config*.

    BUG FIX: this function previously returned the undefined names
    ``meta_d, r`` and raised NameError whenever called. It now returns the
    same ``(sections, requires)`` structures that `parse_config` builds:

    Returns
    -------
    sections : dict
        Maps each section name to a dict of its options.
    requires : dict
        Maps each section name that has a 'requires' option to its value.
    """
    sections = {}
    requires = {}
    for s in config.sections():
        if s in ('meta', 'variables'):
            continue
        if config.has_option(s, "requires"):
            requires[s] = config.get(s, 'requires')
        sections[s] = dict(config.items(s))
    return sections, requires
def pkg_to_filename(pkg_name):
    """Return the configuration file name for the package *pkg_name*."""
    return pkg_name + ".ini"
def parse_config(filename, dirs=None):
    """Read an .ini configuration file and parse it.

    *filename* is looked up in each directory of *dirs* when given,
    otherwise used as-is. Returns ``(meta, vars, sections, requires)``.
    Raises PkgNotFound when no candidate file could be read.
    """
    if dirs:
        candidates = [os.path.join(d, filename) for d in dirs]
    else:
        candidates = [filename]

    config = RawConfigParser()
    found = config.read(candidates)
    if len(found) < 1:
        raise PkgNotFound("Could not find file(s) %s" % str(candidates))

    # Parse meta and variables sections
    meta = parse_meta(config)

    vars = {}
    if config.has_section('variables'):
        vars = {name: _escape_backslash(value)
                for name, value in config.items("variables")}

    # Parse "normal" sections
    sections = {}
    requires = {}
    for section in config.sections():
        if section in ('meta', 'variables'):
            continue
        if config.has_option(section, "requires"):
            requires[section] = config.get(section, 'requires')
        sections[section] = dict(config.items(section))

    return meta, vars, sections, requires
def _read_config_imp(filenames, dirs=None):
    """Parse a config file — recursively merging in the sections and
    variables of every required library — into a LibraryInfo instance.

    `filenames` is a single configuration file name (despite the plural
    name) handed to `parse_config`; `dirs` is an optional sequence of
    directories to search.
    """
    def _read_config(f):
        meta, vars, sections, reqs = parse_config(f, dirs)
        # recursively add sections and variables of required libraries
        for rname, rvalue in reqs.items():
            nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
            # Update var dict for variables not in 'top' config file:
            # the top-level definition always wins over a required library's.
            for k, v in nvars.items():
                if not k in vars:
                    vars[k] = v
            # Update sec dict: append the required library's option text to
            # the like-named option of the requiring section.
            for oname, ovalue in nsections[rname].items():
                if ovalue:
                    sections[rname][oname] += ' %s' % ovalue
        return meta, vars, sections, reqs
    meta, vars, sections, reqs = _read_config(filenames)
    # FIXME: document this. If pkgname is defined in the variables section, and
    # there is no pkgdir variable defined, pkgdir is automatically defined to
    # the path of pkgname. This requires the package to be imported to work
    # NOTE(review): this relies on module-level imports of `sys` and `os` —
    # confirm both are imported at the top of this file.
    if not 'pkgdir' in vars and "pkgname" in vars:
        pkgname = vars["pkgname"]
        if not pkgname in sys.modules:
            raise ValueError("You should import %s to get information on %s" %
                             (pkgname, meta["name"]))
        mod = sys.modules[pkgname]
        vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))
    return LibraryInfo(name=meta["name"], description=meta["description"],
                       version=meta["version"], sections=sections, vars=VariableSet(vars))
# Trivial cache of LibraryInfo instances, keyed by package name. To be really
# efficient, the cache would have to live in read_config, since the same file
# can be parsed many times outside of LibraryInfo creation, but in practice
# this is unlikely to be a problem.
_CACHE = {}
def read_config(pkgname, dirs=None):
    """
    Return library info for a package from its configuration file.

    Parameters
    ----------
    pkgname : str
        Name of the package (should match the name of the .ini file, without
        the extension, e.g. foo for the file foo.ini).
    dirs : sequence, optional
        If given, should be a sequence of directories - usually including
        the NumPy base directory - where to look for npy-pkg-config files.

    Returns
    -------
    pkginfo : class instance
        The `LibraryInfo` instance containing the build information.

    Raises
    ------
    PkgNotFound
        If the package is not found.

    See Also
    --------
    misc_util.get_info, misc_util.get_pkg_info

    Examples
    --------
    >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
    >>> type(npymath_info)
    <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
    >>> print(npymath_info)
    Name: npymath
    Description: Portable, core math library implementing C99 standard
    Requires:
    Version: 0.1  #random
    """
    # Parse once per package and memoize the resulting LibraryInfo.
    if pkgname not in _CACHE:
        _CACHE[pkgname] = _read_config_imp(pkg_to_filename(pkgname), dirs)
    return _CACHE[pkgname]
# TODO:
# - implements version comparison (modversion + atleast)
# pkg-config simple emulator - useful for debugging, and maybe later to query
# the system
if __name__ == '__main__':
    import sys
    from optparse import OptionParser
    import glob

    parser = OptionParser()
    parser.add_option("--cflags", dest="cflags", action="store_true",
                      help="output all preprocessor and compiler flags")
    parser.add_option("--libs", dest="libs", action="store_true",
                      help="output all linker flags")
    parser.add_option("--use-section", dest="section",
                      help="use this section instead of default for options")
    parser.add_option("--version", dest="version", action="store_true",
                      help="output version")
    parser.add_option("--atleast-version", dest="min_version",
                      help="Minimal version")
    # BUG FIX: help text was a copy-paste of --atleast-version's
    # ("Minimal version").
    parser.add_option("--list-all", dest="list_all", action="store_true",
                      help="list all packages found in the current directory")
    parser.add_option("--define-variable", dest="define_variable",
                      help="Replace variable with the given value")

    (options, args) = parser.parse_args(sys.argv)
    # NOTE(review): a package name is required even with --list-all — confirm
    # this is intentional before relaxing the check.
    if len(args) < 2:
        raise ValueError("Expect package name on the command line:")

    if options.list_all:
        files = glob.glob("*.ini")
        for f in files:
            # BUG FIX: read_config expects the bare package name; passing the
            # .ini filename made pkg_to_filename look for 'foo.ini.ini'.
            info = read_config(os.path.splitext(f)[0])
            print("%s\t%s - %s" % (info.name, info.name, info.description))

    pkg_name = args[1]
    d = os.environ.get('NPY_PKG_CONFIG_PATH')
    if d:
        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
    else:
        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])

    if options.section:
        section = options.section
    else:
        section = "default"

    if options.define_variable:
        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
        if not m:
            raise ValueError("--define-variable option should be of "
                             "the form --define-variable=foo=bar")
        else:
            name = m.group(1)
            value = m.group(2)
            info.vars[name] = value

    if options.cflags:
        print(info.cflags(section))
    if options.libs:
        print(info.libs(section))
    if options.version:
        print(info.version)
    if options.min_version:
        # NOTE(review): this is a lexicographic string comparison, not a
        # semantic version comparison (see the TODO above this block).
        print(info.version >= options.min_version)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
import pytest
from airflow.hooks.dbapi import DbApiHook
from airflow.models import Connection
class TestDbApiHook(unittest.TestCase):
    """Unit tests for DbApiHook, exercised against fully mocked DB
    connections and cursors (no real database is touched)."""

    def setUp(self):
        super().setUp()
        # Cursor mock; rowcount=0 mimics a driver that reports no rows yet.
        self.cur = mock.MagicMock(rowcount=0)
        self.conn = mock.MagicMock()
        self.conn.cursor.return_value = self.cur
        conn = self.conn

        class UnitTestDbApiHook(DbApiHook):
            # Minimal concrete hook: satisfies the abstract contract and
            # routes get_conn() to the mocked connection above.
            conn_name_attr = 'test_conn_id'
            log = mock.MagicMock()

            def get_conn(self):
                return conn

        self.db_hook = UnitTestDbApiHook()
        self.db_hook_schema_override = UnitTestDbApiHook(schema='schema-override')

    def test_get_records(self):
        """get_records executes the statement, returns all fetched rows, and
        closes both cursor and connection."""
        statement = "SQL"
        rows = [("hello",), ("world",)]
        self.cur.fetchall.return_value = rows
        assert rows == self.db_hook.get_records(statement)
        assert self.conn.close.call_count == 1
        assert self.cur.close.call_count == 1
        self.cur.execute.assert_called_once_with(statement)

    def test_get_records_parameters(self):
        """Query parameters are forwarded to cursor.execute."""
        statement = "SQL"
        parameters = ["X", "Y", "Z"]
        rows = [("hello",), ("world",)]
        self.cur.fetchall.return_value = rows
        assert rows == self.db_hook.get_records(statement, parameters)
        assert self.conn.close.call_count == 1
        assert self.cur.close.call_count == 1
        self.cur.execute.assert_called_once_with(statement, parameters)

    def test_get_records_exception(self):
        """Cursor and connection are closed even when fetchall raises."""
        statement = "SQL"
        self.cur.fetchall.side_effect = RuntimeError('Great Problems')
        with pytest.raises(RuntimeError):
            self.db_hook.get_records(statement)
        assert self.conn.close.call_count == 1
        assert self.cur.close.call_count == 1
        self.cur.execute.assert_called_once_with(statement)

    def test_insert_rows(self):
        """insert_rows emits one INSERT per row and commits around the batch."""
        table = "table"
        rows = [("hello",), ("world",)]
        self.db_hook.insert_rows(table, rows)
        assert self.conn.close.call_count == 1
        assert self.cur.close.call_count == 1
        commit_count = 2  # The first and last commit
        assert commit_count == self.conn.commit.call_count
        sql = f"INSERT INTO {table} VALUES (%s)"
        for row in rows:
            self.cur.execute.assert_any_call(sql, row)

    def test_insert_rows_replace(self):
        """replace=True switches the statement verb to REPLACE."""
        table = "table"
        rows = [("hello",), ("world",)]
        self.db_hook.insert_rows(table, rows, replace=True)
        assert self.conn.close.call_count == 1
        assert self.cur.close.call_count == 1
        commit_count = 2  # The first and last commit
        assert commit_count == self.conn.commit.call_count
        sql = f"REPLACE INTO {table} VALUES (%s)"
        for row in rows:
            self.cur.execute.assert_any_call(sql, row)

    def test_insert_rows_target_fields(self):
        """target_fields are rendered as an explicit column list."""
        table = "table"
        rows = [("hello",), ("world",)]
        target_fields = ["field"]
        self.db_hook.insert_rows(table, rows, target_fields)
        assert self.conn.close.call_count == 1
        assert self.cur.close.call_count == 1
        commit_count = 2  # The first and last commit
        assert commit_count == self.conn.commit.call_count
        sql = f"INSERT INTO {table} ({target_fields[0]}) VALUES (%s)"
        for row in rows:
            self.cur.execute.assert_any_call(sql, row)

    def test_insert_rows_commit_every(self):
        """commit_every adds an intermediate commit per chunk of rows."""
        table = "table"
        rows = [("hello",), ("world",)]
        commit_every = 1
        self.db_hook.insert_rows(table, rows, commit_every=commit_every)
        assert self.conn.close.call_count == 1
        assert self.cur.close.call_count == 1
        commit_count = 2 + divmod(len(rows), commit_every)[0]
        assert commit_count == self.conn.commit.call_count
        sql = f"INSERT INTO {table} VALUES (%s)"
        for row in rows:
            self.cur.execute.assert_any_call(sql, row)

    def test_get_uri_schema_not_none(self):
        """get_uri renders type://login:password@host:port/schema."""
        self.db_hook.get_connection = mock.MagicMock(
            return_value=Connection(
                conn_type="conn-type",
                host="host",
                login="login",
                password="password",
                schema="schema",
                port=1,
            )
        )
        assert "conn-type://login:password@host:1/schema" == self.db_hook.get_uri()

    def test_get_uri_schema_override(self):
        """A schema passed to the hook overrides the connection's schema."""
        self.db_hook_schema_override.get_connection = mock.MagicMock(
            return_value=Connection(
                conn_type="conn-type",
                host="host",
                login="login",
                password="password",
                schema="schema",
                port=1,
            )
        )
        assert "conn-type://login:password@host:1/schema-override" == self.db_hook_schema_override.get_uri()

    def test_get_uri_schema_none(self):
        """A missing schema omits the trailing path component."""
        self.db_hook.get_connection = mock.MagicMock(
            return_value=Connection(
                conn_type="conn-type", host="host", login="login", password="password", schema=None, port=1
            )
        )
        assert "conn-type://login:password@host:1" == self.db_hook.get_uri()

    def test_get_uri_special_characters(self):
        """Reserved URI characters in credentials/host/schema are percent-encoded."""
        self.db_hook.get_connection = mock.MagicMock(
            return_value=Connection(
                conn_type="conn-type",
                host="host/",
                login="lo/gi#! n",
                password="pass*! word/",
                schema="schema/",
                port=1,
            )
        )
        assert (
            "conn-type://lo%2Fgi%23%21%20n:pass%2A%21%20word%2F@host%2F:1/schema%2F" == self.db_hook.get_uri()
        )

    def test_get_uri_login_none(self):
        """A missing login keeps the ':password@' authority form."""
        self.db_hook.get_connection = mock.MagicMock(
            return_value=Connection(
                conn_type="conn-type",
                host="host",
                login=None,
                password="password",
                schema="schema",
                port=1,
            )
        )
        assert "conn-type://:password@host:1/schema" == self.db_hook.get_uri()

    def test_get_uri_password_none(self):
        """A missing password drops the ':password' part of the authority."""
        self.db_hook.get_connection = mock.MagicMock(
            return_value=Connection(
                conn_type="conn-type",
                host="host",
                login="login",
                password=None,
                schema="schema",
                port=1,
            )
        )
        assert "conn-type://login@host:1/schema" == self.db_hook.get_uri()

    def test_get_uri_authority_none(self):
        """With neither login nor password, the '@' separator is omitted."""
        self.db_hook.get_connection = mock.MagicMock(
            return_value=Connection(
                conn_type="conn-type",
                host="host",
                login=None,
                password=None,
                schema="schema",
                port=1,
            )
        )
        assert "conn-type://host:1/schema" == self.db_hook.get_uri()

    def test_run_log(self):
        """run() logs twice (statement + result summary)."""
        statement = 'SQL'
        self.db_hook.run(statement)
        assert self.db_hook.log.info.call_count == 2

    def test_run_with_handler(self):
        """run() with a handler invokes it once and returns its result."""
        sql = 'SQL'
        param = ('p1', 'p2')
        called = 0
        obj = object()

        def handler(cur):
            cur.execute.assert_called_once_with(sql, param)
            nonlocal called
            called += 1
            return obj

        result = self.db_hook.run(sql, parameters=param, handler=handler)
        assert called == 1
        assert self.conn.commit.called
        assert result == obj

    def test_run_with_handler_multiple(self):
        """run() with a list of statements calls the handler per statement and
        returns the list of results."""
        sql = ['SQL', 'SQL']
        param = ('p1', 'p2')
        called = 0
        obj = object()

        def handler(cur):
            cur.execute.assert_called_with(sql[0], param)
            nonlocal called
            called += 1
            return obj

        result = self.db_hook.run(sql, parameters=param, handler=handler)
        assert called == 2
        assert self.conn.commit.called
        assert result == [obj, obj]
|
|
import logging
from typing import Union
from .Deserializer import Deserializer
from .RateLimiter import RateLimiter
from .Handlers import (
DeprecationHandler,
DeserializerAdapter,
DictionaryDeserializer,
RateLimiterAdapter,
SanitationHandler,
ThrowOnErrorHandler,
TypeCorrectorHandler,
)
from .Handlers.RateLimit import BasicRateLimiter
from ._apis import BaseApi, UrlConfig
from ._apis.league_of_legends import (
ChampionApiV3,
ChampionMasteryApiV4,
ClashApiV1,
DataDragonApi,
LeagueApiV4,
LolStatusApiV3,
LolStatusApiV4,
SpectatorApiV4,
SummonerApiV4,
MatchApiV5,
ThirdPartyCodeApiV4,
)
# Module-level logger, used for deprecation warnings.
LOG = logging.getLogger(__name__)
class LolWatcher:
    """
    LolWatcher class is intended to be the main interaction point with the APIs for
    League of Legends.
    """

    def __init__(
        self,
        api_key: str = None,
        timeout: int = None,
        kernel_url: str = None,
        rate_limiter: RateLimiter = None,
        deserializer: Deserializer = None,
        default_status_v4: bool = False,
        **kwargs,
    ):
        """
        Initialize a new instance of the LolWatcher class.

        :param string api_key: the API key to use for this instance
        :param int timeout: Time to wait for a response before timing out a connection
                            to the Riot API
        :param string kernel_url: URL for the kernel instance to connect to, instead of
                                  the API. See
                                  https://github.com/meraki-analytics/kernel for
                                  details.
        :param RateLimiter rate_limiter: Instance to be used for rate limiting.
                                         Defaults to a fresh
                                         Handlers.RateLimit.BasicRateLimiter per
                                         watcher. This parameter is not used when
                                         connecting to a kernel instance.
        :param Deserializer deserializer: Instance to be used to deserialize responses
                                          from the Riot Api. Defaults to a fresh
                                          Handlers.DictionaryDeserializer.
        :param bool default_status_v4: When True, the ``lol_status`` property
                                       returns the v4 endpoint instead of v3.
        :raises ValueError: when neither api_key nor kernel_url is provided.
        """
        if not kernel_url and not api_key:
            raise ValueError("Either api_key or kernel_url must be set!")

        # BUG FIX: the limiter/deserializer defaults used to be created once at
        # function-definition time (mutable default arguments), silently
        # sharing a single BasicRateLimiter and DictionaryDeserializer across
        # every LolWatcher instance. Create fresh instances per watcher.
        if rate_limiter is None:
            rate_limiter = BasicRateLimiter()
        if deserializer is None:
            deserializer = DictionaryDeserializer()

        # The two previous branches duplicated the whole chain; only the
        # RateLimiterAdapter differs (kernel performs its own rate limiting).
        handler_chain = [
            SanitationHandler(),
            DeserializerAdapter(deserializer),
            ThrowOnErrorHandler(),
            TypeCorrectorHandler(),
        ]
        if not kernel_url:
            handler_chain.append(RateLimiterAdapter(rate_limiter))
        handler_chain.append(DeprecationHandler())

        if kernel_url:
            UrlConfig.root_url = kernel_url
        else:
            UrlConfig.root_url = "https://{platform}.api.riotgames.com"

        self._base_api = BaseApi(api_key, handler_chain, timeout=timeout)
        self._champion = ChampionApiV3(self._base_api)
        self._lol_status_v3 = LolStatusApiV3(self._base_api)
        self._lol_status_v4 = LolStatusApiV4(self._base_api)
        self._data_dragon = DataDragonApi(self._base_api)
        self._clash = ClashApiV1(self._base_api)
        self._champion_mastery = ChampionMasteryApiV4(self._base_api)
        self._league = LeagueApiV4(self._base_api)
        self._match = MatchApiV5(self._base_api)
        self._spectator = SpectatorApiV4(self._base_api)
        self._summoner = SummonerApiV4(self._base_api)
        self._third_party_code = ThirdPartyCodeApiV4(self._base_api)
        self._lol_status = (
            self._lol_status_v4 if default_status_v4 else self._lol_status_v3
        )
        # todo: tournament-stub
        # todo: tournament

        if "default_match_v5" in kwargs:
            LOG.warning(
                "property 'default_match_v5' has been deprecated and can be removed"
            )

    @property
    def champion_mastery(self) -> ChampionMasteryApiV4:
        """
        Interface to the ChampionMastery Endpoint

        :rtype: league_of_legends.ChampionMasteryApiV4
        """
        return self._champion_mastery

    @property
    def champion(self) -> ChampionApiV3:
        """
        Interface to the Champion Endpoint

        :rtype: league_of_legends.ChampionApiV3
        """
        return self._champion

    @property
    def clash(self) -> ClashApiV1:
        """
        Interface to the Clash Endpoint

        :rtype: league_of_legends.ClashApiV1
        """
        return self._clash

    @property
    def league(self) -> LeagueApiV4:
        """
        Interface to the League Endpoint

        :rtype: league_of_legends.LeagueApiV4
        """
        return self._league

    @property
    def lol_status(self) -> Union[LolStatusApiV3, LolStatusApiV4]:
        """
        Interface to the LoLStatus Endpoint (v4 when the watcher was built
        with default_status_v4=True, otherwise v3).

        :rtype: Union[league_of_legends.LolStatusApiV3, league_of_legends.LolStatusApiV4]
        """
        return self._lol_status

    @property
    def lol_status_v3(self) -> LolStatusApiV3:
        """
        Interface to the LoLStatus Endpoint

        :rtype: league_of_legends.LolStatusApiV3
        """
        return self._lol_status_v3

    @property
    def lol_status_v4(self) -> LolStatusApiV4:
        """
        Interface to the LoLStatus Endpoint

        :rtype: league_of_legends.LolStatusApiV4
        """
        return self._lol_status_v4

    @property
    def match(self) -> MatchApiV5:
        """
        Interface to the Match Endpoint

        :rtype: league_of_legends.MatchApiV5
        """
        return self._match

    @property
    def match_v4(self):
        """
        This property has been deprecated. Use 'match' property instead.
        Note that v4 is now permanently removed by Riot
        """
        raise NotImplementedError(
            "this property has been deprecated. Use 'match' property instead. Note "
            + "that v4 is now permanently removed by Riot"
        )

    @property
    def match_v5(self):
        """this property has been deprecated. Use 'match' property instead."""
        raise NotImplementedError(
            "this property has been deprecated. Use 'match' property instead."
        )

    @property
    def spectator(self) -> SpectatorApiV4:
        """
        Interface to the Spectator Endpoint

        :rtype: league_of_legends.SpectatorApiV4
        """
        return self._spectator

    @property
    def data_dragon(self) -> DataDragonApi:
        """
        Interface to the DataDragon Endpoint

        :rtype: league_of_legends.DataDragonApi
        """
        return self._data_dragon

    @property
    def summoner(self) -> SummonerApiV4:
        """
        Interface to the Summoner Endpoint

        :rtype: league_of_legends.SummonerApiV4
        """
        return self._summoner

    @property
    def third_party_code(self) -> ThirdPartyCodeApiV4:
        """
        Interface to the Third Party Code Endpoint

        :rtype: league_of_legends.ThirdPartyCodeApiV4
        """
        return self._third_party_code
|
|
import mock
import pytest
import time
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from addons.github.tests import factories
from addons.osfstorage.models import OsfStorageFile
from framework.auth import Auth
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from framework.exceptions import PermissionsError
from osf.models import OSFGroup, Node, OSFUser, OSFGroupLog, NodeLog
from osf.utils.permissions import MANAGER, MEMBER, MANAGE, READ, WRITE, ADMIN
from website.notifications.utils import get_all_node_subscriptions
from website.osf_groups import signals as group_signals
from .factories import (
NodeFactory,
ProjectFactory,
AuthUserFactory,
OSFGroupFactory
)
# Every test in this module requires database access.
pytestmark = pytest.mark.django_db
@pytest.fixture()
def manager():
    """User that manages the OSF group under test."""
    return AuthUserFactory()
@pytest.fixture()
def member():
    """User that is a plain (non-manager) member of the OSF group."""
    return AuthUserFactory()
@pytest.fixture()
def user():
    """Unaffiliated user (no group membership)."""
    return AuthUserFactory()
@pytest.fixture()
def user_two():
    """Second unaffiliated user."""
    return AuthUserFactory()
@pytest.fixture()
def user_three():
    """Third unaffiliated user."""
    return AuthUserFactory()
@pytest.fixture()
def auth(manager):
    """Auth context for the group manager."""
    return Auth(manager)
@pytest.fixture()
def project(manager):
    """Project created (and therefore administered) by the manager."""
    return ProjectFactory(creator=manager)
@pytest.fixture()
def osf_group(manager, member):
    """OSFGroup created by ``manager`` with ``member`` added as a member."""
    group = OSFGroupFactory(creator=manager)
    group.make_member(member)
    return group
class TestOSFGroup:
def test_osf_group_creation(self, manager, member, user_two, fake):
osf_group = OSFGroup.objects.create(name=fake.bs(), creator=manager)
# OSFGroup creator given manage permissions
assert osf_group.has_permission(manager, MANAGE) is True
assert osf_group.has_permission(user_two, MANAGE) is False
assert manager in osf_group.managers
assert manager in osf_group.members
assert manager not in osf_group.members_only
user_two.is_superuser = True
user_two.save()
# Superusers don't have permission to group
assert osf_group.has_permission(user_two, MEMBER) is False
@mock.patch('website.osf_groups.views.mails.send_mail')
def test_make_manager(self, mock_send_mail, manager, member, user_two, user_three, osf_group):
# no permissions
with pytest.raises(PermissionsError):
osf_group.make_manager(user_two, Auth(user_three))
# member only
with pytest.raises(PermissionsError):
osf_group.make_manager(user_two, Auth(member))
# manage permissions
osf_group.make_manager(user_two, Auth(manager))
assert osf_group.has_permission(user_two, MANAGE) is True
assert user_two in osf_group.managers
assert user_two in osf_group.members
assert mock_send_mail.call_count == 1
# upgrade to manager
osf_group.make_manager(member, Auth(manager))
assert osf_group.has_permission(member, MANAGE) is True
assert member in osf_group.managers
assert member in osf_group.members
# upgrading an existing member does not re-send an email
assert mock_send_mail.call_count == 1
@mock.patch('website.osf_groups.views.mails.send_mail')
def test_make_member(self, mock_send_mail, manager, member, user_two, user_three, osf_group):
# no permissions
with pytest.raises(PermissionsError):
osf_group.make_member(user_two, Auth(user_three))
# member only
with pytest.raises(PermissionsError):
osf_group.make_member(user_two, Auth(member))
# manage permissions
osf_group.make_member(user_two, Auth(manager))
assert osf_group.has_permission(user_two, MANAGE) is False
assert user_two not in osf_group.managers
assert user_two in osf_group.members
assert mock_send_mail.call_count == 1
# downgrade to member, sole manager
with pytest.raises(ValueError):
osf_group.make_member(manager, Auth(manager))
# downgrade to member
osf_group.make_manager(user_two, Auth(manager))
assert user_two in osf_group.managers
assert user_two in osf_group.members
osf_group.make_member(user_two, Auth(manager))
assert user_two not in osf_group.managers
assert user_two in osf_group.members
assert mock_send_mail.call_count == 1
@mock.patch('website.osf_groups.views.mails.send_mail')
def test_add_unregistered_member(self, mock_send_mail, manager, member, osf_group, user_two):
test_fullname = 'Test User'
test_email = 'test_member@cos.io'
test_manager_email = 'test_manager@cos.io'
# Email already exists
with pytest.raises(ValueError):
osf_group.add_unregistered_member(test_fullname, user_two.username, auth=Auth(manager))
# Test need manager perms to add
with pytest.raises(PermissionsError):
osf_group.add_unregistered_member(test_fullname, test_email, auth=Auth(member))
# Add member
osf_group.add_unregistered_member(test_fullname, test_email, auth=Auth(manager))
assert mock_send_mail.call_count == 1
unreg_user = OSFUser.objects.get(username=test_email)
assert unreg_user in osf_group.members
assert unreg_user not in osf_group.managers
assert osf_group.has_permission(unreg_user, MEMBER) is True
assert osf_group._id in unreg_user.unclaimed_records
# Attempt to add unreg user as a member
with pytest.raises(ValueError):
osf_group.add_unregistered_member(test_fullname, test_email, auth=Auth(manager))
# Add unregistered manager
osf_group.add_unregistered_member(test_fullname, test_manager_email, auth=Auth(manager), role=MANAGER)
assert mock_send_mail.call_count == 2
unreg_manager = OSFUser.objects.get(username=test_manager_email)
assert unreg_manager in osf_group.members
assert unreg_manager in osf_group.managers
assert osf_group.has_permission(unreg_manager, MEMBER) is True
assert osf_group._id in unreg_manager.unclaimed_records
# Add unregistered member with blacklisted email
with pytest.raises(ValidationError):
osf_group.add_unregistered_member(test_fullname, 'test@example.com', auth=Auth(manager), role=MANAGER)
def test_remove_member(self, manager, member, user_three, osf_group):
new_member = AuthUserFactory()
osf_group.make_member(new_member)
assert new_member not in osf_group.managers
assert new_member in osf_group.members
# no permissions
with pytest.raises(PermissionsError):
osf_group.remove_member(new_member, Auth(user_three))
# member only
with pytest.raises(PermissionsError):
osf_group.remove_member(new_member, Auth(member))
# manage permissions
osf_group.remove_member(new_member, Auth(manager))
assert new_member not in osf_group.managers
assert new_member not in osf_group.members
# Remove self - member can remove themselves
osf_group.remove_member(member, Auth(member))
assert member not in osf_group.managers
assert member not in osf_group.members
def test_remove_manager(self, manager, member, user_three, osf_group):
new_manager = AuthUserFactory()
osf_group.make_manager(new_manager)
# no permissions
with pytest.raises(PermissionsError):
osf_group.remove_member(new_manager, Auth(user_three))
# member only
with pytest.raises(PermissionsError):
osf_group.remove_member(new_manager, Auth(member))
# manage permissions
osf_group.remove_member(new_manager, Auth(manager))
assert new_manager not in osf_group.managers
assert new_manager not in osf_group.members
# can't remove last manager
with pytest.raises(ValueError):
osf_group.remove_member(manager, Auth(manager))
assert manager in osf_group.managers
assert manager in osf_group.members
@mock.patch('website.osf_groups.views.mails.send_mail')
def test_notify_group_member_email_does_not_send_before_throttle_expires(self, mock_send_mail, manager, osf_group):
member = AuthUserFactory()
assert member.member_added_email_records == {}
group_signals.member_added.send(osf_group, user=member, permission=WRITE, auth=Auth(manager))
assert mock_send_mail.call_count == 1
record = member.member_added_email_records[osf_group._id]
assert record is not None
# 2nd call does not send email because throttle period has not expired
group_signals.member_added.send(osf_group, user=member, permission=WRITE, auth=Auth(manager))
assert member.member_added_email_records[osf_group._id] == record
assert mock_send_mail.call_count == 1
@mock.patch('website.osf_groups.views.mails.send_mail')
def test_notify_group_member_email_sends_after_throttle_expires(self, mock_send_mail, osf_group, member, manager):
throttle = 0.5
member = AuthUserFactory()
assert member.member_added_email_records == {}
group_signals.member_added.send(osf_group, user=member, permission=WRITE, auth=Auth(manager), throttle=throttle)
assert mock_send_mail.call_count == 1
time.sleep(1) # throttle period expires
# 2nd call does not send email because throttle period has not expired
assert member.member_added_email_records[osf_group._id] is not None
group_signals.member_added.send(osf_group, user=member, permission=WRITE, auth=Auth(manager), throttle=throttle)
assert mock_send_mail.call_count == 2
@mock.patch('website.osf_groups.views.mails.send_mail')
def test_notify_group_unregistered_member_throttle(self, mock_send_mail, osf_group, member, manager):
throttle = 0.5
member = AuthUserFactory()
member.is_registered = False
member.add_unclaimed_record(osf_group, referrer=manager, given_name='grapes mcgee', email='grapes@cos.io')
member.save()
assert member.member_added_email_records == {}
group_signals.member_added.send(osf_group, user=member, permission=WRITE, auth=Auth(manager), throttle=throttle)
assert mock_send_mail.call_count == 1
assert member.member_added_email_records[osf_group._id] is not None
# 2nd call does not send email because throttle period has not expired
group_signals.member_added.send(osf_group, user=member, permission=WRITE, auth=Auth(manager))
assert mock_send_mail.call_count == 1
time.sleep(1) # throttle period expires
# 2nd call does not send email because throttle period has not expired
assert member.member_added_email_records[osf_group._id] is not None
group_signals.member_added.send(osf_group, user=member, permission=WRITE, auth=Auth(manager), throttle=throttle)
assert mock_send_mail.call_count == 2
def test_rename_osf_group(self, manager, member, user_two, osf_group):
new_name = 'Platform Team'
# no permissions
with pytest.raises(PermissionsError):
osf_group.set_group_name(new_name, Auth(user_two))
# member only
with pytest.raises(PermissionsError):
osf_group.set_group_name(new_name, Auth(member))
# manage permissions
osf_group.set_group_name(new_name, Auth(manager))
osf_group.save()
assert osf_group.name == new_name
def test_remove_group(self, manager, member, osf_group):
osf_group_name = osf_group.name
manager_group_name = osf_group.manager_group.name
member_group_name = osf_group.member_group.name
osf_group.remove_group(Auth(manager))
assert not OSFGroup.objects.filter(name=osf_group_name).exists()
assert not Group.objects.filter(name=manager_group_name).exists()
assert not Group.objects.filter(name=member_group_name).exists()
assert manager_group_name not in manager.groups.values_list('name', flat=True)
def test_remove_group_node_perms(self, manager, member, osf_group, project):
project.add_osf_group(osf_group, ADMIN)
assert project.has_permission(member, ADMIN) is True
osf_group.remove_group(Auth(manager))
assert project.has_permission(member, ADMIN) is False
def test_user_groups_property(self, manager, member, osf_group):
assert osf_group in manager.osf_groups
assert osf_group in member.osf_groups
other_group = OSFGroupFactory()
assert other_group not in manager.osf_groups
assert other_group not in member.osf_groups
def test_user_group_roles(self, manager, member, user_three, osf_group):
assert manager.group_role(osf_group) == MANAGER
assert member.group_role(osf_group) == MEMBER
assert user_three.group_role(osf_group) is None
def test_replace_contributor(self, manager, member, osf_group):
user = osf_group.add_unregistered_member('test_user', 'test@cos.io', auth=Auth(manager))
assert user in osf_group.members
assert user not in osf_group.managers
assert (
osf_group._id in
user.unclaimed_records.keys()
)
osf_group.replace_contributor(user, member)
assert user not in osf_group.members
assert user not in osf_group.managers
assert osf_group.has_permission(member, MEMBER) is True
assert osf_group.has_permission(user, MEMBER) is False
# test unclaimed_records is removed
assert (
osf_group._id not in
user.unclaimed_records.keys()
)
def test_get_users_with_perm_osf_groups(self, project, manager, member, osf_group):
# Explicitly added as a contributor
read_users = project.get_users_with_perm(READ)
write_users = project.get_users_with_perm(WRITE)
admin_users = project.get_users_with_perm(ADMIN)
assert len(project.get_users_with_perm(READ)) == 1
assert len(project.get_users_with_perm(WRITE)) == 1
assert len(project.get_users_with_perm(ADMIN)) == 1
assert manager in read_users
assert manager in write_users
assert manager in admin_users
# Added through osf groups
project.add_osf_group(osf_group, WRITE)
read_users = project.get_users_with_perm(READ)
write_users = project.get_users_with_perm(WRITE)
admin_users = project.get_users_with_perm(ADMIN)
assert len(project.get_users_with_perm(READ)) == 2
assert len(project.get_users_with_perm(WRITE)) == 2
assert len(project.get_users_with_perm(ADMIN)) == 1
assert member in read_users
assert member in write_users
assert member not in admin_users
@pytest.mark.enable_quickfiles_creation
def test_merge_users_transfers_group_membership(self, member, manager, osf_group):
# merge member
other_user = AuthUserFactory()
other_user.merge_user(member)
other_user.save()
assert osf_group.is_member(other_user)
# merge manager
other_other_user = AuthUserFactory()
other_other_user.merge_user(manager)
other_other_user.save()
assert osf_group.is_member(other_other_user)
assert osf_group.has_permission(other_other_user, MANAGE)
@pytest.mark.enable_quickfiles_creation
def test_merge_users_already_group_manager(self, member, manager, osf_group):
# merge users - both users have group membership - different roles
manager.merge_user(member)
manager.save()
assert osf_group.has_permission(manager, MANAGE)
assert osf_group.is_member(member) is False
    def test_osf_group_is_admin_parent(self, project, manager, member, osf_group, user_two, user_three):
        """is_admin_parent honors admin granted through OSF groups.

        ``include_group_admin=False`` must exclude group-granted admin rights.
        """
        child = NodeFactory(parent=project, creator=manager)
        assert project.is_admin_parent(manager) is True
        assert project.is_admin_parent(member) is False
        project.add_contributor(user_two, WRITE, save=True)
        assert project.is_admin_parent(user_two) is False
        # Manager is implicit admin on the child through the parent project.
        assert child.is_admin_parent(manager) is True
        child.add_contributor(user_two, ADMIN, save=True)
        assert child.is_admin_parent(user_two) is True
        assert child.is_admin_parent(user_three) is False
        # Group WRITE on the parent does not confer admin parentage...
        osf_group.make_member(user_three)
        project.add_osf_group(osf_group, WRITE)
        assert child.is_admin_parent(user_three) is False
        # ...but group ADMIN does, unless group admins are explicitly excluded.
        project.update_osf_group(osf_group, ADMIN)
        assert child.is_admin_parent(user_three) is True
        assert child.is_admin_parent(user_three, include_group_admin=False) is False
        # Same behavior when the group is attached directly to the child node.
        project.remove_osf_group(osf_group)
        child.add_osf_group(osf_group, WRITE)
        assert child.is_admin_parent(user_three) is False
        child.update_osf_group(osf_group, ADMIN)
        assert child.is_admin_parent(user_three) is True
        assert child.is_admin_parent(user_three, include_group_admin=False) is False
class TestNodeGroups:
    """Tests for attaching/updating/removing OSF groups on nodes.

    Covers permission propagation, notification emails/subscriptions,
    implicit admin on child nodes, and queryset helpers.
    """
    def test_node_contributors_and_group_members(self, manager, member, osf_group, project, user, user_two):
        # Starts with the creator as the sole contributor.
        assert project.contributors_and_group_members.count() == 1
        project.add_osf_group(osf_group, ADMIN)
        assert project.contributors_and_group_members.count() == 2
        project.add_contributor(user, WRITE)
        project.add_contributor(user_two, READ)
        project.save()
        assert project.contributors_and_group_members.count() == 4
    def test_add_osf_group_to_node_already_connected(self, manager, member, osf_group, project):
        project.add_osf_group(osf_group, ADMIN)
        assert project.has_permission(member, ADMIN) is True
        # Re-adding an already-connected group downgrades its permission in place.
        project.add_osf_group(osf_group, WRITE)
        assert project.has_permission(member, ADMIN) is False
        assert project.has_permission(member, WRITE) is True
    def test_osf_group_nodes(self, manager, member, project, osf_group):
        # osf_group.nodes lists every node the group is connected to.
        nodes = osf_group.nodes
        assert len(nodes) == 0
        project.add_osf_group(osf_group, READ)
        assert project in osf_group.nodes
        project_two = ProjectFactory(creator=manager)
        project_two.add_osf_group(osf_group, WRITE)
        assert len(osf_group.nodes) == 2
        assert project_two in osf_group.nodes
    @mock.patch('website.osf_groups.views.mails.send_mail')
    def test_add_osf_group_to_node(self, mock_send_mail, manager, member, user_two, osf_group, project):
        # noncontributor
        with pytest.raises(PermissionsError):
            project.add_osf_group(osf_group, WRITE, auth=Auth(member))
        # Non-admin on project
        project.add_contributor(user_two, WRITE)
        project.save()
        with pytest.raises(PermissionsError):
            project.add_osf_group(osf_group, WRITE, auth=Auth(user_two))
        project.add_osf_group(osf_group, READ, auth=Auth(manager))
        assert mock_send_mail.call_count == 1
        # Manager was already a node admin
        assert project.has_permission(manager, ADMIN) is True
        assert project.has_permission(manager, WRITE) is True
        assert project.has_permission(manager, READ) is True
        # Member only gets what the group grant provides (READ here).
        assert project.has_permission(member, ADMIN) is False
        assert project.has_permission(member, WRITE) is False
        assert project.has_permission(member, READ) is True
        project.update_osf_group(osf_group, WRITE, auth=Auth(manager))
        assert project.has_permission(member, ADMIN) is False
        assert project.has_permission(member, WRITE) is True
        assert project.has_permission(member, READ) is True
        project.update_osf_group(osf_group, ADMIN, auth=Auth(manager))
        assert project.has_permission(member, ADMIN) is True
        assert project.has_permission(member, WRITE) is True
        assert project.has_permission(member, READ) is True
        # project admin cannot add a group they are not a manager of
        other_group = OSFGroupFactory()
        with pytest.raises(PermissionsError):
            project.add_osf_group(other_group, ADMIN, auth=Auth(project.creator))
    @mock.patch('website.osf_groups.views.mails.send_mail')
    def test_add_osf_group_to_node_emails_and_subscriptions(self, mock_send_mail, manager, member, user_two, osf_group, project):
        osf_group.make_member(user_two)
        # Manager is already a node contributor - already has subscriptions
        assert len(get_all_node_subscriptions(manager, project)) == 2
        assert len(get_all_node_subscriptions(member, project)) == 0
        assert len(get_all_node_subscriptions(user_two, project)) == 0
        assert mock_send_mail.call_count == 1
        project.add_osf_group(osf_group, ADMIN, auth=Auth(manager))
        # Three members of group, but user adding group to node doesn't get email
        assert mock_send_mail.call_count == 3
        assert len(get_all_node_subscriptions(manager, project)) == 2
        assert len(get_all_node_subscriptions(member, project)) == 2
        assert len(get_all_node_subscriptions(user_two, project)) == 2
        # Disconnecting the group unsubscribes everyone who only had group access.
        project.remove_osf_group(osf_group, auth=Auth(manager))
        assert len(get_all_node_subscriptions(manager, project)) == 2
        assert len(get_all_node_subscriptions(member, project)) == 0
        assert len(get_all_node_subscriptions(user_two, project)) == 0
        # Member is a contributor
        project.add_contributor(member, WRITE, save=True)
        assert len(get_all_node_subscriptions(manager, project)) == 2
        assert len(get_all_node_subscriptions(member, project)) == 2
        assert len(get_all_node_subscriptions(user_two, project)) == 0
        project.add_osf_group(osf_group, ADMIN, auth=Auth(manager))
        assert len(get_all_node_subscriptions(manager, project)) == 2
        assert len(get_all_node_subscriptions(member, project)) == 2
        assert len(get_all_node_subscriptions(user_two, project)) == 2
        # Member keeps subscriptions after group removal: still a contributor.
        project.remove_osf_group(osf_group, auth=Auth(manager))
        assert len(get_all_node_subscriptions(manager, project)) == 2
        assert len(get_all_node_subscriptions(member, project)) == 2
        assert len(get_all_node_subscriptions(user_two, project)) == 0
        project.add_osf_group(osf_group, ADMIN, auth=Auth(manager))
        assert len(get_all_node_subscriptions(manager, project)) == 2
        assert len(get_all_node_subscriptions(member, project)) == 2
        assert len(get_all_node_subscriptions(user_two, project)) == 2
        # Don't unsubscribe member because they belong to a group that has perms
        project.remove_contributor(member, Auth(manager))
        assert len(get_all_node_subscriptions(manager, project)) == 2
        assert len(get_all_node_subscriptions(member, project)) == 2
        assert len(get_all_node_subscriptions(user_two, project)) == 2
    @mock.patch('website.osf_groups.views.mails.send_mail')
    def test_add_group_to_node_throttle(self, mock_send_mail, osf_group, manager, member, project):
        throttle = 100
        assert manager.group_connected_email_records == {}
        group_signals.group_added_to_node.send(osf_group, node=project, user=manager, permission=WRITE, auth=Auth(member), throttle=throttle)
        assert mock_send_mail.call_count == 1
        assert manager.group_connected_email_records[osf_group._id] is not None
        # 2nd call does not send email because throttle period has not expired
        group_signals.group_added_to_node.send(osf_group, node=project, user=manager, permission=WRITE, auth=Auth(member), throttle=throttle)
        assert mock_send_mail.call_count == 1
        throttle = 0.5
        time.sleep(1)  # throttle period expires
        # 3rd call sends email because the throttle period has now expired
        assert manager.group_connected_email_records[osf_group._id] is not None
        group_signals.group_added_to_node.send(osf_group, node=project, user=manager, permission=WRITE, auth=Auth(member), throttle=throttle)
        assert mock_send_mail.call_count == 2
    def test_add_osf_group_to_node_default_permission(self, manager, member, osf_group, project):
        project.add_osf_group(osf_group, auth=Auth(manager))
        assert project.has_permission(manager, ADMIN) is True
        assert project.has_permission(manager, WRITE) is True
        assert project.has_permission(manager, READ) is True
        # osf_group given write permissions by default
        assert project.has_permission(member, ADMIN) is False
        assert project.has_permission(member, WRITE) is True
        assert project.has_permission(member, READ) is True
    def test_update_osf_group_node(self, manager, member, user_two, user_three, osf_group, project):
        project.add_osf_group(osf_group, ADMIN)
        assert project.has_permission(member, ADMIN) is True
        assert project.has_permission(member, WRITE) is True
        assert project.has_permission(member, READ) is True
        project.update_osf_group(osf_group, READ)
        assert project.has_permission(member, ADMIN) is False
        assert project.has_permission(member, WRITE) is False
        assert project.has_permission(member, READ) is True
        project.update_osf_group(osf_group, WRITE)
        assert project.has_permission(member, ADMIN) is False
        assert project.has_permission(member, WRITE) is True
        assert project.has_permission(member, READ) is True
        project.update_osf_group(osf_group, ADMIN)
        assert project.has_permission(member, ADMIN) is True
        assert project.has_permission(member, WRITE) is True
        assert project.has_permission(member, READ) is True
        # Project admin who does not belong to the manager group can update group permissions
        project.add_contributor(user_two, ADMIN, save=True)
        project.update_osf_group(osf_group, READ, auth=Auth(user_two))
        assert project.has_permission(member, ADMIN) is False
        assert project.has_permission(member, WRITE) is False
        assert project.has_permission(member, READ) is True
        # Project write contributor cannot update group permissions
        project.add_contributor(user_three, WRITE, save=True)
        with pytest.raises(PermissionsError):
            project.update_osf_group(osf_group, ADMIN, auth=Auth(user_three))
        assert project.has_permission(member, ADMIN) is False
    def test_remove_osf_group_from_node(self, manager, member, user_two, osf_group, project):
        # noncontributor
        with pytest.raises(PermissionsError):
            project.remove_osf_group(osf_group, auth=Auth(member))
        project.add_osf_group(osf_group, ADMIN, auth=Auth(manager))
        assert project.has_permission(member, ADMIN) is True
        assert project.has_permission(member, WRITE) is True
        assert project.has_permission(member, READ) is True
        project.remove_osf_group(osf_group, auth=Auth(manager))
        assert project.has_permission(member, ADMIN) is False
        assert project.has_permission(member, WRITE) is False
        assert project.has_permission(member, READ) is False
        # Project admin who does not belong to the manager group can remove the group
        project.add_osf_group(osf_group, ADMIN, auth=Auth(manager))
        project.add_contributor(user_two, ADMIN)
        project.save()
        project.remove_osf_group(osf_group, auth=Auth(user_two))
        assert project.has_permission(member, ADMIN) is False
        assert project.has_permission(member, WRITE) is False
        assert project.has_permission(member, READ) is False
        # Manager who is not an admin can remove the group
        user_three = AuthUserFactory()
        osf_group.make_manager(user_three)
        project.add_osf_group(osf_group, WRITE)
        assert project.has_permission(user_three, ADMIN) is False
        assert project.has_permission(user_three, WRITE) is True
        assert project.has_permission(user_three, READ) is True
        project.remove_osf_group(osf_group, auth=Auth(user_three))
        assert project.has_permission(user_three, ADMIN) is False
        assert project.has_permission(user_three, WRITE) is False
        assert project.has_permission(user_three, READ) is False
    def test_node_groups_property(self, manager, member, osf_group, project):
        # node.osf_groups lists every group connected to the node.
        project.add_osf_group(osf_group, ADMIN, auth=Auth(manager))
        project.save()
        assert osf_group in project.osf_groups
        assert len(project.osf_groups) == 1
        group_two = OSFGroupFactory(creator=manager)
        project.add_osf_group(group_two, ADMIN, auth=Auth(manager))
        project.save()
        assert group_two in project.osf_groups
        assert len(project.osf_groups) == 2
    def test_get_osf_groups_with_perms_property(self, manager, member, osf_group, project):
        second_group = OSFGroupFactory(creator=manager)
        third_group = OSFGroupFactory(creator=manager)
        fourth_group = OSFGroupFactory(creator=manager)
        OSFGroupFactory(creator=manager)
        project.add_osf_group(osf_group, ADMIN)
        project.add_osf_group(second_group, WRITE)
        project.add_osf_group(third_group, WRITE)
        project.add_osf_group(fourth_group, READ)
        # Queries are cumulative: WRITE groups also satisfy READ, etc.
        read_groups = project.get_osf_groups_with_perms(READ)
        assert len(read_groups) == 4
        write_groups = project.get_osf_groups_with_perms(WRITE)
        assert len(write_groups) == 3
        admin_groups = project.get_osf_groups_with_perms(ADMIN)
        assert len(admin_groups) == 1
        with pytest.raises(ValueError):
            project.get_osf_groups_with_perms('crazy')
    def test_osf_group_node_can_view(self, project, manager, member, osf_group):
        assert project.can_view(Auth(member)) is False
        project.add_osf_group(osf_group, READ)
        assert project.can_view(Auth(member)) is True
        assert project.can_edit(Auth(member)) is False
        project.remove_osf_group(osf_group)
        project.add_osf_group(osf_group, WRITE)
        assert project.can_view(Auth(member)) is True
        assert project.can_edit(Auth(member)) is True
        child = ProjectFactory(parent=project)
        project.remove_osf_group(osf_group)
        project.add_osf_group(osf_group, ADMIN)
        # implicit OSF Group admin
        assert child.can_view(Auth(member)) is True
        assert child.can_edit(Auth(member)) is False
        grandchild = ProjectFactory(parent=child)
        assert grandchild.can_view(Auth(member)) is True
        assert grandchild.can_edit(Auth(member)) is False
    def test_node_has_permission(self, project, manager, member, osf_group):
        assert project.can_view(Auth(member)) is False
        project.add_osf_group(osf_group, READ)
        assert project.has_permission(member, READ) is True
        assert project.has_permission(member, WRITE) is False
        assert osf_group.get_permission_to_node(project) == READ
        project.remove_osf_group(osf_group)
        project.add_osf_group(osf_group, WRITE)
        assert project.has_permission(member, READ) is True
        assert project.has_permission(member, WRITE) is True
        assert project.has_permission(member, ADMIN) is False
        assert osf_group.get_permission_to_node(project) == WRITE
        child = ProjectFactory(parent=project)
        project.remove_osf_group(osf_group)
        project.add_osf_group(osf_group, ADMIN)
        assert osf_group.get_permission_to_node(project) == ADMIN
        # implicit OSF Group admin
        assert child.has_permission(member, ADMIN) is False
        assert child.has_permission(member, READ) is True
        # No direct connection to the child: get_permission_to_node is None.
        assert osf_group.get_permission_to_node(child) is None
        grandchild = ProjectFactory(parent=child)
        assert grandchild.has_permission(member, WRITE) is False
        assert grandchild.has_permission(member, READ) is True
    def test_node_get_permissions_override(self, project, manager, member, osf_group):
        project.add_osf_group(osf_group, WRITE)
        assert set(project.get_permissions(member)) == set([READ, WRITE])
        project.remove_osf_group(osf_group)
        project.add_osf_group(osf_group, READ)
        assert set(project.get_permissions(member)) == set([READ])
        # Anonymous users have no permissions at all.
        anon = AnonymousUser()
        assert project.get_permissions(anon) == []
    def test_is_contributor(self, project, manager, member, osf_group):
        # Group membership does NOT make a user a contributor.
        assert project.is_contributor(manager) is True
        assert project.is_contributor(member) is False
        project.add_osf_group(osf_group, READ, auth=Auth(project.creator))
        assert project.is_contributor(member) is False
        assert project.is_contributor_or_group_member(member) is True
        project.remove_osf_group(osf_group, auth=Auth(manager))
        assert project.is_contributor_or_group_member(member) is False
        project.add_contributor(member, READ)
        assert project.is_contributor(member) is True
        assert project.is_contributor_or_group_member(member) is True
    def test_is_contributor_or_group_member(self, project, manager, member, osf_group):
        # Any group permission level (ADMIN/WRITE/READ) counts as group membership.
        project.add_osf_group(osf_group, ADMIN, auth=Auth(project.creator))
        assert project.is_contributor_or_group_member(member) is True
        project.remove_osf_group(osf_group, auth=Auth(manager))
        assert project.is_contributor_or_group_member(member) is False
        project.add_osf_group(osf_group, WRITE, auth=Auth(project.creator))
        assert project.is_contributor_or_group_member(member) is True
        project.remove_osf_group(osf_group, auth=Auth(manager))
        assert project.is_contributor_or_group_member(member) is False
        project.add_osf_group(osf_group, READ, auth=Auth(project.creator))
        assert project.is_contributor_or_group_member(member) is True
        project.remove_osf_group(osf_group, auth=Auth(manager))
        # Unregistered group members count too, once the group is connected.
        osf_group.add_unregistered_member('jane', 'janedoe@cos.io', Auth(manager))
        unreg = osf_group.members.get(username='janedoe@cos.io')
        assert unreg.is_registered is False
        assert project.is_contributor_or_group_member(unreg) is False
        project.add_osf_group(osf_group, READ, auth=Auth(project.creator))
        assert project.is_contributor_or_group_member(unreg) is True
        # Membership is not inherited by child nodes.
        child = ProjectFactory(parent=project)
        assert child.is_contributor_or_group_member(manager) is False
    def test_node_object_can_view_osfgroups(self, manager, member, project, osf_group):
        project.add_contributor(member, ADMIN, save=True)  # Member is explicit admin contributor on project
        child = NodeFactory(parent=project, creator=manager)  # Member is implicit admin on child
        grandchild = NodeFactory(parent=child, creator=manager)  # Member is implicit admin on grandchild
        project_two = ProjectFactory(creator=manager)
        project_two.add_osf_group(osf_group, ADMIN)  # Member has admin permissions to project_two through osf_group
        child_two = NodeFactory(parent=project_two, creator=manager)  # Member has implicit admin on child_two through osf_group
        grandchild_two = NodeFactory(parent=child_two, creator=manager)  # Member has implicit admin perms on grandchild_two through osf_group
        can_view = Node.objects.can_view(member)
        assert len(can_view) == 6
        assert set(list(can_view.values_list('id', flat=True))) == set((project.id,
                                                                        child.id,
                                                                        grandchild.id,
                                                                        project_two.id,
                                                                        child_two.id,
                                                                        grandchild_two.id))
        # Deleted nodes drop out of the can_view queryset.
        grandchild_two.is_deleted = True
        grandchild_two.save()
        can_view = Node.objects.can_view(member)
        assert len(can_view) == 5
        assert grandchild_two not in can_view
    def test_parent_admin_users_osf_groups(self, manager, member, user_two, project, osf_group):
        child = NodeFactory(parent=project, creator=manager)
        project.add_osf_group(osf_group, ADMIN)
        # Manager has explict admin to child, member has implicit admin.
        # Manager should be in admin_users, member should be in parent_admin_users
        admin_users = child.get_users_with_perm(ADMIN)
        assert manager in admin_users
        assert member not in admin_users
        assert manager not in child.parent_admin_users
        assert member in child.parent_admin_users
        # Superuser status alone grants neither admin nor parent-admin.
        user_two.is_superuser = True
        user_two.save()
        assert user_two not in admin_users
        assert user_two not in child.parent_admin_users
class TestOSFGroupLogging:
    """Verify that every group action writes exactly one log entry.

    Each mutating action is invoked twice to assert the second (no-op)
    call does not produce a duplicate log.
    """
    def test_logging(self, project, manager, member):
        # Calling actions 2x in this test to assert we're not getting double logs
        group = OSFGroup.objects.create(name='My Lab', creator_id=manager.id)
        # Creation emits two logs: GROUP_CREATED then MANAGER_ADDED.
        assert group.logs.count() == 2
        log = group.logs.last()
        assert log.action == OSFGroupLog.GROUP_CREATED
        assert log.user == manager
        assert log.params['group'] == group._id
        log = group.logs.first()
        assert log.action == OSFGroupLog.MANAGER_ADDED
        assert log.params['group'] == group._id
        group.make_member(member, Auth(manager))
        group.make_member(member, Auth(manager))
        assert group.logs.count() == 3
        log = group.logs.first()
        assert log.action == OSFGroupLog.MEMBER_ADDED
        assert log.user == manager
        assert log.params['group'] == group._id
        assert log.params['user'] == member._id
        group.make_manager(member, Auth(manager))
        group.make_manager(member, Auth(manager))
        assert group.logs.count() == 4
        log = group.logs.first()
        assert log.action == OSFGroupLog.ROLE_UPDATED
        assert log.user == manager
        assert log.params['group'] == group._id
        assert log.params['user'] == member._id
        assert log.params['new_role'] == MANAGER
        # Demoting back to member is logged as another role update.
        group.make_member(member, Auth(manager))
        group.make_member(member, Auth(manager))
        log = group.logs.first()
        assert group.logs.count() == 5
        assert log.action == OSFGroupLog.ROLE_UPDATED
        assert log.user == manager
        assert log.params['group'] == group._id
        assert log.params['user'] == member._id
        assert log.params['new_role'] == MEMBER
        group.remove_member(member, Auth(manager))
        group.remove_member(member, Auth(manager))
        assert group.logs.count() == 6
        log = group.logs.first()
        assert log.action == OSFGroupLog.MEMBER_REMOVED
        assert log.user == manager
        assert log.params['group'] == group._id
        assert log.params['user'] == member._id
        group.set_group_name('New Name', Auth(manager))
        group.set_group_name('New Name', Auth(manager))
        assert group.logs.count() == 7
        log = group.logs.first()
        assert log.action == OSFGroupLog.EDITED_NAME
        assert log.user == manager
        assert log.params['group'] == group._id
        assert log.params['name_original'] == 'My Lab'
        # Node connection is logged on both the group and the node.
        project.add_osf_group(group, WRITE, Auth(manager))
        project.add_osf_group(group, WRITE, Auth(manager))
        assert group.logs.count() == 8
        log = group.logs.first()
        assert log.action == OSFGroupLog.NODE_CONNECTED
        assert log.user == manager
        assert log.params['group'] == group._id
        assert log.params['node'] == project._id
        assert log.params['permission'] == WRITE
        node_log = project.logs.first()
        assert node_log.action == NodeLog.GROUP_ADDED
        assert node_log.user == manager
        assert node_log.params['group'] == group._id
        assert node_log.params['node'] == project._id
        assert node_log.params['permission'] == WRITE
        project.update_osf_group(group, READ, Auth(manager))
        project.update_osf_group(group, READ, Auth(manager))
        log = group.logs.first()
        assert group.logs.count() == 9
        assert log.action == OSFGroupLog.NODE_PERMS_UPDATED
        assert log.user == manager
        assert log.params['group'] == group._id
        assert log.params['node'] == project._id
        assert log.params['permission'] == READ
        node_log = project.logs.first()
        assert node_log.action == NodeLog.GROUP_UPDATED
        assert node_log.user == manager
        assert node_log.params['group'] == group._id
        assert node_log.params['node'] == project._id
        assert node_log.params['permission'] == READ
        project.remove_osf_group(group, Auth(manager))
        project.remove_osf_group(group, Auth(manager))
        assert group.logs.count() == 10
        log = group.logs.first()
        assert log.action == OSFGroupLog.NODE_DISCONNECTED
        assert log.user == manager
        assert log.params['group'] == group._id
        assert log.params['node'] == project._id
        node_log = project.logs.first()
        assert node_log.action == NodeLog.GROUP_REMOVED
        assert node_log.user == manager
        assert node_log.params['group'] == group._id
        assert node_log.params['node'] == project._id
        # Deleting a connected group logs GROUP_REMOVED on the node as well.
        project.add_osf_group(group, WRITE, Auth(manager))
        project.add_osf_group(group, WRITE, Auth(manager))
        group.remove_group(auth=Auth(manager))
        node_log = project.logs.first()
        assert node_log.action == NodeLog.GROUP_REMOVED
        assert node_log.user == manager
        assert node_log.params['group'] == group._id
        assert node_log.params['node'] == project._id
class TestRemovingContributorOrGroupMembers:
    """
    Post OSF-Groups, the same kinds of checks you run when removing a contributor
    also need to run when a group is removed from a node (or a user is removed
    from a group, or the group is deleted altogether).

    The cleanup actions (revoking addon auth, releasing file checkouts,
    removing subscriptions) only execute when the user has no permissions
    left at all: no contributorship and no group membership.
    """
    @pytest.fixture()
    def project(self, user_two, user_three, external_account):
        # Project whose creator (user_two) has github addon auth configured.
        project = ProjectFactory(creator=user_two)
        project.add_contributor(user_three, ADMIN)
        project.add_addon('github', auth=Auth(user_two))
        project.creator.add_addon('github')
        project.creator.external_accounts.add(external_account)
        project.creator.save()
        return project
    @pytest.fixture()
    def file(self, project, user_two):
        # An osfstorage file checked out by user_two.
        filename = 'my_file.txt'
        project_file = OsfStorageFile.create(
            target_object_id=project.id,
            target_content_type=ContentType.objects.get_for_model(project),
            path='/{}'.format(filename),
            name=filename,
            materialized_path='/{}'.format(filename))
        project_file.save()
        from addons.osfstorage import settings as osfstorage_settings
        project_file.create_version(user_two, {
            'object': '06d80e',
            'service': 'cloud',
            osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
        }, {
            'size': 1337,
            'contentType': 'img/png'
        }).save()  # BUG FIX: was `.save` (bare attribute access) — version was never persisted
        project_file.checkout = user_two
        project_file.save()
        return project_file
    @pytest.fixture()
    def external_account(self):
        return factories.GitHubAccountFactory()
    @pytest.fixture()
    def node_settings(self, project, external_account):
        # Wire the project's github node settings to the creator's oauth grant.
        node_settings = project.get_addon('github')
        user_settings = project.creator.get_addon('github')
        user_settings.oauth_grants[project._id] = {external_account._id: []}
        user_settings.save()
        node_settings.user_settings = user_settings
        node_settings.user = 'Queen'
        node_settings.repo = 'Sheer-Heart-Attack'
        node_settings.external_account = external_account
        node_settings.save()
        # NOTE(review): bare attribute access below is a no-op; `set_auth`
        # presumably requires arguments — confirm whether this line was
        # meant to be a call or can be removed.
        node_settings.set_auth
        return node_settings
    def test_remove_contributor_no_member_perms(self, project, node_settings, user_two, user_three, request_context, file):
        # User with no remaining perms: addon auth, checkout, subscriptions all cleared.
        assert project.get_addon('github').user_settings is not None
        assert file.checkout is not None
        assert len(get_all_node_subscriptions(user_two, project)) == 2
        project.remove_contributor(user_two, Auth(user_three))
        project.reload()
        assert project.get_addon('github').user_settings is None
        file.reload()
        assert file.checkout is None
        assert len(get_all_node_subscriptions(user_two, project)) == 0
    def test_remove_group_from_node_no_contributor_perms(self, project, node_settings, user_two, user_three, request_context, file):
        group = OSFGroupFactory(creator=user_two)
        project.add_osf_group(group, ADMIN)
        # Manually removing contributor
        contrib_obj = project.contributor_set.get(user=user_two)
        contrib_obj.delete()
        project.clear_permissions(user_two)
        assert project.is_contributor(user_two) is False
        assert project.is_contributor_or_group_member(user_two) is True
        assert node_settings.user_settings is not None
        # Removing the group strips user_two's last access: cleanup runs.
        project.remove_osf_group(group)
        project.reload()
        assert project.get_addon('github').user_settings is None
        file.reload()
        assert file.checkout is None
        assert len(get_all_node_subscriptions(user_two, project)) == 0
    def test_remove_member_no_contributor_perms(self, project, node_settings, user_two, user_three, request_context, file):
        group = OSFGroupFactory(creator=user_two)
        project.add_osf_group(group, ADMIN)
        group.make_manager(user_three)
        # Manually removing contributor
        contrib_obj = project.contributor_set.get(user=user_two)
        contrib_obj.delete()
        project.clear_permissions(user_two)
        assert project.is_contributor(user_two) is False
        assert project.is_contributor_or_group_member(user_two) is True
        assert node_settings.user_settings is not None
        # Removing user_two from the group strips their last access.
        group.remove_member(user_two)
        project.reload()
        assert project.get_addon('github').user_settings is None
        file.reload()
        assert file.checkout is None
        assert len(get_all_node_subscriptions(user_two, project)) == 0
    def test_delete_group_no_contributor_perms(self, project, node_settings, user_two, user_three, request_context, file):
        group = OSFGroupFactory(creator=user_two)
        project.add_osf_group(group, ADMIN)
        group.make_manager(user_three)
        # Manually removing contributor
        contrib_obj = project.contributor_set.get(user=user_two)
        contrib_obj.delete()
        project.clear_permissions(user_two)
        assert project.is_contributor(user_two) is False
        assert project.is_contributor_or_group_member(user_two) is True
        assert node_settings.user_settings is not None
        # Deleting the whole group strips user_two's last access.
        group.remove_group()
        project.reload()
        assert project.get_addon('github').user_settings is None
        file.reload()
        assert file.checkout is None
        assert len(get_all_node_subscriptions(user_two, project)) == 0
    def test_remove_contributor_also_member(self, project, node_settings, user_two, user_three, request_context, file):
        # User keeps group membership, so no cleanup occurs.
        group = OSFGroupFactory(creator=user_two)
        project.add_osf_group(group, ADMIN)
        assert project.is_contributor(user_two) is True
        assert project.is_contributor_or_group_member(user_two) is True
        assert node_settings.user_settings is not None
        project.remove_osf_group(group)
        project.reload()
        assert project.get_addon('github').user_settings is not None
        file.reload()
        assert file.checkout is not None
        assert len(get_all_node_subscriptions(user_two, project)) == 2
    def test_remove_osf_group_from_node_also_member(self, project, node_settings, user_two, user_three, request_context, file):
        # User keeps contributorship, so no cleanup occurs.
        group = OSFGroupFactory(creator=user_two)
        project.add_osf_group(group, ADMIN)
        assert project.is_contributor(user_two) is True
        assert project.is_contributor_or_group_member(user_two) is True
        assert node_settings.user_settings is not None
        project.remove_osf_group(group)
        project.reload()
        assert project.get_addon('github').user_settings is not None
        file.reload()
        assert file.checkout is not None
        assert len(get_all_node_subscriptions(user_two, project)) == 2
    def test_remove_member_also_contributor(self, project, node_settings, user_two, user_three, request_context, file):
        # User removed from group but is still a contributor: no cleanup.
        group = OSFGroupFactory(creator=user_two)
        group.make_manager(user_three)
        project.add_osf_group(group, ADMIN)
        assert project.is_contributor(user_two) is True
        assert project.is_contributor_or_group_member(user_two) is True
        assert node_settings.user_settings is not None
        group.remove_member(user_two)
        project.reload()
        assert project.get_addon('github').user_settings is not None
        file.reload()
        assert file.checkout is not None
        assert len(get_all_node_subscriptions(user_two, project)) == 2
    def test_delete_group_also_contributor(self, project, node_settings, user_two, user_three, request_context, file):
        # Group deleted but user is still a contributor: no cleanup.
        group = OSFGroupFactory(creator=user_two)
        project.add_osf_group(group, ADMIN)
        group.make_manager(user_three)
        assert project.is_contributor(user_two) is True
        assert project.is_contributor_or_group_member(user_two) is True
        assert node_settings.user_settings is not None
        group.remove_group()
        project.reload()
        assert project.get_addon('github').user_settings is not None
        file.reload()
        assert file.checkout is not None
        assert len(get_all_node_subscriptions(user_two, project)) == 2
|
|
# Ensure the following commands have been run before this script can work:
#
# /opt/datadog-agent/embedded/bin/pip install --upgrade pysnmp
#
# sudo -u dd-agent /opt/datadog-agent/bin/mibdump.py SNMPv2-SMI KEEPALIVED-MIB
import re
from pysnmp.entity.rfc3413.oneliner import cmdgen
from pysnmp.hlapi import SnmpEngine, ContextData, nextCmd, getCmd
from pysnmp.hlapi import ObjectType, ObjectIdentity
from pysnmp.smi import builder
from pysnmp.smi.exval import noSuchInstance, noSuchObject
from pysnmp.error import PySnmpError
import pysnmp.proto.rfc1902 as snmp_type
# project
from checks.network_checks import NetworkCheck, Status
from config import _is_affirmative
# SNMP value classes we know how to convert to Python types (see
# KeepalivedVrrpCheck.snmp_to_python_type).
SNMP_NUMBERS = frozenset({
    snmp_type.Counter32.__name__,
    snmp_type.Counter64.__name__,
    snmp_type.Gauge32.__name__,
    snmp_type.Unsigned32.__name__,
    snmp_type.Integer.__name__,
    snmp_type.Integer32.__name__,
    'InetAddressPrefixLength',
    'InterfaceIndex',
})

SNMP_STRINGS = frozenset({
    'InetAddress',
    'InetAddressType',
    'InetScopeType',
    'VrrpState',
    snmp_type.OctetString.__name__,
})

SNMP_UNICODE_STRINGS = frozenset({'DisplayString'})

# Mapping of VRRP textual state to the integer reported as a gauge.
VRRP_STATE_TO_INTEGER = dict(init=0, backup=1, master=2, fault=3, unknown=4)
class KeepalivedVrrpCheck(NetworkCheck):
    """Agent check that monitors keepalived VRRP instances over SNMP.

    Reads the KEEPALIVED-MIB on each configured host, decodes the VRRP
    instance/address/sync-group tables, and emits per virtual-router gauges
    (state, base/effective priority, status) plus the
    ``keepalived.vrrp.can_check`` service check.
    """

    DEFAULT_RETRIES = 5
    DEFAULT_TIMEOUT = 1
    SC_STATUS = 'keepalived.vrrp.can_check'
    SOURCE_TYPE_NAME = 'system'

    def __init__(self, name, init_config, agentConfig, instances):
        # Give every instance a stable 'name' key (used to cache its pysnmp
        # command generator) and suppress per-instance events.
        for instance in instances:
            if 'name' not in instance:
                instance['name'] = self._get_instance_key(instance)
            instance['skip_event'] = True
        # Command generators cached per instance name.
        self.generators = {}
        # Optional custom MIB directory (must be in pysnmp format).
        self.mibs_path = None
        self.ignore_nonincreasing_oid = False
        if init_config is not None:
            self.mibs_path = init_config.get("mibs_folder")
            self.ignore_nonincreasing_oid = _is_affirmative(
                init_config.get("ignore_nonincreasing_oid", False))
        NetworkCheck.__init__(self, name, init_config, agentConfig, instances)

    def _load_conf(self, instance):
        """Validate an instance config and return its SNMP query parameters.

        Returns a tuple ``(cmd_generator, ip_address, tags, timeout, retries,
        enforce_constraints)``.  Raises via raise_on_error_indication when
        the 'weight'/'verify' options are combined or 'verify' is invalid.
        """
        tags = instance.get("tags", [])
        ip_address = instance["ip_address"]
        timeout = int(instance.get('timeout', self.DEFAULT_TIMEOUT))
        retries = int(instance.get('retries', self.DEFAULT_RETRIES))
        enforce_constraints = _is_affirmative(
            instance.get('enforce_mib_constraints', True))
        # 'weight' and 'verify' are mutually exclusive ways of choosing the
        # desired VRRP state.
        if 'verify' in instance and 'weight' in instance:
            self.raise_on_error_indication('VRRP ERROR: Can not combine use of weight and verify parameters.', instance)
        if 'verify' in instance and instance['verify'] not in ('master', 'backup'):
            self.raise_on_error_indication('VRRP ERROR: verify parameter value must be "master" or "backup"', instance)
        instance_key = instance['name']
        cmd_generator = self.generators.get(instance_key)
        if not cmd_generator:
            cmd_generator = self.create_command_generator(
                self.mibs_path, self.ignore_nonincreasing_oid)
            self.generators[instance_key] = cmd_generator
        return cmd_generator, ip_address, tags, timeout, retries, enforce_constraints

    # key is name, or absent that, some combo of ip/host and port
    def _get_instance_key(self, instance):
        """Derive a stable key: name, host:port, ip:port, host, or ip."""
        key = instance.get('name')
        if key:
            return key
        host = instance.get('host')
        ip = instance.get('ip_address')
        port = instance.get('port')
        if host and port:
            key = "{host}:{port}".format(host=host, port=port)
        elif ip and port:
            key = "{host}:{port}".format(host=ip, port=port)
        elif host:
            key = host
        elif ip:
            key = ip
        return key

    @classmethod
    def hex2inet(cls, hexstr):
        """Convert a hex string (e.g. '0x0a000001') to dotted-quad IPv4."""
        ip = int(hexstr, 16)
        octets = []
        for _ in range(4):
            octets.insert(0, str(ip & 0xFF))
            ip >>= 8
        return ".".join(octets)

    @classmethod
    def hex2inet6(cls, hexstr):
        """Convert a hex string ('0x' + 32 hex digits) to a bracketed IPv6
        address: colons every 16 bits, first zero-run collapsed to '::',
        leading zeros stripped from each group.
        """
        # Drop the 2-char prefix (presumably '0x' from OctetString
        # prettyOut -- confirm caller format) and normalize case.
        hexstr = hexstr[2:].lower()
        # BUG FIX: Python re.sub backreferences are \1, not Perl's $1 --
        # the previous '$1' replacements inserted literal '$1' text.
        hexstr = re.sub(r"([a-f\d]{4})", r"\1:", hexstr)
        hexstr = re.sub(r"(:0000)+:", "::", hexstr)
        hexstr = re.sub(r":0+([0-9a-f])", r":\1", hexstr)
        hexstr = re.sub(r"([0-9a-f]):$", r"\1", hexstr)
        return '[%s]' % hexstr

    @classmethod
    def get_auth_data(cls, instance):
        """Build pysnmp auth data; only v1/v2c community auth is supported.

        See http://pysnmp.sourceforge.net/docs/current/security-configuration.html
        """
        if "community" in instance:
            # mpModel=0 selects SNMPv1, mpModel=1 selects SNMPv2c.
            if int(instance.get("snmp_version", 2)) == 1:
                return cmdgen.CommunityData(instance['community'], mpModel=0)
            return cmdgen.CommunityData(instance['community'], mpModel=1)
        raise Exception("An authentication method needs to be provided")

    def create_command_generator(self, mibs_path, ignore_nonincreasing_oid):
        '''
        Create a command generator to perform all the snmp query.
        If mibs_path is not None, load the mibs present in the custom mibs
        folder. (Need to be in pysnmp format)
        '''
        cmd_generator = cmdgen.CommandGenerator()
        cmd_generator.ignoreNonIncreasingOid = ignore_nonincreasing_oid
        if mibs_path is not None:
            mib_builder = cmd_generator.snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder
            mib_sources = mib_builder.getMibSources() + (builder.DirMibSource(mibs_path), )
            mib_builder.setMibSources(*mib_sources)
        return cmd_generator

    @classmethod
    def get_transport_target(cls, instance, timeout, retries):
        '''
        Generate a Transport target object based on the instance's configuration
        '''
        if "ip_address" not in instance:
            raise Exception("An IP address needs to be specified")
        ip_address = instance["ip_address"]
        port = int(instance.get("port", 161))  # Default SNMP port
        return cmdgen.UdpTransportTarget((ip_address, port), timeout=timeout, retries=retries)

    def raise_on_error_indication(self, error_indication, instance, fatal=True):
        """Record an SNMP/VRRP error on the instance; raise if fatal, else warn."""
        if error_indication:
            message = "{0} for instance {1}".format(error_indication, instance["ip_address"])
            instance["service_check_error"] = message
            if fatal:
                raise Exception(message)
            self.warning(message)

    def snmp_to_python_type(self, value):
        """Convert a pysnmp value to a plain int/str, or None if unsupported."""
        snmp_class = value.__class__.__name__
        if snmp_class in SNMP_NUMBERS:
            if snmp_class == 'Integer32':
                # Integer32 columns may carry named (enum) values; prefer the
                # label when prettyOut differs from the raw number.
                pretty = value.prettyOut(value).strip("'")
                if pretty == str(int(value)):
                    return int(value)
                return pretty
            return int(value)
        elif snmp_class in SNMP_STRINGS:
            return value.prettyOut(value).strip("'")
        elif snmp_class in SNMP_UNICODE_STRINGS:
            return value.prettyOut(value).strip("'").encode('utf-8')
        # FIXME: should raise or at least warn of this
        return None

    # get a table, return as hash of hashes
    def snmp_get_table(self, instance, cmd_generator, table_oid, lookup_names, timeout, retries, enforce_constraints=True):
        """Walk an SNMP table; return {row_index: {column_label: value}}."""
        transport_target = self.get_transport_target(instance, timeout, retries)
        auth_data = self.get_auth_data(instance)
        if not isinstance(table_oid, str):
            table_oid = ObjectType(ObjectIdentity(*table_oid)),
        else:
            table_oid = ObjectType(ObjectIdentity(table_oid)),
        dIndex = {}
        cmd = nextCmd(SnmpEngine(), auth_data, transport_target, ContextData(),
                      *table_oid, lookupValues=enforce_constraints,
                      lookupNames=lookup_names, lexicographicMode=False)
        for (errorIndication, errorStatus, errorIndex, varBinds) in cmd:
            if errorIndication:
                self.raise_on_error_indication(errorIndication, instance)
            elif errorStatus:
                self.raise_on_error_indication(
                    '%s at %s' % (errorStatus.prettyPrint(),
                                  errorIndex and varBinds[int(errorIndex) - 1][0] or '?'),
                    instance)
            else:
                for oid, value in varBinds:
                    label = oid.getLabel()
                    # The MIB symbol's last element is the row index; it may
                    # be composite and contain Nones -- normalize it to a
                    # dotted string key.
                    ridx = oid.getMibSymbol()[-1]
                    key = ".".join(str(part) for part in ridx if part is not None)
                    if key not in dIndex:
                        dIndex[key] = {}
                    dIndex[key][label[-1]] = self.snmp_to_python_type(value)
        return dIndex

    # get a single OID and return the value
    def snmp_get(self, instance, cmd_generator, oid, lookup_names, timeout, retries, enforce_constraints=True):
        """GET a single OID; return its pysnmp value (None on error status)."""
        snmpget = cmd_generator.getCmd
        transport_target = self.get_transport_target(instance, timeout, retries)
        auth_data = self.get_auth_data(instance)
        if not isinstance(oid, str):
            oid = cmdgen.MibVariable(*oid),
        else:
            oid = [oid]
        errorIndication, errorStatus, errorIndex, varBinds = snmpget(
            auth_data, transport_target, *oid,
            lookupValues=enforce_constraints, lookupNames=lookup_names)
        if errorIndication:
            self.raise_on_error_indication(errorIndication, instance)
        elif errorStatus:
            self.raise_on_error_indication(
                '%s at %s' % (errorStatus.prettyPrint(),
                              errorIndex and varBinds[int(errorIndex) - 1][0] or '?'),
                instance)
        else:
            return varBinds[0][1]

    def collect_data(self, instance, cmd_generator, lookup_names, timeout, retries, enforce_constraints=True):
        """Fetch keepalived version, router id, and the merged VRRP tables.

        Returns (version, routerId, vrrp_instances) where vrrp_instances maps
        row index -> attributes with the 'vrrpInstance' column prefix
        stripped, plus 'vips' and sync-group fields.  On PySnmpError the
        error is recorded on the instance and None is returned (the caller's
        tuple-unpack then fails and is handled as a collection error).
        """
        try:
            keepalived = self.snmp_get(instance, cmd_generator, ['KEEPALIVED-MIB', 'version', 0], lookup_names, timeout, retries, enforce_constraints)
            if keepalived == 'No Such Object available on this agent at this OID':
                self.raise_on_error_indication("VRRP CRITICAL: keepalived is not running", instance)
            routerId = self.snmp_get(instance, cmd_generator, ['KEEPALIVED-MIB', 'routerId', 0], lookup_names, timeout, retries, enforce_constraints)
            vit = self.snmp_get_table(instance, cmd_generator, ['KEEPALIVED-MIB', 'vrrpInstanceTable'], lookup_names, timeout, retries, enforce_constraints)
            if not vit:
                self.raise_on_error_indication('VRRP CRITICAL: keepalived does not have a VRRP instance.', instance)
            for key in sorted(vit):
                vr = vit[key]
                # Strip the 'vrrpInstance' prefix and lower-case the first
                # letter: vrrpInstanceState -> state, etc.
                for oldkey in list(vr.keys()):
                    newkey = re.sub(r'^vrrpInstance', '', oldkey)
                    newkey = newkey[0].lower() + newkey[1:]
                    vr[newkey] = vr[oldkey]
                    del vr[oldkey]
                vr['vips'] = []
            # Attach each virtual IP to its owning VRRP instance.
            vat = self.snmp_get_table(instance, cmd_generator, ['KEEPALIVED-MIB', 'vrrpAddressTable'], lookup_names, timeout, retries, enforce_constraints)
            if not vat:
                self.raise_on_error_indication('VRRP CRITICAL: keepalived is missing virtual addresses.', instance)
            for key in vat:
                (vrIdx, addrIdx) = key.split('.')
                vr = vit[vrIdx]
                if vat[key]['vrrpAddressType'] == 'ipv4':
                    vr['vips'].append(self.hex2inet(vat[key]['vrrpAddressValue']))
                if vat[key]['vrrpAddressType'] == 'ipv6':
                    vr['vips'].append(self.hex2inet6(vat[key]['vrrpAddressValue']))
            # Load VRRP sync-group info onto each member instance.
            vsgt = self.snmp_get_table(instance, cmd_generator, ['KEEPALIVED-MIB', 'vrrpSyncGroupTable'], lookup_names, timeout, retries, enforce_constraints)
            vsgmt = self.snmp_get_table(instance, cmd_generator, ['KEEPALIVED-MIB', 'vrrpSyncGroupMemberTable'], lookup_names, timeout, retries, enforce_constraints)
            if vsgt:
                for key in vsgmt:
                    (grpIdx, gmIdx) = key.split('.')
                    vr = vit[gmIdx]
                    grp = vsgt[grpIdx]
                    vr['syncGroupName'] = grp['vrrpSyncGroupName']
                    vr['syncGroupState'] = grp['vrrpSyncGroupState']
            return (keepalived, routerId, vit)
        except PySnmpError as e:
            if "service_check_error" not in instance:
                instance["service_check_error"] = "Fail to collect some metrics: {0}".format(e)
            if "service_check_severity" not in instance:
                instance["service_check_severity"] = Status.CRITICAL
            self.warning("Fail to collect some metrics: {0}".format(e))

    def _check(self, instance):
        """Run one check: collect VRRP data, emit metrics and a service check."""
        cmd_generator, ip_address, tags, timeout, retries, enforce_constraints = self._load_conf(instance)
        # How the desired state is decided: priority 'weight', explicit
        # 'verify', or the instance's configured initial state.
        if 'weight' in instance:
            instance['vrrp_check_type'] = 'byWeight'
        elif 'verify' in instance:
            instance['vrrp_check_type'] = 'manualVerify'
        else:
            instance['vrrp_check_type'] = 'initialState'
        tags += [
            'vrrp_check_type:{0}'.format(instance['vrrp_check_type'])
        ]
        try:
            self.log.debug("Querying %s for keepalive vrrp data", ip_address)
            (keepalived, routerId, vit) = self.collect_data(instance, cmd_generator, True, timeout, retries, enforce_constraints=enforce_constraints)
            # do reporting end here
            self.log.debug("%s - %s" % (keepalived, routerId))
            self.report_vrrp_metrics(instance, keepalived, routerId, vit, tags)
        except Exception as e:
            if "service_check_error" not in instance:
                instance["service_check_error"] = "Fail to collect metrics for {0} - {1}".format(instance['name'], e)
            self.warning(instance["service_check_error"])
            return [(self.SC_STATUS, Status.CRITICAL, instance["service_check_error"])]
        finally:
            # NOTE: a return inside 'finally' supersedes the returns above;
            # the final service-check status is always decided here.
            if "service_check_error" in instance:
                status = Status.DOWN
                if "service_check_severity" in instance:
                    status = instance["service_check_severity"]
                return [(self.SC_STATUS, status, instance["service_check_error"])]
            return [(self.SC_STATUS, Status.UP, None)]

    def report_vrrp_metrics(self, instance, keepalived, routerId, data, tags):
        """Compute desired vs actual state per virtual router; emit gauges."""
        # Reverse index: virtual-router name -> table row index.
        name_to_idx = {}
        for key in data:
            name_to_idx[data[key]['name']] = key
        for vr_name in sorted(name_to_idx):
            # NOTE(review): 'include'/'exclude' are read unconditionally --
            # instances lacking them raise KeyError; confirm they are
            # mandatory configuration fields.
            if re.match(instance['include'], vr_name) and not re.match(instance['exclude'], vr_name):
                vr = data[name_to_idx[vr_name]]
                vr['desiredState'] = 'backup'
                if 'verify' in instance:
                    vr['desiredState'] = instance['verify']
                elif 'weight' in instance:
                    if instance['weight'] <= vr['effectivePriority']:
                        vr['desiredState'] = 'master'
                else:
                    vr['desiredState'] = vr['initialState']
                # 2 = failed to reach desired 'master', 1 = not in desired
                # state, 0 = as desired.  (Key spelling fixed from the old
                # 'STATUS_GUAGE'; it is only used within this method.)
                if vr['state'] != vr['desiredState']:
                    if vr['desiredState'] == 'master':
                        vr['STATUS'] = 'CRIT'
                        vr['STATUS_GAUGE'] = 2
                    else:
                        vr['STATUS'] = 'WARN'
                        vr['STATUS_GAUGE'] = 1
                else:
                    vr['STATUS'] = 'OKAY'
                    vr['STATUS_GAUGE'] = 0
                # We have all our info, collected and calculated; submit it.
                vr_tags = list(tags)
                vr_tags += [
                    'vrrp_router_name:{0}'.format(routerId),
                    'vrrp_virtual_router_id:{0}'.format(vr['virtualRouterId']),
                    'vrrp_virtual_router_name:{0}'.format(vr['name']),
                    'vrrp_sync_group:{0}'.format(vr['syncGroupName']),
                    'vrrp_primary_interface:{0}'.format(vr['primaryInterface']),
                    'vrrp_initial_state:{0}'.format(vr['initialState']),
                    'vrrp_desired_state:{0}'.format(vr['desiredState'])
                ]
                for vip in vr['vips']:
                    vr_tags.append('vrrp_virtual_ip:{0}'.format(vip))
                self.log.debug("tags: %s" % ",".join(vr_tags))
                self.log.debug("state: %s" % VRRP_STATE_TO_INTEGER[vr['state']])
                self.log.debug("base prio: %s" % vr['basePriority'])
                self.log.debug("effective prio: %s" % vr['effectivePriority'])
                self.log.debug("status: %s" % vr['STATUS_GAUGE'])
                self.gauge('keepalived.vrrp.state', VRRP_STATE_TO_INTEGER[vr['state']], vr_tags)
                self.gauge('keepalived.vrrp.priority.base', vr['basePriority'], vr_tags)
                self.gauge('keepalived.vrrp.priority.effective', vr['effectivePriority'], vr_tags)
                self.gauge('keepalived.vrrp.status', vr['STATUS_GAUGE'], vr_tags)

    def report_as_service_check(self, sc_name, status, instance, msg=None):
        """Translate a NetworkCheck status into a tagged Datadog service check."""
        sc_tags = ['vrrp_device:{0}'.format(instance["ip_address"])]
        custom_tags = instance.get('tags', [])
        tags = sc_tags + custom_tags
        self.service_check(sc_name, NetworkCheck.STATUS_TO_SERVICE_CHECK[status], tags=tags, message=msg)
|
|
import logging
from collections import defaultdict
import redis
from django.utils import timezone
from django.db import models
import django_rq
import rq, rq.queue, rq.job, rq.exceptions
from stream_analysis import AnalysisTask, cleanup, get_stream_cutoff_times
from twitter_stream.models import StreamProcess, FilterTerm
from twitter_feels.libs.twitter_analysis.models import TweetStream
from swapper import load_model
# Module-level logger and shared rq-scheduler handle.
# NOTE(review): get_scheduler() runs at import time -- confirm importing this
# module is safe before Redis/django settings are fully configured.
logger = logging.getLogger('status')
scheduler = django_rq.get_scheduler()
def redis_running():
    """Return True iff the Redis server backing the scheduler answers a PING."""
    try:
        scheduler.connection.ping()
    except redis.ConnectionError:
        return False
    return True
def scheduler_status():
    """Return True iff an rq-scheduler instance is alive.

    The scheduler registers its key on startup and sets a 'death' hash field
    on shutdown, so alive == key exists and no death mark is present.
    """
    conn = scheduler.connection
    return bool(conn.exists(scheduler.scheduler_key) and
                not conn.hexists(scheduler.scheduler_key, 'death'))
def queues_status():
    """Summarize every rq queue, keyed by queue name.

    Each entry holds the queue's job count, the creation time of the oldest
    unfinished job, and per-state / per-function job tallies.
    """
    queues = rq.Queue.all(connection=django_rq.get_connection())
    result = {}
    for q in queues:
        jobs = q.get_jobs()
        oldest = None
        state_count = defaultdict(int)
        func_count = defaultdict(int)
        for j in jobs:
            # Track the oldest unfinished job's creation time.
            if j.status != 'finished' and (not oldest or j.created_at < oldest):
                oldest = j.created_at
            job_type = j.func_name
            # Collapse frame jobs into readable labels, including the
            # analysis task key when present in the job's meta.
            if job_type.endswith('create_frames'):
                if 'analysis.task.key' in j.meta:
                    job_type = "create_frames[%s]" % j.meta.get('analysis.task.key', '?')
                else:
                    job_type = j.get_call_string().replace('stream_analysis.utils.', 'scheduler:')
            elif job_type.endswith('analyze_frame'):
                if 'analysis.task.key' in j.meta:
                    job_type = "analyze_frame[%s]" % j.meta.get('analysis.task.key', '?')
                else:
                    job_type = j.get_call_string()
            else:
                job_type = j.get_call_string()
            func_count[job_type] += 1
            state_count[j.status] += 1
        #TODO: Fix this nasty hack -- rq doesn't use UTC
        if oldest:
            oldest = timezone.make_aware(oldest, timezone.get_default_timezone())
        func_count = sorted(func_count.items())
        result[q.name] = {
            'name': q.name,
            'count': q.count,
            'oldest': oldest,
            'state_count': dict(state_count),
            'func_count': func_count
        }
    return result
def worker_status():
    """Summarize rq workers: per-worker details plus an overall running flag."""
    details = []
    any_active = False
    for worker in rq.Worker.all(connection=django_rq.get_connection()):
        if not worker.stopped:
            any_active = True
        details.append({
            'name': worker.name,
            'state': worker.state,
            'stopped': worker.stopped,
            'queues': worker.queue_names(),
        })
    return {
        "workers": details,
        "running": any_active,
    }
def stream_status():
    """Summarize the tweet stream: processes, filter terms, counts and rate.

    Returns a dict with the enabled filter terms, the current stream
    processes, whether any process is running, approximate tweet counts, and
    the average tweet rate over the observed stream window.
    """
    terms = FilterTerm.objects.filter(enabled=True)
    processes = StreamProcess.get_current_stream_processes()
    running = any(p.status == StreamProcess.STREAM_STATUS_RUNNING
                  for p in processes)
    stream_class_memory_cutoffs = get_stream_cutoff_times()
    Tweet = load_model("twitter_stream", "Tweet")
    tweet_count = Tweet.count_approx()
    # Counting analyzed tweets is expensive; skip it for very large tables.
    analyzed_count = 'a lot'
    if tweet_count < 10000000:
        # BUG FIX: dict.iteritems() is Python-2 only; items() works on both.
        for stream_class, cutoff_time in stream_class_memory_cutoffs.items():
            if stream_class == TweetStream:
                analyzed_count = TweetStream().count_before(cutoff_time)
    stream = TweetStream()
    earliest_time = stream.get_earliest_stream_time()
    latest_time = stream.get_latest_stream_time()
    avg_rate = None
    if earliest_time is not None and latest_time is not None:
        avg_rate = float(tweet_count) / (latest_time - earliest_time).total_seconds()
    return {
        'running': running,
        'terms': [t.term for t in terms],
        'processes': processes,
        'tweet_count': tweet_count,
        'analyzed_count': analyzed_count,
        'earliest': earliest_time,
        'latest': latest_time,
        'avg_rate': avg_rate
    }
def _task_status(task):
    """Build the status dict for a single AnalysisTask."""
    frame_class = task.get_frame_class()
    perf = frame_class.get_performance_stats()
    # Start time of the most recently calculated frame (None if no frames).
    latest = frame_class.objects \
        .filter(calculated=True) \
        .aggregate(latest_start_time=models.Max('start_time'))['latest_start_time']
    return {
        "key": task.key,
        "name": task.name,
        "time_frame_path": task.frame_class_path,
        "duration": frame_class.DURATION.total_seconds(),
        "frame_count": frame_class.count_completed(),
        "avg_analysis_time": perf['analysis_time'],
        "avg_cleanup_time": perf['cleanup_time'],
        # The task counts as running when it has an rq job queued.
        "running": bool(task.get_rq_job()),
        "most_recent": latest,
    }
def task_status(key=None):
    """Return the status of one scheduled task, or of all tasks.

    With a key, returns that task's status dict (or None when the key is
    unknown).  Without a key, returns {task_key: status_dict} for every
    configured task.  Status includes the task info plus a "running" flag.
    """
    if key:
        task = AnalysisTask.get(key=key)
        return _task_status(task) if task else None
    return {task.key: _task_status(task) for task in AnalysisTask.get()}
def cancel_task(key=None):
    """Cancel a scheduled task by key, or every task created by this system
    when no key is given."""
    if key:
        task = AnalysisTask.get(key=key)
        if task:
            task.cancel()
        return
    for task in AnalysisTask.get():
        task.cancel()
def schedule_task(key=None, cancel_first=True):
    """Schedule one task by key, or every configured task when key is None."""
    if key:
        task = AnalysisTask.get(key=key)
        if task:
            task.schedule(cancel_first=cancel_first)
        return
    for task in AnalysisTask.get():
        task.schedule(cancel_first=cancel_first)
def requeue_failed():
    """Requeue every job in the failed queue that is still FAILED.

    Jobs that vanished or changed state are dropped from the failed queue.
    Returns the number of jobs requeued.
    """
    conn = django_rq.get_connection()
    failed = rq.queue.get_failed_queue(conn)
    count = 0
    for job_id in list(failed.job_ids):
        try:
            job = rq.job.Job.fetch(job_id, connection=conn)
        except rq.job.NoSuchJobError:
            # Stale reference: silently drop it and move on.
            failed.remove(job_id)
            continue
        if job.status != rq.job.Status.FAILED:
            failed.remove(job_id)
            continue
        failed.requeue(job_id)
        count += 1
    logger.info("Requeued %d failed jobs", count)
    return count
def clear_failed():
    """Clear jobs in the failed queue.

    For analysis jobs, also deletes the uncalculated frame row the failed
    job left behind.  Returns the number of jobs cleared.
    """
    connection = django_rq.get_connection()
    failed_queue = rq.queue.get_failed_queue(connection)
    job_ids = failed_queue.job_ids
    cleared = 0
    for job_id in job_ids:
        try:
            job = rq.job.Job.fetch(job_id, connection=connection)
        except rq.job.NoSuchJobError:
            # Silently ignore/remove this job and return (i.e. do nothing)
            failed_queue.remove(job_id)
            continue
        # Delete jobs for this task
        task_key = job.meta.get('analysis.task.key')
        if task_key:
            task = AnalysisTask.get(task_key)
            frame_id = job.meta.get('analysis.frame.id')
            if task and frame_id:
                # Delete the corresponding frame
                frame_class = task.get_frame_class()
                try:
                    # Only remove the frame if it was never calculated --
                    # completed frames are kept.
                    frame_class.objects.filter(pk=frame_id, calculated=False).delete()
                except Exception as e:
                    logger.warn(e, exc_info=True)
        job.cancel()
        cleared += 1
    logger.info("Cleared %d failed jobs", cleared)
    return cleared
def clean_tweets():
    """Clean old tweets we don't need anymore (enqueued as an async rq job)."""
    cleanup.delay()
|
|
"""module for complete independent games"""
import functools
import itertools
import numpy as np
from gameanalysis import rsgame
from gameanalysis import utils
class _MatrixGame(rsgame._CompleteGame):  # pylint: disable=protected-access
    """Matrix game representation

    This represents a complete independent game more compactly than a Game, but
    only works for complete independent games.

    Parameters
    ----------
    role_names : (str,)
        The name of each role.
    strat_names : ((str,),)
        The name of each strategy per role.
    payoff_matrix : ndarray
        The matrix of payoffs for an asymmetric game. The last axis is the
        payoffs for each player, the first axes are the strategies for each
        player. matrix.shape[:-1] must correspond to the number of strategies
        for each player. matrix.ndim - 1 must equal matrix.shape[-1].
    """

    def __init__(self, role_names, strat_names, payoff_matrix):
        # Matrix games are independent: exactly one player per role.
        super().__init__(role_names, strat_names,
                         np.ones(len(role_names), int))
        # Freeze the payoff data; views handed out below share it read-only.
        self._payoff_matrix = payoff_matrix
        self._payoff_matrix.setflags(write=False)
        # Vector with a 1 at the first strategy of each role; used by
        # compress_profile to recover the played strategy per role.
        self._prof_offset = np.zeros(self.num_strats, int)
        self._prof_offset[self.role_starts] = 1
        self._prof_offset.setflags(write=False)
        # Flat (num_profiles, num_roles) alias of the matrix so get_payoffs
        # can index payoff rows directly by profile id.
        self._payoff_view = self._payoff_matrix.view()
        self._payoff_view.shape = (self.num_profiles, self.num_roles)

    def payoff_matrix(self):
        """Return the payoff matrix"""
        return self._payoff_matrix.view()

    @utils.memoize
    def min_strat_payoffs(self):
        """Returns the minimum payoff for each role"""
        mpays = np.empty(self.num_strats)
        # Per role: bring that role's strategy axis to the front, then
        # reduce over all combinations of opponent strategies.
        for role, (pays, min_pays, strats) in enumerate(zip(
                np.rollaxis(self._payoff_matrix, -1),
                np.split(mpays, self.role_starts[1:]),
                self.num_role_strats)):
            np.rollaxis(pays, role).reshape((strats, -1)).min(1, min_pays)
        mpays.setflags(write=False)
        return mpays

    @utils.memoize
    def max_strat_payoffs(self):
        """Returns the maximum payoff for each role"""
        mpays = np.empty(self.num_strats)
        # Mirror of min_strat_payoffs with a max reduction.
        for role, (pays, max_pays, strats) in enumerate(zip(
                np.rollaxis(self._payoff_matrix, -1),
                np.split(mpays, self.role_starts[1:]),
                self.num_role_strats)):
            np.rollaxis(pays, role).reshape((strats, -1)).max(1, max_pays)
        mpays.setflags(write=False)
        return mpays

    @functools.lru_cache(maxsize=1)
    def payoffs(self):
        """Return the payoffs for every profile, in profile order"""
        profiles = self.profiles()
        payoffs = np.zeros(profiles.shape)
        # Each profile has exactly one nonzero entry per role, ordered the
        # same way as the flattened payoff matrix, so the flat payoff data
        # drops straight into the nonzero slots.
        payoffs[profiles > 0] = self._payoff_matrix.flat
        return payoffs

    def compress_profile(self, profile):
        """Compress profile in array of ints

        Normal profiles are an array of number of players playing a strategy.
        Since matrix games always have one player per role, this compresses
        each roles counts into a single int representing the played strategy
        per role.
        """
        # NOTE(review): typo "vaid" (valid) in the message below -- left
        # unchanged here since it is a runtime string.
        utils.check(self.is_profile(profile).all(), 'must pass vaid profiles')
        profile = np.asarray(profile, int)
        # The cumulative sum of (offset - profile) counts how many strategy
        # slots precede the played one within each role.
        return np.add.reduceat(np.cumsum(self._prof_offset - profile, -1),
                               self.role_starts, -1)

    def uncompress_profile(self, comp_prof):
        """Uncompress a profile"""
        comp_prof = np.asarray(comp_prof, int)
        utils.check(
            np.all(comp_prof >= 0) and
            np.all(comp_prof < self.num_role_strats),
            'must pass valid compressed profiles')
        profile = np.zeros(comp_prof.shape[:-1] + (self.num_strats,), int)
        # Flat indices of the played strategy per role, batched over any
        # leading dimensions of comp_prof.
        inds = (comp_prof.reshape((-1, self.num_roles)) +
                self.role_starts + self.num_strats *
                np.arange(int(np.prod(comp_prof.shape[:-1])))[:, None])
        profile.flat[inds] = 1
        return profile

    def get_payoffs(self, profiles):
        """Returns an array of profile payoffs"""
        profiles = np.asarray(profiles, int)
        ids = self.profile_to_id(profiles)
        payoffs = np.zeros_like(profiles, float)
        # Scatter each profile's per-role payoffs onto its played strategies.
        payoffs[profiles > 0] = self._payoff_view[ids].flat
        return payoffs

    def deviation_payoffs(self, mixture, *, jacobian=False, **_):  # pylint: disable=too-many-locals
        """Computes the expected value of each pure strategy played against all
        opponents playing mix.

        Parameters
        ----------
        mixture : ndarray
            The mix all other players are using
        jacobian : bool
            If true, the second returned argument will be the jacobian of the
            deviation payoffs with respect to the mixture. The first axis is
            the deviating strategy, the second axis is the strategy in the mix
            the jacobian is taken with respect to.
        """
        # Reshape each role's mixture so it broadcasts along that role's
        # axis of the payoff matrix.
        rmixes = []
        for role, rmix in enumerate(np.split(mixture, self.role_starts[1:])):
            shape = [1] * self.num_roles
            shape[role] = -1
            rmixes.append(rmix.reshape(shape))
        devpays = np.empty(self.num_strats)
        for role, (out, strats) in enumerate(zip(
                np.split(devpays, self.role_starts[1:]),
                self.num_role_strats)):
            # Weight this role's payoffs by all opponents' mixtures, then
            # sum out every axis but this role's own strategy axis.
            pays = self._payoff_matrix[..., role].copy()
            for rmix in (m for r, m in enumerate(rmixes) if r != role):
                pays *= rmix
            np.rollaxis(pays, role).reshape((strats, -1)).sum(1, out=out)
        if not jacobian:
            return devpays
        jac = np.zeros((self.num_strats, self.num_strats))
        for role, (jout, rstrats) in enumerate(zip(
                np.split(jac, self.role_starts[1:]),
                self.num_role_strats)):
            for dev, (out, dstrats) in enumerate(zip(
                    np.split(jout, self.role_starts[1:], 1),
                    self.num_role_strats)):
                if role == dev:
                    # Blocks on the diagonal stay zero.
                    continue
                # Same weighting as above, but leave the deviating role's
                # axis free so the derivative w.r.t. its mixture remains.
                pays = self._payoff_matrix[..., role].copy()
                for rmix in (m for r, m in enumerate(rmixes)
                             if r not in {role, dev}):
                    pays *= rmix
                np.rollaxis(np.rollaxis(pays, role), dev + (role > dev),
                            1).reshape((rstrats, dstrats, -1)).sum(2, out=out)
        return devpays, jac

    def restrict(self, restriction):
        """Return the game restricted to a subset of strategies"""
        base = rsgame.empty_copy(self).restrict(restriction)
        matrix = self._payoff_matrix
        # Slice each role's axis down to its permitted strategies.
        for i, mask in enumerate(np.split(restriction, self.role_starts[1:])):
            matrix = matrix[(slice(None),) * i + (mask,)]
        return _MatrixGame(base.role_names, base.strat_names, matrix.copy())

    def _add_constant(self, constant):
        # Broadcasts a scalar or per-role constant over all payoffs.
        return _MatrixGame(
            self.role_names, self.strat_names,
            self._payoff_matrix + constant)

    def _multiply_constant(self, constant):
        return _MatrixGame(
            self.role_names, self.strat_names,
            self._payoff_matrix * constant)

    def _add_game(self, othr):
        if not othr.is_complete():
            return NotImplemented
        try:
            othr_mat = othr.payoff_matrix()
        except AttributeError:
            # Not a matrix game: rebuild its payoff matrix from the dense
            # profile/payoff listing.
            othr_mat = othr.get_payoffs(
                self.all_profiles())[self.all_profiles() > 0].reshape(
                    self._payoff_matrix.shape)
        return _MatrixGame(
            self.role_names, self.strat_names,
            self._payoff_matrix + othr_mat)

    def _mat_to_json(self, matrix, role_index):
        """Convert a sub matrix into json representation"""
        if role_index == self.num_roles:
            # Leaf: one payoff per role for this pure strategy profile.
            return {role: float(pay) for role, pay
                    in zip(self.role_names, matrix)}
        strats = self.strat_names[role_index]
        role_index += 1
        return {strat: self._mat_to_json(mat, role_index)
                for strat, mat in zip(strats, matrix)}

    def to_json(self):
        res = super().to_json()
        res['payoffs'] = self._mat_to_json(self._payoff_matrix, 0)
        res['type'] = 'matrix.1'
        return res

    @utils.memoize
    def __hash__(self):
        return super().__hash__()

    def __eq__(self, othr):
        # pylint: disable-msg=protected-access
        return (super().__eq__(othr) and
                # Identical payoffs
                np.allclose(self._payoff_matrix, othr._payoff_matrix))

    def __repr__(self):
        return '{}({})'.format(
            self.__class__.__name__[1:],
            self.num_role_strats)
def matgame(payoff_matrix):
    """Create a game from a dense matrix with default names

    Parameters
    ----------
    payoff_matrix : ndarray-like
        The matrix of payoffs for an asymmetric game.
    """
    payoff_matrix = np.ascontiguousarray(payoff_matrix, float)
    # One player per role; the leading axes give each role's strategy count.
    num_players = np.ones(payoff_matrix.ndim - 1, int)
    num_strats = np.array(payoff_matrix.shape[:-1], int)
    return matgame_replace(rsgame.empty(num_players, num_strats), payoff_matrix)
def matgame_names(role_names, strat_names, payoff_matrix):
    """Create a game from a payoff matrix with names

    Parameters
    ----------
    role_names : [str]
        The name of each role.
    strat_names : [[str]]
        The name of each strategy for each role.
    payoff_matrix : ndarray-like
        The matrix mapping strategy indices to payoffs for each player.
    """
    players = np.ones(len(role_names), int)
    base = rsgame.empty_names(role_names, players, strat_names)
    return matgame_replace(base, payoff_matrix)
def _mat_from_json(base, dic, matrix, depth):
    """Recursively copy a nested payoff dict into `matrix` in place."""
    if depth == base.num_roles:
        # Leaf level: dic maps role name -> payoff for this pure profile.
        for role, payoff in dic.items():
            matrix[base.role_index(role)] = payoff
        return
    role = base.role_names[depth]
    offset = base.role_starts[depth]
    # Recurse one strategy axis deeper for each of this role's strategies.
    for strat, subdic in dic.items():
        ind = base.role_strat_index(role, strat) - offset
        _mat_from_json(base, subdic, matrix[ind], depth + 1)
def matgame_json(json):
    """Read a matrix game from json

    In general, the json will have 'type': 'matrix...' to indicate that it's a
    matrix game, but if the other fields are correct, this will still succeed.
    """
    # This relies on roles always being in lexicographic order.
    base = rsgame.empty_json(json)
    shape = tuple(base.num_role_strats) + (base.num_roles,)
    matrix = np.empty(shape, float)
    _mat_from_json(base, json['payoffs'], matrix, 0)
    return matgame_replace(base, matrix)
def matgame_copy(copy_game):
    """Copy a matrix game from an existing game

    Parameters
    ----------
    copy_game : RsGame
        Game to copy payoff data out of. This game must be complete.
    """
    utils.check(copy_game.is_complete(), 'can only copy complete games')
    if hasattr(copy_game, 'payoff_matrix'):
        # Already a matrix game: reuse its matrix directly.
        return matgame_replace(copy_game, copy_game.payoff_matrix())
    # Get payoff matrix: expand each role into one axis per player.
    num_role_strats = copy_game.num_role_strats.repeat(
        copy_game.num_role_players)
    shape = tuple(num_role_strats) + (num_role_strats.size,)
    payoff_matrix = np.empty(shape, float)
    offset = copy_game.role_starts.repeat(copy_game.num_role_players)
    for profile, payoffs in zip(copy_game.profiles(), copy_game.payoffs()):
        # All orderings of identical players within a role index the same
        # symmetric payoff, so write the entry for every permutation.
        inds = itertools.product(*[
            set(itertools.permutations(np.arange(s.size).repeat(s))) for s
            in np.split(profile, copy_game.role_starts[1:])])
        for nested in inds:
            ind = tuple(itertools.chain.from_iterable(nested))
            payoff_matrix[ind] = payoffs[ind + offset]
    # Get role names
    if np.all(copy_game.num_role_players == 1):
        roles = copy_game.role_names
        strats = copy_game.strat_names
    else:
        # When we expand names, we need to make sure they stay sorted
        if utils.is_sorted(r + 'p' for r in copy_game.role_names):
            # We can naively append player numbers
            role_names = copy_game.role_names
        else:
            # We have to prefix to preserve role order
            maxlen = max(map(len, copy_game.role_names))
            role_names = (
                p + '_' * (maxlen - len(r)) + r for r, p
                in zip(copy_game.role_names,
                       utils.prefix_strings('', copy_game.num_roles)))
        # One expanded role (and repeated strategy list) per player.
        roles = tuple(itertools.chain.from_iterable(
            (r + s for s in utils.prefix_strings('p', p))
            for r, p in zip(role_names, copy_game.num_role_players)))
        strats = tuple(itertools.chain.from_iterable(
            itertools.repeat(s, p) for s, p
            in zip(copy_game.strat_names, copy_game.num_role_players)))
    return _MatrixGame(roles, strats, payoff_matrix)
def matgame_replace(base, payoff_matrix):
    """Replace an existing game with a new payoff matrix

    Parameters
    ----------
    base : RsGame
        Game to take structure out of.
    payoff_matrix : ndarray-like
        The new payoff matrix.
    """
    payoff_matrix = np.ascontiguousarray(payoff_matrix, float)
    utils.check(
        np.all(base.num_role_players == 1),
        'replaced game must be independent')
    expected_shape = tuple(base.num_role_strats) + (base.num_roles,)
    utils.check(
        payoff_matrix.shape == expected_shape,
        'payoff matrix not consistent shape with game')
    return _MatrixGame(base.role_names, base.strat_names, payoff_matrix)
|
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e400
'learn_init': False
independently_centre_inputs : True
e401
input is in range [0,1]
"""
# Experiment name: this script's filename without extension (requires the
# script to be executed so that __main__.__file__ exists).
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
# Output directory for figures and experiment artifacts.
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
# How often (in training iterations) diagnostic plots are saved.
SAVE_PLOT_INTERVAL = 500
# NOTE(review): presumably the BPTT truncation length passed to the net —
# confirm against neuralnilm.Net; unused directly in this file.
GRADIENT_STEPS = 100
# Configuration for the RealApplianceSource that builds training batches from
# the UK-DALE dataset.  Only the first appliance is targeted
# (output_one_appliance=True); the commented-out entries are alternatives
# kept from earlier experiments in this series.
source_dict = dict(
    filename='/data/dk3810/ukdale.h5',
    appliances=[
        ['fridge freezer', 'fridge', 'freezer']
        # 'hair straighteners',
        # 'television',
        # 'dish washer',
        # ['washer dryer', 'washing machine']
    ],
    max_appliance_powers=[300, 500, 200, 2500, 2400],
    # max_input_power=100,
    # Fixed: keyword argument spacing (was `max_diff = 100`), consistent
    # with every other key in this call.
    max_diff=100,
    on_power_thresholds=[5] * 5,
    min_on_durations=[60, 60, 60, 1800, 1800],
    min_off_durations=[12, 12, 12, 1800, 600],
    window=("2013-06-01", "2014-07-01"),
    seq_length=512,
    # random_window=64,
    output_one_appliance=True,
    boolean_targets=False,
    train_buildings=[1],
    validation_buildings=[1],
    # NOTE(review): skip_probability=1 with
    # skip_probability_for_first_appliance=0 presumably means only the first
    # appliance is ever included — confirm in RealApplianceSource.
    skip_probability=1,
    skip_probability_for_first_appliance=0,
    one_target_per_seq=False,
    n_seq_per_batch=64,
    # subsample_target=4,
    include_diff=True,
    include_power=False,
    clip_appliance_power=True,
    target_is_prediction=False,
    # independently_center_inputs=True,
    # standardise_input=True,
    standardise_targets=True,
    unit_variance_targets=False,
    # input_padding=2,
    lag=0,
    clip_input=False
    # classification=True
    # reshape_target_to_2D=True
    # input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
    #              'std': np.array([ 0.12636775], dtype=np.float32)},
    # target_stats={
    #     'mean': np.array([ 0.04066789,  0.01881946,
    #                        0.24639061,  0.17608672,  0.10273963],
    #                      dtype=np.float32),
    #     'std': np.array([ 0.11449792,  0.07338708,
    #                       0.26608968,  0.33463112,  0.21250485],
    #                     dtype=np.float32)}
)
# NOTE(review): N appears unused in this file; exp_a hard-codes 50 — confirm.
N = 50
# Arguments shared by every Net built in this script; exp_* functions
# deep-copy and extend this dict.  Commented-out entries record alternative
# losses / schedules tried in earlier experiments of this series.
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
    # loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
    # loss_function=lambda x, t: mdn_nll(x, t).mean(),
    loss_function=lambda x, t: mse(x, t).mean(),
    # loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
    # loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
    # loss_function=partial(scaled_cost, loss_func=mse),
    # loss_function=ignore_inactive,
    # loss_function=partial(scaled_cost3, ignore_inactive=False),
    # updates_func=momentum,
    updates_func=clipped_nesterov_momentum,
    # Gradient updates are clipped to this range before the momentum step.
    updates_kwargs={'clip_range': (0, 10)},
    learning_rate=1e-5,
    learning_rate_changes_by_iteration={
        # 1000: 1e-4,
        # 4000: 1e-5
        # 800: 1e-4
        # 500: 1e-3
        # 4000: 1e-03,
        # 6000: 5e-06,
        # 7000: 1e-06
        # 2000: 5e-06
        # 3000: 1e-05
        # 7000: 5e-06,
        # 10000: 1e-06,
        # 15000: 5e-07,
        # 50000: 1e-07
    },
    do_save_activations=True,
    # auto_reshape=False,
    # plotter=CentralOutputPlotter
    plotter=Plotter(n_seq_to_plot=10)
)
def exp_a(name):
    # ReLU hidden layers
    # linear output
    # output one appliance
    # 0% skip prob for first appliance
    # 100% skip prob for other appliances
    # input is diff
    # NOTE(review): the code that builds `source` is commented out below, so
    # this function relies on a `source` global that must already exist
    # (e.g. left over from a previous run) — confirm before running fresh.
    global source
    # source_dict_copy = deepcopy(source_dict)
    # source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source
    ))
    # One ReLU recurrent layer feeding a linear dense output layer.
    net_dict_copy['layers_config']= [
        {
            'type': RecurrentLayer,
            'num_units': 50,
            'W_in_to_hid': Normal(std=1),
            # Near-identity recurrence (scale 0.9) keeps the hidden state stable.
            'W_hid_to_hid': Identity(scale=0.9),
            'nonlinearity': rectify,
            'learn_init': True,
            'precompute_input': True
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': None,
            # Output weights scaled by 1/sqrt(fan-in) of the 50 hidden units.
            'W': Normal(std=1/sqrt(50))
        }
    ]
    net = Net(**net_dict_copy)
    return net
def main():
    """Run each experiment listed in EXPERIMENTS, saving results under PATH.

    For every experiment letter, init_experiment returns a code string
    (e.g. "exp_a('<full name>')") which is evaluated to build the net, then
    run_experiment trains it.  KeyboardInterrupt stops the whole loop; any
    other exception is logged and the next experiment is attempted.
    """
    # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
    # EXPERIMENTS = list('abcdefghi')
    EXPERIMENTS = list('a')
    try:
        for experiment in EXPERIMENTS:
            full_exp_name = NAME + experiment
            func_call = init_experiment(PATH, experiment, full_exp_name)
            logger = logging.getLogger(full_exp_name)
            try:
                # NOTE: func_call is a code string produced by
                # init_experiment; eval is trusted here but would be unsafe
                # on any externally supplied string.
                net = eval(func_call)
                run_experiment(net, epochs=5000)
            except KeyboardInterrupt:
                logger.info("KeyboardInterrupt")
                break
            except Exception:
                logger.exception("Exception")
                # raise
            else:
                # Free the (large) cached activations before the next run.
                del net.source.train_activations
                gc.collect()
    finally:
        # Bug fix: logging.shutdown() previously ran in a per-iteration
        # finally clause, closing every logging handler after the first
        # experiment; shut down once, after all experiments finish.
        logging.shutdown()


if __name__ == "__main__":
    main()
|
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from abc import ABC, abstractmethod
from dataclasses import FrozenInstanceError, dataclass
import pytest
from pants.util.meta import (
SingletonMetaclass,
classproperty,
decorated_type_checkable,
frozen_after_init,
staticproperty,
)
def test_singleton() -> None:
    """A class using SingletonMetaclass constructs exactly one instance."""
    class One(metaclass=SingletonMetaclass):
        pass
    assert One() is One()
class WithProp:
    """Fixture exposing one class-level value via @classproperty,
    @classmethod, @staticproperty and @staticmethod for the tests below.

    The `class_property` / `static_property` docstrings are asserted on by
    test_docstring — do not change them.
    """
    _value = "val0"
    @classproperty
    def class_property(cls):
        """class_property docs."""
        return cls._value
    @classmethod
    def class_method(cls):
        return cls._value
    @staticproperty
    def static_property(): # type: ignore[misc] # MyPy expects methods to have `self` or `cls`
        """static_property docs."""
        return "static_property"
    @staticmethod
    def static_method():
        return "static_method"
class OverridingValueField(WithProp):
    """Override only the class-level `_value` field; inherited accessors
    should pick up the new value."""
    _value = "val1"
class OverridingValueInit(WithProp):
    """Override the class-level `_value` with an instance-level `_value` from a constructor.
    The class-level methods should still return the class-level `_value`, but the new instance
    methods should return the value from the constructor.
    """
    def __init__(self, v):
        # This will be ignored when accessed as a class method.
        self._value = v
    @property
    def instance_property(self):
        return self._value
    def instance_method(self):
        return self._value
class WithShadowingInstanceMethod(OverridingValueInit):
    """Override the class-level property and method with instance versions.
    The instance-level methods should return the instance-level `_value` (the constructor argument)
    instead of the class-level `_value` (defined in :class:`WithProp`).
    """
    @property
    def class_property(self):
        return self._value
    def class_method(self):
        return self._value
class OverridingMethodDefSuper(WithProp):
    """Override the classproperty with one that delegates to super()'s
    implementation and appends `_other_value`."""
    _other_value = "o0"
    @classproperty
    def class_property(cls):
        return super().class_property + cls._other_value
def test_access() -> None:
    """All four accessor kinds work both on the class and on instances."""
    assert "val0" == WithProp.class_property
    assert "val0" == WithProp().class_property
    assert "val0" == WithProp.class_method()
    assert "val0" == WithProp().class_method()
    assert "static_property" == WithProp.static_property
    assert "static_property" == WithProp().static_property
    assert "static_method" == WithProp.static_method()
    assert "static_method" == WithProp().static_method()
def test_has_attr() -> None:
    """hasattr sees a classproperty on both the class and instances."""
    assert hasattr(WithProp, "class_property") is True
    assert hasattr(WithProp(), "class_property") is True
def test_docstring() -> None:
    """The descriptor objects preserve the decorated functions' docstrings."""
    assert "class_property docs." == WithProp.__dict__["class_property"].__doc__
    assert "static_property docs." == WithProp.__dict__["static_property"].__doc__
def test_override_value() -> None:
    """A subclass overriding `_value` is reflected by the classproperty."""
    assert "val1" == OverridingValueField.class_property
    assert "val1" == OverridingValueField().class_property
def test_override_inst_value() -> None:
    """Instance-level `_value` does not shadow the class-level accessors."""
    obj = OverridingValueInit("v1")
    assert "val0" == obj.class_property
    assert "val0" == obj.class_method()
    assert "v1" == obj.instance_property
    assert "v1" == obj.instance_method()
def test_override_inst_method() -> None:
    """Instance-level property/method overrides shadow the class-level ones."""
    obj = WithShadowingInstanceMethod("v1")
    assert "v1" == obj.class_property
    assert "v1" == obj.class_method()
def test_override_method_super() -> None:
    """A classproperty override can delegate to super()'s classproperty."""
    assert "val0o0" == OverridingMethodDefSuper.class_property
    assert "val0o0" == OverridingMethodDefSuper().class_property
def test_modify_class_value() -> None:
    """A classproperty reads the current class state on every access."""
    class WithFieldToModify:
        _z = "z0"
        @classproperty
        def class_property(cls):
            return cls._z
    assert "z0" == WithFieldToModify.class_property
    # The classproperty reflects the change in state (is not cached by python or something else
    # weird we might do).
    WithFieldToModify._z = "z1"
    assert "z1" == WithFieldToModify.class_property
def test_set_attr() -> None:
    """Assigning over a classproperty/staticproperty replaces the descriptor."""
    class SetValue:
        _x = "x0"
        @staticproperty
        def static_property():
            return "s0"
        @classproperty
        def class_property(cls):
            return cls._x
    assert "x0" == SetValue.class_property
    assert "s0" == SetValue.static_property
    # The @classproperty is gone, this is just a regular property now.
    SetValue.class_property = "x1"
    assert "x1" == SetValue.class_property
    # The source field is unmodified.
    assert "x0" == SetValue._x
    SetValue.static_property = "s1"
    assert "s1" == SetValue.static_property
def test_delete_attr() -> None:
    """Deleting a classproperty/staticproperty removes only the descriptor."""
    class DeleteValue:
        _y = "y0"
        @classproperty
        def class_property(cls):
            return cls._y
        @staticproperty
        def static_property():
            return "s0"
    assert "y0" == DeleteValue.class_property
    assert "s0" == DeleteValue.static_property
    # The @classproperty is gone, but the source field is still alive.
    del DeleteValue.class_property
    assert hasattr(DeleteValue, "class_property") is False
    assert hasattr(DeleteValue, "_y") is True
    del DeleteValue.static_property
    assert hasattr(DeleteValue, "static_property") is False
def test_abstract_classproperty() -> None:
    """An abstract classproperty raises until a subclass overrides it."""
    class Abstract(ABC):
        @classproperty
        @property
        @abstractmethod
        def f(cls):
            pass
    # Accessing the un-overridden abstract classproperty raises TypeError.
    with pytest.raises(TypeError) as exc:
        Abstract.f
    assert str(exc.value) == (
        "The classproperty 'f' in type 'Abstract' was an abstractproperty, meaning that type "
        "Abstract must override it by setting it as a variable in the class body or defining a "
        "method with an @classproperty decorator."
    )
    class WithoutOverriding(Abstract):
        """Show that subclasses failing to override the abstract classproperty will raise."""
    with pytest.raises(TypeError) as exc:
        WithoutOverriding.f
    assert str(exc.value) == (
        "The classproperty 'f' in type 'WithoutOverriding' was an abstractproperty, meaning that "
        "type WithoutOverriding must override it by setting it as a variable in the class body or "
        "defining a method with an @classproperty decorator."
    )
    # Overriding with a plain class variable satisfies the abstract property.
    class Concrete(Abstract):
        f = 3
    assert Concrete.f == 3
    # ...and so does overriding with a new @classproperty.
    class Concrete2(Abstract):
        @classproperty
        def f(cls):
            return "hello"
    assert Concrete2.f == "hello"
def test_decorated_type_checkable() -> None:
    """is_instance() matches only classes decorated by that exact decorator."""
    @decorated_type_checkable
    def f(cls):
        return f.define_instance_of(cls)
    @f
    class C:
        pass
    assert C._decorated_type_checkable_type == type(f)
    assert f.is_instance(C) is True
    # Check that .is_instance() is only true for exactly the decorator @g used on the class D!
    @decorated_type_checkable
    def g(cls):
        return g.define_instance_of(cls)
    @g
    class D:
        pass
    assert D._decorated_type_checkable_type == type(g)
    assert g.is_instance(D) is True
    assert f.is_instance(D) is False
def test_no_init() -> None:
    """frozen_after_init freezes immediately when there is no __init__."""
    @frozen_after_init
    class Test:
        pass
    test = Test()
    with pytest.raises(FrozenInstanceError):
        test.x = 1  # type: ignore[attr-defined]
def test_init_still_works() -> None:
    """Attribute assignment inside __init__ is still permitted."""
    @frozen_after_init
    class Test:
        def __init__(self, x: int) -> None:
            self.x = x
            self.y = "abc"
    test = Test(x=0)
    assert test.x == 0
    assert test.y == "abc"
def test_modify_preexisting_field_after_init() -> None:
    """Mutating a field set in __init__ raises after construction."""
    @frozen_after_init
    class Test:
        def __init__(self, x: int) -> None:
            self.x = x
    test = Test(x=0)
    with pytest.raises(FrozenInstanceError):
        test.x = 1
def test_add_new_field_after_init() -> None:
    """New fields are rejected after init; _unfreeze/_freeze toggle this."""
    @frozen_after_init
    class Test:
        def __init__(self, x: int) -> None:
            self.x = x
    test = Test(x=0)
    with pytest.raises(FrozenInstanceError):
        test.y = "abc"  # type: ignore[attr-defined]
    # Explicitly unfreezing allows assignment again...
    test._unfreeze_instance()  # type: ignore[attr-defined]
    test.y = "abc"  # type: ignore[attr-defined]
    # ...and re-freezing restores the protection.
    test._freeze_instance()  # type: ignore[attr-defined]
    with pytest.raises(FrozenInstanceError):
        test.z = "abc"  # type: ignore[attr-defined]
def test_explicitly_call_setattr_after_init() -> None:
    """setattr() is blocked the same way as direct attribute assignment."""
    @frozen_after_init
    class Test:
        def __init__(self, x: int) -> None:
            self.x = x
    test = Test(x=0)
    with pytest.raises(FrozenInstanceError):
        setattr(test, "x", 1)
    test._unfreeze_instance()  # type: ignore[attr-defined]
    setattr(test, "x", 1)
    test._freeze_instance()  # type: ignore[attr-defined]
    with pytest.raises(FrozenInstanceError):
        test.y = "abc"  # type: ignore[attr-defined]
def test_works_with_dataclass() -> None:
    """frozen_after_init composes with an unfrozen @dataclass."""
    @frozen_after_init
    @dataclass(frozen=False)
    class Test:
        x: int
        y: str
        def __init__(self, x: int) -> None:
            self.x = x
            self.y = "abc"
    test = Test(x=0)
    with pytest.raises(FrozenInstanceError):
        test.x = 1
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import functools
import logging
from django.conf import settings
from django.contrib import auth
from django.contrib.auth import middleware
from django.contrib.auth import models
from django.utils import decorators
from django.utils import timezone
from keystoneclient.auth.identity import v2 as v2_auth
from keystoneclient.auth.identity import v3 as v3_auth
from keystoneclient.auth import token_endpoint
from keystoneclient import session
from keystoneclient.v2_0 import client as client_v2
from keystoneclient.v3 import client as client_v3
from six.moves.urllib import parse as urlparse
LOG = logging.getLogger(__name__)
# Module-level cache for get_project_list(), keyed on the `token` kwarg via
# the memoize_by_keyword_arg decorator below.
_PROJECT_CACHE = {}
# Default validity margin in seconds; note is_token_valid() re-reads the
# setting itself when no explicit margin is passed.
_TOKEN_TIMEOUT_MARGIN = getattr(settings, 'TOKEN_TIMEOUT_MARGIN', 0)
"""
We need the request object to get the user, so we'll slightly modify the
existing django.contrib.auth.get_user method. To do so we update the
auth middleware to point to our overridden method.
Calling the "patch_middleware_get_user" method somewhere like our urls.py
file takes care of hooking it in appropriately.
"""
def middleware_get_user(request):
    """Return the request's user, memoized on ``request._cached_user``."""
    try:
        return request._cached_user
    except AttributeError:
        request._cached_user = get_user(request)
        return request._cached_user
def get_user(request):
    """Load the session's user through its auth backend.

    Falls back to AnonymousUser whenever the session keys are missing or
    the backend returns nothing.  Unlike the stock implementation, the
    backend is handed the request object before the lookup.
    """
    try:
        session_user_id = request.session[auth.SESSION_KEY]
        backend = auth.load_backend(request.session[auth.BACKEND_SESSION_KEY])
        backend.request = request
        found = backend.get_user(session_user_id) or models.AnonymousUser()
    except KeyError:
        found = models.AnonymousUser()
    return found
def patch_middleware_get_user():
    # Swap the request-aware implementations defined above in for the stock
    # django.contrib.auth versions (call this once, e.g. from urls.py).
    middleware.get_user = middleware_get_user
    auth.get_user = get_user
""" End Monkey-Patching. """
def is_token_valid(token, margin=None):
    """Timezone-aware check of the auth token's expiration timestamp.

    Returns ``True`` if the token has not yet expired, otherwise ``False``.

    .. param:: token

        The openstack_auth.user.Token instance to check

    .. param:: margin

       A time margin in seconds to subtract from the real token's validity.
       An example usage is that the token can be valid once the middleware
       passed, and invalid (timed-out) during a view rendering and this
       generates authorization errors during the view rendering.
       A default margin can be set by the TOKEN_TIMEOUT_MARGIN in the
       django settings.
    """
    expires_at = token.expires
    if expires_at is None:
        # Unparseable expiry: treat as expired rather than valid forever.
        return False
    if margin is None:
        margin = getattr(settings, 'TOKEN_TIMEOUT_MARGIN', 0)
    expires_at = expires_at - datetime.timedelta(seconds=margin)
    if settings.USE_TZ and timezone.is_naive(expires_at):
        # Presumes that the Keystone is using UTC.
        expires_at = timezone.make_aware(expires_at, timezone.utc)
    return expires_at > timezone.now()
# From django.contrib.auth.views
# Added in Django 1.4.3, 1.5b2
# Vendored here for compatibility with old Django versions.
def is_safe_url(url, host=None):
    """Return ``True`` if the url is a safe redirection.

    The safe redirection means that it doesn't point to a different host.
    Always returns ``False`` on an empty url.
    """
    if not url:
        return False
    target_host = urlparse.urlparse(url).netloc
    return not target_host or target_host == host
def memoize_by_keyword_arg(cache, kw_keys):
    """Memoize a function using the list of keyword argument name as its key.

    Wrap a function so that results for any keyword argument tuple are stored
    in 'cache'. Note that the keyword args to the function must be usable as
    dictionary keys.

    :param cache: Dictionary object to store the results.
    :param kw_keys: List of keyword arguments names. The values are used
                    for generating the key in the cache.
    """
    def _decorator(func):
        @functools.wraps(func, assigned=decorators.available_attrs(func))
        def wrapper(*args, **kwargs):
            # Build the cache key from the present keyword values only.
            key = '__'.join(str(kwargs[k]) for k in kw_keys if k in kwargs)
            if not key:
                # None of the key kwargs were supplied: skip caching entirely.
                return func(*args, **kwargs)
            if key in cache:
                return cache[key]
            result = func(*args, **kwargs)
            cache[key] = result
            return result
        return wrapper
    return _decorator
def remove_project_cache(token):
    # Drop the cached project list for this token (no-op if not cached).
    _PROJECT_CACHE.pop(token, None)
# Helper for figuring out keystone version
# Implementation will change when API version discovery is available
def get_keystone_version():
    """Return the configured identity API version, defaulting to 2.0."""
    versions = getattr(settings, 'OPENSTACK_API_VERSIONS', {})
    return versions.get('identity', 2.0)
def get_session():
    """Build a keystoneclient Session honoring the SSL-related settings."""
    verify = getattr(settings, 'OPENSTACK_SSL_CACERT', True)
    if getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False):
        # Explicitly disabling verification wins over any CA cert setting.
        verify = False
    return session.Session(verify=verify)
def get_keystone_client():
    """Return the keystoneclient module matching the configured version."""
    return client_v3 if get_keystone_version() >= 3 else client_v2
def has_in_url_path(url, sub):
    """Test if the `sub` string is in the `url` path."""
    path = urlparse.urlsplit(url).path
    return sub in path
def url_path_replace(url, old, new, count=None):
    """Return a copy of url with replaced path.

    Return a copy of url with all occurrences of old replaced by new in the url
    path. If the optional argument count is given, only the first count
    occurrences are replaced.
    """
    parts = urlparse.urlsplit(url)
    if count is None:
        new_path = parts.path.replace(old, new)
    else:
        new_path = parts.path.replace(old, new, count)
    return urlparse.urlunsplit(
        (parts.scheme, parts.netloc, new_path, parts.query, parts.fragment))
def fix_auth_url_version(auth_url):
    """Fix up the auth url if an invalid version prefix was given.

    People still give a v2 auth_url even when they specify that they want v3
    authentication. Fix the URL to say v3. This should be smarter and take the
    base, unversioned URL and discovery.
    """
    if get_keystone_version() < 3:
        return auth_url
    if has_in_url_path(auth_url, "/v2.0"):
        LOG.warning("The settings.py file points to a v2.0 keystone "
                    "endpoint, but v3 is specified as the API version "
                    "to use. Using v3 endpoint for authentication.")
        auth_url = url_path_replace(auth_url, "/v2.0", "/v3", 1)
    return auth_url
def get_password_auth_plugin(auth_url, username, password, user_domain_name):
    """Return a v2 or v3 Password auth plugin per the configured version."""
    if get_keystone_version() < 3:
        # v2 has no concept of user domains.
        return v2_auth.Password(auth_url=auth_url,
                                username=username,
                                password=password)
    return v3_auth.Password(auth_url=auth_url,
                            username=username,
                            password=password,
                            user_domain_name=user_domain_name)
def get_token_auth_plugin(auth_url, token, project_id):
    """Return a v2 or v3 Token auth plugin per the configured version."""
    if get_keystone_version() < 3:
        # v2 calls the project a "tenant".
        return v2_auth.Token(auth_url=auth_url,
                             token=token,
                             tenant_id=project_id,
                             reauthenticate=False)
    return v3_auth.Token(auth_url=auth_url,
                         token=token,
                         project_id=project_id,
                         reauthenticate=False)
@memoize_by_keyword_arg(_PROJECT_CACHE, ('token', ))
def get_project_list(*args, **kwargs):
    """List projects (v3) or tenants (v2) visible to the given token.

    Results are memoized in the module-level _PROJECT_CACHE keyed on the
    ``token`` keyword argument.  Expected kwargs: ``auth_url``, ``token``,
    and optionally ``session`` and ``user_id``.
    """
    sess = kwargs.get('session') or get_session()
    auth_url = fix_auth_url_version(kwargs['auth_url'])
    auth = token_endpoint.Token(auth_url, kwargs['token'])
    client = get_keystone_client().Client(session=sess, auth=auth)
    if get_keystone_version() < 3:
        projects = client.tenants.list()
    else:
        # v3 scopes the listing to the user when user_id is provided.
        projects = client.projects.list(user=kwargs.get('user_id'))
    # Case-insensitive sort by project name for stable display order.
    projects.sort(key=lambda project: project.name.lower())
    return projects
def default_services_region(service_catalog, request=None):
    """Returns the first endpoint region for first non-identity service.

    Extracted from the service catalog.
    """
    if not service_catalog:
        return None
    available_regions = [
        endpoint['region']
        for service in service_catalog
        if service['type'] != 'identity'
        for endpoint in service['endpoints']
    ]
    if not available_regions:
        # this is very likely an incomplete keystone setup
        LOG.warning('No regions could be found excluding identity.')
        available_regions = [
            endpoint['region']
            for service in service_catalog
            for endpoint in service['endpoints']
        ]
        if not available_regions:
            # this is a critical problem and it's not clear how this occurs
            LOG.error('No regions can be found in the service catalog.')
            return None
    selected_region = None
    if request:
        # Prefer the region remembered in the user's cookie, when valid.
        selected_region = request.COOKIES.get('services_region',
                                              available_regions[0])
    if selected_region not in available_regions:
        selected_region = available_regions[0]
    return selected_region
def set_response_cookie(response, cookie_name, cookie_value):
    """Common function for setting the cookie in the response.

    Provides a common policy of setting cookies for last used project
    and region, can be reused in other locations.

    This method will set the cookie to expire in 365 days.
    """
    expire_date = timezone.now() + datetime.timedelta(days=365)
    response.set_cookie(cookie_name, cookie_value, expires=expire_date)
|
|
"""UniFi Controller abstraction."""
import asyncio
from datetime import timedelta
import ssl
from aiohttp import CookieJar
import aiounifi
from aiounifi.controller import (
DATA_CLIENT_REMOVED,
DATA_DPI_GROUP,
DATA_DPI_GROUP_REMOVED,
DATA_EVENT,
SIGNAL_CONNECTION_STATE,
SIGNAL_DATA,
)
from aiounifi.events import (
ACCESS_POINT_CONNECTED,
GATEWAY_CONNECTED,
SWITCH_CONNECTED,
WIRED_CLIENT_CONNECTED,
WIRELESS_CLIENT_CONNECTED,
WIRELESS_GUEST_CONNECTED,
)
from aiounifi.websocket import STATE_DISCONNECTED, STATE_RUNNING
import async_timeout
from homeassistant.components.device_tracker import DOMAIN as TRACKER_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.const import CONF_HOST
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_ALLOW_BANDWIDTH_SENSORS,
CONF_ALLOW_UPTIME_SENSORS,
CONF_BLOCK_CLIENT,
CONF_CONTROLLER,
CONF_DETECTION_TIME,
CONF_DPI_RESTRICTIONS,
CONF_IGNORE_WIRED_BUG,
CONF_POE_CLIENTS,
CONF_SITE_ID,
CONF_SSID_FILTER,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
CONF_TRACK_WIRED_CLIENTS,
CONTROLLER_ID,
DEFAULT_ALLOW_BANDWIDTH_SENSORS,
DEFAULT_ALLOW_UPTIME_SENSORS,
DEFAULT_DETECTION_TIME,
DEFAULT_DPI_RESTRICTIONS,
DEFAULT_IGNORE_WIRED_BUG,
DEFAULT_POE_CLIENTS,
DEFAULT_TRACK_CLIENTS,
DEFAULT_TRACK_DEVICES,
DEFAULT_TRACK_WIRED_CLIENTS,
DOMAIN as UNIFI_DOMAIN,
LOGGER,
UNIFI_WIRELESS_CLIENTS,
)
from .errors import AuthenticationRequired, CannotConnect
# Seconds to wait before retrying after a lost controller connection.
RETRY_TIMER = 15
SUPPORTED_PLATFORMS = [TRACKER_DOMAIN, SENSOR_DOMAIN, SWITCH_DOMAIN]
# aiounifi event types indicating a client (wired/wireless/guest) connected.
CLIENT_CONNECTED = (
    WIRED_CLIENT_CONNECTED,
    WIRELESS_CLIENT_CONNECTED,
    WIRELESS_GUEST_CONNECTED,
)
# aiounifi event types indicating a UniFi network device connected.
DEVICE_CONNECTED = (
    ACCESS_POINT_CONNECTED,
    GATEWAY_CONNECTED,
    SWITCH_CONNECTED,
)
class UniFiController:
"""Manages a single UniFi Controller."""
def __init__(self, hass, config_entry):
"""Initialize the system."""
self.hass = hass
self.config_entry = config_entry
self.available = True
self.api = None
self.progress = None
self.wireless_clients = None
self.listeners = []
self._site_name = None
self._site_role = None
self.entities = {}
@property
def controller_id(self):
"""Return the controller ID."""
return CONTROLLER_ID.format(host=self.host, site=self.site)
@property
def host(self):
"""Return the host of this controller."""
return self.config_entry.data[CONF_CONTROLLER][CONF_HOST]
@property
def site(self):
"""Return the site of this config entry."""
return self.config_entry.data[CONF_CONTROLLER][CONF_SITE_ID]
@property
def site_name(self):
"""Return the nice name of site."""
return self._site_name
@property
def site_role(self):
"""Return the site user role of this controller."""
return self._site_role
@property
def mac(self):
"""Return the mac address of this controller."""
for client in self.api.clients.values():
if self.host == client.ip:
return client.mac
return None
# Device tracker options
@property
def option_track_clients(self):
"""Config entry option to not track clients."""
return self.config_entry.options.get(CONF_TRACK_CLIENTS, DEFAULT_TRACK_CLIENTS)
@property
def option_track_wired_clients(self):
"""Config entry option to not track wired clients."""
return self.config_entry.options.get(
CONF_TRACK_WIRED_CLIENTS, DEFAULT_TRACK_WIRED_CLIENTS
)
@property
def option_track_devices(self):
"""Config entry option to not track devices."""
return self.config_entry.options.get(CONF_TRACK_DEVICES, DEFAULT_TRACK_DEVICES)
@property
def option_ssid_filter(self):
"""Config entry option listing what SSIDs are being used to track clients."""
return self.config_entry.options.get(CONF_SSID_FILTER, [])
@property
def option_detection_time(self):
"""Config entry option defining number of seconds from last seen to away."""
return timedelta(
seconds=self.config_entry.options.get(
CONF_DETECTION_TIME, DEFAULT_DETECTION_TIME
)
)
@property
def option_ignore_wired_bug(self):
"""Config entry option to ignore wired bug."""
return self.config_entry.options.get(
CONF_IGNORE_WIRED_BUG, DEFAULT_IGNORE_WIRED_BUG
)
# Client control options
@property
def option_poe_clients(self):
"""Config entry option to control poe clients."""
return self.config_entry.options.get(CONF_POE_CLIENTS, DEFAULT_POE_CLIENTS)
@property
def option_block_clients(self):
"""Config entry option with list of clients to control network access."""
return self.config_entry.options.get(CONF_BLOCK_CLIENT, [])
@property
def option_dpi_restrictions(self):
"""Config entry option to control DPI restriction groups."""
return self.config_entry.options.get(
CONF_DPI_RESTRICTIONS, DEFAULT_DPI_RESTRICTIONS
)
# Statistics sensor options
@property
def option_allow_bandwidth_sensors(self):
"""Config entry option to allow bandwidth sensors."""
return self.config_entry.options.get(
CONF_ALLOW_BANDWIDTH_SENSORS, DEFAULT_ALLOW_BANDWIDTH_SENSORS
)
@property
def option_allow_uptime_sensors(self):
"""Config entry option to allow uptime sensors."""
return self.config_entry.options.get(
CONF_ALLOW_UPTIME_SENSORS, DEFAULT_ALLOW_UPTIME_SENSORS
)
@callback
def async_unifi_signalling_callback(self, signal, data):
"""Handle messages back from UniFi library."""
if signal == SIGNAL_CONNECTION_STATE:
if data == STATE_DISCONNECTED and self.available:
LOGGER.warning("Lost connection to UniFi controller")
if (data == STATE_RUNNING and not self.available) or (
data == STATE_DISCONNECTED and self.available
):
self.available = data == STATE_RUNNING
async_dispatcher_send(self.hass, self.signal_reachable)
if not self.available:
self.hass.loop.call_later(RETRY_TIMER, self.reconnect, True)
else:
LOGGER.info("Connected to UniFi controller")
elif signal == SIGNAL_DATA and data:
if DATA_EVENT in data:
clients_connected = set()
devices_connected = set()
wireless_clients_connected = False
for event in data[DATA_EVENT]:
if event.event in CLIENT_CONNECTED:
clients_connected.add(event.mac)
if not wireless_clients_connected and event.event in (
WIRELESS_CLIENT_CONNECTED,
WIRELESS_GUEST_CONNECTED,
):
wireless_clients_connected = True
elif event.event in DEVICE_CONNECTED:
devices_connected.add(event.mac)
if wireless_clients_connected:
self.update_wireless_clients()
if clients_connected or devices_connected:
async_dispatcher_send(
self.hass,
self.signal_update,
clients_connected,
devices_connected,
)
elif DATA_CLIENT_REMOVED in data:
async_dispatcher_send(
self.hass, self.signal_remove, data[DATA_CLIENT_REMOVED]
)
elif DATA_DPI_GROUP in data:
for key in data[DATA_DPI_GROUP]:
if self.api.dpi_groups[key].dpiapp_ids:
async_dispatcher_send(self.hass, self.signal_update)
else:
async_dispatcher_send(self.hass, self.signal_remove, {key})
elif DATA_DPI_GROUP_REMOVED in data:
async_dispatcher_send(
self.hass, self.signal_remove, data[DATA_DPI_GROUP_REMOVED]
)
@property
def signal_reachable(self) -> str:
"""Integration specific event to signal a change in connection status."""
return f"unifi-reachable-{self.controller_id}"
@property
def signal_update(self):
"""Event specific per UniFi entry to signal new data."""
return f"unifi-update-{self.controller_id}"
@property
def signal_remove(self):
"""Event specific per UniFi entry to signal removal of entities."""
return f"unifi-remove-{self.controller_id}"
@property
def signal_options_update(self):
"""Event specific per UniFi entry to signal new options."""
return f"unifi-options-{self.controller_id}"
def update_wireless_clients(self):
"""Update set of known to be wireless clients."""
new_wireless_clients = set()
for client_id in self.api.clients:
if (
client_id not in self.wireless_clients
and not self.api.clients[client_id].is_wired
):
new_wireless_clients.add(client_id)
if new_wireless_clients:
self.wireless_clients |= new_wireless_clients
unifi_wireless_clients = self.hass.data[UNIFI_WIRELESS_CLIENTS]
unifi_wireless_clients.update_data(self.wireless_clients, self.config_entry)
async def async_setup(self):
"""Set up a UniFi controller."""
try:
self.api = await get_controller(
self.hass,
**self.config_entry.data[CONF_CONTROLLER],
async_callback=self.async_unifi_signalling_callback,
)
await self.api.initialize()
sites = await self.api.sites()
for site in sites.values():
if self.site == site["name"]:
self._site_name = site["desc"]
break
description = await self.api.site_description()
self._site_role = description[0]["site_role"]
except CannotConnect as err:
raise ConfigEntryNotReady from err
except Exception as err: # pylint: disable=broad-except
LOGGER.error("Unknown error connecting with UniFi controller: %s", err)
return False
# Restore clients that is not a part of active clients list.
entity_registry = await self.hass.helpers.entity_registry.async_get_registry()
for entity in entity_registry.entities.values():
if (
entity.config_entry_id != self.config_entry.entry_id
or "-" not in entity.unique_id
):
continue
mac = ""
if entity.domain == TRACKER_DOMAIN:
mac = entity.unique_id.split("-", 1)[0]
elif entity.domain == SWITCH_DOMAIN:
mac = entity.unique_id.split("-", 1)[1]
if mac in self.api.clients or mac not in self.api.clients_all:
continue
client = self.api.clients_all[mac]
self.api.clients.process_raw([client.raw])
LOGGER.debug(
"Restore disconnected client %s (%s)",
entity.entity_id,
client.mac,
)
wireless_clients = self.hass.data[UNIFI_WIRELESS_CLIENTS]
self.wireless_clients = wireless_clients.get_data(self.config_entry)
self.update_wireless_clients()
for platform in SUPPORTED_PLATFORMS:
self.hass.async_create_task(
self.hass.config_entries.async_forward_entry_setup(
self.config_entry, platform
)
)
self.api.start_websocket()
self.config_entry.add_update_listener(self.async_config_entry_updated)
return True
@staticmethod
async def async_config_entry_updated(hass, config_entry) -> None:
"""Handle signals of config entry being updated."""
controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
async_dispatcher_send(hass, controller.signal_options_update)
@callback
def reconnect(self, log=False) -> None:
"""Prepare to reconnect UniFi session."""
if log:
LOGGER.info("Will try to reconnect to UniFi controller")
self.hass.loop.create_task(self.async_reconnect())
async def async_reconnect(self) -> None:
"""Try to reconnect UniFi session."""
try:
with async_timeout.timeout(5):
await self.api.login()
self.api.start_websocket()
except (
asyncio.TimeoutError,
aiounifi.BadGateway,
aiounifi.ServiceUnavailable,
aiounifi.AiounifiException,
):
self.hass.loop.call_later(RETRY_TIMER, self.reconnect)
@callback
def shutdown(self, event) -> None:
    """Wrap the call to unifi.close.

    Used as an argument to EventBus.async_listen_once.
    """
    # `event` is required by the listener signature but unused here.
    self.api.stop_websocket()
async def async_reset(self):
    """Reset this controller to default state.

    Stops the websocket, unloads every platform forwarded from this config
    entry, and releases all registered dispatcher listeners.
    """
    self.api.stop_websocket()

    # Unload platforms one at a time, in registration order.
    for supported_platform in SUPPORTED_PLATFORMS:
        await self.hass.config_entries.async_forward_entry_unload(
            self.config_entry, supported_platform
        )

    # Release every dispatcher subscription, then drop the references.
    for unsubscribe in self.listeners:
        unsubscribe()
    self.listeners = []

    return True
async def get_controller(
    hass, host, username, password, port, site, verify_ssl, async_callback=None
):
    """Create a controller object and verify authentication.

    Raises:
        AuthenticationRequired: Credentials rejected, or an unclassified
            aiounifi error occurred.
        CannotConnect: The controller could not be reached.
    """
    sslcontext = None

    if verify_ssl:
        # Verified TLS: reuse Home Assistant's shared client session. A string
        # value is treated as a path to a CA bundle.
        session = aiohttp_client.async_get_clientsession(hass)
        if isinstance(verify_ssl, str):
            sslcontext = ssl.create_default_context(cafile=verify_ssl)
    else:
        # Unverified TLS needs a dedicated session; the unsafe CookieJar allows
        # cookies from the controller's bare-IP URL.
        session = aiohttp_client.async_create_clientsession(
            hass, verify_ssl=verify_ssl, cookie_jar=CookieJar(unsafe=True)
        )

    controller = aiounifi.Controller(
        host,
        username=username,
        password=password,
        port=port,
        site=site,
        websession=session,
        sslcontext=sslcontext,
        callback=async_callback,
    )

    try:
        with async_timeout.timeout(10):
            # Detect UniFi OS vs. classic controller before logging in.
            await controller.check_unifi_os()
            await controller.login()
        return controller

    except aiounifi.Unauthorized as err:
        LOGGER.warning("Connected to UniFi at %s but not registered.", host)
        raise AuthenticationRequired from err

    except (
        asyncio.TimeoutError,
        aiounifi.BadGateway,
        aiounifi.ServiceUnavailable,
        aiounifi.RequestError,
    ) as err:
        LOGGER.error("Error connecting to the UniFi controller at %s", host)
        raise CannotConnect from err

    except aiounifi.AiounifiException as err:
        LOGGER.exception("Unknown UniFi communication error occurred")
        raise AuthenticationRequired from err
|
|
# Copyright 2022 The Scenic Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nn_ops.py."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import flax.linen as nn
import jax.numpy as jnp
import numpy as np
from scenic.model_lib.layers import nn_ops
class NNOpsTest(parameterized.TestCase):
  """Tests for utilities in nn_ops.py."""

  @parameterized.named_parameters([('test_both', (0, 1), (2, 3, 5, 4, 6)),
                                   ('test_rows', (0,), (1, 3, 4)),
                                   ('test_columns', (1,), (1, 5, 6))])
  def test_compute_relative_positions(self, spatial_axis,
                                      expected_output_shape):
    """Tests compute_relative_positions.

    Args:
      spatial_axis: position axis passed to the compute_relative_positions.
      expected_output_shape: expected shape of the output.
    """
    query_spatial_shape = (3, 5)
    key_spatial_shape = (4, 6)
    relative_positions = nn_ops.compute_relative_positions(
        query_spatial_shape, key_spatial_shape, spatial_axis)
    # Test output shape.
    self.assertEqual(relative_positions.shape, expected_output_shape)
    # Test maximum positional distances: the largest relative offset along
    # `dim` is (len_q - 1) + (len_k - 1).
    for dim_i, dim in enumerate(spatial_axis):
      max_positional_distances = (
          query_spatial_shape[dim] + key_spatial_shape[dim] - 2)
      self.assertEqual(max_positional_distances,
                       jnp.max(relative_positions[dim_i]))

  def test_weighted_max_pool(self):
    """Tests weighted_max_pool against flax's max_pool with unit weights."""
    inputs_shape = (16, 32, 32, 20)
    window_shape = (4, 4)
    strides = (4, 4)
    inputs = jnp.array(np.random.normal(size=inputs_shape))
    weights = jnp.ones(inputs_shape[:-1])
    outputs, pooled_weights = nn_ops.weighted_max_pool(
        inputs,
        weights,
        window_shape=window_shape,
        strides=strides,
        padding='VALID',
        return_pooled_weights=True)
    # With all-ones weights the result must match plain max pooling.
    expected_outputs = nn.max_pool(
        inputs, window_shape=window_shape, strides=strides, padding='VALID')
    expected_pooled_weights = jnp.ones((16, 8, 8))
    self.assertTrue(jnp.array_equal(outputs, expected_outputs))
    self.assertTrue(jnp.array_equal(pooled_weights, expected_pooled_weights))

  def test_weighted_avg_pool(self):
    """Tests weighted_avg_pool against flax's avg_pool with unit weights."""
    inputs_shape = (16, 32, 32, 20)
    window_shape = (4, 4)
    strides = (4, 4)
    inputs = jnp.array(np.random.normal(size=inputs_shape))
    weights = jnp.ones(inputs_shape[:-1])
    outputs, pooled_weights = nn_ops.weighted_avg_pool(
        inputs,
        weights,
        window_shape=window_shape,
        strides=strides,
        padding='VALID',
        return_pooled_weights=True)
    # With all-ones weights the result must match plain average pooling.
    expected_outputs = nn.avg_pool(
        inputs, window_shape=window_shape, strides=strides, padding='VALID')
    expected_pooled_weights = jnp.ones((16, 8, 8))
    self.assertTrue(jnp.array_equal(outputs, expected_outputs))
    self.assertTrue(jnp.array_equal(pooled_weights, expected_pooled_weights))

  def test_extract_image_patches(self):
    """Tests extract_image_patches."""
    input_shape = (16, 3, 3, 32)
    inputs = np.array(np.random.normal(size=input_shape))
    # Patching a 3x3 image to 3x3 patches, with stride 1x1, no dilation and
    # VALID padding should do nothing but reshaping the (bs, h, w, c) to
    # (bs, 1, 1, h, w, c).
    patched = nn_ops.extract_image_patches(
        inputs, (1, 3, 3, 1), (1, 1, 1, 1),
        padding='VALID',
        rhs_dilation=(1, 1, 1, 1))
    self.assertEqual(patched.shape, (16, 1, 1, 3, 3, 32))
    np.testing.assert_allclose(inputs, patched.reshape(input_shape), atol=1e-2)

  def test_upscale2x_nearest_neighbor(self):
    """Tests upscale2x_nearest_neighbor."""
    inputs = jnp.array(np.random.normal(size=(16, 32, 32, 128)))
    outputs = nn_ops.upscale2x_nearest_neighbor(inputs)
    # Check that both spatial dimensions are doubled.
    self.assertEqual(outputs.shape, (16, 64, 64, 128))

  def test_central_crop(self):
    """Tests central_crop."""
    inputs = jnp.array(np.random.normal(size=(16, 32, 32, 128)))
    # Check the case where the outputs should be same as the inputs.
    outputs = nn_ops.central_crop(inputs, target_shape=(16, 32, 32, 128))
    self.assertTrue(jnp.array_equal(outputs, inputs))
    # Check the output shape.
    outputs = nn_ops.central_crop(inputs, target_shape=(16, 6, 6, 128))
    self.assertEqual(outputs.shape, (16, 6, 6, 128))
    inputs = jnp.arange(100.).reshape((1, 10, 10, 1))
    target_shape = (1, 8, 8, 1)
    output = nn_ops.central_crop(inputs, target_shape)
    # Check up-left and down-right pixel of the output: cropping a 10x10
    # arange to the central 8x8 drops one row/column on each side.
    self.assertEqual(output[0, 0, 0, 0], 11.)
    self.assertEqual(output[0, -1, -1, 0], 88.)

  def test_extract_patches(self):
    """Tests extract_patches."""
    input_shape = (16, 3, 3, 32)
    inputs = np.array(np.random.normal(size=input_shape))
    # Patching a 3x3 image to 3x3 patches, with stride 1x1 should do nothing
    # but reshaping the (bs, h, w, c) to (bs, 1, 1, h, w, c).
    patched = nn_ops.extract_patches(inputs, (3, 3), (1, 1))
    self.assertEqual(patched.shape, (16, 1, 1, 3, 3, 32))
    np.testing.assert_allclose(inputs, patched.reshape(input_shape), atol=1e-2)

  @parameterized.named_parameters([('test_avg_pooling', 'avg_pooling'),
                                   ('test_max_pooling', 'max_pooling'),
                                   ('test_avg_pooling_bu', 'avg_pooling'),
                                   ('test_max_pooling_bu', 'max_pooling'),
                                   ('test_space_to_depth', 'space_to_depth')])
  def test_pooling(self, pooling_type):
    """Test Pooling module.

    Args:
      pooling_type: str; Type of pooling function from `['avg_pooling',
        'max_pooling', 'space_to_depth']`
    """
    inputs_shape = (16, 32, 32, 64)
    window_shape = (4, 4)
    strides = (4, 4)
    inputs = jnp.array(np.random.normal(size=inputs_shape))
    outputs = nn_ops.pooling(
        inputs,
        pooling_configs={'pooling_type': pooling_type},
        window_shape=window_shape,
        strides=strides)
    if pooling_type == 'space_to_depth':
      # space_to_depth folds each 4x4 window into channels: 64 * 16 = 1024.
      self.assertEqual(outputs.shape, (16, 8, 8, 1024))
    else:
      self.assertEqual(outputs.shape, (16, 8, 8, 64))

  @parameterized.named_parameters([
      ('test_4', (4, 28, 28, 32), (4, 4), (4, 4), 'VALID', (4, 7, 7, 4, 4, 32)),
      ('test_4_stride', (4, 28, 28, 32), (4, 4), (1, 1), 'VALID', (4, 25, 25, 4,
                                                                   4, 32)),
      ('test_4_stride_pad', (4, 28, 28, 32), (4, 4), (1, 1), 'SAME',
       (4, 28, 28, 4, 4, 32)),
      ('test_6_stride', (4, 28, 28, 32), (6, 6), (1, 1), 'VALID', (4, 23, 23, 6,
                                                                   6, 32)),
  ])
  def test_image_patcher(self, input_shape, patch_size, strides, padding,
                         expected_output_shape):
    """Tests ImagePatcher.

    Args:
      input_shape: tuple; Shape of the input data.
      patch_size: tuple; size of the patch: (height, width).
      strides: tuple; Specifies how far two consecutive patches are in the
        input.
      padding: str; The type of padding algorithm to use.
      expected_output_shape: expected shape of the output.
    """
    inputs = jnp.zeros(input_shape)
    image_patcher = functools.partial(
        nn_ops.patch_image,
        inputs_shape=input_shape,
        patch_size=patch_size,
        strides=strides,
        padding=padding,
        mode='i2p')
    # Test output shape.
    outputs = image_patcher(inputs)
    self.assertEqual(outputs.shape, expected_output_shape)

  @parameterized.named_parameters([
      ('test_q1k4', 1, 4, np.array([[0, 1, 2, 3]])),
      ('test_q5k1', 5, 1, np.array([[4], [3], [2], [1], [0]])),
      ('test_q2k3', 2, 3, np.array([[1, 2, 3], [0, 1, 2]])),
  ])
  def test_compute_1d_relative_distance(self, lenq, lenk,
                                        expected_relative_distance):
    """Tests compute_relative_positions."""
    relative_distance = nn_ops.compute_1d_relative_distance(lenq, lenk)
    # Test output values.
    self.assertTrue(
        np.array_equal(relative_distance, expected_relative_distance))

  def test_compute_1d_relative_distance_min_and_max(self):
    """Tests min/max bounds of compute_1d_relative_distance."""
    # Draw lengths from [1, 100): a zero-length query or key would produce an
    # empty distance matrix, and `.min()`/`.max()` raise on empty arrays,
    # making the test flaky. Plain ints also keep assertEqual comparing
    # scalar-to-scalar rather than int-to-array.
    len_q = int(np.random.randint(1, 100))
    len_k = int(np.random.randint(1, 100))
    relative_distance = nn_ops.compute_1d_relative_distance(len_q, len_k)
    self.assertEqual(relative_distance.min(), 0)
    self.assertEqual(relative_distance.max(), len_q + len_k - 2)
# Allow running this test module directly (absltest discovers the TestCase).
if __name__ == '__main__':
  absltest.main()
|
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
import logging
import os
import unittest
import warnings
from abc import ABC, ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import contextmanager
from tempfile import mkdtemp
from textwrap import dedent
from typing import Any, Iterable, List, Optional, Type, TypeVar, Union, cast
from pants.base.build_root import BuildRoot
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.exceptions import TaskError
from pants.base.specs import AddressSpec, AddressSpecs, FilesystemSpecs, Specs
from pants.build_graph.address import Address, BuildFileAddress
from pants.build_graph.build_configuration import BuildConfiguration
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
from pants.engine.fs import PathGlobs, PathGlobsAndRoot, Snapshot
from pants.engine.legacy.graph import HydratedField
from pants.engine.legacy.structs import SourceGlobs, SourcesField
from pants.engine.rules import RootRule
from pants.engine.scheduler import SchedulerSession
from pants.engine.selectors import Params
from pants.init.engine_initializer import EngineInitializer
from pants.init.util import clean_global_runtime_state
from pants.option.global_options import BuildFileImportsBehavior
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.source.source_root import SourceRootConfig
from pants.source.wrapped_globs import EagerFilesetWithSpec
from pants.subsystem.subsystem import Subsystem
from pants.task.goal_options_mixin import GoalOptionsMixin
from pants.testutil.base.context_utils import create_context_from_options
from pants.testutil.engine.util import init_native
from pants.testutil.option.fakes import create_options_for_optionables
from pants.testutil.subsystem import util as subsystem_util
from pants.util.collections import assert_single_element
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import (
recursive_dirname,
relative_symlink,
safe_file_dump,
safe_mkdir,
safe_mkdtemp,
safe_open,
safe_rmtree,
)
from pants.util.memo import memoized_method
from pants.util.meta import classproperty
class AbstractTestGenerator(ABC):
    """A mixin that facilitates test generation at runtime."""

    @classmethod
    @abstractmethod
    def generate_tests(cls):
        """Generate tests for a given class.

        This should be called against the composing class in its defining module, e.g.

        class ThingTest(TestGenerator):
          ...

        ThingTest.generate_tests()
        """

    @classmethod
    def add_test(cls, method_name, method):
        """A classmethod that adds dynamic test methods to a given class.

        :param string method_name: The name of the test method (e.g. `test_thing_x`).
        :param callable method: A callable representing the method. This should take a 'self' argument
                                as its first parameter for instance method binding.
        """
        duplicate_message = f"a test with name `{method_name}` already exists on `{cls.__name__}`!"
        assert not hasattr(cls, method_name), duplicate_message
        assert method_name.startswith("test_"), f"{method_name} is not a valid test name!"
        setattr(cls, method_name, method)
class TestBase(unittest.TestCase, metaclass=ABCMeta):
    """A baseclass useful for tests requiring a temporary buildroot.

    :API: public
    """

    # Engine state created lazily by _init_engine(). NOTE(review): these are
    # class-level, so they appear intended to be shared across tests of a class;
    # _reset_engine() invalidates files but does not discard the scheduler.
    _scheduler: Optional[SchedulerSession] = None
    _build_graph = None
    _address_mapper = None
def build_path(self, relpath):
    """Returns the canonical BUILD file path for the given relative build path.

    :API: public
    """
    # A path already naming a BUILD file (or BUILD.suffix) is returned as-is;
    # otherwise it is treated as a directory containing a BUILD file.
    basename = os.path.basename(relpath)
    if basename.startswith("BUILD"):
        return relpath
    return os.path.join(relpath, "BUILD")
def create_dir(self, relpath):
    """Creates a directory under the buildroot.

    :API: public

    relpath: The relative path to the directory from the build root.
    """
    abs_dir = os.path.join(self.build_root, relpath)
    safe_mkdir(abs_dir)
    # Tell the engine the filesystem changed underneath it.
    self.invalidate_for(relpath)
    return abs_dir
def create_workdir_dir(self, relpath):
    """Creates a directory under the work directory.

    :API: public

    relpath: The relative path to the directory from the work directory.
    """
    abs_dir = os.path.join(self.pants_workdir, relpath)
    safe_mkdir(abs_dir)
    self.invalidate_for(relpath)
    return abs_dir
def invalidate_for(self, *relpaths):
    """Invalidates all files from the relpath, recursively up to the root.

    Many python operations implicitly create parent directories, so we assume that touching a
    file located below directories that do not currently exist will result in their creation.
    """
    if self._scheduler is None:
        return
    # Collect every ancestor directory of every given path, deduplicated.
    touched = set()
    for relpath in relpaths:
        touched.update(recursive_dirname(relpath))
    return self._scheduler.invalidate_files(touched)
def create_link(self, relsrc, reldst):
    """Creates a symlink within the buildroot.

    :API: public

    relsrc: A relative path for the source of the link.
    reldst: A relative path for the destination of the link.
    """
    source_path = os.path.join(self.build_root, relsrc)
    dest_path = os.path.join(self.build_root, reldst)
    relative_symlink(source_path, dest_path)
    self.invalidate_for(reldst)
def create_file(self, relpath, contents="", mode="w"):
    """Writes to a file under the buildroot.

    :API: public

    relpath: The relative path to the file from the build root.
    contents: A string containing the contents of the file - '' by default..
    mode: The mode to write to the file in - over-write by default.
    """
    file_path = os.path.join(self.build_root, relpath)
    with safe_open(file_path, mode=mode) as out:
        out.write(contents)
    self.invalidate_for(relpath)
    return file_path
def create_files(self, path, files):
    """Writes to a file under the buildroot with contents same as file name.

    :API: public

    path: The relative path to the file from the build root.
    files: List of file names.
    """
    for filename in files:
        # Each file's content is its own name, useful for simple assertions.
        self.create_file(os.path.join(path, filename), contents=filename)
def create_workdir_file(self, relpath, contents="", mode="w"):
    """Writes to a file under the work directory.

    :API: public

    relpath: The relative path to the file from the work directory.
    contents: A string containing the contents of the file - '' by default..
    mode: The mode to write to the file in - over-write by default.
    """
    file_path = os.path.join(self.pants_workdir, relpath)
    with safe_open(file_path, mode=mode) as out:
        out.write(contents)
    return file_path
def add_to_build_file(self, relpath, target):
    """Adds the given target specification to the BUILD file at relpath.

    :API: public

    relpath: The relative path to the BUILD file from the build root.
    target: A string containing the target definition as it would appear in a BUILD file.
    """
    build_file = self.build_path(relpath)
    # Append so multiple targets can accumulate in one BUILD file.
    self.create_file(build_file, target, mode="a")
def make_target(
    self,
    spec="",
    target_type=Target,
    dependencies=None,
    derived_from=None,
    synthetic=False,
    make_missing_sources=True,
    **kwargs,
):
    """Creates a target and injects it into the test's build graph.

    :API: public

    :param string spec: The target address spec that locates this target.
    :param type target_type: The concrete target subclass to create this new target from.
    :param list dependencies: A list of target instances this new target depends on.
    :param derived_from: The target this new target was derived from.
    :type derived_from: :class:`pants.build_graph.target.Target`
    :param bool synthetic: Whether the injected target is marked synthetic in the graph.
    :param bool make_missing_sources: If True, create empty files on disk for any literal
      (non-glob) entries in `kwargs["sources"]` before hydrating them.
    """
    self._init_target_subsystem()

    address = Address.parse(spec)

    # Materialize literal sources on disk so hydration below can find them;
    # glob entries (containing '*') are left to the engine to expand.
    if make_missing_sources and "sources" in kwargs:
        for source in kwargs["sources"]:
            if "*" not in source:
                self.create_file(os.path.join(address.spec_path, source), mode="a", contents="")
        kwargs["sources"] = self.sources_for(kwargs["sources"], address.spec_path)

    target = target_type(
        name=address.target_name, address=address, build_graph=self.build_graph, **kwargs
    )
    dependencies = dependencies or []

    # Injectables must be applied before the target is injected with its deps.
    self.build_graph.apply_injectables([target])
    self.build_graph.inject_target(
        target,
        dependencies=[dep.address for dep in dependencies],
        derived_from=derived_from,
        synthetic=synthetic,
    )

    # TODO(John Sirois): This re-creates a little bit too much work done by the BuildGraph.
    # Fixup the BuildGraph to deal with non BuildFileAddresses better and just leverage it.
    traversables = [target.compute_dependency_address_specs(payload=target.payload)]
    for dependency_spec in itertools.chain(*traversables):
        dependency_address = Address.parse(dependency_spec, relative_to=address.spec_path)
        dependency_target = self.build_graph.get_target(dependency_address)
        if not dependency_target:
            raise ValueError(
                "Tests must make targets for dependency specs ahead of them "
                "being traversed, {} tried to traverse {} which does not exist.".format(
                    target, dependency_address
                )
            )
        if dependency_target not in target.dependencies:
            self.build_graph.inject_dependency(
                dependent=target.address, dependency=dependency_address
            )
            # The new edge invalidates any cached transitive fingerprint.
            target.mark_transitive_invalidation_hash_dirty()

    return target
def sources_for(
    self, package_relative_path_globs: List[str], package_dir: str = "",
) -> EagerFilesetWithSpec:
    """Hydrate the given source globs (relative to package_dir) via the engine.

    Builds a synthetic SourcesField under a bogus BUILD address and asks the
    scheduler to hydrate it, returning the resulting fileset.
    """
    sources_field = SourcesField(
        address=BuildFileAddress(
            rel_path=os.path.join(package_dir, "BUILD"), target_name="_bogus_target_for_test",
        ),
        arg="sources",
        source_globs=SourceGlobs(*package_relative_path_globs),
    )
    # product_request returns a list; we requested exactly one product.
    field = self.scheduler.product_request(HydratedField, [sources_field])[0]
    return cast(EagerFilesetWithSpec, field.value)
@classmethod
def alias_groups(cls):
    """BUILD file aliases available to tests; subclasses may extend.

    :API: public
    """
    return BuildFileAliases(targets={"target": Target})
@classmethod
def rules(cls):
    """Engine rules registered for tests; subclasses may extend."""
    # Required for sources_for:
    return [RootRule(SourcesField)]
@classmethod
def build_config(cls):
    """Assemble the BuildConfiguration from the class's aliases and rules."""
    config = BuildConfiguration()
    config.register_aliases(cls.alias_groups())
    config.register_rules(cls.rules())
    return config
def setUp(self):
    """
    :API: public
    """
    super().setUp()
    # Avoid resetting the Runtracker here, as that is specific to fork'd process cleanup.
    clean_global_runtime_state(reset_subsystem=True)

    # Cleanups run LIFO: engine reset registered first so it runs last.
    self.addCleanup(self._reset_engine)

    safe_mkdir(self.build_root, clean=True)
    safe_mkdir(self.pants_workdir)
    self.addCleanup(safe_rmtree, self.build_root)

    BuildRoot().path = self.build_root
    self.addCleanup(BuildRoot().reset)

    self.subprocess_dir = os.path.join(self.build_root, ".pids")

    # Per-scope option overrides consumed by context(); "" is the global scope.
    self.options = defaultdict(dict)  # scope -> key-value mapping.
    self.options[""] = {
        "pants_workdir": self.pants_workdir,
        "pants_supportdir": os.path.join(self.build_root, "build-support"),
        "pants_distdir": os.path.join(self.build_root, "dist"),
        "pants_configdir": os.path.join(self.build_root, "config"),
        "pants_subprocessdir": self.subprocess_dir,
        "cache_key_gen_version": "0-test",
    }
    self.options["cache"] = {
        "read_from": [],
        "write_to": [],
    }

    self._build_configuration = self.build_config()
    self._inited_target = False
    subsystem_util.init_subsystem(Target.TagAssignments)
def buildroot_files(self, relpath=None):
    """Returns the set of all files under the test build root.

    :API: public

    :param string relpath: If supplied, only collect files from this subtree.
    :returns: All file paths found.
    :rtype: set
    """
    search_root = os.path.join(self.build_root, relpath or "")
    found = set()
    for root, _, filenames in os.walk(search_root):
        for filename in filenames:
            # Report paths relative to the build root, not the subtree.
            found.add(os.path.relpath(os.path.join(root, filename), self.build_root))
    return found
def _reset_engine(self):
    # Clear graph state and drop cached file content; the scheduler object
    # itself is retained for reuse.
    if self._scheduler is not None:
        self._build_graph.reset()
        self._scheduler.invalidate_all_files()
@contextmanager
def isolated_local_store(self):
    """Temporarily use an anonymous, empty Store for the Scheduler.

    In most cases we re-use a Store across all tests, since `file` and `directory` entries are
    content addressed, and `process` entries are intended to have strong cache keys. But when
    dealing with non-referentially transparent `process` executions, it can sometimes be
    necessary to avoid this cache.
    """
    # Drop the shared scheduler and build a fresh one over a throwaway store.
    self._scheduler = None
    local_store_dir = os.path.realpath(safe_mkdtemp())
    self._init_engine(local_store_dir=local_store_dir)
    try:
        yield
    finally:
        # Discard the isolated scheduler so the next access rebuilds normally.
        self._scheduler = None
        safe_rmtree(local_store_dir)
@property
def build_root(self):
    # Memoized per-instance temp dir (see _build_root).
    return self._build_root()
@property
def pants_workdir(self):
    # Memoized .pants.d under the build root (see _pants_workdir).
    return self._pants_workdir()
@memoized_method
def _build_root(self):
    # realpath: symlinked temp dirs (e.g. /tmp on macOS) would break path
    # comparisons elsewhere.
    return os.path.realpath(mkdtemp(suffix="_BUILD_ROOT"))
@memoized_method
def _pants_workdir(self):
    return os.path.join(self._build_root(), ".pants.d")
def _init_engine(self, local_store_dir: Optional[str] = None) -> None:
    """Lazily construct the scheduler, build graph and address mapper.

    :param local_store_dir: Overrides the bootstrap-option store location
        (used by isolated_local_store).
    """
    if self._scheduler is not None:
        return

    options_bootstrapper = OptionsBootstrapper.create(args=["--pants-config-files=[]"])
    local_store_dir = (
        local_store_dir
        or options_bootstrapper.bootstrap_options.for_global_scope().local_store_dir
    )

    # NB: This uses the long form of initialization because it needs to directly specify
    # `cls.alias_groups` rather than having them be provided by bootstrap options.
    graph_session = EngineInitializer.setup_legacy_graph_extended(
        pants_ignore_patterns=[],
        local_store_dir=local_store_dir,
        build_file_imports_behavior=BuildFileImportsBehavior.error,
        native=init_native(),
        options_bootstrapper=options_bootstrapper,
        build_root=self.build_root,
        build_configuration=self.build_config(),
        build_ignore_patterns=None,
    ).new_session(zipkin_trace_v2=False, build_id="buildid_for_test")
    self._scheduler = graph_session.scheduler_session
    # Start from empty specs; tests inject targets explicitly.
    self._build_graph, self._address_mapper = graph_session.create_build_graph(
        Specs(address_specs=AddressSpecs([]), filesystem_specs=FilesystemSpecs([])),
        self._build_root(),
    )
@property
def scheduler(self) -> SchedulerSession:
    # Lazily build the engine on first access, then give subclasses a hook.
    if self._scheduler is None:
        self._init_engine()
        self.post_scheduler_init()
    return cast(SchedulerSession, self._scheduler)
def post_scheduler_init(self):
    """Run after initializing the Scheduler, it will have the same lifetime."""
    # Intentionally a no-op hook for subclasses.
    pass
@property
def address_mapper(self):
    # Created alongside the scheduler in _init_engine().
    if self._address_mapper is None:
        self._init_engine()
    return self._address_mapper
@property
def build_graph(self):
    # Created alongside the scheduler in _init_engine().
    if self._build_graph is None:
        self._init_engine()
    return self._build_graph
def reset_build_graph(self, reset_build_files=False, delete_build_files=False):
    """Start over with a fresh build graph with no targets in it."""
    if reset_build_files or delete_build_files:
        build_files = [f for f in self.buildroot_files() if os.path.basename(f) == "BUILD"]
        if delete_build_files:
            for build_file in build_files:
                os.remove(os.path.join(self.build_root, build_file))
        # Invalidate whether deleted or merely reset, so the engine re-reads.
        self.invalidate_for(*build_files)
    if self._build_graph is not None:
        self._build_graph.reset()
# Type variable for request_single_product's generic return type.
_P = TypeVar("_P")

def request_single_product(
    self, product_type: Type["TestBase._P"], subject: Union[Params, Any]
) -> "TestBase._P":
    """Request one product of `product_type` for `subject` from the engine.

    Asserts that exactly one result is produced.
    """
    result = assert_single_element(self.scheduler.product_request(product_type, [subject]))
    return cast(TestBase._P, result)
def set_options_for_scope(self, scope, **kwargs):
    # Merge option overrides into this scope; consumed later by context().
    self.options[scope].update(kwargs)
def context(
    self,
    for_task_types=None,
    for_subsystems=None,
    options=None,
    target_roots=None,
    console_outstream=None,
    workspace=None,
    scheduler=None,
    address_mapper=None,
    **kwargs,
):
    """Construct a fully-optioned task Context for tests.

    :API: public

    :param dict **kwargs: keyword arguments passed in to `create_options_for_optionables`.
    """
    # Many tests use source root functionality via the SourceRootConfig.global_instance().
    # (typically accessed via Target.target_base), so we always set it up, for convenience.
    for_subsystems = set(for_subsystems or ())
    for subsystem in for_subsystems:
        if subsystem.options_scope is None:
            raise TaskError(
                "You must set a scope on your subsystem type before using it in tests."
            )

    optionables = {SourceRootConfig} | self._build_configuration.optionables() | for_subsystems

    for_task_types = for_task_types or ()
    for task_type in for_task_types:
        scope = task_type.options_scope
        if scope is None:
            raise TaskError("You must set a scope on your task type before using it in tests.")
        optionables.add(task_type)
        # If task is expected to inherit goal-level options, register those directly on the task,
        # by subclassing the goal options registrar and settings its scope to the task scope.
        if issubclass(task_type, GoalOptionsMixin):
            subclass_name = "test_{}_{}_{}".format(
                task_type.__name__,
                task_type.goal_options_registrar_cls.options_scope,
                task_type.options_scope,
            )
            optionables.add(
                type(
                    subclass_name,
                    (task_type.goal_options_registrar_cls,),
                    {"options_scope": task_type.options_scope},
                )
            )

    # Now expand to all deps.
    all_optionables = set()
    for optionable in optionables:
        all_optionables.update(si.optionable_cls for si in optionable.known_scope_infos())

    # Now default the option values and override with any caller-specified values.
    # TODO(benjy): Get rid of the options arg, and require tests to call set_options.
    # Layering: caller `options` arg first, then self.options from
    # set_options_for_scope / setUp defaults merged on top.
    options = options.copy() if options else {}
    for s, opts in self.options.items():
        scoped_opts = options.setdefault(s, {})
        scoped_opts.update(opts)

    fake_options = create_options_for_optionables(all_optionables, options=options, **kwargs)

    Subsystem.reset(reset_options=True)
    Subsystem.set_options(fake_options)

    scheduler = scheduler or self.scheduler
    address_mapper = address_mapper or self.address_mapper

    context = create_context_from_options(
        fake_options,
        target_roots=target_roots,
        build_graph=self.build_graph,
        build_configuration=self._build_configuration,
        address_mapper=address_mapper,
        console_outstream=console_outstream,
        workspace=workspace,
        scheduler=scheduler,
    )
    return context
def tearDown(self):
    """
    :API: public
    """
    super().tearDown()
    # Drop all subsystem instances so they don't leak between tests.
    Subsystem.reset()
@classproperty
def subsystems(cls):
    """Initialize these subsystems when running your test.

    If your test instantiates a target type that depends on any subsystems, those subsystems need to
    be initialized in your test. You can override this property to return the necessary subsystem
    classes.

    :rtype: list of type objects, all subclasses of Subsystem
    """
    return Target.subsystems()
def _init_target_subsystem(self):
    # Lazily initialize the Target-required subsystems exactly once per test.
    if self._inited_target:
        return
    subsystem_util.init_subsystems(self.subsystems)
    self._inited_target = True
def target(self, spec):
    """Resolves the given target address to a Target object.

    :API: public

    address: The BUILD target address to resolve.

    Returns the corresponding Target or else None if the address does not point to a defined Target.
    """
    self._init_target_subsystem()
    address = Address.parse(spec)
    # Pull the address (and its closure) into the graph before lookup.
    self.build_graph.inject_address_closure(address)
    return self.build_graph.get_target(address)
def targets(self, address_spec):
    """Resolves a target spec to one or more Target objects.

    :API: public

    spec: Either BUILD target address or else a target glob using the siblings ':' or
          descendants '::' suffixes.

    Returns the set of all Targets found.
    """
    parsed_spec = CmdLineSpecParser(self.build_root).parse_spec(address_spec)
    assert isinstance(parsed_spec, AddressSpec)
    return [
        self.build_graph.get_target(address)
        for address in self.build_graph.inject_address_specs_closure([parsed_spec])
    ]
def create_library(
    self,
    *,
    path: str,
    target_type: str,
    name: str,
    sources: Optional[List[str]] = None,
    java_sources: Optional[List[str]] = None,
    provides: Optional[str] = None,
    dependencies: Optional[List[str]] = None,
    requirements: Optional[str] = None,
):
    """Creates a library target of given type at the BUILD file at path with sources.

    :API: public

    path: The relative path to the BUILD file from the build root.
    target_type: valid pants target type.
    name: Name of the library target.
    sources: List of source file at the path relative to path.
    java_sources: List of java sources.
    provides: Provides with a format consistent with what should be rendered in the resulting BUILD
      file, eg: "artifact(org='org.pantsbuild.example', name='hello-greet', repo=public)"
    dependencies: List of dependencies: [':protobuf-2.4.1']
    requirements: Python requirements with a format consistent with what should be in the resulting
      build file, eg: "[python_requirement(foo==1.0.0)]"
    """
    if sources:
        self.create_files(path, sources)
    # Each optional argument renders to a BUILD-file keyword line, or nothing.
    sources_str = f"sources={repr(sources)}," if sources else ""
    if java_sources is not None:
        formatted_java_sources = ",".join(f'"{str_target}"' for str_target in java_sources)
        java_sources_str = f"java_sources=[{formatted_java_sources}],"
    else:
        java_sources_str = ""
    provides_str = f"provides={provides}," if provides is not None else ""
    dependencies_str = f"dependencies={dependencies}," if dependencies is not None else ""
    requirements_str = f"requirements={requirements}," if requirements is not None else ""
    self.add_to_build_file(
        path,
        dedent(
            f"""
            {target_type}(name='{name}',
              {sources_str}
              {java_sources_str}
              {provides_str}
              {dependencies_str}
              {requirements_str}
            )
            """
        ),
    )
    # Resolve and return the target just written.
    return self.target(f"{path}:{name}")
def create_resources(self, path, name, *sources):
    """Convenience wrapper: create a `resources` library target.

    :API: public
    """
    return self.create_library(path=path, target_type="resources", name=name, sources=sources,)
def assertUnorderedPrefixEqual(self, expected, actual_iter):
    """Consumes len(expected) items from the given iter, and asserts that they match, unordered.

    :API: public
    """
    consumed = list(itertools.islice(actual_iter, len(expected)))
    self.assertEqual(sorted(expected), sorted(consumed))
def assertPrefixEqual(self, expected, actual_iter):
    """Consumes len(expected) items from the given iter, and asserts that they match, in order.

    :API: public
    """
    consumed = list(itertools.islice(actual_iter, len(expected)))
    self.assertEqual(expected, consumed)
def assertInFile(self, string, file_path):
    """Verifies that a string appears in a file.

    :API: public
    """
    with open(file_path, "r") as f:
        content = f.read()
        self.assertIn(string, content, f'"{string}" is not in the file {f.name}:\n{content}')
@contextmanager
def assertRaisesWithMessage(self, exception_type, error_text):
    """Verifies than an exception message is equal to `error_text`.

    :param type exception_type: The exception type which is expected to be raised within the body.
    :param str error_text: Text that the exception message should match exactly with
                           `self.assertEqual()`.
    :API: public
    """
    with self.assertRaises(exception_type) as caught:
        yield caught
    # Compared only after the body has raised and assertRaises captured it.
    self.assertEqual(error_text, str(caught.exception))
@contextmanager
def assertRaisesWithMessageContaining(self, exception_type, error_text):
    """Verifies that the string `error_text` appears in an exception message.

    :param type exception_type: The exception type which is expected to be raised within the body.
    :param str error_text: Text that the exception message should contain with `self.assertIn()`.
    :API: public
    """
    with self.assertRaises(exception_type) as caught:
        yield caught
    self.assertIn(error_text, str(caught.exception))
@contextmanager
def assertDoesNotRaise(self, exc_class: Type[BaseException] = Exception):
    """Verifies that the block does not raise an exception of the specified type.

    :API: public
    """
    try:
        yield
    except exc_class as e:
        # Re-raise as a test failure, chaining the original for the traceback.
        raise AssertionError(f"section should not have raised, but did: {e}") from e
def get_bootstrap_options(self, cli_options=()):
    """Retrieve bootstrap options, overlaying the given CLI flags.

    :param cli_options: An iterable of CLI flags to pass as arguments to `OptionsBootstrapper`.
    """
    args = ("--pants-config-files=[]", *cli_options)
    return OptionsBootstrapper.create(args=args).bootstrap_options.for_global_scope()
def make_snapshot(self, files):
    """Makes a snapshot from a collection of files.

    :param files: a dictionary, where key=filename, value=file_content where both are of type String.
    :return: a Snapshot.
    """
    with temporary_dir() as temp_dir:
        # Materialize each entry on disk, then capture the whole directory as one snapshot.
        for file_name, content in files.items():
            safe_file_dump(os.path.join(temp_dir, file_name), content)
        return self.scheduler.capture_snapshots(
            (PathGlobsAndRoot(PathGlobs(("**",)), temp_dir),)
        )[0]
def make_snapshot_of_empty_files(self, files: Iterable[str]) -> Snapshot:
    """Makes a snapshot in which every named file exists but is empty.

    A convenience around `TestBase.make_snapshot` for when the content is irrelevant.
    """
    return cast(Snapshot, self.make_snapshot(dict.fromkeys(files, "")))
class LoggingRecorder:
    """Logging handler that records every emitted record for later inspection."""

    def __init__(self):
        self._records = []
        self.level = logging.DEBUG

    def handle(self, record):
        self._records.append(record)

    def _messages_for_level(self, levelname):
        # Format only the records whose level name matches.
        messages = []
        for record in self._records:
            if record.levelname == levelname:
                messages.append(f"{record.name}: {record.getMessage()}")
        return messages

    def infos(self):
        return self._messages_for_level("INFO")

    def warnings(self):
        return self._messages_for_level("WARNING")

    def errors(self):
        return self._messages_for_level("ERROR")
@contextmanager
def captured_logging(self, level=None):
    """Attach a LoggingRecorder to the root logger for the duration of the block.

    :param level: root logger level while capturing; defaults to NOTSET
      (i.e. capture everything).
    """
    root_logger = logging.getLogger()
    old_level = root_logger.level
    root_logger.setLevel(level or logging.NOTSET)
    handler = self.LoggingRecorder()
    root_logger.addHandler(handler)
    try:
        yield handler
    finally:
        # Always restore the previous level and detach the recorder.
        root_logger.setLevel(old_level)
        root_logger.removeHandler(handler)
@contextmanager
def warnings_catcher(self):
    """Capture all warnings issued within the block and yield the recorded list."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        yield caught
def assertWarning(self, w, category, warning_text):
    """Assert that exactly one warning was recorded and matches category and exact text.

    :param w: the list of recorded warnings (e.g. from `warnings_catcher`).
    :param category: expected warning class.
    :param warning_text: expected exact message text.
    """
    single_warning = assert_single_element(w)
    self.assertEqual(single_warning.category, category)
    warning_message = single_warning.message
    self.assertEqual(warning_text, str(warning_message))
def retrieve_single_product_at_target_base(self, product_mapping, target):
    """Return the single product registered for `target`.

    Fails (via assert_single_element) if the mapping holds zero or multiple
    base dirs, or zero or multiple products under that base dir.
    """
    mapping_for_target = product_mapping.get(target)
    single_base_dir = assert_single_element(list(mapping_for_target.keys()))
    single_product = assert_single_element(mapping_for_target[single_base_dir])
    return single_product
def populate_target_dict(self, target_map):
    """Return a dict containing targets with files generated according to `target_map`.

    The keys of `target_map` are target address strings, while the values of `target_map` should be
    a dict which contains keyword arguments fed into `self.make_target()`, along with a few special
    keys. Special keys are:

    - 'key': used to access the target in the returned dict. Defaults to the target address spec.
    - 'filemap': creates files at the specified relative paths to the target.

    An `OrderedDict` of 2-tuples must be used with the targets topologically ordered, if
    they have dependencies on each other. Note that dependency cycles are not currently supported
    with this method.

    :param target_map: Dict mapping each target address to generate -> kwargs for
      `self.make_target()`, along with a 'key' and optionally a 'filemap' argument.
    :return: Dict mapping the required 'key' argument -> target instance for each element of
      `target_map`.
    :rtype: dict
    """
    target_dict = {}
    # Create a target from each specification and insert it into `target_dict`.
    for address_spec, target_kwargs in target_map.items():
        unprocessed_kwargs = target_kwargs.copy()
        target_base = Address.parse(address_spec).spec_path
        # Populate the target's owned files from the specification.
        filemap = unprocessed_kwargs.pop("filemap", {})
        for rel_path, content in filemap.items():
            buildroot_path = os.path.join(target_base, rel_path)
            self.create_file(buildroot_path, content)
        # Ensure any dependencies exist in the target dict (`target_map` must then be an
        # OrderedDict).
        # The 'key' is used to access the target in `target_dict`, and defaults to `target_spec`.
        target_address = Address.parse(address_spec)
        key = unprocessed_kwargs.pop("key", target_address.target_name)
        dep_targets = []
        for dep_spec in unprocessed_kwargs.pop("dependencies", []):
            # NOTE(review): dependencies are resolved via the dependency's own 'key'
            # entry in target_map, so dependency entries must declare one.
            existing_tgt_key = target_map[dep_spec]["key"]
            dep_targets.append(target_dict[existing_tgt_key])
        # Register the generated target.
        generated_target = self.make_target(
            spec=address_spec, dependencies=dep_targets, **unprocessed_kwargs
        )
        target_dict[key] = generated_target
    return target_dict
|
|
#!/usr/bin/env python
# Copyright (C) 2015 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage JJB yaml feature implementation
import copy
import fnmatch
import io
import itertools
import logging
import os
from jenkins_jobs.constants import MAGIC_MANAGE_STRING
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.formatter import deep_format
import jenkins_jobs.local_yaml as local_yaml
from jenkins_jobs import utils
# Public API of this module.
__all__ = [
    "YamlParser"
]

logger = logging.getLogger(__name__)
def matches(what, glob_patterns):
    """Return True if ``what`` matches at least one of the glob patterns.

    :arg str what: String that we want to test if it matches a pattern
    :arg iterable glob_patterns: glob patterns to match (list, tuple, set,
        etc.)
    """
    for pattern in glob_patterns:
        if fnmatch.fnmatch(what, pattern):
            return True
    return False
def combination_matches(combination, match_combinations):
    """Return True if `combination` is matched by any entry in `match_combinations`.

    An entry matches when every key it shares with `combination` carries the
    same value; keys absent from the entry act as wildcards. For example
    (key1=2, key2=3) is matched by (key2=3) but not by (key1=2, key2=2).
    """
    for candidate in match_combinations:
        if all(candidate.get(key, value) == value
               for key, value in combination.items()):
            return True
    return False
class YamlParser(object):
    """Loads JJB YAML definition files and expands projects/templates into
    concrete job and view dictionaries.

    Parsed definitions accumulate in `self.data`, keyed by kind ('job',
    'job-template', 'job-group', 'project', 'defaults', 'view', ...);
    `expandYaml` then fills `self.jobs` and `self.views`.
    """

    def __init__(self, jjb_config=None):
        # NOTE(review): the default of None would fail on the attribute
        # accesses below; callers appear to always supply a config object.
        self.data = {}
        self.jobs = []
        self.views = []
        self.jjb_config = jjb_config
        self.keep_desc = jjb_config.yamlparser['keep_descriptions']
        self.path = jjb_config.yamlparser['include_path']

    def load_files(self, fn):
        """Load YAML definitions from `fn`: a list of file paths, directories
        or file-like objects (a bare single element is accepted but deprecated)."""
        # handle deprecated behavior, and check that it's not a file like
        # object as these may implement the '__iter__' attribute.
        if not hasattr(fn, '__iter__') or hasattr(fn, 'read'):
            logger.warning(
                'Passing single elements for the `fn` argument in '
                'Builder.load_files is deprecated. Please update your code '
                'to use a list as support for automatic conversion will be '
                'removed in a future version.')
            fn = [fn]
        files_to_process = []
        for path in fn:
            if not hasattr(path, 'read') and os.path.isdir(path):
                # A directory contributes every .yml/.yaml file it contains.
                files_to_process.extend([os.path.join(path, f)
                                         for f in os.listdir(path)
                                         if (f.endswith('.yml')
                                             or f.endswith('.yaml'))])
            else:
                files_to_process.append(path)
        # symlinks used to allow loading of sub-dirs can result in duplicate
        # definitions of macros and templates when loading all from top-level
        unique_files = []
        for f in files_to_process:
            if hasattr(f, 'read'):
                unique_files.append(f)
                continue
            rpf = os.path.realpath(f)
            if rpf not in unique_files:
                unique_files.append(rpf)
            else:
                logger.warning("File '%s' already added as '%s', ignoring "
                               "reference to avoid duplicating yaml "
                               "definitions." % (f, rpf))
        for in_file in unique_files:
            # use of ask-for-permissions instead of ask-for-forgiveness
            # performs better when low use cases.
            if hasattr(in_file, 'name'):
                fname = in_file.name
            else:
                fname = in_file
            logger.debug("Parsing YAML file {0}".format(fname))
            if hasattr(in_file, 'read'):
                self._parse_fp(in_file)
            else:
                self.parse(in_file)

    def _parse_fp(self, fp):
        """Parse one YAML stream and merge its definitions into `self.data`.

        :raises JenkinsJobsException: if the top-level collection is not a
            list, an entry is malformed, or a duplicate is found while
            duplicates are disallowed.
        """
        # wrap provided file streams to ensure correct encoding used
        data = local_yaml.load(utils.wrap_stream(fp), search_path=self.path)
        if data:
            if not isinstance(data, list):
                raise JenkinsJobsException(
                    "The topmost collection in file '{fname}' must be a list,"
                    " not a {cls}".format(fname=getattr(fp, 'name', fp),
                                          cls=type(data)))
            for item in data:
                # Each list entry is expected to be a singleton mapping:
                # {kind: definition}.
                cls, dfn = next(iter(item.items()))
                group = self.data.get(cls, {})
                if len(item.items()) > 1:
                    # More than one key means the YAML was indented
                    # incorrectly; report the offending entry by name.
                    n = None
                    for k, v in item.items():
                        if k == "name":
                            n = v
                            break
                    # Syntax error
                    raise JenkinsJobsException("Syntax error, for item "
                                               "named '{0}'. Missing indent?"
                                               .format(n))
                # allow any entry to specify an id that can also be used
                id = dfn.get('id', dfn['name'])
                if id in group:
                    self._handle_dups(
                        "Duplicate entry found in '{0}: '{1}' already "
                        "defined".format(fp.name, id))
                group[id] = dfn
                self.data[cls] = group

    def parse(self, fn):
        """Parse the YAML file at path `fn`."""
        with io.open(fn, 'r', encoding='utf-8') as fp:
            self._parse_fp(fp)

    def _handle_dups(self, message):
        """Log a duplicate-definition message; raise unless duplicates are allowed."""
        if not self.jjb_config.yamlparser['allow_duplicates']:
            logger.error(message)
            raise JenkinsJobsException(message)
        else:
            logger.warning(message)

    def _getJob(self, name):
        """Return the named job with defaults applied, or None."""
        job = self.data.get('job', {}).get(name, None)
        if not job:
            return job
        return self._applyDefaults(job)

    def _getJobGroup(self, name):
        """Return the named job-group definition, or None."""
        return self.data.get('job-group', {}).get(name, None)

    def _getJobTemplate(self, name):
        """Return the named job-template with defaults applied, or None."""
        job = self.data.get('job-template', {}).get(name, None)
        if not job:
            return job
        return self._applyDefaults(job)

    def _applyDefaults(self, data, override_dict=None):
        """Merge `data` over its declared defaults set ('global' if unset).

        Keys present in `override_dict` replace same-named keys in the
        defaults before `data` itself is layered on top.
        """
        if override_dict is None:
            override_dict = {}
        whichdefaults = data.get('defaults', 'global')
        defaults = copy.deepcopy(self.data.get('defaults',
                                 {}).get(whichdefaults, {}))
        if defaults == {} and whichdefaults != 'global':
            raise JenkinsJobsException("Unknown defaults set: '{0}'"
                                       .format(whichdefaults))
        for key in override_dict.keys():
            if key in defaults.keys():
                defaults[key] = override_dict[key]
        newdata = {}
        newdata.update(defaults)
        newdata.update(data)
        return newdata

    def _formatDescription(self, job):
        """Append the managed-job marker to the job/view description (if any)."""
        if self.keep_desc:
            # Leave the description untouched when it is absent.
            description = job.get("description", None)
        else:
            description = job.get("description", '')
        if description is not None:
            job["description"] = description + \
                self._get_managed_string().lstrip()

    def expandYaml(self, registry, jobs_glob=None):
        """Expand all loaded definitions into concrete jobs and views.

        :param registry: module registry; modules with `handle_data` may
            pre-process `self.data` until a fixed point is reached.
        :param jobs_glob: optional iterable of glob patterns restricting
            which jobs are emitted.
        :return: tuple of (jobs, views).
        """
        changed = True
        while changed:
            changed = False
            for module in registry.modules:
                if hasattr(module, 'handle_data'):
                    if module.handle_data(self.data):
                        changed = True
        # Plain job definitions.
        for job in self.data.get('job', {}).values():
            if jobs_glob and not matches(job['name'], jobs_glob):
                logger.debug("Ignoring job {0}".format(job['name']))
                continue
            logger.debug("Expanding job '{0}'".format(job['name']))
            job = self._applyDefaults(job)
            self._formatDescription(job)
            self.jobs.append(job)
        for view in self.data.get('view', {}).values():
            logger.debug("Expanding view '{0}'".format(view['name']))
            self._formatDescription(view)
            self.views.append(view)
        # Projects reference jobs, job-groups and job-templates by name.
        for project in self.data.get('project', {}).values():
            logger.debug("Expanding project '{0}'".format(project['name']))
            # use a set to check for duplicate job references in projects
            seen = set()
            for jobspec in project.get('jobs', []):
                if isinstance(jobspec, dict):
                    # Singleton dict containing dict of job-specific params
                    jobname, jobparams = next(iter(jobspec.items()))
                    if not isinstance(jobparams, dict):
                        jobparams = {}
                else:
                    jobname = jobspec
                    jobparams = {}
                job = self._getJob(jobname)
                if job:
                    # Just naming an existing defined job
                    if jobname in seen:
                        self._handle_dups("Duplicate job '{0}' specified "
                                          "for project '{1}'"
                                          .format(jobname, project['name']))
                    seen.add(jobname)
                    continue
                # see if it's a job group
                group = self._getJobGroup(jobname)
                if group:
                    for group_jobspec in group['jobs']:
                        if isinstance(group_jobspec, dict):
                            group_jobname, group_jobparams = \
                                next(iter(group_jobspec.items()))
                            if not isinstance(group_jobparams, dict):
                                group_jobparams = {}
                        else:
                            group_jobname = group_jobspec
                            group_jobparams = {}
                        job = self._getJob(group_jobname)
                        if job:
                            if group_jobname in seen:
                                self._handle_dups(
                                    "Duplicate job '{0}' specified for "
                                    "project '{1}'".format(group_jobname,
                                                           project['name']))
                            seen.add(group_jobname)
                            continue
                        template = self._getJobTemplate(group_jobname)
                        # Allow a group to override parameters set by a project
                        d = type(project)(project)
                        d.update(jobparams)
                        d.update(group)
                        d.update(group_jobparams)
                        # Except name, since the group's name is not useful
                        d['name'] = project['name']
                        if template:
                            self._expandYamlForTemplateJob(d, template,
                                                           jobs_glob)
                    continue
                # see if it's a template
                template = self._getJobTemplate(jobname)
                if template:
                    d = type(project)(project)
                    d.update(jobparams)
                    self._expandYamlForTemplateJob(d, template, jobs_glob)
                else:
                    raise JenkinsJobsException("Failed to find suitable "
                                               "template named '{0}'"
                                               .format(jobname))
        # check for duplicate generated jobs
        seen = set()
        # walk the list in reverse so that last definition wins
        for job in self.jobs[::-1]:
            if job['name'] in seen:
                self._handle_dups("Duplicate definitions for job '{0}' "
                                  "specified".format(job['name']))
                self.jobs.remove(job)
            seen.add(job['name'])
        return self.jobs, self.views

    def _expandYamlForTemplateJob(self, project, template, jobs_glob=None):
        """Expand `template` for one project over the cartesian product of its
        list-valued axis parameters, appending each result to `self.jobs`."""
        dimensions = []
        template_name = template['name']
        # reject keys that are not useful during yaml expansion
        for k in ['jobs']:
            project.pop(k)
        excludes = project.pop('exclude', [])
        for (k, v) in project.items():
            # Only parameters referenced in the template name act as axes.
            tmpk = '{{{0}}}'.format(k)
            if tmpk not in template_name:
                continue
            if type(v) == list:
                dimensions.append(zip([k] * len(v), v))
        # XXX somewhat hackish to ensure we actually have a single
        # pass through the loop
        if len(dimensions) == 0:
            dimensions = [(("", ""),)]
        for values in itertools.product(*dimensions):
            params = copy.deepcopy(project)
            params = self._applyDefaults(params, template)
            try:
                expanded_values = {}
                for (k, v) in values:
                    if isinstance(v, dict):
                        # A dict axis value: its single key is the value,
                        # and its payload contributes extra variables.
                        inner_key = next(iter(v))
                        expanded_values[k] = inner_key
                        expanded_values.update(v[inner_key])
                    else:
                        expanded_values[k] = v
            except TypeError:
                project_name = project.pop('name')
                logger.error(
                    "Exception thrown while expanding template '%s' for "
                    "project '%s', with expansion arguments of:\n%s\n"
                    "Original project input variables for template:\n%s\n"
                    "Most likely the inputs have items indented incorrectly "
                    "to describe how they should be applied.\n\nNote yaml "
                    "'null' is mapped to python's 'None'", template_name,
                    project_name,
                    "".join(local_yaml.dump({k: v}, default_flow_style=False)
                            for (k, v) in values),
                    local_yaml.dump(project, default_flow_style=False))
                raise
            params.update(expanded_values)
            params = deep_format(params, params)
            if combination_matches(params, excludes):
                logger.debug('Excluding combination %s', str(params))
                continue
            for key in template.keys():
                if key not in params:
                    params[key] = template[key]
            params['template-name'] = template_name
            expanded = deep_format(
                template, params,
                self.jjb_config.yamlparser['allow_empty_variables'])
            job_name = expanded.get('name')
            if jobs_glob and not matches(job_name, jobs_glob):
                continue
            self._formatDescription(expanded)
            self.jobs.append(expanded)

    def _get_managed_string(self):
        # The \n\n is not hard coded, because they get stripped if the
        # project does not otherwise have a description.
        return "\n\n" + MAGIC_MANAGE_STRING
|
|
# coding=utf-8
import re
from datetime import datetime, timedelta
from uuid import uuid4
from multiprocessing import Queue, freeze_support
import os
from flask import Flask, render_template, request, url_for, session, g
from flask.json import jsonify
from flask_login import LoginManager, login_user, login_required, logout_user
from flask_debugtoolbar import DebugToolbarExtension
from werkzeug.utils import redirect
from wsgi.db import Storage
from wsgi.engine import reddit_get_new, reddit_search, Retriever
from wsgi.processes import SubredditProcessWorker, SubredditUpdater, PostUpdater, update_stored_posts
from wsgi.properties import SRC_SEARCH, SRC_OBSERV, logger, default_time_min, test_mode
from wsgi.wake_up import WakeUp, WakeUpStorage
__author__ = '4ikist'

log = logger.getChild("web")
cur_dir = os.path.dirname(__file__)
app = Flask("rr", template_folder=cur_dir + "/templates", static_folder=cur_dir + "/static")
# SECURITY(review): secret keys are hard-coded in source; they should come
# from configuration or the environment.
app.secret_key = 'fooooooo'
app.config['SESSION_TYPE'] = 'filesystem'
if os.environ.get("test", False):
    log.info("will run at test mode")
    # NOTE(review): the debug/toolbar settings appear to belong under the
    # test-mode branch — confirm against the original (pre-paste) indentation.
    app.config["SECRET_KEY"] = "foooo"
    app.debug = True
    app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
    toolbar = DebugToolbarExtension(app)

login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'

db = Storage("server")
class User(object):
    """Minimal in-memory user model exposing the flask-login user interface."""

    def __init__(self, name, pwd):
        # uuid4().hex is portable: the previous uuid4().get_hex() accessor
        # exists only on Python 2 and was removed in Python 3.
        self.id = str(uuid4().hex)
        self.auth = False
        self.active = False
        self.anonymous = False
        self.name = name
        self.pwd = pwd

    def is_authenticated(self):
        # True only after a successful UsersHandler.auth_user() call.
        return self.auth

    def is_active(self):
        # NOTE(review): always True regardless of self.active — confirm intended.
        return True

    def is_anonymous(self):
        # NOTE(review): always False, even for the guest user whose
        # `anonymous` attribute is set to True — confirm intended.
        return False

    def get_id(self):
        return self.id
class UsersHandler(object):
    """In-memory cache of User objects backed by the module-level `db` storage."""

    def __init__(self):
        self.users = {}  # user_id -> User cache
        self.auth_users = {}  # NOTE(review): appears unused within this file.

    def get_guest(self):
        """Create, cache and return an anonymous guest user."""
        user = User("Guest", "")
        user.anonymous = True
        self.users[user.id] = user
        return user

    def get_by_id(self, id):
        """Return the cached user for `id`, falling back to the database."""
        found = self.users.get(id)
        if not found:
            found = db.users.find_one({"user_id": id})
            if found:
                # Rehydrate a User from the stored document and cache it.
                user = User(found.get('name'), found.get("pwd"))
                user.id = found.get("user_id")
                self.users[user.id] = user
                found = user
        return found

    def auth_user(self, name, pwd):
        """Check credentials against the database; mark, cache and return the
        user on success, otherwise implicitly return None."""
        authed = db.check_user(name, pwd)
        if authed:
            user = self.get_by_id(authed)
            if not user:
                user = User(name, pwd)
                user.id = authed
            user.auth = True
            user.active = True
            self.users[user.id] = user
            return user

    def logout(self, user):
        """Clear the user's auth flags (the cache entry is kept)."""
        user.auth = False
        user.active = False
        self.users[user.id] = user

    def add_user(self, user):
        """Cache the user and persist it to the database."""
        self.users[user.id] = user
        db.add_user(user.name, user.pwd, user.id)
usersHandler = UsersHandler()
log.info("users handler was initted")
# SECURITY(review): hard-coded admin credentials committed to source;
# these should be moved to configuration or the environment.
usersHandler.add_user(User("3030", "89231950908zozo"))
@app.before_request
def load_user():
    """Attach the session's user (or a fresh guest) to flask.g for each request.

    NOTE(review): this module-level name is immediately shadowed by the
    `load_user` user-loader defined below. Both callbacks are still
    registered via their decorators, so behavior is unaffected, but only
    the later definition is reachable by name.
    """
    if session.get("user_id"):
        user = usersHandler.get_by_id(session.get("user_id"))
    else:
        # user = None
        user = usersHandler.get_guest()
    g.user = user
@login_manager.user_loader
def load_user(userid):
    """flask-login user loader: resolve a session user id to a User object."""
    return usersHandler.get_by_id(userid)
@login_manager.unauthorized_handler
def unauthorized_callback():
    """Send unauthenticated visitors to the login page."""
    return redirect(url_for('login'))
@app.route("/login", methods=["GET", "POST"])
def login():
if request.method == "POST":
login = request.form.get("name")
password = request.form.get("password")
remember_me = request.form.get("remember") == u"on"
user = usersHandler.auth_user(login, password)
if user:
try:
login_user(user, remember=remember_me)
return redirect(url_for("main"))
except Exception as e:
log.exception(e)
return render_template("login.html")
@app.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the login page."""
    logout_user()
    return redirect(url_for('login'))
# Cross-process queues shared with the background workers: tq carries task
# requests; rq presumably carries results — confirm against wsgi.processes.
rq = Queue()
tq = Queue()
@app.route("/subreddit/add", methods=['POST'])
@login_required
def add_subreddit():
name = request.form.get("name") or "funny"
params = {}
params['rate_min'] = int(request.form.get("rate_min") or 0)
params['rate_max'] = int(request.form.get("rate_max") or 99999)
params['reposts_max'] = int(request.form.get("reposts_max") or 10)
params['lrtime'] = int(request.form.get("lrtime") or 1800)
params['time_min'] = request.form.get("time_min") or default_time_min
log.info("Add %s with params: \n%s" % (name, "\n".join(["%s : %s" % (k, v) for k, v in params.iteritems()])))
db.add_subreddit(name, params, params['lrtime'])
try:
tq.put({"name": name})
except Exception as e:
log.exception(e)
return redirect(url_for('main'))
@app.route("/subreddit/add_to_queue/<name>", methods=["POST"])
@login_required
def add_to_queue(name):
tq.put({"name": name})
return jsonify(**{"ok": True})
@app.route("/subreddit/del", methods=["POST"])
@login_required
def del_subreddit():
name = request.form.get("name")
db.subreddits.delete_one({'name': name})
db.restart_statistic_cache()
return redirect(url_for('main'))
@app.route("/subbredit/info/<name>", methods=["GET"])
@login_required
def info_subreddit(name):
user = g.user
posts = db.get_posts_of_subreddit(name)
sbrdt_info = db.get_subreddists_statistic()[name]
return render_template("subbredit_info.html", **{"username": user.name,
"posts": posts,
"el": sbrdt_info, })
@app.route("/post/del/<fullname>/<video_id>", methods=["GET"])
@login_required
def del_post(fullname, video_id):
db.delete_post(fullname, video_id)
return jsonify(**{"ok": True, "updated":{"deleted":True}})
@app.route("/post/update/<fullname>/<video_id>", methods=["GET"])
@login_required
def update_post(fullname, video_id):
found = db.get_post(fullname, video_id)
if found:
update_stored_posts(db, [found])
found = db.get_post(fullname, video_id)
return jsonify(**{"ok": True, "updated": found})
return jsonify(**{"ok": False, "detail": "Post %s %s not found" % (fullname, video_id)})
@app.route("/", methods=["GET"])
@login_required
def main():
user = g.user
result = db.get_subreddists_statistic()
search_results_names = db.get_search_results_names()
return render_template("main.html", **{"username": user.name,
"result": result,
"search_results_names": search_results_names,
"go": True})
@app.route("/chart/<name>", methods=["GET"])
@login_required
def get_chart_data(name):
loaded = db.get_posts_of_subreddit(name, SRC_OBSERV)
loaded_fns = set(map(lambda x: x.get("fullname"), loaded))
all = db.get_raw_posts(name)
if not all:
all = reddit_get_new(name)
db.add_raw_posts(name, all)
first_element = all[-1]
fe_time = first_element.get("created_utc")
sbrdt = db.get_subreddit(name)
sbrdt_params = sbrdt.get("params")
all = filter(lambda x: x.get("video_id") is not None, all)
all = filter(
lambda x: x.get("ups") >= sbrdt_params.get("rate_min") and x.get("ups") <= sbrdt_params.get("rate_max"),
all)
all = filter(lambda x: x.get("fullname") not in loaded_fns, all)
search = db.get_posts_of_subreddit(name, SRC_SEARCH)
def post_chart_data(post):
return [int(post.get("created_utc") - fe_time), post.get("ups")]
def post_comments_data(post):
return [int(post.get("created_utc") - fe_time), post.get("comments_count")]
def post_copies_data(post):
return [int(post.get("created_utc") - fe_time), post.get("reposts_count")]
def get_info(posts):
return [(int(post.get("created_utc") - fe_time), "%s\n%s" % (post.get("fullname"), post.get("video_id"))) for
post in posts if post.get("created_utc")]
info = dict(get_info(all), **dict(get_info(loaded)))
data = {"series": [
{"label": "loaded", "data": [post_chart_data(post) for post in loaded]},
{"label": "all", "data": [post_chart_data(post) for post in all]},
{"label": SRC_SEARCH, "data": [post_chart_data(post) for post in search]}
],
"series_prms": [
{"label": "comment_counts", "data": [post_comments_data(post) for post in loaded + search]},
{"label": "copies_count", "data": [post_comments_data(post) for post in loaded + search]},
],
"info": info}
return jsonify(**data)
@app.route("/experiment/search", methods=["GET", "POST"])
@login_required
def ex_search():
if request.method == "POST":
q = request.form.get("q")
result = reddit_search(q)
if len(result):
return render_template("ex_search.html",
**{"heads": result[0].keys(), "posts": result, "content_present": True,
"count": len(result)})
return render_template("ex_search.html", **{"content_present": False})
@app.route("/search/result/<name>", methods=["GET"])
@login_required
def search_result(name):
prms = db.get_search_params(name)
if not prms:
return redirect(url_for('main'))
p, s = prms
posts = db.get_posts_of_subreddit(name, SRC_SEARCH)
for post in posts:
if not post.get("reddit_url"):
post['reddit_url'] = "http://reddit.com/" + post.get("fullname")
p['words'] = ", ".join(p.get('words', []))
p['before'] = p.get('before', datetime.utcnow()).strftime("%d/%m/%Y")
count = len(posts)
return render_template("search.html", **{"params": p, "statistic": s, "posts": posts, "content_present": count > 0,
"count": count, "name": name})
@app.route("/search/load", methods=["POST"])
@login_required
def search_load():
params = {}
params['name'] = name = request.form.get("name")
if not name:
return jsonify(**{"ok": False, "detail": "name required"})
params['rate_min'] = int(request.form.get("rate_min") or 0)
params['rate_max'] = int(request.form.get("rate_max") or 99999)
params['reposts_max'] = int(request.form.get("reposts_max") or 10)
params['time_min'] = request.form.get("time_min") or default_time_min
before_raw = request.form.get("before")
if before_raw and len(before_raw):
before = datetime.strptime(before_raw, "%d/%m/%Y")
else:
before = datetime.utcnow() - timedelta(days=30)
words_raw = str(request.form.get("words"))
params['before'] = before
params['words'] = words = re.split("[;,:\.]\s?", words_raw)
db.add_search_params(name, params, {})
log.info("will search for %s before %s \nwith params:%s" % (name, before, params))
video_ids = set()
all_posts = []
for word in words:
query = "site:youtube.com title:%s subreddit:%s" % (word, name)
log.info("Start search: %s" % query)
posts = reddit_search(query)
posts = filter(
lambda x: (before - x.get("created_dt")).total_seconds() > 0 and x.get("video_id") not in video_ids,
posts)
cur_v_ids = set(map(lambda x: x.get("video_id"), posts))
difference = cur_v_ids.difference(video_ids)
difference = filter(
lambda x: not db.is_post_video_id_present(x), difference
)
log.info("New posts: %s" % len(difference))
if difference:
map(lambda x: video_ids.add(x), difference)
all_posts.extend([el for el in posts if el['video_id'] in difference])
elif len(video_ids) > 0:
break
log.info("will process %s posts..." % len(all_posts))
rtrv = Retriever()
for post in rtrv.process_subreddit(all_posts, params):
db.save_post(post, SRC_SEARCH)
db.add_search_params(name, params, rtrv.statistic)
return jsonify(**{"ok": True, "name": name})
if not test_mode:
    # Start the background daemons: subreddit processing, subreddit and post
    # updaters, plus the keep-alive pinger.
    spw = SubredditProcessWorker(tq, rq, db)
    spw.daemon = True
    spw.start()
    su = SubredditUpdater(tq, db)
    su.daemon = True
    su.start()
    pu = PostUpdater(db)
    pu.daemon = True
    pu.start()
    wu = WakeUp()
    wu.daemon = True
    wu.start()

# NOTE(review): placed at module level since wake_up_manage below uses it
# unconditionally — confirm against the original (pre-paste) indentation.
ws = WakeUpStorage('server')
@app.route("/wake_up/<salt>", methods=["POST"])
def wake_up(salt):
log.info("wake up from %s" % request.remote_addr)
return jsonify(**{"result": salt})
@app.route("/wake_up", methods=["GET", "POST"])
def wake_up_manage():
if request.method == "POST":
urls = request.form.get("urls")
urls = urls.split("\n")
for url in urls:
url = url.strip()
if url:
ws.add_url(url)
urls = ws.get_urls()
return render_template("wake_up.html", **{"urls": urls})
if __name__ == '__main__':
    freeze_support()
    # print() call form works on both Python 2 and 3; the original
    # `print x` statement is Python 2 only.
    print(os.path.dirname(__file__))
    app.run(port=65010)
|
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import textwrap
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, cast
from pants.core.util_rules import archive
from pants.core.util_rules.archive import ExtractedArchive
from pants.engine.fs import Digest, DownloadFile, FileDigest
from pants.engine.platform import Platform
from pants.engine.rules import Get, collect_rules, rule
from pants.option.subsystem import Subsystem
from pants.util.logging import LogLevel
from pants.util.meta import classproperty
class UnknownVersion(Exception):
    """Raised by get_request when no known-version entry matches the requested version/platform."""
class ExternalToolError(Exception):
    """Raised when an external tool's configuration cannot be resolved to a download."""
@dataclass(frozen=True)
class ExternalToolRequest:
    """A request to download an external tool plus the exe path inside the result."""

    # The file to fetch: URL with expected digest.
    download_file_request: DownloadFile
    # Relative path to the executable once downloaded (and extracted, if an archive).
    exe: str
@dataclass(frozen=True)
class DownloadedExternalTool:
    """A downloaded external tool: its digest and the exe path within it."""

    digest: Digest
    exe: str
class ExternalTool(Subsystem, metaclass=ABCMeta):
    """Configuration for an invocable tool that we download from an external source.

    Subclass this to configure a specific tool.

    Idiomatic use:

    class MyExternalTool(ExternalTool):
        options_scope = "my-external-tool"
        default_version = "1.2.3"
        default_known_versions = [
            "1.2.3|darwin|deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef|222222",
            "1.2.3|linux |1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd|333333",
        ]

        def generate_url(self, plat: Platform) -> str:
            ...

        def generate_exe(self, plat: Platform) -> str:
            return "./path-to/binary"

    @rule
    async def my_rule(my_external_tool: MyExternalTool) -> Foo:
        downloaded_tool = await Get(
            DownloadedExternalTool,
            ExternalToolRequest,
            my_external_tool.get_request(Platform.current)
        )
        ...
    """

    # The default values for --version and --known-versions.
    # Subclasses must set appropriately.
    default_version: str
    default_known_versions: List[str]

    @classproperty
    def name(cls):
        """The name of the tool, for use in user-facing messages.

        Derived from the classname, but subclasses can override, e.g., with a classproperty.
        """
        return cls.__name__.lower()

    @classmethod
    def register_options(cls, register):
        """Register the --version and --known-versions options."""
        super().register_options(register)
        register(
            "--version",
            type=str,
            default=cls.default_version,
            advanced=True,
            help=f"Use this version of {cls.name}.",
        )
        # NOTE(review): "emmitted" below is a typo in user-facing help text;
        # left as-is here since this is a documentation-only change.
        help_str = textwrap.dedent(
            f"""
            Known versions to verify downloads against.
            Each element is a pipe-separated string of `version|platform|sha256|length`, where:
            - `version` is the version string
            - `platform` is one of [{','.join(Platform.__members__.keys())}],
            - `sha256` is the 64-character hex representation of the expected sha256
            digest of the download file, as emitted by `shasum -a 256`
            - `length` is the expected length of the download file in bytes, as emmitted by
            `wc -c`
            E.g., `3.1.2|darwin|6d0f18cd84b918c7b3edd0203e75569e0c7caecb1367bbbe409b44e28514f5be|42813`.
            Values are space-stripped, so pipes can be indented for readability if necessary.
            """
        )
        # Note that you can compute the length and sha256 conveniently with:
        #   `curl -L $URL | tee >(wc -c) >(shasum -a 256) >/dev/null`
        register(
            "--known-versions",
            type=list,
            member_type=str,
            default=cls.default_known_versions,
            advanced=True,
            help=help_str,
        )

    @property
    def version(self) -> str:
        # The configured --version value.
        return cast(str, self.options.version)

    @property
    def known_versions(self) -> Tuple[str, ...]:
        # The configured --known-versions entries.
        return tuple(self.options.known_versions)

    @abstractmethod
    def generate_url(self, plat: Platform) -> str:
        """Returns the URL for the given version of the tool, runnable on the given os+arch.

        os and arch default to those of the current system.

        Implementations should raise ExternalToolError if they cannot resolve the arguments
        to a URL. The raised exception need not have a message - a sensible one will be generated.
        """
        pass

    def generate_exe(self, plat: Platform) -> str:
        """Returns the path to the tool executable.

        If the downloaded artifact is the executable itself, you can leave this unimplemented.

        If the downloaded artifact is an archive, this should be overridden to provide a
        relative path in the downloaded archive, e.g. `./bin/protoc`.
        """
        # Default: assume the download's basename is the executable itself.
        return f"./{self.generate_url(plat).rsplit('/', 1)[-1]}"

    def get_request(self, plat: Platform) -> ExternalToolRequest:
        """Generate a request for this tool.

        :raises ExternalToolError: on a malformed --known-versions entry or a
            URL/exe generation failure for a matching entry.
        :raises UnknownVersion: when no entry matches version + platform.
        """
        for known_version in self.known_versions:
            try:
                # Unpacking the generator raises ValueError if the entry does
                # not have exactly four pipe-separated fields.
                ver, plat_val, sha256, length = (x.strip() for x in known_version.split("|"))
            except ValueError:
                raise ExternalToolError(
                    f"Bad value for --known-versions (see {self.options.pants_bin_name} "
                    f"help-advanced {self.options_scope}): {known_version}"
                )
            if plat_val == plat.value and ver == self.version:
                digest = FileDigest(fingerprint=sha256, serialized_bytes_length=int(length))
                try:
                    url = self.generate_url(plat)
                    exe = self.generate_exe(plat)
                except ExternalToolError as e:
                    raise ExternalToolError(
                        f"Couldn't find {self.name} version {self.version} on {plat.value}"
                    ) from e
                return ExternalToolRequest(DownloadFile(url=url, expected_digest=digest), exe)
        raise UnknownVersion(
            f"No known version of {self.name} {self.version} for {plat.value} found in "
            f"{self.known_versions}"
        )
class TemplatedExternalTool(ExternalTool):
    """Extends the ExternalTool to allow url templating for custom/self-hosted source.

    In addition to ExternalTool functionalities, it is needed to set, e.g.:

        default_url_template = "https://tool.url/{version}/{platform}-mytool.zip"
        default_url_platform_mapping = {
            "darwin": "osx",
            "linux": "linux",
        }

    The platform mapping dict is optional.
    """

    # Subclasses must set this; it may reference {version} and {platform}.
    default_url_template: str
    # Optional map from Platform.value to the string substituted for {platform}.
    default_url_platform_mapping: Optional[Dict[str, str]] = None

    @classmethod
    def register_options(cls, register):
        super().register_options(register)
        register(
            "--url-template",
            type=str,
            default=cls.default_url_template,
            advanced=True,
            help=(
                "URL to download the tool, either as a single binary file or a compressed file "
                "(e.g. zip file). You can change this to point to your own hosted file, e.g. to "
                "work with proxies or for access via the filesystem through a file:// URL.\n\nUse "
                "`{version}` to have the value from --version substituted, and `{platform}` to "
                "have a value from --url-platform-mapping substituted in, depending on the "
                "current platform. For example, "
                "https://github.com/.../protoc-{version}-{platform}.zip."
            ),
        )
        register(
            "--url-platform-mapping",
            type=dict,
            default=cls.default_url_platform_mapping,
            advanced=True,
            help=(
                "A dictionary mapping platforms to strings to be used when generating the URL "
                "to download the tool.\n\nIn --url-template, anytime the `{platform}` string is "
                "used, Pants will determine the current platform, and substitute `{platform}` with "
                "the respective value from your dictionary.\n\nFor example, if you define "
                '`{"darwin": "apple-darwin", "linux": "unknown-linux"}`, and run Pants on '
                "Linux, then `{platform}` will be substituted in the --url-template option with "
                "unknown-linux."
            ),
        )

    @property
    def url_template(self) -> str:
        """The configured --url-template string."""
        return cast(str, self.options.url_template)

    @property
    def url_platform_mapping(self) -> Optional[Dict[str, str]]:
        """The configured --url-platform-mapping dict, or None if unset."""
        return cast(Optional[Dict[str, str]], self.options.url_platform_mapping)

    def generate_url(self, plat: Platform) -> str:
        """Interpolate the URL template with the version and the mapped platform string.

        Raises ExternalToolError (per the ExternalTool.generate_url contract) if the
        current platform has no entry in --url-platform-mapping.
        """
        platform = ""
        if self.url_platform_mapping:
            try:
                platform = self.url_platform_mapping[plat.value]
            except KeyError as e:
                # Previously a bare KeyError escaped here; the documented contract is to
                # raise ExternalToolError so that get_request() can wrap it usefully.
                raise ExternalToolError(
                    f"No --url-platform-mapping entry for platform {plat.value!r}"
                ) from e
        return self.url_template.format(version=self.version, platform=platform)
@rule(level=LogLevel.DEBUG)
async def download_external_tool(request: ExternalToolRequest) -> DownloadedExternalTool:
    """Download the requested tool, extract it if it is an archive, and expose its exe path."""
    downloaded_digest = await Get(Digest, DownloadFile, request.download_file_request)
    # ExtractedArchive is a no-op pass-through for non-archive downloads.
    extracted = await Get(ExtractedArchive, Digest, downloaded_digest)
    return DownloadedExternalTool(extracted.digest, request.exe)
def rules():
    """Return this module's rules plus the archive-extraction rules they depend on."""
    combined = list(collect_rules())
    combined.extend(archive.rules())
    return tuple(combined)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-09-01".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        """Store the service client, configuration, and msrest (de)serializers."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Constant API version sent as the api-version query parameter on every request.
        self.api_version = "2017-09-01"
        self.config = config
    def _create_or_update_initial(
            self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Issue the initial PUT of the create-or-update long-running operation.

        Returns the deserialized VirtualNetworkGateway (status 200/201), or a
        ClientRawResponse when raw=True; raises CloudError on any other status.
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        # 200 = updated existing gateway, 201 = created; same body shape either way.
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkGateway', response)
        if response.status_code == 201:
            deserialized = self._deserialize('VirtualNetworkGateway', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates or updates a virtual network gateway in the specified resource
        group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param parameters: Parameters supplied to create or update virtual
         network gateway operation.
        :type parameters:
         ~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns VirtualNetworkGateway
         or ClientRawResponse<VirtualNetworkGateway> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the LRO; raw=True so the poller receives the raw initial response.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('VirtualNetworkGateway', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # polling may be True (ARM default strategy), False (single shot), or a custom object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
    def get(
            self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Gets the specified virtual network gateway by resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VirtualNetworkGateway or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkGateway', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
    def _delete_initial(
            self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Issue the initial DELETE of the delete long-running operation.

        Returns None (or ClientRawResponse when raw=True); raises CloudError on
        any status other than 200/202/204.
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes the specified virtual network gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the LRO; raw=True so the poller receives the raw initial response.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Delete has no body to deserialize; implicitly returns None unless raw.
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # polling may be True (ARM default strategy), False (single shot), or a custom object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
    def _update_tags_initial(
            self, resource_group_name, virtual_network_gateway_name, tags=None, custom_headers=None, raw=False, **operation_config):
        """Issue the initial PATCH of the update-tags long-running operation.

        Wraps the optional tags dict in a TagsObject body. Returns the deserialized
        VirtualNetworkGateway (status 200) or ClientRawResponse when raw=True.
        """
        parameters = models.TagsObject(tags=tags)
        # Construct URL
        url = self.update_tags.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'TagsObject')
        # Construct and send request
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkGateway', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def update_tags(
            self, resource_group_name, virtual_network_gateway_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
        """Updates a virtual network gateway tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param tags: Resource tags.
        :type tags: dict[str, str]
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns VirtualNetworkGateway
         or ClientRawResponse<VirtualNetworkGateway> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the LRO; raw=True so the poller receives the raw initial response.
        raw_result = self._update_tags_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            tags=tags,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('VirtualNetworkGateway', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # polling may be True (ARM default strategy), False (single shot), or a custom object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all virtual network gateways by resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of VirtualNetworkGateway
        :rtype:
         ~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayPaged[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Page fetcher invoked lazily by the Paged iterator; next_link is the
            # server-provided continuation URL (None for the first page).
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Continuation links already embed their query string.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'}
    def list_connections(
            self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Gets all the connections in a virtual network gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of
         VirtualNetworkGatewayConnectionListEntity
        :rtype:
         ~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnectionListEntityPaged[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnectionListEntity]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Page fetcher invoked lazily by the Paged iterator; next_link is the
            # server-provided continuation URL (None for the first page).
            if not next_link:
                # Construct URL
                url = self.list_connections.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Continuation links already embed their query string.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.VirtualNetworkGatewayConnectionListEntityPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.VirtualNetworkGatewayConnectionListEntityPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/connections'}
    def _reset_initial(
            self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, **operation_config):
        """Issue the initial POST of the reset long-running operation.

        gateway_vip is only sent when provided (active-active gateways). Returns
        the deserialized VirtualNetworkGateway on 200 (202 yields None), or
        ClientRawResponse when raw=True.
        """
        # Construct URL
        url = self.reset.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if gateway_vip is not None:
            query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkGateway', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def reset(
            self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, polling=True, **operation_config):
        """Resets the primary of the virtual network gateway in the specified
        resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param gateway_vip: Virtual network gateway vip address supplied to
         the begin reset of the active-active feature enabled gateway.
        :type gateway_vip: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns VirtualNetworkGateway
         or ClientRawResponse<VirtualNetworkGateway> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the LRO; raw=True so the poller receives the raw initial response.
        raw_result = self._reset_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            gateway_vip=gateway_vip,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('VirtualNetworkGateway', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # polling may be True (ARM default strategy), False (single shot), or a custom object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'}
    def _generatevpnclientpackage_initial(
            self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Issue the initial POST of the generate-VPN-client-package operation.

        Returns the package URL as a plain string on 200, or ClientRawResponse
        when raw=True.
        """
        # Construct URL
        url = self.generatevpnclientpackage.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'VpnClientParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('str', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def generatevpnclientpackage(
            self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Generates VPN client package for P2S client of the virtual network
        gateway in the specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param parameters: Parameters supplied to the generate virtual network
         gateway VPN client package operation.
        :type parameters:
         ~azure.mgmt.network.v2017_09_01.models.VpnClientParameters
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns str or
         ClientRawResponse<str> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the LRO; raw=True so the poller receives the raw initial response.
        raw_result = self._generatevpnclientpackage_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # The final result is the package URL, deserialized as a plain string.
            deserialized = self._deserialize('str', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # polling may be True (ARM default strategy), False (single shot), or a custom object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    generatevpnclientpackage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'}
def _generate_vpn_profile_initial(
        self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
    # Internal helper: sends the initial POST of the long-running
    # "generate VPN profile" operation and returns the first response.
    # Returns the deserialized 200 body (a str) or None on 202 Accepted;
    # the public generate_vpn_profile() wrapper polls to completion.

    # Construct URL
    url = self.generate_vpn_profile.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for server-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(parameters, 'VpnClientParameters')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    if response.status_code not in [200, 202]:
        # Any other status is surfaced as a CloudError carrying the
        # service's request id for support/diagnostics.
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('str', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def generate_vpn_profile(
        self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Generates VPN profile for P2S client of the virtual network gateway in
    the specified resource group. Used for IKEV2 and radius based
    authentication.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network
     gateway.
    :type virtual_network_gateway_name: str
    :param parameters: Parameters supplied to the generate virtual network
     gateway VPN client package operation.
    :type parameters:
     ~azure.mgmt.network.v2017_09_01.models.VpnClientParameters
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns str or
     ClientRawResponse<str> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the LRO; raw=True so the poller sees the raw response.
    raw_result = self._generate_vpn_profile_initial(
        resource_group_name=resource_group_name,
        virtual_network_gateway_name=virtual_network_gateway_name,
        parameters=parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Final LRO response body is the profile package URL (str).
        deserialized = self._deserialize('str', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # polling=True -> default ARM poller; False -> single-shot NoPolling;
    # anything else is treated as a caller-supplied polling strategy.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
generate_vpn_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'}
def _get_vpn_profile_package_url_initial(
        self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
    # Internal helper: initial POST of the long-running "get VPN profile
    # package URL" operation. Body-less request; returns the 200 body
    # (a str URL) or None on 202 Accepted.

    # Construct URL
    url = self.get_vpn_profile_package_url.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for server-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('str', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def get_vpn_profile_package_url(
        self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """Gets pre-generated VPN profile for P2S client of the virtual network
    gateway in the specified resource group. The profile needs to be
    generated first using generateVpnProfile.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network
     gateway.
    :type virtual_network_gateway_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns str or
     ClientRawResponse<str> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the LRO; raw=True so the poller sees the raw response.
    raw_result = self._get_vpn_profile_package_url_initial(
        resource_group_name=resource_group_name,
        virtual_network_gateway_name=virtual_network_gateway_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Final LRO response body is the pre-generated package URL (str).
        deserialized = self._deserialize('str', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy (default ARM poller / none / custom).
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_vpn_profile_package_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'}
def _get_bgp_peer_status_initial(
        self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, **operation_config):
    # Internal helper: initial POST of the long-running "get BGP peer
    # status" operation. Optional `peer` narrows the query to one peer IP.
    # Returns a BgpPeerStatusListResult on 200 or None on 202 Accepted.

    # Construct URL
    url = self.get_bgp_peer_status.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    if peer is not None:
        # 'peer' is only added to the query string when supplied.
        query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('BgpPeerStatusListResult', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def get_bgp_peer_status(
        self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, polling=True, **operation_config):
    """The GetBgpPeerStatus operation retrieves the status of all BGP peers.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network
     gateway.
    :type virtual_network_gateway_name: str
    :param peer: The IP address of the peer to retrieve the status of.
    :type peer: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns BgpPeerStatusListResult
     or ClientRawResponse<BgpPeerStatusListResult> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.BgpPeerStatusListResult]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.BgpPeerStatusListResult]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the LRO; raw=True so the poller sees the raw response.
    raw_result = self._get_bgp_peer_status_initial(
        resource_group_name=resource_group_name,
        virtual_network_gateway_name=virtual_network_gateway_name,
        peer=peer,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Final LRO response deserializes to BgpPeerStatusListResult.
        deserialized = self._deserialize('BgpPeerStatusListResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy (default ARM poller / none / custom).
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_bgp_peer_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'}
def supported_vpn_devices(
        self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
    """Gets a xml format representation for supported vpn devices.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network
     gateway.
    :type virtual_network_gateway_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: str or ClientRawResponse if raw=true
    :rtype: str or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Synchronous (non-LRO) POST: only a 200 response is accepted.

    # Construct URL
    url = self.supported_vpn_devices.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        # Body is the supported-devices XML document, returned as str.
        deserialized = self._deserialize('str', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
supported_vpn_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/supportedvpndevices'}
def _get_learned_routes_initial(
        self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
    # Internal helper: initial POST of the long-running "get learned
    # routes" operation. Returns a GatewayRouteListResult on 200 or
    # None on 202 Accepted.

    # Construct URL
    url = self.get_learned_routes.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('GatewayRouteListResult', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def get_learned_routes(
        self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """This operation retrieves a list of routes the virtual network gateway
    has learned, including routes learned from BGP peers.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network
     gateway.
    :type virtual_network_gateway_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns GatewayRouteListResult
     or ClientRawResponse<GatewayRouteListResult> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.GatewayRouteListResult]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.GatewayRouteListResult]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the LRO; raw=True so the poller sees the raw response.
    raw_result = self._get_learned_routes_initial(
        resource_group_name=resource_group_name,
        virtual_network_gateway_name=virtual_network_gateway_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Final LRO response deserializes to GatewayRouteListResult.
        deserialized = self._deserialize('GatewayRouteListResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy (default ARM poller / none / custom).
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_learned_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'}
def _get_advertised_routes_initial(
        self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, **operation_config):
    # Internal helper: initial POST of the long-running "get advertised
    # routes" operation. Unlike _get_bgp_peer_status_initial, `peer` is
    # required here and always sent in the query string. Returns a
    # GatewayRouteListResult on 200 or None on 202 Accepted.

    # Construct URL
    url = self.get_advertised_routes.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('GatewayRouteListResult', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def get_advertised_routes(
        self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, polling=True, **operation_config):
    """This operation retrieves a list of routes the virtual network gateway
    is advertising to the specified peer.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network
     gateway.
    :type virtual_network_gateway_name: str
    :param peer: The IP address of the peer
    :type peer: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns GatewayRouteListResult
     or ClientRawResponse<GatewayRouteListResult> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.GatewayRouteListResult]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.GatewayRouteListResult]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the LRO; raw=True so the poller sees the raw response.
    raw_result = self._get_advertised_routes_initial(
        resource_group_name=resource_group_name,
        virtual_network_gateway_name=virtual_network_gateway_name,
        peer=peer,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Final LRO response deserializes to GatewayRouteListResult.
        deserialized = self._deserialize('GatewayRouteListResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy (default ARM poller / none / custom).
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_advertised_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'}
def vpn_device_configuration_script(
        self, resource_group_name, virtual_network_gateway_connection_name, parameters, custom_headers=None, raw=False, **operation_config):
    """Gets a xml format representation for vpn device configuration script.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_connection_name: The name of the
     virtual network gateway connection for which the configuration script
     is generated.
    :type virtual_network_gateway_connection_name: str
    :param parameters: Parameters supplied to the generate vpn device
     script operation.
    :type parameters:
     ~azure.mgmt.network.v2017_09_01.models.VpnDeviceScriptParameters
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: str or ClientRawResponse if raw=true
    :rtype: str or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Synchronous (non-LRO) POST against the *connection* resource, not
    # the gateway resource; only a 200 response is accepted.

    # Construct URL
    url = self.vpn_device_configuration_script.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(parameters, 'VpnDeviceScriptParameters')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        # Body is the device configuration script, returned as str.
        deserialized = self._deserialize('str', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
vpn_device_configuration_script.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/vpndeviceconfigurationscript'}
# ---------------------------------------------------------------------------
# NOTE(review): the content below is an unrelated module (mininode.py) that
# appears to have been concatenated onto this file; the original separator
# lines ("|") were not valid Python and have been converted to this comment.
# ---------------------------------------------------------------------------
# mininode.py - Ulm P2P network half-a-node
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a ulm node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# ulm/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import time
import sys
import random
from binascii import hexlify, unhexlify
from io import BytesIO
from codecs import encode
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
import ulm_hash
# Protocol-level constants for the mininode test framework.
BIP0031_VERSION = 60000
MY_VERSION = 70206  # current MIN_PEER_PROTO_VERSION
MY_SUBVERSION = b"/python-mininode-tester:0.0.2/"
MAX_INV_SZ = 50000
MAX_BLOCK_SIZE = 1000000
# Plain int literal: the former "100000000L" long suffix is Python-2-only
# syntax and a SyntaxError on Python 3; the value is unchanged.
COIN = 100000000  # 1 btc in satoshis

# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()

# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
    """Return the raw 32-byte SHA-256 digest of *s*."""
    return hashlib.sha256(s).digest()
def hash256(s):
    """Return the Bitcoin-style double SHA-256 digest of *s*."""
    inner = hashlib.new('sha256', s).digest()
    return hashlib.new('sha256', inner).digest()
def ulmhash(s):
    # Proof-of-work hash for ulm blocks; delegates entirely to the native
    # ulm_hash extension module (algorithm not visible from this file).
    return ulm_hash.getPoWHash(s)
def deser_string(f):
    """Read a CompactSize-length-prefixed byte string from file-like *f*."""
    length = struct.unpack("<B", f.read(1))[0]
    # Markers 253/254/255 mean the real length follows in 2/4/8 LE bytes.
    wide = {253: ("<H", 2), 254: ("<I", 4), 255: ("<Q", 8)}
    if length in wide:
        fmt, nbytes = wide[length]
        length = struct.unpack(fmt, f.read(nbytes))[0]
    return f.read(length)
def ser_string(s):
    """Serialize bytes *s* as a CompactSize length prefix plus the payload.

    Fix: the 0x100000000L long literal was Python-2-only syntax (a
    SyntaxError on Python 3); the plain literal has the same value.
    """
    if len(s) < 253:
        return struct.pack("B", len(s)) + s
    elif len(s) < 0x10000:
        return struct.pack("<BH", 253, len(s)) + s
    elif len(s) < 0x100000000:
        return struct.pack("<BI", 254, len(s)) + s
    return struct.pack("<BQ", 255, len(s)) + s
def deser_uint256(f):
    """Read a 256-bit little-endian unsigned integer from file-like *f*.

    Fix: the 0L literal and xrange were Python-2-only; 0/range behave
    identically here and keep the module importable on Python 3.
    """
    r = 0
    for i in range(8):
        # Eight 32-bit LE words, least-significant first.
        t = struct.unpack("<I", f.read(4))[0]
        r += t << (i * 32)
    return r
def ser_uint256(u):
    """Serialize integer *u* as 32 little-endian bytes.

    Fix: xrange and the 0xFFFFFFFFL long literal were Python-2-only
    syntax; range / plain literal are value-identical.
    """
    rs = b""
    for i in range(8):
        # Emit the low 32 bits, then shift them out.
        rs += struct.pack("<I", u & 0xFFFFFFFF)
        u >>= 32
    return rs
def uint256_from_str(s):
    """Convert the first 32 bytes of *s* (little-endian) to an integer.

    Fix: 0L and xrange were Python-2-only syntax; replaced with the
    value-identical 0 and range.
    """
    r = 0
    t = struct.unpack("<IIIIIIII", s[:32])
    for i in range(8):
        r += t[i] << (i * 32)
    return r
def uint256_from_compact(c):
    """Expand a compact-format ("nBits") target *c* into a full integer.

    Fix: the 0xFFFFFFL long literal was Python-2-only syntax; the plain
    literal has the same value.
    """
    nbytes = (c >> 24) & 0xFF          # exponent: size of the target in bytes
    v = (c & 0xFFFFFF) << (8 * (nbytes - 3))  # mantissa shifted into place
    return v
def deser_vector(f, c):
    """Read a CompactSize count from *f*, then that many objects.

    *c* is a zero-argument factory; each produced object must implement
    deserialize(f).

    Fix: xrange was Python-2-only; range is behaviorally identical here.
    """
    nit = struct.unpack("<B", f.read(1))[0]
    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    r = []
    for i in range(nit):
        t = c()
        t.deserialize(f)
        r.append(t)
    return r
def ser_vector(l):
    """Serialize list *l* as a CompactSize count plus each item's
    serialize() output.

    Fixes: the 0x100000000L long literal was Python-2-only syntax; the
    dead initial ``r = b""`` assignment (always overwritten) is removed.
    """
    if len(l) < 253:
        r = struct.pack("B", len(l))
    elif len(l) < 0x10000:
        r = struct.pack("<BH", 253, len(l))
    elif len(l) < 0x100000000:
        r = struct.pack("<BI", 254, len(l))
    else:
        r = struct.pack("<BQ", 255, len(l))
    for i in l:
        r += i.serialize()
    return r
def deser_uint256_vector(f):
    """Read a CompactSize count, then that many 256-bit LE integers.

    Fix: xrange was Python-2-only; range is behaviorally identical here.
    """
    nit = struct.unpack("<B", f.read(1))[0]
    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    r = []
    for i in range(nit):
        t = deser_uint256(f)
        r.append(t)
    return r
def ser_uint256_vector(l):
    """Serialize a list of 256-bit integers: CompactSize count + values.

    Fix: the 0x100000000L long literal was Python-2-only syntax.
    """
    r = b""
    if len(l) < 253:
        r = struct.pack("B", len(l))
    elif len(l) < 0x10000:
        r = struct.pack("<BH", 253, len(l))
    elif len(l) < 0x100000000:
        r = struct.pack("<BI", 254, len(l))
    else:
        r = struct.pack("<BQ", 255, len(l))
    for i in l:
        r += ser_uint256(i)
    return r
def deser_string_vector(f):
    """Read a CompactSize count, then that many length-prefixed strings.

    Fix: xrange was Python-2-only; range is behaviorally identical here.
    """
    nit = struct.unpack("<B", f.read(1))[0]
    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    r = []
    for i in range(nit):
        t = deser_string(f)
        r.append(t)
    return r
def ser_string_vector(l):
    """Serialize a list of byte strings: CompactSize count + each string.

    Fix: the 0x100000000L long literal was Python-2-only syntax.
    """
    r = b""
    if len(l) < 253:
        r = struct.pack("B", len(l))
    elif len(l) < 0x10000:
        r = struct.pack("<BH", 253, len(l))
    elif len(l) < 0x100000000:
        r = struct.pack("<BI", 254, len(l))
    else:
        r = struct.pack("<BQ", 255, len(l))
    for sv in l:
        r += ser_string(sv)
    return r
def deser_int_vector(f):
    """Read a CompactSize count, then that many signed 32-bit LE ints.

    Fix: xrange was Python-2-only; range is behaviorally identical here.
    """
    nit = struct.unpack("<B", f.read(1))[0]
    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    r = []
    for i in range(nit):
        t = struct.unpack("<i", f.read(4))[0]
        r.append(t)
    return r
def ser_int_vector(l):
    """Serialize a list of ints as CompactSize count + signed 32-bit LE.

    Fix: the 0x100000000L long literal was Python-2-only syntax.
    """
    r = b""
    if len(l) < 253:
        r = struct.pack("B", len(l))
    elif len(l) < 0x10000:
        r = struct.pack("<BH", 253, len(l))
    elif len(l) < 0x100000000:
        r = struct.pack("<BI", 254, len(l))
    else:
        r = struct.pack("<BQ", 255, len(l))
    for i in l:
        r += struct.pack("<i", i)
    return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
    """Deserialize *obj* from a hex string (e.g. from RPC); returns *obj*."""
    raw = unhexlify(hex_string.encode('ascii'))
    obj.deserialize(BytesIO(raw))
    return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
    """Serialize *obj* and return its wire bytes as a hex string."""
    wire = obj.serialize()
    return hexlify(wire).decode('ascii')
# Objects that map to ulmd objects, which can be serialized/deserialized
class CAddress(object):
    """Network address record: services, IPv4-in-IPv6 prefix, ip and port."""

    def __init__(self):
        self.nServices = 1
        # 12-byte prefix that maps an IPv4 address into IPv6 space.
        self.pchReserved = b"\x00" * 10 + b"\xff" * 2
        self.ip = "0.0.0.0"
        self.port = 0

    def deserialize(self, f):
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.pchReserved = f.read(12)
        self.ip = socket.inet_ntoa(f.read(4))
        self.port = struct.unpack(">H", f.read(2))[0]

    def serialize(self):
        parts = [
            struct.pack("<Q", self.nServices),
            self.pchReserved,
            socket.inet_aton(self.ip),
            struct.pack(">H", self.port),  # port is big-endian on the wire
        ]
        return b"".join(parts)

    def __repr__(self):
        return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
                                                         self.ip, self.port)
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block"}
def __init__(self, t=0, h=0L):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
    """Block locator: protocol version plus a list of known block hashes."""

    def __init__(self):
        self.nVersion = MY_VERSION
        self.vHave = []

    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vHave = deser_uint256_vector(f)

    def serialize(self):
        out = struct.pack("<i", self.nVersion)
        out += ser_uint256_vector(self.vHave)
        return out

    def __repr__(self):
        return "CBlockLocator(nVersion=%i vHave=%s)" \
            % (self.nVersion, repr(self.vHave))
class COutPoint(object):
    """Reference to a transaction output: (txid hash, output index n)."""

    def __init__(self, hash=0, n=0):
        self.hash = hash
        self.n = n

    def deserialize(self, f):
        self.hash = deser_uint256(f)
        self.n = struct.unpack("<I", f.read(4))[0]

    def serialize(self):
        out = ser_uint256(self.hash)
        out += struct.pack("<I", self.n)
        return out

    def __repr__(self):
        return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
    """Transaction input: previous outpoint, scriptSig and sequence number."""

    def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
        # Build the null outpoint per instance rather than sharing a
        # mutable default.
        self.prevout = COutPoint() if outpoint is None else outpoint
        self.scriptSig = scriptSig
        self.nSequence = nSequence

    def deserialize(self, f):
        self.prevout = COutPoint()
        self.prevout.deserialize(f)
        self.scriptSig = deser_string(f)
        self.nSequence = struct.unpack("<I", f.read(4))[0]

    def serialize(self):
        out = self.prevout.serialize()
        out += ser_string(self.scriptSig)
        out += struct.pack("<I", self.nSequence)
        return out

    def __repr__(self):
        return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
            % (repr(self.prevout), hexlify(self.scriptSig),
               self.nSequence)
class CTxOut(object):
    """Transaction output: value in base units plus the locking script."""

    def __init__(self, nValue=0, scriptPubKey=b""):
        self.nValue = nValue
        self.scriptPubKey = scriptPubKey

    def deserialize(self, f):
        self.nValue = struct.unpack("<q", f.read(8))[0]
        self.scriptPubKey = deser_string(f)

    def serialize(self):
        out = struct.pack("<q", self.nValue)
        out += ser_string(self.scriptPubKey)
        return out

    def __repr__(self):
        # Render the value as whole coins plus the fractional remainder.
        return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
            % (self.nValue // COIN, self.nValue % COIN,
               hexlify(self.scriptPubKey))
class CTransaction(object):
    """A transaction: version, inputs, outputs and lock time.

    sha256/hash are cached lazily; call calc_sha256() or rehash() to
    (re)compute them after mutating the transaction.
    """

    def __init__(self, tx=None):
        # Copy-construct from another transaction when given, else start empty.
        if tx is None:
            self.nVersion = 1
            self.vin = []
            self.vout = []
            self.nLockTime = 0
            self.sha256 = None
            self.hash = None
        else:
            self.nVersion = tx.nVersion
            self.vin = copy.deepcopy(tx.vin)
            self.vout = copy.deepcopy(tx.vout)
            self.nLockTime = tx.nLockTime
            self.sha256 = None
            self.hash = None

    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vin = deser_vector(f, CTxIn)
        self.vout = deser_vector(f, CTxOut)
        self.nLockTime = struct.unpack("<I", f.read(4))[0]
        # Invalidate any cached hash; the contents just changed.
        self.sha256 = None
        self.hash = None

    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        r += struct.pack("<I", self.nLockTime)
        return r

    def rehash(self):
        # Force a recompute even if a hash is already cached.
        self.sha256 = None
        self.calc_sha256()

    def calc_sha256(self):
        # sha256 is the double-SHA256 of the serialization (cached);
        # hash is always refreshed as its reversed-hex string form.
        if self.sha256 is None:
            self.sha256 = uint256_from_str(hash256(self.serialize()))
        self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')

    def is_valid(self):
        # Only checks output value ranges; scripts/signatures are not checked.
        self.calc_sha256()
        for tout in self.vout:
            if tout.nValue < 0 or tout.nValue > 9000000000 * COIN:
                return False
        return True

    def __repr__(self):
        return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
            % (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader(object):
    """Block header; hashed with ulmhash rather than plain double-SHA256."""

    def __init__(self, header=None):
        # Copy-construct from another header when given, else a null header.
        if header is None:
            self.set_null()
        else:
            self.nVersion = header.nVersion
            self.hashPrevBlock = header.hashPrevBlock
            self.hashMerkleRoot = header.hashMerkleRoot
            self.nTime = header.nTime
            self.nBits = header.nBits
            self.nNonce = header.nNonce
            self.sha256 = header.sha256
            self.hash = header.hash
            self.calc_sha256()

    def set_null(self):
        # Reset every field to the "null header" state.
        self.nVersion = 1
        self.hashPrevBlock = 0
        self.hashMerkleRoot = 0
        self.nTime = 0
        self.nBits = 0
        self.nNonce = 0
        self.sha256 = None
        self.hash = None

    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.hashPrevBlock = deser_uint256(f)
        self.hashMerkleRoot = deser_uint256(f)
        self.nTime = struct.unpack("<I", f.read(4))[0]
        self.nBits = struct.unpack("<I", f.read(4))[0]
        self.nNonce = struct.unpack("<I", f.read(4))[0]
        # Invalidate cached hashes after the fields change.
        self.sha256 = None
        self.hash = None

    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_uint256(self.hashPrevBlock)
        r += ser_uint256(self.hashMerkleRoot)
        r += struct.pack("<I", self.nTime)
        r += struct.pack("<I", self.nBits)
        r += struct.pack("<I", self.nNonce)
        return r

    def calc_sha256(self):
        # Lazily compute the header hash.  The header bytes are rebuilt here
        # on purpose instead of calling self.serialize(): the CBlock subclass
        # overrides serialize() to append transactions, which must not be
        # part of the header hash.
        if self.sha256 is None:
            r = b""
            r += struct.pack("<i", self.nVersion)
            r += ser_uint256(self.hashPrevBlock)
            r += ser_uint256(self.hashMerkleRoot)
            r += struct.pack("<I", self.nTime)
            r += struct.pack("<I", self.nBits)
            r += struct.pack("<I", self.nNonce)
            self.sha256 = uint256_from_str(ulmhash(r))
            self.hash = encode(ulmhash(r)[::-1], 'hex_codec').decode('ascii')

    def rehash(self):
        # Drop the cached hash, recompute, and return the new value.
        self.sha256 = None
        self.calc_sha256()
        return self.sha256

    def __repr__(self):
        return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
    """Full block: the header fields plus the transaction vector vtx."""

    def __init__(self, header=None):
        super(CBlock, self).__init__(header)
        self.vtx = []

    def deserialize(self, f):
        super(CBlock, self).deserialize(f)
        self.vtx = deser_vector(f, CTransaction)

    def serialize(self):
        r = b""
        r += super(CBlock, self).serialize()
        r += ser_vector(self.vtx)
        return r

    def calc_merkle_root(self):
        # Pair hashes level by level, duplicating the last entry when a
        # level has an odd count.
        # NOTE(review): assumes at least one transaction (the coinbase);
        # an empty vtx would IndexError on hashes[0].
        hashes = []
        for tx in self.vtx:
            tx.calc_sha256()
            hashes.append(ser_uint256(tx.sha256))
        while len(hashes) > 1:
            newhashes = []
            for i in xrange(0, len(hashes), 2):
                i2 = min(i+1, len(hashes)-1)
                newhashes.append(hash256(hashes[i] + hashes[i2]))
            hashes = newhashes
        return uint256_from_str(hashes[0])

    def is_valid(self):
        # Checks proof of work, per-transaction validity and the merkle root.
        self.calc_sha256()
        target = uint256_from_compact(self.nBits)
        if self.sha256 > target:
            return False
        for tx in self.vtx:
            if not tx.is_valid():
                return False
        if self.calc_merkle_root() != self.hashMerkleRoot:
            return False
        return True

    def solve(self):
        # Grind the nonce until the header hash meets the target.
        self.rehash()
        target = uint256_from_compact(self.nBits)
        while self.sha256 > target:
            self.nNonce += 1
            self.rehash()

    def __repr__(self):
        return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
    """Unsigned alert payload; field names mirror the reference client."""

    def __init__(self):
        self.nVersion = 1
        self.nRelayUntil = 0
        self.nExpiration = 0
        self.nID = 0
        self.nCancel = 0
        self.setCancel = []
        self.nMinVer = 0
        self.nMaxVer = 0
        self.setSubVer = []
        self.nPriority = 0
        self.strComment = b""
        self.strStatusBar = b""
        self.strReserved = b""

    def deserialize(self, f):
        # Field order must match serialize() exactly.
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
        self.nExpiration = struct.unpack("<q", f.read(8))[0]
        self.nID = struct.unpack("<i", f.read(4))[0]
        self.nCancel = struct.unpack("<i", f.read(4))[0]
        self.setCancel = deser_int_vector(f)
        self.nMinVer = struct.unpack("<i", f.read(4))[0]
        self.nMaxVer = struct.unpack("<i", f.read(4))[0]
        self.setSubVer = deser_string_vector(f)
        self.nPriority = struct.unpack("<i", f.read(4))[0]
        self.strComment = deser_string(f)
        self.strStatusBar = deser_string(f)
        self.strReserved = deser_string(f)

    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<q", self.nRelayUntil)
        r += struct.pack("<q", self.nExpiration)
        r += struct.pack("<i", self.nID)
        r += struct.pack("<i", self.nCancel)
        r += ser_int_vector(self.setCancel)
        r += struct.pack("<i", self.nMinVer)
        r += struct.pack("<i", self.nMaxVer)
        r += ser_string_vector(self.setSubVer)
        r += struct.pack("<i", self.nPriority)
        r += ser_string(self.strComment)
        r += ser_string(self.strStatusBar)
        r += ser_string(self.strReserved)
        return r

    def __repr__(self):
        return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
            % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
               self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
               self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
    """Signed alert envelope: serialized payload bytes plus their signature."""

    def __init__(self):
        self.vchMsg = b""
        self.vchSig = b""

    def deserialize(self, f):
        self.vchMsg = deser_string(f)
        self.vchSig = deser_string(f)

    def serialize(self):
        out = ser_string(self.vchMsg)
        out += ser_string(self.vchSig)
        return out

    def __repr__(self):
        return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
            % (len(self.vchMsg), len(self.vchSig))
# Objects that correspond to messages on the wire
class msg_version(object):
    """'version' handshake message.

    deserialize() honors historical protocol quirks: addrFrom, nNonce and
    strSubVer only exist from protocol version 106, nStartingHeight from 209.
    """
    command = b"version"

    def __init__(self):
        self.nVersion = MY_VERSION
        self.nServices = 1
        self.nTime = int(time.time())
        self.addrTo = CAddress()
        self.addrFrom = CAddress()
        self.nNonce = random.getrandbits(64)
        self.strSubVer = MY_SUBVERSION
        self.nStartingHeight = -1

    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        if self.nVersion == 10300:
            # Historical quirk: 10300 was used on the wire for version 300.
            self.nVersion = 300
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.nTime = struct.unpack("<q", f.read(8))[0]
        self.addrTo = CAddress()
        self.addrTo.deserialize(f)
        if self.nVersion >= 106:
            self.addrFrom = CAddress()
            self.addrFrom.deserialize(f)
            self.nNonce = struct.unpack("<Q", f.read(8))[0]
            self.strSubVer = deser_string(f)
            if self.nVersion >= 209:
                self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
            else:
                self.nStartingHeight = None
        else:
            # Fields absent from pre-106 messages.
            self.addrFrom = None
            self.nNonce = None
            self.strSubVer = None
            self.nStartingHeight = None

    def serialize(self):
        # Serialization always emits the full modern layout.
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<Q", self.nServices)
        r += struct.pack("<q", self.nTime)
        r += self.addrTo.serialize()
        r += self.addrFrom.serialize()
        r += struct.pack("<Q", self.nNonce)
        r += ser_string(self.strSubVer)
        r += struct.pack("<i", self.nStartingHeight)
        return r

    def __repr__(self):
        return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
            % (self.nVersion, self.nServices, time.ctime(self.nTime),
               repr(self.addrTo), repr(self.addrFrom), self.nNonce,
               self.strSubVer, self.nStartingHeight)
class msg_verack(object):
    """Empty 'verack' message acknowledging a version handshake."""
    command = b"verack"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_verack()"
class msg_addr(object):
    """'addr' message carrying a vector of peer addresses."""
    command = b"addr"

    def __init__(self):
        self.addrs = []

    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress)

    def serialize(self):
        return ser_vector(self.addrs)

    def __repr__(self):
        return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
    """'alert' message wrapping a signed CAlert."""
    command = b"alert"

    def __init__(self):
        self.alert = CAlert()

    def deserialize(self, f):
        self.alert = CAlert()
        self.alert.deserialize(f)

    def serialize(self):
        return self.alert.serialize()

    def __repr__(self):
        return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
    """'inv' message advertising inventory (blocks/transactions)."""
    command = b"inv"

    def __init__(self, inv=None):
        # Build the empty list per instance to avoid a shared mutable default.
        self.inv = [] if inv is None else inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
    """'getdata' message requesting inventory objects by type/hash."""
    command = b"getdata"

    def __init__(self, inv=None):
        # 'is not None' instead of '!= None': identity test is the correct
        # idiom and avoids invoking a custom __eq__ on the argument.
        self.inv = inv if inv is not None else []

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
    """'tx' message carrying a single transaction.

    Bug fix: the default used to be ``tx=CTransaction()``, a single shared
    instance created once at class-definition time, so every ``msg_tx()``
    aliased the same transaction and deserialize() mutated it for all of
    them.  Use a None sentinel and build a fresh transaction per message.
    """
    command = b"tx"

    def __init__(self, tx=None):
        self.tx = CTransaction() if tx is None else tx

    def deserialize(self, f):
        self.tx.deserialize(f)

    def serialize(self):
        return self.tx.serialize()

    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
    """'block' message carrying one full block."""
    command = b"block"

    def __init__(self, block=None):
        # Build a fresh empty block per instance when none is supplied.
        self.block = CBlock() if block is None else block

    def deserialize(self, f):
        self.block.deserialize(f)

    def serialize(self):
        return self.block.serialize()

    def __repr__(self):
        return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
    """Empty 'getaddr' message requesting known peer addresses."""
    command = b"getaddr"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_getaddr()"
class msg_ping_prebip31(object):
    """Nonce-less 'ping' used by peers that predate BIP 31."""
    command = b"ping"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
    """'pong' reply echoing the nonce from a 'ping'."""
    command = b"pong"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        self.nonce = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
    """Empty 'mempool' message requesting the node's mempool contents."""
    command = b"mempool"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_mempool()"
class msg_sendheaders(object):
    """Empty 'sendheaders' message requesting header announcements."""
    command = b"sendheaders"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
    """'headers' message: a count-prefixed vector of block headers."""
    command = b"headers"

    def __init__(self):
        self.headers = []

    def deserialize(self, f):
        # The reference client serializes these entries as full blocks
        # (with empty tx vectors), so parse CBlocks and keep only headers.
        for blk in deser_vector(f, CBlock):
            self.headers.append(CBlockHeader(blk))

    def serialize(self):
        return ser_vector([CBlock(hdr) for hdr in self.headers])

    def __repr__(self):
        return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0L
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
    """Poll predicate (under mininode_lock) until it returns true.

    Gives up after `attempts` polls or roughly `timeout` seconds; time is
    accounted in fixed 0.05s steps, so the timeout is approximate.
    Returns True if the predicate succeeded, False on exhaustion.
    """
    attempt = 0
    elapsed = 0
    while attempt < attempts and elapsed < timeout:
        with mininode_lock:
            if predicate():
                return True
        attempt += 1
        elapsed += 0.05
        time.sleep(0.05)
    return False
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
    """Base callback object for NodeConn.

    Reimplement the on_* handlers to react to protocol events; deliver()
    routes each received message to on_<command>.
    """

    def __init__(self):
        self.verack_received = False
        # deliver_sleep_time is helpful for debugging race conditions in p2p
        # tests; it causes message delivery to sleep for the specified time
        # before acquiring the global lock and delivering the next message.
        self.deliver_sleep_time = None

    def set_deliver_sleep_time(self, value):
        with mininode_lock:
            self.deliver_sleep_time = value

    def get_deliver_sleep_time(self):
        with mininode_lock:
            return self.deliver_sleep_time

    # Spin until verack message is received from the node.
    # Tests may want to use this as a signal that the test can begin.
    # This can be called from the testing thread, so it needs to acquire the
    # global lock.
    def wait_for_verack(self):
        while True:
            with mininode_lock:
                if self.verack_received:
                    return
            time.sleep(0.05)

    def deliver(self, conn, message):
        # Dispatch to on_<command>; errors are printed but not propagated
        # so a bad handler does not kill the network thread.
        deliver_sleep = self.get_deliver_sleep_time()
        if deliver_sleep is not None:
            time.sleep(deliver_sleep)
        with mininode_lock:
            try:
                getattr(self, 'on_' + message.command)(conn, message)
            except:
                print "ERROR delivering %s (%s)" % (repr(message),
                                                    sys.exc_info()[0])

    def on_version(self, conn, message):
        # Acknowledge modern peers and negotiate the effective send version.
        if message.nVersion >= 209:
            conn.send_message(msg_verack())
        conn.ver_send = min(MY_VERSION, message.nVersion)
        if message.nVersion < 209:
            # Pre-209 peers never send a verack.
            conn.ver_recv = conn.ver_send

    def on_verack(self, conn, message):
        conn.ver_recv = conn.ver_send
        self.verack_received = True

    def on_inv(self, conn, message):
        # Request every advertised object of a known (non-error) type.
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            conn.send_message(want)

    def on_addr(self, conn, message): pass
    def on_alert(self, conn, message): pass
    def on_getdata(self, conn, message): pass
    def on_getblocks(self, conn, message): pass
    def on_tx(self, conn, message): pass
    def on_block(self, conn, message): pass
    def on_getaddr(self, conn, message): pass
    def on_headers(self, conn, message): pass
    def on_getheaders(self, conn, message): pass

    def on_ping(self, conn, message):
        # Post-BIP31 pings carry a nonce and expect a pong echo.
        if conn.ver_send > BIP0031_VERSION:
            conn.send_message(msg_pong(message.nonce))

    def on_reject(self, conn, message): pass
    def on_close(self, conn): pass
    def on_mempool(self, conn): pass
    def on_pong(self, conn, message): pass
# More useful callbacks and functions for NodeConnCB's which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
    """Callback helper for tests that drive exactly one NodeConn."""

    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        self.last_pong = msg_pong()

    def add_connection(self, conn):
        # Remember the single connection this callback serves.
        self.connection = conn

    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)

    def on_pong(self, conn, message):
        self.last_pong = message

    # Sync up with the node
    def sync_with_ping(self, timeout=30):
        """Send a ping and wait for the matching pong; return True on success.

        Bug fix: the timeout used to be passed positionally, landing in
        wait_until's `attempts` parameter, so timeout=30 only allowed 30
        poll iterations (~1.5s) instead of 30 seconds.  Pass it by keyword.
        """
        def received_pong():
            return (self.last_pong.nonce == self.ping_counter)
        self.send_message(msg_ping(nonce=self.ping_counter))
        success = wait_until(received_pong, timeout=timeout)
        self.ping_counter += 1
        return success
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
    """A p2p connection to one node, driven by the shared asyncore loop.

    Frames outgoing messages (magic + command + length + checksum + body),
    unframes and parses incoming ones via `messagemap`, and hands every
    parsed message to the callback object's deliver().  The version
    handshake is queued immediately on construction.
    """
    # Wire command -> message class used to parse incoming payloads.
    messagemap = {
        b"version": msg_version,
        b"verack": msg_verack,
        b"addr": msg_addr,
        b"alert": msg_alert,
        b"inv": msg_inv,
        b"getdata": msg_getdata,
        b"getblocks": msg_getblocks,
        b"tx": msg_tx,
        b"block": msg_block,
        b"getaddr": msg_getaddr,
        b"ping": msg_ping,
        b"pong": msg_pong,
        b"headers": msg_headers,
        b"getheaders": msg_getheaders,
        b"reject": msg_reject,
        b"mempool": msg_mempool,
    }
    # Message-start magic bytes per network.
    MAGIC_BYTES = {
        "mainnet": b"\xbf\x0c\x6b\xbd",   # mainnet
        "testnet3": b"\xce\xe2\xca\xff",  # testnet3
        "regtest": b"\xfc\xc1\xb7\xdc"    # regtest
    }

    def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=1):
        asyncore.dispatcher.__init__(self, map=mininode_socket_map)
        self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
        self.dstaddr = dstaddr
        self.dstport = dstport
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sendbuf = b""
        self.recvbuf = b""
        # Assume the modern (checksummed) framing until the peer's version
        # message says otherwise.
        self.ver_send = 209
        self.ver_recv = 209
        self.last_sent = 0
        self.state = "connecting"
        self.network = net
        self.cb = callback
        self.disconnect = False
        # stuff version msg into sendbuf
        vt = msg_version()
        vt.nServices = services
        vt.addrTo.ip = self.dstaddr
        vt.addrTo.port = self.dstport
        vt.addrFrom.ip = "0.0.0.0"
        vt.addrFrom.port = 0
        # pushbuf=True queues the message even though the socket is still
        # in the "connecting" state.
        self.send_message(vt, True)
        print 'MiniNode: Connecting to Ulm Node IP # ' + dstaddr + ':' \
            + str(dstport)
        try:
            self.connect((dstaddr, dstport))
        except:
            self.handle_close()
        self.rpc = rpc

    def show_debug_msg(self, msg):
        self.log.debug(msg)

    def handle_connect(self):
        self.show_debug_msg("MiniNode: Connected & Listening: \n")
        self.state = "connected"

    def handle_close(self):
        # Drop buffers, close the socket and notify the callback.
        self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
                            % (self.dstaddr, self.dstport))
        self.state = "closed"
        self.recvbuf = b""
        self.sendbuf = b""
        try:
            self.close()
        except:
            pass
        self.cb.on_close(self)

    def handle_read(self):
        try:
            t = self.recv(8192)
            if len(t) > 0:
                self.recvbuf += t
                self.got_data()
        except:
            pass

    def readable(self):
        return True

    def writable(self):
        # Only ask asyncore for write events while data is queued.
        with mininode_lock:
            length = len(self.sendbuf)
        return (length > 0)

    def handle_write(self):
        with mininode_lock:
            try:
                sent = self.send(self.sendbuf)
            except:
                self.handle_close()
                return
            self.sendbuf = self.sendbuf[sent:]

    def got_data(self):
        # Consume as many complete frames from recvbuf as possible.
        # Frame layout: 4-byte magic, 12-byte NUL-padded command, 4-byte
        # length, then (for peers >= 209) a 4-byte checksum, then the body.
        try:
            while True:
                if len(self.recvbuf) < 4:
                    return
                if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
                    raise ValueError("got garbage %s" % repr(self.recvbuf))
                if self.ver_recv < 209:
                    # Pre-209 framing has no checksum field.
                    if len(self.recvbuf) < 4 + 12 + 4:
                        return
                    command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                    msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                    checksum = None
                    if len(self.recvbuf) < 4 + 12 + 4 + msglen:
                        return
                    msg = self.recvbuf[4+12+4:4+12+4+msglen]
                    self.recvbuf = self.recvbuf[4+12+4+msglen:]
                else:
                    if len(self.recvbuf) < 4 + 12 + 4 + 4:
                        return
                    command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                    msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                    checksum = self.recvbuf[4+12+4:4+12+4+4]
                    if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                        return
                    msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
                    # Checksum is the first 4 bytes of sha256(sha256(body)).
                    th = sha256(msg)
                    h = sha256(th)
                    if checksum != h[:4]:
                        raise ValueError("got bad checksum " + repr(self.recvbuf))
                    self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
                if command in self.messagemap:
                    f = BytesIO(msg)
                    t = self.messagemap[command]()
                    t.deserialize(f)
                    self.got_message(t)
                else:
                    self.show_debug_msg("Unknown command: '" + command + "' " +
                                        repr(msg))
        except Exception as e:
            print 'got_data:', repr(e)

    def send_message(self, message, pushbuf=False):
        # pushbuf=True bypasses the connected-state check (used to queue
        # the initial version message before the socket connects).
        if self.state != "connected" and not pushbuf:
            return
        self.show_debug_msg("Send %s" % repr(message))
        command = message.command
        data = message.serialize()
        tmsg = self.MAGIC_BYTES[self.network]
        tmsg += command
        tmsg += b"\x00" * (12 - len(command))
        tmsg += struct.pack("<I", len(data))
        if self.ver_send >= 209:
            th = sha256(data)
            h = sha256(th)
            tmsg += h[:4]
        tmsg += data
        with mininode_lock:
            self.sendbuf += tmsg
            self.last_sent = time.time()

    def got_message(self, message):
        # Swap in the nonce-less ping for pre-BIP31 peers.
        # NOTE(review): messagemap is a class attribute, so this mutation
        # affects every NodeConn instance, not just this connection.
        if message.command == b"version":
            if message.nVersion <= BIP0031_VERSION:
                self.messagemap[b'ping'] = msg_ping_prebip31
        # Keep the link alive with a ping after 30 minutes of send idle.
        if self.last_sent + 30 * 60 < time.time():
            self.send_message(self.messagemap[b'ping']())
        self.show_debug_msg("Recv %s" % repr(message))
        self.cb.deliver(self, message)

    def disconnect_node(self):
        # Flag for the network thread; the actual close happens there.
        self.disconnect = True
class NetworkThread(Thread):
    """Runs the asyncore loop and applies deferred disconnect requests."""

    def run(self):
        while mininode_socket_map:
            # We check for whether to disconnect outside of the asyncore
            # loop to workaround the behavior of asyncore when using
            # select
            pending = [obj for obj in mininode_socket_map.values()
                       if obj.disconnect]
            for obj in pending:
                obj.handle_close()
            asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
    """Raised when a p2p or rpc connection drops before the test finishes."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
# ---- end of mininode p2p framework / start of DataLogger web frontend ----
#!/usr/bin/python
import web
import os
#import gc
#gc.set_debug(gc.DEBUG_STATS)
import logging
logging.basicConfig(level=logging.INFO)
#import logging.handlers
import json
import time
import base64
import gzip
# own modules
from datalogger import DataLogger as DataLogger
from datalogger import TimeseriesStats as TimeseriesStats
# web.py URL routing: every request path is handled by the DataLoggerWeb class.
urls = (
    "/(.*)", "DataLoggerWeb",
)
# Root directory of the DataLogger/RRD project tree on this host.
basedir = "/var/rrd"
# WSGI entry point consumed by the application server.
application = web.application(urls, globals()).wsgifunc()
#handler = logging.handlers.RotatingFileHandler(
# os.path.join(basedir, "/var/log/apache2/datalogger.log"),
# maxBytes=10 * 1024 * 1024,
# backupCount=5)
#logging.getLogger("").addHandler(handler)
#logging.getLogger("").setLevel(level=logging.DEBUG)
def calllogger(func):
    """
    decorator to log and measure call durations
    """
    def inner(*args, **kwds):
        began = time.time()
        call_str = "%s(%s, %s)" % (func.__name__, args, kwds)
        logging.debug("calling %s", call_str)
        try:
            result = func(*args, **kwds)
            logging.debug("duration of call %s : %s", call_str, (time.time() - began))
            return result
        except StandardError as exc:
            # Swallow the error deliberately and report it as a string so a
            # failing handler never takes the web frontend down.
            logging.exception(exc)
            logging.error("call to %s caused StandardError", call_str)
            return "call to %s caused StandardError" % call_str
    # set inner function __name__ and __doc__ to original ones
    inner.__name__ = func.__name__
    inner.__doc__ = func.__doc__
    return inner
# Simple in-process cache used by the memcache decorator:
# key -> {"ts": store_time, "data": cached_value}.
MEMCACHE = {}
# Maximum age of a cache entry in seconds.
MAXAGE = 300
def memcache(func):
    """
    decorator to cache return values according to used function parameters
    """
    def inner(*args, **kwds):
        starttime = time.time()
        # Cache key: function name plus all positional and keyword arguments.
        thiskey = unicode((func.__name__, args, kwds))
        logging.info("number of keys in cache %d", len(MEMCACHE.keys()))
        logging.info("key to look for %s", thiskey)
        # get rid of old cache entries
        # NOTE: Python-2 .keys() returns a list copy, so deleting entries
        # while iterating is safe here.
        for key in MEMCACHE.keys():
            if (MEMCACHE[key]["ts"] + MAXAGE) < starttime:
                logging.info("deleting aged cache entry for key %s", key)
                del MEMCACHE[key]
        # is there an entry for this key
        if thiskey in MEMCACHE:
            if (MEMCACHE[thiskey]["ts"] + MAXAGE) > starttime:
                logging.info("returning from cache for key %s", thiskey)
                return MEMCACHE[thiskey]["data"]
        #logging.info("createing new cache entry for %s", key)
        try:
            ret_val = func(*args, **kwds)
            #logging.info("Storing returned data in cache for %s s", MAXAGE)
            MEMCACHE[thiskey] = {
                "ts" : starttime,
                "data" : ret_val,
            }
            return ret_val
        except StandardError as exc:
            # NOTE(review): errors are swallowed and None is returned
            # implicitly -- callers cannot distinguish failure from a
            # legitimate None result.
            logging.exception(exc)
    # set inner function __name__ and __doc__ to original ones
    inner.__name__ = func.__name__
    inner.__doc__ = func.__doc__
    return inner
class DataLoggerWeb(object):
"""retrieve Data from RRD Archive"""
    def __init__(self):
        """No instance state to initialize."""
def GET(self, args):
"""
GET Multiplexer function, according to first argument in URL
call this function, and resturn result to client
parameters:
/<str>function_name/...
return:
return function_name(what is left of arguments)
"""
method = args.split("/")[0].lower()
logging.info("calling method %s", method)
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
web.header("Content-Type", "application/json")
# web.header('Content-type', 'text/html')
method_args = args.split("/")[1:] # all without method name
method_func_dict = {
"doc" : self.doc,
"get_index_keynames" : self.get_index_keynames,
"get_value_keynames" : self.get_value_keynames,
"get_ts_keyname" : self.get_ts_keyname,
"get_projects" : self.get_projects,
"get_tablenames" : self.get_tablenames,
"get_wikiname" : self.get_wikiname,
"get_headers" : self.get_headers,
"get_last_business_day_datestring" : self.get_last_business_day_datestring,
"get_datewalk" : self.get_datewalk,
"get_caches" : self.get_caches,
"get_tsa" : self.get_tsa,
"get_tsa_adv" : self.get_tsa_adv,
"get_ts" : self.get_ts,
"get_tsastats" : self.get_tsastats,
"get_stat_func_names" : self.get_stat_func_names,
"get_quantilles" : self.get_quantile, # deprecated
"get_quantile" : self.get_quantile,
"get_quantilles_web" : self.get_quantile_web, # deprecated
"get_quantile_web" : self.get_quantile_web,
"get_chart_data_ungrouped" : self.get_chart_data_ungrouped,
"get_hc_daily_data" : self.get_hc_daily_data,
"get_ts_caches" : self.get_ts_caches,
"get_tsstat_caches" : self.get_tsstat_caches,
"get_caches_dict" : self.get_caches_dict,
"get_scatter_data" : self.get_scatter_data,
"get_longtime_data" : self.get_longtime_data,
"get_tsastats_table" : self.get_tsastats_table,
"get_tsastats_func" : self.get_tsastats_func,
"sr_vicenter_unused_cpu_cores" : self.sr_vicenter_unused_cpu_cores,
"sr_vicenter_unused_mem" : self.sr_vicenter_unused_mem,
"sr_hrstorageram_unused" : self.sr_hrstorageram_unused,
"sr_hrstorage_unused" : self.sr_hrstorage_unused,
}
try:
return method_func_dict[method](method_args)
except KeyError as exc:
logging.debug("unknown method called %s", method)
return "There is no method called %s" % method
def POST(self, args):
"""
GET Multiplexer function, according to first argument in URL
call this function, and resturn result to client
parameters:
/<str>function_name/...
return:
return function_name(what is left of arguments)
"""
method = args.split("/")[0]
logging.info("method %s should be called", method)
#web.header('Access-Control-Allow-Origin', '*')
#web.header('Access-Control-Allow-Credentials', 'true')
method_args = args.split("/")[1:] # all without method name
if method == "upload_raw_file":
return self.upload_raw_file(method_args)
else:
return "There is no method called %s" % method
def doc(self, args):
"""
get docstrings from methods available
ex: DataLogger/doc/get_projects/something/or/nothing
only the first argument after doc is evaluated,
the remaining is ignored
"""
# use only the fist argument to find function
web.header("Content-Type", "text/html")
outbuffer = ["<html><body>"]
try:
func = eval("self.%s" % args[0])
doc = func.__doc__
name = func.__name__
outbuffer.append("<h1 class=datalogger-function-name>def %s(*args, **kwds)</h1>" % name)
outbuffer.append("<div class=datalogger-function-doc>")
if doc is not None:
outbuffer.append(doc.replace("\n", "<br>"))
outbuffer.append("</div>")
except AttributeError as exc:
logging.info(exc)
outbuffer.append(str(exc))
outbuffer.append("</body></html>")
return "\n".join(outbuffer)
@staticmethod
@memcache
def get_projects(args):
"""
get available projects for this Datalogger Server
ex: Datalogger/get_projects/...
there is no further argument needed
returns:
json(existing project names)
"""
return json.dumps(DataLogger.get_projects(basedir))
@staticmethod
@memcache
def get_tablenames(args):
"""
get available tablenames, for one particular project
uses directory listing in raw subdirectory for this purpose
ex: Datalogger/get_tablenames/{projectname}
<projectname> has to be something from Datalogger/get_projects
returns:
json(list of possible tablenames for given project)
"""
project = args[0]
return json.dumps(DataLogger.get_tablenames(basedir, project))
@staticmethod
@memcache
def get_wikiname(args):
"""
return WikiName for given project/tablename
special method for generating wiki reports
ex: Datalogger/get_wikiname/{projectname}/{tablename}
returns:
json(str to use as WikiName)
"""
project, tablename = args[:2]
return json.dumps("DataLoggerReport%s%s" % (project.capitalize(), tablename.capitalize()))
@staticmethod
@memcache
def get_headers(args):
"""
get name of all headers (all columns so ts_keyname + index_keynames + value_keynames)
ex: Datalogger/get_headers/{projectname}/{tablename}
returns:
json(list of header names)
"""
project, tablename = args[:2]
datalogger = DataLogger(basedir, project, tablename)
return json.dumps(datalogger.headers)
@staticmethod
@memcache
def get_index_keynames(args):
"""
get name of index columns for project/tablename
ex: Datalogger/get_index_keynames/{projectname}/{tablename}
returns:
json(list of columns names of index columns defined)
"""
project, tablename = args[:2]
datalogger = DataLogger(basedir, project, tablename)
return json.dumps(datalogger.index_keynames)
@staticmethod
@memcache
def get_value_keynames(args):
"""
get name of value columns for project/tablename
all value_keynames have to be strictly numeric, in special are floats
ex: Datalogger/get_value_keynames/{projectname}/{tablename}
returns:
json(list of column names of value columns defined)
"""
project, tablename = args[:2]
datalogger = DataLogger(basedir, project, tablename)
return json.dumps(datalogger.value_keynames)
@staticmethod
@memcache
def get_ts_keyname(args):
"""
get name of timestamp column
ex: Datalogger/get_ts_keyname/{projectname}/{tablename}
returns:
json(column name of timestamp)
"""
project, tablename = args[:2]
datalogger = DataLogger(basedir, project, tablename)
return json.dumps(datalogger.ts_keyname)
@staticmethod
@memcache
def get_ts_caches(args):
"""
DEPRECATED use get_caches instead
get name of all index keys found in one specific TimeseriesArray
useful to build autofill input fields
attention: there are only ts caches if the raw data is already converted
ex: Datalogger/get_ts_caches/{projectname}/{tablename}/{datestring}
datstring has to be in format YYYY-MM-DD
returns:
json(list of all index keys)
"""
project, tablename, datestring = args[:3]
datalogger = DataLogger(basedir, project, tablename)
keys = []
for cache in datalogger.list_ts_caches(datestring):
keys.append(cache[1])
return json.dumps(keys)
@staticmethod
@memcache
def get_tsstat_caches(args):
"""
DEPRECATED use get_caches instead
get a list of all available TimeseriesStats available
attention: there are only tsstat caches if raw data is already analyzed
ex: Datalogger/get_tsstat_caches/{projectname}/{tablename}/{datestring}
returns:
json(list of all available TimeseriesStats data)
"""
project, tablename, datestring = args[:3]
datalogger = DataLogger(basedir, project, tablename)
keys = []
for cache in datalogger.list_tsstat_caches(datestring):
keys.append(cache[1])
return json.dumps(keys)
@staticmethod
@memcache
def get_caches_dict(args):
"""
DEPRECATED use get_caches instead
get name of all index keys found in one specific TimeseriesArray
parameters:
/<str>project/<str>tablename/<str>datestring
returns:
<json><list> of all index combinations
"""
# the same for all vicenter data
project, tablename, datestring = args[:3]
datalogger = DataLogger(basedir, project, tablename)
keys = []
for cache in datalogger.list_ts_caches(datestring):
key = dict(zip(datalogger.index_keynames, cache[1][1]))
keys.append(key)
return json.dumps(keys)
@staticmethod
@memcache
def get_last_business_day_datestring(args):
"""
get datestring of last businessday Mo.-Fr.
ex: Dataloger/get_last_business_day_datestring/...
returns:
json(datestring of last businessday)
"""
return json.dumps(DataLogger.get_last_business_day_datestring())
@staticmethod
@memcache
def get_datewalk(args):
"""
get list of datestrings between two datestrings
ex: Datalogger/get_datewalk/{datestring1}/{datestring2}
returns:
json(list of datestrings)
"""
datestring1, datestring2 = args[:2]
data = tuple(DataLogger.datewalker(datestring1, datestring2))
return json.dumps(data)
@staticmethod
def get_caches(args):
"""
return dictionary of caches available for this project/tablename/datestring combination
ex: Datalogger/get_caches/{project}/{tablename}/{datestring}
{
"tsastat" : {
"keys" : dictionary of available keys,
"pattern" : filename pattern,
},
"tsstat" : {
"keys" : dictionary of available keys,
"pattern" : filename pattern,
},
"tsa":
"keys" : dictionary of available keys,
"pattern" : filename pattern,
},
"ts" : {
"keys" : dictionary of available keys,
"pattern" : filename pattern,
},
"raw" : None or filename of raw data,
}
if return_date["raw"] == null it means, there is no raw data available
else if something (tsa,ts,tsastat,tsstat) is missing you can call get_tsastat to generate all caches
returns:
json(dictionary of caches and available data)
"""
project, tablename, datestring = args[:3]
datalogger = DataLogger(basedir, project, tablename)
caches = {}
try:
caches = datalogger.get_caches(datestring)
except StandardError as exc:
logging.exception(exc)
logging.error(caches)
return json.dumps(caches)
def get_tsa(self, args):
"""
return exported TimeseriesArray json formatted
"""
project, tablename, datestring = args
datalogger = DataLogger(basedir, project, tablename)
tsa = datalogger[datestring]
web.header('Content-type', 'text/html')
# you must not set this option, according to
# http://stackoverflow.com/questions/11866333/ioerror-when-trying-to-serve-file
# web.header('Transfer-Encoding','chunked')
yield "[" + json.dumps(tsa.export().next())
for chunk in tsa.export():
#logging.info("yielding %s", chunk)
yield "," + json.dumps(chunk)
yield "]"
def get_tsa_adv(self, args):
"""
return exported TimeseriesArray json formatted
"""
group_funcs = {
"avg" : lambda a, b: (a+b)/2,
"min" : min,
"max" : max,
"sum" : lambda a, b: a+b,
}
logging.info(args)
project, tablename, datestring, groupkeys_enc, group_func_name, index_pattern_enc = args
groupkeys_dec = eval(base64.b64decode(groupkeys_enc)) # should be tuple
logging.info("groupkeys_dec: %s", groupkeys_dec)
index_pattern = base64.b64decode(index_pattern_enc)
if index_pattern == "None":
index_pattern = None
logging.info("index_pattern: %s", index_pattern)
assert group_func_name in group_funcs.keys()
datalogger = DataLogger(basedir, project, tablename)
tsa = None
# gete data
if groupkeys_dec is not None:
logging.info("groupkeys is %s", groupkeys_dec)
groupkeys = tuple([unicode(key_value) for key_value in groupkeys_dec])
tsa1 = datalogger.load_tsa(datestring, index_pattern=index_pattern)
tsa = datalogger.group_by(datestring, tsa1, groupkeys, group_funcs[group_func_name])
else:
logging.info("groupkeys is None, fallback to get ungrouped tsa")
tsa = datalogger.load_tsa(datestring, index_pattern=index_pattern)
logging.info(tsa.keys()[0])
web.header('Content-type', 'text/html')
# you must not set this option, according to
# http://stackoverflow.com/questions/11866333/ioerror-when-trying-to-serve-file
# web.header('Transfer-Encoding','chunked')
yield "[" + json.dumps(tsa.export().next())
for chunk in tsa.export():
#logging.info("yielding %s", chunk)
yield "," + json.dumps(chunk)
yield "]"
def get_ts(self, args):
"""
get TimeseriesArray object with one particular Timeseries selected by key
parameters:
/<str>project/<str>tablename/<str>datestring/base64endoded(tuple(key))
returns:
tsa exported in JSON format
"""
assert len(args) == 4
project, tablename, datestring, key_str = args
key = tuple([unicode(key_value) for key_value in eval(base64.b64decode(key_str))])
logging.info("project : %s", project)
logging.info("tablename : %s", tablename)
logging.info("datestring : %s", datestring)
logging.info("key : %s", key)
datalogger = DataLogger(basedir, project, tablename)
key_dict = dict(zip(datalogger.index_keynames, key))
tsa = datalogger.load_tsa(datestring, filterkeys=key_dict)
yield "[" + json.dumps(tsa.export().next())
for chunk in tsa.export():
yield "," + json.dumps(chunk)
yield "]"
#outbuffer = json.dumps(tuple(tsa.export()))
#return outbuffer
def get_tsastats(self, args):
"""
return exported TimeseriesArrayStats json formatted
[
list of index_keys,
list of value_keys,
list of [
index_key : tsstat_dictionary
]
]
returns:
json(tsastats_dict)
"""
project, tablename, datestring = args[:3]
datalogger = DataLogger(basedir, project, tablename)
tsastats = datalogger.load_tsastats(datestring)
return tsastats.to_json()
@memcache
def get_stat_func_names(self, args):
"""
return defined stat_func_names in TimeseriesStats objects
ex: Datalogger/get_stat_func_names/
returns:
json(list of statistical function_names for tsstat)
"""
stat_func_names = TimeseriesStats.stat_funcs.keys()
return json.dumps(stat_func_names)
def get_quantile(self, args):
"""
return exported QuantileArray json formatted
ex: Datalogger/get_quantile/{projectname}/{tablename}/{datestring}
[
dict of index_keys : dict of quantile,
list of index_keys,
list of value_names,
]
returns:
json(quantile_dict)
"""
project, tablename, datestring = args[:3]
datalogger = DataLogger(basedir, project, tablename)
quantile = datalogger.load_quantile(datestring)
return quantile.to_json()
    def get_quantile_web(self, args):
        """
        return exported QuantileArray json formatted, special
        version for use in webpages to render with tablesorter
        in difference to get_quantile the value_keyname has to be given
        ex: Datalogger/get_quantile_web/{projectname}/{tablename}/{datestring}/{value_keyname}

        returns:
        json(list of rows: header row followed by one row per index key)
        """
        project, tablename, datestring, value_keyname = args[:4]
        datalogger = DataLogger(basedir, project, tablename)
        qa = datalogger.load_quantile(datestring)
        ret_data = []
        # build header: index column names followed by the five quantile columns
        ret_data.append(list(datalogger.index_keynames) + ["Q0", "Q1", "Q2", "Q3", "Q4"])
        # data part
        # NOTE(review): v.values() relies on the quantile dict iterating in
        # Q0..Q4 order -- presumably guaranteed by the Quantile class, verify
        for k, v in qa[value_keyname].quantile.items():
            ret_data.append(list(k) + v.values())
        return json.dumps(ret_data)
def get_chart_data_ungrouped(self, args):
"""
get values from RAW Archive
parameters:
/<str>project/<str>tablename/<str>datestring/<str>key/<str>value_keys/<str>datetype/<str>group_str
keyids=hostname:srvszp2orb.tilak.cc means
this is only useful if keyids are unique
return data like this:
[
{
name: "name of this series" usually this is the counter name
data : [[ts, value], ...]
},
...
]
"""
assert len(args) == 7
project, tablename, datestring, keys_str, value_keys_str, datatype_str, group_str = args
# key_str should be a tuple string, convert to unicode tuple
keys = tuple([unicode(key_value) for key_value in eval(base64.b64decode(keys_str))])
value_keys = ()
if json.loads(value_keys_str) is not None:
value_keys = tuple(json.loads(value_keys_str))
datatype = json.loads(datatype_str)
group_by = ()
if json.loads(group_str) is not None:
group_by = (json.loads(group_str),)
logging.info("project : %s", project)
logging.info("tablename : %s", tablename)
logging.info("datestring : %s", datestring)
logging.info("keys : %s", keys)
logging.info("value_keys : %s", value_keys)
logging.info("datatype : %s", datatype)
logging.info("group_by : %s", group_by)
datalogger = DataLogger(basedir, project, tablename)
keys_dict = dict(zip(datalogger.index_keynames, keys))
# build filter if any group_by is given
filterkeys = keys_dict # default
if len(group_by) > 0:
filterkeys = {}
for key in group_by:
filterkeys[key] = keys_dict[key]
logging.info("useing filterkeys: %s", filterkeys)
tsa = datalogger.load_tsa(datestring, filterkeys=filterkeys)
logging.info("got tsa with %d keys", len(tsa))
# is there something to calculate, lets do it
if datatype != u"absolute":
new_value_keys = []
for value_key in value_keys:
new_value_key = None
if datatype == "derive":
new_value_key = "%s_d" % value_key
logging.info("deriving %s to %s", value_key, new_value_key)
tsa.add_derive_col(value_key, new_value_key)
elif datatype == "per_s":
new_value_key = "%s_s" % value_key
logging.info("deriving %s to %s", value_key, new_value_key)
tsa.add_per_s_col(value_key, new_value_key)
tsa.remove_col(value_key)
new_value_keys.append(new_value_key)
value_keys = new_value_keys
#logging.info(tsa.get_value_keys())
# grouping stuff if necessary
data = None # holds finally calculated data
stats = None
if len(group_by) > 0:
logging.info("generating new key for left possible keys in grouped tsa")
key_dict = dict(zip(datalogger.index_keynames, keys))
new_key = tuple((key_dict[key] for key in group_by))
logging.info("key after grouping would be %s", new_key)
logging.info("grouping tsa by %s", group_by)
new_tsa = datalogger.group_by(datestring, tsa, group_by, group_func=lambda a, b: a + b)
#new_tsa = tsa.get_group_by_tsa(group_by, group_func=lambda a: sum(a))
tsa = new_tsa
data = tsa[new_key].dump_dict()
stats = tsa[new_key].stats.htmltable()
else:
data = tsa[keys].dump_dict()
stats = tsa[keys].stats.htmltable()
result = {
"stats" : stats,
"data" : [],
}
# holds return data
logging.info("data keys : %s", data[data.keys()[0]].keys())
for value_key in value_keys:
# ist important to sort by timestamp, to not confuse
# highcharts
result["data"].append(
{
"name" : value_key,
"data" : tuple(((ts * 1000, row_dict[value_key]) for ts, row_dict in sorted(data.items())))
}
)
return json.dumps(result)
def get_hc_daily_data(self, args):
"""
get values(min 1) from TimeseriesArray to use for highcharts graphing
parameters:
/project/tablename/datestring/index_key/value_keynames/index_keyname
<b>poject</b> <str> defines which project to use
<b>tablename</b> <str> defines which tablename to use
<b>datestring</b> <str> in form of YYYY-MM-DD to define whih day to use
<b>index_key</b> base64 encoded tuple, defines which Timeseries to use, ex. (u'srvcl14db2.tilak.cc', u'DB2', u'ablagsys', u'data only')
<b>value_keynames</b> json encoded list of value_keynames to show in graph
each value_keyname will be a separate highchart line
<b>index_keynam</b> json encoded <str> or null
if given, the data will be grouped on this given index_keyname
if hostname is given the above example will be gruped by hostname=u'srvcl14db2.tilak.cc'
and all possible Timeseries will be summed up
return data json encoded like this
[
{ name : "timeseries value_name 1",
data : [[ts, value], ...]
},
{ name : "timeseries value name 2",
data : [[ts, value], ...]
}
...
]
this structure could already be used in highcharts.data
"""
assert len(args) == 6
project, tablename, datestring, index_key_b64, value_keynames_str, index_keyname_str = args
# key_str should be a tuple string, convert to unicode tuple
index_key = tuple([unicode(key_value) for key_value in eval(base64.b64decode(index_key_b64))])
value_keynames = ()
if json.loads(value_keynames_str) is not None:
value_keynames = tuple(json.loads(value_keynames_str))
index_keyname = ()
if json.loads(index_keyname_str) is not None:
index_keyname = (json.loads(index_keyname_str),)
logging.info("project : %s", project)
logging.info("tablename : %s", tablename)
logging.info("datestring : %s", datestring)
logging.info("index_key : %s", index_key)
logging.info("value_keynames : %s", value_keynames)
logging.info("index_keyname : %s", index_keyname)
datalogger = DataLogger(basedir, project, tablename)
index_key_dict = dict(zip(datalogger.index_keynames, index_key))
# build filter if any group_by is given
filterkeys = index_key_dict # default
if len(index_keyname) > 0:
filterkeys = {}
for key in index_keyname:
filterkeys[key] = index_key_dict[key]
logging.info("using filterkeys: %s", filterkeys)
tsa = datalogger.load_tsa(datestring, filterkeys=filterkeys)
logging.info("got tsa with %d keys", len(tsa))
# grouping stuff if necessary
data = None # holds finally calculated data
stats = None # holds tsstats informations
if len(index_keyname) > 0:
# grouping by key named
logging.info("generating new key for left possible keys in grouped tsa")
new_key = tuple((index_key_dict[key] for key in index_keyname))
logging.info("key after grouping would be %s", new_key)
logging.info("grouping tsa by %s", index_keyname)
new_tsa = datalogger.group_by(datestring, tsa, index_keyname, group_func=lambda a, b: a + b)
tsa = new_tsa
data = tsa[new_key].dump_dict()
stats = tsa[new_key].stats.get_stats()
else:
# not grouping, simple
data = tsa[index_key].dump_dict()
stats = tsa[index_key].stats.get_stats()
# holds return data
logging.info("data keys : %s", data[data.keys()[0]].keys())
# get in highcharts shape
result = {
"stats" : stats,
"data" : [], # holds highchart data
}
for value_keyname in value_keynames:
# its important to sort by timestamp, to not confuse
# highcharts
result["data"].append(
{
"name" : value_keyname,
"data" : tuple(((ts * 1000, row_dict[value_keyname]) for ts, row_dict in sorted(data.items())))
}
)
return json.dumps(result)
def get_longtime_data(self, args):
"""
get values from RAW Archive
parameters:
/<str>project/<str>tablename/<str>datestring/<str>key/<str>value_keys
keyids=hostname:srvszp2orb.tilak.cc means
this is only useful if keyids are unique
return data like this:
[
{
name: "name of this series" usually this is the counter name
data : [[ts, value], ...]
},
...
]
"""
assert len(args) == 5
project, tablename, monthstring, keys_str, value_key = args
if len(monthstring) > 7:
return "monthstring, has to be in YYYY-MM format"
# key_str should be a tuple string, convert to unicode tuple
keys = tuple([unicode(key_value) for key_value in eval(base64.b64decode(keys_str))])
logging.info("project : %s", project)
logging.info("tablename : %s", tablename)
logging.info("monthstring : %s", monthstring)
logging.info("keys : %s", keys)
logging.info("value_keys : %s", value_key)
datalogger = DataLogger(basedir, project, tablename)
data = datalogger.get_tsastats_longtime_hc(monthstring, keys, value_key)
#logging.info("got data: %s", data)
hc_data = [{"name" : funcname, "data" : data[funcname]} for funcname in data.keys()]
return json.dumps(hc_data)
def upload_raw_file(self, args):
"""
save receiving file into datalogger structure
/project/tablename/datestring
"""
assert len(args) == 3
project, tablename, datestring = args
logging.info("basedir: %s", basedir)
logging.info("tablename: %s", tablename)
logging.info("datestring:%s", datestring)
datalogger = DataLogger(basedir, project, tablename)
filename = os.path.join(datalogger.raw_basedir, "%s_%s.csv.gz" % (tablename, datestring))
if os.path.isfile(filename):
logging.info("File already exists")
return "File already exists"
try:
filehandle = gzip.open(filename, "wb")
x = web.input(myfile={})
logging.info(x.keys())
logging.info("Storing data to %s", filename)
if "filedata" in x: # curl type
filehandle.write(x["filedata"])
else: # requests or urllib3 type
filehandle.write(x["myfile"].file.read())
filehandle.close()
except StandardError as exc:
logging.exception(exc)
os.unlink(filename)
logging.info("Error while saving received data to")
return "Error while saving received data to"
try:
tsa = datalogger[str(datestring)] # read received data
except StandardError as exc:
logging.exception(exc)
os.unlink(filename)
logging.info("Invalid data in uploaded file, see apache error log for details, uploaded file not stored")
return "Invalid data in uploaded file, see apache error log for details, uploaded file not stored"
logging.info("File stored")
return "File stored"
def get_tsastats_table(self, args):
"""
return html renderer table from tsatstats data
"""
def csv_to_table(csvdata, keys):
outbuffer = []
outbuffer.append("<thead><tr>")
[outbuffer.append("<th>%s</th>" % header) for header in csvdata[0]]
outbuffer.append("</tr></thead><tbody>")
for values in csvdata[1:]:
outbuffer.append("<tr>")
[outbuffer.append("<td >%s</td>" % value) for value in values[0:keys]]
[outbuffer.append("<td type=numeric>%0.2f</td>" % value) for value in values[keys:]]
outbuffer.append("</tr>")
outbuffer.append("</tbody>")
return outbuffer
project, tablename, datestring, stat_func_name = args
datalogger = DataLogger(basedir, project, tablename)
tsastats = datalogger.load_tsastats(datestring)
return json.dumps("\n".join(csv_to_table(tsastats.to_csv(stat_func_name), len(tsastats.index_keys))))
def get_tsastats_func(self, args):
"""
return json data to render html table from it
parameters:
<b>project</b> project string
<b>tablename</b> tablename string
<b>datestring</b> datestring in YYYY-MM-DD form
<b>stat_func_name</b> statistical function
"""
project, tablename, datestring, stat_func_name = args
datalogger = DataLogger(basedir, project, tablename)
tsastats = datalogger.load_tsastats(datestring)
return json.dumps(tsastats.to_csv(stat_func_name))
@staticmethod
@calllogger
def get_scatter_data(args):
"""
gets scatter plot data of two value_keys of the same tablename
ex: Datalogger/{projectname}/{tablename}/{datestring}/{value_keyname1}/{value_keyname2}/{stat function name}
value_keyname{1/2} has to be one of get_value_keynames
stat function name has to be one of get_stat_func_names
returns:
json(highgraph data)
"""
assert len(args) == 6
project, tablename, datestring, value_key1, value_key2, stat_func_name = args
logging.info("project : %s", project)
logging.info("tablename : %s", tablename)
logging.info("datestring : %s", datestring)
logging.info("value_key1 : %s", value_key1)
logging.info("value_key2 : %s", value_key2)
datalogger = DataLogger(basedir, project, tablename)
tsastats = datalogger.load_tsastats(datestring)
hc_scatter_data = []
for key, tsstat in tsastats.items():
hc_scatter_data.append({
"name" : str(key),
"data" : ((tsstat[value_key1]["avg"], tsstat[value_key2]["avg"]), )
})
return json.dumps(hc_scatter_data)
@staticmethod
def sr_vicenter_unused_cpu_cores(args):
"""
special report to find virtual machine which re not used their virtual core entirely
on this machine there is a possibility to save some virtual cores
works only for VMware machines, in special virtualMachineCpuStats
"""
datestring = args[0]
datalogger = DataLogger(basedir, "vicenter", "virtualMachineCpuStats")
tsastat = datalogger.load_tsastats(datestring)
tsastat_g = datalogger.tsastat_group_by(tsastat, ("hostname", ))
data = []
data.append(("hostname", "avg_idle_min", "avg_used_avg", "avg_used_max"))
for key in tsastat_g.keys():
num_cpu = sum([key[0] in index_key for index_key in tsastat.keys()])
if num_cpu < 3:
continue
data.append((key[0], "%0.2f" % tsastat_g[key]["cpu.idle.summation"]["min"], "%0.2f" % tsastat_g[key]["cpu.used.summation"]["avg"], "%0.2f" % tsastat_g[key]["cpu.used.summation"]["max"]))
return json.dumps(data)
@staticmethod
def sr_vicenter_unused_mem(args):
"""
special resport to find virtual machine which are not used their ram entirely
on this machines there is a possibility to save some virtual memory
works only for VMware machine, in special virtualMachineMemoryStats
"""
datestring = args[0]
datalogger = DataLogger(basedir, "vicenter", "virtualMachineMemoryStats")
tsastat = datalogger.load_tsastats(datestring)
tsastat_g = datalogger.tsastat_group_by(tsastat, ("hostname", ))
data = []
data.append(("hostname", "avg_active_max", "avg_granted_min", "avg_notused_min"))
for key in tsastat_g.keys():
not_used = tsastat_g[key]["mem.granted.average"]["min"] - tsastat_g[key]["mem.active.average"]["max"]
data.append((key[0], "%0.2f" % tsastat_g[key]["mem.active.average"]["max"], "%0.3f" % tsastat_g[key]["mem.granted.average"]["min"], "%0.2f" % not_used))
return json.dumps(data)
@staticmethod
def sr_hrstorageram_unused(args):
"""
special report to find servers which are not using their ram entirely
specially on virtual machines are is a huge saving potential
works only for snmp data especially hrStorageTable
"""
datestring = args[0]
datalogger = DataLogger(basedir, "snmp", "hrStorageTable")
tsastat = datalogger.load_tsastats(datestring)
data = []
data.append(("hostname", "hrStorageSizeKb", "hrStorageUsedKb", "hrStorageNotUsedKbMin", "hrStorageNotUsedPct"))
for index_key in tsastat.keys():
# (u'srvcacdbp1.tilak.cc', u'Physical Memory',
# u'HOST-RESOURCES-TYPES::hrStorageRam')
if u'HOST-RESOURCES-TYPES::hrStorageRam' not in index_key:
del tsastat[index_key]
for key, tsstat in datalogger.tsastat_group_by(tsastat, ("hostname", )).items():
sizekb = tsstat["hrStorageSize"]["min"] * tsstat["hrStorageAllocationUnits"]["max"] / 1024
usedkb = tsstat["hrStorageUsed"]["max"] * tsstat["hrStorageAllocationUnits"]["max"] / 1024
notused = sizekb - usedkb
notused_pct = 100.0 * notused / sizekb
data.append((key[0], "%0.2f" % sizekb, "%0.2f" % usedkb, "%0.2f" % notused, "%0.2f" % notused_pct))
return json.dumps(data)
@staticmethod
def sr_hrstorage_unused(args):
"""
special report to get a report of unused SNMP Host Storage
works only with snmp/hrStorageTable
"""
datestring, storage_type = args[:2]
datalogger = DataLogger(basedir, "snmp", "hrStorageTable")
tsastat = datalogger.load_tsastats(datestring)
data = []
data.append(("hostname", "hrStorageDescr", "hrStorageSizeKb", "hrStorageUsedKb", "hrStorageNotUsedKbMin", "hrStorageNotUsedPct"))
for index_key in tsastat.keys():
# (u'srvcacdbp1.tilak.cc', u'Physical Memory',
# u'HOST-RESOURCES-TYPES::hrStorageRam')
if (u"HOST-RESOURCES-TYPES::%s" % storage_type) not in index_key:
del tsastat[index_key]
if index_key[1][:4] in (u"/run", u"/dev", u"/sys"):
del tsastat[index_key]
for key, tsstat in tsastat.items():
sizekb = tsstat["hrStorageSize"]["min"] * tsstat["hrStorageAllocationUnits"]["max"] / 1024
usedkb = tsstat["hrStorageUsed"]["max"] * tsstat["hrStorageAllocationUnits"]["max"] / 1024
notused = sizekb - usedkb
notused_pct = 0.0
try:
notused_pct = 100.0 * notused / sizekb
except ZeroDivisionError:
pass
data.append((key[0], key[1], "%0.2f" % sizekb, "%0.2f" % usedkb, "%0.2f" % notused, "%0.2f" % notused_pct))
return json.dumps(data)
if __name__ == "__main__":
    # start the web.py application serving the URL mapping defined at
    # module level
    app = web.application(urls, globals())
    app.run()
|
|
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .gtfsobjectbase import GtfsObjectBase
from . import problems as problems_module
from . import util
class Route(GtfsObjectBase):
"""Represents a single route."""
_REQUIRED_FIELD_NAMES = [
'route_id', 'route_short_name', 'route_long_name', 'route_type'
]
_FIELD_NAMES = _REQUIRED_FIELD_NAMES + [
'agency_id', 'route_desc', 'route_url', 'route_color', 'route_text_color',
'bikes_allowed'
]
_ROUTE_TYPES = {
0: {'name':'Tram', 'max_speed':100},
1: {'name':'Subway', 'max_speed':150},
2: {'name':'Rail', 'max_speed':300},
3: {'name':'Bus', 'max_speed':100},
4: {'name':'Ferry', 'max_speed':80},
5: {'name':'Cable Car', 'max_speed':50},
6: {'name':'Gondola', 'max_speed':50},
7: {'name':'Funicular', 'max_speed':50},
}
# Create a reverse lookup dict of route type names to route types.
_ROUTE_TYPE_IDS = set(_ROUTE_TYPES.keys())
_ROUTE_TYPE_NAMES = dict((v['name'], k) for k, v in _ROUTE_TYPES.items())
_TABLE_NAME = 'routes'
  def __init__(self, short_name=None, long_name=None, route_type=None,
               route_id=None, agency_id=None, field_dict=None):
    """Initialize a new Route object.

    Args:
      short_name: route_short_name as a string, or None
      long_name: route_long_name as a string, or None
      route_type: either a route type name (e.g. 'Bus'), which is mapped to
        its numeric id, or any other value which is stored verbatim
      route_id: route_id as a string, or None
      agency_id: agency_id as a string, or None
      field_dict: dict of fields to copy onto the instance; the explicit
        arguments above are written into it (note: mutates the passed dict)
    """
    self._schedule = None
    self._trips = []
    if not field_dict:
      field_dict = {}
    if short_name is not None:
      field_dict['route_short_name'] = short_name
    if long_name is not None:
      field_dict['route_long_name'] = long_name
    if route_type is not None:
      if route_type in self._ROUTE_TYPE_NAMES:
        # known type name: set the numeric id directly on the instance,
        # intentionally bypassing field_dict
        self.route_type = self._ROUTE_TYPE_NAMES[route_type]
      else:
        field_dict['route_type'] = route_type
    if route_id is not None:
      field_dict['route_id'] = route_id
    if agency_id is not None:
      field_dict['agency_id'] = agency_id
    # bulk-set all fields as instance attributes
    self.__dict__.update(field_dict)
def AddTrip(self, schedule=None, headsign=None, service_period=None,
trip_id=None):
"""Add a trip to this route.
Args:
schedule: a Schedule object which will hold the new trip or None to use
the schedule of this route.
headsign: headsign of the trip as a string
service_period: a ServicePeriod object or None to use
schedule.GetDefaultServicePeriod()
trip_id: optional trip_id for the new trip
Returns:
a new Trip object
"""
if schedule is None:
assert self._schedule is not None
schedule = self._schedule
if trip_id is None:
trip_id = util.FindUniqueId(schedule.trips)
if service_period is None:
service_period = schedule.GetDefaultServicePeriod()
trip_class = self.GetGtfsFactory().Trip
trip_obj = trip_class(route=self, headsign=headsign,
service_period=service_period, trip_id=trip_id)
schedule.AddTripObject(trip_obj)
return trip_obj
  def _AddTripObject(self, trip):
    """Append a Trip to this route's internal trip list.

    Only class Schedule may call this. Users of the API should call
    Route.AddTrip or schedule.AddTripObject.
    """
    self._trips.append(trip)
def __getattr__(self, name):
"""Return None or the default value if name is a known attribute.
This method overrides GtfsObjectBase.__getattr__ to provide backwards
compatible access to trips.
"""
if name == 'trips':
return self._trips
else:
return GtfsObjectBase.__getattr__(self, name)
def GetPatternIdTripDict(self):
"""Return a dictionary that maps pattern_id to a list of Trip objects."""
d = {}
for t in self._trips:
d.setdefault(t.pattern_id, []).append(t)
return d
def ValidateRouteIdIsPresent(self, problems):
if util.IsEmpty(self.route_id):
problems.MissingValue('route_id')
def ValidateRouteTypeIsPresent(self, problems):
if util.IsEmpty(self.route_type):
problems.MissingValue('route_type')
def ValidateRouteShortAndLongNamesAreNotBlank(self, problems):
if util.IsEmpty(self.route_short_name) and \
util.IsEmpty(self.route_long_name):
problems.InvalidValue('route_short_name',
self.route_short_name,
'Both route_short_name and '
'route_long name are blank.')
def ValidateRouteShortNameIsNotTooLong(self, problems):
if self.route_short_name and len(self.route_short_name) > 6:
problems.InvalidValue('route_short_name',
self.route_short_name,
'This route_short_name is relatively long, which '
'probably means that it contains a place name. '
'You should only use this field to hold a short '
'code that riders use to identify a route. '
'If this route doesn\'t have such a code, it\'s '
'OK to leave this field empty.',
type=problems_module.TYPE_WARNING)
def ValidateRouteLongNameDoesNotContainShortName(self, problems):
if self.route_short_name and self.route_long_name:
short_name = self.route_short_name.strip().lower()
long_name = self.route_long_name.strip().lower()
if (long_name.startswith(short_name + ' ') or
long_name.startswith(short_name + '(') or
long_name.startswith(short_name + '-')):
problems.InvalidValue('route_long_name',
self.route_long_name,
'route_long_name shouldn\'t contain '
'the route_short_name value, as both '
'fields are often displayed '
'side-by-side.',
type=problems_module.TYPE_WARNING)
def ValidateRouteShortAndLongNamesAreNotEqual(self, problems):
if self.route_short_name and self.route_long_name:
short_name = self.route_short_name.strip().lower()
long_name = self.route_long_name.strip().lower()
if long_name == short_name:
problems.InvalidValue('route_long_name',
self.route_long_name,
'route_long_name shouldn\'t be the same '
'the route_short_name value, as both '
'fields are often displayed '
'side-by-side. It\'s OK to omit either the '
'short or long name (but not both).',
type=problems_module.TYPE_WARNING)
def ValidateRouteDescriptionNotTheSameAsRouteName(self, problems):
if (self.route_desc and
((self.route_desc == self.route_short_name) or
(self.route_desc == self.route_long_name))):
problems.InvalidValue('route_desc',
self.route_desc,
'route_desc shouldn\'t be the same as '
'route_short_name or route_long_name')
  def ValidateRouteTypeHasValidValue(self, problems):
    """Coerce route_type to int and check it against the known GTFS types."""
    if self.route_type is not None:
      try:
        if not isinstance(self.route_type, int):
          # in-place coercion; may raise TypeError/ValueError on bad input
          self.route_type = util.NonNegIntStringToInt(self.route_type, problems)
      except (TypeError, ValueError):
        problems.InvalidValue('route_type', self.route_type)
      else:
        # unknown numeric types only get a warning, not an error
        if self.route_type not in self._ROUTE_TYPE_IDS:
          problems.InvalidValue('route_type',
                                self.route_type,
                                type=problems_module.TYPE_WARNING)
def ValidateRouteUrl(self, problems):
if self.route_url:
util.ValidateURL(self.route_url, 'route_url', problems)
def ValidateRouteColor(self, problems):
    """Validate route_color as a 6-digit hex color; clear it when invalid."""
    if not self.route_color:
        return
    if util.IsValidHexColor(self.route_color):
        return
    problems.InvalidValue('route_color', self.route_color,
                          'route_color should be a valid color description '
                          'which consists of 6 hexadecimal characters '
                          'representing the RGB values. Example: 44AA06')
    # Drop the bad value so later checks don't operate on garbage.
    self.route_color = None
def ValidateRouteTextColor(self, problems):
    """Validate route_text_color as a 6-digit hex color; clear it when invalid."""
    if not self.route_text_color:
        return
    if util.IsValidHexColor(self.route_text_color):
        return
    problems.InvalidValue('route_text_color', self.route_text_color,
                          'route_text_color should be a valid color '
                          'description, which consists of 6 hexadecimal '
                          'characters representing the RGB values. '
                          'Example: 44AA06')
    # Drop the bad value so later checks don't operate on garbage.
    self.route_text_color = None
def ValidateRouteAndTextColors(self, problems):
    """Warn when route_color and route_text_color lack luminance contrast."""
    # Spec defaults: white background (ffffff), black text (000000).
    bg_lum = util.ColorLuminance(self.route_color or 'ffffff')
    txt_lum = util.ColorLuminance(self.route_text_color or '000000')
    if abs(txt_lum - bg_lum) >= 510/7.:
        return
    # http://www.w3.org/TR/2000/WD-AERT-20000426#color-contrast recommends
    # a threshold of 125, but that is for normal text and too harsh for
    # big colored logos like line names, so we keep the original threshold
    # from r541 (but note that weight has shifted between RGB components).
    problems.InvalidValue('route_color', self.route_color,
                          'The route_text_color and route_color should '
                          'be set to contrasting colors, as they are used '
                          'as the text and background color (respectively) '
                          'for displaying route names. When left blank, '
                          'route_text_color defaults to 000000 (black) and '
                          'route_color defaults to FFFFFF (white). A common '
                          'source of issues here is setting route_color to '
                          'a dark color, while leaving route_text_color set '
                          'to black. In this case, route_text_color should '
                          'be set to a lighter color like FFFFFF to ensure '
                          'a legible contrast between the two.',
                          type=problems_module.TYPE_WARNING)
def ValidateBikesAllowed(self, problems):
    """Check bikes_allowed against the yes/no/unknown enumeration."""
    if not self.bikes_allowed:
        return
    util.ValidateYesNoUnknown(self.bikes_allowed, 'bikes_allowed', problems)
def ValidateBeforeAdd(self, problems):
    """Run every per-field route validation before the route is added.

    Always returns True: none of these checks are blocking.
    """
    checks = (
        self.ValidateRouteIdIsPresent,
        self.ValidateRouteTypeIsPresent,
        self.ValidateRouteShortAndLongNamesAreNotBlank,
        self.ValidateRouteShortNameIsNotTooLong,
        self.ValidateRouteLongNameDoesNotContainShortName,
        self.ValidateRouteShortAndLongNamesAreNotEqual,
        self.ValidateRouteDescriptionNotTheSameAsRouteName,
        self.ValidateRouteTypeHasValidValue,
        self.ValidateRouteUrl,
        self.ValidateRouteColor,
        self.ValidateRouteTextColor,
        self.ValidateRouteAndTextColors,
        self.ValidateBikesAllowed,
    )
    for check in checks:
        check(problems)
    # None of these checks are blocking
    return True
def ValidateAfterAdd(self, problems):
    """Hook for post-add validation; routes need none, so this is a no-op."""
    return
def AddToSchedule(self, schedule, problems):
    """Add this route to *schedule*, delegating to schedule.AddRouteObject."""
    schedule.AddRouteObject(self, problems)
def Validate(self, problems=problems_module.default_problem_reporter):
    """Run all route validations, reporting issues to *problems*."""
    self.ValidateBeforeAdd(problems)
    self.ValidateAfterAdd(problems)
|
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
clean_html,
determine_ext,
int_or_none,
sanitized_Request,
ExtractorError,
urlencode_postdata
)
class FunimationIE(InfoExtractor):
    """Extractor for funimation.com official/promotional show videos.

    NOTE(review): funimation.com sits behind CloudFlare; several code paths
    below (403 handling, ci_session User-Agent replay) exist solely to work
    around its bot checks.
    """
    _VALID_URL = r'https?://(?:www\.)?funimation\.com/shows/[^/]+/videos/(?:official|promotional)/(?P<id>[^/?#&]+)'
    _NETRC_MACHINE = 'funimation'
    _TESTS = [{
        'url': 'http://www.funimation.com/shows/air/videos/official/breeze',
        'info_dict': {
            'id': '658',
            'display_id': 'breeze',
            'ext': 'mp4',
            'title': 'Air - 1 - Breeze',
            'description': 'md5:1769f43cd5fc130ace8fd87232207892',
            'thumbnail': 're:https?://.*\.jpg',
        },
        'skip': 'Access without user interaction is forbidden by CloudFlare, and video removed',
    }, {
        'url': 'http://www.funimation.com/shows/hacksign/videos/official/role-play',
        'info_dict': {
            'id': '31128',
            'display_id': 'role-play',
            'ext': 'mp4',
            'title': '.hack//SIGN - 1 - Role Play',
            'description': 'md5:b602bdc15eef4c9bbb201bb6e6a4a2dd',
            'thumbnail': 're:https?://.*\.jpg',
        },
        'skip': 'Access without user interaction is forbidden by CloudFlare',
    }, {
        'url': 'http://www.funimation.com/shows/attack-on-titan-junior-high/videos/promotional/broadcast-dub-preview',
        'info_dict': {
            'id': '9635',
            'display_id': 'broadcast-dub-preview',
            'ext': 'mp4',
            'title': 'Attack on Titan: Junior High - Broadcast Dub Preview',
            'description': 'md5:f8ec49c0aff702a7832cd81b8a44f803',
            'thumbnail': 're:https?://.*\.(?:jpg|png)',
        },
        'skip': 'Access without user interaction is forbidden by CloudFlare',
    }]
    _LOGIN_URL = 'http://www.funimation.com/login'
    def _download_webpage(self, *args, **kwargs):
        """Download a page, translating CloudFlare's 403 "security check"
        interstitial into an actionable ExtractorError."""
        try:
            return super(FunimationIE, self)._download_webpage(*args, **kwargs)
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                response = ee.cause.read()
                if b'>Please complete the security check to access<' in response:
                    raise ExtractorError(
                        'Access to funimation.com is blocked by CloudFlare. '
                        'Please browse to http://www.funimation.com/, solve '
                        'the reCAPTCHA, export browser cookies to a text file,'
                        ' and then try again with --cookies YOUR_COOKIE_FILE.',
                        expected=True)
            raise
    def _extract_cloudflare_session_ua(self, url):
        """Return the User-Agent stored in the ci_session cookie for *url*,
        or None when the cookie is absent or has no user_agent field."""
        ci_session_cookie = self._get_cookies(url).get('ci_session')
        if ci_session_cookie:
            ci_session = compat_urllib_parse_unquote_plus(ci_session_cookie.value)
            # ci_session is a string serialized by PHP function serialize()
            # This case is simple enough to use regular expressions only
            return self._search_regex(
                r'"user_agent";s:\d+:"([^"]+)"', ci_session, 'user agent',
                default=None)
    def _login(self):
        """Log in with netrc/option credentials; silently skip when absent.

        Raises ExtractorError when the login form rejects the credentials.
        """
        (username, password) = self._get_login_info()
        if username is None:
            return
        data = urlencode_postdata({
            'email_field': username,
            'password_field': password,
        })
        # Reuse the UA CloudFlare recorded for this session, else a fixed one.
        user_agent = self._extract_cloudflare_session_ua(self._LOGIN_URL)
        if not user_agent:
            user_agent = 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0'
        login_request = sanitized_Request(self._LOGIN_URL, data, headers={
            'User-Agent': user_agent,
            'Content-Type': 'application/x-www-form-urlencoded'
        })
        login_page = self._download_webpage(
            login_request, None, 'Logging in as %s' % username)
        # Presence of a logout link means the login succeeded.
        if any(p in login_page for p in ('funimation.com/logout', '>Log Out<')):
            return
        error = self._html_search_regex(
            r'(?s)<div[^>]+id=["\']errorMessages["\'][^>]*>(.+?)</div>',
            login_page, 'error messages', default=None)
        if error:
            raise ExtractorError('Unable to login: %s' % error, expected=True)
        raise ExtractorError('Unable to log in')
    def _real_initialize(self):
        self._login()
    def _real_extract(self, url):
        display_id = self._match_id(url)
        errors = []
        formats = []
        # Maps site error descriptions to keys of videoErrorMessages content.
        ERRORS_MAP = {
            'ERROR_MATURE_CONTENT_LOGGED_IN': 'matureContentLoggedIn',
            'ERROR_MATURE_CONTENT_LOGGED_OUT': 'matureContentLoggedOut',
            'ERROR_SUBSCRIPTION_LOGGED_OUT': 'subscriptionLoggedOut',
            'ERROR_VIDEO_EXPIRED': 'videoExpired',
            'ERROR_TERRITORY_UNAVAILABLE': 'territoryUnavailable',
            'SVODBASIC_SUBSCRIPTION_IN_PLAYER': 'basicSubscription',
            'SVODNON_SUBSCRIPTION_IN_PLAYER': 'nonSubscription',
            'ERROR_PLAYER_NOT_RESPONDING': 'playerNotResponding',
            'ERROR_UNABLE_TO_CONNECT_TO_CDN': 'unableToConnectToCDN',
            'ERROR_STREAM_NOT_FOUND': 'streamNotFound',
        }
        USER_AGENTS = (
            # PC UA is served with m3u8 that provides some bonus lower quality formats
            ('pc', 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0'),
            # Mobile UA allows to extract direct links and also does not fail when
            # PC UA fails with hulu error (e.g.
            # http://www.funimation.com/shows/hacksign/videos/official/role-play)
            ('mobile', 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'),
        )
        # A CloudFlare session pins the UA; replaying any other one would 403.
        user_agent = self._extract_cloudflare_session_ua(url)
        if user_agent:
            USER_AGENTS = ((None, user_agent),)
        for kind, user_agent in USER_AGENTS:
            request = sanitized_Request(url)
            request.add_header('User-Agent', user_agent)
            webpage = self._download_webpage(
                request, display_id,
                'Downloading %s webpage' % kind if kind else 'Downloading webpage')
            playlist = self._parse_json(
                self._search_regex(
                    r'var\s+playersData\s*=\s*(\[.+?\]);\n',
                    webpage, 'players data'),
                display_id)[0]['playlist']
            items = next(item['items'] for item in playlist if item.get('items'))
            item = next(item for item in items if item.get('itemAK') == display_id)
            error_messages = {}
            video_error_messages = self._search_regex(
                r'var\s+videoErrorMessages\s*=\s*({.+?});\n',
                webpage, 'error messages', default=None)
            if video_error_messages:
                error_messages_json = self._parse_json(video_error_messages, display_id, fatal=False)
                if error_messages_json:
                    for _, error in error_messages_json.items():
                        type_ = error.get('type')
                        description = error.get('description')
                        content = error.get('content')
                        if type_ == 'text' and description and content:
                            error_message = ERRORS_MAP.get(description)
                            if error_message:
                                error_messages[error_message] = content
            for video in item.get('videoSet', []):
                auth_token = video.get('authToken')
                if not auth_token:
                    continue
                funimation_id = video.get('FUNImationID') or video.get('videoId')
                # Prefer dubbed streams over subtitled ones.
                preference = 1 if video.get('languageMode') == 'dub' else 0
                if not auth_token.startswith('?'):
                    auth_token = '?%s' % auth_token
                for quality, height in (('sd', 480), ('hd', 720), ('hd1080', 1080)):
                    format_url = video.get('%sUrl' % quality)
                    if not format_url:
                        continue
                    if not format_url.startswith(('http', '//')):
                        # Non-URL values here are error tokens, not streams.
                        errors.append(format_url)
                        continue
                    if determine_ext(format_url) == 'm3u8':
                        formats.extend(self._extract_m3u8_formats(
                            format_url + auth_token, display_id, 'mp4', entry_protocol='m3u8_native',
                            preference=preference, m3u8_id='%s-hls' % funimation_id, fatal=False))
                    else:
                        tbr = int_or_none(self._search_regex(
                            r'-(\d+)[Kk]', format_url, 'tbr', default=None))
                        formats.append({
                            'url': format_url + auth_token,
                            'format_id': '%s-http-%dp' % (funimation_id, height),
                            'height': height,
                            'tbr': tbr,
                            'preference': preference,
                        })
        if not formats and errors:
            raise ExtractorError(
                '%s returned error: %s'
                % (self.IE_NAME, clean_html(error_messages.get(errors[0], errors[0]))),
                expected=True)
        self._sort_formats(formats)
        title = item['title']
        artist = item.get('artist')
        if artist:
            title = '%s - %s' % (artist, title)
        description = self._og_search_description(webpage) or item.get('description')
        thumbnail = self._og_search_thumbnail(webpage) or item.get('posterUrl')
        video_id = item.get('itemId') or display_id
        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'formats': formats,
        }
|
|
#!/usr/bin/env python
################################################################################
# #
# armor.py #
# #
# Core ArMOR analysis and maniupulation routines. #
# #
################################################################################
# #
# The ArMOR project #
# #
# Copyright (C) 2015 Daniel Lustig, Princeton University #
# #
# This software may be modified and distributed under the terms #
# of the MIT license. See the LICENSE file for details. #
# #
################################################################################
import sys
import re
import unittest
import logging
class ArMORError(Exception):
    """Base class for all ArMOR-specific errors.

    Fix: forward the message to Exception.__init__ so that e.args, repr()
    and pickling behave conventionally (the original left args empty).
    """
    def __init__(self, msg):
        super(ArMORError, self).__init__(msg)
        self.msg = msg
    def __str__(self):
        return self.msg
class InsufficientMOR(ArMORError):
    """Raised when no available MOST is strong enough to enforce the
    orderings that need to be enforced (see WeakestSufficientMOR)."""
    pass
class StrengthLevel():
    """One cell strength within a MOST.

    For now, assume a total order on strength levels ('-' weakest through
    'S' strongest). This could be changed if necessary, but for now it makes
    the coding easier.
    """
    strengths = '-LNMS'
    def __init__(self, level):
        self.level = level
    def _rank(self):
        # Position of this level within the weak-to-strong total order.
        return StrengthLevel.strengths.index(self.level)
    # Primitive comparison operators:
    def __eq__(self, other):
        return self.level == other.level
    def __le__(self, other):
        return self._rank() <= other._rank()
    # Non-primitive comparison operators (derived from the primitives):
    def __ne__(self, other):
        return not self.__eq__(other)
    def __ge__(self, other):
        return other.__le__(self)
    def __lt__(self, other):
        return self.__le__(other) and self.__ne__(other)
    def __gt__(self, other):
        return other.__lt__(self)
    def __add__(self, other):
        r"""The join (/\) operator, spelled '+' in Python. With a total
        order, the join is simply the stronger of the two levels. Returns
        the strength *character*, not a StrengthLevel."""
        if self._rank() >= other._rank():
            return self.level
        return other.level
    def __sub__(self, other):
        """The subtraction operator, spelled '-' in Python. Conservative
        approximation: for 'a - b', if b is at least as strong as a the
        result is empty ('-'); otherwise all of a is kept (parts of a that
        b enforced are still considered pending). Returns a character."""
        if self <= other:
            return StrengthLevel.strengths[0]
        return self.level
def APart(op):
    """Map a B-side operation name onto its A-side operation type.

    Multiple B operation types may map onto the same A operation type; this
    is most useful (so far) for "same address" vs. "different address"
    mappings. A B operation name may carry an ampersand-suffixed qualifier
    (e.g. "ld&sa" and "ld&da" both map onto the A operation "ld"); the A
    operation is the portion of the name before the first '&'. Names with
    no '&' map to themselves.
    """
    cut = op.find("&")
    return op if cut < 0 else op[:cut]
class MOST():
    """A memory ordering specification table.

    A MOST is built using nested dictionaries. Each entry in the outer
    dictionary represents a particular row within the MOST (the key is the row
    heading, and the value is the set of inner dictionaries). Each inner
    dictionary represents a cell in the MOST (the key is the column heading,
    and the value is the MOST cell strength value).

    Fixes relative to the original: __repr__ memoization caught *all*
    exceptions with a bare except (hiding real bugs raised while building the
    string) — it now catches only the expected AttributeError, matching
    __hash__; the LaTeX "\\cline" literal used an invalid escape sequence.
    """
    def __init__(self, most):
        self.most = most
    def __hash__(self):
        """Allow MOSTs to serve as dictionary keys"""
        try:
            return self.memoized_hash
        except AttributeError:
            pass
        # In Python, can only take the hash of an immutable object. Therefore,
        # create an immutable equivalent to the MOST before hashing. This
        # requires recursion into the second level of dicts.
        subhashes = {}
        for k, v in self.most.items():
            subhashes[k] = hash(frozenset(v.items()))
        self.memoized_hash = hash(frozenset(subhashes.items()))
        return self.memoized_hash
    def __repr__(self):
        """Return a string concatenating the orderings in the table, row
        by row (rows separated by a single space)."""
        try:
            return self.memoized_str
        except AttributeError:  # was a bare except: — only memoization misses
            pass
        self.memoized_str = ''
        for _, bs in self.most.items():
            for _, v in bs.items():
                self.memoized_str += v
            self.memoized_str += ' '
        # drop the trailing space
        self.memoized_str = self.memoized_str[:-1]
        return self.memoized_str
    def LaTeXRepr(self, name=None):
        """Return a LaTeX tabular environment depicting the MOST."""
        s = "\\begin{tabular}{|c|"
        columns = set()
        for row in self.most.values():
            columns |= set(row.keys())
        for _ in columns:
            s += "c|"
        s += "}\n"
        if name is not None:
            s += ("\\multicolumn{%d}{c}{%s} \\\\\n" %
                    (len(columns) + 1,
                     name.replace("_", "\\_")))
        s += "\\cline{2-%d}\n" % (len(columns) + 1)
        s += "\\multicolumn{1}{c|}{} "
        for b in columns:
            s += "& %s " % b.replace("&", "\\&")
        s += "\\\\\\hline\n"
        for a, bs in self.most.items():
            s += "%s " % a
            for b in columns:
                # '?' marks a cell absent from this row.
                s += "& %s " % bs.get(b, "?")
            s += "\\\\\\hline\n"
        s += "\\end{tabular}"
        return s
    # Primitive comparison operators:
    def __eq__(self, other):
        """Cell-wise equality; assumes *other* has at least the same shape."""
        for (a, bs) in self.most.items():
            for (b, s) in bs.items():
                if s != other.most[a][b]:
                    return False
        return True
    def __le__(self, other):
        """Cell-wise 'no stronger than'; assumes *other* has the same shape."""
        for (a, bs) in self.most.items():
            for (b, s) in bs.items():
                if not StrengthLevel(s) <= StrengthLevel(other.most[a][b]):
                    return False
        return True
    # Non-primitive comparison operators (defined in terms of the primitive
    # operators)
    def __ne__(self, other):
        return not self == other
    def __ge__(self, other):
        return other <= self
    def __lt__(self, other):
        return self <= other and self != other
    def __gt__(self, other):
        return self >= other and self != other
    def KeepColumn(self, op):
        """Return a MOST of the same shape (i.e., with the same rows and
        columns) but with no ordering requirements in columns other than the
        one specified."""
        result = {}
        for a, bs in self.most.items():
            result[a] = {}
            for b, s in bs.items():
                if b == op:
                    result[a][b] = s
                else:
                    result[a][b] = '-'
        return MOST(result)
    def KeepRow(self, op):
        """Return a MOST of the same shape (i.e., with the same rows and
        columns) but with no ordering requirements in rows other than the
        one specified (op is mapped through APart() first)."""
        result = {}
        for a, bs in self.most.items():
            result[a] = {}
            for b, s in bs.items():
                if a == APart(op):
                    result[a][b] = s
                else:
                    result[a][b] = '-'
        return MOST(result)
    def __add__(self, other):
        r"""The join (/\) operator, represented using the '+' in Python."""
        result = {}
        for a, bs in self.most.items():
            result[a] = {}
            for b, s in bs.items():
                result[a][b] = StrengthLevel(s) + StrengthLevel(other.most[a][b])
        return MOST(result)
    def __sub__(self, other):
        """The (conservative) subtraction operator, using '-' in Python."""
        result = {}
        for a, bs in self.most.items():
            result[a] = {}
            for b, s in bs.items():
                result[a][b] = StrengthLevel(s) - StrengthLevel(other.most[a][b])
        return MOST(result)
    def EmptyMOSTOfSameShape(self):
        """Return a MOST of the same shape (i.e., with the same rows and
        columns) but with no ordering requirements."""
        result = {}
        for a, bs in self.most.items():
            result[a] = {}
            for b, _ in bs.items():
                result[a][b] = '-'
        return MOST(result)
    def Superset(self, other):
        """Return a MOST that describes the merging of two states. If a pair
        of corresponding cells is the same, keep the value. If a pair of
        corresponding cells is different, make the resulting cell '*' to make
        it clear that the original inputs differ in that cell."""
        result = {}
        for a, bs in self.most.items():
            result.setdefault(a, {})
            for b, s in bs.items():
                if s != other.most.get(a, {}).get(b, '*'):
                    result[a][b] = '*'
                else:
                    result[a][b] = s
        for a, bs in other.most.items():
            result.setdefault(a, {})
            for b, s in bs.items():
                if s != self.most.get(a, {}).get(b, '*'):
                    result[a][b] = '*'
                else:
                    result[a][b] = s
        return MOST(result)
def TableString(t, a_keys=None, b_keys=None, newline='\n', sep=' ', start='', end='', print_keys=True):
    """Return the contents of "t" nicely formatted as a text table.

    Args:
      t: nested dict {row_key: {col_key: cell}}.
      a_keys / b_keys: explicit row / column key order (sorted either way);
        derived from t when omitted.
      newline, sep, start, end: decoration strings; *end* is appended after
        every row, matching the original behavior.
      print_keys: when False, omit the row/column headings.

    Fixes: stray semicolon removed; quadratic `+=` string building replaced
    with a joined piece list; the hand-rolled spaces() helper replaced with
    str.ljust / ' ' * n. Output is byte-identical to the original.
    """
    if not a_keys:
        a_keys = t.keys()
    # Width of the row-heading column (computed before sorting; same max).
    max_a_length = max(map(len, a_keys))
    a_keys = sorted(a_keys)
    if not b_keys:
        b_keys = set()
        for a_values in t.values():
            b_keys |= set(a_values.keys())
    b_keys = sorted(b_keys)
    # Calculate width of each column (heading vs. widest cell).
    max_b_length = {b: len(b) for b in b_keys}
    for a in a_keys:
        for b in b_keys:
            max_b_length[b] = max(
                max_b_length[b], len(str(t.get(a, {}).get(b, ' '))))
    pieces = [start]
    if print_keys:
        # Top left corner, then the column headings.
        pieces.append(' ' * max_a_length)
        for b in b_keys:
            pieces.append(sep + b)
    # Rows
    for a in a_keys:
        # Put the newline here so that there's no newline at the very end
        pieces.append(newline)
        if print_keys:
            pieces.append(a.ljust(max_a_length))
        for b in b_keys:
            cell = str(t.get(a, {}).get(b, ' '))
            pieces.append(sep + cell.ljust(max_b_length[b]))
        pieces.append(end)
    return ''.join(pieces)
class Architecture():
    """A memory model description: a PPO MOST plus named fence/MOR MOSTs.

    Fixes relative to the original Verify(): the tables logged under the
    "PPO:" and "MOST ...:" labels were swapped, and the MOST loop logged the
    MOST *name* (`n`) instead of each table line (`ln`). Also, the mutable
    default `invisible_ops=set()` was shared across instances; it is now
    created per instance.
    """
    def __init__(self, name, ppo, mosts, visible_ops, invisible_ops=None, verify=True):
        """The name of an architecture has two parts: a printable part and a
        non-printable part. There does not need to be a non-printable part,
        but if there is, it is separated from the printable part by a '#'.
        The non-printable part is useful for distinguishing two different
        representations for the same architecture: e.g., "tso#2x2" vs. "tso#4x4"
        """
        self.code_name = name
        self.print_name = re.sub("#.*", "", name).upper()
        self.ppo = ppo
        self.mosts = mosts
        self.visible_ops = visible_ops
        # Fresh set per instance; a shared default set() would leak mutations
        # across Architecture objects.
        self.invisible_ops = set() if invisible_ops is None else invisible_ops
        if verify:
            self.Verify()
    def __repr__(self):
        result = "Architecture %s (%s)\n" % (self.print_name, self.code_name)
        result += "\t%s\tPPO" % self.ppo
        for k, v in self.mosts.items():
            result += "\n\t%s\t%s" % (v, k)
        return result
    def Dump(self):
        """Return a multi-line description with full tables for PPO and MOSTs."""
        result = "Architecture %s (%s)\n" % (self.print_name, self.code_name)
        result += "PPO:\n%s\n" % TableString(self.ppo.most)
        for k, v in self.mosts.items():
            result += "%s:\n%s\n" % (k, TableString(v.most))
        return result
    def Verify(self):
        """Ensure every MOST is at least as strong as PPO; strengthen (join
        with PPO) any MOST that is not, logging the details."""
        for n, m in self.mosts.items():
            if not m >= self.ppo:
                logging.info("Architecture %s (%s) MOST %s is not as strong as PPO" %
                        (self.print_name, self.code_name, n))
                logging.info("PPO:")
                for ln in TableString(self.ppo.most).split('\n'):
                    logging.info(ln)
                logging.info("MOST %s:" % n)
                for ln in TableString(m.most).split('\n'):
                    logging.info(ln)
                diff = self.ppo - m
                logging.info("PPO - MOST %s" % n)
                for ln in TableString(diff.most).split('\n'):
                    logging.info(ln)
                newmost = self.ppo + m
                logging.info("Updating to:")
                for ln in TableString(newmost.most).split('\n'):
                    logging.info(ln)
                self.mosts[n] = newmost
class Transition():
    """A transition from one shim FSM state to another.

    Attributes:
      src, dst: source and destination states (MOSTs).
      mor_in: the incoming operation that triggers the transition.
      mors_out: the MOR(s) inserted downstream when the transition is taken.
    """
    def __init__(self, src, dst, mor_in, mors_out):
        self.src = src
        self.dst = dst
        self.mor_in = mor_in
        self.mors_out = mors_out
    def __eq__(self, other):
        return ((self.src, self.dst, self.mor_in, self.mors_out) ==
                (other.src, other.dst, other.mor_in, other.mors_out))
    def __hash__(self):
        # mors_out may be a list; freeze it so the tuple is hashable.
        return hash((self.src, self.dst, self.mor_in, frozenset(self.mors_out)))
    def __repr__(self):
        arrow = "--(%s/%s)-->" % (self.mor_in, str(self.mors_out))
        return "%s %s %s" % (self.src, arrow, self.dst)
def MinimalMOST(l, orderingsToEnforce):
    """Find the minimal MOST among the MOSTs in l, where l is a dictionary of
    (name, MOST) pairs. Returns a (name, MOST) tuple, or None if l is empty.
    orderingsToEnforce is used only for logging when the minimum is not
    unique (the first candidate encountered is kept)."""
    minimal = None
    # TODO: Very naive inefficient algorithm!
    for cand_name, cand in l.items():
        is_minimal = True
        for other_name, other in l.items():
            if cand > other:
                logging.debug("%s not minimal: greater than %s" %
                        (cand, other))
                is_minimal = False
                break
            logging.info("%s maybe minimal: not greater than %s" %
                    (cand, other))
        if not is_minimal:
            continue
        if minimal:
            # More than one candidate survives; keep the first, log the rest.
            logging.info("More than one minimal MOST within set!")
            logging.info("\t%s (requirement)" % orderingsToEnforce)
            logging.info("\t%s %s" % (minimal[1], minimal[0]))
            logging.info("\t%s %s" % (cand, cand_name))
            logging.info("\t(and maybe more...)")
        else:
            minimal = cand_name, cand
    return minimal
def WeakestSufficientMOR(orderingsToEnforce, mosts):
    """Among the (name, MOST) pairs in *mosts*, return the minimal MOST that
    is at least as strong as orderingsToEnforce.

    Raises InsufficientMOR when no candidate is strong enough.
    """
    sufficient = {}
    for name, most in mosts.items():
        if orderingsToEnforce <= most:
            logging.info("\t sufficient: %s %s" % (most, name))
            sufficient[name] = most
        else:
            logging.info("\tinsufficient: %s %s" % (most, name))
            # Log what is still missing from this candidate.
            logging.debug("\t\t%s" % (orderingsToEnforce - most))
    if not sufficient:
        logging.warning("No sufficiently strong MOST!")
        raise InsufficientMOR("No sufficiently strong MOST!")
    return MinimalMOST(sufficient, orderingsToEnforce)
def NextState(upstream, downstream, assumedReqs, currentState, op):
    """Given the current state of a shim, the upstream and downstream PPO, the
    assumedReqs, and the input operation type, calculate the next state to go
    to and the set of downstream MORs (if any) to insert.

    Args:
      upstream: Architecture whose model the incoming stream was written for.
      downstream: Architecture the stream is being translated onto.
      assumedReqs: MOST of orderings that must always be assumed pending
        (see AssumedReqs()).
      currentState: MOST of orderings currently pending enforcement.
      op: the incoming operation name (B-side, possibly '&'-qualified).

    Returns:
      A Transition from currentState to the computed next state, labeled
      with op and the list of MOR names inserted downstream.
    """
    logging.info("")
    logging.info("Next State Iteration")
    logging.debug("Upstream:")
    for ln in str(upstream).split('\n'):
        logging.debug(ln)
    logging.debug("Downstream:")
    for ln in str(downstream).split('\n'):
        logging.debug(ln)
    logging.info("Current state: %s" % str(currentState))
    logging.info("MOR: %s" % str(op))
    # The orderings that need to be enforced are:
    # 1) those in the PPO MOST column corresponding to the input operation
    #    (for input operation types without an explicit MOST, e.g., loads
    #    and stores)
    # 2) those in the MOST associated with the input operation
    #    (for input operation types with an explicit MOST, e.g., fences)
    # 3) the assumedReqs (i.e., any accesses which can't be observed directly
    #    and which therefore must be conservatively assumed to be required
    orderingsToEnforce = currentState.KeepColumn(op)
    if op in upstream.mosts.keys():
        orderingsToEnforce += upstream.mosts[op]
    orderingsToEnforce += assumedReqs
    # The orderings that will be marked pending once this input operation has
    # been processed are those which:
    # 1) are in the row of the upstream MOST corresponding the the input
    #    operation
    # 2) are not enforced by the corresponding entries in the downstream MOST
    #    row corresponding to the input operation
    newOrderings = (upstream.ppo - downstream.ppo).KeepRow(op)
    # If PPO is sufficiently strong by itself to enforce all of the necessary
    # orderings, then don't insert anything. Otherwise, insert the weakest
    # sufficiently strong MOST
    if orderingsToEnforce <= downstream.ppo:
        logging.info("\tPPO sufficient")
        insertedMORs = []
        insertedMORMOST = downstream.ppo
        nextState = currentState + newOrderings + assumedReqs
    else:
        logging.info("\tPPO insufficient")
        logging.info("\t\t  %s" % orderingsToEnforce)
        logging.info("\t\t- %s" % downstream.ppo)
        logging.info("\t\t= %s" % (orderingsToEnforce - downstream.ppo))
        insertedMORName, insertedMORMOST = WeakestSufficientMOR(
                orderingsToEnforce, downstream.mosts)
        insertedMORs = [insertedMORName]
        # The inserted MOR discharges its orderings before the new ones accrue.
        nextState = currentState - insertedMORMOST + newOrderings + assumedReqs
    # One hard-coded optimization:
    #   (st->st, check_S) - (st->st, check_M) = (st->ld, check)
    # In other words, the difference between single- and multi-copy atomicity
    # is only observable by a store following a load to the same address.
    # Single-copy atomicity prevents store buffer forwarding, while multi-copy
    # atomicity allows it. For cases in which store->load ordering already
    # needs to be enforced, then the (check_S - check_M) store->store ordering
    # becomes redundant.
    for a in nextState.most.keys():
        if a != "st":
            continue
        for b in nextState.most[a].keys():
            if APart(b) == "st" and \
                    nextState.most[a][b] == 'S' and \
                    insertedMORMOST.most[a][b] == 'M':
                found_cell = False
                # Apply only if every st->ld cell is already 'S'.
                for k in nextState.most[a].keys():
                    if APart(k) == 'ld':
                        if nextState.most[a][k] != 'S':
                            logging.info(("No optimization: %s->%s is %s, not S") %
                                    (a, k, nextState.most[a][k]))
                            found_cell = False
                            break
                        nextState.most[a][k] = 'S'
                        found_cell = True
                        logging.info(("Optimization! (%s->%s, check_S) - " +
                                "(%s->%s, check_M) = (%s->%s, check)") %
                                (a, b, a, b, a, k))
                if found_cell:
                    nextState.most[a][b] = '-'
                    logging.info(("Optimization! (%s->%s, check_S) - " +
                            "(%s->%s, check_M) = (%s->%s, -)") %
                            (a, b, a, b, a, b))
    # If the input operation has a corresponding downstream operation (e.g.,
    # loads and stores), then insert it (after the other necessary MOST(s) (if
    # any) have been inserted).
    if APart(op) in downstream.ppo.most.keys():
        insertedMORs.append(APart(op))
    logging.info("Inserted MORs: %s" % str(insertedMORs))
    logging.info("Next state: %s" % str(nextState))
    return Transition(currentState, nextState, op, insertedMORs)
def AssumedReqs(upstream, downstream):
    """Return the assumedReqs MOST: the join, over every downstream-invisible
    operation, of that operation's row and column in the upstream PPO.
    These operations cannot be observed directly, so their orderings must
    conservatively be assumed to always be pending enforcement."""
    acc = upstream.ppo.EmptyMOSTOfSameShape()
    for op in downstream.invisible_ops:
        acc = acc + upstream.ppo.KeepRow(op) + upstream.ppo.KeepColumn(op)
    logging.info("AssumedReqs for %s -> %s: %s" % (
        upstream.code_name, downstream.code_name, acc))
    return acc
class FSM():
    def __init__(self, upstream, downstream, edges):
        """Build the FSM's adjacency structure from a collection of
        Transition objects.

        self.nodes maps each state to {input op: (next state, output MORs)};
        both endpoints of every edge are registered as nodes.
        """
        self.upstream = upstream
        self.downstream = downstream
        self.nodes = {}
        for e in edges:
            self.nodes.setdefault(e.src, {})
            self.nodes.setdefault(e.dst, {})
            self.nodes[e.src][e.mor_in] = (e.dst, e.mors_out)
        self.edges = edges
def StateID(self, node):
return self.nodes.keys().index(node)
def __eq__(self, other):
# FIXME: check if the upstreams and downstreams are actually the same
if self.upstream.code_name != other.upstream.code_name:
return False
if self.downstream.code_name != other.downstream.code_name:
return False
for e in self.edges:
if e not in other.edges:
return False
for e in other.edges:
if e not in self.edges:
return False
return True
    def DOTGraph(self, label=""):
        """Return a DOT graph representing the FSM.

        Nodes are labeled with their pending-orderings tables; edges with
        "input op / inserted MORs". *label* is an optional suffix for the
        graph title.
        """
        s = ""
        s += "digraph armor {\n"
        s += "labelloc=t;\n"
        if label:
            label = " (" + label + ")"
        s += ('label="%s -> %s%s";\n' %
                (self.upstream.print_name, self.downstream.print_name, label))
        # Use courier so that the FSMs are properly aligned
        s += '\tnode [fontname="courier"];\n'
        s += '\tedge [fontname="courier"];\n'
        # Print the edges
        for e in self.edges:
            # Print a user-readable form of the edge as a comment
            s += '\t//%s\n' % str(e)
            # Edge color scheme (as implemented below):
            #   blue  = an edge with no MORs at all (uncommon?)
            #   black = edge inserting exactly one MOR
            #   red   = edge for which more than one MOR had to be inserted
            if len(e.mors_out) == 0:
                logging.info("No MORs inserted...redundant fence? %s" % e)
                edge_color = "blue"
                mors_out = '-'
            elif len(e.mors_out) == 1:
                edge_color = "black"
                mors_out = e.mors_out
            else:
                edge_color = "red"
                mors_out = e.mors_out
            # Print the edge in DOT notation; node names are derived from
            # state hashes ('-' replaced since DOT identifiers can't have it).
            s += ('\tn%s -> n%s [label="%s/%s";color=%s;penwidth=5];\n' %
                    (str(hash(e.src)).replace('-', '_'),
                     str(hash(e.dst)).replace('-', '_'),
                     e.mor_in,
                     ';'.join(mors_out),
                     edge_color))
        # Print the nodes
        for n in self.nodes.keys():
            # The node shows the pending orderings table
            label = TableString(n.most, sep=' ', newline='\\n')
            s += ('\tn%s [label="%s";penwidth=5]\n' %
                    (str(hash(n)).replace("-", "_"), label))
        # End the graph
        s += "}\n"
        return s
    def LaTeXRepr(self):
        """Return a LaTeX tabular environment showing the FSM transition
        table (one row per (state, input op) pair)."""
        s = ""
        s += "\\begin{tabular}{|c|c|c|c|c|}\n"
        s += "\\hline\n"
        s += "\\multicolumn{3}{|c|}{Input} & \\multicolumn{2}{c|}{Output} \\\\\\hline\n"
        s += "State & MOST & Op. & Op(s). & Next State \\\\\\hline\n"
        for src, dsts in self.nodes.items():
            for op, edge in dsts.items():
                # '_' and '&' must be escaped for LaTeX; '-' is wrapped in
                # braces so \texttt renders it as a literal hyphen.
                s += "%d & \\texttt{%s} & %s & " % (
                    self.StateID(src), str(src).replace("-", "{-}"),
                    op.replace("_", "\\_").replace("&", "\\&"))
                s += '; '.join(
                    map(lambda x: x.replace("_", "\\_").replace("&", "\\&"),
                        edge[1])) if edge[1] else '-'
                s += " & %s \\\\\\hline\n" % self.StateID(edge[0])
        s += "\\end{tabular}\n"
        return s
    def TextRepr(self):
        """Return a text form of the FSM transition table.

        Builds a nested dict keyed by stringified row numbers and renders it
        with TableString. NOTE(review): TableString sorts row keys as
        strings, so with 10+ rows "10" sorts before "2" — row order may be
        surprising for larger FSMs; confirm whether that matters to callers.
        NOTE(review): the LaTeX-style escaping of '_' and '&' in the op
        column looks copied from LaTeXRepr — verify it is intended here.
        """
        d = {"0": {"1": "State", "2": "MOST", "3": "Input", "4": "|",
                "5": "Output", "6": "Next State"}}
        for src, dsts in self.nodes.items():
            for op, edge in dsts.items():
                d[str(len(d.keys()))] = {
                        "1": str(self.StateID(src)),
                        "2": src,
                        "3": op.replace("_", "\\_").replace("&", "\\&"),
                        "4": "|",
                        "5": '; '.join(edge[1]) if edge[1] else '-',
                        "6": self.StateID(edge[0])}
        return TableString(d, sep=" | ", print_keys=False)
    def MinimizedFSM(self, merge):
        """Given an FSM, merge any states which are identical in behavior.

        merge: binary callable used to fold the members of an equivalence
        class into the single representative state kept in the minimized
        machine.
        Returns a new FSM; self is not modified.
        """
        logging.debug("MIN\tStart")
        # Calculate a pairwise set of conditions under which each pair of two
        # states may be identical. comparison[i][j] is either 1) an assertion
        # that for i to be equal to j, a must be equal to b, or 2) an assertion
        # that i cannot be equivalent to j
        comparison = {}
        for ik, iv in self.nodes.items():
            for jk, jv in self.nodes.items():
                # Only compare each unordered pair once, in canonical
                # (hash) order
                if hash(ik) >= hash(jk):
                    continue
                comparison.setdefault(ik, {})
                comparison[ik][jk] = set()
                # iv/jv map each input op to a (next_state, output_mors)
                # pair; dst[0] is the next state, dst[1] the outputs
                for op, dst in iv.items():
                    if dst[1] != jv[op][1]:
                        # The transitions for input "op" is not the same;
                        # hence, the two source states cannot be the same
                        logging.info("MIN\t%s != %s because on op %s:" %
                                     (ik, jk, op))
                        logging.info("MIN\t\t%s: %s %s" %
                                     (ik, dst[1], iv[op][1]))
                        logging.info("MIN\t\t%s: %s %s" %
                                     (jk, dst[1], jv[op][1]))
                        comparison[ik][jk] = None
                        break
                    else:
                        # The transitions for input "op" are the same; hence,
                        # the two source states may be the same, but only if
                        # the two destination states (a, b) are also the same
                        a = dst[0]
                        b = jv[op][0]
                        if a != b:
                            assert not a == b
                            # Store the condition pair in canonical (hash)
                            # order so it can be looked up later
                            if hash(a) < hash(b):
                                comparison[ik][jk].add((a, b))
                            else:
                                comparison[ik][jk].add((b, a))
        # Sanity check: there should have been (n*(n-1))/2 comparisons
        # performed
        len_comparison = 0
        for k, v in comparison.items():
            len_comparison += len(v.keys())
        assert len_comparison == (len(self.nodes) * (len(self.nodes) - 1)) / 2
        # Loop through the state equivalence conditions calculated above and
        # check whether any of them fail. If they do, then mark the potential
        # state equalities depending on those conditions as invalid. Loop
        # until convergence.
        iterate = True
        while iterate:
            iterate = False
            for i, js in comparison.items():
                for j, conditions in js.items():
                    if conditions is None:
                        continue
                    for (a, b) in conditions:
                        if comparison[a][b] is None:
                            logging.info("MIN\t%s != %s because %s != %s" %
                                         (i, j, a, b))
                            comparison[i][j] = None
                            iterate = True
        # At this point, any remaining conditional equivalences hold true.
        # Group them.
        equal_states = set()
        for i, js in comparison.items():
            for j, conditions in js.items():
                if conditions is None:
                    continue
                # i and j are equal. Find any other equivalences involving i
                # or j and merge them together with (i=j).
                new_eq = {i, j}
                # Track the old equivalences that are superseded by the new
                # merged equivalence
                old_eqs = set()
                for eq in equal_states:
                    if i in eq or j in eq:
                        # i and j are equivalent to all of the states in eq
                        new_eq |= eq
                        # eq is now superseded by new_eq; remove it
                        old_eqs |= {eq}
                equal_states -= old_eqs
                equal_states |= {frozenset(new_eq)}
        logging.info("MIN\tEqual states list: %s" % equal_states)
        # Map each node to a canonical representative
        mapping = {}
        replacement = {}
        for n in self.nodes.keys():
            mapping[n] = n
        for eq in equal_states:
            choice = min(eq)  # it doesn't really matter which one is chosen
            replacement[choice] = choice
            for x in eq:
                # Fold every member of the class into the representative
                replacement[choice] = merge(replacement[choice], x)
                mapping[x] = choice
        logging.debug("MIN\tReplacement mapping:")
        for k, v in replacement.items():
            logging.debug("MIN\t\t%s -> %s" % (k, v))
        logging.info("MIN\tEqual states mapping:")
        for k, v in mapping.items():
            logging.info("MIN\t\t%s -> %s -> %s" %
                         (k, v, replacement.get(v, v)))
        # Merge the nodes
        new_edges = set()
        for a, bs in self.nodes.items():
            if mapping[a] != a:
                # this edge is now redundant: skip it
                continue
            for b, v in bs.items():
                dst, mors = self.nodes[a][b]
                new_edges.add(Transition(replacement.get(a, a),
                    replacement.get(mapping[dst], mapping[dst]), b, mors))
        return FSM(self.upstream, self.downstream, new_edges)
def RemoveTransientNodes(self):
"""Remove any transient nodes: nodes which are not reachable from all
other nodes in the graph"""
# Calculate the adjacency list of the FSM
adj = {}
for e in self.edges:
adj.setdefault(e.src, {})
adj[e.src][e.dst] = True
# Take the transitive closure of the adjacency list
for k in self.nodes:
for i in self.nodes:
for j in self.nodes:
if adj.get(i, {}).get(j, False) and \
adj.get(j, {}).get(k, False):
adj[i][k] = True
# If j is not reachable from i, then mark j as transient
transient_nodes = set()
for i in self.nodes:
for j in self.nodes:
if not adj.get(i, {}).get(j, False):
logging.info("MIN\tTransient node %s" % j)
transient_nodes.add(j)
# Calculate the FSM with transient nodes removed
new_edges = set()
for e in self.edges:
if e.src not in transient_nodes and e.dst not in transient_nodes:
new_edges.add(e)
return FSM(self.upstream, self.downstream, new_edges)
def GenerateFSM(upstream, downstream):
    """Given upstream and downstream architectures, generate the (not yet
    minimal) ArMOR shim FSM for translating from upstream to downstream.

    Explores reachable states from the assumed-requirements start state,
    collecting one Transition per (state, visible op) pair.
    """
    assumed_reqs = AssumedReqs(upstream, downstream)
    all_edges = set()
    visited_hashes = set()
    # The starting state would be the empty state, except that the assumed
    # requirements are always considered pending, so exploration actually
    # begins at assumed_reqs.
    worklist = [assumed_reqs]
    logging.info("")
    logging.info("")
    logging.info("Generating FSM for %s --> %s" %
                 (upstream.code_name, downstream.code_name))
    for arch in (upstream, downstream):
        for ln in arch.Dump().split('\n'):
            logging.debug(ln)
    while worklist:
        logging.debug("%d states remaining" % len(worklist))
        # Pick any pending state (LIFO; the choice does not matter) and
        # record it as visited so it is not enumerated twice
        state = worklist.pop()
        visited_hashes.add(hash(state))
        # Compute the outgoing edge for each visible input operation
        for op in upstream.visible_ops:
            t = NextState(upstream, downstream, assumed_reqs, state, op)
            all_edges.add(t)
            dst_hash = hash(t.dst)
            already_seen = (dst_hash in visited_hashes or
                            dst_hash in [hash(s) for s in worklist])
            if already_seen:
                logging.info("Repeated next state %s" % str(t.dst))
            else:
                logging.info("New state %s" % str(t.dst))
                worklist.append(t.dst)
    return FSM(upstream, downstream, all_edges)
################################################################################
def GenerateFSMAndPrintDOTGraph(graphfilepath, upstream, downstream):
    """Generate the upstream->downstream shim FSM, minimize it, and write
    DOT graphs plus a text transition table describing it.

    graphfilepath: output directory, '-' for stdout, or None to skip all
    file output.
    Returns (fsm_min, fsm) when minimization actually shrank the FSM,
    otherwise (fsm_min, None).
    """
    fsm = GenerateFSM(upstream, downstream)
    for e in fsm.edges:
        logging.info("\t%s" % str(e))
    # Check whether the minimized version of the graph is actually smaller
    minimized_fsm_is_smaller = False
    def f_merge(x, y):
        return x.Superset(y)
    fsm_min = fsm.MinimizedFSM(f_merge).RemoveTransientNodes()
    for e in fsm.edges:
        if e not in fsm_min.edges:
            minimized_fsm_is_smaller = True
            logging.info("Minimized graph is smaller!")
            logging.info("Minimized FSM:")
            for min_e in fsm_min.edges:
                logging.info("\t%s" % str(min_e))
            break
    if graphfilepath is not None:
        def _write(suffix, text):
            # '-' means stdout; otherwise write (and close -- BUG FIX: the
            # handles were previously leaked) a per-pair file in the
            # output directory.
            if graphfilepath == '-':
                sys.stdout.write(text)
            else:
                path = graphfilepath + "/%s_%s%s" % (
                    upstream.print_name, downstream.print_name, suffix)
                with open(path, 'w') as f:
                    f.write(text)
        if minimized_fsm_is_smaller:
            # If the minimized FSM is actually smaller, print the before and
            # the after as two separate graphs
            _write("_nonminimized.gv", fsm.DOTGraph("Non-minimized"))
            _write("_minimized.gv", fsm_min.DOTGraph("Minimized"))
        else:
            # Otherwise, if the before and the after are the same, print the
            # same graph under both names
            _write("_nonminimized.gv", fsm.DOTGraph())
            _write("_minimized.gv", fsm.DOTGraph())
        # BUG FIX: this used to open(graphfilepath + "/...") directly, which
        # crashed when graphfilepath was '-'
        _write("_minimized.txt", fsm_min.TextRepr())
    # Return the generated graphs
    return fsm_min, fsm if minimized_fsm_is_smaller else None
################################################################################
class ArMORUnitTests(unittest.TestCase):
    """Unit tests for the FSM minimization machinery."""
    def test_minimize(self):
        """from http://en.wikibooks.org/wiki/Digital_Circuits/Optimization
        {frozenset({3, 7}), frozenset({2, 5, 6}), frozenset({0, 1, 4})}"""
        # The 8-state textbook machine should minimize to the 3-state
        # machine below (state classes listed in the docstring; `min`
        # picks the smallest member of each class as its representative).
        self.assertEqual(
            FSM(Architecture("upstream", MOST({}), {}, set()),
                Architecture("downstream", MOST({}), {}, set()),
                { Transition(0, 7, 0, [0]), Transition(0, 2, 1, [0]),
                  Transition(1, 7, 0, [0]), Transition(1, 5, 1, [0]),
                  Transition(2, 7, 0, [1]), Transition(2, 0, 1, [0]),
                  Transition(3, 0, 0, [1]), Transition(3, 7, 1, [0]),
                  Transition(4, 3, 0, [0]), Transition(4, 6, 1, [0]),
                  Transition(5, 3, 0, [1]), Transition(5, 1, 1, [0]),
                  Transition(6, 3, 0, [1]), Transition(6, 4, 1, [0]),
                  Transition(7, 4, 0, [1]), Transition(7, 3, 1, [0])})
            .MinimizedFSM(min),
            FSM(Architecture("upstream", MOST({}), {}, set()),
                Architecture("downstream", MOST({}), {}, set()),
                {
                 Transition(0, 3, 0, [0]), Transition(0, 2, 1, [0]),
                 Transition(2, 3, 0, [1]), Transition(2, 0, 1, [0]),
                 Transition(3, 0, 0, [1]), Transition(3, 3, 1, [0])}))
# Run the ArMOR unit tests when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
|
from app import app
from flask import request, url_for
from threading import Lock
from urllib import quote
import time
from es_connection import es
from es_queries import _build_email_query
from es_query_utils import _query_emails, _count_emails, _query_email_attachments, _map_emails_to_row,_map_node
from es_series import count_associated_addresses, count_email_attachments
# contains a cache of all email_address.addr, email_address:
# ingest_id -> { email_address.addr -> email_address source doc }
_EMAIL_ADDR_CACHE = {}
# Guards (re)builds of _EMAIL_ADDR_CACHE entries (see initialize_email_addr_cache)
_EMAIL_ADDR_CACHE_LOCK = Lock()
# Fields fetched for graph nodes.  NOTE(review): "recepient" is presumably
# the field name as stored in the index -- do not "fix" the spelling here.
_graph_fields = ["community", "community_id", "addr", "attachments_count", "received_count", "sent_count", "recepient.email_id", "sender.email_id", "starred"]
# Sort which will add sent + rcvd and sort most to top
_sort_email_addrs_by_total={ "_script":{"type": "number","order": "desc","script": { "source": "doc['sent_count'].value + doc['received_count'].value", "lang": "painless" }}}
# Match-all bool query reused by several searches below
_query_all = {"bool":{"must":[{"match_all":{}}]}}
def count(index, type="emails", start="2000-01-01", end="now"):
    """Return the total number of documents of *type* in *index*.

    NOTE(review): start/end are accepted but currently have no effect --
    the date-range filter was never wired into the query (see TODO).
    """
    # TODO apply a date-range filter to the query; the intended shape was:
    #   {"range": {"datetime": {"gte": start, "lte": end}}}
    # (previously built into an unused local that also shadowed the
    # builtin `filter`; removed as dead code)
    all_query = {"bool":{"must":[{"match_all":{}}]}}
    resp = es().count(index=index, doc_type=type, body={"query" : all_query})
    return resp["count"]
# Get attachment info from the email_address type
def _get_attachment_info_from_email_address(index, email_address, date_time=None):
    """Fetch the email_address document(s) whose ``addr`` term exactly
    matches *email_address* from *index*.

    NOTE(review): date_time is accepted but never used -- confirm whether
    a date filter was intended here.
    """
    # Exact-match term filter on the addr field inside a "filtered" query
    query_email_addr = {"query":{"filtered" : {
        "query" : _query_all,
        "filter" : {"bool":{
            "must":[
                {"term" : { "addr" : email_address}}
            ]
        }}}}}
    resp = es().search(index=index, doc_type="email_address", body=query_email_addr)
    app.logger.debug("resp: %s" % (resp))
    return resp
# Get search all
def _search_ranked_email_addrs(index, start, end, size):
    """Return up to *size* email_address docs from *index*, sorted by
    sent_count + received_count descending (see _sort_email_addrs_by_total).

    NOTE(review): start/end are accepted but not applied to the query.
    """
    graph_body= {"_source": _graph_fields, "sort" : _sort_email_addrs_by_total, "query" : _query_all}
    app.logger.debug("query: %s" % (graph_body))
    resp = es().search(index=index, doc_type="email_address", size=size, body=graph_body)
    app.logger.debug("resp: %s" % (resp))
    return resp
def initialize_email_addr_cache(ingest_ids, update=False):
    '''
    Initialize the per-ingest-id email-address cache.
    :param ingest_ids: comma separated list of ingest_ids
    :param update: when True, rebuild the cache even if already populated
    :return: {"acknowledge": "ok"}
    '''
    global _EMAIL_ADDR_CACHE
    _email_addr_cache_fields= ["community", "community_id", "addr", "received_count", "sent_count", "attachments_count", "ingest_id"]
    for ingest_id in ingest_ids.split(","):
        if ingest_id in _EMAIL_ADDR_CACHE and not update:
            app.logger.info("APPLICATION CACHE -- index=%s"% ingest_id)
            continue
        # Hold the lock while (re)building this index so concurrent requests
        # do not build the same entry in parallel.  (Using `with` instead of
        # manual acquire/release guarantees release on any exit path.)
        # NOTE(review): the populated-check above happens outside the lock,
        # so two first requests may still both rebuild -- harmless but racy.
        with _EMAIL_ADDR_CACHE_LOCK:
            try:
                app.logger.info("INITIALIZING CACHE -- index=%s"% ingest_id)
                body={"query" : {"match_all" : {}}}
                # Size the fetch to the full address count so one search
                # returns every email_address doc for this ingest
                num = count(ingest_id,"email_address")
                addrs = es().search(index=ingest_id, doc_type="email_address", size=num, _source_include=_email_addr_cache_fields, body=body)
                addr_index = {f["addr"] : f for f in [hit["_source"] for hit in addrs["hits"]["hits"]]}
                _EMAIL_ADDR_CACHE[ingest_id] = addr_index
                app.logger.debug("done: %s"% num)
            except Exception as e:
                # Best-effort: log and continue with the next ingest_id
                app.logger.error("FAILED initializing cache for -- index={0} Exception={1}".format( ingest_id, e))
        app.logger.info("INITIALIZING CACHE COMPLETE! -- index=%s"% ingest_id)
    return {"acknowledge" : "ok"}
def get_cached_email_addr(index, addr):
    """Look up a cached email_address record by ingest index and address.

    Raises KeyError when the index has not been cached or the address is
    unknown, matching the original behavior.
    """
    per_index_cache = _EMAIL_ADDR_CACHE[index]
    return per_index_cache[addr]
# This will generate the graph structure for a emails list provided.
def _build_graph_for_emails(data_set_id, docs):
    """Build the {nodes, links} graph plus table rows for a list of email
    documents.

    Addresses missing from the email-address cache are skipped with a
    warning.  Returns {"graph": {...}, "rows": [...], "data_set_id": ...}.
    """
    start = time.time()
    # List of all nodes - may contain duplicate node names as they are not
    # unique between the datasets
    nodes = []
    # edge_map["from#to"] -> {"source", "target", "value"} link records
    edge_map = {}
    # quick lookup from address to its index in the nodes list
    addr_nodeid_lookup = {}
    # address -> list of ingest_ids the address has been seen in
    addr_to_ingest_ids = {}
    total = count(data_set_id, "email_address")
    # Make sure the address cache is populated for all datasets involved
    initialize_email_addr_cache(data_set_id)
    for email in docs:
        ingest_id = email["original_ingest_id"]
        from_addr = email["from"]
        if from_addr not in _EMAIL_ADDR_CACHE[ingest_id]:
            app.logger.warn("From email address not found in cache <%s>" % email)
            continue
        if from_addr not in addr_to_ingest_ids:
            # First sighting of this address: create its node
            addr_to_ingest_ids[from_addr] = [ingest_id]
            nodes.append(_map_node(_EMAIL_ADDR_CACHE[ingest_id][from_addr], total, addr_to_ingest_ids[from_addr]))
            addr_nodeid_lookup[from_addr] = len(nodes) - 1
        elif ingest_id not in addr_to_ingest_ids[from_addr]:
            # Known address, new dataset: record the extra ingest_id
            addr_to_ingest_ids[from_addr].append(ingest_id)
        for rcvr_addr in email["to"] + email["cc"] + email["bcc"]:
            if rcvr_addr not in _EMAIL_ADDR_CACHE[ingest_id]:
                app.logger.warn("RCVR email address not found in cache <%s>" % rcvr_addr)
                continue
            if rcvr_addr not in addr_to_ingest_ids:
                addr_to_ingest_ids[rcvr_addr] = [ingest_id]
                nodes.append(_map_node(_EMAIL_ADDR_CACHE[ingest_id][rcvr_addr], total, addr_to_ingest_ids[rcvr_addr]))
                addr_nodeid_lookup[rcvr_addr] = len(nodes) - 1
            elif ingest_id not in addr_to_ingest_ids[rcvr_addr]:
                addr_to_ingest_ids[rcvr_addr].append(ingest_id)
            # One link per (sender, receiver) pair; value counts emails
            edge_key = from_addr + "#" + rcvr_addr
            if edge_key not in edge_map:
                edge_map[edge_key] = {"source" : addr_nodeid_lookup[from_addr], "target": addr_nodeid_lookup[rcvr_addr], "value": 1}
            else:
                edge_map[edge_key]["value"] += 1
    resp = {"graph":{"nodes":nodes, "links":edge_map.values()}, "rows": [_map_emails_to_row(email) for email in docs], "data_set_id" : data_set_id}
    app.logger.info("TIMING: total document hits = %s, TIME_ELAPSED=%g" % (len(docs), time.time() - start))
    return resp
def _search_url(data_set_id, email_address, qs, start_datetime, end_datetime, encrypted, size, _from=0):
    '''
    Generate an escaped URL for re-running this search against the search
    service.
    :param data_set_id:
    :param email_address:
    :param qs:
    :param start_datetime:
    :param end_datetime:
    :param encrypted:
    :param size:
    :param _from:
    :return: escaped search url string
    '''
    root_context=''
    # Default: plain http against the Host header.  BUG FIX: base_url was
    # previously only assigned inside the X-Forwarded-Host branch, so a
    # request without that header raised NameError at the return below.
    base_url = "http://" + request.headers.environ.get('HTTP_HOST', '') + ("/" + root_context if root_context else '')
    if 'HTTP_X_FORWARDED_HOST' in request.headers.environ:
        host = request.headers.environ.get('HTTP_X_FORWARDED_HOST')
        host_tokens = host.split(":")
        # BUG FIX: guard the port lookup -- a forwarded host without an
        # explicit ":port" used to raise IndexError here.  Only port 443
        # switches the scheme to https, as before.
        if len(host_tokens) > 1 and host_tokens[1] == '443':
            base_url = "https://"+ host_tokens[0] + ("/" + root_context if root_context else '')
    service_path = "search/search/email/void" if email_address else "search/search/all"
    return u"{0}/{1}?data_set_id={2}&qs={3}&email_address={4}&encrypted={5}&size={6}&from={7}&start_datetime={8}&end_datetime={9}".format(
        base_url,
        service_path,
        data_set_id,
        quote(qs.encode("utf-8")) if qs else '',
        quote(email_address.encode("utf-8")) if email_address else '',
        encrypted if encrypted else '',
        size,
        _from,
        start_datetime,
        end_datetime
    )
def _search(data_set_id, email_address, qs, start_datetime, end_datetime, encrypted, size, _from=0):
    """Run the primary email search and assemble the full response:
    graph + rows (via _build_graph_for_emails), matching attachments,
    totals, and paging metadata.
    """
    app.logger.debug("email_address=%s, qs=%s" % ((str(email_address)), qs))
    # A single address becomes a one-element list; None means no address filter
    email_addrs=[email_address] if email_address else None
    query = _build_email_query(email_addrs=email_addrs, qs=qs, date_bounds=(start_datetime, end_datetime), encrypted=encrypted)
    app.logger.debug("query: %s" % (query))
    results = _query_emails(data_set_id, query, size, _from)
    graph = _build_graph_for_emails(data_set_id, results["hits"])
    graph["edge_total"] = len(graph["graph"]["links"])
    # Second query: same filters restricted to emails with attachments
    query = _build_email_query(email_addrs=email_addrs, qs=qs, date_bounds=(start_datetime, end_datetime), attachments_only=True, encrypted=encrypted)
    app.logger.debug("attachment-query: %s" % (query))
    attachments = _query_email_attachments(data_set_id, query, size, _from)
    graph["attachments"] = attachments["hits"]
    graph["attachments_total"] = attachments["attachments_total"]
    graph["data_set_id"] = data_set_id
    graph["query_hits"] = results["total"]
    graph["from"] = _from
    return graph
def _search_summary(data_set_id, email_address, qs, start_datetime, end_datetime, encrypted, size,_from=0):
    """Return summary counts for the search described by the arguments
    (emails sent/received/total, edge and attachment totals, plus a URL to
    re-run the full search) without fetching the documents themselves.
    """
    app.logger.debug("email_address=%s, qs=%s" % ((str(email_address)), qs))
    pre_search_results = {}
    email_addrs=[email_address] if email_address else None
    url = _search_url(data_set_id,email_address,qs,start_datetime,end_datetime,encrypted,size,_from)
    # app.logger.debug("graph url: %s" % (url))
    # url = quote(url.encode('utf-8'))
    # app.logger.debug("graph url quotes: %s" % (url))
    if not email_address:
        # Data-set-wide summary: one total count, no sent/received split
        query = _build_email_query(email_addrs=email_addrs, qs=qs, date_bounds=(start_datetime, end_datetime), encrypted=encrypted)
        app.logger.debug("query: %s" % (query))
        pre_search_results["emails_total"] = _count_emails(data_set_id, query)["total"]
        pre_search_results["url"] = url
        pre_search_results["emails_sent"] =""
        pre_search_results["emails_received"] =""
    else:
        # Per-address summary: count the address as sender and as recipient
        # separately, then sum for the total
        senders_query = _build_email_query(sender_addrs=email_addrs, qs=qs, date_bounds=(start_datetime, end_datetime), encrypted=encrypted)
        app.logger.debug("senders query: %s" % (senders_query))
        pre_search_results["emails_sent"] = _count_emails(data_set_id, senders_query)["total"]
        pre_search_results["url"] = url
        rcvr_query = _build_email_query(recipient_addrs=email_addrs, qs=qs, date_bounds=(start_datetime, end_datetime), encrypted=encrypted)
        app.logger.debug("rcvr query: %s" % (rcvr_query))
        pre_search_results["emails_received"] = _count_emails(data_set_id, rcvr_query)["total"]
        pre_search_results["emails_total"] = pre_search_results["emails_sent"] + pre_search_results["emails_received"]
    pre_search_results["edges_total"] = count_associated_addresses(data_set_id=data_set_id, email_address=email_addrs, qs=qs, start_datetime=start_datetime, end_datetime=end_datetime)
    # NOTE(review): attach_query is only logged; count_email_attachments
    # rebuilds its own query from the raw arguments below -- confirm the
    # two stay in sync.
    attach_query = _build_email_query(email_addrs=email_addrs, qs=qs, date_bounds=(start_datetime, end_datetime), attachments_only=True, encrypted=encrypted)
    app.logger.debug("attachment-query: %s" % (attach_query))
    pre_search_results["attachments_total"] = count_email_attachments(data_set_id=data_set_id, email_address=email_addrs, qs=qs, start_datetime=start_datetime, end_datetime=end_datetime)
    # graph["attachments"] = attachments
    pre_search_results["data_set_id"] = data_set_id
    return pre_search_results
def _es_get_all_attachment_hash(data_set_id, attachment_hash, qs, start_datetime, end_datetime, size):
    """Find emails carrying the given attachment hash and return the
    graph/rows structure plus the matching attachments."""
    app.logger.debug("attachment_hash=%s, qs=%s" % ((str(attachment_hash)), qs))
    date_bounds = (start_datetime, end_datetime)
    email_query = _build_email_query(attachment_hash=attachment_hash, qs=qs, date_bounds=date_bounds)
    app.logger.debug("query: %s" % (email_query))
    results = _query_emails(data_set_id, email_query, size)
    graph = _build_graph_for_emails(data_set_id, results["hits"])
    graph["edge_total"] = len(graph["graph"]["links"])
    # Second pass: same filters restricted to emails with attachments
    attachment_query = _build_email_query(attachment_hash=attachment_hash, qs=qs, date_bounds=date_bounds, attachments_only=True)
    app.logger.debug("attachment-query: %s" % (attachment_query))
    attachments = _query_email_attachments(data_set_id, attachment_query, size)
    graph.update({
        "attachments": attachments["hits"],
        "attachments_total": attachments["attachments_total"],
        "data_set_id": data_set_id,
        "query_hits": results["total"],
    })
    return graph
# Get all rows for two or more email addresses, results will be sorted by time asc
def es_get_all_email_by_conversation_forward_backward(data_set_id, sender, recipients, start_datetime, end_datetime, size, sort_order="asc"):
    """Return graph/rows/attachments for the conversation between *sender*
    and *recipients* within the date bounds, sorted by time."""
    app.logger.debug("sender=%s, recipients=%s" % (str(sender),str(recipients)))
    # apply query with address intersection behaviour
    query = _build_email_query(sender_addrs=[sender], recipient_addrs=recipients, qs='', date_bounds=(start_datetime, end_datetime), sort_order=sort_order, date_mode_inclusive=False, address_filter_mode="conversation")
    app.logger.debug("query: %s" % (query))
    results = _query_emails(data_set_id, query, size)
    # If you do not want to generate a graph each time this is called use this code
    # return {"graph":{"nodes":[], "links":[]}, "rows": [_map_emails_to_row(email) for email in results["hits"]], "query_hits" : results["total"]}
    graph = _build_graph_for_emails(data_set_id, results["hits"])
    # Get attachments for community
    query = _build_email_query(sender_addrs=[sender], recipient_addrs=recipients, qs='', date_bounds=(start_datetime, end_datetime), sort_order=sort_order, date_mode_inclusive=False, address_filter_mode="conversation", attachments_only=True)
    app.logger.debug("attachment-query: %s" % (query))
    attachments = _query_email_attachments(data_set_id, query, size)
    graph["attachments"] = attachments["hits"]
    graph["attachments_total"] = attachments["attachments_total"]
    graph["data_set_id"] = data_set_id
    graph["query_hits"] = results["total"]
    return graph
# Get all rows , graph, attachments for two or more email addresses attempt to center around the start_date
# Return: current_index will indicate the offset in rows where the current date is located
# offset in attachments should be found using the email id if applicable --i.e. email may not have attachments
def es_get_conversation(data_set_id, sender, recipients, start_datetime, end_datetime, size, document_uid, current_datetime):
    """Return the conversation between *sender* and *recipients* centered
    around *current_datetime*.

    Emails at/after current_datetime and strictly before it are queried
    separately and stitched together in ascending order; current_index
    marks where the "after" half begins.  attachments_index is the offset
    of document_uid's attachments in the combined list, or -1.
    """
    app.logger.debug("senders=%s, recipients=%s" % (str(sender),str(recipients)))
    # apply query with address intersection behavior
    query = _build_email_query(sender_addrs=[sender], recipient_addrs=recipients, qs='', date_bounds=(current_datetime, end_datetime), sort_order='asc', date_mode_inclusive=True, address_filter_mode="conversation")
    app.logger.debug("query-after: %s" % (query))
    emails_asc = _query_emails(data_set_id, query, size)
    query = _build_email_query(sender_addrs=[sender], recipient_addrs=recipients, qs='', date_bounds=(start_datetime, current_datetime), sort_order='desc', date_mode_inclusive=False, address_filter_mode="conversation")
    app.logger.debug("query-before: %s" % (query))
    emails_desc = _query_emails(data_set_id, query, size)
    total = emails_asc["total"] + emails_desc["total"]
    emails_desc = emails_desc['hits']
    # The "before" half comes back newest-first; flip it to ascending order
    emails_desc.reverse()
    current_index= len(emails_desc)
    emails = emails_desc + emails_asc['hits']
    graph = _build_graph_for_emails(data_set_id, emails)
    graph['current_index'] = current_index
    # Get attachments for the same two date windows
    query = _build_email_query(sender_addrs=[sender], recipient_addrs=recipients, qs='', date_bounds=(current_datetime, end_datetime), sort_order='asc', date_mode_inclusive=True, address_filter_mode="conversation", attachments_only=True)
    app.logger.debug("attachment-query-after: %s" % (query))
    attachments_asc = _query_email_attachments(data_set_id, query, size)
    query = _build_email_query(sender_addrs=[sender], recipient_addrs=recipients, qs='', date_bounds=(start_datetime, current_datetime), sort_order='desc', date_mode_inclusive=False, address_filter_mode="conversation", attachments_only=True)
    # BUG FIX: this log line was previously labeled "attachment-query-after"
    # (with a stray ')') even though it logs the "before" window's query.
    app.logger.debug("attachment-query-before: %s" % (query))
    attachments_desc = _query_email_attachments(data_set_id, query, size)
    attachments_desc["hits"].reverse()
    # Find the first index in the attachment array where the current emails attachments start or -1
    graph["attachments"] = attachments_desc["hits"]+attachments_asc["hits"]
    graph["attachments_total"] = attachments_desc["attachments_total"]+attachments_asc["attachments_total"]
    def find_attch():
        for i,attch in enumerate(graph["attachments"]):
            if attch["email_id"] == document_uid:
                return i
        return -1
    graph["attachments_index"] = find_attch()
    graph["data_set_id"] = data_set_id
    graph["query_hits"] = total
    return graph
# Get all rows for a community, sorted by time asc
def es_get_all_email_by_community(data_set_id, community, email_address_list, qs, start_datetime, end_datetime, encrypted, size):
    """Return graph/rows/attachments for all email in *community*.

    NOTE(review): the qs parameter is accepted but qs='' is passed to the
    query builders below -- confirm whether free-text filtering was meant
    to apply here.
    """
    app.logger.debug("community=%s, email_address_list=%s" % (str(community), str(email_address_list)))
    query = _build_email_query(email_addrs=email_address_list, qs='', date_bounds=(start_datetime, end_datetime), community=[community], encrypted=encrypted)
    app.logger.debug("es_search.es_get_all_email_by_community(query: %s)" % (query))
    results = _query_emails(data_set_id, query, size)
    graph = _build_graph_for_emails(data_set_id, results["hits"])
    # Get attachments for community
    query = _build_email_query(email_addrs=email_address_list, qs='', date_bounds=(start_datetime, end_datetime), community=[community], attachments_only=True, encrypted=encrypted)
    app.logger.debug("attachment-query: %s" % (query))
    attachments = _query_email_attachments(data_set_id, query, size)
    graph["attachments"] = attachments["hits"]
    graph["attachments_total"] = attachments["attachments_total"]
    graph["data_set_id"] = data_set_id
    graph["query_hits"] = results["total"]
    return graph
# Get all rows for a community, sorted by time asc
def es_get_all_email_by_topic(data_set_id, topic, email_address_list, qs, start_datetime, end_datetime, encrypted, size):
    """Return graph/rows/attachments for emails ranked by their score for
    *topic* (descending).

    NOTE(review): as in es_get_all_email_by_community, qs is accepted but
    qs='' is what gets passed to the query builders -- verify intent.
    """
    app.logger.debug("email_address_list=%s, topic=%s" % ( str(email_address_list), str(topic)))
    query = _build_email_query(email_addrs=email_address_list, qs='', topic=topic, sort_mode="topic", sort_order="desc", date_bounds=(start_datetime, end_datetime), encrypted=encrypted)
    app.logger.debug("query: %s" % (query))
    # Get emails graph for topics; also fetch this topic's per-email score
    emails = _query_emails(data_set_id, query, size, additional_fields=["topic_scores.idx_"+str(topic["idx"])])
    graph = _build_graph_for_emails(data_set_id, emails["hits"])
    # Get attachments for top score topic
    query = _build_email_query(email_addrs=email_address_list, qs='', topic=topic, sort_mode="topic", sort_order="desc", date_bounds=(start_datetime, end_datetime), attachments_only=True, encrypted=encrypted)
    app.logger.debug("attachment-query: %s" % (query))
    attachments = _query_email_attachments(data_set_id, query, size)
    graph["attachments"] = attachments["hits"]
    graph["attachments_total"] = attachments["attachments_total"]
    graph["data_set_id"] = data_set_id
    graph["query_hits"] = emails["total"]
    return graph
|
|
"""Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import inspect
import warnings
import numpy as np
from scipy import sparse
from .externals import six
###############################################################################
def clone(estimator, safe=True):
    """Constructs a new estimator with the same parameters.
    Clone does a deep copy of the model in an estimator
    without actually copying attached data. It yields a new estimator
    with the same parameters that has not been fit on any data.
    Parameters
    ----------
    estimator: estimator object, or list, tuple or set of objects
        The estimator or group of estimators to be cloned
    safe: boolean, optional
        If safe is false, clone will fall back to a deepcopy on objects
        that are not estimators.
    """
    estimator_type = type(estimator)
    # XXX: not handling dictionaries
    if estimator_type in (list, tuple, set, frozenset):
        # Recursively clone each member of a collection of estimators
        return estimator_type([clone(e, safe=safe) for e in estimator])
    elif not hasattr(estimator, 'get_params'):
        if not safe:
            return copy.deepcopy(estimator)
        else:
            raise TypeError("Cannot clone object '%s' (type %s): "
                            "it does not seem to be a scikit-learn estimator "
                            "as it does not implement a 'get_params' methods."
                            % (repr(estimator), type(estimator)))
    # Rebuild the estimator from its own (deep-copied) constructor params
    klass = estimator.__class__
    new_object_params = estimator.get_params(deep=False)
    for name, param in six.iteritems(new_object_params):
        new_object_params[name] = clone(param, safe=False)
    new_object = klass(**new_object_params)
    params_set = new_object.get_params(deep=False)
    # quick sanity check of the parameters of the clone: every param we
    # passed to the constructor must come back out of get_params unchanged
    for name in new_object_params:
        param1 = new_object_params[name]
        param2 = params_set[name]
        if isinstance(param1, np.ndarray):
            # For most ndarrays, we do not test for complete equality
            if not isinstance(param2, type(param1)):
                equality_test = False
            elif (param1.ndim > 0
                    and param1.shape[0] > 0
                    and isinstance(param2, np.ndarray)
                    and param2.ndim > 0
                    and param2.shape[0] > 0):
                # Cheap spot-check: shape, dtype, first and last elements
                equality_test = (
                    param1.shape == param2.shape
                    and param1.dtype == param2.dtype
                    # We have to use '.flat' for 2D arrays
                    and param1.flat[0] == param2.flat[0]
                    and param1.flat[-1] == param2.flat[-1]
                )
            else:
                equality_test = np.all(param1 == param2)
        elif sparse.issparse(param1):
            # For sparse matrices equality doesn't work
            if not sparse.issparse(param2):
                equality_test = False
            elif param1.size == 0 or param2.size == 0:
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.size == 0
                    and param2.size == 0
                )
            else:
                # Spot-check class, end elements, nnz and shape
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.data[0] == param2.data[0]
                    and param1.data[-1] == param2.data[-1]
                    and param1.nnz == param2.nnz
                    and param1.shape == param2.shape
                )
        else:
            new_obj_val = new_object_params[name]
            params_set_val = params_set[name]
            # The following construct is required to check equality on special
            # singletons such as np.nan that are not equal to them-selves:
            equality_test = (new_obj_val == params_set_val or
                             new_obj_val is params_set_val)
        if not equality_test:
            raise RuntimeError('Cannot clone object %s, as the constructor '
                               'does not seem to set parameter %s' %
                               (estimator, name))
    return new_object
###############################################################################
def _pprint(params, offset=0, printer=repr):
    """Pretty print the dictionary 'params'
    Parameters
    ----------
    params: dict
        The dictionary to pretty print
    offset: int
        The offset in characters to add at the begin of each line.
    printer:
        The function to convert entries to strings, typically
        the builtin str or repr
    """
    # Do a multi-line justified repr:
    # Temporarily shorten numpy's printing so embedded arrays stay compact
    options = np.get_printoptions()
    np.set_printoptions(precision=5, threshold=64, edgeitems=2)
    params_list = list()
    this_line_length = offset
    line_sep = ',\n' + (1 + offset // 2) * ' '
    for i, (k, v) in enumerate(sorted(six.iteritems(params))):
        if type(v) is float:
            # use str for representing floating point numbers
            # this way we get consistent representation across
            # architectures and versions.
            this_repr = '%s=%s' % (k, str(v))
        else:
            # use repr of the rest
            this_repr = '%s=%s' % (k, printer(v))
        if len(this_repr) > 500:
            # Elide very long reprs, keeping the head and tail
            this_repr = this_repr[:300] + '...' + this_repr[-100:]
        if i > 0:
            # Wrap to a new (indented) line once the current one is full
            if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
                params_list.append(line_sep)
                this_line_length = len(line_sep)
            else:
                params_list.append(', ')
                this_line_length += 2
        params_list.append(this_repr)
        this_line_length += len(this_repr)
    # Restore the caller's numpy print options
    np.set_printoptions(**options)
    lines = ''.join(params_list)
    # Strip trailing space to avoid nightmare in doctests
    lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
    return lines
###############################################################################
class BaseEstimator(object):
    """Base class for all estimators in scikit-learn.

    Notes
    -----
    All estimators should specify all the parameters that can be set
    at the class level in their ``__init__`` as explicit keyword
    arguments (no ``*args`` or ``**kwargs``).
    """
    @classmethod
    def _get_param_names(cls):
        """Get parameter names for the estimator.

        Returns the sorted list of ``__init__`` argument names
        (excluding ``self``); empty if there is no explicit constructor.
        """
        # fetch the constructor or the original constructor before
        # deprecation wrapping if any
        init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
        if init is object.__init__:
            # No explicit constructor to introspect
            return []
        # introspect the constructor arguments to find the model parameters
        # to represent
        # NOTE(review): inspect.getargspec is deprecated (removed in
        # Python 3.11) — confirm the supported interpreter range.
        args, varargs, kw, default = inspect.getargspec(init)
        if varargs is not None:
            raise RuntimeError("scikit-learn estimators should always "
                               "specify their parameters in the signature"
                               " of their __init__ (no varargs)."
                               " %s doesn't follow this convention."
                               % (cls, ))
        # Remove 'self'
        # XXX: This is going to fail if the init is a staticmethod, but
        # who would do this?
        args.pop(0)
        args.sort()
        return args
    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Parameters
        ----------
        deep: boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.  Deprecated
            parameters (those that emit a DeprecationWarning on access)
            are omitted.
        """
        out = dict()
        for key in self._get_param_names():
            # We need deprecation warnings to always be on in order to
            # catch deprecated param values.
            # This is set in utils/__init__.py but it gets overwritten
            # when running under python3 somehow.
            warnings.simplefilter("always", DeprecationWarning)
            try:
                with warnings.catch_warnings(record=True) as w:
                    value = getattr(self, key, None)
                if len(w) and w[0].category == DeprecationWarning:
                    # if the parameter is deprecated, don't show it
                    continue
            finally:
                # undo the simplefilter pushed above, even on 'continue'
                warnings.filters.pop(0)
            # XXX: should we rather test if instance of estimator?
            if deep and hasattr(value, 'get_params'):
                # flatten nested estimator params as "<name>__<param>"
                deep_items = value.get_params().items()
                out.update((key + '__' + k, val) for k, val in deep_items)
            out[key] = value
        return out
    def set_params(self, **params):
        """Set the parameters of this estimator.

        The method works on simple estimators as well as on nested objects
        (such as pipelines). The former have parameters of the form
        ``<component>__<parameter>`` so that it's possible to update each
        component of a nested object.

        Returns
        -------
        self
        """
        if not params:
            # Simple optimisation to gain speed (inspect is slow)
            return self
        valid_params = self.get_params(deep=True)
        for key, value in six.iteritems(params):
            # split "<component>__<parameter>" keys at the first '__'
            split = key.split('__', 1)
            if len(split) > 1:
                # nested objects case
                name, sub_name = split
                if name not in valid_params:
                    raise ValueError('Invalid parameter %s for estimator %s' %
                                     (name, self))
                # delegate to the nested estimator
                sub_object = valid_params[name]
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case
                if key not in valid_params:
                    raise ValueError('Invalid parameter %s ' 'for estimator %s'
                                     % (key, self.__class__.__name__))
                setattr(self, key, value)
        return self
    def __repr__(self):
        # e.g. "Estimator(alpha=0.1, fit_intercept=True)", wrapped by
        # _pprint so long parameter lists stay readable
        class_name = self.__class__.__name__
        return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
                                               offset=len(class_name),),)
###############################################################################
class ClassifierMixin(object):
    """Mixin class for all classifiers in scikit-learn."""

    _estimator_type = "classifier"

    def score(self, X, y, sample_weight=None):
        """Return the mean accuracy of ``self.predict(X)`` with respect to y.

        In multi-label classification this is the subset accuracy, a harsh
        metric: every label of every sample must be predicted correctly.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.
        y : array-like, shape = (n_samples) or (n_samples, n_outputs)
            True labels for X.
        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.

        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) wrt. y.
        """
        # import here to avoid a circular import at module load time
        from .metrics import accuracy_score
        predictions = self.predict(X)
        return accuracy_score(y, predictions, sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
    """Mixin class for all regression estimators in scikit-learn."""

    _estimator_type = "regressor"

    def score(self, X, y, sample_weight=None):
        """Return the coefficient of determination R^2 of the prediction.

        R^2 is defined as (1 - u/v), where u is the regression sum of
        squares ((y_true - y_pred) ** 2).sum() and v is the residual sum
        of squares ((y_true - y_true.mean()) ** 2).sum().  Best possible
        score is 1.0; lower values are worse.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.
        y : array-like, shape = (n_samples) or (n_samples, n_outputs)
            True values for X.
        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.

        Returns
        -------
        score : float
            R^2 of self.predict(X) wrt. y.
        """
        # import here to avoid a circular import at module load time
        from .metrics import r2_score
        predicted = self.predict(X)
        return r2_score(y, predicted, sample_weight=sample_weight)
###############################################################################
class ClusterMixin(object):
    """Mixin class for all cluster estimators in scikit-learn."""

    _estimator_type = "clusterer"

    def fit_predict(self, X, y=None):
        """Perform clustering on X and return the resulting cluster labels.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        y : ndarray, shape (n_samples,)
            cluster labels
        """
        # Unoptimized default: delegate to fit() and report the labels_
        # attribute it sets; subclasses may override with a faster path.
        self.fit(X)
        return self.labels_
class BiclusterMixin(object):
    """Mixin class for all bicluster estimators in scikit-learn."""

    @property
    def biclusters_(self):
        """Convenient way to get row and column indicators together.

        Returns the ``rows_`` and ``columns_`` members.
        """
        return self.rows_, self.columns_

    def get_indices(self, i):
        """Row and column indices of the i'th bicluster.

        Only works if ``rows_`` and ``columns_`` attributes exist.

        Returns
        -------
        row_ind : np.array, dtype=np.intp
            Indices of rows in the dataset that belong to the bicluster.
        col_ind : np.array, dtype=np.intp
            Indices of columns in the dataset that belong to the bicluster.
        """
        # rows_[i] / columns_[i] are boolean indicator vectors; nonzero()
        # converts them to integer index arrays
        row_mask = self.rows_[i]
        col_mask = self.columns_[i]
        return np.nonzero(row_mask)[0], np.nonzero(col_mask)[0]

    def get_shape(self, i):
        """Shape of the i'th bicluster.

        Returns
        -------
        shape : (int, int)
            Number of rows and columns (resp.) in the bicluster.
        """
        return tuple(len(ind) for ind in self.get_indices(i))

    def get_submatrix(self, i, data):
        """Return the submatrix corresponding to bicluster `i`.

        Works with sparse matrices. Only works if ``rows_`` and
        ``columns_`` attributes exist.
        """
        from .utils.validation import check_array
        data = check_array(data, accept_sparse='csr')
        row_ind, col_ind = self.get_indices(i)
        # outer indexing: all (row, col) pairs of the bicluster
        return data[row_ind[:, np.newaxis], col_ind]
###############################################################################
class TransformerMixin(object):
    """Mixin class for all transformers in scikit-learn."""

    def fit_transform(self, X, y=None, **fit_params):
        """Fit to data, then transform it.

        Fits transformer to X and y with optional parameters fit_params
        and returns a transformed version of X.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training set.
        y : numpy array of shape [n_samples]
            Target values.

        Returns
        -------
        X_new : numpy array of shape [n_samples, n_features_new]
            Transformed array.
        """
        # Unoptimized default that simply chains fit() and transform();
        # subclasses with a cheaper combined path should override this.
        if y is None:
            # fit method of arity 1 (unsupervised transformation)
            fitted = self.fit(X, **fit_params)
        else:
            # fit method of arity 2 (supervised transformation)
            fitted = self.fit(X, y, **fit_params)
        return fitted.transform(X)
###############################################################################
class MetaEstimatorMixin(object):
    """Mixin class for all meta estimators in scikit-learn."""
    # this is just a tag for the moment: the class adds no attributes or
    # methods, it only marks estimators that wrap other estimators
###############################################################################
def is_classifier(estimator):
    """Returns True if the given estimator is (probably) a classifier."""
    # Rely on the _estimator_type tag set by ClassifierMixin.
    estimator_type = getattr(estimator, "_estimator_type", None)
    return estimator_type == "classifier"
def is_regressor(estimator):
    """Returns True if the given estimator is (probably) a regressor."""
    # Rely on the _estimator_type tag set by RegressorMixin.
    estimator_type = getattr(estimator, "_estimator_type", None)
    return estimator_type == "regressor"
|
|
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import random
import matplotlib.pyplot as plt
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
import common.proto_utils as proto_utils
from modules.map.proto import map_pb2
class Map:
    """In-memory wrapper of a map_pb2.Map with matplotlib drawing helpers.

    Each draw_* method renders one kind of map element (roads, lanes,
    crosswalks, junctions, signals, stop signs) onto a supplied
    matplotlib axes object, cycling through a small color palette.

    Fixes over the previous revision: the Python-2-only ``print`` statement
    and the float list indices produced by true division (``len(x) / 2``)
    are replaced with ``print(...)`` and floor division so the code runs
    under both Python 2 and Python 3 with identical behavior.
    """

    def __init__(self):
        self.map_pb = map_pb2.Map()  # the loaded map protobuf
        self.colors = []  # rotating RGBA palette shared by the draw methods
        self.init_colors()

    def init_colors(self):
        """Populate self.colors with evenly spaced RGBA values from 'brg'."""
        color_num = 6
        self.colors = []
        values = range(color_num)
        jet = plt.get_cmap('brg')
        color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
        scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
        for val in values:
            self.colors.append(scalar_map.to_rgba(val))

    def load(self, map_file_name):
        """Parse map_file_name into self.map_pb; return True on success."""
        res = proto_utils.get_pb_from_file(map_file_name, self.map_pb)
        return res is not None

    def draw_roads(self, ax):
        """Draw the outer boundary of every road."""
        cnt = 1
        for road in self.map_pb.road:
            color_val = self.colors[cnt % len(self.colors)]
            self.draw_road(ax, road, color_val)
            cnt += 1

    def draw_road(self, ax, road, color_val):
        """Draw the outer-polygon line segments of a single road."""
        for section in road.section:
            for edge in section.boundary.outer_polygon.edge:
                for segment in edge.curve.segment:
                    if segment.HasField('line_segment'):
                        px = []
                        py = []
                        for p in segment.line_segment.point:
                            px.append(float(p.x))
                            py.append(float(p.y))
                        ax.plot(px, py, ls='-', c=color_val, alpha=0.5)

    def draw_lanes(self, ax, is_show_lane_ids, laneids, is_show_lane_details):
        """Draw lane boundaries and central curves.

        If laneids is empty, every lane is drawn; otherwise only lanes whose
        id is listed.  Depending on the flags, each lane is annotated with
        its id or with full neighbor details; when filtering by laneids the
        matching lane protos are also dumped to stdout.
        """
        cnt = 1
        for lane in self.map_pb.lane:
            color_val = self.colors[cnt % len(self.colors)]
            if len(laneids) == 0:
                self._draw_lane_boundary(lane, ax, color_val)
                self._draw_lane_central(lane, ax, color_val)
            elif lane.id.id in laneids:
                self._draw_lane_boundary(lane, ax, color_val)
                self._draw_lane_central(lane, ax, color_val)
            if is_show_lane_ids:
                self._draw_lane_id(lane, ax, color_val)
            elif is_show_lane_details:
                self._draw_lane_details(lane, ax, color_val)
            elif lane.id.id in laneids:
                # was `print str(lane)` (Python-2-only statement syntax)
                print(str(lane))
                self._draw_lane_id(lane, ax, color_val)
            cnt += 1

    def _draw_lane_id(self, lane, ax, color_val):
        """Annotate a lane with its id near its central point."""
        x, y = self._find_lane_central_point(lane)
        self._draw_label(lane.id.id, (x, y), ax, color_val)

    def _draw_lane_details(self, lane, ax, color_val):
        """Annotate a lane with its id and all its graph neighbors."""
        x, y = self._find_lane_central_point(lane)
        # id plus predecessor/successor/neighbor ids, one per line
        details = str(lane.id.id)
        for predecessor_id in lane.predecessor_id:
            details += '\npre:' + str(predecessor_id.id)
        for successor_id in lane.successor_id:
            details += '\nsuc:' + str(successor_id.id)
        for left_neighbor_forward_lane_id in lane.left_neighbor_forward_lane_id:
            details += '\nlnf:' + str(left_neighbor_forward_lane_id.id)
        for right_neighbor_forward_lane_id in lane.right_neighbor_forward_lane_id:
            details += '\nrnf:' + str(right_neighbor_forward_lane_id.id)
        for left_neighbor_reverse_lane_id in lane.left_neighbor_reverse_lane_id:
            details += '\nlnr:' + str(left_neighbor_reverse_lane_id.id)
        for right_neighbor_reverse_lane_id in lane.right_neighbor_reverse_lane_id:
            details += '\nrnr:' + str(right_neighbor_reverse_lane_id.id)
        # randomly pick one of four offsets so labels don't all overlap
        labelxys = [(40, -40), (-40, -40), (40, 40), (-40, 40)]
        has = ['right', 'left', 'right', 'left']
        vas = ['bottom', 'bottom', 'top', 'top']
        idx = random.randint(0, 3)
        lxy = labelxys[idx]
        plt.annotate(
            details,
            xy=(x, y), xytext=lxy,
            textcoords='offset points', ha=has[idx], va=vas[idx],
            bbox=dict(boxstyle='round,pad=0.5', fc=color_val, alpha=0.5),
            arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=-0.2',
                            fc=color_val, ec=color_val, alpha=0.5))

    def draw_pnc_junctions(self, ax):
        """Draw every PNC junction polygon with its id."""
        cnt = 1
        for pnc_junction in self.map_pb.pnc_junction:
            color_val = self.colors[cnt % len(self.colors)]
            self._draw_polygon_boundary(pnc_junction.polygon, ax, color_val)
            self._draw_pnc_junction_id(pnc_junction, ax, color_val)
            cnt += 1

    def _draw_pnc_junction_id(self, pnc_junction, ax, color_val):
        """Label a PNC junction at its first polygon vertex."""
        x = pnc_junction.polygon.point[0].x
        y = pnc_junction.polygon.point[0].y
        self._draw_label(pnc_junction.id.id, (x, y), ax, color_val)

    def draw_crosswalks(self, ax):
        """Draw every crosswalk polygon with its id."""
        cnt = 1
        for crosswalk in self.map_pb.crosswalk:
            color_val = self.colors[cnt % len(self.colors)]
            self._draw_polygon_boundary(crosswalk.polygon, ax, color_val)
            self._draw_crosswalk_id(crosswalk, ax, color_val)
            cnt += 1

    def _draw_crosswalk_id(self, crosswalk, ax, color_val):
        """Label a crosswalk at its first polygon vertex."""
        x = crosswalk.polygon.point[0].x
        y = crosswalk.polygon.point[0].y
        self._draw_label(crosswalk.id.id, (x, y), ax, color_val)

    @staticmethod
    def _draw_label(label_id, point, ax, color_val):
        """Annotate ``point`` with ``label_id`` at a random offset."""
        labelxys = [(40, -40), (-40, -40), (40, 40), (-40, 40)]
        has = ['right', 'left', 'right', 'left']
        vas = ['bottom', 'bottom', 'top', 'top']
        idx = random.randint(0, 3)
        lxy = labelxys[idx]
        plt.annotate(
            label_id,
            xy=(point[0], point[1]), xytext=lxy,
            textcoords='offset points', ha=has[idx], va=vas[idx],
            bbox=dict(boxstyle='round,pad=0.5', fc=color_val, alpha=0.5),
            arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=-0.2',
                            fc=color_val, ec=color_val, alpha=0.5))

    @staticmethod
    def _find_lane_central_point(lane):
        """Midpoint between the median points of the two lane boundaries."""
        # floor division keeps the indices integral (true division broke
        # indexing under Python 3)
        segment_idx = len(lane.left_boundary.curve.segment) // 2
        median_segment = lane.left_boundary.curve.segment[segment_idx]
        left_point_idx = len(median_segment.line_segment.point) // 2
        left_median_point = median_segment.line_segment.point[left_point_idx]
        segment_idx = len(lane.right_boundary.curve.segment) // 2
        median_segment = lane.right_boundary.curve.segment[segment_idx]
        right_point_idx = len(median_segment.line_segment.point) // 2
        right_median_point = median_segment.line_segment.point[right_point_idx]
        x = (left_median_point.x + right_median_point.x) / 2
        y = (left_median_point.y + right_median_point.y) / 2
        return x, y

    @staticmethod
    def _get_median_point(points):
        """Return (x, y) of the median point of a point sequence.

        For an even number of points, the average of the two middle
        points is returned.
        """
        # floor division keeps the indices integral under Python 3
        if len(points) % 2 == 1:
            point = points[len(points) // 2]
            return point.x, point.y
        else:
            point1 = points[len(points) // 2 - 1]
            point2 = points[len(points) // 2]
            return (point1.x + point2.x) / 2.0, (point1.y + point2.y) / 2.0

    @staticmethod
    def _draw_lane_boundary(lane, ax, color_val):
        """Draw the left and right boundary curves of a lane."""
        for curve in lane.left_boundary.curve.segment:
            if curve.HasField('line_segment'):
                px = []
                py = []
                for p in curve.line_segment.point:
                    px.append(float(p.x))
                    py.append(float(p.y))
                ax.plot(px, py, ls='-', c=color_val, alpha=0.5)
        for curve in lane.right_boundary.curve.segment:
            if curve.HasField('line_segment'):
                px = []
                py = []
                for p in curve.line_segment.point:
                    px.append(float(p.x))
                    py.append(float(p.y))
                ax.plot(px, py, ls='-', c=color_val, alpha=0.5)

    @staticmethod
    def _draw_lane_central(lane, ax, color_val):
        """Draw the central curve of a lane (dotted)."""
        for curve in lane.central_curve.segment:
            if curve.HasField('line_segment'):
                px = []
                py = []
                for p in curve.line_segment.point:
                    px.append(float(p.x))
                    py.append(float(p.y))
                ax.plot(px, py, ls=':', c=color_val, alpha=0.5)

    @staticmethod
    def _draw_polygon_boundary(polygon, ax, color_val):
        """Draw a polygon's boundary as a polyline."""
        px = []
        py = []
        for point in polygon.point:
            px.append(point.x)
            py.append(point.y)
        ax.plot(px, py, ls='-', c=color_val, alpha=0.5)

    def draw_signal_lights(self, ax):
        """Draw every signal's stop lines, labeled with the signal id."""
        for signal in self.map_pb.signal:
            for stop_line in signal.stop_line:
                for curve in stop_line.segment:
                    self._draw_stop_line(curve.line_segment, signal.id.id, ax, "mistyrose")

    def draw_stop_signs(self, ax):
        """Draw every stop sign's stop lines, labeled with the sign id."""
        for stop_sign in self.map_pb.stop_sign:
            for stop_line in stop_sign.stop_line:
                for curve in stop_line.segment:
                    self._draw_stop_line(curve.line_segment, stop_sign.id.id, ax, "yellow")

    @staticmethod
    def _draw_stop_line(line_segment, label, ax, label_color_val):
        """Draw one stop line and annotate it at its centroid."""
        px = []
        py = []
        for p in line_segment.point:
            px.append(float(p.x))
            py.append(float(p.y))
        ax.plot(px, py, 'o-')
        # random offset in [-80,-20] or [20,80] on each axis for the label
        lxy = [random.randint(20, 80) * random.sample([-1, 1], 1)[0],
               random.randint(20, 80) * random.sample([-1, 1], 1)[0]]
        xy = (sum(px) / len(px), sum(py) / len(py))
        plt.annotate(
            label,
            xy=xy, xytext=lxy,
            textcoords='offset points',
            bbox=dict(boxstyle='round,pad=0.5', fc=label_color_val, alpha=0.5),
            arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
|
|
# Copyright (c) 2011-2015 by California Institute of Technology
# 2014 by The Regents of the University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the names of copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER(S) OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
Check Linear Discrete-Time-Invariant System reachability between polytopes
Primary functions:
- L{solve_feasible}
- L{createLM}
- L{get_max_extreme}
See Also
========
L{find_controller}
"""
import logging
logger = logging.getLogger(__name__)
from collections import Iterable
import numpy as np
import polytope as pc
from cvxopt import matrix, solvers
lp_solver = 'mosek'
def is_feasible(
    from_region, to_region, sys, N,
    closed_loop=True,
    use_all_horizon=False,
    trans_set=None
):
    """Return True if to_region is reachable from_region.

    For details see solve_feasible.
    """
    # from_region is feasible iff it is contained in the set S0 of
    # states from which to_region can be reached within N steps
    reachable_from = solve_feasible(
        from_region, to_region, sys, N,
        closed_loop, use_all_horizon, trans_set)
    return from_region <= reachable_from
def is_feasible_alternative(
    from_region, to_region, sys, N,
):
    """Return True if to_region is reachable from_region.

    An alternative implementation of feasibility of transitions via an
    open loop policy.
    Supposed to be faster as it does not require set difference
    Conservative for non-convex regions (might say infeasible when feasible)

    Every polytope of from_region must be able to reach some polytope of
    to_region (from all of its extreme points) for the result to be True.
    """
    # Solve a set of LP feasibility problems, one per extreme point.
    #
    # Bug fix: `count` used to be re-initialized inside the loop over
    # from_region, so the final comparison only reflected the LAST
    # polytope (and an empty from_region raised NameError).  It now
    # counts, across the whole region, how many from-polytopes can
    # reach to_region.
    count = 0
    for f1 in from_region:  # from all
        for f2 in to_region:  # to some
            bad_vert = False
            for vert in pc.extreme(f1):
                u = exists_input(vert, sys, f1, f2, N)
                if u is None:
                    bad_vert = True  # not possible to reach f2 from vert
                    break
            if not bad_vert:  # possible to reach f2 from every vertex of f1
                count = count + 1
                break
    return count == len(from_region)
def exists_input(x0, ssys, P1, P3, N):
    """Checks if there exists a sequence u_seq such that:

    - x(t+1) = A x(t) + B u(t) + K
    - x(k) \in P1 for k = 0,...,N - 1
    - x(N - 1) \in P3
    - [u(k); x(k)] \in PU

    @return: a feasible flat input sequence (numpy array of length N*m),
        or None if the LP solver does not report an optimal solution.

    See Also
    ========
    get_input_helper
    """
    n = ssys.A.shape[1]  # state dimension
    m = ssys.B.shape[1]  # input dimension (currently unused below)
    # constrain x(0..N-1) to P1 and x(N) to P3
    list_P = []
    list_P.append(P1)
    for i in xrange(N-1,0,-1):
        list_P.append(P1)
    list_P.append(P3)
    # stack all constraints: L [x(0)' u(0)' ... u(N-1)']' <= M
    L,M = createLM(ssys, N, list_P)
    # Remove first constraint on x(0)
    L = L[range(list_P[0].A.shape[0], L.shape[0]),:]
    M = M[range(list_P[0].A.shape[0], M.shape[0]),:]
    # Separate L matrix into the x(0) block (Lx) and the input block (Lu)
    Lx = L[:,range(n)]
    Lu = L[:,range(n,L.shape[1])]
    # substitute the known initial state: Lu u <= M - Lx x0
    M = M - Lx.dot(x0).reshape(Lx.shape[0],1)
    # Constraints for an LP feasibility check (zero objective vector)
    G = matrix(Lu)
    h = matrix(M)
    c = matrix(np.zeros(G.size[1], dtype=float))
    sol = solvers.lp(c, G, h, None, None, lp_solver)
    if sol['status'] != "optimal":
        # infeasible (or solver failure): no admissible input sequence
        return None
    else:
        u = np.array(sol['x']).flatten()
        return u
def solve_feasible(
    P1, P2, ssys, N=1, closed_loop=True,
    use_all_horizon=False, trans_set=None, max_num_poly=5
):
    """Compute S0 \subseteq P1 from which P2 is N-reachable.

    N-reachable = reachable within horizon N.
    The system dynamics are C{ssys}.
    The closed-loop algorithm solves for one step at a time,
    which keeps the dimension of the polytopes down.

    @type P1: C{Polytope} or C{Region}
    @type P2: C{Polytope} or C{Region}
    @type ssys: L{LtiSysDyn}
    @param N: The horizon length
    @param closed_loop: If true, take 1 step at a time.
        This keeps down polytope dimension and
        handles disturbances better.
    @type closed_loop: bool
    @param use_all_horizon: Used for closed loop algorithm. If true
        (STILL UNDER DEVELOPMENT, CURRENTLY UNAVAILABLE), then allow
        reachability also in less than N steps.
    @type use_all_horizon: bool
    @param trans_set: If specified,
        then force transitions to be in this set.
        Otherwise, P1 is used.

    @return: the subset S0 of P1 from which P2 is reachable
    @rtype: C{Polytope} or C{Region}
    """
    if use_all_horizon:
        raise ValueError('solve_feasible() with use_all_horizon=True is still '
                         'under development\nand currently unavailable.')
    # dispatch to the closed-loop (one step at a time) or the open-loop
    # (single large LP) variant
    if closed_loop:
        return solve_closed_loop(
            P1, P2, ssys, N,
            use_all_horizon=use_all_horizon,
            trans_set=trans_set)
    return solve_open_loop(
        P1, P2, ssys, N,
        trans_set=trans_set,
        max_num_poly=max_num_poly)
def solve_closed_loop(
    P1, P2, ssys, N,
    use_all_horizon=False, trans_set=None
):
    """Compute S0 \subseteq P1 from which P2 is closed-loop N-reachable.

    @type P1: C{Polytope} or C{Region}
    @type P2: C{Polytope} or C{Region}
    @param ssys: system dynamics
    @param N: horizon length
    @type N: int > 0
    @param use_all_horizon:
        - if True, then take union of S0 sets
        - Otherwise, chain S0 sets (funnel-like)
    @type use_all_horizon: bool
    @param trans_set: If provided,
        then intermediate steps are allowed
        to be in trans_set.
        Otherwise, P1 is used.
    """
    if use_all_horizon:
        raise ValueError('solve_closed_loop() with use_all_horizon=True '
                         'is still under development\nand currently '
                         'unavailable.')
    p1 = P1.copy() # Initial set
    p2 = P2.copy() # Terminal set
    # intermediate steps constrained to trans_set when given, else to p1
    if trans_set is not None:
        Pinit = trans_set
    else:
        Pinit = p1
    # backwards in time: repeatedly compute the 1-step predecessor set
    # of p2, accumulating the union in s0
    s0 = pc.Region()
    reached = False
    for i in xrange(N, 0, -1):
        # first step from P1
        if i == 1:
            Pinit = p1
        p2 = solve_open_loop(Pinit, p2, ssys, 1, trans_set)
        s0 = s0.union(p2, check_convex=True)
        s0 = pc.reduce(s0)
        # empty target polytope ?
        if not pc.is_fulldim(p2):
            break
        old_reached = reached
        # overlaps initial set ?
        if p1.intersect(p2):
            s0 = s0.union(p2, check_convex=True)
            s0 = pc.reduce(s0)
        # NOTE(review): `reached` is never assigned True anywhere in this
        # loop, so both conditionals below are dead code — the early-exit
        # logic they describe never triggers.  Confirm intent upstream.
        # we went past it -> don't continue
        if old_reached is True and reached is False:
            logger.info('stopped intersecting si')
            #break
        if reached is True:
            break
    if not pc.is_fulldim(s0):
        # nothing reachable: return the (empty) trivial polytope
        return pc.Polytope()
    s0 = pc.reduce(s0)
    return s0
def solve_open_loop(
    P1, P2, ssys, N,
    trans_set=None, max_num_poly=5
):
    """Compute S0 \subseteq P1 from which P2 is open-loop N-reachable.

    Only the max_num_poly largest-volume polytopes of each region are
    considered; S0 is the union over all start/target polytope pairs.
    """
    # use the max_num_poly largest volumes for reachability
    initial = volumes_for_reachability(P1.copy(), max_num_poly)
    target = volumes_for_reachability(P2.copy(), max_num_poly)
    # a bare polytope (len == 0) is wrapped in a list so both cases iterate
    start_polys = initial if len(initial) > 0 else [initial]
    target_polys = target if len(target) > 0 else [target]
    # union of s0 over all polytope combinations
    s0 = pc.Polytope()
    for p1 in start_polys:
        for p2 in target_polys:
            cur_s0 = poly_to_poly(p1, p2, ssys, N, trans_set)
            s0 = s0.union(cur_s0, check_convex=True)
    return s0
def poly_to_poly(p1, p2, ssys, N, trans_set=None):
    """Compute s0 for open-loop polytope to polytope N-reachability."""
    p1 = p1.copy()
    p2 = p2.copy()
    # intermediate states constrained to trans_set (default: start polytope)
    if trans_set is None:
        trans_set = p1
    # stack polytope constraints: L [x(0)' u']' <= M
    L, M = createLM(ssys, N, p1, trans_set, p2)
    s0 = pc.reduce(pc.Polytope(L, M))
    # Project polytope s0 onto the x(0) coordinates (first n dimensions)
    n = np.shape(ssys.A)[1]
    s0 = s0.project(range(1, n + 1))
    return pc.reduce(s0)
def volumes_for_reachability(part, max_num_poly):
    """Return part truncated to its max_num_poly largest-volume polytopes."""
    if len(part) <= max_num_poly:
        # nothing to discard: keep the region as-is
        return part
    # rank polytopes by decreasing volume and keep only the largest
    volumes = np.array([poly.volume for poly in part])
    order = np.argsort(-volumes)
    largest = [part[i] for i in order[:max_num_poly]]
    return pc.Region(largest, [])
def createLM(ssys, N, list_P, Pk=None, PN=None, disturbance_ind=None):
    """Compute the components of the polytope::

        L [x(0)' u(0)' ... u(N-1)']' <= M

    which stacks the following constraints:

    - x(t+1) = A x(t) + B u(t) + E d(t)
    - [u(k); x(k)] \in ssys.Uset for all k

    If list_P is a C{Polytope}:

    - x(0) \in list_P if list_P
    - x(k) \in Pk for k= 1,2, .. N-1
    - x(N) \in PN

    If list_P is a list of polytopes:

    - x(k) \in list_P[k] for k= 0, 1 ... N

    The returned polytope describes the intersection of the polytopes
    for all possible inputs.

    @param ssys: system dynamics
    @type ssys: L{LtiSysDyn}
    @param N: horizon length
    @type list_P: list of Polytopes or C{Polytope}
    @type Pk: C{Polytope}
    @type PN: C{Polytope}
    @param disturbance_ind: list indicating which k's
        that disturbance should be taken into account.
        Default is [1,2, ... N]
    @return: (L, M) stacked constraint matrices.
    """
    # normalize to the list form: [x(0) set, N-1 middle sets, terminal set]
    if not isinstance(list_P, Iterable):
        list_P = [list_P] +(N-1) *[Pk] +[PN]
    if disturbance_ind is None:
        disturbance_ind = range(1,N+1)
    A = ssys.A
    B = ssys.B
    E = ssys.E
    K = ssys.K
    D = ssys.Wset
    PU = ssys.Uset
    n = A.shape[1]  # State space dimension
    m = B.shape[1]  # Input space dimension
    p = E.shape[1]  # Disturbance space dimension
    # non-zero disturbance matrix E ?
    if not np.all(E==0):
        if not pc.is_fulldim(D):
            # disturbance set is degenerate: treat E as zero
            E = np.zeros(K.shape)
    # total number of state-constraint rows over the horizon
    list_len = np.array([P.A.shape[0] for P in list_P])
    sumlen = np.sum(list_len)
    LUn = np.shape(PU.A)[0]
    # Lk/Mk: state constraints; LU/MU: input constraints;
    # Gk/GU: corresponding disturbance terms
    Lk = np.zeros([sumlen, n+N*m])
    LU = np.zeros([LUn*N, n+N*m])
    Mk = np.zeros([sumlen, 1])
    MU = np.tile(PU.b.reshape(PU.b.size, 1), (N, 1))
    Gk = np.zeros([sumlen, p*N])
    GU = np.zeros([LUn*N, p*N])
    K_hat = np.tile(K, (N, 1))
    # block-diagonal input/disturbance matrices for the stacked dynamics
    B_diag = B
    E_diag = E
    for i in xrange(N-1):
        B_diag = _block_diag2(B_diag, B)
        E_diag = _block_diag2(E_diag, E)
    # A_n = A^i;  A_k holds [A^(i-1) ... A 0 ...] so that
    # x(i) = A_n x(0) + A_k (B_diag u + E_diag d + K_hat)
    A_n = np.eye(n)
    A_k = np.zeros([n, n*N])
    sum_vert = 0
    for i in xrange(N+1):
        Li = list_P[i]
        if not isinstance(Li, pc.Polytope):
            # NOTE(review): logger.warn is deprecated in favor of
            # logger.warning — confirm the supported logging versions.
            logger.warn('createLM: Li of type: ' +str(type(Li) ) )
        ######### FOR M #########
        idx = range(sum_vert, sum_vert + Li.A.shape[0])
        Mk[idx, :] = Li.b.reshape(Li.b.size,1) - \
            Li.A.dot(A_k).dot(K_hat)
        ######### FOR G #########
        if i in disturbance_ind:
            idx = np.ix_(
                range(sum_vert, sum_vert + Li.A.shape[0]),
                range(Gk.shape[1])
            )
            Gk[idx] = Li.A.dot(A_k).dot(E_diag)
            if (PU.A.shape[1] == m+n) and (i < N):
                # mixed input-state constraints: the state part also
                # feels the disturbance
                A_k_E_diag = A_k.dot(E_diag)
                d_mult = np.vstack([np.zeros([m, p*N]), A_k_E_diag])
                idx = np.ix_(range(LUn*i, LUn*(i+1)), range(p*N))
                GU[idx] = PU.A.dot(d_mult)
        ######### FOR L #########
        AB_line = np.hstack([A_n, A_k.dot(B_diag)])
        idx = np.ix_(
            range(sum_vert, sum_vert + Li.A.shape[0]),
            range(0,Lk.shape[1])
        )
        Lk[idx] = Li.A.dot(AB_line)
        if i >= N:
            # no input u(N): skip the input-constraint rows
            continue
        if PU.A.shape[1] == m:
            # input-only constraints: place PU.A on u(i)'s columns
            idx = np.ix_(
                range(i*LUn, (i+1)*LUn),
                range(n + m*i, n + m*(i+1))
            )
            LU[idx] = PU.A
        elif PU.A.shape[1] == m+n:
            # constraints on [u(i); x(i)]: express x(i) via x(0) and u
            uk_line = np.zeros([m, n + m*N])
            idx = np.ix_(range(m), range(n+m*i, n+m*(i+1)))
            uk_line[idx] = np.eye(m)
            A_mult = np.vstack([uk_line, AB_line])
            b_mult = np.zeros([m+n, 1])
            b_mult[range(m, m+n), :] = A_k.dot(K_hat)
            idx = np.ix_(
                range(i*LUn, (i+1)*LUn),
                range(n+m*N)
            )
            LU[idx] = PU.A.dot(A_mult)
            MU[range(i*LUn, (i+1)*LUn), :] -= PU.A.dot(b_mult)
        ####### Iterate #########
        sum_vert += Li.A.shape[0]
        A_n = A.dot(A_n)
        A_k = A.dot(A_k)
        idx = np.ix_(range(n), range(i*n, (i+1)*n))
        A_k[idx] = np.eye(n)
    # Get disturbance sets
    if not np.all(Gk==0):
        G = np.vstack([Gk, GU])
        # worst-case disturbance effect on every constraint row
        D_hat = get_max_extreme(G, D, N)
    else:
        D_hat = np.zeros([sumlen + LUn*N, 1])
    # Put together matrices L, M
    L = np.vstack([Lk, LU])
    M = np.vstack([Mk, MU]) - D_hat
    msg = 'Computed S0 polytope: L x <= M, where:\n\t L = \n'
    msg += str(L) +'\n\t M = \n' + str(M) +'\n'
    logger.debug(msg)
    return L,M
def get_max_extreme(G,D,N):
    """Calculate the array d_hat such that::

        d_hat = max(G*DN_extreme),

    where DN_extreme are the vertices of the set D^N.

    This is used to describe the polytope::

        L*x <= M - G*d_hat.

    Calculating d_hat is equivalent to taking the intersection
    of the polytopes::

        L*x <= M - G*d_i

    for every possible d_i in the set of extreme points to D^N.

    @param G: The matrix to maximize with respect to
    @param D: Polytope describing the disturbance set
    @param N: Horizon length
    @return: d_hat: Array describing the maximum possible
        effect from the disturbance
    """
    D_extreme = pc.extreme(D)
    nv = D_extreme.shape[0]   # number of vertices of D
    dim = D_extreme.shape[1]  # disturbance dimension
    # enumerate all nv**N vertices of the product set D^N;
    # column i of DN_extreme stacks the N chosen vertices of D
    DN_extreme = np.zeros([dim*N, nv**N])
    for i in xrange(nv**N):
        # Last N digits are indices we want!
        # base_repr encodes i in base nv; digit j (from the right) selects
        # the vertex used at horizon step j
        ind = np.base_repr(i, base=nv, padding=N)
        for j in xrange(N):
            DN_extreme[range(j*dim,(j+1)*dim),i] = D_extreme[int(ind[-j-1]),:]
    # row-wise maximum over all vertex combinations
    d_hat = np.amax(np.dot(G,DN_extreme), axis=1)
    return d_hat.reshape(d_hat.size,1)
def _block_diag2(A,B):
"""Like block_diag() in scipy.linalg, but restricted to 2 inputs.
Old versions of the linear algebra package in SciPy (i.e.,
scipy.linalg) do not have a block_diag() function. Providing
_block_diag2() here until most folks are using sufficiently
up-to-date SciPy installations improves portability.
"""
if len(A.shape) == 1: # Cast 1d array into matrix
A = np.array([A])
if len(B.shape) == 1:
B = np.array([B])
C = np.zeros((A.shape[0]+B.shape[0], A.shape[1]+B.shape[1]))
C[:A.shape[0], :A.shape[1]] = A.copy()
C[A.shape[0]:, A.shape[1]:] = B.copy()
return C
|
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to to generate or modify data samples."""
from typing import List, Dict
import numpy as np
import pandas as pd
import tensorflow as tf
class Variable(object):
    """Normalization record for one feature column.

    Holds the column's position in model order plus the mean and standard
    deviation used to normalize/denormalize it.
    """

    def __init__(self, index, name, mean, std):
        # column position in the model's feature order
        self.index = index
        # column name
        self.name = name
        # normalization statistics
        self.mean = mean
        self.std = std
def get_normalization_info(df: pd.DataFrame) -> Dict[str, Variable]:
    """Computes means, standard deviation to normalize a data frame.

    Any variable xxxx_validity is considered a boolean validity indicator
    for variable xxxx, and will not be normalized. A value of 1
    indicates the value xxxx is valid, and 0 indicates xxx is invalid.

    Args:
      df: Pandas dataframe with numeric feature data.

    Returns:
      A dict with Variable.name, Variable.

    Raises:
      ValueError: if any column of df is not numeric.
    """
    variables = {}
    for column in df:
        if not np.issubdtype(df[column].dtype, np.number):
            raise ValueError("The feature column %s is not numeric." % column)
        # Validity indicators pass through unchanged (mean 0, std 1).
        if column.endswith("_validity"):
            vmean, vstd = 0.0, 1.0
        else:
            vmean, vstd = df[column].mean(), df[column].std()
        variables[column] = Variable(
            index=df.columns.get_loc(column),
            name=column,
            mean=vmean,
            std=vstd)
    return variables
def get_column_order(normalization_info: Dict[str, Variable]) -> List[str]:
    """Returns a list of column names, as strings, in model order."""
    # sort the Variable records by their model index, then take the names
    ordered = sorted(normalization_info.values(), key=lambda v: v.index)
    return [variable.name for variable in ordered]
def normalize(df: pd.DataFrame,
normalization_info: Dict[str, Variable]) -> pd.DataFrame:
"""Normalizes an input Dataframe of features.
Args:
df: Pandas DataFrame of M rows with N real-valued features
normalization_info: dict of name, variable types containing mean, and std.
Returns:
Pandas M x N DataFrame with normalized features.
"""
df_norm = pd.DataFrame()
for column in get_column_order(normalization_info):
df_norm[column] = (df[column] - normalization_info[column].mean
) / normalization_info[column].std
return df_norm
def denormalize(df_norm: pd.DataFrame,
normalization_info: Dict[str, Variable]) -> pd.DataFrame:
"""Reverts normalization an input Dataframe of features.
Args:
df_norm: Pandas DataFrame of M rows with N real-valued normalized features
normalization_info: dict of name, variable types containing mean, and std.
Returns:
Pandas M x N DataFrame with denormalized features.
"""
df = pd.DataFrame()
for column in get_column_order(normalization_info):
df[column] = df_norm[column] * normalization_info[
column].std + normalization_info[column].mean
return df
def write_normalization_info(normalization_info: Dict[str, Variable],
                             filename: str):
  """Writes variable normalization info to CSV.

  The output is a tab-separated table, one row per variable (name as the
  row index) with columns: index, mean, std.

  Args:
    normalization_info: dict of column name -> Variable.
    filename: destination path (opened via tf.io.gfile).
  """

  def to_df(info):
    frame = pd.DataFrame(columns=["index", "mean", "std"])
    for name in info:
      frame.loc[name] = [info[name].index, info[name].mean, info[name].std]
    return frame

  with tf.io.gfile.GFile(filename, "w") as csv_file:
    to_df(normalization_info).to_csv(csv_file, sep="\t")
def read_normalization_info(
    filename: str) -> Dict[str, Variable]:
  """Reads variable normalization info from CSV.

  Expects the tab-separated layout produced by write_normalization_info.

  Args:
    filename: source path (opened via tf.io.gfile).

  Returns:
    A dict mapping column name to its Variable.

  Raises:
    AssertionError: if filename does not exist.
  """
  if not tf.io.gfile.exists(filename):
    raise AssertionError("{} does not exist".format(filename))
  with tf.io.gfile.GFile(filename, "r") as csv_file:
    table = pd.read_csv(csv_file, header=0, index_col=0, sep="\t")
  # Row index holds the variable name; columns hold index/mean/std.
  return {
      name: Variable(row["index"], name, row["mean"], row["std"])
      for name, row in table.iterrows()
  }
def get_neg_sample(pos_sample: pd.DataFrame,
                   n_points: int,
                   do_permute: bool = False,
                   delta: float = 0.0) -> pd.DataFrame:
  """Creates a negative sample from the cuboid bounded by +/- delta.

  Where, [min - delta, max + delta] for each of the dimensions.
  If do_permute, then rather than uniformly sampling, simply
  randomly permute each dimension independently.

  A 'class_label' column in pos_sample, if present, is skipped.

  Args:
    pos_sample: DF with numeric dimensions
    n_points: number points to be returned
    do_permute: permute values of a resample instead of sampling uniformly
    delta: fraction of [max - min] to extend the sampling.

  Returns:
    A dataframe with the same feature columns, and a label column
    'class_label' where every point is 0.
  """
  df_neg = pd.DataFrame()
  # Resample (with replacement) of the positive points; the basis for
  # the permutation branch.
  resampled = pos_sample.sample(n=n_points, replace=True)
  for column in list(pos_sample):
    if column == "class_label":
      continue
    if do_permute:
      df_neg[column] = np.random.permutation(np.array(resampled[column]))
    else:
      lo = min(pos_sample[column])
      hi = max(pos_sample[column])
      span = hi - lo
      df_neg[column] = np.random.uniform(
          low=lo - delta * span,
          high=hi + delta * span,
          size=n_points)
  df_neg["class_label"] = [0 for _ in range(n_points)]
  return df_neg
def apply_negative_sample(positive_sample: pd.DataFrame, sample_ratio: float,
                          sample_delta: float) -> pd.DataFrame:
  """Returns a dataset with negative and positive sample.

  Note: adds a 'class_label' column of 1s to positive_sample in place.

  Args:
    positive_sample: actual, observed sample where each col is a feature.
    sample_ratio: the desired ratio of negative to positive points
    sample_delta: the extension beyond observed limits to bound the neg sample

  Returns:
    DataFrame with features + class label, with 1 being observed and 0
    negative, in shuffled row order.
  """
  positive_sample["class_label"] = 1
  negative_count = int(len(positive_sample) * sample_ratio)
  negative_sample = get_neg_sample(
      positive_sample, negative_count, do_permute=False, delta=sample_delta)
  combined = pd.concat([positive_sample, negative_sample],
                       ignore_index=True,
                       sort=True)
  # Shuffle so positive and negative rows are interleaved.
  return combined.reindex(np.random.permutation(combined.index))
def get_pos_sample(df_input: pd.DataFrame, n_points: int) -> pd.DataFrame:
  """Draws n_points from the data sample, and adds a class_label column.

  Args:
    df_input: source DataFrame, sampled without replacement.
    n_points: number of rows to draw.

  Returns:
    DataFrame of n_points rows with 'class_label' set to 1.
  """
  sampled = df_input.sample(n=n_points)
  sampled["class_label"] = 1
  return sampled
def get_train_data(input_df: pd.DataFrame,
                   n_points: int,
                   sample_ratio: float = 1.0,
                   do_permute: bool = True):
  """Generates a test and train data set for building a test model.

  Args:
    input_df: dataframe containing observed, real-valued data, where each
      field is a dimension.
    n_points: total number points to be returned (positive and negative)
    sample_ratio: ratio, neg sample / pos sample sizes (e.g., 2 = means 2x
      neg points as pos)
    do_permute: False, uniformly sample; True, sample positive and permute
      columns

  Returns:
    x: Dataframe with d-Dim cols and, n_points rows
    y: class labels, with 1 = Normal/positive and 0 = Anomalous/negative class
  """
  # Split the point budget between positive and negative classes.
  n_pos = int(n_points / (sample_ratio + 1.0))
  n_neg = n_points - n_pos
  # A random subsample of the observed data forms the positive class.
  positives = get_pos_sample(input_df, n_pos)
  if sample_ratio > 0.0:
    # Generate a random negative sample and combine with the positives.
    negatives = get_neg_sample(positives, n_neg, do_permute)
    combined = pd.concat([positives, negatives], ignore_index=True)
  else:
    combined = positives.sample(n=n_points)
  # Shuffle rows, then split off the labels.
  combined = combined.iloc[np.random.permutation(len(combined))]
  labels = combined["class_label"]
  features = combined.drop(columns=["class_label"])
  return features, labels
def get_pos_sample_synthetic(mean: float, cov: float,
                             n_points: int) -> pd.DataFrame:
  """Generates a positive sample from a Gaussian distribution with n_points.

  Args:
    mean: d-dimensional vector of mean values.
    cov: dxd dimensional covariance matrix.
    n_points: Number of points to return.

  Returns:
    DataFrame with cols x001...x[d] and n_points rows drawn from Gaussian
    with mean and cov, plus a 'class_label' column of all 1s.
  """
  draws = np.random.multivariate_normal(mean, cov, n_points).T
  frame = pd.DataFrame({"class_label": [1 for _ in range(n_points)]})
  # One column per dimension, named x001, x002, ...
  for dim, values in enumerate(draws):
    frame["x%03d" % (dim + 1)] = values
  return frame
|
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module is for spectroscopy specific tools (spectrum fitting etc).
"""
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
from six.moves import zip
logger = logging.getLogger(__name__)
from scipy.integrate import simps
from .fitting import fit_quad_to_peak
def align_and_scale(energy_list, counts_list, pk_find_fun=None):
    """
    Align and scale spectra so their largest peaks coincide.

    Parameters
    ----------
    energy_list : iterable of ndarrays
        list of ndarrays with the energy of each element

    counts_list : iterable of ndarrays
        list of ndarrays of counts/element

    pk_find_fun : function or None
        A function which takes two ndarrays and returns parameters
        about the largest peak.  If None, defaults to `find_largest_peak`.
        Expected output is (center, height, width).

    Returns
    -------
    out_e : list of ndarray
        The aligned/scaled energy arrays

    out_c : list of ndarray
        The count arrays (should be the same as the input)
    """
    peak_finder = find_largest_peak if pk_find_fun is None else pk_find_fun

    reference_sigma = None
    aligned_energy = []
    aligned_counts = []
    for energy, counts in zip(energy_list, counts_list):
        center, _height, sigma = peak_finder(energy, counts)
        # The width of the first spectrum's peak defines the common scale.
        if reference_sigma is None:
            reference_sigma = sigma
        aligned_energy.append((energy - center) * reference_sigma / sigma)
        aligned_counts.append(counts)

    return aligned_energy, aligned_counts
def find_largest_peak(x, y, window=None):
    """
    Finds and estimates the location, width, and height of
    the largest peak. Assumes the top of the peak can be
    approximated as a Gaussian.  Finds the peak properties
    using least-squares fitting of a parabola to the log of
    the counts.

    The region around the peak can be approximated by
    Y = Y0 * exp(- (X - X0)**2 / (2 * sigma **2))

    Parameters
    ----------
    x : ndarray
       The independent variable

    y : ndarary
      Dependent variable sampled at positions X

    window : int, optional
       The size of the window around the maximum to use
       for the fitting

    Returns
    -------
    x0 : float
        The location of the peak

    y0 : float
        The magnitude of the peak

    sigma : float
        Width of the peak
    """
    # make sure they are _really_ arrays
    x = np.asarray(x)
    y = np.asarray(y)

    # get the bin with the largest number of counts
    j = np.argmax(y)
    if window is not None:
        # Clamp the window's lower edge at 0.  The previous code used
        # np.max(j - window, 0), which passes 0 as the *axis* argument of a
        # reduction rather than taking an element-wise maximum, so a window
        # wider than j was never clamped (negative slice start -> wrong ROI).
        roi = slice(max(j - window, 0), j + window + 1)
    else:
        # NOTE(review): this drops the final sample of the spectrum;
        # confirm whether slice(None) (the full range) was intended.
        roi = slice(0, -1)

    (w, x0, y0), r2 = fit_quad_to_peak(x[roi], np.log(y[roi]))

    return x0, np.exp(y0), 1 / np.sqrt(-2 * w)
def integrate_ROI_spectrum(bin_edges, counts, x_min, x_max):
    """Integrate region(s) of histogram.

    If `x_min` and `x_max` are arrays/lists they must be equal in
    length. The values contained in the 'x_value_array' must be
    monotonic (up or down).  The returned value is the sum of all the
    regions and a single scalar value is returned.  Each region is
    computed independently, if regions overlap the overlapped area will
    be included multiple times in the final sum.

    `bin_edges` is an array of the left edges and the final right
    edges of the bins.  `counts` is the value in each of those bins.

    The bins who's centers fall with in the integration limits are
    included in the sum.

    Parameters
    ----------
    bin_edges : array
        Independent variable, any unit.

        Must be one longer in length than counts

    counts : array
        Dependent variable, any units

    x_min : float or array
        The lower edge of the integration region(s).

    x_max : float or array
        The upper edge of the integration region(s).

    Returns
    -------
    float
        The totals integrated value in same units as `counts`
    """
    bin_edges = np.asarray(bin_edges)
    # Bin centers are the left edges plus *half* the bin widths.  The
    # previous expression (bin_edges[:-1] + np.diff(bin_edges)) yields the
    # right edges, shifting every bin by half a width relative to the
    # documented "bin centers" behavior.
    return integrate_ROI(bin_edges[:-1] + np.diff(bin_edges) / 2,
                         counts, x_min, x_max)
def _formatter_array_regions(x, centers, window=1, tab_count=0):
"""Returns a formatted string of sub-sections of an array
Each value in center generates a section of the string like:
{tab_count*\t}c : [x[c - n] ... x[c] ... x[c + n + 1]]
Parameters
----------
x : array
The array to be looked into
centers : iterable
The locations to print out around
window : int, optional
how many values on either side of center to include
defaults to 1
tab_count : int, optional
The number of tabs to pre-fix lines with
default is 0
Returns
-------
str
The formatted string
"""
xl = len(x)
x = np.asarray(x)
header = ("\t"*tab_count + 'center\tarray values\n' +
"\t"*tab_count + '------\t------------\n')
return header + '\n'.join(["\t"*tab_count +
"{c}: \t {vals}".format(c=c,
vals=x[np.max([0, c-window]):
np.min([xl, c + window + 1])])
for c in centers])
def integrate_ROI(x, y, x_min, x_max):
    """Integrate region(s) of input data.

    If `x_min` and `x_max` are arrays/lists they must be equal in
    length. The values contained in the 'x' must be monotonic (up or
    down). The returned value is the sum of all the regions and a
    single scalar value is returned. Each region is computed
    independently, if regions overlap the overlapped area will be
    included multiple times in the final sum.

    This function assumes that `y` is a function of
    `x` sampled at `x`.

    Parameters
    ----------
    x : array
      Independent variable, any unit

    y : array
      Dependent variable, any units

    x_min : float or array
      The lower edge of the integration region(s)
      in units of x.

    x_max : float or array
      The upper edge of the integration region(s)
      in units of x.

    Returns
    -------
    float
      The totals integrated value in same units as `y`

    Raises
    ------
    ValueError
      If x and y differ in shape, x is not monotonic, the bounds have
      mismatched lengths, any lower bound is >= its upper bound, or any
      bound falls outside the range of x.
    """
    # make sure x (x-values) and y (y-values) are arrays
    x = np.asarray(x)
    y = np.asarray(y)

    if x.shape != y.shape:
        raise ValueError("Inputs (x and y) must be the same "
                         "size. x.shape = {0} and y.shape = "
                         "{1}".format(x.shape, y.shape))

    # use np.sign() to obtain array which has evaluated sign changes in all
    # diff in input x_value array. Checks and tests are then run on the
    # evaluated sign change array.
    eval_x_arr_sign = np.sign(np.diff(x))

    # check to make sure no outliers exist which violate the monotonically
    # increasing requirement, and if exceptions exist, then error points to the
    # location within the source array where the exception occurs.
    if not np.all(eval_x_arr_sign == eval_x_arr_sign[0]):
        error_locations = np.where(eval_x_arr_sign != eval_x_arr_sign[0])[0]
        raise ValueError("Independent variable must be monotonically "
                         "increasing. Erroneous values found at x-value "
                         "array index locations:\n" +
                         _formatter_array_regions(x, error_locations))

    # check whether the sign of all diff measures are negative in the
    # x. If so, then the input array for both x_values and
    # count are reversed so that they are positive, and monotonically increase
    # in value
    if eval_x_arr_sign[0] == -1:
        x = x[::-1]
        y = y[::-1]
        # NOTE(review): this uses the root logger via logging.debug even
        # though the module defines a module-level `logger` — confirm intent.
        logging.debug("Input values for 'x' were found to be "
                      "monotonically decreasing. The 'x' and "
                      "'y' arrays have been reversed prior to "
                      "integration.")

    # up-cast to 1d and make sure it is flat
    x_min = np.atleast_1d(x_min).ravel()
    x_max = np.atleast_1d(x_max).ravel()

    # verify that the number of minimum and maximum boundary values are equal
    if len(x_min) != len(x_max):
        raise ValueError("integration bounds must have same lengths")

    # verify that the specified minimum values are actually less than the
    # sister maximum value, and raise error if any minimum value is actually
    # greater than the sister maximum value.
    if np.any(x_min >= x_max):
        raise ValueError("All lower integration bounds must be less than "
                         "upper integration bounds.")

    # check to make sure that all specified minimum and maximum values are
    # actually contained within the extents of the independent variable array
    if np.any(x_min < x[0]):
        error_locations = np.where(x_min < x[0])[0]
        raise ValueError("Specified lower integration boundary values are "
                         "outside the spectrum range. All minimum integration "
                         "boundaries must be greater than, or equal to the "
                         "lowest value in spectrum range. The erroneous x_min_"
                         "array indices are:\n" +
                         _formatter_array_regions(x_min,
                                                  error_locations, window=0))

    if np.any(x_max > x[-1]):
        error_locations = np.where(x_max > x[-1])[0]
        raise ValueError("Specified upper integration boundary values "
                         "are outside the spectrum range. All maximum "
                         "integration boundary values must be less "
                         "than, or equal to the highest value in the spectrum "
                         "range. The erroneous x_max array indices are: "
                         "\n" +
                         _formatter_array_regions(x_max,
                                                  error_locations, window=0))

    # find the bottom index of each integration bound
    bottom_indx = x.searchsorted(x_min)
    # find the top index of each integration bound
    # NOTE: +1 required for correct slicing for integration function
    top_indx = x.searchsorted(x_max) + 1

    # set up temporary variables
    accum = 0
    # integrate each region
    # NOTE(review): scipy's `simps` and its `even=` keyword are deprecated
    # in modern SciPy (renamed `simpson`) — confirm the pinned SciPy version.
    for bot, top in zip(bottom_indx, top_indx):
        # Note: If an odd number of intervals is specified, then the
        #       even='avg' setting calculates and averages first AND last
        #       N-2 intervals using trapezoidal rule.
        #       If calculation speed become an issue, then consider changing
        #       setting to 'first', or 'last' in which case trap rule is only
        #       applied to either first or last N-2 intervals.
        accum += simps(y[bot:top], x[bot:top], even='avg')

    return accum
|
|
# Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Jason Narad <jason.narad@gmail.com>
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
import math
from nltk.grammar import parse_dependency_grammar
from dependencygraph import *
#################################################################
# DependencyScorerI - Interface for Graph-Edge Weight Calculation
#################################################################
class DependencyScorerI(object):
    """
    Interface for assigning weights to the edges of a weighted
    dependency graph.  Used by a C{ProbabilisticNonprojectiveParser}
    to initialize the edge weights of a C{DependencyGraph}.  While
    typically backed by a trained binary classifier, any class that can
    produce a multidimensional list representation of the edge weights
    may implement this interface, so it has no required fields.
    """

    def __init__(self):
        # Abstract interface: direct instantiation is forbidden.
        if self.__class__ == DependencyScorerI:
            raise TypeError('DependencyScorerI is an abstract interface')

    def train(self, graphs):
        """
        @type graphs: A list of C{DependencyGraph}
        @param graphs: Dependency graphs to train the scorer on.
        Typically the edges present in the graphs serve as positive
        training examples, and the absent edges as negative examples.
        """
        raise AssertionError('DependencyScorerI is an abstract interface')

    def score(self, graph):
        """
        @type graph: A C{DependencyGraph}
        @param graph: A dependency graph whose set of edges need to be
        scored.
        @rtype: A three-dimensional list of numbers.
        @return: A multidimensional(3) list of scores where the
        outer-dimension refers to the head and the inner-dimension to
        the dependencies; scores[0][1] is the list of scores for arcs
        from node 0 to node 1.  The node's 'address' field identifies
        its number.  Example (Fig.2 of Keith Hall's 'K-best Spanning
        Tree Parsing' paper):
              scores = [[[], [5],  [1],  [1]],
                       [[], [],   [11], [4]],
                       [[], [10], [],   [5]],
                       [[], [8],  [8],  []]]
        With a MaxEntClassifier each score corresponds to the
        confidence of an edge being classified with the positive
        training examples.
        """
        raise AssertionError('DependencyScorerI is an abstract interface')

    #////////////////////////////////////////////////////////////
    # Comparisons
    #////////////////////////////////////////////////////////////

    def __cmp__(self, other):
        raise AssertionError('DependencyScorerI is an abstract interface')

    def __hash__(self, other):
        raise AssertionError('DependencyScorerI is an abstract interface')
#################################################################
# NaiveBayesDependencyScorer
#################################################################
class NaiveBayesDependencyScorer(DependencyScorerI):
    """
    A dependency scorer built around a MaxEnt classifier.  In this
    particular class that classifier is a C{NaiveBayesClassifier}.
    It uses head-word, head-tag, child-word, and child-tag features
    for classification.
    """

    def __init__(self):
        # Python 2 bare print statement: emits a blank line so the
        # constructor does something without raising.
        print # Do nothing without throwing error?

    def train(self, graphs):
        """
        Trains a C{NaiveBayesClassifier} using the edges present in
        graphs list as positive examples, the edges not present as
        negative examples.  Uses a feature vector of head-word,
        head-tag, child-word, and child-tag.

        @type graphs: A list of C{DependencyGraph}
        @param graphs: A list of dependency graphs to train the scorer.
        """
        # Create training labeled training examples
        labeled_examples = []
        for graph in graphs:
            # Consider every (head, child) pair in each graph.
            for head_node in graph.nodelist:
                for child_index in range(len(graph.nodelist)):
                    child_node = graph.get_by_address(child_index)
                    # An arc present in the gold graph is a positive
                    # example ("T"); an absent arc is negative ("F").
                    if child_index in head_node['deps']:
                        label = "T"
                    else:
                        label = "F"
                    # NOTE(review): `features` is assigned but never read —
                    # the dict literal below duplicates it; confirm before
                    # removing.
                    features = [head_node['word'], head_node['tag'], child_node['word'], child_node['tag']]
                    labeled_examples.append((dict(a=head_node['word'],b=head_node['tag'],c=child_node['word'],d=child_node['tag']), label))
        # Train the classifier
        # NOTE(review): nltk.usage prints the ClassifierI interface —
        # looks like leftover debug output; confirm intent.
        import nltk
        nltk.usage(nltk.ClassifierI)
        self.classifier = nltk.classify.NaiveBayesClassifier.train(labeled_examples)

    def score(self, graph):
        """
        Converts the graph into a feature-based representation of
        each edge, and then assigns a score to each based on the
        confidence of the classifier in assigning it to the
        positive label.  Scores are returned in a multidimensional list.

        @type graph: C{DependencyGraph}
        @param graph: A dependency graph to score.
        @rtype: 3 dimensional list
        @return: Edge scores for the graph parameter.
        """
        # Convert graph to feature representation
        edges = []
        for i in range(len(graph.nodelist)):
            for j in range(len(graph.nodelist)):
                head_node = graph.get_by_address(i)
                child_node = graph.get_by_address(j)
                # Debug output (Python 2 print statements).
                print head_node
                print child_node
                edges.append((dict(a=head_node['word'],b=head_node['tag'],c=child_node['word'],d=child_node['tag'])))
        # Score edges
        # Fold the flat classifier output back into one row per head node;
        # each cell holds a one-element list with log P(edge is "T").
        edge_scores = []
        row = []
        count = 0
        for pdist in self.classifier.batch_prob_classify(edges):
            print '%.4f %.4f' % (pdist.prob('T'), pdist.prob('F'))
            row.append([math.log(pdist.prob("T"))])
            count += 1
            if count == len(graph.nodelist):
                edge_scores.append(row)
                row = []
                count = 0
        return edge_scores
#################################################################
# A Scorer for Demo Purposes
#################################################################
# A short class necessary to show parsing example from paper
class DemoScorer:
    # Fixed scorer that reproduces the parsing example from Keith Hall's
    # 'K-best Spanning Tree Parsing' paper; no actual training happens.
    def train(self, graphs):
        # Training is a no-op; only announces itself (Python 2 print).
        print 'Training...'

    def score(self, graph):
        # scores for Keith Hall 'K-best Spanning Tree Parsing' paper
        return [[[], [5],  [1],  [1]],
                [[], [],   [11], [4]],
                [[], [10], [],   [5]],
                [[], [8],  [8],  []]]
#################################################################
# Non-Projective Probabilistic Parsing
#################################################################
class ProbabilisticNonprojectiveParser(object):
    """
    A probabilistic non-projective dependency parser.  Nonprojective
    dependencies allows for "crossing branches" in the parse tree
    which is necessary for representing particular linguistic
    phenomena, or even typical parses in some languages.  This parser
    follows the MST parsing algorithm, outlined in McDonald(2005),
    which likens the search for the best non-projective parse to
    finding the maximum spanning tree in a weighted directed graph.

    NOTE(review): this class is Python 2 only (print statements,
    dict.has_key) and prints verbose debug output throughout.
    """
    def __init__(self):
        """
        Creates a new non-projective parser.
        """
        print 'initializing prob. nonprojective...'

    def train(self, graphs, dependency_scorer):
        """
        Trains a C{DependencyScorerI} from a set of C{DependencyGraph} objects,
        and establishes this as the parser's scorer.  This is used to
        initialize the scores on a C{DependencyGraph} during the parsing
        procedure.

        @type graphs: A list of C{DependencyGraph}
        @param graphs: A list of dependency graphs to train the scorer.
        @type dependency_scorer: C{DependencyScorerI}
        @param dependency_scorer: A scorer which implements the
        C{DependencyScorerI} interface.
        """
        self._scorer = dependency_scorer
        self._scorer.train(graphs)

    def initialize_edge_scores(self, graph):
        """
        Assigns a score to every edge in the C{DependencyGraph} graph.
        These scores are generated via the parser's scorer which
        was assigned during the training process.

        @type graph: C{DependencyGraph}
        @param graph: A dependency graph to assign scores to.
        """
        # self.scores[head][dep] is a list of candidate arc scores.
        self.scores = self._scorer.score(graph)

    def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph):
        """
        Takes a list of nodes that have been identified to belong to a cycle,
        and collapses them into on larger node.  The arcs of all nodes in
        the graph must be updated to account for this.

        @type new_node: Node.
        @param new_node: A Node (Dictionary) to collapse the cycle nodes into.
        @type cycle_path: A list of integers.
        @param cycle_path: A list of node addresses, each of which is in the cycle.
        @type g_graph, b_graph, c_graph: C{DependencyGraph}
        @param g_graph, b_graph, c_graph: Graphs which need to be updated.

        NOTE(review): only g_graph is modified here despite b_graph and
        c_graph being accepted — confirm the extra parameters are intended.
        """
        print 'Collapsing nodes...'
        # Collapse all cycle nodes into v_n+1 in G_Graph
        for cycle_node_index in cycle_path:
            g_graph.remove_by_address(cycle_node_index)
        g_graph.nodelist.append(new_node)
        # Re-point arcs that referenced cycle members at the new node.
        g_graph.redirect_arcs(cycle_path, new_node['address'])

    def update_edge_scores(self, new_node, cycle_path):
        """
        Updates the edge scores to reflect a collapse operation into
        new_node.

        @type new_node: A Node.
        @param new_node: The node which cycle nodes are collapsed into.
        @type cycle_path: A list of integers.
        @param cycle_path: A list of node addresses that belong to the cycle.
        """
        print 'cycle', cycle_path
        # Map collapsed addresses back to the original node addresses.
        cycle_path = self.compute_original_indexes(cycle_path)
        print 'old cycle ', cycle_path
        print 'Prior to update:\n', self.scores
        # For arcs entering the cycle from outside, subtract the best
        # within-cycle incoming score (the Chu-Liu/Edmonds reweighting).
        for i, row in enumerate(self.scores):
            for j, column in enumerate(self.scores[i]):
                print self.scores[i][j]
                if j in cycle_path and not i in cycle_path and len(self.scores[i][j]) > 0:
                    new_vals = []
                    subtract_val = self.compute_max_subtract_score(j, cycle_path)
                    print self.scores[i][j], ' - ', subtract_val
                    for cur_val in self.scores[i][j]:
                        new_vals.append(cur_val - subtract_val)
                    self.scores[i][j] = new_vals
        # Arcs entirely inside the cycle are removed.
        for i, row in enumerate(self.scores):
            for j, cell in enumerate(self.scores[i]):
                if i in cycle_path and j in cycle_path:
                    self.scores[i][j] = []
        print 'After update:\n', self.scores

    def compute_original_indexes(self, new_indexes):
        """
        As nodes are collapsed into others, they are replaced
        by the new node in the graph, but it's still necessary
        to keep track of what these original nodes were.  This
        takes a list of node addresses and replaces any collapsed
        node addresses with their original addresses.

        @type new_indexes: A list of integers.
        @param new_indexes: A list of node addresses to check for
        subsumed nodes.
        """
        # Repeatedly expand collapsed addresses until a fixed point is
        # reached (collapsed nodes may themselves contain collapsed nodes).
        swapped = True
        while(swapped):
            originals = []
            swapped = False
            for new_index in new_indexes:
                # dict.has_key is Python 2 only.
                if self.inner_nodes.has_key(new_index):
                    for old_val in self.inner_nodes[new_index]:
                        if not old_val in originals:
                            originals.append(old_val)
                            swapped = True
                else:
                    originals.append(new_index)
            new_indexes = originals
        return new_indexes

    def compute_max_subtract_score(self, column_index, cycle_indexes):
        """
        When updating scores the score of the highest-weighted incoming
        arc is subtracted upon collapse.  This returns the correct
        amount to subtract from that edge.

        @type column_index: integer.
        @param column_index: A index representing the column of incoming arcs
        to a particular node being updated
        @type cycle_indexes: A list of integers.
        @param cycle_indexes: Only arcs from cycle nodes are considered.  This
        is a list of such nodes addresses.
        """
        # NOTE(review): -100000 acts as a -infinity sentinel; assumes all
        # real scores are larger — confirm for the scorer in use.
        max_score = -100000
        for row_index in cycle_indexes:
            for subtract_val in self.scores[row_index][column_index]:
                if subtract_val > max_score:
                    max_score = subtract_val
        return max_score

    def best_incoming_arc(self, node_index):
        """
        Returns the source of the best incoming arc to the
        node with address: node_index

        @type node_index: integer.
        @param node_index: The address of the 'destination' node,
        the node that is arced to.
        """
        originals = self.compute_original_indexes([node_index])
        print 'originals:', originals
        max_arc = None
        max_score = None
        for row_index in range(len(self.scores)):
            for col_index in range(len(self.scores[row_index])):
                # print self.scores[row_index][col_index]
                # NOTE(review): compares score *lists* with > and against
                # None — relies on Python 2 mixed-type ordering semantics.
                if col_index in originals and self.scores[row_index][col_index] > max_score:
                    max_score = self.scores[row_index][col_index]
                    max_arc = row_index
                    print row_index, ',', col_index
        print max_score
        # If the best source was itself collapsed, report the collapsed
        # node's address instead of the original one.
        for key in self.inner_nodes:
            replaced_nodes = self.inner_nodes[key]
            if max_arc in replaced_nodes:
                return key
        return max_arc

    def original_best_arc(self, node_index):
        """
        Like best_incoming_arc, but returns the [source, destination]
        pair in *original* (pre-collapse) addresses, for parse recovery.
        """
        originals = self.compute_original_indexes([node_index])
        max_arc = None
        max_score = None
        max_orig = None
        for row_index in range(len(self.scores)):
            for col_index in range(len(self.scores[row_index])):
                if col_index in originals and self.scores[row_index][col_index] > max_score:
                    max_score = self.scores[row_index][col_index]
                    max_arc = row_index
                    max_orig = col_index
        return [max_arc, max_orig]

    def parse(self, tokens, tags):
        """
        Parses a list of tokens in accordance to the MST parsing algorithm
        for non-projective dependency parses.  Assumes that the tokens to
        be parsed have already been tagged and those tags are provided.  Various
        scoring methods can be used by implementing the C{DependencyScorerI}
        interface and passing it to the training algorithm.

        @type tokens: A list of C{String}.
        @param tokens: A list of words or punctuation to be parsed.
        @type tags: A List of C{String}.
        @param tags: A list of tags corresponding by index to the words in the tokens list.
        """
        # Maps each collapsed node address to the cycle members it replaced.
        self.inner_nodes = {}
        # Initialize g_graph
        g_graph = DependencyGraph()
        for index, token in enumerate(tokens):
            g_graph.nodelist.append({'word':token, 'tag':tags[index], 'deps':[], 'rel':'NTOP', 'address':index+1})
        # Fully connect non-root nodes in g_graph
        g_graph.connect_graph()
        # Keep an untouched copy for recovering the final parse.
        original_graph = DependencyGraph()
        for index, token in enumerate(tokens):
            original_graph.nodelist.append({'word':token, 'tag':tags[index], 'deps':[], 'rel':'NTOP', 'address':index+1})
        # Initialize b_graph
        b_graph = DependencyGraph()
        b_graph.nodelist = []
        # Initialize c_graph
        c_graph = DependencyGraph()
        c_graph.nodelist = [{'word':token, 'tag':tags[index], 'deps':[],
                             'rel':'NTOP', 'address':index+1}
                            for index, token in enumerate(tokens)]
        # Assign initial scores to g_graph edges
        self.initialize_edge_scores(g_graph)
        print self.scores
        # Initialize a list of unvisited vertices (by node address)
        unvisited_vertices = [vertex['address'] for vertex in c_graph.nodelist]
        # Iterate over unvisited vertices
        nr_vertices = len(tokens)
        # betas[v] = best incoming arc for v, in original addresses.
        betas = {}
        while(len(unvisited_vertices) > 0):
            # Mark current node as visited
            current_vertex = unvisited_vertices.pop(0)
            print 'current_vertex:', current_vertex
            # Get corresponding node n_i to vertex v_i
            current_node = g_graph.get_by_address(current_vertex)
            print 'current_node:', current_node
            # Get best in-edge node b for current node
            best_in_edge = self.best_incoming_arc(current_vertex)
            betas[current_vertex] = self.original_best_arc(current_vertex)
            print 'best in arc: ', best_in_edge, ' --> ', current_vertex
            # b_graph = Union(b_graph, b)
            for new_vertex in [current_vertex, best_in_edge]:
                b_graph.add_node({'word':'TEMP', 'deps':[], 'rel': 'NTOP', 'address': new_vertex})
            b_graph.add_arc(best_in_edge, current_vertex)
            # Beta(current node) = b  - stored for parse recovery
            # If b_graph contains a cycle, collapse it
            cycle_path = b_graph.contains_cycle()
            if cycle_path:
                # Create a new node v_n+1 with address = len(nodes) + 1
                new_node = {'word': 'NONE', 'deps':[], 'rel': 'NTOP', 'address': nr_vertices + 1}
                # c_graph = Union(c_graph, v_n+1)
                c_graph.add_node(new_node)
                # Collapse all nodes in cycle C into v_n+1
                self.update_edge_scores(new_node, cycle_path)
                self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph)
                for cycle_index in cycle_path:
                    c_graph.add_arc(new_node['address'], cycle_index)
                    # self.replaced_by[cycle_index] = new_node['address']
                self.inner_nodes[new_node['address']] = cycle_path
                # Add v_n+1 to list of unvisited vertices
                unvisited_vertices.insert(0, nr_vertices + 1)
                # increment # of nodes counter
                nr_vertices += 1
                # Remove cycle nodes from b_graph; B = B - cycle c
                for cycle_node_address in cycle_path:
                    b_graph.remove_by_address(cycle_node_address)
            print 'g_graph:\n', g_graph
            print
            print 'b_graph:\n', b_graph
            print
            print 'c_graph:\n', c_graph
            print
            print 'Betas:\n', betas
            print 'replaced nodes', self.inner_nodes
            print
        #Recover parse tree
        print 'Final scores:\n', self.scores
        print 'Recovering parse...'
        # Propagate the best arcs of collapsed nodes back onto the original
        # node addresses they replaced.
        for i in range(len(tokens) + 1, nr_vertices + 1):
            betas[betas[i][1]] = betas[i]
        print 'Betas: ', betas
        # NOTE(review): new_graph is built but never used — confirm before
        # removing.
        new_graph = DependencyGraph()
        for node in original_graph.nodelist:
            node['deps'] = []
        for i in range(1, len(tokens) + 1):
            # print i, betas[i]
            original_graph.add_arc(betas[i][0], betas[i][1])
        # print original_graph
        return original_graph
        # NOTE(review): unreachable — the method returns above.
        print 'Done.'
#################################################################
# Rule-based Non-Projective Parser
#################################################################
class NonprojectiveDependencyParser(object):
    """
    A non-projective, rule-based, dependency parser. This parser
    will return the set of all possible non-projective parses based on
    the word-to-word relations defined in the parser's dependency
    grammar, and will allow the branches of the parse tree to cross
    in order to capture a variety of linguistic phenomena that a
    projective parser will not.
    """
    def __init__(self, dependency_grammar):
        """
        Creates a new C{NonprojectiveDependencyParser}.
        @param dependency_grammar: a grammar of word-to-word relations.
        @type dependency_grammar: C{DependencyGrammar}
        """
        self._grammar = dependency_grammar
    def parse(self, tokens):
        """
        Parses the input tokens with respect to the parser's grammar. Parsing
        is accomplished by representing the search-space of possible parses as
        a fully-connected directed graph. Arcs that would lead to ungrammatical
        parses are removed and a lattice is constructed of length n, where n is
        the number of input tokens, to represent all possible grammatical
        traversals. All possible paths through the lattice are then enumerated
        to produce the set of non-projective parses.
        @param tokens: A list of tokens to parse.
        @type tokens: A C{list} of L{String}.
        @return: A set of non-projective parses, or False if more than one
        root candidate exists.
        @rtype: A C{list} of L{DependencyGraph}
        """
        # Create graph representation of tokens
        self._graph = DependencyGraph()
        self._graph.nodelist = [] # Remove the default root
        # One node per token; addresses are the 0-based token indices.
        for index, token in enumerate(tokens):
            self._graph.nodelist.append({'word':token, 'deps':[], 'rel':'NTOP', 'address':index})
        # Fully connect the graph, then keep only arcs licensed by the grammar
        # (and drop self-loops on identical word forms).
        for head_node in self._graph.nodelist:
            deps = []
            for dep_node in self._graph.nodelist:
                if self._grammar.contains(head_node['word'], dep_node['word']) and not head_node['word'] == dep_node['word']:
                    deps.append(dep_node['address'])
            head_node['deps'] = deps
        # Create lattice of possible heads
        # possible_heads[i] lists every grammatical head index for token i;
        # tokens with no possible head are root candidates.
        roots = []
        possible_heads = []
        for i, word in enumerate(tokens):
            heads = []
            for j, head in enumerate(tokens):
                if (i != j) and self._grammar.contains(head, word):
                    heads.append(j)
            if len(heads) == 0:
                roots.append(i)
            possible_heads.append(heads)
        # Set roots to attempt
        if len(roots) > 1:
            print "No parses found."
            return False
        elif len(roots) == 0:
            # No forced root: try every token as a potential root.
            for i in range(len(tokens)):
                roots.append(i)
        # Traverse lattice
        # Depth-first enumeration with explicit backtracking: `analysis[i]`
        # holds the chosen head for token i (-1 for the root); `stack`
        # records choice points so alternatives can be restored on the way
        # back up.
        analyses = []
        for root in roots:
            stack = []
            analysis = [[] for i in range(len(possible_heads))]
            i = 0
            forward = True
            while(i >= 0):
                if forward:
                    if len(possible_heads[i]) == 1:
                        analysis[i] = possible_heads[i][0]
                    elif len(possible_heads[i]) == 0:
                        analysis[i] = -1
                    else:
                        head = possible_heads[i].pop()
                        analysis[i] = head
                        stack.append([i, head])
                if not forward:
                    index_on_stack = False
                    for stack_item in stack:
                        # print stack_item
                        if stack_item[0] == i:
                            index_on_stack = True
                    orig_length = len(possible_heads[i])
                    # print len(possible_heads[i])
                    if index_on_stack and orig_length == 0:
                        # All alternatives for i are exhausted: restore the
                        # popped heads so a later traversal can retry them.
                        for j in xrange(len(stack) -1, -1, -1):
                            stack_item = stack[j]
                            if stack_item[0] == i:
                                possible_heads[i].append(stack.pop(j)[1])
                        # print stack
                    elif index_on_stack and orig_length > 0:
                        # Untried alternative available: take it and resume
                        # the forward sweep.
                        head = possible_heads[i].pop()
                        analysis[i] = head
                        stack.append([i, head])
                        forward = True
                # print 'Index on stack:', i, index_on_stack
                if i + 1 == len(possible_heads):
                    # Reached the last token: record a complete analysis.
                    analyses.append(analysis[:])
                    forward = False
                if forward:
                    i += 1
                else:
                    i -= 1
        # Filter parses
        graphs = []
        #ensure 1 root, every thing has 1 head
        for analysis in analyses:
            root_count = 0
            root = []
            for i, cell in enumerate(analysis):
                if cell == -1:
                    root_count += 1
                    root = i
            if root_count == 1:
                graph = DependencyGraph()
                # NOTE(review): 'deps' is set to a scalar (root + 1) here,
                # not a list -- confirm against DependencyGraph expectations.
                graph.nodelist[0]['deps'] = root + 1
                for i in range(len(tokens)):
                    node = {'word':tokens[i], 'address':i+1}
                    node['deps'] = [j+1 for j in range(len(tokens)) if analysis[j] == i]
                    graph.nodelist.append(node)
                # cycle = graph.contains_cycle()
                # if not cycle:
                graphs.append(graph)
        return graphs
#################################################################
# Demos
#################################################################
def demo():
    """Run the available dependency-parser demonstrations."""
    # hall_demo()
    nonprojective_conll_parse_demo()
    rule_based_demo()
def hall_demo():
    # Train (on no data) with the hand-crafted DemoScorer, then parse a toy
    # three-token input with dummy None tags and print the resulting graph.
    npp = ProbabilisticNonprojectiveParser()
    npp.train([], DemoScorer())
    parse_graph = npp.parse(['v1', 'v2', 'v3'], [None, None, None])
    print parse_graph
def nonprojective_conll_parse_demo():
    # Build training graphs from the embedded CoNLL sample data, train a
    # probabilistic non-projective parser with the naive-Bayes scorer, then
    # parse one Dutch sentence and print the result.
    graphs = [DependencyGraph(entry)
              for entry in conll_data2.split('\n\n') if entry]
    npp = ProbabilisticNonprojectiveParser()
    npp.train(graphs, NaiveBayesDependencyScorer())
    # NOTE(review): 5 words are parsed against 6 tags -- confirm whether the
    # extra 'N' tag is intentional.
    parse_graph = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc'])
    print parse_graph
def rule_based_demo():
    # Parse a sentence with the rule-based non-projective parser using a
    # small hand-written word-to-word dependency grammar, printing every
    # analysis the parser finds.
    grammar = parse_dependency_grammar("""
    'taught' -> 'play' | 'man'
    'man' -> 'the' | 'in'
    'in' -> 'corner'
    'corner' -> 'the'
    'play' -> 'golf' | 'dachshund' | 'to'
    'dachshund' -> 'his'
    """)
    print grammar
    ndp = NonprojectiveDependencyParser(grammar)
    graphs = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf'])
    print 'Graphs:'
    for graph in graphs:
        print graph
if __name__ == '__main__':
    # Run the demos when executed as a script.
    demo()
|
|
from collections import namedtuple
import taxcalc
import dropq
import os
import requests
from requests.exceptions import Timeout, RequestException
import json
import pandas as pd
import time
#
# Prepare user params to send to DropQ/Taxcalc
#
# Number of budget years each simulation covers; overridable via env var.
NUM_BUDGET_YEARS = int(os.environ.get('NUM_BUDGET_YEARS', 10))
# First calendar year of the simulation; overridable via env var.
START_YEAR = int(os.environ.get('START_YEAR', 2015))
#Hard fail on lack of dropq workers
dropq_workers = os.environ.get('DROPQ_WORKERS', '')
# Comma-separated hostnames of the dropq worker machines.
DROPQ_WORKERS = dropq_workers.split(",")
# Parameter ids flagged as "coming soon" (see TaxCalcParam.coming_soon).
TAXCALC_COMING_SOON_FIELDS = [
    '_Dividend_rt1', '_Dividend_thd1',
    '_Dividend_rt2', '_Dividend_thd2',
    '_Dividend_rt3', '_Dividend_thd3', '_BE_inc', '_BE_sub',
    '_BE_cg_per', '_BE_cg_trn'
]
# "Coming soon" parameters that get MARS filing-status column labels.
TAXCALC_COMING_SOON_INDEXED_BY_MARS = [
    '_CG_thd1', '_CG_thd2', '_Dividend_thd1','_Dividend_thd2', '_Dividend_thd3'
]
# HTTP timeout (seconds) for calls to dropq workers.
TIMEOUT_IN_SECONDS = 1.0
# Give up submitting a year's job after this many failed attempts.
MAX_ATTEMPTS_SUBMIT_JOB = 20
#
# Display TaxCalc result data
#
TAXCALC_RESULTS_START_YEAR = START_YEAR
TAXCALC_RESULTS_MTABLE_COL_LABELS = taxcalc.TABLE_LABELS
TAXCALC_RESULTS_DFTABLE_COL_LABELS = taxcalc.DIFF_TABLE_LABELS
TAXCALC_RESULTS_MTABLE_COL_FORMATS = [
# divisor, unit, decimals
[ 1000, None, 0], # 'Returns',
[1000000000, 'Dollars', 1], # 'AGI',
[ 1000, None, 0], # 'Standard Deduction Filers',
[1000000000, 'Dollars', 1], # 'Standard Deduction',
[ 1000, None, 0], # 'Itemizers',
[1000000000, 'Dollars', 1], # 'Itemized Deduction',
[1000000000, 'Dollars', 1], # 'Personal Exemption',
[1000000000, 'Dollars', 1], # 'Taxable Income',
[1000000000, 'Dollars', 1], # 'Regular Tax',
[1000000000, 'Dollars', 1], # 'AMTI',
[ 1000, None, 0], # 'AMT Filers',
[1000000000, 'Dollars', 1], # 'AMT',
[1000000000, 'Dollars', 1], # 'Tax before Credits',
[1000000000, 'Dollars', 1], # 'Non-refundable Credits',
[1000000000, 'Dollars', 1], # 'Tax before Refundable Credits',
[1000000000, 'Dollars', 1], # 'Refundable Credits',
[1000000000, 'Dollars', 1], # 'Revenue'
]
TAXCALC_RESULTS_DFTABLE_COL_FORMATS = [
[ 1000, None, 0], # "Inds. w/ Tax Cut",
[ 1000, None, 0], # "Inds. w/ Tax Increase",
[ 1000, None, 0], # "Count",
[ 1, 'Dollars', 0], # "Mean Tax Difference",
[1000000000, 'Dollars', 1], # "Total Tax Difference",
[ 1, '%', 1], # "%age Tax Increase",
[ 1, '%', 1], # "%age Tax Decrease",
[ 1, '%', 1], # "Share of Overall Change"
]
TAXCALC_RESULTS_BIN_ROW_KEYS = dropq.dropq.bin_row_names
TAXCALC_RESULTS_BIN_ROW_KEY_LABELS = {
'less_than_10':'Less than 10',
'ten_twenty':'10-20',
'twenty_thirty':'20-30',
'thirty_forty':'30-40',
'forty_fifty':'40-50',
'fifty_seventyfive':'50-75',
'seventyfive_hundred':'75-100',
'hundred_twohundred':'100-200',
'twohundred_fivehundred':'200-500',
'fivehundred_thousand':'500-1000',
'thousand_up':'1000+',
'all':'All'
}
TAXCALC_RESULTS_DEC_ROW_KEYS = dropq.dropq.decile_row_names
TAXCALC_RESULTS_DEC_ROW_KEY_LABELS = {
'perc0-10':'0-10%',
'perc10-20':'10-20%',
'perc20-30':'20-30%',
'perc30-40':'30-40%',
'perc40-50':'40-50%',
'perc50-60':'50-60%',
'perc60-70':'60-70%',
'perc70-80':'70-80%',
'perc80-90':'80-90%',
'perc90-100':'90-100%',
'all':'All'
}
TAXCALC_RESULTS_TABLE_LABELS = {
'mX_dec': 'Base plan tax vars, weighted avg per AGI decile',
'mY_dec': 'User plan tax vars, weighted avg per AGI decile',
'df_dec': 'Difference between Base and User plans by AGI decile',
'mX_bin': 'Base plan tax vars, weighted avg per income bin',
'mY_bin': 'User plan tax vars, weighted avg per income bin',
'df_bin': 'Difference between Base and User plans by income bin',
'fiscal_tots': 'Total Revenue Change by Calendar Year',
}
def expand_1D(x, num_years):
    """
    Pad *x* out to ``num_years`` entries.

    Returns a plain-list copy of ``x``; when ``x`` is shorter than
    ``num_years`` the copy is extended with ``None`` placeholders.
    """
    padded = list(x)
    if len(padded) < num_years:
        padded.extend([None] * (num_years - len(padded)))
    return padded
def expand_2D(x, num_years):
    """
    Pad the 2D list *x* out to ``num_years`` rows.

    Existing rows are kept (by reference); rows added to reach
    ``num_years`` are filled with ``None`` entries and have the same
    width as the first row of ``x``.
    """
    if len(x) >= num_years:
        return list(x)
    width = len(x[0])
    return [x[i] if i < len(x) else [None] * width
            for i in range(num_years)]
def expand_list(x, num_years):
    """
    Dispatch to either expand_1D or expand_2D depending on the dimension
    of *x*.

    Parameters:
    -----------
    x : value to expand
    num_years: int
        Number of budget years to expand

    Returns:
    --------
    expanded list
    """
    expander = expand_2D if isinstance(x[0], list) else expand_1D
    return expander(x, num_years)
def convert_to_floats(tsi):
    """
    Convert every truthy attribute of *tsi* (a TaxSaveInputs-style object)
    to a float, or to a list of floats for list-valued attributes.

    Attributes with falsy values (None, 0, '', empty list, ...) are
    omitted from the returned dictionary.
    """
    def _to_float(value):
        return value if isinstance(value, float) else float(value)
    converted = {}
    for name, value in vars(tsi).items():
        if not value:
            continue
        if isinstance(value, list):
            converted[name] = [_to_float(item) for item in value]
        else:
            converted[name] = _to_float(value)
    return converted
def leave_name_in(key, val, dd):
    """
    Under certain conditions, we will remove 'key' and its value
    from the dictionary we pass to the dropq package. This function
    will test those conditions and return a Bool.
    Parameters:
    -----------
    key: a field name to potentially pass to the dropq package
    val: the value associated with 'key' (only used in the debug print)
    dd: the default dictionary of data in taxcalc Parameters
    Returns:
    --------
    Bool: True if we allow this field to get passed on. False
    if it should be removed.
    """
    # Direct match against the taxcalc defaults: always keep.
    if key in dd:
        return True
    else:
        print "Don't have this pair: ", key, val
        # Keep the field if its underscore-prefixed form is a known default,
        # or it is a CPI toggle, or it is a broken-out array column (_0.._3).
        underscore_name_in_defaults = "_" + key in dd
        is_cpi_name = key.endswith("_cpi")
        is_array_name = (key.endswith("_0") or key.endswith("_1") or
                         key.endswith("_2") or key.endswith("_3"))
        if (underscore_name_in_defaults or is_cpi_name or is_array_name):
            return True
        else:
            return False
def package_up_vars(user_values):
    """
    Convert the user's raw form values into the parameter dictionary
    expected by dropq/taxcalc: drop unknown fields, re-assemble broken-out
    array columns (_0.._3), and expand every value list against the
    taxcalc defaults.  Mutates *user_values* in place and returns the
    packaged dictionary.
    """
    dd = taxcalc.parameters.default_data(start_year=START_YEAR)
    # Drop fields taxcalc does not know about.
    # NOTE(review): deletes from the dict while iterating items(); relies on
    # Python 2 items() returning a list -- would break under Python 3.
    for k, v in user_values.items():
        if not leave_name_in(k, v, dd):
            print "Removing ", k, v
            del user_values[k]
    name_stems = {}
    ans = {}
    #Find the 'broken out' array values, these have special treatment
    for k, v in user_values.items():
        if (k.endswith("_0") or k.endswith("_1") or k.endswith("_2")
            or k.endswith("_3")):
            vals = name_stems.setdefault(k[:-2], [])
            vals.append(k)
    #For each array value, expand as necessary based on default data
    #then add user values. It is acceptable to leave 'blanks' as None.
    #This is handled on the taxcalc side
    for k, vals in name_stems.items():
        if k in dd:
            default_data = dd[k]
            param = k
        else:
            #add a leading underscore
            default_data = dd["_" + k]
            param = "_" + k
        # get max number of years to advance
        _max = 0
        for name in vals:
            num_years = len(user_values[name])
            if num_years > _max:
                _max = num_years
        expnded = expand_list(default_data, _max)
        #Now copy necessary data to expanded array
        for name in vals:
            idx = int(name[-1]) # either 0, 1, 2, 3
            user_arr = user_values[name]
            for new_arr, user_val in zip(expnded, user_arr):
                new_arr[idx] = int(user_val)
            del user_values[name]
        ans[param] = expnded
    #Process remaining values set by user
    for k, v in user_values.items():
        if k in dd:
            default_data = dd[k]
            param = k
        elif k.endswith("_cpi"):
            # CPI toggles pass through unchanged (with underscore prefix
            # added when the base parameter needs one).
            if k[:-4] in dd:
                ans[k] = v
            else:
                ans['_' + k] = v
            continue
        else:
            #add a leading underscore
            default_data = dd["_" + k]
            param = "_" + k
        num_years = len(v)
        expnded = expand_list(default_data, num_years)
        for i, new_val in enumerate(v):
            expnded[i] = new_val
        ans[param] = expnded
    return ans
#
# Gather data to assist in displaying TaxCalc param form
#
class TaxCalcField(object):
    """
    An atomic unit of data for a TaxCalcParam, which can be stored as a field.
    Used for both CSV float fields (value column data) and boolean fields (cpi).
    """
    def __init__(self, id, label, values, param):
        self.id = id
        self.label = label
        self.values = values
        self.param = param
        # Map each value to its calendar year, counting up from the
        # parameter's first year.
        self.values_by_year = {
            param.start_year + offset: value
            for offset, value in enumerate(values)
        }
        self.default_value = self.values_by_year[START_YEAR]
class TaxCalcParam(object):
    """
    A collection of TaxCalcFields that represents all configurable details
    for one of TaxCalc's Parameters
    """
    def __init__(self, param_id, attributes):
        # 'attributes' is the parameter's metadata dict from taxcalc
        # (keys: value, col_label, long_name, description, irs_ref, notes).
        self.__load_from_json(param_id, attributes)
    def __load_from_json(self, param_id, attributes):
        # Populate display fields from the taxcalc metadata for param_id.
        values_by_year = attributes['value']
        col_labels = attributes['col_label']
        self.tc_id = param_id
        # UI-facing id: the taxcalc id without its leading underscore.
        self.nice_id = param_id[1:] if param_id[0] == '_' else param_id
        self.name = attributes['long_name']
        self.info = " ".join([
            attributes['description'],
            attributes.get('irs_ref') or "", # sometimes this is blank
            attributes.get('notes') or "" # sometimes this is blank
            ]).strip()
        # Pretend the start year is 2015 (instead of 2013),
        # until values for that year are provided by taxcalc
        #self.start_year = int(attributes['start_year'])
        self.start_year = START_YEAR
        self.coming_soon = (self.tc_id in TAXCALC_COMING_SOON_FIELDS)
        # normalize single-year default lists [] to [[]]
        if not isinstance(values_by_year[0], list):
            values_by_year = [values_by_year]
        # organize defaults by column [[A1,B1],[A2,B2]] to [[A1,A2],[B1,B2]]
        values_by_col = [list(x) for x in zip(*values_by_year)]
        #
        # normalize and format column labels
        #
        if self.tc_id in TAXCALC_COMING_SOON_INDEXED_BY_MARS:
            col_labels = ["Single", "Married filing Jointly",
                          "Married filing Separately", "Head of Household"]
            # NOTE(review): replaces the column values with the *strings*
            # '0', not numbers -- confirm downstream display expects strings.
            values_by_col = ['0','0','0','0']
        elif isinstance(col_labels, list):
            if col_labels == ["0kids", "1kid", "2kids", "3+kids"]:
                col_labels = ["0 Kids", "1 Kid", "2 Kids", "3+ Kids"]
            elif col_labels == ["single", "joint", "separate", "head of household",
                                "widow", "separate"] or col_labels == \
                               ["single", "joint", "separate", "head of household",
                                "widow", "separate","dependent"]:
                col_labels = ["Single", "Married filing Jointly",
                              "Married filing Separately", "Head of Household"]
        else:
            if col_labels == "NA" or col_labels == "":
                col_labels = [""]
            elif col_labels == "0kids 1kid 2kids 3+kids":
                col_labels = ["0 Kids", "1 Kid", "2 Kids", "3+ Kids"]
        # create col params
        self.col_fields = []
        if len(col_labels) == 1:
            self.col_fields.append(TaxCalcField(
                self.nice_id,
                col_labels[0],
                values_by_col[0],
                self
            ))
        else:
            # Multi-column parameter: suffix each field id with its column.
            for col, label in enumerate(col_labels):
                self.col_fields.append(TaxCalcField(
                    self.nice_id + "_{0}".format(col),
                    label,
                    values_by_col[col],
                    self
                ))
        # we assume we can CPI inflate if first value isn't a ratio
        first_value = self.col_fields[0].values[0]
        self.inflatable = first_value > 1 and self.tc_id != '_ACTC_ChildNum'
        if self.inflatable:
            self.cpi_field = TaxCalcField(self.nice_id + "_cpi", "CPI", [True], self)
# Create a list of default parameters
# Build the UI's parameter catalog from taxcalc's metadata, keyed by the
# underscore-less "nice" id.
TAXCALC_DEFAULT_PARAMS_JSON = taxcalc.parameters.default_data(metadata=True, start_year=2015)
default_taxcalc_params = {}
# NOTE(review): iteritems() is Python 2 only.
for k,v in TAXCALC_DEFAULT_PARAMS_JSON.iteritems():
    param = TaxCalcParam(k,v)
    default_taxcalc_params[param.nice_id] = param
#Behavior Effects not in params.json yet. Add in the appropriate info so that
#the params dictionary has the right info
# value, col_label, long_name, description, irs_ref, notes
be_params = []
be_inc_param = {'value':[0], 'col_label':['label'], 'long_name':'Income Effect',
                'description': 'Behavior Effects', 'irs_ref':'', 'notes':''}
be_sub_param = {'value':[0], 'col_label':['label'], 'long_name':'Substitution Effect',
                'description': 'Behavior Effects', 'irs_ref':'', 'notes':''}
be_cg_per_param = {'value':[0], 'col_label':['label'], 'long_name':'Persistent',
                   'description': 'Behavior Effects', 'irs_ref':'', 'notes':''}
be_cg_trn_param= {'value':[0], 'col_label':['label'], 'long_name':'Transitory',
                  'description': 'Behavior Effects', 'irs_ref':'', 'notes':''}
be_params.append(('_BE_inc', be_inc_param))
be_params.append(('_BE_sub', be_sub_param))
be_params.append(('_BE_cg_per', be_cg_per_param))
be_params.append(('_BE_cg_trn', be_cg_trn_param))
# Register the hand-built behavior-effect params alongside the defaults.
for k,v in be_params:
    param = TaxCalcParam(k,v)
    default_taxcalc_params[param.nice_id] = param
TAXCALC_DEFAULT_PARAMS = default_taxcalc_params
# Debug TaxParams
"""
for k, param in TAXCALC_DEFAULT_PARAMS.iteritems():
print(' -- ' + k + ' -- ')
print('TC id: ' + param.tc_id)
print('Nice id: ' + param.nice_id)
print('name: ' + param.name)
print('info: ' + param.info + '\n')
if param.inflatable:
field = param.cpi_field
print(field.id + ' - ' + field.label + ' - ' + str(field.values))
for field in param.col_fields:
print(field.id + ' - ' + field.label + ' - ' + str(field.values))
print('\n')
"""
def taxcalc_results_to_tables(results):
    """
    Take various results from dropq, i.e. mY_dec, mX_bin, df_dec, etc
    Return organized and labeled table results for display
    """
    num_years = len(results['fiscal_tots'])
    # Calendar years covered by the results, starting at the display
    # start year.
    years = list(range(TAXCALC_RESULTS_START_YEAR,
                       TAXCALC_RESULTS_START_YEAR + num_years))
    tables = {}
    for table_id in results:
        # Debug inputs
        """
        print('\n ----- inputs ------- ')
        print('looking at {0}'.format(table_id))
        if table_id == 'fiscal_tots':
            print('{0}'.format(results[table_id]))
        else:
            print('{0}'.format(results[table_id].keys()))
        print(' ----- inputs ------- \n')
        """
        # Select the row keys / labels / column formats appropriate for
        # this table id.
        # NOTE(review): an unknown table_id would leave row_keys etc.
        # undefined (or stale from a prior iteration) -- confirm results
        # only ever contains the ids handled below.
        if table_id in ['mX_dec', 'mY_dec']:
            row_keys = TAXCALC_RESULTS_DEC_ROW_KEYS
            row_labels = TAXCALC_RESULTS_DEC_ROW_KEY_LABELS
            col_labels = TAXCALC_RESULTS_MTABLE_COL_LABELS
            col_formats = TAXCALC_RESULTS_MTABLE_COL_FORMATS
            table_data = results[table_id]
            multi_year_cells = True
        elif table_id in ['mX_bin', 'mY_bin']:
            row_keys = TAXCALC_RESULTS_BIN_ROW_KEYS
            row_labels = TAXCALC_RESULTS_BIN_ROW_KEY_LABELS
            col_labels = TAXCALC_RESULTS_MTABLE_COL_LABELS
            col_formats = TAXCALC_RESULTS_MTABLE_COL_FORMATS
            table_data = results[table_id]
            multi_year_cells = True
        elif table_id == 'df_dec':
            row_keys = TAXCALC_RESULTS_DEC_ROW_KEYS
            row_labels = TAXCALC_RESULTS_DEC_ROW_KEY_LABELS
            col_labels = TAXCALC_RESULTS_DFTABLE_COL_LABELS
            col_formats = TAXCALC_RESULTS_DFTABLE_COL_FORMATS
            table_data = results[table_id]
            multi_year_cells = True
        elif table_id == 'df_bin':
            row_keys = TAXCALC_RESULTS_BIN_ROW_KEYS
            row_labels = TAXCALC_RESULTS_BIN_ROW_KEY_LABELS
            col_labels = TAXCALC_RESULTS_DFTABLE_COL_LABELS
            col_formats = TAXCALC_RESULTS_DFTABLE_COL_FORMATS
            table_data = results[table_id]
            multi_year_cells = True
        elif table_id == 'fiscal_tots':
            # todo - move these into the above TC result param constants
            row_keys = ['totals']
            row_labels = {'totals': 'Total Revenue'}
            col_labels = years
            col_formats = [ [1000000000, 'Dollars', 1] for y in years]
            table_data = {'totals': results[table_id]}
            multi_year_cells = False
        table = {
            'col_labels': col_labels,
            'cols': [],
            'label': TAXCALC_RESULTS_TABLE_LABELS[table_id],
            'rows': [],
            'multi_valued': multi_year_cells
        }
        # Column metadata (label plus display divisor/units/decimals).
        for col_key, label in enumerate(col_labels):
            table['cols'].append({
                'label': label,
                'divisor': col_formats[col_key][0],
                'units': col_formats[col_key][1],
                'decimals': col_formats[col_key][2],
            })
        col_count = len(col_labels)
        for row_key in row_keys:
            row = {
                'label': row_labels[row_key],
                'cells': []
            }
            for col_key in range(0, col_count):
                cell = {
                    'year_values': {},
                    'format': {
                        'divisor': table['cols'][col_key]['divisor'],
                        'decimals': table['cols'][col_key]['decimals'],
                    }
                }
                if multi_year_cells:
                    # Per-year values come from "<row>_<year index>" keys;
                    # strip a trailing percent sign if present.
                    for yi, year in enumerate(years):
                        value = table_data["{0}_{1}".format(row_key, yi)][col_key]
                        if value[-1] == "%":
                            value = value[:-1]
                        cell['year_values'][year] = value
                    cell['first_value'] = cell['year_values'][TAXCALC_RESULTS_START_YEAR]
                else:
                    value = table_data[row_key][col_key]
                    if value[-1] == "%":
                        value = value[:-1]
                    cell['value'] = value
                row['cells'].append(cell)
            table['rows'].append(row)
        tables[table_id] = table
        # Debug results
        """
        print('\n ----- result ------- ')
        print('{0}'.format(table))
        print(' ----- result ------- \n')
        """
    tables['result_years'] = years
    return tables
def _append_yearly_table(res, heading, table, yrs, col_labels, row_keys):
    """
    Append one per-year table section to *res*.

    Emits the section heading row; then, when *table* is non-empty, for
    each year in *yrs*: the year, the column labels, and one row of
    values per key in *row_keys*.  Rows in *table* are keyed
    "<row>_<year index>".  When *table* is empty only the heading row
    is emitted (matching the original layout).
    """
    res.append([heading])
    if table:
        for count, yr in enumerate(yrs):
            res.append([yr])
            res.append(col_labels)
            for row in row_keys:
                res.append(table[row + "_" + str(count)])
def format_csv(tax_results, url_id):
    """
    Takes a dictionary with the tax_results, having these keys:
    [u'mY_bin', u'mX_bin', u'mY_dec', u'mX_dec', u'df_dec', u'df_bin',
    u'fiscal_tots']
    And then returns a list of list of strings for CSV output. The format
    of the lines is as follows:
    #URL: http://www.ospc.org/taxbrain/ID/csv/
    #fiscal tots data
    YEAR_0, ... YEAR_K
    val, val, ... val
    then, for each of mX_dec, mY_dec, df_dec, mX_bin, mY_bin, df_bin:
    #<table name>
    YEAR_i
    col_0, col_1, ..., col_n
    val, val, ..., val        (one line per row key, repeated per year)
    """
    res = []
    # URL header line for this result set.
    res.append(["#URL: http://www.ospc.org/taxbrain/" + str(url_id) + "/"])
    # Fiscal totals: year header row followed by the totals row.  The year
    # range derived here also drives every per-year table below, so a
    # missing 'fiscal_tots' yields heading-only sections.
    res.append(["#fiscal totals data"])
    ft = tax_results.get('fiscal_tots', [])
    yrs = [START_YEAR + i for i in range(0, len(ft))]
    if yrs:
        res.append(yrs)
    if ft:
        res.append(ft)
    # Per-year tables, in the original output order.  Decile tables use the
    # decile row keys, bin tables the income-bin row keys; the difference
    # tables ("df_*") use the DFTABLE column labels.
    _append_yearly_table(res, "#mX_dec", tax_results.get('mX_dec', {}), yrs,
                         TAXCALC_RESULTS_MTABLE_COL_LABELS,
                         TAXCALC_RESULTS_DEC_ROW_KEYS)
    _append_yearly_table(res, "#mY_dec", tax_results.get('mY_dec', {}), yrs,
                         TAXCALC_RESULTS_MTABLE_COL_LABELS,
                         TAXCALC_RESULTS_DEC_ROW_KEYS)
    _append_yearly_table(res, "#df_dec", tax_results.get('df_dec', {}), yrs,
                         TAXCALC_RESULTS_DFTABLE_COL_LABELS,
                         TAXCALC_RESULTS_DEC_ROW_KEYS)
    _append_yearly_table(res, "#mX_bin", tax_results.get('mX_bin', {}), yrs,
                         TAXCALC_RESULTS_MTABLE_COL_LABELS,
                         TAXCALC_RESULTS_BIN_ROW_KEYS)
    _append_yearly_table(res, "#mY_bin", tax_results.get('mY_bin', {}), yrs,
                         TAXCALC_RESULTS_MTABLE_COL_LABELS,
                         TAXCALC_RESULTS_BIN_ROW_KEYS)
    _append_yearly_table(res, "#df_bin", tax_results.get('df_bin', {}), yrs,
                         TAXCALC_RESULTS_DFTABLE_COL_LABELS,
                         TAXCALC_RESULTS_BIN_ROW_KEYS)
    return res
def submit_dropq_calculation(mods):
    """
    Package the user's parameter modifications and submit one dropq job
    per budget year, round-robining across DROPQ_WORKERS.  Returns a list
    of (job_id, hostname) pairs, or False when packaging yields nothing.
    Raises IOError after MAX_ATTEMPTS_SUBMIT_JOB failed submissions for a
    single year.
    """
    print "mods is ", mods
    user_mods = package_up_vars(mods)
    if not bool(user_mods):
        return False
    print "user_mods is ", user_mods
    print "submit work"
    # Wrap the packaged params under the start year, as the workers expect.
    user_mods={START_YEAR:user_mods}
    years = list(range(0,NUM_BUDGET_YEARS))
    hostnames = DROPQ_WORKERS
    num_hosts = len(hostnames)
    data = {}
    data['user_mods'] = json.dumps(user_mods)
    job_ids = []
    hostname_idx = 0
    for y in years:
        year_submitted = False
        attempts = 0
        # Keep retrying this year on the next host until one accepts it.
        while not year_submitted:
            data['year'] = str(y)
            theurl = "http://{hn}/dropq_start_job".format(hn=hostnames[hostname_idx])
            try:
                response = requests.post(theurl, data=data, timeout=TIMEOUT_IN_SECONDS)
                if response.status_code == 200:
                    print "submitted: ", str(y), hostnames[hostname_idx]
                    year_submitted = True
                    # Response body is the worker-assigned job id.
                    job_ids.append((response.text, hostnames[hostname_idx]))
                    hostname_idx = (hostname_idx + 1) % num_hosts
                else:
                    print "FAILED: ", str(y), hostnames[hostname_idx]
                    hostname_idx = (hostname_idx + 1) % num_hosts
                    attempts += 1
            except Timeout:
                print "Couldn't submit to: ", hostnames[hostname_idx]
                hostname_idx = (hostname_idx + 1) % num_hosts
                attempts += 1
            except RequestException as re:
                print "Something unexpected happened: ", re
                hostname_idx = (hostname_idx + 1) % num_hosts
                attempts += 1
            if attempts > MAX_ATTEMPTS_SUBMIT_JOB:
                print "Exceeded max attempts. Bailing out."
                raise IOError()
    return job_ids
def dropq_results_ready(job_ids):
    """
    Poll each dropq worker once and report whether every job in *job_ids*
    (a sequence of (job_id, hostname) pairs) is finished.  A job counts
    as done only when the worker answers 200 with body 'YES'.
    """
    jobs_done = [False] * len(job_ids)
    for idx, id_hostname in enumerate(job_ids):
        id_, hostname = id_hostname
        result_url = "http://{hn}/dropq_query_result".format(hn=hostname)
        job_response = requests.get(result_url, params={'job_id':id_})
        if job_response.status_code == 200: # Valid response
            rep = job_response.text
            if rep == 'YES':
                jobs_done[idx] = True
                print "got one!: ", id_
    return all(jobs_done)
def dropq_get_results(job_ids):
    """
    Fetch and merge the per-year results of all completed dropq jobs.

    *job_ids* is a sequence of (job_id, hostname) pairs.  Each worker's
    JSON payload contributes its entries to the merged per-table
    dictionaries; the fiscal totals are collected into a list in job
    order.  Workers that do not answer 200 are silently skipped.
    """
    table_keys = ('mY_dec', 'mX_dec', 'df_dec', 'mY_bin', 'mX_bin', 'df_bin')
    payloads = []
    for job_id, hostname in job_ids:
        result_url = "http://{hn}/dropq_get_result".format(hn=hostname)
        job_response = requests.get(result_url, params={'job_id': job_id})
        if job_response.status_code == 200:  # Valid response
            payloads.append(job_response.json())
    results = {key: {} for key in table_keys}
    results['fiscal_tots'] = []
    for payload in payloads:
        for key in table_keys:
            results[key].update(payload[key])
        results['fiscal_tots'].append(payload['fiscal_tots'])
    return results
|
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Some portions of this file Copyright 2018 Uber Technologies, Inc
# and licensed under the Apache License, Version 2.0
#
# This file is adapted from https://github.com/horovod/horovod/blob/master/examples/pytorch_mnist.py
from __future__ import print_function
import argparse
import horovod.torch as hvd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data.distributed
from torchvision import datasets, transforms
from zoo.ray import RayContext
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca.learn.horovod import HorovodRayRunner
def run_horovod():
# Temporary patch this script until the MNIST dataset download issue get resolved
# https://github.com/pytorch/vision/issues/1938
import urllib
try:
# For python 2
class AppURLopener(urllib.FancyURLopener):
version = "Mozilla/5.0"
urllib._urlopener = AppURLopener()
except AttributeError:
# For python 3
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
batch_size = 64
test_batch_size = 1000
epochs = 10
lr = 0.01
momentum = 0.5
seed = 43
log_interval = 10
fp16_allreduce = False
use_adasum = False
# Horovod: initialize library.
hvd.init()
torch.manual_seed(seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(4)
kwargs = {}
train_dataset = \
datasets.MNIST('data-%d' % hvd.rank(), train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# Horovod: use DistributedSampler to partition the training data.
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler, **kwargs)
test_dataset = \
datasets.MNIST('data-%d' % hvd.rank(), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# Horovod: use DistributedSampler to partition the test data.
test_sampler = torch.utils.data.distributed.DistributedSampler(
test_dataset, num_replicas=hvd.size(), rank=hvd.rank())
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=test_batch_size,
sampler=test_sampler, **kwargs)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
model = Net()
# By default, Adasum doesn't need scaling up learning rate.
lr_scaler = hvd.size() if not use_adasum else 1
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(model.parameters(), lr=lr * lr_scaler,
momentum=momentum)
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = hvd.Compression.fp16 if fp16_allreduce else hvd.Compression.none
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(optimizer,
named_parameters=model.named_parameters(),
compression=compression,
op=hvd.Adasum if use_adasum else hvd.Average)
def train(epoch):
model.train()
# Horovod: set epoch to sampler for shuffling.
train_sampler.set_epoch(epoch)
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
# Horovod: use train_sampler to determine the number of examples in
# this worker's partition.
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_sampler),
100. * batch_idx / len(train_loader), loss.item()))
def metric_average(val, name):
tensor = torch.tensor(val)
avg_tensor = hvd.allreduce(tensor, name=name)
return avg_tensor.item()
def test():
    """Evaluate on this worker's test shard and print globally averaged metrics."""
    model.eval()
    test_loss = 0.
    test_accuracy = 0.
    for data, target in test_loader:
        output = model(data)
        # Sum up batch loss. Fix: reduction='sum' replaces the deprecated
        # size_average=False keyword and is numerically identical.
        test_loss += F.nll_loss(output, target, reduction='sum').item()
        # get the index of the max log-probability
        pred = output.data.max(1, keepdim=True)[1]
        test_accuracy += pred.eq(target.data.view_as(pred)).cpu().float().sum()
    # Horovod: use test_sampler to determine the number of examples in
    # this worker's partition.
    test_loss /= len(test_sampler)
    test_accuracy /= len(test_sampler)
    # Horovod: average metric values across workers.
    test_loss = metric_average(test_loss, 'avg_loss')
    test_accuracy = metric_average(test_accuracy, 'avg_accuracy')
    # Horovod: print output only on first rank.
    if hvd.rank() == 0:
        print('\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(
            test_loss, 100. * test_accuracy))
for epoch in range(1, epochs + 1):
train(epoch)
test()
# Command-line options for the Spark/Ray cluster that hosts the Horovod run;
# defaults suit a small local smoke test.
parser = argparse.ArgumentParser()
parser.add_argument('--cluster_mode', type=str, default="local",
                    help='The mode for the Spark cluster.')
parser.add_argument("--slave_num", type=int, default=2,
                    help="The number of slave nodes to be used in the cluster."
                    "You can change it depending on your own cluster setting.")
parser.add_argument("--cores", type=int, default=8,
                    help="The number of cpu cores you want to use on each node. "
                    "You can change it depending on your own cluster setting.")
parser.add_argument("--memory", type=str, default="10g",
                    help="The size of slave(executor)'s memory you want to use."
                    "You can change it depending on your own cluster setting.")
if __name__ == "__main__":
    args = parser.parse_args()
    # Local mode runs everything on one node; otherwise use the configured
    # number of slave nodes.
    num_nodes = 1 if args.cluster_mode == "local" else args.slave_num
    init_orca_context(cluster_mode=args.cluster_mode, cores=args.cores, num_nodes=num_nodes,
                      memory=args.memory)
    # Launch run_horovod on the Ray cluster created by init_orca_context.
    runner = HorovodRayRunner(RayContext.get())
    runner.run(func=run_horovod)
    stop_orca_context()
|
|
"""The tests for the Rfxtrx switch platform."""
import unittest
import pytest
from homeassistant.bootstrap import setup_component
from homeassistant.components import rfxtrx as rfxtrx_core
from tests.common import get_test_home_assistant
@pytest.mark.skipif("os.environ.get('RFXTRX') != 'RUN'")
class TestSwitchRfxtrx(unittest.TestCase):
    """Test the Rfxtrx switch platform."""

    def setUp(self):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant(0)
        self.hass.config.components = ['rfxtrx']

    def tearDown(self):
        """Stop everything that was started."""
        # Reset module-level rfxtrx state so tests stay independent.
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS = []
        rfxtrx_core.RFX_DEVICES = {}
        if rfxtrx_core.RFXOBJECT:
            rfxtrx_core.RFXOBJECT.close_connection()
        self.hass.stop()

    def test_valid_config(self):
        """Test configuration."""
        self.assertTrue(setup_component(self.hass, 'switch', {
            'switch': {'platform': 'rfxtrx',
                       'automatic_add': True,
                       'devices':
                           {'0b1100cd0213c7f210010f51': {
                               'name': 'Test',
                               rfxtrx_core.ATTR_FIREEVENT: True}
                            }}}))

    def test_valid_config_int_device_id(self):
        """Test configuration."""
        # Device id given as a bare int rather than a hex string.
        self.assertTrue(setup_component(self.hass, 'switch', {
            'switch': {'platform': 'rfxtrx',
                       'automatic_add': True,
                       'devices':
                           {710000141010170: {
                               'name': 'Test',
                               rfxtrx_core.ATTR_FIREEVENT: True}
                            }}}))

    def test_invalid_config1(self):
        """Test configuration with an invalid device id."""
        self.assertFalse(setup_component(self.hass, 'switch', {
            'switch': {'platform': 'rfxtrx',
                       'automatic_add': True,
                       'devices':
                           {'2FF7f216': {
                               'name': 'Test',
                               'packetid': '0b1100cd0213c7f210010f51',
                               'signal_repetitions': 3}
                            }}}))

    def test_invalid_config2(self):
        """Test configuration."""
        # 'invalid_key' is not part of the platform schema.
        self.assertFalse(setup_component(self.hass, 'switch', {
            'switch': {'platform': 'rfxtrx',
                       'automatic_add': True,
                       'invalid_key': 'afda',
                       'devices':
                           {'213c7f216': {
                               'name': 'Test',
                               'packetid': '0b1100cd0213c7f210010f51',
                               rfxtrx_core.ATTR_FIREEVENT: True}
                            }}}))

    def test_invalid_config3(self):
        """Test configuration with an invalid packetid."""
        self.assertFalse(setup_component(self.hass, 'switch', {
            'switch': {'platform': 'rfxtrx',
                       'automatic_add': True,
                       'devices':
                           {'213c7f216': {
                               'name': 'Test',
                               'packetid': 'AA1100cd0213c7f210010f51',
                               rfxtrx_core.ATTR_FIREEVENT: True}
                            }}}))

    def test_invalid_config4(self):
        """Test configuration."""
        # Missing 'packetid' for a short device id.
        self.assertFalse(setup_component(self.hass, 'switch', {
            'switch': {'platform': 'rfxtrx',
                       'automatic_add': True,
                       'devices':
                           {'213c7f216': {
                               'name': 'Test',
                               rfxtrx_core.ATTR_FIREEVENT: True}
                            }}}))

    def test_default_config(self):
        """Test with 0 switches."""
        self.assertTrue(setup_component(self.hass, 'switch', {
            'switch': {'platform': 'rfxtrx',
                       'devices':
                           {}}}))
        self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))

    def test_old_config(self):
        """Test with 1 switch."""
        # NOTE(review): the configured key is '123efab1' but the entity is
        # looked up under '213c7f216' below — presumably the platform re-keys
        # devices from the packetid; verify against the rfxtrx component.
        self.assertTrue(setup_component(self.hass, 'switch', {
            'switch': {'platform': 'rfxtrx',
                       'devices':
                           {'123efab1': {
                               'name': 'Test',
                               'packetid': '0b1100cd0213c7f210010f51'}}}}))

        import RFXtrx as rfxtrxmod
        # Use a dummy transport so no real serial device is required.
        rfxtrx_core.RFXOBJECT =\
            rfxtrxmod.Core("", transport_protocol=rfxtrxmod.DummyTransport)

        self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
        entity = rfxtrx_core.RFX_DEVICES['213c7f216']
        self.assertEqual('Test', entity.name)
        self.assertEqual('off', entity.state)
        self.assertTrue(entity.assumed_state)
        self.assertEqual(entity.signal_repetitions, 1)
        self.assertFalse(entity.should_fire_event)
        self.assertFalse(entity.should_poll)

        self.assertFalse(entity.is_on)
        entity.turn_on()
        self.assertTrue(entity.is_on)
        entity.turn_off()
        self.assertFalse(entity.is_on)

    def test_one_switch(self):
        """Test with 1 switch."""
        self.assertTrue(setup_component(self.hass, 'switch', {
            'switch': {'platform': 'rfxtrx',
                       'devices':
                           {'0b1100cd0213c7f210010f51': {
                               'name': 'Test'}}}}))

        import RFXtrx as rfxtrxmod
        rfxtrx_core.RFXOBJECT =\
            rfxtrxmod.Core("", transport_protocol=rfxtrxmod.DummyTransport)

        self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
        entity = rfxtrx_core.RFX_DEVICES['213c7f216']
        self.assertEqual('Test', entity.name)
        self.assertEqual('off', entity.state)
        self.assertTrue(entity.assumed_state)
        self.assertEqual(entity.signal_repetitions, 1)
        self.assertFalse(entity.should_fire_event)
        self.assertFalse(entity.should_poll)

        self.assertFalse(entity.is_on)
        entity.turn_on()
        self.assertTrue(entity.is_on)
        entity.turn_off()
        self.assertFalse(entity.is_on)

        # Verify the hass state machine mirrors the entity state.
        entity_id = rfxtrx_core.RFX_DEVICES['213c7f216'].entity_id
        entity_hass = self.hass.states.get(entity_id)
        self.assertEqual('Test', entity_hass.name)
        self.assertEqual('off', entity_hass.state)

        entity.turn_on()
        entity_hass = self.hass.states.get(entity_id)
        self.assertEqual('on', entity_hass.state)

        entity.turn_off()
        entity_hass = self.hass.states.get(entity_id)
        self.assertEqual('off', entity_hass.state)

    def test_several_switches(self):
        """Test with 3 switches."""
        self.assertTrue(setup_component(self.hass, 'switch', {
            'switch': {'platform': 'rfxtrx',
                       'signal_repetitions': 3,
                       'devices':
                           {'0b1100cd0213c7f230010f71': {
                               'name': 'Test'},
                            '0b1100100118cdea02010f70': {
                                'name': 'Bath'},
                            '0b1100101118cdea02010f70': {
                                'name': 'Living'}}}}))

        self.assertEqual(3, len(rfxtrx_core.RFX_DEVICES))
        device_num = 0
        # Check each configured device; count them to make sure all three
        # names were seen exactly once.
        for id in rfxtrx_core.RFX_DEVICES:
            entity = rfxtrx_core.RFX_DEVICES[id]
            self.assertEqual(entity.signal_repetitions, 3)
            if entity.name == 'Living':
                device_num = device_num + 1
                self.assertEqual('off', entity.state)
                self.assertEqual('<Entity Living: off>', entity.__str__())
            elif entity.name == 'Bath':
                device_num = device_num + 1
                self.assertEqual('off', entity.state)
                self.assertEqual('<Entity Bath: off>', entity.__str__())
            elif entity.name == 'Test':
                device_num = device_num + 1
                self.assertEqual('off', entity.state)
                self.assertEqual('<Entity Test: off>', entity.__str__())

        self.assertEqual(3, device_num)

    def test_discover_switch(self):
        """Test with discovery of switches."""
        self.assertTrue(setup_component(self.hass, 'switch', {
            'switch': {'platform': 'rfxtrx',
                       'automatic_add': True,
                       'devices': {}}}))

        event = rfxtrx_core.get_rfx_object('0b1100100118cdea02010f70')
        event.data = bytearray([0x0b, 0x11, 0x00, 0x10, 0x01, 0x18,
                                0xcd, 0xea, 0x01, 0x01, 0x0f, 0x70])

        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        entity = rfxtrx_core.RFX_DEVICES['118cdea2']
        self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
        self.assertEqual('<Entity 0b1100100118cdea01010f70: on>',
                         entity.__str__())

        # Sending the same event again must not create a duplicate device.
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))

        event = rfxtrx_core.get_rfx_object('0b1100100118cdeb02010f70')
        event.data = bytearray([0x0b, 0x11, 0x00, 0x12, 0x01, 0x18,
                                0xcd, 0xea, 0x02, 0x00, 0x00, 0x70])
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        entity = rfxtrx_core.RFX_DEVICES['118cdeb2']
        self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
        self.assertEqual('<Entity 0b1100120118cdea02000070: on>',
                         entity.__str__())

        # Trying to add a sensor
        event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
        event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))

        # Trying to add a light
        event = rfxtrx_core.get_rfx_object('0b1100100118cdea02010f70')
        event.data = bytearray([0x0b, 0x11, 0x11, 0x10, 0x01, 0x18,
                                0xcd, 0xea, 0x01, 0x02, 0x0f, 0x70])
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))

        # Trying to add a rollershutter
        event = rfxtrx_core.get_rfx_object('0a1400adf394ab020e0060')
        event.data = bytearray([0x0A, 0x14, 0x00, 0xAD, 0xF3, 0x94,
                                0xAB, 0x02, 0x0E, 0x00, 0x60])
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))

    def test_discover_switch_noautoadd(self):
        """Test with discovery of switch when auto add is False."""
        self.assertTrue(setup_component(self.hass, 'switch', {
            'switch': {'platform': 'rfxtrx',
                       'automatic_add': False,
                       'devices': {}}}))

        event = rfxtrx_core.get_rfx_object('0b1100100118cdea02010f70')
        event.data = bytearray([0x0b, 0x11, 0x00, 0x10, 0x01, 0x18,
                                0xcd, 0xea, 0x01, 0x01, 0x0f, 0x70])

        # With automatic_add off, no event may ever create a device.
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))

        self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))

        event = rfxtrx_core.get_rfx_object('0b1100100118cdeb02010f70')
        event.data = bytearray([0x0b, 0x11, 0x00, 0x12, 0x01, 0x18,
                                0xcd, 0xea, 0x02, 0x00, 0x00, 0x70])
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))

        # Trying to add a sensor
        event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
        event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))

        # Trying to add a light
        event = rfxtrx_core.get_rfx_object('0b1100100118cdea02010f70')
        event.data = bytearray([0x0b, 0x11, 0x11, 0x10, 0x01,
                                0x18, 0xcd, 0xea, 0x01, 0x02, 0x0f, 0x70])
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))

        # Trying to add a rollershutter
        event = rfxtrx_core.get_rfx_object('0a1400adf394ab020e0060')
        event.data = bytearray([0x0A, 0x14, 0x00, 0xAD, 0xF3, 0x94,
                                0xAB, 0x02, 0x0E, 0x00, 0x60])
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
|
|
import os
import json
import bisect
import datetime
import cass_connection
from collections import defaultdict
import cassandra.query as cql
M5NR_VERSION = 1
def rmqLogger(channel, stype, statement, bulk=0):
    """Publish a CQL statement to the RabbitMQ logging queue (best-effort).

    Does nothing when no channel is given, and swallows any publish failure
    so logging can never break the calling query path.

    Args:
        channel: open message-queue channel, or a falsy value to disable.
        stype: statement type label, e.g. 'select', 'insert', 'delete'.
        statement: CQL text; bodies longer than 100 chars are truncated.
        bulk: row count for batch statements (0 for single statements).
    """
    if not channel:
        return
    truncate = (statement[:98] + '..') if len(statement) > 100 else statement
    body = {
        'timestamp': datetime.datetime.now().isoformat(),
        'type': stype,
        'statement': truncate,
        'bulk': bulk
    }
    if 'HOSTNAME' in os.environ:
        body['host'] = os.environ['HOSTNAME']
    try:
        channel.basic_publish(
            exchange='',
            routing_key=cass_connection.RM_QUEUE,
            body=json.dumps(body),
            properties=cass_connection.RMQ_PROP
        )
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; publish errors are still
        # silently ignored (logging stays best-effort).
        pass
class M5nrHandle(object):
    """Read-only accessor for the m5nr_v<version> Cassandra keyspace.

    Rows are returned as dicts (dict_factory row factory). A RabbitMQ
    channel is opened on a best-effort basis to log every statement.
    """

    def __init__(self, hosts, version=M5NR_VERSION):
        keyspace = "m5nr_v"+str(version)
        self.session = cass_connection.create(hosts).connect(keyspace)
        self.session.default_timeout = 300
        self.session.row_factory = cql.dict_factory
        self.channel = None
        # Logging channel is optional: any failure leaves it as None.
        try:
            self.channel = cass_connection.rmqConnection().channel()
        except:
            pass

    def close(self):
        """Tear down the shared Cassandra cluster connection."""
        cass_connection.destroy()

    ### retrieve M5NR records
    def get_records_by_md5(self, md5s, source=None, index=False, iterator=False):
        """Fetch annotation rows for the given md5 checksums.

        Args:
            md5s: iterable of md5 hex strings.
            source: optional source name to restrict the query.
            index: query the midx_annotation table instead of md5_annotation.
            iterator: if True return the raw result iterator; otherwise a
                list of row dicts with is_protein normalized to 0/1.
        """
        found = []
        table = "midx_annotation" if index else "md5_annotation"
        # NOTE(review): query built by string interpolation — unsafe if md5s
        # or source can come from untrusted input; prefer prepared statements.
        md5_str = ",".join(map(lambda x: "'"+x+"'", md5s))
        if source:
            query = "SELECT * FROM %s WHERE md5 IN (%s) AND source='%s'"%(table, md5_str, source)
        else:
            query = "SELECT * FROM %s WHERE md5 IN (%s)"%(table, md5_str)
        rmqLogger(self.channel, 'select', query)
        rows = self.session.execute(query)
        if iterator:
            return rows
        else:
            for r in rows:
                r['is_protein'] = 1 if r['is_protein'] else 0
                found.append(r)
            return found

    def get_functions_by_id(self, ids, compress, iterator=False):
        """Fetch function rows by numeric id.

        Returns the raw iterator when iterator is True; otherwise a dict
        {id: name} when compress == 1, else a list of
        {'function_id', 'function'} dicts.
        """
        id_str = ",".join(map(str, ids))
        query = "SELECT * FROM functions WHERE id IN (%s)"%(id_str)
        rmqLogger(self.channel, 'select', query)
        rows = self.session.execute(query)
        if iterator:
            return rows
        else:
            if compress == 1:
                found = {}
                for r in rows:
                    found[ r['id'] ] = r['name']
            else:
                found = []
                for r in rows:
                    found.append( {'function_id': r['id'], 'function': r['name']} )
            return found

    ### retrieve full hierarchies
    def get_taxa_hierarchy(self):
        """Return {organism name: [domain..species] 7-level taxonomy list}."""
        found = {}
        query = "SELECT * FROM organisms_ncbi"
        rmqLogger(self.channel, 'select', query)
        rows = self.session.execute(query)
        for r in rows:
            found[r['name']] = [r['tax_domain'], r['tax_phylum'], r['tax_class'], r['tax_order'], r['tax_family'], r['tax_genus'], r['tax_species']]
        return found

    def get_ontology_hierarchy(self, source=None):
        """Return ontology level lists, keyed by name (one source) or by
        source then name (all sources)."""
        found = {}
        query = "SELECT * FROM ontologies"
        if source:
            query += " WHERE source = ?"
            prep = self.session.prepare(query)
            for r in self.session.execute(prep, [source]):
                found[r['name']] = [r['level1'], r['level2'], r['level3'], r['level4']]
        else:
            for r in self.session.execute(query):
                if r['source'] not in found:
                    found[r['source']] = {}
                found[r['source']][r['name']] = [r['level1'], r['level2'], r['level3'], r['level4']]
        # NOTE(review): logged after execution here, unlike the other methods.
        rmqLogger(self.channel, 'select', query)
        return found

    ### retrieve hierarchy mapping: leaf -> level
    def get_org_taxa_map(self, taxa):
        """Return {organism name: value at the given taxonomic level}."""
        found = {}
        tname = "tax_"+taxa.lower()
        query = "SELECT * FROM "+tname
        rmqLogger(self.channel, 'select', query)
        rows = self.session.execute(query)
        for r in rows:
            found[r['name']] = r[tname]
        return found

    def get_ontology_map(self, level, source=None):
        """Return leaf-to-level mapping for an ontology level, keyed by name
        (one source) or by source then name (all sources)."""
        found = {}
        level = level.lower()
        query = "SELECT * FROM ont_%s"%level
        if source:
            query += " WHERE source = ?"
            prep = self.session.prepare(query)
            for r in self.session.execute(prep, [source]):
                found[r['name']] = r[level]
        else:
            for r in self.session.execute(query):
                if r['source'] not in found:
                    found[r['source']] = {}
                found[r['source']][r['name']] = r[level]
        rmqLogger(self.channel, 'select', query)
        return found

    ### retrieve hierarchy: leaf list for a level
    def get_organism_by_taxa(self, taxa, match=None):
        # if match is given, return subset that contains match, else all
        found = set()
        tname = "tax_"+taxa.lower()
        query = "SELECT * FROM "+tname
        rmqLogger(self.channel, 'select', query)
        rows = self.session.execute(query)
        for r in rows:
            # case-insensitive substring filter on the level value
            if match and (match.lower() in r[tname].lower()):
                found.add(r['name'])
            elif not match:
                found.add(r['name'])
        return list(found)

    def get_ontology_by_level(self, source, level, match=None):
        # if match is given, return subset that contains match, else all
        found = set()
        level = level.lower()
        query = "SELECT * FROM ont_%s WHERE source = ?"%level
        rmqLogger(self.channel, 'select', query)
        prep = self.session.prepare(query)
        rows = self.session.execute(prep, [source])
        for r in rows:
            if match and (match.lower() in r[level].lower()):
                found.add(r['name'])
            elif not match:
                found.add(r['name'])
        return list(found)
class JobHandle(object):
    """Accessor for per-job abundance data in the mgrast_abundance keyspace.

    Reads and writes the job_info / job_md5s / job_lcas tables. Rows come
    back as tuples (tuple_factory). All writes use QUORUM consistency, and
    every statement is logged to RabbitMQ on a best-effort basis.
    """

    def __init__(self, hosts, version=M5NR_VERSION):
        keyspace = "mgrast_abundance"
        self.version = int(version)
        self.session = cass_connection.create(hosts).connect(keyspace)
        self.session.default_timeout = 300
        self.session.row_factory = cql.tuple_factory
        self.channel = None
        # Logging channel is optional: any failure leaves it as None.
        try:
            self.channel = cass_connection.rmqConnection().channel()
        except:
            pass

    def close(self):
        """Tear down the shared Cassandra cluster connection."""
        cass_connection.destroy()

    ## get iterator for md5 records of a job
    def get_job_records(self, job, fields, swap=None, evalue=None, identity=None, alength=None):
        """Return an iterator over job_md5s rows for one job, with optional
        e-value / identity / alignment-length cutoffs.

        If swap is truthy the identity and alength cutoffs are exchanged
        (caller passed them in the opposite order).
        """
        job = int(job)
        if swap:
            identity, alength = alength, identity
        query = "SELECT "+",".join(fields)+" FROM job_md5s WHERE version = ? AND job = ?"
        where = [self.version, job]
        if evalue:
            # exp_avg is stored as a negative exponent, hence the * -1
            query += " AND exp_avg <= ?"
            where.append(int(evalue) * -1)
        if identity:
            query += " AND ident_avg >= ?"
            where.append(int(identity))
        if alength:
            query += " AND len_avg >= ?"
            where.append(int(alength))
        if evalue or identity or alength:
            query += " ALLOW FILTERING"
        rmqLogger(self.channel, 'select', query)
        prep = self.session.prepare(query)
        return self.session.execute(prep, where)

    ## get iterator for lca records of a job
    def get_lca_records(self, job, fields, swap=None, evalue=None, identity=None, alength=None):
        """Return an iterator over job_lcas rows for one job, with optional
        cutoffs (same semantics as get_job_records)."""
        job = int(job)
        if swap:
            identity, alength = alength, identity
        query = "SELECT "+",".join(fields)+" FROM job_lcas WHERE version = ? AND job = ?"
        where = [self.version, job]
        if evalue:
            query += " AND exp_avg <= ?"
            where.append(int(evalue) * -1)
        if identity:
            query += " AND ident_avg >= ?"
            where.append(int(identity))
        if alength:
            query += " AND len_avg >= ?"
            where.append(int(alength))
        if evalue or identity or alength:
            query += " ALLOW FILTERING"
        rmqLogger(self.channel, 'select', query)
        prep = self.session.prepare(query)
        return self.session.execute(prep, where)

    ## get index for one md5
    def get_md5_record(self, job, md5):
        """Return [seek, length] for one md5 in a job, or None when missing
        or zero-length."""
        job = int(job)
        query = "SELECT seek, length FROM job_md5s WHERE version = ? AND job = ? AND md5 = ?"
        rmqLogger(self.channel, 'select', query)
        prep = self.session.prepare(query)
        rows = self.session.execute(prep, [self.version, job, md5])
        if (len(rows.current_rows) > 0) and (rows[0][1] > 0):
            return [ rows[0][0], rows[0][1] ]
        else:
            return None

    ## get indexes for given md5 list or cutoff values
    def get_md5_records(self, job, swap=None, md5s=None, evalue=None, identity=None, alength=None):
        """Return a sorted list of (seek, length) tuples, merging records
        that are adjacent in the backing file into single spans."""
        job = int(job)
        if swap:
            identity, alength = alength, identity
        found = []
        # NOTE(review): interpolated query (md5s assumed trusted hex strings).
        query = "SELECT seek, length FROM job_md5s WHERE version = %d AND job = %d"%(self.version, job)
        if md5s and (len(md5s) > 0):
            query += " AND md5 IN (" + ",".join(map(lambda x: "'"+x+"'", md5s)) + ")"
        elif evalue or identity or alength:
            if evalue:
                query += " AND exp_avg <= %d"%(int(evalue) * -1)
            if identity:
                query += " AND ident_avg >= %d"%(int(identity))
            if alength:
                query += " AND len_avg >= %d"%(int(alength))
            query += " ALLOW FILTERING"
        rmqLogger(self.channel, 'select', query)
        rows = self.session.execute(query)
        for r in rows:
            if r[1] == 0 or r[0] is None: # skip row if zero length, or row is corrupt
                continue
            # merge with the previous span when this record starts exactly
            # where it ends; otherwise insert in sorted order
            pos = bisect.bisect(found, (r[0], None))
            if (pos > 0) and ((found[pos-1][0] + found[pos-1][1]) == r[0]):
                found[pos-1] = (found[pos-1][0], found[pos-1][1] + r[1])
            else:
                bisect.insort(found, (r[0], r[1]))
        return found

    ## row counts based on info table counter
    def get_info_count(self, job, val):
        """Return the md5s/lcas counter from job_info (val is 'md5' or 'lca')."""
        job = int(job)
        query = "SELECT %ss FROM job_info WHERE version = %d AND job = %d"%(val, self.version, job)
        rmqLogger(self.channel, 'select', query)
        rows = self.session.execute(query)
        if len(rows.current_rows) > 0:
            return rows[0][0]
        else:
            return 0

    ## row counts based on data tables
    def get_data_count(self, job, val):
        """Return COUNT(*) from the job_md5s/job_lcas data table."""
        job = int(job)
        query = "SELECT COUNT(*) FROM job_%ss WHERE version = %d AND job = %d"%(val, self.version, job)
        rmqLogger(self.channel, 'select', query)
        rows = self.session.execute(query)
        if len(rows.current_rows) > 0:
            return rows[0][0]
        else:
            return 0

    ## does job exist
    def has_job(self, job):
        """Return 1 if a job_info row exists for this job, else 0."""
        job = int(job)
        query = "SELECT * FROM job_info WHERE version = %d AND job = %d"%(self.version, job)
        rmqLogger(self.channel, 'select', query)
        rows = self.session.execute(query)
        if len(rows.current_rows) > 0:
            return 1
        else:
            return 0

    ## job status
    def last_updated(self, job):
        """Return the job's updated_on timestamp, or None if unknown."""
        job = int(job)
        query = "SELECT updated_on FROM job_info WHERE version = %d AND job = %d"%(self.version, job)
        rmqLogger(self.channel, 'select', query)
        rows = self.session.execute(query)
        if len(rows.current_rows) > 0:
            return rows[0][0]
        else:
            return None

    def is_loaded(self, job):
        """Return 1 if the job exists and its loaded flag is set, else 0."""
        job = int(job)
        query = "SELECT loaded FROM job_info WHERE version = %d AND job = %d"%(self.version, job)
        rmqLogger(self.channel, 'select', query)
        rows = self.session.execute(query)
        if (len(rows.current_rows) > 0) and rows[0][0]:
            return 1
        else:
            return 0

    ## get all info
    def get_job_info(self, job):
        """Return dict(md5s, lcas, loaded='true'/'false', updated_on) or None."""
        job = int(job)
        query = "SELECT md5s, lcas, loaded, updated_on FROM job_info WHERE version = %d AND job = %d"%(self.version, job)
        rmqLogger(self.channel, 'select', query)
        rows = self.session.execute(query)
        if len(rows.current_rows) > 0:
            load = 'true' if rows[0][2] else 'false'
            return dict(md5s=rows[0][0], lcas=rows[0][1], loaded=load, updated_on=rows[0][3])
        else:
            return None

    ## update job_info table
    def set_loaded(self, job, loaded):
        """Set the job's loaded flag and bump updated_on (QUORUM write)."""
        job = int(job)
        value = True if loaded else False
        cmd = "UPDATE job_info SET loaded = ?, updated_on = ? WHERE version = ? AND job = ?"
        rmqLogger(self.channel, 'update', cmd)
        update = cql.BoundStatement(
            self.session.prepare(cmd),
            consistency_level=cql.ConsistencyLevel.QUORUM
        ).bind([value, datetime.datetime.now(), self.version, job])
        self.session.execute(update)

    def update_info_md5s(self, job, md5s, loaded):
        """Set the md5 counter and loaded flag for a job (QUORUM write)."""
        job = int(job)
        value = True if loaded else False
        cmd = "UPDATE job_info SET md5s = ?, loaded = ?, updated_on = ? WHERE version = ? AND job = ?"
        rmqLogger(self.channel, 'update', cmd)
        update = cql.BoundStatement(
            self.session.prepare(cmd),
            consistency_level=cql.ConsistencyLevel.QUORUM
        ).bind([int(md5s), value, datetime.datetime.now(), self.version, job])
        self.session.execute(update)

    def update_info_lcas(self, job, lcas, loaded):
        """Set the lca counter and loaded flag for a job (QUORUM write)."""
        job = int(job)
        value = True if loaded else False
        cmd = "UPDATE job_info SET lcas = ?, loaded = ?, updated_on = ? WHERE version = ? AND job = ?"
        rmqLogger(self.channel, 'update', cmd)
        update = cql.BoundStatement(
            self.session.prepare(cmd),
            consistency_level=cql.ConsistencyLevel.QUORUM
        ).bind([int(lcas), value, datetime.datetime.now(), self.version, job])
        self.session.execute(update)

    def insert_job_info(self, job):
        """Create a fresh job_info row with zero counters, not loaded."""
        job = int(job)
        cmd = "INSERT INTO job_info (version, job, md5s, lcas, updated_on, loaded) VALUES (?, ?, ?, ?, ?, ?)"
        rmqLogger(self.channel, 'insert', cmd)
        insert = cql.BoundStatement(
            self.session.prepare(cmd),
            consistency_level=cql.ConsistencyLevel.QUORUM
        ).bind([self.version, job, 0, 0, datetime.datetime.now(), False])
        self.session.execute(insert)

    ## add rows to job data tables, return current total loaded
    def insert_job_md5s(self, job, rows):
        """Batch-insert md5 rows and bump the job_info counter atomically.

        Returns the new running total of loaded md5 rows.
        """
        job = int(job)
        cmd = "INSERT INTO job_md5s (version, job, md5, abundance, exp_avg, ident_avg, len_avg, seek, length) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
        rmqLogger(self.channel, 'insert', cmd, len(rows))
        insert = self.session.prepare(cmd)
        batch = cql.BatchStatement(consistency_level=cql.ConsistencyLevel.QUORUM)
        for (md5, abundance, exp_avg, ident_avg, len_avg, seek, length) in rows:
            # normalize missing seek/length to 0 so the schema stays non-null
            if not seek:
                seek = 0
            if not length:
                length = 0
            batch.add(insert, (self.version, job, md5, int(abundance), float(exp_avg), float(ident_avg), float(len_avg), int(seek), int(length)))
        # update job_info
        loaded = self.get_info_count(job, 'md5') + len(rows)
        cmd = "UPDATE job_info SET md5s = ?, loaded = ?, updated_on = ? WHERE version = ? AND job = ?"
        rmqLogger(self.channel, 'update', cmd)
        update = self.session.prepare(cmd)
        batch.add(update, (loaded, False, datetime.datetime.now(), self.version, job))
        # execute atomic batch
        self.session.execute(batch)
        return loaded

    def insert_job_lcas(self, job, rows):
        """Batch-insert lca rows and bump the job_info counter atomically.

        Returns the new running total of loaded lca rows.
        """
        job = int(job)
        cmd = "INSERT INTO job_lcas (version, job, lca, abundance, exp_avg, ident_avg, len_avg, md5s, level) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
        rmqLogger(self.channel, 'insert', cmd, len(rows))
        insert = self.session.prepare(cmd)
        batch = cql.BatchStatement(consistency_level=cql.ConsistencyLevel.QUORUM)
        for (lca, abundance, exp_avg, ident_avg, len_avg, md5s, level) in rows:
            batch.add(insert, (self.version, job, lca, int(abundance), float(exp_avg), float(ident_avg), float(len_avg), int(md5s), int(level)))
        # update job_info
        loaded = self.get_info_count(job, 'lca') + len(rows)
        cmd = "UPDATE job_info SET lcas = ?, loaded = ?, updated_on = ? WHERE version = ? AND job = ?"
        rmqLogger(self.channel, 'update', cmd)
        update = self.session.prepare(cmd)
        batch.add(update, (loaded, False, datetime.datetime.now(), self.version, job))
        # execute atomic batch
        self.session.execute(batch)
        return loaded

    ## delete all job data
    def delete_job(self, job):
        """Delete a job's info, md5 and lca rows in one atomic batch."""
        job = int(job)
        batch = cql.BatchStatement(consistency_level=cql.ConsistencyLevel.QUORUM)
        idel = "DELETE FROM job_info WHERE version = %d AND job = %d"%(self.version, job)
        rmqLogger(self.channel, 'delete', idel)
        batch.add(cql.SimpleStatement(idel))
        mdel = "DELETE FROM job_md5s WHERE version = %d AND job = %d"%(self.version, job)
        rmqLogger(self.channel, 'delete', mdel)
        batch.add(cql.SimpleStatement(mdel))
        ldel = "DELETE FROM job_lcas WHERE version = %d AND job = %d"%(self.version, job)
        rmqLogger(self.channel, 'delete', ldel)
        batch.add(cql.SimpleStatement(ldel))
        self.session.execute(batch)
|
|
from aiohttp.web import Response, json_response
from functools import wraps
from flask import current_app
from uuid import UUID
import sentry_sdk
from zeus import auth
from zeus.exceptions import InvalidPublicKey, UnknownRevision
from zeus.utils.sentry import span
from .utils import get_vcs, save_revision
def log_errors(func):
    """Wrap a coroutine function so unhandled exceptions are logged and re-raised.

    The wrapper returns an already-created coroutine object (the caller
    awaits it), preserving the original call signature.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        async def _run():
            try:
                return await func(*args, **kwargs)
            except Exception as exc:
                current_app.logger.exception(str(exc))
                raise
        return _run()
    return wrapper
def api_request(func):
    """Decorator for aiohttp handlers that authenticates the tenant, checks
    repo permissions, resolves the repo's VCS backend, and maps known
    failures to JSON error responses.

    The wrapped handler is invoked as ``func(request, vcs, repo_id, ...)``.
    """
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        @log_errors
        async def tmp():
            repo_id = request.query.get("repo_id")
            if not repo_id:
                return json_response({"error": "missing_arg"}, status=403)
            repo_id = UUID(repo_id)
            tenant = auth.get_tenant_from_headers(request.headers)
            if not tenant:
                return json_response({"error": "access_denied"}, status=401)
            # Attach the authenticated user to the Sentry scope if known.
            if getattr(tenant, "user_id", None):
                with sentry_sdk.configure_scope() as scope:
                    scope.user = {"id": str(tenant.user_id)}
            if not tenant.has_permission(repo_id):
                current_app.logger.debug(
                    "vcs-server.invalid-request command=%s tenant=%s reason=invalid-repo",
                    func.__name__,
                    tenant,
                )
                return json_response({"error": "access_denied"}, status=401)
            with sentry_sdk.configure_scope() as scope:
                scope.set_tag("repository_id", str(repo_id))
            current_app.logger.debug(
                "vcs-server.request repo_id=%s command=%s tenant=%s",
                repo_id,
                func.__name__,
                tenant,
            )
            # Resolve the VCS backend for this repo from the database.
            async with request.app["db_pool"].acquire() as conn:
                vcs = await get_vcs(conn, repo_id)
            try:
                return await func(request, vcs, repo_id, *args, **kwargs)
            except InvalidPublicKey:
                current_app.logger.exception(
                    "vcs-server.invalid-pubkey repo_id=%s", repo_id
                )
                return json_response({"error": "invalid_pubkey"}, status=400)
            except UnknownRevision as exc:
                current_app.logger.info(
                    "vcs-server.invalid-revision repo_id=%s ref=%s",
                    repo_id,
                    exc.ref,
                    exc_info=True,
                )
                return json_response(
                    {"error": "invalid_ref", "ref": exc.ref}, status=400
                )
            except Exception:
                # catch-all boundary: log and return an opaque 500
                current_app.logger.exception(
                    "vcs-server.unhandled-error repo_id=%s", repo_id
                )
                return json_response({"error": "unknown_error"}, status=500)
        return tmp()
    return wrapper
@span("health_check")
@log_errors
async def health_check(request):
    """Liveness endpoint: returns a fixed JSON body without authentication."""
    return Response(text='{"ok": true}')
@span("stmt.log")
@log_errors
@api_request
async def stmt_log(request, vcs, repo_id):
    """Return the revision log for a repo (paginated via offset/limit),
    queueing each returned revision for background persistence."""
    queue = request.app["queue"]
    parent = request.query.get("parent")
    branch = request.query.get("branch")
    offset = int(request.query.get("offset") or 0)
    limit = int(request.query.get("limit") or 100)
    try:
        log_results = await vcs.log(
            parent=parent, branch=branch, offset=offset, limit=limit
        )
    except UnknownRevision:
        # we're running a lazy update here if it didnt already exist
        log_results = await vcs.log(
            parent=parent,
            branch=branch,
            offset=offset,
            limit=limit,
            update_if_exists=True,
        )
    results = []
    for revision in log_results:
        results.append(
            {
                "sha": revision.sha,
                "message": revision.message,
                "authors": revision.get_authors(),
                "author_date": revision.author_date.isoformat(),
                "committer": revision.get_committer(),
                # fall back to the author date when no committer date exists
                "committer_date": (
                    revision.committer_date or revision.author_date
                ).isoformat(),
                "parents": revision.parents,
            }
        )
        # hand each revision to the background worker queue
        await queue.put(["revision", {"repo_id": repo_id, "revision": revision}])
    return json_response({"log": results})
@span("stmt.resolve")
@log_errors
@api_request
async def stmt_resolve(request, vcs, repo_id):
    """Resolve a ref (branch/sha/tag) to a single revision, persist it, and
    return its details."""
    ref = request.query.get("ref")
    if not ref:
        return json_response({"error": "missing_arg"}, status=403)
    try:
        log_results = await vcs.log(parent=ref, limit=1)
    except UnknownRevision:
        current_app.logger.info(
            "stmt.resolve.unknown-revision-retry",
            extra={"repository_id": repo_id, "ref": ref},
        )
        # we're running a lazy update here if it didnt already exist
        log_results = await vcs.log(parent=ref, limit=1, update_if_exists=True)
    revision = log_results[0]
    async with request.app["db_pool"].acquire() as conn:
        await save_revision(conn, repo_id, revision)
    result = {
        "sha": revision.sha,
        "message": revision.message,
        "authors": revision.get_authors(),
        "author_date": revision.author_date.isoformat(),
        "committer": revision.get_committer(),
        # fall back to the author date when no committer date exists
        "committer_date": (revision.committer_date or revision.author_date).isoformat(),
        "parents": revision.parents,
    }
    return json_response({"resolve": result})
@span("stmt.export")
@log_errors
@api_request
async def stmt_export(request, vcs, repo_id):
    """Return the full patch export of a single commit (``sha`` query arg)."""
    sha = request.query.get("sha")
    if not sha:
        return json_response({"error": "missing_arg"}, status=403)
    return json_response({"export": await vcs.export(sha)})
@span("stmt.show")
@log_errors
@api_request
async def stmt_show(request, vcs, repo_id):
    """Return the contents of one file (``filename``) at one commit (``sha``)."""
    sha = request.query.get("sha")
    if not sha:
        return json_response({"error": "missing_arg"}, status=403)
    filename = request.query.get("filename")
    if not filename:
        return json_response({"error": "missing_arg"}, status=403)
    return json_response({"show": await vcs.show(sha, filename)})
@span("stmt.branches")
@log_errors
@api_request
async def stmt_branches(request, vcs, repo_id):
    """Return the list of branch names known for the repository."""
    return json_response({"branches": await vcs.get_known_branches()})
def register_api_routes(app):
    """Attach all vcs-server HTTP GET endpoints to the aiohttp application."""
    routes = (
        ("/stmt/branches", stmt_branches),
        ("/stmt/export", stmt_export),
        ("/stmt/log", stmt_log),
        ("/stmt/resolve", stmt_resolve),
        ("/stmt/show", stmt_show),
        ("/healthz", health_check),
    )
    for path, handler in routes:
        app.router.add_route("GET", path, handler)
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility functions for use with the mapreduce library."""
__all__ = [
"create_datastore_write_config",
"for_name",
"get_short_name",
"handler_for_name",
"is_generator",
"parse_bool",
"total_seconds",
"try_serialize_handler",
"try_deserialize_handler",
]
import inspect
import pickle
import types
from google.appengine.datastore import datastore_rpc
def _enum(**enums):
"""Helper to create enum."""
return type("Enum", (), enums)
def total_seconds(td):
    """Convert a timedelta to whole seconds, rounding fractions up.

    This is patterned after timedelta.total_seconds, which is only
    available in python 27.

    Args:
      td: a timedelta object.

    Returns:
      total seconds within a timedelta. Rounded up to seconds.
    """
    whole = td.days * 86400 + td.seconds
    # any leftover microseconds count as one extra second
    return whole + 1 if td.microseconds else whole
def for_name(fq_name, recursive=False):
  """Find class/function/method specified by its fully qualified name.

  Fully qualified can be specified as:
    * <module_name>.<class_name>
    * <module_name>.<function_name>
    * <module_name>.<class_name>.<method_name> (an unbound method will be
      returned in this case).

  for_name works by doing __import__ for <module_name>, and looks for
  <class_name>/<function_name> in module's __dict__/attrs. If fully qualified
  name doesn't contain '.', the current module will be used.

  Args:
    fq_name: fully qualified name of something to find
    recursive: internal-use flag: True on the nested call that resolves the
      module part, so a missing attribute propagates as KeyError to the
      caller instead of being re-wrapped as ImportError.

  Returns:
    class object.

  Raises:
    ImportError: when specified module could not be loaded or the class
    was not found in the module.
  """
  fq_name = str(fq_name)
  module_name = __name__
  short_name = fq_name
  if fq_name.rfind(".") >= 0:
    # Split "a.b.c" into module part "a.b" and attribute part "c".
    (module_name, short_name) = (fq_name[:fq_name.rfind(".")],
                                 fq_name[fq_name.rfind(".") + 1:])
  try:
    result = __import__(module_name, None, None, [short_name])
    return result.__dict__[short_name]
  except KeyError:
    # Module imported fine but does not define short_name.
    if recursive:
      raise
    else:
      raise ImportError("Could not find '%s' on path '%s'" % (
          short_name, module_name))
  except ImportError:
    # module_name may itself be <module>.<class>; retry by resolving the
    # module part recursively and reading short_name off the result.
    try:
      module = for_name(module_name, recursive=True)
      if hasattr(module, short_name):
        return getattr(module, short_name)
      else:
        raise KeyError()
    except KeyError:
      raise ImportError("Could not find '%s' on path '%s'" % (
          short_name, module_name))
    except ImportError:
      # The recursive resolution failed too; fall through and re-raise the
      # original ImportError below.
      pass
    raise
def handler_for_name(fq_name):
  """Resolves and instantiates handler by fully qualified name.

  First resolves the name using for_name call. Then if it resolves to a class,
  instantiates a class, if it resolves to a method - instantiates the class and
  binds method to the instance.

  Args:
    fq_name: fully qualified name of something to find.

  Returns:
    handler instance which is ready to be called.
  """
  resolved_name = for_name(fq_name)
  # NOTE: types.ClassType (old-style classes) and im_class (unbound methods)
  # are Python 2 only constructs; this helper targets Python 2.
  if isinstance(resolved_name, (type, types.ClassType)):
    # A class: instantiate it with no arguments.
    return resolved_name()
  elif isinstance(resolved_name, types.MethodType):
    # An unbound method: instantiate its class and return the bound method.
    return getattr(resolved_name.im_class(), resolved_name.__name__)
  else:
    # Plain function (or already an instance): return as-is.
    return resolved_name
def try_serialize_handler(handler):
"""Try to serialize map/reduce handler.
Args:
handler: handler function/instance. Handler can be a function or an
instance of a callable class. In the latter case, the handler will
be serialized across slices to allow users to save states.
Returns:
serialized handler string or None.
"""
if (isinstance(handler, types.InstanceType) or
(isinstance(handler, object) and
not inspect.isfunction(handler) and
not inspect.ismethod(handler)) and
hasattr(handler, "__call__")):
return pickle.dumps(handler)
return None
def try_deserialize_handler(serialized_handler):
  """Reverse function of try_serialize_handler.

  Args:
    serialized_handler: serialized handler str or None.

  Returns:
    handler instance, or None when nothing was serialized.
  """
  if not serialized_handler:
    return None
  return pickle.loads(serialized_handler)
def is_generator(obj):
  """Return true if the object is generator or generator function.

  Generator function objects provides same attributes as functions.
  See isfunction.__doc__ for attributes listing.

  Args:
    obj: an object to test.

  Returns:
    true if the object is a generator object, or a generator
    function/method.
  """
  if isinstance(obj, types.GeneratorType):
    return True
  # inspect.isgeneratorfunction (stdlib since Python 2.6) performs the
  # CO_GENERATOR co_flags test portably for both functions and methods.
  # The previous hand-rolled check read the Python-2-only `func_code`
  # attribute and raised AttributeError on Python 3.
  return inspect.isgeneratorfunction(obj)
def get_short_name(fq_name):
  """Returns the last dot-separated component of the name."""
  return fq_name.rsplit(".", 1)[-1]
def parse_bool(obj):
  """Return true if the object represents a truth value, false otherwise.

  For bool and numeric objects, uses Python's built-in bool function. For
  str objects, checks string against a list of possible truth values.

  Args:
    obj: object to determine boolean value of; expected

  Returns:
    Boolean value according to 5.1 of Python docs if object is not a str
    object. For str objects, return True if str is in TRUTH_VALUE_SET
    and False otherwise.

  http://docs.python.org/library/stdtypes.html
  """
  # Exact-type check on purpose: str subclasses fall through to bool().
  if type(obj) is not str:
    return bool(obj)
  return obj.lower() in ("true", "1", "yes", "t", "on")
def create_datastore_write_config(mapreduce_spec):
  """Creates datastore config to use in write operations.

  Args:
    mapreduce_spec: current mapreduce specification as MapreduceSpec.

  Returns:
    an instance of datastore_rpc.Configuration to use for all write
    operations in the mapreduce.
  """
  # "force_writes" is carried as a string param; default is off.
  if parse_bool(mapreduce_spec.params.get("force_writes", "false")):
    return datastore_rpc.Configuration(force_writes=True)
  return datastore_rpc.Configuration()
|
|
import os
import re
import sys
import uuid
import prettytable
from manilaclient import exceptions
from manilaclient.openstack.common import strutils
def arg(*args, **kwargs):
    """Decorator for CLI args."""
    def wrapper(func):
        add_arg(func, *args, **kwargs)
        return func
    return wrapper
def env(*var_names, **kwargs):
    """Return the first non-empty environment variable among *var_names*.

    Falls back to the ``default`` keyword argument, or '' when none given.
    """
    for name in var_names:
        found = os.environ.get(name, None)
        if found:
            return found
    return kwargs.get('default', '')
def add_arg(f, *args, **kwargs):
    """Bind CLI arguments to a shell.py `do_foo` function."""
    arguments = getattr(f, 'arguments', None)
    if arguments is None:
        arguments = f.arguments = []
    # NOTE(sirp): avoid dups that can occur when the module is shared across
    # tests.
    entry = (args, kwargs)
    if entry not in arguments:
        # Because of the sematics of decorator composition if we just append
        # to the options list positional options will appear to be backwards.
        arguments.insert(0, entry)
def add_resource_manager_extra_kwargs_hook(f, hook):
    """Adds hook to bind CLI arguments to ResourceManager calls.

    The `do_foo` calls in shell.py will receive CLI args and then in turn pass
    them through to the ResourceManager. Before passing through the args, the
    hooks registered here will be called, giving us a chance to add extra
    kwargs (taken from the command-line) to what's passed to the
    ResourceManager.
    """
    hooks = getattr(f, 'resource_manager_kwargs_hooks', None)
    if hooks is None:
        hooks = f.resource_manager_kwargs_hooks = []
    # De-duplicate by hook function name.
    already_registered = set(h.__name__ for h in hooks)
    if hook.__name__ not in already_registered:
        hooks.append(hook)
def get_resource_manager_extra_kwargs(f, args, allow_conflicts=False):
    """Return extra_kwargs by calling resource manager kwargs hooks."""
    extra_kwargs = {}
    for hook in getattr(f, "resource_manager_kwargs_hooks", []):
        hook_kwargs = hook(args)
        if not allow_conflicts:
            conflicting_keys = set(hook_kwargs) & set(extra_kwargs)
            if conflicting_keys:
                hook_name = hook.__name__
                raise Exception("Hook '%(hook_name)s' is attempting to redefine"
                                " attributes '%(conflicting_keys)s'"
                                % {'hook_name': hook_name,
                                   'conflicting_keys': conflicting_keys})
        extra_kwargs.update(hook_kwargs)
    return extra_kwargs
def unauthenticated(f):
    """Mark the decorated function as not requiring authentication.

    Usage:

        @unauthenticated
        def mymethod(f):
            ...
    """
    setattr(f, 'unauthenticated', True)
    return f
def isunauthenticated(f):
    """Report whether *f* was marked with the @unauthenticated decorator.

    Returns True only when the marker attribute is present and truthy.
    """
    try:
        return f.unauthenticated
    except AttributeError:
        return False
def service_type(stype):
    """Attach a 'service_type' attribute to the decorated function.

    Usage:

        @service_type('share')
        def mymethod(f):
            ...
    """
    def decorator(f):
        setattr(f, 'service_type', stype)
        return f
    return decorator
def get_service_type(f):
    """Return the service type attached to *f*, or None if unset."""
    try:
        return f.service_type
    except AttributeError:
        return None
def pretty_choice_list(l):
    """Render an iterable as a comma-separated list of quoted items."""
    quoted = ["'%s'" % item for item in l]
    return ', '.join(quoted)
def print_list(objs, fields, formatters={}):
    """Print *objs* as a left-aligned PrettyTable, one column per field.

    formatters maps a field name to a callable applied to each object;
    other fields are read as attributes (name lowercased, spaces replaced
    by underscores, except names listed in mixed_case_fields). Output is
    sorted by the first field. Python 2 only (print statement).

    NOTE(review): the mutable default `formatters={}` is never mutated
    here, so it is harmless as written.
    """
    mixed_case_fields = ['serverId']
    pt = prettytable.PrettyTable([f for f in fields], caching=False)
    pt.aligns = ['l' for f in fields]
    for o in objs:
        row = []
        for field in fields:
            if field in formatters:
                # Caller-supplied renderer wins over attribute lookup.
                row.append(formatters[field](o))
            else:
                if field in mixed_case_fields:
                    field_name = field.replace(' ', '_')
                else:
                    field_name = field.lower().replace(' ', '_')
                data = getattr(o, field_name, '')
                row.append(data)
        pt.add_row(row)
    print strutils.safe_encode(pt.get_string(sortby=fields[0]))
def print_dict(d, property="Property"):
    """Print dict *d* as a two-column PrettyTable sorted by the key column.

    *property* is the header of the key column. Python 2 only (print
    statement and dict.iteritems).
    """
    pt = prettytable.PrettyTable([property, 'Value'], caching=False)
    pt.aligns = ['l', 'l']
    [pt.add_row(list(r)) for r in d.iteritems()]
    print strutils.safe_encode(pt.get_string(sortby=property))
def find_resource(manager, name_or_id):
    """Helper for the _find_* methods.

    Resolution order: integer id, UUID, human_id, name, display_name.
    Raises exceptions.CommandError when nothing matches or when the match
    is ambiguous (NoUniqueMatch).
    """
    # first try to get entity as integer id
    try:
        if isinstance(name_or_id, int) or name_or_id.isdigit():
            return manager.get(int(name_or_id))
    except exceptions.NotFound:
        pass
    # now try to get entity as uuid
    try:
        uuid.UUID(strutils.safe_decode(name_or_id))
        return manager.get(name_or_id)
    except (ValueError, exceptions.NotFound):
        pass
    try:
        try:
            return manager.find(human_id=name_or_id)
        except exceptions.NotFound:
            pass
        # finally try to find entity by name
        try:
            return manager.find(name=name_or_id)
        except exceptions.NotFound:
            try:
                return manager.find(display_name=name_or_id)
            except (UnicodeDecodeError, exceptions.NotFound):
                try:
                    # Volumes does not have name, but display_name
                    # NOTE(review): this repeats the display_name lookup
                    # above; it only matters on the UnicodeDecodeError path.
                    return manager.find(display_name=name_or_id)
                except exceptions.NotFound:
                    msg = "No %s with a name or ID of '%s' exists." % \
                        (manager.resource_class.__name__.lower(), name_or_id)
                    raise exceptions.CommandError(msg)
    except exceptions.NoUniqueMatch:
        msg = ("Multiple %s matches found for '%s', use an ID to be more"
               " specific." % (manager.resource_class.__name__.lower(),
                               name_or_id))
        raise exceptions.CommandError(msg)
def _format_servers_list_networks(server):
    """Render server.networks as '; '-joined 'net=ip1, ip2' groups.

    Networks with no addresses are omitted.
    """
    groups = []
    for network, addresses in server.networks.items():
        if not addresses:
            continue
        groups.append("%s=%s" % (network, ', '.join(addresses)))
    return '; '.join(groups)
class HookableMixin(object):
    """Mixin so classes can register and run hooks."""
    # Shared registry: hook type name -> list of callables.
    _hooks_map = {}

    @classmethod
    def add_hook(cls, hook_type, hook_func):
        """Register *hook_func* under *hook_type*."""
        cls._hooks_map.setdefault(hook_type, []).append(hook_func)

    @classmethod
    def run_hooks(cls, hook_type, *args, **kwargs):
        """Invoke every hook registered under *hook_type* in order."""
        for hook_func in cls._hooks_map.get(hook_type) or []:
            hook_func(*args, **kwargs)
def safe_issubclass(*args):
    """Like issubclass, but will just return False if not a class."""
    try:
        return bool(issubclass(*args))
    except TypeError:
        # First argument was not a class.
        return False
def import_class(import_str):
    """Returns a class from a string including module and class."""
    module_name, _, attr_name = import_str.rpartition('.')
    __import__(module_name)
    return getattr(sys.modules[module_name], attr_name)
# Characters to drop / whitespace-and-hyphen runs to collapse when slugifying.
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
# http://code.activestate.com/recipes/
# 577257-slugify-make-a-string-usable-in-a-url-or-filename/
def slugify(value):
    """
    Normalizes string, converts to lowercase, removes non-alpha characters,
    and converts spaces to hyphens.

    From Django's "django/template/defaultfilters.py".
    """
    import unicodedata
    # NOTE: `unicode` makes this Python 2 only.
    if not isinstance(value, unicode):
        value = unicode(value)
    # Decompose accents, then drop anything non-ASCII.
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    value = unicode(_slugify_strip_re.sub('', value).strip().lower())
    return _slugify_hyphenate_re.sub('-', value)
|
|
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Simple Mako renderer.
Just a wrapper around the mako rendering library.
"""
import getopt
import imp
import os
import cPickle as pickle
import shutil
import sys
from mako.lookup import TemplateLookup
from mako.runtime import Context
from mako.template import Template
import bunch
import yaml
# Imports a plugin
def import_plugin(name):
  """Load the Python file *name* and return it as a fresh module object.

  The file contents are exec'd (Python 2 statement form) into a new module
  named after the file's basename; nothing is registered in sys.modules.
  """
  _, base_ex = os.path.split(name)
  base, _ = os.path.splitext(base_ex)
  with open(name, 'r') as plugin_file:
    plugin_code = plugin_file.read()
  plugin_module = imp.new_module(base)
  exec plugin_code in plugin_module.__dict__
  return plugin_module
def out(msg):
  """Write *msg* plus a newline to stderr (Python 2 print statement)."""
  print >> sys.stderr, msg
def showhelp():
  """Print the command-line usage summary to stderr."""
  out('mako-renderer.py [-o out] [-m cache] [-P preprocessed_input] [-d dict] [-d dict...]'
      ' [-t template] [-w preprocessed_output]')
def main(argv):
  """Parse command-line options, build the template dictionary, and render.

  Two phases: (1) assemble `dictionary` either from a preprocessed pickle
  (-P) or by merging YAML dicts (-d) and running plugins (-p); (2) render
  each positional-argument template file with Mako into -o.  Python 2 only
  (basestring, cPickle-style pickle usage).
  """
  got_input = False
  module_directory = None
  preprocessed_output = None
  dictionary = {}
  json_dict = {}
  got_output = False
  plugins = []
  output_name = None
  got_preprocessed_input = False
  output_merged = None
  try:
    opts, args = getopt.getopt(argv, 'hM:m:d:o:p:t:P:w:')
  except getopt.GetoptError:
    out('Unknown option')
    showhelp()
    sys.exit(2)
  for opt, arg in opts:
    if opt == '-h':
      out('Displaying showhelp')
      showhelp()
      sys.exit()
    elif opt == '-o':
      if got_output:
        out('Got more than one output')
        showhelp()
        sys.exit(3)
      got_output = True
      output_name = arg
    elif opt == '-m':
      if module_directory is not None:
        out('Got more than one cache directory')
        showhelp()
        sys.exit(4)
      module_directory = arg
    elif opt == '-M':
      if output_merged is not None:
        out('Got more than one output merged path')
        showhelp()
        sys.exit(5)
      output_merged = arg
    elif opt == '-P':
      # Preprocessed pickle input is mutually exclusive with -d dicts.
      assert not got_preprocessed_input
      assert json_dict == {}
      sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'plugins')))
      with open(arg, 'r') as dict_file:
        dictionary = pickle.load(dict_file)
      got_preprocessed_input = True
    elif opt == '-d':
      assert not got_preprocessed_input
      with open(arg, 'r') as dict_file:
        # NOTE(review): yaml.load without a Loader -- only safe for
        # trusted build-time inputs.
        bunch.merge_json(json_dict, yaml.load(dict_file.read()))
    elif opt == '-p':
      plugins.append(import_plugin(arg))
    elif opt == '-w':
      preprocessed_output = arg
  if not got_preprocessed_input:
    # Phase 1: plugins mutate json_dict, then it is bunch-ified into
    # `dictionary`, optionally dumped for reuse (-M merged YAML, -w pickle).
    for plugin in plugins:
      plugin.mako_plugin(json_dict)
    if output_merged:
      with open(output_merged, 'w') as yaml_file:
        yaml_file.write(yaml.dump(json_dict))
    for k, v in json_dict.items():
      dictionary[k] = bunch.to_bunch(v)
    if preprocessed_output:
      with open(preprocessed_output, 'w') as dict_file:
        pickle.dump(dictionary, dict_file)
  cleared_dir = False
  for arg in args:
    got_input = True
    with open(arg) as f:
      srcs = list(yaml.load_all(f.read()))
    for src in srcs:
      if isinstance(src, basestring):
        # Plain string source: a single template rendered to output_name.
        assert len(srcs) == 1
        template = Template(src,
                            filename=arg,
                            module_directory=module_directory,
                            lookup=TemplateLookup(directories=['.']))
        with open(output_name, 'w') as output_file:
          template.render_context(Context(output_file, **dictionary))
      else:
        # we have optional control data: this template represents
        # a directory
        if not cleared_dir:
          if not os.path.exists(output_name):
            pass
          elif os.path.isfile(output_name):
            os.unlink(output_name)
          else:
            shutil.rmtree(output_name, ignore_errors=True)
          cleared_dir = True
        items = []
        if 'foreach' in src:
          for el in dictionary[src['foreach']]:
            if 'cond' in src:
              # NOTE: rebinding `args` here shadows the getopt positional
              # list; safe because the outer for-loop iterator already
              # holds a reference to the original list.
              args = dict(dictionary)
              args['selected'] = el
              if not eval(src['cond'], {}, args):
                continue
            items.append(el)
          assert items
        else:
          items = [None]
        for item in items:
          args = dict(dictionary)
          args['selected'] = item
          item_output_name = os.path.join(
              output_name, Template(src['output_name']).render(**args))
          if not os.path.exists(os.path.dirname(item_output_name)):
            os.makedirs(os.path.dirname(item_output_name))
          template = Template(src['template'],
                              filename=arg,
                              module_directory=module_directory,
                              lookup=TemplateLookup(directories=['.']))
          with open(item_output_name, 'w') as output_file:
            template.render_context(Context(output_file, **args))
  if not got_input and not preprocessed_output:
    out('Got nothing to do')
    showhelp()

if __name__ == '__main__':
  main(sys.argv[1:])
|
|
# Copyright (c) 2015 Orange.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from oslo_utils import uuidutils
from neutron.extensions import l3
from neutron.tests.unit.api.v2 import test_base
from neutron_lib.api.definitions import bgpvpn as bgpvpn_api_def
from neutron_lib.api.definitions import bgpvpn_routes_control as rc_api_def
from neutron_lib.utils import test
from webob import exc
from networking_bgpvpn.neutron.extensions import bgpvpn
from networking_bgpvpn.neutron.extensions \
import bgpvpn_routes_control as bgpvpn_rc
from networking_bgpvpn.tests.unit.extensions import test_bgpvpn_rc_base
# Shorthands for the test helpers and the URI layout shared by all tests below.
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
BGPVPN_PREFIX = 'bgpvpn'
BGPVPN_URI = BGPVPN_PREFIX + '/' + 'bgpvpns'
class TestPlugin(bgpvpn.BGPVPNPluginBase,
                 bgpvpn_rc.BGPVPNRoutesControlPluginBase):
    """Mocked service plugin implementing both bgpvpn API extensions."""

    # Neutron discovers a plugin's extensions through this attribute; the
    # original name was garbled ("supported_exsupported_extension_aliases"),
    # which the extension framework would silently ignore.
    supported_extension_aliases = [bgpvpn_api_def.ALIAS,
                                  rc_api_def.ALIAS]


TEST_PLUGIN_CLASS = '%s.%s' % (TestPlugin.__module__, TestPlugin.__name__)
class BgpvpnRoutesControlExtensionTestCase(
        test_bgpvpn_rc_base.BGPVPNRCExtensionTestCase):
    """API-layer tests for the bgpvpn-routes-control association resources."""

    def setUp(self):
        super(BgpvpnRoutesControlExtensionTestCase, self).setUp()
        self._setUpExtensions(
            TEST_PLUGIN_CLASS,
            bgpvpn_api_def.ALIAS,
            [l3.L3, bgpvpn.Bgpvpn, bgpvpn_rc.Bgpvpn_routes_control],
            BGPVPN_PREFIX,
            translate_resource_name=True)
        self.instance = self.plugin.return_value
        self.bgpvpn_id = _uuid()
        self.net_id = _uuid()
        self.router_id = _uuid()
        self.net_assoc_id = _uuid()
        self.router_assoc_id = _uuid()
        self.port_id = _uuid()
        self.port_assoc_id = _uuid()
        # Sub-resource URIs hang off a single parent bgpvpn.
        self.NET_ASSOC_URI = BGPVPN_URI + '/' + self.bgpvpn_id + \
            '/network_associations'
        self.ROUTER_ASSOC_URI = BGPVPN_URI + '/' + self.bgpvpn_id + \
            '/router_associations'
        self.PORT_ASSOC_URI = BGPVPN_URI + '/' + self.bgpvpn_id + \
            '/port_associations'

    def _invalid_data_for_creation(self, target):
        """Payloads that must be rejected for any association type."""
        return [None, {}, {target: None}, {target: {}}]

    @test.unstable_test("bug/1791256")
    def test_router_association_update(self):
        data = {
            'router_association': {
                'router_id': self.router_id,
                'project_id': _uuid()
            }
        }
        self.api.post(_get_path(self.ROUTER_ASSOC_URI, fmt=self.fmt),
                      self.serialize(data),
                      content_type='application/%s' % self.fmt,
                      expect_errors=True)
        update_data = {'router_association': {
            'advertise_extra_routes': False,
        }}
        return_value = {
            'project_id': _uuid(),
            'advertise_extra_routes': False,
        }
        self.instance.update_bgpvpn_router_association.return_value = (
            return_value)
        res = self.api.put(_get_path(self.ROUTER_ASSOC_URI,
                                     id=self.router_assoc_id,
                                     fmt=self.fmt),
                           self.serialize(update_data),
                           content_type='application/%s' % self.fmt)
        self.instance.update_bgpvpn_router_association.assert_called_with(
            mock.ANY, self.router_assoc_id,
            bgpvpn_id=self.bgpvpn_id, router_association=update_data
        )
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('router_association', res)
        self.assertEqual(return_value, res['router_association'])

    def _invalid_data_for_port_assoc(self):
        """(payload fragment, expected error substring) pairs for bad routes."""
        return [
            ({'advertise_fixed_ips': 'foo'},
             "cannot be converted to boolean"),
            ({'routes': 'bla'},
             "is not a list"),
            ({'routes': [{
                'type': 'flumox'}]},
             "No valid key specs"),
            ({'routes': [{
                'type': 'prefix',
                'something_else_than_prefix': 'foo'
            }]},
             "No valid key specs"),
            ({'routes': [{
                'type': 'prefix',
                'prefix': '1.1.1.352'
            }]},
             "No valid key specs"),
            ({'routes': [{
                'type': 'prefix',
                'something_else_than_bgpvpn_id': 'foo'
            }]},
             "No valid key specs"),
            ({'routes': [{
                'type': 'prefix',
                'prefix': '12.1.2.3',
                'local_pref': -1,
            }]},
             "No valid key specs"),
            ({'routes': [{
                'type': 'prefix',
                'prefix': '12.1.2.3/20',
                'local_pref': 2 ** 32,
            }]},
             "No valid key specs")
        ]

    def test_port_association_create(self):
        data = {
            'port_association': {
                'port_id': self.port_id,
                'tenant_id': _uuid()
            }
        }
        return_value = copy.copy(data['port_association'])
        return_value.update({'id': self.port_assoc_id})
        self.instance.create_bgpvpn_port_association.return_value = \
            return_value
        res = self.api.post(_get_path(self.PORT_ASSOC_URI, fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt,
                            expect_errors=True)
        self.assertTrue(self.instance.create_bgpvpn_port_association.called)
        self.assertEqual(self.bgpvpn_id,
                         self.instance.create_bgpvpn_port_association.
                         call_args[1]['bgpvpn_id'])
        self.assertDictSupersetOf(
            data['port_association'],
            self.instance.create_bgpvpn_port_association.
            call_args[1]['port_association']['port_association'])
        # Deserialize before membership checks: the original asserted
        # 'port_association' in the raw response object (a body-text match)
        # instead of in the parsed dict, unlike the sibling tests.
        res = self.deserialize(res)
        self.assertIn('port_association', res)
        self.assertDictSupersetOf(return_value,
                                  res['port_association'])

    def _test_port_association_create_with_invalid_data(self, port_assoc, msg):
        """POST *port_assoc* and expect a 400 carrying *msg*, with no plugin call."""
        res = self.api.post(_get_path(self.PORT_ASSOC_URI, fmt=self.fmt),
                            self.serialize(port_assoc),
                            content_type='application/%s' % self.fmt,
                            expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
        self.assertFalse(
            self.instance.create_bgpvpn_port_association.called)
        self.assertIn(msg, str(res.body))

    def test_port_association_create_with_invalid_assoc(self):
        for data in self._invalid_data_for_creation('port_association'):
            res = self.api.post(_get_path(self.PORT_ASSOC_URI, fmt=self.fmt),
                                self.serialize(data),
                                content_type='application/%s' % self.fmt,
                                expect_errors=True)
            self.assertFalse(
                self.instance.create_bgpvpn_port_association.called)
            self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_port_association_create_with_invalid_content(self):
        for port_assoc_attrs, msg in self._invalid_data_for_port_assoc():
            data = {'port_association': {
                'port_id': self.port_id,
                'project_id': _uuid()
            }
            }
            data['port_association'].update(port_assoc_attrs)
            self._test_port_association_create_with_invalid_data(data, msg)

    def test_port_association_get(self):
        return_value = {'id': self.port_assoc_id,
                        'port_id': self.port_id}
        self.instance.get_bgpvpn_port_association.return_value = \
            return_value
        res = self.api.get(_get_path(self.PORT_ASSOC_URI,
                                     id=self.port_assoc_id,
                                     fmt=self.fmt))
        self.instance.get_bgpvpn_port_association.assert_called_with(
            mock.ANY, self.port_assoc_id, self.bgpvpn_id, fields=mock.ANY
        )
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('port_association', res)
        self.assertEqual(return_value, res['port_association'])

    def test_port_association_update(self):
        data = {
            'port_association': {
                'port_id': self.port_id,
                'project_id': _uuid()
            }
        }
        self.api.post(_get_path(self.PORT_ASSOC_URI, fmt=self.fmt),
                      self.serialize(data),
                      content_type='application/%s' % self.fmt,
                      expect_errors=True)
        update_data = {'port_association': {
            'advertise_fixed_ips': False,
            'routes': [
                {'type': 'prefix',
                 'prefix': '1.2.3.0/24',
                 'local_pref': 42},
                {'type': 'bgpvpn',
                 'bgpvpn_id': _uuid()},
            ]
        }}
        return_value = {
            'port_id': self.port_id,
            'project_id': _uuid(),
            'advertise_fixed_ips': False,
            'routes': [
                {'type': 'prefix',
                 'prefix': '1.2.3.0/24',
                 'local_pref': 42},
                {'type': 'bgpvpn',
                 'prefix': '1.2.3.0/24'},
            ]
        }
        self.instance.update_bgpvpn_port_association.return_value = (
            return_value)
        res = self.api.put(_get_path(self.PORT_ASSOC_URI,
                                     id=self.port_assoc_id,
                                     fmt=self.fmt),
                           self.serialize(update_data),
                           content_type='application/%s' % self.fmt)
        self.instance.update_bgpvpn_port_association.assert_called_with(
            mock.ANY, self.port_assoc_id,
            bgpvpn_id=self.bgpvpn_id, port_association=update_data
        )
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('port_association', res)
        self.assertEqual(res['port_association'], return_value)

    def test_port_association_delete(self):
        res = self.api.delete(_get_path(self.PORT_ASSOC_URI,
                                        id=self.port_assoc_id,
                                        fmt=self.fmt))
        self.instance.delete_bgpvpn_port_association.assert_called_with(
            mock.ANY, self.port_assoc_id, self.bgpvpn_id)
        self.assertEqual(res.status_int, exc.HTTPNoContent.code)
|
|
# coding=utf-8
import re
# First pass: break "xAbc" runs; second pass: break lower/digit->upper joints.
_RE_FIND_FIRST_CAP = re.compile('(.)([A-Z][a-z]+)')
_RE_SPAN_OF_CAPS = re.compile('([a-z0-9])([A-Z])')


def camelcase_to_underscore(name):
    """Convert a CamelCase identifier to snake_case."""
    separated = _RE_FIND_FIRST_CAP.sub(r'\1_\2', name)
    separated = _RE_SPAN_OF_CAPS.sub(r'\1_\2', separated)
    return separated.lower()
class binary:
    """
    Store the value in bits so we can convert between things easily
    """
    value = None

    # Every accepted unit alias mapped to its accessor method name; a single
    # dict lookup replaces the original if/elif cascade in do().
    _UNIT_METHODS = {
        'bit': 'bit', 'b': 'bit',
        'kilobit': 'kilobit', 'kbit': 'kilobit', 'Kibit': 'kilobit',
        'megabit': 'megabit', 'Mbit': 'megabit', 'Mibit': 'megabit',
        'gigabit': 'gigabit', 'Gbit': 'gigabit', 'Gibit': 'gigabit',
        'terabit': 'terabit', 'Tbit': 'terabit', 'Tibit': 'terabit',
        'petabit': 'petabit', 'Pbit': 'petabit', 'Pibit': 'petabit',
        'exabit': 'exabit', 'Ebit': 'exabit', 'Eibit': 'exabit',
        'zettabit': 'zettabit', 'Zbit': 'zettabit', 'Zibit': 'zettabit',
        'yottabit': 'yottabit', 'Ybit': 'yottabit', 'Yibit': 'yottabit',
        'byte': 'byte', 'B': 'byte',
        'kilobyte': 'kilobyte', 'kB': 'kilobyte', 'KiB': 'kilobyte',
        'kb': 'kilobyte',
        'megabyte': 'megabyte', 'MB': 'megabyte', 'MiB': 'megabyte',
        'Mbyte': 'megabyte', 'mb': 'megabyte',
        'gigabyte': 'gigabyte', 'GB': 'gigabyte', 'GiB': 'gigabyte',
        'gb': 'gigabyte',
        'terabyte': 'terabyte', 'TB': 'terabyte', 'TiB': 'terabyte',
        'tb': 'terabyte',
        'petabyte': 'petabyte', 'PB': 'petabyte', 'PiB': 'petabyte',
        'pb': 'petabyte',
        'exabyte': 'exabyte', 'EB': 'exabyte', 'EiB': 'exabyte',
        'eb': 'exabyte',
        'zettabyte': 'zettabyte', 'ZB': 'zettabyte', 'ZiB': 'zettabyte',
        'zb': 'zettabyte',
        'yottabyte': 'yottabyte', 'YB': 'yottabyte', 'YiB': 'yottabyte',
        'yb': 'yottabyte',
    }

    def __init__(self, value=None, unit=None):
        self.do(value=value, unit=unit)

    @staticmethod
    def convert(value=None, oldUnit=None, newUnit=None):
        """One-shot conversion: load *value* as *oldUnit*, read as *newUnit*."""
        return binary(value=value, unit=oldUnit).get(unit=newUnit)

    def set(self, value, unit=None):
        return self.do(value=value, unit=unit)

    def get(self, unit=None):
        return self.do(unit=unit)

    def do(self, value=None, unit=None):
        """Dispatch a get (value is None) or set to the handler for *unit*."""
        if not unit:
            return self.bit(value=value)
        method_name = self._UNIT_METHODS.get(unit)
        if method_name is None:
            raise NotImplementedError("unit %s" % unit)
        return getattr(self, method_name)(value=value)

    def bit(self, value=None):
        # Canonical storage: everything is kept as a float count of bits.
        if value is None:
            return self.value
        self.value = float(value)

    def convertb(self, value, source, offset=1):
        """Get/set through *source*, scaled by 1024**offset."""
        factor = pow(1024, offset)
        if value is None:
            return source() / factor
        source(value * factor)

    def kilobit(self, value=None):
        return self.convertb(value, self.bit)

    def megabit(self, value=None):
        return self.convertb(value, self.bit, 2)

    def gigabit(self, value=None):
        return self.convertb(value, self.bit, 3)

    def terabit(self, value=None):
        return self.convertb(value, self.bit, 4)

    def petabit(self, value=None):
        return self.convertb(value, self.bit, 5)

    def exabit(self, value=None):
        return self.convertb(value, self.bit, 6)

    def zettabit(self, value=None):
        return self.convertb(value, self.bit, 7)

    def yottabit(self, value=None):
        return self.convertb(value, self.bit, 8)

    def byte(self, value=None):
        # 8 bits per byte.
        if value is None:
            return self.value / 8
        self.value = float(value) * 8

    def kilobyte(self, value=None):
        return self.convertb(value, self.byte)

    def megabyte(self, value=None):
        return self.convertb(value, self.byte, 2)

    def gigabyte(self, value=None):
        return self.convertb(value, self.byte, 3)

    def terabyte(self, value=None):
        return self.convertb(value, self.byte, 4)

    def petabyte(self, value=None):
        return self.convertb(value, self.byte, 5)

    def exabyte(self, value=None):
        return self.convertb(value, self.byte, 6)

    def zettabyte(self, value=None):
        return self.convertb(value, self.byte, 7)

    def yottabyte(self, value=None):
        return self.convertb(value, self.byte, 8)
class time:
    """
    Store the value in miliseconds so we can convert between things easily
    """
    value = None

    # Lowercased unit alias -> accessor method name (do() lowercases input).
    _UNIT_METHODS = {
        'millisecond': 'millisecond', 'milliseconds': 'millisecond',
        'ms': 'millisecond',
        'second': 'second', 'seconds': 'second', 's': 'second',
        'minute': 'minute', 'minutes': 'minute', 'm': 'minute',
        'hour': 'hour', 'hours': 'hour', 'h': 'hour',
        'day': 'day', 'days': 'day', 'd': 'day',
        'year': 'year', 'years': 'year', 'y': 'year',
        'microsecond': 'microsecond', 'microseconds': 'microsecond',
        'us': 'microsecond',
        'nanosecond': 'nanosecond', 'nanoseconds': 'nanosecond',
        'ns': 'nanosecond',
    }

    def __init__(self, value=None, unit=None):
        self.do(value=value, unit=unit)

    @staticmethod
    def convert(value=None, oldUnit=None, newUnit=None):
        """One-shot conversion: load *value* as *oldUnit*, read as *newUnit*."""
        return time(value=value, unit=oldUnit).get(unit=newUnit)

    def set(self, value, unit=None):
        return self.do(value=value, unit=unit)

    def get(self, unit=None):
        return self.do(unit=unit)

    def do(self, value=None, unit=None):
        """Dispatch a get (value is None) or set to the handler for *unit*."""
        if not unit:
            return self.millisecond(value=value)
        method_name = self._UNIT_METHODS.get(unit.lower())
        if method_name is None:
            raise NotImplementedError("unit %s" % unit)
        return getattr(self, method_name)(value=value)

    def millisecond(self, value=None):
        # Canonical storage: a float count of milliseconds.
        if value is None:
            return self.value
        self.value = float(value)

    def second(self, value=None):
        if value is None:
            return self.millisecond() / 1000
        self.millisecond(value * 1000)

    # The setters below chain through the next-smaller unit; the outer
    # millisecond(...) call receives None (setters return nothing) and is
    # therefore a no-op read, preserved from the original implementation.
    def minute(self, value=None):
        if value is None:
            return self.second() / 60
        self.millisecond(self.second(value * 60))

    def hour(self, value=None):
        if value is None:
            return self.minute() / 60
        self.millisecond(self.minute(value * 60))

    def day(self, value=None):
        if value is None:
            return self.hour() / 24
        self.millisecond(self.hour(value * 24))

    def year(self, value=None):
        """
        We do *NOT* know for what year we are converting so lets assume the
        year has 365 days.
        """
        if value is None:
            return self.day() / 365
        self.millisecond(self.day(value * 365))

    def microsecond(self, value=None):
        if value is None:
            return self.millisecond() * 1000
        self.millisecond(value / 1000)

    def nanosecond(self, value=None):
        if value is None:
            return self.microsecond() * 1000
        self.millisecond(self.microsecond(value / 1000))
|
|
from . import six
import contextlib
import os
import sys
import re
from itertools import takewhile
from .exceptions import BundleError
__all__ = ('md5_constructor', 'pickle', 'set', 'StringIO',
'common_path_prefix', 'working_directory', 'is_url')
if sys.version_info >= (2, 5):
import hashlib
md5_constructor = hashlib.md5
else:
import md5
md5_constructor = md5.new
try:
import cPickle as pickle
except ImportError:
import pickle
try:
set
except NameError:
from sets import Set as set
else:
set = set
from .six import StringIO
try:
from urllib import parse as urlparse
except ImportError: # Python 2
import urlparse
import urllib
def hash_func(data):
    """Hash *data* with the bundle cache's MD5 helper (cache.make_md5).

    Imported lazily to avoid a circular import with .cache.
    """
    from .cache import make_md5
    return make_md5(data)
# Splits paths on runs of either / or \ so mixed-separator inputs compare.
_directory_separator_re = re.compile(r"[/\\]+")


def common_path_prefix(paths, sep=os.path.sep):
    """os.path.commonpath() is completely in the wrong place; it's
    useless with paths since it only looks at one character at a time,
    see http://bugs.python.org/issue10395

    This replacement is from:
        http://rosettacode.org/wiki/Find_Common_Directory_Path#Python
    """
    def _all_equal(level):
        return all(name == level[0] for name in level[1:])

    # zip(*...) lines up each directory level across every path; takewhile
    # keeps only the leading levels on which all paths agree.
    levels = zip(*(_directory_separator_re.split(p) for p in paths))
    return sep.join(level[0] for level in takewhile(_all_equal, levels))
@contextlib.contextmanager
def working_directory(directory=None, filename=None):
    """A context manager which changes the working directory to the given
    path, and then changes it back to its previous value on exit.

    Filters will often find this helpful.

    Instead of a ``directory``, you may also give a ``filename``, and the
    working directory will be set to the directory that file is in.
    """
    # Exactly one of the two arguments must be supplied (xor).
    assert bool(directory) != bool(filename)
    target = directory if directory else os.path.dirname(filename)
    previous = os.getcwd()
    os.chdir(target)
    try:
        yield
    finally:
        os.chdir(previous)
def make_option_resolver(clazz=None, attribute=None, classes=None,
                         allow_none=True, desc=None):
    """Returns a function which can resolve an option to an object.

    The option may given as an instance or a class (of ``clazz``, or
    duck-typed with an attribute ``attribute``), or a string value referring
    to a class as defined by the registry in ``classes``.

    This support arguments, so an option may look like this:

        cache:/tmp/cachedir

    If this must instantiate a class, it will pass such an argument along,
    if given. In addition, if the class to be instantiated has a classmethod
    ``make()``, this method will be used as a factory, and will be given an
    Environment object (if one has been passed to the resolver). This allows
    classes that need it to initialize themselves based on an Environment.

    :param clazz: the supported class, or a zero-argument callable that
        returns it (evaluated lazily per resolution).
    :param attribute: name of an attribute used for duck-typed matching.
    :param classes: registry dict mapping string keys to classes.
    :param allow_none: if true, a falsy option resolves to ``None``.
    :param desc: human-readable description used in error messages.
    """
    assert clazz or attribute or classes
    # BUG FIX: this used to be ``' to %s' % desc if desc else None`` which
    # rendered a literal "None" into both the ValueError message
    # ("cannot be resolvedNone") and the generated docstring
    # ("Resolve ``option``None.") whenever no desc was given.
    desc_string = ' to %s' % desc if desc else ''

    def instantiate(clazz, env, *a, **kw):
        # Create an instance of clazz, via the Factory if one is defined,
        # passing along the Environment, or creating the class directly.
        if hasattr(clazz, 'make'):
            # make() protocol is that if e.g. the get_manifest() resolver takes
            # an env, then the first argument of the factory is the env.
            args = (env,) + a if env is not None else a
            return clazz.make(*args, **kw)
        return clazz(*a, **kw)

    def resolve_option(option, env=None):
        # ``clazz`` may be a factory callable returning the class; only
        # call it when the option itself is not a type.
        the_clazz = clazz() if callable(clazz) and not isinstance(option, type) else clazz

        if not option and allow_none:
            return None

        # If the value has one of the support attributes (duck-typing).
        if attribute and hasattr(option, attribute):
            if isinstance(option, type):
                return instantiate(option, env)
            return option

        # If it is the class we support.
        if the_clazz and isinstance(option, the_clazz):
            return option
        elif isinstance(option, type) and issubclass(option, the_clazz):
            return instantiate(option, env)

        # If it is a string, split off an optional ":argument" and look the
        # key up in the registry.
        elif isinstance(option, six.string_types):
            parts = option.split(':', 1)
            key = parts[0]
            arg = parts[1] if len(parts) > 1 else None
            if key in classes:
                return instantiate(classes[key], env, *([arg] if arg else []))

        raise ValueError('%s cannot be resolved%s' % (option, desc_string))
    resolve_option.__doc__ = """Resolve ``option``%s.""" % desc_string

    return resolve_option
def RegistryMetaclass(clazz=None, attribute=None, allow_none=True, desc=None):
    """Returns a metaclass which will keep a registry of all subclasses, keyed
    by their ``id`` attribute.

    The metaclass will also have a ``resolve`` method which can turn a string
    into an instance of one of the classes (based on ``make_option_resolver``).
    """
    # __eq__ implementation injected into classes that don't define their own.
    def eq(self, other):
        """Return equality with config values that instantiate this."""
        return (hasattr(self, 'id') and self.id == other) or\
               id(self) == id(other)
    # __unicode__/__str__ implementation; deliberately shadows the Python 2
    # builtin name ``unicode`` inside this factory's scope.
    def unicode(self):
        return "%s" % (self.id if hasattr(self, 'id') else repr(self))
    class Metaclass(type):
        # Maps subclass ``id`` -> subclass; filled in as classes are defined.
        REGISTRY = {}
        def __new__(mcs, name, bases, attrs):
            # Inject default dunder implementations only where the class
            # being created does not provide its own.
            if not '__eq__' in attrs:
                attrs['__eq__'] = eq
            if not '__unicode__' in attrs:
                attrs['__unicode__'] = unicode
            if not '__str__' in attrs:
                attrs['__str__'] = unicode
            new_klass = type.__new__(mcs, name, bases, attrs)
            # Only classes that declare an ``id`` participate in resolution.
            if hasattr(new_klass, 'id'):
                mcs.REGISTRY[new_klass.id] = new_klass
            return new_klass
        # NOTE: REGISTRY here resolves from the enclosing class namespace at
        # class-body execution time — it is the same dict __new__ mutates.
        resolve = staticmethod(make_option_resolver(
            clazz=clazz,
            attribute=attribute,
            allow_none=allow_none,
            desc=desc,
            classes=REGISTRY
        ))
    return Metaclass
def cmp_debug_levels(level1, level2):
    """cmp() for debug levels: positive if ``level1`` is higher than
    ``level2``, zero if equal, negative otherwise."""
    level_ints = {False: 0, 'merge': 1, True: 2}
    try:
        rank1 = level_ints[level1]
        rank2 = level_ints[level2]
    except KeyError as e:
        # Not sure if a dependency on BundleError is proper here. Validating
        # debug values should probably be done on assign. But because this
        # needs to happen in two places (Environment and Bundle) we do it here.
        raise BundleError('Invalid debug value: %s' % e)
    # Python 3 replacement for the removed builtin cmp().
    return (rank1 > rank2) - (rank1 < rank2)
def is_url(s):
    """Return True if *s* is a string that looks like a URL (it has both a
    scheme longer than one character and a network location)."""
    if not isinstance(s, str):
        return False
    parts = urlparse.urlsplit(s)
    if not (parts.scheme and parts.netloc):
        return False
    # A one-letter scheme is more likely a Windows drive ("c:/...").
    return len(parts.scheme) > 1
|
|
import warnings
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize
def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=10000, n_iter=10):
    """
    A function to find the maximum of the acquisition function

    It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
    optimization method. First by sampling `n_warmup` points at random,
    and then running L-BFGS-B from `n_iter` random starting points.

    Parameters
    ----------
    :param ac:
        The acquisition function object that return its point-wise value.

    :param gp:
        A gaussian process fitted to the relevant data.

    :param y_max:
        The current maximum known value of the target function.

    :param bounds:
        The variables bounds to limit the search of the acq max,
        shape (n_dims, 2) with [lower, upper] per row.

    :param random_state:
        instance of np.RandomState random number generator

    :param n_warmup:
        number of times to randomly sample the acquisition function

    :param n_iter:
        number of times to run scipy.minimize

    Returns
    -------
    :return: x_max, The arg max of the acquisition function.
    """
    # Warm up with random points
    x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
                                   size=(n_warmup, bounds.shape[0]))
    ys = ac(x_tries, gp=gp, y_max=y_max)
    x_max = x_tries[ys.argmax()]
    max_acq = ys.max()

    # Explore the parameter space more thoroughly
    x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
                                   size=(n_iter, bounds.shape[0]))
    for x_try in x_seeds:
        # Find the minimum of minus the acquisition function
        res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
                       x_try.reshape(1, -1),
                       bounds=bounds,
                       method="L-BFGS-B")

        # See if success
        if not res.success:
            continue

        # BUG FIX: depending on the SciPy version ``res.fun`` is either a
        # scalar or a 1-element array; the old ``res.fun[0]`` raised
        # TypeError on the scalar form. squeeze+float handles both.
        fun_min = float(np.squeeze(res.fun))

        # Store it if better than previous minimum(maximum).
        if max_acq is None or -fun_min >= max_acq:
            x_max = res.x
            max_acq = -fun_min

    # Clip output to make sure it lies within the bounds. Due to floating
    # point technicalities this is not always the case.
    return np.clip(x_max, bounds[:, 0], bounds[:, 1])
class UtilityFunction(object):
    """
    An object to compute the acquisition functions.
    """

    # Acquisition kinds this class understands.
    _KINDS = ('ucb', 'ei', 'poi')

    def __init__(self, kind, kappa, xi, kappa_decay=1, kappa_decay_delay=0):
        self.kappa = kappa
        self._kappa_decay = kappa_decay
        self._kappa_decay_delay = kappa_decay_delay
        self.xi = xi
        self._iters_counter = 0
        if kind not in self._KINDS:
            err = "The utility function " \
                  "{} has not been implemented, " \
                  "please choose one of ucb, ei, or poi.".format(kind)
            raise NotImplementedError(err)
        self.kind = kind

    def update_params(self):
        """Advance the iteration counter and decay kappa once past the delay."""
        self._iters_counter += 1
        past_delay = self._iters_counter > self._kappa_decay_delay
        if self._kappa_decay < 1 and past_delay:
            self.kappa *= self._kappa_decay

    def utility(self, x, gp, y_max):
        """Evaluate the configured acquisition function at *x*."""
        dispatch = {
            'ucb': lambda: self._ucb(x, gp, self.kappa),
            'ei': lambda: self._ei(x, gp, y_max, self.xi),
            'poi': lambda: self._poi(x, gp, y_max, self.xi),
        }
        handler = dispatch.get(self.kind)
        return handler() if handler is not None else None

    @staticmethod
    def _ucb(x, gp, kappa):
        """Upper confidence bound: mean + kappa * std."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)
        return mean + kappa * std

    @staticmethod
    def _ei(x, gp, y_max, xi):
        """Expected improvement over *y_max*, shifted by *xi*."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)
        improvement = mean - y_max - xi
        z = improvement / std
        return improvement * norm.cdf(z) + std * norm.pdf(z)

    @staticmethod
    def _poi(x, gp, y_max, xi):
        """Probability of improvement over *y_max*, shifted by *xi*."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)
        return norm.cdf((mean - y_max - xi) / std)
def load_logs(optimizer, logs):
    """Replay observations from JSON-lines log file(s) into *optimizer*.

    *logs* may be a single path or an iterable of paths; each line must be
    a JSON object. Lines without both "params" and "target" are skipped.
    Returns the optimizer for convenience.
    """
    import json

    if isinstance(logs, str):
        logs = [logs]

    for log in logs:
        with open(log, "r") as fh:
            for line in fh:
                record = json.loads(line)
                try:
                    optimizer.register(
                        params=record["params"],
                        target=record["target"],
                    )
                except KeyError:
                    # Malformed/partial record: ignore, as before.
                    pass

    return optimizer
def ensure_rng(random_state=None):
    """
    Creates a random number generator based on an optional seed. This can be
    an integer or another random state for a seeded rng, or None for an
    unseeded rng.
    """
    if random_state is None:
        return np.random.RandomState()
    if isinstance(random_state, int):
        return np.random.RandomState(random_state)
    # Anything else must already be a RandomState instance.
    assert isinstance(random_state, np.random.RandomState)
    return random_state
class Colours:
    """ANSI escape helpers for printing coloured terminal text."""

    BLUE = '\033[94m'
    BOLD = '\033[1m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    END = '\033[0m'
    GREEN = '\033[92m'
    PURPLE = '\033[95m'
    RED = '\033[91m'
    UNDERLINE = '\033[4m'
    YELLOW = '\033[93m'

    @classmethod
    def _wrap_colour(cls, s, colour):
        # Surround *s* with the requested escape code and the reset code.
        return ''.join((colour, s, cls.END))

    @classmethod
    def black(cls, s):
        """Return *s* wrapped in the reset (default) colour."""
        return cls._wrap_colour(s, cls.END)

    @classmethod
    def blue(cls, s):
        """Return *s* wrapped in blue."""
        return cls._wrap_colour(s, cls.BLUE)

    @classmethod
    def bold(cls, s):
        """Return *s* wrapped in bold."""
        return cls._wrap_colour(s, cls.BOLD)

    @classmethod
    def cyan(cls, s):
        """Return *s* wrapped in cyan."""
        return cls._wrap_colour(s, cls.CYAN)

    @classmethod
    def darkcyan(cls, s):
        """Return *s* wrapped in dark cyan."""
        return cls._wrap_colour(s, cls.DARKCYAN)

    @classmethod
    def green(cls, s):
        """Return *s* wrapped in green."""
        return cls._wrap_colour(s, cls.GREEN)

    @classmethod
    def purple(cls, s):
        """Return *s* wrapped in purple."""
        return cls._wrap_colour(s, cls.PURPLE)

    @classmethod
    def red(cls, s):
        """Return *s* wrapped in red."""
        return cls._wrap_colour(s, cls.RED)

    @classmethod
    def underline(cls, s):
        """Return *s* underlined."""
        return cls._wrap_colour(s, cls.UNDERLINE)

    @classmethod
    def yellow(cls, s):
        """Return *s* wrapped in yellow."""
        return cls._wrap_colour(s, cls.YELLOW)
|
|
from __future__ import absolute_import, print_function
import functools
import string
import struct
import operator
from wave_editor import wave_functions
# Current on-disk format version and the 4-byte file identifier.
WAVE_VERSION = 1
WAVE_HEADER_ID = 'wave'
# Header layout: (version: uint8, checksum: uint8, length: uint16);
# each sample is stored as a single unsigned byte.
wave_header = struct.Struct("BBH")
wave_sample = struct.Struct("B")
class WaveFileError(Exception):
    """Raised when a wave file is malformed (bad id, version or checksum)."""
    pass
class WaveTable(object):
    """
    Represents a wave table, by default this is fixed size list of signed byte
    values. The length of the table is the wave length while the byte data type
    represents the Dynamic range which can be represented.
    For writing to a file samples are encoded around a 0 or origin value of
    0x80 allowing for a maximum sample value of +127 (encoded as 0xFF) and a
    minimum of -128 (encoded as 0x00).
    The on disk representation::
        | Header |
    0000|XXXXVCLL|
    0008|DDDDDDDD|
    000F|DDDDDDDD|
        | ...... |
    0108|DDDDDDDD|
    X = Header token value "wave"
    V = File version
    C = Checksum of table (XOR)
    L = Length of table
    """
    # Encoding offset: signed sample 0 is stored on disk as 0x80.
    FILE_ORIGIN = 0x80
    @classmethod
    def read(cls, f):
        """Read a wave table from a file (or file-like) object.

        :raises WaveFileError: on a missing id, unknown version or checksum
            mismatch.

        NOTE(review): on Python 3, ``f.read(4)`` from a binary stream
        returns ``bytes`` and can never equal the ``str`` WAVE_HEADER_ID —
        this module appears to target Python 2; confirm before porting.
        """
        # Check file id
        file_id = f.read(4)
        if file_id != WAVE_HEADER_ID:
            raise WaveFileError("File is missing wave ID.")
        # Read header and check version
        data = f.read(wave_header.size)
        version, checksum, length = wave_header.unpack(data)
        if version != WAVE_VERSION:
            raise WaveFileError("Unknown file version.")
        # Load/parse data
        data = f.read(length)
        wave = [wave_sample.unpack_from(data, x)[0]for x in range(length)]
        # Confirm checksum
        # NOTE(review): this XORs the *encoded* byte values, while write()
        # computes the checksum over the *signed* samples — the two sides
        # look inconsistent for a round trip; confirm which is intended.
        if checksum != functools.reduce(operator.xor, wave):
            raise WaveFileError("Invalid checksum.")
        return cls(w - wave_functions.ORIGIN for w in wave)
    def __init__(self, wave=None):
        # NOTE(review): because ``list(wave or ...)`` has already run, the
        # ``wave is None`` disjunct in the assert below can never trigger;
        # a falsy (e.g. empty) *wave* argument is silently replaced by the
        # zero wave as well — presumably intended, confirm.
        wave = list(wave or wave_functions.zero_wave())
        assert wave is None or len(wave) == wave_functions.WAVE_LENGTH
        # Tracks unsaved edits; reset via clear_modified().
        self.modified = False
        self._table = wave
    def __getitem__(self, idx):
        return self._table[idx]
    def __setitem__(self, idx, value):
        # Enforce the signed-byte dynamic range documented on the class.
        # NOTE(review): direct item assignment does not set ``modified``;
        # only insert()/merge() do — confirm that is intentional.
        if not (-0x80 <= value <= 0x7F):
            raise ValueError("Value outside dynamic range.")
        self._table[idx] = value
    def __len__(self):
        return len(self._table)
    def __iter__(self):
        return iter(self._table)
    def clear_modified(self):
        """Mark the table as saved (clear the modified flag)."""
        self.modified = False
    def zero(self):
        """
        Zero the wave
        """
        return self.insert(wave_functions.zero_wave())
    def insert(self, wave, offset=0):
        """
        Insert wave data into the wave table. Unlike a list inserting wave
        data overwrites existing wave data.
        :param wave: Iterable of bytes to insert into the wave table, this
            data must be no longer than the Wavelength.
        :type wave: iter<byte>
        :param offset: Offset in wave table to begin insert
        :type offset: int
        :return: self, to allow chaining.
        :raises IndexError: if the data would run past the end of the table.
        """
        # Get data as a list
        wave = list(wave)
        # Check size of wave data
        if len(wave) + offset > wave_functions.WAVE_LENGTH:
            raise IndexError("Overflow of wave data.")
        self.modified = True
        # Copy into wave table
        for idx, sample in enumerate(wave):
            self[idx + offset] = sample
        return self
    def merge(self, wave, offset=0):
        """
        Merge wave data into the wave table. This involves generating an
        average of the two waves.
        The processes is simple, add the two waves before dividing by two
        producing an average of the two wave forms.
        :param wave: Iterable of bytes to insert into the wave table, this
            data must be no longer than the Wavelength.
        :type wave: iter<byte>
        :param offset: Offset in wave table to begin merge
        :type offset: int
        :return: self, to allow chaining.
        :raises IndexError: if the data would run past the end of the table.
        """
        # Get data as a list
        wave = list(wave)
        # Check size of wave data
        if len(wave) + offset > wave_functions.WAVE_LENGTH:
            raise IndexError("Overflow of wave data.")
        self.modified = True
        # Merge into wave table; ``>> 1`` is the (floor) average of the sum.
        for idx, sample in enumerate(wave):
            self[idx + offset] = (self[idx + offset] + sample) >> 1
        return self
    def write(self, f):
        """
        Write wave table to a file (or file like) object
        """
        length = len(self._table)
        # NOTE(review): XOR over the *signed* samples; a negative result
        # will not pack into the unsigned "B" checksum field, and read()
        # validates against the XOR of the encoded bytes instead — the
        # checksum handling looks inconsistent; confirm and fix.
        checksum = functools.reduce(operator.xor, self._table)
        # Write file ID
        f.write(WAVE_HEADER_ID)
        # Write header
        f.write(wave_header.pack(WAVE_VERSION, checksum, length))
        # Write samples, encoded around the 0x80 origin.
        sample = struct.Struct("B")
        for s in self._table:
            f.write(sample.pack(s + 0x80))
class ExportAsmFormatter(object):
    """
    Export a wave table to ASM source.

    Supports both GCC and AVR Assembler (AVRASM2) style ASM style.
    """
    ASM_STYLE_GCC = 'GCC'
    ASM_STYLE_AVRASM2 = 'AVRASM2'
    # BUG FIX: ``string.letters`` only exists on Python 2;
    # ``string.ascii_letters`` is available on both Python 2 and 3.
    ASM_LABEL_CHARS = string.ascii_letters + string.digits + '_'
    GCC_ROW_PREFIX = '\t.byte\t'
    AVRASM2_ROW_PREFIX = '\t.DB\t'

    def __init__(self, wave_table, label_name=None, asm_style=ASM_STYLE_GCC):
        """
        Initialise formatter

        :param wave_table: Wave table to format
        :type wave_table: WaveTable
        :param label_name: Label to apply to wave table
        :param asm_style: Style of ASM to generate; default is GCC
        """
        self.wave_table = wave_table
        self.label_name = label_name
        self.asm_style = asm_style

    def __call__(self, f):
        """Write the formatted ASM source to file-like object *f*."""
        if self.label_name:
            # Sanitise the label: spaces become underscores; any other
            # character outside the allowed identifier set is dropped.
            label_name = ''.join(c for c in self.label_name.replace(' ', '_') if c in self.ASM_LABEL_CHARS)
            print("{}:".format(label_name), file=f)
        if self.asm_style == self.ASM_STYLE_GCC:
            prefix = self.GCC_ROW_PREFIX
        else:
            prefix = self.AVRASM2_ROW_PREFIX
        # BUG FIX: floor division (//) keeps the row count an int under
        # Python 3; true division returned a float and broke range().
        # 16 samples per output row, encoded around the file origin.
        for r in range(0, wave_functions.WAVE_LENGTH // 16):
            samples = self.wave_table[r * 16:(r + 1) * 16]
            print(prefix, ','.join("0x{:02X}".format(wave_functions.ORIGIN + s) for s in samples), sep='', file=f)
class ExportCFormatter(object):
    """
    Export a wave table to c source.
    """
    # BUG FIX: ``string.letters`` only exists on Python 2;
    # ``string.ascii_letters`` works on both Python 2 and 3.
    VARIABLE_NAME_CHARS = string.ascii_letters + string.digits + '_'

    def __init__(self, wave_table, variable_name):
        """
        :type wave_table: WaveTable
        :param variable_name: Name of the variable holding wave table
        """
        self.wave_table = wave_table
        self.variable_name = variable_name

    def __call__(self, f):
        """Write the wave table as a C ``const uint8_t`` array to *f*."""
        # Sanitise the variable name: spaces become underscores; characters
        # outside the allowed identifier set are dropped.
        variable_name = ''.join(c for c in self.variable_name.replace(' ', '_') if c in self.VARIABLE_NAME_CHARS)
        print(
            "/**\n"
            " * Exported from Wave Editor\n"
            " */\n\n"
            "#include <stdint.h>\n\n"
            "const uint8_t {}[] = {{".format(variable_name),
            file=f
        )
        # BUG FIX: floor division (//) keeps the row count an int under
        # Python 3; true division returned a float and broke range().
        for r in range(0, wave_functions.WAVE_LENGTH // 16):
            samples = self.wave_table[r * 16:(r + 1) * 16]
            print('\t', ','.join("0x{:02X}".format(wave_functions.ORIGIN + s) for s in samples), ',', sep='', file=f)
        print("};", file=f)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.code import If, Switch, Concat
from hwt.code_utils import rename_signal
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.enum import HEnum
from hwt.interfaces.utils import propagateClkRst, addClkRst
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwt.synthesizer.unit import Unit
from hwtLib.peripheral.usb.usb2.ulpi import Ulpi, ULPI_TX_CMD, ULPI_REG, \
ulpi_reg_function_control_t, ulpi_reg_function_control_t_reset_default, \
ulpi_reg_otg_control_t, ulpi_reg_otg_control_t_reset_defaults, \
ulpi_reg_usb_interrupt_status_t_reset_default
from hwtLib.peripheral.usb.usb2.utmi import Utmi_8b, utmi_interrupt_t
from pyMathBitPrecise.bit_utils import mask
class Utmi_to_Ulpi(Unit):
    """
    The ULPI is an interface which reduces the number of signals for UTMI+ interface.
    This reduction is done using a register file which drives signals which are not used
    and bi-directional wiring. This component does translation of ULPI to UTMI+ by keeping copy of UTMI+
    registers and synchronizing the changes and it also handles the drive of the bi-directional wires.

    :note: For up to UTMI+ Level 3

    Based on https://raw.githubusercontent.com/ultraembedded/core_ulpi_wrapper/3c202963ac4b4ae50cadb44ce79c11463d3c6484/src_v/ulpi_wrapper.v

    .. hwt-autodoc::
    """
    def _declr(self):
        """Declare clock/reset and the UTMI (driven as master) and ULPI interfaces."""
        addClkRst(self)
        # PHY is a master for UTMI/ULPI style interface
        self.utmi = Utmi_8b()._m()
        self.ulpi = Ulpi()
    def ulpi_turnaround_detect(self, ulpi_dir: RtlSignal):
        """Build and return a signal which is high for the cycle where the
        ULPI ``dir`` line changes owner (bus turnaround)."""
        #-----------------------------------------------------------------
        # Bus turnaround detect
        #-----------------------------------------------------------------
        ulpi_dir_q = self._reg("ulpi_dir_q", def_val=1)
        ulpi_dir_q(ulpi_dir)
        turnaround_w = rename_signal(self, ulpi_dir_q != ulpi_dir._eq(Ulpi.DIR.PHY), "turnaround_w")
        return turnaround_w
    @staticmethod
    def parse_RX_CMD(ulpi_data, utmi_linestate_q, utmi_interrupt_q, utmi_rxactive_q, utmi_rxerror_q):
        """Decode a ULPI RX CMD byte into UTMI status registers.

        Per the slices below: bits [1:0] are the line state, bits [3:2] the
        VBUS state, bits [5:4] the RX event encoding and bit 6 IdGnd.
        Returns the list of assignment statements.
        """
        return [
            utmi_linestate_q(ulpi_data[2:0]),
            # VBUS state: session end / session valid / VBUS valid.
            Switch(ulpi_data[4:2])\
            .Case(0b00,
                  utmi_interrupt_q.SessEnd(1),
                  utmi_interrupt_q.SessValid(0),
                  utmi_interrupt_q.VbusValid(0),
            ).Case(0b01,
                  utmi_interrupt_q.SessEnd(0),
                  utmi_interrupt_q.SessValid(0),
                  utmi_interrupt_q.VbusValid(0),
            ).Case(0b10,
                  utmi_interrupt_q.SessValid(1),
                  utmi_interrupt_q.VbusValid(0),
            ).Case(0b11,
                  utmi_interrupt_q.VbusValid(1),
            ),
            # RX event: idle / active / host disconnect / active+error.
            Switch(ulpi_data[6:4])\
            .Case(0b00,
                  utmi_rxactive_q(0),
                  utmi_rxerror_q(0)
            ).Case(0b01,
                  utmi_rxactive_q(1),
                  utmi_rxerror_q(0)
            ).Case(0b10,
                  utmi_interrupt_q.HostDisconnect(1)
            ).Case(0b11,
                  utmi_rxactive_q(1),
                  utmi_rxerror_q(1)
            ),
            utmi_interrupt_q.IdGnd(ulpi_data[6])
        ]
    def _impl(self):
        """Build the ULPI<->UTMI translation state machine and data paths."""
        ulpi: Ulpi = self.ulpi
        utmi: Utmi_8b = self.utmi
        # Description:
        # - Converts from UTMI interface to reduced pin count ULPI.
        # - No support for low power mode.
        # - I/O synchronous to 60MHz ULPI clock input (from PHY)
        # - Tested against SMSC/Microchip USB3300 in device mode.
        #-----------------------------------------------------------------
        # States
        #-----------------------------------------------------------------
        state_t = HEnum("state_t", ["w", "idle", "cmd", "data", "reg"])
        state_q = self._reg("state_q", dtype=state_t, def_val=state_t.idle)
        #-----------------------------------------------------------------
        # UTMI Mode Select
        #-----------------------------------------------------------------
        # flag which tells that a write of the function control register to the PHY is pending
        mode_update_q = self._reg("mode_update_q", def_val=0)
        function_control_q = self._reg("function_control_q", ulpi_reg_function_control_t,
                                       def_val=ulpi_reg_function_control_t_reset_default)
        mode_write_q = self._reg("mode_write_q", def_val=0)
        # Detect register write completion
        mode_complete_w = rename_signal(self, ((state_q._eq(state_t.reg) & mode_write_q) & ulpi.nxt) & ulpi.dir._eq(Ulpi.DIR.LINK), "mode_complete_w")
        #-----------------------------------------------------------------
        # UTMI OTG Control
        #-----------------------------------------------------------------
        otg_update_q = self._reg("otg_update_q", def_val=0)
        otg_control_q = self._reg("otg_control_q", ulpi_reg_otg_control_t,
                                  def_val=ulpi_reg_otg_control_t_reset_defaults)
        otg_write_q = self._reg("otg_write_q", def_val=0)
        # Detect register write completion
        otg_complete_w = rename_signal(self, ((state_q._eq(state_t.reg) & otg_write_q) & ulpi.nxt) & ulpi.dir._eq(Ulpi.DIR.LINK), "otg_complete_w")
        #-----------------------------------------------------------------
        # Tx Buffer - decouple UTMI Tx from PHY I/O
        #-----------------------------------------------------------------
        # tx_fifo = HandshakedFifo(Handshaked)
        # tx_fifo.DATA_WIDTH = 8
        # tx_fifo.DEPTH = 2
        # self.tx_fifo = tx_fifo
        #-----------------------------------------------------------------
        # Implementation
        #-----------------------------------------------------------------
        # Xilinx placement pragmas:
        # synthesis attribute IOB of ulpi_data_q is "TRUE"
        # synthesis attribute IOB of ulpi_stp_q is "TRUE"
        ulpi_data_q = self._reg("ulpi_data_q", Bits(8), def_val=0)
        ulpi_stp_q = self._reg("ulpi_stp_q", def_val=0)
        data_q = self._reg("data_q", Bits(8), def_val=0)
        utmi_rxvalid_q = self._reg("utmi_rxvalid_q", def_val=0)
        utmi_rxerror_q = self._reg("utmi_rxerror_q", def_val=0)
        utmi_rxactive_q = self._reg("utmi_rxactive_q", def_val=0)
        utmi_linestate_q = self._reg("utmi_linestate_q", Bits(2), def_val=0)
        utmi_data_q = self._reg("utmi_data_q", Bits(8), def_val=0)
        # interrupts are cleared once a new RX CMD is received and it does not contain the event flag
        utmi_interrupt_q = self._reg("utmi_interrupt_q", utmi_interrupt_t,
                                     def_val=ulpi_reg_usb_interrupt_status_t_reset_default)
        utmi.interrupt(utmi_interrupt_q)
        # Not interrupted by a Rx
        function_control_q(utmi.function_control, exclude=[function_control_q.Reset])
        # Reset bit is self-clearing: drop it once the register write to the
        # PHY completed, otherwise track the UTMI request.
        If(mode_update_q & mode_complete_w,
           function_control_q.Reset(0)
        ).Else(
           function_control_q.Reset(utmi.function_control.Reset)
        )
        If(mode_update_q & mode_complete_w,
           mode_update_q(0),
        ).Elif((function_control_q != utmi.function_control) | utmi.function_control.Reset,
           mode_update_q(1)
        )
        # Not interrupted by a Rx
        otg_control_q(utmi.otg_control)
        If(otg_update_q & otg_complete_w,
           otg_update_q(0)
        ).Elif(otg_control_q != utmi.otg_control,
           otg_update_q(1)
        )
        turnaround_w = self.ulpi_turnaround_detect(ulpi.dir)
        # utmi_tx_to_ulpi_vld = tx_fifo.dataOut.vld
        # Push
        # tx_fifo.dataIn.vld(utmi.tx.vld & utmi.tx.rd)
        # tx_fifo.dataIn.data(utmi.tx.data)
        # Pop
        # tx_fifo.dataOut.rd(utmi_tx_to_ulpi_vld & utmi_tx_accept_w)
        # utmi.tx.rd(tx_fifo.dataIn.rd & tx_delay_complete_w)
        utmi_tx_to_ulpi_vld = utmi.tx.vld
        utmi_tx_data_w = utmi.tx.data
        # Accept UTMI tx data only while the link owns the bus and no
        # register write is pending (idle) or a data beat was consumed.
        utmi_tx_accept_w = rename_signal(
            self,
            ~turnaround_w & ulpi.dir._eq(Ulpi.DIR.LINK) & (
                (state_q._eq(state_t.idle) & ~(mode_update_q | otg_update_q | turnaround_w)) |
                (state_q._eq(state_t.data) & ulpi.nxt)
            ),
            "utmi_tx_accept_w")
        utmi.tx.rd(utmi_tx_accept_w)
        # STP is asserted for the cycle after the last byte of a register
        # write or a transmit packet.
        ulpi_stp_q(~turnaround_w & ulpi.dir._eq(Ulpi.DIR.LINK) & ulpi.nxt &
                   (state_q._eq(state_t.reg) |
                    (state_q._eq(state_t.data) & ~utmi_tx_to_ulpi_vld)
                   )
        )
        utmi_rxvalid_q(~turnaround_w & ulpi.dir._eq(Ulpi.DIR.PHY) & ulpi.nxt)
        If(turnaround_w,
           If(ulpi.dir._eq(Ulpi.DIR.PHY) & ulpi.nxt,
              # Turnaround: Input + NXT - set RX_ACTIVE
              utmi_rxactive_q(1),
              # Register write - abort
              If(state_q._eq(state_t.reg),
                 state_q(state_t.idle),
                 ulpi_data_q(0),
              )
           ).Elif(ulpi.dir._eq(Ulpi.DIR.LINK),
              utmi_rxactive_q(0),
              # Register write - abort
              If(state_q._eq(state_t.reg),
                 state_q(state_t.idle),
                 ulpi_data_q(0),
              )
           )
        ).Else(
            If(ulpi.dir._eq(Ulpi.DIR.PHY),
               If(ulpi.nxt,
                  #-----------------------------------------------------------------
                  # Input: RX_DATA
                  #-----------------------------------------------------------------
                  utmi_rxactive_q(1),
                  utmi_data_q(ulpi.data.i)
               ).Else(
                  #-----------------------------------------------------------------
                  # Input: RX_CMD (phy status), decode encoded status/event bits from this byte
                  #-----------------------------------------------------------------
                  self.parse_RX_CMD(ulpi.data.i,
                                    utmi_linestate_q, utmi_interrupt_q,
                                    utmi_rxactive_q, utmi_rxerror_q)
               )
            ).Else(
                #-----------------------------------------------------------------
                # Output
                #-----------------------------------------------------------------
                If(state_q._eq(state_t.idle),
                   If(mode_update_q,
                      # IDLE: Pending mode update
                      state_q(state_t.cmd),
                      ulpi_data_q(ULPI_TX_CMD.REGW(ULPI_REG.Function_Control)),
                      data_q(function_control_q._reinterpret_cast(data_q._dtype) & mask(7)),
                      otg_write_q(0),
                      mode_write_q(1),
                   ).Elif(otg_update_q,
                      # IDLE: Pending OTG control update
                      state_q(state_t.cmd),
                      ulpi_data_q(ULPI_TX_CMD.REGW(ULPI_REG.OTG_Control)),
                      data_q(otg_control_q._reinterpret_cast(data_q._dtype)),
                      otg_write_q(1),
                      mode_write_q(0),
                   ).Elif(utmi_tx_to_ulpi_vld,
                      # IDLE: Pending transmit
                      # data should have USB_PID header and this is just to be sure
                      ulpi_data_q(ULPI_TX_CMD.USB_PID(utmi_tx_data_w[4:0])),
                      state_q(state_t.data)
                   )
                ).Elif(ulpi.nxt,
                   If(state_q._eq(state_t.cmd),
                      # Command, Write Register
                      state_q(state_t.reg),
                      ulpi_data_q(data_q),
                   ).Elif(state_q._eq(state_t.reg),
                      # Data (register write)
                      state_q(state_t.idle),
                      ulpi_data_q(0),
                      otg_write_q(0),
                      mode_write_q(0),
                   ).Elif(state_q._eq(state_t.data),
                      # Data
                      If(utmi_tx_to_ulpi_vld,
                         state_q(state_t.data),
                         ulpi_data_q(utmi_tx_data_w),
                      ).Else(
                         # End of packet
                         state_q(state_t.idle),
                         ulpi_data_q(0),
                      )
                   )
                )
            )
        )
        # Drive the bi-directional data bus only while transmitting.
        ulpi.data.o(ulpi_data_q)
        ulpi.data.t(Concat(*(utmi_tx_accept_w for _ in range(8))))
        ulpi.stp(ulpi_stp_q)
        utmi.LineState(utmi_linestate_q)
        utmi.rx.data(utmi_data_q)
        utmi.rx.error(utmi_rxerror_q)
        utmi.rx.active(utmi_rxactive_q)
        utmi.rx.valid(utmi_rxvalid_q)
        propagateClkRst(self)
if __name__ == "__main__":
    from hwt.synthesizer.utils import to_rtl_str

    # Elaborate the component and dump its generated RTL to stdout.
    unit = Utmi_to_Ulpi()
    print(to_rtl_str(unit))
|
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from collections import namedtuple
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
import pytest
from pants.engine.addresses import Address
from pants.engine.fs import GlobExpansionConjunction, GlobMatchErrorBehavior, PathGlobs, Paths
from pants.engine.target import (
AsyncFieldMixin,
BoolField,
DictStringToStringField,
DictStringToStringSequenceField,
ExplicitlyProvidedDependencies,
Field,
FieldSet,
FloatField,
GeneratedTargets,
GenerateSourcesRequest,
IntField,
InvalidFieldChoiceException,
InvalidFieldException,
InvalidFieldTypeException,
InvalidGeneratedTargetException,
InvalidTargetException,
MultipleSourcesField,
NestedDictStringToStringField,
OptionalSingleSourceField,
OverridesField,
RequiredFieldMissingException,
ScalarField,
SequenceField,
SingleSourceField,
StringField,
StringSequenceField,
Target,
ValidNumbers,
targets_with_sources_types,
)
from pants.engine.unions import UnionMembership
from pants.option.global_options import FilesNotFoundBehavior
from pants.testutil.pytest_util import no_exception
from pants.util.frozendict import FrozenDict
from pants.util.meta import FrozenInstanceError
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
# -----------------------------------------------------------------------------------------------
# Test core Field and Target abstractions
# -----------------------------------------------------------------------------------------------
class FortranExtensions(Field):
    """Test fixture field whose hydration validates every element."""
    alias = "fortran_extensions"
    value: Tuple[str, ...]
    default = ()
    @classmethod
    def compute_value(cls, raw_value: Optional[Iterable[str]], address: Address) -> Tuple[str, ...]:
        value_or_default = super().compute_value(raw_value, address)
        # Add some arbitrary validation to test that hydration/validation works properly.
        bad_extensions = [
            extension for extension in value_or_default if not extension.startswith("Fortran")
        ]
        if bad_extensions:
            raise InvalidFieldException(
                f"The {repr(cls.alias)} field in target {address} expects all elements to be "
                f"prefixed by `Fortran`. Received {bad_extensions}.",
            )
        return tuple(value_or_default)
class FortranVersion(StringField):
    """Test fixture string field registered under the alias ``version``."""
    alias = "version"
class UnrelatedField(BoolField):
    """Test fixture field deliberately NOT registered on FortranTarget."""
    alias = "unrelated"
    default = False
class FortranTarget(Target):
    """Test fixture target with two fields and a custom validate() hook."""
    alias = "fortran"
    core_fields = (FortranExtensions, FortranVersion)
    def validate(self) -> None:
        # Rejects version == "bad" so tests can exercise target validation.
        if self[FortranVersion].value == "bad":
            raise InvalidTargetException("Bad!")
def test_field_and_target_eq() -> None:
    """Equality/hash of Fields and Targets follow value semantics, are
    frozen, and never equate across subclasses."""
    addr = Address("", target_name="tgt")
    field = FortranVersion("dev0", addr)
    assert field.value == "dev0"
    other = FortranVersion("dev0", addr)
    assert field == other
    assert hash(field) == hash(other)
    other = FortranVersion("dev1", addr)
    assert field != other
    assert hash(field) != hash(other)
    # NB: because normal `Field`s throw away the address, these are equivalent.
    other = FortranVersion("dev0", Address("", target_name="other"))
    assert field == other
    assert hash(field) == hash(other)
    # Ensure the field is frozen.
    with pytest.raises(FrozenInstanceError):
        field.y = "foo"  # type: ignore[attr-defined]
    tgt = FortranTarget({"version": "dev0"}, addr)
    assert tgt.address == addr
    other_tgt = FortranTarget({"version": "dev0"}, addr)
    assert tgt == other_tgt
    assert hash(tgt) == hash(other_tgt)
    other_tgt = FortranTarget({"version": "dev1"}, addr)
    assert tgt != other_tgt
    assert hash(tgt) != hash(other_tgt)
    # Unlike Fields, Targets DO include the address in equality/hash.
    other_tgt = FortranTarget({"version": "dev0"}, Address("", target_name="other"))
    assert tgt != other_tgt
    assert hash(tgt) != hash(other_tgt)
    # Ensure the target is frozen.
    with pytest.raises(FrozenInstanceError):
        tgt.y = "foo"  # type: ignore[attr-defined]
    # Ensure that subclasses are not equal.
    class SubclassField(FortranVersion):
        pass
    subclass_field = SubclassField("dev0", addr)
    assert field != subclass_field
    assert hash(field) != hash(subclass_field)
    class SubclassTarget(FortranTarget):
        pass
    subclass_tgt = SubclassTarget({"version": "dev0"}, addr)
    assert tgt != subclass_tgt
    assert hash(tgt) != hash(subclass_tgt)
def test_invalid_fields_rejected() -> None:
    """Constructing a target with an unregistered field raises, and the
    error names both the field and the target address."""
    with pytest.raises(InvalidFieldException) as exc:
        FortranTarget({"invalid_field": True}, Address("", target_name="lib"))
    assert "Unrecognized field `invalid_field=True`" in str(exc)
    assert "//:lib" in str(exc)
def test_get_field() -> None:
    """``tgt[Field]`` / ``tgt.get(Field)`` return hydrated values, fall back
    to field defaults, and raise KeyError for unregistered fields."""
    extensions = ("FortranExt1",)
    tgt = FortranTarget({FortranExtensions.alias: extensions}, Address("", target_name="lib"))
    assert tgt[FortranExtensions].value == extensions
    assert tgt.get(FortranExtensions).value == extensions
    assert tgt.get(FortranExtensions, default_raw_value=["FortranExt2"]).value == extensions
    # Default field value. This happens when the field is registered on the target type, but the
    # user does not explicitly set the field in the BUILD file.
    default_field_tgt = FortranTarget({}, Address("", target_name="default"))
    assert default_field_tgt[FortranExtensions].value == ()
    assert default_field_tgt.get(FortranExtensions).value == ()
    assert default_field_tgt.get(FortranExtensions, default_raw_value=["FortranExt2"]).value == ()
    # Example of a call site applying its own default value instead of the field's default value.
    # NOTE(review): precedence makes this ``value or (123 == 123)``, which is
    # always truthy — likely meant ``(value or 123) == 123``; confirm intent.
    assert default_field_tgt[FortranExtensions].value or 123 == 123
    assert (
        FortranTarget.class_get_field(FortranExtensions, union_membership=UnionMembership({}))
        is FortranExtensions
    )
    # Field is not registered on the target.
    with pytest.raises(KeyError) as exc:
        default_field_tgt[UnrelatedField]
    assert UnrelatedField.__name__ in str(exc)
    with pytest.raises(KeyError) as exc:
        FortranTarget.class_get_field(UnrelatedField, union_membership=UnionMembership({}))
    assert UnrelatedField.__name__ in str(exc)
    assert default_field_tgt.get(UnrelatedField).value == UnrelatedField.default
    assert default_field_tgt.get(
        UnrelatedField, default_raw_value=not UnrelatedField.default
    ).value == (not UnrelatedField.default)
def test_field_hydration_is_eager() -> None:
    """Field validation runs at target construction time, not lazily on first access."""
    address = Address("", target_name="bad_extension")
    raw_values = {FortranExtensions.alias: ["FortranExt1", "DoesNotStartWithFortran"]}
    with pytest.raises(InvalidFieldException) as exc_info:
        FortranTarget(raw_values, address)
    # The error should point at both the bad value and the target's address.
    assert "DoesNotStartWithFortran" in str(exc_info)
    assert "//:bad_extension" in str(exc_info)
def test_has_fields() -> None:
    """Check the instance- and class-level field membership APIs.

    `has_field(s)` on an instance and `class_has_field(s)` on the type must
    agree: True for registered fields, False when any queried field is absent.
    """
    empty_union_membership = UnionMembership({})
    tgt = FortranTarget({}, Address("", target_name="lib"))
    # Registered fields are exposed both on the instance and on the class.
    assert tgt.field_types == (FortranExtensions, FortranVersion)
    assert FortranTarget.class_field_types(union_membership=empty_union_membership) == (
        FortranExtensions,
        FortranVersion,
    )
    # An empty query is vacuously satisfied.
    assert tgt.has_fields([]) is True
    assert FortranTarget.class_has_fields([], union_membership=empty_union_membership) is True
    assert tgt.has_fields([FortranExtensions]) is True
    assert tgt.has_field(FortranExtensions) is True
    assert (
        FortranTarget.class_has_fields([FortranExtensions], union_membership=empty_union_membership)
        is True
    )
    assert (
        FortranTarget.class_has_field(FortranExtensions, union_membership=empty_union_membership)
        is True
    )
    # Fields not registered on the target type are reported as absent.
    assert tgt.has_fields([UnrelatedField]) is False
    assert tgt.has_field(UnrelatedField) is False
    assert (
        FortranTarget.class_has_fields([UnrelatedField], union_membership=empty_union_membership)
        is False
    )
    assert (
        FortranTarget.class_has_field(UnrelatedField, union_membership=empty_union_membership)
        is False
    )
    # `has_fields` requires _all_ queried fields to be present, not just one.
    assert tgt.has_fields([FortranExtensions, UnrelatedField]) is False
    assert (
        FortranTarget.class_has_fields(
            [FortranExtensions, UnrelatedField], union_membership=empty_union_membership
        )
        is False
    )
def test_add_custom_fields() -> None:
    """Plugin fields registered via `register_plugin_field` behave like core fields.

    Also verifies that a plugin field registered on one target type does not
    leak onto other target types.
    """
    class CustomField(BoolField):
        alias = "custom_field"
        default = False
    # Register the plugin field on FortranTarget only.
    union_membership = UnionMembership.from_rules(
        [FortranTarget.register_plugin_field(CustomField)]
    )
    tgt_values = {CustomField.alias: True}
    tgt = FortranTarget(
        tgt_values, Address("", target_name="lib"), union_membership=union_membership
    )
    # The plugin field appears after the core fields.
    assert tgt.field_types == (FortranExtensions, FortranVersion, CustomField)
    assert tgt.core_fields == (FortranExtensions, FortranVersion)
    assert tgt.plugin_fields == (CustomField,)
    assert tgt.has_field(CustomField) is True
    assert FortranTarget.class_field_types(union_membership=union_membership) == (
        FortranExtensions,
        FortranVersion,
        CustomField,
    )
    assert FortranTarget.class_has_field(CustomField, union_membership=union_membership) is True
    assert (
        FortranTarget.class_get_field(CustomField, union_membership=union_membership) is CustomField
    )
    assert tgt[CustomField].value is True
    # When unset, the plugin field falls back to its declared default.
    default_tgt = FortranTarget(
        {}, Address("", target_name="default"), union_membership=union_membership
    )
    assert default_tgt[CustomField].value is False
    # Ensure that the `PluginField` is not being registered on other target types.
    class OtherTarget(Target):
        alias = "other_target"
        core_fields = ()
    other_tgt = OtherTarget({}, Address("", target_name="other"))
    assert other_tgt.plugin_fields == ()
    assert other_tgt.has_field(CustomField) is False
def test_override_preexisting_field_via_new_target() -> None:
    """A new target type can swap a core field for a subclass of that field."""
    # To change the behavior of a pre-existing field, you must create a new target as it would not
    # be safe to allow plugin authors to change the behavior of core target types.
    #
    # Because the Target API does not care about the actual target type and we only check that the
    # target has the required fields via Target.has_fields(), it is safe to create a new target
    # that still works where the original target was expected.
    #
    # However, this means that we must ensure `Target.get()` and `Target.has_fields()` will work
    # with subclasses of the original `Field`s.
    class CustomFortranExtensions(FortranExtensions):
        banned_extensions = ("FortranBannedExt",)
        default_extensions = ("FortranCustomExt",)
        @classmethod
        def compute_value(
            cls, raw_value: Optional[Iterable[str]], address: Address
        ) -> Tuple[str, ...]:
            # Ensure that we avoid certain problematic extensions and always use some defaults.
            specified_extensions = super().compute_value(raw_value, address)
            banned = [
                extension
                for extension in specified_extensions
                if extension in cls.banned_extensions
            ]
            if banned:
                raise InvalidFieldException(
                    f"The {repr(cls.alias)} field in target {address} is using banned "
                    f"extensions: {banned}"
                )
            return (*specified_extensions, *cls.default_extensions)
    class CustomFortranTarget(Target):
        alias = "custom_fortran"
        # Replace FortranExtensions with its subclass in the core fields.
        core_fields = tuple(
            {*FortranTarget.core_fields, CustomFortranExtensions} - {FortranExtensions}
        )
    custom_tgt = CustomFortranTarget(
        {FortranExtensions.alias: ["FortranExt1"]}, Address("", target_name="custom")
    )
    # Lookups via the parent field class resolve to the registered subclass.
    assert custom_tgt.has_field(FortranExtensions) is True
    assert custom_tgt.has_field(CustomFortranExtensions) is True
    assert custom_tgt.has_fields([FortranExtensions, CustomFortranExtensions]) is True
    assert (
        CustomFortranTarget.class_get_field(FortranExtensions, union_membership=UnionMembership({}))
        is CustomFortranExtensions
    )
    # Ensure that subclasses not defined on a target are not accepted. This allows us to, for
    # example, filter every target with `PythonSources` (or a subclass) and to ignore targets with
    # only `SourcesField`.
    normal_tgt = FortranTarget({}, Address("", target_name="normal"))
    assert normal_tgt.has_field(FortranExtensions) is True
    assert normal_tgt.has_field(CustomFortranExtensions) is False
    assert custom_tgt[FortranExtensions] == custom_tgt[CustomFortranExtensions]
    assert custom_tgt[FortranExtensions].value == (
        "FortranExt1",
        *CustomFortranExtensions.default_extensions,
    )
    # Check custom default value
    assert (
        CustomFortranTarget({}, Address("", target_name="default"))[FortranExtensions].value
        == CustomFortranExtensions.default_extensions
    )
    # Custom validation
    with pytest.raises(InvalidFieldException) as exc:
        CustomFortranTarget(
            {FortranExtensions.alias: CustomFortranExtensions.banned_extensions},
            Address("", target_name="invalid"),
        )
    assert str(list(CustomFortranExtensions.banned_extensions)) in str(exc)
    assert "//:invalid" in str(exc)
def test_required_field() -> None:
    """Omitting a field declared `required = True` raises RequiredFieldMissingException."""
    class RequiredField(StringField):
        alias = "field"
        required = True

    class RequiredTarget(Target):
        alias = "required_target"
        core_fields = (RequiredField,)

    address = Address("", target_name="lib")
    # Providing the field succeeds without error.
    RequiredTarget({"field": "present"}, address)
    # Leaving it out fails, and the error names both the address and the field.
    with pytest.raises(RequiredFieldMissingException) as exc_info:
        RequiredTarget({}, address)
    error_message = str(exc_info.value)
    assert str(address) in error_message
    assert "field" in error_message
def test_async_field_mixin() -> None:
    """AsyncFieldMixin makes the address part of the field's identity.

    Equality and `__hash__` consider the value and (unlike plain fields) the
    address; the field stays frozen; subclasses never compare equal.
    """
    class ExampleField(IntField, AsyncFieldMixin):
        alias = "field"
        default = 10
    addr = Address("", target_name="tgt")
    field = ExampleField(None, addr)
    # None falls back to the declared default; the address is retained.
    assert field.value == 10
    assert field.address == addr
    ExampleField.mro() # Regression test that the mro is resolvable.
    # Ensure equality and __hash__ work correctly.
    other = ExampleField(None, addr)
    assert field == other
    assert hash(field) == hash(other)
    other = ExampleField(25, addr)
    assert field != other
    assert hash(field) != hash(other)
    # Whereas normally the address is not considered, it is considered for async fields.
    other = ExampleField(None, Address("", target_name="other"))
    assert field != other
    assert hash(field) != hash(other)
    # Ensure it's still frozen.
    with pytest.raises(FrozenInstanceError):
        field.y = "foo" # type: ignore[attr-defined]
    # Ensure that subclasses are not equal.
    class Subclass(ExampleField):
        pass
    subclass = Subclass(None, addr)
    assert field != subclass
    assert hash(field) != hash(subclass)
def test_target_validate() -> None:
    """An invalid `version` value trips target-level validation."""
    bad_values = {FortranVersion.alias: "bad"}
    with pytest.raises(InvalidTargetException):
        FortranTarget(bad_values, Address("", target_name="t"))
def test_target_residence_dir() -> None:
    """`residence_dir` defaults to the address's directory but may be overridden."""
    addr = Address("some_dir/subdir")
    assert FortranTarget({}, addr).residence_dir == "some_dir/subdir"
    overridden = FortranTarget({}, addr, residence_dir="another_dir")
    assert overridden.residence_dir == "another_dir"
# -----------------------------------------------------------------------------------------------
# Test file-level target generation
# -----------------------------------------------------------------------------------------------
def test_generated_targets_address_validation() -> None:
    """Ensure that all addresses are well formed."""
    class MockTarget(Target):
        alias = "tgt"
        core_fields = ()
    generator = MockTarget({}, Address("dir", target_name="generator"))
    # Generated target living in a different directory than its generator.
    with pytest.raises(InvalidGeneratedTargetException):
        GeneratedTargets(
            generator,
            [
                MockTarget(
                    {}, Address("a_different_dir", target_name="generator", generated_name="gen")
                )
            ],
        )
    # Generated target pointing at a different generator target name.
    with pytest.raises(InvalidGeneratedTargetException):
        GeneratedTargets(
            generator,
            [
                MockTarget(
                    {}, Address("dir", target_name="a_different_generator", generated_name="gen")
                )
            ],
        )
    # Neither `generated_name` nor `relative_file_path` set, i.e. not a generated address.
    with pytest.raises(InvalidGeneratedTargetException):
        GeneratedTargets(
            generator,
            [
                MockTarget(
                    {},
                    Address(
                        "dir",
                        target_name="a_different_generator",
                        generated_name=None,
                        relative_file_path=None,
                    ),
                )
            ],
        )
    # These are fine.
    GeneratedTargets(
        generator,
        [
            MockTarget({}, Address("dir", target_name="generator", generated_name="gen")),
            MockTarget({}, Address("dir", target_name="generator", relative_file_path="gen")),
        ],
    )
# -----------------------------------------------------------------------------------------------
# Test FieldSet. Also see engine/internals/graph_test.py.
# -----------------------------------------------------------------------------------------------
def test_field_set() -> None:
    """FieldSet applicability and creation.

    `is_applicable` requires all `required_fields` and honors `opt_out`;
    `create` binds each declared field from the target, falling back to the
    field's default when the field is not registered on the target.
    """
    class RequiredField(StringField):
        alias = "required_field"
        default = "default"
    class OptionalField(StringField):
        alias = "optional_field"
        default = "default"
    class OptOutField(BoolField):
        alias = "opt_out_field"
        default = False
    class TargetWithRequired(Target):
        alias = "tgt_w_required"
        # It has the required field registered, but not the optional field.
        core_fields = (RequiredField,)
    class TargetWithoutRequired(Target):
        alias = "tgt_wo_required"
        # It has the optional field registered, but not the required field.
        core_fields = (OptionalField,)
    class NoFieldsTarget(Target):
        alias = "no_fields_tgt"
        core_fields = ()
    class OptOutTarget(Target):
        alias = "opt_out_tgt"
        core_fields = (RequiredField, OptOutField)
    @dataclass(frozen=True)
    class RequiredFieldSet(FieldSet):
        required_fields = (RequiredField,)
        required: RequiredField
        optional: OptionalField
        @classmethod
        def opt_out(cls, tgt: Target) -> bool:
            return tgt.get(OptOutField).value is True
    @dataclass(frozen=True)
    class OptionalFieldSet(FieldSet):
        required_fields = ()
        optional: OptionalField
        @classmethod
        def opt_out(cls, tgt: Target) -> bool:
            return tgt.get(OptOutField).value is True
    required_addr = Address("", target_name="required")
    required_tgt = TargetWithRequired({RequiredField.alias: "configured"}, required_addr)
    optional_addr = Address("", target_name="unrelated")
    optional_tgt = TargetWithoutRequired({OptionalField.alias: "configured"}, optional_addr)
    no_fields_addr = Address("", target_name="no_fields")
    no_fields_tgt = NoFieldsTarget({}, no_fields_addr)
    opt_out_addr = Address("", target_name="conditional")
    opt_out_tgt = OptOutTarget(
        {RequiredField.alias: "configured", OptOutField.alias: True}, opt_out_addr
    )
    # Only the target with all required fields (and not opted out) is applicable.
    assert RequiredFieldSet.is_applicable(required_tgt) is True
    for tgt in [optional_tgt, no_fields_tgt, opt_out_tgt]:
        assert RequiredFieldSet.is_applicable(tgt) is False
    # When no fields are required, every target is applicable _unless_ it has been opted out of.
    for tgt in [required_tgt, optional_tgt, no_fields_tgt]:
        assert OptionalFieldSet.is_applicable(tgt) is True
    assert OptionalFieldSet.is_applicable(opt_out_tgt) is False
    required_fs = RequiredFieldSet.create(required_tgt)
    assert required_fs.address == required_addr
    assert required_fs.required.value == "configured"
    # The optional field is not registered on the target, so its default is used.
    assert required_fs.optional.value == OptionalField.default
    assert isinstance(required_fs.required_fields, tuple)
    # Creating a field set from a target missing a required field fails.
    with pytest.raises(KeyError):
        RequiredFieldSet.create(optional_tgt)
    # It is possible to create a target that should be opted out of; the caller must call
    # `.is_applicable()` first.
    opt_out_fs = RequiredFieldSet.create(opt_out_tgt)
    assert opt_out_fs.address == opt_out_addr
    assert opt_out_fs.required.value == "configured"
    assert opt_out_fs.optional.value == OptionalField.default
    assert isinstance(required_fs.required_fields, tuple)
    assert OptionalFieldSet.create(optional_tgt).optional.value == "configured"
    assert OptionalFieldSet.create(no_fields_tgt).optional.value == OptionalField.default
# -----------------------------------------------------------------------------------------------
# Test Field templates
# -----------------------------------------------------------------------------------------------
def test_scalar_field() -> None:
    """ScalarField enforces that the raw value is an instance of `expected_type`."""
    @dataclass(frozen=True)
    class CustomObject:
        pass
    class Example(ScalarField):
        alias = "example"
        expected_type = CustomObject
        expected_type_description = "a `CustomObject` instance"
        @classmethod
        def compute_value(
            cls, raw_value: Optional[CustomObject], address: Address
        ) -> Optional[CustomObject]:
            return super().compute_value(raw_value, address)
    addr = Address("", target_name="example")
    # A value of the wrong type raises, and the error explains the expected type.
    with pytest.raises(InvalidFieldTypeException) as exc:
        Example(1, addr)
    assert Example.expected_type_description in str(exc.value)
    # Correctly-typed values and None pass through unchanged.
    assert Example(CustomObject(), addr).value == CustomObject()
    assert Example(None, addr).value is None
def test_string_field_valid_choices() -> None:
    """`valid_choices` may be given as a tuple of strings or as an Enum."""
    class GivenStrings(StringField):
        alias = "example"
        valid_choices = ("kale", "spinach")
    class LeafyGreens(Enum):
        KALE = "kale"
        SPINACH = "spinach"
    class GivenEnum(StringField):
        alias = "example"
        valid_choices = LeafyGreens
        default = LeafyGreens.KALE.value
    addr = Address("", target_name="example")
    assert GivenStrings("spinach", addr).value == "spinach"
    assert GivenEnum("spinach", addr).value == "spinach"
    # None falls back to the default (None when no default is declared).
    assert GivenStrings(None, addr).value is None
    assert GivenEnum(None, addr).value == "kale"
    # Values outside the declared choices are rejected.
    with pytest.raises(InvalidFieldChoiceException):
        GivenStrings("carrot", addr)
    with pytest.raises(InvalidFieldChoiceException):
        GivenEnum("carrot", addr)
@pytest.mark.parametrize("field_cls", [IntField, FloatField])
def test_int_float_fields_valid_numbers(field_cls: type) -> None:
    """`valid_numbers` restricts the sign of IntField/FloatField values."""
    class AllNums(field_cls): # type: ignore[valid-type,misc]
        alias = "all_nums"
        valid_numbers = ValidNumbers.all
    class PositiveAndZero(field_cls): # type: ignore[valid-type,misc]
        alias = "positive_and_zero"
        valid_numbers = ValidNumbers.positive_and_zero
    class PositiveOnly(field_cls): # type: ignore[valid-type,misc]
        alias = "positive_only"
        valid_numbers = ValidNumbers.positive_only
    addr = Address("nums")
    # Pick int or float literals to match the parametrized field class.
    neg = -1 if issubclass(field_cls, IntField) else -1.0
    zero = 0 if issubclass(field_cls, IntField) else 0.0
    pos = 1 if issubclass(field_cls, IntField) else 1.0
    assert AllNums(neg, addr).value == neg
    assert AllNums(zero, addr).value == zero
    assert AllNums(pos, addr).value == pos
    # positive_and_zero rejects negatives only.
    with pytest.raises(InvalidFieldException):
        PositiveAndZero(neg, addr)
    assert PositiveAndZero(zero, addr).value == zero
    assert PositiveAndZero(pos, addr).value == pos
    # positive_only rejects both negatives and zero.
    with pytest.raises(InvalidFieldException):
        PositiveOnly(neg, addr)
    with pytest.raises(InvalidFieldException):
        PositiveOnly(zero, addr)
    assert PositiveOnly(pos, addr).value == pos
def test_sequence_field() -> None:
    """SequenceField normalizes any iterable of the expected element type to a tuple."""
    @dataclass(frozen=True)
    class CustomObject:
        pass
    class Example(SequenceField):
        alias = "example"
        expected_element_type = CustomObject
        expected_type_description = "an iterable of `CustomObject` instances"
        @classmethod
        def compute_value(
            cls, raw_value: Optional[Iterable[CustomObject]], address: Address
        ) -> Optional[Tuple[CustomObject, ...]]:
            return super().compute_value(raw_value, address)
    addr = Address("", target_name="example")
    def assert_flexible_constructor(raw_value: Iterable[CustomObject]) -> None:
        assert Example(raw_value, addr).value == tuple(raw_value)
    # Lists, tuples, and other iterables are all accepted.
    assert_flexible_constructor([CustomObject(), CustomObject()])
    assert_flexible_constructor((CustomObject(), CustomObject()))
    assert_flexible_constructor(OrderedSet([CustomObject(), CustomObject()]))
    # Must be given a sequence, not a single element.
    with pytest.raises(InvalidFieldTypeException) as exc:
        Example(CustomObject(), addr)
    assert Example.expected_type_description in str(exc.value)
    # All elements must be the expected type.
    with pytest.raises(InvalidFieldTypeException):
        Example([CustomObject(), 1, CustomObject()], addr)
def test_string_sequence_field() -> None:
    """StringSequenceField accepts iterables of strings and rejects everything else."""
    class Example(StringSequenceField):
        alias = "example"

    addr = Address("", target_name="example")
    # A list of strings is normalized to a tuple; None passes through.
    assert Example(["hello", "world"], addr).value == ("hello", "world")
    assert Example(None, addr).value is None
    # A bare string (although iterable) and mixed-type sequences are rejected.
    for bad_value in ("strings are technically iterable...", ["hello", 0, "world"]):
        with pytest.raises(InvalidFieldTypeException):
            Example(bad_value, addr)
def test_dict_string_to_string_field() -> None:
    """DictStringToStringField normalizes dicts of str -> str into a FrozenDict."""
    class Example(DictStringToStringField):
        alias = "example"
    addr = Address("", target_name="example")
    assert Example(None, addr).value is None
    assert Example({}, addr).value == FrozenDict()
    assert Example({"hello": "world"}, addr).value == FrozenDict({"hello": "world"})
    def assert_invalid_type(raw_value: Any) -> None:
        with pytest.raises(InvalidFieldTypeException):
            Example(raw_value, addr)
    # Non-dicts, and dicts with non-str keys or values, are rejected.
    for v in [0, object(), "hello", ["hello"], {"hello": 0}, {0: "world"}]:
        assert_invalid_type(v)
    # Regression test that a default can be set.
    class ExampleDefault(DictStringToStringField):
        alias = "example"
        # Note that we use `FrozenDict` so that the object can be hashable.
        default = FrozenDict({"default": "val"})
    assert ExampleDefault(None, addr).value == FrozenDict({"default": "val"})
def test_nested_dict_string_to_string_field() -> None:
    """NestedDictStringToStringField normalizes str -> (str -> str) dicts to nested FrozenDicts."""
    class Example(NestedDictStringToStringField):
        alias = "example"
    addr = Address("", target_name="example")
    assert Example(None, address=addr).value is None
    assert Example({}, address=addr).value == FrozenDict()
    assert Example({"greeting": {"hello": "world"}}, address=addr).value == FrozenDict(
        {"greeting": FrozenDict({"hello": "world"})}
    )
    def assert_invalid_type(raw_value: Any) -> None:
        with pytest.raises(InvalidFieldTypeException):
            Example(raw_value, address=addr)
    # Anything that is not a dict of str -> dict[str, str] is rejected,
    # including flat str -> str dicts.
    for v in [
        0,
        object(),
        "hello",
        ["hello"],
        ["hello", "world"],
        {"hello": 0},
        {0: "world"},
        {"hello": "world"},
    ]:
        assert_invalid_type(v)
    # Regression test that a default can be set.
    class ExampleDefault(NestedDictStringToStringField):
        alias = "example"
        # Note that we use `FrozenDict` so that the object can be hashable.
        default = FrozenDict({"nest": FrozenDict({"default": "val"})})
    assert ExampleDefault(None, address=addr).value == FrozenDict(
        {"nest": FrozenDict({"default": "val"})}
    )
def test_dict_string_to_string_sequence_field() -> None:
    """DictStringToStringSequenceField normalizes str -> iterable[str] to a FrozenDict of tuples."""
    class Example(DictStringToStringSequenceField):
        alias = "example"
    addr = Address("", target_name="example")
    def assert_flexible_constructor(raw_value: Dict[str, Iterable[str]]) -> None:
        assert Example(raw_value, addr).value == FrozenDict(
            {k: tuple(v) for k, v in raw_value.items()}
        )
    # Tuples, lists, and other iterables are all accepted as values.
    for v in [("hello", "world"), ["hello", "world"], OrderedSet(["hello", "world"])]:
        assert_flexible_constructor({"greeting": v})
    def assert_invalid_type(raw_value: Any) -> None:
        with pytest.raises(InvalidFieldTypeException):
            Example(raw_value, addr)
    # Non-dicts, str values, and non-str keys are rejected.
    for v in [ # type: ignore[assignment]
        0,
        object(),
        "hello",
        ["hello"],
        {"hello": "world"},
        {0: ["world"]},
    ]:
        assert_invalid_type(v)
    # Regression test that a default can be set.
    class ExampleDefault(DictStringToStringSequenceField):
        alias = "example"
        # Note that we use `FrozenDict` so that the object can be hashable.
        default = FrozenDict({"default": ("val",)})
    assert ExampleDefault(None, addr).value == FrozenDict({"default": ("val",)})
# -----------------------------------------------------------------------------------------------
# Test `SourcesField` helper functions
# -----------------------------------------------------------------------------------------------
def test_targets_with_sources_types() -> None:
    """Filter targets by sources type, honoring codegen output types.

    A codegen target counts as having the requested sources type when a
    registered `GenerateSourcesRequest` can generate that type from it.
    """
    class Sources1(MultipleSourcesField):
        pass
    class Sources2(SingleSourceField):
        pass
    class CodegenSources(MultipleSourcesField):
        pass
    class Tgt1(Target):
        alias = "tgt1"
        core_fields = (Sources1,)
    class Tgt2(Target):
        alias = "tgt2"
        core_fields = (Sources2,)
    class CodegenTgt(Target):
        alias = "codegen_tgt"
        core_fields = (CodegenSources,)
    class GenSources(GenerateSourcesRequest):
        input = CodegenSources
        output = Sources1
    tgt1 = Tgt1({}, Address("tgt1"))
    tgt2 = Tgt2({SingleSourceField.alias: "foo.ext"}, Address("tgt2"))
    codegen_tgt = CodegenTgt({}, Address("codegen_tgt"))
    # The codegen target is included because GenSources can output Sources1.
    result = targets_with_sources_types(
        [Sources1],
        [tgt1, tgt2, codegen_tgt],
        union_membership=UnionMembership({GenerateSourcesRequest: [GenSources]}),
    )
    assert set(result) == {tgt1, codegen_tgt}
    # No codegen request outputs Sources2, so only the direct owner matches.
    result = targets_with_sources_types(
        [Sources2],
        [tgt1, tgt2, codegen_tgt],
        union_membership=UnionMembership({GenerateSourcesRequest: [GenSources]}),
    )
    assert set(result) == {tgt2}
# Sentinel marking a namedtuple attribute that the tests below should not compare.
SKIP = object()
# Expected attribute values for the PathGlobs produced by `path_globs()`; any
# attribute left as SKIP is ignored by the comparison loop in the tests below.
expected_path_globs = namedtuple(
    "expected_path_globs",
    ["globs", "glob_match_error_behavior", "conjunction", "description_of_origin"],
    defaults=(SKIP, SKIP, SKIP, SKIP),
)
@pytest.mark.parametrize(
    "default_value, field_value, expected",
    [
        pytest.param(
            None,
            None,
            expected_path_globs(globs=()),
            id="empty",
        ),
        # When the default is used, glob-match errors are ignored.
        pytest.param(
            ["*"],
            None,
            expected_path_globs(
                globs=("test/*",),
                glob_match_error_behavior=GlobMatchErrorBehavior.ignore,
                conjunction=GlobExpansionConjunction.any_match,
                description_of_origin=None,
            ),
            id="default ignores glob match error",
        ),
        # An explicitly-provided value uses the stricter behavior.
        pytest.param(
            ["*"],
            ["a", "b"],
            expected_path_globs(
                globs=(
                    "test/a",
                    "test/b",
                ),
                glob_match_error_behavior=GlobMatchErrorBehavior.warn,
                conjunction=GlobExpansionConjunction.all_match,
                description_of_origin="test:test's `sources` field",
            ),
            id="provided value warns on glob match error",
        ),
    ],
)
def test_multiple_sources_path_globs(
    default_value: Any, field_value: Any, expected: expected_path_globs
) -> None:
    """`MultipleSourcesField.path_globs` is lenient for defaults, strict for explicit values."""
    class TestMultipleSourcesField(MultipleSourcesField):
        default = default_value
        default_glob_match_error_behavior = GlobMatchErrorBehavior.ignore
    sources = TestMultipleSourcesField(field_value, Address("test"))
    actual = sources.path_globs(FilesNotFoundBehavior.warn)
    # Only compare the attributes this test case cares about (non-SKIP).
    for attr, expect in zip(expected._fields, expected):
        if expect is not SKIP:
            assert getattr(actual, attr) == expect
@pytest.mark.parametrize(
    "default_value, field_value, expected",
    [
        pytest.param(
            None,
            None,
            expected_path_globs(globs=()),
            id="empty",
        ),
        # When the default is used, glob-match errors are ignored.
        pytest.param(
            "file",
            None,
            expected_path_globs(
                globs=("test/file",),
                glob_match_error_behavior=GlobMatchErrorBehavior.ignore,
                conjunction=GlobExpansionConjunction.any_match,
                description_of_origin=None,
            ),
            id="default ignores glob match error",
        ),
        # An explicitly-provided value uses the stricter behavior.
        pytest.param(
            "default_file",
            "other_file",
            expected_path_globs(
                globs=("test/other_file",),
                glob_match_error_behavior=GlobMatchErrorBehavior.warn,
                conjunction=GlobExpansionConjunction.all_match,
                description_of_origin="test:test's `source` field",
            ),
            id="provided value warns on glob match error",
        ),
        pytest.param(
            "file",
            "life",
            expected_path_globs(
                globs=("test/life",),
                glob_match_error_behavior=GlobMatchErrorBehavior.warn,
                conjunction=GlobExpansionConjunction.all_match,
                description_of_origin="test:test's `source` field",
            ),
            id="default glob conjunction",
        ),
    ],
)
def test_single_source_path_globs(
    default_value: Any, field_value: Any, expected: expected_path_globs
) -> None:
    """`SingleSourceField.path_globs` is lenient for defaults, strict for explicit values."""
    class TestSingleSourceField(SingleSourceField):
        default = default_value
        default_glob_match_error_behavior = GlobMatchErrorBehavior.ignore
        required = False
    sources = TestSingleSourceField(field_value, Address("test"))
    actual = sources.path_globs(FilesNotFoundBehavior.warn)
    # Only compare the attributes this test case cares about (non-SKIP).
    for attr, expect in zip(expected._fields, expected):
        if expect is not SKIP:
            assert getattr(actual, attr) == expect
def test_single_source_file_path() -> None:
    """`file_path` joins the address's directory with the source name; None stays None."""
    class TestSingleSourceField(OptionalSingleSourceField):
        pass

    addr = Address("project")
    assert TestSingleSourceField(None, addr).file_path is None
    assert TestSingleSourceField("f.ext", addr).file_path == "project/f.ext"
def test_single_source_field_bans_globs() -> None:
    """SingleSourceField rejects glob (`*`) and ignore (`!`) syntax."""
    class TestSingleSourceField(SingleSourceField):
        pass

    addr = Address("project")
    for banned_value in ("*.ext", "!f.ext"):
        with pytest.raises(InvalidFieldException):
            TestSingleSourceField(banned_value, addr)
# -----------------------------------------------------------------------------------------------
# Test `ExplicitlyProvidedDependencies` helper functions
# -----------------------------------------------------------------------------------------------
def test_explicitly_provided_dependencies_any_are_covered_by_includes() -> None:
    """`any_are_covered_by_includes` matches exact addresses and target generators."""
    addr = Address("", target_name="a")
    generated_addr = Address("", target_name="b", generated_name="gen")
    epd = ExplicitlyProvidedDependencies(
        Address("", target_name="input_tgt"),
        includes=FrozenOrderedSet([addr, generated_addr]),
        ignores=FrozenOrderedSet(),
    )
    # Direct membership, for both normal and generated addresses.
    assert epd.any_are_covered_by_includes(()) is False
    assert epd.any_are_covered_by_includes((addr,)) is True
    assert epd.any_are_covered_by_includes((generated_addr,)) is True
    assert epd.any_are_covered_by_includes((addr, generated_addr)) is True
    # Generated targets are covered if their original target generator is in the includes.
    assert (
        epd.any_are_covered_by_includes((Address("", target_name="a", generated_name="gen"),))
        is True
    )
    # Addresses absent from the includes are not covered.
    assert epd.any_are_covered_by_includes((Address("", target_name="x"),)) is False
    assert (
        epd.any_are_covered_by_includes((Address("", target_name="x", generated_name="gen"),))
        is False
    )
    # Ensure we check for _any_, not _all_.
    assert epd.any_are_covered_by_includes((Address("", target_name="x"), addr)) is True
def test_explicitly_provided_dependencies_remaining_after_disambiguation() -> None:
    """Ambiguous owners can be pruned via explicit ignores or ancestor filtering."""
    # First check disambiguation via ignores (`!` and `!!`).
    addr = Address("", target_name="a")
    generated_addr = Address("", target_name="b", generated_name="gen")
    epd = ExplicitlyProvidedDependencies(
        Address("", target_name="input_tgt"),
        includes=FrozenOrderedSet(),
        ignores=FrozenOrderedSet([addr, generated_addr]),
    )
    def assert_disambiguated_via_ignores(ambiguous: List[Address], expected: Set[Address]) -> None:
        assert (
            epd.remaining_after_disambiguation(tuple(ambiguous), owners_must_be_ancestors=False)
            == expected
        )
    # Everything in the ignores set is removed.
    assert_disambiguated_via_ignores([], set())
    assert_disambiguated_via_ignores([addr], set())
    assert_disambiguated_via_ignores([generated_addr], set())
    assert_disambiguated_via_ignores([addr, generated_addr], set())
    # Generated targets are covered if their original target generator is in the ignores.
    assert_disambiguated_via_ignores([Address("", target_name="a", generated_name="gen")], set())
    # Addresses not in the ignores remain as candidates.
    bad_tgt = Address("", target_name="x")
    bad_generated_tgt = Address("", target_name="x", generated_name="gen")
    assert_disambiguated_via_ignores([bad_tgt], {bad_tgt})
    assert_disambiguated_via_ignores([bad_generated_tgt], {bad_generated_tgt})
    assert_disambiguated_via_ignores([bad_generated_tgt, addr, generated_addr], {bad_generated_tgt})
    # Check disambiguation via `owners_must_be_ancestors`.
    epd = ExplicitlyProvidedDependencies(
        Address("src/lang/project"), FrozenOrderedSet(), FrozenOrderedSet()
    )
    # Ancestors include same-directory targets, parent directories, and the build root.
    valid_candidates = {
        Address("src/lang/project", target_name="another_tgt"),
        Address("src/lang"),
        Address("src"),
        Address("", target_name="root_owner"),
    }
    # Siblings and subdirectories are not ancestors and get pruned.
    invalid_candidates = {
        Address("tests/lang"),
        Address("src/another_lang"),
        Address("src/lang/another_project"),
        Address("src/lang/project/subdir"),
    }
    assert (
        epd.remaining_after_disambiguation(
            (*valid_candidates, *invalid_candidates), owners_must_be_ancestors=True
        )
        == valid_candidates
    )
def test_explicitly_provided_dependencies_disambiguated() -> None:
    """`disambiguated` returns the single surviving owner, or None if ambiguity remains."""
    def get_disambiguated(
        ambiguous: List[Address],
        *,
        ignores: Optional[List[Address]] = None,
        includes: Optional[List[Address]] = None,
        owners_must_be_ancestors: bool = False,
    ) -> Optional[Address]:
        epd = ExplicitlyProvidedDependencies(
            address=Address("dir", target_name="input_tgt"),
            includes=FrozenOrderedSet(includes or []),
            ignores=FrozenOrderedSet(ignores or []),
        )
        return epd.disambiguated(
            tuple(ambiguous), owners_must_be_ancestors=owners_must_be_ancestors
        )
    # A mix of normal and generated addresses.
    addr_a = Address("dir", target_name="a", generated_name="gen")
    addr_b = Address("dir", target_name="b", generated_name="gen")
    addr_c = Address("dir", target_name="c")
    all_addr = [addr_a, addr_b, addr_c]
    # If 1 target remains, it's disambiguated. Note that ignores can be normal or generated targets.
    assert get_disambiguated(all_addr, ignores=[addr_b, addr_c]) == addr_a
    assert (
        get_disambiguated(all_addr, ignores=[addr_b.maybe_convert_to_target_generator(), addr_c])
        == addr_a
    )
    # If more than one (or zero) candidates remain, there is no disambiguation.
    assert get_disambiguated(all_addr, ignores=[addr_a]) is None
    assert get_disambiguated(all_addr, ignores=[addr_a.maybe_convert_to_target_generator()]) is None
    assert get_disambiguated(all_addr, ignores=all_addr) is None
    assert get_disambiguated([]) is None
    # If any includes would disambiguate the ambiguous target, we don't consider disambiguating
    # via excludes as the user has already explicitly disambiguated the module.
    assert get_disambiguated(all_addr, ignores=[addr_a, addr_b], includes=[addr_a]) is None
    assert (
        get_disambiguated(
            ambiguous=all_addr,
            ignores=[addr_a, addr_b],
            includes=[addr_a.maybe_convert_to_target_generator()],
        )
        is None
    )
    # You can also disambiguate via `owners_must_be_ancestors`.
    another_dir = Address("another_dir")
    assert get_disambiguated([addr_a, another_dir], owners_must_be_ancestors=True) == addr_a
    assert get_disambiguated([addr_a, another_dir], owners_must_be_ancestors=False) is None
    # Ignores and ancestor filtering compose.
    assert (
        get_disambiguated(
            [addr_a, addr_b, another_dir], ignores=[addr_b], owners_must_be_ancestors=True
        )
        == addr_a
    )
def test_explicitly_provided_dependencies_maybe_warn_of_ambiguous_dependency_inference(
    caplog,
) -> None:
    """A warning is logged only when ambiguity survives ignores/includes/ancestor filtering."""
    def maybe_warn(
        ambiguous: List[Address],
        *,
        ignores: Optional[List[Address]] = None,
        includes: Optional[List[Address]] = None,
        owners_must_be_ancestors: bool = False,
    ) -> None:
        # Clear captured records so each call can be asserted independently.
        caplog.clear()
        epd = ExplicitlyProvidedDependencies(
            Address("dir", target_name="input_tgt"),
            includes=FrozenOrderedSet(includes or []),
            ignores=FrozenOrderedSet(ignores or []),
        )
        epd.maybe_warn_of_ambiguous_dependency_inference(
            tuple(ambiguous),
            Address("some_dir"),
            import_reference="file",
            context="foo",
            owners_must_be_ancestors=owners_must_be_ancestors,
        )
    # No candidates, nothing to warn about.
    maybe_warn([])
    assert not caplog.records
    # A mix of normal and generated addresses.
    addr_a = Address("dir", target_name="a", generated_name="gen")
    addr_b = Address("dir", target_name="b", generated_name="gen")
    addr_c = Address("dir", target_name="c")
    all_addr = [addr_a, addr_b, addr_c]
    # Unresolved ambiguity logs a single warning listing all owners.
    maybe_warn(all_addr)
    assert len(caplog.records) == 1
    assert f"['{addr_a}', '{addr_b}', '{addr_c}']" in caplog.text
    # Ignored addresses do not show up in the list of ambiguous owners, including for ignores of
    # both file and BUILD targets.
    maybe_warn(all_addr, ignores=[addr_b])
    assert len(caplog.records) == 1
    assert f"['{addr_a}', '{addr_c}']" in caplog.text
    maybe_warn(all_addr, ignores=[addr_b.maybe_convert_to_target_generator()])
    assert len(caplog.records) == 1
    assert f"['{addr_a}', '{addr_c}']" in caplog.text
    # Disambiguating via ignores turns off the warning, including for ignores of both normal and
    # generated targets.
    maybe_warn(all_addr, ignores=[addr_a, addr_b])
    assert not caplog.records
    maybe_warn(
        all_addr,
        ignores=[
            addr_a.maybe_convert_to_target_generator(),
            addr_b.maybe_convert_to_target_generator(),
        ],
    )
    assert not caplog.records
    # Including a target turns off the warning, including for includes of both normal and generated
    # targets.
    maybe_warn(all_addr, includes=[addr_a])
    assert not caplog.records
    maybe_warn(all_addr, includes=[addr_a.maybe_convert_to_target_generator()])
    assert not caplog.records
    # You can also disambiguate via `owners_must_be_ancestors`.
    another_dir = Address("another_dir")
    maybe_warn([addr_a, another_dir], owners_must_be_ancestors=True)
    assert not caplog.records
    maybe_warn([addr_a, another_dir], owners_must_be_ancestors=False)
    assert len(caplog.records) == 1
    assert f"['{another_dir}', '{addr_a}']" in caplog.text
    maybe_warn([addr_a, addr_b, another_dir], ignores=[addr_b], owners_must_be_ancestors=True)
    assert not caplog.records
# -----------------------------------------------------------------------------------------------
# Test `overrides` field
# -----------------------------------------------------------------------------------------------
@pytest.mark.parametrize(
    "raw_value",
    [
        # Each entry is an invalid raw value for `overrides`: non-dict
        # top-level values, non-string keys, and non-dict override values.
        0,
        object(),
        "hello",
        ["hello"],
        ["hello", "world"],
        {"hello": 0},
        {0: "world"},
        {"hello": "world"},
        {("hello",): "world"},
        {("hello",): ["world"]},
        {(0,): {"field": "value"}},
        {("hello",): {0: "value"}},
    ],
)
def test_overrides_field_data_validation(raw_value: Any) -> None:
    """Constructing OverridesField with a malformed value must raise."""
    with pytest.raises(InvalidFieldTypeException):
        OverridesField(raw_value, Address("", target_name="example"))
def test_overrides_field_normalization() -> None:
    """Check OverridesField value normalization, hashability, and flattening."""
    addr = Address("", target_name="example")
    # `None` and `{}` pass through unchanged.
    assert OverridesField(None, addr).value is None
    assert OverridesField({}, addr).value == {}
    # Note that `list_field` is not hashable. We have to override `__hash__` for this to work.
    tgt1_override = {"str_field": "value", "list_field": [0, 1, 3]}
    tgt2_override = {"int_field": 0, "dict_field": {"a": 0}}
    # Convert a `str` key to `tuple[str, ...]`.
    field = OverridesField({"tgt1": tgt1_override, ("tgt1", "tgt2"): tgt2_override}, addr)
    assert field.value == {("tgt1",): tgt1_override, ("tgt1", "tgt2"): tgt2_override}
    with no_exception():
        hash(field)
    # For file-based overrides, each key becomes a glob relative to the address's directory.
    path_field = OverridesField(
        {"foo.ext": tgt1_override, ("foo.ext", "bar*.ext"): tgt2_override}, Address("dir")
    )
    globs = OverridesField.to_path_globs(
        Address("dir"), path_field.flatten(), FilesNotFoundBehavior.error
    )
    assert [path_globs.globs for path_globs in globs] == [
        ("dir/foo.ext",),
        ("dir/bar*.ext",),
    ]
    # Every path matched by a glob maps to that glob's overrides.
    assert OverridesField.flatten_paths(
        addr,
        [
            (paths, globs, overrides)
            for (paths, overrides), globs in zip(
                [
                    (Paths(("dir/foo.ext",), ()), tgt1_override),
                    (Paths(("dir/bar1.ext", "dir/bar2.ext"), ()), tgt2_override),
                ],
                globs,
            )
        ],
    ) == {
        "dir/foo.ext": tgt1_override,
        "dir/bar1.ext": tgt2_override,
        "dir/bar2.ext": tgt2_override,
    }
    # flatten() merges distinct override dicts that apply to the same key.
    assert path_field.flatten() == {
        "foo.ext": {**tgt1_override, **tgt2_override},
        "bar*.ext": tgt2_override,
    }
    with pytest.raises(InvalidFieldException):
        # Same field is overridden for the same file multiple times, which is an error.
        OverridesField.flatten_paths(
            addr,
            [
                (Paths(("dir/foo.ext",), ()), PathGlobs([]), tgt1_override),
                (Paths(("dir/foo.ext", "dir/bar.ext"), ()), PathGlobs([]), tgt1_override),
            ],
        )
|
|
#!/usr/bin/env python
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import astropy.io.ascii as ascii_io
import fitsio
import bass
import bokextract
datadir = '/global/scratch2/sd/imcgreer/'
ndwfs_starfile = datadir+'ndwfs/starcat.fits'
bootes_sdss_starfile = datadir+'ndwfs/sdss_bootes_gstars.fits'
cfhtlswide_starfile = datadir+'cfhtls/CFHTLSW3_starcat.fits'
cfhtlsdeep_starfile = datadir+'cfhtls/CFHTLSD3_starcat.fits'
def cfhtw3_tiles(observed=True):
    """Return BASS tiles overlapping the CFHTLS W3 field.

    RA limits are given in hours and converted to degrees (x15).
    """
    ra_min = 15 * (13. + 50 / 60.)
    ra_max = 15 * (14 + 45. / 60)
    dec_min, dec_max = 50.7, 56.2
    return bass.region_tiles(ra_min, ra_max, dec_min, dec_max,
                             observed=observed)
def ndwfs_tiles(observed=True):
    """Return BASS tiles overlapping the NDWFS Bootes field.

    RA limits are given in hours and converted to degrees (x15).
    """
    ra_min = 15 * 14.37
    ra_max = 15 * 14.62
    dec_min, dec_max = 32.5, 36.1
    return bass.region_tiles(ra_min, ra_max, dec_min, dec_max,
                             observed=observed)
def panstarrs_md_tiles(observed=True):
    """Return BASS tiles around each Pan-STARRS medium-deep field.

    Returns a dict mapping field name -> tile list for a +/-3.5 deg box
    centered on each field, with the RA half-width stretched by
    1/cos(dec) so the box has constant angular size.
    """
    field_centers = [
        ('MD03', 130.592, +44.317),
        ('MD05', 161.917, +58.083),
        ('MD06', 185.000, +47.117),
        ('MD07', 213.704, +53.083),
        ('MD08', 242.787, +54.950),
    ]
    tiles = {}
    for field, ra, dec in field_centers:
        dra = 3.5 / np.cos(np.radians(dec))
        tiles[field] = bass.region_tiles(ra - dra, ra + dra,
                                         dec - 3.5, dec + 3.5,
                                         observed=observed)
    return tiles
def check_fields_list():
    """Write the sorted list of reduced image files for the check fields."""
    names = sorted(
        t['utDate'] + '/' + t['fileName'] + '.fits.gz'
        for tiles in (cfhtw3_tiles(), ndwfs_tiles())
        for t in tiles
    )
    with open('checkfields_tiles.txt', 'w') as outf:
        outf.write('\n'.join(names))
def srcor(ra1, dec1, ra2, dec2, sep, return_sep=False):
    """Match two RA/Dec (degree) lists on the sky.

    Returns index arrays (i1, i2) of pairs separated by less than
    `sep` arcseconds; with return_sep=True the separations in arcsec
    are returned as a third element.
    """
    from astropy.coordinates import SkyCoord, match_coordinates_sky
    from astropy import units as u
    coo1 = SkyCoord(ra1, dec1, unit=(u.degree, u.degree))
    coo2 = SkyCoord(ra2, dec2, unit=(u.degree, u.degree))
    idx, d2d, _ = match_coordinates_sky(coo1, coo2)
    good = np.where(d2d.arcsec < sep)[0]
    if return_sep:
        return good, idx[good], d2d.arcsec[good]
    return good, idx[good]
def srcorXY(x1, y1, x2, y2, maxrad):
    """Match 2-D (pixel) positions between two lists.

    For each point in (x1, y1), find its nearest neighbor in (x2, y2);
    keep only pairs closer than `maxrad`. Returns (idx1, idx2) index
    arrays into the first and second lists respectively.
    """
    dx = np.subtract.outer(np.asarray(x1), np.asarray(x2))
    dy = np.subtract.outer(np.asarray(y1), np.asarray(y2))
    dist = np.hypot(dx, dy)
    nearest = dist.argmin(axis=1)
    rows = np.arange(dist.shape[0])
    keep = np.flatnonzero(dist[rows, nearest] < maxrad)
    return rows[keep], nearest[keep]
def match_objects(objs,tiles):
    """Match a reference catalog against per-CCD SExtractor catalogs.

    For every tile and CCD, objects from `objs` falling on the CCD are
    matched (2.5 arcsec) to the catalog; matched rows carry the measured
    'g_*' quantities, unmatched rows carry zeros. Returns a structured
    array combining the input columns with measurement and tile columns.
    """
    # Output record layout: input columns + per-object measurements + tile metadata.
    objpars = [('g_number','f4'),('g_ra','f8'),('g_dec','f8'),
               ('g_x','f4'),('g_y','f4'),
               ('g_autoMag','f4'),('g_autoMagErr','f4'),
               ('g_autoFlux','f4'),('g_autoFluxErr','f4'),
               ('g_psfMag','f4'),('g_psfMagErr','f4'),
               ('g_psfFlux','f4'),('g_psfFluxErr','f4'),
               ('g_elongation','f4'),('g_ellipticity','f4'),
               ('g_flags','i4'),('g_fluxRad','f4')]
    tilepars = [('g_utDate','S8'),('g_expTime','f4'),
                ('g_tileId','i4'),('g_ditherId','i4'),('g_ccdNum','i4')]
    dtype = objs.dtype.descr + objpars + tilepars
    # SExtractor column names, in the same order as objpars above.
    skeys = ['NUMBER','ALPHA_J2000','DELTA_J2000','X_IMAGE','Y_IMAGE',
             'MAG_AUTO','MAGERR_AUTO','FLUX_AUTO','FLUXERR_AUTO',
             'MAG_PSF','MAGERR_PSF','FLUX_PSF','FLUXERR_PSF',
             'ELONGATION','ELLIPTICITY',
             'FLAGS','FLUX_RADIUS']
    tkeys = ['utDate','expTime','tileId','ditherId']
    matches = []
    for ti,t in enumerate(tiles):
        print 'matching tile %d/%d' % (ti+1,len(tiles))
        for ccdNum in range(1,5):
            catpath = os.path.join(bass.rdxdir,t['utDate'],'ccdproc3',
                                   t['fileName']+'_ccd%d.cat.fits'%ccdNum)
            if not os.path.exists(catpath):
                print ' ... %s does not exist, skipping' % catpath
                continue
            cat = fitsio.read(catpath)
            # Pre-select reference objects inside this CCD's sky footprint,
            # shrunk by ~3e-3 deg to avoid edge effects.
            ii = np.where( (objs['ra']>cat['ALPHA_J2000'].min()+3e-3) &
                           (objs['ra']<cat['ALPHA_J2000'].max()-3e-3) &
                           (objs['dec']>cat['DELTA_J2000'].min()+3e-3) &
                           (objs['dec']<cat['DELTA_J2000'].max()-3e-3) )[0]
            if len(ii)==0:
                continue
            m1,m2 = srcor(objs['ra'][ii],objs['dec'][ii],
                          cat['ALPHA_J2000'],cat['DELTA_J2000'],2.5)
            print ' ccd%d %d/%d' % (ccdNum,len(m1),len(ii)),
            # Matched objects: reference row + measured values + tile info.
            matches.extend( [ tuple(objs[i]) +
                              tuple([cat[k][j] for k in skeys]) +
                              tuple([t[k] for k in tkeys]) + (ccdNum,)
                              for i,j in zip(ii[m1],m2) ] )
            # Unmatched objects keep zeros in all measurement columns.
            uu = np.delete(np.arange(len(ii)),m1)
            matches.extend( [ tuple(objs[i]) +
                              tuple([0]*len(skeys)) +
                              tuple([t[k] for k in tkeys]) + (ccdNum,)
                              for i in ii[uu] ] )
        print
    matches = np.array(matches,dtype=dtype)
    print 'finished with ',matches.size
    return matches
def depth_plots(matches,g_ref,gname,bypriority=True,aper='psf',**kwargs):
    """Plot BASS depth and completeness against a reference catalog.

    Parameters
    ----------
    matches : structured array from match_objects()
    g_ref : reference g magnitudes aligned row-by-row with `matches`
    gname : axis label describing the reference magnitude
    bypriority : if True, make one panel per dither priority (P1-P3)
        in addition to the all-tiles panel
    aper : 'psf' or 'auto' -- which flux measurement to use
    kwargs : saveplots/figname/figtype control optional figure output
    """
    assert aper in ['psf','auto']
    fluxk = 'g_%sFlux' % aper
    errk = 'g_%sFluxErr' % aper
    # Objects with a valid flux measurement.
    m = np.where( (matches[fluxk]>0) & (matches[errk]>0) )[0]
    gSNR = matches[fluxk][m] / matches[errk][m]
    if bypriority:
        fig1 = plt.figure(figsize=(10,8))
        plt.subplots_adjust(0.07,0.07,0.97,0.96,0.27,0.27)
    else:
        fig1 = plt.figure(figsize=(5,4.5))
        plt.subplots_adjust(0.13,0.12,0.98,0.94)
    # SNR-vs-magnitude hexbin panels: i==0 is all tiles, i>=1 per priority.
    for i in range(4):
        if bypriority:
            ax = plt.subplot(2,2,i+1)
        else:
            if i>0: break
            ax = plt.subplot(1,1,i+1)
        if i==0:
            ii = np.where(matches['g_ditherId'][m] > 0)[0]
        else:
            ii = np.where(matches['g_ditherId'][m] == i)[0]
        ax.hexbin(g_ref[m[ii]],np.log10(gSNR[ii]),
                  bins='log',cmap=plt.cm.Blues)
        # 5-sigma line and the single-epoch depth marker.
        ax.axhline(np.log10(5.0),c='r',lw=1.3,alpha=0.7)
        ax.plot([24.0-2.5*np.log10(np.sqrt(3))]*2,np.log10([3,8]),c='m',lw=1.5)
        ax.set_xlim(17.2,24.5)
        ax.set_ylim(np.log10(2),np.log10(500))
        ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.2))
        ax.yaxis.set_major_locator(ticker.FixedLocator(np.log10(
              [2,5,10,20,50,100,200])))
        ax.yaxis.set_major_formatter(ticker.FuncFormatter(
              lambda x,pos: '%d' % np.round(10**x)))
        ax.set_xlabel(gname+' mag')
        ax.set_ylabel('BASS %s flux/err' % aper.upper())
        if i==0:
            ax.set_title('all tiles')
        else:
            ax.set_title('P%d tiles' % i)
    # Detection-fraction histograms (all detections and >5-sigma).
    mbins = np.arange(18.,24.01,0.1)
    fig2 = plt.figure(figsize=(8,4))
    plt.subplots_adjust(0.07,0.14,0.97,0.97,0.25)
    ax1 = plt.subplot(121)
    ax2 = plt.subplot(122)
    for i in range(4):
        if i==0:
            ii = np.where(matches['g_ditherId'] > 0)[0]
        else:
            if not bypriority: break
            ii = np.where(matches['g_ditherId'] == i)[0]
        jj = np.where(matches[errk][ii]>0)[0]
        g5sig = ( matches[fluxk][ii[jj]] / matches[errk][ii[jj]] ) > 5.0
        tot,_ = np.histogram(g_ref[ii],mbins)
        det,_ = np.histogram(g_ref[ii[jj]],mbins)
        det5,_ = np.histogram(g_ref[ii[jj[g5sig]]],mbins)
        # BUG fix: np.float was deprecated and removed (numpy>=1.24);
        # the builtin float gives the same floating-point division.
        ax1.plot(mbins[:-1],det.astype(float)/tot,drawstyle='steps-pre',
                 c=['black','blue','green','DarkCyan'][i],lw=1.3,
                 label=['all','P1','P2','P3'][i])
        ax2.plot(mbins[:-1],det5.astype(float)/tot,drawstyle='steps-pre',
                 c=['black','blue','green','DarkCyan'][i],lw=1.3,
                 label=['all','P1','P2','P3'][i])
    ax1.set_xlabel(gname+' mag')
    ax2.set_xlabel(gname+' mag')
    ax1.set_ylabel('fraction detected')
    ax2.set_ylabel('fraction detected 5 sig')
    ax1.legend(loc='lower left')
    if kwargs.get('saveplots',False):
        figname = kwargs.get('figname','blah')
        figext = kwargs.get('figtype','png')
        fig1.savefig(figname+'_depth.'+figext)
        fig2.savefig(figname+'_complete.'+figext)
##############################################################################
# #
# NDWFS #
# #
##############################################################################
def select_ndwfs_stars():
    """Build a compact star catalog from the NDWFS DR3 matched catalogs.

    Selects compact, unflagged R-band detections (FWHM < 7 pix,
    MAG_AUTO < 24, FLAGS == 0), collects Bw/R/I auto magnitudes for
    each, and writes the result to `ndwfs_starfile`.
    """
    ndwfsdir = '/global/scratch2/sd/imcgreer/ndwfs/DR3/matchedFITS/'
    dtype = [('number','i4'),('autoMag','3f4'),('autoMagErr','3f4'),
             ('ra','f8'),('dec','f8'),('rFWHM','f4'),('rClass','f4')]
    starcat = []
    rcols = ['NUMBER','MAG_AUTO','MAGERR_AUTO','ALPHA_J2000','DELTA_J2000',
             'FWHM_IMAGE','CLASS_STAR']
    cols = ['MAG_AUTO','MAGERR_AUTO']
    # Catalogs are split into one-degree declination strips (32-36 deg).
    for dec1 in range(32,36):
        catfn = lambda b: 'NDWFS_%s_%d_%d_cat_m.fits.gz' % (b,dec1,dec1+1)
        rfits = fitsio.FITS(ndwfsdir+catfn('R'))
        bfits = fitsio.FITS(ndwfsdir+catfn('Bw'))
        ifits = fitsio.FITS(ndwfsdir+catfn('I'))
        # Star selection is done on the R-band catalog only; the same
        # rows are then read from the Bw and I catalogs.
        w = rfits[1].where('FWHM_IMAGE < 7 && MAG_AUTO < 24.0 && FLAGS == 0')
        print len(w)
        rcat = rfits[1].read(rows=w,columns=rcols)
        bcat = bfits[1].read(rows=w,columns=cols)
        icat = ifits[1].read(rows=w,columns=cols)
        stars = np.empty(len(w),dtype=dtype)
        stars['number'] = rcat['NUMBER']
        stars['ra'] = rcat['ALPHA_J2000']
        stars['dec'] = rcat['DELTA_J2000']
        stars['rFWHM'] = rcat['FWHM_IMAGE']
        stars['rClass'] = rcat['CLASS_STAR']
        # Magnitude columns are stored in (Bw, R, I) order.
        for j,cat in enumerate([bcat,rcat,icat]):
            stars['autoMag'][:,j] = cat['MAG_AUTO']
            stars['autoMagErr'][:,j] = cat['MAGERR_AUTO']
        starcat.append(stars)
    starcat = np.concatenate(starcat)
    fitsio.write(ndwfs_starfile,starcat,clobber=True)
def match_ndwfs_stars(matchRad=2.5):
    """Match the NDWFS star catalog to all observed NDWFS tiles and save."""
    starcat = fitsio.read(ndwfs_starfile)
    observed = ndwfs_tiles(observed=True)
    matched = match_objects(starcat, observed)
    fitsio.write('ndwfs_match.fits', matched, clobber=True)
def ndwfs_depth(**kwargs):
    """Make depth/completeness plots for the NDWFS matched catalog.

    A g-ish magnitude is synthesized from NDWFS Bw and R with a
    color-dependent transformation, then passed to depth_plots().
    """
    kwargs.setdefault('figname', 'ndwfs')
    ndwfsm = fitsio.read('ndwfs_match.fits')
    Bw = ndwfsm['autoMag'][:, 0]
    BwmR = Bw - ndwfsm['autoMag'][:, 1]
    # Two color branches: for Bw-R <= 1.45 use the steeper term
    # (equivalent to the original np.choose with the condition as index).
    NDWFSg = np.where(BwmR <= 1.45,
                      Bw - (0.38 * BwmR + 0.05),
                      Bw - (0.23 * BwmR + 0.25))
    # Keep objects with sane Bw and R magnitudes.
    good = np.where(np.all(ndwfsm['autoMag'][:, :2] > 0, axis=1) &
                    np.all(ndwfsm['autoMag'][:, :2] < 30, axis=1))[0]
    depth_plots(ndwfsm[good], NDWFSg[good], 'NDWFS g-ish', **kwargs)
##############################################################################
# #
# CFHTLS #
# #
##############################################################################
def match_cfhtls_stars(matchRad=2.5,survey='wide'):
    """Match CFHTLS reference stars against observed BASS tiles.

    Only survey='wide' is currently supported: no tile-selection
    function exists for the deep (D3) field, so requesting it raises
    NotImplementedError. (Previously the deep branch read the star
    catalog but never assigned `tiles`, crashing with a NameError.)
    """
    if survey=='wide':
        stars = fitsio.read(cfhtlswide_starfile)
        tiles = cfhtw3_tiles(observed=True)
        fname = 'cfhtlswide'
    elif survey=='deep':
        # BUG fix: the original fell through with `tiles` undefined.
        raise NotImplementedError('no tile list defined for the CFHTLS '
                                  'deep survey')
    else:
        raise ValueError('unknown survey: %r' % (survey,))
    matches = match_objects(stars,tiles)
    fitsio.write('%s_match.fits'%fname,matches,clobber=True)
def cfhtls_depth(**kwargs):
    """Make depth/completeness plots from the CFHTLS-wide match catalog."""
    kwargs.setdefault('figname', 'cfhtls')
    matched = fitsio.read('cfhtlswide_match.fits')
    gmag = matched['psfMag'][:, 1]
    # Keep objects with a sane reference g magnitude.
    good = np.where((gmag > 0) & (gmag < 30))[0]
    depth_plots(matched[good], gmag[good], 'CFHTLS g', bypriority=False,
                **kwargs)
# Per-CCD gain and read-noise values (2015) used by cfhtls_depth_compare,
# indexed by ccdNum-1 for CCDs 1-4.
bok_gain_2015 = [ 1.3325, 1.5225, 1.415, 1.47 ]
bok_rn_2015 = [ 7.94, 9.54, 11.81, 8.91 ]
def cfhtls_depth_compare():
    """Compare measured per-object SNR against several SNR estimators.

    For each CCD, writes an extension of 'cfhtlswide_snr.fits' holding,
    per matched CFHTLS star: the SExtractor SNR, an image-RMS-based SNR,
    a simple sky-noise estimate, and two ETC predictions (nominal and
    revised with updated gain/read-noise and PSF area).
    """
    import itertools
    import boketc
    import bokdepth
    tiles = cfhtw3_tiles(observed=True)
    cfhtlsm = fitsio.read('stuff/cfhtlswide_match.fits')
    m = np.where( (cfhtlsm['psfMag'][:,1]>20) &
                  (cfhtlsm['psfMag'][:,1]<30) )[0]
    m = cfhtlsm[m]
    for ccdNum in range(1,5):
        ents = []
        for ti,t in enumerate(tiles):
            print ccdNum,ti,len(tiles)
            # Objects measured on this tile/dither/CCD with nonzero flux.
            ii = np.where( (m['g_tileId'] == t['tileId']) &
                           (m['g_ditherId'] == t['ditherId']) &
                           (m['g_ccdNum'] == ccdNum) &
                           (m['g_psfFlux'] != 0) )[0]
            if len(ii)==0:
                continue
            impath = os.path.join(bass.rdxdir,t['utDate'],'ccdproc3',
                                  t['fileName']+'_ccd%d_pv.fits'%ccdNum)
            psfpath = os.path.join(bass.rdxdir,t['utDate'],'ccdproc3',
                                   t['fileName']+'_ccd%d.ldac_cat.psf'%ccdNum)
            if not os.path.exists(impath):
                print ' ... %s does not exist, skipping' % impath
                continue
            gain = bok_gain_2015[ccdNum-1]
            rdnoise = bok_rn_2015[ccdNum-1]
            # Image RMS in ADU and electrons, PSF pixel area A, sky in ADU.
            rmsADU,rmsEl,A,skyADU = bokdepth.calc_processed_image_rms(
                                          impath,psfpath,
                                          gain=gain, rdNoise=rdnoise,
                                          retPars=True)
            snr = m['g_psfFlux'][ii] / rmsADU
            fwhm = 2*m['g_fluxRad']*1.1 * 0.455
            skyADUps = skyADU / m['g_expTime'][ii]
            # Nominal ETC prediction using the measured FWHM per object.
            nominal_snr = [ boketc.snr_singleexposure('g',m['psfMag'][i,1],
                                                      m['g_expTime'][i],
                                                      fwhm=fwhm[i],
                                                      skyADU=skyADUps[0],
                                                      profile='gaussian')
                            for i in ii ]
            nominal_snr = np.array(nominal_snr)
            # revise the ETC calculation using updated gain and RN values,
            # as well as the noise-equivalent-gaussian determined from the
            # pixel area of the PSF
            NEG = np.sqrt(A/(4*np.pi)) * 0.455 * 2.355
            revnominal_snr = [ boketc.snr_singleexposure('g',m['psfMag'][i,1],
                                                         m['g_expTime'][i],
                                                         fwhm=NEG,
                                                         skyADU=skyADUps[0],
                                                         profile='gaussian',
                                                         gain=gain,
                                                         rdnoise=rdnoise)
                               for i in ii ]
            revnominal_snr = np.array(revnominal_snr)
            # Simple source+noise estimate in electrons, and SExtractor SNR.
            objEl = m['g_psfFlux'][ii] * gain
            est_snr = objEl / np.sqrt(objEl + rmsEl**2)
            sex_snr = m['g_psfFlux'][ii] / m['g_psfFluxErr'][ii]
            ents.extend( [ vals for vals in itertools.izip(ii,
                                              m['psfMag'][ii,1],
                                              [A]*len(ii),
                                              skyADUps,fwhm,
                                              snr,nominal_snr,
                                              est_snr,sex_snr,
                                              revnominal_snr) ] )
        ents = np.array(ents,dtype=[('ii','i4'),('refMag','f4'),
                                    ('psfArea','f4'),('skyADUperSec','f4'),
                                    ('fwhm','f4'),
                                    ('snrRMS','f4'),('snrETC','f4'),
                                    ('snrSky','f4'),('snrSex','f4'),
                                    ('snrETCrev','f4')])
        # CCD 1 clobbers the output file; later CCDs append extensions.
        fitsio.write('cfhtlswide_snr.fits',ents,clobber=(ccdNum==1))
def plot_cfhtls_snr_ratio(snr1='snrRMS', snr2='snrETCrev'):
    """Plot the ratio of two SNR estimates vs. reference mag, one panel per CCD."""
    hdus = fitsio.FITS('cfhtlswide_snr.fits')
    per_ccd = [hdu.read() for hdu in hdus[1:]]
    plt.figure()
    for pnum, ccd in enumerate(per_ccd, start=1):
        ax = plt.subplot(2, 2, pnum)
        ratio = ccd[snr1] / ccd[snr2]
        plt.hexbin(ccd['refMag'], ratio,
                   extent=(20, 23.5, 0.5, 1.5), cmap=plt.cm.Blues)
        plt.axhline(1, c='r')
        ax.xaxis.set_major_locator(ticker.MultipleLocator(1.0))
        ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
        ax.yaxis.set_major_locator(ticker.MultipleLocator(0.2))
        ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.05))
        ax.set_title('CCD%d' % pnum)
    plt.figtext(0.01, 0.5, 'SNR/SNR(ETC)', va='center', rotation='vertical')
    plt.figtext(0.5, 0.01, 'g mag (CFHTLS)', ha='center')
##############################################################################
# #
# Pan-STARRS Medium Deeps #
# #
##############################################################################
def match_ps1mds(matchRad=2.5):
    # Intentionally disabled: the code below the raise is unreachable and
    # depends on ps1md_starfile(), which is not defined in this module.
    raise NotImplementedError
    pstiles = panstarrs_md_tiles(observed=True)
    for field,tiles in pstiles.items():
        stars = fitsio.read(ps1md_starfile(field))
        matches = match_objects(stars,tiles)
        fitsio.write('ps1%s_match.fits'%field,matches,clobber=True)
##############################################################################
# #
# fake sources #
# #
##############################################################################
from astropy.io import fits
def fake_sdss_stars_on_tile(stars,tile,
                            nresample=200,magrange=(22.0,23.4),
                            stampSize=25,margin=50,aper='psf',
                            keepfakes=False,savestars=False):
    """Inject scaled copies of real stars into a tile's images and
    measure how well they are recovered.

    For each of the 4 CCDs, bright SDSS stars matched on the image are
    used as templates: postage stamps are scaled to random magnitudes in
    `magrange` and added at random positions, SExtractor is rerun, and
    the recovered SNRs are recorded.

    Returns (fakemags, fakesnr) arrays of length nresample*4; SNR stays
    -1 for fakes that were not recovered.
    """
    # Pixel ranges for cutting a stampSize x stampSize postage stamp.
    pixlo = lambda _x: _x-stampSize/2
    pixhi = lambda _x: _x-stampSize/2 + stampSize
    fakemags = np.zeros(nresample*4,dtype=np.float32)
    fakesnr = -np.ones_like(fakemags)
    if aper=='auto':
        magk,fluxk,errk = 'MAG_AUTO','FLUX_AUTO','FLUXERR_AUTO'
    elif aper=='psf':
        magk,fluxk,errk = 'MAG_PSF','FLUX_PSF','FLUXERR_PSF'
    else:
        raise ValueError
    for ccdNum in range(1,5):
        catpath = os.path.join(bass.rdxdir,tile['utDate'],'ccdproc3',
                               tile['fileName']+'_ccd%d.cat.fits'%ccdNum)
        if not os.path.exists(catpath):
            print ' ... %s does not exist, skipping' % catpath
            continue
        cat = fitsio.read(catpath)
        impath = os.path.join(bass.rdxdir,tile['utDate'],'ccdproc3',
                              tile['fileName']+'_ccd%d.fits'%ccdNum)
        _impath = impath.replace('.fits','_pv.fits')
        fakeim = fits.open(_impath)
        im = fakeim[0].data
        nY,nX = im.shape
        # Reference stars falling on this CCD (with a small edge buffer).
        ii = np.where( (stars['ra']>cat['ALPHA_J2000'].min()+3e-3) &
                       (stars['ra']<cat['ALPHA_J2000'].max()-3e-3) &
                       (stars['dec']>cat['DELTA_J2000'].min()+3e-3) &
                       (stars['dec']<cat['DELTA_J2000'].max()-3e-3) )[0]
        if len(ii)==0:
            print 'no stars found on ccd #',ccdNum
            continue
        m1,m2 = srcor(stars['ra'][ii],stars['dec'][ii],
                      cat['ALPHA_J2000'],cat['DELTA_J2000'],2.5)
        # Only cleanly-detected (FLAGS==0) stars serve as templates.
        jj = np.where(cat['FLAGS'][m2] == 0)[0]
        rindx = np.random.choice(len(jj),size=nresample,replace=True)
        # Draw fake magnitudes uniformly in magrange; scale each template
        # by the flux ratio relative to its SDSS g magnitude.
        fakemag = magrange[0] + \
                  (magrange[1]-magrange[0])*np.random.random(nresample)
        fscale = 10**(-0.4*(fakemag-stars['psfMag_g'][ii[m1[jj[rindx]]]]))
        print 'matched %d/%d stars, max scale factor %.2e' % \
              (len(m1),len(ii),fscale.max())
        fakex = np.random.randint(margin,nX-margin,nresample)
        fakey = np.random.randint(margin,nY-margin,nresample)
        # Add the scaled postage stamps at the random positions.
        for x,y,fx,fy,fscl in zip(np.round(cat['X_IMAGE'][m2[jj[rindx]]]),
                                  np.round(cat['Y_IMAGE'][m2[jj[rindx]]]),
                                  fakex,fakey,fscale):
            stamp = im[pixlo(y):pixhi(y),pixlo(x):pixhi(x)]
            im[pixlo(fy):pixhi(fy),pixlo(fx):pixhi(fx)] += fscl*stamp
        fakeimpath = impath.replace('.fits','_fake.fits')
        fakecatpath = fakeimpath.replace('.fits','.cat.fits')
        fakeim.writeto(fakeimpath,clobber=True)
        # Rerun SExtractor on the fake image using the original PSF model.
        bokextract.sextract(fakeimpath,frompv=False,redo=True,
                            withpsf=True,redopsf=False,
                            psfpath=impath.replace('.fits','.ldac_cat.psf'))
        fakecat = fitsio.read(fakecatpath)
        # Recover the injected fakes by pixel position (3 pix tolerance).
        q1,q2 = srcorXY(fakex,fakey,fakecat['X_IMAGE'],fakecat['Y_IMAGE'],3.0)
        snr = fakecat[fluxk][q2] / fakecat[errk][q2]
        fakemags[nresample*(ccdNum-1):nresample*ccdNum] = fakemag
        fakesnr[nresample*(ccdNum-1):nresample*ccdNum][q1] = snr
        if True:
            # Calibrate the fake magnitudes onto the image zero point,
            # derived from the template stars' catalog-vs-SDSS offset.
            zpt = np.median(cat[magk][m2[jj]] - stars['psfMag_g'][ii[m1[jj]]])
            zpt -= 25
            foo = np.where(fakemag[q1] < 22.3)[0]
            offset = np.median((-2.5*np.log10(fakecat[fluxk][q2[foo]]) - zpt) - fakemag[q1[foo]])
            print 'fake star mag offset is ',offset
            fakemags[nresample*(ccdNum-1):nresample*ccdNum] += offset
        if False:
            # Verbose zero-point diagnostics (disabled).
            print ' --------- ZERO POINT CHECK -----------'
            print cat[magk][m2[jj]][:10]
            print -2.5*np.log10(cat[fluxk][m2[jj]])[:10] - zpt
            print stars['psfMag_g'][ii[m1]][:10]
            print ( (-2.5*np.log10(cat[fluxk][m2[jj]])[:10] - zpt) -
                    stars['psfMag_g'][ii[m1]][:10])
            print -2.5*np.log10(fakecat[fluxk][q2[foo]]) - zpt
            print fakemag[q1[foo]]
            print ( (-2.5*np.log10(fakecat[fluxk][q2[foo]]) - zpt) -
                    fakemag[q1[foo]] )
            print ( (-2.5*np.log10(fakecat[fluxk][q2[foo]]) - zpt) -
                    fakemag[q1[foo]] ).mean()
            print snr[foo]
            print
        if not keepfakes:
            os.unlink(fakeimpath)
            os.unlink(fakecatpath)
        if savestars:
            np.savetxt(fakeimpath.replace('.fits','_stars.dat'),
                       np.vstack([fakemag,fakex,fakey]).transpose(),fmt='%9.3f')
    return fakemags,fakesnr
def fake_ndwfs_stars(grange=(16.0,17.0),**kwargs):
    """Run the fake-star injection over all observed NDWFS tiles.

    Bright SDSS stars (g within `grange`) are used as templates; per-tile
    magnitude/SNR tables are saved, plus a summary file giving the
    5-sigma recovered fraction in bins of fake magnitude.
    """
    aper = kwargs.setdefault('aper','psf')
    magrange = kwargs.setdefault('magrange',(22.0,23.4))
    nbins = 5
    medges = np.linspace(magrange[0],magrange[1],nbins+1)
    # Fixed seed so fake positions/magnitudes are reproducible.
    np.random.seed(1)
    stars = fitsio.read('/global/scratch2/sd/imcgreer/ndwfs/sdss_bootes_gstars.fits')
    fakedir = '/global/scratch2/sd/imcgreer/fakes/'
    stars = stars[(stars['psfMag_g']>grange[0])&(stars['psfMag_g']<grange[1])]
    tiles = ndwfs_tiles(observed=True)
    # Summary header: tile id, dither, date, then the bin centers.
    summaryf = open(fakedir+'fakestars_%s_bytile.dat' % aper,'w')
    summaryf.write('# %4s %1s %8s ' % ('tile','D','utdate'))
    for i in range(nbins):
        summaryf.write('%6.3f ' % ((medges[i]+medges[i+1])/2))
    summaryf.write('\n')
    for ti,tile in enumerate(tiles):
        print 'faking stars in tile %d/%d' % (ti+1,len(tiles))
        mag,snr = fake_sdss_stars_on_tile(stars,tile,**kwargs)
        np.savetxt(fakedir+'fakestars_%s_%05d_%d_%s.dat' %
                   (aper,tile['tileId'],tile['ditherId'],tile['utDate']),
                   np.vstack([mag,snr]).transpose(),fmt='%8.3f')
        summaryf.write(' %05d %1d %8s ' %
                       (tile['tileId'],tile['ditherId'],tile['utDate']))
        ii = np.digitize(mag,medges)
        # could divide by CCD
        for i in range(nbins):
            jj = np.where(ii==i+1)[0]
            # NOTE(review): an empty magnitude bin makes this 0/0 -- confirm
            # bins are always populated for the chosen nresample/magrange.
            frac = np.sum(snr[jj]>5.0) / float(len(jj))
            summaryf.write('%6.3f ' % frac)
        summaryf.write('\n')
    summaryf.close()
def ndwfs_sdss_matches():
    ''' for checking linearity '''
    import basslog
    stars = fitsio.read('/global/scratch2/sd/imcgreer/ndwfs/sdss_bootes_gstars.fits')
    logs = basslog.load_Bok_logs('./logs/')
    tiles = ndwfs_tiles(observed=True)
    tiledb = bass.load_tiledb()
    tid = np.array([int(tid) for tid in tiledb['TID']])
    i1 = 0
    # Pre-allocated output; trimmed to the filled length at the end.
    # NOTE(review): np.zeros() is given a float size (1e5); newer numpy
    # requires an integer (int(1e5)) -- confirm the numpy version in use.
    m = np.zeros(1e5,dtype=[('sdss_id','i4'),('sdss_g_mag','f4'),
                            ('bass_g_mag','f4'),('bass_g_err','f4'),
                            ('bass_expTime','f4'),('bass_skyADU','f4'),
                            ('bass_airmass','f4'),('bass_ebv','f4'),
                            ('bass_ccdNum','i4'),('bass_ditherId','i4'),
                            ('bass_fluxMax','f4'),('bass_FWHM','f4')])
    for ti,tile in enumerate(tiles):
        print 'tile %d/%d [%d]' % (ti+1,len(tiles),i1)
        for ccdNum in range(1,5):
            impath = os.path.join(bass.rdxdir,tile['utDate'],'ccdproc3',
                                  tile['fileName']+'_ccd%d.fits'%ccdNum)
            if not os.path.exists(impath):
                print ' ... %s does not exist, skipping' % impath
                continue
            h = fitsio.read_header(impath)
            sky = h['SKYVAL']
            catpath = os.path.join(bass.rdxdir,tile['utDate'],'ccdproc3',
                                   tile['fileName']+'_ccd%d.cat.fits'%ccdNum)
            if not os.path.exists(catpath):
                print ' ... %s does not exist, skipping' % catpath
                continue
            cat = fitsio.read(catpath)
            # SDSS stars within this CCD's footprint (small edge buffer).
            ii = np.where( (stars['ra']>cat['ALPHA_J2000'].min()+3e-3) &
                           (stars['ra']<cat['ALPHA_J2000'].max()-3e-3) &
                           (stars['dec']>cat['DELTA_J2000'].min()+3e-3) &
                           (stars['dec']<cat['DELTA_J2000'].max()-3e-3) )[0]
            if len(ii)==0:
                print 'no stars found on ccd #',ccdNum
                continue
            m1,m2 = srcor(stars['ra'][ii],stars['dec'][ii],
                          cat['ALPHA_J2000'],cat['DELTA_J2000'],2.5)
            # Keep only unflagged detections.
            jj = np.where(cat['FLAGS'][m2] == 0)[0]
            i2 = i1 + len(jj)
            m['sdss_id'][i1:i2] = ii[m1[jj]]
            m['sdss_g_mag'][i1:i2] = stars['psfMag_g'][ii[m1[jj]]]
            m['bass_g_mag'][i1:i2] = cat['MAG_PSF'][m2[jj]]
            m['bass_g_err'][i1:i2] = cat['MAGERR_PSF'][m2[jj]]
            m['bass_fluxMax'][i1:i2] = cat['FLUX_MAX'][m2[jj]]
            m['bass_FWHM'][i1:i2] = np.median(cat['FWHM_IMAGE'][m2[jj]])
            m['bass_expTime'][i1:i2] = tile['expTime']
            # Airmass from the observing log entry for this exposure.
            i = np.where(logs[tile['utDate']]['fileName'] ==
                         tile['fileName'])[0][0]
            m['bass_airmass'][i1:i2] = logs[tile['utDate']]['airmass'][i]
            m['bass_ebv'][i1:i2] = tiledb['EBV'][tid==tile['tileId']][0]
            m['bass_ccdNum'][i1:i2] = ccdNum
            m['bass_ditherId'][i1:i2] = tile['ditherId']
            m['bass_skyADU'][i1:i2] = sky
            i1 = i2
    m = m[:i1]
    outdir = '/project/projectdirs/cosmo/staging/bok/ian/'
    fitsio.write(outdir+'ndwfs_sdss.fits',m,clobber=True)
def get_phototiles_info():
    """Collect per-tile photometric info and write photo_tiles_info.txt.

    For each first-dither (ditherId==1) tile in the CFHTLS-W3 and NDWFS
    check fields, records airmass, E(B-V), FWHM, sky level, zero point,
    and exposure time, using CCD #1 only.
    """
    import boklog
    logs = boklog.load_Bok_logs('./logs/')
    tiledb = bass.load_tiledb()
    tid = np.array([int(tid) for tid in tiledb['TID']])
    ccdNum = 1
    photinfof = open('photo_tiles_info.txt','w')
    photinfof.write('# %6s %10s %7s %7s %7s %10s %8s %7s\n' %
                    ('UTD','file','airmass','E(B-V)','FWHMpix','skyADU','zpt','texp'))
    for ti,tiles in enumerate([cfhtw3_tiles(),ndwfs_tiles()]):
        if ti==0:
            # CFHTLS reference stars in a bright, well-measured range.
            refcat = fitsio.read(cfhtlswide_starfile)
            ii = np.where((refcat['psfMag'][:,1]>17) &
                          (refcat['psfMag'][:,1]<18.5))[0]
            ref_ra = refcat['ra'][ii]
            ref_dec = refcat['dec'][ii]
            ref_mag = refcat['psfMag'][ii,1]
            #ref_mag = refcat['psfMag'][ii,1] - A_ext['g']*refcat['E(B-V)'][ii]
        else:
            # SDSS reference stars for the NDWFS/Bootes field.
            refcat = fitsio.read(bootes_sdss_starfile)
            ii = np.where((refcat['psfMag_g']>16) &
                          (refcat['psfMag_g']<18.5))[0]
            ref_ra = refcat['ra'][ii]
            ref_dec = refcat['dec'][ii]
            ref_mag = refcat['psfMag_g'][ii]
            #ref_mag = refcat['psfMag_g'][ii] - refcat['extinction_g'][ii]
        for tj,t in enumerate(tiles):
            if t['ditherId'] != 1:
                continue
            # get E(B-V) from tile database
            ebv = tiledb['EBV'][tid==t['tileId']][0]
            # get conditions (airmass,exptime) from observing logs
            # NOTE(review): the bare except skips tiles missing from the
            # logs but would also hide unrelated errors -- consider
            # narrowing to (KeyError, IndexError).
            try:
                i = np.where(logs[t['utDate']]['fileName']==t['fileName'])[0][0]
            except:
                continue
            airmass = logs[t['utDate']]['airmass'][i]
            exptime = logs[t['utDate']]['expTime'][i]
            # get sky value in ADU from FITS headers
            impath = os.path.join(bass.rdxdir,t['utDate'],'ccdproc3',
                                  t['fileName']+'_ccd%d.fits'%ccdNum)
            h = fitsio.read_header(impath)
            sky = h['SKYVAL']
            # get FWHM and zero point from catalogs
            catpath = os.path.join(bass.rdxdir,t['utDate'],'ccdproc3',
                                   t['fileName']+'_ccd%d.cat.fits'%ccdNum)
            cat = fitsio.read(catpath)
            # Reference stars within this CCD's footprint.
            ii = np.where( (ref_ra>cat['ALPHA_J2000'].min()+3e-3) &
                           (ref_ra<cat['ALPHA_J2000'].max()-3e-3) &
                           (ref_dec>cat['DELTA_J2000'].min()+3e-3) &
                           (ref_dec<cat['DELTA_J2000'].max()-3e-3) )[0]
            if len(ii)==0:
                continue
            m1,m2 = srcor(ref_ra[ii],ref_dec[ii],
                          cat['ALPHA_J2000'],cat['DELTA_J2000'],2)
            if len(m1)==0:
                continue
            # Restrict to unflagged matches; require at least 5 stars.
            m1 = ii[m1]
            ii = np.where(cat['FLAGS'][m2]==0)[0]
            m1,m2 = m1[ii],m2[ii]
            if len(m1)<5:
                continue
            print len(ii),' stars on tile ',t['utDate'],t['fileName']
            fwhm = np.median(cat['FWHM_IMAGE'][m2])
            # Zero point normalized to 1-second exposure.
            zpt = 25 - np.median(cat['MAG_AUTO'][m2] - ref_mag[m1]) - \
                  2.5*np.log10(exptime)
            photinfof.write('%8s %10s %7.2f %7.3f %7.2f %10.2f %8.3f %7.1f\n' %
                            (t['utDate'],t['fileName'],airmass,ebv,fwhm,sky,zpt,exptime))
    photinfof.close()
def phototiles_stats(doplots=True):
    """Summarize the photometric-tile table written by get_phototiles_info().

    Prints mean sky level (ADU/s, e-/s, mag/arcsec^2) and the mean
    airmass-corrected zero point, and optionally histograms zero point,
    sky brightness, and seeing against ETC reference values.
    """
    import boketc
    gain = boketc.G
    pxscl = boketc.p
    k = boketc.k_ext['g']
    A = boketc.A_ext['g']
    tiledat = ascii_io.read('photo_tiles_info.txt')
    # Sky rates per second, in ADU and electrons, and in mag/arcsec^2.
    sky_ADUs = tiledat['skyADU'] / tiledat['texp']
    sky_eps = sky_ADUs * gain
    sky_magasec2 = -2.5*np.log10(sky_ADUs*pxscl**-2) + tiledat['zpt']
    print sky_ADUs.mean(),sky_eps.mean(),sky_magasec2.mean()
    # Zero point corrected to unit airmass (extinction term disabled).
    zp0 = tiledat['zpt'] - k*(tiledat['airmass']-1) #- A*tiledat['E(B-V)']
    print zp0.mean()
    fwhm_asec = tiledat['FWHMpix'] * pxscl
    if doplots:
        fig = plt.figure(figsize=(8,6))
        ax1 = plt.subplot(2,2,1)
        ax1.hist(zp0)
        #ax1.axvline(boketc.bok_zpt0_am00['g'],c='r',lw=2)
        ax1.axvline(boketc.bok_zpt0_am10['g'],c='r',lw=2)
        ax1 = plt.subplot(2,2,2)
        ax1.hist(sky_magasec2)
        ax1.axvline(boketc.kpno_sky_lun0['g'],c='r',lw=2)
        ax1 = plt.subplot(2,2,3)
        ax1.hist(fwhm_asec)
        ax1.axvline(boketc.bok_medianFWHM['g'],c='r',lw=2)
# Command-line entry point: first argument selects the task to run.
if __name__=='__main__':
    import sys
    if sys.argv[1]=='match_ndwfs':
        match_ndwfs_stars()
    elif sys.argv[1]=='match_cfhtlswide':
        match_cfhtls_stars(survey='wide')
    elif sys.argv[1]=='fake_ndwfs':
        # Optional extra argument selects the photometric aperture.
        if len(sys.argv)==2 or 'psf' in sys.argv[2:]:
            aper = 'psf'
        elif 'auto' in sys.argv[2:]:
            aper = 'auto'
        else:
            # BUG fix: an unrecognized aperture argument previously left
            # `aper` unbound and crashed with a NameError below.
            raise ValueError('aperture argument must be "psf" or "auto"')
        fake_ndwfs_stars(aper=aper)
    elif sys.argv[1]=='photo_info':
        get_phototiles_info()
    else:
        raise ValueError
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of CONF for use of fakes, and some black magic for
inline callbacks.
"""
import logging
import os
import shutil
import uuid
import fixtures
import mock
import mox
from oslo.messaging import conffixture as messaging_conffixture
from oslo.utils import strutils
from oslo.utils import timeutils
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
import stubout
import testtools
from cinder.common import config # noqa Need to register global_opts
from cinder.db import migration
from cinder.db.sqlalchemy import api as sqla_api
from cinder import i18n
from cinder.openstack.common import log as oslo_logging
from cinder import rpc
from cinder import service
from cinder.tests import conf_fixture
from cinder.tests import fake_notifier
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db'), ]
CONF = cfg.CONF
CONF.register_opts(test_opts)
LOG = oslo_logging.getLogger(__name__)
_DB_CACHE = None
class TestingException(Exception):
    """Generic exception raised deliberately by unit tests."""
    pass
class Database(fixtures.Fixture):
    """Fixture that gives each test a pristine database.

    The schema is migrated once at construction; the resulting clean
    state is cached either as an in-memory SQL dump (for the
    'sqlite://' URL) or as a copy of the sqlite db file. setUp() then
    restores that clean state cheaply before every test.
    """
    def __init__(self, db_api, db_migrate, sql_connection,
                 sqlite_db, sqlite_clean_db):
        self.sql_connection = sql_connection
        self.sqlite_db = sqlite_db
        self.sqlite_clean_db = sqlite_clean_db
        # Dispose pooled connections so migration starts from a fresh engine.
        self.engine = db_api.get_engine()
        self.engine.dispose()
        conn = self.engine.connect()
        db_migrate.db_sync()
        if sql_connection == "sqlite://":
            # In-memory db: serialize the migrated schema/data as SQL text.
            conn = self.engine.connect()
            self._DB = "".join(line for line in conn.connection.iterdump())
            self.engine.dispose()
        else:
            # File-based sqlite: keep a clean copy of the migrated db file.
            cleandb = os.path.join(CONF.state_path, sqlite_clean_db)
            testdb = os.path.join(CONF.state_path, sqlite_db)
            shutil.copyfile(testdb, cleandb)

    def setUp(self):
        """Restore the clean database state for the current test."""
        super(Database, self).setUp()
        if self.sql_connection == "sqlite://":
            conn = self.engine.connect()
            conn.connection.executescript(self._DB)
            self.addCleanup(self.engine.dispose)
        else:
            shutil.copyfile(
                os.path.join(CONF.state_path, self.sqlite_clean_db),
                os.path.join(CONF.state_path, self.sqlite_db))
class TestCase(testtools.TestCase):
"""Test case base class for all unit tests."""
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        # Optional per-test timeout from the environment; invalid values
        # disable the timeout rather than failing.
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        # Optional stdout/stderr/log capture, controlled by env vars.
        environ_enabled = (lambda var_name:
                           strutils.bool_from_string(os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        if environ_enabled('OS_LOG_CAPTURE'):
            log_format = '%(levelname)s [%(name)s] %(message)s'
            if environ_enabled('OS_DEBUG'):
                level = logging.DEBUG
            else:
                level = logging.INFO
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=log_format,
                                                   level=level))

        # Use the fake oslo.messaging transport so no real bus is needed.
        rpc.add_extra_exmods("cinder.tests")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        # Shared in-memory database fixture, created once per test run.
        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api, migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db=CONF.database.sqlite_db,
                                 sqlite_clean_db=CONF.sqlite_clean_db)
        self.useFixture(_DB_CACHE)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.addCleanup(CONF.reset)
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
        self.addCleanup(self.stubs.SmartUnsetAll)
        self.addCleanup(self.mox.VerifyAll)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.stub_notifier(self.stubs)

        self.override_config('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(
            config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path,
                            group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 'cinder/tests/policy.json'))
def _common_cleanup(self):
"""Runs after each test method to tear down test environment."""
# Stop any timers
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
# Kill any services
for x in self._services:
try:
x.kill()
except Exception:
pass
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
    def override_config(self, name, override, group=None):
        """Cleanly override CONF variables."""
        # Apply the override now and register the matching clear_override
        # so the option is restored automatically at test teardown.
        CONF.set_override(name, override, group)
        self.addCleanup(CONF.clear_override, name, group)
def flags(self, **kw):
"""Override CONF variables for a test."""
for k, v in kw.iteritems():
self.override_config(k, v)
    def log_level(self, level):
        """Set logging level to the specified value."""
        # NOTE(review): getLogger(None).logger assumes the oslo logging
        # adapter (which wraps a stdlib logger in a .logger attribute);
        # a plain stdlib root logger has no such attribute — confirm.
        log_root = logging.getLogger(None).logger
        log_root.setLevel(level)
def start_service(self, name, host=None, **kwargs):
host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'cinder-%s' % name)
svc = service.Service.create(**kwargs)
svc.start()
self._services.append(svc)
return svc
def mock_object(self, obj, attr_name, new_attr=None, **kwargs):
"""Use python mock to mock an object attribute
Mocks the specified objects attribute with the given value.
Automatically performs 'addCleanup' for the mock.
"""
if not new_attr:
new_attr = mock.Mock()
patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs)
patcher.start()
self.addCleanup(patcher.stop)
return new_attr
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
def raise_assertion(msg):
d1str = d1
d2str = d2
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
'd2: %(d2str)s' %
{'msg': msg, 'd1str': d1str, 'd2str': d2str})
raise AssertionError(base_msg)
d1keys = set(d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
raise_assertion('Keys in d1 and not d2: %(d1only)s. '
'Keys in d2 and not d1: %(d2only)s' %
{'d1only': d1only, 'd2only': d2only})
for key in d1keys:
d1value = d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
self.assertDictMatch(d1value, d2value)
elif 'DONTCARE' in (d1value, d2value):
continue
elif approx_equal and within_tolerance:
continue
elif d1value != d2value:
raise_assertion("d1['%(key)s']=%(d1value)s != "
"d2['%(key)s']=%(d2value)s" %
{
'key': key,
'd1value': d1value,
'd2value': d2value,
})
|
|
"""Analysis engine service."""
import dacite
import pandas as pd
from math import log, pi
from CoolProp.CoolProp import PropsSI
from CoolProp.HumidAirProp import HAPropsSI
from scipy.stats import chi2
from uncertainties import ufloat
from coimbra_chamber.access.experiment.service import ExperimentAccess
from coimbra_chamber.access.experiment.contracts import FitSpec
from coimbra_chamber.utility.io.contracts import Prompt
from coimbra_chamber.utility.io.service import IOUtility
from coimbra_chamber.utility.plot.contracts import Axis, DataSeries, Layout, Plot
from coimbra_chamber.utility.plot.service import PlotUtility
class AnalysisEngine(object):
"""Encapsulate all aspects of analysis."""
    def __init__(self, experiment_id):  # noqa: D107
        """Bind the engine to one experiment and set physical constants."""
        self._experiment_id = experiment_id
        # Collaborators for persistence, console IO, and plotting.
        self._exp_acc = ExperimentAccess()
        self._io_util = IOUtility()
        self._plot_util = PlotUtility()
        # Relative error threshold sig_b/|b| a fit must satisfy (1%).
        self._error = 0.01
        # Accepted fits accumulated by process_fits().
        self._fits = []
        # Cursor into the observation index while scanning for fits.
        self._idx = 1
        # Window-growth step used by _get_best_local_fit().
        self._steps = 1
        # NOTE(review): _bounds is never read in this module — confirm
        # it is used elsewhere before removing.
        self._bounds = (None, None)
        # IR sensor calibration
        self._a = ufloat(-2.34, 0.07)
        self._b = ufloat(1.0445, 0.0022)
        # Tube radius
        self._R = ufloat(0.015, 0.0001)
        # Tube cross-sectional area [m^2].
        self._A = pi*self._R**2
        # Molar masses — presumably water (M1) and dry air (M2)
        # [kg/kmol]; confirm against the upstream documentation.
        self._M1 = 18.015
        self._M2 = 28.964
        # Stefan-Boltzmann constant [W m^-2 K^-4].
        self._SIGMA = 5.67036713e-8
        # Emissivities and chamber geometry used for the radiation
        # exchange factor below.
        self._eps_chamber = 0.1
        self._eps_h20 = 0.99
        self._R_chamber = 0.3
        self._L_chamber = 0.7
        # Chamber interior area: two end caps plus the cylinder wall.
        self._A_chamber = (
            2*pi*self._R_chamber**2 + 2*pi*self._R_chamber*self._L_chamber
        )
        # Two-surface gray-body exchange factor (area-weighted), used by
        # _set_local_properties() when computing qrs.
        self._RAD_FACT = (
            self._A * (
                (1-self._eps_chamber)/(self._eps_chamber*self._A_chamber)
                + 1/self._A
                + (1-self._eps_h20)/(self._eps_h20*self._A)
            )
        )
        # Gravitational acceleration [m s^-2].
        self._ACC_G = 9.81
# ------------------------------------------------------------------------
# Public methods: included in the API
    def process_fits(self, data):
        """
        Process fits from data.

        Loads the observations into a DataFrame, scans them for
        acceptable linear mass fits, and persists the accepted fits.

        Parameters
        ----------
        data : coimbra_chamber.access.experiment.contracts.DataSpec.
        """
        self._data = data
        self._get_observations()
        self._get_fits()
        self._persist_fits()
# ------------------------------------------------------------------------
# Internal methods: not included in the API
    def _get_observations(self):
        """Load observations into ``self._observations`` with uncertainties."""
        # Create empty lists to hold data as we iterate through observations.
        dew_point = []
        mass = []
        pow_ref = []
        pressure = []
        surface_temp = []
        ic_temp = []
        cap_man = []
        optidew = []
        temp = []
        time = []
        # Iterate and append observations while adding uncertainties
        observations = self._data.observations
        initial_idx = observations[0].idx
        for obs in observations:
            dew_point.append(ufloat(obs.dew_point, 0.2))
            mass.append(ufloat(obs.mass, 1e-7))
            # Reference power uncertainty is 5% of the reading.
            pow_ref.append(ufloat(obs.pow_ref, abs(float(obs.pow_ref)) * 0.05))
            # NOTE(review): int() truncates the 0.15% pressure
            # uncertainty to whole units — confirm this is intentional.
            pressure.append(ufloat(obs.pressure, int(obs.pressure * 0.0015)))
            surface_temp.append(ufloat(obs.surface_temp, 0.5))
            ic_temp.append(ufloat(obs.ic_temp, 0.2))
            # Average temperatures with error propagation
            temps = obs.temperatures
            temp.append(
                sum(ufloat(temp.temperature, 0.2) for temp in temps)
                / len(temps)
            )
            # Bools for equipment status
            cap_man.append(obs.cap_man_ok)
            optidew.append(obs.optidew_ok)
            # Ensure that time starts at zero
            time.append(obs.idx - initial_idx)
        # DataFrame payload
        data = dict(
            Tdp=dew_point,
            m=mass,
            Jref=pow_ref,
            P=pressure,
            Te=temp,
            Ts=surface_temp,
            Tic=ic_temp,
            cap_man=cap_man,
            optidew=optidew,
        )
        # Indexed by the zero-based observation time.
        self._observations = pd.DataFrame(index=time, data=data)
def _layout_observations(self):
# internal helper logic
def nominal(ufloat_):
return ufloat_.nominal_value
def std_dev(ufloat_):
return ufloat_.std_dev
# DataSeries ---------------------------------------------------------
data_series = dict()
# First get the time data series
data = dict(values=self._observations.index.tolist())
data_series['t'] = dacite.from_dict(DataSeries, data)
# dew point, Tdp
data = dict(
values=self._observations.Tdp.map(nominal).tolist(),
sigma=self._observations.Tdp.map(std_dev).tolist(),
label='Tdp')
data_series['Tdp'] = dacite.from_dict(DataSeries, data)
# mass, m
data = dict(
values=self._observations.m.map(nominal).tolist(),
sigma=self._observations.m.map(std_dev).tolist(),
label='m')
data_series['m'] = dacite.from_dict(DataSeries, data)
# pow_ref, Jref
data = dict(
values=self._observations.Jref.map(nominal).to_list(),
sigma=self._observations.Jref.map(std_dev).to_list(),
label='Jref')
data_series['Jref'] = dacite.from_dict(DataSeries, data)
# pressure, P
data = dict(
values=self._observations.P.map(nominal).tolist(),
sigma=self._observations.P.map(std_dev).tolist(),
label='P')
data_series['P'] = dacite.from_dict(DataSeries, data)
# Ambient temp, Te
data = dict(
values=self._observations.Te.map(nominal).tolist(),
sigma=self._observations.Te.map(std_dev).tolist(),
label='Te')
data_series['Te'] = dacite.from_dict(DataSeries, data)
# Surface temp, Ts
data = dict(
values=self._observations.Ts.map(nominal).tolist(),
sigma=self._observations.Ts.map(std_dev).tolist(),
label='Ts')
data_series['Ts'] = dacite.from_dict(DataSeries, data)
# IC temp, Tic
data = dict(
values=self._observations.Tic.map(nominal).tolist(),
sigma=self._observations.Tic.map(std_dev).tolist(),
label='Tic')
data_series['Tic'] = dacite.from_dict(DataSeries, data)
# Cap-man status, cap_man
data = dict(
values=self._observations.cap_man.tolist(),
label='cap_man')
data_series['cap_man'] = dacite.from_dict(DataSeries, data)
# Optidew status, optidew
data = dict(
values=self._observations.optidew.tolist(),
label='optidew')
data_series['optidew'] = dacite.from_dict(DataSeries, data)
# Axes ---------------------------------------------------------------
axes = dict()
data = dict(
data=[data_series['m']], y_label='mass, [kg]',
error_type='continuous')
axes['mass'] = dacite.from_dict(Axis, data)
data = dict(
data=[data_series['Tdp'], data_series['Te'], data_series['Ts'],
data_series['Tic']],
y_label='temperature, [K]',
error_type='continuous')
axes['temp'] = dacite.from_dict(Axis, data)
data = dict(
data=[data_series['P']], y_label='pressure, [Pa]',
error_type='continuous')
axes['pressure'] = dacite.from_dict(Axis, data)
data = dict(
data=[data_series['Jref']], y_label='Ref power, [W]',
error_type='continuous')
axes['Jref'] = dacite.from_dict(Axis, data)
data = dict(
data=[data_series['cap_man'], data_series['optidew']],
y_label='status')
axes['status'] = dacite.from_dict(Axis, data)
# Then the Plots ---------------------------------------------------------
plots = dict()
data = dict(
abscissa=data_series['t'],
axes=[axes['mass'], axes['temp']],
x_label='index')
plots['mass_and_temp'] = dacite.from_dict(Plot, data)
data = dict(
abscissa=data_series['t'],
axes=[axes['pressure']],
x_label='index')
plots['pressure'] = dacite.from_dict(Plot, data)
data = dict(
abscissa=data_series['t'],
axes=[axes['Jref'], axes['status']],
x_label='index')
plots['pow_and_status'] = dacite.from_dict(Plot, data)
# Finally, the layout ----------------------------------------------------
data = dict(
plots=[
plots['mass_and_temp'], plots['pressure'],
plots['pow_and_status']
],
style='seaborn-darkgrid')
self._layout = dacite.from_dict(Layout, data)
    def _get_fits(self):
        """Scan observations, collecting the best local fit at each stop."""
        # len - 2 because we want to make sure we never end up at the last
        # index and can't take a max slice
        while self._idx < len(self._observations) - 2:
            # Get a new sample centered at the self._idx that is as large as
            # possible.
            # NOTE(review): early on, `left` can be negative; pandas
            # label-based slicing then starts at the first row — confirm
            # this is the intended behavior.
            left = (2 * self._idx) - len(self._observations) + 1
            right = 2 * self._idx
            self._sample = self._observations.loc[left:right, :]
            # Then search for the best fit in self._sample
            self._get_best_local_fit()
            if self._this_fit:  # We got a fit that met the error threshold
                # Score the fit and attach state, properties, and
                # nondimensional groups before accepting it.
                self._evaluate_fit()
                self._set_local_exp_state()
                self._set_local_properties()
                self._set_nondim_groups()
                self._fits.append(self._this_fit)
                # Length of the best fit is the degrees of freedom plus 2 for
                # a linear fit.
                self._idx += self._this_fit['nu_chi'] + 2
            else:  # _get_best_local_fit returned None
                # Skip past the whole unusable window.
                self._idx += len(self._sample)
def _persist_fits(self):
counter = 0
for data in self._fits:
fit_spec = dacite.from_dict(FitSpec, data)
self._exp_acc.add_fit(fit_spec, self._experiment_id)
counter += 1
return counter
# Properties .............................................................
    def _set_local_exp_state(self):
        """Average the accepted sample into one experimental state dict."""
        samples = len(self._this_sample)
        data = self._this_sample
        # Kelvin <-> Celsius conversion offset.
        offset = 273.15
        # Use calibration for infrared sensor
        Ts_bar_K = sum(data.Ts)/samples
        Ts_bar_C = Ts_bar_K - offset
        # The linear calibration (self._a, self._b) is defined in Celsius.
        Ts_bar_C = self._a + self._b*Ts_bar_C
        Ts_bar_K = Ts_bar_C + offset
        # Now the rest of the state variables
        Te_bar = sum(data.Te)/samples
        Tdp_bar = sum(data.Tdp)/samples
        P_bar = sum(data.P)/samples
        # Consumed by _set_local_properties().
        self._experimental_state = dict(
            Te=Te_bar,
            Tdp=Tdp_bar,
            Ts=Ts_bar_K,
            P=P_bar,
        )
def _set_local_properties(self):
# Internal mapper ----------------------------------------------------
def x1_2_m1(self, x1):
num = self._M1 * x1
den = num + (self._M2 * (1 - x1))
return num/den
Ts = self._experimental_state['Ts']
P = self._experimental_state['P']
Te = self._experimental_state['Te']
Tdp = self._experimental_state['Tdp']
# mddp ---------------------------------------------------------------
mdot = ufloat(-self._this_fit['b'], self._this_fit['sig_b'])
mddp = mdot/self._A
# x1 -----------------------------------------------------------------
# s-state
x1s_nv = HAPropsSI(
'psi_w',
'T', Ts.nominal_value,
'P', P.nominal_value,
'RH', 1)
x1s_sig = x1s_nv - HAPropsSI(
'psi_w',
'T', Ts.nominal_value + Ts.std_dev,
'P', P.nominal_value,
'RH', 1)
x1s = ufloat(x1s_nv, abs(x1s_sig))
# e-state
x1e_nv = HAPropsSI(
'psi_w',
'T', Te.nominal_value,
'P', P.nominal_value,
'Tdp', Tdp.nominal_value)
x1e_sig = x1e_nv - HAPropsSI(
'psi_w',
'T', Te.nominal_value + Te.std_dev,
'P', P.nominal_value,
'Tdp', Tdp.nominal_value + Tdp.std_dev)
x1e = ufloat(x1e_nv, abs(x1e_sig))
# film
x1 = (x1s+x1e) / 2
# m1 -----------------------------------------------------------------
# s-state
m1s = x1_2_m1(self, x1s)
# e-state
m1e = x1_2_m1(self, x1e)
# film
m1 = (m1s+m1e) / 2
# rho ---------------------------------------------------------------
# s-state
rhos_nv = 1 / HAPropsSI(
'Vha',
'T', Ts.nominal_value,
'P', P.nominal_value,
'Y', x1s_nv)
rhos_sig = rhos_nv - (
1 / HAPropsSI(
'Vha',
'T', Ts.nominal_value + Ts.std_dev,
'P', P.nominal_value,
'Y', x1s_nv)
)
rhos = ufloat(rhos_nv, abs(rhos_sig))
# e-state
rhoe_nv = 1 / HAPropsSI(
'Vha',
'T', Te.nominal_value,
'P', P.nominal_value,
'Y', x1e_nv)
rhoe_sig = rhoe_nv - (
1 / HAPropsSI(
'Vha',
'T', Te.nominal_value + Te.std_dev,
'P', P.nominal_value,
'Y', x1e_nv)
)
rhoe = ufloat(rhoe_nv, abs(rhoe_sig))
# film
rho = (rhos+rhoe) / 2
# Bm1 ----------------------------------------------------------------
Bm1 = (m1s - m1e)/(1-m1s)
# T ------------------------------------------------------------------
T = (Te+Ts) / 2
# D12 ----------------------------------------------------------------
D12 = 1.97e-5 * (101325/P) * pow(T/256, 1.685)
# hfg -----------------------------------------------------------------
# hg
hg_nv = PropsSI(
'H',
'T', Ts.nominal_value,
'Q', 1,
'water')
hg_sig = hg_nv - PropsSI(
'H',
'T', Ts.nominal_value + Ts.std_dev,
'Q', 1,
'water')
hg = ufloat(hg_nv, abs(hg_sig))
# hf
hf_nv = PropsSI(
'H',
'T', Ts.nominal_value,
'Q', 0,
'water')
hf_sig = hf_nv - PropsSI(
'H',
'T', Ts.nominal_value + Ts.std_dev,
'Q', 0,
'water')
hf = ufloat(hf_nv, abs(hf_sig))
# hfg
hfg = hg - hf
# hu -----------------------------------------------------------------
hu = -hfg
# hs -----------------------------------------------------------------
hs = ufloat(0, 0)
# cpv ----------------------------------------------------------------
cpv_nv = HAPropsSI(
'cp_ha',
'P', P.nominal_value,
'T', T.nominal_value,
'Y', x1.nominal_value,
)
cpv_sig = cpv_nv - HAPropsSI(
'cp_ha',
'P', P.nominal_value,
'T', T.nominal_value + T.std_dev,
'Y', x1.nominal_value,
)
cpv = ufloat(cpv_nv, abs(cpv_sig))
# he -----------------------------------------------------------------
he = cpv * (Te - Ts)
# cpl ----------------------------------------------------------------
cpl_nv = PropsSI(
'Cpmass',
'T', T.nominal_value,
'Q', 0,
'water')
cpl_sig = cpl_nv - PropsSI(
'Cpmass',
'T', T.nominal_value + T.std_dev,
'Q', 0,
'water')
cpl = ufloat(cpl_nv, abs(cpl_sig))
# hT -----------------------------------------------------------------
hT = cpl * (Te - Ts)
# qcu ----------------------------------------------------------------
qcu = mddp * (hT - hu)
# Ebe ----------------------------------------------------------------
Ebe = self._SIGMA*Te**4
# Ebs ----------------------------------------------------------------
Ebs = self._SIGMA*Ts**4
# qrs ----------------------------------------------------------------
qrs = (Ebe - Ebs)/self._RAD_FACT
# kv -----------------------------------------------------------------
kv_nv = HAPropsSI(
'k',
'P', P.nominal_value,
'T', T.nominal_value,
'Y', x1.nominal_value,
)
kv_sig = kv_nv - HAPropsSI(
'k',
'P', P.nominal_value,
'T', T.nominal_value + T.std_dev,
'Y', x1.nominal_value,
)
kv = ufloat(kv_nv, abs(kv_sig))
# alpha --------------------------------------------------------------
alpha = kv / (rho*cpv)
# Bh -----------------------------------------------------------------
Bh = (hs-he) / (hu + (qcu+qrs)/mddp - hs)
# M ------------------------------------------------------------------
M = (m1 * self._M1) + ((1 - m1) * self._M2)
# gamma1 -------------------------------------------------------------
gamma1 = (1/rho) * (M/self._M1 - 1)
# gamma2 -------------------------------------------------------------
gamma2 = (1/rho) * (M/self._M2 - 1)
# beta ---------------------------------------------------------------
beta = 1/T
# Delta_m ------------------------------------------------------------
Delta_m = m1s - m1e
# Delta_T ------------------------------------------------------------
Delta_T = Ts - Te
# mu -----------------------------------------------------------------
mu_nv = HAPropsSI(
'mu',
'P', P.nominal_value,
'T', T.nominal_value,
'Y', x1.nominal_value,
)
mu_sig = mu_nv - HAPropsSI(
'mu',
'P', P.nominal_value,
'T', T.nominal_value + T.std_dev,
'Y', x1.nominal_value,
)
mu = ufloat(mu_nv, abs(mu_sig))
# nu -----------------------------------------------------------------
nu = mu/rho
# set properties
self._properties = dict(
mddp=mddp,
x1s=x1s,
x1e=x1e,
x1=x1,
m1s=m1s,
m1e=m1e,
m1=m1,
rhos=rhos,
rhoe=rhoe,
rho=rho,
Bm1=Bm1,
T=T,
D12=D12,
hfg=hfg,
hu=hu,
hs=hs,
cpv=cpv,
he=he,
cpl=cpl,
hT=hT,
qcu=qcu,
Ebe=Ebe,
Ebs=Ebs,
qrs=qrs,
kv=kv,
alpha=alpha,
Bh=Bh,
M=M,
gamma1=gamma1,
gamma2=gamma2,
beta=beta,
Delta_m=Delta_m,
Delta_T=Delta_T,
mu=mu,
nu=nu,
Ts=Ts,
)
# Update this fit
for key, value in self._properties.items():
self._this_fit[key] = value.nominal_value
self._this_fit[f'sig_{key}'] = value.std_dev
    def _set_nondim_groups(self):
        """Compute nondimensional groups and copy them into the fit.

        The names suggest Sherwood (ShR), Nusselt (NuR), Lewis (Le),
        and Grashof (GrR) numbers based on the tube radius —
        NOTE(review): Le is defined here as D12/alpha; confirm against
        the convention used elsewhere in the project.
        """
        Bm1 = self._properties['Bm1']
        mddp = self._properties['mddp']
        R = self._R
        rho = self._properties['rho']
        D12 = self._properties['D12']
        alpha = self._properties['alpha']
        Bh = self._properties['Bh']
        g = self._ACC_G
        nu = self._properties['nu']
        beta = self._properties['beta']
        Delta_T = self._properties['Delta_T']
        gamma1 = self._properties['gamma1']
        Delta_m = self._properties['Delta_m']
        mu = self._properties['mu']
        rhoe = self._properties['rhoe']
        rhos = self._properties['rhos']
        # Manual natural log error propagation -------------------------------
        # (one-sided finite difference, mirroring _set_local_properties)
        # Bm1
        ln_Bm1_nv = log(1 + Bm1.nominal_value)
        ln_Bm1_sig = ln_Bm1_nv - log(1 + Bm1.nominal_value + Bm1.std_dev)
        ln_Bm1 = ufloat(ln_Bm1_nv, abs(ln_Bm1_sig))
        # Bh
        ln_Bh_nv = log(1 + Bh.nominal_value)
        ln_Bh_sig = ln_Bh_nv - log(1 + Bh.nominal_value + Bh.std_dev)
        ln_Bh = ufloat(ln_Bh_nv, abs(ln_Bh_sig))
        # ShR ----------------------------------------------------------------
        ShR = (mddp * R) / (ln_Bm1 * rho * D12)
        # NuR ----------------------------------------------------------------
        NuR = (mddp * R) / (ln_Bh * rho * alpha)
        # Le -----------------------------------------------------------------
        Le = D12/alpha
        # GrR_binary ---------------------------------------------------------
        GrR_binary = (g * R**3 / nu**2) * (beta*Delta_T + gamma1*rho*Delta_m)
        # GrR_primary --------------------------------------------------------
        GrR_primary = (g * R**3 / mu**2) * (rho * (rhos - rhoe))
        self._nondim_groups = dict(
            ShR=ShR,
            NuR=NuR,
            Le=Le,
            GrR_binary=GrR_binary,
            GrR_primary=GrR_primary,
        )
        # Update this fit
        for key, value in self._nondim_groups.items():
            self._this_fit[key] = value.nominal_value
            self._this_fit[f'sig_{key}'] = value.std_dev
# ------------------------------------------------------------------------
# Class helpers: internal use only
def _ols_fit(self):
sample = self._this_sample['m'].tolist()
# Prepare the data
y = [i.nominal_value for i in sample]
sig = [i.std_dev for i in sample]
x = list(range(len(y))) # Always indexed at zero
# Determine fit components
S = sum(1/sig[i]**2 for i in range(len(x)))
Sx = sum(x[i]/sig[i]**2 for i in range(len(x)))
Sy = sum(y[i]/sig[i]**2 for i in range(len(x)))
Sxx = sum(x[i]**2/sig[i]**2 for i in range(len(x)))
Sxy = sum(x[i]*y[i]/sig[i]**2 for i in range(len(x)))
Delta = S*Sxx - Sx**2
# Now calculate model parameters: y = a + bx
a = (Sxx*Sy - Sx*Sxy) / Delta
sig_a = (Sxx/Delta)**0.5
b = (S*Sxy - Sx*Sy) / Delta
sig_b = (S/Delta)**0.5
return dict(
a=a,
sig_a=sig_a,
b=b,
sig_b=sig_b,
)
def _get_best_local_fit(self):
# self._sample always has an odd length, so we use integer division.
center = len(self._sample) // 2
steps = int(self._steps) # Explicitly make a copy
delta = int(steps) # Explicityly make a copy
while center + steps + 1 <= len(self._sample):
self._this_sample = (
self._sample.iloc[center - steps: center + steps + 1, :]
)
fit = self._ols_fit()
# With small sample sizes, b is sometimes zero.
# If this is the case we want to continue.
if fit['b'] == 0:
steps += delta
continue
elif fit['sig_b']/abs(fit['b']) <= self._error:
self._this_fit = fit
return
else:
steps += delta
# We did not find a fit
self._this_fit = None
def _evaluate_fit(self):
# Prepare the data
y = [i.nominal_value for i in self._this_sample['m']]
sig = [i.std_dev for i in self._this_sample['m']]
x = list(range(len(y))) # Always indexed at zero
# Fit parameters
a = self._this_fit['a']
b = self._this_fit['b']
# Calculate R^2
predicted = [a + b*i for i in x]
y_bar = sum(y)/len(y)
SSres = sum((y[i] - predicted[i])**2 for i in range(len(x)))
SStot = sum((y[i] - y_bar)**2 for i in range(len(x)))
R2 = 1 - SSres/SStot
# Now for the merit function; i.e. chi^2
merit_value = sum(((y[i] - a - b*x[i])/sig[i])**2 for i in range(len(x)))
# And the goodness of fit; i.e. Q from Numerical Recipes
Q = chi2.sf(merit_value, len(x)-2)
# update this fit
self._this_fit['r2'] = R2
self._this_fit['q'] = Q
self._this_fit['chi2'] = merit_value
self._this_fit['nu_chi'] = len(x) - 2
self._this_fit['exp_id'] = self._experiment_id
self._this_fit['idx'] = self._idx
|
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
class TypedObjectPlugTest( GafferTest.TestCase ) :
    """Unit tests for Gaffer.ObjectPlug and the typed vector data plugs.

    The deprecated unittest aliases failUnless/failIf (removed in newer
    Python versions) are replaced with assertTrue/assertFalse;
    behaviour is unchanged.
    """

    def testSerialisation( self ) :
        """A dynamic ObjectPlug survives script serialisation."""
        s = Gaffer.ScriptNode()
        s["n"] = Gaffer.Node()
        s["n"]["t"] = Gaffer.ObjectPlug( "hello", defaultValue = IECore.IntData( 1 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
        se = s.serialise()
        s2 = Gaffer.ScriptNode()
        s2.execute( se )
        self.assertTrue( s2["n"]["t"].isInstanceOf( Gaffer.ObjectPlug.staticTypeId() ) )

    def testSerialisationWithConnection( self ) :
        """Connections between dynamic ObjectPlugs survive serialisation."""
        s = Gaffer.ScriptNode()
        s["n"] = Gaffer.Node()
        s["n"]["t"] = Gaffer.ObjectPlug( "hello", defaultValue = IECore.IntData( 0 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
        s["n2"] = Gaffer.Node()
        s["n2"]["t2"] = Gaffer.ObjectPlug( "hello", defaultValue = IECore.IntData( 0 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, direction=Gaffer.Plug.Direction.Out )
        s["n"]["t"].setInput( s["n2"]["t2"] )
        se = s.serialise()
        s2 = Gaffer.ScriptNode()
        s2.execute( se )
        self.assertTrue( s2["n"]["t"].getInput().isSame( s2["n2"]["t2"] ) )

    def testDefaultValue( self ) :
        """defaultValue() returns equal copies unless _copy=False."""
        p = Gaffer.ObjectPlug( "p", defaultValue = IECore.IntVectorData( [ 1, 2, 3 ] ) )
        self.assertEqual( p.defaultValue(), IECore.IntVectorData( [ 1, 2, 3 ] ) )
        self.assertFalse( p.defaultValue().isSame( p.defaultValue() ) )
        self.assertTrue( p.defaultValue( _copy = False ).isSame( p.defaultValue( _copy = False ) ) )

    def testRunTimeTyped( self ) :
        """ObjectPlug derives from ValuePlug in the RunTimeTyped hierarchy."""
        self.assertEqual( IECore.RunTimeTyped.baseTypeId( Gaffer.ObjectPlug.staticTypeId() ), Gaffer.ValuePlug.staticTypeId() )

    def testAcceptsNoneInput( self ) :
        """Disconnecting (None input) is always accepted."""
        p = Gaffer.ObjectPlug( "hello", Gaffer.Plug.Direction.In, IECore.IntData( 10 ) )
        self.assertTrue( p.acceptsInput( None ) )

    def testBoolVectorDataPlug( self ) :
        """BoolVectorDataPlug stores bool vectors and rejects other types."""
        p = Gaffer.BoolVectorDataPlug( "p", defaultValue = IECore.BoolVectorData( [ True, False ] ) )
        self.assertEqual( p.defaultValue(), IECore.BoolVectorData( [ True, False ] ) )
        self.assertEqual( p.getValue(), IECore.BoolVectorData( [ True, False ] ) )
        p.setValue( IECore.BoolVectorData( [ False ] ) )
        self.assertEqual( p.getValue(), IECore.BoolVectorData( [ False ] ) )
        self.assertRaises( Exception, p.setValue, IECore.IntData( 10 ) )

    def testNullDefaultValue( self ) :
        """Constructing with a null default value raises."""
        self.assertRaises( ValueError, Gaffer.ObjectPlug, "hello", defaultValue = None )

    def testNullValue( self ) :
        """Setting a null value raises."""
        p = Gaffer.ObjectPlug( "hello", Gaffer.Plug.Direction.In, IECore.IntData( 10 ) )
        self.assertRaises( ValueError, p.setValue, None )

    def testSerialisationWithValueAndDefaultValue( self ) :
        """Both value and default value survive serialisation."""
        s = Gaffer.ScriptNode()
        s["n"] = Gaffer.Node()
        s["n"]["t"] = Gaffer.ObjectPlug( "hello", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, defaultValue = IECore.IntData( 10 ) )
        s["n"]["t"].setValue( IECore.CompoundObject( { "a" : IECore.IntData( 20 ) } ) )
        se = s.serialise()
        s2 = Gaffer.ScriptNode()
        s2.execute( se )
        self.assertTrue( s2["n"]["t"].isInstanceOf( Gaffer.ObjectPlug.staticTypeId() ) )
        self.assertTrue( s2["n"]["t"].defaultValue() == IECore.IntData( 10 ) )
        self.assertTrue( s2["n"]["t"].getValue() == IECore.CompoundObject( { "a" : IECore.IntData( 20 ) } ) )

    def testConstructCantSpecifyBothInputAndValue( self ) :
        """Supplying both input and value to the constructor raises."""
        out = Gaffer.ObjectPlug( "out", direction=Gaffer.Plug.Direction.Out, defaultValue=IECore.StringData( "hi" ) )
        self.assertRaises( Exception, Gaffer.ObjectPlug, "in", input=out, value=IECore.IntData( 10 ) )

    # Helper node with a static (non-dynamic) ObjectPlug, used by
    # testSerialisationOfStaticPlugs below.
    class TypedObjectPlugNode( Gaffer.Node ) :

        def __init__( self, name="TypedObjectPlugNode" ) :

            Gaffer.Node.__init__( self, name )

            self.addChild(
                Gaffer.ObjectPlug( "p", defaultValue = IECore.IntData( 1 ) ),
            )

    IECore.registerRunTimeTyped( TypedObjectPlugNode )

    def testSerialisationOfStaticPlugs( self ) :
        """Values of static (non-dynamic) plugs survive serialisation."""
        s = Gaffer.ScriptNode()
        s["n"] = self.TypedObjectPlugNode()
        s["n"]["p"].setValue( IECore.IntData( 10 ) )
        se = s.serialise()
        s2 = Gaffer.ScriptNode()
        s2.execute( se )
        self.assertEqual( s2["n"]["p"].getValue(), IECore.IntData( 10 ) )

    def testSetToDefault( self ) :
        """setToDefault() restores the construction-time default."""
        defaultValue = IECore.IntVectorData( [ 1, 2, 3 ] )
        plug = Gaffer.ObjectPlug( defaultValue = defaultValue )
        self.assertEqual( plug.getValue(), defaultValue )
        plug.setValue( IECore.StringData( "value" ) )
        self.assertEqual( plug.getValue(), IECore.StringData( "value" ) )
        plug.setToDefault()
        self.assertEqual( plug.getValue(), defaultValue )

    def testValueType( self ) :
        """Each plug class advertises the IECore type it stores."""
        self.assertTrue( Gaffer.ObjectPlug.ValueType is IECore.Object )
        self.assertTrue( Gaffer.BoolVectorDataPlug.ValueType is IECore.BoolVectorData )
        self.assertTrue( Gaffer.IntVectorDataPlug.ValueType is IECore.IntVectorData )
        self.assertTrue( Gaffer.FloatVectorDataPlug.ValueType is IECore.FloatVectorData )
        self.assertTrue( Gaffer.StringVectorDataPlug.ValueType is IECore.StringVectorData )
        self.assertTrue( Gaffer.V3fVectorDataPlug.ValueType is IECore.V3fVectorData )
        self.assertTrue( Gaffer.Color3fVectorDataPlug.ValueType is IECore.Color3fVectorData )
        self.assertTrue( Gaffer.M44fVectorDataPlug.ValueType is IECore.M44fVectorData )
        self.assertTrue( Gaffer.V2iVectorDataPlug.ValueType is IECore.V2iVectorData )
        self.assertTrue( Gaffer.ObjectVectorPlug.ValueType is IECore.ObjectVector )
        self.assertTrue( Gaffer.AtomicCompoundDataPlug.ValueType is IECore.CompoundData )

    def testSetValueCopying( self ) :
        """setValue() copies by default; _copy=False shares the object."""
        p = Gaffer.ObjectPlug( defaultValue = IECore.IntData( 1 ) )
        i = IECore.IntData( 10 )
        p.setValue( i )
        self.assertFalse( p.getValue( _copy=False ).isSame( i ) )
        i = IECore.IntData( 20 )
        p.setValue( i, _copy=False )
        self.assertTrue( p.getValue( _copy=False ).isSame( i ) )

    def testCreateCounterpart( self ) :
        """createCounterpart() copies name, direction, default, and flags."""
        p = Gaffer.ObjectPlug( defaultValue = IECore.IntData( 20 ) )
        p2 = p.createCounterpart( "c", Gaffer.Plug.Direction.Out )
        self.assertEqual( p2.getName(), "c" )
        self.assertEqual( p2.direction(), Gaffer.Plug.Direction.Out )
        self.assertEqual( p2.defaultValue(), p.defaultValue() )
        self.assertEqual( p2.getFlags(), p.getFlags() )

    def testNoChildrenAccepted( self ) :
        """ObjectPlugs refuse child plugs."""
        p1 = Gaffer.ObjectPlug( defaultValue = IECore.IntData( 20 ) )
        p2 = Gaffer.ObjectPlug( defaultValue = IECore.IntData( 20 ) )
        self.assertFalse( p1.acceptsChild( p2 ) )
        self.assertRaises( RuntimeError, p1.addChild, p2 )
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
|
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Config object backed by a JSON encoded file. This module is compatible with
plistconfig module. The only difference is the on-disk storage format.
"""
import sys
import os
import re
from json.encoder import JSONEncoder
from json.decoder import JSONDecoder
def get_encoder():
    """Return a JSONEncoder configured for human-readable config files.

    Non-ASCII text is emitted as-is (not escaped) and the output is
    indented by two spaces.
    """
    return JSONEncoder(
        skipkeys=False,
        ensure_ascii=False,
        check_circular=True,
        allow_nan=True,
        indent=2,
        separators=(',', ': '),
    )
def dump(conf, fo):
    """Incrementally JSON-encode *conf* and write it to file object *fo*."""
    fo.writelines(get_encoder().iterencode(conf))
def dumps(conf):
    """Return *conf* encoded as a JSON string."""
    return get_encoder().encode(conf)
def load(fo):
    """Decode a JSON config from the readable file object *fo*."""
    return loads(fo.read())
def loads(s):
    """Decode a JSON config from the string *s*."""
    decoder = JSONDecoder(
        object_hook=_object_hook,
        parse_float=None,
        parse_int=None,
        parse_constant=None,
        object_pairs_hook=None,
    )
    return decoder.decode(s)
# json gives us unicode strings. This hook makes them strings.
def _object_hook(d):
rv = {}
for key, value in d.items():
rv[key] = value
return rv
class AutoAttrDict(dict):
    """A dictionary with attribute-style access and automatic container node creation.
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # The dirty flag lives on the instance __dict__, not in the
        # mapping itself, so it is invisible to dict iteration and to
        # JSON encoding.
        self.__dict__["_dirty"] = False
    def __getstate__(self):
        # Pickle support: preserve instance attributes such as _dirty.
        return list(self.__dict__.items())
    def __setstate__(self, items):
        # Pickle support: restore instance attributes saved above.
        for key, val in items:
            self.__dict__[key] = val
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))
    def __str__(self):
        # One "key=value" line per entry; child containers are
        # summarized rather than expanded.
        s = []
        if self:
            for key in self:
                val = self[key]
                if isinstance(val, AutoAttrDict):
                    s.append("{:>22s}=[AutoAttrDict()]".format(key))
                else:
                    s.append("{:>22s}={!r}".format(key, val))
        else:
            s.append("{[empty]}")
        if self.__dict__["_dirty"]:
            s.append(" (modified)")
        return "\n".join(s)
    def __setitem__(self, key, value):
        # Any write marks this node dirty (see is_modified()).
        self.__dict__["_dirty"] = True
        return super(AutoAttrDict, self).__setitem__(key, value)
    def __getitem__(self, name):
        # Missing keys auto-vivify a child container. The child is
        # stored via the superclass setter, so auto-creation does NOT
        # mark the node dirty (unlike add_container).
        try:
            return super(AutoAttrDict, self).__getitem__(name)
        except KeyError:
            d = AutoAttrDict()
            super(AutoAttrDict, self).__setitem__(name, d)
            return d
    def __delitem__(self, name):
        self.__dict__["_dirty"] = True
        return super(AutoAttrDict, self).__delitem__(name)
    # Attribute access is item access: cf.a.b == cf["a"]["b"].
    __getattr__ = __getitem__
    __setattr__ = __setitem__
    __delattr__ = __delitem__
    def copy(self):
        # Shallow copy; the new instance starts with a clean dirty flag.
        return AutoAttrDict(self)
    # perform shell-like variable expansion
    def expand(self, value):
        """Expand $name / ${name} references in *value* from self's keys.

        Non-strings and strings without '$' pass through unchanged.
        Unknown names are re-inserted verbatim. Substituted text is not
        itself re-expanded (the scan resumes after it).
        """
        if not isinstance(value, str):
            return value
        if '$' not in value:
            return value
        i = 0
        while 1:
            mo = _var_re.search(value, i)
            if not mo:
                return value
            i, j = mo.span(0)
            oname = vname = mo.group(1)
            if vname.startswith('{') and vname.endswith('}'):
                vname = vname[1:-1]
            tail = value[j:]
            # Unknown variables fall back to their literal "$name" form.
            value = value[:i] + str(self.get(vname, "$"+oname))
            # Skip past the substituted text before rescanning.
            i = len(value)
            value += tail
    def add_container(self, name):
        """Explicitly create and return a named child container (marks dirty)."""
        d = AutoAttrDict()
        super(AutoAttrDict, self).__setitem__(name, d)
        self.__dict__["_dirty"] = True
        return d
    def tofile(self, path_or_file):
        """Write this tree as JSON and clear all dirty flags."""
        write_config(self, path_or_file)
        reset_modified(self)
# Matches "$NAME" or "${...}"; NAME may contain letters, digits, '_' and '?'.
_var_re = re.compile(r'\$([a-zA-Z0-9_\?]+|\{[^}]*\})')
def read_config(path_or_file):
    """Read a JSON config file and return it as an AutoAttrDict.

    *path_or_file* may be a filesystem path or an open file-like object;
    in the latter case the caller keeps ownership of the handle.
    """
    if isinstance(path_or_file, str):
        # Context manager closes the handle even if parsing raises
        # (the original leaked the file object on error).
        with open(path_or_file, "r") as fp:
            d = load(fp)
    else:
        d = load(path_or_file)
    return _convert_dict(d)
def _convert_dict(d):
    """Recursively wrap nested dicts in AutoAttrDict (keys coerced to str).

    Iterates over a snapshot of the items: the loop may insert d[str(key)]
    while traversing, which in the original risked a RuntimeError
    ("dictionary changed size during iteration") for non-str keys.
    """
    for key, value in list(d.items()):
        if isinstance(value, dict):
            d[str(key)] = _convert_dict(value)
    return AutoAttrDict(d)
def write_config(conf, path_or_file):
    """Write *conf* as a JSON config file.

    *path_or_file* may be a filesystem path or an open file-like object;
    in the latter case the caller keeps ownership of the handle.
    """
    if isinstance(path_or_file, str):
        # Context manager closes the handle even if serialization raises
        # (the original leaked the file object on error).
        with open(path_or_file, "w+") as fp:
            dump(conf, fp)
    else:
        dump(conf, path_or_file)
def is_modified(conf):
    """Return True if *conf* or any nested AutoAttrDict has been modified."""
    if conf.__dict__["_dirty"]:
        return True
    return any(is_modified(child)
               for child in conf.values()
               if isinstance(child, AutoAttrDict))
def reset_modified(conf):
    """Clear the dirty flag on *conf* and on every nested AutoAttrDict."""
    # Iterative traversal; equivalent to the original recursion.
    pending = [conf]
    while pending:
        node = pending.pop()
        node.__dict__["_dirty"] = False
        pending.extend(child for child in node.values()
                       if isinstance(child, AutoAttrDict))
def get_config(filename=None, init=None):
    """Get an existing or new json config object.

    Optionally initialize from another dictionary (*init* wins over
    *filename*).  A missing file is created empty on disk.
    """
    if init is not None:
        return _convert_dict(init)
    if filename is None:
        return AutoAttrDict()
    if not os.path.exists(filename):
        fresh = AutoAttrDict()
        write_config(fresh, filename)
        return fresh
    return read_config(filename)
if __name__ == "__main__":
    # Self-test: exercises auto-creation of nested containers, shell-style
    # variable expansion, JSON round-tripping, and dirty-flag tracking.
    # from pycopia import autodebug
    cf = get_config()
    cf.parts.program.flags.flagname = 2
    cf.parts.program.path = "$BASE/program"
    cf.parts.BASE = "bin"
    assert cf.parts.program.flags.flagname == 2
    assert cf.parts.program.path == "$BASE/program"
    assert cf.parts.expand(cf.parts.program.path) == "bin/program"
    # Persist, re-read, and verify the structure survived the round trip.
    cf.tofile("/tmp/testjson.json")
    del cf
    cf = read_config("/tmp/testjson.json")
    assert type(cf) is AutoAttrDict
    assert cf.parts.program.flags.flagname == 2
    assert cf.parts.program.path == "$BASE/program"
    assert cf.parts.expand(cf.parts.program.path) == "bin/program"
    # A freshly-read config is clean; any mutation marks it dirty,
    # and tofile() clears the flags again.
    assert is_modified(cf) == False
    cf.parts.program.flags.flagname = 3
    assert cf.parts.program.flags.flagname == 3
    assert is_modified(cf) == True
    cf.tofile("/tmp/testjson.json")
    assert is_modified(cf) == False
    del cf
    cf = read_config("/tmp/testjson.json")
    assert cf.parts.program.flags.flagname == 3
    assert is_modified(cf) == False
    # Deleting the attribute leaves an (auto-created) empty container behind.
    del cf.parts.program.flags.flagname
    assert len(cf.parts.program.flags) == 0
    assert len(cf.parts.program["flags"]) == 0
    assert is_modified(cf) == True
    assert cf.parts.program.flags is cf.parts.program["flags"]
|
|
import random
import math
import datetime as dt
import urllib.request
import urllib
import requests
try:
# Python 2.6-2.7
from HTMLParser import HTMLParser
except ImportError:
# Python 3
from html.parser import HTMLParser
# This module contains all the shit methods used for getting comic URLs... ugh.
def julianDate(my_date):
    """Convert a date string "MM-DD-YYYY" to its integer Julian Day Number.

    BUGFIX: the original arithmetic used Python 3 true division (the
    formula requires integer division) and reused the already-overwritten
    ``month`` variable inside the formula, producing wrong values.  This
    uses the standard Fliegel & Van Flandern Gregorian JDN formula.
    """
    month_s, day_s, year_s = my_date.split("-")
    month = int(month_s)
    day = int(day_s)
    year = int(year_s)
    a = (14 - month) // 12          # 1 for Jan/Feb, else 0
    y = year + 4800 - a
    m = month + 12 * a - 3          # March-based month index
    return (day + (153 * m + 2) // 5 + 365 * y + y // 4
            - y // 100 + y // 400 - 32045)
# Function from: https://gist.github.com/jiffyclub/1294443
def date_to_jd(year, month, day):
    """Convert a calendar date (day may be fractional) to a Julian Date."""
    if month in (1, 2):
        # January/February count as months 13/14 of the previous year.
        adj_year = year - 1
        adj_month = month + 12
    else:
        adj_year = year
        adj_month = month
    # Dates on/after 1582-10-15 are Gregorian and need the century
    # correction; earlier dates are Julian-calendar dates.
    gregorian = not (year < 1582
                     or (year == 1582 and month < 10)
                     or (year == 1582 and month == 10 and day < 15))
    if gregorian:
        century = math.trunc(adj_year / 100.)
        correction = 2 - century + math.trunc(century / 4.)
    else:
        correction = 0
    if adj_year < 0:
        year_days = math.trunc((365.25 * adj_year) - 0.75)
    else:
        year_days = math.trunc(365.25 * adj_year)
    month_days = math.trunc(30.6001 * (adj_month + 1))
    return correction + year_days + month_days + day + 1720994.5
# Function from: https://gist.github.com/jiffyclub/1294443
def jd_to_date(jd):
    """Convert a Julian Date to (year, month, fractional_day)."""
    frac, whole = math.modf(jd + 0.5)
    whole = int(whole)
    if whole > 2299160:
        # Gregorian-calendar date: undo the century correction.
        alpha = math.trunc((whole - 1867216.25) / 36524.25)
        adjusted = whole + 1 + alpha - math.trunc(alpha / 4.)
    else:
        adjusted = whole
    c = adjusted + 1524
    d = math.trunc((c - 122.1) / 365.25)
    e = math.trunc(365.25 * d)
    g = math.trunc((c - e) / 30.6001)
    day = c - e + frac - math.trunc(30.6001 * g)
    month = g - 1 if g < 13.5 else g - 13
    year = d - 4716 if month > 2.5 else d - 4715
    return year, month, day
# Find string between 2 strings
def find_between(s, first, last):
    """Return the text between the first *first* and the following *last*.

    Returns "" if either delimiter is missing.
    """
    try:
        start = s.index(first) + len(first)
        return s[start:s.index(last, start)]
    except ValueError:
        return ""
def find_first_between(source, start_sep, end_sep):
    """Return the text between the earliest start_sep/end_sep pair, or None."""
    for chunk in source.split(start_sep):
        if end_sep in chunk:
            # The first chunk that can be terminated wins.
            return chunk.split(end_sep)[0]
    return None
def find_last_between(source, start_sep, end_sep):
    """Return the text between the final start_sep/end_sep pair, or None."""
    matches = [chunk.split(end_sep)[0]
               for chunk in source.split(start_sep)
               if end_sep in chunk]
    return matches[-1] if matches else None
def getImageHTML(url, ua: str = ''):
    """Fetch *url* with the given User-Agent.

    Returns str(body) on success, None on any error (best-effort scraper).
    """
    try:
        request = urllib.request.Request(url, data=None,
                                         headers={'User-Agent': ua})
        with urllib.request.urlopen(request) as response:
            return str(response.read())
    except Exception:
        return None
def getImageURL(html):
    """Extract the comic image URL from the data-image attribute."""
    raw = find_between(html, "data-image=", "data-date=")
    return raw.replace('"', '').strip()
def getImageTitle(html):
    """Extract and HTML-unescape the comic title from the data-title attribute.

    BUGFIX: HTMLParser().unescape() was deprecated and removed in
    Python 3.9; use the supported html.unescape() instead.
    """
    from html import unescape
    raw = find_between(html, "data-title=", "data-tags=")
    return unescape(raw).replace('"', '').strip()
# C&H Methods
def getCHURL(html, date):
    """Return the comic page URL whose link text equals *date* (YYYY.MM.DD).

    Matches the markup: <a href="[comic url]">2005.01.31</a>
    """
    link = find_last_between(html, '<a href="', "\">" + date + "</a>")
    return link.replace('"', '').strip() if link else None
def getCHImageURL(html):
    """Extract the main comic image URL, normalizing scheme and trailing slash."""
    src = find_last_between(html, 'id="main-comic" src=', '>')
    if not src:
        return None
    # Strip quotes, whitespace, and any cache-buster query suffix.
    src = src.replace('"', '').strip().split("?t=")[0]
    if src.startswith("//"):
        # Protocol-relative URL: default to http.
        src = "http:" + src
    # Drop a single trailing slash if present.
    return src[:-1] if src.endswith("/") else src
# XKCD Methods
def getNewestXKCD(html):
    """Extract the newest comic's id from the middleContainer block."""
    container = find_last_between(html, 'div id="middleContainer"', "</div>")
    if not container:
        return None
    href = find_first_between(container, "href=", " title=")
    href = href.replace('/', '').strip()
    return href.replace('"', '').strip()
def getXKCDURL(html, date):
    """Return the comic URL whose link title equals *date* (YYYY-M(M)-D(D)).

    Matches the markup: <a href="/17/" title="2006-1-1">What If</a>
    """
    container = find_last_between(html, 'div id="comic"', "</div>")
    if not container:
        return None
    # NOTE: the search runs over the full page, not just the comic block.
    href = find_first_between(html, "href=", " title=\"" + date + "\"")
    if href is None:
        return None
    return href.replace('"', '').strip()
def getXKCDImageURL(html):
    """Extract the comic image URL, prefixing http: if protocol-relative."""
    container = find_last_between(html, 'div id="comic"', "</div>")
    if not container:
        return None
    src = find_last_between(container, "img src=", "title=")
    src = src.replace('"', '').strip()
    return "http:" + src if src.startswith("//") else src
def getXKCDImageTitle(html):
    """Extract and HTML-unescape the comic alt-text title.

    BUGFIX: HTMLParser().unescape() was deprecated and removed in
    Python 3.9; use the supported html.unescape() instead.
    """
    from html import unescape
    container = find_last_between(html, 'div id="comic"', "</div>")
    if not container:
        return None
    title = find_last_between(container, "alt=", ">")
    # Drop a trailing srcset= attribute if the match captured it.
    title = title.split('srcset=')[0]
    title = unescape(title)
    title = title.replace('"', '').strip()
    return title.replace('/', '').strip()
# Garfield Minus Garfield Methods
def getGMGImageURL(html):
    """Extract the comic image URL from the photo block."""
    if not html:
        return None
    photo = find_last_between(html, 'div class="photo"', "</a>")
    if not photo:
        return None
    src = find_last_between(photo, "img src=", " alt=")
    return src.replace('"', '').strip()
# Garfield Methods
def getGImageURL(html):
    """Extract the responsive comic image URL."""
    if not html:
        return None
    src = find_last_between(html, 'img class="img-responsive" src=', ' width')
    return src.replace('"', '').strip() if src else None
# Peanuts Methods
def getPeanutsImageURL(html):
    """Extract the comic image URL from the last src= attribute on the page."""
    if not html:
        return None
    src = find_last_between(html, 'src=', ' />')
    return src.replace('"', '').strip() if src else None
|
|
# -*- coding: utf-8 -*-
"""
CRF with different types of nodes
NOTE: this is an abstract class. Do not use directly.
JL. Meunier
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import numpy as np
from .crf import CRF
from ..inference import get_installed
class InconsistentLabel(Exception):
    """Raised when labels in Y fall outside the state range of their node type."""
    pass
class TypedCRF(CRF):
    """Abstract base class for CRFs whose nodes are partitioned into types.

    Each node type has its own number of states (labels) and its own number
    of node features.  X is structured by type as
    (l_node_features, l_edges, l_edge_features); Y is a flat label array
    with a unique code per (type, state) pair.
    """
    def __init__(self,
                 n_types,                 # how many node type?
                 l_n_states,              # how many labels per node type?
                 l_n_features,            # how many features per node type?
                 inference_method="ad3",
                 l_class_weight=None):    # class_weight per node type or None
        #                                  <list of array-like> or None
        if inference_method is None:
            # get first in list that is installed
            inference_method = get_installed(['ad3+', 'ad3'])[0]
        self.setInferenceMethod(inference_method)
        self.inference_calls = 0
        # if inference cannot be done, raises an exception
        self.inference_exception = False
        if len(l_n_states) != n_types:
            raise ValueError("Expected 1 number of states per node type.")
        if l_n_features is not None and len(l_n_features) != n_types:
            raise ValueError("Expected 1 number of features per node type.")
        self.n_types = n_types
        self.l_n_states = l_n_states
        self._n_states = sum(l_n_states)            # total number of states
        self.l_n_features = l_n_features
        self._n_features = sum(self.l_n_features)   # total number of node feat.
        # number of (type x type) states, i.e. states per type of edge
        self.l_n_edge_states = [n1 * n2
                                for n1 in self.l_n_states
                                for n2 in self.l_n_states]
        # class weights:
        # either we get class weights for all types of nodes
        # , or for none of them!
        if l_class_weight:
            if len(l_class_weight) != self.n_types:
                raise ValueError("Expected 1 class weight list per node type.")
            for i, n_states in enumerate(self.l_n_states):
                if len(l_class_weight[i]) != n_states:
                    raise ValueError("Expected 1 class weight per state"
                                     " per node type. Wrong for type %d" % i)
            # class weights are computed by type and simply concatenated
            self.l_class_weight = [np.asarray(class_weight)
                                   for class_weight in l_class_weight]
        else:
            self.l_class_weight = [np.ones(n) for n in self.l_n_states]
        self.class_weight = np.hstack(self.l_class_weight)
        self._set_size_joint_feature()
        # internal stuff
        # when putting node states in a single sequence, index of 1st state
        # for type i
        self._l_type_startindex = [sum(self.l_n_states[:i])
                                   for i in range(self.n_types+1)]
        # when putting edge states in a single sequence, index of 1st state of
        # an edge of type (typ1, typ2)
        self.a_startindex_by_typ_typ = np.zeros((self.n_types, self.n_types),
                                                dtype=np.uint32)
        i_state_start = 0
        for typ1, typ1_n_states in enumerate(self.l_n_states):
            for typ2, typ2_n_states in enumerate(self.l_n_states):
                self.a_startindex_by_typ_typ[typ1, typ2] = i_state_start
                i_state_start += typ1_n_states*typ2_n_states

    # -------------- CONVENIENCE --------------------------
    def setInferenceMethod(self, inference_method):
        """Set the inference backend; only "ad3" and "ad3+" are supported."""
        if inference_method in ["ad3", "ad3+"]:
            self.inference_method = inference_method
        else:
            raise Exception("You must use ad3 or ad3+ as inference method")

    def flattenY(self, lY_by_typ):
        """
        It is more convenient to have the Ys grouped by type, as the Xs are,
        and to have the first label of each type encoded as 0.
        This method does the job. It returns a flat Y array, with unique code
        per class label, which can be passed to 'fit'
        """
        lY = list()
        for n_start_state, Y_typ in zip(self._l_type_startindex, lY_by_typ):
            # shift each type's labels by its start index to make them unique
            lY.append(np.asarray(Y_typ) + n_start_state)
        return np.hstack(lY)

    def unflattenY(self, X, flatY):
        """
        predict returns a flat array of Y (same structure as for 'fit')
        This method structures the Y as a list of Y_per_type, where the first
        label of any type is 0
        """
        lY = list()
        i_start_node = 0
        (l_node_features, l_edges, l_edge_features) = X
        for n_start_state, nf in zip(self._l_type_startindex, l_node_features):
            n_nodes = nf.shape[0]
            Y = flatY[i_start_node: i_start_node+n_nodes] - n_start_state
            lY.append(Y)
            i_start_node += n_nodes
        if flatY.shape != (i_start_node,):
            raise ValueError("The total number of label does not match the"
                             " total number of nodes:"
                             " %d != %d" % (flatY.shape[0], i_start_node))
        return lY

    def initialize(self, X, Y=None):
        """
        It is optional to call it. Does data checking only!
        """
        if isinstance(X, list):
            # BUGFIX: the original used map(), which is lazy in Python 3,
            # so the size checks silently never ran. Iterate explicitly.
            for x in X:
                self._check_size_x(x)
            if Y is not None:
                for x, y in zip(X, Y):
                    self._check_size_xy(x, y)
        else:
            self._check_size_x(X)
            self._check_size_xy(X, Y)

    def setInferenceException(self, bRaiseExceptionWhenInferenceNotSuccessful):
        """
        set exception on or off when inference cannot be done.
        """
        self.inference_exception = bRaiseExceptionWhenInferenceNotSuccessful
        return self.inference_exception

    # -------------- INTERNAL STUFF --------------------------
    def _set_size_joint_feature(self):
        """
        We have:
        - 1 weight per node feature per label per node type
        """
        self.size_unaries = sum(n_states * n_features for n_states, n_features
                                in zip(self.l_n_states, self.l_n_features)
                                )
        self.size_joint_feature = self.size_unaries

    def __repr__(self):
        return ("%s(n_states: %s, inference_method: %s)"
                % (type(self).__name__, self.l_n_states,
                   self.inference_method))

    def _check_size_x(self, x):
        """Validate the structure of one instance x; raise ValueError if bad."""
        # node_features are [ i_in_typ -> features ]
        l_node_features = self._get_node_features(x)
        if len(l_node_features) != self.n_types:
            raise ValueError("Expected one node feature array per node type.")
        for typ, typ_features in enumerate(l_node_features):
            if typ_features.shape[1] != self.l_n_features[typ]:
                raise ValueError("Expected %d features for type"
                                 " %d" % (self.l_n_features[typ], typ))
        # edges
        l_edges = self._get_edges(x)
        for edges in l_edges:
            if edges is None:
                continue
            if edges.ndim != 2:
                raise ValueError("Expected a 2 dimensions edge arrays")
            if edges.shape[1] != 2:
                raise ValueError("Expected 2 columns in edge arrays")
        for typ1, typ2 in self._iter_type_pairs():
            edges = self._get_edges_by_type(x, typ1, typ2)
            if edges is None or len(edges) == 0:
                continue
            # edges should point to valid node indices
            nodes1, nodes2 = edges[:, 0], edges[:, 1]
            if min(nodes1) < 0 or min(nodes2) < 0:
                raise ValueError("At least one edge points to negative and"
                                 " therefore invalid node index:"
                                 " type %d to type %d" % (typ1, typ2))
            if max(nodes1) >= l_node_features[typ1].shape[0]:
                raise ValueError("At least one edge starts from a non-existing"
                                 " node index:"
                                 " type %d to type %d" % (typ1, typ2))
            if max(nodes2) >= l_node_features[typ2].shape[0]:
                raise ValueError("At least one edge points to a non-existing"
                                 " node index:"
                                 " type %d to type %d" % (typ1, typ2))
        return True

    def _check_size_xy(self, X, Y):
        """Validate that Y has one label per node, in each type's own range."""
        if Y is None:
            return
        # make sure Y has the proper length and acceptable labels
        l_node_features = self._get_node_features(X)
        nb_nodes = sum(nf.shape[0] for nf in l_node_features)
        if Y.shape[0] != nb_nodes:
            raise ValueError("Expected 1 label for each of the %d nodes. Got"
                             " %d labels." % (nb_nodes, Y.shape[0]))
        i_start = 0
        for typ, nf, n_states in zip(range(self.n_types),
                                     l_node_features,
                                     self.l_n_states):
            nb_nodes = nf.shape[0]
            if nb_nodes == 0:
                continue
            Y_typ = Y[i_start:i_start+nb_nodes]
            if np.min(Y_typ) < 0:
                raise ValueError("Got a negative label for type %d" % typ)
            if np.min(Y_typ) < self._l_type_startindex[typ]:
                raise InconsistentLabel("labels of type %d start at %d"
                                        "" % (typ,
                                              self._l_type_startindex[typ]))
            if np.max(Y_typ) >= self._l_type_startindex[typ+1]:
                raise InconsistentLabel("labels of type %d end at %d"
                                        "" % (typ,
                                              self._l_type_startindex[typ+1]-1)
                                        )
            i_start = i_start + nb_nodes
        return True

    def _get_node_features(self, x):
        """Return x's node feature arrays; None entries become empty arrays."""
        return [np.empty((0, _n_feat)) if node_features is None
                else node_features
                for (node_features, _n_feat) in zip(x[0], self.l_n_features)]

    def _get_edges(self, x):
        """Return x's edge arrays; None/empty entries become (0, 2) arrays."""
        return [np.empty((0, 2)) if edges is None or len(edges) == 0
                else edges for edges in x[1]]

    def _get_edges_by_type(self, x, typ1, typ2):
        """Return the edge array for edges going from typ1 nodes to typ2 nodes."""
        return x[1][typ1 * self.n_types+typ2]

    def _iter_type_pairs(self):
        """Yield all ordered (typ1, typ2) node-type pairs."""
        for typ1 in range(self.n_types):
            for typ2 in range(self.n_types):
                yield (typ1, typ2)
        return

    def _get_unary_potentials(self, x, w):
        """Computes unary potentials for x and w.
        Parameters
        ----------
        x : tuple
            Instance Representation.
        w : ndarray, shape=(size_joint_feature,)
            Weight vector for CRF instance.
        Returns
        -------
        unaries : list of ndarray, shape=( n_nodes_typ, n_states_typ )
            Unary weights.
        """
        self._check_size_w(w)
        l_node_features = self._get_node_features(x)
        l_unary_potentials = []
        i_w = 0
        for (features, n_states, n_features) in zip(l_node_features,
                                                    self.l_n_states,
                                                    self.l_n_features):
            n_w = n_states*n_features
            l_unary_potentials.append(
                np.dot(features,
                       w[i_w:i_w+n_w].reshape(n_states,
                                              n_features).T
                       )
                )
            i_w += n_w
        assert i_w == self.size_unaries
        # nodes x features . features x states --> nodes x states
        return l_unary_potentials

    def continuous_loss(self, y, l_y_hat):
        """Continuous (relaxed) loss between true labels y and LP marginals."""
        # continuous version of the loss
        # y is the result of linear programming
        # BUT, in multitype mode, y_hat is a list of unaries
        l_result = list()
        cum_n_node = 0
        cum_n_state = 0
        for y_hat in l_y_hat:
            n_node, n_state = y_hat.shape
            # all entries minus correct ones
            # select the correct range of labels and make the labels start at 0
            y_type = y[cum_n_node:cum_n_node+n_node] - cum_n_state
            gx = np.indices(y_type.shape)
            result = 1 - y_hat[gx, y_type]
            l_result.append(result)
            cum_n_node += n_node
            cum_n_state += n_state
        result = np.hstack(l_result)
        if hasattr(self, 'class_weight'):
            return np.sum(self.class_weight[y] * result)
        return np.sum(result)
|
|
suite = {
"mxversion" : "5.55.0",
"name" : "sulong",
"versionConflictResolution" : "latest",
"imports" : {
"suites" : [
{
"name" : "graal-core",
"version" : "ca165d0f0de274911d1b36b0113e3fd4c6952787",
"urls" : [
{"url" : "https://github.com/graalvm/graal-core", "kind" : "git"},
{"url" : "https://curio.ssw.jku.at/nexus/content/repositories/snapshots", "kind" : "binary"},
]
},
{
"name" : "truffle",
"version" : "ca21972635d350fcce90f1934d5882e144621d18",
"urls" : [
{"url" : "https://github.com/graalvm/truffle", "kind" : "git"},
{"url" : "https://curio.ssw.jku.at/nexus/content/repositories/snapshots", "kind" : "binary"},
]
},
],
},
"javac.lint.overrides" : "none",
"libraries" : {
"LLVM_IR_PARSER" : {
"path" : "lib/com.intel.llvm.ireditor-1.0.8.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/com.intel.llvm.ireditor-1.0.8.jar",
],
"sha1" : "7d8495fa6bbf3acb4faf4cedf42460be541e69dc",
"maven" : {
"groupId" : "parser",
"artifactId" : "parser",
"version" : "1.0.8",
}
},
"EMF_COMMON" : {
"path" : "lib/org.eclipse.emf.common_2.11.0.v20150512-0501.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/emf/org.eclipse.emf.common_2.11.0.v20150512-0501.jar",
],
"sha1" : "2ee408923125830711b2817095010bce18ee8bb7",
"maven" : {
"groupId" : "emf",
"artifactId" : "emfcommon",
"version" : "2.11.0.v20150512-0501",
}
},
"ECORE" : {
"path" : "lib/org.eclipse.emf.ecore_2.11.0.v20150512-0501.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/emf/org.eclipse.emf.ecore_2.11.0.v20150512-0501.jar",
],
"sha1" : "4dc95540c73cce54846ad976fbbe997a7f11aa9b",
"maven" : {
"groupId" : "emf",
"artifactId" : "emf",
"version" : "2.11.0.v20150512-0501.jar",
}
},
"INJECT" : {
"path" : "lib/com.google.inject_3.0.0.v201312141243.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/xtext/com.google.inject_3.0.0.v201312141243.jar",
],
"sha1" : "2f5301dcdccf1a88b0022b932b6363825918d9a1",
"maven" : {
"groupId" : "xtext",
"artifactId" : "inject",
"version" : "2.11.0.v20150512-0501.jar",
}
},
"XTEXT" : {
"path" : "lib/org.eclipse.xtext_2.9.2.v201603040440.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/xtext/org.eclipse.xtext_2.9.2.v201603040440.jar",
],
"sha1" : "17d747e9d13de0d07ca9139c8d457f8d251bccfc",
"maven" : {
"groupId" : "xtext",
"artifactId" : "xtext",
"version" : "2.8.4.v201508050135.jar",
}
},
"XTEXT_XBASE" : {
"path" : "lib/org.eclipse.xtext.xbase_2.9.2.v201603040440.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/xtext/org.eclipse.xtext.xbase_2.9.2.v201603040440.jar",
],
"sha1" : "dac32b3a2021127f42eedc099d77a66109587dde",
"maven" : {
"groupId" : "xtext",
"artifactId" : "xtext",
"version" : "2.9.2.v201603040440.jar",
}
},
"XTEXT_XBASE_LIB" : {
"path" : "lib/org.eclipse.xtext.xbase.lib_2.9.2.v201603040440.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/xtext/org.eclipse.xtext.xbase.lib_2.9.2.v201603040440.jar",
],
"sha1" : "7d184d42dd41a6c470007998a060fc07bad0a1b4",
"maven" : {
"groupId" : "xtext",
"artifactId" : "xtext",
"version" : "2.9.2.v201603040440.jar",
}
},
"EMF_ECORE_XMI" : {
"path" : "lib/org.eclipse.emf.ecore.xmi_2.11.0.v20150512-0501.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/emf/org.eclipse.emf.ecore.xmi_2.11.0.v20150512-0501.jar",
],
"sha1" : "14711c456be51f16102bfd94cc5ad144b5dad4a3",
"maven" : {
"groupId" : "emf",
"artifactId" : "emf",
"version" : "2.11.0.v20150512-0501.jar",
}
},
"XTEXT_TYPES" : {
"path" : "lib/org.eclipse.xtext.common.types_2.8.4.v201508050135.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/xtext/org.eclipse.xtext.common.types_2.8.4.v201508050135.jar",
],
"sha1" : "61dfb0e684ecf3a89392d151c440bafd99ff4711",
"maven" : {
"groupId" : "xtext",
"artifactId" : "text",
"version" : "2.8.4.v201508050135.jar",
}
},
"XTEXT_JAVAX_INJECT" : {
"path" : "lib/javax.inject_1.0.0.v20091030.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/xtext/javax.inject_1.0.0.v20091030.jar",
],
"sha1" : "38623235627d561c3eb9a558de9a5535a1c30e29",
"maven" : {
"groupId" : "xtext",
"artifactId" : "text",
"version" : "javax.inject_1.0.0.v20091030.jar",
}
},
"XTEXT_LOG4J" : {
"path" : "lib/org.apache.log4j_1.2.15.v201012070815.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/xtext/org.apache.log4j_1.2.15.v201012070815.jar",
],
"sha1" : "c8ec3aac571c457e84a039722a6b471a107c25bf",
"maven" : {
"groupId" : "xtext",
"artifactId" : "text",
"version" : "1.2.15.v201012070815.jar",
}
},
"XTEXT_GOOGLE_GUAVA" : {
"path" : "lib/com.google.guava_15.0.0.v201403281430.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/xtext/com.google.guava_15.0.0.v201403281430.jar",
],
"sha1" : "6bc5d67ff18f033093fb493c0127a4219b1613a3",
"maven" : {
"groupId" : "xtext",
"artifactId" : "text",
"version" : "15.0.0.v201403281430.jar",
}
},
"XTEXT_ANTLR_RUNTIME" : {
"path" : "lib/org.antlr.runtime_3.2.0.v201101311130.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/xtext/org.antlr.runtime_3.2.0.v201101311130.jar",
],
"sha1" : "94105115603f6e3276da3be15fc8d3186ed9e92e",
"maven" : {
"groupId" : "xtext",
"artifactId" : "text",
"version" : "3.2.0.v201101311130.jar",
}
},
"XTEXT_UTIL" : {
"path" : "lib/org.eclipse.xtext.util_2.8.4.v201508050135.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/xtext/org.eclipse.xtext.util_2.8.4.v201508050135.jar",
],
"sha1" : "70616b797177f2e2b1b844f02a188e2837d648cb",
"maven" : {
"groupId" : "xtext",
"artifactId" : "text",
"version" : "2.8.4.v201508050135.jar",
}
},
"ECLIPSE_EQUINOX" : {
"path" : "lib/org.eclipse.equinox.common_3.6.200.v20130402-1505.jar",
"urls" : [
"http://lafo.ssw.uni-linz.ac.at/sulong-deps/org.eclipse.equinox.common_3.6.200.v20130402-1505.jar",
],
"sha1" : "550778d95ea4d5f2fee765e85eb799cec21067e0",
"maven" : {
"groupId" : "eclipse",
"artifactId" : "eclipse",
"version" : "3.6.200.v20130402-1505.jar",
}
},
"ARGON2" : {
"path" : "tests/phc-winner-argon2-20160406.tar.gz",
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/sulong-deps/20160406.tar.gz",
"https://github.com/P-H-C/phc-winner-argon2/archive/20160406.tar.gz",
],
"sha1" : "5552052e53fcd7fe40c558866c9cd51027c17322",
},
"LLVM_TEST_SUITE" : {
"path" : "tests/test-suite-3.2.src.tar.gz",
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/sulong-deps/test-suite-3.2.src.tar.gz",
"http://llvm.org/releases/3.2/test-suite-3.2.src.tar.gz",
],
"sha1" : "e370255ca2540bcd66f316fe5b96f459382f3e8a",
},
"GCC_SOURCE" : {
"path" : "tests/gcc-5.2.0.tar.gz",
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/sulong-deps/gcc-5.2.0.tar.gz",
"http://gd.tuwien.ac.at/gnu/gcc/releases/gcc-5.2.0/gcc-5.2.0.tar.gz",
"ftp://ftp.fu-berlin.de/unix/languages/gcc/releases/gcc-5.2.0/gcc-5.2.0.tar.gz",
"http://mirrors-usa.go-parts.com/gcc/releases/gcc-5.2.0/gcc-5.2.0.tar.gz",
],
"sha1" : "713211883406b3839bdba4a22e7111a0cff5d09b",
},
"SHOOTOUT_SUITE" : {
"path" : "tests/benchmarksgame-scm-latest.tar.gz",
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/sulong-deps/benchmarksgame-scm-latest.tar.gz",
],
"sha1" : "9684ca5aaa38ff078811f9b42f15ee65cdd259fc",
},
},
"projects" : {
"com.oracle.truffle.llvm.test" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm",
"com.oracle.truffle.llvm.tools",
"com.oracle.truffle.llvm.pipe",
"truffle:TRUFFLE_TCK",
"mx:JUNIT",
],
"checkstyle" : "com.oracle.truffle.llvm.test",
"annotationProcessors" : ["SULONG_OPTIONS"],
"javaCompliance" : "1.8",
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.bench" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm",
],
"checkstyle" : "com.oracle.truffle.llvm",
"javaCompliance" : "1.8",
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.tools" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.runtime",
],
"checkstyle" : "com.oracle.truffle.llvm.nodes",
"javaCompliance" : "1.8",
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.types" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.runtime",
"truffle:TRUFFLE_API",
"graal-core:GRAAL_TRUFFLE_HOTSPOT",
],
"checkstyle" : "com.oracle.truffle.llvm.nodes",
"javaCompliance" : "1.8",
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.types.test" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.types",
"mx:JUNIT",
],
"checkstyle" : "com.oracle.truffle.llvm.test",
"javaCompliance" : "1.8",
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.runtime" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"truffle:TRUFFLE_API",
"com.oracle.truffle.llvm.option"
],
"checkstyle" : "com.oracle.truffle.llvm.nodes",
"annotationProcessors" : ["SULONG_OPTIONS"],
"javaCompliance" : "1.8",
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.nodes" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"truffle:TRUFFLE_API",
"com.oracle.truffle.llvm.types"
],
"checkstyle" : "com.oracle.truffle.llvm.nodes",
"javaCompliance" : "1.8",
"annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.nodes.impl" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.context",
],
"checkstyle" : "com.oracle.truffle.llvm.nodes",
"javaCompliance" : "1.8",
"annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.intel.llvm.ireditor" : {
"subDir" : "projects",
"sourceDirs" : ["dummy-src"],
"dependencies" : [
"EMF_COMMON", "ECORE", "INJECT", "XTEXT", "XTEXT_XBASE", "XTEXT_XBASE_LIB", "EMF_ECORE_XMI", "XTEXT_TYPES", "XTEXT_JAVAX_INJECT", "XTEXT_LOG4J", "XTEXT_GOOGLE_GUAVA", "XTEXT_ANTLR_RUNTIME", "XTEXT_UTIL", "ECLIPSE_EQUINOX"
],
"javaCompliance" : "1.8",
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.parser" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.types",
],
"checkstyle" : "com.oracle.truffle.llvm",
"javaCompliance" : "1.8",
"annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.parser.base" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.parser",
"com.oracle.truffle.llvm.nodes",
],
"checkstyle" : "com.oracle.truffle.llvm",
"javaCompliance" : "1.8",
"annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.parser.impl" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.context",
"LLVM_IR_PARSER",
"EMF_COMMON", "ECORE", "INJECT", "XTEXT", "XTEXT_XBASE", "XTEXT_XBASE_LIB", "EMF_ECORE_XMI", "XTEXT_TYPES", "XTEXT_JAVAX_INJECT", "XTEXT_LOG4J", "XTEXT_GOOGLE_GUAVA", "XTEXT_ANTLR_RUNTIME", "XTEXT_UTIL", "ECLIPSE_EQUINOX"
],
"checkstyle" : "com.oracle.truffle.llvm",
"javaCompliance" : "1.8",
"annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.parser.factories",
# TODO: remove parser.impl and parser.bc.impl dependency here
"com.oracle.truffle.llvm.parser.impl",
"com.oracle.truffle.llvm.parser.bc.impl",
],
"checkstyle" : "com.oracle.truffle.llvm",
"javaCompliance" : "1.8",
"annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.asm.amd64" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.nodes.impl",
],
"checkstyle" : "com.oracle.truffle.llvm",
"javaCompliance" : "1.8",
"annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.parser.factories" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.asm.amd64",
],
"checkstyle" : "com.oracle.truffle.llvm",
"javaCompliance" : "1.8",
"annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.context" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"graal-core:GRAAL_TRUFFLE_HOTSPOT",
"com.oracle.truffle.llvm.parser.base"
],
"checkstyle" : "com.oracle.truffle.llvm",
"javaCompliance" : "1.8",
"annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.parser.bc.impl" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.context",
],
"checkstyle" : "com.oracle.truffle.llvm",
"javaCompliance" : "1.8",
"annotationProcessors" : ["truffle:TRUFFLE_DSL_PROCESSOR"],
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.pipe" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.runtime",
],
"checkstyle" : "com.oracle.truffle.llvm.test",
"javaCompliance" : "1.8",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.pipe.native" : {
"subDir" : "projects",
"native" : True,
"vpath" : True,
"results" : [
"bin/libpipe.so"
],
"dependencies" : [
"com.oracle.truffle.llvm.pipe",
],
"checkstyle" : "com.oracle.truffle.llvm.test",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.option" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"checkstyle" : "com.oracle.truffle.llvm",
"javaCompliance" : "1.8",
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
"com.oracle.truffle.llvm.option.processor" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"com.oracle.truffle.llvm.option",
],
"checkstyle" : "com.oracle.truffle.llvm",
"javaCompliance" : "1.8",
"workingSets" : "Truffle, LLVM",
"license" : "BSD-new",
},
},
"distributions" : {
"SULONG" : {
"path" : "build/sulong.jar",
"subDir" : "graal",
"sourcesPath" : "build/sulong.src.zip",
"mainClass" : "com.oracle.truffle.llvm.LLVM",
"dependencies" : ["com.oracle.truffle.llvm"],
"exclude" : [
"EMF_COMMON",
"LLVM_IR_PARSER",
"ECORE",
"INJECT",
"XTEXT",
"XTEXT_XBASE",
"XTEXT_XBASE_LIB",
"EMF_ECORE_XMI",
"XTEXT_TYPES",
"XTEXT_JAVAX_INJECT",
"XTEXT_LOG4J",
"XTEXT_GOOGLE_GUAVA",
"XTEXT_ANTLR_RUNTIME",
"XTEXT_UTIL",
"ECLIPSE_EQUINOX",
],
"distDependencies" : [
"truffle:TRUFFLE_API",
"SULONG_OPTIONS",
"graal-core:GRAAL_API",
"graal-core:GRAAL_COMPILER",
"graal-core:GRAAL_HOTSPOT",
"graal-core:GRAAL_TRUFFLE_HOTSPOT",
]
},
"SULONG_OPTIONS" : {
"path" : "build/sulong_options.jar",
"subDir" : "graal",
"javaCompliance" : "1.8",
"dependencies" : ["com.oracle.truffle.llvm.option.processor"],
"description" : "The Sulong Option Processor generates an option class declared using options annotations.",
},
"SULONG_TEST" : {
"path" : "build/sulong_test.jar",
"subDir" : "graal",
"sourcesPath" : "build/sulong_test.src.zip",
"dependencies" : [
"com.oracle.truffle.llvm.test",
"com.oracle.truffle.llvm.types.test",
"com.oracle.truffle.llvm.pipe"
],
"exclude" : [
"EMF_COMMON",
"LLVM_IR_PARSER",
"ECORE",
"INJECT",
"XTEXT",
"XTEXT_XBASE",
"XTEXT_XBASE_LIB",
"EMF_ECORE_XMI",
"XTEXT_TYPES",
"XTEXT_JAVAX_INJECT",
"XTEXT_LOG4J",
"XTEXT_GOOGLE_GUAVA",
"XTEXT_ANTLR_RUNTIME",
"XTEXT_UTIL",
"ECLIPSE_EQUINOX",
"mx:JUNIT"
],
"distDependencies" : [
"truffle:TRUFFLE_API",
"truffle:TRUFFLE_TCK",
"graal-core:GRAAL_API",
"graal-core:GRAAL_COMPILER",
"graal-core:GRAAL_HOTSPOT",
"graal-core:GRAAL_TRUFFLE_HOTSPOT",
"sulong:SULONG"
]
},
}
}
|
|
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
# Module-level handles to the tempest configuration and a per-module logger.
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestSecurityGroupsBasicOps(manager.NetworkScenarioTest):

    """
    This test suite assumes that Nova has been configured to
    boot VM's with Neutron-managed networking, and attempts to
    verify cross tenant connectivity as follows

    ssh:
        in order to overcome "ip namespace", each tenant has an "access point"
        VM with floating-ip open to incoming ssh connection allowing network
        commands (ping/ssh) to be executed from within the
        tenant-network-namespace
        Tempest host performs key-based authentication to the ssh server via
        floating IP address

    connectivity test is done by pinging destination server via source server
    ssh connection.

    success - ping returns
    failure - ping_timeout reached

    setup:
        for primary tenant:
            1. create a network&subnet
            2. create a router (if public router isn't configured)
            3. connect tenant network to public network via router
            4. create an access point:
                a. a security group open to incoming ssh connection
                b. a VM with a floating ip
            5. create a general empty security group (same as "default", but
               without rules allowing in-tenant traffic)

    tests:
        1. _verify_network_details
        2. _verify_mac_addr: for each access point verify that
           (subnet, fix_ip, mac address) are as defined in the port list
        3. _test_in_tenant_block: test that in-tenant traffic is disabled
           without rules allowing it
        4. _test_in_tenant_allow: test that in-tenant traffic is enabled
           once an appropriate rule has been created
        5. _test_cross_tenant_block: test that cross-tenant traffic is disabled
           without a rule allowing it on destination tenant
        6. _test_cross_tenant_allow:
            * test that cross-tenant traffic is enabled once an appropriate
              rule has been created on destination tenant.
            * test that reverse traffic is still blocked
            * test that reverse traffic is enabled once an appropriate rule has
              been created on source tenant

    assumptions:
        1. alt_tenant/user existed and is different from primary_tenant/user
        2. Public network is defined and reachable from the Tempest host
        3. Public router can either be:
            * defined, in which case all tenants networks can connect directly
              to it, and cross tenant check will be done on the private IP of
              the destination tenant

            or

            * not defined (empty string), in which case each tenant will have
              its own router connected to the public network
    """

    class TenantProperties():
        """
        helper class to save tenant details

            id
            credentials
            network
            subnet
            security groups
            servers
            access point
        """

        def __init__(self, credentials):
            self.manager = clients.Manager(credentials)
            # Credentials from manager are filled with both names and IDs
            self.creds = self.manager.credentials
            self.network = None
            self.subnet = None
            self.router = None
            self.security_groups = {}
            self.servers = list()

        def set_network(self, network, subnet, router):
            # Record the network resources created for this tenant.
            self.network = network
            self.subnet = subnet
            self.router = router

    @classmethod
    def check_preconditions(cls):
        """Skip the whole suite when the deployment cannot support it."""
        if CONF.baremetal.driver_enabled:
            msg = ('Not currently supported by baremetal.')
            raise cls.skipException(msg)
        super(TestSecurityGroupsBasicOps, cls).check_preconditions()
        # A reachable path to the tenant networks is mandatory for the checks.
        if not (CONF.network.tenant_networks_reachable or
                CONF.network.public_network_id):
            msg = ('Either tenant_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            raise cls.skipException(msg)

    @classmethod
    def resource_setup(cls):
        # Create no network resources for these tests.
        cls.set_network_resources()
        super(TestSecurityGroupsBasicOps, cls).resource_setup()
        # TODO(mnewby) Consider looking up entities as needed instead
        # of storing them as collections on the class.

        # get credentials for secondary tenant
        cls.alt_creds = cls.isolated_creds.get_alt_creds()
        cls.alt_manager = clients.Manager(cls.alt_creds)
        # Credentials from the manager are filled with both IDs and Names
        cls.alt_creds = cls.alt_manager.credentials

        cls.floating_ips = {}
        cls.tenants = {}
        creds = cls.credentials()
        cls.primary_tenant = cls.TenantProperties(creds)
        cls.alt_tenant = cls.TenantProperties(cls.alt_creds)
        # Index both tenants by tenant id for lookups from server records.
        for tenant in [cls.primary_tenant, cls.alt_tenant]:
            cls.tenants[tenant.creds.tenant_id] = tenant
        # If no public router is configured every tenant gets its own router,
        # and cross-tenant access goes through floating IPs.
        cls.floating_ip_access = not CONF.network.public_router_id

    def cleanup_wrapper(self, resource):
        # Delegate to the scenario manager's cleanup with this class' name.
        self.cleanup_resource(resource, self.__class__.__name__)

    def setUp(self):
        # Deploy and sanity-check the primary tenant before every test.
        super(TestSecurityGroupsBasicOps, self).setUp()
        self._deploy_tenant(self.primary_tenant)
        self._verify_network_details(self.primary_tenant)
        self._verify_mac_addr(self.primary_tenant)

    def _create_tenant_keypairs(self, tenant):
        # Keypair used for ssh access to all of this tenant's servers.
        keypair = self.create_keypair(tenant.manager.keypairs_client)
        tenant.keypair = keypair

    def _create_tenant_security_groups(self, tenant):
        """Create the 'access' (ssh-open) and empty 'default' secgroups."""
        access_sg = self._create_empty_security_group(
            namestart='secgroup_access-',
            tenant_id=tenant.creds.tenant_id,
            client=tenant.manager.network_client
        )

        # don't use default secgroup since it allows in-tenant traffic
        def_sg = self._create_empty_security_group(
            namestart='secgroup_general-',
            tenant_id=tenant.creds.tenant_id,
            client=tenant.manager.network_client
        )
        tenant.security_groups.update(access=access_sg, default=def_sg)
        ssh_rule = dict(
            protocol='tcp',
            port_range_min=22,
            port_range_max=22,
            direction='ingress',
        )
        self._create_security_group_rule(secgroup=access_sg,
                                         client=tenant.manager.network_client,
                                         **ssh_rule)

    def _verify_network_details(self, tenant):
        # Checks that we see the newly created network/subnet/router via
        # checking the result of list_[networks,routers,subnets]
        # Check that (router, subnet) couple exist in port_list
        seen_nets = self._list_networks()
        seen_names = [n['name'] for n in seen_nets]
        seen_ids = [n['id'] for n in seen_nets]
        self.assertIn(tenant.network.name, seen_names)
        self.assertIn(tenant.network.id, seen_ids)

        seen_subnets = [(n['id'], n['cidr'], n['network_id'])
                        for n in self._list_subnets()]
        mysubnet = (tenant.subnet.id, tenant.subnet.cidr, tenant.network.id)
        self.assertIn(mysubnet, seen_subnets)

        seen_routers = self._list_routers()
        seen_router_ids = [n['id'] for n in seen_routers]
        seen_router_names = [n['name'] for n in seen_routers]
        self.assertIn(tenant.router.name, seen_router_names)
        self.assertIn(tenant.router.id, seen_router_ids)

        myport = (tenant.router.id, tenant.subnet.id)
        router_ports = [(i['device_id'], i['fixed_ips'][0]['subnet_id']) for i
                        in self._list_ports()
                        if self._is_router_port(i)]

        self.assertIn(myport, router_ports)

    def _is_router_port(self, port):
        """Return True if port is a router interface."""
        # NOTE(armando-migliaccio): match device owner for both centralized
        # and distributed routers; 'device_owner' is "" by default.
        return port['device_owner'].startswith('network:router_interface')

    def _create_server(self, name, tenant, security_groups=None):
        """
        creates a server and assigns to security group
        """
        self._set_compute_context(tenant)
        if security_groups is None:
            security_groups = [tenant.security_groups['default']]
        security_groups_names = [{'name': s['name']} for s in security_groups]
        create_kwargs = {
            'networks': [
                {'uuid': tenant.network.id},
            ],
            'key_name': tenant.keypair['name'],
            'security_groups': security_groups_names,
            'tenant_id': tenant.creds.tenant_id
        }
        server = self.create_server(name=name, create_kwargs=create_kwargs)
        # Verify the server really ended up in the requested secgroups.
        self.assertEqual(
            sorted([s['name'] for s in security_groups]),
            sorted([s['name'] for s in server['security_groups']]))
        return server

    def _create_tenant_servers(self, tenant, num=1):
        # Boot `num` servers in the tenant's (rule-less) default secgroup.
        for i in range(num):
            name = 'server-{tenant}-gen-{num}-'.format(
                tenant=tenant.creds.tenant_name,
                num=i
            )
            name = data_utils.rand_name(name)
            server = self._create_server(name, tenant)
            tenant.servers.append(server)

    def _set_access_point(self, tenant):
        """
        creates a server in a secgroup with rule allowing external ssh
        in order to access tenant internal network
        workaround ip namespace
        """
        secgroups = tenant.security_groups.values()
        name = 'server-{tenant}-access_point-'.format(
            tenant=tenant.creds.tenant_name)
        name = data_utils.rand_name(name)
        server = self._create_server(name, tenant,
                                     security_groups=secgroups)
        tenant.access_point = server
        self._assign_floating_ips(tenant, server)

    def _assign_floating_ips(self, tenant, server):
        # Give the access-point server a floating IP on the public network
        # and remember it by server id.
        public_network_id = CONF.network.public_network_id
        floating_ip = self.create_floating_ip(
            server, public_network_id,
            client=tenant.manager.network_client)
        self.floating_ips.setdefault(server['id'], floating_ip)

    def _create_tenant_network(self, tenant):
        # Network/subnet/router created with the tenant's own client.
        network, subnet, router = self.create_networks(
            client=tenant.manager.network_client)
        tenant.set_network(network, subnet, router)

    def _set_compute_context(self, tenant):
        # Switch the compute client used by the scenario helpers to this
        # tenant's credentials.
        self.servers_client = tenant.manager.servers_client
        return self.servers_client

    def _deploy_tenant(self, tenant_or_id):
        """
        creates:
            network
            subnet
            router (if public not defined)
            access security group
            access-point server
        """
        if not isinstance(tenant_or_id, self.TenantProperties):
            tenant = self.tenants[tenant_or_id]
        else:
            tenant = tenant_or_id
        self._set_compute_context(tenant)
        self._create_tenant_keypairs(tenant)
        self._create_tenant_network(tenant)
        self._create_tenant_security_groups(tenant)
        self._set_access_point(tenant)

    def _get_server_ip(self, server, floating=False):
        """
        returns the ip (floating/internal) of a server
        """
        if floating:
            server_ip = self.floating_ips[server['id']].floating_ip_address
        else:
            server_ip = None
            network_name = self.tenants[server['tenant_id']].network.name
            if network_name in server['addresses']:
                server_ip = server['addresses'][network_name][0]['addr']
        return server_ip

    def _connect_to_access_point(self, tenant):
        """
        create ssh connection to tenant access point
        """
        access_point_ssh = \
            self.floating_ips[tenant.access_point['id']].floating_ip_address
        private_key = tenant.keypair['private_key']
        access_point_ssh = self._ssh_to_server(access_point_ssh,
                                               private_key=private_key)
        return access_point_ssh

    def _check_connectivity(self, access_point, ip, should_succeed=True):
        # Ping `ip` from inside the tenant via the access-point ssh session
        # and assert reachability matches the expectation.
        if should_succeed:
            msg = "Timed out waiting for %s to become reachable" % ip
        else:
            msg = "%s is reachable" % ip
        self.assertTrue(self._check_remote_connectivity(access_point, ip,
                                                        should_succeed), msg)

    def _test_in_tenant_block(self, tenant):
        # Without any rule, in-tenant servers must NOT be reachable.
        access_point_ssh = self._connect_to_access_point(tenant)
        for server in tenant.servers:
            self._check_connectivity(access_point=access_point_ssh,
                                     ip=self._get_server_ip(server),
                                     should_succeed=False)

    def _test_in_tenant_allow(self, tenant):
        # Allowing icmp from the default secgroup enables in-tenant pings.
        ruleset = dict(
            protocol='icmp',
            remote_group_id=tenant.security_groups['default'].id,
            direction='ingress'
        )
        self._create_security_group_rule(
            secgroup=tenant.security_groups['default'],
            **ruleset
        )
        access_point_ssh = self._connect_to_access_point(tenant)
        for server in tenant.servers:
            self._check_connectivity(access_point=access_point_ssh,
                                     ip=self._get_server_ip(server))

    def _test_cross_tenant_block(self, source_tenant, dest_tenant):
        """
        if public router isn't defined, then dest_tenant access is via
        floating-ip
        """
        access_point_ssh = self._connect_to_access_point(source_tenant)
        ip = self._get_server_ip(dest_tenant.access_point,
                                 floating=self.floating_ip_access)
        self._check_connectivity(access_point=access_point_ssh, ip=ip,
                                 should_succeed=False)

    def _test_cross_tenant_allow(self, source_tenant, dest_tenant):
        """
        check for each direction:
        creating rule for tenant incoming traffic enables only 1way traffic
        """
        ruleset = dict(
            protocol='icmp',
            direction='ingress'
        )
        self._create_security_group_rule(
            secgroup=dest_tenant.security_groups['default'],
            client=dest_tenant.manager.network_client,
            **ruleset
        )
        access_point_ssh = self._connect_to_access_point(source_tenant)
        ip = self._get_server_ip(dest_tenant.access_point,
                                 floating=self.floating_ip_access)
        self._check_connectivity(access_point_ssh, ip)

        # test that reverse traffic is still blocked
        self._test_cross_tenant_block(dest_tenant, source_tenant)

        # allow reverse traffic and check
        self._create_security_group_rule(
            secgroup=source_tenant.security_groups['default'],
            client=source_tenant.manager.network_client,
            **ruleset
        )

        access_point_ssh_2 = self._connect_to_access_point(dest_tenant)
        ip = self._get_server_ip(source_tenant.access_point,
                                 floating=self.floating_ip_access)
        self._check_connectivity(access_point_ssh_2, ip)

    def _verify_mac_addr(self, tenant):
        """
        verify that VM (tenant's access point) has the same ip,mac as listed in
        port list
        """
        access_point_ssh = self._connect_to_access_point(tenant)
        mac_addr = access_point_ssh.get_mac_address()
        mac_addr = mac_addr.strip().lower()
        # Get the fixed_ips and mac_address fields of all ports. Select
        # only those two columns to reduce the size of the response.
        port_list = self._list_ports(fields=['fixed_ips', 'mac_address'])
        port_detail_list = [
            (port['fixed_ips'][0]['subnet_id'],
             port['fixed_ips'][0]['ip_address'],
             port['mac_address'].lower())
            for port in port_list if port['fixed_ips']
        ]
        server_ip = self._get_server_ip(tenant.access_point)
        subnet_id = tenant.subnet.id
        self.assertIn((subnet_id, server_ip, mac_addr), port_detail_list)

    @test.attr(type='smoke')
    @test.services('compute', 'network')
    def test_cross_tenant_traffic(self):
        # Cross-tenant block/allow scenario; needs a second tenant.
        if not self.isolated_creds.is_multi_tenant():
            raise self.skipException("No secondary tenant defined")
        try:
            # deploy new tenant
            self._deploy_tenant(self.alt_tenant)
            self._verify_network_details(self.alt_tenant)
            self._verify_mac_addr(self.alt_tenant)

            # cross tenant check
            source_tenant = self.primary_tenant
            dest_tenant = self.alt_tenant
            self._test_cross_tenant_block(source_tenant, dest_tenant)
            self._test_cross_tenant_allow(source_tenant, dest_tenant)
        except Exception:
            # Dump console logs of every server to help diagnose failures.
            for tenant in self.tenants.values():
                self._log_console_output(servers=tenant.servers)
            raise

    @test.attr(type='smoke')
    @test.services('compute', 'network')
    def test_in_tenant_traffic(self):
        # In-tenant block/allow scenario on the primary tenant only.
        try:
            self._create_tenant_servers(self.primary_tenant, num=1)

            # in-tenant check
            self._test_in_tenant_block(self.primary_tenant)
            self._test_in_tenant_allow(self.primary_tenant)
        except Exception:
            for tenant in self.tenants.values():
                self._log_console_output(servers=tenant.servers)
            raise
|
|
# -------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Stefan
#
# Created: 30.07.2017
# Copyright: (c) Stefan 2017
# Licence: <your licence>
# -------------------------------------------------------------------------------
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from scrape_interface import ScrapeProcessor
import re
# number of entity for which we download; passed to every sp.post_decision call
_ENTITY = 4340730
def extractdata(sp):
    """
    Entry point: process all decision pages for _ENTITY with the given
    ScrapeProcessor.
    """
    print("Start processing entity " + str(_ENTITY))
    # _process_pvpage(sp)
    _process_main(sp)
    print("End processing entity " + str(_ENTITY))
# main processing - take all years and process until no more pages
def _process_main(sp):
    """
    Scrape the decisions listing page: one tab per year, each tab holding
    decision links (<p>/<div> elements) optionally followed by annex links,
    either inline or grouped in an <ol> list.

    :param sp: ScrapeProcessor used for downloading, OCR and posting.
    """
    html = ScrapeProcessor.download_page(
        "http://www.primarie6.ro/consiliul-local/sedintele-consiliului-local/hotarari-ale-consiliului-local/")
    soup = BeautifulSoup(html, 'html.parser')
    controls = soup.find("ul", {"class": "et_pb_tabs_controls"})
    if controls is None:
        print("ERROR|Can't find controls element!")
        return
    firstyear = controls.find("a")
    if firstyear is None:
        print("ERROR|Can't find first year link in controls element!")
        return
    # Tabs are ordered newest-first; the first tab label is the latest year.
    year = int(firstyear.string)
    parentdiv = soup.find("div", {"class": "et_pb_all_tabs"})
    if parentdiv is None:
        print("ERROR|Can't find parent div element!")
        return
    for yeardiv in parentdiv.find_all("div", {"class": "et_pb_tab"}):
        # there are not documents available for download over 2013
        #if year <= 2012:
        #    continue
        # BUG FIX: decisionid/title were previously unbound until the first
        # decision link was processed, so a year tab starting with an annex
        # (or with an <ol> list) raised NameError. Start each year in a
        # "no current decision" state instead.
        decisionid = 0
        title = ""
        annex = 0
        for elem in yeardiv.children:
            if elem.name == "p" or elem.name == "div":
                alink = elem.find("a")
                if alink is None:
                    continue
                if alink == -1:
                    continue
                href = alink["href"]
                if not href.startswith("http://"):
                    # listing sometimes uses site-relative links
                    href = urljoin("http://www.primarie6.ro", href)
                title = ScrapeProcessor.converthtml2text(str(elem))
                title = title.replace("\n", " ").strip()
                # raw string so "\d" is a regex class, not a str escape
                match = re.search(r"(^|\d?\. ?)anex", title, re.IGNORECASE)
                if match:
                    # annex of the most recently posted decision
                    annex = annex + 1
                    if decisionid == 0:
                        print("Skipping annex because of parent error: " + title)
                        continue
                    code, result = _process_annex(sp, href, decisionid, annex)
                    if code == "ERROR":
                        print("ERROR|" + result)
                        continue
                else:
                    # a new decision: reset the annex counter
                    decisionid = 0
                    annex = 0
                    code, result = _process_doc(sp, href, title, year)
                    if code == "ERROR":
                        print("ERROR|" + title + "|" + result)
                        continue
                    decisionid = result
            if elem.name == "ol":
                # list of annexes belonging to the preceding decision
                for li in elem.find_all("li"):
                    alink = li.find("a")
                    if alink is None:
                        continue
                    if alink == -1:
                        continue
                    href = alink["href"]
                    if not href.startswith("http://"):
                        href = urljoin("http://www.primarie6.ro", href)
                    annex = annex + 1
                    code, result = _process_annex(sp, href, decisionid, annex)
                    if code == "ERROR":
                        print("ERROR|" + title + "|" + result)
        # in delta modes only the latest year needs to be scanned
        if sp.get_processmode() in (ScrapeProcessor.ProcessMode.DELTA, ScrapeProcessor.ProcessMode.DELTA_DOWNLOAD):
            break
        year = year - 1
# pv processing - take all years and process until no more pages
def _process_pvpage(sp):
    """
    Scrape the meeting-minutes ("procese verbale") listing page and post
    each linked document via _process_pv. In DELTA modes, stops as soon
    as older years or already-existing documents are reached.
    """
    html = ScrapeProcessor.download_page(
        "http://www.primarie6.ro/consiliul-local/sedintele-consiliului-local/procese-verbale/")
    soup = BeautifulSoup(html, 'html.parser')
    parentdiv = soup.find("div", {"class": "et_pb_all_tabs"})
    for li in parentdiv.find_all("li"):
        alink = li.find("a")
        if alink is None:
            continue
        href = alink["href"]
        if not href.startswith("http://"):
            # listing sometimes uses site-relative links
            href = urljoin("http://www.primarie6.ro", href)
        title = alink.string
        datetext = ScrapeProcessor.finddate(title)
        if datetext == "":
            print("ERROR|Cannot find date: " + title)
            continue
        # in delta modes only the current year is of interest
        if datetext[:4] < str(ScrapeProcessor.currentyear()) and\
                (sp.get_processmode() in (ScrapeProcessor.ProcessMode.DELTA, ScrapeProcessor.ProcessMode.DELTA_DOWNLOAD)):
            return
        code, result = _process_pv(sp, href, datetext, title)
        if code == "ERROR":
            print("ERROR|" + title + "|" + result)
            continue
        if code == "EXISTS" and (sp.get_processmode() in (ScrapeProcessor.ProcessMode.DELTA, ScrapeProcessor.ProcessMode.DELTA_DOWNLOAD)):
            break
def _process_pv(sp, link, datetext, title):
    """
    Post one meeting-minutes document: create the decision record, then
    download, OCR and post its main document.

    :param sp: ScrapeProcessor used for posting/downloading/OCR.
    :param link: absolute URL of the document.
    :param datetext: date string; datetext[:4] is used as the year.
    :param title: document title.
    :returns: (code, result) of the final post, or the first
        ("ERROR", message) encountered.
    """
    # minutes carry no number of their own; use the day-in-year of the date
    number = ScrapeProcessor.dayinyear(datetext)
    code, result = sp.post_decision("PRVB", number, datetext[:4], _ENTITY, datetext, title)
    if code == "ERROR":
        return code, result
    decisionid = result
    # download page
    code, result = sp.download_file(link)
    if code == "ERROR":
        sp.post_document("MAIN", decisionid, 0, "ERROR_DOWNLOAD", "", link)
        return code, result
    fname = result
    code, result, filetype = sp.ocr_document(fname)
    if code == "ERROR":
        sp.post_document("MAIN", decisionid, 0, "ERROR_OCR", "", link)
        return code, result
    ocrfname = result
    outstr, cssstr = ScrapeProcessor.preparehtml(ocrfname, filetype)
    return sp.post_document("MAIN", decisionid, 0, outstr, cssstr, link)
def _process_doc(sp, href, title, year):
match = re.search("H.. *?(\d+)[ -/]", title)
if not match:
match = re.search("hcl-nr-(\d+)", title)
if not match:
return "ERROR", "no decision number found: " + title
number = int(match.group(1))
code, result = sp.post_decision("HOTA", number, year, _ENTITY, "", title)
if code == "ERROR":
return code, result
decisionid = result
code, result = sp.download_file(href)
if code == "ERROR":
sp.post_document("MAIN", decisionid, 0, "ERROR_DOWNLOAD", "", href)
return code, result
fname = result
code, result, filetype = sp.ocr_document(fname)
if code == "ERROR":
sp.post_document("MAIN", decisionid, 0, "ERROR_OCR", "", href)
return code, result
ocrfname = result
outstr, cssstr = ScrapeProcessor.preparehtml(ocrfname, filetype)
code, result = sp.post_document("MAIN", decisionid, 0, outstr, cssstr, href)
if code == "ERROR":
return code, result
return code, decisionid
def _process_annex(sp, href, decisionid, annex):
code, result = sp.download_file(href)
if code == "ERROR":
sp.post_document("ANEX", decisionid, annex, "ERROR_DOWNLOAD", "", href)
return code, result
if code == "ARCHIVE":
print("WARNING|Skipping zip file as annex: " + href)
return "CREATED", annex
fname = result
code, result, filetype = sp.ocr_document(fname)
if code == "ERROR":
sp.post_document("ANEX", decisionid, annex, "ERROR_DOWNLOAD", "", href)
return code, result
ocrfname = result
outstr, cssstr = ScrapeProcessor.preparehtml(ocrfname, filetype)
return sp.post_document("ANEX", decisionid, annex, outstr, cssstr, href)
if __name__ == '__main__':
    # Manual run: full reprocess against a local server.
    # NOTE(review): server URL and credentials are hard-coded here;
    # consider moving them to environment variables or a config file.
    localsp = ScrapeProcessor("http://192.168.56.10", "stefan_cioc", "parola1234")
    localsp.set_folders("X:/hot/S6I", "X:/hot/S6O")
    localsp.set_processmode(ScrapeProcessor.ProcessMode.FULL)
    extractdata(localsp)
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Subclass for httplib.HTTPSConnection with optional certificate name
verification, depending on libcloud.security settings.
"""
import os
import warnings
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
import libcloud.security
from libcloud.utils.py3 import urlparse, PY3
__all__ = [
    'LibcloudBaseConnection',
    'LibcloudConnection'
]

# Value passed as requests' allow_redirects flag for every request below.
ALLOW_REDIRECTS = 1

# Environment variable consulted for a default HTTP proxy URL.
HTTP_PROXY_ENV_VARIABLE_NAME = 'http_proxy'
class SignedHTTPSAdapter(HTTPAdapter):
    """
    requests transport adapter which supplies a client certificate/key
    pair (request signing) to every connection in its urllib3 pool.
    """

    def __init__(self, cert_file, key_file):
        # Paths to the client certificate and private key handed to the
        # PoolManager below.
        self.cert_file = cert_file
        self.key_file = key_file
        super(SignedHTTPSAdapter, self).__init__()

    def init_poolmanager(self, connections, maxsize, block=False):
        # Same as HTTPAdapter.init_poolmanager, plus cert/key so all
        # pooled connections present the client certificate.
        self.poolmanager = PoolManager(
            num_pools=connections, maxsize=maxsize,
            block=block,
            cert_file=self.cert_file,
            key_file=self.key_file)
class LibcloudBaseConnection(object):
    """
    Base connection class to inherit from.

    Note: This class should not be instantiated directly.
    """

    # requests.Session created in __init__ and shared by all requests.
    session = None

    # Proxy settings, populated by set_http_proxy().
    proxy_scheme = None
    proxy_host = None
    proxy_port = None

    proxy_username = None
    proxy_password = None

    http_proxy_used = False

    # Path to the CA bundle used for server certificate verification.
    ca_cert = None

    def __init__(self):
        self.session = requests.Session()

    def set_http_proxy(self, proxy_url):
        """
        Set a HTTP proxy which will be used with this connection.

        :param proxy_url: Proxy URL (e.g. http://<hostname>:<port> without
                          authentication and
                          http://<username>:<password>@<hostname>:<port> for
                          basic auth authentication information.
        :type proxy_url: ``str``
        """
        result = self._parse_proxy_url(proxy_url=proxy_url)
        scheme = result[0]
        host = result[1]
        port = result[2]
        username = result[3]
        password = result[4]

        self.proxy_scheme = scheme
        self.proxy_host = host
        self.proxy_port = port
        self.proxy_username = username
        self.proxy_password = password
        self.http_proxy_used = True

        # Hand the full proxy URL straight to requests for both schemes.
        self.session.proxies = {
            'http': proxy_url,
            'https': proxy_url
        }

    def _parse_proxy_url(self, proxy_url):
        """
        Parse and validate a proxy URL.

        :param proxy_url: Proxy URL (e.g. http://hostname:3128)
        :type proxy_url: ``str``

        :raises ValueError: on a non-http scheme or a malformed URL.

        :rtype: ``tuple`` (``scheme``, ``hostname``, ``port``, ``username``,
                ``password``) - username/password are ``None`` when the URL
                carries no credentials.
        """
        parsed = urlparse.urlparse(proxy_url)

        if parsed.scheme != 'http':
            raise ValueError('Only http proxies are supported')

        if not parsed.hostname or not parsed.port:
            raise ValueError('proxy_url must be in the following format: '
                             'http://<proxy host>:<proxy port>')

        proxy_scheme = parsed.scheme
        proxy_host, proxy_port = parsed.hostname, parsed.port

        netloc = parsed.netloc

        if '@' in netloc:
            # credentials are the part before '@', split as user:password
            username_password = netloc.split('@', 1)[0]
            split = username_password.split(':', 1)

            if len(split) < 2:
                raise ValueError('URL is in an invalid format')

            proxy_username, proxy_password = split[0], split[1]
        else:
            proxy_username = None
            proxy_password = None

        return (proxy_scheme, proxy_host, proxy_port, proxy_username,
                proxy_password)

    def _setup_verify(self):
        # Mirror the global libcloud verification toggle onto this instance.
        self.verify = libcloud.security.VERIFY_SSL_CERT

    def _setup_ca_cert(self, **kwargs):
        # simulating keyword-only argument in Python 2
        ca_certs_path = kwargs.get('ca_cert', libcloud.security.CA_CERTS_PATH)
        if self.verify is False:
            # verification disabled: no CA bundle needed
            pass
        else:
            if isinstance(ca_certs_path, list):
                msg = (
                    'Providing a list of CA trusts is no longer supported '
                    'since libcloud 2.0. Using the first element in the list. '
                    'See http://libcloud.readthedocs.io/en/latest/other/'
                    'changes_in_2_0.html#providing-a-list-of-ca-trusts-is-no-'
                    'longer-supported')
                warnings.warn(msg, DeprecationWarning)
                self.ca_cert = ca_certs_path[0]
            else:
                self.ca_cert = ca_certs_path

    def _setup_signing(self, cert_file=None, key_file=None):
        """
        Setup request signing by mounting a signing
        adapter to the session
        """
        self.session.mount('https://', SignedHTTPSAdapter(cert_file, key_file))
class LibcloudConnection(LibcloudBaseConnection):
    """
    Connection class which uses the requests session from the base class
    to issue HTTP(S) requests against a single host.
    """

    # Per-request timeout in seconds (default assigned in __init__).
    timeout = None
    # Base URL ("scheme://host[:port]") this connection talks to.
    host = None
    # Last requests.Response received; exposed via getresponse()/read().
    response = None

    def __init__(self, host, port, secure=None, **kwargs):
        scheme = 'https' if secure is not None and secure else 'http'
        # Force https for port 443 and omit the port for the defaults 80/443.
        self.host = '{0}://{1}{2}'.format(
            'https' if port == 443 else scheme,
            host,
            ":{0}".format(port) if port not in (80, 443) else ""
        )

        # Support for HTTP proxy
        proxy_url_env = os.environ.get(HTTP_PROXY_ENV_VARIABLE_NAME, None)
        proxy_url = kwargs.pop('proxy_url', proxy_url_env)

        self._setup_verify()
        self._setup_ca_cert()

        LibcloudBaseConnection.__init__(self)

        # Mount the signing adapter when a client cert/key was supplied.
        if 'cert_file' in kwargs or 'key_file' in kwargs:
            self._setup_signing(**kwargs)

        if proxy_url:
            self.set_http_proxy(proxy_url=proxy_url)
        self.timeout = kwargs.get('timeout', 60)

    @property
    def verification(self):
        """
        The option for SSL verification given to underlying requests
        (a CA bundle path when configured, otherwise the verify flag).
        """
        return self.ca_cert if self.ca_cert is not None else self.verify

    def request(self, method, url, body=None, headers=None, raw=False,
                stream=False):
        # Issue the request and keep the response for getresponse()/read().
        url = urlparse.urljoin(self.host, url)
        headers = self._normalize_headers(headers=headers)

        self.response = self.session.request(
            method=method.lower(),
            url=url,
            data=body,
            headers=headers,
            allow_redirects=ALLOW_REDIRECTS,
            stream=stream,
            timeout=self.timeout,
            verify=self.verification
        )

    def prepared_request(self, method, url, body=None,
                         headers=None, raw=False, stream=False):
        # Variant of request() that pre-builds the request so the raw body
        # can be attached unchanged before sending.
        headers = self._normalize_headers(headers=headers)

        req = requests.Request(method, ''.join([self.host, url]),
                               data=body, headers=headers)

        prepped = self.session.prepare_request(req)

        prepped.body = body

        self.response = self.session.send(
            prepped,
            stream=raw,
            verify=self.ca_cert if self.ca_cert is not None else self.verify)

    def getresponse(self):
        # Return the last requests.Response received.
        return self.response

    def getheaders(self):
        # urlib decoded response body, libcloud has a bug
        # and will not check if content is gzipped, so let's
        # remove headers indicating compressed content.
        if 'content-encoding' in self.response.headers:
            del self.response.headers['content-encoding']

        return self.response.headers

    @property
    def status(self):
        return self.response.status_code

    @property
    def reason(self):
        # NOTE(review): returns the response body for status codes <= 400
        # and None above — unusual for a "reason"; confirm this is intended.
        return None if self.response.status_code > 400 else self.response.text

    def connect(self):  # pragma: no cover
        # requests manages connections lazily; nothing to do here.
        pass

    def read(self):
        return self.response.content

    def close(self):  # pragma: no cover
        # return connection back to pool
        self.response.close()

    def _normalize_headers(self, headers):
        headers = headers or {}

        # all headers should be strings
        for key, value in headers.items():
            if isinstance(value, (int, float)):
                headers[key] = str(value)

        return headers
class HttpLibResponseProxy(object):
    """
    Proxy pattern around a :class:`requests.Response` object, exposing the
    read-only parts of the :class:`httplib.HTTPResponse` interface.
    """

    def __init__(self, response):
        self._response = response

    def read(self, amt=None):
        # `amt` is accepted for interface compatibility only; the whole
        # decoded body is always returned.
        return self._response.text

    def getheader(self, name, default=None):
        """
        Get the contents of the header name, or default
        if there is no matching header.
        """
        headers = self._response.headers
        if name not in headers.keys():
            return default
        return headers[name]

    def getheaders(self):
        """
        Return a list of (header, value) tuples.
        """
        items = self._response.headers.items()
        return list(items) if PY3 else items

    @property
    def status(self):
        return self._response.status_code

    @property
    def reason(self):
        return self._response.reason

    @property
    def version(self):
        # requests doesn't expose this
        return '11'
|
|
#!/usr/bin/python
# ----------------------------------------------------------------------
# Copyright (2010) Aram Davtyan and Garegin Papoian
# Papoian's Group, University of Maryland at Collage Park
# http://papoian.chem.umd.edu/
# Last Update: 03/04/2011
# ----------------------------------------------------------------------
import sys, os
import numpy
from Bio.SVDSuperimposer import SVDSuperimposer
from VectorAlgebra import *
#from Bio.PDB.PDBParser import PDBParser
# Mappings from the coarse-grained atom type id ('1'..'6') to, respectively,
# the element symbol, a human-readable description, and the PDB atom name.
atom_type = {'1' : 'C', '2' : 'N', '3' : 'O', '4' : 'C', '5' : 'H', '6' : 'C'}
atom_desc = {'1' : 'C-Alpha', '2' : 'N', '3' : 'O', '4' : 'C-Beta', '5' : 'H-Beta', '6' : 'C-Prime'}
PDB_type = {'1' : 'CA', '2' : 'N', '3' : 'O', '4' : 'CB', '5' : 'HB', '6' : 'C' }
class PDB_Atom:
    """
    Minimal representation of one PDB atom; write_ emits it as a
    fixed-width 'ATOM' record line.
    """

    # Class-level defaults; real values are assigned per-instance in __init__.
    no = 0        # atom serial number
    ty = ''       # atom name (e.g. 'CA')
    res = 'UNK'   # residue name
    res_no = 0    # residue sequence number
    x = 0.0
    y = 0.0
    z = 0.0
    atm = 'C'     # element symbol

    def __init__(self, no, ty, res, res_no, x, y, z, atm):
        self.no = no
        self.ty = ty
        self.res = res
        self.res_no = res_no
        self.x = x
        self.y = y
        self.z = z
        self.atm = atm

    def write_(self, f):
        # Write one ATOM record; columns are built by padding and slicing
        # from the right so each field has a fixed width.
        f.write('ATOM')
        f.write((' '+str(self.no))[-7:])
        f.write(' ')
        f.write((self.ty+' ')[:4])
        f.write(self.res)
        f.write(' ')
        f.write('T')
        f.write((' '+str(self.res_no))[-4:])
        f.write((' '+str(round(self.x,3)))[-12:])
        f.write((' '+str(round(self.y,3)))[-8:])
        f.write((' '+str(round(self.z,3)))[-8:])
        f.write(' 1.00')
        f.write(' 0.00')
        f.write((' '+self.atm)[-12:]+' ')
        f.write('\n')
class Atom:
    """
    One coarse-grained atom as used by this converter; write_ emits a
    whitespace-separated coordinate line.
    """

    # Class-level defaults; real values are assigned per-instance in __init__.
    No = 0      # atom number
    ty = ''     # atom type
    x = 0.0
    y = 0.0
    z = 0.0
    desc = ''   # human-readable description

    def __init__(self, No, ty, No_m, x, y, z, desc=''):
        self.No = No
        self.ty = ty
        # No_m is the type key used to look up the PDB atom name in write_
        self.No_m = No_m
        self.x = x
        self.y = y
        self.z = z
        self.desc = desc

    def write_(self, f):
        # Write: number, PDB atom name, x, y, z, description.
        f.write(str(self.No))
        f.write(' ')
        f.write(PDB_type[self.No_m])
        f.write(' ')
        f.write(str(round(self.x,8)))
        f.write(' ')
        f.write(str(round(self.y,8)))
        f.write(' ')
        f.write(str(round(self.z,8)))
        f.write(' ')
        f.write(self.desc)
        f.write('\n')
# --- command-line parsing and global state for the Q/RMSD calculation ---
if len(sys.argv)!=7 and len(sys.argv)!=8 :
    print "# of arguments", len(sys.argv)
    print "\nCalcQValue.py PDB_Id Frag_PDB_path FragPDB+chain i_start j_start length (1 for rmsd calculation) \n"
    exit()

# Target structure: accept either "xxxx.pdb" or a bare id.
struct_id = sys.argv[1]
if struct_id[-4:].lower()==".pdb":
    pdb_file = struct_id
else:
    pdb_file = struct_id + ".pdb"

#myhome = os.environ.get("HOME")
#pdbDir = myhome + "/opt/script/PDBs/"

# Fragment structure: 4-char PDB id followed by a 1-char chain, e.g. "1r69a".
Frag_struct_id = sys.argv[3]
if len(Frag_struct_id) < 5:
    print "\nCalcQValue.py PDB_Id Frag_PDB_path FragPDB+chain i_start j_start length\n"
    print "format of FragPDB+chain, like 1r69a "
    exit()

pdbDir = sys.argv[2]
Frag_pdbID = Frag_struct_id[0:4].upper()
Frag_chain_name = Frag_struct_id[4].upper()
Frag_pdb_file = pdbDir + Frag_pdbID + ".pdb"

#output_file = ""
#if len(sys.argv)>3: output_file = sys.argv[3]

sigma_exp = 0.15

# NOTE(review): res_flag is assigned 0 then immediately 1 and never used
# below — looks like leftover debugging state.
res_flag = 0
res_flag = 1
res_Start = int(sys.argv[4])   # first residue of the window in the target
res_len = int(sys.argv[6])     # window length (number of residues)

# Optional 8th argument: 1 requests the RMSD calculation path.
rmsd_flag = 0
if len(sys.argv) == 8 :
    rmsd_flag = int(sys.argv[7])
    if rmsd_flag != 1 :
        print "rmsd_flag has to be 1 !!"
        exit()

#res_End = res_Start + res_len - 1
Frag_Start = int(sys.argv[5])  # first residue of the window in the fragment
#Frag_End = res_len + Frag_Start - 1
#print "res_Start:", res_Start, "Frag_Start:", Frag_Start

n_atoms = 0
i_atom = 0
item = ''
step = 0

# CA coordinate lists filled below: target (ca_atoms_pdb) and fragment
# (ca_atoms) windows must end up the same length.
ca_atoms_pdb = []
ca_atoms = []
box = []
A = []
sigma = []
# NOTE(review): sigma_sq is initialized empty here but indexed by
# computeQ1; confirm it is populated before computeQ1 is called.
sigma_sq = []

#out = open(output_file, 'w')

from Bio.PDB.PDBParser import PDBParser
p = PDBParser(PERMISSIVE=1)
def compute_frag_RMSD(res_len):
    """
    Superimpose the fragment CA coordinates (ca_atoms, module global) onto
    the target CA coordinates (ca_atoms_pdb) with SVD and return the RMSD.
    Returns 0 on any length mismatch.
    """
    if len(ca_atoms)!=len(ca_atoms_pdb):
        print "Error. Length mismatch! target:frag", len(ca_atoms_pdb), len(ca_atoms)
        return 0
    l = len(ca_atoms)
    N = res_len
    if l != N :
        print "atom list length mismatches the fragment length!", str(l), str(N)
        return 0
    # Pack both coordinate lists into (l, 3) arrays for the superimposer.
    fixed_coord = numpy.zeros((l, 3))
    moving_coord = numpy.zeros((l, 3))
    for i in range(0, l):
        fixed_coord[i] = numpy.array ([ca_atoms_pdb[i][0], ca_atoms_pdb[i][1], ca_atoms_pdb[i][2]])
        moving_coord[i] = numpy.array ([ca_atoms[i][0], ca_atoms[i][1], ca_atoms[i][2]])
    sup = SVDSuperimposer()
    sup.set(fixed_coord, moving_coord)
    sup.run()
    rms = sup.get_rms()
    return rms
def computeQ1(res_len):
    """Return the Q (structural similarity) value between the fragment
    (ca_atoms) and target (ca_atoms_pdb) windows.

    For every CA pair (ia, ja) with ja >= ia+2, the pairwise distance in the
    fragment is compared to the target's, and a Gaussian overlap with a
    separation-dependent width (module-level sigma_sq) is accumulated.  The
    sum is normalized by the pair count via 2/((N-1)*(N-2)).
    Relies on module-level helpers vector/vabs/exp (not visible in this
    chunk -- confirm).  Returns 0 on length mismatch.
    """
    #print "res_len:", res_len
    #print "target:frag", len(ca_atoms_pdb), len(ca_atoms)
    if len(ca_atoms)!=len(ca_atoms_pdb):
        print "Error. Length mismatch! target:frag", len(ca_atoms_pdb), len(ca_atoms)
        return 0
    Q = 0
    N = res_len
    for ia in range(0, res_len-2):
        for ja in range(ia+2, res_len):
            #print ia, ja
            r = vabs(vector(ca_atoms[ia], ca_atoms[ja]))
            rn = vabs(vector(ca_atoms_pdb[ia], ca_atoms_pdb[ja]))
            dr = r - rn
            Q = Q + exp(-dr*dr/(2*sigma_sq[ja-ia]));
    Q = 2*Q/((N-1)*(N-2))
    return Q
# ---------------------------------------------------------------------------
# Parse the target structure: collect CA coordinates for the window
# [res_Start, res_Start + res_len) of the first chain.
# ---------------------------------------------------------------------------
s = p.get_structure(struct_id, pdb_file)
chains = s[0].get_list()
chain = chains[0]
count=0
shift = 0
first_flag = 0
for res in chain:
    res_index = res.get_id()[1]
    #print "res_index: ", res_index, "first_flag: ", first_flag
    #pdb.fasta file is modified, shift is needed to get back pbd index
    if first_flag == 0 and res_index != 1:
        shift = res_index - 1
        # print "shift:",shift
        first_flag = 1
    res_index = res_index - shift
    if (res_index < res_Start):
        continue
    if (count >= res_len ):
        break
    # Accept standard residues plus a few common modified ones, but only if
    # both CA and O atoms are present.
    is_regular_res = res.has_id('CA') and res.has_id('O')
    res_id = res.get_id()[0]
    if (res_id ==' ' or res_id =='H_MSE' or res_id == 'H_M3L' or res_id == 'H_CAS') and is_regular_res:
        #print "Add res_index:", res_index + shift, "resname:", res.get_resname()
        ca_atoms_pdb.append(res['CA'].get_coord())
        #print 'res: ', res.get_resname(), res.get_id()[1]
        count += 1
    else:
        print "res_id:", res_id, "res_index: ", res_index, "is_regular_res: ", is_regular_res
        #exit()
# ---------------------------------------------------------------------------
# Parse the fragment structure: collect CA coordinates for the window
# [Frag_Start, Frag_Start + res_len) of the requested chain.
# Note: here only CA presence is required (no O check, no index shift).
# ---------------------------------------------------------------------------
s = p.get_structure(Frag_pdbID, Frag_pdb_file)
chains = s[0].get_list()
for chain in chains:
    if chain.get_id() == Frag_chain_name:
        count_frag=0
        for res in chain:
            res_index = res.get_id()[1]
            if (res_index < Frag_Start):
                continue
            if (count_frag >= res_len ):
                break
            count_frag += 1
            is_regular_res = res.has_id('CA')
            res_id = res.get_id()[0]
            if (res_id ==' ' or res_id =='H_MSE' or res_id == 'H_M3L' or res_id == 'H_CAS') and is_regular_res:
                ca_atoms.append(res['CA'].get_coord())
            else:
                print "res_id:", res_id, "res_index: ", res_index, "is_regular_res: ", is_regular_res
# Precompute sigma^2 for every sequence separation: sigma = (1+i)**sigma_exp.
for i in range(0, len(ca_atoms_pdb)+1):
    #sigma.append( (1+i)**sigma_exp )
    #sigma_sq.append(sigma[-1]*sigma[-1])
    sigma_tmp = (1+i)**sigma_exp
    sigma_sq.append(sigma_tmp*sigma_tmp)
# Emit either the RMSD or the Q value, rounded to 3 decimals.
if rmsd_flag == 1:
    rmsd = compute_frag_RMSD(res_len)
    print round(rmsd,3)
else :
    Q = computeQ1(res_len)
    strs=str(round(Q,3))
    sys.stdout.write(strs+"\n")
    #print round(Q,3)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the DataflowRunner class."""
from __future__ import absolute_import
import json
import sys
import unittest
from builtins import object
from builtins import range
from datetime import datetime
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
import mock
import pytest
import apache_beam as beam
import apache_beam.transforms as ptransform
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import AppliedPTransform
from apache_beam.pipeline import Pipeline
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.pvalue import PCollection
from apache_beam.runners import DataflowRunner
from apache_beam.runners import TestDataflowRunner
from apache_beam.runners import create_runner
from apache_beam.runners.dataflow.dataflow_runner import DataflowPipelineResult
from apache_beam.runners.dataflow.dataflow_runner import DataflowRuntimeException
from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api
from apache_beam.runners.runner import PipelineState
from apache_beam.testing.extra_assertions import ExtraAssertionsMixin
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.transforms import window
from apache_beam.transforms.core import Windowing
from apache_beam.transforms.core import _GroupByKeyOnly
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.typehints import typehints
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.runners.dataflow.internal import apiclient
except ImportError:
apiclient = None # type: ignore
# pylint: enable=wrong-import-order, wrong-import-position
# SpecialParDo and SpecialDoFn are used in test_remote_runner_display_data.
# Due to BEAM-8482, these need to be declared outside of the test method.
# TODO: Should not subclass ParDo. Switch to PTransform as soon as
# composite transforms support display data.
class SpecialParDo(beam.ParDo):
  """ParDo wrapper exposing display data; consumed by
  test_remote_runner_display_data (declared at module level due to BEAM-8482).
  """

  def __init__(self, fn, now):
    super(SpecialParDo, self).__init__(fn)
    self.fn = fn
    self.now = now

  def display_data(self):
    # One entry each of subcomponent, class and timestamp kinds, so the
    # test exercises several display-data value types.
    return {'asubcomponent': self.fn,
            'a_class': SpecialParDo,
            'a_time': self.now}
class SpecialDoFn(beam.DoFn):
  """DoFn exposing one fixed display-data entry; used by
  test_remote_runner_display_data."""

  def display_data(self):
    return {'dofn_value': 42}

  def process(self):
    # No-op: only the display data matters for the test.
    pass
@unittest.skipIf(apiclient is None, 'GCP dependencies are not installed')
class DataflowRunnerTest(unittest.TestCase, ExtraAssertionsMixin):
  """Tests for DataflowRunner: job translation, pipeline-option handling,
  type-visitor behavior, and DataflowPipelineResult lifecycle
  (wait_until_finish / cancel)."""

  def setUp(self):
    # Minimal options for a translated-but-not-submitted job; --dry_run
    # prevents any actual service interaction.
    self.default_properties = [
        '--dataflow_endpoint=ignored',
        '--job_name=test-job',
        '--project=test-project',
        '--staging_location=ignored',
        '--temp_location=/dev/null',
        '--no_auth',
        '--dry_run=True']

  @mock.patch('time.sleep', return_value=None)
  def test_wait_until_finish(self, patched_time_sleep):
    """wait_until_finish raises on FAILED/CANCELLED, returns the terminal
    state on success, and honors the duration argument (returns RUNNING on
    timeout)."""
    values_enum = dataflow_api.Job.CurrentStateValueValuesEnum

    class MockDataflowRunner(object):
      # Fake runner whose get_job walks through the given state sequence,
      # sticking on the last state once reached.
      def __init__(self, states):
        self.dataflow_client = mock.MagicMock()
        self.job = mock.MagicMock()
        self.job.currentState = values_enum.JOB_STATE_UNKNOWN
        self._states = states
        self._next_state_index = 0

        def get_job_side_effect(*args, **kwargs):
          self.job.currentState = self._states[self._next_state_index]
          if self._next_state_index < (len(self._states) - 1):
            self._next_state_index += 1
          return mock.DEFAULT

        self.dataflow_client.get_job = mock.MagicMock(
            return_value=self.job, side_effect=get_job_side_effect)
        self.dataflow_client.list_messages = mock.MagicMock(
            return_value=([], None))

    with self.assertRaisesRegex(
        DataflowRuntimeException, 'Dataflow pipeline failed. State: FAILED'):
      failed_runner = MockDataflowRunner([values_enum.JOB_STATE_FAILED])
      failed_result = DataflowPipelineResult(failed_runner.job, failed_runner)
      failed_result.wait_until_finish()

    succeeded_runner = MockDataflowRunner([values_enum.JOB_STATE_DONE])
    succeeded_result = DataflowPipelineResult(
        succeeded_runner.job, succeeded_runner)
    result = succeeded_result.wait_until_finish()
    self.assertEqual(result, PipelineState.DONE)

    # Time array has duplicate items, because some logging implementations also
    # call time.
    with mock.patch('time.time', mock.MagicMock(side_effect=[1, 1, 2, 2, 3])):
      duration_succeeded_runner = MockDataflowRunner(
          [values_enum.JOB_STATE_RUNNING, values_enum.JOB_STATE_DONE])
      duration_succeeded_result = DataflowPipelineResult(
          duration_succeeded_runner.job, duration_succeeded_runner)
      result = duration_succeeded_result.wait_until_finish(5000)
      self.assertEqual(result, PipelineState.DONE)

    with mock.patch('time.time', mock.MagicMock(side_effect=[1, 9, 9, 20, 20])):
      duration_timedout_runner = MockDataflowRunner(
          [values_enum.JOB_STATE_RUNNING])
      duration_timedout_result = DataflowPipelineResult(
          duration_timedout_runner.job, duration_timedout_runner)
      result = duration_timedout_result.wait_until_finish(5000)
      self.assertEqual(result, PipelineState.RUNNING)

    with mock.patch('time.time', mock.MagicMock(side_effect=[1, 1, 2, 2, 3])):
      with self.assertRaisesRegex(
          DataflowRuntimeException,
          'Dataflow pipeline failed. State: CANCELLED'):
        duration_failed_runner = MockDataflowRunner(
            [values_enum.JOB_STATE_CANCELLED])
        duration_failed_result = DataflowPipelineResult(
            duration_failed_runner.job, duration_failed_runner)
        duration_failed_result.wait_until_finish(5000)

  @mock.patch('time.sleep', return_value=None)
  def test_cancel(self, patched_time_sleep):
    """cancel() raises when the service rejects cancelling a running job,
    succeeds when accepted, and tolerates already-terminal jobs."""
    values_enum = dataflow_api.Job.CurrentStateValueValuesEnum

    class MockDataflowRunner(object):
      # Fake runner with a fixed job state and a canned cancel result.
      def __init__(self, state, cancel_result):
        self.dataflow_client = mock.MagicMock()
        self.job = mock.MagicMock()
        self.job.currentState = state

        self.dataflow_client.get_job = mock.MagicMock(return_value=self.job)
        self.dataflow_client.modify_job_state = mock.MagicMock(
            return_value=cancel_result)
        self.dataflow_client.list_messages = mock.MagicMock(
            return_value=([], None))

    with self.assertRaisesRegex(
        DataflowRuntimeException, 'Failed to cancel job'):
      failed_runner = MockDataflowRunner(values_enum.JOB_STATE_RUNNING, False)
      failed_result = DataflowPipelineResult(failed_runner.job, failed_runner)
      failed_result.cancel()

    succeeded_runner = MockDataflowRunner(values_enum.JOB_STATE_RUNNING, True)
    succeeded_result = DataflowPipelineResult(
        succeeded_runner.job, succeeded_runner)
    succeeded_result.cancel()

    terminal_runner = MockDataflowRunner(values_enum.JOB_STATE_DONE, False)
    terminal_result = DataflowPipelineResult(
        terminal_runner.job, terminal_runner)
    terminal_result.cancel()

  def test_create_runner(self):
    """create_runner resolves runner names to the expected classes."""
    self.assertTrue(
        isinstance(create_runner('DataflowRunner'),
                   DataflowRunner))
    self.assertTrue(
        isinstance(create_runner('TestDataflowRunner'),
                   TestDataflowRunner))

  def test_environment_override_translation(self):
    """--worker_harness_container_image overrides the docker environment
    recorded in the runner's proto pipeline."""
    self.default_properties.append('--experiments=beam_fn_api')
    self.default_properties.append('--worker_harness_container_image=FOO')
    remote_runner = DataflowRunner()
    p = Pipeline(remote_runner,
                 options=PipelineOptions(self.default_properties))
    (p | ptransform.Create([1, 2, 3])  # pylint: disable=expression-not-assigned
     | 'Do' >> ptransform.FlatMap(lambda x: [(x, x)])
     | ptransform.GroupByKey())
    p.run()
    self.assertEqual(
        list(remote_runner.proto_pipeline.components.environments.values()),
        [beam_runner_api_pb2.Environment(
            urn=common_urns.environments.DOCKER.urn,
            payload=beam_runner_api_pb2.DockerPayload(
                container_image='FOO').SerializeToString())])

  def test_remote_runner_translation(self):
    """A simple Create|FlatMap|GBK pipeline translates and dry-runs."""
    remote_runner = DataflowRunner()
    p = Pipeline(remote_runner,
                 options=PipelineOptions(self.default_properties))
    (p | ptransform.Create([1, 2, 3])  # pylint: disable=expression-not-assigned
     | 'Do' >> ptransform.FlatMap(lambda x: [(x, x)])
     | ptransform.GroupByKey())
    p.run()

  def test_streaming_create_translation(self):
    """In streaming mode, Create becomes a Pub/Sub read plus two ParDos."""
    remote_runner = DataflowRunner()
    self.default_properties.append("--streaming")
    p = Pipeline(remote_runner, PipelineOptions(self.default_properties))
    p | ptransform.Create([1])  # pylint: disable=expression-not-assigned
    p.run()
    job_dict = json.loads(str(remote_runner.job))
    self.assertEqual(len(job_dict[u'steps']), 3)

    self.assertEqual(job_dict[u'steps'][0][u'kind'], u'ParallelRead')
    self.assertEqual(
        job_dict[u'steps'][0][u'properties'][u'pubsub_subscription'],
        '_starting_signal/')
    self.assertEqual(job_dict[u'steps'][1][u'kind'], u'ParallelDo')
    self.assertEqual(job_dict[u'steps'][2][u'kind'], u'ParallelDo')

  def test_biqquery_read_streaming_fail(self):
    """Reading from BigQuery is rejected in streaming pipelines."""
    remote_runner = DataflowRunner()
    self.default_properties.append("--streaming")
    p = Pipeline(remote_runner, PipelineOptions(self.default_properties))
    _ = p | beam.io.Read(beam.io.BigQuerySource('some.table'))
    with self.assertRaisesRegex(ValueError,
                                r'source is not currently available'):
      p.run()

  # TODO(BEAM-8095): Segfaults in Python 3.7 with xdist.
  @pytest.mark.no_xdist
  def test_remote_runner_display_data(self):
    """Display data declared on ParDo/DoFn shows up in the job's steps."""
    remote_runner = DataflowRunner()
    p = Pipeline(remote_runner,
                 options=PipelineOptions(self.default_properties))

    now = datetime.now()
    # pylint: disable=expression-not-assigned
    (p | ptransform.Create([1, 2, 3, 4, 5])
     | 'Do' >> SpecialParDo(SpecialDoFn(), now))

    # TODO(BEAM-366) Enable runner API on this test.
    p.run(test_runner_api=False)
    job_dict = json.loads(str(remote_runner.job))
    steps = [step
             for step in job_dict['steps']
             if len(step['properties'].get('display_data', [])) > 0]
    step = steps[1]
    disp_data = step['properties']['display_data']
    nspace = SpecialParDo.__module__+ '.'
    expected_data = [{'type': 'TIMESTAMP', 'namespace': nspace+'SpecialParDo',
                      'value': DisplayDataItem._format_value(now, 'TIMESTAMP'),
                      'key': 'a_time'},
                     {'type': 'STRING', 'namespace': nspace+'SpecialParDo',
                      'value': nspace+'SpecialParDo', 'key': 'a_class',
                      'shortValue': 'SpecialParDo'},
                     {'type': 'INTEGER', 'namespace': nspace+'SpecialDoFn',
                      'value': 42, 'key': 'dofn_value'}]
    self.assertUnhashableCountEqual(disp_data, expected_data)

  def test_no_group_by_key_directly_after_bigquery(self):
    """GroupByKey directly on a BigQuery read fails: not a key-value coder."""
    remote_runner = DataflowRunner()
    p = Pipeline(remote_runner,
                 options=PipelineOptions([
                     '--dataflow_endpoint=ignored',
                     '--job_name=test-job',
                     '--project=test-project',
                     '--staging_location=ignored',
                     '--temp_location=/dev/null',
                     '--no_auth'
                 ]))
    rows = p | beam.io.Read(beam.io.BigQuerySource('dataset.faketable'))
    with self.assertRaises(ValueError,
                           msg=('Coder for the GroupByKey operation'
                                '"GroupByKey" is not a key-value coder: '
                                'RowAsDictJsonCoder')):
      unused_invalid = rows | beam.GroupByKey()

  def test_group_by_key_input_visitor_with_valid_inputs(self):
    """The GBK input visitor rewrites compatible element types to
    KV[Any, Any]."""
    p = TestPipeline()
    pcoll1 = PCollection(p)
    pcoll2 = PCollection(p)
    pcoll3 = PCollection(p)
    for transform in [_GroupByKeyOnly(), beam.GroupByKey()]:
      pcoll1.element_type = None
      pcoll2.element_type = typehints.Any
      pcoll3.element_type = typehints.KV[typehints.Any, typehints.Any]
      for pcoll in [pcoll1, pcoll2, pcoll3]:
        applied = AppliedPTransform(None, transform, "label", [pcoll])
        applied.outputs[None] = PCollection(None)
        DataflowRunner.group_by_key_input_visitor().visit_transform(
            applied)
        self.assertEqual(pcoll.element_type,
                         typehints.KV[typehints.Any, typehints.Any])

  def test_group_by_key_input_visitor_with_invalid_inputs(self):
    """Element types incompatible with KV[Any, Any] are rejected."""
    p = TestPipeline()
    pcoll1 = PCollection(p)
    pcoll2 = PCollection(p)
    for transform in [_GroupByKeyOnly(), beam.GroupByKey()]:
      pcoll1.element_type = str
      pcoll2.element_type = typehints.Set
      err_msg = (
          r"Input to 'label' must be compatible with KV\[Any, Any\]. "
          "Found .*")
      for pcoll in [pcoll1, pcoll2]:
        with self.assertRaisesRegex(ValueError, err_msg):
          DataflowRunner.group_by_key_input_visitor().visit_transform(
              AppliedPTransform(None, transform, "label", [pcoll]))

  def test_group_by_key_input_visitor_for_non_gbk_transforms(self):
    """The GBK visitor leaves non-GBK transforms' types untouched."""
    p = TestPipeline()
    pcoll = PCollection(p)
    for transform in [beam.Flatten(), beam.Map(lambda x: x)]:
      pcoll.element_type = typehints.Any
      DataflowRunner.group_by_key_input_visitor().visit_transform(
          AppliedPTransform(None, transform, "label", [pcoll]))
      self.assertEqual(pcoll.element_type, typehints.Any)

  def test_flatten_input_with_visitor_with_single_input(self):
    self._test_flatten_input_visitor(typehints.KV[int, int], typehints.Any, 1)

  def test_flatten_input_with_visitor_with_multiple_inputs(self):
    self._test_flatten_input_visitor(
        typehints.KV[int, typehints.Any], typehints.Any, 5)

  def _test_flatten_input_visitor(self, input_type, output_type, num_inputs):
    """Assert the Flatten visitor forces every input's element type to the
    output's element type."""
    p = TestPipeline()
    inputs = []
    for _ in range(num_inputs):
      input_pcoll = PCollection(p)
      input_pcoll.element_type = input_type
      inputs.append(input_pcoll)
    output_pcoll = PCollection(p)
    output_pcoll.element_type = output_type

    flatten = AppliedPTransform(None, beam.Flatten(), "label", inputs)
    flatten.add_output(output_pcoll, None)
    DataflowRunner.flatten_input_visitor().visit_transform(flatten)
    for _ in range(num_inputs):
      self.assertEqual(inputs[0].element_type, output_type)

  def test_gbk_then_flatten_input_visitor(self):
    """Running both visitors yields consistent tuple element types across
    a Flatten feeding a GroupByKey."""
    p = TestPipeline(
        runner=DataflowRunner(),
        options=PipelineOptions(self.default_properties))
    none_str_pc = p | 'c1' >> beam.Create({None: 'a'})
    none_int_pc = p | 'c2' >> beam.Create({None: 3})
    flat = (none_str_pc, none_int_pc) | beam.Flatten()
    _ = flat | beam.GroupByKey()

    # This may change if type inference changes, but we assert it here
    # to make sure the check below is not vacuous.
    self.assertNotIsInstance(flat.element_type, typehints.TupleConstraint)

    p.visit(DataflowRunner.group_by_key_input_visitor())
    p.visit(DataflowRunner.flatten_input_visitor())

    # The dataflow runner requires gbk input to be tuples *and* flatten
    # inputs to be equal to their outputs. Assert both hold.
    self.assertIsInstance(flat.element_type, typehints.TupleConstraint)
    self.assertEqual(flat.element_type, none_str_pc.element_type)
    self.assertEqual(flat.element_type, none_int_pc.element_type)

  def test_serialize_windowing_strategy(self):
    # This just tests the basic path; more complete tests
    # are in window_test.py.
    strategy = Windowing(window.FixedWindows(10))
    self.assertEqual(
        strategy,
        DataflowRunner.deserialize_windowing_strategy(
            DataflowRunner.serialize_windowing_strategy(strategy)))

  def test_side_input_visitor(self):
    """The side-input visitor converts all side inputs to the multimap
    access pattern."""
    p = TestPipeline()
    pc = p | beam.Create([])

    transform = beam.Map(
        lambda x, y, z: (x, y, z),
        beam.pvalue.AsSingleton(pc),
        beam.pvalue.AsMultiMap(pc))
    applied_transform = AppliedPTransform(None, transform, "label", [pc])
    DataflowRunner.side_input_visitor().visit_transform(applied_transform)
    self.assertEqual(2, len(applied_transform.side_inputs))
    for side_input in applied_transform.side_inputs:
      self.assertEqual(
          common_urns.side_inputs.MULTIMAP.urn,
          side_input._side_input_data().access_pattern)

  def test_min_cpu_platform_flag_is_propagated_to_experiments(self):
    """--min_cpu_platform is forwarded as an experiment."""
    remote_runner = DataflowRunner()
    self.default_properties.append('--min_cpu_platform=Intel Haswell')

    p = Pipeline(remote_runner, PipelineOptions(self.default_properties))
    p | ptransform.Create([1])  # pylint: disable=expression-not-assigned
    p.run()
    self.assertIn('min_cpu_platform=Intel Haswell',
                  remote_runner.job.options.view_as(DebugOptions).experiments)

  def test_streaming_engine_flag_adds_windmill_experiments(self):
    """--enable_streaming_engine adds the windmill experiments without
    clobbering user-supplied ones."""
    remote_runner = DataflowRunner()
    self.default_properties.append('--streaming')
    self.default_properties.append('--enable_streaming_engine')
    self.default_properties.append('--experiment=some_other_experiment')

    p = Pipeline(remote_runner, PipelineOptions(self.default_properties))
    p | ptransform.Create([1])  # pylint: disable=expression-not-assigned
    p.run()

    experiments_for_job = (
        remote_runner.job.options.view_as(DebugOptions).experiments)
    self.assertIn('enable_streaming_engine', experiments_for_job)
    self.assertIn('enable_windmill_service', experiments_for_job)
    self.assertIn('some_other_experiment', experiments_for_job)

  def test_dataflow_worker_jar_flag_non_fnapi_noop(self):
    """--dataflow_worker_jar is a no-op without the beam_fn_api experiment."""
    remote_runner = DataflowRunner()
    self.default_properties.append('--experiment=some_other_experiment')
    self.default_properties.append('--dataflow_worker_jar=test.jar')

    p = Pipeline(remote_runner, PipelineOptions(self.default_properties))
    p | ptransform.Create([1])  # pylint: disable=expression-not-assigned
    p.run()

    experiments_for_job = (
        remote_runner.job.options.view_as(DebugOptions).experiments)
    self.assertIn('some_other_experiment', experiments_for_job)
    self.assertNotIn('use_staged_dataflow_worker_jar', experiments_for_job)

  def test_dataflow_worker_jar_flag_adds_use_staged_worker_jar_experiment(self):
    """--dataflow_worker_jar with beam_fn_api adds the staged-jar
    experiment."""
    remote_runner = DataflowRunner()
    self.default_properties.append('--experiment=beam_fn_api')
    self.default_properties.append('--dataflow_worker_jar=test.jar')

    p = Pipeline(remote_runner, PipelineOptions(self.default_properties))
    p | ptransform.Create([1])  # pylint: disable=expression-not-assigned
    p.run()

    experiments_for_job = (
        remote_runner.job.options.view_as(DebugOptions).experiments)
    self.assertIn('beam_fn_api', experiments_for_job)
    self.assertIn('use_staged_dataflow_worker_jar', experiments_for_job)

  def test_use_fastavro_experiment_is_added_on_py3_and_onwards(self):
    """use_fastavro is enabled by default exactly when running on Python 3+."""
    remote_runner = DataflowRunner()

    p = Pipeline(remote_runner, PipelineOptions(self.default_properties))
    p | ptransform.Create([1])  # pylint: disable=expression-not-assigned
    p.run()

    self.assertEqual(
        sys.version_info[0] > 2,
        remote_runner.job.options.view_as(DebugOptions).lookup_experiment(
            'use_fastavro', False))

  def test_use_fastavro_experiment_is_not_added_when_use_avro_is_present(self):
    """An explicit use_avro experiment suppresses use_fastavro."""
    remote_runner = DataflowRunner()
    self.default_properties.append('--experiment=use_avro')

    p = Pipeline(remote_runner, PipelineOptions(self.default_properties))
    p | ptransform.Create([1])  # pylint: disable=expression-not-assigned
    p.run()

    debug_options = remote_runner.job.options.view_as(DebugOptions)

    self.assertFalse(debug_options.lookup_experiment('use_fastavro', False))
# Standard unittest entry point.
if __name__ == '__main__':
  unittest.main()
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova import test
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt import fake
from nova.virt.libvirt import driver
from nova.virt.libvirt import host
from nova.virt.libvirt.volume import volume
SECRET_UUID = '2a0a0d6c-babf-454d-b93e-9ac9957b95e0'


class FakeSecret(object):
    """Minimal stand-in for a libvirt secret object.

    Implements only the methods these tests touch; the integer return
    values of 0 mirror libvirt's success return codes.
    """

    def __init__(self):
        self.uuid = SECRET_UUID

    def getUUIDString(self):
        return self.uuid

    # libvirt exposes both spellings; they behave identically here.
    UUIDString = getUUIDString

    def setValue(self, value):
        self.value = value
        return 0

    def getValue(self, value):
        # The argument is accepted for signature compatibility and ignored.
        return self.value

    def undefine(self):
        # Forget the stored value, emulating removal of the secret.
        self.value = None
        return 0
class LibvirtBaseVolumeDriverSubclassSignatureTestCase(
        test.SubclassSignatureTestCase):
    """Verifies all volume-driver subclasses keep the base-class method
    signatures (driven by SubclassSignatureTestCase)."""

    def _get_base_class(self):
        # We do this because it has the side-effect of loading all the
        # volume drivers
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        return volume.LibvirtBaseVolumeDriver
class LibvirtVolumeBaseTestCase(test.NoDBTestCase):
    """Contains common setup and helper methods for libvirt volume tests."""
    def setUp(self):
        super(LibvirtVolumeBaseTestCase, self).setUp()
        # Route all libvirt calls through the fake libvirt fixture.
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.fake_host = host.Host("qemu:///system")
        # Fake connector properties shared by subclasses.
        self.connr = {
            'ip': '127.0.0.1',
            'initiator': 'fake_initiator',
            'host': 'fake_host'
        }
        # Default guest disk mapping used when rendering configs.
        self.disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        self.name = 'volume-00000001'
        self.location = '10.0.2.15:3260'
        self.iqn = 'iqn.2010-10.org.openstack:%s' % self.name
        self.vol = {'id': 1, 'name': self.name}
        self.uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
        self.user = 'foo'
    def _assertFileTypeEquals(self, tree, file_path):
        # The disk element must be file-backed and point at file_path.
        self.assertEqual('file', tree.get('type'))
        self.assertEqual(file_path, tree.find('./source').get('file'))
class LibvirtISCSIVolumeBaseTestCase(LibvirtVolumeBaseTestCase):
    """Common setup and helpers for iSCSI volume tests."""

    def iscsi_connection(self, volume, location, iqn, auth=False,
                         transport=None):
        """Build a fake iSCSI connection_info dict for *volume*.

        When *transport* is given, the by-path device name gets the PCI
        transport prefix; when *auth* is True, CHAP credentials are added.
        """
        base_name = 'ip-%s-iscsi-%s-lun-1' % (location, iqn)
        dev_name = base_name if transport is None else (
            'pci-0000:00:00.0-' + base_name)
        qos = {
            # Note that read/write iops/bytes values cannot
            # be used with total values.
            # These are only here for illustrative purposes.
            'total_bytes_sec': '102400',
            'read_iops_sec': '200',
            'read_bytes_sec_max': '150000',
            'read_iops_sec_max': '2000',
            'write_bytes_sec_max': '250000',
            'write_iops_sec_max': '3000',
            'total_bytes_sec_max': '400000',
            'total_iops_sec_max': '4000',
            'size_iops_sec': '16',
        }
        data = {
            'volume_id': volume['id'],
            'target_portal': location,
            'target_iqn': iqn,
            'target_lun': 1,
            'device_path': '/dev/disk/by-path/%s' % (dev_name),
            'qos_specs': qos,
        }
        if auth:
            data['auth_method'] = 'CHAP'
            data['auth_username'] = 'foo'
            data['auth_password'] = 'bar'
        return {
            'driver_volume_type': 'iscsi',
            'data': data,
        }
@ddt.ddt
class LibvirtVolumeTestCase(LibvirtISCSIVolumeBaseTestCase):
    """Tests for XML disk-config generation by the base
    LibvirtVolumeDriver."""

    def _assertDiskInfoEquals(self, tree, disk_info):
        # The <disk> element's device type and the <target> bus/dev must
        # reflect disk_info.
        self.assertEqual(disk_info['type'], tree.get('device'))
        self.assertEqual(disk_info['bus'], tree.find('./target').get('bus'))
        self.assertEqual(disk_info['dev'], tree.find('./target').get('dev'))

    def _test_libvirt_volume_driver_disk_info(self):
        """Render a config from self.disk_info and check the disk element."""
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_host)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'device_path': '/foo',
            },
            'serial': 'fake_serial',
        }
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertDiskInfoEquals(tree, self.disk_info)

    def test_libvirt_volume_disk_info_type(self):
        self.disk_info['type'] = 'cdrom'
        self._test_libvirt_volume_driver_disk_info()

    def test_libvirt_volume_disk_info_dev(self):
        self.disk_info['dev'] = 'hdc'
        self._test_libvirt_volume_driver_disk_info()

    def test_libvirt_volume_disk_info_bus(self):
        self.disk_info['bus'] = 'scsi'
        self._test_libvirt_volume_driver_disk_info()

    def test_libvirt_volume_driver_serial(self):
        """The serial from connection_info is emitted; no blockio/discard."""
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_host)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'device_path': '/foo',
            },
            'serial': 'fake_serial',
        }
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self.assertEqual('block', tree.get('type'))
        self.assertEqual('fake_serial', tree.find('./serial').text)
        self.assertIsNone(tree.find('./blockio'))
        self.assertIsNone(tree.find("driver[@discard]"))

    def test_libvirt_volume_driver_blockio(self):
        """logical/physical_block_size appear in a <blockio> element."""
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_host)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'device_path': '/foo',
                'logical_block_size': '4096',
                'physical_block_size': '4096',
            },
            'serial': 'fake_serial',
        }
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        blockio = tree.find('./blockio')
        self.assertEqual('4096', blockio.get('logical_block_size'))
        self.assertEqual('4096', blockio.get('physical_block_size'))

    def test_libvirt_volume_driver_iotune(self):
        """Valid qos_specs become <iotune> children; invalid ones are
        ignored."""
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_host)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                "device_path": "/foo",
                'qos_specs': 'bar',
            },
        }
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        iotune = tree.find('./iotune')
        # ensure invalid qos_specs is ignored
        self.assertIsNone(iotune)
        specs = {
            'total_bytes_sec': '102400',
            'read_bytes_sec': '51200',
            'write_bytes_sec': '0',
            'total_iops_sec': '0',
            'read_iops_sec': '200',
            'write_iops_sec': '200',
        }
        del connection_info['data']['qos_specs']
        connection_info['data'].update(dict(qos_specs=specs))
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        self.assertEqual('102400', tree.find('./iotune/total_bytes_sec').text)
        self.assertEqual('51200', tree.find('./iotune/read_bytes_sec').text)
        self.assertEqual('0', tree.find('./iotune/write_bytes_sec').text)
        self.assertEqual('0', tree.find('./iotune/total_iops_sec').text)
        self.assertEqual('200', tree.find('./iotune/read_iops_sec').text)
        self.assertEqual('200', tree.find('./iotune/write_iops_sec').text)

    def test_libvirt_volume_driver_readonly(self):
        """access_mode controls <readonly>; unknown modes raise."""
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_host)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                "device_path": "/foo",
                'access_mode': 'bar',
            },
        }
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        self.assertRaises(exception.InvalidVolumeAccessMode,
                          libvirt_driver.get_config,
                          connection_info, self.disk_info)
        connection_info['data']['access_mode'] = 'rw'
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        readonly = tree.find('./readonly')
        self.assertIsNone(readonly)
        connection_info['data']['access_mode'] = 'ro'
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        readonly = tree.find('./readonly')
        self.assertIsNotNone(readonly)

    def test_libvirt_volume_multiattach(self):
        """multiattach=True adds a <shareable> element; False omits it."""
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_host)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                "device_path": "/foo",
                'access_mode': 'rw',
            },
            'multiattach': True,
        }
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        shareable = tree.find('./shareable')
        self.assertIsNotNone(shareable)
        connection_info['multiattach'] = False
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        shareable = tree.find('./shareable')
        self.assertIsNone(shareable)

    @mock.patch('nova.virt.libvirt.host.Host.has_min_version')
    def test_libvirt_volume_driver_discard_true(self, mock_has_min_version):
        # Check the discard attrib is present in driver section
        mock_has_min_version.return_value = True
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_host)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'device_path': '/foo',
                'discard': True,
            },
            'serial': 'fake_serial',
        }
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        driver_node = tree.find("driver[@discard]")
        self.assertIsNotNone(driver_node)
        self.assertEqual('unmap', driver_node.attrib['discard'])

    def test_libvirt_volume_driver_discard_false(self):
        # Check the discard attrib is not present in driver section
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_host)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'device_path': '/foo',
                'discard': False,
            },
            'serial': 'fake_serial',
        }
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self.assertIsNone(tree.find("driver[@discard]"))

    def test_libvirt_volume_driver_encryption(self):
        """A secret known to the host yields a luks <encryption> element."""
        fake_secret = FakeSecret()
        fake_host = mock.Mock(spec=host.Host)
        fake_host.find_secret.return_value = fake_secret
        libvirt_driver = volume.LibvirtVolumeDriver(fake_host)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'volume_id': uuids.volume_id,
                'device_path': '/foo',
                'discard': False,
            },
            'serial': 'fake_serial',
        }
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        encryption = tree.find("encryption")
        secret = encryption.find("secret")
        self.assertEqual('luks', encryption.attrib['format'])
        self.assertEqual('passphrase', secret.attrib['type'])
        self.assertEqual(SECRET_UUID, secret.attrib['uuid'])

    def test_libvirt_volume_driver_encryption_missing_secret(self):
        """No matching host secret -> no <encryption> element is emitted."""
        fake_host = mock.Mock(spec=host.Host)
        fake_host.find_secret.return_value = None
        libvirt_driver = volume.LibvirtVolumeDriver(fake_host)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'volume_id': uuids.volume_id,
                'device_path': '/foo',
                'discard': False,
            },
            'serial': 'fake_serial',
        }
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self.assertIsNone(tree.find("encryption"))

    @ddt.data(5, None)
    def test_libvirt_volume_driver_address_tag_scsi_unit(self, disk_unit):
        # The address tag should be set if bus is scsi and unit is set.
        # Otherwise, it should not be set at all.
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_host)
        connection_info = {'data': {'device_path': '/foo'}}
        disk_info = {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'}
        if disk_unit:
            disk_info['unit'] = disk_unit
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        address = tree.find('address')
        if disk_unit:
            self.assertEqual('0', address.attrib['controller'])
            self.assertEqual(str(disk_unit), address.attrib['unit'])
        else:
            self.assertIsNone(address)
|
|
import unittest
from collections import OrderedDict
from pypika import (
Array,
Field,
JSON,
QueryException,
Table,
)
from pypika.dialects import PostgreSQLQuery
class InsertTests(unittest.TestCase):
    """INSERT rendering for the PostgreSQL dialect."""

    table_abc = Table("abc")

    def test_array_keyword(self):
        # A Python list inside an inserted row renders as an ARRAY literal.
        query = PostgreSQLQuery.into(self.table_abc).insert(1, [1, "a", True])
        self.assertEqual("INSERT INTO \"abc\" VALUES (1,ARRAY[1,'a',true])", str(query))
class JSONObjectTests(unittest.TestCase):
    """Rendering of literal JSON values and aliased ->> text extraction."""
    def test_alias_set_correctly(self):
        # The quoted alias is rendered right after the ->> extraction.
        table = Table('jsonb_table')
        q = PostgreSQLQuery.from_('abc').select(table.value.get_text_value('a').as_('name'))
        self.assertEqual('''SELECT "value"->>'a' "name" FROM "abc"''', str(q))
    def test_json_value_from_dict(self):
        # A dict becomes a quoted JSON object literal.
        q = PostgreSQLQuery.select(JSON({"a": "foo"}))
        self.assertEqual('SELECT \'{"a":"foo"}\'', str(q))
    def test_json_value_from_array_num(self):
        q = PostgreSQLQuery.select(JSON([1, 2, 3]))
        self.assertEqual("SELECT '[1,2,3]'", str(q))
    def test_json_value_from_array_str(self):
        q = PostgreSQLQuery.select(JSON(["a", "b", "c"]))
        self.assertEqual('SELECT \'["a","b","c"]\'', str(q))
    def test_json_value_from_dict_recursive(self):
        q = PostgreSQLQuery.select(JSON({"a": "z", "b": {"c": "foo"}, "d": 1}))
        # gotta split this one up to avoid the indeterminate order
        sql = str(q)
        # start/end bracket the object body inside "SELECT '{...}'".
        start, end = 9, -2
        self.assertEqual("SELECT '{}'", sql[:start] + sql[end:])
        members_set = set(sql[start:end].split(","))
        self.assertSetEqual({'"a":"z"', '"b":{"c":"foo"}', '"d":1'}, members_set)
class JSONOperatorsTests(unittest.TestCase):
    """Rendering of the JSON extraction operators: ->, ->>, #>, #>>."""
    # reference https://www.postgresql.org/docs/9.5/functions-json.html
    table_abc = Table("abc")
    def test_get_json_value_by_key(self):
        # -> with a string key extracts an object field as JSON.
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_json_value("dates"))
        self.assertEqual('SELECT * FROM "abc" WHERE "json"->\'dates\'', str(q))
    def test_get_json_value_by_index(self):
        # -> with an int index extracts an array element (no quoting).
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_json_value(1))
        self.assertEqual('SELECT * FROM "abc" WHERE "json"->1', str(q))
    def test_get_text_value_by_key(self):
        # ->> extracts the field as text rather than JSON.
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_text_value("dates"))
        self.assertEqual('SELECT * FROM "abc" WHERE "json"->>\'dates\'', str(q))
    def test_get_text_value_by_index(self):
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_text_value(1))
        self.assertEqual('SELECT * FROM "abc" WHERE "json"->>1', str(q))
    def test_get_path_json_value(self):
        # #> walks the given path and returns JSON.
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_path_json_value("{a,b}"))
        self.assertEqual('SELECT * FROM "abc" WHERE "json"#>\'{a,b}\'', str(q))
    def test_get_path_text_value(self):
        # #>> walks the given path and returns text.
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_path_text_value("{a,b}"))
        self.assertEqual('SELECT * FROM "abc" WHERE "json"#>>\'{a,b}\'', str(q))
class JSONBOperatorsTests(unittest.TestCase):
    """Rendering of jsonb containment/key operators (@>, <@, ?, ?&, ?|)."""
    # reference https://www.postgresql.org/docs/9.5/functions-json.html
    table_abc = Table("abc")
    def test_json_contains_for_json(self):
        q = PostgreSQLQuery.select(JSON({"a": 1, "b": 2}).contains({"a": 1}))
        # gotta split this one up to avoid the indeterminate order
        sql = str(q)
        # start/end bracket the left-hand object body of the @> expression.
        start, end = 9, -13
        self.assertEqual("SELECT '{}'@>'{\"a\":1}'", sql[:start] + sql[end:])
        members_set = set(sql[start:end].split(","))
        self.assertSetEqual({'"a":1', '"b":2'}, members_set)
    def test_json_contains_for_field(self):
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select("*")
            .where(self.table_abc.json.contains({"dates": "2018-07-10 - 2018-07-17"}))
        )
        self.assertEqual(
            "SELECT * " 'FROM "abc" ' 'WHERE "json"@>\'{"dates":"2018-07-10 - 2018-07-17"}\'',
            str(q),
        )
    def test_json_contained_by_using_str_arg(self):
        # OrderedDict keeps the serialized key order deterministic.
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select("*")
            .where(
                self.table_abc.json.contained_by(
                    OrderedDict(
                        [
                            ("dates", "2018-07-10 - 2018-07-17"),
                            ("imported", "8"),
                        ]
                    )
                )
            )
        )
        self.assertEqual(
            'SELECT * FROM "abc" ' 'WHERE "json"<@\'{"dates":"2018-07-10 - 2018-07-17","imported":"8"}\'',
            str(q),
        )
    def test_json_contained_by_using_list_arg(self):
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select("*")
            .where(self.table_abc.json.contained_by(["One", "Two", "Three"]))
        )
        self.assertEqual('SELECT * FROM "abc" WHERE "json"<@\'["One","Two","Three"]\'', str(q))
    def test_json_contained_by_with_complex_criterion(self):
        # <@ composes with other criteria via AND.
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select("*")
            .where(self.table_abc.json.contained_by(["One", "Two", "Three"]) & (self.table_abc.id == 26))
        )
        self.assertEqual(
            'SELECT * FROM "abc" WHERE "json"<@\'["One","Two","Three"]\' AND "id"=26',
            str(q),
        )
    def test_json_has_key(self):
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.has_key("dates"))
        self.assertEqual('SELECT * FROM "abc" WHERE "json"?\'dates\'', str(q))
    def test_json_has_keys(self):
        # ?& requires all of the listed keys to exist.
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.has_keys(["dates", "imported"]))
        self.assertEqual("SELECT * FROM \"abc\" WHERE \"json\"?&ARRAY['dates','imported']", str(q))
    def test_json_has_any_keys(self):
        # ?| requires at least one of the listed keys to exist.
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select("*")
            .where(self.table_abc.json.has_any_keys(["dates", "imported"]))
        )
        self.assertEqual("SELECT * FROM \"abc\" WHERE \"json\"?|ARRAY['dates','imported']", str(q))
    def test_subnet_contains_inet(self):
        # NOTE(review): >> on "cidr" renders as the inet containment operator,
        # while lshift(2) renders as the << bitwise shift on "a".
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select(self.table_abc.a.lshift(2))
            .where(self.table_abc.cidr >> "1.1.1.1")
        )
        self.assertEqual("SELECT \"a\"<<2 FROM \"abc\" WHERE \"cidr\">>'1.1.1.1'", str(q))
class DistinctOnTests(unittest.TestCase):
    """DISTINCT ON rendering (a PostgreSQL extension)."""

    table_abc = Table("abc")

    def test_distinct_on(self):
        # distinct_on accepts both plain strings and Field objects.
        query = (
            PostgreSQLQuery.from_(self.table_abc)
            .distinct_on("lname", self.table_abc.fname)
            .select("lname", "id")
        )
        self.assertEqual('''SELECT DISTINCT ON("lname","fname") "lname","id" FROM "abc"''', str(query))
class ArrayTests(unittest.TestCase):
    """ARRAY[...] literal rendering."""

    def test_array_syntax(self):
        # Nested Python lists render as nested ARRAY literals.
        table = Table("tb")
        query = PostgreSQLQuery.from_(table).select(Array(1, "a", ["b", 2, 3]))
        self.assertEqual(str(query), "SELECT ARRAY[1,'a',ARRAY['b',2,3]] FROM \"tb\"")

    def test_render_alias_in_array_sql(self):
        # An alias on the Array expression is rendered after the literal.
        table = Table("tb")
        query = PostgreSQLQuery.from_(table).select(Array(table.col).as_("different_name"))
        self.assertEqual(str(query), 'SELECT ARRAY["col"] "different_name" FROM "tb"')
class ReturningClauseTests(unittest.TestCase):
    """Validation and rendering of the PostgreSQL RETURNING clause."""
    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        cls.table_abc = Table('abc')
    def test_returning_from_missing_table_raises_queryexception(self):
        # RETURNING may only reference the modified table (or a joined one).
        field_from_diff_table = Field('xyz', table=Table('other'))
        with self.assertRaisesRegex(QueryException, "You can't return from other tables"):
            (
                PostgreSQLQuery.from_(self.table_abc)
                .where(self.table_abc.foo == self.table_abc.bar)
                .delete()
                .returning(field_from_diff_table)
            )
    def test_queryexception_if_returning_used_on_invalid_query(self):
        # RETURNING is rejected on a plain SELECT.
        with self.assertRaisesRegex(QueryException, "Returning can't be used in this query"):
            PostgreSQLQuery.from_(self.table_abc).select('abc').returning('abc')
    def test_no_queryexception_if_returning_used_on_valid_query_type(self):
        # No exceptions for insert, update and delete queries
        with self.subTest('DELETE'):
            PostgreSQLQuery.from_(self.table_abc).where(self.table_abc.foo == self.table_abc.bar).delete().returning(
                "id"
            )
        with self.subTest('UPDATE'):
            PostgreSQLQuery.update(self.table_abc).where(self.table_abc.foo == 0).set("foo", "bar").returning("id")
        with self.subTest('INSERT'):
            PostgreSQLQuery.into(self.table_abc).insert('abc').returning('abc')
    def test_return_field_from_join_table(self):
        # Fields of a joined table are legal in RETURNING and fully qualified.
        new_table = Table('xyz')
        q = (
            PostgreSQLQuery.update(self.table_abc)
            .join(new_table)
            .on(new_table.id == self.table_abc.xyz)
            .where(self.table_abc.foo == 0)
            .set("foo", "bar")
            .returning(new_table.a)
        )
        self.assertEqual(
            'UPDATE "abc" '
            'JOIN "xyz" ON "xyz"."id"="abc"."xyz" '
            'SET "foo"=\'bar\' '
            'WHERE "abc"."foo"=0 '
            'RETURNING "xyz"."a"',
            str(q),
        )
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes, two p2p connections to node0. One p2p connection should only ever
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
# Timeout (seconds) used for wait_for_getdata when the node is expected to
# respond to a headers announcement with a direct-fetch getdata request.
direct_fetch_response_time = 0.05
class BaseNode(NodeConnCB):
    """P2P connection stub that records the last message of each type received
    from the node, plus helpers to send requests and wait on recorded state.

    Recorded state is shared with the mininode network thread, so reads and
    resets are done under mininode_lock.
    """
    def __init__(self):
        super().__init__()
        # Last message of each kind received from the node (None until seen).
        self.last_inv = None
        self.last_headers = None
        self.last_block = None
        self.last_getdata = None
        self.block_announced = False
        self.last_getheaders = None
        self.disconnected = False
        # Hash of the most recent block announced via inv or headers.
        self.last_blockhash_announced = None
    def clear_last_announcement(self):
        # Reset announcement state; used between test phases.
        with mininode_lock:
            self.block_announced = False
            self.last_inv = None
            self.last_headers = None
    # Request data for a list of block hashes
    def get_data(self, block_hashes):
        msg = msg_getdata()
        for x in block_hashes:
            msg.inv.append(CInv(2, x))  # inv type 2 == block
        self.connection.send_message(msg)
    def get_headers(self, locator, hashstop):
        # Send a getheaders with the given locator hashes and hashstop.
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.connection.send_message(msg)
    def send_block_inv(self, blockhash):
        msg = msg_inv()
        msg.inv = [CInv(2, blockhash)]
        self.connection.send_message(msg)
    # Message callbacks: record what the node sent us.
    def on_inv(self, conn, message):
        self.last_inv = message
        self.block_announced = True
        self.last_blockhash_announced = message.inv[-1].hash
    def on_headers(self, conn, message):
        self.last_headers = message
        if len(message.headers):
            # A non-empty headers message counts as a block announcement.
            self.block_announced = True
            message.headers[-1].calc_sha256()
            self.last_blockhash_announced = message.headers[-1].sha256
    def on_block(self, conn, message):
        self.last_block = message.block
        self.last_block.calc_sha256()
    def on_getdata(self, conn, message):
        self.last_getdata = message
    def on_getheaders(self, conn, message):
        self.last_getheaders = message
    def on_close(self, conn):
        self.disconnected = True
    # Test whether the last announcement we received had the
    # right header or the right inv
    # inv and headers should be lists of block hashes
    def check_last_announcement(self, headers=None, inv=None):
        expect_headers = headers if headers != None else []
        expect_inv = inv if inv != None else []
        test_function = lambda: self.block_announced
        assert(wait_until(test_function, timeout=60))
        with mininode_lock:
            self.block_announced = False
            success = True
            compare_inv = []
            if self.last_inv != None:
                compare_inv = [x.hash for x in self.last_inv.inv]
            if compare_inv != expect_inv:
                success = False
            hash_headers = []
            if self.last_headers != None:
                # treat headers as a list of block hashes
                hash_headers = [ x.sha256 for x in self.last_headers.headers ]
            if hash_headers != expect_headers:
                success = False
            # Consume the announcement so the next check starts fresh.
            self.last_inv = None
            self.last_headers = None
            return success
    # Syncing helpers
    def wait_for_block(self, blockhash, timeout=60):
        test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
        assert(wait_until(test_function, timeout=timeout))
        return
    def wait_for_getheaders(self, timeout=60):
        test_function = lambda: self.last_getheaders != None
        assert(wait_until(test_function, timeout=timeout))
        return
    def wait_for_getdata(self, hash_list, timeout=60):
        # An empty expectation is trivially satisfied.
        if hash_list == []:
            return
        test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
        assert(wait_until(test_function, timeout=timeout))
        return
    def wait_for_disconnect(self, timeout=60):
        test_function = lambda: self.disconnected
        assert(wait_until(test_function, timeout=timeout))
        return
    def wait_for_block_announcement(self, block_hash, timeout=60):
        # Wait until block_hash is announced via either inv or headers.
        test_function = lambda: self.last_blockhash_announced == block_hash
        assert(wait_until(test_function, timeout=timeout))
        return
    def send_header_for_blocks(self, new_blocks):
        # Announce the given blocks to the node via a headers message.
        headers_message = msg_headers()
        headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
        self.send_message(headers_message)
    def send_getblocks(self, locator):
        getblocks_message = msg_getblocks()
        getblocks_message.locator.vHave = locator
        self.send_message(getblocks_message)
# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
# "sendheaders" message.
class InvNode(BaseNode):
    """Control peer: never sends "sendheaders", so it should only see invs."""

    def __init__(self):
        super().__init__()
# TestNode: This peer is the one we use for most of the testing.
class TestNode(BaseNode):
    """The peer used for most of the announcement testing."""

    def __init__(self):
        super().__init__()
class SendHeadersTest(BitcoinTestFramework):
    """Exercise the block-announcement scenarios from the module docstring.

    Two p2p connections are made to node0: inv_node (a control peer that
    never sends "sendheaders", so it must only ever see inv announcements)
    and test_node (the peer whose headers-announcement behavior is tested).
    Node1 exists solely to create reorgs.
    """
    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 2
    def setup_network(self):
        self.nodes = []
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
        connect_nodes(self.nodes[0], 1)
    # mine count blocks and return the new tip
    def mine_blocks(self, count):
        # Clear out last block announcement from each p2p listener
        [ x.clear_last_announcement() for x in self.p2p_connections ]
        self.nodes[0].generate(count)
        return int(self.nodes[0].getbestblockhash(), 16)
    # mine a reorg that invalidates length blocks (replacing them with
    # length+1 blocks).
    # Note: we clear the state of our p2p connections after the
    # to-be-reorged-out blocks are mined, so that we don't break later tests.
    # return the list of block hashes newly mined
    def mine_reorg(self, length):
        self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
        sync_blocks(self.nodes, wait=0.1)
        for x in self.p2p_connections:
            x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
            x.clear_last_announcement()
        tip_height = self.nodes[1].getblockcount()
        hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
        self.nodes[1].invalidateblock(hash_to_invalidate)
        all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
        sync_blocks(self.nodes, wait=0.1)
        return [int(x, 16) for x in all_hashes]
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        inv_node = InvNode()
        test_node = TestNode()
        self.p2p_connections = [inv_node, test_node]
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
        # Set nServices to 0 for test_node, so no block download will occur outside of
        # direct fetching
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
        inv_node.add_connection(connections[0])
        test_node.add_connection(connections[1])
        NetworkThread().start() # Start up network handling in another thread
        # Test logic begins here
        inv_node.wait_for_verack()
        test_node.wait_for_verack()
        tip = int(self.nodes[0].getbestblockhash(), 16)
        # PART 1
        # 1. Mine a block; expect inv announcements each time
        self.log.info("Part 1: headers don't start before sendheaders message...")
        for i in range(4):
            old_tip = tip
            tip = self.mine_blocks(1)
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(inv=[tip]), True)
            # Try a few different responses; none should affect next announcement
            if i == 0:
                # first request the block
                test_node.get_data([tip])
                test_node.wait_for_block(tip, timeout=5)
            elif i == 1:
                # next try requesting header and block
                test_node.get_headers(locator=[old_tip], hashstop=tip)
                test_node.get_data([tip])
                test_node.wait_for_block(tip)
                test_node.clear_last_announcement() # since we requested headers...
            elif i == 2:
                # this time announce own block via headers
                height = self.nodes[0].getblockcount()
                last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
                block_time = last_time + 1
                new_block = create_block(tip, create_coinbase(height+1), block_time)
                new_block.solve()
                test_node.send_header_for_blocks([new_block])
                test_node.wait_for_getdata([new_block.sha256], timeout=5)
                test_node.send_message(msg_block(new_block))
                test_node.sync_with_ping() # make sure this block is processed
                inv_node.clear_last_announcement()
                test_node.clear_last_announcement()
        self.log.info("Part 1: success!")
        self.log.info("Part 2: announce blocks with headers after sendheaders message...")
        # PART 2
        # 2. Send a sendheaders message and test that headers announcements
        # commence and keep working.
        test_node.send_message(msg_sendheaders())
        prev_tip = int(self.nodes[0].getbestblockhash(), 16)
        test_node.get_headers(locator=[prev_tip], hashstop=0)
        test_node.sync_with_ping()
        # Now that we've synced headers, headers announcements should work
        tip = self.mine_blocks(1)
        assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
        assert_equal(test_node.check_last_announcement(headers=[tip]), True)
        height = self.nodes[0].getblockcount()+1
        block_time += 10 # Advance far enough ahead
        for i in range(10):
            # Mine i blocks, and alternate announcing either via
            # inv (of tip) or via headers. After each, new blocks
            # mined by the node should successfully be announced
            # with block header, even though the blocks are never requested
            for j in range(2):
                blocks = []
                for b in range(i+1):
                    blocks.append(create_block(tip, create_coinbase(height), block_time))
                    blocks[-1].solve()
                    tip = blocks[-1].sha256
                    block_time += 1
                    height += 1
                if j == 0:
                    # Announce via inv
                    test_node.send_block_inv(tip)
                    test_node.wait_for_getheaders(timeout=5)
                    # Should have received a getheaders now
                    test_node.send_header_for_blocks(blocks)
                    # Test that duplicate inv's won't result in duplicate
                    # getdata requests, or duplicate headers announcements
                    [ inv_node.send_block_inv(x.sha256) for x in blocks ]
                    test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
                    inv_node.sync_with_ping()
                else:
                    # Announce via headers
                    test_node.send_header_for_blocks(blocks)
                    test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
                    # Test that duplicate headers won't result in duplicate
                    # getdata requests (the check is further down)
                    inv_node.send_header_for_blocks(blocks)
                    inv_node.sync_with_ping()
                [ test_node.send_message(msg_block(x)) for x in blocks ]
                test_node.sync_with_ping()
                inv_node.sync_with_ping()
                # This block should not be announced to the inv node (since it also
                # broadcast it)
                assert_equal(inv_node.last_inv, None)
                assert_equal(inv_node.last_headers, None)
            tip = self.mine_blocks(1)
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(headers=[tip]), True)
            height += 1
            block_time += 1
        self.log.info("Part 2: success!")
        self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
        # PART 3.  Headers announcements can stop after large reorg, and resume after
        # getheaders or inv from peer.
        for j in range(2):
            # First try mining a reorg that can propagate with header announcement
            new_block_hashes = self.mine_reorg(length=7)
            tip = new_block_hashes[-1]
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
            block_time += 8
            # Mine a too-large reorg, which should be announced with a single inv
            new_block_hashes = self.mine_reorg(length=8)
            tip = new_block_hashes[-1]
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(inv=[tip]), True)
            block_time += 9
            # BUG FIX: was "%02x", which drops the block hash's leading zeros
            # and hands getblock an invalid hash; "%064x" pads to the full
            # 32-byte hex representation.
            fork_point = self.nodes[0].getblock("%064x" % new_block_hashes[0])["previousblockhash"]
            fork_point = int(fork_point, 16)
            # Use getblocks/getdata
            test_node.send_getblocks(locator = [fork_point])
            assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
            test_node.get_data(new_block_hashes)
            test_node.wait_for_block(new_block_hashes[-1])
            for i in range(3):
                # Mine another block, still should get only an inv
                tip = self.mine_blocks(1)
                assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
                assert_equal(test_node.check_last_announcement(inv=[tip]), True)
                if i == 0:
                    # Just get the data -- shouldn't cause headers announcements to resume
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 1:
                    # Send a getheaders message that shouldn't trigger headers announcements
                    # to resume (best header sent will be too old)
                    test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 2:
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                    # This time, try sending either a getheaders to trigger resumption
                    # of headers announcements, or mine a new block and inv it, also
                    # triggering resumption of headers announcements.
                    if j == 0:
                        test_node.get_headers(locator=[tip], hashstop=0)
                        test_node.sync_with_ping()
                    else:
                        test_node.send_block_inv(tip)
                        test_node.sync_with_ping()
            # New blocks should now be announced with header
            tip = self.mine_blocks(1)
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(headers=[tip]), True)
        self.log.info("Part 3: success!")
        self.log.info("Part 4: Testing direct fetch behavior...")
        tip = self.mine_blocks(1)
        height = self.nodes[0].getblockcount() + 1
        last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
        block_time = last_time + 1
        # Create 2 blocks. Send the blocks, then send the headers.
        blocks = []
        for b in range(2):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
            inv_node.send_message(msg_block(blocks[-1]))
        inv_node.sync_with_ping() # Make sure blocks are processed
        test_node.last_getdata = None
        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        # should not have received any getdata messages
        with mininode_lock:
            assert_equal(test_node.last_getdata, None)
        # This time, direct fetch should work
        blocks = []
        for b in range(3):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
        [ test_node.send_message(msg_block(x)) for x in blocks ]
        test_node.sync_with_ping()
        # Now announce a header that forks the last two blocks
        tip = blocks[0].sha256
        height -= 1
        blocks = []
        # Create extra blocks for later
        for b in range(20):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
        # Announcing one block on fork should not trigger direct fetch
        # (less work than tip)
        test_node.last_getdata = None
        test_node.send_header_for_blocks(blocks[0:1])
        test_node.sync_with_ping()
        with mininode_lock:
            assert_equal(test_node.last_getdata, None)
        # Announcing one more block on fork should trigger direct fetch for
        # both blocks (same work as tip)
        test_node.send_header_for_blocks(blocks[1:2])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
        # Announcing 16 more headers should trigger direct fetch for 14 more
        # blocks
        test_node.send_header_for_blocks(blocks[2:18])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
        # Announcing 1 more header should not trigger any response
        test_node.last_getdata = None
        test_node.send_header_for_blocks(blocks[18:19])
        test_node.sync_with_ping()
        with mininode_lock:
            assert_equal(test_node.last_getdata, None)
        self.log.info("Part 4: success!")
        # Now deliver all those blocks we announced.
        [ test_node.send_message(msg_block(x)) for x in blocks ]
        self.log.info("Part 5: Testing handling of unconnecting headers")
        # First we test that receipt of an unconnecting header doesn't prevent
        # chain sync.
        for i in range(10):
            test_node.last_getdata = None
            blocks = []
            # Create two more blocks.
            for j in range(2):
                blocks.append(create_block(tip, create_coinbase(height), block_time))
                blocks[-1].solve()
                tip = blocks[-1].sha256
                block_time += 1
                height += 1
            # Send the header of the second block -> this won't connect.
            with mininode_lock:
                test_node.last_getheaders = None
            test_node.send_header_for_blocks([blocks[1]])
            test_node.wait_for_getheaders(timeout=1)
            test_node.send_header_for_blocks(blocks)
            test_node.wait_for_getdata([x.sha256 for x in blocks])
            [ test_node.send_message(msg_block(x)) for x in blocks ]
            test_node.sync_with_ping()
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
        blocks = []
        # Now we test that if we repeatedly don't send connecting headers, we
        # don't go into an infinite loop trying to get them to connect.
        MAX_UNCONNECTING_HEADERS = 10
        for j in range(MAX_UNCONNECTING_HEADERS+1):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
        for i in range(1, MAX_UNCONNECTING_HEADERS):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_getheaders = None
            test_node.send_header_for_blocks([blocks[i]])
            test_node.wait_for_getheaders(timeout=1)
        # Next header will connect, should re-set our count:
        test_node.send_header_for_blocks([blocks[0]])
        # Remove the first two entries (blocks[1] would connect):
        blocks = blocks[2:]
        # Now try to see how many unconnecting headers we can send
        # before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
        for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_getheaders = None
            test_node.send_header_for_blocks([blocks[i%len(blocks)]])
            test_node.wait_for_getheaders(timeout=1)
        # Eventually this stops working.
        with mininode_lock:
            # BUG FIX: previously assigned self.last_getheaders on the test
            # framework object, a no-op; the peer's recorded state is what
            # the surrounding logic uses.
            test_node.last_getheaders = None
        test_node.send_header_for_blocks([blocks[-1]])
        # Should get disconnected
        test_node.wait_for_disconnect()
        with mininode_lock:
            test_node.last_getheaders = True  # BUG FIX: was set on self (the framework)
        self.log.info("Part 5: success!")
        # Finally, check that the inv node never received a getdata request,
        # throughout the test
        assert_equal(inv_node.last_getdata, None)
# Script entry point: run the test via the framework's standard main().
if __name__ == '__main__':
    SendHeadersTest().main()
|
|
# -*- coding: utf-8 -*-
"""
pgshift: write a Postgres pg_dump .sql file to Redshift via S3
"""
from __future__ import division, print_function
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import gzip
import json
import math
import os
import re
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
import uuid
from boto.s3.connection import S3Connection
import pandas as pd
import psycopg2
def get_rows(path):
    """
    Parse the COPY ... FROM stdin block of a pg_dump .sql file into a
    pandas DataFrame whose columns are the copied column names.

    Parameters
    ----------
    path: string
        Path to pg_dump .sql file

    Returns
    -------
    pandas.DataFrame
        Tab-separated rows from the COPY block, with the COPY column list
        used as column names.
    """
    # Raw string: '\(' is regex syntax, not a Python escape (the original
    # non-raw literal relied on an invalid escape sequence).
    col_matcher = re.compile(r'COPY.+\((.+)\) FROM stdin;')
    str_blob = ''
    read_lines = False
    col_keys = []  # initialized so a dump with no COPY block fails clearly
    with open(path, 'r') as fread:
        for line in fread:
            match = col_matcher.match(line)
            # pg_dump ends COPY data with a line containing just "\." —
            # the backslash is escaped explicitly here instead of relying
            # on the invalid '\.' escape.
            if '\\.\n' in line:
                read_lines = False
            if read_lines:
                str_blob = ''.join([str_blob, line])
            if match:
                # Start of the COPY block: remember the column names and
                # accumulate data rows from the following line onward.
                read_lines = True
                col_keys = [c.strip() for c in match.groups()[0].split(',')]
    tbl = pd.read_table(StringIO(str_blob), delimiter='\t',
                        names=col_keys)
    return tbl
def process(filepath):
    """
    Process a given pg_dump into a pgshift result that you can use to
    write to S3, or perform a COPY statement in Redshift

    Parameters
    ----------
    filepath: str
        Path to pg_dump .sql file

    Returns
    -------
    PGShift
        Wrapper around the DataFrame parsed from the dump's COPY block.
    """
    return PGShift(get_rows(filepath))
def chunk_dataframe(df, num_chunks):
    """Split *df* into consecutive slices, returned as a list.

    The base slice size is ``len(df) // num_chunks`` (at least 1); the final
    slice absorbs any remainder so the pieces cover the whole frame in order.
    """
    total = len(df)
    # Base slice size; at least 1 so tiny frames still yield a slice.
    size = (total // num_chunks) or 1
    boundaries = list(range(size, total, size)) or [size]
    if total == num_chunks:
        boundaries.append(total)
    # Stretch the final boundary to cover the remainder of the frame.
    boundaries[-1] = total
    pieces = []
    start = 0
    for stop in boundaries:
        pieces.append(df[start:stop])
        start = stop
    return pieces
class PGShift(object):
    def __init__(self, table):
        # table: pandas DataFrame parsed from the pg_dump COPY block
        # (as produced by get_rows); kept as-is for later chunking/upload.
        self.table = table
        # URL of the manifest written to S3; None until an upload sets it
        # (presumably by put_to_s3 — confirm against the full class).
        self.manifest_url = None
def put_to_s3(self, bucket_name, keypath, chunks=1, aws_access_key_id=None,
aws_secret_access_key=None, mandatory_manifest=True):
"""
Will put the result table to S3 as a gzipped CSV with an accompanying
.manifest file. The aws keys are not required if you have environmental
params set for boto to pick up:
http://boto.readthedocs.org/en/latest/s3_tut.html#creating-a-connection
Each call to this function will generate a unique UUID for that
particular run.
Ex: If bucket is 'mybucket', keypath is 'pgshift/temp/',
and chunks is 2, then will write the following:
s3://mybucket/pgshift/temp/pgdump_uuid_0.gz
s3://mybucket/pgshift/temp/pgdump_uuid_1.gz
s3://mybucket/pgshift/temp/pgtemp.manifest
Parameters
----------
bucket_name: str
S3 bucket name
keypath: str
Key path for writing file
chunks: int, default 1
Number of gzipped chunks to write. Upload speed
is *much* faster if chunks = multiple-of-slices. Ex: DW1.XL nodes
have 2 slices per node, so if running 2 nodes you will want
chunks=4, 8, etc
aws_access_key_id: str, default None
aws_secret_access_key: str, default None
mandatory_manifest: bool, default True
Should .manifest entries be mandatory?
"""
if aws_access_key_id and aws_secret_access_key:
self.conn = S3Connection(aws_access_key_id, aws_secret_access_key)
else:
self.conn = S3Connection()
self.bucket = self.conn.get_bucket(bucket_name)
self.manifest = {'entries': []}
self.generated_keys = []
table_chunks = chunk_dataframe(self.table, chunks)
batch_uuid = str(uuid.uuid4())
for idx, chunk in enumerate(table_chunks):
zipname = '_'.join(['pgdump', batch_uuid, str(idx)]) + '.gz'
fp, gzfp = StringIO(), StringIO()
csvd = chunk.to_csv(fp, index=False, header=False)
fp.seek(0)
url = urljoin(keypath, zipname)
key = self.bucket.new_key(url)
self.generated_keys.append(url)
gzipped = gzip.GzipFile(fileobj=gzfp, mode='w')
gzipped.write(fp.read())
gzipped.close()
gzfp.seek(0)
print('Uploading {}...'.format(self.bucket.name + url))
key.set_contents_from_file(gzfp)
self.manifest['entries'].append({
'url': ''.join(['s3://', self.bucket.name, url]),
'mandatory': mandatory_manifest}
)
manifest_name = 'pgshift_{}.manifest'.format(batch_uuid)
fest_url = urljoin(keypath, manifest_name)
self.generated_keys.append(fest_url)
self.manifest_url = ''.join(['s3://', self.bucket.name, fest_url])
fest_key = self.bucket.new_key(fest_url)
fest_fp = StringIO(json.dumps(self.manifest, sort_keys=True,
indent=4))
fest_fp.seek(0)
print('Uploading manifest file {}...'.format(
self.bucket.name + fest_url))
fest_key.set_contents_from_file(fest_fp)
def clean_up_s3(self):
"""Clean up S3 keys generated in `put_to_s3`"""
for key in self.generated_keys:
print('Deleting {}...'.format(self.bucket.name + key))
self.bucket.delete_key(key)
def copy_to_redshift(self, table_name, aws_access_key_id=None,
aws_secret_access_key=None, database=None, user=None,
password=None, host=None, port=None, sslmode=None):
"""
COPY data from S3 to Redshift using the data and manifest generated
with `put_to_s3`, which must be called first in order to
perform the COPY statement.
Parameters
----------
table_name: str
Table name to copy data to
aws_access_key_id: str
aws_secret_access_key: str
database: str, if None os.environ.get('PGDATABASE')
user: str, if None os.environ.get('PGUSER')
password: str, if None os.environ.get('PGPASSWORD')
host: str, if None os.environ.get('PGHOST')
port: int, if None os.environ.get('PGPORT') or 5439
sslmode: str
sslmode param (ex: 'require', 'prefer', etc)
"""
aws_secret_access_key = (aws_secret_access_key
or os.environ.get('AWS_SECRET_ACCESS_KEY'))
aws_access_key_id = (aws_access_key_id
or os.environ.get('AWS_ACCESS_KEY_ID'))
database = database or os.environ.get('PGDATABASE')
user = user or os.environ.get('PGUSER')
password = password or os.environ.get('PGPASSWORD')
host = host or os.environ.get('PGHOST')
port = port or os.environ.get('PGPORT') or 5439
print('Connecting to Redshift...')
self.conn = psycopg2.connect(database=database, user=user,
password=password, host=host,
port=port, sslmode='require')
self.cur = self.conn.cursor()
query = """COPY {0}
FROM '{1}'
CREDENTIALS 'aws_access_key_id={2};aws_secret_access_key={3}'
MANIFEST
GZIP
CSV;""".format(table_name, self.manifest_url,
aws_access_key_id, aws_secret_access_key)
print("COPYing data from {} into table {}...".format(self.manifest_url,
table_name))
self.cur.execute(query)
self.conn.commit()
self.conn.close()
|
|
#!/usr/bin/env python
# A part of pdfrw (pdfrw.googlecode.com)
# Copyright (C) 2006-2009 Patrick Maupin, Austin, Texas
# MIT license -- See LICENSE.txt for details
'''
The PdfWriter class writes an entire PDF file out to disk.
The writing process is not at all optimized or organized.
An instance of the PdfWriter class has two methods:
addpage(page)
and
write(fname)
addpage() assumes that the pages are part of a valid
tree/forest of PDF objects.
'''
try:
set
except NameError:
from sets import Set as set
from pdfrw.objects import PdfName, PdfArray, PdfDict, IndirectPdfDict
from pdfrw.objects import PdfObject, PdfString
from pdfrw.compress import compress as do_compress
from pdfrw.errors import PdfOutputError, log
# Shared sentinel substituted for "killed" objects that have no
# replacement (see FormatObjects' swapobj handling); marked indirect so
# references to it are emitted as an object reference to 'null'.
NullObject = PdfObject('null')
NullObject.indirect = True
NullObject.Type = 'Null object'
def FormatObjects(f, trailer, version='1.3', compress=True, killobj=(),
                  id=id, isinstance=isinstance, getattr=getattr, len=len,
                  sum=sum, set=set, str=str, basestring=basestring,
                  hasattr=hasattr, repr=repr, enumerate=enumerate,
                  list=list, dict=dict, tuple=tuple,
                  do_compress=do_compress, PdfArray=PdfArray,
                  PdfDict=PdfDict, PdfObject=PdfObject, encode=PdfString.encode):
    ''' FormatObjects performs the actual formatting and disk write.
        Should be a class, was a class, turned into nested functions
        for performace (to reduce attribute lookups).

        Only f, trailer, version, compress, and killobj are real
        parameters; the long tail of keyword arguments (id=id, len=len,
        ...) binds builtins and module globals as local default
        arguments purely as a CPython lookup optimization.

        f       -- file-like object the PDF is written to
        trailer -- trailer PdfDict (with .Root) to serialize
        killobj -- mapping of id(obj) -> obj for old catalog/pages/
                   trailer objects that must not be written out
    '''
    def add(obj):
        ''' Add an object to our list, if it's an indirect
            object. Just format it if not.

            Returns formatted text for direct objects, or an
            "<objnum> 0 R" reference for indirect ones (the object
            itself is scheduled on the deferred list).
        '''
        # Can't hash dicts, so just hash the object ID
        objid = id(obj)
        # Automatically set stream objects to indirect
        if isinstance(obj, PdfDict):
            indirect = obj.indirect or (obj.stream is not None)
        else:
            indirect = getattr(obj, 'indirect', False)
        if not indirect:
            if objid in visited:
                # Direct object referenced from two places would recurse
                # forever; copy it so each reference formats its own copy.
                log.warning(('Replicating direct %s object,'
                            ' should be indirect for optimal file size')
                            % type(obj))
                obj = type(obj)(obj)
                objid = id(obj)
            visiting(objid)
            result = format_obj(obj)
            leaving(objid)
            return result
        objnum = indirect_dict_get(objid)
        # If we haven't seen the object yet, we need to
        # add it to the indirect object list.
        if objnum is None:
            swapped = swapobj(objid)
            if swapped is not None:
                # Killed object: redirect the reference to its
                # replacement (new catalog/pages/trailer or NullObject)
                # and memoize the mapping under the old id.
                old_id = objid
                obj = swapped
                objid = id(obj)
                objnum = indirect_dict_get(objid)
                if objnum is not None:
                    indirect_dict[old_id] = objnum
                # NOTE(review): if the swapped target has no objnum yet
                # this emits 'None 0 R'; presumably the targets are
                # always registered by the initial format_obj(trailer)
                # pass — confirm.
                return '%s 0 R' % objnum
            objnum = len(objlist) + 1
            objlist_append(None)   # placeholder; filled by format_deferred
            indirect_dict[objid] = objnum
            deferred.append((objnum - 1, obj))
        return '%s 0 R' % objnum
    def format_array(myarray, formatter):
        # Format array data into semi-readable ASCII
        if sum([len(x) for x in myarray]) <= 70:
            return formatter % space_join(myarray)
        return format_big(myarray, formatter)
    def format_big(myarray, formatter):
        # Wrap the element list so each output line stays under ~71 chars
        bigarray = []
        count = 1000000   # oversized so the first element opens a new line
        for x in myarray:
            lenx = len(x) + 1
            count += lenx
            if count > 71:
                subarray = []
                bigarray.append(subarray)
                count = lenx
            subarray.append(x)
        return formatter % lf_join([space_join(x) for x in bigarray])
    def format_obj(obj):
        ''' format PDF object data into semi-readable ASCII.
            May mutually recurse with add() -- add() will
            return references for indirect objects, and add
            the indirect object to the list.
        '''
        while 1:
            if isinstance(obj, (list, dict, tuple)):
                if isinstance(obj, PdfArray):
                    myarray = [add(x) for x in obj]
                    return format_array(myarray, '[%s]')
                elif isinstance(obj, PdfDict):
                    if compress and obj.stream:
                        do_compress([obj])
                    myarray = []
                    # Sort keys for deterministic output
                    dictkeys = [str(x) for x in obj.keys()]
                    dictkeys.sort()
                    for key in dictkeys:
                        myarray.append(key)
                        myarray.append(add(obj[key]))
                    result = format_array(myarray, '<<%s>>')
                    stream = obj.stream
                    if stream is not None:
                        result = '%s\nstream\n%s\nendstream' % (result, stream)
                    return result
                # Plain list/tuple/dict: coerce into the corresponding
                # Pdf container and loop again
                obj = (PdfArray, PdfDict)[isinstance(obj, dict)](obj)
                continue
            elif isinstance(obj, float):
                return ('%f' % obj).rstrip('0')
            if not hasattr(obj, 'indirect') and isinstance(obj, basestring):
                return encode(obj)
            return str(getattr(obj, 'encoded', obj))
    def format_deferred():
        # Drain the work queue; formatting may append more entries
        while deferred:
            index, obj = deferred.pop()
            objlist[index] = format_obj(obj)
    # id(obj) -> PDF object number bookkeeping
    indirect_dict = {}
    indirect_dict_get = indirect_dict.get
    # objlist[i] holds the formatted text of object number i+1
    objlist = []
    objlist_append = objlist.append
    # Cycle detection for direct objects currently being formatted
    visited = set()
    visiting = visited.add
    leaving = visited.remove
    space_join = ' '.join
    lf_join = '\n  '.join
    f_write = f.write
    deferred = []
    # Don't reference old catalog or pages objects --
    # swap references to new ones.
    # Stage 1: map the old object's /Type to its replacement.
    swapobj = {PdfName.Catalog: trailer.Root,
               PdfName.Pages: trailer.Root.Pages,
               None: trailer}.get
    # Stage 2: resolve each killed object id to its replacement.
    swapobj = [(objid, swapobj(obj.Type)) for objid, obj in
               killobj.iteritems()]
    # Stage 3: unmatched types fall back to the shared NullObject.
    swapobj = dict((objid, obj is None and NullObject or obj) for objid, obj in
                   swapobj).get
    for objid in killobj:
        assert swapobj(objid) is not None
    # The first format of trailer gets all the information,
    # but we throw away the actual trailer formatting.
    format_obj(trailer)
    # Keep formatting until we're done.
    # (Used to recurse inside format_obj for this, but
    # hit system limit.)
    format_deferred()
    # Now we know the size, so we update the trailer dict
    # and get the formatted data.
    trailer.Size = PdfObject(len(objlist) + 1)
    trailer = format_obj(trailer)
    # Now we have all the pieces to write out to the file.
    # Keep careful track of the counts while we do it so
    # we can correctly build the cross-reference.
    # The high-bit comment bytes mark the file as binary per the PDF spec.
    header = '%%PDF-%s\n%%\xe2\xe3\xcf\xd3\n' % version
    f_write(header)
    offset = len(header)
    offsets = [(0, 65535, 'f')]   # mandatory free-list head entry
    offsets_append = offsets.append
    for i, x in enumerate(objlist):
        objstr = '%s 0 obj\n%s\nendobj\n' % (i + 1, x)
        offsets_append((offset, 0, 'n'))
        offset += len(objstr)
        f_write(objstr)
    # Cross-reference table: fixed-width 20-byte entries
    f_write('xref\n0 %s\n' % len(offsets))
    for x in offsets:
        f_write('%010d %05d %s\r\n' % x)
    f_write('trailer\n\n%s\nstartxref\n%s\n%%%%EOF\n' % (trailer, offset))
class PdfWriter(object):
    ''' Assembles pages into a new PDF document.

        Usage: addpage()/addpages() to collect pages, then write().
        The `trailer` property lazily builds the catalog/page-tree
        structure from the collected pages.
    '''
    # Cached trailer dict; rebuilt on demand after addpage() invalidates it
    _trailer = None
    def __init__(self, version='1.3', compress=False):
        # version  -- PDF version string for the file header
        # compress -- passed to FormatObjects to flate-compress streams
        self.pagearray = PdfArray()
        self.compress = compress
        self.version = version
        # id(obj) -> obj for old page-tree ancestors that must not be
        # written to the output (FormatObjects swaps in replacements)
        self.killobj = {}
    def addpage(self, page):
        ''' Append one source page (/Type /Page dict) to the output.

            Inheritable attributes (Resources, MediaBox, ...) are copied
            down onto the new page dict, and the page's old ancestors
            are scheduled for killing. Returns self for chaining.
        '''
        self._trailer = None   # invalidate cached trailer
        if page.Type != PdfName.Page:
            raise PdfOutputError('Bad /Type: Expected %s, found %s'
                                 % (PdfName.Page, page.Type))
        inheritable = page.inheritable # searches for resources
        self.pagearray.append(
            IndirectPdfDict(
                page,
                Resources=inheritable.Resources,
                MediaBox=inheritable.MediaBox,
                CropBox=inheritable.CropBox,
                Rotate=inheritable.Rotate,
            )
        )
        # Add parents in the hierarchy to objects we
        # don't want to output
        killobj = self.killobj
        obj = page.Parent
        while obj is not None:
            objid = id(obj)
            if objid in killobj:
                break
            killobj[objid] = obj
            obj = obj.Parent
        return self
    def addpages(self, pagelist):
        ''' Append every page in `pagelist`; returns self for chaining. '''
        for page in pagelist:
            self.addpage(page)
        return self
    def _get_trailer(self):
        ''' Build (and cache) the trailer with a fresh catalog and page
            tree wrapping the collected pages. '''
        trailer = self._trailer
        if trailer is not None:
            return trailer
        # Create the basic object structure of the PDF file
        trailer = PdfDict(
            Root=IndirectPdfDict(
                Type=PdfName.Catalog,
                Pages=IndirectPdfDict(
                    Type=PdfName.Pages,
                    Count=PdfObject(len(self.pagearray)),
                    Kids=self.pagearray
                )
            )
        )
        # Make all the pages point back to the page dictionary and
        # ensure they are indirect references
        pagedict = trailer.Root.Pages
        for page in pagedict.Kids:
            page.Parent = pagedict
            page.indirect = True
        self._trailer = trailer
        return trailer
    def _set_trailer(self, trailer):
        # Allow callers to substitute a prebuilt trailer
        self._trailer = trailer
    trailer = property(_get_trailer, _set_trailer)
    def write(self, fname, trailer=None):
        ''' Serialize everything to `fname` (a path or a writable
            file-like object; the latter is not closed here). '''
        trailer = trailer or self.trailer
        # Dump the data. We either have a filename or a preexisting
        # file object.
        preexisting = hasattr(fname, 'write')
        f = preexisting and fname or open(fname, 'wb')
        FormatObjects(f, trailer, self.version, self.compress, self.killobj)
        if not preexisting:
            f.close()
if __name__ == '__main__':
    # Smoke test (Python 2 print-statement syntax): copy every page of
    # source.pdf into result.pdf with debug logging enabled.
    import logging
    log.setLevel(logging.DEBUG)
    import pdfreader
    x = pdfreader.PdfReader('source.pdf')
    y = PdfWriter()
    for i, page in enumerate(x.pages):
        print ' Adding page', i + 1, '\r',
        y.addpage(page)
    print
    y.write('result.pdf')
    print
|
|
# Copyright (c) 2010 Ross Kinder. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This script downloads and builds the third party dependencies for Windows.
# For each dependency do roughly the following steps:
# - download and extract the source code
# - patch the source and build environment as needed
# - force the runtime library configuration to be /MD or /MDd
# - force the building of static libraries
# - fix bugs
# - build both the Debug and Release versions of the library
# - create an empty file in called <project>/.stamp to indicate success
#
# If the stamp file is present, then the script will not try to build the
# dependency again. If you have trouble with a build, the best approach is to
# remove the source directory before rerunning the script.
#
# The current windows dependencies are:
#
# ================= ===================================================
# Name Purpose
# ================= ===================================================
# Google Gtest Unittests
# Google Gmock Unittests
# ================= ===================================================
#
import urllib
import os.path
import tarfile
import subprocess
import os
import stat
import shutil
import zipfile
import xml.dom.minidom
import sys
from os.path import isfile, isdir, join as pj, exists
import hashlib
import optparse
# To save typing:
# pj = os.path.join
# wd = working directory
wd = os.path.dirname(os.path.abspath(__file__))
class XmlEditor(object):
    """Load, tweak, and save an XML file (Visual Studio project files).

    Python 2 only (uses the `file` builtin and print statements).
    """
    def __init__(self, path):
        print "[xml]", path
        self.path = path
        content = file(path, "rb").read()
        # minidom cannot decode shift_jis; drop the declaration so the
        # (ASCII-compatible) content parses.
        content = content.replace('<?xml version="1.0" encoding="shift_jis"?>', '')
        self.dom = xml.dom.minidom.parseString(content)
    def Save(self):
        # Files extracted from release tarballs are often read-only;
        # clear the bit before rewriting in place.
        os.chmod(self.path, stat.S_IWRITE)
        self.dom.writexml(file(self.path, "wb"))
def rm(path):
    """Recursively delete `path` (file or directory), coping with
    Windows quirks: clears the read-only bit, and retries deletions
    that fail because a handle is still closing.
    Python 2 only (`except WindowsError, err` syntax).
    """
    if not exists(path):
        return
    if isdir(path):
        for sub_path in os.listdir(path):
            rm(pj(path, sub_path))
        print "[rm]", path
        # NOTE(review): this spins until rmdir succeeds; a held handle
        # (e.g. an open Explorer window) makes it loop forever.
        while isdir(path):
            try:
                os.rmdir(path)
            except WindowsError, err:
                pass
    else:
        print "[rm]", path
        os.chmod(path, stat.S_IWRITE)
        # Bounded retry (10 attempts) for transient sharing violations
        for i in range(10):
            try:
                os.unlink(path)
            except:
                continue
            break
class Builder(object):
# True if the dependencies should be built with /MT or /MTd
# False if the dependencies should be build with /MD or /MDd
STATIC_RUNTIME = False
MSBUILD_COMMAND = "msbuild"
def __init__(self):
pass
def Fetch(self):
raise NotImplemented()
def Patch(self):
raise NotImplemented()
def Build(self):
raise NotImplemented()
def WriteStamp(self):
file(pj(self.path, ".stamp"), "w").write("")
def HasStamp(self):
return isfile(pj(self.path, ".stamp"))
#---- Helper Functions -------------------------------------------------------
def Download(self, url, checksum):
path = url.split("/")[-1]
path = pj(wd, path)
if isfile(path):
if hashlib.md5(file(path, "rb").read()).hexdigest() == checksum:
print "[ ok ]", url
return
print "[download]", url
urllib.urlretrieve(url, path)
assert hashlib.md5(file(path, "rb").read()).hexdigest() == checksum
def ExtractTarGz(self, path, out = None):
if out is None: out = wd
if not exists(path): path = pj(wd, path)
print "[extract]", path
tar = tarfile.open(path, mode="r:gz")
tar.extractall(out)
def ExtractZip(self, path, out = None):
if out is None: out = wd
if not exists(path): path = pj(wd, path)
print "[extract]", path
archive = zipfile.ZipFile(path, mode = "r")
archive.extractall(out)
def UpgradeVisualStudioFiles(self, root = None):
if root is not None: root = self.path
for dirpath, dirnames, filenames in os.walk(root):
for filename in filenames:
filename = pj(dirpath, filename)
if filename.endswith(".vcproj"):
self.UpgradeVisualStudioFile(filename)
def UpgradeVisualStudioFile(self, filename):
if filename.endswith(".sln"):
os.chmod(filename, stat.S_IWRITE)
subprocess.call(["devenv", "/upgrade", filename])
return
try:
xml = XmlEditor(filename)
except:
print "[WARNING] Cannot parse XML, upgrading anyway: " + filename
subprocess.call(["devenv", "/upgrade", filename])
return
for el in xml.dom.getElementsByTagName("VisualStudioProject"):
if float(el.getAttribute("Version")) >= 10.0:
continue
print "[upgrade]", filename
os.chmod(filename, stat.S_IWRITE)
subprocess.call(["devenv", "/upgrade", filename])
def SetRuntimeLibrary(self, filename = None):
if filename is None:
filename = self.path
if isdir(filename):
for dirpath, dirnames, filenames in os.walk(filename):
for filename in filenames:
filename = pj(dirpath, filename)
if filename.endswith(".vcproj") or filename.endswith(".vsprops"):
print '[ setruntime ]', filename
self.SetRuntimeLibrary(filename)
return
xml = XmlEditor(filename)
for filter in xml.dom.getElementsByTagName("Tool"):
if filter.getAttribute("Name") != u'VCCLCompilerTool':
continue
if self.STATIC_RUNTIME:
if filter.getAttribute("RuntimeLibrary") == u"2":
filter.setAttribute("RuntimeLibrary", "0")
elif filter.getAttribute("RuntimeLibrary") == u"3":
filter.setAttribute("RuntimeLibrary", "1")
else:
if filter.getAttribute("RuntimeLibrary") == u"0":
filter.setAttribute("RuntimeLibrary", "2")
elif filter.getAttribute("RuntimeLibrary") == u"1":
filter.setAttribute("RuntimeLibrary", "3")
xml.Save()
def BuildSolution(self, path, target=None, configurations=None, args=None):
cmd = [self.MSBUILD_COMMAND, path]
if target is not None:
cmd += ["/t:" + target]
if args:
cmd += args
if configurations is None:
configurations = ["Debug", "Release"]
for configuration in configurations:
subprocess.check_call(cmd + ["/p:Configuration=" + configuration],
cwd = pj(wd))
class GtestBuilder(Builder):
    """Fetch, patch, and build Google Test 1.5.0."""
    path = pj(wd, "gtest-1.5.0")
    vcproj = pj(path, "msvc", "gtest.vcproj")
    def Fetch(self):
        self.Download("http://googletest.googlecode.com/files/gtest-1.5.0.tar.gz",
            "7e27f5f3b79dd1ce9092e159cdbd0635")
    def Patch(self):
        # Fresh extraction every time; then upgrade the VS files and
        # normalize the runtime library setting.
        rm(self.path)
        self.ExtractTarGz("gtest-1.5.0.tar.gz")
        self.UpgradeVisualStudioFiles(pj(self.path, "msvc"))
        xml = XmlEditor(self.vcproj)
        # Make Debug and Release produce the same library name
        # (gtest.lib) instead of gtestd.lib for Debug.
        for el in xml.dom.getElementsByTagName("Tool"):
            if el.getAttribute("Name") != u"VCLibrarianTool":
                continue
            el.setAttribute("OutputFile",
                el.getAttribute("OutputFile").replace("gtestd.lib", "gtest.lib"))
        xml.Save()
        self.SetRuntimeLibrary(pj(self.path, "msvc"))
    def Build(self):
        self.BuildSolution(pj(self.path, "msvc", "gtest.sln"))
class GmockBuilder(Builder):
    """Fetch, patch, and build Google Mock 1.5.0."""
    path = pj(wd, "gmock-1.5.0")
    def Fetch(self):
        self.Download("http://googlemock.googlecode.com/files/gmock-1.5.0.tar.gz",
            "d9e62a4702c300ae9c87284ca8da7fac")
    def Patch(self):
        # Fresh extraction, VS upgrade, runtime normalization (no
        # library-name tweak needed, unlike gtest).
        rm(self.path)
        self.ExtractTarGz("gmock-1.5.0.tar.gz")
        self.UpgradeVisualStudioFiles(pj(self.path, "msvc"))
        self.SetRuntimeLibrary(pj(self.path, "msvc"))
    def Build(self):
        self.BuildSolution(pj(self.path, "msvc", "gmock.sln"))
# Registry of all third-party dependencies, processed in order by main()
builders = [
    GtestBuilder(),
    GmockBuilder(),
]
def main(argv):
    """Fetch, patch, and build every unstamped dependency.

    --rebuild/--clean remove existing source trees first; --clean stops
    there. Returns 1 (without building) on non-Windows hosts, otherwise
    None.
    """
    global builders
    parser = optparse.OptionParser()
    parser.add_option("--build", action="store_true")
    parser.add_option("--rebuild", action="store_true")
    parser.add_option("--clean", action="store_true")
    parser.add_option("--Debug", action="store_true")
    parser.add_option("--Release", action="store_true")
    parser.add_option("--msbuild", default="msbuild")
    parser.add_option("--static-runtime", action="store_true", default=False)
    parser.add_option("--dll-runtime", action="store_false",
        dest="static_runtime")
    (options, args) = parser.parse_args(argv)
    if os.name != 'nt':
        print >>sys.stderr, "This program should only be used to build the "\
            "Windows dependencies."
        # BUGFIX: previously fell through and ran the (devenv/msbuild)
        # build steps anyway; bail out instead.
        return 1
    Builder.STATIC_RUNTIME = options.static_runtime
    Builder.MSBUILD_COMMAND = options.msbuild
    if options.rebuild or options.clean:
        for builder in builders:
            rm(builder.path)
        if options.clean:
            return
    # Skip anything already stamped as successfully built
    builders = [builder for builder in builders if not builder.HasStamp()]
    for builder in builders:
        builder.Fetch()
    for builder in builders:
        builder.Patch()
    for builder in builders:
        builder.Build()
        builder.WriteStamp()
# NOTE(review): main receives the whole of sys.argv including argv[0];
# optparse then treats the script path as a positional arg (currently
# unused) — harmless, but sys.argv[1:] would be conventional.
if __name__ == "__main__":
    main(sys.argv)
|
|
# canonical.py - functions for converting systems to canonical forms
# RMM, 10 Nov 2012
from .exception import ControlNotImplemented
from .lti import issiso
from .statesp import StateSpace
from .statefbk import ctrb, obsv
from numpy import zeros, shape, poly, iscomplex, hstack, dot, transpose
from numpy.linalg import solve, matrix_rank, eig
__all__ = ['canonical_form', 'reachable_form', 'observable_form', 'modal_form',
'similarity_transform']
def canonical_form(xsys, form='reachable'):
    """Convert a system into canonical form
    Parameters
    ----------
    xsys : StateSpace object
        System to be transformed, with state 'x'
    form : String
        Canonical form for transformation. Chosen from:
          * 'reachable' - reachable canonical form
          * 'observable' - observable canonical form
          * 'modal' - modal canonical form
    Returns
    -------
    zsys : StateSpace object
        System in desired canonical form, with state 'z'
    T : matrix
        Coordinate transformation matrix, z = T * x
    """
    # Dispatch table mapping form names to their transformation functions
    transforms = {
        'reachable': reachable_form,
        'observable': observable_form,
        'modal': modal_form,
    }
    if form not in transforms:
        raise ControlNotImplemented(
            "Canonical form '%s' not yet implemented" % form)
    return transforms[form](xsys)
# Reachable canonical form
def reachable_form(xsys):
    """Convert a system into reachable canonical form
    Parameters
    ----------
    xsys : StateSpace object
        System to be transformed, with state `x`
    Returns
    -------
    zsys : StateSpace object
        System in reachable canonical form, with state `z`
    T : matrix
        Coordinate transformation: z = T * x
    """
    # Canonical forms are only implemented for SISO systems
    if not issiso(xsys):
        raise ControlNotImplemented(
            "Canonical forms for MIMO systems not yet supported")
    # Copy the original so dimensions and metadata carry over
    zsys = StateSpace(xsys)
    n = xsys.states
    # Companion-form realization: unit input vector ...
    zsys.B = zeros(shape(xsys.B))
    zsys.B[0, 0] = 1.0
    # ... and an A whose first row holds the normalized characteristic
    # polynomial coefficients, with ones on the subdiagonal.
    zsys.A = zeros(shape(xsys.A))
    char_poly = poly(xsys.A)
    for col in range(n):
        zsys.A[0, col] = -char_poly[col + 1] / char_poly[0]
    for col in range(n - 1):
        zsys.A[col + 1, col] = 1.0
    # Reachability matrices of the original and canonical realizations
    Wrx = ctrb(xsys.A, xsys.B)
    Wrz = ctrb(zsys.A, zsys.B)
    if matrix_rank(Wrx) != n:
        raise ValueError("System not controllable to working precision.")
    # Tzx = Wrz * inv(Wrx), computed as a matrix right division
    Tzx = solve(Wrx.T, Wrz.T).T
    # Since Wrx was full rank, this should never trigger
    if matrix_rank(Tzx) != n:  # pragma: no cover
        raise ValueError("Transformation matrix singular to working precision.")
    # zsys.C = xsys.C * inv(Tzx), again via right division
    zsys.C = solve(Tzx.T, xsys.C.T).T
    return zsys, Tzx
def observable_form(xsys):
    """Convert a system into observable canonical form
    Parameters
    ----------
    xsys : StateSpace object
        System to be transformed, with state `x`
    Returns
    -------
    zsys : StateSpace object
        System in observable canonical form, with state `z`
    T : matrix
        Coordinate transformation: z = T * x
    Raises
    ------
    ValueError
        If the system is not observable to working precision.
    """
    # Check to make sure we have a SISO system
    if not issiso(xsys):
        raise ControlNotImplemented(
            "Canonical forms for MIMO systems not yet supported")
    # Create a new system, starting with a copy of the old one
    zsys = StateSpace(xsys)
    # Generate the system matrices for the desired canonical form
    zsys.C = zeros(shape(xsys.C))
    zsys.C[0, 0] = 1
    zsys.A = zeros(shape(xsys.A))
    Apoly = poly(xsys.A)          # characteristic polynomial
    for i in range(0, xsys.states):
        zsys.A[i, 0] = -Apoly[i+1] / Apoly[0]
        if (i+1 < xsys.states):
            zsys.A[i, i+1] = 1
    # Compute the observability matrices for each set of states
    Wrx = obsv(xsys.A, xsys.C)
    Wrz = obsv(zsys.A, zsys.C)
    # Explicit observability check, mirroring the controllability check
    # in reachable_form (previously an unobservable system surfaced as
    # an opaque LinAlgError from solve(), or worse, a garbage transform).
    if matrix_rank(Wrx) != xsys.states:
        raise ValueError("System not observable to working precision.")
    # Transformation from one form to another
    Tzx = solve(Wrz, Wrx)  # matrix left division, Tzx = inv(Wrz) * Wrx
    if matrix_rank(Tzx) != xsys.states:
        raise ValueError("Transformation matrix singular to working precision.")
    # Finally, compute the output matrix
    zsys.B = Tzx * xsys.B
    return zsys, Tzx
def modal_form(xsys):
    """Convert a system into modal canonical form
    Parameters
    ----------
    xsys : StateSpace object
        System to be transformed, with state `x`
    Returns
    -------
    zsys : StateSpace object
        System in modal canonical form, with state `z`
    T : matrix
        Coordinate transformation: z = T * x
    """
    # Check to make sure we have a SISO system
    if not issiso(xsys):
        raise ControlNotImplemented(
            "Canonical forms for MIMO systems not yet supported")
    # Create a new system, starting with a copy of the old one
    zsys = StateSpace(xsys)
    # Calculate eigenvalues and matrix of eigenvectors Tzx,
    eigval, eigvec = eig(xsys.A)
    # Eigenvalues and according eigenvectors are not sorted,
    # thus modal transformation is ambiguous
    # Sorting eigenvalues and respective vectors by largest to smallest eigenvalue
    idx = eigval.argsort()[::-1]
    eigval = eigval[idx]
    eigvec = eigvec[:,idx]
    # If all eigenvalues are real, the matrix of eigenvectors is Tzx directly
    if not iscomplex(eigval).any():
        Tzx = eigvec
    else:
        # A is an arbitrary semisimple matrix
        # Complex eigenvalues come in conjugate pairs; use only one of
        # each pair, contributing its real and imaginary parts as two
        # real basis columns.
        # Keep track of complex conjugates (need only one)
        lst_conjugates = []
        Tzx = None
        for val, vec in zip(eigval, eigvec.T):
            if iscomplex(val):
                if val not in lst_conjugates:
                    # First of a pair: remember its conjugate and add
                    # the [Re(v), Im(v)] columns to the transformation.
                    lst_conjugates.append(val.conjugate())
                    if Tzx is not None:
                        Tzx = hstack((Tzx, hstack((vec.real.T, vec.imag.T))))
                    else:
                        Tzx = hstack((vec.real.T, vec.imag.T))
                else:
                    # if conjugate has already been seen, skip this eigenvalue
                    # NOTE(review): removal relies on the second member of
                    # the pair comparing exactly equal to the stored
                    # conjugate — holds for eig() on real matrices, but
                    # worth confirming for borderline numerics.
                    lst_conjugates.remove(val)
            else:
                if Tzx is not None:
                    Tzx = hstack((Tzx, vec.real.T))
                else:
                    Tzx = vec.real.T
    # Generate the system matrices for the desired canonical form
    # (zsys.A = inv(Tzx) * A * Tzx, zsys.B = inv(Tzx) * B, zsys.C = C * Tzx)
    zsys.A = solve(Tzx, xsys.A).dot(Tzx)
    zsys.B = solve(Tzx, xsys.B)
    zsys.C = xsys.C.dot(Tzx)
    return zsys, Tzx
def similarity_transform(xsys, T, timescale=1):
    """Perform a similarity transformation, with option time rescaling.
    Transform a linear state space system to a new state space representation
    z = T x, where T is an invertible matrix.
    Parameters
    ----------
    T : 2D invertible array
        The matrix `T` defines the new set of coordinates z = T x.
    timescale : float
        If present, also rescale the time unit to tau = timescale * t
    Returns
    -------
    zsys : StateSpace object
        System in transformed coordinates, with state 'z'
    """
    # Work on a copy so the input system is untouched
    zsys = StateSpace(xsys)

    def right_divide(M, y):
        # Solve z M = y for z (i.e. z = y * inv(M)) via a transposed solve
        return transpose(solve(transpose(M), transpose(y)))

    # Transformed matrices: B = T B / ts, C = C inv(T), A = T A inv(T) / ts
    zsys.B = dot(T, zsys.B) / timescale
    zsys.C = right_divide(T, zsys.C)
    zsys.A = right_divide(T, dot(T, zsys.A)) / timescale
    return zsys
|
|
from devp2p.protocol import BaseProtocol, SubProtocolError
from ethereum.transactions import Transaction
from ethereum.blocks import Block, BlockHeader
import rlp
import gevent
import time
from ethereum import slogging
log = slogging.get_logger('protocol.eth')
class ETHProtocolError(SubProtocolError):
    """eth-specific subclass of devp2p's SubProtocolError."""
    pass
class ETHProtocol(BaseProtocol):
"""
DEV Ethereum Wire Protocol
https://github.com/ethereum/wiki/wiki/Ethereum-Wire-Protocol
https://github.com/ethereum/go-ethereum/blob/develop/eth/protocol.go#L15
"""
protocol_id = 1
network_id = 0
max_cmd_id = 15 # FIXME
name = 'eth'
version = 60
max_getblocks_count = 64
max_getblockhashes_count = 2048
def __init__(self, peer, service):
# required by P2PProtocol
self.config = peer.config
BaseProtocol.__init__(self, peer, service)
class status(BaseProtocol.command):
"""
protocolVersion: The version of the Ethereum protocol this peer implements. 30 at present.
networkID: The network version of Ethereum for this peer. 0 for the official testnet.
totalDifficulty: Total Difficulty of the best chain. Integer, as found in block header.
latestHash: The hash of the block with the highest validated total difficulty.
GenesisHash: The hash of the Genesis block.
"""
cmd_id = 0
sent = False
structure = [
('eth_version', rlp.sedes.big_endian_int),
('network_id', rlp.sedes.big_endian_int),
('chain_difficulty', rlp.sedes.big_endian_int),
('chain_head_hash', rlp.sedes.binary),
('genesis_hash', rlp.sedes.binary)]
def create(self, proto, chain_difficulty, chain_head_hash, genesis_hash):
self.sent = True
network_id = proto.service.app.config['eth'].get('network_id', proto.network_id)
return [proto.version, network_id, chain_difficulty, chain_head_hash, genesis_hash]
class newblockhashes(BaseProtocol.command):
"""
NewBlockHashes [+0x01: P, hash1: B_32, hash2: B_32, ...] Specify one or more new blocks which have appeared on the network. Including hashes that the sending peer could reasonable be considered to know that the receiving node is aware of is considered Bad Form, and may reduce the reputation of the sending node. Including hashes that the sending node later refuses to honour with a proceeding GetBlocks message is considered Bad Form, and may reduce the reputation of the sending node.
"""
cmd_id = 1
structure = rlp.sedes.CountableList(rlp.sedes.binary)
class transactions(BaseProtocol.command):
"""
Specify (a) transaction(s) that the peer should make sure is included on its transaction
queue. The items in the list (following the first item 0x12) are transactions in the
format described in the main Ethereum specification. Nodes must not resend the same
transaction to a peer in the same session. This packet must contain at least one (new)
transaction.
"""
cmd_id = 2
structure = rlp.sedes.CountableList(Transaction)
# todo: bloomfilter: so we don't send tx to the originating peer
@classmethod
def decode_payload(cls, rlp_data):
# convert to dict
txs = []
for i, tx in enumerate(rlp.decode_lazy(rlp_data)):
txs.append(Transaction.deserialize(tx))
if not i % 10:
gevent.sleep(0.0001)
return txs
class getblockhashes(BaseProtocol.command):
"""
Requests a BlockHashes message of at most maxBlocks entries, of block hashes from
the blockchain, starting at the parent of block hash. Does not require the peer
to give maxBlocks hashes - they could give somewhat fewer.
"""
cmd_id = 3
structure = [
('child_block_hash', rlp.sedes.binary),
('count', rlp.sedes.big_endian_int),
]
class blockhashes(BaseProtocol.command):
"""
Gives a series of hashes of blocks (each the child of the next). This implies that
the blocks are ordered from youngest to oldest.
"""
cmd_id = 4
structure = rlp.sedes.CountableList(rlp.sedes.binary)
class getblocks(BaseProtocol.command):
"""
Requests a Blocks message detailing a number of blocks to be sent, each referred to
by a hash. Note: Don't expect that the peer necessarily give you all these blocks
in a single message - you might have to re-request them.
"""
cmd_id = 5
structure = rlp.sedes.CountableList(rlp.sedes.binary)
class blocks(BaseProtocol.command):
cmd_id = 6
structure = rlp.sedes.CountableList(Block)
@classmethod
def encode_payload(cls, list_of_rlp):
return rlp.encode([rlp.codec.RLPData(x) for x in list_of_rlp], infer_serializer=False)
@classmethod
def decode_payload(cls, rlp_data):
# fn = 'blocks.fromthewire.hex.rlp'
# open(fn, 'a').write(rlp_data.encode('hex') + '\n')
# convert to dict
blocks = []
for block in rlp.decode_lazy(rlp_data):
blocks.append(TransientBlock(block))
return blocks
class newblock(BaseProtocol.command):
"""
NewBlock [+0x07, [blockHeader, transactionList, uncleList], totalDifficulty]
Specify a single block that the peer should know about.
The composite item in the list (following the message ID) is a block in
the format described in the main Ethereum specification.
"""
cmd_id = 7
structure = [('block', Block), ('chain_difficulty', rlp.sedes.big_endian_int)]
# todo: bloomfilter: so we don't send block to the originating peer
@classmethod
def decode_payload(cls, rlp_data):
# convert to dict
# print rlp_data.encode('hex')
ll = rlp.decode_lazy(rlp_data)
assert len(ll) == 2
transient_block = TransientBlock(ll[0], time.time())
difficulty = rlp.sedes.big_endian_int.deserialize(ll[1])
data = [transient_block, difficulty]
return dict((cls.structure[i][0], v) for i, v in enumerate(data))
class getblockheaders(BaseProtocol.command):
    """
    Requests a BlockHeaders message detailing a number of block headers to be sent,
    each referred to by a hash. Note: Don't expect that the peer necessarily give you all
    these block headers in a single message - you might have to re-request them.
    """
    # Wire id of this command within the enclosing protocol.
    cmd_id = 8
    # Payload: a flat RLP list of the requested header hashes.
    structure = rlp.sedes.CountableList(rlp.sedes.binary)
class blockheaders(BaseProtocol.command):
    # Response to getblockheaders.
    cmd_id = 9
    # NOTE(review): `structure` declares full Block items while decode_payload
    # below builds BlockHeader objects (and TransientBlock-style deserialization
    # elsewhere uses BlockHeader.deserialize) -- confirm which is intended.
    structure = rlp.sedes.CountableList(Block)
    @classmethod
    def encode_payload(cls, list_of_rlp):
        # Elements are pre-encoded RLP; RLPData embeds them without re-serializing.
        return rlp.encode([rlp.codec.RLPData(x) for x in list_of_rlp], infer_serializer=False)
    @classmethod
    def decode_payload(cls, rlp_data):
        # fn = 'blocks.fromthewire.hex.rlp'
        # open(fn, 'a').write(rlp_data.encode('hex') + '\n')
        # Returns a plain list of headers (not a field dict).
        blockheaders = []
        for blockheader in rlp.decode_lazy(rlp_data):
            blockheaders.append(BlockHeader(blockheader))
        return blockheaders
class hashlookup(BaseProtocol.command):
    # Request: flat RLP list of hashes to look up.
    # NOTE(review): lookup semantics are defined by the peer service; not visible here.
    cmd_id = 10
    structure = rlp.sedes.CountableList(rlp.sedes.binary)
class hashlookupresponse(BaseProtocol.command):
    # Response to hashlookup: flat RLP list of binary results.
    cmd_id = 11
    structure = rlp.sedes.CountableList(rlp.sedes.binary)
class TransientBlock(rlp.Serializable):
    """A partially decoded, unvalidated block."""
    # rlp.Serializable field spec: header, transactions and uncle headers.
    fields = [
        ('header', BlockHeader),
        ('transaction_list', rlp.sedes.CountableList(Transaction)),
        ('uncles', rlp.sedes.CountableList(BlockHeader))
    ]
    def __init__(self, block_data, newblock_timestamp=0):
        # Timestamp at which the NewBlock message carrying this block was
        # received; 0 when the block arrived some other way (e.g. blocks msg).
        self.newblock_timestamp = newblock_timestamp
        # Eagerly deserialize the three top-level items of the block RLP.
        self.header = BlockHeader.deserialize(block_data[0])
        self.transaction_list = rlp.sedes.CountableList(Transaction).deserialize(block_data[1])
        self.uncles = rlp.sedes.CountableList(BlockHeader).deserialize(block_data[2])
    def to_block(self, db, parent=None):
        """Convert the transient block to a :class:`ethereum.blocks.Block`"""
        return Block(self.header, self.transaction_list, self.uncles, db=db, parent=parent)
    # def serialize(self):
    #    return rlp.encode([self.header.serialize(self.header),
    #        self.transaction_list, self.uncles])
    @property
    def hex_hash(self):
        # Delegates to the header's hex_hash method.
        return self.header.hex_hash()
    def __repr__(self):
        # NOTE(review): str.encode('hex') is Python 2 only; would need
        # binascii.hexlify / .hex() on Python 3 -- confirm target runtime.
        return '<TransientBlock(#%d %s)>' % (self.header.number, self.header.hash.encode('hex')[:8])
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import unittest
import random
import math
import copy
from sudoku.sudoku_algorithms import *
# A solvable 9x9 puzzle; 0 marks an empty cell.
VALID_PUZZLE = [
    [0,3,5,2,9,0,8,6,4],
    [0,8,2,4,1,0,7,0,3],
    [7,6,4,3,8,0,0,9,0],
    [2,1,8,7,3,9,0,4,0],
    [0,0,0,8,0,4,2,3,0],
    [0,4,3,0,5,2,9,7,0],
    [4,0,6,5,7,1,0,0,9],
    [3,5,9,0,2,8,4,1,7],
    [8,0,0,9,0,0,5,2,6],
]
# Same as VALID_PUZZLE except the last cell of row 0 is 9, duplicating the 9
# already present in that row -- deliberately unsolvable.
INVALID_PUZZLE = [
    [0,3,5,2,9,0,8,6,9],
    [0,8,2,4,1,0,7,0,3],
    [7,6,4,3,8,0,0,9,0],
    [2,1,8,7,3,9,0,4,0],
    [0,0,0,8,0,4,2,3,0],
    [0,4,3,0,5,2,9,7,0],
    [4,0,6,5,7,1,0,0,9],
    [3,5,9,0,2,8,4,1,7],
    [8,0,0,9,0,0,5,2,6],
]
# The unique completed solution of VALID_PUZZLE.
VALID_SOLUTION = [
    [1,3,5,2,9,7,8,6,4],
    [9,8,2,4,1,6,7,5,3],
    [7,6,4,3,8,5,1,9,2],
    [2,1,8,7,3,9,6,4,5],
    [5,9,7,8,6,4,2,3,1],
    [6,4,3,1,5,2,9,7,8],
    [4,2,6,5,7,1,3,8,9],
    [3,5,9,6,2,8,4,1,7],
    [8,7,1,9,4,3,5,2,6],
]
class TestSudoku(unittest.TestCase):
    """
    Sudoku test suite.
    Main focus on testing the helping and evaluation functions
    (_accept, _reject, _get_next_zero, _get_candidates, _get_all_candidates).
    """
    def setUp(self):
        self.solver = Sudoku()

    def tearDown(self):
        pass

    def _configure(self, D):
        """Configure the solver under test for a DxD puzzle."""
        self.solver.D = D
        # sum that every row/column/quadrant of a solution must reach
        self.solver.S = sum(range(1, D + 1))
        # number of quadrant rows and columns
        self.solver.N = int(math.sqrt(D))

    def test__accept(self):
        """_accept() holds only for a completed, valid solution."""
        self._configure(len(VALID_PUZZLE))
        self.assertFalse(self.solver._accept(VALID_PUZZLE))
        self.assertTrue(self.solver._accept(VALID_SOLUTION))
        # TODO search for weird cases

    def test_not_accept(self):
        """
        tests valid puzzles that are not yet completed, should always not accept
        """
        # for some dimensions (from 4x4 -> 16x16)
        for d in range(2, 5):
            D = d ** 2
            self._configure(D)
            # an all-empty puzzle is never a finished solution
            self.assertFalse(self.solver._accept(self._create_zero_puzzle(D)))
            for r in range(D):
                for c in range(D):
                    # generate a valid column, row and quadrant for the given position
                    vpos_puzzle = self._generate_valid_position(D, (r, c))
                    self.assertFalse(self.solver._accept(vpos_puzzle))

    def test__reject(self):
        """
        """
        # TODO
        pass

    def test_not_reject(self):
        """
        Incomplete-but-consistent puzzles must never be rejected.
        """
        for d in range(2, 5):
            D = d ** 2
            self._configure(D)
            # an all-empty puzzle is always consistent
            self.assertFalse(self.solver._reject(self._create_zero_puzzle(D)))
            for r in range(D):
                for c in range(D):
                    # generate a valid column, row and quadrant for the given position
                    vpos_puzzle = self._generate_valid_position(D, (r, c))
                    self.assertFalse(self.solver._reject(vpos_puzzle))

    def test__get_next_zero(self):
        """
        Checks that the function returns the next zero to evaluate (by row, column)
        Evaluation is left to right up to down
        """
        # for some dimensions (from 4x4 -> 16x16)
        for d in range(2, 5):
            D = d ** 2
            # all rows / columns
            for r in range(D):
                for c in range(D):
                    # non empty (and maybe non valid) puzzle with one zero at (r, c)
                    puzzle = [[random.randint(1, D) for j in range(D)] for i in range(D)]
                    puzzle[r][c] = 0
                    self._configure(len(puzzle))
                    self.assertEqual((r, c), self.solver._get_next_zero(puzzle))

    def test__get_candidates(self):
        """
        Evaluates that the candidates obtained for each coordinate pair are correct.
        """
        # for some dimensions (from 4x4 -> 16x16); bigger would take too long
        for d in range(2, 5):
            D = d ** 2
            self._configure(D)
            # for zero puzzle
            zero_puzzle = self._create_zero_puzzle(D)
            # for no zero in puzzle (is most likely invalid, anyways it is useful for testing)
            no_zero_puzzle = [[random.randint(1, D) for j in range(D)] for i in range(D)]
            # list() keeps this working on Python 3, where range() is lazy
            candidates = list(range(1, D + 1))
            for r in range(D):
                for c in range(D):
                    # evaluate empty
                    self.assertEqual(candidates, self.solver._get_candidates(zero_puzzle, (r, c)))
                    # evaluate full
                    self.assertEqual(0, len(self.solver._get_candidates(no_zero_puzzle, (r, c))))
                    # generate a valid column, row and quadrant for the given position
                    vpos_puzzle = self._generate_valid_position(D, (r, c))
                    # for this generated valid position and for 1 to D start cleaning and find candidates
                    # covers the spectrum of possibilities for the whole puzzle position set (not the whole integer space)
                    for i in range(D):
                        missing, epos_puzzle = self._empty_numbers_for_pos(copy.deepcopy(vpos_puzzle), i, (r, c))
                        pos_candidates = self.solver._get_candidates(epos_puzzle, (r, c))
                        self.assertEqual(set(missing), set(pos_candidates))

    def test__get_all_candidates(self):
        """
        Every cell of an empty puzzle must admit every value as a candidate.
        """
        for d in range(2, 5):
            D = d ** 2
            self._configure(D)
            # for zero puzzle
            zero_puzzle = self._create_zero_puzzle(D)
            candidates = set(range(1, D + 1))
            missing = self.solver._get_all_candidates(zero_puzzle)
            for r in range(D):
                for c in range(D):
                    self.assertEqual(set(missing[r][c]), candidates)
            # see other cases although they should be covered by the test__get_candidates

    ############################################################################
    # helper methods
    ############################################################################
    def _create_zero_puzzle(self, D):
        """
        Creates a DxD matrix of zeros
        """
        return [[0 for j in range(D)] for i in range(D)]

    def _generate_valid_position(self, D, pos):
        """
        The generated puzzle might not be valid, the goal is to be able to test a position only
        @param: D dimension of the puzzle
        @param: pos = (i,j) position in the matrix that should contain a valid row column and quadrant
        @return: a maybe invalid puzzle where the given position is valid
        """
        puzzle = self._create_zero_puzzle(D)
        x, y = pos
        N = int(math.sqrt(D))
        # floor division keeps the quadrant indices integral on Python 3
        qr, qc = x // N, y // N
        # fill quadrant; list() is required for shuffle on Python 3
        qcandidates = list(range(1, D + 1))
        random.shuffle(qcandidates)
        for r in range(qr * N, (qr + 1) * N):
            for c in range(qc * N, (qc + 1) * N):
                puzzle[r][c] = qcandidates.pop()
        # fill row
        candidates = list(range(1, D + 1))
        random.shuffle(candidates)
        rcandidates = [i for i in candidates if i not in puzzle[x]]
        for i in range(D):
            if puzzle[x][i] == 0:
                puzzle[x][i] = rcandidates.pop()
        # fill column (t_puzzle is the transpose, so t_puzzle[y] is column y)
        t_puzzle = [[r[i] for r in puzzle] for i in range(D)]
        random.shuffle(candidates)
        ccandidates = [i for i in candidates if i not in t_puzzle[y]]
        for i in range(D):
            if puzzle[i][y] == 0:
                puzzle[i][y] = ccandidates.pop()
        return puzzle

    def _empty_numbers_for_pos(self, puzzle, n, pos):
        """
        takes out n numbers from the column, row and quadrant, the resulting puzzle
        contains a 0 in the position
        @param puzzle: given puzzle to empty (given position must be already solved)
        @param n: number of elements to empty from the row, column and quadrant
        @param pos: (x,y) position in the puzzle where will be a zero
        @return missing,puzzle where missing is the list of numbers missing in that point
        """
        x, y = pos
        D = len(puzzle)
        # up to n-1 random values plus the value at pos itself (set dedups)
        missing = set([random.randint(1, D) for i in range(n - 1) if n > 0])
        missing.add(puzzle[x][y])
        missing = list(missing)
        # empties from the whole puzzle the selected numbers, this is easy and
        # has the desired result in the given position
        for r in puzzle:
            for m in missing:
                if m in r:
                    r[r.index(m)] = 0
        return missing, puzzle

    def _empty_random_positions(self, puzzle, n, max_tries=1000):
        """
        empties 'n' positions from the given puzzle
        @param puzzle: puzzle to use
        @param n: number of elements to put in 0
        @param max_tries: maximum number of loops that will try
        """
        # TODO implement a better algorithm that can not choose a position that is already in 0
        D = len(puzzle)
        emptied = 0
        for i in range(max_tries):
            x, y = random.randint(0, D - 1), random.randint(0, D - 1)
            if puzzle[x][y] != 0:
                emptied += 1
                puzzle[x][y] = 0
            if emptied >= n:
                break
        return puzzle
class TestRecursiveBacktrackingSudokuSolver(TestSudoku):
    """
    RecursiveBacktrackingSudokuSolver test suite.
    Inherits the helper/evaluation tests from TestSudoku and additionally
    exercises the full solve() entry point.
    """
    def setUp(self):
        self.solver = RecursiveBacktrackingSudokuSolver()

    def tearDown(self):
        pass

    def test_solve(self):
        """
        solve() must return the known solution for a valid 9x9 puzzle (and set
        D/N/S from the puzzle size) and None for an unsolvable puzzle.
        """
        # test a valid solution (assertEqual: assertEquals is deprecated)
        solution = self.solver.solve(VALID_PUZZLE)
        self.assertEqual(9, self.solver.D)
        self.assertEqual(3, self.solver.N)
        self.assertEqual(45, self.solver.S)
        self.assertIsNotNone(solution)
        self.assertEqual(VALID_SOLUTION, solution)
        # test an invalid solution
        in_solution = self.solver.solve(INVALID_PUZZLE)
        self.assertEqual(9, self.solver.D)
        self.assertEqual(3, self.solver.N)
        self.assertEqual(45, self.solver.S)
        self.assertIsNone(in_solution)
        # TODO should test with a DB of cases
###In progress
class TestSudokuPuzzleGenerator(TestSudoku):
    """
    SudokuPuzzleGenerator test suite (skeleton; cases still to be written).
    """
    def setUp(self):
        self.solver = SudokuPuzzleGenerator()

    def tearDown(self):
        pass

    def test__generate_puzzle(self):
        """TODO: implement."""

    def test__generate_valid_position(self):
        """TODO: implement."""

    def test__generate_base_and_solve(self):
        """TODO: implement."""

    def test__generate_dumb_solution(self):
        """TODO: implement."""

    def test_generate(self):
        """TODO: implement."""
if __name__ == '__main__':
    # Discover and run all test cases in this module via the unittest CLI.
    unittest.main()
|
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
'''
FILE: blob_samples_directory_interface.py
DESCRIPTION:
This example shows how to perform common filesystem-like operations on a
container. This includes uploading and downloading files to and from the
container with an optional prefix, listing files in the container both at
a single level and recursively, and deleting files in the container either
individually or recursively.
To run this sample, provide the name of the storage container to operate on
as the script argument (e.g. `python3 directory_interface.py my-container`).
This sample expects that the `AZURE_STORAGE_CONNECTION_STRING` environment
variable is set. It SHOULD NOT be hardcoded in any code derived from this
sample.
USAGE: python blob_samples_directory_interface.py CONTAINER_NAME
Set the environment variables with your own values before running the sample:
1) AZURE_STORAGE_CONNECTION_STRING - the connection string to your storage account
'''
import os
from azure.storage.blob import BlobServiceClient
class DirectoryClient:
  '''
  Filesystem-like interface over the blobs of a single storage container.
  Blob names containing '/' are treated as paths, which enables directory
  style upload, download, listing and deletion operations.
  '''
  def __init__(self, connection_string, container_name):
    service_client = BlobServiceClient.from_connection_string(connection_string)
    self.client = service_client.get_container_client(container_name)

  def upload(self, source, dest):
    '''
    Upload a file or directory to a path inside the container
    '''
    if os.path.isdir(source):
      self.upload_dir(source, dest)
    else:
      self.upload_file(source, dest)

  def upload_file(self, source, dest):
    '''
    Upload a single file to a path inside the container
    '''
    print(f'Uploading {source} to {dest}')
    with open(source, 'rb') as data:
      self.client.upload_blob(name=dest, data=data)

  def upload_dir(self, source, dest):
    '''
    Upload a directory to a path inside the container
    '''
    # The prefix preserves the source directory's own name under dest.
    prefix = '' if dest == '' else dest + '/'
    prefix += os.path.basename(source) + '/'
    for root, dirs, files in os.walk(source):
      for name in files:
        dir_part = os.path.relpath(root, source)
        dir_part = '' if dir_part == '.' else dir_part + '/'
        file_path = os.path.join(root, name)
        blob_path = prefix + dir_part + name
        self.upload_file(file_path, blob_path)

  def download(self, source, dest):
    '''
    Download a file or directory to a path on the local filesystem
    '''
    if not dest:
      # ValueError is more precise than a bare Exception and stays
      # backward compatible for callers catching Exception.
      raise ValueError('A destination must be provided')
    blobs = self.ls_files(source, recursive=True)
    if blobs:
      # if source is a directory, dest must also be a directory
      if source != '' and not source.endswith('/'):
        source += '/'
      if not dest.endswith('/'):
        dest += '/'
      # append the directory name from source to the destination
      dest += os.path.basename(os.path.normpath(source)) + '/'
      blobs = [source + blob for blob in blobs]
      for blob in blobs:
        blob_dest = dest + os.path.relpath(blob, source)
        self.download_file(blob, blob_dest)
    else:
      self.download_file(source, dest)

  def download_file(self, source, dest):
    '''
    Download a single file to a path on the local filesystem
    '''
    # dest is a directory if ending with '/' or '.', otherwise it's a file
    if dest.endswith('.'):
      dest += '/'
    blob_dest = dest + os.path.basename(source) if dest.endswith('/') else dest
    print(f'Downloading {source} to {blob_dest}')
    # Guard against makedirs('') when blob_dest is a bare filename.
    dest_dir = os.path.dirname(blob_dest)
    if dest_dir:
      os.makedirs(dest_dir, exist_ok=True)
    bc = self.client.get_blob_client(blob=source)
    with open(blob_dest, 'wb') as file:
      data = bc.download_blob()
      file.write(data.readall())

  def ls_files(self, path, recursive=False):
    '''
    List files under a path, optionally recursively
    '''
    if path != '' and not path.endswith('/'):
      path += '/'
    blob_iter = self.client.list_blobs(name_starts_with=path)
    files = []
    for blob in blob_iter:
      relative_path = os.path.relpath(blob.name, path)
      # Without recursive, skip anything nested below the first level.
      if recursive or '/' not in relative_path:
        files.append(relative_path)
    return files

  def ls_dirs(self, path, recursive=False):
    '''
    List directories under a path, optionally recursively
    '''
    if path != '' and not path.endswith('/'):
      path += '/'
    blob_iter = self.client.list_blobs(name_starts_with=path)
    dirs = []
    for blob in blob_iter:
      relative_dir = os.path.dirname(os.path.relpath(blob.name, path))
      if relative_dir and (recursive or '/' not in relative_dir) and relative_dir not in dirs:
        dirs.append(relative_dir)
    return dirs

  def rm(self, path, recursive=False):
    '''
    Remove a single file, or remove a path recursively
    '''
    if recursive:
      self.rmdir(path)
    else:
      print(f'Deleting {path}')
      self.client.delete_blob(path)

  def rmdir(self, path):
    '''
    Remove a directory and its contents recursively
    '''
    blobs = self.ls_files(path, recursive=True)
    if not blobs:
      return
    if path != '' and not path.endswith('/'):
      path += '/'
    blobs = [path + blob for blob in blobs]
    print(f'Deleting {", ".join(blobs)}')
    self.client.delete_blobs(*blobs)
# Sample setup
import sys
# Read required configuration; fail fast with a usage message if missing.
try:
  CONNECTION_STRING = os.environ['AZURE_STORAGE_CONNECTION_STRING']
except KeyError:
  print('AZURE_STORAGE_CONNECTION_STRING must be set')
  sys.exit(1)
try:
  CONTAINER_NAME = sys.argv[1]
except IndexError:
  print('usage: directory_interface.py CONTAINER_NAME')
  print('error: the following arguments are required: CONTAINER_NAME')
  sys.exit(1)
# Local directory tree and files created on disk for the demo uploads below.
SAMPLE_DIRS = [
  'cats/calico',
  'cats/siamese',
  'cats/tabby'
]
SAMPLE_FILES = [
  'readme.txt',
  'cats/herds.txt',
  'cats/calico/anna.txt',
  'cats/calico/felix.txt',
  'cats/siamese/mocha.txt',
  'cats/tabby/bojangles.txt'
]
for path in SAMPLE_DIRS:
  os.makedirs(path, exist_ok=True)
for path in SAMPLE_FILES:
  with open(path, 'w') as file:
    file.write('content')
# Sample body
client = DirectoryClient(CONNECTION_STRING, CONTAINER_NAME)
# Upload a single file to the container. The destination must be a path
# including the destination file name.
#
# After this call, the container will look like:
#   cat-herding/
#     readme.txt
client.upload('readme.txt', 'cat-herding/readme.txt')
files = client.ls_files('', recursive=True)
print(files)
# Upload a directory to the container with a path prefix. The directory
# structure will be preserved inside the path prefix.
#
# After this call, the container will look like:
#   cat-herding/
#     readme.txt
#     cats/
#       herds.txt
#       calico/
#         anna.txt
#         felix.txt
#       siamese/
#         mocha.txt
#       tabby/
#         bojangles.txt
client.upload('cats', 'cat-herding')
files = client.ls_files('', recursive=True)
print(files)
# List files in a single directory
# Returns:
# ['herds.txt']
files = client.ls_files('cat-herding/cats')
print(files)
# List files in a directory recursively
# Returns:
# [
#   'herds.txt',
#   'calico/anna.txt',
#   'calico/felix.txt',
#   'siamese/mocha.txt',
#   'tabby/bojangles.txt'
# ]
files = client.ls_files('cat-herding/cats', recursive=True)
print(files)
# List directories in a single directory
# Returns:
# ['calico', 'siamese', 'tabby']
dirs = client.ls_dirs('cat-herding/cats')
print(dirs)
# List files in a directory recursively
# Returns:
# ['cats', 'cats/calico', 'cats/siamese', 'cats/tabby']
dirs = client.ls_dirs('cat-herding', recursive=True)
print(dirs)
# Download a single file to a location on disk, specifying the destination file
# name. When the destination does not end with a slash '/' and is not a relative
# path specifier (e.g. '.', '..', '../..', etc), the destination will be
# interpreted as a full path including the file name. If intermediate
# directories in the destination do not exist they will be created.
#
# After this call, your working directory will look like:
#   downloads/
#     cat-info.txt
client.download('cat-herding/readme.txt', 'downloads/cat-info.txt')
import glob
print(glob.glob('downloads/**', recursive=True))
# Download a single file to a folder on disk, preserving the original file name.
# When the destination ends with a slash '/' or is a relative path specifier
# (e.g. '.', '..', '../..', etc), the destination will be interpreted as a
# directory name and the specified file will be saved within the destination
# directory. If intermediate directories in the destination do not exist they
# will be created.
#
# After this call, your working directory will look like:
#   downloads/
#     cat-info.txt
#     herd-info/
#       herds.txt
client.download('cat-herding/cats/herds.txt', 'downloads/herd-info/')
print(glob.glob('downloads/**', recursive=True))
# Download a directory to a folder on disk. The destination is always
# interpreted as a directory name. The directory structure will be preserved
# inside destination folder. If intermediate directories in the destination do
# not exist they will be created.
#
# After this call, your working directory will look like:
#   downloads/
#     cat-data/
#       cats/
#         herds.txt
#         calico/
#           anna.txt
#           felix.txt
#         siamese/
#           mocha.txt
#         tabby/
#           bojangles.txt
#     cat-info.txt
#     herd-info/
#       herds.txt
client.download('cat-herding/cats', 'downloads/cat-data')
print(glob.glob('downloads/**', recursive=True))
# Delete a single file from the container
#
# After this call, the container will look like:
#   cat-herding/
#     readme.txt
#     cats/
#       herds.txt
#       calico/
#         anna.txt
#       siamese/
#         mocha.txt
#       tabby/
#         bojangles.txt
client.rm('cat-herding/cats/calico/felix.txt')
files = client.ls_files('', recursive=True)
print(files)
# Delete files in a directory recursively. This is equivalent to
# client.rmdir('cat-herding/cats')
#
# After this call, the container will look like:
#   cat-herding/
#     readme.txt
client.rm('cat-herding/cats', recursive=True)
files = client.ls_files('', recursive=True)
print(files)
# Delete files in a directory recursively. This is equivalent to
# client.rm('cat-herding', recursive=True)
#
# After this call, the container will be empty.
client.rmdir('cat-herding')
files = client.ls_files('', recursive=True)
print(files)
# Sample cleanup
# Remove the local sample files and downloaded artifacts created above.
import shutil
shutil.rmtree('downloads')
shutil.rmtree('cats')
os.remove('readme.txt')
|
|
#!/usr/bin/env python
"""Support classes for automated testing.
* `AsyncTestCase` and `AsyncHTTPTestCase`: Subclasses of unittest.TestCase
with additional support for testing asynchronous (`.IOLoop`-based) code.
* `ExpectLog` and `LogTrapTestCase`: Make test logs less spammy.
* `main()`: A simple test runner (wrapper around unittest.main()) with support
for the tornado.autoreload module to rerun the tests when code changes.
"""
from __future__ import absolute_import, division, print_function, with_statement
try:
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.ioloop import IOLoop, TimeoutError
from tornado import netutil
from tornado.process import Subprocess
except ImportError:
# These modules are not importable on app engine. Parts of this module
# won't work, but e.g. LogTrapTestCase and main() will.
AsyncHTTPClient = None # type: ignore
gen = None # type: ignore
HTTPServer = None # type: ignore
IOLoop = None # type: ignore
netutil = None # type: ignore
SimpleAsyncHTTPClient = None # type: ignore
Subprocess = None # type: ignore
from tornado.log import gen_log, app_log
from tornado.stack_context import ExceptionStackContext
from tornado.util import raise_exc_info, basestring_type, PY3
import functools
import inspect
import logging
import os
import re
import signal
import socket
import sys
if PY3:
from io import StringIO
else:
from cStringIO import StringIO
try:
from collections.abc import Generator as GeneratorType # type: ignore
except ImportError:
from types import GeneratorType # type: ignore
if sys.version_info >= (3, 5):
iscoroutine = inspect.iscoroutine # type: ignore
iscoroutinefunction = inspect.iscoroutinefunction # type: ignore
else:
iscoroutine = iscoroutinefunction = lambda f: False
# Tornado's own test suite requires the updated unittest module
# (either py27+ or unittest2) so tornado.test.util enforces
# this requirement, but for other users of tornado.testing we want
# to allow the older version if unitest2 is not available.
if PY3:
# On python 3, mixing unittest2 and unittest (including doctest)
# doesn't seem to work, so always use unittest.
import unittest
else:
# On python 2, prefer unittest2 when available.
try:
import unittest2 as unittest # type: ignore
except ImportError:
import unittest # type: ignore
# Monotonically increasing counter backing get_unused_port().
_next_port = 10000


def get_unused_port():
    """Return a port number that is unlikely to be in use.

    Only guarantees that successive calls within one process yield
    distinct, increasing numbers; the port may still be taken by
    another process.

    .. deprecated::
       Use bind_unused_port instead, which is guaranteed to find an unused port.
    """
    global _next_port
    port, _next_port = _next_port, _next_port + 1
    return port
def bind_unused_port(reuse_port=False):
    """Bind a new server socket to an ephemeral port on 127.0.0.1.

    Returns a tuple (socket, port).

    .. versionchanged:: 4.4
       Always binds to ``127.0.0.1`` without resolving the name
       ``localhost``.
    """
    # Passing port=None lets the kernel pick a free ephemeral port.
    listener = netutil.bind_sockets(None, '127.0.0.1',
                                    family=socket.AF_INET,
                                    reuse_port=reuse_port)[0]
    return listener, listener.getsockname()[1]
def get_async_test_timeout():
    """Get the global timeout setting for async tests.

    Returns a float, the timeout in seconds.

    .. versionadded:: 3.1
    """
    raw = os.environ.get('ASYNC_TEST_TIMEOUT')
    try:
        return float(raw)
    except (ValueError, TypeError):
        # Unset (None -> TypeError) or unparsable (-> ValueError):
        # fall back to the 5 second default.
        return 5
class _TestMethodWrapper(object):
"""Wraps a test method to raise an error if it returns a value.
This is mainly used to detect undecorated generators (if a test
method yields it must use a decorator to consume the generator),
but will also detect other kinds of return values (these are not
necessarily errors, but we alert anyway since there is no good
reason to return a value from a test).
"""
def __init__(self, orig_method):
self.orig_method = orig_method
def __call__(self, *args, **kwargs):
result = self.orig_method(*args, **kwargs)
if isinstance(result, GeneratorType) or iscoroutine(result):
raise TypeError("Generator and coroutine test methods should be"
" decorated with tornado.testing.gen_test")
elif result is not None:
raise ValueError("Return value from test method ignored: %r" %
result)
def __getattr__(self, name):
"""Proxy all unknown attributes to the original method.
This is important for some of the decorators in the `unittest`
module, such as `unittest.skipIf`.
"""
return getattr(self.orig_method, name)
class AsyncTestCase(unittest.TestCase):
    """`~unittest.TestCase` subclass for testing `.IOLoop`-based
    asynchronous code.

    The unittest framework is synchronous, so the test must be
    complete by the time the test method returns. This means that
    asynchronous code cannot be used in quite the same way as usual.
    To write test functions that use the same ``yield``-based patterns
    used with the `tornado.gen` module, decorate your test methods
    with `tornado.testing.gen_test` instead of
    `tornado.gen.coroutine`. This class also provides the `stop()`
    and `wait()` methods for a more manual style of testing. The test
    method itself must call ``self.wait()``, and asynchronous
    callbacks should call ``self.stop()`` to signal completion.

    By default, a new `.IOLoop` is constructed for each test and is available
    as ``self.io_loop``. This `.IOLoop` should be used in the construction of
    HTTP clients/servers, etc. If the code being tested requires a
    global `.IOLoop`, subclasses should override `get_new_ioloop` to return it.

    The `.IOLoop`'s ``start`` and ``stop`` methods should not be
    called directly. Instead, use `self.stop <stop>` and `self.wait
    <wait>`. Arguments passed to ``self.stop`` are returned from
    ``self.wait``. It is possible to have multiple ``wait``/``stop``
    cycles in the same test.

    Example::

        # This test uses coroutine style.
        class MyTestCase(AsyncTestCase):
            @tornado.testing.gen_test
            def test_http_fetch(self):
                client = AsyncHTTPClient(self.io_loop)
                response = yield client.fetch("http://www.tornadoweb.org")
                # Test contents of response
                self.assertIn("FriendFeed", response.body)

        # This test uses argument passing between self.stop and self.wait.
        class MyTestCase2(AsyncTestCase):
            def test_http_fetch(self):
                client = AsyncHTTPClient(self.io_loop)
                client.fetch("http://www.tornadoweb.org/", self.stop)
                response = self.wait()
                # Test contents of response
                self.assertIn("FriendFeed", response.body)

        # This test uses an explicit callback-based style.
        class MyTestCase3(AsyncTestCase):
            def test_http_fetch(self):
                client = AsyncHTTPClient(self.io_loop)
                client.fetch("http://www.tornadoweb.org/", self.handle_fetch)
                self.wait()

            def handle_fetch(self, response):
                # Test contents of response (failures and exceptions here
                # will cause self.wait() to throw an exception and end the
                # test).
                # Exceptions thrown here are magically propagated to
                # self.wait() in test_http_fetch() via stack_context.
                self.assertIn("FriendFeed", response.body)
                self.stop()
    """
    def __init__(self, methodName='runTest'):
        super(AsyncTestCase, self).__init__(methodName)
        # State for the stop()/wait() handshake (name-mangled to avoid
        # clashing with subclass attributes).
        self.__stopped = False
        self.__running = False
        self.__failure = None
        self.__stop_args = None
        self.__timeout = None
        # It's easy to forget the @gen_test decorator, but if you do
        # the test will silently be ignored because nothing will consume
        # the generator. Replace the test method with a wrapper that will
        # make sure it's not an undecorated generator.
        setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
    def setUp(self):
        super(AsyncTestCase, self).setUp()
        # Each test gets a fresh IOLoop, installed as the current one.
        self.io_loop = self.get_new_ioloop()
        self.io_loop.make_current()
    def tearDown(self):
        # Clean up Subprocess, so it can be used again with a new ioloop.
        Subprocess.uninitialize()
        self.io_loop.clear_current()
        if (not IOLoop.initialized() or
                self.io_loop is not IOLoop.instance()):
            # Try to clean up any file descriptors left open in the ioloop.
            # This avoids leaks, especially when tests are run repeatedly
            # in the same process with autoreload (because curl does not
            # set FD_CLOEXEC on its file descriptors)
            self.io_loop.close(all_fds=True)
        super(AsyncTestCase, self).tearDown()
        # In case an exception escaped or the StackContext caught an exception
        # when there wasn't a wait() to re-raise it, do so here.
        # This is our last chance to raise an exception in a way that the
        # unittest machinery understands.
        self.__rethrow()
    def get_new_ioloop(self):
        """Creates a new `.IOLoop` for this test. May be overridden in
        subclasses for tests that require a specific `.IOLoop` (usually
        the singleton `.IOLoop.instance()`).
        """
        return IOLoop()
    def _handle_exception(self, typ, value, tb):
        # Record only the first failure; later ones are merely logged.
        # Stopping the loop lets wait() re-raise the stored failure.
        if self.__failure is None:
            self.__failure = (typ, value, tb)
        else:
            app_log.error("multiple unhandled exceptions in test",
                          exc_info=(typ, value, tb))
        self.stop()
        return True
    def __rethrow(self):
        # Re-raise (at most once) a failure captured by _handle_exception.
        if self.__failure is not None:
            failure = self.__failure
            self.__failure = None
            raise_exc_info(failure)
    def run(self, result=None):
        with ExceptionStackContext(self._handle_exception):
            super(AsyncTestCase, self).run(result)
        # As a last resort, if an exception escaped super.run() and wasn't
        # re-raised in tearDown, raise it here. This will cause the
        # unittest run to fail messily, but that's better than silently
        # ignoring an error.
        self.__rethrow()
    def stop(self, _arg=None, **kwargs):
        """Stops the `.IOLoop`, causing one pending (or future) call to `wait()`
        to return.

        Keyword arguments or a single positional argument passed to `stop()` are
        saved and will be returned by `wait()`.
        """
        assert _arg is None or not kwargs
        self.__stop_args = kwargs or _arg
        if self.__running:
            self.io_loop.stop()
            self.__running = False
        self.__stopped = True
    def wait(self, condition=None, timeout=None):
        """Runs the `.IOLoop` until stop is called or timeout has passed.

        In the event of a timeout, an exception will be thrown. The
        default timeout is 5 seconds; it may be overridden with a
        ``timeout`` keyword argument or globally with the
        ``ASYNC_TEST_TIMEOUT`` environment variable.

        If ``condition`` is not None, the `.IOLoop` will be restarted
        after `stop()` until ``condition()`` returns true.

        .. versionchanged:: 3.1
           Added the ``ASYNC_TEST_TIMEOUT`` environment variable.
        """
        if timeout is None:
            timeout = get_async_test_timeout()
        if not self.__stopped:
            if timeout:
                def timeout_func():
                    try:
                        raise self.failureException(
                            'Async operation timed out after %s seconds' %
                            timeout)
                    except Exception:
                        self.__failure = sys.exc_info()
                    self.stop()
                self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, timeout_func)
            # Restart the loop until a failure occurred or the (optional)
            # condition holds after stop().
            while True:
                self.__running = True
                self.io_loop.start()
                if (self.__failure is not None or
                        condition is None or condition()):
                    break
            if self.__timeout is not None:
                self.io_loop.remove_timeout(self.__timeout)
                self.__timeout = None
        assert self.__stopped
        self.__stopped = False
        self.__rethrow()
        # Hand back whatever stop() stashed, then clear it for the next cycle.
        result = self.__stop_args
        self.__stop_args = None
        return result
class AsyncHTTPTestCase(AsyncTestCase):
    """A test case that starts up an HTTP server.

    Subclasses must override `get_app()`, which returns the
    `tornado.web.Application` (or other `.HTTPServer` callback) to be
    tested. Tests typically use the provided ``self.http_client`` to
    fetch URLs from this server.

    Example, assuming the "Hello, world" example from the user guide is
    in ``hello.py``::

        import hello

        class TestHelloApp(AsyncHTTPTestCase):
            def get_app(self):
                return hello.make_app()

            def test_homepage(self):
                response = self.fetch('/')
                self.assertEqual(response.code, 200)
                self.assertEqual(response.body, 'Hello, world')

    That call to ``self.fetch()`` is equivalent to ::

        self.http_client.fetch(self.get_url('/'), self.stop)
        response = self.wait()

    which illustrates how AsyncTestCase can turn an asynchronous
    operation, like ``http_client.fetch()``, into a synchronous one. If
    you need other asynchronous operations in tests, you'll probably
    need to use ``stop()`` and ``wait()`` yourself.
    """
    def setUp(self):
        super(AsyncHTTPTestCase, self).setUp()
        # Bind first so the port is known before the app is constructed.
        listener, listen_port = bind_unused_port()
        self.__port = listen_port

        self.http_client = self.get_http_client()
        self._app = self.get_app()
        self.http_server = self.get_http_server()
        self.http_server.add_sockets([listener])

    def get_http_client(self):
        return AsyncHTTPClient(io_loop=self.io_loop)

    def get_http_server(self):
        return HTTPServer(self._app, io_loop=self.io_loop,
                          **self.get_httpserver_options())

    def get_app(self):
        """Should be overridden by subclasses to return a
        `tornado.web.Application` or other `.HTTPServer` callback.
        """
        raise NotImplementedError()

    def fetch(self, path, **kwargs):
        """Convenience method to synchronously fetch a url.

        The given path will be appended to the local server's host and
        port. Any additional kwargs will be passed directly to
        `.AsyncHTTPClient.fetch` (and so could be used to pass
        ``method="POST"``, ``body="..."``, etc).
        """
        self.http_client.fetch(self.get_url(path), self.stop, **kwargs)
        return self.wait()

    def get_httpserver_options(self):
        """May be overridden by subclasses to return additional
        keyword arguments for the server.
        """
        return {}

    def get_http_port(self):
        """Returns the port used by the server.

        A new port is chosen for each test.
        """
        return self.__port

    def get_protocol(self):
        return 'http'

    def get_url(self, path):
        """Returns an absolute url for the given path on the test server."""
        return '%s://localhost:%s%s' % (self.get_protocol(),
                                        self.get_http_port(), path)

    def tearDown(self):
        self.http_server.stop()
        self.io_loop.run_sync(self.http_server.close_all_connections,
                              timeout=get_async_test_timeout())
        # Only close the client if it owns its IOLoop (i.e. it is not
        # sharing the global singleton with other code).
        client_owns_loop = (not IOLoop.initialized() or
                            self.http_client.io_loop is not IOLoop.instance())
        if client_owns_loop:
            self.http_client.close()
        super(AsyncHTTPTestCase, self).tearDown()
class AsyncHTTPSTestCase(AsyncHTTPTestCase):
    """A test case that starts an HTTPS server.

    The interface is generally the same as `AsyncHTTPTestCase`.
    """
    def get_http_client(self):
        return AsyncHTTPClient(io_loop=self.io_loop, force_instance=True,
                               defaults=dict(validate_cert=False))

    def get_httpserver_options(self):
        return dict(ssl_options=self.get_ssl_options())

    def get_ssl_options(self):
        """Return the SSL options for the test server.

        May be overridden by subclasses; the default is a bundled
        self-signed testing certificate.
        """
        # Testing keys were generated with:
        # openssl req -new -keyout tornado/test/test.key -out tornado/test/test.crt -nodes -days 3650 -x509
        here = os.path.dirname(__file__)
        return dict(
            certfile=os.path.join(here, 'test', 'test.crt'),
            keyfile=os.path.join(here, 'test', 'test.key'))

    def get_protocol(self):
        return 'https'
def gen_test(func=None, timeout=None):
    """Testing equivalent of ``@gen.coroutine``, to be applied to test methods.

    ``@gen.coroutine`` cannot be used on tests because the `.IOLoop` is not
    already running. ``@gen_test`` should be applied to test methods
    on subclasses of `AsyncTestCase`.

    Example::

        class MyTest(AsyncHTTPTestCase):
            @gen_test
            def test_something(self):
                response = yield gen.Task(self.fetch('/'))

    By default, ``@gen_test`` times out after 5 seconds. The timeout may be
    overridden globally with the ``ASYNC_TEST_TIMEOUT`` environment variable,
    or for each test with the ``timeout`` keyword argument::

        class MyTest(AsyncHTTPTestCase):
            @gen_test(timeout=10)
            def test_something_slow(self):
                response = yield gen.Task(self.fetch('/'))

    .. versionadded:: 3.1
       The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment
       variable.

    .. versionchanged:: 4.0
       The wrapper now passes along ``*args, **kwargs`` so it can be used
       on functions with arguments.
    """
    if timeout is None:
        timeout = get_async_test_timeout()

    def wrap(f):
        # Stack up several decorators to allow us to access the generator
        # object itself. In the innermost wrapper, we capture the generator
        # and save it in an attribute of self. Next, we run the wrapped
        # function through @gen.coroutine. Finally, the coroutine is
        # wrapped again to make it synchronous with run_sync.
        #
        # This is a good case study arguing for either some sort of
        # extensibility in the gen decorators or cancellation support.
        @functools.wraps(f)
        def pre_coroutine(self, *args, **kwargs):
            result = f(self, *args, **kwargs)
            # Save the generator/coroutine so post_coroutine can throw a
            # timeout back into it for a useful stack trace.
            if isinstance(result, GeneratorType) or iscoroutine(result):
                self._test_generator = result
            else:
                self._test_generator = None
            return result

        # Native coroutines are already awaitable; only generator-based
        # tests need the @gen.coroutine wrapper.
        if iscoroutinefunction(f):
            coro = pre_coroutine
        else:
            coro = gen.coroutine(pre_coroutine)

        @functools.wraps(coro)
        def post_coroutine(self, *args, **kwargs):
            try:
                return self.io_loop.run_sync(
                    functools.partial(coro, self, *args, **kwargs),
                    timeout=timeout)
            except TimeoutError as e:
                # run_sync raises an error with an unhelpful traceback.
                # Throw it back into the generator or coroutine so the stack
                # trace is replaced by the point where the test is stopped.
                self._test_generator.throw(e)
                # In case the test contains an overly broad except clause,
                # we may get back here. In this case re-raise the original
                # exception, which is better than nothing.
                raise
        return post_coroutine

    if func is not None:
        # Used like:
        #     @gen_test
        #     def f(self):
        #         pass
        return wrap(func)
    else:
        # Used like @gen_test(timeout=10)
        return wrap

# Without this attribute, nosetests will try to run gen_test as a test
# anywhere it is imported.
gen_test.__test__ = False  # type: ignore
class LogTrapTestCase(unittest.TestCase):
    """A test case that captures and discards all logging output
    if the test passes.

    Some libraries can produce a lot of logging output even when
    the test succeeds, so this class can be useful to minimize the noise.
    Simply use it as a base class for your test case. It is safe to combine
    with AsyncTestCase via multiple inheritance
    (``class MyTestCase(AsyncHTTPTestCase, LogTrapTestCase):``)

    This class assumes that only one log handler is configured and
    that it is a `~logging.StreamHandler`. This is true for both
    `logging.basicConfig` and the "pretty logging" configured by
    `tornado.options`. It is not compatible with other log buffering
    mechanisms, such as those provided by some test runners.

    .. deprecated:: 4.1
       Use the unittest module's ``--buffer`` option instead, or `.ExpectLog`.
    """
    def run(self, result=None):
        # NOTE(review): this accesses ``result.failures`` below, so it
        # assumes callers always pass a TestResult; ``run()`` with the
        # default ``result=None`` would raise AttributeError -- confirm.
        logger = logging.getLogger()
        if not logger.handlers:
            logging.basicConfig()
        handler = logger.handlers[0]
        if (len(logger.handlers) > 1 or
                not isinstance(handler, logging.StreamHandler)):
            # Logging has been configured in a way we don't recognize,
            # so just leave it alone.
            super(LogTrapTestCase, self).run(result)
            return
        old_stream = handler.stream
        try:
            # Swap in an in-memory buffer so log output is trapped while
            # the test runs.
            handler.stream = StringIO()
            gen_log.info("RUNNING TEST: " + str(self))
            old_error_count = len(result.failures) + len(result.errors)
            super(LogTrapTestCase, self).run(result)
            new_error_count = len(result.failures) + len(result.errors)
            if new_error_count != old_error_count:
                # The test failed or errored: replay the trapped output so
                # the logs are not lost.
                old_stream.write(handler.stream.getvalue())
        finally:
            handler.stream = old_stream
class ExpectLog(logging.Filter):
    """Context manager that captures and suppresses expected log output.

    Useful to make tests of error conditions less noisy, while still
    leaving unexpected log entries visible. *Not thread safe.*

    The attribute ``logged_stack`` is set to true if any exception
    stack trace was logged.

    Usage::

        with ExpectLog('tornado.application', "Uncaught exception"):
            error_response = self.fetch("/some_page")

    .. versionchanged:: 4.3
       Added the ``logged_stack`` attribute.
    """
    def __init__(self, logger, regex, required=True):
        """Constructs an ExpectLog context manager.

        :param logger: Logger object (or name of logger) to watch. Pass
            an empty string to watch the root logger.
        :param regex: Regular expression to match. Any log entries on
            the specified logger that match this regex will be suppressed.
        :param required: If true, an exception will be raised if the end of
            the ``with`` statement is reached without matching any log
            entries.
        """
        # Accept either a logger name or a Logger instance.
        if isinstance(logger, basestring_type):
            logger = logging.getLogger(logger)
        self.logger = logger
        self.regex = re.compile(regex)
        self.required = required
        self.matched = False
        self.logged_stack = False

    def filter(self, record):
        if record.exc_info:
            self.logged_stack = True
        # Suppress (return False) any record matching the expected pattern.
        if self.regex.match(record.getMessage()):
            self.matched = True
            return False
        return True

    def __enter__(self):
        self.logger.addFilter(self)
        return self

    def __exit__(self, typ, value, tb):
        self.logger.removeFilter(self)
        # Only complain about a missing match if the body itself succeeded.
        if not typ and self.required and not self.matched:
            raise Exception("did not get expected log message")
def main(**kwargs):
    """A simple test runner.

    This test runner is essentially equivalent to `unittest.main` from
    the standard library, but adds support for tornado-style option
    parsing and log formatting.

    The easiest way to run a test is via the command line::

        python -m tornado.testing tornado.test.stack_context_test

    See the standard library unittest module for ways in which tests can
    be specified.

    Projects with many tests may wish to define a test script like
    ``tornado/test/runtests.py``. This script should define a method
    ``all()`` which returns a test suite and then call
    `tornado.testing.main()`. Note that even when a test script is
    used, the ``all()`` test suite may be overridden by naming a
    single test on the command line::

        # Runs all tests
        python -m tornado.test.runtests
        # Runs one test
        python -m tornado.test.runtests tornado.test.stack_context_test

    Additional keyword arguments passed through to ``unittest.main()``.
    For example, use ``tornado.testing.main(verbosity=2)``
    to show many test details as they are run.
    See http://docs.python.org/library/unittest.html#unittest.main
    for full argument list.
    """
    from tornado.options import define, options, parse_command_line

    define('exception_on_interrupt', type=bool, default=True,
           help=("If true (default), ctrl-c raises a KeyboardInterrupt "
                 "exception. This prints a stack trace but cannot interrupt "
                 "certain operations. If false, the process is more reliably "
                 "killed, but does not print a stack trace."))

    # support the same options as unittest's command-line interface
    define('verbose', type=bool)
    define('quiet', type=bool)
    define('failfast', type=bool)
    define('catch', type=bool)
    define('buffer', type=bool)

    # parse_command_line returns the leftover (non-option) arguments.
    argv = [sys.argv[0]] + parse_command_line(sys.argv)

    if not options.exception_on_interrupt:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    # Translate tornado options into unittest.main keyword arguments.
    if options.verbose is not None:
        kwargs['verbosity'] = 2
    if options.quiet is not None:
        kwargs['verbosity'] = 0
    if options.failfast is not None:
        kwargs['failfast'] = True
    if options.catch is not None:
        kwargs['catchbreak'] = True
    if options.buffer is not None:
        kwargs['buffer'] = True

    if __name__ == '__main__' and len(argv) == 1:
        print("No tests specified", file=sys.stderr)
        sys.exit(1)
    try:
        # In order to be able to run tests by their fully-qualified name
        # on the command line without importing all tests here,
        # module must be set to None. Python 3.2's unittest.main ignores
        # defaultTest if no module is given (it tries to do its own
        # test discovery, which is incompatible with auto2to3), so don't
        # set module if we're not asking for a specific test.
        if len(argv) > 1:
            unittest.main(module=None, argv=argv, **kwargs)
        else:
            unittest.main(defaultTest="all", argv=argv, **kwargs)
    except SystemExit as e:
        # unittest.main always raises SystemExit; log the outcome before
        # letting it propagate.
        if e.code == 0:
            gen_log.info('PASS')
        else:
            gen_log.error('FAIL')
        raise

if __name__ == '__main__':
    main()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Resources for Rackspace Auto Scale."""
import copy
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
# pyrax is an optional dependency; fall back to stand-in exception types
# so the rest of the module can reference them unconditionally.
try:
    from pyrax.exceptions import Forbidden
    from pyrax.exceptions import NotFound
    PYRAX_INSTALLED = True
except ImportError:
    class Forbidden(Exception):
        """Dummy pyrax exception - only used for testing."""

    class NotFound(Exception):
        """Dummy pyrax exception - only used for testing."""

    PYRAX_INSTALLED = False
class Group(resource.Resource):
    """Represents a scaling group."""

    # pyrax differs drastically from the actual Auto Scale API. We'll prefer
    # the true API here, but since pyrax doesn't support the full flexibility
    # of the API, we'll have to restrict what users can provide.

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    # properties are identical to the API POST /groups.
    PROPERTIES = (
        GROUP_CONFIGURATION, LAUNCH_CONFIGURATION,
    ) = (
        'groupConfiguration', 'launchConfiguration',
    )

    _GROUP_CONFIGURATION_KEYS = (
        GROUP_CONFIGURATION_MAX_ENTITIES, GROUP_CONFIGURATION_COOLDOWN,
        GROUP_CONFIGURATION_NAME, GROUP_CONFIGURATION_MIN_ENTITIES,
        GROUP_CONFIGURATION_METADATA,
    ) = (
        'maxEntities', 'cooldown',
        'name', 'minEntities',
        'metadata',
    )

    _LAUNCH_CONFIG_KEYS = (
        LAUNCH_CONFIG_ARGS, LAUNCH_CONFIG_TYPE,
    ) = (
        'args', 'type',
    )

    _LAUNCH_CONFIG_ARGS_KEYS = (
        LAUNCH_CONFIG_ARGS_LOAD_BALANCERS,
        LAUNCH_CONFIG_ARGS_SERVER,
    ) = (
        'loadBalancers',
        'server',
    )

    _LAUNCH_CONFIG_ARGS_LOAD_BALANCER_KEYS = (
        LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID,
        LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT,
    ) = (
        'loadBalancerId',
        'port',
    )

    _LAUNCH_CONFIG_ARGS_SERVER_KEYS = (
        LAUNCH_CONFIG_ARGS_SERVER_NAME, LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF,
        LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF,
        LAUNCH_CONFIG_ARGS_SERVER_METADATA,
        LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY,
        LAUNCH_CONFIG_ARGS_SERVER_NETWORKS,
        LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG,
        LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME,
        LAUNCH_CONFIG_ARGS_SERVER_USER_DATA,
        LAUNCH_CONFIG_ARGS_SERVER_CDRIVE
    ) = (
        'name', 'flavorRef',
        'imageRef',
        'metadata',
        'personality',
        'networks',
        'diskConfig',  # technically maps to OS-DCF:diskConfig
        'key_name',
        'user_data',
        'config_drive'
    )

    _LAUNCH_CONFIG_ARGS_SERVER_NETWORK_KEYS = (
        LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID,
    ) = (
        'uuid',
    )

    _launch_configuration_args_schema = {
        LAUNCH_CONFIG_ARGS_LOAD_BALANCERS: properties.Schema(
            properties.Schema.LIST,
            _('List of load balancers to hook the '
              'server up to. If not specified, no '
              'load balancing will be configured.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID: properties.Schema(
                        properties.Schema.STRING,
                        _('ID of the load balancer.'),
                        required=True
                    ),
                    LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT: properties.Schema(
                        properties.Schema.INTEGER,
                        _('Server port to connect the load balancer to.'),
                        required=True
                    ),
                },
            )
        ),
        LAUNCH_CONFIG_ARGS_SERVER: properties.Schema(
            properties.Schema.MAP,
            _('Server creation arguments, as accepted by the Cloud Servers '
              'server creation API.'),
            schema={
                LAUNCH_CONFIG_ARGS_SERVER_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Server name.'),
                    required=True
                ),
                LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF: properties.Schema(
                    properties.Schema.STRING,
                    _('Flavor ID.'),
                    required=True
                ),
                LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF: properties.Schema(
                    properties.Schema.STRING,
                    _('Image ID.'),
                    required=True
                ),
                LAUNCH_CONFIG_ARGS_SERVER_METADATA: properties.Schema(
                    properties.Schema.MAP,
                    _('Metadata key and value pairs.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY: properties.Schema(
                    properties.Schema.MAP,
                    _('File path and contents.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_CDRIVE: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('Enable config drive on the instance.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_USER_DATA: properties.Schema(
                    properties.Schema.STRING,
                    _('User data for bootstrapping the instance.')
                ),
                LAUNCH_CONFIG_ARGS_SERVER_NETWORKS: properties.Schema(
                    properties.Schema.LIST,
                    _('Networks to attach to. If unspecified, the instance '
                      'will be attached to the public Internet and private '
                      'ServiceNet networks.'),
                    schema=properties.Schema(
                        properties.Schema.MAP,
                        schema={
                            LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID:
                                properties.Schema(
                                    properties.Schema.STRING,
                                    _('UUID of network to attach to.'),
                                    required=True)
                        }
                    )
                ),
                LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG: properties.Schema(
                    properties.Schema.STRING,
                    _('Configuration specifying the partition layout. AUTO to '
                      'create a partition utilizing the entire disk, and '
                      'MANUAL to create a partition matching the source '
                      'image.'),
                    constraints=[
                        constraints.AllowedValues(['AUTO', 'MANUAL']),
                    ]
                ),
                LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of a previously created SSH keypair to allow '
                      'key-based authentication to the server.')
                ),
            },
            required=True
        ),
    }

    properties_schema = {
        GROUP_CONFIGURATION: properties.Schema(
            properties.Schema.MAP,
            _('Group configuration.'),
            schema={
                GROUP_CONFIGURATION_MAX_ENTITIES: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Maximum number of entities in this scaling group.'),
                    required=True
                ),
                GROUP_CONFIGURATION_COOLDOWN: properties.Schema(
                    properties.Schema.NUMBER,
                    _('Number of seconds after capacity changes during '
                      'which further capacity changes are disabled.'),
                    required=True
                ),
                GROUP_CONFIGURATION_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('Name of the scaling group.'),
                    required=True
                ),
                GROUP_CONFIGURATION_MIN_ENTITIES: properties.Schema(
                    properties.Schema.INTEGER,
                    _('Minimum number of entities in this scaling group.'),
                    required=True
                ),
                GROUP_CONFIGURATION_METADATA: properties.Schema(
                    properties.Schema.MAP,
                    _('Arbitrary key/value metadata to associate with '
                      'this group.')
                ),
            },
            required=True,
            update_allowed=True
        ),
        LAUNCH_CONFIGURATION: properties.Schema(
            properties.Schema.MAP,
            _('Launch configuration.'),
            schema={
                LAUNCH_CONFIG_ARGS: properties.Schema(
                    properties.Schema.MAP,
                    _('Type-specific server launching arguments.'),
                    schema=_launch_configuration_args_schema,
                    required=True
                ),
                LAUNCH_CONFIG_TYPE: properties.Schema(
                    properties.Schema.STRING,
                    _('Launch configuration method. Only launch_server '
                      'is currently supported.'),
                    required=True,
                    constraints=[
                        constraints.AllowedValues(['launch_server']),
                    ]
                ),
            },
            required=True,
            update_allowed=True
        ),
        # We don't allow scaling policies to be specified here, despite the
        # fact that the API supports it. Users should use the ScalingPolicy
        # resource.
    }

    def _get_group_config_args(self, groupconf):
        """Get the groupConfiguration-related pyrax arguments."""
        return dict(
            name=groupconf[self.GROUP_CONFIGURATION_NAME],
            cooldown=groupconf[self.GROUP_CONFIGURATION_COOLDOWN],
            min_entities=groupconf[self.GROUP_CONFIGURATION_MIN_ENTITIES],
            max_entities=groupconf[self.GROUP_CONFIGURATION_MAX_ENTITIES],
            metadata=groupconf.get(self.GROUP_CONFIGURATION_METADATA, None))

    def _get_launch_config_args(self, launchconf):
        """Get the launchConfiguration-related pyrax arguments."""
        lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
        server_args = lcargs[self.LAUNCH_CONFIG_ARGS_SERVER]
        lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
        # Deep-copy so we don't mutate the validated property data when
        # coercing the load balancer IDs below.
        lbs = copy.deepcopy(lb_args)
        if lbs:
            for lb in lbs:
                # pyrax expects integer load balancer IDs.
                lbid = int(lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID])
                lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID] = lbid
        personality = server_args.get(
            self.LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY)
        if personality:
            # Convert the {path: contents} map into pyrax's list form.
            personality = [{'path': k, 'contents': v} for k, v in
                           personality.items()]
        user_data = server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_USER_DATA)
        # The config drive is forced on whenever non-blank user_data is
        # supplied, since the server needs it to read the user data.
        cdrive = (server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_CDRIVE) or
                  bool(user_data is not None and len(user_data.strip())))
        return dict(
            launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
            # FIX: index server_args with the LAUNCH_CONFIG_ARGS_SERVER_*
            # constants. The previous code used GROUP_CONFIGURATION_NAME /
            # GROUP_CONFIGURATION_METADATA, which only worked because those
            # constants happen to share the same string values.
            server_name=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_NAME],
            image=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF],
            flavor=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF],
            disk_config=server_args.get(
                self.LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG),
            metadata=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_METADATA),
            config_drive=cdrive,
            user_data=user_data,
            personality=personality,
            networks=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_NETWORKS),
            load_balancers=lbs,
            key_name=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME),
        )

    def _get_create_args(self):
        """Get pyrax-style arguments for creating a scaling group."""
        args = self._get_group_config_args(
            self.properties[self.GROUP_CONFIGURATION])
        # pyrax uses 'group_metadata' for the group-level metadata so it
        # doesn't collide with the server metadata key.
        args['group_metadata'] = args.pop('metadata')
        args.update(self._get_launch_config_args(
            self.properties[self.LAUNCH_CONFIGURATION]))
        return args

    def handle_create(self):
        """Create the autoscaling group and set resource_id.

        The resource_id is set to the resulting group's ID.
        """
        asclient = self.auto_scale()
        group = asclient.create(**self._get_create_args())
        self.resource_id_set(str(group.id))

    def handle_check(self):
        # A successful GET is sufficient proof the group still exists.
        self.auto_scale().get(self.resource_id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update the group configuration and the launch configuration."""
        asclient = self.auto_scale()
        if self.GROUP_CONFIGURATION in prop_diff:
            args = self._get_group_config_args(
                prop_diff[self.GROUP_CONFIGURATION])
            asclient.replace(self.resource_id, **args)
        if self.LAUNCH_CONFIGURATION in prop_diff:
            args = self._get_launch_config_args(
                prop_diff[self.LAUNCH_CONFIGURATION])
            asclient.replace_launch_config(self.resource_id, **args)

    def handle_delete(self):
        """Delete the scaling group.

        Since Auto Scale doesn't allow deleting a group until all its servers
        are gone, we must set the minEntities and maxEntities of the group to 0
        and then keep trying the delete until Auto Scale has deleted all the
        servers and the delete will succeed.
        """
        if self.resource_id is None:
            return
        asclient = self.auto_scale()
        args = self._get_group_config_args(
            self.properties[self.GROUP_CONFIGURATION])
        args['min_entities'] = 0
        args['max_entities'] = 0
        try:
            asclient.replace(self.resource_id, **args)
        except NotFound:
            # Already gone; nothing to scale down.
            pass

    def check_delete_complete(self, result):
        """Try the delete operation until it succeeds."""
        if self.resource_id is None:
            return True
        try:
            self.auto_scale().delete(self.resource_id)
        except Forbidden:
            # Servers still exist; retry on the next poll.
            return False
        except NotFound:
            return True
        else:
            return True

    def auto_scale(self):
        # Returns the pyrax autoscale client from the Heat client plugin.
        return self.client('auto_scale')
class ScalingPolicy(resource.Resource):
    """Represents a Rackspace Auto Scale scaling policy."""

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    PROPERTIES = (
        GROUP, NAME, CHANGE, CHANGE_PERCENT, DESIRED_CAPACITY,
        COOLDOWN, TYPE, ARGS,
    ) = (
        'group', 'name', 'change', 'changePercent', 'desiredCapacity',
        'cooldown', 'type', 'args',
    )

    properties_schema = {
        # group isn't in the post body, but it's in the URL to post to.
        GROUP: properties.Schema(
            properties.Schema.STRING,
            _('Scaling group ID that this policy belongs to.'),
            required=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of this scaling policy.'),
            required=True,
            update_allowed=True
        ),
        CHANGE: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount to add to or remove from current number of instances. '
              'Incompatible with changePercent and desiredCapacity.'),
            update_allowed=True
        ),
        CHANGE_PERCENT: properties.Schema(
            properties.Schema.NUMBER,
            _('Percentage-based change to add or remove from current number '
              'of instances. Incompatible with change and desiredCapacity.'),
            update_allowed=True
        ),
        DESIRED_CAPACITY: properties.Schema(
            properties.Schema.INTEGER,
            _('Absolute number to set the number of instances to. '
              'Incompatible with change and changePercent.'),
            update_allowed=True
        ),
        COOLDOWN: properties.Schema(
            properties.Schema.NUMBER,
            _('Number of seconds after a policy execution during which '
              'further executions are disabled.'),
            update_allowed=True
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Type of this scaling policy. Specifies how the policy is '
              'executed.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['webhook', 'schedule',
                                           'cloud_monitoring']),
            ],
            update_allowed=True
        ),
        ARGS: properties.Schema(
            properties.Schema.MAP,
            _('Type-specific arguments for the policy.'),
            update_allowed=True
        ),
    }

    def _get_args(self, properties):
        """Get pyrax-style create arguments for scaling policies."""
        args = dict(
            scaling_group=properties[self.GROUP],
            name=properties[self.NAME],
            policy_type=properties[self.TYPE],
            cooldown=properties[self.COOLDOWN],
        )
        # change / changePercent / desiredCapacity are mutually exclusive;
        # the first one present wins, mirroring the API's precedence.
        if properties.get(self.CHANGE) is not None:
            args['change'] = properties[self.CHANGE]
        elif properties.get(self.CHANGE_PERCENT) is not None:
            args['change'] = properties[self.CHANGE_PERCENT]
            args['is_percent'] = True
        elif properties.get(self.DESIRED_CAPACITY) is not None:
            args['desired_capacity'] = properties[self.DESIRED_CAPACITY]
        if properties.get(self.ARGS) is not None:
            args['args'] = properties[self.ARGS]
        return args

    def handle_create(self):
        """Create the scaling policy and initialize the resource ID.

        The resource ID is initialized to {group_id}:{policy_id}.
        """
        asclient = self.auto_scale()
        args = self._get_args(self.properties)
        policy = asclient.add_policy(**args)
        # Store the group ID alongside the policy ID so the policy can be
        # addressed later without re-reading the properties.
        resource_id = '%s:%s' % (self.properties[self.GROUP], policy.id)
        self.resource_id_set(resource_id)

    def _get_policy_id(self):
        # resource_id is '{group_id}:{policy_id}'; return the policy part.
        return self.resource_id.split(':', 1)[1]

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # replace_policy requires the full argument set, so build it from
        # the merged template properties rather than just the diff.
        asclient = self.auto_scale()
        args = self._get_args(tmpl_diff['Properties'])
        args['policy'] = self._get_policy_id()
        asclient.replace_policy(**args)

    def handle_delete(self):
        """Delete the policy if it exists."""
        asclient = self.auto_scale()
        if self.resource_id is None:
            return
        policy_id = self._get_policy_id()
        try:
            asclient.delete_policy(self.properties[self.GROUP], policy_id)
        except NotFound:
            # Already deleted; treat as success.
            pass

    def auto_scale(self):
        # Returns the pyrax autoscale client from the Heat client plugin.
        return self.client('auto_scale')
class WebHook(resource.Resource):
    """Represents a Rackspace AutoScale webhook.

    Exposes the URLs of the webhook as attributes.
    """

    support_status = support.SupportStatus(
        status=support.UNSUPPORTED,
        message=_('This resource is not supported, use at your own risk.'))

    PROPERTIES = (
        POLICY, NAME, METADATA,
    ) = (
        'policy', 'name', 'metadata',
    )

    ATTRIBUTES = (
        EXECUTE_URL, CAPABILITY_URL,
    ) = (
        'executeUrl', 'capabilityUrl',
    )

    properties_schema = {
        POLICY: properties.Schema(
            properties.Schema.STRING,
            _('The policy that this webhook should apply to, in '
              '{group_id}:{policy_id} format. Generally a Ref to a Policy '
              'resource.'),
            required=True
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The name of this webhook.'),
            required=True,
            update_allowed=True
        ),
        METADATA: properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary key/value metadata for this webhook.'),
            update_allowed=True
        ),
    }

    attributes_schema = {
        EXECUTE_URL: attributes.Schema(
            _("The url for executing the webhook (requires auth)."),
            cache_mode=attributes.Schema.CACHE_NONE
        ),
        CAPABILITY_URL: attributes.Schema(
            _("The url for executing the webhook (doesn't require auth)."),
            cache_mode=attributes.Schema.CACHE_NONE
        ),
    }

    def _get_args(self, props):
        # The policy property is '{group_id}:{policy_id}'; split it into
        # the two IDs pyrax expects.
        group_id, policy_id = props[self.POLICY].split(':', 1)
        return dict(
            name=props[self.NAME],
            scaling_group=group_id,
            policy=policy_id,
            metadata=props.get(self.METADATA))

    def handle_create(self):
        asclient = self.auto_scale()
        args = self._get_args(self.properties)
        webhook = asclient.add_webhook(**args)
        self.resource_id_set(webhook.id)

        # Persist the webhook URLs in resource data so the attributes can
        # be resolved later without another API call.
        for link in webhook.links:
            rel_to_key = {'self': 'executeUrl',
                          'capability': 'capabilityUrl'}
            key = rel_to_key.get(link['rel'])
            if key is not None:
                # Stored encoded; _resolve_attribute decodes on the way out.
                url = link['href'].encode('utf-8')
                self.data_set(key, url)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        asclient = self.auto_scale()
        args = self._get_args(json_snippet['Properties'])
        args['webhook'] = self.resource_id
        asclient.replace_webhook(**args)

    def _resolve_attribute(self, key):
        v = self.data().get(key)
        if v is not None:
            return v.decode('utf-8')
        else:
            return None

    def handle_delete(self):
        if self.resource_id is None:
            return
        asclient = self.auto_scale()
        group_id, policy_id = self.properties[self.POLICY].split(':', 1)
        try:
            asclient.delete_webhook(group_id, policy_id, self.resource_id)
        except NotFound:
            # Already deleted; treat as success.
            pass

    def auto_scale(self):
        # Returns the pyrax autoscale client from the Heat client plugin.
        return self.client('auto_scale')
def resource_mapping():
    """Map Heat resource type names to their implementing classes."""
    mapping = {
        'Rackspace::AutoScale::Group': Group,
        'Rackspace::AutoScale::ScalingPolicy': ScalingPolicy,
        'Rackspace::AutoScale::WebHook': WebHook,
    }
    return mapping
def available_resource_mapping():
    """Return the resource mapping only when pyrax is importable."""
    if not PYRAX_INSTALLED:
        return {}
    return resource_mapping()
|
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__all__ = []
class DataGenerator(object):
    """
    DataGenerator is a general Base class for user to inherit
    A user who wants to define his/her own python processing logic
    with paddle.distributed.InMemoryDataset/QueueDataset should
    inherit this class.

    Subclasses override generate_sample() (per-line parsing) and may
    override generate_batch() (per-batch post-processing); the
    run_from_memory()/run_from_stdin() drivers then stream serialized
    samples to stdout for the datafeed to consume.
    """

    def __init__(self):
        # (name, type) pairs recorded during serialization; filled in
        # lazily by the concrete _gen_str() implementations.
        self._proto_info = None
        self.batch_size_ = 32

    def set_batch(self, batch_size):
        '''
        Set batch size of current DataGenerator
        This is necessary only if a user wants to define generator_batch

        Args:
            batch_size(int): number of parsed samples accumulated before
                generate_batch() is invoked.

        Example:
            .. code-block:: python
                import paddle.distributed.fleet.data_generator as dg
                class MyData(dg.DataGenerator):
                    def generate_sample(self, line):
                        def local_iter():
                            int_words = [int(x) for x in line.split()]
                            yield ("words", int_words)
                        return local_iter
                    def generate_batch(self, samples):
                        def local_iter():
                            for s in samples:
                                yield ("words", s[1].extend([s[1][0]]))
                mydata = MyData()
                mydata.set_batch(128)
        '''
        self.batch_size_ = batch_size

    def _flush_batch(self, batch_samples):
        # Run the accumulated samples through the generate_batch() hook and
        # write each serialized sample to stdout. Shared by both drivers.
        batch_iter = self.generate_batch(batch_samples)
        for sample in batch_iter():
            sys.stdout.write(self._gen_str(sample))

    def run_from_memory(self):
        '''
        This function generator data from memory, it is usually used for
        debug and benchmarking

        generate_sample() is called once with line=None; its returned
        iterator supplies every sample.

        Example:
            .. code-block:: python
                import paddle.distributed.fleet.data_generator as dg
                class MyData(dg.DataGenerator):
                    def generate_sample(self, line):
                        def local_iter():
                            yield ("words", [1, 2, 3, 4])
                        return local_iter
                mydata = MyData()
                mydata.run_from_memory()
        '''
        batch_samples = []
        line_iter = self.generate_sample(None)
        for user_parsed_line in line_iter():
            # identity check — the old `== None` invoked __eq__ on
            # arbitrary user-returned objects
            if user_parsed_line is None:
                continue
            batch_samples.append(user_parsed_line)
            if len(batch_samples) == self.batch_size_:
                self._flush_batch(batch_samples)
                batch_samples = []
        if batch_samples:
            # flush the final, possibly short, batch
            self._flush_batch(batch_samples)

    def run_from_stdin(self):
        '''
        This function reads the data row from stdin, parses it with the
        process function, and further parses the return value of the
        process function with the _gen_str function. The parsed data will
        be wrote to stdout and the corresponding protofile will be
        generated.

        Example:
            .. code-block:: python
                import paddle.distributed.fleet.data_generator as dg
                class MyData(dg.DataGenerator):
                    def generate_sample(self, line):
                        def local_iter():
                            int_words = [int(x) for x in line.split()]
                            yield ("words", [int_words])
                        return local_iter
                mydata = MyData()
                mydata.run_from_stdin()
        '''
        batch_samples = []
        for line in sys.stdin:
            line_iter = self.generate_sample(line)
            for user_parsed_line in line_iter():
                if user_parsed_line is None:
                    continue
                batch_samples.append(user_parsed_line)
                if len(batch_samples) == self.batch_size_:
                    self._flush_batch(batch_samples)
                    batch_samples = []
        if batch_samples:
            self._flush_batch(batch_samples)

    def _gen_str(self, line):
        '''
        Serialize one sample produced by generate_sample()/generate_batch()
        into text the datafeed can read, updating proto_info information.

        Args:
            line(str): the output of the process() function rewritten by user.

        Returns:
            Return a string data that can be read directly by the datafeed.
        '''
        raise NotImplementedError(
            "pls use MultiSlotDataGenerator or PairWiseDataGenerator")

    def generate_sample(self, line):
        '''
        This function needs to be overridden by the user to process the
        original data row into a list or tuple.

        Args:
            line(str): the original data row

        Returns:
            Returns the data processed by the user.
            The data format is list or tuple:
            [(name, [feasign, ...]), ...]
            or ((name, [feasign, ...]), ...)

        Note:
            The type of feasigns must be in int or float. Once the float
            element appears in the feasign, the type of that slot will be
            processed into a float.
        '''
        raise NotImplementedError(
            "Please rewrite this function to return a list or tuple: " +
            "[(name, [feasign, ...]), ...] or ((name, [feasign, ...]), ...)")

    def generate_batch(self, samples):
        '''
        Optional hook for batch-level post-processing (e.g. padding to the
        longest sample in the batch). The default replays samples unchanged.

        Args:
            samples(list tuple): generated sample from generate_sample

        Returns:
            a python generator, the same format as return value of
            generate_sample
        '''

        def local_iter():
            for sample in samples:
                yield sample

        return local_iter
# TODO: guru4elephant
# add more generalized DataGenerator that can adapt user-defined slot
# for example, [(name, float_list), (name, str_list), (name, int_list)]
class MultiSlotStringDataGenerator(DataGenerator):
    def _gen_str(self, line):
        '''
        Serialize one sample for the MultiSlotDataFeed.

        The input line will be in this format:
            >>> [(name, [str(feasign), ...]), ...]
            >>> or ((name, [str(feasign), ...]), ...)
        The output will be in this format:
            >>> [ids_num id1 id2 ...] ...
        For example, if the input is like this:
            >>> [("words", ["1926", "08", "17"]), ("label", ["1"])]
            >>> or (("words", ["1926", "08", "17"]), ("label", ["1"]))
        the output will be:
            >>> 3 1926 08 17 1 1

        Args:
            line(str): the output of the process() function rewritten by user.

        Returns:
            Return a string data that can be read directly by the
            MultiSlotDataFeed.
        '''
        if sys.version > '3' and isinstance(line, zip):
            # py3 zip objects are one-shot iterators; materialize first
            line = list(line)
        if not isinstance(line, (list, tuple)):
            raise ValueError(
                "the output of process() must be in list or tuple type"
                "Examples: [('words', ['1926', '08', '17']), ('label', ['1'])]")
        # Flatten to "<count> <elem>..." per slot; a single join avoids the
        # quadratic repeated string concatenation of the old loop.
        tokens = []
        for _name, elements in line:
            tokens.append(str(len(elements)))
            tokens.extend(elements)
        return " ".join(tokens) + "\n"
class MultiSlotDataGenerator(DataGenerator):
    def _gen_str(self, line):
        '''
        Serialize one sample for the MultiSlotDataFeed, updating proto_info.

        The input line will be in this format:
            >>> [(name, [feasign, ...]), ...]
            >>> or ((name, [feasign, ...]), ...)
        The output will be in this format:
            >>> [ids_num id1 id2 ...] ...
        The proto_info will be in this format:
            >>> [(name, type), ...]
        For example, an input of
            [("words", [1926, 8, 17]), ("label", [1])]
        yields the output "3 1926 8 17 1 1" and proto_info
        [("words", "uint64"), ("label", "uint64")]. A slot is promoted to
        "float" as soon as a float element is seen in it.

        Args:
            line(str): the output of the process() function rewritten by user.

        Returns:
            Return a string data that can be read directly by the
            MultiSlotDataFeed.
        '''
        if sys.version > '3' and isinstance(line, zip):
            line = list(line)
        if not isinstance(line, list) and not isinstance(line, tuple):
            raise ValueError(
                "the output of process() must be in list or tuple type"
                "Example: [('words', [1926, 08, 17]), ('label', [1])]")
        # Bug fix: Python 3 has no `long`, so the old unconditional
        # `isinstance(elem, long)` raised NameError instead of the intended
        # ValueError for non-numeric elements.
        if sys.version > '3':
            int_types = (int,)
        else:
            int_types = (int, long)  # noqa: F821 -- py2 branch only
        output = ""
        if self._proto_info is None:
            # first line seen: build proto_info from scratch
            self._proto_info = []
            for name, elements in line:
                self._check_slot(name, elements)
                self._proto_info.append((name, "uint64"))
                if output:
                    output += " "
                output += str(len(elements))
                for elem in elements:
                    if isinstance(elem, float):
                        # one float promotes the whole slot
                        self._proto_info[-1] = (name, "float")
                    elif not isinstance(elem, int_types):
                        raise ValueError(
                            "the type of element%s must be in int or float" %
                            type(elem))
                    output += " " + str(elem)
        else:
            # subsequent lines must agree with the recorded field layout
            if len(line) != len(self._proto_info):
                raise ValueError(
                    "the complete field set of two given line are inconsistent.")
            for index, item in enumerate(line):
                name, elements = item
                self._check_slot(name, elements)
                if name != self._proto_info[index][0]:
                    raise ValueError(
                        "the field name of two given line are not match: require<%s>, get<%s>."
                        % (self._proto_info[index][0], name))
                if output:
                    output += " "
                output += str(len(elements))
                for elem in elements:
                    if self._proto_info[index][1] != "float":
                        if isinstance(elem, float):
                            self._proto_info[index] = (name, "float")
                        elif not isinstance(elem, int_types):
                            raise ValueError(
                                "the type of element%s must be in int or float"
                                % type(elem))
                    output += " " + str(elem)
        return output + "\n"

    @staticmethod
    def _check_slot(name, elements):
        # Shared sanity checks for one (name, elements) slot; the same
        # validation used to be duplicated in both serialization branches.
        if not isinstance(name, str):
            raise ValueError("name%s must be in str type" % type(name))
        if not isinstance(elements, list):
            raise ValueError("elements%s must be in list type" %
                             type(elements))
        if not elements:
            raise ValueError(
                "the elements of each field can not be empty, you need padding it in process()."
            )
|
|
import numpy as np
import random
import theano
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import keras.callbacks as cbks
from matplotlib import pyplot as plt
from matplotlib import animation
##############################
# model DrawActivations test #
##############################
print('Running DrawActivations test')
# hyper-parameters for the activation-visualization run; only 512 training
# images and a single test image are kept to make the demo fast
nb_classes = 10
batch_size = 128
nb_epoch = 10
max_train_samples = 512
max_test_samples = 1
np.random.seed(1337)
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape to (n, channels=1, 28, 28) and scale pixel values into [0, 1]
X_train = X_train.reshape(-1,1,28,28)[:max_train_samples]
X_train = X_train.astype("float32")
X_train /= 255
X_test = X_test.reshape(-1,1,28,28)[:max_test_samples]
X_test = X_test.astype("float32")
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
class Frames(object):
    """Accumulates per-subplot frame data plus one title per animation step."""

    def __init__(self, n_plots=16):
        # one independent frame series per subplot
        self._n_frames = 0
        self._framedata = [[] for _ in range(n_plots)]
        self._titles = []

    def add_frame(self, i, frame):
        """Append *frame* to the series shown in subplot *i*."""
        self._framedata[i].append(frame)

    def set_title(self, title):
        """Record the title for the next animation step."""
        self._titles.append(title)
class SubplotTimedAnimation(animation.TimedAnimation):
    """Animate a Frames collection across a grid of matplotlib subplots."""

    def __init__(self, fig, frames, grid=(4, 4), interval=10, blit=False, **kwargs):
        self.n_plots = grid[0] * grid[1]
        self.axes = [fig.add_subplot(grid[0], grid[1], i + 1) for i in range(self.n_plots)]
        for axis in self.axes:
            # hide tick marks; the panels show images, not charts
            axis.get_xaxis().set_ticks([])
            axis.get_yaxis().set_ticks([])
        self.frames = frames
        # seed every panel with the first frame of its series
        self.imgs = [self.axes[i].imshow(frames._framedata[i][0], interpolation='nearest', cmap='bone') for i in range(self.n_plots)]
        self.title = fig.suptitle('')
        super(SubplotTimedAnimation, self).__init__(fig, interval=interval, blit=blit, **kwargs)

    def _draw_frame(self, j):
        # advance every panel to animation step j
        for i in range(self.n_plots):
            self.imgs[i].set_data(self.frames._framedata[i][j])
        if len(self.frames._titles) > j:
            self.title.set_text(self.frames._titles[j])
        self._drawn_artists = self.imgs

    def new_frame_seq(self):
        # one animation step per recorded frame (series 0 defines the length;
        # assumes all series have equal length — TODO confirm)
        return iter(range(len(self.frames._framedata[0])))

    def _init_draw(self):
        # blank all panels before the first frame is drawn
        for img in self.imgs:
            img.set_data([[]])
def combine_imgs(imgs, grid=(1,1)):
    """Tile a stack of equally-sized 2-D images into one mosaic.

    Args:
        imgs: array of shape (n_imgs, img_h, img_w).
        grid: (rows, cols); rows * cols must equal n_imgs.

    Returns:
        2-D array of shape (rows * img_h, cols * img_w), row-major:
        image k lands in cell (k // cols, k % cols).

    Raises:
        ValueError: if the number of images does not fill the grid exactly.
    """
    n_imgs, img_h, img_w = imgs.shape
    if n_imgs != grid[0] * grid[1]:
        raise ValueError('combine_imgs: got %d images for a %dx%d grid'
                         % (n_imgs, grid[0], grid[1]))
    combined = np.zeros((grid[0] * img_h, grid[1] * img_w))
    for i in range(grid[0]):
        for j in range(grid[1]):
            # Bug fix: row i starts at image index i * cols (grid[1]); the old
            # code used grid[0], which tiled wrong images on non-square grids
            # (all grids used in this file happen to be square or single-row).
            combined[img_h*i:img_h*(i+1), img_w*j:img_w*(j+1)] = \
                imgs[grid[1] * i + j]
    return combined
class DrawActivations(Callback):
    """Keras callback that snapshots layer activations for X_test during
    training and plays them back as an animation when training ends."""

    def __init__(self, figsize):
        self.fig = plt.figure(figsize=figsize)

    def on_train_begin(self, logs={}):
        # NOTE(review): mutable default `logs={}` matches the old Keras
        # callback signature; it is never mutated here.
        self.imgs = Frames(n_plots=5)
        # compile theano functions for a random subset of feature maps from
        # each conv layer, and the full dense-layer activation vector
        layers_0_ids = np.random.choice(32, 16, replace=False)
        self.test_layer0 = theano.function([self.model.get_input()], self.model.layers[1].get_output(train=False)[0, layers_0_ids])
        layers_1_ids = np.random.choice(64, 36, replace=False)
        self.test_layer1 = theano.function([self.model.get_input()], self.model.layers[5].get_output(train=False)[0, layers_1_ids])
        # layer index 10: presumably the relu output of the 256-unit dense
        # layer of the model defined below (reshaped 16x16) — TODO confirm
        self.test_layer2 = theano.function([self.model.get_input()], self.model.layers[10].get_output(train=False)[0])

    def on_epoch_begin(self, epoch, logs={}):
        self.epoch = epoch

    def on_batch_end(self, batch, logs={}):
        # capture a snapshot every 5 batches: input image, two conv layers,
        # dense activations, and the 10-way output distribution
        if batch % 5 == 0:
            self.imgs.add_frame(0, X_test[0,0])
            self.imgs.add_frame(1, combine_imgs(self.test_layer0(X_test), grid=(4, 4)))
            self.imgs.add_frame(2, combine_imgs(self.test_layer1(X_test), grid=(6, 6)))
            self.imgs.add_frame(3, self.test_layer2(X_test).reshape((16,16)))
            self.imgs.add_frame(4, self.model._predict(X_test)[0].reshape((1,10)))
            self.imgs.set_title('Epoch #%d - Batch #%d' % (self.epoch, batch))

    def on_train_end(self, logs={}):
        # replay everything recorded during training as a 1x5 animation
        anim = SubplotTimedAnimation(self.fig, self.imgs, grid=(1,5), interval=10, blit=False, repeat_delay=1000)
        # anim.save('test_gif.gif', fps=15, writer='imagemagick')
        plt.show()
# model = Sequential()
# model.add(Dense(784, 50))
# model.add(Activation('relu'))
# model.add(Dense(50, 10))
# model.add(Activation('softmax'))
# conv -> pool -> conv -> pool -> dense CNN (old Keras 0.x positional-arg API)
model = Sequential()
model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
# NOTE(review): 64*8*8 assumes the 'full' border mode plus the two 2x2 pools
# leave 8x8 feature maps — confirm against the Keras version in use
model.add(Dense(64*8*8, 256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, 10, W_regularizer = l2(0.1)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Fit the model; DrawActivations renders intermediate activations as it runs
draw_weights = DrawActivations(figsize=(5.4, 1.35))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, callbacks=[draw_weights])
##########################
# model checkpoint tests #
##########################
print('Running ModelCheckpoint test')
nb_classes = 10
batch_size = 128
nb_epoch = 20
# small sample size to overfit on training data
max_train_samples = 50
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000,784)[:max_train_samples]
X_test = X_test.reshape(10000,784)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
# scale pixel values into [0, 1]
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
# Create a slightly larger network than required to test best validation save only
model = Sequential()
model.add(Dense(784, 500))
model.add(Activation('relu'))
model.add(Dense(500, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# test file location
path = "/tmp"
filename = "model_weights.hdf5"
import os
f = os.path.join(path, filename)
print("Test model checkpointer")
# only store best validation model in checkpointer
checkpointer = cbks.ModelCheckpoint(filepath=f, verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test), callbacks =[checkpointer])
if not os.path.isfile(f):
    raise Exception("Model weights were not saved to %s" % (f))
print("Test model checkpointer without validation data")
import warnings
# escalate warnings to exceptions so the expected warning is catchable below
warnings.filterwarnings('error')
try:
    # this should issue a warning
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, callbacks =[checkpointer])
except:
    # NOTE(review): bare except — ANY failure (not just the expected warning)
    # counts as a pass here; consider narrowing to `except Warning:`
    print("Tests passed")
    import sys
    sys.exit(0)
raise Exception("Modelcheckpoint tests did not pass")
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
from oslo_vmware import vim_util as vutil
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.vmwareapi import fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import vm_util
class partialObject(object):
    """Stand-in for a partially-retrieved vSphere result: carries a path
    plus a fault object, mimicking a retrieval entry whose properties
    could not be read."""
    def __init__(self, path='fake-path'):
        self.path = path
        self.fault = fake.DataObject()
class VMwareVMUtilTestCase(test.NoDBTestCase):
    def setUp(self):
        """Reset the fake vSphere environment and build a 2-vCPU/2048-MB
        test instance (those values are echoed by the resize-spec tests)."""
        super(VMwareVMUtilTestCase, self).setUp()
        fake.reset()
        stubs.set_stubs(self)
        # clear the module-level VM moref cache between tests
        vm_util.vm_refs_cache_reset()
        self._instance = fake_instance.fake_instance_obj(
            None,
            **{'id': 7, 'name': 'fake!',
               'display_name': 'fake-display-name',
               'uuid': uuidutils.generate_uuid(),
               'vcpus': 2, 'memory_mb': 2048})
    def _test_get_stats_from_cluster(self, connection_state="connected",
                                     maintenance_mode=False):
        """Drive get_stats_from_cluster() against two fake hosts.

        Host 1 is always connected and active; host 2's state comes from
        the arguments, letting callers probe how unusable hosts affect
        the aggregated vcpu count.
        """
        ManagedObjectRefs = [fake.ManagedObjectReference("host1",
                                                         "HostSystem"),
                             fake.ManagedObjectReference("host2",
                                                         "HostSystem")]
        hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
        respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
        prop_dict = {'host': hosts, 'resourcePool': respool}
        # identical hardware on both hosts: 8 cores / 16 threads each
        hardware = fake.DataObject()
        hardware.numCpuCores = 8
        hardware.numCpuThreads = 16
        hardware.vendor = "Intel"
        hardware.cpuModel = "Intel(R) Xeon(R)"
        runtime_host_1 = fake.DataObject()
        runtime_host_1.connectionState = "connected"
        runtime_host_1.inMaintenanceMode = False
        runtime_host_2 = fake.DataObject()
        runtime_host_2.connectionState = connection_state
        runtime_host_2.inMaintenanceMode = maintenance_mode
        prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware),
                            fake.Prop(name="runtime_summary",
                                      val=runtime_host_1)]
        prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware),
                            fake.Prop(name="runtime_summary",
                                      val=runtime_host_2)]
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake.ObjectContent("prop_list_host1",
                                                   prop_list_host_1))
        fake_objects.add_object(fake.ObjectContent("prop_list_host1",
                                                   prop_list_host_2))
        # resource-pool memory in bytes: 5 GiB max, 2 GiB in use
        respool_resource_usage = fake.DataObject()
        respool_resource_usage.maxUsage = 5368709120
        respool_resource_usage.overallUsage = 2147483648

        def fake_call_method(*args):
            # route the session calls made by get_stats_from_cluster to the
            # canned host properties / retrieval result / pool usage above
            if "get_object_properties_dict" in args:
                return prop_dict
            elif "get_properties_for_a_collection_of_objects" in args:
                return fake_objects
            else:
                return respool_resource_usage

        session = fake.FakeSession()
        with mock.patch.object(session, '_call_method', fake_call_method):
            result = vm_util.get_stats_from_cluster(session, "cluster1")
            mem_info = {}
            # 16 threads per usable host: both usable -> 32 vcpus, else 16
            if connection_state == "connected" and not maintenance_mode:
                vcpus = 32
            else:
                vcpus = 16
            # MB values derived from the byte figures above (5120 total,
            # 5120 - 2048 = 3072 free)
            mem_info['total'] = 5120
            mem_info['free'] = 3072
            expected_stats = {'vcpus': vcpus, 'mem': mem_info}
            self.assertEqual(expected_stats, result)
    def test_get_stats_from_cluster_hosts_connected_and_active(self):
        """Both hosts usable: all 32 hardware threads are counted."""
        self._test_get_stats_from_cluster()
    def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
        """A disconnected host contributes no vcpus to the totals."""
        self._test_get_stats_from_cluster(connection_state="disconnected")
    def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
        """A host in maintenance mode contributes no vcpus to the totals."""
        self._test_get_stats_from_cluster(maintenance_mode=True)
    def test_get_host_ref_no_hosts_in_cluster(self):
        """An empty retrieval result makes get_host_ref raise NoValidHost."""
        self.assertRaises(exception.NoValidHost,
                          vm_util.get_host_ref,
                          fake.FakeObjectRetrievalSession(""), 'fake_cluster')
    def test_get_resize_spec(self):
        """Default extra specs yield an unreserved, unlimited CPU allocation."""
        vcpus = 2
        memory_mb = 2048
        extra_specs = vm_util.ExtraSpecs()
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_resize_spec(fake_factory,
                                            vcpus, memory_mb, extra_specs)
        # build the expected config spec by hand and compare wholesale
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.memoryMB = memory_mb
        expected.numCPUs = vcpus
        cpuAllocation = fake_factory.create('ns0:ResourceAllocationInfo')
        # defaults: no reservation, no limit (-1), 'normal' share level
        cpuAllocation.reservation = 0
        cpuAllocation.limit = -1
        cpuAllocation.shares = fake_factory.create('ns0:SharesInfo')
        cpuAllocation.shares.level = 'normal'
        cpuAllocation.shares.shares = 0
        expected.cpuAllocation = cpuAllocation
        self.assertEqual(expected, result)
    def test_get_resize_spec_with_limits(self):
        """Explicit CPU limit/reservation from extra specs are carried over."""
        vcpus = 2
        memory_mb = 2048
        cpu_limits = vm_util.Limits(limit=7,
                                    reservation=6)
        extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_resize_spec(fake_factory,
                                            vcpus, memory_mb, extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.memoryMB = memory_mb
        expected.numCPUs = vcpus
        cpuAllocation = fake_factory.create('ns0:ResourceAllocationInfo')
        # the values handed to Limits above must show up verbatim
        cpuAllocation.reservation = 6
        cpuAllocation.limit = 7
        cpuAllocation.shares = fake_factory.create('ns0:SharesInfo')
        cpuAllocation.shares.level = 'normal'
        cpuAllocation.shares.shares = 0
        expected.cpuAllocation = cpuAllocation
        self.assertEqual(expected, result)
    def test_get_cdrom_attach_config_spec(self):
        """Attaching an ISO adds a connected VirtualCdrom with ISO backing."""
        fake_factory = fake.FakeFactory()
        datastore = fake.Datastore()
        result = vm_util.get_cdrom_attach_config_spec(fake_factory,
                                                      datastore,
                                                      "/tmp/foo.iso",
                                                      200, 0)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.deviceChange = []
        device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
        device_change.operation = 'add'
        device_change.device = fake_factory.create('ns0:VirtualCdrom')
        # controller key / unit number passed in above; -1 marks a new device
        device_change.device.controllerKey = 200
        device_change.device.unitNumber = 0
        device_change.device.key = -1
        connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
        connectable.allowGuestControl = False
        connectable.startConnected = True
        connectable.connected = True
        device_change.device.connectable = connectable
        backing = fake_factory.create('ns0:VirtualCdromIsoBackingInfo')
        backing.fileName = '/tmp/foo.iso'
        backing.datastore = datastore
        device_change.device.backing = backing
        expected.deviceChange.append(device_change)
        self.assertEqual(expected, result)
    def test_lsilogic_controller_spec(self):
        """LSI-SAS adapter type maps to a VirtualLsiLogicSASController device."""
        # Test controller spec returned for lsiLogic sas adapter type
        config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
                          adapter_type=constants.ADAPTER_TYPE_LSILOGICSAS)
        self.assertEqual("ns0:VirtualLsiLogicSASController",
                         config_spec.device.obj_name)
    def test_paravirtual_controller_spec(self):
        """paraVirtual adapter type maps to a ParaVirtualSCSIController device."""
        # Test controller spec returned for paraVirtual adapter type
        config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
                          adapter_type=constants.ADAPTER_TYPE_PARAVIRTUAL)
        self.assertEqual("ns0:ParaVirtualSCSIController",
                         config_spec.device.obj_name)
    def test_create_controller_spec_with_specfic_bus_number(self):
        """A caller-supplied bus number overrides the default of 0."""
        # Test controller spec with specifc bus number rather default 0
        config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
                          adapter_type=constants.ADAPTER_TYPE_LSILOGICSAS,
                          bus_number=1)
        self.assertEqual(1, config_spec.device.busNumber)
    def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
        """Build a device list: a root disk (1024 bytes, backed by
        *filename*), an ephemeral disk (512 bytes), and the LSI-SAS
        controller both disks hang off."""
        # Test the adapter_type returned for a lsiLogic sas controller
        controller_key = 1000
        disk = fake.VirtualDisk()
        disk.controllerKey = controller_key
        disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
        disk_backing.fileName = filename
        disk.capacityInBytes = 1024
        if parent:
            disk_backing.parent = parent
        disk.backing = disk_backing
        # Ephemeral disk
        e_disk = fake.VirtualDisk()
        e_disk.controllerKey = controller_key
        disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
        disk_backing.fileName = '[test_datastore] uuid/ephemeral_0.vmdk'
        e_disk.capacityInBytes = 512
        e_disk.backing = disk_backing
        controller = fake.VirtualLsiLogicSASController()
        controller.key = controller_key
        devices = [disk, e_disk, controller]
        return devices
    def test_get_vmdk_path_and_adapter_type(self):
        """Without a uuid filter, the ephemeral disk's data is reported."""
        filename = '[test_datastore] uuid/uuid.vmdk'
        devices = self._vmdk_path_and_adapter_type_devices(filename)
        session = fake.FakeSession()
        with mock.patch.object(session, '_call_method', return_value=devices):
            vmdk = vm_util.get_vmdk_info(session, None)
            self.assertEqual(constants.ADAPTER_TYPE_LSILOGICSAS,
                             vmdk.adapter_type)
            # 512 bytes / devices[1] identify the ephemeral disk
            self.assertEqual('[test_datastore] uuid/ephemeral_0.vmdk',
                             vmdk.path)
            self.assertEqual(512, vmdk.capacity_in_bytes)
            self.assertEqual(devices[1], vmdk.device)
    def test_get_vmdk_path_and_adapter_type_with_match(self):
        """A uuid filter selects the disk whose file name matches the uuid."""
        n_filename = '[test_datastore] uuid/uuid.vmdk'
        devices = self._vmdk_path_and_adapter_type_devices(n_filename)
        session = fake.FakeSession()
        with mock.patch.object(session, '_call_method', return_value=devices):
            vmdk = vm_util.get_vmdk_info(session, None, uuid='uuid')
            self.assertEqual(constants.ADAPTER_TYPE_LSILOGICSAS,
                             vmdk.adapter_type)
            # 1024 bytes / devices[0] identify the root disk
            self.assertEqual(n_filename, vmdk.path)
            self.assertEqual(1024, vmdk.capacity_in_bytes)
            self.assertEqual(devices[0], vmdk.device)
    def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
        """No disk matches the uuid filter -> empty vmdk info is returned."""
        n_filename = '[test_datastore] diuu/diuu.vmdk'
        session = fake.FakeSession()
        devices = self._vmdk_path_and_adapter_type_devices(n_filename)
        with mock.patch.object(session, '_call_method', return_value=devices):
            vmdk = vm_util.get_vmdk_info(session, None, uuid='uuid')
            self.assertIsNone(vmdk.adapter_type)
            self.assertIsNone(vmdk.path)
            self.assertEqual(0, vmdk.capacity_in_bytes)
            self.assertIsNone(vmdk.device)
def test_get_vmdk_adapter_type(self):
# Test for the adapter_type to be used in vmdk descriptor
# Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic
# and ParaVirtual
vmdk_adapter_type = vm_util.get_vmdk_adapter_type(
constants.DEFAULT_ADAPTER_TYPE)
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type(
constants.ADAPTER_TYPE_LSILOGICSAS)
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type(
constants.ADAPTER_TYPE_PARAVIRTUAL)
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
self.assertEqual("dummyAdapter", vmdk_adapter_type)
    def test_get_scsi_adapter_type(self):
        """With both SCSI and IDE controllers present, SCSI is preferred."""
        vm = fake.VirtualMachine()
        devices = vm.get("config.hardware.device").VirtualDevice
        scsi_controller = fake.VirtualLsiLogicController()
        ide_controller = fake.VirtualIDEController()
        devices.append(scsi_controller)
        devices.append(ide_controller)
        fake._update_object("VirtualMachine", vm)
        # return the scsi type, not ide
        hardware_device = vm.get("config.hardware.device")
        self.assertEqual(constants.DEFAULT_ADAPTER_TYPE,
                         vm_util.get_scsi_adapter_type(hardware_device))
    def test_get_scsi_adapter_type_with_error(self):
        """A SCSI controller with no free ports raises StorageError."""
        vm = fake.VirtualMachine()
        devices = vm.get("config.hardware.device").VirtualDevice
        scsi_controller = fake.VirtualLsiLogicController()
        ide_controller = fake.VirtualIDEController()
        devices.append(scsi_controller)
        devices.append(ide_controller)
        fake._update_object("VirtualMachine", vm)
        # the controller is not suitable since the device under this controller
        # has exceeded SCSI_MAX_CONNECT_NUMBER
        for i in range(0, constants.SCSI_MAX_CONNECT_NUMBER):
            scsi_controller.device.append('device' + str(i))
        hardware_device = vm.get("config.hardware.device")
        self.assertRaises(exception.StorageError,
                          vm_util.get_scsi_adapter_type,
                          hardware_device)
def test_find_allocated_slots(self):
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
disk3 = fake.VirtualDisk(201, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7)
devices = [disk1, disk2, disk3, ide0, ide1, scsi0]
taken = vm_util._find_allocated_slots(devices)
self.assertEqual([0, 1], sorted(taken[200]))
self.assertEqual([1], taken[201])
self.assertEqual([7], taken[1000])
def test_get_bus_number_for_scsi_controller(self):
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7,
busNumber=0),
fake.VirtualLsiLogicController(1002, scsiCtlrUnitNumber=7,
busNumber=2)]
bus_number = vm_util._get_bus_number_for_scsi_controller(devices)
self.assertEqual(1, bus_number)
def test_get_bus_number_for_scsi_controller_buses_used_up(self):
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7,
busNumber=0),
fake.VirtualLsiLogicController(1001, scsiCtlrUnitNumber=7,
busNumber=1),
fake.VirtualLsiLogicController(1002, scsiCtlrUnitNumber=7,
busNumber=2),
fake.VirtualLsiLogicController(1003, scsiCtlrUnitNumber=7,
busNumber=3)]
self.assertRaises(vexc.VMwareDriverException,
vm_util._get_bus_number_for_scsi_controller,
devices)
    def test_allocate_controller_key_and_unit_number_ide_default(self):
        """Default IDE controllers are reused while they have a free slot."""
        # Test that default IDE controllers are used when there is a free slot
        # on them
        disk1 = fake.VirtualDisk(200, 0)
        disk2 = fake.VirtualDisk(200, 1)
        ide0 = fake.VirtualIDEController(200)
        ide1 = fake.VirtualIDEController(201)
        devices = [disk1, disk2, ide0, ide1]
        (controller_key, unit_number,
         controller_spec) = vm_util.allocate_controller_key_and_unit_number(
            None,
            devices,
            'ide')
        # controller 200 is full, so slot 0 of controller 201 is chosen;
        # no new controller spec is needed
        self.assertEqual(201, controller_key)
        self.assertEqual(0, unit_number)
        self.assertIsNone(controller_spec)
    def test_allocate_controller_key_and_unit_number_ide(self):
        """With both default IDE controllers full, a new one is created."""
        # Test that a new controller is created when there is no free slot on
        # the default IDE controllers
        ide0 = fake.VirtualIDEController(200)
        ide1 = fake.VirtualIDEController(201)
        devices = [ide0, ide1]
        # fill both slots (0 and 1) on both controllers
        for controller_key in [200, 201]:
            for unit_number in [0, 1]:
                disk = fake.VirtualDisk(controller_key, unit_number)
                devices.append(disk)
        factory = fake.FakeFactory()
        (controller_key, unit_number,
         controller_spec) = vm_util.allocate_controller_key_and_unit_number(
            factory,
            devices,
            'ide')
        # the new controller gets placeholder key -101 and a spec is returned
        self.assertEqual(-101, controller_key)
        self.assertEqual(0, unit_number)
        self.assertIsNotNone(controller_spec)
def test_allocate_controller_key_and_unit_number_scsi(self):
# Test that we allocate on existing SCSI controller if there is a free
# slot on it
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)]
for unit_number in range(7):
disk = fake.VirtualDisk(1000, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
constants.DEFAULT_ADAPTER_TYPE)
self.assertEqual(1000, controller_key)
self.assertEqual(8, unit_number)
self.assertIsNone(controller_spec)
    def test_allocate_controller_key_and_unit_number_scsi_new_controller(self):
        """A fully occupied SCSI controller forces creating a second one."""
        # units 0-14 are disks and unit 15 is the controller itself, so no
        # free slot remains and a new controller on bus 1 must be allocated
        devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=15)]
        for unit_number in range(15):
            disk = fake.VirtualDisk(1000, unit_number)
            devices.append(disk)
        factory = fake.FakeFactory()
        (controller_key, unit_number,
         controller_spec) = vm_util.allocate_controller_key_and_unit_number(
            factory,
            devices,
            constants.DEFAULT_ADAPTER_TYPE)
        self.assertEqual(-101, controller_key)
        self.assertEqual(0, unit_number)
        self.assertEqual(1, controller_spec.device.busNumber)
    def test_get_vnc_config_spec(self):
        """VNC extraConfig carries the enabled flag, port, and en-us keymap."""
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vnc_config_spec(fake_factory,
                                             7)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.extraConfig = []
        remote_display_vnc_enabled = fake_factory.create('ns0:OptionValue')
        remote_display_vnc_enabled.value = 'true'
        remote_display_vnc_enabled.key = 'RemoteDisplay.vnc.enabled'
        expected.extraConfig.append(remote_display_vnc_enabled)
        # port 7 matches the argument passed to get_vnc_config_spec above
        remote_display_vnc_port = fake_factory.create('ns0:OptionValue')
        remote_display_vnc_port.value = 7
        remote_display_vnc_port.key = 'RemoteDisplay.vnc.port'
        expected.extraConfig.append(remote_display_vnc_port)
        remote_display_vnc_keymap = fake_factory.create('ns0:OptionValue')
        remote_display_vnc_keymap.value = 'en-us'
        remote_display_vnc_keymap.key = 'RemoteDisplay.vnc.keyMap'
        expected.extraConfig.append(remote_display_vnc_keymap)
        self.assertEqual(expected, result)
def _create_fake_vms(self):
fake_vms = fake.FakeRetrieveResult()
OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
for i in range(10):
vm = fake.ManagedObject()
opt_val = OptionValue(key='', value=5900 + i)
vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
fake_vms.add_object(vm)
return fake_vms
    def test_get_vnc_port(self):
        """With ports 5900-5909 taken, the next allocated port is 5910."""
        fake_vms = self._create_fake_vms()
        self.flags(vnc_port=5900, group='vmware')
        self.flags(vnc_port_total=10000, group='vmware')
        actual = vm_util.get_vnc_port(
            fake.FakeObjectRetrievalSession(fake_vms))
        self.assertEqual(actual, 5910)
def test_get_vnc_port_exhausted(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10, group='vmware')
self.assertRaises(exception.ConsolePortRangeExhausted,
vm_util.get_vnc_port,
fake.FakeObjectRetrievalSession(fake_vms))
def test_get_cluster_ref_by_name_none(self):
fake_objects = fake.FakeRetrieveResult()
ref = vm_util.get_cluster_ref_by_name(
fake.FakeObjectRetrievalSession(fake_objects), 'fake_cluster')
self.assertIsNone(ref)
def test_get_cluster_ref_by_name_exists(self):
fake_objects = fake.FakeRetrieveResult()
cluster = fake.ClusterComputeResource(name='cluster')
fake_objects.add_object(cluster)
ref = vm_util.get_cluster_ref_by_name(
fake.FakeObjectRetrievalSession(fake_objects), 'cluster')
self.assertIs(cluster.obj, ref)
def test_get_cluster_ref_by_name_missing(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(partialObject(path='cluster'))
ref = vm_util.get_cluster_ref_by_name(
fake.FakeObjectRetrievalSession(fake_objects), 'cluster')
self.assertIsNone(ref)
def test_propset_dict_simple(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar")])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
def test_propset_dict_complex(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar"),
DynamicProperty(name='some.thing',
val=MoRef(value='else')),
DynamicProperty(name='another.thing', val='value')])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
self.assertTrue(hasattr(propdict['some.thing'], 'value'))
self.assertEqual("else", propdict['some.thing'].value)
self.assertEqual("value", propdict['another.thing'])
def _test_detach_virtual_disk_spec(self, destroy_disk=False):
virtual_device_config = vm_util.detach_virtual_disk_spec(
fake.FakeFactory(),
'fake_device',
destroy_disk)
self.assertEqual('remove', virtual_device_config.operation)
self.assertEqual('fake_device', virtual_device_config.device)
self.assertEqual('ns0:VirtualDeviceConfigSpec',
virtual_device_config.obj_name)
if destroy_disk:
self.assertEqual('destroy', virtual_device_config.fileOperation)
else:
self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))
def test_detach_virtual_disk_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=False)
def test_detach_virtual_disk_destroy_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=True)
    def _create_vm_config_spec(self):
        """Build the canonical VirtualMachineConfigSpec that
        get_vm_create_spec() is expected to produce for self._instance with
        default ExtraSpecs on the 'fake-datastore' datastore.

        Several tests compare against this spec (optionally tweaking single
        attributes), so the exact attribute set here must stay in sync with
        vm_util.get_vm_create_spec.
        """
        fake_factory = fake.FakeFactory()
        spec = fake_factory.create('ns0:VirtualMachineConfigSpec')
        spec.name = self._instance.uuid
        spec.instanceUuid = self._instance.uuid
        spec.deviceChange = []
        spec.numCPUs = 2
        spec.version = None
        spec.memoryMB = 2048
        spec.guestId = 'otherGuest'
        spec.extraConfig = []
        # The instance uuid is mirrored into extraConfig for NSX (nvp.*).
        extra_config = fake_factory.create("ns0:OptionValue")
        extra_config.value = self._instance.uuid
        extra_config.key = 'nvp.vm-uuid'
        spec.extraConfig.append(extra_config)
        spec.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        spec.files.vmPathName = '[fake-datastore]'
        spec.managedBy = fake_factory.create('ns0:ManagedByInfo')
        spec.managedBy.extensionKey = 'org.openstack.compute'
        spec.managedBy.type = 'instance'
        spec.tools = fake_factory.create('ns0:ToolsConfigInfo')
        spec.tools.afterPowerOn = True
        spec.tools.afterResume = True
        spec.tools.beforeGuestReboot = True
        spec.tools.beforeGuestShutdown = True
        spec.tools.beforeGuestStandby = True
        return spec
def test_get_vm_extra_config_spec(self):
fake_factory = fake.FakeFactory()
extra_opts = {mock.sentinel.key: mock.sentinel.value}
res = vm_util.get_vm_extra_config_spec(fake_factory, extra_opts)
self.assertEqual(1, len(res.extraConfig))
self.assertEqual(mock.sentinel.key, res.extraConfig[0].key)
self.assertEqual(mock.sentinel.value, res.extraConfig[0].value)
def test_get_vm_create_spec(self):
extra_specs = vm_util.ExtraSpecs()
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = self._create_vm_config_spec()
self.assertEqual(expected, result)
expected.version = None
expected.memoryMB = 2048
expected.guestId = constants.DEFAULT_OS_TYPE
expected.extraConfig = []
    def test_get_vm_create_spec_with_serial_port(self):
        """With serial port service/proxy URIs configured, the create spec
        gains a serial-port device change on top of the canonical spec.
        """
        extra_specs = vm_util.ExtraSpecs()
        fake_factory = fake.FakeFactory()
        self.flags(serial_port_service_uri='foobar', group='vmware')
        self.flags(serial_port_proxy_uri='telnet://example.com:31337',
                   group='vmware')
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        serial_port_spec = vm_util.create_serial_port_spec(fake_factory)
        expected = self._create_vm_config_spec()
        expected.deviceChange = [serial_port_spec]
        self.assertEqual(expected, result)
    def test_get_vm_create_spec_with_allocations(self):
        """CPU limit + reservation extra specs produce a cpuAllocation with
        normal-level shares of 0.
        """
        cpu_limits = vm_util.Limits(limit=7,
                                    reservation=6)
        extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.deviceChange = []
        expected.guestId = constants.DEFAULT_OS_TYPE
        expected.instanceUuid = self._instance.uuid
        expected.memoryMB = self._instance.memory_mb
        expected.name = self._instance.uuid
        expected.numCPUs = self._instance.vcpus
        expected.version = None
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.afterPowerOn = True
        expected.tools.afterResume = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.beforeGuestStandby = True
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.managedBy.type = 'instance'
        # The configured limit/reservation must land in cpuAllocation.
        cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
        cpu_allocation.limit = 7
        cpu_allocation.reservation = 6
        cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
        cpu_allocation.shares.level = 'normal'
        cpu_allocation.shares.shares = 0
        expected.cpuAllocation = cpu_allocation
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.key = 'nvp.vm-uuid'
        extra_config.value = self._instance.uuid
        expected.extraConfig.append(extra_config)
        self.assertEqual(expected, result)
    def test_get_vm_create_spec_with_limit(self):
        """A CPU limit without a reservation defaults the reservation to 0
        while keeping normal-level shares.
        """
        cpu_limits = vm_util.Limits(limit=7)
        extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.instanceUuid = self._instance.uuid
        expected.name = self._instance.uuid
        expected.deviceChange = []
        expected.extraConfig = []
        extra_config = fake_factory.create("ns0:OptionValue")
        extra_config.value = self._instance.uuid
        extra_config.key = 'nvp.vm-uuid'
        expected.extraConfig.append(extra_config)
        expected.memoryMB = 2048
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.managedBy.type = 'instance'
        expected.version = None
        expected.guestId = constants.DEFAULT_OS_TYPE
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.afterPowerOn = True
        expected.tools.afterResume = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.beforeGuestStandby = True
        # limit set explicitly; reservation falls back to 0.
        cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
        cpu_allocation.limit = 7
        cpu_allocation.reservation = 0
        cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
        cpu_allocation.shares.level = 'normal'
        cpu_allocation.shares.shares = 0
        expected.cpuAllocation = cpu_allocation
        expected.numCPUs = 2
        self.assertEqual(expected, result)
    def test_get_vm_create_spec_with_share(self):
        """A 'high' shares level yields cpuAllocation with unlimited limit
        (-1), zero reservation and level='high'.
        """
        cpu_limits = vm_util.Limits(shares_level='high')
        extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.instanceUuid = self._instance.uuid
        expected.name = self._instance.uuid
        expected.deviceChange = []
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.value = self._instance.uuid
        extra_config.key = 'nvp.vm-uuid'
        expected.extraConfig.append(extra_config)
        expected.memoryMB = 2048
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.type = 'instance'
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.version = None
        expected.guestId = constants.DEFAULT_OS_TYPE
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.beforeGuestStandby = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.afterResume = True
        expected.tools.afterPowerOn = True
        # No limit configured: -1 means unlimited in vSphere.
        cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
        cpu_allocation.reservation = 0
        cpu_allocation.limit = -1
        cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
        cpu_allocation.shares.level = 'high'
        cpu_allocation.shares.shares = 0
        expected.cpuAllocation = cpu_allocation
        expected.numCPUs = 2
        self.assertEqual(expected, result)
    def test_get_vm_create_spec_with_share_custom(self):
        """A 'custom' shares level propagates the explicit share count
        (1948) into cpuAllocation.shares.shares.
        """
        cpu_limits = vm_util.Limits(shares_level='custom',
                                    shares_share=1948)
        extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.instanceUuid = self._instance.uuid
        expected.name = self._instance.uuid
        expected.deviceChange = []
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.key = 'nvp.vm-uuid'
        extra_config.value = self._instance.uuid
        expected.extraConfig.append(extra_config)
        expected.memoryMB = 2048
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.managedBy.type = 'instance'
        expected.version = None
        expected.guestId = constants.DEFAULT_OS_TYPE
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.beforeGuestStandby = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.afterResume = True
        expected.tools.afterPowerOn = True
        # Custom share count is used verbatim; limit stays unlimited (-1).
        cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
        cpu_allocation.reservation = 0
        cpu_allocation.limit = -1
        cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
        cpu_allocation.shares.level = 'custom'
        cpu_allocation.shares.shares = 1948
        expected.cpuAllocation = cpu_allocation
        expected.numCPUs = 2
        self.assertEqual(expected, result)
    def test_get_vm_create_spec_with_metadata(self):
        """Metadata passed to get_vm_create_spec lands in spec.annotation."""
        extra_specs = vm_util.ExtraSpecs()
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs,
                                            metadata='fake-metadata')
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.name = self._instance.uuid
        expected.instanceUuid = self._instance.uuid
        expected.deviceChange = []
        expected.numCPUs = 2
        expected.version = None
        expected.memoryMB = 2048
        expected.guestId = 'otherGuest'
        # The only difference from the canonical spec: the annotation.
        expected.annotation = 'fake-metadata'
        expected.extraConfig = []
        extra_config = fake_factory.create("ns0:OptionValue")
        extra_config.value = self._instance.uuid
        extra_config.key = 'nvp.vm-uuid'
        expected.extraConfig.append(extra_config)
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.managedBy.type = 'instance'
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.afterPowerOn = True
        expected.tools.afterResume = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.beforeGuestStandby = True
        self.assertEqual(expected, result)
    def test_create_vm(self):
        """create_vm invokes CreateVM_Task, waits on the task, and returns
        the task result (the new VM's reference).
        """
        def fake_call_method(module, method, *args, **kwargs):
            # Only CreateVM_Task may be invoked by create_vm here.
            if (method == 'CreateVM_Task'):
                return 'fake_create_vm_task'
            else:
                self.fail('Should not get here....')
        def fake_wait_for_task(self, *args):
            # Simulate a successful task whose result is the VM ref.
            task_info = mock.Mock(state="success", result="fake_vm_ref")
            return task_info
        session = fake.FakeSession()
        fake_call_mock = mock.Mock(side_effect=fake_call_method)
        fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task)
        with test.nested(
            mock.patch.object(session, '_wait_for_task',
                              fake_wait_mock),
            mock.patch.object(session, '_call_method',
                              fake_call_mock)
        ) as (wait_for_task, call_method):
            vm_ref = vm_util.create_vm(
                session,
                self._instance,
                'fake_vm_folder',
                'fake_config_spec',
                'fake_res_pool_ref')
            self.assertEqual('fake_vm_ref', vm_ref)
            call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task',
                'fake_vm_folder', config='fake_config_spec',
                pool='fake_res_pool_ref')
            wait_for_task.assert_called_once_with('fake_create_vm_task')
    @mock.patch.object(vm_util.LOG, 'warning')
    def test_create_vm_invalid_guestid(self, mock_log_warn):
        """Ensure we warn when create_vm() fails after we passed an
        unrecognised guestId
        """
        # Flag flips to True once a warning mentioning our os type is seen.
        found = [False]
        def fake_log_warn(msg, values):
            if not isinstance(values, dict):
                return
            if values.get('ostype') == 'invalid_os_type':
                found[0] = True
        mock_log_warn.side_effect = fake_log_warn
        session = driver.VMwareAPISession()
        config_spec = vm_util.get_vm_create_spec(
            session.vim.client.factory,
            self._instance, 'fake-datastore', [],
            vm_util.ExtraSpecs(),
            os_type='invalid_os_type')
        # The fake backend rejects the invalid guestId; create_vm must both
        # raise and emit the warning.
        self.assertRaises(vexc.VMwareDriverException,
                          vm_util.create_vm, session, self._instance,
                          'folder', config_spec, 'res-pool')
        self.assertTrue(found[0])
def test_convert_vif_model(self):
expected = "VirtualE1000"
result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000)
self.assertEqual(expected, result)
expected = "VirtualE1000e"
result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E)
self.assertEqual(expected, result)
types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
"VirtualVmxnet", "VirtualVmxnet3"]
for type in types:
self.assertEqual(type,
vm_util.convert_vif_model(type))
self.assertRaises(exception.Invalid,
vm_util.convert_vif_model,
"InvalidVifModel")
def test_power_on_instance_with_vm_ref(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, self._instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_without_vm_ref(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(vm_util, "get_vm_ref",
return_value='fake-vm-ref'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, self._instance)
fake_get_vm_ref.assert_called_once_with(session, self._instance)
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_with_exception(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task",
side_effect=exception.NovaException('fake')),
) as (fake_call_method, fake_wait_for_task):
self.assertRaises(exception.NovaException,
vm_util.power_on_instance,
session, self._instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_with_power_state_exception(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(
session, "_wait_for_task",
side_effect=vexc.InvalidPowerStateException),
) as (fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, self._instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_create_virtual_disk(self):
session = fake.FakeSession()
dm = session.vim.service_content.virtualDiskManager
with test.nested(
mock.patch.object(vm_util, "get_vmdk_create_spec",
return_value='fake-spec'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_spec, fake_call_method, fake_wait_for_task):
vm_util.create_virtual_disk(session, 'fake-dc-ref',
'fake-adapter-type', 'fake-disk-type',
'fake-path', 7)
fake_get_spec.assert_called_once_with(
session.vim.client.factory, 7,
'fake-adapter-type',
'fake-disk-type')
fake_call_method.assert_called_once_with(
session.vim,
"CreateVirtualDisk_Task",
dm,
name='fake-path',
datacenter='fake-dc-ref',
spec='fake-spec')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_copy_virtual_disk(self):
session = fake.FakeSession()
dm = session.vim.service_content.virtualDiskManager
with test.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_call_method, fake_wait_for_task):
vm_util.copy_virtual_disk(session, 'fake-dc-ref',
'fake-source', 'fake-dest')
fake_call_method.assert_called_once_with(
session.vim,
"CopyVirtualDisk_Task",
dm,
sourceName='fake-source',
sourceDatacenter='fake-dc-ref',
destName='fake-dest')
fake_wait_for_task.assert_called_once_with('fake-task')
def _create_fake_vm_objects(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.VirtualMachine())
return fake_objects
def test_reconfigure_vm(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake_reconfigure_task'),
mock.patch.object(session, '_wait_for_task')
) as (_call_method, _wait_for_task):
vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec')
_call_method.assert_called_once_with(mock.ANY,
'ReconfigVM_Task', 'fake-ref', spec='fake-spec')
_wait_for_task.assert_called_once_with(
'fake_reconfigure_task')
    def _get_network_attach_config_spec_opaque(self, network_ref,
                                               vc6_onwards=False):
        """Check get_network_attach_config_spec for an opaque network.

        ``vc6_onwards`` switches the expectation from the pre-VC6 dynamic
        ``__externalId__`` property to the first-class ``externalId``
        attribute on the device.
        """
        vif_info = {'network_name': 'fake-name',
                    'mac_address': '00:00:00:ca:fe:01',
                    'network_ref': network_ref,
                    'iface_id': 7,
                    'vif_model': 'VirtualE1000'}
        fake_factory = fake.FakeFactory()
        result = vm_util.get_network_attach_config_spec(
            fake_factory, vif_info, 1)
        card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo'
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.value = vif_info['iface_id']
        extra_config.key = 'nvp.iface-id.1'
        expected.extraConfig.append(extra_config)
        expected.deviceChange = []
        device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
        device_change.operation = 'add'
        device = fake_factory.create('ns0:VirtualE1000')
        device.macAddress = vif_info['mac_address']
        if network_ref['use-external-id']:
            # How the external id is carried depends on the VC version.
            if vc6_onwards:
                device.externalId = vif_info['iface_id']
            else:
                dp = fake_factory.create('ns0:DynamicProperty')
                dp.name = '__externalId__'
                dp.val = vif_info['iface_id']
                device.dynamicProperty = [dp]
        device.addressType = 'manual'
        connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
        connectable.allowGuestControl = True
        connectable.startConnected = True
        connectable.connected = True
        device.connectable = connectable
        backing = fake_factory.create(card)
        backing.opaqueNetworkType = vif_info['network_ref']['network-type']
        backing.opaqueNetworkId = vif_info['network_ref']['network-id']
        device.backing = backing
        device.key = -47
        device.wakeOnLanEnabled = True
        device_change.device = device
        expected.deviceChange.append(device_change)
        self.assertEqual(expected, result)
def test_get_network_attach_config_spec_opaque_integration_bridge(self):
network_ref = {'type': 'OpaqueNetwork',
'network-id': 'fake-network-id',
'network-type': 'opaque',
'use-external-id': False}
self._get_network_attach_config_spec_opaque(network_ref)
def test_get_network_attach_config_spec_opaque(self):
network_ref = {'type': 'OpaqueNetwork',
'network-id': 'fake-network-id',
'network-type': 'nsx.LogicalSwitch',
'use-external-id': True}
self._get_network_attach_config_spec_opaque(network_ref)
    @mock.patch.object(fake, 'DataObject')
    def test_get_network_attach_config_spec_opaque_vc6_onwards(self,
                                                               mock_object):
        """Opaque-network attach spec when the device class exposes the
        first-class ``externalId`` attribute (VC6+).
        """
        # Add new attribute externalId supported from VC6
        class FakeVirtualE1000(fake.DataObject):
            def __init__(self):
                super(FakeVirtualE1000, self).__init__()
                self.externalId = None
        mock_object.return_value = FakeVirtualE1000
        network_ref = {'type': 'OpaqueNetwork',
                       'network-id': 'fake-network-id',
                       'network-type': 'nsx.LogicalSwitch',
                       'use-external-id': True}
        self._get_network_attach_config_spec_opaque(network_ref,
                                                    vc6_onwards=True)
    def test_get_network_attach_config_spec_dvs(self):
        """Attach spec for a distributed virtual portgroup: the device
        backing carries the portgroup key and switch uuid.
        """
        vif_info = {'network_name': 'br100',
                    'mac_address': '00:00:00:ca:fe:01',
                    'network_ref': {'type': 'DistributedVirtualPortgroup',
                                    'dvsw': 'fake-network-id',
                                    'dvpg': 'fake-group'},
                    'iface_id': 7,
                    'vif_model': 'VirtualE1000'}
        fake_factory = fake.FakeFactory()
        result = vm_util.get_network_attach_config_spec(
            fake_factory, vif_info, 1)
        port = 'ns0:DistributedVirtualSwitchPortConnection'
        backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.value = vif_info['iface_id']
        extra_config.key = 'nvp.iface-id.1'
        expected.extraConfig.append(extra_config)
        expected.deviceChange = []
        device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
        device_change.operation = 'add'
        device = fake_factory.create('ns0:VirtualE1000')
        device.macAddress = vif_info['mac_address']
        device.key = -47
        device.addressType = 'manual'
        device.wakeOnLanEnabled = True
        device.backing = fake_factory.create(backing)
        device.backing.port = fake_factory.create(port)
        device.backing.port.portgroupKey = vif_info['network_ref']['dvpg']
        device.backing.port.switchUuid = vif_info['network_ref']['dvsw']
        connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
        connectable.allowGuestControl = True
        connectable.connected = True
        connectable.startConnected = True
        device.connectable = connectable
        device_change.device = device
        expected.deviceChange.append(device_change)
        self.assertEqual(expected, result)
def _get_create_vif_spec(self, fake_factory, vif_info):
limits = vm_util.Limits()
limits.limit = 10
limits.reservation = 20
limits.shares_level = 'custom'
limits.shares_share = 40
return vm_util._create_vif_spec(fake_factory, vif_info, limits)
    def _construct_vif_spec(self, fake_factory, vif_info):
        """Hand-build the VirtualDeviceConfigSpec that _create_vif_spec is
        expected to return for ``vif_info`` with the limits used by
        _get_create_vif_spec (limit 10, reservation 20, custom shares 40).
        """
        port = 'ns0:DistributedVirtualSwitchPortConnection'
        backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
        device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
        device_change.operation = 'add'
        device = fake_factory.create('ns0:VirtualE1000')
        device.macAddress = vif_info['mac_address']
        device.key = -47
        device.addressType = 'manual'
        device.wakeOnLanEnabled = True
        device.backing = fake_factory.create(backing)
        device.backing.port = fake_factory.create(port)
        device.backing.port.portgroupKey = vif_info['network_ref']['dvpg']
        device.backing.port.switchUuid = vif_info['network_ref']['dvsw']
        # A specific DVS port is only expected when the caller pinned one.
        if vif_info['network_ref'].get('dvs_port_key'):
            device.backing.port.portKey = (
                vif_info['network_ref']['dvs_port_key'])
        device.resourceAllocation = fake_factory.create(
            'ns0:VirtualEthernetCardResourceAllocation')
        device.resourceAllocation.limit = 10
        device.resourceAllocation.reservation = 20
        device.resourceAllocation.share = fake_factory.create(
            'ns0:SharesInfo')
        device.resourceAllocation.share.level = 'custom'
        device.resourceAllocation.share.shares = 40
        connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
        connectable.allowGuestControl = True
        connectable.connected = True
        connectable.startConnected = True
        device.connectable = connectable
        device_change.device = device
        return device_change
def test_get_create_vif_spec(self):
vif_info = {'network_name': 'br100',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'DistributedVirtualPortgroup',
'dvsw': 'fake-network-id',
'dvpg': 'fake-group'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
fake_factory = fake.FakeFactory()
result = self._get_create_vif_spec(fake_factory, vif_info)
device_change = self._construct_vif_spec(fake_factory, vif_info)
self.assertEqual(device_change, result)
def test_get_create_vif_spec_dvs_port_key(self):
vif_info = {'network_name': 'br100',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'DistributedVirtualPortgroup',
'dvsw': 'fake-network-id',
'dvpg': 'fake-group',
'dvs_port_key': 'fake-key'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
fake_factory = fake.FakeFactory()
result = self._get_create_vif_spec(fake_factory, vif_info)
device_change = self._construct_vif_spec(fake_factory, vif_info)
self.assertEqual(device_change, result)
    def test_get_network_detach_config_spec(self):
        """Detaching a vif removes the device and frees its nvp.iface-id
        slot (value set to 'free').
        """
        fake_factory = fake.FakeFactory()
        result = vm_util.get_network_detach_config_spec(
            fake_factory, 'fake-device', 2)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.value = 'free'
        extra_config.key = 'nvp.iface-id.2'
        expected.extraConfig.append(extra_config)
        expected.deviceChange = []
        device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
        device_change.device = 'fake-device'
        device_change.operation = 'remove'
        expected.deviceChange.append(device_change)
        self.assertEqual(expected, result)
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance(self, fake_get_ref):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task')
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, self._instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref")
def test_power_off_instance_no_vm_ref(self, fake_get_ref):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task')
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, self._instance)
fake_get_ref.assert_called_once_with(session, self._instance)
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance_with_exception(self, fake_get_ref):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task',
side_effect=exception.NovaException('fake'))
) as (fake_call_method, fake_wait_for_task):
self.assertRaises(exception.NovaException,
vm_util.power_off_instance,
session, self._instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance_power_state_exception(self, fake_get_ref):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(
session, '_wait_for_task',
side_effect=vexc.InvalidPowerStateException)
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, self._instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
def test_get_vm_create_spec_updated_hw_version(self):
extra_specs = vm_util.ExtraSpecs(hw_version='vmx-08')
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
self._instance,
'fake-datastore', [],
extra_specs=extra_specs)
self.assertEqual('vmx-08', result.version)
def test_vm_create_spec_with_profile_spec(self):
datastore = ds_obj.Datastore('fake-ds-ref', 'fake-ds-name')
extra_specs = vm_util.ExtraSpecs()
create_spec = vm_util.get_vm_create_spec(fake.FakeFactory(),
self._instance,
datastore.name, [],
extra_specs,
profile_spec='fake_profile_spec')
self.assertEqual(['fake_profile_spec'], create_spec.vmProfile)
@mock.patch.object(pbm, 'get_profile_id_by_name')
def test_get_storage_profile_spec(self, mock_retrieve_profile_id):
fake_profile_id = fake.DataObject()
fake_profile_id.uniqueId = 'fake_unique_id'
mock_retrieve_profile_id.return_value = fake_profile_id
profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(),
'fake_policy')
self.assertEqual('ns0:VirtualMachineDefinedProfileSpec',
profile_spec.obj_name)
self.assertEqual(fake_profile_id.uniqueId, profile_spec.profileId)
@mock.patch.object(pbm, 'get_profile_id_by_name')
def test_storage_spec_empty_profile(self, mock_retrieve_profile_id):
mock_retrieve_profile_id.return_value = None
profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(),
'fake_policy')
self.assertIsNone(profile_spec)
def test_get_ephemeral_name(self):
filename = vm_util.get_ephemeral_name(0)
self.assertEqual('ephemeral_0.vmdk', filename)
    def test_detach_and_delete_devices_config_spec(self):
        """Each passed device gets a 'remove' change with
        fileOperation='destroy' so its backing file is deleted too.
        """
        fake_devices = ['device1', 'device2']
        fake_factory = fake.FakeFactory()
        result = vm_util._detach_and_delete_devices_config_spec(fake_factory,
                                                                fake_devices)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.deviceChange = []
        device1 = fake_factory.create('ns0:VirtualDeviceConfigSpec')
        device1.device = 'device1'
        device1.operation = 'remove'
        device1.fileOperation = 'destroy'
        expected.deviceChange.append(device1)
        device2 = fake_factory.create('ns0:VirtualDeviceConfigSpec')
        device2.device = 'device2'
        device2.operation = 'remove'
        device2.fileOperation = 'destroy'
        expected.deviceChange.append(device2)
        self.assertEqual(expected, result)
@mock.patch.object(vm_util, 'reconfigure_vm')
def test_detach_devices_from_vm(self, mock_reconfigure):
fake_devices = ['device1', 'device2']
session = fake.FakeSession()
vm_util.detach_devices_from_vm(session,
'fake-ref',
fake_devices)
mock_reconfigure.assert_called_once_with(session, 'fake-ref', mock.ANY)
    def test_get_vm_boot_spec(self):
        """The boot spec puts the given disk first in the VM boot order."""
        disk = fake.VirtualDisk()
        disk.key = 7
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_boot_spec(fake_factory,
                                          disk)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        boot_disk = fake_factory.create(
            'ns0:VirtualMachineBootOptionsBootableDiskDevice')
        boot_disk.deviceKey = disk.key
        boot_options = fake_factory.create('ns0:VirtualMachineBootOptions')
        boot_options.bootOrder = [boot_disk]
        expected.bootOptions = boot_options
        self.assertEqual(expected, result)

    def _get_devices(self, filename):
        # Helper: wrap the shared vmdk/adapter-type device fixtures in the
        # ArrayOfVirtualDevice container shape the vSphere API would return.
        devices = fake._create_array_of_type('VirtualDevice')
        devices.VirtualDevice = self._vmdk_path_and_adapter_type_devices(
            filename)
        return devices
    def test_find_rescue_device(self):
        """find_rescue_device locates the '-rescue.vmdk' disk by filename."""
        filename = '[test_datastore] uuid/uuid-rescue.vmdk'
        devices = self._get_devices(filename)
        device = vm_util.find_rescue_device(devices, self._instance)
        self.assertEqual(filename, device.backing.fileName)

    def test_find_rescue_device_not_found(self):
        """NotFound is raised when no rescue disk exists among the devices."""
        filename = '[test_datastore] uuid/uuid.vmdk'
        devices = self._get_devices(filename)
        self.assertRaises(exception.NotFound,
                          vm_util.find_rescue_device,
                          devices,
                          self._instance)

    def test_validate_limits(self):
        """Limits.validate rejects inconsistent or unknown shares levels."""
        # A named level plus an explicit custom share count is contradictory.
        limits = vm_util.Limits(shares_level='high',
                                shares_share=1948)
        self.assertRaises(exception.InvalidInput,
                          limits.validate)
        # 'fira' is not a recognised shares level at all.
        limits = vm_util.Limits(shares_level='fira')
        self.assertRaises(exception.InvalidInput,
                          limits.validate)
    def test_get_vm_create_spec_with_console_delay(self):
        """console_delay_seconds is exported via keyboard.typematicMinDelay."""
        extra_specs = vm_util.ExtraSpecs()
        self.flags(console_delay_seconds=2, group='vmware')
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.name = self._instance.uuid
        expected.instanceUuid = self._instance.uuid
        expected.deviceChange = []
        expected.numCPUs = 2
        expected.version = None
        expected.memoryMB = 2048
        expected.guestId = constants.DEFAULT_OS_TYPE
        expected.extraConfig = []
        extra_config = fake_factory.create("ns0:OptionValue")
        extra_config.value = self._instance.uuid
        extra_config.key = 'nvp.vm-uuid'
        expected.extraConfig.append(extra_config)
        # 2 configured seconds become 2000000 -- presumably the driver
        # converts to microseconds; TODO confirm unit against vm_util.
        extra_config = fake_factory.create("ns0:OptionValue")
        extra_config.value = 2000000
        extra_config.key = 'keyboard.typematicMinDelay'
        expected.extraConfig.append(extra_config)
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.managedBy.type = 'instance'
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.afterPowerOn = True
        expected.tools.afterResume = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.beforeGuestStandby = True
        self.assertEqual(expected, result)

    def test_get_vm_create_spec_with_cores_per_socket(self):
        """ExtraSpecs.cores_per_socket maps to numCoresPerSocket."""
        extra_specs = vm_util.ExtraSpecs(cores_per_socket=4)
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.deviceChange = []
        expected.guestId = 'otherGuest'
        expected.instanceUuid = self._instance.uuid
        expected.memoryMB = self._instance.memory_mb
        expected.name = self._instance.uuid
        expected.numCPUs = self._instance.vcpus
        expected.numCoresPerSocket = 4
        expected.version = None
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.afterPowerOn = True
        expected.tools.afterResume = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.beforeGuestStandby = True
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.managedBy.type = 'instance'
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.key = 'nvp.vm-uuid'
        extra_config.value = self._instance.uuid
        expected.extraConfig.append(extra_config)
        self.assertEqual(expected, result)

    def test_get_vm_create_spec_with_memory_allocations(self):
        """Memory limit/reservation extra specs populate memoryAllocation."""
        memory_limits = vm_util.Limits(limit=7,
                                       reservation=6)
        extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.deviceChange = []
        expected.guestId = 'otherGuest'
        expected.instanceUuid = self._instance.uuid
        expected.memoryMB = self._instance.memory_mb
        expected.name = self._instance.uuid
        expected.numCPUs = self._instance.vcpus
        expected.version = None
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.afterPowerOn = True
        expected.tools.afterResume = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.beforeGuestStandby = True
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.managedBy.type = 'instance'
        # Unspecified share settings default to 'normal' with zero shares.
        memory_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
        memory_allocation.limit = 7
        memory_allocation.reservation = 6
        memory_allocation.shares = fake_factory.create('ns0:SharesInfo')
        memory_allocation.shares.level = 'normal'
        memory_allocation.shares.shares = 0
        expected.memoryAllocation = memory_allocation
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.key = 'nvp.vm-uuid'
        extra_config.value = self._instance.uuid
        expected.extraConfig.append(extra_config)
        self.assertEqual(expected, result)
    def test_get_swap(self):
        """get_swap picks the swap disk out of config.hardware.device."""
        vm_ref = 'fake-vm-ref'

        # Root disk
        controller_key = 1000
        root_disk = fake.VirtualDisk()
        root_disk.controllerKey = controller_key
        disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
        disk_backing.fileName = '[test_datastore] uuid/uuid.vmdk'
        root_disk.capacityInBytes = 1048576
        root_disk.backing = disk_backing

        # Swap disk
        swap_disk = fake.VirtualDisk()
        swap_disk.controllerKey = controller_key
        disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
        disk_backing.fileName = "swap"
        swap_disk.capacityInBytes = 1024
        swap_disk.backing = disk_backing
        devices = [root_disk, swap_disk]

        session = fake.FakeSession()
        with mock.patch.object(session, '_call_method',
                               return_value=devices) as mock_call:
            device = vm_util.get_swap(session, vm_ref)

            # The device list must come from a single property fetch.
            mock_call.assert_called_once_with(mock.ANY,
                "get_object_property", vm_ref, "config.hardware.device")
            self.assertEqual(swap_disk, device)
    def test_create_folder_with_empty_vmfolder(self):
        """Test create_folder when the datacenter vmFolder is empty"""
        child_folder = mock.sentinel.child_folder
        session = fake.FakeSession()
        # side_effect order: childEntity lookup returns None (empty folder),
        # then CreateFolder returns the new child ref.
        with mock.patch.object(session, '_call_method',
                               side_effect=[None, child_folder]):
            parent_folder = mock.sentinel.parent_folder
            parent_folder.value = 'parent-ref'
            child_name = 'child_folder'
            ret = vm_util.create_folder(session, parent_folder, child_name)

            self.assertEqual(child_folder, ret)
            expected_calls = [mock.call(vutil, 'get_object_property',
                                        parent_folder,
                                        'childEntity'),
                              mock.call(session.vim, 'CreateFolder',
                                        parent_folder, name=child_name)]
            self.assertEqual(expected_calls,
                             session._call_method.call_args_list)

    def test_create_folder_not_present(self):
        """Test create_folder when child not present."""
        prop_val = mock.Mock()
        prop_val.ManagedObjectReference = []
        child_folder = mock.sentinel.child_folder
        session = fake.FakeSession()
        # childEntity exists but lists no children, so a new folder is made.
        with mock.patch.object(session, '_call_method',
                               side_effect=[prop_val, child_folder]):
            child_name = 'child_folder'
            parent_folder = mock.sentinel.parent_folder
            parent_folder.value = 'parent-ref'
            ret = vm_util.create_folder(session, parent_folder, child_name)

            self.assertEqual(child_folder, ret)
            expected_invoke_api = [mock.call(vutil, 'get_object_property',
                                             parent_folder,
                                             'childEntity'),
                                   mock.call(session.vim, 'CreateFolder',
                                             parent_folder, name=child_name)]
            self.assertEqual(expected_invoke_api,
                             session._call_method.mock_calls)

    def test_create_folder_already_present(self):
        """Test create_folder when child already present."""
        parent_folder = mock.sentinel.parent_folder
        child_name = 'child_folder'
        prop_val = mock.Mock()
        child_entity_1 = mock.Mock()
        child_entity_1._type = 'Folder'
        child_entity_1_name = 'SomeOtherName'
        child_entity_2 = mock.Mock()
        child_entity_2._type = 'Folder'
        child_entity_2_name = 'AnotherName'
        child_entity_3 = mock.Mock()
        child_entity_3._type = 'Folder'
        child_entity_3_name = child_name
        prop_val.ManagedObjectReference = [child_entity_1, child_entity_2,
                                           child_entity_3]
        session = fake.FakeSession()
        # Each child's name is fetched in turn until the match is found;
        # no CreateFolder call is expected in this path.
        with mock.patch.object(session, '_call_method',
                               side_effect=[prop_val,
                                            child_entity_1_name,
                                            child_entity_2_name,
                                            child_entity_3_name]):
            ret = vm_util.create_folder(session, parent_folder, child_name)

            self.assertEqual(child_entity_3, ret)
            expected_invoke_api = [mock.call(vutil, 'get_object_property',
                                             parent_folder,
                                             'childEntity'),
                                   mock.call(vutil, 'get_object_property',
                                             child_entity_1,
                                             'name'),
                                   mock.call(vutil, 'get_object_property',
                                             child_entity_2,
                                             'name'),
                                   mock.call(vutil, 'get_object_property',
                                             child_entity_3,
                                             'name')]
            self.assertEqual(expected_invoke_api,
                             session._call_method.mock_calls)

    def test_create_folder_with_duplicate_name(self):
        """A DuplicateName race falls back to looking up the existing folder."""
        parent_folder = mock.sentinel.parent_folder
        parent_folder.value = 'parent-ref'
        child_name = 'child_folder'
        prop_val_1 = mock.Mock()
        prop_val_1.ManagedObjectReference = []
        child_entity_2 = mock.Mock()
        child_entity_2._type = 'Folder'
        prop_val_2 = mock.Mock()
        prop_val_2.ManagedObjectReference = [child_entity_2]
        child_entity_2_name = child_name
        details = {'object': 'folder-1'}
        duplicate_exception = vexc.DuplicateName(details=details)
        session = fake.FakeSession()
        # side_effect order: empty childEntity, CreateFolder raises
        # DuplicateName, then the retry lookup finds the existing child.
        with mock.patch.object(session, '_call_method',
                               side_effect=[prop_val_1,
                                            duplicate_exception,
                                            prop_val_2,
                                            child_entity_2_name]):
            ret = vm_util.create_folder(session, parent_folder, child_name)

            self.assertEqual(child_entity_2._type, ret._type)
            expected_invoke_api = [mock.call(vutil, 'get_object_property',
                                             parent_folder,
                                             'childEntity'),
                                   mock.call(session.vim, 'CreateFolder',
                                             parent_folder, name=child_name)]
            self.assertEqual(expected_invoke_api,
                             session._call_method.mock_calls)
    def test_get_folder_does_not_exist(self):
        """_get_folder returns None when the parent has no childEntity."""
        session = fake.FakeSession()
        with mock.patch.object(session, '_call_method',
                               return_value=None):
            ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')

            self.assertIsNone(ret)
            expected_invoke_api = [mock.call(vutil, 'get_object_property',
                                             'fake-parent',
                                             'childEntity')]
            self.assertEqual(expected_invoke_api,
                             session._call_method.mock_calls)

    def test_get_folder_child_entry_not_folder(self):
        """Non-Folder children are skipped without even fetching their name."""
        child_entity = mock.Mock()
        child_entity._type = 'NotFolder'
        prop_val = mock.Mock()
        prop_val.ManagedObjectReference = [child_entity]
        session = fake.FakeSession()
        with mock.patch.object(session, '_call_method',
                               return_value=prop_val):
            ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')

            self.assertIsNone(ret)
            expected_invoke_api = [mock.call(vutil, 'get_object_property',
                                             'fake-parent',
                                             'childEntity')]
            self.assertEqual(expected_invoke_api,
                             session._call_method.mock_calls)

    def test_get_folder_child_entry_not_matched(self):
        """A Folder child with a different name yields no match."""
        child_entity = mock.Mock()
        child_entity._type = 'Folder'
        prop_val = mock.Mock()
        prop_val.ManagedObjectReference = [child_entity]
        session = fake.FakeSession()
        with mock.patch.object(session, '_call_method',
                               side_effect=[prop_val, 'fake-1-name']):
            ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')

            self.assertIsNone(ret)
            expected_invoke_api = [mock.call(vutil, 'get_object_property',
                                             'fake-parent',
                                             'childEntity'),
                                   mock.call(vutil, 'get_object_property',
                                             child_entity, 'name')]
            self.assertEqual(expected_invoke_api,
                             session._call_method.mock_calls)

    def test_get_folder_child_entry_matched(self):
        """A Folder child whose name matches is returned."""
        child_entity = mock.Mock()
        child_entity._type = 'Folder'
        prop_val = mock.Mock()
        prop_val.ManagedObjectReference = [child_entity]
        session = fake.FakeSession()
        with mock.patch.object(session, '_call_method',
                               side_effect=[prop_val, 'fake-name']):
            ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')

            self.assertEqual(ret, child_entity)
            expected_invoke_api = [mock.call(vutil, 'get_object_property',
                                             'fake-parent',
                                             'childEntity'),
                                   mock.call(vutil, 'get_object_property',
                                             child_entity, 'name')]
            self.assertEqual(expected_invoke_api,
                             session._call_method.mock_calls)
def test_folder_path_ref_cache(self):
path = 'OpenStack/Project (e2b86092bf064181ade43deb3188f8e4)'
self.assertIsNone(vm_util.folder_ref_cache_get(path))
vm_util.folder_ref_cache_update(path, 'fake-ref')
self.assertEqual('fake-ref', vm_util.folder_ref_cache_get(path))
    def test_get_vm_name(self):
        """_get_vm_name combines display name and uuid, capped at 80 chars."""
        # No display name: the uuid alone is used.
        uuid = uuidutils.generate_uuid()
        expected = uuid
        name = vm_util._get_vm_name(None, uuid)
        self.assertEqual(expected, name)

        # Short display name: '<display_name> (<uuid>)'.
        display_name = 'fira'
        expected = 'fira (%s)' % uuid
        name = vm_util._get_vm_name(display_name, uuid)
        self.assertEqual(expected, name)

        # Over-long display name is truncated so the full result is
        # exactly 80 characters.
        display_name = 'X' * 255
        expected = '%s (%s)' % ('X' * 41, uuid)
        name = vm_util._get_vm_name(display_name, uuid)
        self.assertEqual(expected, name)
        self.assertEqual(len(name), 80)
    @mock.patch.object(vm_util, '_get_vm_name', return_value='fake-name')
    def test_rename_vm(self, mock_get_name):
        """rename_vm issues Rename_Task with the computed name and waits."""
        session = fake.FakeSession()
        with test.nested(
            mock.patch.object(session, '_call_method',
                              return_value='fake_rename_task'),
            mock.patch.object(session, '_wait_for_task')
        ) as (_call_method, _wait_for_task):
            vm_util.rename_vm(session, 'fake-ref', self._instance)
            _call_method.assert_called_once_with(mock.ANY,
                'Rename_Task', 'fake-ref', newName='fake-name')
            _wait_for_task.assert_called_once_with(
                'fake_rename_task')
        mock_get_name.assert_called_once_with(self._instance.display_name,
                                              self._instance.uuid)
@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
    """Tests for resolving the host a VM runs on via vm_util."""

    # N.B. Mocking on the class only mocks test_*(), but we need
    # VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in
    # setUp causes object initialisation to fail. Not mocking in tests results
    # in vim calls not using FakeVim.
    @mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
    def setUp(self):
        super(VMwareVMUtilGetHostRefTestCase, self).setUp()
        fake.reset()
        vm_util.vm_refs_cache_reset()

        self.session = driver.VMwareAPISession()

        # Create a fake VirtualMachine running on a known host
        self.host_ref = list(fake._db_content['HostSystem'].keys())[0]
        self.vm_ref = fake.create_vm(host_ref=self.host_ref)

    @mock.patch.object(vm_util, 'get_vm_ref')
    def test_get_host_ref_for_vm(self, mock_get_vm_ref):
        """get_host_ref_for_vm returns the ref of the VM's runtime host."""
        mock_get_vm_ref.return_value = self.vm_ref

        ret = vm_util.get_host_ref_for_vm(self.session, 'fake-instance')

        mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
        self.assertEqual(self.host_ref, ret)

    @mock.patch.object(vm_util, 'get_vm_ref')
    def test_get_host_name_for_vm(self, mock_get_vm_ref):
        """get_host_name_for_vm returns the name of the VM's runtime host."""
        mock_get_vm_ref.return_value = self.vm_ref
        host = fake._get_object(self.host_ref)

        ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance')

        mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
        self.assertEqual(host.name, ret)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""test_02_subcmd_filter.py
Test prodigal filtering for pdp script
This test suite is intended to be run from the repository root using:
nosetests -v
Individual test classes can be run using, e.g.:
$ nosetests -v tests/test_subcommands.py:TestConfigSubcommand
Each command CMD available at the command-line as pdp <CMD> is
tested in its own class (subclassing unittest.TestCase), where the
setUp() method defines input/output files, a null logger (picked up
by nosetests), and a dictionary of command lines, keyed by test name
with values that represent the command-line options.
For each test, command-line options are defined in a Namespace,
and passed as the sole argument to the appropriate subcommand
function from subcommands.py.
(c) The James Hutton Institute 2017-2019
Author: Leighton Pritchard
Contact:
leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD2 5DA,
Scotland,
UK
The MIT License
Copyright (c) 2017-2019 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
import shutil
from argparse import Namespace
from collections import namedtuple
import pytest
from diagnostic_primers.scripts import subcommands
from diagnostic_primers.scripts.subcommands.subcmd_filter import PDPFilterException
from tools import PDPTestCase, modify_namespace
# Defined as global so it can be seen by the TestFilterSubcommand() class
# setUpClass() classmethod.
OUTDIR = os.path.join("tests", "test_output", "pdp_filter")

# Convenience struct for passing arguments to run filter commands
# (input/output config paths, output/target dirs, file suffix, filter mode)
Params = namedtuple("Params", "infname outfname outdir tgtdir suffix filt")

# Convenience struct for scheduling (scheduler backend name, worker count)
Scheduling = namedtuple("Scheduling", "scheduler workers")
class TestFilterSubcommand(PDPTestCase):
    """Class defining tests of the pdp filter subcommand."""

    @classmethod
    def setUpClass(cls):
        """Remove output from any previous run before the suite starts."""
        # Clean up old output directory
        if os.path.isdir(OUTDIR):
            shutil.rmtree(OUTDIR)

    def setUp(self):
        """Set parameters for tests."""
        self.datadir = os.path.join("tests", "test_input", "pdp_filter")
        self.outdir = OUTDIR
        self.targetdir = os.path.join("tests", "test_targets", "pdp_filter")
        self.prodigal_exe = "prodigal"
        self.scheduling = Scheduling("multiprocessing", None)

        # null logger instance that does nothing
        self.logger = logging.getLogger("TestFilterSubcommand logger")
        self.logger.addHandler(logging.NullHandler())

        # base Namespace; individual tests override fields with
        # modify_namespace() instead of rebuilding it from scratch
        self.base_namespace = Namespace(
            filt_prodigal=False,
            filt_prodigaligr=False,
            filt_alnvar=None,
            filt_outdir=self.outdir,
            filt_prodigal_exe=self.prodigal_exe,
            filt_force=True,
            filt_suffix="prodigal",
            filt_spacerlen=150,
            filt_flanklen=150,
            filt_minserate=0.05,
            filt_minsecount=0,
            scheduler=self.scheduling.scheduler,
            workers=self.scheduling.workers,
            verbose=True,
            disable_tqdm=True,
            recovery=False,
            nucmer_exe="nucmer",
            deltafilter_exe="delta-filter",
            maxmatch=False,
            jobprefix="test_02_subcmd_filter",
        )

    def filter_run(self, params):
        """Runs a pdp filter command with passed settings

        :param params: Params namedtuple defining command parameters

        Checks the output and target directories for equality
        """
        filt_ns = modify_namespace(
            self.base_namespace,
            {
                "infilename": params.infname,
                "outfilename": params.outfname,
                "filt_outdir": params.outdir,
                "filt_suffix": params.suffix,
            },
        )
        # Decide whether we're using alnvar, prodigal or prodigaligr filters
        if params.filt == "prodigal":
            filt_ns = modify_namespace(filt_ns, {"filt_prodigal": True})
        elif params.filt == "prodigaligr":
            filt_ns = modify_namespace(filt_ns, {"filt_prodigaligr": True})
        elif params.filt == "alnvar":
            filt_ns = modify_namespace(filt_ns, {"filt_alnvar": "atrosepticum_NCBI"})
        subcommands.subcmd_filter(filt_ns, self.logger)

        # Check file contents
        self.assertDirsEqual(params.outdir, params.tgtdir)

    def test_filter_prodigal_run(self):
        """filter subcommand produces correct annotation with --prodigal.

        pdp filter -v --disable_tqdm --prodigal \
            --outdir tests/test_output/pdp_filter/prodigal \
            --suffix prodigal \
            tests/test_input/pdp_filter/seqfixed_conf.json \
            tests/test_output/pdp_filter/prodconf.json
        """
        suffix = "prodigal"
        self.filter_run(
            Params(
                os.path.join(self.datadir, "seqfixed_conf.json"),
                os.path.join(self.outdir, "prodconf.json"),
                os.path.join(self.outdir, suffix),
                os.path.join(self.targetdir, suffix),
                suffix,
                "prodigal",
            )
        )

    def test_filter_prodigaligr_run(self):
        """filter subcommand produces correct annotation with --prodigaligr.

        pdp filter -v --disable_tqdm --prodigaligr \
            --outdir tests/test_output/pdp_filter/prodigaligr \
            --suffix prodigaligr \
            tests/test_input/pdp_filter/seqfixed_conf.json \
            tests/test_output/pdp_filter/prodigrconf.json
        """
        suffix = "prodigaligr"
        self.filter_run(
            Params(
                os.path.join(self.datadir, "seqfixed_conf.json"),
                os.path.join(self.outdir, "prodigrconf.json"),
                os.path.join(self.outdir, suffix),
                os.path.join(self.targetdir, suffix),
                suffix,
                "prodigaligr",
            )
        )

    def test_filter_alnvar_run(self):
        """filter subcommand produces correct annotation with --alnvar.

        pdp filter -v --disable_tqdm --alnvar atrosepticum_NCBI \
            --outdir tests/test_output/pdp_filter/alnvar \
            --suffix alnvar \
            tests/test_input/pdp_filter/seqfixed_conf.json \
            tests/test_output/pdp_filter/alnvarconf.json
        """
        suffix = "alnvar"
        self.filter_run(
            Params(
                os.path.join(self.datadir, "seqfixed_conf.json"),
                os.path.join(self.outdir, "alnvarconf.json"),
                os.path.join(self.outdir, suffix),
                os.path.join(self.targetdir, suffix),
                suffix,
                "alnvar",
            )
        )

    def test_invalid_conf_file(self):
        """Script exits when filter config file has wrong suffix.

        pdp filter -v --disable_tqdm --prodigal \
            --outdir tests/test_output/pdp_filter \
            --suffix prodigal \
            tests/test_input/pdp_filter/fixedconf.nojson \
            tests/test_output/pdp_filter/prodconf.json
        """
        with pytest.raises(PDPFilterException):
            subcommands.subcmd_filter(
                modify_namespace(
                    self.base_namespace,
                    {
                        "infilename": os.path.join(self.datadir, "fixedconf.nojson"),
                        "outfilename": os.path.join(self.outdir, "prodconf.json"),
                        "filt_outdir": os.path.join(self.outdir, "prodigal"),
                        "filt_prodigal": True,
                    },
                ),
                self.logger,
            )

    def test_tsv_conf_file(self):
        """Error raised when tab .conf file provided for filter.

        pdp filter -v --disable_tqdm --prodigal \
            --outdir tests/test_output/pdp_filter \
            --suffix prodigal \
            tests/test_input/pdp_filter/testin.conf \
            tests/test_output/pdp_filter/prodconf.json
        """
        with pytest.raises(PDPFilterException):
            subcommands.subcmd_filter(
                modify_namespace(
                    self.base_namespace,
                    {
                        "infilename": os.path.join(self.datadir, "testin.conf"),
                        "outfilename": os.path.join(self.outdir, "prodconf.json"),
                        "filt_prodigal": True,
                    },
                ),
                self.logger,
            )
|
|
from datetime import datetime
from cmd2 import Cmd as Lobotomy
from blessings import Terminal
from framework.logging.logger import Logger
# Shared Terminal instance used for coloured status/error output.
t = Terminal()
class Run(Lobotomy):
    """Interactive lobotomy console: each ``do_*`` method is a shell command.

    Command implementations are imported lazily inside a try/except so a
    missing optional dependency degrades to a printed error message
    instead of preventing the shell from starting.

    BUG FIX applied throughout: the handlers previously logged
    ``e.message``, an attribute that Python 3 exceptions do not have
    (so every failed import raised AttributeError instead of being
    logged); ``str(e)`` is used instead.
    """

    def __init__(self):
        Lobotomy.__init__(self)

    # APK related commands
    # --------------------
    # loader, decompile, debuggable,
    # profiler, permissions, components
    #
    @staticmethod
    def do_loader(args):
        """
        Description: Load target APK for analysis with androguard --
        Requirements: Target APK
        Usage: loader </path/to/apk>
        """
        try:
            from framework.brains.apk.loader import Loader
            loader = Loader(args)
            # Publish the loaded APK objects at module level so the other
            # commands can retrieve them through globals()["apk"/"apks"].
            global apk, apks
            apk, apks = loader.run_loader()
        except ImportError as e:
            print(t.red("[{0}] ".format(datetime.now()) + "Unable to import Loader"))
            Logger.run_logger(str(e))

    @staticmethod
    def do_decompile(args):
        """
        Description: Decompile target APK with apktool.jar
        Requirements: Target APK
        Usage: decompile <name_of_output_directory> && </path/to/apk>
        """
        try:
            from framework.brains.apk.decompile import Decompile
            # args: "<output_directory> </path/to/apk>"
            decompile = Decompile(args.split()[0], args.split()[1])
            decompile.run_decompile()
        except ImportError as e:
            print(t.red("[{0}] ".format(datetime.now()) + "Unable to import Decompile"))
            Logger.run_logger(str(e))

    @staticmethod
    def do_profiler(args):
        """
        Description: Run profiling on the target APK loaded
        Requirements: Loaded APK
        Usage: profiler
        """
        try:
            from framework.brains.apk.enumeration.profiler import Profiler
            p = Profiler(globals()["apk"])
            p.run_profiler()
        except ImportError as e:
            print(t.red("[{0}] ".format(datetime.now()) + "Unable to import Profiler"))
            Logger.run_logger(str(e))

    @staticmethod
    def do_permissions(args):
        """
        Description: List enumeration and api mappings from target APK
        Requirements: Loaded APK
        Usage: permissions <list> || <map>
        """
        try:
            from framework.brains.apk.enumeration.permissions import Permissions
            p = Permissions(globals()["apk"], globals()["apks"])
            if args == "list":
                p.run_list_permissions()
            if args == "map":
                p.run_map_permissions()
        except ImportError as e:
            print(t.red("[{0}] ".format(datetime.now()) + "Unable to import Permissions"))
            Logger.run_logger(str(e))

    @staticmethod
    def do_components(args):
        """
        Description: Enumerate components for target APK
        Requirements: Loaded APK
        Usage: components
        """
        try:
            from framework.brains.apk.enumeration.components import Components
            c = Components(globals()["apk"])
            c.enum_component()
        except ImportError as e:
            print(t.red("[{0}] ".format(datetime.now()) + "Unable to import Components"))
            Logger.run_logger(str(e))

    @staticmethod
    def do_attacksurface(args):
        """
        Description: Enumerates attacksurface for target APK
        Requirements: Loaded APK
        Usage: attacksurface
        """
        try:
            from framework.brains.apk.enumeration.attack_surface import AttackSurface
            c = AttackSurface(globals()["apk"])
            c.run_enum_attack_surface()
        except ImportError as e:
            print(t.red("[{0}] ".format(datetime.now()) + "Unable to import AttackSurface"))
            Logger.run_logger(str(e))

    @staticmethod
    def do_debuggable(args):
        """
        Description: Make target APK debuggable
        Requirements: Target APK
        Usage: debuggable <name_of_output_directory> && </path/to/apk>
        """
        try:
            from framework.brains.apk.debuggable import Debuggable
            d = Debuggable(args.split()[0], args.split()[1])
            d.run_debuggable()
        except ImportError as e:
            print(t.red("[{0}] ".format(datetime.now()) + "Unable to import Debuggable"))
            Logger.run_logger(str(e))

    # dex2jar
    # --------------------
    # d2j
    #
    @staticmethod
    def do_d2j(args):
        """
        Description: Runs d2j-dex2jar.sh on the target APK
        Requirements: Target APK
        Usage: d2j <directory_name> </path/to/apk>
        """
        try:
            from framework.brains.dex2jar.d2j import D2J
            d = D2J(args.split()[0], args.split()[1])
            d.run_d2j()
        except ImportError as e:
            print(t.red("[{0}] ".format(datetime.now()) + "Unable to import D2J"))
            Logger.run_logger(str(e))

    # Bowser
    # --------------------
    # bowser enum, bowser parseUri
    #
    @staticmethod
    def do_bowser(args):
        """
        Description: Runs the bowser toolkit on a target APK
        Requirements: Loaded APK, Lobotomy web services
        Usage: bowser <enum> || <parseUri>
        """
        try:
            from framework.brains.bowser.bowser import Bowser
            b = Bowser(globals()["apks"], globals()["apk"])
            if args.split()[0] == "enum":
                b.run_bowser()
            if args.split()[0] == "parseUri":
                b.run_parse_uri()
        except ImportError as e:
            print(t.red("[{0}] ".format(datetime.now()) + "Unable to import Bowser"))
            Logger.run_logger(str(e))

    # Dynamic
    # --------------------
    # logcat, instrumentation
    #
    @staticmethod
    def do_logcat(args):
        """
        Description: Runs logcat against the target APK and sends the output
        to its RESTFul service handler
        Requirements: Loaded APK
        Usage: logcat
        """
        try:
            from framework.brains.dynamic.logcat import Logcat
            l = Logcat()
            l.run_logcat()
        except ImportError as e:
            print(t.red("[{0}] ".format(datetime.now()) + "Unable to import Logcat"))
            Logger.run_logger(str(e))

    @staticmethod
    def do_frida(args):
        """
        Description: Runs the Frida instrumentation toolkit against a target process
        Requirements: Loaded APK
        Usage: frida
        """
        try:
            from framework.brains.dynamic.frida.instrumentation import Instrumentation
            i = Instrumentation(globals()["apk"])
            i.run_instrumentation()
        except ImportError as e:
            print(t.red("[{0}] ".format(datetime.now()) + "Unable to import Instrumentation"))
            Logger.run_logger(str(e))

    # Surgical
    # --------------------
    # This module is designed to attempt and find
    # potential vulnerabilities
    #
    @staticmethod
    def do_surgical(args):
        """
        Description: Instantiates the SurgicalAPI with available functions and operations
        Requirements: Loaded APK
        Usage: surgical
        """
        try:
            from framework.brains.surgical.api import SurgicalAPI
            s = SurgicalAPI(globals()["apks"])
            s.run_surgical()
        except ImportError as e:
            print(t.red("[{0}] ".format(datetime.now()) + "Unable to import the SurgicalAPI"))
            Logger.run_logger(str(e))
|
|
# Copyright (c) 2015 Intel, Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""HDFS native protocol (hdfs) driver for manila shares.
Manila share is a directory in HDFS. And this share does not use
service VM instance (share server). The instance directly talks
to the HDFS cluster.
The initial version only supports single namenode and flat network.
Configuration Requirements:
To enable access control, HDFS file system must have ACLs enabled.
"""
import math
import os
import pipes
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import units
import six
from manila import exception
from manila.i18n import _
from manila.share import driver
from manila import utils
LOG = log.getLogger(__name__)
# Configuration options for this driver, registered on CONF below and
# appended to the backend configuration in HDFSNativeShareDriver.__init__.
hdfs_native_share_opts = [
    cfg.HostAddressOpt('hdfs_namenode_ip',
                       help='The IP of the HDFS namenode.'),
    cfg.PortOpt('hdfs_namenode_port',
                default=9000,
                help='The port of HDFS namenode service.'),
    cfg.PortOpt('hdfs_ssh_port',
                default=22,
                help='HDFS namenode SSH port.'),
    cfg.StrOpt('hdfs_ssh_name',
               help='HDFS namenode ssh login name.'),
    cfg.StrOpt('hdfs_ssh_pw',
               help='HDFS namenode SSH login password, '
                    'This parameter is not necessary, if '
                    '\'hdfs_ssh_private_key\' is configured.'),
    cfg.StrOpt('hdfs_ssh_private_key',
               help='Path to HDFS namenode SSH private '
                    'key for login.'),
]

CONF = cfg.CONF
CONF.register_opts(hdfs_native_share_opts)
class HDFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
    """HDFS Share Driver.

    Executes commands relating to shares.

    API version history:

        1.0 - Initial Version
    """
    def __init__(self, *args, **kwargs):
        """Initialise the driver; runs without share servers (first arg False)."""
        super(HDFSNativeShareDriver, self).__init__(False, *args, **kwargs)
        self.configuration.append_config_values(hdfs_native_share_opts)
        self.backend_name = self.configuration.safe_get(
            'share_backend_name') or 'HDFS-Native'
        # Cached (SSHPool, connection) tuples keyed by namenode host.
        self.ssh_connections = {}
        # Selected in do_setup(): local or remote (SSH) command execution.
        self._hdfs_execute = None
        self._hdfs_bin = None
        self._hdfs_base_path = None
    def do_setup(self, context):
        """Do initialization while the share driver starts."""
        super(HDFSNativeShareDriver, self).do_setup(context)
        host = self.configuration.hdfs_namenode_ip
        local_hosts = socket.gethostbyname_ex(socket.gethostname())[2]
        # Execute hdfs commands directly when this host *is* the namenode,
        # otherwise run them on the namenode over SSH.
        if host in local_hosts:
            self._hdfs_execute = self._hdfs_local_execute
        else:
            self._hdfs_execute = self._hdfs_remote_execute
        self._hdfs_bin = 'hdfs'
        # Export location prefix, e.g. hdfs://<namenode_ip>:<port>
        self._hdfs_base_path = (
            'hdfs://' + self.configuration.hdfs_namenode_ip + ':'
            + six.text_type(self.configuration.hdfs_namenode_port))
def _hdfs_local_execute(self, *cmd, **kwargs):
if 'run_as_root' not in kwargs:
kwargs.update({'run_as_root': False})
return utils.execute(*cmd, **kwargs)
def _hdfs_remote_execute(self, *cmd, **kwargs):
host = self.configuration.hdfs_namenode_ip
check_exit_code = kwargs.pop('check_exit_code', False)
return self._run_ssh(host, cmd, check_exit_code)
def _run_ssh(self, host, cmd_list, check_exit_code=False):
command = ' '.join(pipes.quote(cmd_arg) for cmd_arg in cmd_list)
connection = self.ssh_connections.get(host)
if not connection:
hdfs_ssh_name = self.configuration.hdfs_ssh_name
password = self.configuration.hdfs_ssh_pw
privatekey = self.configuration.hdfs_ssh_private_key
hdfs_ssh_port = self.configuration.hdfs_ssh_port
ssh_conn_timeout = self.configuration.ssh_conn_timeout
min_size = self.configuration.ssh_min_pool_conn
max_size = self.configuration.ssh_max_pool_conn
ssh_pool = utils.SSHPool(host,
hdfs_ssh_port,
ssh_conn_timeout,
hdfs_ssh_name,
password=password,
privatekey=privatekey,
min_size=min_size,
max_size=max_size)
ssh = ssh_pool.create()
self.ssh_connections[host] = (ssh_pool, ssh)
else:
ssh_pool, ssh = connection
if not ssh.get_transport().is_active():
ssh_pool.remove(ssh)
ssh = ssh_pool.create()
self.ssh_connections[host] = (ssh_pool, ssh)
try:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception as e:
msg = (_('Error running SSH command: %(cmd)s. '
'Error: %(excmsg)s.') %
{'cmd': command, 'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
def _set_share_size(self, share, size=None):
share_dir = '/' + share['name']
if not size:
sizestr = six.text_type(share['size']) + 'g'
else:
sizestr = six.text_type(size) + 'g'
try:
self._hdfs_execute(self._hdfs_bin, 'dfsadmin',
'-setSpaceQuota', sizestr, share_dir)
except exception.ProcessExecutionError as e:
msg = (_('Failed to set space quota for the '
'share %(sharename)s. Error: %(excmsg)s.') %
{'sharename': share['name'],
'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
def _create_share(self, share):
"""Creates a share."""
if share['share_proto'].lower() != 'hdfs':
msg = _('Only HDFS protocol supported!')
LOG.error(msg)
raise exception.HDFSException(msg)
share_dir = '/' + share['name']
try:
self._hdfs_execute(self._hdfs_bin, 'dfs',
'-mkdir', share_dir)
except exception.ProcessExecutionError as e:
msg = (_('Failed to create directory in hdfs for the '
'share %(sharename)s. Error: %(excmsg)s.') %
{'sharename': share['name'],
'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
# set share size
self._set_share_size(share)
try:
self._hdfs_execute(self._hdfs_bin, 'dfsadmin',
'-allowSnapshot', share_dir)
except exception.ProcessExecutionError as e:
msg = (_('Failed to allow snapshot for the '
'share %(sharename)s. Error: %(excmsg)s.') %
{'sharename': share['name'],
'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
def _get_share_path(self, share):
"""Return share path on storage provider."""
return os.path.join(self._hdfs_base_path, share['name'])
def _get_snapshot_path(self, snapshot):
"""Return snapshot path on storage provider."""
snapshot_dir = '.snapshot'
return os.path.join('/', snapshot['share_name'],
snapshot_dir, snapshot['name'])
def get_network_allocations_number(self):
return 0
def create_share(self, context, share, share_server=None):
"""Create a HDFS directory which acted as a share."""
self._create_share(share)
return self._get_share_path(share)
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
"""Creates a snapshot."""
self._create_share(share)
share_path = '/' + share['name']
snapshot_path = self._get_snapshot_path(snapshot)
try:
# check if the directory is empty
(out, __) = self._hdfs_execute(
self._hdfs_bin, 'dfs', '-ls', snapshot_path)
# only copy files when the snapshot directory is not empty
if out:
copy_path = snapshot_path + "/*"
cmd = [self._hdfs_bin, 'dfs', '-cp',
copy_path, share_path]
self._hdfs_execute(*cmd)
except exception.ProcessExecutionError as e:
msg = (_('Failed to create share %(sharename)s from '
'snapshot %(snapshotname)s. Error: %(excmsg)s.') %
{'sharename': share['name'],
'snapshotname': snapshot['name'],
'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
return self._get_share_path(share)
def create_snapshot(self, context, snapshot, share_server=None):
"""Creates a snapshot."""
share_dir = '/' + snapshot['share_name']
snapshot_name = snapshot['name']
cmd = [self._hdfs_bin, 'dfs', '-createSnapshot',
share_dir, snapshot_name]
try:
self._hdfs_execute(*cmd)
except exception.ProcessExecutionError as e:
msg = (_('Failed to create snapshot %(snapshotname)s for '
'the share %(sharename)s. Error: %(excmsg)s.') %
{'snapshotname': snapshot_name,
'sharename': snapshot['share_name'],
'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
def delete_share(self, context, share, share_server=None):
"""Deletes share storage."""
share_dir = '/' + share['name']
cmd = [self._hdfs_bin, 'dfs', '-rm', '-r', share_dir]
try:
self._hdfs_execute(*cmd)
except exception.ProcessExecutionError as e:
msg = (_('Failed to delete share %(sharename)s. '
'Error: %(excmsg)s.') %
{'sharename': share['name'],
'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
def delete_snapshot(self, context, snapshot, share_server=None):
"""Deletes a snapshot."""
share_dir = '/' + snapshot['share_name']
cmd = [self._hdfs_bin, 'dfs', '-deleteSnapshot',
share_dir, snapshot['name']]
try:
self._hdfs_execute(*cmd)
except exception.ProcessExecutionError as e:
msg = (_('Failed to delete snapshot %(snapshotname)s. '
'Error: %(excmsg)s.') %
{'snapshotname': snapshot['name'],
'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
def ensure_share(self, context, share, share_server=None):
"""Ensure the storage are exported."""
def allow_access(self, context, share, access, share_server=None):
"""Allows access to the share for a given user."""
if access['access_type'] != 'user':
msg = _("Only 'user' access type allowed!")
LOG.error(msg)
raise exception.InvalidShareAccess(msg)
# Note(jun): For directories in HDFS, the x permission is
# required to access a child of the directory.
if access['access_level'] == 'rw':
access_level = 'rwx'
elif access['access_level'] == 'ro':
access_level = 'r-x'
else:
msg = (_('The access level %(accesslevel)s was unsupported.') %
{'accesslevel': access['access_level']})
LOG.error(msg)
raise exception.InvalidShareAccess(msg)
share_dir = '/' + share['name']
user_access = ':'.join([access['access_type'],
access['access_to'],
access_level])
cmd = [self._hdfs_bin, 'dfs', '-setfacl', '-m', '-R',
user_access, share_dir]
try:
(__, out) = self._hdfs_execute(*cmd, check_exit_code=True)
except exception.ProcessExecutionError as e:
msg = (_('Failed to set ACL of share %(sharename)s for '
'user: %(username)s'
'Error: %(excmsg)s.') %
{'sharename': share['name'],
'username': access['access_to'],
'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
def deny_access(self, context, share, access, share_server=None):
"""Denies the access to the share for a given user."""
share_dir = '/' + share['name']
access_name = ':'.join([access['access_type'], access['access_to']])
cmd = [self._hdfs_bin, 'dfs', '-setfacl', '-x', '-R',
access_name, share_dir]
try:
(__, out) = self._hdfs_execute(*cmd, check_exit_code=True)
except exception.ProcessExecutionError as e:
msg = (_('Failed to deny ACL of share %(sharename)s for '
'user: %(username)s'
'Error: %(excmsg)s.') %
{'sharename': share['name'],
'username': access['access_to'],
'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
def extend_share(self, share, new_size, share_server=None):
"""Extend share storage."""
self._set_share_size(share, new_size)
def _check_hdfs_state(self):
try:
(out, __) = self._hdfs_execute(self._hdfs_bin, 'fsck', '/')
except exception.ProcessExecutionError as e:
msg = (_('Failed to check hdfs state. Error: %(excmsg)s.') %
{'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
if 'HEALTHY' in out:
return True
else:
return False
def check_for_setup_error(self):
"""Return an error if the prerequisites are met."""
if not self.configuration.hdfs_namenode_ip:
msg = _('Not specify the hdfs cluster yet! '
'Add the ip of hdfs namenode in the '
'hdfs_namenode_ip configuration parameter.')
LOG.error(msg)
raise exception.HDFSException(msg)
if not self._check_hdfs_state():
msg = _('HDFS is not in healthy state.')
LOG.error(msg)
raise exception.HDFSException(msg)
def _get_available_capacity(self):
"""Calculate available space on path."""
try:
(out, __) = self._hdfs_execute(self._hdfs_bin, 'dfsadmin',
'-report')
except exception.ProcessExecutionError as e:
msg = (_('Failed to check available capacity for hdfs.'
'Error: %(excmsg)s.') %
{'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
lines = out.splitlines()
try:
total = int(lines[1].split()[2])
free = int(lines[2].split()[2])
except (IndexError, ValueError) as e:
msg = (_('Failed to get hdfs capacity info. '
'Error: %(excmsg)s.') %
{'excmsg': six.text_type(e)})
LOG.error(msg)
raise exception.HDFSException(msg)
return total, free
def _update_share_stats(self):
"""Retrieves stats info of share directories group."""
data = dict(share_backend_name=self.backend_name,
storage_protocol='HDFS',
reserved_percentage=self.configuration.
reserved_share_percentage)
total, free = self._get_available_capacity()
data['total_capacity_gb'] = math.ceil(total / units.Gi)
data['free_capacity_gb'] = math.ceil(free / units.Gi)
super(HDFSNativeShareDriver, self)._update_share_stats(data)
|
|
from __future__ import annotations
import collections
from datetime import datetime
from decimal import Decimal
from functools import wraps
import operator
import os
import re
import string
from typing import (
TYPE_CHECKING,
Callable,
ContextManager,
Counter,
Iterable,
)
import warnings
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_float_dtype,
is_integer_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Float64Index,
Index,
Int64Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
UInt64Index,
bdate_range,
)
from pandas._testing._io import ( # noqa:F401
close,
network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
with_connectivity_check,
write_to_compressed,
)
from pandas._testing._random import ( # noqa:F401
randbool,
rands,
rands_array,
randu_array,
)
from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.compat import get_dtype # noqa:F401
from pandas._testing.contexts import ( # noqa:F401
RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.arrays import (
DatetimeArray,
PandasArray,
PeriodArray,
TimedeltaArray,
period_array,
)
if TYPE_CHECKING:
from pandas import (
PeriodIndex,
TimedeltaIndex,
)
# Default dimensions used by the make* helpers below: _N rows/periods, _K columns.
_N = 30
_K = 4

# Dtype groups used to parametrize tests across the supported dtypes.
UNSIGNED_INT_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES

FLOAT_DTYPES: list[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]

DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]

BOOL_DTYPES: list[Dtype] = [bool, "bool"]
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: list[Dtype] = [object, "object"]

ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
    ALL_REAL_DTYPES
    + COMPLEX_DTYPES
    + STRING_DTYPES
    + DATETIME64_DTYPES
    + TIMEDELTA64_DTYPES
    + BOOL_DTYPES
    + OBJECT_DTYPES
    + BYTES_DTYPES
)

# The distinct "missing value" sentinels pandas recognizes.
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]

# Matches only the empty string.
EMPTY_STRING_PATTERN = re.compile("^$")

# set testing_mode
# Warning categories toggled by the PANDAS_TESTING_MODE environment variable.
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
    """Install "always" filters for the testing-mode warning categories.

    Driven by the PANDAS_TESTING_MODE environment variable; only acts when
    the variable contains "deprecate".
    """
    requested = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" not in requested:
        return
    for warning_category in _testing_mode_warnings:
        warnings.simplefilter("always", warning_category)
def reset_testing_mode():
    """Re-ignore the testing-mode warning categories.

    Mirrors set_testing_mode(); only acts when PANDAS_TESTING_MODE contains
    "deprecate".
    """
    requested = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" not in requested:
        return
    for warning_category in _testing_mode_warnings:
        warnings.simplefilter("ignore", warning_category)
# Apply the testing-mode warning filters once at import time.
set_testing_mode()
def reset_display_options():
    """Reset every pandas ``display.*`` option back to its default value."""
    display_prefix = "^display."
    pd.reset_option(display_prefix, silent=True)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
    """
    Checks if the set of unique elements of arr1 and arr2 are equivalent.
    """
    # Order and multiplicity are ignored; only the element sets matter.
    return set(arr1) == set(arr2)
def box_expected(expected, box_cls, transpose=True):
    """
    Helper function to wrap the expected output of a test in a given box_class.

    Parameters
    ----------
    expected : np.ndarray, Index, Series
    box_cls : {Index, Series, DataFrame}
        Also accepts pd.array, the array classes, np.ndarray and the
        to_array helper defined in this module.
    transpose : bool, default True
        For the DataFrame case, transpose to a two-row frame (see inline
        comment).

    Returns
    -------
    subclass of box_cls
    """
    if box_cls is pd.array:
        if isinstance(expected, RangeIndex):
            # pd.array would return an IntegerArray
            expected = PandasArray(np.asarray(expected._values))
        else:
            expected = pd.array(expected)
    elif box_cls is Index:
        expected = Index(expected)
    elif box_cls is Series:
        expected = Series(expected)
    elif box_cls is DataFrame:
        expected = Series(expected).to_frame()
        if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
            # not a single-column, in order to operate against non-DataFrame
            # vectors of the same length. But convert to two rows to avoid
            # single-row special cases in datetime arithmetic
            expected = expected.T
            expected = pd.concat([expected] * 2, ignore_index=True)
    elif box_cls is PeriodArray:
        # the PeriodArray constructor is not as flexible as period_array
        expected = period_array(expected)
    elif box_cls is DatetimeArray:
        expected = DatetimeArray(expected)
    elif box_cls is TimedeltaArray:
        expected = TimedeltaArray(expected)
    elif box_cls is np.ndarray:
        expected = np.array(expected)
    elif box_cls is to_array:
        # to_array is the module-level helper defined below
        expected = to_array(expected)
    else:
        raise NotImplementedError(box_cls)
    return expected
def to_array(obj):
    """Convert *obj* to the array type matching its dtype.

    Period/datetime/timedelta dtypes are converted to their extension
    arrays; anything else becomes a plain ndarray.
    """
    # temporary implementation until we get pd.array in place
    dtype = getattr(obj, "dtype", None)

    if is_period_dtype(dtype):
        return period_array(obj)
    if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
        return DatetimeArray._from_sequence(obj)
    if is_timedelta64_dtype(dtype):
        return TimedeltaArray._from_sequence(obj)
    return np.array(obj)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
    """Return the first *k* uppercase ASCII letters as a single string."""
    letters = string.ascii_uppercase
    return letters[:k]
# make index
def makeStringIndex(k=10, name=None):
    # Index of k random 10-character ASCII strings (rands_array helper).
    return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
    # Index of k random 10-character unicode strings (randu_array helper).
    return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
    """make a length k index or n categories"""
    # n random 4-char category labels, cycled over k codes.
    x = rands_array(nchars=4, size=n)
    return CategoricalIndex(
        Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
    )
def makeIntervalIndex(k=10, name=None, **kwargs):
    """Return a length-*k* IntervalIndex over evenly spaced breaks on [0, 100]."""
    breaks = np.linspace(0, 100, num=(k + 1))
    return IntervalIndex.from_breaks(breaks, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
    """Return a boolean Index: [True] for k=1, else [False, True] then False padding."""
    if k == 1:
        values = [True]
    elif k == 2:
        values = [False, True]
    else:
        values = [False, True] + [False] * (k - 2)
    return Index(values, name=name)
def makeNumericIndex(k=10, name=None, *, dtype):
    """Return a length-*k* numeric Index for the given numpy int/float dtype."""
    dtype = pandas_dtype(dtype)
    assert isinstance(dtype, np.dtype)

    if is_integer_dtype(dtype):
        values = np.arange(k, dtype=dtype)
        if is_unsigned_integer_dtype(dtype):
            # shift into the upper half of the unsigned range
            values += 2 ** (dtype.itemsize * 8 - 1)
    elif is_float_dtype(dtype):
        # random values, sorted, scaled by a random power of ten
        values = np.sort(np.random.random_sample(k) - np.random.random_sample(1))
        values = values * (10 ** np.random.randint(0, 9))
    else:
        raise NotImplementedError(f"wrong dtype {dtype}")

    return Index(values, dtype=dtype, name=name)
def makeIntIndex(k=10, name=None):
    # Wrap a numeric int64 Index in the legacy Int64Index class.
    base_idx = makeNumericIndex(k, name=name, dtype="int64")
    return Int64Index(base_idx)
def makeUIntIndex(k=10, name=None):
    # Wrap a numeric uint64 Index in the legacy UInt64Index class.
    base_idx = makeNumericIndex(k, name=name, dtype="uint64")
    return UInt64Index(base_idx)
def makeRangeIndex(k=10, name=None, **kwargs):
    """Return RangeIndex(0, k) with unit step."""
    return RangeIndex(start=0, stop=k, step=1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
    # Wrap a numeric float64 Index in the legacy Float64Index class.
    base_idx = makeNumericIndex(k, name=name, dtype="float64")
    return Float64Index(base_idx)
def makeDateIndex(k: int = 10, freq="B", name=None, **kwargs) -> DatetimeIndex:
    """Return a DatetimeIndex of *k* dates from 2000-01-01 at frequency *freq*."""
    start = datetime(2000, 1, 1)
    dates = bdate_range(start, periods=k, freq=freq, name=name)
    return DatetimeIndex(dates, name=name, **kwargs)
def makeTimedeltaIndex(k: int = 10, freq="D", name=None, **kwargs) -> TimedeltaIndex:
    """Return a TimedeltaIndex of *k* periods starting at one day."""
    return pd.timedelta_range(
        start="1 day", periods=k, freq=freq, name=name, **kwargs
    )
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
    """Return a business-daily PeriodIndex of length *k* starting 2000-01-01."""
    start = datetime(2000, 1, 1)
    return pd.period_range(start=start, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
    """Return the 4-entry product MultiIndex ('foo','bar') x (1, 2); *k* is ignored."""
    levels = (("foo", "bar"), (1, 2))
    return MultiIndex.from_product(levels, names=names, **kwargs)
# Pool of first names used by _make_timeseries for the "name" column.
_names = [
    "Alice",
    "Bob",
    "Charlie",
    "Dan",
    "Edith",
    "Frank",
    "George",
    "Hannah",
    "Ingrid",
    "Jerry",
    "Kevin",
    "Laura",
    "Michael",
    "Norbert",
    "Oliver",
    "Patricia",
    "Quinn",
    "Ray",
    "Sarah",
    "Tim",
    "Ursula",
    "Victor",
    "Wendy",
    "Xavier",
    "Yvonne",
    "Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
    """
    Make a DataFrame with a DatetimeIndex

    Parameters
    ----------
    start : str or Timestamp, default "2000-01-01"
        The start of the index. Passed to date_range with `freq`.
    end : str or Timestamp, default "2000-12-31"
        The end of the index. Passed to date_range with `freq`.
    freq : str or Freq
        The frequency to use for the DatetimeIndex
    seed : int, optional
        The random state seed.

    Returns
    -------
    DataFrame
        With columns:

        * name : object dtype with string names
        * id : int dtype with
        * x, y : float dtype

    Examples
    --------
    >>> _make_timeseries()
                  id    name         x         y
    timestamp
    2000-01-01   982   Frank  0.031261  0.986727
    2000-01-02  1025   Edith -0.086358 -0.032920
    2000-01-03   982   Edith  0.473177  0.298654
    2000-01-04  1009   Sarah  0.534344 -0.750377
    2000-01-05   963   Zelda -0.271573  0.054424
    ...          ...     ...       ...       ...
    2000-12-27   980  Ingrid -0.132333 -0.422195
    2000-12-28   972   Frank -0.376007 -0.298687
    2000-12-29  1009  Ursula -0.865047 -0.503133
    2000-12-30  1000  Hannah -0.063757 -0.507336
    2000-12-31   972     Tim -0.869120  0.531685
    """
    index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
    n = len(index)
    state = np.random.RandomState(seed)
    columns = {
        "name": state.choice(_names, size=n),
        "id": state.poisson(1000, size=n),
        "x": state.rand(n) * 2 - 1,
        "y": state.rand(n) * 2 - 1,
    }
    df = DataFrame(columns, index=index, columns=sorted(columns))
    # date_range is end-inclusive; drop the final row so the frame covers
    # the half-open interval [start, end).
    if df.index[-1] == end:
        df = df.iloc[:-1]
    return df
def index_subclass_makers_generator():
    """Yield the maker functions for the Index subclasses."""
    yield makeDateIndex
    yield makePeriodIndex
    yield makeTimedeltaIndex
    yield makeRangeIndex
    yield makeIntervalIndex
    yield makeCategoricalIndex
    yield makeMultiIndex
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
    """
    Generator which can be iterated over to get instances of all the classes
    which represent time-series.

    Parameters
    ----------
    k : int
        Length of each of the index instances.
    """
    for maker in (makeDateIndex, makePeriodIndex, makeTimedeltaIndex):
        yield maker(k=k)
# make series
def makeFloatSeries(name=None):
    """Return a Series of _N standard-normal floats with random string labels."""
    labels = makeStringIndex(_N)
    return Series(np.random.randn(_N), index=labels, name=name)
def makeStringSeries(name=None):
    """Return a Series of _N standard-normal floats indexed by string labels."""
    labels = makeStringIndex(_N)
    return Series(np.random.randn(_N), index=labels, name=name)
def makeObjectSeries(name=None):
    """Return a Series of object-dtype string data with random string labels."""
    values = Index(makeStringIndex(_N), dtype=object)
    labels = makeStringIndex(_N)
    return Series(values, index=labels, name=name)
def getSeriesData():
    """Return a dict of _K random float Series sharing one string index."""
    labels = makeStringIndex(_N)
    return {col: Series(np.random.randn(_N), index=labels) for col in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
    """Return a Series of random floats over a date index of *nper* periods."""
    if nper is None:
        nper = _N
    date_index = makeDateIndex(nper, freq=freq)
    return Series(np.random.randn(nper), index=date_index, name=name)
def makePeriodSeries(nper=None, name=None):
    """Return a Series of random floats over a PeriodIndex of *nper* periods."""
    if nper is None:
        nper = _N
    period_index = makePeriodIndex(nper)
    return Series(np.random.randn(nper), index=period_index, name=name)
def getTimeSeriesData(nper=None, freq="B"):
    """Return a dict of _K random time Series keyed by column letter."""
    return {col: makeTimeSeries(nper, freq) for col in getCols(_K)}
def getPeriodData(nper=None):
    """Return a dict of _K random period Series keyed by column letter."""
    return {col: makePeriodSeries(nper) for col in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
    """Return a DataFrame of random floats over a business-date index."""
    return DataFrame(getTimeSeriesData(nper, freq))
def makeDataFrame() -> DataFrame:
    """Return an _N x _K DataFrame of random floats with string row labels."""
    return DataFrame(getSeriesData())
def getMixedTypeDict():
    # Fixed (index, data) pair for a small frame with float, string and
    # datetime columns.
    index = Index(["a", "b", "c", "d", "e"])
    data = {
        "A": [0.0, 1.0, 2.0, 3.0, 4.0],
        "B": [0.0, 1.0, 0.0, 1.0, 0.0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": bdate_range("1/1/2009", periods=5),
    }
    return index, data
def makeMixedDataFrame():
    """Return a DataFrame built from the data half of getMixedTypeDict()."""
    _, data = getMixedTypeDict()
    return DataFrame(data)
def makePeriodFrame(nper=None):
    """Return a DataFrame of random float Series over a PeriodIndex."""
    return DataFrame(getPeriodData(nper))
def makeCustomIndex(
    nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
    """
    Create an index/multindex with given dimensions, levels, names, etc'

    Parameters
    ----------
    nentries : int
        Number of entries in index.
    nlevels : int
        Number of levels (> 1 produces multindex).
    prefix : str
        A string prefix for labels.
    names : bool or list of str
        If True will use default names, if False will use no names, if a list
        is given, the name of each level in the index will be taken from the
        list.
    ndupe_l : list of int, optional
        The number of rows for which the label will be repeated at the
        corresponding level; you can specify just the first few, the rest
        will use the default ndupe_l of 1. len(ndupe_l) <= nlevels.
    idx_type : {"i", "f", "s", "u", "dt", "p", "td"}, optional
        If idx_type is not None, `nlevels` must be 1.
        "i"/"f" creates an integer/float index, "s"/"u" creates a
        string/unicode index, "dt" creates a datetime index, "p" creates a
        period index and "td" creates a timedelta index.
        If unspecified, string labels will be generated.
    """
    if ndupe_l is None:
        ndupe_l = [1] * nlevels
    assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    # BUG FIX: was `len(names) is nlevels` -- identity comparison of ints only
    # worked via CPython's small-int cache; use equality.
    assert names is None or names is False or names is True or len(names) == nlevels
    assert idx_type is None or (
        idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
    )

    if names is True:
        # build default names
        names = [prefix + str(i) for i in range(nlevels)]
    if names is False:
        # pass None to index constructor for no name
        names = None

    # make singleton case uniform
    if isinstance(names, str) and nlevels == 1:
        names = [names]

    # specific 1D index type requested?
    idx_func_dict: dict[str, Callable[..., Index]] = {
        "i": makeIntIndex,
        "f": makeFloatIndex,
        "s": makeStringIndex,
        "u": makeUnicodeIndex,
        "dt": makeDateIndex,
        "td": makeTimedeltaIndex,
        "p": makePeriodIndex,
    }
    idx_func = idx_func_dict.get(idx_type)
    if idx_func:
        idx = idx_func(nentries)
        # but we need to fill in the name
        if names:
            idx.name = names[0]
        return idx
    elif idx_type is not None:
        raise ValueError(
            f"{repr(idx_type)} is not a legal value for `idx_type`, "
            "use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
        )

    if len(ndupe_l) < nlevels:
        ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
    assert len(ndupe_l) == nlevels

    assert all(x > 0 for x in ndupe_l)

    def keyfunc(x):
        # sort key: the numeric (level, group) components of a label like
        # "#_l0_g12".  Hoisted out of the loop below (it was redefined --
        # and `re` re-imported -- on every iteration; `re` is already
        # imported at module level).
        numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
        return [int(num) for num in numeric_tuple]

    list_of_lists = []
    for i in range(nlevels):
        # build a list of lists to create the index from
        div_factor = nentries // ndupe_l[i] + 1

        # Deprecated since version 3.9: collections.Counter now supports [].
        # See PEP 585 and Generic Alias Type.
        cnt: Counter[str] = collections.Counter()
        for j in range(div_factor):
            label = f"{prefix}_l{i}_g{j}"
            cnt[label] = ndupe_l[i]
        # cute Counter trick
        result = sorted(cnt.elements(), key=keyfunc)[:nentries]
        list_of_lists.append(result)

    tuples = list(zip(*list_of_lists))

    # convert tuples to index
    if nentries == 1:
        # we have a single level of tuples, i.e. a regular Index
        index = Index(tuples[0], name=names[0])
    elif nlevels == 1:
        name = None if names is None else names[0]
        index = Index((x[0] for x in tuples), name=name)
    else:
        index = MultiIndex.from_tuples(tuples, names=names)
    return index
def makeCustomDataframe(
    nrows,
    ncols,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Create a DataFrame using supplied parameters.

    Parameters
    ----------
    nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields No names,
        default names or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels ==1.
    c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
    r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which return the data value
        at that position, the default generator used yields values of the form
        "RxCy" based on position.
    c_ndupe_l, r_ndupe_l - list of integers, determines the number
        of duplicates for each label at a given level of the corresponding
        index. The default `None` value produces a multiplicity of 1 across
        all levels, i.e. a unique index. Will accept a partial list of length
        N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
        nrows/ncol, the last label might have lower multiplicity.
    dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index
        "dt" create a datetime index.
        "td" create a timedelta index.
        if unspecified, string labels will be generated.

    Examples
    --------
    # 5 row, 3 columns, default names on both, single index on both axis
    >> makeCustomDataframe(5,3)

    # make the data a random int between 1 and 100
    >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))

    # 2-level multiindex on rows with each label duplicated
    # twice on first level, default names on both axis, single
    # index on both axis
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])

    # DatetimeIndex on row, index with unicode labels on columns
    # no names on either axis
    >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
                             r_idx_type="dt",c_idx_type="u")

    # 4-level multindex on rows with names provided, 2-level multindex
    # on columns with default labels and default names.
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
                             r_idx_names=["FEE","FIH","FOH","FUM"],
                             c_idx_nlevels=2)

    >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
    """
    assert c_idx_nlevels > 0
    assert r_idx_nlevels > 0
    assert r_idx_type is None or (
        r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
    )
    assert c_idx_type is None or (
        c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
    )

    # Both axes are built by makeCustomIndex; see its docstring for details.
    columns = makeCustomIndex(
        ncols,
        nlevels=c_idx_nlevels,
        prefix="C",
        names=c_idx_names,
        ndupe_l=c_ndupe_l,
        idx_type=c_idx_type,
    )
    index = makeCustomIndex(
        nrows,
        nlevels=r_idx_nlevels,
        prefix="R",
        names=r_idx_names,
        ndupe_l=r_ndupe_l,
        idx_type=r_idx_type,
    )

    # by default, generate data based on location
    if data_gen_f is None:
        data_gen_f = lambda r, c: f"R{r}C{c}"

    data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]

    return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
    """Return makeDataFrame() output with ~(1-density) of its cells set to NaN."""
    df = makeDataFrame()
    rows, cols = _create_missing_idx(
        *df.shape, density=density, random_state=random_state
    )
    df.values[rows, cols] = np.nan
    return df
def test_parallel(num_threads=2, kwargs_list=None):
    """
    Decorator to run the same function multiple times in parallel.

    Parameters
    ----------
    num_threads : int, optional
        The number of times the function is run in parallel.
    kwargs_list : list of dicts, optional
        The list of kwargs to update original
        function kwargs on different threads.

    Notes
    -----
    This decorator does not pass the return value of the decorated function.

    Original from scikit-image:
    https://github.com/scikit-image/scikit-image/pull/1519
    """
    assert num_threads > 0
    has_kwargs_list = kwargs_list is not None
    if has_kwargs_list:
        # one kwargs override per thread
        assert len(kwargs_list) == num_threads
    import threading

    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            # per-thread kwargs: merge the i-th override into the call kwargs
            if has_kwargs_list:
                update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
            else:
                update_kwargs = lambda i: kwargs
            threads = []
            for i in range(num_threads):
                updated_kwargs = update_kwargs(i)
                thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
                threads.append(thread)
            # start all threads first, then join, so they really overlap
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()

        return inner

    return wrapper
class SubclassedSeries(Series):
    # Propagate these attributes through pandas operations (pandas
    # _metadata contract).
    _metadata = ["testattr", "name"]

    @property
    def _constructor(self):
        # Series-returning ops keep the subclass type.
        return SubclassedSeries

    @property
    def _constructor_expanddim(self):
        # Dimension-expanding ops (e.g. to_frame) return the DataFrame subclass.
        return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
    # Propagate this attribute through pandas operations (pandas
    # _metadata contract).
    _metadata = ["testattr"]

    @property
    def _constructor(self):
        # DataFrame-returning ops keep the subclass type.
        return SubclassedDataFrame

    @property
    def _constructor_sliced(self):
        # Row/column slices come back as the Series subclass.
        return SubclassedSeries
class SubclassedCategorical(Categorical):
    @property
    def _constructor(self):
        # Categorical-returning ops keep the subclass type.
        return SubclassedCategorical
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: list[str]):
    """
    Join CSV rows into the single string ``to_csv()`` would produce on this OS.

    Parameters
    ----------
    rows_list : List[str]
        Each element represents one row of csv, without a line terminator.

    Returns
    -------
    str
        The rows joined with the OS line separator, with a trailing separator.
    """
    line_sep = os.linesep
    return line_sep.join(rows_list) + line_sep
def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
    """
    Build a ``pytest.raises`` context that checks only the exception type.

    Parameters
    ----------
    expected_exception : Exception
        Expected error to raise.

    Returns
    -------
    Callable
        Regular `pytest.raises` function with `match` equal to `None`.
    """
    import pytest

    # match=None: the message belongs to an external library, so we
    # deliberately do not assert on its text.
    return pytest.raises(expected_exception, match=None)  # noqa: PDF010
# (function, name) pairs from pandas' internal cython table, mapping
# callables (e.g. ``np.sum``) to NDFrame method names ('sum', 'prod', ...).
cython_table = pd.core.common._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
    """
    Combine frame, functions from com._cython_table
    keys and expected result.

    Parameters
    ----------
    ndframe : DataFrame or Series
    func_names_and_expected : Sequence of two items
        The first item is a name of a NDFrame method ('sum', 'prod') etc.
        The second item is the expected return value.

    Returns
    -------
    list
        List of three items (DataFrame, function, expected result)
    """
    params = []
    for method_name, expected in func_names_and_expected:
        # One entry for the string name itself...
        params.append((ndframe, method_name, expected))
        # ...plus one per cython-table callable registered under that name.
        for func, registered_name in cython_table:
            if registered_name == method_name:
                params.append((ndframe, func, expected))
    return params
def get_op_from_name(op_name: str) -> Callable:
    """
    The operator function for a given op name.

    Parameters
    ----------
    op_name : str
        The op name, in form of "add" or "__add__".

    Returns
    -------
    function
        A function performing the operation.
    """
    short_opname = op_name.strip("_")
    if hasattr(operator, short_opname):
        return getattr(operator, short_opname)

    # No such operator: assume a reverse op such as "radd" -> flipped "add".
    rop = getattr(operator, short_opname[1:])

    def flipped(left, right):
        return rop(right, left)

    return flipped
# -----------------------------------------------------------------------------
# Indexing test helpers
def getitem(x):
    # Identity: the test indexes via obj[...] directly.
    return x
def setitem(x):
    # Identity: the test assigns via obj[...] directly.
    return x
def loc(x):
    # Return the object's .loc accessor.
    return x.loc
def iloc(x):
    # Return the object's .iloc accessor.
    return x.iloc
def at(x):
    # Return the object's .at accessor.
    return x.at
def iat(x):
    # Return the object's .iat accessor.
    return x.iat
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
class TestVarConv2dOp(OpTest):
    """Test for the ``var_conv_2d`` op against a pure-NumPy reference.

    Builds per-batch variable-sized 2-D inputs (sizes carried in LoD info)
    and computes the expected output with an im2col pass followed by a GEMM.
    """
    def setUp(self):
        # Standard OpTest flow: choose op, build inputs/attrs, then compute
        # the reference outputs.
        self.init_op_type()
        self.set_data()
        self.compute()
    def init_op_type(self):
        self.op_type = "var_conv_2d"
    def set_data(self):
        # Base case; subclasses override with other configurations.
        input_channel = 3
        output_channel = 2
        filter_size = [2, 3]
        stride = [1, 1]
        row = [2, 4]
        col = [3, 2]
        self.init_data(input_channel, output_channel, filter_size, stride, row,
                       col)
    def init_data(self, input_channel, output_channel, filter_size, stride, row,
                  col):
        # Per-batch feature size: row[i] * col[i] spatial cells per channel.
        feature = [row[i] * col[i] for i in range(len(row))]
        numel = sum(feature) * input_channel
        # X is a flat (numel, 1) tensor; the LoD records each batch's length.
        x_data = np.random.random((numel, 1)).astype('float32')
        x_lod = [[x * input_channel for x in feature]]
        # ROW/COLUMN only contribute their LoD (per-batch height/width).
        row_data = np.random.random((sum(row), 10)).astype('float32')
        col_data = np.random.random((sum(col), 10)).astype('float32')
        w_shape = (output_channel,
                   input_channel * filter_size[0] * filter_size[1])
        w_data = np.random.random(w_shape).astype('float32')
        self.inputs = {
            'X': (x_data, x_lod),
            'ROW': (row_data, [row]),
            'COLUMN': (col_data, [col]),
            'W': w_data
        }
        self.attrs = {
            'InputChannel': input_channel,
            'OutputChannel': output_channel,
            'StrideH': stride[0],
            'StrideW': stride[1],
            'KernelH': filter_size[0],
            'KernelW': filter_size[1],
        }
    def compute(self):
        # Reference forward pass: im2col then per-batch GEMM with W.
        in_ch = self.attrs['InputChannel']
        out_ch = self.attrs['OutputChannel']
        kernel_h = self.attrs['KernelH']
        kernel_w = self.attrs['KernelW']
        stride_h = self.attrs['StrideH']
        stride_w = self.attrs['StrideW']
        row_data, row_lod = self.inputs['ROW']
        col_data, col_lod = self.inputs['COLUMN']
        x_data, x_lod = self.inputs['X']
        w_data = self.inputs['W']
        out_data = np.zeros((0, 1)).astype('float32')
        col_res_data, col_res_lod = self.Im2Col()
        out_lod = [[]]
        col_data_offset = 0
        batch_size = len(x_lod[0])
        for idx in range(batch_size):
            width = col_lod[0][idx]
            height = row_lod[0][idx]
            # Output spatial extent per axis: ceil(dim / stride), 0 if empty.
            top_im_x = 0
            if width != 0:
                top_im_x = (width - 1) // stride_w + 1
            top_im_y = 0
            if height != 0:
                top_im_y = (height - 1) // stride_h + 1
            top_im_size = top_im_x * top_im_y
            out_lod[0].append(out_ch * top_im_size)
            if top_im_size == 0:
                out_tmp = np.zeros((out_ch * top_im_size, 1)).astype('float32')
            else:
                col_batch_data = col_res_data[col_data_offset:col_data_offset +
                                              col_res_lod[0][idx]]
                gemm_shape = (in_ch * kernel_h * kernel_w, top_im_size)
                col_batch_data = col_batch_data.reshape(gemm_shape)
                out_tmp = np.dot(w_data, col_batch_data).reshape(-1, 1)
            out_data = np.vstack((out_data, out_tmp))
            col_data_offset += col_res_lod[0][idx]
        self.outputs = {
            'Out': (out_data.astype('float32'), out_lod),
            'Col': (col_res_data, col_res_lod)
        }
    def Im2Col(self):
        """Reference im2col: unfold each batch's (C, H, W) image into columns."""
        in_ch = self.attrs['InputChannel']
        kernel_h = self.attrs['KernelH']
        kernel_w = self.attrs['KernelW']
        stride_h = self.attrs['StrideH']
        stride_w = self.attrs['StrideW']
        row_data, row_lod = self.inputs['ROW']
        col_data, col_lod = self.inputs['COLUMN']
        x_data, x_lod = self.inputs['X']
        # First pass: compute the unfolded size per batch (top_x cols * top_y rows).
        col_res_lod = [[]]
        top_size = 0
        batch_size = len(x_lod[0])
        for idx in range(batch_size):
            width = col_lod[0][idx]
            height = row_lod[0][idx]
            top_im_x = 0
            if width != 0:
                top_im_x = (width - 1) // stride_w + 1
            top_im_y = 0
            if height != 0:
                top_im_y = (height - 1) // stride_h + 1
            top_x = top_im_x * top_im_y
            top_y = in_ch * kernel_h * kernel_w
            col_res_lod[0].append(top_x * top_y)
            top_size += top_x * top_y
        # Second pass: scatter input pixels into the column buffer.
        col_res = np.zeros((top_size, 1)).astype('float32')
        kernel_win_size = kernel_h * kernel_w
        # Windows are centered: offsets run from -half_kernel to +half_kernel.
        half_kernel_h = kernel_h // 2
        half_kernel_w = kernel_w // 2
        t_offset, b_offset = 0, 0
        for idx in range(batch_size):
            width = col_lod[0][idx]
            height = row_lod[0][idx]
            if width == 0 or height == 0:
                continue
            top_im_x = (width - 1) // stride_w + 1
            top_im_y = (height - 1) // stride_h + 1
            top_x = top_im_x * top_im_y
            for z in range(in_ch):
                row_offset = kernel_win_size * z
                im_offset = z * width * height
                for y in range(0, height, stride_h):
                    for x in range(0, width, stride_w):
                        col_offset = x // stride_w + y // stride_h * top_im_x
                        for ky in range(kernel_h):
                            for kx in range(kernel_w):
                                im_y = y + ky - half_kernel_h
                                im_x = x + kx - half_kernel_w
                                # Out-of-image taps stay zero (implicit padding).
                                if im_x >= 0 and im_x < width and im_y >= 0 and im_y < height:
                                    col_res[t_offset +
                                            (row_offset + ky * kernel_w + kx) * top_x +
                                            col_offset] = \
                                        x_data[b_offset + im_offset + im_y * width + im_x]
            t_offset += col_res_lod[0][idx]
            b_offset += x_lod[0][idx]
        return col_res, col_res_lod
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.005)
class TestVarConv2dOpCase1(TestVarConv2dOp):
    def set_data(self):
        # set in_ch 1
        self.init_data(
            input_channel=1,
            output_channel=2,
            filter_size=[2, 3],
            stride=[1, 1],
            row=[1, 4],
            col=[3, 2])
class TestVarConv2dOpCase2(TestVarConv2dOp):
    def set_data(self):
        # set out_ch 1
        self.init_data(
            input_channel=2,
            output_channel=1,
            filter_size=[3, 3],
            stride=[2, 2],
            row=[4, 7],
            col=[5, 2])
class TestVarConv2dOpCase3(TestVarConv2dOp):
    def set_data(self):
        # set batch 1
        self.init_data(
            input_channel=2,
            output_channel=1,
            filter_size=[3, 3],
            stride=[2, 2],
            row=[7],
            col=[2])
class TestVarConv2dOpCase4(TestVarConv2dOp):
    def set_data(self):
        # set filter size very large
        self.init_data(
            input_channel=3,
            output_channel=4,
            filter_size=[6, 6],
            stride=[2, 2],
            row=[4, 7],
            col=[5, 2])
class TestVarConv2dOpCase5(TestVarConv2dOp):
    def set_data(self):
        # set input very small
        self.init_data(
            input_channel=5,
            output_channel=3,
            filter_size=[3, 3],
            stride=[1, 1],
            row=[1, 1],
            col=[1, 1])
class TestVarConv2dOpCase6(TestVarConv2dOp):
    def set_data(self):
        # single channel in, tiny 1x1 images
        self.init_data(
            input_channel=1,
            output_channel=3,
            filter_size=[3, 3],
            stride=[1, 1],
            row=[1, 1],
            col=[1, 1])
class TestVarConv2dOpCase7(TestVarConv2dOp):
    def set_data(self):
        # larger spatial extents, unit stride
        self.init_data(
            input_channel=2,
            output_channel=3,
            filter_size=[3, 3],
            stride=[1, 1],
            row=[5, 4],
            col=[6, 7])
class TestVarConv2dApi(unittest.TestCase):
    """Smoke test of the ``fluid.contrib.var_conv_2d`` Python API.

    Builds a small program, feeds LoD tensors whose LoDs match the op's
    expectations, and runs it on CPU; only checks that execution succeeds.
    """
    def test_api(self):
        import paddle.fluid as fluid
        x = fluid.layers.data(name='x', shape=[1], lod_level=1)
        row = fluid.layers.data(name='row', shape=[6], lod_level=1)
        col = fluid.layers.data(name='col', shape=[6], lod_level=1)
        out = fluid.contrib.var_conv_2d(
            input=x,
            row=row,
            col=col,
            input_channel=3,
            output_channel=5,
            filter_size=[3, 3],
            stride=1)
        place = fluid.CPUPlace()
        # 116 = 3 channels * (5*6 + 4*7) spatial cells, split [60, 56] by batch.
        x_tensor = fluid.create_lod_tensor(
            np.random.rand(116, 1).astype('float32'), [[60, 56]], place)
        row_tensor = fluid.create_lod_tensor(
            np.random.rand(9, 6).astype('float32'), [[5, 4]], place)
        col_tensor = fluid.create_lod_tensor(
            np.random.rand(13, 6).astype('float32'), [[6, 7]], place)
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        ret = exe.run(
            feed={'x': x_tensor,
                  'row': row_tensor,
                  'col': col_tensor},
            fetch_list=[out],
            return_numpy=False)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
|
from django.core.cache import cache, InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.views.generic import View
from ratelimit.decorators import ratelimit
from ratelimit.exceptions import Ratelimited
from ratelimit.mixins import RatelimitMixin
from ratelimit.utils import is_ratelimited, _split_rate
# Shared factory that builds request objects without going through the
# full test client / middleware stack.
rf = RequestFactory()
class MockUser(object):
    """Minimal stand-in for a user object.

    Carries only what the ratelimit key functions read: a fixed primary key
    and an authentication flag exposed through ``is_authenticated()``.
    """

    def __init__(self, authenticated=False):
        self.authenticated = authenticated
        # A single stable id is enough for the key functions under test.
        self.pk = 1

    def is_authenticated(self):
        return self.authenticated
class RateParsingTests(TestCase):
    """Exercise ``_split_rate`` against the documented rate string formats."""

    def test_simple(self):
        cases = (
            ('100/s', (100, 1)),
            ('100/10s', (100, 10)),
            ('100/10', (100, 10)),
            ('100/m', (100, 60)),
            ('400/10m', (400, 600)),
            ('1000/h', (1000, 3600)),
            ('800/d', (800, 24 * 60 * 60)),
        )
        for raw, expected in cases:
            assert _split_rate(raw) == expected
def mykey(group, request):
    """Custom ratelimit key function: the client address, reversed."""
    addr = request.META['REMOTE_ADDR']
    return ''.join(reversed(addr))
class RatelimitTests(TestCase):
    """Behavioural tests for the ``@ratelimit`` decorator and
    ``is_ratelimited`` helper: key selection, rates, methods, blocking,
    stacking, and cache-failure handling.
    """
    def setUp(self):
        # Each test starts from an empty counter cache.
        cache.clear()
    def test_no_key(self):
        # A rate without any key is a configuration error.
        @ratelimit(rate='1/m', block=True)
        def view(request):
            return True
        req = rf.get('/')
        with self.assertRaises(ImproperlyConfigured):
            view(req)
    def test_ip(self):
        @ratelimit(key='ip', rate='1/m', block=True)
        def view(request):
            return True
        req = rf.get('/')
        assert view(req), 'First request works.'
        with self.assertRaises(Ratelimited):
            view(req)
    def test_block(self):
        # block=True raises; block=False only flags request.limited.
        @ratelimit(key='ip', rate='1/m', block=True)
        def blocked(request):
            return request.limited
        @ratelimit(key='ip', rate='1/m', block=False)
        def unblocked(request):
            return request.limited
        req = rf.get('/')
        assert not blocked(req), 'First request works.'
        with self.assertRaises(Ratelimited):
            blocked(req)
        assert unblocked(req), 'Request is limited but not blocked.'
    def test_method(self):
        post = rf.post('/')
        get = rf.get('/')
        # Same group so the two views share one counter namespace.
        @ratelimit(key='ip', method='POST', rate='1/m', group='a')
        def limit_post(request):
            return request.limited
        @ratelimit(key='ip', method=['POST', 'GET'], rate='1/m', group='a')
        def limit_get(request):
            return request.limited
        assert not limit_post(post), 'Do not limit first POST.'
        assert limit_post(post), 'Limit second POST.'
        assert not limit_post(get), 'Do not limit GET.'
        assert limit_get(post), 'Limit first POST.'
        assert limit_get(get), 'Limit first GET.'
    def test_unsafe_methods(self):
        # rate='0/m' limits immediately, but only for UNSAFE methods.
        @ratelimit(key='ip', method=ratelimit.UNSAFE, rate='0/m')
        def limit_unsafe(request):
            return request.limited
        get = rf.get('/')
        head = rf.head('/')
        options = rf.options('/')
        delete = rf.delete('/')
        post = rf.post('/')
        put = rf.put('/')
        assert not limit_unsafe(get)
        assert not limit_unsafe(head)
        assert not limit_unsafe(options)
        assert limit_unsafe(delete)
        assert limit_unsafe(post)
        assert limit_unsafe(put)
        # TODO: When all supported versions have this, drop the `if`.
        if hasattr(rf, 'patch'):
            patch = rf.patch('/')
            assert limit_unsafe(patch)
    def test_key_get(self):
        # Distinct GET parameter values get distinct counters.
        req_a = rf.get('/', {'foo': 'a'})
        req_b = rf.get('/', {'foo': 'b'})
        @ratelimit(key='get:foo', rate='1/m', method='GET')
        def view(request):
            return request.limited
        assert not view(req_a)
        assert view(req_a)
        assert not view(req_b)
        assert view(req_b)
    def test_key_post(self):
        req_a = rf.post('/', {'foo': 'a'})
        req_b = rf.post('/', {'foo': 'b'})
        @ratelimit(key='post:foo', rate='1/m')
        def view(request):
            return request.limited
        assert not view(req_a)
        assert view(req_a)
        assert not view(req_b)
        assert view(req_b)
    def test_key_header(self):
        req = rf.post('/')
        req.META['HTTP_X_REAL_IP'] = '1.2.3.4'
        # A missing header must not break keying; both decorators apply.
        @ratelimit(key='header:x-real-ip', rate='1/m')
        @ratelimit(key='header:x-missing-header', rate='1/m')
        def view(request):
            return request.limited
        assert not view(req)
        assert view(req)
    def test_rate(self):
        req = rf.post('/')
        @ratelimit(key='ip', rate='2/m')
        def twice(request):
            return request.limited
        assert not twice(req), 'First request is not limited.'
        # request.limited is sticky; reset it between calls on the same req.
        del req.limited
        assert not twice(req), 'Second request is not limited.'
        del req.limited
        assert twice(req), 'Third request is limited.'
    def test_zero_rate(self):
        req = rf.post('/')
        @ratelimit(key='ip', rate='0/m')
        def never(request):
            return request.limited
        assert never(req)
    def test_none_rate(self):
        # rate=None disables limiting entirely.
        req = rf.post('/')
        @ratelimit(key='ip', rate=None)
        def always(request):
            return request.limited
        assert not always(req)
        del req.limited
        assert not always(req)
        del req.limited
        assert not always(req)
        del req.limited
        assert not always(req)
        del req.limited
        assert not always(req)
        del req.limited
        assert not always(req)
    def test_callable_rate(self):
        # Rate may be a callable of (group, request).
        auth = rf.post('/')
        unauth = rf.post('/')
        auth.user = MockUser(authenticated=True)
        unauth.user = MockUser(authenticated=False)
        def get_rate(group, request):
            if request.user.is_authenticated():
                return (2, 60)
            return (1, 60)
        @ratelimit(key='user_or_ip', rate=get_rate)
        def view(request):
            return request.limited
        assert not view(unauth)
        assert view(unauth)
        assert not view(auth)
        assert not view(auth)
        assert view(auth)
    def test_callable_rate_none(self):
        req = rf.post('/')
        req.never_limit = False
        get_rate = lambda g, r: None if r.never_limit else '1/m'
        @ratelimit(key='ip', rate=get_rate)
        def view(request):
            return request.limited
        assert not view(req)
        del req.limited
        assert view(req)
        # Once the callable returns None the view is never limited again.
        req.never_limit = True
        del req.limited
        assert not view(req)
        del req.limited
        assert not view(req)
    def test_callable_rate_zero(self):
        auth = rf.post('/')
        unauth = rf.post('/')
        auth.user = MockUser(authenticated=True)
        unauth.user = MockUser(authenticated=False)
        def get_rate(group, request):
            if request.user.is_authenticated():
                return '1/m'
            return '0/m'
        @ratelimit(key='ip', rate=get_rate)
        def view(request):
            return request.limited
        assert view(unauth)
        del unauth.limited
        assert not view(auth)
        del auth.limited
        assert view(auth)
        assert view(unauth)
    @override_settings(RATELIMIT_USE_CACHE='fake-cache')
    def test_bad_cache(self):
        """The RATELIMIT_USE_CACHE setting works if the cache exists."""
        @ratelimit(key='ip', rate='1/m')
        def view(request):
            return request
        req = rf.post('/')
        with self.assertRaises(InvalidCacheBackendError):
            view(req)
    @override_settings(RATELIMIT_USE_CACHE='connection-errors')
    def test_cache_connection_error(self):
        # A cache that errors on connect must fail open (not limit).
        @ratelimit(key='ip', rate='1/m')
        def view(request):
            return request
        req = rf.post('/')
        assert view(req)
    def test_user_or_ip(self):
        """Allow custom functions to set cache keys."""
        @ratelimit(key='user_or_ip', rate='1/m', block=False)
        def view(request):
            return request.limited
        unauth = rf.post('/')
        unauth.user = MockUser(authenticated=False)
        assert not view(unauth), 'First unauthenticated request is allowed.'
        assert view(unauth), 'Second unauthenticated request is limited.'
        # Authenticated users are keyed by pk, so they get a fresh counter.
        auth = rf.post('/')
        auth.user = MockUser(authenticated=True)
        assert not view(auth), 'First authenticated request is allowed.'
        assert view(auth), 'Second authenticated is limited.'
    def test_key_path(self):
        # Key may be given as a dotted path to a function.
        @ratelimit(key='ratelimit.tests.mykey', rate='1/m')
        def view(request):
            return request.limited
        req = rf.post('/')
        assert not view(req)
        assert view(req)
    def test_callable_key(self):
        @ratelimit(key=mykey, rate='1/m')
        def view(request):
            return request.limited
        req = rf.post('/')
        assert not view(req)
        assert view(req)
    def test_stacked_decorator(self):
        """Allow @ratelimit to be stacked."""
        # Put the shorter one first and make sure the second one doesn't
        # reset request.limited back to False.
        @ratelimit(rate='1/m', block=False, key=lambda x, y: 'min')
        @ratelimit(rate='10/d', block=False, key=lambda x, y: 'day')
        def view(request):
            return request.limited
        req = rf.post('/')
        assert not view(req), 'First unauthenticated request is allowed.'
        assert view(req), 'Second unauthenticated request is limited.'
    def test_stacked_methods(self):
        """Different methods should result in different counts."""
        @ratelimit(rate='1/m', key='ip', method='GET')
        @ratelimit(rate='1/m', key='ip', method='POST')
        def view(request):
            return request.limited
        get = rf.get('/')
        post = rf.post('/')
        assert not view(get)
        assert not view(post)
        assert view(get)
        assert view(post)
    def test_sorted_methods(self):
        """Order of the methods shouldn't matter."""
        @ratelimit(rate='1/m', key='ip', method=['GET', 'POST'], group='a')
        def get_post(request):
            return request.limited
        @ratelimit(rate='1/m', key='ip', method=['POST', 'GET'], group='a')
        def post_get(request):
            return request.limited
        req = rf.get('/')
        assert not get_post(req)
        assert post_get(req)
    def test_is_ratelimited(self):
        def get_key(group, request):
            return 'test_is_ratelimited_key'
        def not_increment(request):
            return is_ratelimited(request, increment=False,
                                  method=is_ratelimited.ALL, key=get_key,
                                  rate='1/m', group='a')
        def do_increment(request):
            return is_ratelimited(request, increment=True,
                                  method=is_ratelimited.ALL, key=get_key,
                                  rate='1/m', group='a')
        req = rf.get('/')
        # Does not increment. Count still 0. Does not rate limit
        # because 0 < 1.
        assert not not_increment(req), 'Request should not be rate limited.'
        # Increments. Does not rate limit because 0 < 1. Count now 1.
        assert not do_increment(req), 'Request should not be rate limited.'
        # Does not increment. Count still 1. Not limited because 1 > 1
        # is false.
        assert not not_increment(req), 'Request should not be rate limited.'
        # Count = 2, 2 > 1.
        assert do_increment(req), 'Request should be rate limited.'
        assert not_increment(req), 'Request should be rate limited.'
    @override_settings(RATELIMIT_USE_CACHE='connection-errors')
    def test_is_ratelimited_cache_connection_error_without_increment(self):
        def get_key(group, request):
            return 'test_is_ratelimited_key'
        def not_increment(request):
            return is_ratelimited(request, increment=False,
                                  method=is_ratelimited.ALL, key=get_key,
                                  rate='1/m', group='a')
        req = rf.get('/')
        assert not not_increment(req)
    @override_settings(RATELIMIT_USE_CACHE='connection-errors')
    def test_is_ratelimited_cache_connection_error_with_increment(self):
        def get_key(group, request):
            return 'test_is_ratelimited_key'
        def do_increment(request):
            return is_ratelimited(request, increment=True,
                                  method=is_ratelimited.ALL, key=get_key,
                                  rate='1/m', group='a')
        req = rf.get('/')
        assert not do_increment(req)
        assert req.limited is False
class RatelimitCBVTests(TestCase):
    """Same behaviours as RatelimitTests, but via the class-based-view
    ``RatelimitMixin`` (and the decorator applied to a CBV method).
    """
    def setUp(self):
        # Each test starts from an empty counter cache.
        cache.clear()
    def test_limit_ip(self):
        class RLView(RatelimitMixin, View):
            ratelimit_key = 'ip'
            ratelimit_method = ratelimit.ALL
            ratelimit_rate = '1/m'
            ratelimit_block = True
        rlview = RLView.as_view()
        req = rf.get('/')
        assert rlview(req), 'First request works.'
        with self.assertRaises(Ratelimited):
            rlview(req)
    def test_block(self):
        # Shared group: both views count against the same limit.
        class BlockedView(RatelimitMixin, View):
            ratelimit_group = 'cbv:block'
            ratelimit_key = 'ip'
            ratelimit_method = ratelimit.ALL
            ratelimit_rate = '1/m'
            ratelimit_block = True
            def get(self, request, *args, **kwargs):
                return request.limited
        class UnBlockedView(RatelimitMixin, View):
            ratelimit_group = 'cbv:block'
            ratelimit_key = 'ip'
            ratelimit_method = ratelimit.ALL
            ratelimit_rate = '1/m'
            ratelimit_block = False
            def get(self, request, *args, **kwargs):
                return request.limited
        blocked = BlockedView.as_view()
        unblocked = UnBlockedView.as_view()
        req = rf.get('/')
        assert not blocked(req), 'First request works.'
        with self.assertRaises(Ratelimited):
            blocked(req)
        assert unblocked(req), 'Request is limited but not blocked.'
    def test_method(self):
        post = rf.post('/')
        get = rf.get('/')
        class LimitPostView(RatelimitMixin, View):
            ratelimit_group = 'cbv:method'
            ratelimit_key = 'ip'
            ratelimit_method = ['POST']
            ratelimit_rate = '1/m'
            def post(self, request, *args, **kwargs):
                return request.limited
            # GET shares the handler but is outside ratelimit_method.
            get = post
        class LimitGetView(RatelimitMixin, View):
            ratelimit_group = 'cbv:method'
            ratelimit_key = 'ip'
            ratelimit_method = ['POST', 'GET']
            ratelimit_rate = '1/m'
            def post(self, request, *args, **kwargs):
                return request.limited
            get = post
        limit_post = LimitPostView.as_view()
        limit_get = LimitGetView.as_view()
        assert not limit_post(post), 'Do not limit first POST.'
        assert limit_post(post), 'Limit second POST.'
        assert not limit_post(get), 'Do not limit GET.'
        assert limit_get(post), 'Limit first POST.'
        assert limit_get(get), 'Limit first GET.'
    def test_rate(self):
        req = rf.post('/')
        class TwiceView(RatelimitMixin, View):
            ratelimit_key = 'ip'
            ratelimit_rate = '2/m'
            def post(self, request, *args, **kwargs):
                return request.limited
            get = post
        twice = TwiceView.as_view()
        assert not twice(req), 'First request is not limited.'
        assert not twice(req), 'Second request is not limited.'
        assert twice(req), 'Third request is limited.'
    @override_settings(RATELIMIT_USE_CACHE='fake-cache')
    def test_bad_cache(self):
        """The RATELIMIT_USE_CACHE setting works if the cache exists."""
        self.skipTest('I do not know why this fails when the other works.')
        class BadCacheView(RatelimitMixin, View):
            ratelimit_key = 'ip'
            def post(self, request, *args, **kwargs):
                return request
            get = post
        view = BadCacheView.as_view()
        req = rf.post('/')
        with self.assertRaises(InvalidCacheBackendError):
            view(req)
    def test_keys(self):
        """Allow custom functions to set cache keys."""
        def user_or_ip(group, req):
            if req.user.is_authenticated():
                return 'uip:%d' % req.user.pk
            return 'uip:%s' % req.META['REMOTE_ADDR']
        class KeysView(RatelimitMixin, View):
            ratelimit_key = user_or_ip
            ratelimit_block = False
            ratelimit_rate = '1/m'
            def post(self, request, *args, **kwargs):
                return request.limited
            get = post
        view = KeysView.as_view()
        req = rf.post('/')
        req.user = MockUser(authenticated=False)
        assert not view(req), 'First unauthenticated request is allowed.'
        assert view(req), 'Second unauthenticated request is limited.'
        del req.limited
        req.user = MockUser(authenticated=True)
        assert not view(req), 'First authenticated request is allowed.'
        assert view(req), 'Second authenticated is limited.'
    def test_method_decorator(self):
        # The function decorator also works directly on a CBV method.
        class TestView(View):
            @ratelimit(key='ip', rate='1/m', block=False)
            def post(self, request):
                return request.limited
        view = TestView.as_view()
        req = rf.post('/')
        assert not view(req)
        assert view(req)
|
|
## This is a brewtroller port to Python.
## All credit goes to brewtroller.
## TODO: need to put a better disclaimer here.
from Enum import *
from timer import *
from Test_I2C import resetHeatOutput
# unsigned long lastHop, grainInStart;
# unsigned int boilAdds, triggered;
# Used to determine if the given step is the active step in the program.
def stepIsActive(brewStep):
    """Return True when *brewStep* is currently running.

    A step is active when its slot in the global ``stepProgram`` table holds
    a real program number rather than PROGRAM_IDLE.
    """
    # Direct comparison replaces the old if/else that returned literal
    # False/True; behaviour is identical for callers.
    return stepProgram[brewStep] != PROGRAM_IDLE
# Usd to determine if the given ZONE is the active ZONE in the program.
# Returns true is any step in the given ZONE is the active step, false otherwise.
def zoneIsActive(brewZone):
    """Return 1 if any brew step belonging to *brewZone* is active, else 0.

    Returns None (falsy) for an unrecognised zone, matching the previous
    fall-through behaviour.
    """
    if (brewZone == ZONE_MASH):
        # The old code chained thirteen `if ... return 1` statements with a
        # dangling `else` that bound only to the final (STEP_SPARGE) check.
        # any() keeps the same short-circuit evaluation while making the
        # fall-through to 0 explicit and robust to edits.
        mash_steps = (STEP_FILL, STEP_DELAY, STEP_PREHEAT, STEP_ADDGRAIN,
                      STEP_REFILL, STEP_DOUGHIN, STEP_ACID, STEP_PROTEIN,
                      STEP_SACCH, STEP_SACCH2, STEP_MASHOUT, STEP_MASHHOLD,
                      STEP_SPARGE)
        if any(stepIsActive(step) for step in mash_steps):
            return 1
        return 0
    elif (brewZone == ZONE_BOIL):
        if (stepIsActive(STEP_BOIL) or stepIsActive(STEP_CHILL)):
            return 1
        return 0
# Returns 0 if start was successful or 1 if unable to start due to conflict with other step
# Performs any logic required at start of step
# TO DO: Power Loss Recovery Handling
def stepInit(pgm, brewStep) :
print pgm, brewStep
# Nothing more to do if starting 'Idle' program
if(pgm == PROGRAM_IDLE): return 1
# Abort Fill/Mash step init if mash Zone is not free
if (brewStep >= STEP_FILL and brewStep <= STEP_MASHHOLD and zoneIsActive(ZONE_MASH)): return 1
# Abort sparge init if either zone is currently active
elif (brewStep == STEP_SPARGE and (zoneIsActive(ZONE_MASH) or zoneIsActive(ZONE_BOIL))): return 1
# Allow Boil step init while sparge is still going
# If we made it without an abort, save the program number for stepCore
setProgramStep(brewStep, pgm)
# if (brewStep == STEP_FILL):
# Step Init: Fill
# REMOVED Section
if (brewStep == STEP_DELAY):
# Step Init: Delay
# Load delay minutes from EEPROM if timer is not already populated via Power Loss Recovery
if ( timerValue[TIMER_MASH]==0 ): setTimer(TIMER_MASH, BrewConfig['StartDelayMinutes'])
print "Step Delay Initialized with time: " + str(timerValue[TIMER_MASH])
elif (brewStep == STEP_PREHEAT):
# //Step Init: Preheat
setpoint[VS_MASH]= calcStrikeTemp(pgm)
preheated[VS_MASH] = 0
# No timer used for preheat
clearTimer(TIMER_MASH)
elif (brewStep == STEP_ADDGRAIN):
# Step Init: Add Grain
# Disable HLT and Mash heat output during 'Add Grain' to avoid
# dry running heat elements and burns from HERMS recirc
resetHeatOutput(VS_MASH)
# In manual volume mode show the target mash volume as a guide to the user
## TODO tgtVol[VS_MASH] = mashVol
elif brewStep == STEP_DOUGHIN :
# Step Init: Dough In
setpoint[VS_MASH] = getProgMashTemp(pgm, MASH_DOUGHIN);
preheated[VS_MASH] = 0;
# //Set timer only if empty (for purposed of power loss recovery)
if (not(timerValue[TIMER_MASH])):
setTimer(TIMER_MASH, getProgMashMins(pgm, MASH_DOUGHIN));
# //Leave timer paused until preheated
pauseTimer(TIMER_MASH)
print "Initializing Dough In..."
elif (brewStep == STEP_ACID):
# //Step Init: Acid Rest
setpoint[TS_MASH] = getProgMashTemp(pgm, MASH_ACID);
preheated[VS_MASH] = 0;
# //Set timer only if empty (for purposed of power loss recovery)
if (not(timerValue[TIMER_MASH])):
setTimer(TIMER_MASH, getProgMashMins(pgm, MASH_ACID));
# //Leave timer paused until preheated
timerStatus[TIMER_MASH] = 0;
elif (brewStep == STEP_PROTEIN) :
# Step Init: Protein
setpoint[TS_MASH] = getProgMashTemp(pgm, MASH_PROTEIN);
preheated[VS_MASH] = 0;
# Set timer only if empty (for purposed of power loss recovery)
if (not(timerValue[TIMER_MASH])):
setTimer(TIMER_MASH, getProgMashMins(pgm, MASH_PROTEIN));
# Leave timer paused until preheated
timerStatus[TIMER_MASH] = 0;
elif (brewStep == STEP_SACCH) :
# //Step Init: Sacch
print "Initializing SACCH ..."
setpoint[TS_MASH] = getProgMashTemp(pgm, MASH_SACCH);
preheated[VS_MASH] = 0;
# //Set timer only if empty (for purposed of power loss recovery)
if (not(timerValue[TIMER_MASH])):
setTimer(TIMER_MASH, getProgMashMins(pgm, MASH_SACCH));
# //Leave timer paused until preheated
timerStatus[TIMER_MASH] = 0;
elif (brewStep == STEP_SACCH2):
# //Step Init: Sacch2
print "Initializing SACH2 ..."
setpoint[TS_MASH] = getProgMashTemp(pgm, MASH_SACCH2);
preheated[VS_MASH] = 0;
# //Set timer only if empty (for purposed of power loss recovery)
if (not(timerValue[TIMER_MASH])):
setTimer(TIMER_MASH, getProgMashMins(pgm, MASH_SACCH2));
# //Leave timer paused until preheated
timerStatus[TIMER_MASH] = 0;
elif (brewStep == STEP_MASHOUT):
# //Step Init: Mash Out
print "Initializing MashOut ..."
setpoint[TS_MASH] = getProgMashTemp(pgm, MASH_MASHOUT);
preheated[VS_MASH] = 0;
# //Set timer only if empty (for purposed of power loss recovery)
if (not(timerValue[TIMER_MASH])):
setTimer(TIMER_MASH, getProgMashMins(pgm, MASH_MASHOUT));
# //Leave timer paused until preheated
timerStatus[TIMER_MASH] = 0;
print "Exit Initialization MashOut ..."
elif (brewStep == STEP_MASHHOLD):
print "Initializing MASH HOLD step ", setpoint[TS_MASH]
# //Set HLT to Sparge Temp
# //Cycle through steps and use last non-zero step for mash setpoint
if (not(setpoint[TS_MASH])):
i = MASH_MASHOUT;
while (setpoint[TS_MASH] == 0 and i >= MASH_DOUGHIN and i <= MASH_MASHOUT):
i=i-1
setSetpoint[TS_MASH] = getProgMashTemp(pgm, i)
elif (brewStep == STEP_BOIL):
# //Step Init: Boil
print "Initializing BOIL step ", setpoint[TS_MASH]
## TODO Pump
## resetHeatOutput(VS_PUMP); # turn off the pump if we are moving to boil.
setpoint[VS_KETTLE] = getBoilTemp();
preheated[VS_KETTLE] = 0;
##boilAdds = getProgAdds(pgm);
# //Set timer only if empty (for purposes of power loss recovery)
if (not(timerValue[TIMER_BOIL])) :
# //Clean start of Boil
setTimer(TIMER_BOIL, getProgBoil(pgm));
triggered = 0;
##setBoilAddsTrig(triggered);
##else :
# //Assuming power loss recovery
##triggered = getBoilAddsTrig();
# //Leave timer paused until preheated
timerStatus[TIMER_BOIL] = 0;
lastHop = 0;
##boilControlState = CONTROLSTATE_AUTO;
elif (brewStep == STEP_CHILL) :
# //Step Init: Chill
pitchTemp = getProgPitch(pgm);
# //Call event handler
## TODO replace event with writing to file so that webserver can pick up
#eventHandler(EVENT_STEPINIT, brewStep)
def stepCore():
    """Main state-machine tick: run/advance every currently-active step."""
    global stepProgram
    if (stepIsActive(STEP_FILL)):
        print "Running Active Step fill"
        stepFill(STEP_FILL);
    if (stepIsActive(STEP_PREHEAT)):
        print "Running Active step Preheat", temp[VS_MASH], "; target ", setpoint[VS_MASH]
        # Advance as soon as the mash vessel reaches its (non-zero) setpoint.
        if (setpoint[VS_MASH] and temp[VS_MASH] >= setpoint[VS_MASH]) :
            stepAdvance(STEP_PREHEAT);
    if (stepIsActive(STEP_DELAY)):
        print "Running Active Delay Step"
        if (timerValue[TIMER_MASH] == 0):
            stepAdvance(STEP_DELAY);
    if (stepIsActive(STEP_ADDGRAIN)):
        ## TODO understand how input would work here
        stepAdvance(STEP_ADDGRAIN)
    if (stepIsActive(STEP_REFILL)):
        stepFill(STEP_REFILL);
    # All temperature-controlled mash rests share the stepMash logic.
    for brewStep in range (STEP_DOUGHIN, STEP_MASHOUT+1):
        if (stepIsActive(brewStep)):
            stepMash(brewStep);
    if (stepIsActive(STEP_MASHHOLD)):
        print "Running MASH HOLD. Boil ZONE is ", zoneIsActive(ZONE_BOIL)
        # Hold the mash until the boil zone frees up.
        if ( not(zoneIsActive(ZONE_BOIL))):
            stepAdvance(STEP_MASHHOLD);
    if (stepIsActive(STEP_SPARGE)):
        stepAdvance(STEP_SPARGE);
    if (stepIsActive(STEP_BOIL)):
        print "BOILING ", timerValue[TIMER_BOIL]
        # PREBOIL_ALARM
        ## TODO figure this out
        ## if not((triggered & 32768) and temp[TS_KETTLE] >= PREBOIL_ALARM) :
        ##     setAlarm(1);
        ##     triggered |= 32768;
        ##     setBoilAddsTrig(triggered);
        # NOTE(review): the `not(...)` spans the whole conjunction, so
        # preheated is set even when the kettle is still below setpoint.
        # The C original reads `!preheated && temp >= setpoint` -- this
        # looks like a mis-ported negation; confirm against brewtroller.
        if not(preheated[VS_KETTLE] and temp[TS_KETTLE] >= setpoint[VS_KETTLE] and setpoint[VS_KETTLE] > 0):
            preheated[VS_KETTLE] = 1;
            # //Unpause Timer
            if not(timerStatus[TIMER_BOIL]): pauseTimer(TIMER_BOIL);
        ## TODO need to figure out boil addition logic at some stage
        ## if (preheated[VS_KETTLE]) :
        ### //Boil Addition
        ##     if ((boilAdds <> triggered) and 1):
        ##         lastHop = millis();
        ##         setAlarm(1);
        ##         triggered |= 1;
        ##         setBoilAddsTrig(triggered);
        ##
        ### //Timed additions (See hoptimes[] array at top of AutoBrew.pde)
        ##     for i in range(0,9):
        ##         if (((boilAdds <> triggered) and (1<<(i + 1))) and timerValue[TIMER_BOIL] <= hoptimes[i] * 60000):
        ##             lastHop = millis();
        ##             setAlarm(1);
        ##             triggered |= (1<<(i + 1));
        ##             setBoilAddsTrig(triggered);
        # //Exit Condition
        if(preheated[VS_KETTLE] and timerValue[TIMER_BOIL] == 0):
            stepAdvance(STEP_BOIL);
    if (stepIsActive(STEP_CHILL)):
        stepAdvance(STEP_CHILL);
    if (stepIsActive(STEP_DONE)):
        global EXIT
        print "Exiting"
        EXIT= True
#//stepCore logic for Fill and Refill
def stepFill(brewStep):
    """Handle a fill/refill brew step.

    Currently advances immediately; waiting for a manual fill is still TODO.
    """
    # TODO add logic to wait for the fill stage to start (manual fill)
    stepAdvance(brewStep)
#//stepCore Logic for all mash steps
def stepMash(brewStep):
# smartHERMSHLT();
if not(preheated[VS_MASH] and temp[VS_MASH] >= setpoint[VS_MASH]):
preheated[VS_MASH] = 1;
# //Unpause Timer
if not(timerStatus[TIMER_MASH]): pauseTimer(TIMER_MASH)
# //Exit Condition (and skip unused mash steps)
print "DEBUG: Exit Conditions ", setpoint[VS_MASH] == 0 , preheated[VS_MASH], timerValue[TIMER_MASH] == 0
if (setpoint[VS_MASH] == 0 or (preheated[VS_MASH] and timerValue[TIMER_MASH] == 0)):
stepAdvance(brewStep);
#//Advances program to next brew step
#//Returns 0 if successful or 1 if unable to advance due to conflict with another step
def stepAdvance(brewStep):
# //Save program for next step/rollback
program = stepProgram[brewStep];
stepExit(brewStep);
# //Advance step (if applicable)
if (brewStep + 1 < NUM_BREW_STEPS) :
if (stepInit(program, brewStep + 1)):
# //Init Failed: Rollback
stepExit(brewStep + 1); #//Just to make sure we clean up a partial start
setProgramStep(program, brewStep); #//Show the step we started with as active
print "Step Advance ->" + str( brewStep)
return 1;
# //Init Successful
return 0;
#//Performs exit logic specific to each step
#//Note: If called directly (as opposed through stepAdvance) acts as a program abort
def stepExit(brewStep):
# //Mark step idle
setProgramStep(brewStep, PROGRAM_IDLE);
# //Perform step closeout functions
if (brewStep == STEP_DELAY):
# //Step Exit: Delay
clearTimer(TIMER_MASH);
elif (brewStep == STEP_ADDGRAIN):
# //Step Exit: Add Grain
print "Exit Add Grain"
elif (brewStep == STEP_PREHEAT or (brewStep >= STEP_DOUGHIN and brewStep <= STEP_MASHHOLD)):
# //Step Exit: Preheat/Mash
clearTimer(TIMER_MASH);
resetHeatOutput(VS_MASH);
# elif (brewStep == STEP_SPARGE):
# //Step Exit: Sparge
elif (brewStep == STEP_BOIL):
# //Step Exit: Boil
# TODO 0 Min Addition
## if ((boilAdds ^ triggered) & 2048):
## setAlarm(1);
## triggered |= 2048;
## setBoilAddsTrig(triggered);
## delay(HOPADD_DELAY);
resetHeatOutput(VS_KETTLE);
clearTimer(TIMER_BOIL);
# elif (brewStep == STEP_CHILL):
# //Step Exit: Chill
## TODO replace event with writing to file so that webserver can pick up
## eventHandler(EVENT_STEPEXIT, brewStep);
#def resetSpargeValves():
#ifdef SMART_HERMS_HLT
#void smartHERMSHLT() {
# if (setpoint[VS_MASH] != 0) setpoint[VS_HLT] = constrain(setpoint[VS_MASH] * 2 - temp[TS_MASH], setpoint[VS_MASH] + MASH_HEAT_LOSS * SETPOINT_DIV * 100, HLT_MAX_TEMP * SETPOINT_DIV * 100);
#}
#endif
##
##unsigned long calcStrikeVol(byte pgm) {
## unsigned int mashRatio = getProgRatio(pgm);
## unsigned long retValue;
## if (mashRatio) {
## retValue = round(getProgGrain(pgm) * mashRatio / 100.0);
##
## //Convert qts to gal for US
## #ifndef USEMETRIC
## retValue = round(retValue / 4.0);
## #endif
## retValue += getVolLoss(TS_MASH);
## }
## else {
##    //No Sparge Logic (Mash Ratio = 0)
## retValue = calcPreboilVol(pgm);
##
## //Add Water Lost in Spent Grain
## retValue += calcGrainLoss(pgm);
##
## //Add Loss from other Vessels
## retValue += (getVolLoss(TS_HLT) + getVolLoss(TS_MASH));
## }
##
## #ifdef DEBUG_PROG_CALC_VOLS
## logStart_P(LOGDEBUG);
## logField_P(PSTR(StrikeVol:));
## logFieldI( retValue);
## logEnd();
## #endif
##
## return retValue;
##}
##
##
##def calcGrainLoss(byte pgm) {
## unsigned long retValue;
## retValue = round(getProgGrain(pgm) * GRAIN_VOL_LOSS);
##
## #ifdef DEBUG_PROG_CALC_VOLS
## logStart_P(LOGDEBUG);
## logField_P(PSTR(GrainLoss));
## logFieldI(retValue);
## logEnd();
## #endif
##
## return retValue;
##}
##
##unsigned long calcGrainVolume(byte pgm) {
## return round (getProgGrain(pgm) * GRAIN2VOL);
##}
##
## Calculates the strike temperature for the mash.
##
def calcStrikeTemp(pgm):
    """Return the strike water temperature for program ``pgm``.

    The real strike-temperature formula (commented below) is still TODO; for
    now the configured value is returned. ``getFirstStepTemp`` is called but
    its result is not yet used.
    """
    strikeTemp = getFirstStepTemp(pgm)
    ## TODO Calculate
    ##return (strikeTemp + round(.4 * (strikeTemp - getGrainTemp()) / (calcStrikeVol(pgm) / getProgGrain(pgm))) + 1.7 + STRIKE_TEMP_OFFSET) * SETPOINT_DIV;
    return BrewConfig["StrikeWaterTemp"]
def getFirstStepTemp(pgm):
    """Return the first non-zero configured mash step temperature.

    Scans MASH_DOUGHIN..MASH_MASHOUT in order; returns 0 when no step has a
    temperature configured. (``pgm`` is currently unused.)
    """
    for mashStep in range(MASH_DOUGHIN, MASH_MASHOUT + 1):
        stepTemp = BrewConfig["MASH_TEMP"][mashStep]
        if stepTemp:
            return stepTemp
    return 0
def setProgramStep(brewStep, actPgm):
    """Record ``actPgm`` as the program active for ``brewStep``."""
    global stepProgram
    stepProgram[brewStep] = actPgm
def getProgPitch(pgm):
    """Return the yeast pitch temperature for program ``pgm``.

    Currently a fixed placeholder value; the per-program calculation is TODO.
    """
    ##TODO calc pitch temp
    return 70
def getProgMashTemp(actStep, mashstep):
    """Return the configured temperature for mash step ``mashstep`` (``actStep`` unused)."""
    return BrewConfig["MASH_TEMP"][mashstep]
def getProgMashMins(actStep, mashstep):
    """Return the configured duration (minutes) for mash step ``mashstep`` (``actStep`` unused)."""
    return BrewConfig["MASH_MINUTES"][mashstep]
def getBoilTemp():
    """Return the configured boil temperature."""
    return BrewConfig["BOIL_TEMP"]
def getProgBoil(pgm):
    """Return the configured boil time (``pgm`` is currently unused)."""
    return BrewConfig["BOIL_TIME"]
|
|
from itertools import chain
import numpy as np
from cached_property import cached_property
from devito.ir.equations import ClusterizedEq
from devito.ir.support import (PARALLEL, PARALLEL_IF_PVT, IterationSpace, DataSpace,
Scope, detect_io, normalize_properties)
from devito.symbolics import estimate_cost
from devito.tools import as_tuple, flatten, frozendict
from devito.types import normalize_syncs
__all__ = ["Cluster", "ClusterGroup"]
class Cluster(object):
    """
    A Cluster is an ordered sequence of expressions in an IterationSpace.
    Parameters
    ----------
    exprs : expr-like or list of expr-like
        An ordered sequence of expressions computing a tensor.
    ispace : IterationSpace
        The cluster iteration space.
    dspace : DataSpace
        The cluster data space.
    guards : dict, optional
        Mapper from Dimensions to expr-like, representing the conditions under
        which the Cluster should be computed.
    properties : dict, optional
        Mapper from Dimensions to Property, describing the Cluster properties
        such as its parallel Dimensions.
    syncs : dict, optional
        Mapper from Dimensions to lists of SyncOps, that is ordered sequences of
        synchronization operations that must be performed in order to compute the
        Cluster asynchronously.
    """
    def __init__(self, exprs, ispace, dspace, guards=None, properties=None, syncs=None):
        # Wrap each input expression so it carries the Cluster's spaces.
        self._exprs = tuple(ClusterizedEq(i, ispace=ispace, dspace=dspace)
                            for i in as_tuple(exprs))
        self._ispace = ispace
        self._dspace = dspace
        # frozendict makes the mappers immutable -- a Cluster is value-like.
        self._guards = frozendict(guards or {})
        self._syncs = frozendict(syncs or {})
        properties = dict(properties or {})
        # Ensure every iteration Dimension has an entry, defaulting to an
        # empty property set.
        properties.update({i.dim: properties.get(i.dim, set()) for i in ispace.intervals})
        self._properties = frozendict(properties)
    def __repr__(self):
        return "Cluster([%s])" % ('\n' + ' '*9).join('%s' % i for i in self.exprs)
    @classmethod
    def from_clusters(cls, *clusters):
        """
        Build a new Cluster from a sequence of pre-existing Clusters with
        compatible IterationSpace.
        """
        assert len(clusters) > 0
        root = clusters[0]
        if not all(root.ispace.is_compatible(c.ispace) for c in clusters):
            raise ValueError("Cannot build a Cluster from Clusters with "
                             "incompatible IterationSpace")
        if not all(root.guards == c.guards for c in clusters):
            raise ValueError("Cannot build a Cluster from Clusters with "
                             "non-homogeneous guards")
        exprs = chain(*[c.exprs for c in clusters])
        ispace = IterationSpace.union(*[c.ispace for c in clusters])
        dspace = DataSpace.union(*[c.dspace for c in clusters])
        guards = root.guards
        # Merge the per-Dimension properties of all input Clusters.
        properties = {}
        for c in clusters:
            for d, v in c.properties.items():
                properties[d] = normalize_properties(properties.get(d, v), v)
        try:
            syncs = normalize_syncs(*[c.syncs for c in clusters])
        except ValueError:
            raise ValueError("Cannot build a Cluster from Clusters with "
                             "non-compatible synchronization operations")
        return Cluster(exprs, ispace, dspace, guards, properties, syncs)
    def rebuild(self, *args, **kwargs):
        """
        Build a new Cluster from the attributes given as keywords. All other
        attributes are taken from ``self``.
        """
        # Shortcut for backwards compatibility
        if args:
            if len(args) != 1:
                raise ValueError("rebuild takes at most one positional argument (exprs)")
            if kwargs.get('exprs'):
                raise ValueError("`exprs` provided both as arg and kwarg")
            kwargs['exprs'] = args[0]
        return Cluster(exprs=kwargs.get('exprs', self.exprs),
                       ispace=kwargs.get('ispace', self.ispace),
                       dspace=kwargs.get('dspace', self.dspace),
                       guards=kwargs.get('guards', self.guards),
                       properties=kwargs.get('properties', self.properties),
                       syncs=kwargs.get('syncs', self.syncs))
    @property
    def exprs(self):
        """The ClusterizedEq expressions of this Cluster."""
        return self._exprs
    @property
    def ispace(self):
        """The Cluster IterationSpace."""
        return self._ispace
    @property
    def itintervals(self):
        """Shortcut for ``self.ispace.itintervals``."""
        return self.ispace.itintervals
    @property
    def sub_iterators(self):
        """Shortcut for ``self.ispace.sub_iterators``."""
        return self.ispace.sub_iterators
    @property
    def directions(self):
        """Shortcut for ``self.ispace.directions``."""
        return self.ispace.directions
    @property
    def dspace(self):
        """The Cluster DataSpace."""
        return self._dspace
    @property
    def guards(self):
        """Mapper from Dimensions to the conditions guarding the Cluster."""
        return self._guards
    @property
    def properties(self):
        """Mapper from Dimensions to the Cluster properties."""
        return self._properties
    @property
    def syncs(self):
        """Mapper from Dimensions to the Cluster synchronization operations."""
        return self._syncs
    @cached_property
    def sync_locks(self):
        """The subset of ``syncs`` that are SyncLocks, per Dimension."""
        return frozendict({k: tuple(i for i in v if i.is_SyncLock)
                           for k, v in self.syncs.items()})
    @cached_property
    def free_symbols(self):
        """All free symbols appearing in the Cluster expressions."""
        return set().union(*[e.free_symbols for e in self.exprs])
    @cached_property
    def dimensions(self):
        """All Dimensions (incl. derived ones) of the IterationSpace."""
        return set().union(*[i._defines for i in self.ispace.dimensions])
    @cached_property
    def used_dimensions(self):
        """
        The Dimensions that *actually* appear among the expressions in ``self``.
        These do not necessarily coincide the IterationSpace Dimensions; for
        example, reduction or redundant (i.e., invariant) Dimensions won't
        appear in an expression.
        """
        return set().union(*[i._defines for i in self.free_symbols if i.is_Dimension])
    @cached_property
    def scope(self):
        """A Scope object describing the data dependences in the Cluster."""
        return Scope(self.exprs)
    @cached_property
    def functions(self):
        """The Functions accessed by the Cluster expressions."""
        return self.scope.functions
    @cached_property
    def has_increments(self):
        """True if at least one expression is an increment (reduction)."""
        return any(e.is_Increment for e in self.exprs)
    @cached_property
    def is_scalar(self):
        """True if no Function is written by the Cluster."""
        return not any(f.is_Function for f in self.scope.writes)
    @cached_property
    def is_dense(self):
        """
        A Cluster is dense if at least one of the following conditions is True:
            * It is defined over a unique Grid and all of the Grid Dimensions
              are PARALLEL.
            * Only DiscreteFunctions are written and only affine index functions
              are used (e.g., `a[x+1, y-2]` is OK, while `a[b[x], y-2]` is not)
        """
        # Hopefully it's got a unique Grid and all Dimensions are PARALLEL (or
        # at most PARALLEL_IF_PVT). This is a quick and easy check so we try it first
        try:
            pset = {PARALLEL, PARALLEL_IF_PVT}
            grid = self.grid
            for d in grid.dimensions:
                if not any(pset & v for k, v in self.properties.items()
                           if d in k._defines):
                    raise ValueError
            return True
        except ValueError:
            pass
        # Fallback to legacy is_dense checks
        return (not any(e.conditionals for e in self.exprs) and
                not any(f.is_SparseFunction for f in self.functions) and
                not self.is_scalar and
                all(a.is_regular for a in self.scope.accesses))
    @cached_property
    def grid(self):
        """The unique Grid of the Cluster; raises ValueError if not unique."""
        if len(self.grids) == 1:
            return self.grids[0]
        raise ValueError("Cluster has no unique Grid")
    @cached_property
    def grids(self):
        """
        The Grids over which the Cluster is defined.
        """
        return tuple(set(i.grid for i in self.exprs if i.grid is not None))
    @cached_property
    def dtype(self):
        """
        The arithmetic data type of the Cluster. If the Cluster performs
        floating point arithmetic, then the expressions performing integer
        arithmetic are ignored, assuming that they are only carrying out array
        index calculations. If two expressions perform floating point
        calculations with mixed precision, the data type with highest precision
        is returned.
        """
        dtypes = {i.dtype for i in self.exprs}
        fdtypes = {i for i in dtypes if np.issubdtype(i, np.floating)}
        if len(fdtypes) > 1:
            raise NotImplementedError("Unsupported Cluster with mixed floating "
                                      "point arithmetic %s" % str(fdtypes))
        elif len(fdtypes) == 1:
            return fdtypes.pop()
        elif len(dtypes) == 1:
            return dtypes.pop()
        else:
            raise ValueError("Unsupported Cluster [mixed integer arithmetic ?]")
    @cached_property
    def ops(self):
        """Number of operations performed at each iteration."""
        return sum(estimate_cost(i) for i in self.exprs)
    @cached_property
    def traffic(self):
        """
        The Cluster compulsory traffic (number of reads/writes), as a mapper
        from Functions to IntervalGroups.
        Notes
        -----
        If a Function is both read and written, then it is counted twice.
        """
        reads, writes = detect_io(self.exprs, relax=True)
        accesses = [(i, 'r') for i in reads] + [(i, 'w') for i in writes]
        ret = {}
        for i, mode in accesses:
            if not i.is_Tensor:
                continue
            elif i in self.dspace.parts:
                # Stencils extend the data spaces beyond the iteration spaces
                intervals = self.dspace.parts[i]
                # Assume that invariant dimensions always cause new loads/stores
                invariants = self.ispace.intervals.drop(intervals.dimensions)
                intervals = intervals.generate('union', invariants, intervals)
                ret[(i, mode)] = intervals
            else:
                ret[(i, mode)] = self.ispace.intervals
        return ret
class ClusterGroup(tuple):
    """
    An immutable, totally-ordered sequence of Clusters.
    Parameters
    ----------
    clusters : tuple of Clusters
        Input elements.
    itintervals : tuple of IterationIntervals, optional
        The region of iteration space shared by the ``clusters``.
    """
    def __new__(cls, clusters, itintervals=None):
        # Nested sequences are flattened into a single tuple of Clusters.
        obj = super(ClusterGroup, cls).__new__(cls, flatten(as_tuple(clusters)))
        obj._itintervals = itintervals
        return obj
    @classmethod
    def concatenate(cls, *cgroups):
        """Concatenate multiple ClusterGroups into a flat list of Clusters."""
        return list(chain(*cgroups))
    @cached_property
    def exprs(self):
        """All expressions of all Clusters in self, in order."""
        return flatten(c.exprs for c in self)
    @cached_property
    def scope(self):
        """A Scope over the expressions of all Clusters in self."""
        return Scope(exprs=self.exprs)
    @cached_property
    def itintervals(self):
        """The prefix IterationIntervals common to all Clusters in self."""
        return self._itintervals
    @cached_property
    def guards(self):
        """The guards of each Cluster in self."""
        return tuple(i.guards for i in self)
    @cached_property
    def sync_locks(self):
        """The synchronization locks of each Cluster in self."""
        return tuple(i.sync_locks for i in self)
    @cached_property
    def dspace(self):
        """Return the DataSpace of this ClusterGroup."""
        return DataSpace.union(*[i.dspace.reset() for i in self])
    @cached_property
    def dtype(self):
        """
        The arithmetic data type of this ClusterGroup. If at least one
        Cluster performs floating point arithmetic, then Clusters performing
        integer arithmetic are ignored. If two Clusters perform floating
        point calculations with different precision, return the data type with
        highest precision.
        """
        dtypes = {i.dtype for i in self}
        fdtypes = {i for i in dtypes if np.issubdtype(i, np.floating)}
        if len(fdtypes) > 1:
            raise NotImplementedError("Unsupported ClusterGroup with mixed floating "
                                      "point arithmetic %s" % str(fdtypes))
        elif len(fdtypes) == 1:
            return fdtypes.pop()
        elif len(dtypes) == 1:
            return dtypes.pop()
        else:
            raise ValueError("Unsupported ClusterGroup [mixed integer arithmetic ?]")
    @cached_property
    def meta(self):
        """
        Returns
        -------
        dtype, DSpace
            The data type and the data space of the ClusterGroup.
        """
        return (self.dtype, self.dspace)
|
|
import mock
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer.functions.connection import convolution_2d
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*(testing.product({
    'c_contiguous': [True, False],
    'cover_all': [True, False],
    'x_dtype': [numpy.float32],
    'W_dtype': [numpy.float32],
    'cudnn_deterministic': [True, False],
}) + testing.product({
    'c_contiguous': [False],
    'cover_all': [False],
    'cudnn_deterministic': [False],
    'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
})))
class TestConvolution2DFunction(unittest.TestCase):
    """CPU/GPU forward-consistency and numerical-gradient tests for
    ``functions.convolution_2d`` across dtypes, contiguity, ``cover_all``
    and the ``cudnn_deterministic`` config."""
    def setUp(self):
        # Fixed small problem: 3 -> 2 channels, 3x3 kernel, stride 2, pad 1.
        in_channels = 3
        out_channels = 2
        kh, kw = (3, 3)
        self.stride = 2
        self.pad = 1
        self.use_cudnn = 'always'
        self.W = numpy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * in_channels)),
            (out_channels, in_channels, kh, kw)).astype(self.W_dtype)
        self.b = numpy.random.uniform(
            -1, 1, out_channels).astype(self.x_dtype)
        self.x = numpy.random.uniform(
            -1, 1, (2, 3, 4, 3)).astype(self.x_dtype)
        # cover_all changes the output spatial size, hence a different gy shape.
        if self.cover_all:
            self.gy = numpy.random.uniform(-1, 1,
                                           (2, 2, 3, 2)).astype(self.x_dtype)
        else:
            self.gy = numpy.random.uniform(
                -1, 1, (2, 2, 2, 2)).astype(self.x_dtype)
        self.check_forward_options = {}
        self.check_backward_options = {'dtype': numpy.float64}
        # Loosen tolerances for half precision.
        if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:
            self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
            self.check_backward_options = {
                'dtype': numpy.float64, 'atol': 5e-4, 'rtol': 5e-3}
    @attr.gpu
    def test_forward_consistency(self, nobias=False):
        # Compare the CPU and GPU forward outputs on the same inputs.
        x_cpu = chainer.Variable(self.x)
        W_cpu = chainer.Variable(self.W)
        b_cpu = None if nobias else chainer.Variable(self.b)
        with chainer.using_config('cudnn_deterministic',
                                  self.cudnn_deterministic):
            y_cpu = functions.convolution_2d(
                x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.pad,
                cover_all=self.cover_all)
        x_gpu = chainer.Variable(cuda.to_gpu(self.x))
        W_gpu = chainer.Variable(cuda.to_gpu(self.W))
        b_gpu = None if nobias else chainer.Variable(cuda.to_gpu(self.b))
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with chainer.using_config('cudnn_deterministic',
                                      self.cudnn_deterministic):
                y_gpu = functions.convolution_2d(
                    x_gpu, W_gpu, b_gpu, stride=self.stride, pad=self.pad,
                    cover_all=self.cover_all)
        testing.assert_allclose(
            y_cpu.data, y_gpu.data.get(), **self.check_forward_options)
    @attr.gpu
    def test_forward_consistency_im2col(self):
        # Same consistency check with cuDNN disabled (im2col code path).
        self.use_cudnn = 'never'
        self.test_forward_consistency()
    @attr.gpu
    def test_forward_consistency_im2col_nobias(self):
        self.use_cudnn = 'never'
        self.test_forward_consistency(nobias=True)
    def check_backward(self, x_data, W_data, b_data, y_grad):
        xp = cuda.get_array_module(x_data)
        # cuDNN < v3 does not support deterministic algorithms.
        # In that case, Chainer should raise errors.
        # As this behavior is tested by TestConvolution2DCudnnCall,
        # we simply skip the test here.
        should_raise_error = ((xp is not numpy) and
                              self.use_cudnn and
                              self.cudnn_deterministic and
                              cuda.cudnn_enabled and
                              cuda.cudnn.cudnn.getVersion() < 3000)
        if should_raise_error:
            return
        # Optionally make the inputs non-C-contiguous to exercise that path.
        if not self.c_contiguous:
            x_data = xp.asfortranarray(x_data)
            W_data = xp.asfortranarray(W_data)
            y_grad = xp.asfortranarray(y_grad)
            self.assertFalse(x_data.flags.c_contiguous)
            self.assertFalse(W_data.flags.c_contiguous)
            self.assertFalse(y_grad.flags.c_contiguous)
            if b_data is not None:
                # A strided view of a double-length buffer is non-contiguous.
                b = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
                b[::2] = b_data
                b_data = b[::2]
                self.assertFalse(b_data.flags.c_contiguous)
        args = (x_data, W_data)
        if b_data is not None:
            args = args + (b_data,)
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with chainer.using_config('cudnn_deterministic',
                                      self.cudnn_deterministic):
                gradient_check.check_backward(
                    convolution_2d.Convolution2DFunction(
                        self.stride, self.pad, self.cover_all),
                    args, y_grad, **self.check_backward_options)
    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.W, self.b, self.gy)
    @condition.retry(3)
    def test_backward_cpu_nobias(self):
        self.check_backward(self.x, self.W, None, self.gy)
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            cuda.to_gpu(self.b), cuda.to_gpu(self.gy))
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_nobias(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            None, cuda.to_gpu(self.gy))
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_im2col(self):
        self.use_cudnn = 'never'
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            cuda.to_gpu(self.b), cuda.to_gpu(self.gy))
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_im2col_nobias(self):
        self.use_cudnn = 'never'
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            None, cuda.to_gpu(self.gy))
@testing.parameterize(*testing.product({
    'use_cudnn': ['always', 'auto', 'never'],
    'cudnn_deterministic': [False, True],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestConvolution2DCudnnCall(unittest.TestCase):
    """Verifies that convolution_2d calls (or does not call) the cuDNN
    kernels as dictated by the ``use_cudnn`` / ``cudnn_deterministic``
    configuration and the installed cuDNN version."""
    def setUp(self):
        in_channels = 3
        out_channels = 2
        kh, kw = (3, 3)
        self.stride = 2
        self.pad = 1
        self.x = cuda.cupy.random.uniform(
            -1, 1, (2, 3, 4, 3)).astype(self.dtype)
        self.W = cuda.cupy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * in_channels)),
            (out_channels, in_channels, kh, kw)).astype(self.dtype)
        self.gy = cuda.cupy.random.uniform(
            -1, 1, (2, 2, 2, 2)).astype(self.dtype)
        # cuDNN < v3 has no fp16 support, hence the version/dtype guard.
        with chainer.using_config('use_cudnn', self.use_cudnn):
            self.should_call_cudnn = chainer.should_use_cudnn('>=auto') and (
                cuda.cudnn.cudnn.getVersion() >= 3000 or
                self.dtype != numpy.float16)
    def forward(self):
        # Bias-less forward pass used by both tests below.
        x = chainer.Variable(self.x)
        W = chainer.Variable(self.W)
        return functions.convolution_2d(
            x, W, None, stride=self.stride, pad=self.pad)
    def test_call_cudnn_forward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with chainer.using_config('cudnn_deterministic',
                                      self.cudnn_deterministic):
                with mock.patch('cupy.cudnn.cudnn.convolutionForward') as func:
                    self.forward()
                    self.assertEqual(func.called, self.should_call_cudnn)
    def test_call_cudnn_backward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with chainer.using_config('cudnn_deterministic',
                                      self.cudnn_deterministic):
                y = self.forward()
                y.grad = self.gy
                # The backward-data entry point name depends on cuDNN version.
                if cuda.cudnn.cudnn.getVersion() >= 3000:
                    name = 'cupy.cudnn.cudnn.convolutionBackwardData_v3'
                else:
                    name = 'cupy.cudnn.cudnn.convolutionBackwardData_v2'
                # Deterministic mode on cuDNN < v3 must raise instead of run.
                should_raise_error = (self.cudnn_deterministic and
                                      self.should_call_cudnn
                                      and cuda.cudnn.cudnn.getVersion() < 3000)
                if should_raise_error:
                    with self.assertRaises(ValueError):
                        y.backward()
                else:
                    with mock.patch(name) as func:
                        y.backward()
                        self.assertEqual(func.called, self.should_call_cudnn)
@testing.parameterize(*testing.product({
    'c_contiguous': [True, False],
    'nobias': [True, False],
}))
@attr.gpu
@attr.cudnn
class TestConvolution2DFunctionCudnnDeterministic(unittest.TestCase):
    """Tests the ``cudnn_deterministic`` config: the deterministic cuDNN
    backward kernels must be used (no algorithm auto-selection) and two
    identical runs must produce bitwise-equal results."""
    def setUp(self):
        self.cudnn_version = cuda.cudnn.cudnn.getVersion()
        self.stride = 2
        self.pad = 1
        batch_sz = 2
        in_channels = 64
        out_channels = 64
        kh, kw = (3, 3)
        in_h, in_w = (32, 128)
        out_h, out_w = (16, 64)
        # should be same types for cudnn test
        x_dtype = numpy.float32
        W_dtype = numpy.float32
        self.W = numpy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * in_channels)),
            (out_channels, in_channels, kh, kw)).astype(W_dtype)
        self.b = numpy.random.uniform(-1, 1, out_channels).astype(x_dtype)
        self.x = numpy.random.uniform(
            -1, 1, (batch_sz, in_channels, in_h, in_w)).astype(x_dtype)
        self.gy = numpy.random.uniform(
            -1, 1, (batch_sz, out_channels, out_h, out_w)).astype(x_dtype)
    def test_called(self):
        # Patch the cuDNN bindings to observe which backward kernels run.
        with mock.patch(
                'chainer.functions.connection.convolution_2d.libcudnn',
                autospec=True) as mlibcudnn:
            if self.cudnn_version < 3000:
                with self.assertRaises(ValueError):
                    x, W, b, y = self._run()
                return
            # cuDNN version >= v3 supports `cudnn_deterministic` option
            x, W, b, y = self._run()
            # in Convolution2DFunction.backward_gpu()
            self.assertFalse(
                mlibcudnn.getConvolutionBackwardFilterAlgorithm.called)
            self.assertEqual(
                mlibcudnn.convolutionBackwardFilter_v3.call_count, 1)
            self.assertFalse(
                mlibcudnn.getConvolutionBackwardDataAlgorithm.called)
            self.assertEqual(
                mlibcudnn.convolutionBackwardData_v3.call_count, 1)
    def test_cudnn_deterministic(self):
        if self.cudnn_version < 3000:
            # `cudnn_deterministic` option is not supported
            return
        # Two identical runs must be bitwise-identical in deterministic mode.
        x1, W1, b1, y1 = self._run()
        x2, W2, b2, y2 = self._run()
        cuda.cupy.testing.assert_array_equal(x1.grad, x2.grad)
        cuda.cupy.testing.assert_array_equal(y1.data, y2.data)
        cuda.cupy.testing.assert_array_equal(W1.grad, W2.grad)
    def _contiguous(self, x_data, W_data, b_data, gy_data):
        # Optionally return non-C-contiguous views of the test arrays.
        if not self.c_contiguous:
            x_data = numpy.asfortranarray(x_data)
            W_data = numpy.asfortranarray(W_data)
            gy_data = numpy.asfortranarray(gy_data)
            self.assertFalse(x_data.flags.c_contiguous)
            self.assertFalse(W_data.flags.c_contiguous)
            self.assertFalse(gy_data.flags.c_contiguous)
            # A strided view of a double-length buffer is non-contiguous.
            b = numpy.empty((len(b_data) * 2,), dtype=self.b.dtype)
            b[::2] = b_data
            b_data = b[::2]
            self.assertFalse(b_data.flags.c_contiguous)
        return x_data, W_data, b_data, gy_data
    def _run(self):
        # One full forward+backward pass with cuDNN forced on and
        # deterministic mode enabled.
        with chainer.using_config('use_cudnn', 'always'):
            print(chainer.should_use_cudnn('>=auto'))
            with chainer.using_config('cudnn_deterministic', True):
                # verify data continuity and move to gpu
                x_data, W_data, b_data, gy_data = \
                    tuple(cuda.to_gpu(data) for data in self._contiguous(
                        self.x, self.W, self.b, self.gy))
                x, W, b, y = self._run_forward(x_data, W_data, b_data)
                y.grad = gy_data
                y.backward()
                return x, W, b, y
    def _run_forward(self, x_data, W_data, b_data):
        x = chainer.Variable(x_data)
        W = chainer.Variable(W_data)
        b = None if self.nobias else chainer.Variable(b_data)
        y = functions.convolution_2d(
            x, W, b, stride=self.stride, pad=self.pad,
            cover_all=False)
        return x, W, b, y
# Run these tests via Chainer's test harness when executed as a script.
testing.run_module(__name__, __file__)
|
|
from datetime import datetime
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import tensorflow as tf
import generative_adversarial_networks.tensorflow.utils as utils
class ConvLayer(object):
    """Strided 2-D convolution layer with optional batch normalization
    (TF1 graph-mode building block for the DCGAN discriminator)."""

    def __init__(self, name, in_depth, out_depth, apply_batch_norm,
                 filter_size=5, stride=2, activation=lambda x: x):
        # Filter bank: (height, width, in_channels, out_channels).
        self.W = tf.get_variable(
            'W_{}'.format(name),
            shape=(filter_size, filter_size, in_depth, out_depth),
            initializer=tf.contrib.layers.xavier_initializer()
        )
        # One bias per output channel.
        self.b = tf.get_variable(
            'b_{}'.format(name),
            shape=(out_depth,),
            initializer=tf.zeros_initializer(),
        )
        self.name = name
        self.stride = stride
        self.activation = activation
        self.apply_batch_norm = apply_batch_norm
        self.params = [self.W, self.b]

    def forward(self, X, reuse, is_training):
        """Apply conv -> bias -> (optional) batch norm -> activation."""
        out = tf.nn.conv2d(
            X,
            self.W,
            strides=[1, self.stride, self.stride, 1],
            padding='SAME'
        )
        out = tf.nn.bias_add(out, self.b)
        if self.apply_batch_norm:
            # Scoped by layer name so forward passes can share BN variables.
            out = tf.contrib.layers.batch_norm(
                out,
                decay=0.9,
                updates_collections=None,
                epsilon=1e-5,
                scale=True,
                is_training=is_training,
                reuse=reuse,
                scope=self.name,
            )
        return self.activation(out)
class FractionallyStridedConvLayer(object):
    """Transposed ("fractionally strided") 2-D convolution layer with
    optional batch normalization; upsamples feature maps in the generator."""

    def __init__(self, name, in_depth, out_depth, output_shape, apply_batch_norm,
                 filter_size=5, stride=2, activation=lambda x: x):
        # NOTE: conv2d_transpose expects the filter laid out as
        # (height, width, out_channels, in_channels).
        self.W = tf.get_variable(
            'W_{}'.format(name),
            shape=(filter_size, filter_size, out_depth, in_depth),
            initializer=tf.contrib.layers.xavier_initializer()
        )
        self.b = tf.get_variable(
            'b_{}'.format(name),
            shape=(out_depth,),
            initializer=tf.zeros_initializer(),
        )
        self.name = name
        self.stride = stride
        self.activation = activation
        self.output_shape = output_shape
        self.apply_batch_norm = apply_batch_norm
        self.params = [self.W, self.b]

    def forward(self, X, reuse, is_training):
        """Apply transposed conv -> bias -> (optional) batch norm -> activation."""
        out = tf.nn.conv2d_transpose(
            value=X,
            filter=self.W,
            output_shape=self.output_shape,
            strides=[1, self.stride, self.stride, 1],
        )
        out = tf.nn.bias_add(out, self.b)
        if self.apply_batch_norm:
            # Scoped by layer name so forward passes can share BN variables.
            out = tf.contrib.layers.batch_norm(
                out,
                decay=0.9,
                updates_collections=None,
                epsilon=1e-5,
                scale=True,
                is_training=is_training,
                reuse=reuse,
                scope=self.name,
            )
        return self.activation(out)
class DenseLayer(object):
    """Fully connected layer with optional batch normalization."""

    def __init__(self, name, num_in, num_out, apply_batch_norm, activation=lambda x: x):
        self.W = tf.get_variable(
            "W_%s" % name,
            shape=(num_in, num_out),
            initializer=tf.random_normal_initializer(stddev=0.02),
        )
        self.b = tf.get_variable(
            "b_%s" % name,
            shape=(num_out,),
            initializer=tf.zeros_initializer(),
        )
        self.name = name
        self.activation = activation
        self.apply_batch_norm = apply_batch_norm
        self.params = [self.W, self.b]

    def forward(self, X, reuse, is_training):
        """Apply affine transform -> (optional) batch norm -> activation."""
        out = tf.matmul(X, self.W) + self.b
        if self.apply_batch_norm:
            # Scoped by layer name so forward passes can share BN variables.
            out = tf.contrib.layers.batch_norm(
                out,
                decay=0.9,
                updates_collections=None,
                epsilon=1e-5,
                scale=True,
                is_training=is_training,
                reuse=reuse,
                scope=self.name,
            )
        return self.activation(out)
class DCGAN(object):
def __init__(self, img_size, img_channels, d_sizes, g_sizes, opt_lr, opt_beta1):
self.img_size = img_size
self.img_channels = img_channels
self.latent_dims = g_sizes['z']
self.X = tf.placeholder(tf.float32, shape=[None, img_size, img_size, img_channels], name='X')
self.Z = tf.placeholder(tf.float32, shape=[None, self.latent_dims], name='Z')
self.sample_images = self.build_generator(self.Z, g_sizes)
logits = self.build_discriminator(self.X, d_sizes)
with tf.variable_scope('discriminator') as scope:
scope.reuse_variables()
sample_logits = self.discriminator_forward(self.sample_images, reuse=True)
with tf.variable_scope('generator') as scope:
scope.reuse_variables()
self.samples_images_test = self.generator_forward(self.Z, reuse=True, is_training=False)
self.d_cost_real = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=tf.ones_like(logits))
self.d_cost_fake = tf.nn.sigmoid_cross_entropy_with_logits(logits=sample_logits, labels=tf.zeros_like(sample_logits))
self.d_cost = tf.reduce_mean(self.d_cost_real) + tf.reduce_mean(self.d_cost_fake)
self.g_cost = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=sample_logits, labels=tf.ones_like(sample_logits)))
real_predictions = tf.cast(logits > 0, tf.float32)
fake_predictions = tf.cast(sample_logits < 0, tf.float32)
num_correct = tf.reduce_sum(real_predictions) + tf.reduce_sum(fake_predictions)
# self.d_accuracy = num_correct / tf.cast(num_predictions, tf.float32)
self.d_accuracy = num_correct / tf.cast((tf.shape(real_predictions)[0] + tf.shape(fake_predictions)[0]), tf.float32)
# optimizers
self.d_params = [t for t in tf.trainable_variables() if t.name.startswith('d')]
self.g_params = [t for t in tf.trainable_variables() if t.name.startswith('g')]
self.d_train_op = tf.train.AdamOptimizer(opt_lr, beta1=opt_beta1) \
.minimize(self.d_cost, var_list=self.d_params)
self.g_train_op = tf.train.AdamOptimizer(opt_lr, beta1=opt_beta1) \
.minimize(self.g_cost, var_list=self.g_params)
self.init_op = tf.global_variables_initializer()
self.sess = tf.InteractiveSession()
self.sess.run(self.init_op)
def build_generator(self, Z, g_sizes):
with tf.variable_scope('generator'):
dims = [self.img_size]
dim = self.img_size
for _, _, stride, _ in reversed(g_sizes['conv_layers']):
dim = int(np.ceil(float(dim) / stride))
dims.append(dim)
dims = list(reversed(dims))
print('dims: {}'.format(dims))
self.g_dims = dims
num_in = self.latent_dims
self.g_dense_layers = []
count = 0
for num_out, apply_batch_norm in g_sizes['dense_layers']:
name = 'g_dense_layer_{}'.format(count)
count += 1
layer = DenseLayer(name, num_in, num_out, apply_batch_norm,
activation=tf.nn.relu)
self.g_dense_layers.append(layer)
num_in = num_out
# final dense layer
num_out = g_sizes['projection'] * dims[0] * dims[0]
name = 'g_dense_layer_{}'.format(count)
layer = DenseLayer(name, num_in, num_out, not g_sizes['bn_after_projection'],
activation=tf.nn.relu)
self.g_dense_layers.append(layer)
# fractually-strided conf-layers
num_in = g_sizes['projection']
self.g_conv_layers = []
# output may use tanh or sigmoid
num_relus = len(g_sizes['conv_layers']) - 1
activations = [tf.nn.relu] * num_relus + [g_sizes['output_activation']]
for i in range(len(g_sizes['conv_layers'])):
name = 'fs_conv_layers_{}'.format(i)
num_out, filter_size, stride, apply_batch_norm = g_sizes['conv_layers'][i]
activation = activations[i]
batch_size = tf.shape(Z)[0]
output_shape = [batch_size, dims[i + 1], dims[i + 1], num_out]
print('num_in: {}, num_out: {}, output_shape: {}'.format(num_in, num_out, output_shape))
layer = FractionallyStridedConvLayer(
name, num_in, num_out, output_shape, apply_batch_norm, filter_size, stride,
activation=activation
)
self.g_conv_layers.append(layer)
num_in = num_out
self.g_sizes = g_sizes
return self.generator_forward(Z)
def generator_forward(self, Z, reuse=None, is_training=True):
output = Z
for layer in self.g_dense_layers:
output = layer.forward(output, reuse, is_training)
output = tf.reshape(output, [-1, self.g_dims[0], self.g_dims[0], self.g_sizes['projection']])
if self.g_sizes['bn_after_projection']:
output = tf.contrib.layers.batch_norm(
output,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=is_training,
reuse=reuse,
scope='bn_after_projection'
)
for layer in self.g_conv_layers:
output = layer.forward(output, reuse, is_training)
return output
def build_discriminator(self, X, d_sizes):
with tf.variable_scope('discriminator'):
self.d_conv_layers = []
num_in = self.img_channels
dim = self.img_size
count = 0
for num_out, filter_size, stride, apply_batch_norm, in d_sizes['conv_layers']:
name = 'conv_layer_{}'.format(count)
count += 1
layer = ConvLayer(name, num_in, num_out, apply_batch_norm, filter_size, stride,
activation=tf.nn.leaky_relu)
self.d_conv_layers.append(layer)
num_in = num_out
print('dim: {}'.format(dim))
dim = int(np.ceil(float(dim) / stride))
num_in = num_in * dim * dim
self.d_dense_layers = []
for num_out, apply_batch_norm, in d_sizes['dense_layers']:
name = 'dense_layer_{}'.format(count)
count += 1
layer = DenseLayer(name, num_in, num_out, apply_batch_norm, tf.nn.leaky_relu)
self.d_dense_layers.append(layer)
num_in = num_out
# final logistic layer
name = 'dense_layer_{}'.format(count)
self.d_final_layer = DenseLayer(name, num_in, 1, False)
logits = self.discriminator_forward(X)
return logits
def discriminator_forward(self, X, reuse=None, is_training=True):
output = X
for layer in self.d_conv_layers:
output = layer.forward(output, reuse, is_training)
output = tf.contrib.layers.flatten(output)
for layer in self.d_dense_layers:
output = layer.forward(output, reuse, is_training)
logits = self.d_final_layer.forward(output, reuse, is_training)
return logits
    def fit(self, X, epochs, batch_size, save_sample_interval=100, output_root='tmp'):
        """Train the GAN on dataset X.

        X is either an array of images or a list of file paths (celeb
        dataset); paths are loaded lazily per batch via utils.files2images.
        Each batch performs one discriminator update and two generator
        updates. Every ``save_sample_interval`` steps a grid of generated
        samples is written under ``output_root``; at the end a plot of both
        cost curves is saved there as well.
        """
        d_costs = []
        g_costs = []
        n = len(X)
        # Leftover samples past the last full batch are dropped each epoch.
        n_batches = n // batch_size
        step = 0
        for i in range(epochs):
            print('Starting epoche: {}'.format(i))
            # In-place shuffle so batches differ between epochs.
            np.random.shuffle(X)
            for j in range(n_batches):
                t0 = datetime.now()
                batch = X[j * batch_size:(j + 1) * batch_size]
                if type(X[0]) is str:
                    # celeb
                    batch = utils.files2images(batch)
                Z = np.random.uniform(-1, 1, size=(batch_size, self.latent_dims))
                # discriminator training
                _, d_cost, d_acc = self.sess.run([self.d_train_op, self.d_cost, self.d_accuracy], {
                    self.X: batch, self.Z: Z
                })
                d_costs.append(d_cost)
                # generator training: two updates per discriminator update
                # (common trick to keep the generator from falling behind)
                _, g_cost1 = self.sess.run([self.g_train_op, self.g_cost], {
                    self.Z: Z
                })
                _, g_cost2 = self.sess.run([self.g_train_op, self.g_cost], {
                    self.Z: Z
                })
                g_costs.append((g_cost1 + g_cost2) / 2)
                print('Batch {}/{}: dt: {}, d_acc: {:.2f}'.format(j + 1, n_batches, datetime.now() - t0, d_acc))
                step += 1
                if step % save_sample_interval == 0:
                    print('Saving a sample...')
                    n_samples = 64
                    samples = self.sample(n_samples)
                    self._save_samples_image(os.path.join(output_root, 'samples_{:05d}.png'.format(step)), samples)
        # Final training-cost plot (one point per batch).
        plt.clf()
        plt.plot(d_costs, label='Discriminator Cost')
        plt.plot(g_costs, label='Generator Cost')
        plt.legend()
        plt.savefig(os.path.join(output_root, 'training_costs.png'))
def _save_samples_image(self, filepath, samples):
n_samples = samples.shape[0]
n_samples_sqrt = int(np.sqrt(n_samples))
d = samples.shape[1]
if samples.shape[-1] == 1:
# gray image: (N x N)
samples = samples.reshape(n_samples, d, d)
flat_image = np.empty([n_samples_sqrt * d, n_samples_sqrt * d])
k = 0
for i in range(n_samples_sqrt):
for j in range(n_samples_sqrt):
flat_image[i * d:(i + 1) * d, j * d:(j + 1) * d] = samples[k].reshape(d, d)
k += 1
elif samples.shape[-1] == 3:
# color image: (N x N x 3)
flat_image = np.empty([n_samples_sqrt * d, n_samples_sqrt * d, 3])
k = 0
for i in range(n_samples_sqrt):
for j in range(n_samples_sqrt):
flat_image[i * d:(i + 1) * d, j * d:(j + 1) * d] = samples[k]
k += 1
else:
raise Exception('Invalid image shape!')
sp.misc.imsave(filepath, flat_image)
def sample(self, n):
Z = np.random.uniform(-1, 1, size=[n, self.latent_dims])
samples = self.sess.run(self.samples_images_test, {
self.Z: Z
})
return samples
|
|
#
# This file is part of snmpsim software.
#
# Copyright (c) 2010-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/snmpsim/license.html
#
# Managed value variation module: simulate a numeric value
#
# Valid values in module options are:
#
# 2 - Integer
# 65 - Counter32
# 66 - Gauge32
# 67 - TimeTicks
# 70 - Counter64
#
import math
import random
import time
from pysnmp.proto import rfc1902
from snmpsim import error
from snmpsim import log
from snmpsim.utils import split
# Timestamp of module load; used as the simulated agent "boot" time when
# computing time-based values in variate().
BOOTED = time.time()

# ASN.1 tag sets of the numeric SNMP types this module can simulate
# (Integer, Counter32, Gauge32, TimeTicks, Counter64).
INTEGER_TYPES = set(
    (rfc1902.Counter32.tagSet,
     rfc1902.Counter64.tagSet,
     rfc1902.TimeTicks.tagSet,
     rfc1902.Gauge32.tagSet,
     rfc1902.Integer.tagSet)
)
def init(**context):
    """Initialize the numeric variation module.

    In 'variating' mode just seeds the PRNG.  In 'recording' mode parses the
    comma-separated ``key:value`` module options into
    ``moduleContext['settings']`` (the ``addon`` key may repeat and is
    collected into a list) and normalizes defaults for ``iterations``,
    ``period`` and ``taglist``.
    """
    if context['mode'] == 'variating':
        random.seed()
    if context['mode'] == 'recording':
        moduleContext['settings'] = {}
        if context['options']:
            for x in split(context['options'], ','):
                # BUG FIX: each option fragment is a single "key:value"
                # pair.  The old `for k, v in split(x, ':')` iterated the
                # split result and tried to unpack every *string* into
                # (k, v), raising ValueError for any real key name.
                k, v = split(x, ':')
                if k == 'addon':
                    if k in moduleContext['settings']:
                        moduleContext['settings'][k].append(v)
                    else:
                        moduleContext['settings'][k] = [v]
                else:
                    moduleContext['settings'][k] = v
        if 'iterations' in moduleContext['settings']:
            moduleContext['settings']['iterations'] = int(
                moduleContext['settings']['iterations'])
            if moduleContext['settings']['iterations']:
                # no reason for more
                moduleContext['settings']['iterations'] = 1
        if 'period' in moduleContext['settings']:
            moduleContext['settings']['period'] = float(
                moduleContext['settings']['period'])
        else:
            moduleContext['settings']['period'] = 10.0
        if 'taglist' not in moduleContext['settings']:
            moduleContext['settings']['taglist'] = '2-65-66-67-70'
def variate(oid, tag, value, **context):
    """Return (oid, tag, value) with a synthesized numeric value.

    On first call for an OID the option string in ``value`` is parsed into
    ``recordContext['settings']``: min/max bounds, a rate multiplier, an
    optional ``math`` function with '%'-separated arguments, and flags such
    as ``cumulative``, ``atime``, ``wrap``, ``scale``, ``offset`` and
    ``deviation``.  Later calls evaluate the function over elapsed (or
    absolute) time and clamp/wrap the result into [min, max].
    """
    # Only serve exact GET matches or GETNEXT traversals.
    if not context['nextFlag'] and not context['exactMatch']:
        return context['origOid'], tag, context['errorStatus']
    # SET requests are not supported for simulated values.
    if context['setFlag']:
        return context['origOid'], tag, context['errorStatus']
    if 'settings' not in recordContext:
        # One-time parse of the per-OID "k=v,k=v,..." option string.
        recordContext['settings'] = dict(
            [split(x, '=') for x in split(value, ',')])
        for k in recordContext['settings']:
            if k != 'function':
                recordContext['settings'][k] = float(recordContext['settings'][k])
        if 'min' not in recordContext['settings']:
            recordContext['settings']['min'] = 0
        if 'max' not in recordContext['settings']:
            if tag == '70':
                # Counter64 gets the full 64-bit range; others are 32-bit.
                recordContext['settings']['max'] = 0xffffffffffffffff
            else:
                recordContext['settings']['max'] = 0xffffffff
        if 'rate' not in recordContext['settings']:
            recordContext['settings']['rate'] = 1
        if 'function' in recordContext['settings']:
            # "name%arg1%arg2" -> (math.<name>, ['arg1', 'arg2'])
            f = split(recordContext['settings']['function'], '%')
            recordContext['settings']['function'] = getattr(math, f[0]), f[1:]
        else:
            # Default: identity function with no bound arguments.
            recordContext['settings']['function'] = lambda x: x, ()
    # Base value and its timestamp: the configured 'initial' (or min) as of
    # module boot, superseded by the last sample when cumulative.
    vold, told = recordContext['settings'].get(
        'initial', recordContext['settings']['min']), BOOTED
    if 'cumulative' in recordContext['settings']:
        if 'value' not in recordContext:
            recordContext['value'] = vold, told
        vold, told = recordContext['value']
    tnow = time.time()
    # 'atime' evaluates the function over absolute wall-clock time,
    # otherwise over time elapsed since module load.
    if 'atime' in recordContext['settings']:
        t = tnow
    else:
        t = tnow - BOOTED
    f, args = recordContext['settings']['function']
    _args = []
    if args:
        # Substitute '<time>' placeholders with rate-scaled time.
        for x in args:
            if x == '<time>':
                _args.append(t * recordContext['settings']['rate'])
            else:
                _args.append(float(x))
    else:
        _args.append(t * recordContext['settings']['rate'])
    v = f(*_args)
    if 'scale' in recordContext['settings']:
        v *= recordContext['settings']['scale']
    if 'offset' in recordContext['settings']:
        if 'cumulative' in recordContext['settings']:
            # For cumulative values the offset acts per elapsed second.
            rate = recordContext['settings']['rate']
            v += recordContext['settings']['offset'] * (tnow - told) * rate
        else:
            v += recordContext['settings']['offset']
    deviation = recordContext['settings'].get('deviation')
    if deviation:
        # NOTE(review): deviation was converted to float above, while
        # random.randrange expects integers on Python 3 -- confirm the
        # intended Python version / option values.
        v += random.randrange(-deviation, deviation)
    if 'cumulative' in recordContext['settings']:
        # Counters never decrease.
        v = max(v, 0)
        v += vold
    # Clamp into [min, max], optionally wrapping around like a counter.
    if v < recordContext['settings']['min']:
        v = recordContext['settings']['min']
    elif v > recordContext['settings']['max']:
        if 'wrap' in recordContext['settings']:
            v %= recordContext['settings']['max']
            v += recordContext['settings']['min']
        else:
            v = recordContext['settings']['max']
    if 'cumulative' in recordContext['settings']:
        recordContext['value'] = v, tnow
    return oid, tag, v
def record(oid, tag, value, **context):
    """Record a live value, annotating numeric OIDs for later variation.

    Single-run mode (no ``iterations`` setting): numeric OIDs are rewritten
    as ``tag:numeric`` with an ``initial=`` option (plus a default rate for
    TimeTicks/Integer); everything else passes through unchanged.

    Multi-iteration mode: the first pass stores each OID's value and
    timestamp and raises NoDataNotification; the final pass computes an
    observed per-second rate from the two samples and emits the
    ``tag:numeric`` record.  MoreDataNotification schedules the next pass.
    """
    if 'started' not in moduleContext:
        moduleContext['started'] = time.time()
    if 'iterations' not in moduleContext:
        # Capped at one extra pass regardless of the configured value.
        moduleContext['iterations'] = min(
            1, moduleContext['settings'].get('iterations', 0))
    # single-run recording
    iterations = moduleContext['settings'].get('iterations')
    if not iterations:
        if context['origValue'].tagSet not in INTEGER_TYPES:
            # Non-numeric type: pass through (preferring hex forms if given).
            if 'hextag' in context:
                tag = context['hextag']
            if 'hexvalue' in context:
                value = context['hexvalue']
            return oid, tag, value
        if ('taglist' not in moduleContext['settings'] or
                tag not in moduleContext['settings']['taglist']):
            return oid, tag, value
        value = 'initial=%s' % value
        if context['origValue'].tagSet == rfc1902.TimeTicks.tagSet:
            # TimeTicks advance 100 ticks per second.
            value += ',rate=100'
        elif context['origValue'].tagSet == rfc1902.Integer.tagSet:
            value += ',rate=0'
        return oid, tag + ':numeric', value
    # multiple-iteration recording
    if oid not in moduleContext:
        settings = {
            'initial': value
        }
        if context['origValue'].tagSet == rfc1902.TimeTicks.tagSet:
            settings['rate'] = 100
        elif context['origValue'].tagSet == rfc1902.Integer.tagSet:
            settings['rate'] = 0  # may be constants
        if 'addon' in moduleContext['settings']:
            # Extra "k=v" options configured via the 'addon' module option.
            settings.update(
                dict([split(x, '=')
                      for x in moduleContext['settings']['addon']]))
        moduleContext[oid] = {}
        moduleContext[oid]['settings'] = settings
    if moduleContext['iterations']:
        if context['stopFlag']:  # switching to final iteration
            log.info('numeric: %s iterations '
                     'remaining' % moduleContext['iterations'])
            moduleContext['iterations'] -= 1
            moduleContext['started'] = time.time()
            running = time.time() - moduleContext['started']
            wait = max(0, float(moduleContext['settings']['period']) - running)
            raise error.MoreDataNotification(period=wait)
        else:  # storing values on first iteration
            moduleContext[oid]['time'] = time.time()
            moduleContext[oid]['value'] = context['origValue']
            # NOTE(review): these membership tests look at moduleContext[oid]
            # (just created, so normally empty) rather than `context` --
            # confirm whether `context` was intended here.
            if 'hexvalue' in moduleContext[oid]:
                moduleContext[oid]['hexvalue'] = context['hexvalue']
            if 'hextag' in moduleContext[oid]:
                moduleContext[oid]['hextag'] = context['hextag']
            raise error.NoDataNotification()
    else:
        if context['stopFlag']:
            raise error.NoDataNotification()
        if 'value' in moduleContext[oid]:
            if context['origValue'].tagSet not in INTEGER_TYPES:
                if 'hextag' in moduleContext[oid]:
                    tag = moduleContext[oid]['hextag']
                if 'hexvalue' in moduleContext[oid]:
                    value = moduleContext[oid]['hexvalue']
                return oid, tag, value
            if tag not in moduleContext['settings']['taglist']:
                return oid, tag, moduleContext[oid]['value']
            # Observed per-second rate between the two recording passes.
            diff = int(context['origValue']) - int(moduleContext[oid]['value'])
            runtime = time.time() - moduleContext[oid]['time']
            moduleContext[oid]['settings']['rate'] = diff / runtime
            tag += ':numeric'
            value = ','.join(
                ['%s=%s' % (k, v)
                 for k, v in moduleContext[oid]['settings'].items()])
            return oid, tag, value
        else:
            raise error.NoDataNotification()
def shutdown(**context):
    """Module teardown hook -- the numeric module holds no resources."""
    return
|
|
import os
import glob
#####################################################
######Init the files##################################
#####################################################
# Remove output files from any previous run so the "a" (append) opens
# below start from empty files.
# NOTE(review): os.remove raises OSError if a file is missing -- this
# assumes all thirty a*/n*/v*.txt files already exist.
os.remove("a0.txt")
os.remove("a1.txt")
os.remove("a2.txt")
os.remove("a3.txt")
os.remove("a4.txt")
os.remove("a5.txt")
os.remove("a6.txt")
os.remove("a7.txt")
os.remove("a8.txt")
os.remove("a9.txt")
os.remove("n0.txt")
os.remove("n1.txt")
os.remove("n2.txt")
os.remove("n3.txt")
os.remove("n4.txt")
os.remove("n5.txt")
os.remove("n6.txt")
os.remove("n7.txt")
os.remove("n8.txt")
os.remove("n9.txt")
os.remove("v0.txt")
os.remove("v1.txt")
os.remove("v2.txt")
os.remove("v3.txt")
os.remove("v4.txt")
os.remove("v5.txt")
os.remove("v6.txt")
os.remove("v7.txt")
os.remove("v8.txt")
os.remove("v9.txt")
# Ten output shards per dataset: a* = attack, n* = normal, v* = validation.
file_a0 = open("a0.txt", "a")
file_a1 = open("a1.txt", "a")
file_a2 = open("a2.txt", "a")
file_a3 = open("a3.txt", "a")
file_a4 = open("a4.txt", "a")
file_a5 = open("a5.txt", "a")
file_a6 = open("a6.txt", "a")
file_a7 = open("a7.txt", "a")
file_a8 = open("a8.txt", "a")
file_a9 = open("a9.txt", "a")
format_a = [file_a0,file_a1,file_a2,file_a3,file_a4,file_a5,file_a6,file_a7,file_a8,file_a9]
file_n0 = open("n0.txt", "a")
file_n1 = open("n1.txt", "a")
file_n2 = open("n2.txt", "a")
file_n3 = open("n3.txt", "a")
file_n4 = open("n4.txt", "a")
file_n5 = open("n5.txt", "a")
file_n6 = open("n6.txt", "a")
file_n7 = open("n7.txt", "a")
file_n8 = open("n8.txt", "a")
file_n9 = open("n9.txt", "a")
format_n = [file_n0,file_n1,file_n2,file_n3,file_n4,file_n5,file_n6,file_n7,file_n8,file_n9]
file_v0 = open("v0.txt", "a")
file_v1 = open("v1.txt", "a")
file_v2 = open("v2.txt", "a")
file_v3 = open("v3.txt", "a")
file_v4 = open("v4.txt", "a")
file_v5 = open("v5.txt", "a")
file_v6 = open("v6.txt", "a")
file_v7 = open("v7.txt", "a")
file_v8 = open("v8.txt", "a")
file_v9 = open("v9.txt", "a")
format_v = [file_v0,file_v1,file_v2,file_v3,file_v4,file_v5,file_v6,file_v7,file_v8,file_v9]
# Input trace files, one whitespace-separated sequence per file.
the_attack_files = glob.glob("../All_Attack/*.txt")
the_normal_files = glob.glob("../Training_Data_Master/*.txt")
the_vali_files = glob.glob("../Vali_Data/*.txt")
#####################################################
########Format the files##############################
#####################################################
# Per-dataset 2D lists: one inner list of syscall tokens per input file.
attack_words = []
normal_words = []
vali_words = []
# Substitute id used by the (commented-out) "interesting call" compression
# below; 404 is outside the normal 0-403 syscall-id range.
compress = 404
#####################################################
########Read in the sequences########################
#########separate them into 2D arrays################
#####################################################
for f in the_attack_files:
    e = open(f,"r+")
    attack_words.extend([e.read().split()])
    e.close()
for f in the_normal_files:
    e = open(f,"r+")
    normal_words.extend([e.read().split()])
    e.close()
for f in the_vali_files:
    e = open(f,"r+")
    vali_words.extend([e.read().split()])
    e.close()
# Sequences per output shard (Python 2 integer division).
# NOTE(review): zero when a dataset has fewer than 10 files, which makes
# the `y % files_*` checks below raise ZeroDivisionError -- confirm inputs.
files_a = len(attack_words)/10
files_n = len(normal_words)/10
files_v = len(vali_words)/10
print("Normal Words: " + str(len(normal_words)))
print("Average normal words per formatted file: " + str(files_n))
print("Attack Words: " + str(len(attack_words)))
print("Average attack words per formatted file: " + str(files_a))
print("Validation Words: " + str(len(vali_words)))
print("Average validation words per formatted file: " + str(files_v))
# Python 2 script: raw_input / print-statement syntax below.
input_n = raw_input("Please input a value for n: ")
print("Performing formatting with " + str(input_n) + " grams...")
n = int(input_n)
# For each sequence emit sliding n-grams, one per line, labelled
# 0 (normal/validation) or 1 (attack), then a "new" sequence separator.
# Shards rotate to the next output file every files_* sequences.
y = 0
index = 0
to_write = format_n[index]
for norm in normal_words:
    for x in range(0,len(norm) - (len(norm) % n) - (n-1)):
        for form in range(0, n):
            if(form < n-1):
                to_write.write(str(norm[x+form]) + " ")
            elif(form == n-1):
                to_write.write(str(norm[x+form]) + " 0\n")
    to_write.write("new\n")
    y += 1
    if(y % files_n == 0 and index < 9):
        print( str(y) + " instances in norm_block...")
        #print("X: " + str(y))
        #print("Ending: " + str(index) + "\n Starting: " + str(index+1))
        to_write.close()
        index = index + 1
        to_write = format_n[index]
y = 0
index = 0
to_write = format_a[index]
# Frequency of each syscall id (0-404) seen in the attack sequences.
att_dict = dict()
##interest = {10,11,12,19,39,40,41,45,57,60,63,85,120,122,163,172,191,196,199,201,206,219,220,221,224,242,243,258,268,292,311}
for q in range(0,405):
    att_dict[q] = 0
print"Att_dict:\n" + (str(att_dict))
for norm in attack_words:
    for r in range(0,len(norm) - (len(norm) % n) - (n-1)):
##        if(int(norm[r]) in interest):
##            norm[r] = str(compress)
##            att_dict[r] = att_dict[r] + 1
##        if(int(norm[x]) <= 237 and int(norm[x]) >= 226):
##            print("AHAHA")
##            norm[x] = str(compress)
        for form in range(0, n):
            # NOTE(review): counts the gram's *first* token once per gram
            # position (n times per window) -- confirm that is intended.
            att_dict[int(norm[r])] += 1
            if(form < n-1):
                to_write.write(str(norm[r+form]) + " ")
            elif(form == n-1):
                to_write.write(str(norm[r+form]) + " 1\n")
    to_write.write("new\n")
    y += 1
    if(y % files_a == 0 and index < 9):
        print( str(y) + " instances in att_block...")
        #print("Ending: " + str(index) + "\n Starting: " + str(index+1))
        to_write.close()
        index = index + 1
        to_write = format_a[index]
y = 0
index = 0
to_write = format_v[index]
for norm in vali_words:
    for x in range(0,len(norm) - (len(norm) % n) - (n-1)):
        for form in range(0,n):
            if(form < n-1):
                to_write.write(str(norm[x+form]) + " ")
            elif(form == n-1):
                to_write.write(str(norm[x+form]) + " 0\n")
    to_write.write("new\n")
    y += 1
    if(y % files_v == 0 and index < 9):
        print( str(y) + " instances in vali_block...")
        #print("Ending: " + str(index) + "\n Starting: " + str(index+1))
        to_write.close()
        index = index + 1
        to_write = format_v[index]
#####################################################
########Generate the n-gram##########################
#########and write that to the file##################
#####################################################
# Legacy fixed-trigram implementation, kept for reference.
#n = 3
#for norm in normal_words:
#    for x in range(0,len(norm)-(n-1)):
#        file__.write(str(norm[x]) + " " + str(norm[x+1]) + " " + str(norm[x+2]) + " 0\n")
#for att in attack_words:
#    for x in range(0,len(att)-(n-1)):
#        file_.write(str(att[x]) + " " + str(att[x+1]) + " " + str(att[x+2]) + " 1\n")
#for vali in vali_words:
#    for x in range(0,len(vali)-(n-1)):
#        file_v.write(str(vali[x]) + " " + str(vali[x+1]) + " " + str(vali[x+2]) + " 0\n")
#        file_v.write("new\n")
# Report the rarely-seen (1..100 occurrences) syscalls in attack data.
print("Here is the attack distribution:\n")
for t in range(0,405):
    if(int(att_dict[t]) <= 100 and int(att_dict[t]) != 0):
        print("SYS_CALL: " + str(t) + " FREQ: " + str(att_dict[t]))
print("Data Formatted...")
|
|
"""Style settings panel."""
from __future__ import unicode_literals
import wx
from . import gui
from . import color_setting_dialog
from . import grid_helper
from . import settings_codes as sc
from ..rgba import RGBA
from ..x11colors import name2hex
from .. import util
class StyleSettings(gui.StyleSettingsPanel, grid_helper.GridHelper):
    """Style settings handler.

    Grid-based editor for a color scheme's per-scope style rules (the
    ``settings`` entries carrying a ``name`` key).  Keeps each grid row's
    colors and fonts in sync with its rule and forwards every mutation to
    the scheme through the ``update_plist`` callback.
    """

    def __init__(self, parent, scheme, update):
        """Initialize.

        ``scheme`` is the parsed color-scheme plist; ``update`` is the
        callback invoked with (action_code, payload) on every change.
        """
        super(StyleSettings, self).__init__(parent)
        if util.platform() == "windows":
            self.SetDoubleBuffered(False)
        self.diag = None
        self.setup_keybindings()
        self.parent = parent
        self.m_plist_grid.GetGridWindow().Bind(wx.EVT_MOTION, self.on_mouse_motion)
        self.m_plist_grid.SetDefaultCellBackgroundColour(self.GetBackgroundColour())
        self.read_plist(scheme)
        self.update_plist = update

    def read_plist(self, scheme):
        """Read the plist and populate one grid row per named style rule."""
        # The scheme's global foreground/background are the defaults that
        # each rule's (possibly translucent) colors are composited against.
        color = scheme["settings"][0]["settings"].get("foreground", "#000000").strip()
        if not color.startswith('#'):
            color = name2hex(color)
        foreground = RGBA(color)
        color = scheme["settings"][0]["settings"].get("background", "#FFFFFF").strip()
        if not color.startswith('#'):
            color = name2hex(color)
        background = RGBA(color)
        self.bg_color = background
        self.fg_color = foreground
        count = 0
        for s in scheme["settings"]:
            if "name" in s:
                self.m_plist_grid.AppendRows(1)
                self.update_row(count, s)
                count += 1
        self.resize_table()
        self.go_cell(self.m_plist_grid, 0, 0)

    def update_row(self, count, s):
        """Update style row ``count`` from rule dict ``s``."""
        self.m_plist_grid.SetCellValue(count, 0, s["name"])
        self.m_plist_grid.SetCellValue(count, 4, s.get("scope", ""))
        settings = s["settings"]
        b = self.m_plist_grid.GetCellBackgroundColour(count, 0)
        if "background" in settings:
            try:
                named_color = name2hex(settings["background"].strip())
                color = named_color if named_color is not None else settings["background"].strip()
                bg = RGBA(color)
                bg.apply_alpha(self.bg_color.get_rgb())
                self.m_plist_grid.SetCellValue(count, 2, color)
            except Exception:
                # Unparsable color: fall back to the scheme background.
                bg = self.bg_color
                self.m_plist_grid.SetCellValue(count, 2, "")
        else:
            bg = self.bg_color
        b = self.m_plist_grid.GetCellBackgroundColour(count, 0)
        b.Set(bg.r, bg.g, bg.b)
        self.m_plist_grid.SetCellBackgroundColour(count, 0, b)
        self.m_plist_grid.SetCellBackgroundColour(count, 1, b)
        self.m_plist_grid.SetCellBackgroundColour(count, 2, b)
        self.m_plist_grid.SetCellBackgroundColour(count, 3, b)
        self.m_plist_grid.SetCellBackgroundColour(count, 4, b)
        if "foreground" in settings:
            try:
                named_color = name2hex(settings["foreground"].strip())
                color = named_color if named_color is not None else settings["foreground"].strip()
                fg = RGBA(color)
                fg.apply_alpha(self.bg_color.get_rgb())
                self.m_plist_grid.SetCellValue(count, 1, color)
            except Exception:
                # Unparsable color: fall back to the scheme foreground.
                fg = self.fg_color
                self.m_plist_grid.SetCellValue(count, 1, "")
        else:
            fg = self.fg_color
        f = self.m_plist_grid.GetCellTextColour(count, 0)
        f.Set(fg.r, fg.g, fg.b)
        self.m_plist_grid.SetCellTextColour(count, 0, f)
        self.m_plist_grid.SetCellTextColour(count, 1, f)
        self.m_plist_grid.SetCellTextColour(count, 2, f)
        self.m_plist_grid.SetCellTextColour(count, 3, f)
        self.m_plist_grid.SetCellTextColour(count, 4, f)
        # Only recognized font-style tokens are kept.
        fs_setting = settings.get("fontStyle", "")
        font_style = []
        for x in fs_setting.split(" "):
            if x in ["bold", "italic", "underline"]:
                font_style.append(x)
        self.m_plist_grid.SetCellValue(count, 3, " ".join(font_style))
        fs = self.m_plist_grid.GetCellFont(count, 0)
        fs.SetWeight(wx.FONTWEIGHT_NORMAL)
        fs.SetStyle(wx.FONTSTYLE_NORMAL)
        fs.SetUnderlined(False)
        if "bold" in font_style:
            fs.SetWeight(wx.FONTWEIGHT_BOLD)
        if "italic" in font_style:
            fs.SetStyle(wx.FONTSTYLE_ITALIC)
        if "underline" in font_style:
            fs.SetUnderlined(True)
        self.m_plist_grid.SetCellFont(count, 0, fs)
        self.m_plist_grid.SetCellFont(count, 1, fs)
        self.m_plist_grid.SetCellFont(count, 2, fs)
        self.m_plist_grid.SetCellFont(count, 3, fs)
        self.m_plist_grid.SetCellFont(count, 4, fs)

    def resize_table(self):
        """Resize the table, giving leftover width to the scope column."""
        self.m_plist_grid.BeginBatch()
        nb_size = self.parent.GetSize()
        total_size = 0
        for x in range(0, 5):
            self.m_plist_grid.AutoSizeColumn(x)
            total_size += self.m_plist_grid.GetColSize(x)
        delta = nb_size[0] - 20 - total_size
        if delta > 0:
            self.m_plist_grid.SetColSize(4, self.m_plist_grid.GetColSize(4) + delta)
        self.m_plist_grid.EndBatch()

    def set_object(self, obj):
        """Apply rule dict ``obj`` to the current row and notify the scheme."""
        row = self.m_plist_grid.GetGridCursorRow()
        self.update_row(row, obj)
        self.update_plist(sc.MODIFY, {"table": "style", "index": row, "data": obj})
        self.resize_table()

    def edit_cell(self):
        """Handle editing the cell via the color editor dialog."""
        grid = self.m_plist_grid
        row = grid.GetGridCursorRow()
        editor = self.GetParent().GetParent().GetParent()
        self.diag = color_setting_dialog.ColorEditor(
            editor,
            {
                "name": grid.GetCellValue(row, 0),
                "scope": grid.GetCellValue(row, 4),
                "settings": {
                    "foreground": grid.GetCellValue(row, 1),
                    "background": grid.GetCellValue(row, 2),
                    "fontStyle": grid.GetCellValue(row, 3)
                }
            }
        )
        self.diag.ShowModal()
        self.diag.Destroy()
        self.diag = None

    def delete_row(self):
        """Handle row delete."""
        row = self.m_plist_grid.GetGridCursorRow()
        self.m_plist_grid.DeleteRows(row, 1)
        # NOTE(review): notifies via GetParent().update_plist, unlike
        # set_object which uses self.update_plist -- confirm both routes
        # reach the same handler.
        self.m_plist_grid.GetParent().update_plist(sc.DELETE, {"table": "style", "index": row})

    def insert_row(self):
        """Handle inserting a new rule via the color editor dialog."""
        obj = {
            "name": "New Item",
            "scope": "comment",
            "settings": {
                "foreground": "#FFFFFF",
                "background": "#000000",
                "fontStyle": ""
            }
        }
        editor = self.GetParent().GetParent().GetParent()
        self.diag = color_setting_dialog.ColorEditor(
            editor,
            obj,
            insert=True
        )
        self.diag.ShowModal()
        self.diag.Destroy()
        self.diag = None

    def row_up(self):
        """Move the current row up one slot, carrying values and styling."""
        grid = self.m_plist_grid
        row = grid.GetGridCursorRow()
        col = grid.GetGridCursorCol()
        if row > 0:
            # Snapshot the row, recreate it one slot higher, restore styling.
            text = [grid.GetCellValue(row, x) for x in range(0, 5)]
            bg = [grid.GetCellBackgroundColour(row, x) for x in range(0, 5)]
            fg = [grid.GetCellTextColour(row, x) for x in range(0, 5)]
            font = [grid.GetCellFont(row, x) for x in range(0, 5)]
            grid.DeleteRows(row, 1, False)
            grid.InsertRows(row - 1, 1, True)
            for x in range(0, 5):
                grid.SetCellValue(row - 1, x, text[x])
                grid.SetCellBackgroundColour(row - 1, x, bg[x])
                grid.SetCellTextColour(row - 1, x, fg[x])
                grid.SetCellFont(row - 1, x, font[x])
            self.go_cell(grid, row - 1, col, True)
            grid.GetParent().update_plist(sc.MOVE, {"from": row, "to": row - 1})
        grid.SetFocus()

    def row_down(self):
        """Move the current row down one slot, carrying values and styling."""
        grid = self.m_plist_grid
        row = grid.GetGridCursorRow()
        col = grid.GetGridCursorCol()
        if row < grid.GetNumberRows() - 1:
            # Snapshot the row, recreate it one slot lower, restore styling.
            text = [grid.GetCellValue(row, x) for x in range(0, 5)]
            bg = [grid.GetCellBackgroundColour(row, x) for x in range(0, 5)]
            fg = [grid.GetCellTextColour(row, x) for x in range(0, 5)]
            font = [grid.GetCellFont(row, x) for x in range(0, 5)]
            grid.DeleteRows(row, 1, False)
            grid.InsertRows(row + 1, 1, True)
            for x in range(0, 5):
                grid.SetCellValue(row + 1, x, text[x])
                grid.SetCellBackgroundColour(row + 1, x, bg[x])
                grid.SetCellTextColour(row + 1, x, fg[x])
                grid.SetCellFont(row + 1, x, font[x])
            self.go_cell(grid, row + 1, col, True)
            grid.GetParent().update_plist(sc.MOVE, {"from": row, "to": row + 1})
        grid.SetFocus()

    def is_fontstyle_cell(self):
        """Check if the cursor is on the fontstyle (fourth) column."""
        return self.m_plist_grid.GetGridCursorCol() == 3

    def toggle_font_style(self, row, attr):
        """Toggle font-style token ``attr`` on ``row`` and push the change."""
        grid = self.m_plist_grid
        text = [grid.GetCellValue(row, x) for x in range(0, 5)]
        style = text[3].split(" ")
        try:
            idx = style.index(attr)
            del style[idx]
        except ValueError:
            # Token not present: turn the style on.
            style.append(attr)
        text[3] = " ".join(style)
        obj = {
            "name": text[0],
            "scope": text[4],
            "settings": {
                "foreground": text[1],
                "background": text[2],
                "fontStyle": text[3]
            }
        }
        grid.GetParent().update_row(row, obj)
        self.update_plist(sc.MODIFY, {"table": "style", "index": row, "data": obj})
        self.resize_table()

    def toggle_bold(self):
        """Toggle bold."""
        self.toggle_font_style(self.m_plist_grid.GetGridCursorRow(), "bold")

    def toggle_italic(self):
        """Toggle italic."""
        self.toggle_font_style(self.m_plist_grid.GetGridCursorRow(), "italic")

    def toggle_underline(self):
        """Toggle underline."""
        self.toggle_font_style(self.m_plist_grid.GetGridCursorRow(), "underline")

    def on_mouse_motion(self, event):
        """Handle mouse motion event."""
        self.mouse_motion(event)

    def on_edit_cell(self, event):
        """Handle editing cell event."""
        self.edit_cell()

    def on_grid_key_down(self, event):
        """Handle key down event on grid."""
        self.grid_key_down(event)

    def on_grid_select_cell(self, event):
        """Handle grid select event."""
        self.grid_select_cell(event)

    def on_row_up_click(self, event):
        """Handle row up click."""
        self.row_up()

    def on_row_down_click(self, event):
        """Handle row down click."""
        self.row_down()

    def on_row_add_click(self, event):
        """Handle row add click."""
        self.insert_row()

    def on_row_delete_click(self, event):
        """Handle row delete click."""
        self.delete_row()

    def on_grid_label_left_click(self, event):
        """Handle grid label left click (no-op)."""
        return
|
|
# A simple DOC file parser based on pyole
import os
import struct
import logging
import datetime
from pyole import *
class FIBBase(OLEBase):
    """Parser for the fixed 32-byte FibBase portion of a Word binary
    document's File Information Block.

    Decodes the magic/version fields and both flag bytes into individual
    attributes, logging every field and raising (via ``_raise_exception``)
    on values the format fixes to constants.
    """

    # Parsed-field defaults; shadowed per instance in __init__.
    wIdent = 0
    nFib = 0
    unused = 0
    lid = 0
    pnNext = 0
    Flags1 = 0
    fDot = 0
    fGlsy = 0
    fComplex = 0
    fHasPic = 0
    cQuickSaves = 0
    fEncrypted = 0
    fWhichTblStm = 0
    fReadOnlyRecommended = 0
    fWriteReservation = 0
    fExtChar = 0
    fLoadOverride = 0
    fFarEast = 0
    fObfuscated = 0
    nFibBack = 0
    lKey = 0
    envr = 0
    Flag2 = 0
    fMac = 0
    fEmptySpecial = 0
    fLoadOverridePage = 0
    reserved1 = 0
    reserved2 = 0
    fSpare0 = 0
    reserved3 = 0
    reserved4 = 0
    reserved5 = 0
    reserved6 = 0

    def __init__(self, data):
        """Parse ``data``, the first 0x20 bytes of the WordDocument stream."""
        # Reset instance state.
        # NOTE(review): self.lid is not re-initialized here, unlike the
        # other fields -- harmless since the class default is 0.
        self.wIdent = 0
        self.nFib = 0
        self.unused = 0
        self.pnNext = 0
        self.Flags1 = 0
        self.fDot = 0
        self.fGlsy = 0
        self.fComplex = 0
        self.fHasPic = 0
        self.cQuickSaves = 0
        self.fEncrypted = 0
        self.fWhichTblStm = 0
        self.fReadOnlyRecommended = 0
        self.fWriteReservation = 0
        self.fExtChar = 0
        self.fLoadOverride = 0
        self.fFarEast = 0
        self.fObfuscated = 0
        self.nFibBack = 0
        self.lKey = 0
        self.envr = 0
        self.Flag2 = 0
        self.fMac = 0
        self.fEmptySpecial = 0
        self.fLoadOverridePage = 0
        self.reserved1 = 0
        self.reserved2 = 0
        self.fSpare0 = 0
        self.reserved3 = 0
        self.reserved4 = 0
        self.reserved5 = 0
        self.reserved6 = 0
        # Magic number: must be 0xA5EC for a Word binary file.
        self.wIdent = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('DOC.FIB.FIBBase.wIdent: ' + str(hex(self.wIdent)))
        if self.wIdent != 0xA5EC:
            self._raise_exception('DOC.FIB.FIBBase.wIdent has an abnormal value.')
        # File-format version number.
        self.nFib = struct.unpack('<H', data[0x02:0x04])[0]
        self.ole_logger.debug('DOC.FIB.FIBBase.nFib: ' + str(hex(self.nFib)))
        if self.nFib != 0x00C1:
            self._raise_exception('DOC.FIB.FIBBase.nFib has an abnormal value.')
        self.unused = struct.unpack('<H', data[0x04:0x06])[0]
        self.ole_logger.debug('DOC.FIB.FIBBase.unused: ' + str(hex(self.unused)))
        #if self.unused != 0:
        #    self.ole_logger.warning('DOC.FIB.FIBBase.unused is not zero.')
        # Language identifier.
        self.lid = struct.unpack('<H', data[0x06:0x08])[0]
        self.ole_logger.debug('DOC.FIB.FIBBase.lid: ' + str(hex(self.lid)))
        self.pnNext = struct.unpack('<H', data[0x08:0x0A])[0]
        self.ole_logger.debug('DOC.FIB.FIBBase.pnNext: ' + str(hex(self.pnNext)))
        if self.pnNext != 0:
            self.ole_logger.warning('DOC.FIB.FIBBase.pnNext is not zero.')
        # First flag word: unpack each bit/bit-field into its own attribute.
        self.Flags1 = struct.unpack('<H', data[0x0A:0x0C])[0]
        self.fDot = self.Flags1 & 0x0001
        self.ole_logger.debug('DOC.FIB.FIBBase.fDot: ' + str(self.fDot))
        self.fGlsy = (self.Flags1 & 0x0002) >> 1
        self.ole_logger.debug('DOC.FIB.FIBBase.fGlsy: ' + str(self.fGlsy))
        self.fComplex = (self.Flags1 & 0x0004) >> 2
        self.ole_logger.debug('DOC.FIB.FIBBase.fComplex: ' + str(self.fComplex))
        self.fHasPic = (self.Flags1 & 0x0008) >> 3
        self.ole_logger.debug('DOC.FIB.FIBBase.fHasPic: ' + str(self.fHasPic))
        # 4-bit field.
        self.cQuickSaves = (self.Flags1 & 0x00F0) >> 4
        self.ole_logger.debug('DOC.FIB.FIBBase.cQuickSaves: ' + str(self.cQuickSaves))
        self.fEncrypted = (self.Flags1 & 0x0100) >> 8
        self.ole_logger.debug('DOC.FIB.FIBBase.fEncrypted: ' + str(self.fEncrypted))
        if self.fEncrypted == 1:
            self.ole_logger.warning('File is encrypted.')
        self.fWhichTblStm = (self.Flags1 & 0x0200) >> 9
        self.ole_logger.debug('DOC.FIB.FIBBase.fWhichTblStm: ' + str(self.fWhichTblStm))
        self.fReadOnlyRecommended = (self.Flags1 & 0x0400) >> 10
        self.ole_logger.debug('DOC.FIB.FIBBase.fReadOnlyRecommended: ' + str(self.fReadOnlyRecommended))
        self.fWriteReservation = (self.Flags1 & 0x0800) >> 11
        self.ole_logger.debug('DOC.FIB.FIBBase.fWriteReservation: ' + str(self.fWriteReservation))
        self.fExtChar = (self.Flags1 & 0x1000) >> 12
        self.ole_logger.debug('DOC.FIB.FIBBase.fExtChar: ' + str(self.fExtChar))
        # fExtChar is required to be 1.
        if (self.Flags1 & 0x1000) >> 12 != 1:
            self._raise_exception('DOC.FIB.FIBBase.fExtChar has an abnormal value.')
        self.fLoadOverride = (self.Flags1 & 0x2000) >> 13
        self.ole_logger.debug('DOC.FIB.FIBBase.fLoadOverride: ' + str(self.fLoadOverride))
        self.fFarEast = (self.Flags1 & 0x4000) >> 14
        self.ole_logger.debug('DOC.FIB.FIBBase.fFarEast: ' + str(self.fFarEast))
        if self.fFarEast == 1:
            self.ole_logger.warning('The installation language of the application that created the document was an East Asian language.')
        self.fObfuscated = (self.Flags1 & 0x8000) >> 15
        self.ole_logger.debug('DOC.FIB.FIBBase.fObfuscated: ' + str(self.fObfuscated))
        # Obfuscation flag is only meaningful when the file is encrypted.
        if self.fObfuscated == 1:
            if self.fEncrypted == 1:
                self.ole_logger.warning('File is obfuscated by using XOR obfuscation.')
        self.nFibBack = struct.unpack('<H', data[0x0C:0x0E])[0]
        self.ole_logger.debug('DOC.FIB.FIBBase.nFibBack: ' + str(hex(self.nFibBack)))
        if self.nFibBack != 0x00BF and self.nFibBack != 0x00C1:
            self._raise_exception('DOC.FIB.FIBBase.nFibBack has an abnormal value.')
        # lKey carries the XOR key when obfuscated; otherwise must be 0.
        self.lKey = struct.unpack('<I', data[0x0E:0x12])[0]
        self.ole_logger.debug('DOC.FIB.FIBBase.lKey: ' + str(hex(self.lKey)))
        if self.fEncrypted == 1:
            if self.fObfuscated == 1:
                self.ole_logger.info('The XOR obfuscation key is: ' + str(hex(self.lKey)))
        else:
            if self.lKey != 0:
                self._raise_exception('DOC.FIB.FIBBase.lKey has an abnormal value.')
        # NOTE(review): ord() on an indexed byte implies Python 2 `str`
        # data; on Python 3 bytes this would raise TypeError.
        self.envr = ord(data[0x12])
        self.ole_logger.debug('DOC.FIB.FIBBase.envr: ' + str(hex(self.envr)))
        if self.envr != 0:
            self._raise_exception('DOC.FIB.FIBBase.envr has an abnormal value.')
        # Second flag byte.
        self.Flag2 = ord(data[0x13])
        self.fMac = self.Flag2 & 0x01
        self.ole_logger.debug('DOC.FIB.FIBBase.fMac: ' + str(hex(self.fMac)))
        if self.fMac != 0:
            self._raise_exception('DOC.FIB.FIBBase.fMac has an abnormal value.')
        self.fEmptySpecial = (self.Flag2 & 0x02) >> 1
        self.ole_logger.debug('DOC.FIB.FIBBase.fEmptySpecial: ' + str(hex(self.fEmptySpecial)))
        if self.fEmptySpecial != 0:
            self.ole_logger.warning('DOC.FIB.FIBBase.fEmptySpecial is not zero.')
        self.fLoadOverridePage = (self.Flag2 & 0x04) >> 2
        self.ole_logger.debug('DOC.FIB.FIBBase.fLoadOverridePage: ' + str(hex(self.fLoadOverridePage)))
        self.reserved1 = (self.Flag2 & 0x08) >> 3
        self.ole_logger.debug('DOC.FIB.FIBBase.reserved1: ' + str(hex(self.reserved1)))
        self.reserved2 = (self.Flag2 & 0x10) >> 4
        self.ole_logger.debug('DOC.FIB.FIBBase.reserved2: ' + str(hex(self.reserved2)))
        # 3-bit field.
        self.fSpare0 = (self.Flag2 & 0xE0) >> 5
        self.ole_logger.debug('DOC.FIB.FIBBase.fSpare0: ' + str(hex(self.fSpare0)))
        self.reserved3 = struct.unpack('<H', data[0x14:0x16])[0]
        self.ole_logger.debug('DOC.FIB.FIBBase.reserved3: ' + str(hex(self.reserved3)))
        self.reserved4 = struct.unpack('<H', data[0x16:0x18])[0]
        self.ole_logger.debug('DOC.FIB.FIBBase.reserved4: ' + str(hex(self.reserved4)))
        self.reserved5 = struct.unpack('<I', data[0x18:0x1C])[0]
        self.ole_logger.debug('DOC.FIB.FIBBase.reserved5: ' + str(hex(self.reserved5)))
        self.reserved6 = struct.unpack('<I', data[0x1C:0x20])[0]
        self.ole_logger.debug('DOC.FIB.FIBBase.reserved6: ' + str(hex(self.reserved6)))
class FibRgFcLcb(OLEBase):
    """Selected (fc, lcb) fields pulled out of the FIB's FibRgFcLcb blob.

    Only the fields this tool needs are decoded; offsets are relative to
    the start of the WordDocument stream data passed in (the blob slice
    starts at stream offset 0x9A, so 0x100 here is 0x66 into the blob).
    """

    fcSttbfAssoc = 0
    lcbSttbfAssoc = 0
    fcSttbfRMark = 0
    lcbSttbfRMark = 0
    fcSttbSavedBy = 0
    lcbSttbSavedBy = 0
    dwLowDateTime = 0
    dwHighDateTime = 0

    def __init__(self, data):
        # Each entry is (attribute name, byte offset); every field is a
        # little-endian unsigned 32-bit value.
        layout = (
            ('fcSttbfAssoc', 0x100),
            ('lcbSttbfAssoc', 0x104),
            ('fcSttbfRMark', 0x198),
            ('lcbSttbfRMark', 0x19C),
            ('fcSttbSavedBy', 0x238),
            ('lcbSttbSavedBy', 0x23C),
            ('dwLowDateTime', 0x2B8),
            ('dwHighDateTime', 0x2BC),
        )
        for field, start in layout:
            value = struct.unpack('<I', data[start:start + 0x04])[0]
            setattr(self, field, value)
            self.ole_logger.debug('DOC.FIB.FibRgFcLcb.' + field + ': ' + str(hex(value)))
class FIB(OLEBase):
    """Parser for the File Information Block at the start of the
    WordDocument stream: fixed 32-byte FibBase, then the counted
    variable-length arrays (fibRgW, fibRgLw, fibRgFcLcb blob, cswNew).
    """

    FIBBase = None
    csw = 0
    fibRgW = ''
    cslw = 0
    fibRgLw = ''
    cbRgFcLcb = 0
    fibRgFcLcbBlob = ''
    cswNew = 0

    def __init__(self, data):
        # Reset instance state before parsing.
        self.FIBBase = None
        self.csw = 0
        self.fibRgW = ''
        self.cslw = 0
        self.fibRgLw = ''
        self.cbRgFcLcb = 0
        self.fibRgFcLcbBlob = ''
        self.cswNew = 0

        self.ole_logger.debug('######## FIB ########')

        def u16(off):
            # Little-endian unsigned 16-bit read at 'off' within 'data'.
            return struct.unpack('<H', data[off:off + 0x02])[0]

        # Fixed 32-byte FibBase header.
        self.FIBBase = FIBBase(data[0:0x20])

        # csw: count of 16-bit fields in fibRgW; must be 0x000E.
        self.csw = u16(0x20)
        self.ole_logger.debug('DOC.FIB.csw: ' + str(hex(self.csw)))
        if self.csw != 0x000E:
            self._raise_exception('DOC.FIB.csw has an abnormal value.')

        self.fibRgW = data[0x22:0x3E]

        # cslw: count of 32-bit fields in fibRgLw; must be 0x0016.
        self.cslw = u16(0x3E)
        self.ole_logger.debug('DOC.FIB.cslw: ' + str(hex(self.cslw)))
        if self.cslw != 0x0016:
            self._raise_exception('DOC.FIB.cslw has an abnormal value.')

        self.fibRgLw = data[0x40:0x98]

        # cbRgFcLcb: number of 8-byte (fc, lcb) pairs that follow.
        self.cbRgFcLcb = u16(0x98)
        self.ole_logger.debug('DOC.FIB.cbRgFcLcb: ' + str(hex(self.cbRgFcLcb)))
        # NOTE(review): per-version size validation intentionally disabled
        # (was commented out in the original); expected sizes by nFib:
        # 0x00C1 -> 0x005D, 0x00D9 -> 0x006C, 0x0101 -> 0x0088,
        # 0x010C -> 0x00A4, 0x0112 -> 0x00B7.

        blob_end = 0x9A + self.cbRgFcLcb * 8
        self.fibRgFcLcbBlob = FibRgFcLcb(data[0x9A:blob_end])

        # cswNew immediately follows the FibRgFcLcb blob.
        self.cswNew = u16(blob_end)
        self.ole_logger.debug('DOC.FIB.cswNew: ' + str(hex(self.cswNew)))
class DOCFile(OLEBase):
    """Top-level parser for a Word (.doc) binary file.

    Opens the OLE compound file, parses the FIB out of the WordDocument
    stream, and exposes helpers that walk related structures.
    """

    OLE = None
    FIB = None
    SummaryInfo = None
    DocumentSummaryInfo = None

    def __init__(self, filename):
        self.OLE = None
        self.FIB = None
        self.SummaryInfo = None
        self.DocumentSummaryInfo = None

        if not os.path.isfile(filename):
            self._raise_exception('Invalid file: ' + filename)

        self.OLE = OLEFile(filename)

        self.ole_logger.debug('***** Parse Word Document *****')
        self.FIB = FIB(self.OLE.find_object_by_name('WordDocument'))

    def show_rmark_authors(self):
        """Print the revision-mark / comment author names (SttbfRMark).

        The SttbfRMark string table lives in the table stream selected by
        FIB.FIBBase.fWhichTblStm (1 -> '1Table', 0 -> '0Table').  Prints a
        diagnostic message and returns early on any abnormal value.
        """
        if self.FIB.fibRgFcLcbBlob.fcSttbfRMark == 0:
            print('No revision marks or comments author information.')
            return

        # fWhichTblStm is a one-bit flag selecting the table stream.
        if self.FIB.FIBBase.fWhichTblStm == 1:
            table_stream = self.OLE.find_object_by_name('1Table')
        elif self.FIB.FIBBase.fWhichTblStm == 0:
            # BUGFIX: this branch previously re-tested "== 1" (copy-paste
            # error), making it unreachable, so documents whose table data
            # lives in '0Table' were wrongly reported as abnormal.
            table_stream = self.OLE.find_object_by_name('0Table')
        else:
            print('DOC.FIB.FIBBase.fWhichTblStm has an abnormal value.')
            return

        if len(table_stream) == 0:
            print('Failed to read the Table Stream.')
            return

        offset = self.FIB.fibRgFcLcbBlob.fcSttbfRMark
        length = self.FIB.fibRgFcLcbBlob.lcbSttbfRMark
        SttbfRMark = table_stream[offset:offset + length]

        # An extended STTB starts with fExtend == 0xFFFF (UTF-16 strings).
        fExtend = struct.unpack('<H', SttbfRMark[0x00:0x02])[0]
        if fExtend != 0xFFFF:
            print('fExtend has an abnormal value.')
            return

        # SttbfRMark carries no per-string extra data.
        cbExtra = struct.unpack('<H', SttbfRMark[0x04:0x06])[0]
        if cbExtra != 0:
            print('cbExtra has an abnormal value.')
            return

        # cData: number of author-name strings that follow; each entry is
        # a 16-bit character count followed by that many UTF-16 code units.
        cData = struct.unpack('<H', SttbfRMark[0x02:0x04])[0]
        offset = 0
        for i in range(0, cData):
            cchData = struct.unpack('<H', SttbfRMark[0x06 + offset:0x08 + offset])[0]
            Data = SttbfRMark[0x08 + offset:0x08 + offset + cchData * 2]
            print(Data.decode('utf-16'))
            offset = offset + 0x02 + cchData * 2
# Script entry point: parse a .doc file and dump revision-mark authors.
if __name__ == '__main__':
    import sys

    init_logging(True)
    try:
        # Accept an optional document path on the command line; default to
        # the historical sample name so existing invocations keep working.
        target = sys.argv[1] if len(sys.argv) > 1 else 'oletest.doc'
        docfile = DOCFile(target)
        docfile.show_rmark_authors()
    except Exception as e:
        # Top-level boundary: report the failure instead of a traceback.
        print(e)
# NOTE(review): removed trailing non-code residue ("Subsets and Splits"
# dataset-viewer boilerplate and a stray '|') accidentally appended to this
# file; it was not part of the source and was a Python syntax error.