CombinedText stringlengths 4 3.42M |
|---|
#!/usr/bin/env python
"""Base class for all FAUCET unit tests."""
# pylint: disable=missing-docstring
# pylint: disable=too-many-arguments
import collections
import glob
import ipaddress
import json
import os
import random
import re
import shutil
import subprocess
import time
import unittest
import yaml
import requests
from requests.exceptions import ConnectionError
# pylint: disable=import-error
from mininet.log import error, output
from mininet.net import Mininet
from mininet.node import Intf
from mininet.util import dumpNodeConnections, pmonitor
from ryu.ofproto import ofproto_v1_3 as ofp
import faucet_mininet_test_util
import faucet_mininet_test_topo
class FaucetTestBase(unittest.TestCase):
"""Base class for all FAUCET unit tests."""
# Expected ping output for exactly one successful echo.
ONE_GOOD_PING = '1 packets transmitted, 1 received, 0% packet loss'
# Virtual IPs FAUCET answers on when acting as a router in tests.
FAUCET_VIPV4 = ipaddress.ip_interface(u'10.0.0.254/24')
FAUCET_VIPV4_2 = ipaddress.ip_interface(u'172.16.0.254/24')
FAUCET_VIPV6 = ipaddress.ip_interface(u'fc00::1:254/64')
FAUCET_VIPV6_2 = ipaddress.ip_interface(u'fc01::1:254/64')
# ovs-ofctl base command, pinned to OpenFlow 1.3.
OFCTL = 'ovs-ofctl -OOpenFlow13'
# MAC that should never be legitimately learned.
BOGUS_MAC = '01:02:03:04:05:06'
FAUCET_MAC = '0e:00:00:00:00:01'
# ladvd base command used to emit LLDP/CDP test frames.
LADVD = 'ladvd -e lo -f'
ONEMBPS = (1024 * 1024)
DB_TIMEOUT = 5
# Config templates - overridden by concrete test subclasses.
CONFIG = ''
CONFIG_GLOBAL = ''
GAUGE_CONFIG_DBS = ''
N_UNTAGGED = 0
N_TAGGED = 0
NUM_DPS = 1
LINKS_PER_HOST = 1
RUN_GAUGE = True
REQUIRES_METERS = False
# FAUCET pipeline table IDs; must agree with FAUCET's table layout.
PORT_ACL_TABLE = 0
VLAN_TABLE = 1
VLAN_ACL_TABLE = 2
ETH_SRC_TABLE = 3
IPV4_FIB_TABLE = 4
IPV6_FIB_TABLE = 5
VIP_TABLE = 6
FLOOD_TABLE = 8
ETH_DST_TABLE = 7
# Per-instance state, populated by setUp()/start_net().
config = None
dpid = None
hardware = 'Open vSwitch'
hw_switch = False
gauge_controller = None
gauge_of_port = None
prom_port = None
net = None
of_port = None
ctl_privkey = None
ctl_cert = None
ca_certs = None
# Map of logical test port names to OpenFlow port numbers.
port_map = {'port_1': 1, 'port_2': 2, 'port_3': 3, 'port_4': 4}
switch_map = {}
tmpdir = None
net = None
topo = None
cpn_intf = None
# Dynamically allocated ports, interpolated into config templates.
config_ports = {}
# Per-controller environment variables (controller name -> var -> value).
env = collections.defaultdict(dict)
# DPIDs already handed out by rand_dpid(), to avoid collisions.
rand_dpids = set()
def __init__(self, name, config, root_tmpdir, ports_sock, max_test_load):
"""Store per-run harness parameters for this test instance."""
super(FaucetTestBase, self).__init__(name)
self.config = config
self.root_tmpdir = root_tmpdir
self.ports_sock = ports_sock
self.max_test_load = max_test_load
def rand_dpid(self):
"""Return a unique random DPID as a decimal string."""
reserved_range = 100
while True:
dpid = random.randint(1, (2**32 - reserved_range)) + reserved_range
if dpid not in self.rand_dpids:
self.rand_dpids.add(dpid)
return str(dpid)
def _set_var(self, controller, var, value):
"""Set an environment variable for the named controller."""
self.env[controller][var] = value
def _set_var_path(self, controller, var, path):
"""Set a controller env var to a path under the test tmpdir."""
self._set_var(controller, var, os.path.join(self.tmpdir, path))
def _set_prom_port(self, name='faucet'):
"""Point the controller's Prometheus exporter at the allocated port."""
self._set_var(name, 'FAUCET_PROMETHEUS_PORT', str(self.prom_port))
self._set_var(name, 'FAUCET_PROMETHEUS_ADDR', faucet_mininet_test_util.LOCALHOST)
def _set_static_vars(self):
"""Set env vars and file paths that do not depend on allocated ports."""
self._set_var_path('faucet', 'FAUCET_CONFIG', 'faucet.yaml')
self._set_var_path('faucet', 'FAUCET_LOG', 'faucet.log')
self._set_var_path('faucet', 'FAUCET_EXCEPTION_LOG', 'faucet-exception.log')
self._set_var_path('gauge', 'GAUGE_CONFIG', 'gauge.yaml')
self._set_var_path('gauge', 'GAUGE_LOG', 'gauge.log')
self._set_var_path('gauge', 'GAUGE_EXCEPTION_LOG', 'gauge-exception.log')
self.faucet_config_path = self.env['faucet']['FAUCET_CONFIG']
self.gauge_config_path = self.env['gauge']['GAUGE_CONFIG']
self.debug_log_path = os.path.join(
self.tmpdir, 'ofchannel.txt')
self.monitor_stats_file = os.path.join(
self.tmpdir, 'ports.txt')
self.monitor_state_file = os.path.join(
self.tmpdir, 'state.txt')
self.monitor_flow_table_file = os.path.join(
self.tmpdir, 'flow.txt')
if self.config is not None:
if 'hw_switch' in self.config:
self.hw_switch = self.config['hw_switch']
if self.hw_switch:
# Hardware switch: DPID/ports come from the harness config file.
self.dpid = self.config['dpid']
self.cpn_intf = self.config['cpn_intf']
self.hardware = self.config['hardware']
if 'ctl_privkey' in self.config:
self.ctl_privkey = self.config['ctl_privkey']
if 'ctl_cert' in self.config:
self.ctl_cert = self.config['ctl_cert']
if 'ca_certs' in self.config:
self.ca_certs = self.config['ca_certs']
dp_ports = self.config['dp_ports']
self.port_map = {}
self.switch_map = {}
for i, switch_port in enumerate(dp_ports):
test_port_name = 'port_%u' % (i + 1)
self.port_map[test_port_name] = switch_port
self.switch_map[test_port_name] = dp_ports[switch_port]
def _set_vars(self):
"""Set env vars that depend on dynamically allocated ports."""
self._set_prom_port()
def _write_faucet_config(self):
"""Render the FAUCET config template and write faucet.yaml."""
faucet_config = '\n'.join((
self.get_config_header(
self.CONFIG_GLOBAL, self.debug_log_path, self.dpid, self.hardware),
self.CONFIG % self.port_map))
if self.config_ports:
# Second interpolation pass fills in dynamically allocated ports.
faucet_config = faucet_config % self.config_ports
with open(self.faucet_config_path, 'w') as faucet_config_file:
faucet_config_file.write(faucet_config)
def _write_gauge_config(self):
"""Render the Gauge config template and write gauge.yaml."""
gauge_config = self.get_gauge_config(
self.faucet_config_path,
self.monitor_stats_file,
self.monitor_state_file,
self.monitor_flow_table_file)
if self.config_ports:
gauge_config = gauge_config % self.config_ports
with open(self.gauge_config_path, 'w') as gauge_config_file:
gauge_config_file.write(gauge_config)
def _test_name(self):
"""Return this test's flattened name (used for logs and port registry)."""
return faucet_mininet_test_util.flat_test_name(self.id())
def _tmpdir_name(self):
"""Create and return this test's private tmpdir."""
tmpdir = os.path.join(self.root_tmpdir, self._test_name())
os.mkdir(tmpdir)
return tmpdir
def _controller_lognames(self):
"""Return controller log files that exist and are non-empty."""
lognames = []
for controller in self.net.controllers:
logname = controller.logname()
if os.path.exists(logname) and os.path.getsize(logname) > 0:
lognames.append(logname)
return lognames
def _wait_load(self, load_retries=120):
"""Block until 1-minute load average drops below max_test_load, or fail."""
for _ in range(load_retries):
load = os.getloadavg()[0]
# Random sleep spreads out concurrently-waiting test processes.
time.sleep(random.randint(1, 7))
if load < self.max_test_load:
return
output('load average too high %f, waiting' % load)
self.fail('load average %f consistently too high' % load)
def _allocate_config_ports(self):
    """Allocate a free port for each template variable in config_ports.

    A port is requested from the ports server only when the variable's
    name actually appears in one of the config templates; otherwise the
    entry is left as None so config interpolation can skip it.
    """
    for port_name in list(self.config_ports.keys()):
        self.config_ports[port_name] = None
        for config in (self.CONFIG, self.CONFIG_GLOBAL, self.GAUGE_CONFIG_DBS):
            if re.search(port_name, config):
                port = faucet_mininet_test_util.find_free_port(
                    self.ports_sock, self._test_name())
                self.config_ports[port_name] = port
                output('allocating port %u for %s' % (port, port_name))
                # Stop after the first hit: without this break, a name
                # appearing in several templates allocated multiple ports,
                # leaking every one but the last.
                break
def _allocate_faucet_ports(self):
"""Allocate FAUCET's OpenFlow port and its Prometheus port."""
if self.hw_switch:
# Hardware harness config dictates the OF port.
self.of_port = self.config['of_port']
else:
self.of_port = faucet_mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
self.prom_port = faucet_mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
def _allocate_gauge_ports(self):
"""Allocate Gauge's OpenFlow port."""
if self.hw_switch:
self.gauge_of_port = self.config['gauge_of_port']
else:
self.gauge_of_port = faucet_mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
def setUp(self):
"""Per-test setup: create tmpdir/env, pick topology class and DPID."""
self.tmpdir = self._tmpdir_name()
self._set_static_vars()
if self.hw_switch:
self.topo_class = faucet_mininet_test_topo.FaucetHwSwitchTopo
self.dpid = faucet_mininet_test_util.str_int_dpid(self.dpid)
else:
self.topo_class = faucet_mininet_test_topo.FaucetSwitchTopo
self.dpid = self.rand_dpid()
def tearDown(self):
    """Clean up after a test.

    Saves a final Prometheus scrape, stops Mininet, returns allocated
    ports, archives relevant OVS log lines, and asserts that neither
    controller logged an exception or an OFPErrorMsg.
    """
    with open(os.path.join(self.tmpdir, 'prometheus.log'), 'w') as prom_log:
        prom_log.write(self.scrape_prometheus())
    switch_names = []
    if self.net is not None:
        # Snapshot switch names before net.stop() tears the objects down.
        # Guarded: a test that failed before start_net() has net == None,
        # and dereferencing it here would mask the original failure.
        switch_names = [switch.name for switch in self.net.switches]
        self.net.stop()
        self.net = None
    faucet_mininet_test_util.return_free_ports(
        self.ports_sock, self._test_name())
    if 'OVS_LOGDIR' in os.environ:
        ovs_log_dir = os.environ['OVS_LOGDIR']
        if ovs_log_dir and os.path.exists(ovs_log_dir):
            for ovs_log in glob.glob(os.path.join(ovs_log_dir, '*.log')):
                lines = []
                for name in switch_names:
                    lines.extend(self.matching_lines_from_file(name, ovs_log))
                if lines:
                    # Archive only the lines mentioning this test's switches.
                    switch_ovs_log_name = os.path.join(self.tmpdir, os.path.basename(ovs_log))
                    with open(switch_ovs_log_name, 'w') as switch_ovs_log:
                        switch_ovs_log.write('\n'.join(lines))
    # must not be any controller exception.
    self.verify_no_exception(self.env['faucet']['FAUCET_EXCEPTION_LOG'])
    for _, debug_log_name in self._get_ofchannel_logs():
        with open(debug_log_name) as debug_log:
            self.assertFalse(
                re.search('OFPErrorMsg', debug_log.read()),
                msg='debug log has OFPErrorMsgs')
def _attach_physical_switch(self):
"""Bridge a physical switch into test topology."""
switch = self.net.switches[0]
# Physical ports are mapped above the existing virtual port range.
mapped_base = max(len(self.switch_map), len(self.port_map))
for i, test_host_port in enumerate(sorted(self.switch_map)):
port_i = i + 1
mapped_port_i = mapped_base + port_i
phys_port = Intf(self.switch_map[test_host_port], node=switch)
switch.cmd('ip link set dev %s up' % phys_port)
switch.cmd(
('ovs-vsctl add-port %s %s -- '
'set Interface %s ofport_request=%u') % (
switch.name,
phys_port.name,
phys_port.name,
mapped_port_i))
# Patch traffic both directions between virtual and physical ports.
for port_pair in ((port_i, mapped_port_i), (mapped_port_i, port_i)):
port_x, port_y = port_pair
switch.cmd('%s add-flow %s in_port=%u,actions=output:%u' % (
self.OFCTL, switch.name, port_x, port_y))
def start_net(self):
"""Start Mininet network."""
controller_intf = 'lo'
if self.hw_switch:
controller_intf = self.cpn_intf
self._start_faucet(controller_intf)
self.pre_start_net()
if self.hw_switch:
self._attach_physical_switch()
self._wait_debug_log()
for port_no in self._dp_ports():
self.set_port_up(port_no, wait=False)
dumpNodeConnections(self.net.hosts)
self.reset_all_ipv4_prefix(prefix=24)
def _get_controller(self):
"""Return first controller."""
return self.net.controllers[0]
def _start_gauge_check(self):
"""Hook for subclasses: return error text if Gauge failed, else None."""
return None
def _start_check(self):
"""Return error text if controllers/switch are not up, else None."""
if not self._wait_controllers_healthy():
return 'not all controllers healthy'
if not self._wait_controllers_connected():
return 'not all controllers connected to switch'
if not self._wait_ofctl_up():
return 'ofctl not up'
if not self.wait_dp_status(1):
return 'prometheus port not up'
if self.config_ports:
for port_name, port in list(self.config_ports.items()):
# gauge-prefixed ports are checked by _start_gauge_check().
if port is not None and not port_name.startswith('gauge'):
if not self._get_controller().listen_port(port):
return 'faucet not listening on %u (%s)' % (
port, port_name)
return self._start_gauge_check()
def _start_faucet(self, controller_intf):
"""Start FAUCET (and Gauge if RUN_GAUGE), retrying up to 3 times."""
last_error_txt = ''
for _ in range(3):
faucet_mininet_test_util.return_free_ports(
self.ports_sock, self._test_name())
self._allocate_config_ports()
self._allocate_faucet_ports()
self._set_vars()
self._write_faucet_config()
self.net = Mininet(
self.topo, controller=faucet_mininet_test_topo.FAUCET(
name='faucet', tmpdir=self.tmpdir,
controller_intf=controller_intf,
env=self.env['faucet'],
ctl_privkey=self.ctl_privkey,
ctl_cert=self.ctl_cert,
ca_certs=self.ca_certs,
ports_sock=self.ports_sock,
port=self.of_port,
test_name=self._test_name()))
if self.RUN_GAUGE:
self._allocate_gauge_ports()
self._write_gauge_config()
self.gauge_controller = faucet_mininet_test_topo.Gauge(
name='gauge', tmpdir=self.tmpdir,
env=self.env['gauge'],
controller_intf=controller_intf,
ctl_privkey=self.ctl_privkey,
ctl_cert=self.ctl_cert,
ca_certs=self.ca_certs,
port=self.gauge_of_port)
self.net.addController(self.gauge_controller)
self.net.start()
self._wait_load()
last_error_txt = self._start_check()
if last_error_txt is None:
self._config_tableids()
self._wait_load()
return
# Start failed: tear down, record logs, let ports age, then retry.
self.net.stop()
last_error_txt += '\n\n' + self._dump_controller_logs()
error('%s: %s' % (self._test_name(), last_error_txt))
time.sleep(faucet_mininet_test_util.MIN_PORT_AGE)
self.fail(last_error_txt)
def _ofctl_rest_url(self, req):
"""Return control URL for Ryu ofctl module."""
return 'http://%s:%u/%s' % (
faucet_mininet_test_util.LOCALHOST, self._get_controller().ofctl_port, req)
def _ofctl(self, req):
"""GET an ofctl URL; return body text, or None on connection error."""
try:
ofctl_result = requests.get(req).text
except ConnectionError:
return None
return ofctl_result
def _ofctl_up(self):
"""Return truthy if ofctl reports a non-empty switch list."""
switches = self._ofctl(self._ofctl_rest_url('stats/switches'))
return switches is not None and re.search(r'^\[[^\]]+\]$', switches)
def _wait_ofctl_up(self, timeout=10):
"""Poll until the ofctl REST service is up (True) or timeout (False)."""
for _ in range(timeout):
if self._ofctl_up():
return True
time.sleep(1)
return False
def _ofctl_get(self, int_dpid, req, timeout):
for _ in range(timeout):
ofctl_result = self._ofctl(self._ofctl_rest_url(req))
try:
ofmsgs = json.loads(ofctl_result)[int_dpid]
return [json.dumps(ofmsg) for ofmsg in ofmsgs]
except ValueError:
# Didn't get valid JSON, try again
time.sleep(1)
continue
return []
def _curl_portmod(self, int_dpid, port_no, config, mask):
"""Use curl to send a portmod command via the ofctl module."""
curl_format = ' '.join((
'curl -X POST -d',
'\'{"dpid": %s, "port_no": %u, "config": %u, "mask": %u}\'',
self._ofctl_rest_url('stats/portdesc/modify')))
return curl_format % (int_dpid, port_no, config, mask)
def _signal_proc_on_port(self, host, port, signal):
"""Signal the process listening on host's TCP port; return truthy if one was."""
tcp_pattern = '%s/tcp' % port
# fuser -k -<signal> both finds and signals the listener.
fuser_out = host.cmd('fuser %s -k -%u' % (tcp_pattern, signal))
return re.search(r'%s:\s+\d+' % tcp_pattern, fuser_out)
def _get_ofchannel_logs(self):
    """Parse the generated FAUCET config for ofchannel debug logs.

    Returns:
        list of (dp_name, ofchannel_log_path) tuples.
    """
    with open(self.env['faucet']['FAUCET_CONFIG']) as config_file:
        # safe_load: this is plain data YAML we generated ourselves, and
        # yaml.load() without an explicit Loader is deprecated/unsafe.
        config = yaml.safe_load(config_file)
    ofchannel_logs = []
    for dp_name, dp_config in config['dps'].items():
        if 'ofchannel_log' in dp_config:
            debug_log = dp_config['ofchannel_log']
            ofchannel_logs.append((dp_name, debug_log))
    return ofchannel_logs
def _dump_controller_logs(self):
"""Return concatenated text of each controller's logs in tmpdir."""
dump_txt = ''
test_logs = glob.glob(os.path.join(self.tmpdir, '*.log'))
for controller in self.net.controllers:
for test_log_name in test_logs:
basename = os.path.basename(test_log_name)
# Controller log files are named after the controller.
if basename.startswith(controller.name):
with open(test_log_name) as test_log:
dump_txt += '\n'.join((
'',
basename,
'=' * len(basename),
'',
test_log.read()))
break
return dump_txt
def _controllers_healthy(self):
"""Return True if every controller process reports healthy."""
for controller in self.net.controllers:
if not controller.healthy():
return False
return True
def _controllers_connected(self):
"""Return True if every controller has a switch connected."""
for controller in self.net.controllers:
if not controller.connected():
return False
return True
def _wait_controllers_healthy(self, timeout=30):
"""Poll until all controllers are healthy (True) or timeout (False)."""
for _ in range(timeout):
if self._controllers_healthy():
return True
time.sleep(1)
return False
def _wait_controllers_connected(self, timeout=30):
"""Poll until all controllers are connected (True) or timeout (False)."""
for _ in range(timeout):
if self._controllers_connected():
return True
time.sleep(1)
return False
def _wait_debug_log(self):
"""Require all switches to have exchanged flows with controller."""
ofchannel_logs = self._get_ofchannel_logs()
for _, debug_log in ofchannel_logs:
for _ in range(60):
if (os.path.exists(debug_log) and
os.path.getsize(debug_log) > 0):
return True
time.sleep(1)
return False
def verify_no_exception(self, exception_log_name):
"""Assert that the named controller exception log is empty or absent."""
if not os.path.exists(exception_log_name):
return
with open(exception_log_name) as exception_log:
exception_contents = exception_log.read()
self.assertEqual(
'',
exception_contents,
msg='%s log contains %s' % (
exception_log_name, exception_contents))
def tcpdump_helper(self, tcpdump_host, tcpdump_filter, funcs=None,
vflags='-v', timeout=10, packets=2, root_intf=False):
"""Run tcpdump on a host, fire funcs once capture starts, return output.

Args:
tcpdump_host: Mininet host to capture on.
tcpdump_filter: pcap filter expression.
funcs: callables invoked after tcpdump reports it is listening.
vflags: tcpdump verbosity flags.
timeout: soft timeout for the capture command.
packets: packet count after which tcpdump exits.
root_intf: if True, capture on the base interface (strip .vlan suffix).
"""
intf = tcpdump_host.intf().name
if root_intf:
intf = intf.split('.')[0]
tcpdump_cmd = faucet_mininet_test_util.timeout_soft_cmd(
'tcpdump -i %s -e -n -U %s -c %u %s' % (
intf, vflags, packets, tcpdump_filter),
timeout)
tcpdump_out = tcpdump_host.popen(
tcpdump_cmd,
stdin=faucet_mininet_test_util.DEVNULL,
stderr=subprocess.STDOUT,
close_fds=True)
popens = {tcpdump_host: tcpdump_out}
tcpdump_started = False
tcpdump_txt = ''
for host, line in pmonitor(popens):
if host == tcpdump_host:
if tcpdump_started:
tcpdump_txt += line.strip()
elif re.search('tcpdump: listening on ', line):
# when we see tcpdump start, then call provided functions.
tcpdump_started = True
if funcs is not None:
for func in funcs:
func()
else:
error('tcpdump_helper: %s' % line)
self.assertTrue(tcpdump_started, msg='%s did not start' % tcpdump_cmd)
return tcpdump_txt
def pre_start_net(self):
"""Hook called after Mininet initializtion, before Mininet started."""
return
def get_config_header(self, config_global, debug_log, dpid, hardware):
"""Build v2 FAUCET config header."""
return """
%s
dps:
faucet-1:
ofchannel_log: %s
dp_id: 0x%x
hardware: "%s"
""" % (config_global, debug_log, int(dpid), hardware)
def get_gauge_watcher_config(self):
"""Return the Gauge watchers config section used by tests."""
return """
port_stats:
dps: ['faucet-1']
type: 'port_stats'
interval: 5
db: 'stats_file'
port_state:
dps: ['faucet-1']
type: 'port_state'
interval: 5
db: 'state_file'
flow_table:
dps: ['faucet-1']
type: 'flow_table'
interval: 5
db: 'flow_file'
"""
def get_gauge_config(self, faucet_config_file,
monitor_stats_file,
monitor_state_file,
monitor_flow_table_file):
"""Build Gauge config."""
return """
faucet_configs:
- %s
watchers:
%s
dbs:
stats_file:
type: 'text'
file: %s
state_file:
type: 'text'
file: %s
flow_file:
type: 'text'
file: %s
couchdb:
type: gaugedb
gdb_type: nosql
nosql_db: couch
db_username: couch
db_password: 123
db_ip: 'localhost'
db_port: 5001
driver: 'couchdb'
views:
switch_view: '_design/switches/_view/switch'
match_view: '_design/flows/_view/match'
tag_view: '_design/tags/_view/tags'
switches_doc: 'switches_bak'
flows_doc: 'flows_bak'
db_update_counter: 2
%s
""" % (faucet_config_file,
self.get_gauge_watcher_config(),
monitor_stats_file,
monitor_state_file,
monitor_flow_table_file,
self.GAUGE_CONFIG_DBS)
def get_exabgp_conf(self, peer, peer_config=''):
"""Return exabgp neighbor config; %(bgp_port)d is interpolated later."""
return """
neighbor %s {
router-id 2.2.2.2;
local-address %s;
connect %s;
peer-as 1;
local-as 2;
%s
}
""" % (peer, peer, '%(bgp_port)d', peer_config)
def get_all_groups_desc_from_dpid(self, dpid, timeout=2):
"""Return all group descriptions from DPID."""
int_dpid = faucet_mininet_test_util.str_int_dpid(dpid)
return self._ofctl_get(
int_dpid, 'stats/groupdesc/%s' % int_dpid, timeout)
def get_all_flows_from_dpid(self, dpid, timeout=10):
"""Return all flows from DPID."""
int_dpid = faucet_mininet_test_util.str_int_dpid(dpid)
return self._ofctl_get(
int_dpid, 'stats/flow/%s' % int_dpid, timeout)
def _port_stat(self, port_stats, port):
if port_stats:
for port_stat in port_stats:
port_stat = json.loads(port_stat)
if port_stat['port_no'] == port:
return port_stat
return None
def get_port_stats_from_dpid(self, dpid, port, timeout=2):
"""Return port stats for a port."""
int_dpid = faucet_mininet_test_util.str_int_dpid(dpid)
port_stats = self._ofctl_get(
int_dpid, 'stats/port/%s' % int_dpid, timeout)
return self._port_stat(port_stats, port)
def get_port_desc_from_dpid(self, dpid, port, timeout=2):
"""Return port desc for a port."""
int_dpid = faucet_mininet_test_util.str_int_dpid(dpid)
port_stats = self._ofctl_get(
int_dpid, 'stats/portdesc/%s' % int_dpid, timeout)
return self._port_stat(port_stats, port)
def wait_matching_in_group_table(self, action, group_id, timeout=10):
"""Poll until group_id's first bucket includes action (True), else False."""
groupdump = os.path.join(self.tmpdir, 'groupdump-%s.txt' % self.dpid)
for _ in range(timeout):
group_dump = self.get_all_groups_desc_from_dpid(self.dpid, 1)
with open(groupdump, 'w') as groupdump_file:
for group_desc in group_dump:
group_dict = json.loads(group_desc)
groupdump_file.write(str(group_dict) + '\n')
if group_dict['group_id'] == group_id:
actions = set(group_dict['buckets'][0]['actions'])
if set([action]).issubset(actions):
return True
time.sleep(1)
return False
def get_matching_flows_on_dpid(self, dpid, match, timeout=10, table_id=None,
actions=None, match_exact=False):
"""Return flows on dpid matching the criteria (possibly empty list).

Args:
dpid: datapath ID to query.
match: dict of match fields flows must contain (None to skip check).
timeout: number of 1s-spaced attempts.
table_id: restrict to this table if not None.
actions: actions flows must include, if not None.
match_exact: if True, flow match must equal match exactly.
"""
flowdump = os.path.join(self.tmpdir, 'flowdump-%s.txt' % dpid)
with open(flowdump, 'w') as flowdump_file:
for _ in range(timeout):
flow_dicts = []
flow_dump = self.get_all_flows_from_dpid(dpid)
for flow in flow_dump:
flow_dict = json.loads(flow)
flowdump_file.write(str(flow_dict) + '\n')
if (table_id is not None and
flow_dict['table_id'] != table_id):
continue
if actions is not None:
if not set(actions).issubset(set(flow_dict['actions'])):
continue
if match is not None:
if match_exact:
if match.items() != flow_dict['match'].items():
continue
elif not set(match.items()).issubset(set(flow_dict['match'].items())):
continue
flow_dicts.append(flow_dict)
if flow_dicts:
return flow_dicts
time.sleep(1)
return flow_dicts
def get_matching_flow_on_dpid(self, dpid, match, timeout=10, table_id=None,
actions=None, match_exact=None):
"""Return the first matching flow on dpid, or [] if none matched."""
flow_dicts = self.get_matching_flows_on_dpid(
dpid, match, timeout=timeout, table_id=table_id,
actions=actions, match_exact=match_exact)
if flow_dicts:
return flow_dicts[0]
return []
def get_matching_flow(self, match, timeout=10, table_id=None,
actions=None, match_exact=None):
"""Return the first matching flow on the default DPID."""
return self.get_matching_flow_on_dpid(
self.dpid, match, timeout=timeout, table_id=table_id,
actions=actions, match_exact=match_exact)
def get_group_id_for_matching_flow(self, match, timeout=10, table_id=None):
"""Return group_id referenced by the flow matching match, or fail."""
for _ in range(timeout):
flow_dict = self.get_matching_flow(
match, timeout=timeout, table_id=table_id)
if flow_dict:
for action in flow_dict['actions']:
if action.startswith('GROUP'):
# Action string has form 'GROUP:<id>'.
_, group_id = action.split(':')
return int(group_id)
time.sleep(1)
self.fail(
'Cannot find group_id for matching flow %s' % match)
def matching_flow_present_on_dpid(self, dpid, match, timeout=10, table_id=None,
actions=None, match_exact=None):
"""Return True if matching flow is present on a DPID."""
if self.get_matching_flow_on_dpid(
dpid, match, timeout=timeout, table_id=table_id,
actions=actions, match_exact=match_exact):
return True
return False
def matching_flow_present(self, match, timeout=10, table_id=None,
actions=None, match_exact=None):
"""Return True if matching flow is present on default DPID."""
return self.matching_flow_present_on_dpid(
self.dpid, match, timeout=timeout, table_id=table_id,
actions=actions, match_exact=match_exact)
def wait_until_matching_flow(self, match, timeout=10, table_id=None,
actions=None, match_exact=False):
"""Wait (require) for flow to be present on default DPID."""
self.assertTrue(
self.matching_flow_present(
match, timeout=timeout, table_id=table_id,
actions=actions, match_exact=match_exact),
msg=match)
def wait_until_controller_flow(self):
"""Require a flow that outputs packets to the controller."""
self.wait_until_matching_flow(None, actions=[u'OUTPUT:CONTROLLER'])
def mac_learned(self, mac, timeout=10, in_port=None):
"""Return True if a MAC has been learned on default DPID."""
# Learning requires both a src (learn) and dst (forward) flow.
for eth_field, table_id in (
(u'dl_src', self.ETH_SRC_TABLE),
(u'dl_dst', self.ETH_DST_TABLE)):
match = {eth_field: u'%s' % mac}
if in_port is not None and table_id == self.ETH_SRC_TABLE:
match[u'in_port'] = in_port
if not self.matching_flow_present(
match, timeout=timeout, table_id=table_id):
return False
return True
def mac_as_int(self, mac):
    """Return colon-separated MAC address string as an integer."""
    # int() rather than the Python 2-only long(): on Python 2, int()
    # auto-promotes to long, so behavior is identical on both versions.
    return int(mac.replace(':', ''), 16)
def mac_from_int(self, mac_int):
    """Return integer as a colon-separated MAC address string."""
    mac_int_str = '%012x' % int(mac_int)
    # Slice the 12 hex digits into 6 octets; replaces the Python 2-only
    # str.encode('hex')/decode('hex') idiom so this also runs on Python 3.
    return ':'.join(
        mac_int_str[i:i + 2] for i in range(0, 12, 2))
def prom_macs_learned(self, port=None, vlan=None):
"""Return MACs learned per Prometheus, optionally filtered by port/vlan."""
# Default label values are regexes matching any port/vlan.
labels = {
'n': r'\d+',
'port': r'\d+',
'vlan': r'\d+',
}
if port:
labels['port'] = str(port)
if vlan:
labels['vlan'] = str(vlan)
port_learned_macs_prom = self.scrape_prometheus_var(
'learned_macs', labels=labels, default=[], multiple=True, dpid=True)
# A zero sample means no MAC learned in that slot - skip it.
macs = [self.mac_from_int(mac_int) for _, mac_int in port_learned_macs_prom if mac_int]
return macs
def prom_mac_learned(self, mac, port=None, vlan=None):
"""Return True if Prometheus reports mac learned on port/vlan."""
return mac in self.prom_macs_learned(port=port, vlan=vlan)
def host_learned(self, host, timeout=10, in_port=None):
"""Return True if a host has been learned on default DPID."""
return self.mac_learned(host.MAC(), timeout, in_port)
def get_host_intf_mac(self, host, intf):
"""Return the MAC address of a host's named interface."""
return host.cmd('cat /sys/class/net/%s/address' % intf).strip()
def host_ip(self, host, family, family_re):
"""Return first address of the given family on host's default interface."""
host_ip_cmd = (
r'ip -o -f %s addr show %s|'
'grep -m 1 -Eo "%s %s"|cut -f2 -d " "' % (
family,
host.defaultIntf(),
family,
family_re))
return host.cmd(host_ip_cmd).strip()
def host_ipv4(self, host):
"""Return first IPv4/netmask for host's default interface."""
return self.host_ip(host, 'inet', r'[0-9\\.]+\/[0-9]+')
def host_ipv6(self, host):
"""Return first IPv6/netmask for host's default interface."""
return self.host_ip(host, 'inet6', r'[0-9a-f\:]+\/[0-9]+')
def reset_ipv4_prefix(self, host, prefix=24):
"""Reset the IPv4 prefix length on host's default interface."""
host.setIP(host.IP(), prefixLen=prefix)
def reset_all_ipv4_prefix(self, prefix=24):
"""Reset the IPv4 prefix length on every host."""
for host in self.net.hosts:
self.reset_ipv4_prefix(host, prefix)
def require_host_learned(self, host, retries=8, in_port=None):
"""Require a host be learned on default DPID."""
host_ip_net = self.host_ipv4(host)
if not host_ip_net:
host_ip_net = self.host_ipv6(host)
# NOTE(review): unicode() is Python 2 only; this module targets Python 2.
broadcast = ipaddress.ip_interface(
unicode(host_ip_net)).network.broadcast_address
broadcast_str = str(broadcast)
packets = 1
if broadcast.version == 4:
ping_cmd = 'ping -b'
if broadcast.version == 6:
ping_cmd = 'ping6'
broadcast_str = 'ff02::1'
# stimulate host learning with a broadcast ping
ping_cli = faucet_mininet_test_util.timeout_cmd(
'%s -I%s -W1 -c%u %s' % (
ping_cmd, host.defaultIntf().name, packets, broadcast_str), 3)
for _ in range(retries):
if self.host_learned(host, timeout=1, in_port=in_port):
return
ping_result = host.cmd(ping_cli)
self.assertTrue(re.search(
r'%u packets transmitted' % packets, ping_result), msg='%s: %s' % (
ping_cli, ping_result))
self.fail('host %s (%s) could not be learned (%s: %s)' % (
host, host.MAC(), ping_cli, ping_result))
def get_prom_port(self):
"""Return FAUCET's Prometheus port."""
return int(self.env['faucet']['FAUCET_PROMETHEUS_PORT'])
def get_prom_addr(self):
"""Return FAUCET's Prometheus bind address."""
return self.env['faucet']['FAUCET_PROMETHEUS_ADDR']
def _prometheus_url(self, controller):
"""Return the Prometheus scrape URL for the named controller."""
if controller == 'faucet':
return 'http://%s:%u' % (
self.get_prom_addr(), self.get_prom_port())
elif controller == 'gauge':
return 'http://%s:%u' % (
self.get_prom_addr(), self.config_ports['gauge_prom_port'])
def scrape_prometheus(self, controller='faucet'):
"""Return non-comment Prometheus exporter text ('' on connect failure)."""
url = self._prometheus_url(controller)
try:
prom_lines = requests.get(url).text.split('\n')
except ConnectionError:
return ''
prom_vars = []
for prom_line in prom_lines:
if not prom_line.startswith('#'):
prom_vars.append(prom_line)
return '\n'.join(prom_vars)
def scrape_prometheus_var(self, var, labels=None, any_labels=False, default=None,
                          dpid=True, multiple=False, controller='faucet', retries=1):
    """Scrape a variable from a controller's Prometheus exporter.

    Args:
        var: variable name to scrape.
        labels: dict of label names/values the sample must carry.
        any_labels: if True, accept any label set (overrides labels/dpid).
        default: value returned when no sample matches.
        dpid: if True, also require the dp_id label for this test's DPID.
        multiple: if True, return a list of (var, value) tuples.
        controller: which controller ('faucet' or 'gauge') to scrape.
        retries: scrape attempts, 1s apart, before returning default.
    """
    label_values_re = r''
    if any_labels:
        label_values_re = r'\{[^\}]+\}'
    else:
        if labels is None:
            labels = {}
        else:
            # Copy so adding dp_id never mutates the caller's dict.
            labels = dict(labels)
        if dpid:
            # int() rather than the Python 2-only long(); identical result.
            labels.update({'dp_id': '0x%x' % int(self.dpid)})
        if labels:
            label_values = []
            for label, value in sorted(list(labels.items())):
                label_values.append('%s="%s"' % (label, value))
            label_values_re = r'\{%s\}' % r'\S+'.join(label_values)
    var_re = r'^%s%s$' % (var, label_values_re)
    for _ in range(retries):
        results = []
        prom_lines = self.scrape_prometheus(controller)
        for prom_line in prom_lines.splitlines():
            prom_var_data = prom_line.split(' ')
            self.assertEqual(
                2, len(prom_var_data),
                msg='Invalid prometheus line in %s' % prom_lines)
            prom_var, value = prom_var_data
            if prom_var.startswith(var):
                var_match = re.search(var_re, prom_var)
                if var_match:
                    # Samples may be rendered as floats; truncate to int.
                    value_int = int(float(value))
                    results.append((var, value_int))
                    if not multiple:
                        break
        if results:
            if multiple:
                return results
            return results[0][1]
        time.sleep(1)
    return default
def gauge_smoke_test(self):
watcher_files = set([
self.monitor_stats_file,
self.monitor_state_file,
self.monitor_flow_table_file])
found_watcher_files = set()
for _ in range(60):
for watcher_file in watcher_files:
if (os.path.exists(watcher_file)
and os.path.getsize(watcher_file)):
found_watcher_files.add(watcher_file)
if watcher_files == found_watcher_files:
break
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
time.sleep(1)
found_watcher_files = set()
missing_watcher_files = watcher_files - found_watcher_files
self.assertEqual(
missing_watcher_files, set(), msg='Gauge missing logs: %s' % missing_watcher_files)
self.hup_gauge()
self.verify_no_exception(self.env['faucet']['FAUCET_EXCEPTION_LOG'])
def prometheus_smoke_test(self):
prom_out = self.scrape_prometheus()
for nonzero_var in (
r'of_packet_ins', r'of_flowmsgs_sent', r'of_dp_connections',
r'faucet_config\S+name=\"flood\"', r'faucet_pbr_version\S+version='):
self.assertTrue(
re.search(r'%s\S+\s+[1-9]+' % nonzero_var, prom_out),
msg='expected %s to be nonzero (%s)' % (nonzero_var, prom_out))
for zero_var in (
'of_errors', 'of_dp_disconnections'):
self.assertTrue(
re.search(r'%s\S+\s+0' % zero_var, prom_out),
msg='expected %s to be present and zero (%s)' % (zero_var, prom_out))
def get_configure_count(self):
"""Return the number of times FAUCET has processed a reload request."""
for _ in range(3):
count = self.scrape_prometheus_var(
'faucet_config_reload_requests', default=None, dpid=False)
if count is not None:
return count
time.sleep(1)
self.fail('configure count stayed zero')
def hup_faucet(self):
"""Send a HUP signal to the controller."""
controller = self._get_controller()
self.assertTrue(
self._signal_proc_on_port(controller, controller.port, 1))
def hup_gauge(self):
self.assertTrue(
self._signal_proc_on_port(
self.gauge_controller, int(self.gauge_of_port), 1))
def verify_controller_fping(self, host, faucet_vip,
total_packets=100, packet_interval_ms=100):
fping_bin = 'fping'
if faucet_vip.version == 6:
fping_bin = 'fping6'
fping_cli = '%s -s -c %u -i %u -p 1 -T 1 %s' % (
fping_bin, total_packets, packet_interval_ms, faucet_vip.ip)
timeout = int(((1000.0 / packet_interval_ms) * total_packets) * 1.5)
fping_out = host.cmd(faucet_mininet_test_util.timeout_cmd(
fping_cli, timeout))
error('%s: %s' % (self._test_name(), fping_out))
self.assertTrue(
not re.search(r'\s+0 ICMP Echo Replies received', fping_out),
msg=fping_out)
def verify_vlan_flood_limited(self, vlan_first_host, vlan_second_host,
other_vlan_host):
"""Verify that flooding doesn't cross VLANs."""
for first_host, second_host in (
(vlan_first_host, vlan_second_host),
(vlan_second_host, vlan_first_host)):
tcpdump_filter = 'ether host %s or ether host %s' % (
first_host.MAC(), second_host.MAC())
tcpdump_txt = self.tcpdump_helper(
other_vlan_host, tcpdump_filter, [
lambda: first_host.cmd('arp -d %s' % second_host.IP()),
lambda: first_host.cmd('ping -c1 %s' % second_host.IP())],
packets=1)
self.assertTrue(
re.search('0 packets captured', tcpdump_txt), msg=tcpdump_txt)
def verify_ping_mirrored(self, first_host, second_host, mirror_host):
self.net.ping((first_host, second_host))
for host in (first_host, second_host):
self.require_host_learned(host)
self.retry_net_ping(hosts=(first_host, second_host))
mirror_mac = mirror_host.MAC()
tcpdump_filter = (
'not ether src %s and '
'(icmp[icmptype] == 8 or icmp[icmptype] == 0)') % mirror_mac
first_ping_second = 'ping -c1 %s' % second_host.IP()
tcpdump_txt = self.tcpdump_helper(
mirror_host, tcpdump_filter, [
lambda: first_host.cmd(first_ping_second)])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt),
msg=tcpdump_txt)
self.assertTrue(re.search(
'%s: ICMP echo reply' % first_host.IP(), tcpdump_txt),
msg=tcpdump_txt)
def verify_eapol_mirrored(self, first_host, second_host, mirror_host):
self.net.ping((first_host, second_host))
for host in (first_host, second_host):
self.require_host_learned(host)
self.retry_net_ping(hosts=(first_host, second_host))
mirror_mac = mirror_host.MAC()
tmp_eap_conf = os.path.join(self.tmpdir, 'eap.conf')
tcpdump_filter = (
'not ether src %s and ether proto 0x888e' % mirror_mac)
eap_conf_cmd = (
'echo "eapol_version=2\nap_scan=0\nnetwork={\n'
'key_mgmt=IEEE8021X\neap=MD5\nidentity=\\"login\\"\n'
'password=\\"password\\"\n}\n" > %s' % tmp_eap_conf)
wpa_supplicant_cmd = faucet_mininet_test_util.timeout_cmd(
'wpa_supplicant -c%s -Dwired -i%s -d' % (
tmp_eap_conf,
first_host.defaultIntf().name),
5)
tcpdump_txt = self.tcpdump_helper(
mirror_host, tcpdump_filter, [
lambda: first_host.cmd(eap_conf_cmd),
lambda: first_host.cmd(wpa_supplicant_cmd)])
self.assertTrue(
re.search('01:80:c2:00:00:03, ethertype EAPOL', tcpdump_txt),
msg=tcpdump_txt)
def bogus_mac_flooded_to_port1(self):
first_host, second_host, third_host = self.net.hosts[0:3]
unicast_flood_filter = 'ether host %s' % self.BOGUS_MAC
static_bogus_arp = 'arp -s %s %s' % (first_host.IP(), self.BOGUS_MAC)
curl_first_host = 'curl -m 5 http://%s' % first_host.IP()
tcpdump_txt = self.tcpdump_helper(
first_host, unicast_flood_filter,
[lambda: second_host.cmd(static_bogus_arp),
lambda: second_host.cmd(curl_first_host),
lambda: self.net.ping(hosts=(second_host, third_host))])
return not re.search('0 packets captured', tcpdump_txt)
def verify_port1_unicast(self, unicast_status):
    """Assert presence/absence of unicast flood rules involving port 1.

    unicast_status: expected boolean presence of both rules below.
    """
    # Unicast flooding rule for traffic received on port 1.
    self.assertEqual(
        self.matching_flow_present(
            {u'dl_vlan': u'100', u'in_port': int(self.port_map['port_1'])},
            table_id=self.FLOOD_TABLE,
            match_exact=True),
        unicast_status)
    # Unicast flood rule that outputs to port 1 (from port 2 traffic).
    self.assertEqual(
        self.matching_flow_present(
            {u'dl_vlan': u'100', u'in_port': int(self.port_map['port_2'])},
            table_id=self.FLOOD_TABLE,
            actions=[u'OUTPUT:%u' % self.port_map['port_1']],
            match_exact=True),
        unicast_status)
def verify_lldp_blocked(self):
    """Return True if LLDP sent by the second host is NOT seen by the first."""
    first_host, second_host = self.net.hosts[0:2]
    lldp_filter = 'ether proto 0x88cc'
    ladvd_mkdir = 'mkdir -p /var/run/ladvd'
    send_lldp = '%s -L -o %s' % (
        faucet_mininet_test_util.timeout_cmd(self.LADVD, 30),
        second_host.defaultIntf())
    # Send several times to avoid a false pass from a single lost frame.
    traffic_funcs = [lambda: second_host.cmd(ladvd_mkdir)]
    traffic_funcs.extend([lambda: second_host.cmd(send_lldp)] * 3)
    tcpdump_txt = self.tcpdump_helper(
        first_host, lldp_filter, traffic_funcs,
        timeout=20, packets=5)
    return re.search(second_host.MAC(), tcpdump_txt) is None
def is_cdp_blocked(self):
    """Return True if CDP sent by the second host is NOT seen by the first."""
    first_host, second_host = self.net.hosts[0:2]
    # 01:00:0c:cc:cc:cc with SNAP protocol ID 0x2000 is CDP.
    cdp_filter = 'ether host 01:00:0c:cc:cc:cc and ether[20:2]==0x2000'
    ladvd_mkdir = 'mkdir -p /var/run/ladvd'
    send_cdp = '%s -C -o %s' % (
        faucet_mininet_test_util.timeout_cmd(self.LADVD, 30),
        second_host.defaultIntf())
    # Send several times to avoid a false pass from a single lost frame.
    traffic_funcs = [lambda: second_host.cmd(ladvd_mkdir)]
    traffic_funcs.extend([lambda: second_host.cmd(send_cdp)] * 3)
    tcpdump_txt = self.tcpdump_helper(
        first_host,
        cdp_filter,
        traffic_funcs,
        timeout=20, packets=5)
    return re.search(second_host.MAC(), tcpdump_txt) is None
def verify_hup_faucet(self, timeout=3):
    """HUP and verify the HUP was processed.

    Polls the controller's configure count until it increases, failing
    after timeout seconds if no reload was observed.
    """
    start_configure_count = self.get_configure_count()
    self.hup_faucet()
    for _ in range(timeout):
        configure_count = self.get_configure_count()
        if configure_count > start_configure_count:
            return
        time.sleep(1)
    self.fail('HUP not processed by FAUCET')
def force_faucet_reload(self, new_config):
    """Force FAUCET to reload by adding new line to config file."""
    # Appending is enough to change the file; the HUP below triggers the
    # reload that picks up new_config.
    with open(self.env['faucet']['FAUCET_CONFIG'], 'a') as config_file:
        config_file.write(new_config)
    self.verify_hup_faucet()
def get_host_port_stats(self, hosts_switch_ports):
    """Return a dict mapping each host to the OF stats of its switch port."""
    return {
        host: self.get_port_stats_from_dpid(self.dpid, switch_port)
        for host, switch_port in hosts_switch_ports}
def of_bytes_mbps(self, start_port_stats, end_port_stats, var, seconds):
    """Return the mbps rate implied by two OF byte-counter samples."""
    delta_bytes = end_port_stats[var] - start_port_stats[var]
    return delta_bytes * 8 / seconds / self.ONEMBPS
def verify_iperf_min(self, hosts_switch_ports, min_mbps, server_ip):
    """Verify minimum performance and OF counters match iperf approximately."""
    seconds = 5
    prop = 0.1  # tolerated relative disagreement between iperf and OF counters
    # Sample counters before the transfer so deltas cover the iperf run.
    start_port_stats = self.get_host_port_stats(hosts_switch_ports)
    hosts = []
    for host, _ in hosts_switch_ports:
        hosts.append(host)
    client_host, server_host = hosts
    iperf_mbps = self.iperf(
        client_host, server_host, server_ip, seconds)
    self.assertTrue(iperf_mbps > min_mbps)
    # TODO: account for drops.
    for _ in range(3):
        end_port_stats = self.get_host_port_stats(hosts_switch_ports)
        approx_match = True
        for host in hosts:
            of_rx_mbps = self.of_bytes_mbps(
                start_port_stats[host], end_port_stats[host], 'rx_bytes', seconds)
            of_tx_mbps = self.of_bytes_mbps(
                start_port_stats[host], end_port_stats[host], 'tx_bytes', seconds)
            output(of_rx_mbps, of_tx_mbps)
            max_of_mbps = float(max(of_rx_mbps, of_tx_mbps))
            iperf_to_max = 0
            if max_of_mbps:
                iperf_to_max = iperf_mbps / max_of_mbps
            msg = 'iperf: %fmbps, of: %fmbps (%f)' % (
                iperf_mbps, max_of_mbps, iperf_to_max)
            output(msg)
            # Accept only if iperf and OF counters agree within prop.
            if ((iperf_to_max < (1.0 - prop)) or
                    (iperf_to_max > (1.0 + prop))):
                approx_match = False
        if approx_match:
            return
        time.sleep(1)
    self.fail(msg=msg)
def wait_port_status(self, port_no, expected_status, timeout=10):
    """Poll Prometheus until port_no reports expected_status, else fail."""
    for _ in range(timeout):
        port_status = self.scrape_prometheus_var(
            'port_status', {'port': port_no}, default=None)
        if port_status is not None and port_status == expected_status:
            return
        time.sleep(1)
    self.fail('port %s status %s != expected %u' % (
        port_no, port_status, expected_status))
def set_port_status(self, port_no, status, wait):
    """Set a port's OF admin config via the controller's REST API.

    status: 0 to bring the port up, ofp.OFPPC_PORT_DOWN to take it down.
    wait: if True, block until Prometheus reflects the new port status.
    """
    # _curl_portmod returns a shell command; os.system must succeed (0).
    self.assertEqual(
        0,
        os.system(self._curl_portmod(
            self.dpid,
            port_no,
            status,
            ofp.OFPPC_PORT_DOWN)))
    if wait:
        expected_status = 1
        if status == ofp.OFPPC_PORT_DOWN:
            expected_status = 0
        self.wait_port_status(port_no, expected_status)
def set_port_down(self, port_no, wait=True):
    """Administratively set a switch port down."""
    self.set_port_status(port_no, ofp.OFPPC_PORT_DOWN, wait)
def set_port_up(self, port_no, wait=True):
    """Administratively set a switch port up."""
    self.set_port_status(port_no, 0, wait)
def wait_dp_status(self, expected_status, controller='faucet', timeout=60):
    """Poll Prometheus for dp_status; return True once it matches."""
    for _ in range(timeout):
        dp_status = self.scrape_prometheus_var(
            'dp_status', {}, controller=controller, default=None)
        if dp_status == expected_status and dp_status is not None:
            return True
        time.sleep(1)
    return False
def _get_tableid(self, name):
    """Return the OF table ID FAUCET reports for the named table."""
    return self.scrape_prometheus_var(
        'faucet_config_table_names', {'name': name})
def quiet_commands(self, host, commands):
    """Run each command on host, asserting it produced no output."""
    for cmd in commands:
        cmd_out = host.cmd(cmd)
        self.assertEqual('', cmd_out, msg='%s: %s' % (cmd, cmd_out))
def _config_tableids(self):
    """Replace the class-level table ID defaults with IDs scraped from FAUCET."""
    self.PORT_ACL_TABLE = self._get_tableid('port_acl')
    self.VLAN_TABLE = self._get_tableid('vlan')
    self.VLAN_ACL_TABLE = self._get_tableid('vlan_acl')
    self.ETH_SRC_TABLE = self._get_tableid('eth_src')
    self.IPV4_FIB_TABLE = self._get_tableid('ipv4_fib')
    self.IPV6_FIB_TABLE = self._get_tableid('ipv6_fib')
    self.VIP_TABLE = self._get_tableid('vip')
    self.ETH_DST_TABLE = self._get_tableid('eth_dst')
    self.FLOOD_TABLE = self._get_tableid('flood')
def _dp_ports(self):
port_count = self.N_TAGGED + self.N_UNTAGGED
return list(sorted(self.port_map.values()))[:port_count]
def flap_port(self, port_no, flap_time=1):
    """Take a port down, wait flap_time seconds, then bring it back up."""
    self.set_port_down(port_no)
    time.sleep(flap_time)
    self.set_port_up(port_no)
def flap_all_switch_ports(self, flap_time=1):
    """Flap all ports on switch."""
    for port_no in self._dp_ports():
        self.flap_port(port_no, flap_time=flap_time)
def get_mac_of_intf(self, host, intf):
    """Return the MAC address of an interface on a host, lowercased."""
    mac_pipeline = '|'.join((
        'ip link show %s' % intf,
        'grep -o "..:..:..:..:..:.."',
        'head -1',
        'xargs echo -n'))
    return host.cmd(mac_pipeline).lower()
def add_macvlan(self, host, macvlan_intf, ipa=None, ipm=24):
    """Create a macvlan interface on a host's default interface.

    ipa/ipm: optional IPv4 address and prefix length to assign to it.
    """
    # ip produces no output on success, hence the empty-string asserts.
    self.assertEqual(
        '',
        host.cmd('ip link add link %s %s type macvlan' % (
            host.defaultIntf(), macvlan_intf)))
    self.assertEqual(
        '',
        host.cmd('ip link set dev %s up' % macvlan_intf))
    if ipa:
        self.assertEqual(
            '',
            host.cmd('ip address add %s/%s brd + dev %s' % (
                ipa, ipm, macvlan_intf)))
def add_host_ipv6_address(self, host, ip_v6, intf=None):
    """Add an IPv6 address to a Mininet host."""
    if intf is None:
        intf = host.intf()
    # ip produces no output on success.
    self.assertEqual(
        '',
        host.cmd('ip -6 addr add %s dev %s' % (ip_v6, intf)))
def add_host_route(self, host, ip_dst, ip_gw):
    """Add an IP route to a Mininet host."""
    # Delete any pre-existing route first; output is ignored because the
    # route may legitimately not exist yet.
    host.cmd('ip -%u route del %s' % (
        ip_dst.version, ip_dst.network.with_prefixlen))
    add_cmd = 'ip -%u route add %s via %s' % (
        ip_dst.version, ip_dst.network.with_prefixlen, ip_gw)
    self.quiet_commands(host, (add_cmd,))
def _one_ip_ping(self, host, ping_cmd, retries, require_host_learned):
if require_host_learned:
self.require_host_learned(host)
for _ in range(retries):
ping_result = host.cmd(ping_cmd)
if re.search(self.ONE_GOOD_PING, ping_result):
return
self.assertTrue(
re.search(self.ONE_GOOD_PING, ping_result),
msg='%s: %s' % (ping_cmd, ping_result))
def one_ipv4_ping(self, host, dst, retries=3, require_host_learned=True, intf=None):
    """Ping an IPv4 destination from a host."""
    if intf is None:
        intf = host.defaultIntf()
    # -I binds the ping to the chosen source interface.
    ping_cmd = 'ping -c1 -I%s %s' % (intf, dst)
    return self._one_ip_ping(host, ping_cmd, retries, require_host_learned)
def one_ipv4_controller_ping(self, host):
    """Ping the controller from a host with IPv4."""
    self.one_ipv4_ping(host, self.FAUCET_VIPV4.ip)
    # The VIP should now resolve to FAUCET's virtual MAC on the host.
    self.verify_ipv4_host_learned_mac(
        host, self.FAUCET_VIPV4.ip, self.FAUCET_MAC)
def one_ipv6_ping(self, host, dst, retries=3):
    """Ping an IPv6 destination from a host."""
    ping_cmd = 'ping6 -c1 %s' % dst
    return self._one_ip_ping(host, ping_cmd, retries, require_host_learned=True)
def one_ipv6_controller_ping(self, host):
    """Ping the controller from a host with IPv6."""
    self.one_ipv6_ping(host, self.FAUCET_VIPV6.ip)
    # The VIP should now resolve to FAUCET's virtual MAC on the host.
    self.verify_ipv6_host_learned_mac(
        host, self.FAUCET_VIPV6.ip, self.FAUCET_MAC)
def retry_net_ping(self, hosts=None, required_loss=0, retries=3):
    """Ping between hosts (or all hosts), retrying until loss is acceptable."""
    loss = None
    for _ in range(retries):
        loss = self.net.pingAll() if hosts is None else self.net.ping(hosts)
        if loss <= required_loss:
            return
        time.sleep(1)
    self.fail('ping %f loss > required loss %f' % (loss, required_loss))
def tcp_port_free(self, host, port, ipv=4):
    """Return listener details if port is occupied on host, else None."""
    listen_out = host.cmd(
        faucet_mininet_test_util.tcp_listening_cmd(port, ipv))
    return listen_out if listen_out else None
def wait_for_tcp_free(self, host, port, timeout=10, ipv=4):
    """Wait for a port on a host to stop being listened on (become free)."""
    for _ in range(timeout):
        listen_out = self.tcp_port_free(host, port, ipv)
        # tcp_port_free returns None when nothing is listening.
        if listen_out is None:
            return
        time.sleep(1)
    self.fail('%s busy on port %u (%s)' % (host, port, listen_out))
def wait_for_tcp_listen(self, host, port, timeout=10, ipv=4):
    """Wait for a host to start listening on a port."""
    for _ in range(timeout):
        listen_out = self.tcp_port_free(host, port, ipv)
        # tcp_port_free returns listener details when the port is occupied.
        if listen_out is not None:
            return
        time.sleep(1)
    self.fail('%s never listened on port %u' % (host, port))
def serve_hello_on_tcp_port(self, host, port):
    """Serve 'hello' on a TCP port on a host."""
    # nc exits after one connection; the 10s timeout cleans it up if
    # nothing ever connects.
    host.cmd(faucet_mininet_test_util.timeout_cmd(
        'echo hello | nc -l %s %u &' % (host.IP(), port), 10))
    self.wait_for_tcp_listen(host, port)
def wait_nonzero_packet_count_flow(self, match, timeout=10, table_id=None, actions=None):
    """Wait for a flow to be present and have a non-zero packet_count."""
    # Initialize so the post-loop check cannot hit a NameError if called
    # with timeout < 1.
    flow = None
    for _ in range(timeout):
        flow = self.get_matching_flow(match, timeout=1, table_id=table_id, actions=actions)
        if flow and flow['packet_count'] > 0:
            return
        time.sleep(1)
    # Distinguish "flow never appeared" from "flow never forwarded".
    if flow:
        self.fail('flow %s matching %s had zero packet count' % (flow, match))
    else:
        self.fail('no flow matching %s' % match)
def verify_tp_dst_blocked(self, port, first_host, second_host, table_id=0, mask=None):
    """Verify that a TCP port on a host is blocked from another host."""
    self.serve_hello_on_tcp_port(second_host, port)
    # The blocked connection should produce no output before the timeout
    # kills it.
    self.quiet_commands(
        first_host,
        (faucet_mininet_test_util.timeout_cmd(
            'nc %s %u' % (second_host.IP(), port), 10), ))
    if table_id is not None:
        if mask is None:
            match_port = int(port)
        else:
            # ofctl renders masked matches as value/mask.
            match_port = '/'.join((str(port), str(mask)))
        self.wait_nonzero_packet_count_flow(
            {u'tp_dst': match_port}, table_id=table_id)
def verify_tp_dst_notblocked(self, port, first_host, second_host, table_id=0, mask=None):
    """Verify that a TCP port on a host is NOT blocked from another host."""
    self.serve_hello_on_tcp_port(second_host, port)
    # nc should retrieve the served banner.
    self.assertEqual(
        'hello\r\n',
        first_host.cmd('nc -w 5 %s %u' % (second_host.IP(), port)))
    if table_id is not None:
        self.wait_nonzero_packet_count_flow(
            {u'tp_dst': int(port)}, table_id=table_id)
def swap_host_macs(self, first_host, second_host):
    """Swap the MAC addresses of two Mininet hosts."""
    macs = (first_host.MAC(), second_host.MAC())
    first_host.setMAC(macs[1])
    second_host.setMAC(macs[0])
def start_exabgp(self, exabgp_conf, timeout=30):
    """Start exabgp process on controller host.

    Writes the (templated) config, launches exabgp in the background and
    waits for its log file to appear. Returns (log path, stderr path).
    """
    exabgp_conf_file_name = os.path.join(self.tmpdir, 'exabgp.conf')
    exabgp_log = os.path.join(self.tmpdir, 'exabgp.log')
    exabgp_err = os.path.join(self.tmpdir, 'exabgp.err')
    exabgp_env = ' '.join((
        'exabgp.daemon.user=root',
        'exabgp.log.all=true',
        'exabgp.log.level=DEBUG',
        'exabgp.log.destination=%s' % exabgp_log,
    ))
    # Substitute the allocated BGP port into the config template.
    bgp_port = self.config_ports['bgp_port']
    exabgp_conf = exabgp_conf % {'bgp_port': bgp_port}
    with open(exabgp_conf_file_name, 'w') as exabgp_conf_file:
        exabgp_conf_file.write(exabgp_conf)
    controller = self._get_controller()
    exabgp_cmd = faucet_mininet_test_util.timeout_cmd(
        'exabgp %s -d 2> %s > /dev/null &' % (
            exabgp_conf_file_name, exabgp_err), 600)
    exabgp_cli = 'env %s %s' % (exabgp_env, exabgp_cmd)
    controller.cmd(exabgp_cli)
    # The log file appearing is the signal that exabgp started.
    for _ in range(timeout):
        if os.path.exists(exabgp_log):
            return (exabgp_log, exabgp_err)
        time.sleep(1)
    self.fail('exabgp (%s) did not start' % exabgp_cli)
def wait_bgp_up(self, neighbor, vlan, exabgp_log, exabgp_err):
    """Wait for BGP to come up."""
    label_values = {
        'neighbor': neighbor,
        'vlan': vlan,
    }
    # A positive uptime for the neighbor means the session established.
    for _ in range(60):
        uptime = self.scrape_prometheus_var(
            'bgp_neighbor_uptime', label_values, default=0)
        if uptime > 0:
            return
        time.sleep(1)
    # On failure, include exabgp's own logs to aid debugging.
    exabgp_log_content = []
    for log_name in (exabgp_log, exabgp_err):
        if os.path.exists(log_name):
            with open(log_name) as log:
                exabgp_log_content.append(log.read())
    self.fail('exabgp did not peer with FAUCET: %s' % '\n'.join(exabgp_log_content))
def matching_lines_from_file(self, exp, log_name):
    """Return all lines in the named file matching regexp exp."""
    # The original had an unreachable 'return []' after this with block;
    # the return inside the with always executes first, so it was removed.
    with open(log_name) as log_file:
        return [log_line for log_line in log_file if re.search(exp, log_line)]
def exabgp_updates(self, exabgp_log):
    """Verify that exabgp process has received BGP updates."""
    controller = self._get_controller()
    # exabgp should have received our BGP updates
    for _ in range(60):
        updates = controller.cmd(
            r'grep UPDATE %s |grep -Eo "\S+ next-hop \S+"' % exabgp_log)
        if updates:
            return updates
        time.sleep(1)
    self.fail('exabgp did not receive BGP updates')
def wait_exabgp_sent_updates(self, exabgp_log_name):
    """Verify that exabgp process has sent BGP updates."""
    # '>> <n> UPDATE' in the log indicates an outbound UPDATE message.
    for _ in range(60):
        if self.matching_lines_from_file(r'>> [1-9]+[0-9]* UPDATE', exabgp_log_name):
            return
        time.sleep(1)
    self.fail('exabgp did not send BGP updates')
def ping_all_when_learned(self, retries=3):
    """Verify all hosts can ping each other once FAUCET has learned all."""
    # Cause hosts to send traffic that FAUCET can use to learn them.
    for _ in range(retries):
        loss = self.net.pingAll()
        # we should have learned all hosts now, so should have no loss.
        for host in self.net.hosts:
            self.require_host_learned(host)
        if loss == 0:
            return
    # Surface the last observed loss if no retry achieved zero loss.
    self.assertEqual(0, loss)
def wait_for_route_as_flow(self, nexthop, prefix, vlan_vid=None, timeout=10,
                           with_group_table=False, nonzero_packets=False):
    """Verify a route has been added as a flow.

    nexthop: MAC address expected as the rewritten eth_dst.
    prefix: ipaddress network object for the routed prefix.
    nonzero_packets: additionally require the flow to have forwarded packets.
    """
    exp_prefix = u'%s/%s' % (
        prefix.network_address, prefix.netmask)
    # FIB table and match field depend on the address family.
    if prefix.version == 6:
        nw_dst_match = {u'ipv6_dst': exp_prefix}
        table_id = self.IPV6_FIB_TABLE
    else:
        nw_dst_match = {u'nw_dst': exp_prefix}
        table_id = self.IPV4_FIB_TABLE
    nexthop_action = u'SET_FIELD: {eth_dst:%s}' % nexthop
    if vlan_vid is not None:
        # NOTE: unicode() builtin — this module targets Python 2.
        nw_dst_match[u'dl_vlan'] = unicode(vlan_vid)
    if with_group_table:
        # Routed output is indirected through a group; verify the group
        # entry rather than the flow's own actions.
        group_id = self.get_group_id_for_matching_flow(
            nw_dst_match)
        self.wait_matching_in_group_table(
            nexthop_action, group_id, timeout)
    else:
        if nonzero_packets:
            self.wait_nonzero_packet_count_flow(
                nw_dst_match, timeout=timeout, table_id=table_id,
                actions=[nexthop_action])
        else:
            self.wait_until_matching_flow(
                nw_dst_match, timeout=timeout, table_id=table_id,
                actions=[nexthop_action])
def host_ipv4_alias(self, host, alias_ip, intf=None):
    """Add an IPv4 alias address to a host."""
    if intf is None:
        intf = host.intf()
    # Remove any stale alias first; deletion output is ignored since the
    # alias may not exist yet.
    del_cmd = 'ip addr del %s dev %s' % (
        alias_ip.with_prefixlen, intf)
    add_cmd = 'ip addr add %s dev %s label %s:1' % (
        alias_ip.with_prefixlen, intf, intf)
    host.cmd(del_cmd)
    self.quiet_commands(host, (add_cmd,))
def _ip_neigh(self, host, ipa, ip_ver):
neighbors = host.cmd('ip -%u neighbor show %s' % (ip_ver, ipa))
neighbors_fields = neighbors.split()
if len(neighbors_fields) >= 5:
return neighbors.split()[4]
return None
def _verify_host_learned_mac(self, host, ipa, ip_ver, mac, retries):
    """Fail unless host's neighbor cache resolves ipa to mac."""
    for _ in range(retries):
        if self._ip_neigh(host, ipa, ip_ver) == mac:
            return
        time.sleep(1)
    self.fail(
        'could not verify %s resolved to %s' % (ipa, mac))
def verify_ipv4_host_learned_mac(self, host, ipa, mac, retries=3):
    """Verify host resolves IPv4 address ipa to the given MAC."""
    self._verify_host_learned_mac(host, ipa, 4, mac, retries)
def verify_ipv4_host_learned_host(self, host, learned_host):
    """Verify host has learned learned_host's IPv4/MAC binding."""
    # NOTE: unicode() builtin — this module targets Python 2.
    learned_ip = ipaddress.ip_interface(unicode(self.host_ipv4(learned_host)))
    self.verify_ipv4_host_learned_mac(host, learned_ip.ip, learned_host.MAC())
def verify_ipv6_host_learned_mac(self, host, ip6, mac, retries=3):
    """Verify host resolves IPv6 address ip6 to the given MAC."""
    self._verify_host_learned_mac(host, ip6, 6, mac, retries)
def verify_ipv6_host_learned_host(self, host, learned_host):
    """Verify host has learned learned_host's IPv6/MAC binding."""
    learned_ip6 = ipaddress.ip_interface(unicode(self.host_ipv6(learned_host)))
    self.verify_ipv6_host_learned_mac(host, learned_ip6.ip, learned_host.MAC())
def iperf_client(self, client_host, iperf_client_cmd):
    """Run the iperf client, returning the measured rate in mbps."""
    for _ in range(3):
        iperf_results = client_host.cmd(iperf_client_cmd)
        iperf_csv = iperf_results.strip().split(',')
        # A valid 'iperf -y c' CSV report has 9 fields; last is bits/sec.
        if len(iperf_csv) == 9:
            return int(iperf_csv[-1]) / self.ONEMBPS
        time.sleep(1)
    self.fail('%s: %s' % (iperf_client_cmd, iperf_results))
def iperf(self, client_host, server_host, server_ip, seconds):
    """Run an iperf transfer between two hosts, returning client mbps.

    Starts the server, waits for its listen banner, runs the client, then
    kills the server. Retries up to 3 times with a fresh port.
    """
    for _ in range(3):
        port = faucet_mininet_test_util.find_free_port(
            self.ports_sock, self._test_name())
        iperf_base_cmd = 'iperf -f M -p %u' % port
        if server_ip.version == 6:
            iperf_base_cmd += ' -V'
        iperf_server_cmd = '%s -s -B %s' % (iperf_base_cmd, server_ip)
        # Server self-terminates if the transfer never completes.
        iperf_server_cmd = faucet_mininet_test_util.timeout_cmd(
            iperf_server_cmd, (seconds * 3) + 5)
        iperf_client_cmd = faucet_mininet_test_util.timeout_cmd(
            '%s -y c -c %s -t %u' % (iperf_base_cmd, server_ip, seconds),
            seconds + 5)
        server_start_exp = r'Server listening on TCP port %u' % port
        server_out = server_host.popen(
            iperf_server_cmd,
            stdin=faucet_mininet_test_util.DEVNULL,
            stderr=subprocess.STDOUT,
            close_fds=True)
        popens = {server_host: server_out}
        lines = []
        # Watch server output for the listen banner before starting client.
        for host, line in pmonitor(popens):
            if host == server_host:
                lines.append(line)
                if re.search(server_start_exp, line):
                    self.wait_for_tcp_listen(
                        server_host, port, ipv=server_ip.version)
                    iperf_mbps = self.iperf_client(
                        client_host, iperf_client_cmd)
                    # SIGKILL the server now that the client is done.
                    self._signal_proc_on_port(server_host, port, 9)
                    return iperf_mbps
        time.sleep(1)
    self.fail('%s never started (%s, %s)' % (
        iperf_server_cmd, server_start_exp, ' '.join(lines)))
def verify_ipv4_routing(self, first_host, first_host_routed_ip,
                        second_host, second_host_routed_ip,
                        with_group_table=False):
    """Verify one host can IPV4 route to another via FAUCET."""
    self.host_ipv4_alias(first_host, first_host_routed_ip)
    self.host_ipv4_alias(second_host, second_host_routed_ip)
    # Route each host's routed subnet via the FAUCET VIP.
    self.add_host_route(
        first_host, second_host_routed_ip, self.FAUCET_VIPV4.ip)
    self.add_host_route(
        second_host, first_host_routed_ip, self.FAUCET_VIPV4.ip)
    # Trigger learning/resolution, then wait for FIB flows to appear.
    self.net.ping(hosts=(first_host, second_host))
    self.wait_for_route_as_flow(
        first_host.MAC(), first_host_routed_ip.network,
        with_group_table=with_group_table)
    self.wait_for_route_as_flow(
        second_host.MAC(), second_host_routed_ip.network,
        with_group_table=with_group_table)
    self.one_ipv4_ping(first_host, second_host_routed_ip.ip)
    self.one_ipv4_ping(second_host, first_host_routed_ip.ip)
    self.verify_ipv4_host_learned_host(first_host, second_host)
    self.verify_ipv4_host_learned_host(second_host, first_host)
    # verify at least 1M iperf
    for client_host, server_host, server_ip in (
            (first_host, second_host, second_host_routed_ip.ip),
            (second_host, first_host, first_host_routed_ip.ip)):
        iperf_mbps = self.iperf(
            client_host, server_host, server_ip, 5)
        error('%s: %u mbps to %s\n' % (self._test_name(), iperf_mbps, server_ip))
        self.assertGreater(iperf_mbps, 1)
    # verify packets matched routing flows
    self.wait_for_route_as_flow(
        first_host.MAC(), first_host_routed_ip.network,
        with_group_table=with_group_table,
        nonzero_packets=True)
    self.wait_for_route_as_flow(
        second_host.MAC(), second_host_routed_ip.network,
        with_group_table=with_group_table,
        nonzero_packets=True)
def verify_ipv4_routing_mesh(self, with_group_table=False):
    """Verify hosts can route to each other via FAUCET."""
    host_pair = self.net.hosts[:2]
    first_host, second_host = host_pair
    first_host_routed_ip = ipaddress.ip_interface(u'10.0.1.1/24')
    second_host_routed_ip = ipaddress.ip_interface(u'10.0.2.1/24')
    second_host_routed_ip2 = ipaddress.ip_interface(u'10.0.3.1/24')
    self.verify_ipv4_routing(
        first_host, first_host_routed_ip,
        second_host, second_host_routed_ip,
        with_group_table=with_group_table)
    self.verify_ipv4_routing(
        first_host, first_host_routed_ip,
        second_host, second_host_routed_ip2,
        with_group_table=with_group_table)
    # Swap MACs and re-verify to exercise relearning of moved hosts.
    self.swap_host_macs(first_host, second_host)
    self.verify_ipv4_routing(
        first_host, first_host_routed_ip,
        second_host, second_host_routed_ip,
        with_group_table=with_group_table)
    self.verify_ipv4_routing(
        first_host, first_host_routed_ip,
        second_host, second_host_routed_ip2,
        with_group_table=with_group_table)
def host_drop_all_ips(self, host):
    """Flush all IPv4 and IPv6 addresses from a host's default interface."""
    for ip_ver in (4, 6):
        host.cmd('ip -%u addr flush dev %s' % (ip_ver, host.defaultIntf()))
def setup_ipv6_hosts_addresses(self, first_host, first_host_ip,
                               first_host_routed_ip, second_host,
                               second_host_ip, second_host_routed_ip):
    """Configure host IPv6 addresses for testing."""
    # Start from a clean slate so stale addresses can't mask failures.
    for host in first_host, second_host:
        host.cmd('ip -6 addr flush dev %s' % host.intf())
    self.add_host_ipv6_address(first_host, first_host_ip)
    self.add_host_ipv6_address(second_host, second_host_ip)
    self.add_host_ipv6_address(first_host, first_host_routed_ip)
    self.add_host_ipv6_address(second_host, second_host_routed_ip)
    for host in first_host, second_host:
        self.require_host_learned(host)
def verify_ipv6_routing(self, first_host, first_host_ip,
                        first_host_routed_ip, second_host,
                        second_host_ip, second_host_routed_ip,
                        with_group_table=False):
    """Verify one host can IPV6 route to another via FAUCET."""
    # Directly-connected reachability first.
    self.one_ipv6_ping(first_host, second_host_ip.ip)
    self.one_ipv6_ping(second_host, first_host_ip.ip)
    # Route each host's routed subnet via the FAUCET VIP.
    self.add_host_route(
        first_host, second_host_routed_ip, self.FAUCET_VIPV6.ip)
    self.add_host_route(
        second_host, first_host_routed_ip, self.FAUCET_VIPV6.ip)
    self.wait_for_route_as_flow(
        first_host.MAC(), first_host_routed_ip.network,
        with_group_table=with_group_table)
    self.wait_for_route_as_flow(
        second_host.MAC(), second_host_routed_ip.network,
        with_group_table=with_group_table)
    self.one_ipv6_controller_ping(first_host)
    self.one_ipv6_controller_ping(second_host)
    self.one_ipv6_ping(first_host, second_host_routed_ip.ip)
    # verify at least 1M iperf
    for client_host, server_host, server_ip in (
            (first_host, second_host, second_host_routed_ip.ip),
            (second_host, first_host, first_host_routed_ip.ip)):
        iperf_mbps = self.iperf(
            client_host, server_host, server_ip, 5)
        error('%s: %u mbps to %s\n' % (self._test_name(), iperf_mbps, server_ip))
        self.assertGreater(iperf_mbps, 1)
    self.one_ipv6_ping(first_host, second_host_ip.ip)
    self.verify_ipv6_host_learned_mac(
        first_host, second_host_ip.ip, second_host.MAC())
    self.one_ipv6_ping(second_host, first_host_ip.ip)
    self.verify_ipv6_host_learned_mac(
        second_host, first_host_ip.ip, first_host.MAC())
def verify_ipv6_routing_pair(self, first_host, first_host_ip,
                             first_host_routed_ip, second_host,
                             second_host_ip, second_host_routed_ip,
                             with_group_table=False):
    """Verify hosts can route IPv6 to each other via FAUCET."""
    self.setup_ipv6_hosts_addresses(
        first_host, first_host_ip, first_host_routed_ip,
        second_host, second_host_ip, second_host_routed_ip)
    self.verify_ipv6_routing(
        first_host, first_host_ip, first_host_routed_ip,
        second_host, second_host_ip, second_host_routed_ip,
        with_group_table=with_group_table)
def verify_ipv6_routing_mesh(self, with_group_table=False):
    """Verify IPv6 routing between hosts and multiple subnets."""
    host_pair = self.net.hosts[:2]
    first_host, second_host = host_pair
    first_host_ip = ipaddress.ip_interface(u'fc00::1:1/112')
    second_host_ip = ipaddress.ip_interface(u'fc00::1:2/112')
    first_host_routed_ip = ipaddress.ip_interface(u'fc00::10:1/112')
    second_host_routed_ip = ipaddress.ip_interface(u'fc00::20:1/112')
    second_host_routed_ip2 = ipaddress.ip_interface(u'fc00::30:1/112')
    self.verify_ipv6_routing_pair(
        first_host, first_host_ip, first_host_routed_ip,
        second_host, second_host_ip, second_host_routed_ip,
        with_group_table=with_group_table)
    self.verify_ipv6_routing_pair(
        first_host, first_host_ip, first_host_routed_ip,
        second_host, second_host_ip, second_host_routed_ip2,
        with_group_table=with_group_table)
    # Swap MACs and re-verify to exercise relearning of moved hosts.
    self.swap_host_macs(first_host, second_host)
    self.verify_ipv6_routing_pair(
        first_host, first_host_ip, first_host_routed_ip,
        second_host, second_host_ip, second_host_routed_ip,
        with_group_table=with_group_table)
    self.verify_ipv6_routing_pair(
        first_host, first_host_ip, first_host_routed_ip,
        second_host, second_host_ip, second_host_routed_ip2,
        with_group_table=with_group_table)
def verify_invalid_bgp_route(self, pattern):
    """Check if we see the pattern in Faucet's log."""
    log_name = self.env['faucet']['FAUCET_LOG']
    matches = self.matching_lines_from_file(pattern, log_name)
    self.assertGreater(len(matches), 0, msg='%s not found' % pattern)
# dump OVS switch flows using nice ofctl utility.
#!/usr/bin/env python
"""Base class for all FAUCET unit tests."""
# pylint: disable=missing-docstring
# pylint: disable=too-many-arguments
import collections
import glob
import ipaddress
import json
import os
import random
import re
import shutil
import subprocess
import time
import unittest
import yaml
import requests
from requests.exceptions import ConnectionError
# pylint: disable=import-error
from mininet.log import error, output
from mininet.net import Mininet
from mininet.node import Intf
from mininet.util import dumpNodeConnections, pmonitor
from ryu.ofproto import ofproto_v1_3 as ofp
import faucet_mininet_test_util
import faucet_mininet_test_topo
class FaucetTestBase(unittest.TestCase):
"""Base class for all FAUCET unit tests."""
ONE_GOOD_PING = '1 packets transmitted, 1 received, 0% packet loss'
FAUCET_VIPV4 = ipaddress.ip_interface(u'10.0.0.254/24')
FAUCET_VIPV4_2 = ipaddress.ip_interface(u'172.16.0.254/24')
FAUCET_VIPV6 = ipaddress.ip_interface(u'fc00::1:254/64')
FAUCET_VIPV6_2 = ipaddress.ip_interface(u'fc01::1:254/64')
OFCTL = 'ovs-ofctl -OOpenFlow13'
BOGUS_MAC = '01:02:03:04:05:06'
FAUCET_MAC = '0e:00:00:00:00:01'
LADVD = 'ladvd -e lo -f'
ONEMBPS = (1024 * 1024)
DB_TIMEOUT = 5
CONFIG = ''
CONFIG_GLOBAL = ''
GAUGE_CONFIG_DBS = ''
N_UNTAGGED = 0
N_TAGGED = 0
NUM_DPS = 1
LINKS_PER_HOST = 1
RUN_GAUGE = True
REQUIRES_METERS = False
PORT_ACL_TABLE = 0
VLAN_TABLE = 1
VLAN_ACL_TABLE = 2
ETH_SRC_TABLE = 3
IPV4_FIB_TABLE = 4
IPV6_FIB_TABLE = 5
VIP_TABLE = 6
FLOOD_TABLE = 8
ETH_DST_TABLE = 7
config = None
dpid = None
hardware = 'Open vSwitch'
hw_switch = False
gauge_controller = None
gauge_of_port = None
prom_port = None
net = None
of_port = None
ctl_privkey = None
ctl_cert = None
ca_certs = None
port_map = {'port_1': 1, 'port_2': 2, 'port_3': 3, 'port_4': 4}
switch_map = {}
tmpdir = None
net = None
topo = None
cpn_intf = None
config_ports = {}
env = collections.defaultdict(dict)
rand_dpids = set()
def __init__(self, name, config, root_tmpdir, ports_sock, max_test_load):
super(FaucetTestBase, self).__init__(name)
self.config = config
self.root_tmpdir = root_tmpdir
self.ports_sock = ports_sock
self.max_test_load = max_test_load
def rand_dpid(self):
    """Return a random, not previously used, DPID as a string."""
    reserved_range = 100
    while True:
        # Skip the low reserved range of DPID values.
        candidate = random.randint(1, (2**32 - reserved_range)) + reserved_range
        if candidate not in self.rand_dpids:
            self.rand_dpids.add(candidate)
            return str(candidate)
def _set_var(self, controller, var, value):
self.env[controller][var] = value
def _set_var_path(self, controller, var, path):
self._set_var(controller, var, os.path.join(self.tmpdir, path))
def _set_prom_port(self, name='faucet'):
self._set_var(name, 'FAUCET_PROMETHEUS_PORT', str(self.prom_port))
self._set_var(name, 'FAUCET_PROMETHEUS_ADDR', faucet_mininet_test_util.LOCALHOST)
def _set_static_vars(self):
self._set_var_path('faucet', 'FAUCET_CONFIG', 'faucet.yaml')
self._set_var_path('faucet', 'FAUCET_LOG', 'faucet.log')
self._set_var_path('faucet', 'FAUCET_EXCEPTION_LOG', 'faucet-exception.log')
self._set_var_path('gauge', 'GAUGE_CONFIG', 'gauge.yaml')
self._set_var_path('gauge', 'GAUGE_LOG', 'gauge.log')
self._set_var_path('gauge', 'GAUGE_EXCEPTION_LOG', 'gauge-exception.log')
self.faucet_config_path = self.env['faucet']['FAUCET_CONFIG']
self.gauge_config_path = self.env['gauge']['GAUGE_CONFIG']
self.debug_log_path = os.path.join(
self.tmpdir, 'ofchannel.txt')
self.monitor_stats_file = os.path.join(
self.tmpdir, 'ports.txt')
self.monitor_state_file = os.path.join(
self.tmpdir, 'state.txt')
self.monitor_flow_table_file = os.path.join(
self.tmpdir, 'flow.txt')
if self.config is not None:
if 'hw_switch' in self.config:
self.hw_switch = self.config['hw_switch']
if self.hw_switch:
self.dpid = self.config['dpid']
self.cpn_intf = self.config['cpn_intf']
self.hardware = self.config['hardware']
if 'ctl_privkey' in self.config:
self.ctl_privkey = self.config['ctl_privkey']
if 'ctl_cert' in self.config:
self.ctl_cert = self.config['ctl_cert']
if 'ca_certs' in self.config:
self.ca_certs = self.config['ca_certs']
dp_ports = self.config['dp_ports']
self.port_map = {}
self.switch_map = {}
for i, switch_port in enumerate(dp_ports):
test_port_name = 'port_%u' % (i + 1)
self.port_map[test_port_name] = switch_port
self.switch_map[test_port_name] = dp_ports[switch_port]
def _set_vars(self):
self._set_prom_port()
def _write_faucet_config(self):
faucet_config = '\n'.join((
self.get_config_header(
self.CONFIG_GLOBAL, self.debug_log_path, self.dpid, self.hardware),
self.CONFIG % self.port_map))
if self.config_ports:
faucet_config = faucet_config % self.config_ports
with open(self.faucet_config_path, 'w') as faucet_config_file:
faucet_config_file.write(faucet_config)
def _write_gauge_config(self):
gauge_config = self.get_gauge_config(
self.faucet_config_path,
self.monitor_stats_file,
self.monitor_state_file,
self.monitor_flow_table_file)
if self.config_ports:
gauge_config = gauge_config % self.config_ports
with open(self.gauge_config_path, 'w') as gauge_config_file:
gauge_config_file.write(gauge_config)
def _test_name(self):
return faucet_mininet_test_util.flat_test_name(self.id())
def _tmpdir_name(self):
tmpdir = os.path.join(self.root_tmpdir, self._test_name())
os.mkdir(tmpdir)
return tmpdir
def _controller_lognames(self):
lognames = []
for controller in self.net.controllers:
logname = controller.logname()
if os.path.exists(logname) and os.path.getsize(logname) > 0:
lognames.append(logname)
return lognames
def _wait_load(self, load_retries=120):
for _ in range(load_retries):
load = os.getloadavg()[0]
time.sleep(random.randint(1, 7))
if load < self.max_test_load:
return
output('load average too high %f, waiting' % load)
self.fail('load average %f consistently too high' % load)
def _allocate_config_ports(self):
    """Allocate a free port for each config port referenced by the configs."""
    for port_name in list(self.config_ports.keys()):
        self.config_ports[port_name] = None
        for config in (self.CONFIG, self.CONFIG_GLOBAL, self.GAUGE_CONFIG_DBS):
            if re.search(port_name, config):
                port = faucet_mininet_test_util.find_free_port(
                    self.ports_sock, self._test_name())
                self.config_ports[port_name] = port
                output('allocating port %u for %s' % (port, port_name))
                # Stop after the first matching config: allocating again
                # would overwrite (and leak) the port just reserved.
                break
def _allocate_faucet_ports(self):
if self.hw_switch:
self.of_port = self.config['of_port']
else:
self.of_port = faucet_mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
self.prom_port = faucet_mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
def _allocate_gauge_ports(self):
if self.hw_switch:
self.gauge_of_port = self.config['gauge_of_port']
else:
self.gauge_of_port = faucet_mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
def setUp(self):
self.tmpdir = self._tmpdir_name()
self._set_static_vars()
if self.hw_switch:
self.topo_class = faucet_mininet_test_topo.FaucetHwSwitchTopo
self.dpid = faucet_mininet_test_util.str_int_dpid(self.dpid)
else:
self.topo_class = faucet_mininet_test_topo.FaucetSwitchTopo
self.dpid = self.rand_dpid()
    def tearDown(self):
        """Clean up after a test."""
        # snapshot prometheus state and per-switch flow dumps for post-mortem.
        with open(os.path.join(self.tmpdir, 'prometheus.log'), 'w') as prom_log:
            prom_log.write(self.scrape_prometheus())
        switch_names = []
        for switch in self.net.switches:
            switch_dump_name = os.path.join(self.tmpdir, '%s-dumpflows.txt' % switch.name)
            switch_names.append(switch.name)
            switch.cmd('%s dump-flows %s > %s' % (self.OFCTL, switch.name, switch_dump_name))
        if self.net is not None:
            self.net.stop()
        self.net = None
        faucet_mininet_test_util.return_free_ports(
            self.ports_sock, self._test_name())
        # copy any OVS log lines mentioning our switches into the test tmpdir.
        if 'OVS_LOGDIR' in os.environ:
            ovs_log_dir = os.environ['OVS_LOGDIR']
            if ovs_log_dir and os.path.exists(ovs_log_dir):
                for ovs_log in glob.glob(os.path.join(ovs_log_dir, '*.log')):
                    lines = []
                    for name in switch_names:
                        lines.extend(self.matching_lines_from_file(name, ovs_log))
                    if lines:
                        switch_ovs_log_name = os.path.join(self.tmpdir, os.path.basename(ovs_log))
                        with open(switch_ovs_log_name, 'w') as switch_ovs_log:
                            switch_ovs_log.write('\n'.join(lines))
        # must not be any controller exception.
        self.verify_no_exception(self.env['faucet']['FAUCET_EXCEPTION_LOG'])
        for _, debug_log_name in self._get_ofchannel_logs():
            with open(debug_log_name) as debug_log:
                self.assertFalse(
                    re.search('OFPErrorMsg', debug_log.read()),
                    msg='debug log has OFPErrorMsgs')
    def _attach_physical_switch(self):
        """Bridge a physical switch into test topology."""
        switch = self.net.switches[0]
        # physical test ports get OF port numbers above the virtual ports.
        mapped_base = max(len(self.switch_map), len(self.port_map))
        for i, test_host_port in enumerate(sorted(self.switch_map)):
            port_i = i + 1
            mapped_port_i = mapped_base + port_i
            phys_port = Intf(self.switch_map[test_host_port], node=switch)
            switch.cmd('ip link set dev %s up' % phys_port)
            switch.cmd(
                ('ovs-vsctl add-port %s %s -- '
                 'set Interface %s ofport_request=%u') % (
                     switch.name,
                     phys_port.name,
                     phys_port.name,
                     mapped_port_i))
            # cross-connect traffic both ways between virtual and physical port.
            for port_pair in ((port_i, mapped_port_i), (mapped_port_i, port_i)):
                port_x, port_y = port_pair
                switch.cmd('%s add-flow %s in_port=%u,actions=output:%u' % (
                    self.OFCTL, switch.name, port_x, port_y))
    def start_net(self):
        """Start Mininet network."""
        controller_intf = 'lo'
        if self.hw_switch:
            controller_intf = self.cpn_intf
        self._start_faucet(controller_intf)
        self.pre_start_net()
        if self.hw_switch:
            self._attach_physical_switch()
        self._wait_debug_log()
        for port_no in self._dp_ports():
            self.set_port_up(port_no, wait=False)
        dumpNodeConnections(self.net.hosts)
        self.reset_all_ipv4_prefix(prefix=24)
    def _get_controller(self):
        """Return first controller."""
        return self.net.controllers[0]
    def _start_gauge_check(self):
        """Hook for subclasses; return error text if Gauge failed to start, else None."""
        return None
    def _start_check(self):
        """Verify controllers are up and listening; return error text or None."""
        if not self._wait_controllers_healthy():
            return 'not all controllers healthy'
        if not self._wait_controllers_connected():
            return 'not all controllers connected to switch'
        if not self._wait_ofctl_up():
            return 'ofctl not up'
        if not self.wait_dp_status(1):
            return 'prometheus port not up'
        if self.config_ports:
            for port_name, port in list(self.config_ports.items()):
                if port is not None and not port_name.startswith('gauge'):
                    if not self._get_controller().listen_port(port):
                        return 'faucet not listening on %u (%s)' % (
                            port, port_name)
        return self._start_gauge_check()
    def _start_faucet(self, controller_intf):
        """Build and start the Mininet network with FAUCET (and optionally Gauge).

        Retries up to 3 times with freshly allocated ports; fails the test
        with the last startup error if all attempts fail.
        """
        last_error_txt = ''
        for _ in range(3):
            faucet_mininet_test_util.return_free_ports(
                self.ports_sock, self._test_name())
            self._allocate_config_ports()
            self._allocate_faucet_ports()
            self._set_vars()
            self._write_faucet_config()
            self.net = Mininet(
                self.topo, controller=faucet_mininet_test_topo.FAUCET(
                    name='faucet', tmpdir=self.tmpdir,
                    controller_intf=controller_intf,
                    env=self.env['faucet'],
                    ctl_privkey=self.ctl_privkey,
                    ctl_cert=self.ctl_cert,
                    ca_certs=self.ca_certs,
                    ports_sock=self.ports_sock,
                    port=self.of_port,
                    test_name=self._test_name()))
            if self.RUN_GAUGE:
                self._allocate_gauge_ports()
                self._write_gauge_config()
                self.gauge_controller = faucet_mininet_test_topo.Gauge(
                    name='gauge', tmpdir=self.tmpdir,
                    env=self.env['gauge'],
                    controller_intf=controller_intf,
                    ctl_privkey=self.ctl_privkey,
                    ctl_cert=self.ctl_cert,
                    ca_certs=self.ca_certs,
                    port=self.gauge_of_port)
                self.net.addController(self.gauge_controller)
            self.net.start()
            self._wait_load()
            last_error_txt = self._start_check()
            if last_error_txt is None:
                # started OK: learn table IDs from prometheus and proceed.
                self._config_tableids()
                self._wait_load()
                return
            self.net.stop()
            last_error_txt += '\n\n' + self._dump_controller_logs()
            error('%s: %s' % (self._test_name(), last_error_txt))
            # let allocated TCP ports age out before retrying.
            time.sleep(faucet_mininet_test_util.MIN_PORT_AGE)
        self.fail(last_error_txt)
    def _ofctl_rest_url(self, req):
        """Return control URL for Ryu ofctl module."""
        return 'http://%s:%u/%s' % (
            faucet_mininet_test_util.LOCALHOST, self._get_controller().ofctl_port, req)
def _ofctl(self, req):
try:
ofctl_result = requests.get(req).text
except ConnectionError:
return None
return ofctl_result
    def _ofctl_up(self):
        """Return truthy if ofctl REST reports a non-empty switch list."""
        switches = self._ofctl(self._ofctl_rest_url('stats/switches'))
        return switches is not None and re.search(r'^\[[^\]]+\]$', switches)
def _wait_ofctl_up(self, timeout=10):
for _ in range(timeout):
if self._ofctl_up():
return True
time.sleep(1)
return False
    def _ofctl_get(self, int_dpid, req, timeout):
        """Fetch an ofctl stats request and return this DPID's entries.

        Each entry is re-serialized to a JSON string; retries once per second
        until valid JSON is returned or timeout expires (then returns []).
        """
        for _ in range(timeout):
            ofctl_result = self._ofctl(self._ofctl_rest_url(req))
            try:
                ofmsgs = json.loads(ofctl_result)[int_dpid]
                return [json.dumps(ofmsg) for ofmsg in ofmsgs]
            except ValueError:
                # Didn't get valid JSON, try again
                time.sleep(1)
                continue
        return []
    def _curl_portmod(self, int_dpid, port_no, config, mask):
        """Use curl to send a portmod command via the ofctl module."""
        curl_format = ' '.join((
            'curl -X POST -d',
            '\'{"dpid": %s, "port_no": %u, "config": %u, "mask": %u}\'',
            self._ofctl_rest_url('stats/portdesc/modify')))
        return curl_format % (int_dpid, port_no, config, mask)
    def _signal_proc_on_port(self, host, port, signal):
        """Send signal to the process listening on host's TCP port; return match if killed."""
        tcp_pattern = '%s/tcp' % port
        fuser_out = host.cmd('fuser %s -k -%u' % (tcp_pattern, signal))
        return re.search(r'%s:\s+\d+' % tcp_pattern, fuser_out)
def _get_ofchannel_logs(self):
with open(self.env['faucet']['FAUCET_CONFIG']) as config_file:
config = yaml.load(config_file)
ofchannel_logs = []
for dp_name, dp_config in config['dps'].items():
if 'ofchannel_log' in dp_config:
debug_log = dp_config['ofchannel_log']
ofchannel_logs.append((dp_name, debug_log))
return ofchannel_logs
    def _dump_controller_logs(self):
        """Return concatenated text of each controller's log files in tmpdir."""
        dump_txt = ''
        test_logs = glob.glob(os.path.join(self.tmpdir, '*.log'))
        for controller in self.net.controllers:
            for test_log_name in test_logs:
                basename = os.path.basename(test_log_name)
                if basename.startswith(controller.name):
                    with open(test_log_name) as test_log:
                        dump_txt += '\n'.join((
                            '',
                            basename,
                            '=' * len(basename),
                            '',
                            test_log.read()))
                    break
        return dump_txt
def _controllers_healthy(self):
for controller in self.net.controllers:
if not controller.healthy():
return False
return True
def _controllers_connected(self):
for controller in self.net.controllers:
if not controller.connected():
return False
return True
    def _wait_controllers_healthy(self, timeout=30):
        """Poll up to timeout seconds for all controllers to be healthy."""
        for _ in range(timeout):
            if self._controllers_healthy():
                return True
            time.sleep(1)
        return False
    def _wait_controllers_connected(self, timeout=30):
        """Poll up to timeout seconds for all controllers to be connected."""
        for _ in range(timeout):
            if self._controllers_connected():
                return True
            time.sleep(1)
        return False
def _wait_debug_log(self):
"""Require all switches to have exchanged flows with controller."""
ofchannel_logs = self._get_ofchannel_logs()
for _, debug_log in ofchannel_logs:
for _ in range(60):
if (os.path.exists(debug_log) and
os.path.getsize(debug_log) > 0):
return True
time.sleep(1)
return False
    def verify_no_exception(self, exception_log_name):
        """Fail the test if a controller exception log exists and is non-empty."""
        if not os.path.exists(exception_log_name):
            return
        with open(exception_log_name) as exception_log:
            exception_contents = exception_log.read()
            self.assertEqual(
                '',
                exception_contents,
                msg='%s log contains %s' % (
                    exception_log_name, exception_contents))
    def tcpdump_helper(self, tcpdump_host, tcpdump_filter, funcs=None,
                       vflags='-v', timeout=10, packets=2, root_intf=False):
        """Run tcpdump on a host, trigger funcs once capture starts, return output.

        funcs is a list of zero-argument callables executed only after
        tcpdump reports it is listening, so their traffic is captured.
        root_intf strips any VLAN subinterface suffix from the capture intf.
        """
        intf = tcpdump_host.intf().name
        if root_intf:
            intf = intf.split('.')[0]
        tcpdump_cmd = faucet_mininet_test_util.timeout_soft_cmd(
            'tcpdump -i %s -e -n -U %s -c %u %s' % (
                intf, vflags, packets, tcpdump_filter),
            timeout)
        tcpdump_out = tcpdump_host.popen(
            tcpdump_cmd,
            stdin=faucet_mininet_test_util.DEVNULL,
            stderr=subprocess.STDOUT,
            close_fds=True)
        popens = {tcpdump_host: tcpdump_out}
        tcpdump_started = False
        tcpdump_txt = ''
        for host, line in pmonitor(popens):
            if host == tcpdump_host:
                if tcpdump_started:
                    tcpdump_txt += line.strip()
                elif re.search('tcpdump: listening on ', line):
                    # when we see tcpdump start, then call provided functions.
                    tcpdump_started = True
                    if funcs is not None:
                        for func in funcs:
                            func()
            else:
                error('tcpdump_helper: %s' % line)
        self.assertTrue(tcpdump_started, msg='%s did not start' % tcpdump_cmd)
        return tcpdump_txt
    def pre_start_net(self):
        """Hook called after Mininet initialization, before Mininet started."""
        return
    def get_config_header(self, config_global, debug_log, dpid, hardware):
        """Build v2 FAUCET config header."""
        return """
%s
dps:
    faucet-1:
        ofchannel_log: %s
        dp_id: 0x%x
        hardware: "%s"
""" % (config_global, debug_log, int(dpid), hardware)
    def get_gauge_watcher_config(self):
        """Build Gauge watchers config fragment (port stats/state, flow table)."""
        return """
    port_stats:
        dps: ['faucet-1']
        type: 'port_stats'
        interval: 5
        db: 'stats_file'
    port_state:
        dps: ['faucet-1']
        type: 'port_state'
        interval: 5
        db: 'state_file'
    flow_table:
        dps: ['faucet-1']
        type: 'flow_table'
        interval: 5
        db: 'flow_file'
"""
    def get_gauge_config(self, faucet_config_file,
                         monitor_stats_file,
                         monitor_state_file,
                         monitor_flow_table_file):
        """Build Gauge config."""
        return """
faucet_configs:
    - %s
watchers:
    %s
dbs:
    stats_file:
        type: 'text'
        file: %s
    state_file:
        type: 'text'
        file: %s
    flow_file:
        type: 'text'
        file: %s
    couchdb:
        type: gaugedb
        gdb_type: nosql
        nosql_db: couch
        db_username: couch
        db_password: 123
        db_ip: 'localhost'
        db_port: 5001
        driver: 'couchdb'
        views:
            switch_view: '_design/switches/_view/switch'
            match_view: '_design/flows/_view/match'
            tag_view: '_design/tags/_view/tags'
        switches_doc: 'switches_bak'
        flows_doc: 'flows_bak'
        db_update_counter: 2
%s
""" % (faucet_config_file,
       self.get_gauge_watcher_config(),
       monitor_stats_file,
       monitor_state_file,
       monitor_flow_table_file,
       self.GAUGE_CONFIG_DBS)
    def get_exabgp_conf(self, peer, peer_config=''):
        """Build exabgp neighbor config; %(bgp_port)d is substituted later."""
        return """
    neighbor %s {
        router-id 2.2.2.2;
        local-address %s;
        connect %s;
        peer-as 1;
        local-as 2;
        %s
    }
""" % (peer, peer, '%(bgp_port)d', peer_config)
    def get_all_groups_desc_from_dpid(self, dpid, timeout=2):
        """Return all group descriptions from DPID as JSON strings."""
        int_dpid = faucet_mininet_test_util.str_int_dpid(dpid)
        return self._ofctl_get(
            int_dpid, 'stats/groupdesc/%s' % int_dpid, timeout)
    def get_all_flows_from_dpid(self, dpid, timeout=10):
        """Return all flows from DPID."""
        int_dpid = faucet_mininet_test_util.str_int_dpid(dpid)
        return self._ofctl_get(
            int_dpid, 'stats/flow/%s' % int_dpid, timeout)
def _port_stat(self, port_stats, port):
if port_stats:
for port_stat in port_stats:
port_stat = json.loads(port_stat)
if port_stat['port_no'] == port:
return port_stat
return None
    def get_port_stats_from_dpid(self, dpid, port, timeout=2):
        """Return port stats for a port."""
        int_dpid = faucet_mininet_test_util.str_int_dpid(dpid)
        port_stats = self._ofctl_get(
            int_dpid, 'stats/port/%s' % int_dpid, timeout)
        return self._port_stat(port_stats, port)
    def get_port_desc_from_dpid(self, dpid, port, timeout=2):
        """Return port desc for a port."""
        int_dpid = faucet_mininet_test_util.str_int_dpid(dpid)
        port_stats = self._ofctl_get(
            int_dpid, 'stats/portdesc/%s' % int_dpid, timeout)
        return self._port_stat(port_stats, port)
    def wait_matching_in_group_table(self, action, group_id, timeout=10):
        """Wait for group group_id whose first bucket includes action.

        Also writes all decoded group descriptions to a dump file in tmpdir
        for post-mortem; returns True when found, False on timeout.
        """
        groupdump = os.path.join(self.tmpdir, 'groupdump-%s.txt' % self.dpid)
        for _ in range(timeout):
            group_dump = self.get_all_groups_desc_from_dpid(self.dpid, 1)
            with open(groupdump, 'w') as groupdump_file:
                for group_desc in group_dump:
                    group_dict = json.loads(group_desc)
                    groupdump_file.write(str(group_dict) + '\n')
                    if group_dict['group_id'] == group_id:
                        actions = set(group_dict['buckets'][0]['actions'])
                        if set([action]).issubset(actions):
                            return True
            time.sleep(1)
        return False
    def get_matching_flows_on_dpid(self, dpid, match, timeout=10, table_id=None,
                                   actions=None, match_exact=False):
        """Return flows on DPID matching the given criteria, retrying until timeout.

        match is a dict of OF match fields (subset match unless match_exact);
        actions, if given, must be a subset of the flow's actions. All decoded
        flows are also written to a dump file in tmpdir for post-mortem.
        """
        flowdump = os.path.join(self.tmpdir, 'flowdump-%s.txt' % dpid)
        with open(flowdump, 'w') as flowdump_file:
            for _ in range(timeout):
                flow_dicts = []
                flow_dump = self.get_all_flows_from_dpid(dpid)
                for flow in flow_dump:
                    flow_dict = json.loads(flow)
                    flowdump_file.write(str(flow_dict) + '\n')
                    if (table_id is not None and
                            flow_dict['table_id'] != table_id):
                        continue
                    if actions is not None:
                        if not set(actions).issubset(set(flow_dict['actions'])):
                            continue
                    if match is not None:
                        if match_exact:
                            if match.items() != flow_dict['match'].items():
                                continue
                        elif not set(match.items()).issubset(set(flow_dict['match'].items())):
                            continue
                    flow_dicts.append(flow_dict)
                if flow_dicts:
                    return flow_dicts
                time.sleep(1)
        return flow_dicts
def get_matching_flow_on_dpid(self, dpid, match, timeout=10, table_id=None,
actions=None, match_exact=None):
flow_dicts = self.get_matching_flows_on_dpid(
dpid, match, timeout=timeout, table_id=table_id,
actions=actions, match_exact=match_exact)
if flow_dicts:
return flow_dicts[0]
return []
    def get_matching_flow(self, match, timeout=10, table_id=None,
                          actions=None, match_exact=None):
        """Return first matching flow on the default DPID, or []."""
        return self.get_matching_flow_on_dpid(
            self.dpid, match, timeout=timeout, table_id=table_id,
            actions=actions, match_exact=match_exact)
    def get_group_id_for_matching_flow(self, match, timeout=10, table_id=None):
        """Return group_id from the GROUP action of a matching flow; fail if none."""
        for _ in range(timeout):
            flow_dict = self.get_matching_flow(
                match, timeout=timeout, table_id=table_id)
            if flow_dict:
                for action in flow_dict['actions']:
                    if action.startswith('GROUP'):
                        # action is of the form 'GROUP:<id>'.
                        _, group_id = action.split(':')
                        return int(group_id)
            time.sleep(1)
        self.fail(
            'Cannot find group_id for matching flow %s' % match)
def matching_flow_present_on_dpid(self, dpid, match, timeout=10, table_id=None,
actions=None, match_exact=None):
"""Return True if matching flow is present on a DPID."""
if self.get_matching_flow_on_dpid(
dpid, match, timeout=timeout, table_id=table_id,
actions=actions, match_exact=match_exact):
return True
return False
    def matching_flow_present(self, match, timeout=10, table_id=None,
                              actions=None, match_exact=None):
        """Return True if matching flow is present on default DPID."""
        return self.matching_flow_present_on_dpid(
            self.dpid, match, timeout=timeout, table_id=table_id,
            actions=actions, match_exact=match_exact)
    def wait_until_matching_flow(self, match, timeout=10, table_id=None,
                                 actions=None, match_exact=False):
        """Wait (require) for flow to be present on default DPID."""
        self.assertTrue(
            self.matching_flow_present(
                match, timeout=timeout, table_id=table_id,
                actions=actions, match_exact=match_exact),
            msg=match)
    def wait_until_controller_flow(self):
        """Require a flow that outputs to the controller on the default DPID."""
        self.wait_until_matching_flow(None, actions=[u'OUTPUT:CONTROLLER'])
    def mac_learned(self, mac, timeout=10, in_port=None):
        """Return True if a MAC has been learned on default DPID."""
        # learned means both a source-learn and a destination flow exist.
        for eth_field, table_id in (
                (u'dl_src', self.ETH_SRC_TABLE),
                (u'dl_dst', self.ETH_DST_TABLE)):
            match = {eth_field: u'%s' % mac}
            if in_port is not None and table_id == self.ETH_SRC_TABLE:
                match[u'in_port'] = in_port
            if not self.matching_flow_present(
                    match, timeout=timeout, table_id=table_id):
                return False
        return True
def mac_as_int(self, mac):
return long(mac.replace(':', ''), 16)
def mac_from_int(self, mac_int):
mac_int_str = '%012x' % long(mac_int)
return ':'.join([x.encode('hex') for x in str(mac_int_str).decode('hex')])
    def prom_macs_learned(self, port=None, vlan=None):
        """Return MACs learned per prometheus, optionally filtered by port/vlan."""
        # default label patterns match any value; narrow if filters given.
        labels = {
            'n': r'\d+',
            'port': r'\d+',
            'vlan': r'\d+',
        }
        if port:
            labels['port'] = str(port)
        if vlan:
            labels['vlan'] = str(vlan)
        port_learned_macs_prom = self.scrape_prometheus_var(
            'learned_macs', labels=labels, default=[], multiple=True, dpid=True)
        macs = [self.mac_from_int(mac_int) for _, mac_int in port_learned_macs_prom if mac_int]
        return macs
    def prom_mac_learned(self, mac, port=None, vlan=None):
        """Return True if prometheus reports mac learned (optionally on port/vlan)."""
        return mac in self.prom_macs_learned(port=port, vlan=vlan)
    def host_learned(self, host, timeout=10, in_port=None):
        """Return True if a host has been learned on default DPID."""
        return self.mac_learned(host.MAC(), timeout, in_port)
    def get_host_intf_mac(self, host, intf):
        """Return the MAC address of a named interface on a host."""
        return host.cmd('cat /sys/class/net/%s/address' % intf).strip()
    def host_ip(self, host, family, family_re):
        """Return first address/prefix of family on host's default interface."""
        host_ip_cmd = (
            r'ip -o -f %s addr show %s|'
            'grep -m 1 -Eo "%s %s"|cut -f2 -d " "' % (
                family,
                host.defaultIntf(),
                family,
                family_re))
        return host.cmd(host_ip_cmd).strip()
    def host_ipv4(self, host):
        """Return first IPv4/netmask for host's default interface."""
        return self.host_ip(host, 'inet', r'[0-9\\.]+\/[0-9]+')
    def host_ipv6(self, host):
        """Return first IPv6/netmask for host's default interface."""
        return self.host_ip(host, 'inet6', r'[0-9a-f\:]+\/[0-9]+')
    def reset_ipv4_prefix(self, host, prefix=24):
        """Re-set the host's IPv4 address with the given prefix length."""
        host.setIP(host.IP(), prefixLen=prefix)
    def reset_all_ipv4_prefix(self, prefix=24):
        """Re-set the IPv4 prefix length on every host in the network."""
        for host in self.net.hosts:
            self.reset_ipv4_prefix(host, prefix)
    def require_host_learned(self, host, retries=8, in_port=None):
        """Require a host be learned on default DPID."""
        host_ip_net = self.host_ipv4(host)
        if not host_ip_net:
            host_ip_net = self.host_ipv6(host)
        broadcast = ipaddress.ip_interface(
            unicode(host_ip_net)).network.broadcast_address
        broadcast_str = str(broadcast)
        packets = 1
        if broadcast.version == 4:
            ping_cmd = 'ping -b'
        if broadcast.version == 6:
            # IPv6 has no broadcast; use the all-nodes multicast address.
            ping_cmd = 'ping6'
            broadcast_str = 'ff02::1'
        # stimulate host learning with a broadcast ping
        ping_cli = faucet_mininet_test_util.timeout_cmd(
            '%s -I%s -W1 -c%u %s' % (
                ping_cmd, host.defaultIntf().name, packets, broadcast_str), 3)
        for _ in range(retries):
            if self.host_learned(host, timeout=1, in_port=in_port):
                return
            ping_result = host.cmd(ping_cli)
            self.assertTrue(re.search(
                r'%u packets transmitted' % packets, ping_result), msg='%s: %s' % (
                    ping_cli, ping_result))
        self.fail('host %s (%s) could not be learned (%s: %s)' % (
            host, host.MAC(), ping_cli, ping_result))
    def get_prom_port(self):
        """Return FAUCET's prometheus port from the controller environment."""
        return int(self.env['faucet']['FAUCET_PROMETHEUS_PORT'])
    def get_prom_addr(self):
        """Return FAUCET's prometheus listen address from the controller environment."""
        return self.env['faucet']['FAUCET_PROMETHEUS_ADDR']
    def _prometheus_url(self, controller):
        """Return prometheus URL for 'faucet' or 'gauge' (None otherwise)."""
        if controller == 'faucet':
            return 'http://%s:%u' % (
                self.get_prom_addr(), self.get_prom_port())
        elif controller == 'gauge':
            return 'http://%s:%u' % (
                self.get_prom_addr(), self.config_ports['gauge_prom_port'])
def scrape_prometheus(self, controller='faucet'):
url = self._prometheus_url(controller)
try:
prom_lines = requests.get(url).text.split('\n')
except ConnectionError:
return ''
prom_vars = []
for prom_line in prom_lines:
if not prom_line.startswith('#'):
prom_vars.append(prom_line)
return '\n'.join(prom_vars)
def scrape_prometheus_var(self, var, labels=None, any_labels=False, default=None,
dpid=True, multiple=False, controller='faucet', retries=1):
label_values_re = r''
if any_labels:
label_values_re = r'\{[^\}]+\}'
else:
if labels is None:
labels = {}
if dpid:
labels.update({'dp_id': '0x%x' % long(self.dpid)})
if labels:
label_values = []
for label, value in sorted(list(labels.items())):
label_values.append('%s="%s"' % (label, value))
label_values_re = r'\{%s\}' % r'\S+'.join(label_values)
var_re = r'^%s%s$' % (var, label_values_re)
for _ in range(retries):
results = []
prom_lines = self.scrape_prometheus(controller)
for prom_line in prom_lines.splitlines():
prom_var_data = prom_line.split(' ')
self.assertEqual(
2, len(prom_var_data),
msg='Invalid prometheus line in %s' % prom_lines)
prom_var, value = prom_var_data
if prom_var.startswith(var):
var_match = re.search(var_re, prom_var)
if var_match:
value_int = long(float(value))
results.append((var, value_int))
if not multiple:
break
if results:
if multiple:
return results
return results[0][1]
time.sleep(1)
return default
    def gauge_smoke_test(self):
        """Verify Gauge populates all of its watcher log files without exceptions."""
        watcher_files = set([
            self.monitor_stats_file,
            self.monitor_state_file,
            self.monitor_flow_table_file])
        found_watcher_files = set()
        for _ in range(60):
            for watcher_file in watcher_files:
                if (os.path.exists(watcher_file)
                        and os.path.getsize(watcher_file)):
                    found_watcher_files.add(watcher_file)
            if watcher_files == found_watcher_files:
                break
            self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
            time.sleep(1)
            # NOTE(review): resetting here means that on timeout the final
            # report lists ALL files as missing, not only those never seen;
            # confirm this is the intended failure reporting.
            found_watcher_files = set()
        missing_watcher_files = watcher_files - found_watcher_files
        self.assertEqual(
            missing_watcher_files, set(), msg='Gauge missing logs: %s' % missing_watcher_files)
        self.hup_gauge()
        self.verify_no_exception(self.env['faucet']['FAUCET_EXCEPTION_LOG'])
    def prometheus_smoke_test(self):
        """Verify expected prometheus variables are nonzero (and error vars zero)."""
        prom_out = self.scrape_prometheus()
        for nonzero_var in (
                r'of_packet_ins', r'of_flowmsgs_sent', r'of_dp_connections',
                r'faucet_config\S+name=\"flood\"', r'faucet_pbr_version\S+version='):
            self.assertTrue(
                re.search(r'%s\S+\s+[1-9]+' % nonzero_var, prom_out),
                msg='expected %s to be nonzero (%s)' % (nonzero_var, prom_out))
        for zero_var in (
                'of_errors', 'of_dp_disconnections'):
            self.assertTrue(
                re.search(r'%s\S+\s+0' % zero_var, prom_out),
                msg='expected %s to be present and zero (%s)' % (zero_var, prom_out))
    def get_configure_count(self):
        """Return the number of times FAUCET has processed a reload request."""
        for _ in range(3):
            count = self.scrape_prometheus_var(
                'faucet_config_reload_requests', default=None, dpid=False)
            if count is not None:
                return count
            time.sleep(1)
        self.fail('configure count stayed zero')
    def hup_faucet(self):
        """Send a HUP signal to the controller."""
        controller = self._get_controller()
        self.assertTrue(
            self._signal_proc_on_port(controller, controller.port, 1))
    def hup_gauge(self):
        """Send a HUP signal to the Gauge controller."""
        self.assertTrue(
            self._signal_proc_on_port(
                self.gauge_controller, int(self.gauge_of_port), 1))
    def verify_controller_fping(self, host, faucet_vip,
                                total_packets=100, packet_interval_ms=100):
        """fping the controller VIP from a host; require at least one reply."""
        fping_bin = 'fping'
        if faucet_vip.version == 6:
            fping_bin = 'fping6'
        fping_cli = '%s -s -c %u -i %u -p 1 -T 1 %s' % (
            fping_bin, total_packets, packet_interval_ms, faucet_vip.ip)
        # allow 1.5x the nominal send time before giving up.
        timeout = int(((1000.0 / packet_interval_ms) * total_packets) * 1.5)
        fping_out = host.cmd(faucet_mininet_test_util.timeout_cmd(
            fping_cli, timeout))
        error('%s: %s' % (self._test_name(), fping_out))
        self.assertTrue(
            not re.search(r'\s+0 ICMP Echo Replies received', fping_out),
            msg=fping_out)
    def verify_vlan_flood_limited(self, vlan_first_host, vlan_second_host,
                                  other_vlan_host):
        """Verify that flooding doesn't cross VLANs."""
        for first_host, second_host in (
                (vlan_first_host, vlan_second_host),
                (vlan_second_host, vlan_first_host)):
            tcpdump_filter = 'ether host %s or ether host %s' % (
                first_host.MAC(), second_host.MAC())
            # capture on the other VLAN while stimulating ARP+ping; expect nothing.
            tcpdump_txt = self.tcpdump_helper(
                other_vlan_host, tcpdump_filter, [
                    lambda: first_host.cmd('arp -d %s' % second_host.IP()),
                    lambda: first_host.cmd('ping -c1 %s' % second_host.IP())],
                packets=1)
            self.assertTrue(
                re.search('0 packets captured', tcpdump_txt), msg=tcpdump_txt)
    def verify_ping_mirrored(self, first_host, second_host, mirror_host):
        """Verify ICMP between two hosts is mirrored (request and reply) to mirror_host."""
        self.net.ping((first_host, second_host))
        for host in (first_host, second_host):
            self.require_host_learned(host)
        self.retry_net_ping(hosts=(first_host, second_host))
        mirror_mac = mirror_host.MAC()
        # exclude traffic originated by the mirror host itself.
        tcpdump_filter = (
            'not ether src %s and '
            '(icmp[icmptype] == 8 or icmp[icmptype] == 0)') % mirror_mac
        first_ping_second = 'ping -c1 %s' % second_host.IP()
        tcpdump_txt = self.tcpdump_helper(
            mirror_host, tcpdump_filter, [
                lambda: first_host.cmd(first_ping_second)])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt),
                        msg=tcpdump_txt)
        self.assertTrue(re.search(
            '%s: ICMP echo reply' % first_host.IP(), tcpdump_txt),
                        msg=tcpdump_txt)
    def verify_eapol_mirrored(self, first_host, second_host, mirror_host):
        """Verify EAPOL frames from first_host are mirrored to mirror_host."""
        self.net.ping((first_host, second_host))
        for host in (first_host, second_host):
            self.require_host_learned(host)
        self.retry_net_ping(hosts=(first_host, second_host))
        mirror_mac = mirror_host.MAC()
        tmp_eap_conf = os.path.join(self.tmpdir, 'eap.conf')
        tcpdump_filter = (
            'not ether src %s and ether proto 0x888e' % mirror_mac)
        # minimal wpa_supplicant config to emit EAPOL starts.
        eap_conf_cmd = (
            'echo "eapol_version=2\nap_scan=0\nnetwork={\n'
            'key_mgmt=IEEE8021X\neap=MD5\nidentity=\\"login\\"\n'
            'password=\\"password\\"\n}\n" > %s' % tmp_eap_conf)
        wpa_supplicant_cmd = faucet_mininet_test_util.timeout_cmd(
            'wpa_supplicant -c%s -Dwired -i%s -d' % (
                tmp_eap_conf,
                first_host.defaultIntf().name),
            5)
        tcpdump_txt = self.tcpdump_helper(
            mirror_host, tcpdump_filter, [
                lambda: first_host.cmd(eap_conf_cmd),
                lambda: first_host.cmd(wpa_supplicant_cmd)])
        self.assertTrue(
            re.search('01:80:c2:00:00:03, ethertype EAPOL', tcpdump_txt),
            msg=tcpdump_txt)
    def bogus_mac_flooded_to_port1(self):
        """Return True if traffic to an unlearned (bogus) MAC floods to port 1."""
        first_host, second_host, third_host = self.net.hosts[0:3]
        unicast_flood_filter = 'ether host %s' % self.BOGUS_MAC
        # force second_host to send unicast frames to the bogus MAC.
        static_bogus_arp = 'arp -s %s %s' % (first_host.IP(), self.BOGUS_MAC)
        curl_first_host = 'curl -m 5 http://%s' % first_host.IP()
        tcpdump_txt = self.tcpdump_helper(
            first_host, unicast_flood_filter,
            [lambda: second_host.cmd(static_bogus_arp),
             lambda: second_host.cmd(curl_first_host),
             lambda: self.net.ping(hosts=(second_host, third_host))])
        return not re.search('0 packets captured', tcpdump_txt)
    def verify_port1_unicast(self, unicast_status):
        """Assert presence/absence of unicast flood rules involving port 1."""
        # Unicast flooding rule for from port 1
        self.assertEqual(
            self.matching_flow_present(
                {u'dl_vlan': u'100', u'in_port': int(self.port_map['port_1'])},
                table_id=self.FLOOD_TABLE,
                match_exact=True),
            unicast_status)
        # Unicast flood rule exists that output to port 1
        self.assertEqual(
            self.matching_flow_present(
                {u'dl_vlan': u'100', u'in_port': int(self.port_map['port_2'])},
                table_id=self.FLOOD_TABLE,
                actions=[u'OUTPUT:%u' % self.port_map['port_1']],
                match_exact=True),
            unicast_status)
def verify_lldp_blocked(self):
first_host, second_host = self.net.hosts[0:2]
lldp_filter = 'ether proto 0x88cc'
ladvd_mkdir = 'mkdir -p /var/run/ladvd'
send_lldp = '%s -L -o %s' % (
faucet_mininet_test_util.timeout_cmd(self.LADVD, 30),
second_host.defaultIntf())
tcpdump_txt = self.tcpdump_helper(
first_host, lldp_filter,
[lambda: second_host.cmd(ladvd_mkdir),
lambda: second_host.cmd(send_lldp),
lambda: second_host.cmd(send_lldp),
lambda: second_host.cmd(send_lldp)],
timeout=20, packets=5)
if re.search(second_host.MAC(), tcpdump_txt):
return False
return True
def is_cdp_blocked(self):
first_host, second_host = self.net.hosts[0:2]
cdp_filter = 'ether host 01:00:0c:cc:cc:cc and ether[20:2]==0x2000'
ladvd_mkdir = 'mkdir -p /var/run/ladvd'
send_cdp = '%s -C -o %s' % (
faucet_mininet_test_util.timeout_cmd(self.LADVD, 30),
second_host.defaultIntf())
tcpdump_txt = self.tcpdump_helper(
first_host,
cdp_filter,
[lambda: second_host.cmd(ladvd_mkdir),
lambda: second_host.cmd(send_cdp),
lambda: second_host.cmd(send_cdp),
lambda: second_host.cmd(send_cdp)],
timeout=20, packets=5)
if re.search(second_host.MAC(), tcpdump_txt):
return False
return True
    def verify_hup_faucet(self, timeout=3):
        """HUP and verify the HUP was processed."""
        start_configure_count = self.get_configure_count()
        self.hup_faucet()
        for _ in range(timeout):
            configure_count = self.get_configure_count()
            if configure_count > start_configure_count:
                return
            time.sleep(1)
        self.fail('HUP not processed by FAUCET')
    def force_faucet_reload(self, new_config):
        """Force FAUCET to reload by adding new line to config file."""
        with open(self.env['faucet']['FAUCET_CONFIG'], 'a') as config_file:
            config_file.write(new_config)
        self.verify_hup_faucet()
def get_host_port_stats(self, hosts_switch_ports):
port_stats = {}
for host, switch_port in hosts_switch_ports:
port_stats[host] = self.get_port_stats_from_dpid(self.dpid, switch_port)
return port_stats
def of_bytes_mbps(self, start_port_stats, end_port_stats, var, seconds):
return (end_port_stats[var] - start_port_stats[var]) * 8 / seconds / self.ONEMBPS
    def verify_iperf_min(self, hosts_switch_ports, min_mbps, server_ip):
        """Verify minimum performance and OF counters match iperf approximately."""
        seconds = 5
        prop = 0.1
        start_port_stats = self.get_host_port_stats(hosts_switch_ports)
        hosts = []
        for host, _ in hosts_switch_ports:
            hosts.append(host)
        client_host, server_host = hosts
        iperf_mbps = self.iperf(
            client_host, server_host, server_ip, seconds)
        self.assertTrue(iperf_mbps > min_mbps)
        # TODO: account for drops.
        for _ in range(3):
            end_port_stats = self.get_host_port_stats(hosts_switch_ports)
            approx_match = True
            for host in hosts:
                of_rx_mbps = self.of_bytes_mbps(
                    start_port_stats[host], end_port_stats[host], 'rx_bytes', seconds)
                of_tx_mbps = self.of_bytes_mbps(
                    start_port_stats[host], end_port_stats[host], 'tx_bytes', seconds)
                output(of_rx_mbps, of_tx_mbps)
                max_of_mbps = float(max(of_rx_mbps, of_tx_mbps))
                iperf_to_max = 0
                if max_of_mbps:
                    iperf_to_max = iperf_mbps / max_of_mbps
                msg = 'iperf: %fmbps, of: %fmbps (%f)' % (
                    iperf_mbps, max_of_mbps, iperf_to_max)
                output(msg)
                # require OF counters within +/- prop of iperf's own figure.
                if ((iperf_to_max < (1.0 - prop)) or
                        (iperf_to_max > (1.0 + prop))):
                    approx_match = False
            if approx_match:
                return
            time.sleep(1)
        self.fail(msg=msg)
def wait_port_status(self, port_no, expected_status, timeout=10):
for _ in range(timeout):
port_status = self.scrape_prometheus_var(
'port_status', {'port': port_no}, default=None)
if port_status is not None and port_status == expected_status:
return
time.sleep(1)
self.fail('port %s status %s != expected %u' % (
port_no, port_status, expected_status))
    def set_port_status(self, port_no, status, wait):
        """Set a port's admin status via ofctl portmod; optionally wait for it."""
        self.assertEqual(
            0,
            os.system(self._curl_portmod(
                self.dpid,
                port_no,
                status,
                ofp.OFPPC_PORT_DOWN)))
        if wait:
            expected_status = 1
            if status == ofp.OFPPC_PORT_DOWN:
                expected_status = 0
            self.wait_port_status(port_no, expected_status)
    def set_port_down(self, port_no, wait=True):
        """Administratively set a port down."""
        self.set_port_status(port_no, ofp.OFPPC_PORT_DOWN, wait)
    def set_port_up(self, port_no, wait=True):
        """Administratively set a port up (clear PORT_DOWN)."""
        self.set_port_status(port_no, 0, wait)
    def wait_dp_status(self, expected_status, controller='faucet', timeout=60):
        """Poll until prometheus dp_status equals expected_status; return success."""
        for _ in range(timeout):
            dp_status = self.scrape_prometheus_var(
                'dp_status', {}, controller=controller, default=None)
            if dp_status is not None and dp_status == expected_status:
                return True
            time.sleep(1)
        return False
    def _get_tableid(self, name):
        """Return the OF table ID for a named FAUCET table from prometheus."""
        return self.scrape_prometheus_var(
            'faucet_config_table_names', {'name': name})
    def quiet_commands(self, host, commands):
        """Run commands on a host, requiring each to produce no output."""
        for command in commands:
            result = host.cmd(command)
            self.assertEqual('', result, msg='%s: %s' % (command, result))
    def _config_tableids(self):
        """Override class-default table IDs with those FAUCET actually allocated."""
        self.PORT_ACL_TABLE = self._get_tableid('port_acl')
        self.VLAN_TABLE = self._get_tableid('vlan')
        self.VLAN_ACL_TABLE = self._get_tableid('vlan_acl')
        self.ETH_SRC_TABLE = self._get_tableid('eth_src')
        self.IPV4_FIB_TABLE = self._get_tableid('ipv4_fib')
        self.IPV6_FIB_TABLE = self._get_tableid('ipv6_fib')
        self.VIP_TABLE = self._get_tableid('vip')
        self.ETH_DST_TABLE = self._get_tableid('eth_dst')
        self.FLOOD_TABLE = self._get_tableid('flood')
def _dp_ports(self):
port_count = self.N_TAGGED + self.N_UNTAGGED
return list(sorted(self.port_map.values()))[:port_count]
    def flap_port(self, port_no, flap_time=1):
        """Take a port down, wait flap_time seconds, bring it back up."""
        self.set_port_down(port_no)
        time.sleep(flap_time)
        self.set_port_up(port_no)
    def flap_all_switch_ports(self, flap_time=1):
        """Flap all ports on switch."""
        for port_no in self._dp_ports():
            self.flap_port(port_no, flap_time=flap_time)
    def get_mac_of_intf(self, host, intf):
        """Get MAC address of a port."""
        return host.cmd(
            '|'.join((
                'ip link show %s' % intf,
                'grep -o "..:..:..:..:..:.."',
                'head -1',
                'xargs echo -n'))).lower()
    def add_macvlan(self, host, macvlan_intf, ipa=None, ipm=24):
        """Add a macvlan interface on a host, optionally assigning ipa/ipm."""
        self.assertEqual(
            '',
            host.cmd('ip link add link %s %s type macvlan' % (
                host.defaultIntf(), macvlan_intf)))
        self.assertEqual(
            '',
            host.cmd('ip link set dev %s up' % macvlan_intf))
        if ipa:
            self.assertEqual(
                '',
                host.cmd('ip address add %s/%s brd + dev %s' % (
                    ipa, ipm, macvlan_intf)))
    def add_host_ipv6_address(self, host, ip_v6, intf=None):
        """Add an IPv6 address to a Mininet host."""
        if intf is None:
            intf = host.intf()
        self.assertEqual(
            '',
            host.cmd('ip -6 addr add %s dev %s' % (ip_v6, intf)))
    def add_host_route(self, host, ip_dst, ip_gw):
        """Add an IP route to a Mininet host."""
        # delete any pre-existing route first so the add cannot conflict.
        host.cmd('ip -%u route del %s' % (
            ip_dst.version, ip_dst.network.with_prefixlen))
        add_cmd = 'ip -%u route add %s via %s' % (
            ip_dst.version, ip_dst.network.with_prefixlen, ip_gw)
        self.quiet_commands(host, (add_cmd,))
def _one_ip_ping(self, host, ping_cmd, retries, require_host_learned):
if require_host_learned:
self.require_host_learned(host)
for _ in range(retries):
ping_result = host.cmd(ping_cmd)
if re.search(self.ONE_GOOD_PING, ping_result):
return
self.assertTrue(
re.search(self.ONE_GOOD_PING, ping_result),
msg='%s: %s' % (ping_cmd, ping_result))
def one_ipv4_ping(self, host, dst, retries=3, require_host_learned=True, intf=None):
    """Ping an IPv4 destination from a host."""
    ping_intf = host.defaultIntf() if intf is None else intf
    return self._one_ip_ping(
        host, 'ping -c1 -I%s %s' % (ping_intf, dst),
        retries, require_host_learned)
def one_ipv4_controller_ping(self, host):
    """Ping the controller from a host with IPv4."""
    self.one_ipv4_ping(host, self.FAUCET_VIPV4.ip)
    # After the ping, the controller VIP must resolve to FAUCET's MAC.
    self.verify_ipv4_host_learned_mac(
        host, self.FAUCET_VIPV4.ip, self.FAUCET_MAC)
def one_ipv6_ping(self, host, dst, retries=3):
    """Ping an IPv6 destination from a host."""
    return self._one_ip_ping(
        host, 'ping6 -c1 %s' % dst, retries, require_host_learned=True)
def one_ipv6_controller_ping(self, host):
    """Ping the controller from a host with IPv6."""
    self.one_ipv6_ping(host, self.FAUCET_VIPV6.ip)
    # After the ping, the controller VIP must resolve to FAUCET's MAC.
    self.verify_ipv6_host_learned_mac(
        host, self.FAUCET_VIPV6.ip, self.FAUCET_MAC)
def retry_net_ping(self, hosts=None, required_loss=0, retries=3):
    """Ping between hosts, retrying until loss is within required_loss."""
    loss = None
    for _ in range(retries):
        # pingAll() covers every host when no subset was given.
        loss = self.net.pingAll() if hosts is None else self.net.ping(hosts)
        if loss <= required_loss:
            return
        time.sleep(1)
    self.fail('ping %f loss > required loss %f' % (loss, required_loss))
def tcp_port_free(self, host, port, ipv=4):
    """Return listener details if port is occupied on host, else None."""
    check_cmd = faucet_mininet_test_util.tcp_listening_cmd(port, ipv)
    # Empty command output means no listener; normalize that to None.
    return host.cmd(check_cmd) or None
def wait_for_tcp_free(self, host, port, timeout=10, ipv=4):
    """Wait for a TCP port on a host to become free (no listener), else fail.

    (Original docstring said "start listening" - a copy/paste from
    wait_for_tcp_listen; this method waits for the opposite condition.)
    """
    for _ in range(timeout):
        listen_out = self.tcp_port_free(host, port, ipv)
        if listen_out is None:
            return
        time.sleep(1)
    self.fail('%s busy on port %u (%s)' % (host, port, listen_out))
def wait_for_tcp_listen(self, host, port, timeout=10, ipv=4):
    """Wait for a host to start listening on a port."""
    for _ in range(timeout):
        if self.tcp_port_free(host, port, ipv) is not None:
            # Non-None output means a listener is now present.
            return
        time.sleep(1)
    self.fail('%s never listened on port %u' % (host, port))
def serve_hello_on_tcp_port(self, host, port):
    """Serve 'hello' on a TCP port on a host."""
    serve_cmd = faucet_mininet_test_util.timeout_cmd(
        'echo hello | nc -l %s %u &' % (host.IP(), port), 10)
    host.cmd(serve_cmd)
    self.wait_for_tcp_listen(host, port)
def wait_nonzero_packet_count_flow(self, match, timeout=10, table_id=None, actions=None):
    """Wait for a flow to be present and have a non-zero packet_count.

    Args:
        match: OF match dict the flow must have.
        timeout: polling attempts (1s apart).
        table_id: restrict search to this table if not None.
        actions: restrict search to flows with these actions if not None.
    """
    # Initialize so the post-loop check cannot raise NameError
    # when timeout < 1 (the original left 'flow' unbound there).
    flow = None
    for _ in range(timeout):
        flow = self.get_matching_flow(match, timeout=1, table_id=table_id, actions=actions)
        if flow and flow['packet_count'] > 0:
            return
        time.sleep(1)
    if flow:
        self.fail('flow %s matching %s had zero packet count' % (flow, match))
    else:
        self.fail('no flow matching %s' % match)
def verify_tp_dst_blocked(self, port, first_host, second_host, table_id=0, mask=None):
    """Verify that a TCP port on a host is blocked from another host."""
    self.serve_hello_on_tcp_port(second_host, port)
    connect_cmd = faucet_mininet_test_util.timeout_cmd(
        'nc %s %u' % (second_host.IP(), port), 10)
    # The connection attempt must produce no output (blocked).
    self.quiet_commands(first_host, (connect_cmd,))
    if table_id is None:
        return
    if mask is None:
        match_port = int(port)
    else:
        match_port = '/'.join((str(port), str(mask)))
    self.wait_nonzero_packet_count_flow(
        {u'tp_dst': match_port}, table_id=table_id)
def verify_tp_dst_notblocked(self, port, first_host, second_host, table_id=0, mask=None):
    """Verify that a TCP port on a host is NOT blocked from another host."""
    self.serve_hello_on_tcp_port(second_host, port)
    client_out = first_host.cmd('nc -w 5 %s %u' % (second_host.IP(), port))
    self.assertEqual('hello\r\n', client_out)
    if table_id is None:
        return
    self.wait_nonzero_packet_count_flow(
        {u'tp_dst': int(port)}, table_id=table_id)
def swap_host_macs(self, first_host, second_host):
    """Swap the MAC addresses of two Mininet hosts."""
    mac_one, mac_two = first_host.MAC(), second_host.MAC()
    first_host.setMAC(mac_two)
    second_host.setMAC(mac_one)
def start_exabgp(self, exabgp_conf, timeout=30):
    """Start exabgp process on controller host.

    Args:
        exabgp_conf: exabgp configuration template; a %(bgp_port)
            placeholder is filled from self.config_ports.
        timeout: seconds to wait for exabgp's log file to appear.

    Returns:
        tuple of (exabgp log path, exabgp stderr path).
    """
    exabgp_conf_file_name = os.path.join(self.tmpdir, 'exabgp.conf')
    exabgp_log = os.path.join(self.tmpdir, 'exabgp.log')
    exabgp_err = os.path.join(self.tmpdir, 'exabgp.err')
    # exabgp is configured via environment variables.
    exabgp_env = ' '.join((
        'exabgp.daemon.user=root',
        'exabgp.log.all=true',
        'exabgp.log.level=DEBUG',
        'exabgp.log.destination=%s' % exabgp_log,
    ))
    bgp_port = self.config_ports['bgp_port']
    exabgp_conf = exabgp_conf % {'bgp_port': bgp_port}
    with open(exabgp_conf_file_name, 'w') as exabgp_conf_file:
        exabgp_conf_file.write(exabgp_conf)
    controller = self._get_controller()
    # Hard-kill exabgp after 600s so a wedged process cannot hang the suite.
    exabgp_cmd = faucet_mininet_test_util.timeout_cmd(
        'exabgp %s -d 2> %s > /dev/null &' % (
            exabgp_conf_file_name, exabgp_err), 600)
    exabgp_cli = 'env %s %s' % (exabgp_env, exabgp_cmd)
    controller.cmd(exabgp_cli)
    # The log file appearing is the signal that exabgp started.
    for _ in range(timeout):
        if os.path.exists(exabgp_log):
            return (exabgp_log, exabgp_err)
        time.sleep(1)
    self.fail('exabgp (%s) did not start' % exabgp_cli)
def wait_bgp_up(self, neighbor, vlan, exabgp_log, exabgp_err):
    """Wait for BGP to come up, failing with exabgp's logs if it does not."""
    label_values = {'neighbor': neighbor, 'vlan': vlan}
    for _ in range(60):
        uptime = self.scrape_prometheus_var(
            'bgp_neighbor_uptime', label_values, default=0)
        if uptime > 0:
            return
        time.sleep(1)
    # Peering never came up: include whatever exabgp logged in the failure.
    log_contents = []
    for log_name in (exabgp_log, exabgp_err):
        if os.path.exists(log_name):
            with open(log_name) as log:
                log_contents.append(log.read())
    self.fail('exabgp did not peer with FAUCET: %s' % '\n'.join(log_contents))
def matching_lines_from_file(self, exp, log_name):
    """Return all lines in log_name that match regex exp.

    (Removed an unreachable 'return []' that followed the return
    inside the with block in the original.)
    """
    with open(log_name) as log_file:
        return [log_line for log_line in log_file if re.search(exp, log_line)]
def exabgp_updates(self, exabgp_log):
    """Verify that exabgp process has received BGP updates."""
    controller = self._get_controller()
    grep_cmd = r'grep UPDATE %s |grep -Eo "\S+ next-hop \S+"' % exabgp_log
    # exabgp should have received our BGP updates
    for _ in range(60):
        updates = controller.cmd(grep_cmd)
        if updates:
            return updates
        time.sleep(1)
    self.fail('exabgp did not receive BGP updates')
def wait_exabgp_sent_updates(self, exabgp_log_name):
    """Verify that exabgp process has sent BGP updates."""
    # '>> N UPDATE' lines (N > 0) indicate updates sent by exabgp.
    sent_update_exp = r'>> [1-9]+[0-9]* UPDATE'
    for _ in range(60):
        if self.matching_lines_from_file(sent_update_exp, exabgp_log_name):
            return
        time.sleep(1)
    self.fail('exabgp did not send BGP updates')
def ping_all_when_learned(self, retries=3):
    """Verify all hosts can ping each other once FAUCET has learned all."""
    # Cause hosts to send traffic that FAUCET can use to learn them.
    for _ in range(retries):
        loss = self.net.pingAll()
        # we should have learned all hosts now, so should have no loss.
        for learning_host in self.net.hosts:
            self.require_host_learned(learning_host)
        if loss == 0:
            return
    self.assertEqual(0, loss)
def wait_for_route_as_flow(self, nexthop, prefix, vlan_vid=None, timeout=10,
                           with_group_table=False, nonzero_packets=False):
    """Verify a route has been added as a flow.

    Args:
        nexthop: expected nexthop MAC (the eth_dst SET_FIELD rewrite).
        prefix: ip_network object being routed.
        vlan_vid: if set, additionally match on this VLAN VID.
        timeout: seconds to wait for the flow.
        with_group_table: expect the nexthop action via the group table.
        nonzero_packets: additionally require a non-zero packet count.
    """
    exp_prefix = u'%s/%s' % (
        prefix.network_address, prefix.netmask)
    # IPv6 and IPv4 routes live in different FIB tables with
    # different match field names.
    if prefix.version == 6:
        nw_dst_match = {u'ipv6_dst': exp_prefix}
        table_id = self.IPV6_FIB_TABLE
    else:
        nw_dst_match = {u'nw_dst': exp_prefix}
        table_id = self.IPV4_FIB_TABLE
    nexthop_action = u'SET_FIELD: {eth_dst:%s}' % nexthop
    if vlan_vid is not None:
        # NOTE: unicode() is Python 2 only.
        nw_dst_match[u'dl_vlan'] = unicode(vlan_vid)
    if with_group_table:
        group_id = self.get_group_id_for_matching_flow(
            nw_dst_match)
        self.wait_matching_in_group_table(
            nexthop_action, group_id, timeout)
    else:
        if nonzero_packets:
            self.wait_nonzero_packet_count_flow(
                nw_dst_match, timeout=timeout, table_id=table_id,
                actions=[nexthop_action])
        else:
            self.wait_until_matching_flow(
                nw_dst_match, timeout=timeout, table_id=table_id,
                actions=[nexthop_action])
def host_ipv4_alias(self, host, alias_ip, intf=None):
    """Add an IPv4 alias address to a host."""
    if intf is None:
        intf = host.intf()
    prefixed_addr = alias_ip.with_prefixlen
    # Best-effort delete so a repeated add cannot fail on a duplicate.
    host.cmd('ip addr del %s dev %s' % (prefixed_addr, intf))
    self.quiet_commands(host, (
        'ip addr add %s dev %s label %s:1' % (prefixed_addr, intf, intf),))
def _ip_neigh(self, host, ipa, ip_ver):
neighbors = host.cmd('ip -%u neighbor show %s' % (ip_ver, ipa))
neighbors_fields = neighbors.split()
if len(neighbors_fields) >= 5:
return neighbors.split()[4]
return None
def _verify_host_learned_mac(self, host, ipa, ip_ver, mac, retries):
    """Poll host's neighbor cache until ipa resolves to mac, else fail."""
    for _ in range(retries):
        if self._ip_neigh(host, ipa, ip_ver) == mac:
            return
        time.sleep(1)
    self.fail('could not verify %s resolved to %s' % (ipa, mac))
def verify_ipv4_host_learned_mac(self, host, ipa, mac, retries=3):
    """Verify host's ARP cache maps ipa to mac."""
    self._verify_host_learned_mac(
        host, ipa, ip_ver=4, mac=mac, retries=retries)
def verify_ipv4_host_learned_host(self, host, learned_host):
    """Verify host resolved learned_host's IPv4 address to its MAC."""
    learned_ipv4 = self.host_ipv4(learned_host)
    learned_ip = ipaddress.ip_interface(unicode(learned_ipv4))
    self.verify_ipv4_host_learned_mac(host, learned_ip.ip, learned_host.MAC())
def verify_ipv6_host_learned_mac(self, host, ip6, mac, retries=3):
    """Verify host's IPv6 neighbor cache maps ip6 to mac."""
    self._verify_host_learned_mac(
        host, ip6, ip_ver=6, mac=mac, retries=retries)
def verify_ipv6_host_learned_host(self, host, learned_host):
    """Verify host resolved learned_host's IPv6 address to its MAC."""
    learned_ipv6 = self.host_ipv6(learned_host)
    learned_ip6 = ipaddress.ip_interface(unicode(learned_ipv6))
    self.verify_ipv6_host_learned_mac(host, learned_ip6.ip, learned_host.MAC())
def iperf_client(self, client_host, iperf_client_cmd):
    """Run the iperf client command, returning measured throughput in Mbps."""
    for _ in range(3):
        report = client_host.cmd(iperf_client_cmd)
        csv_fields = report.strip().split(',')
        # A complete iperf CSV report has 9 fields; the last is bits/sec.
        if len(csv_fields) == 9:
            return int(csv_fields[-1]) / self.ONEMBPS
        time.sleep(1)
    self.fail('%s: %s' % (iperf_client_cmd, report))
def iperf(self, client_host, server_host, server_ip, seconds):
    """Run an iperf session between two hosts, returning client Mbps.

    Picks a free TCP port, starts the iperf server, waits for its
    listening banner, then runs the client.  Retries the whole setup
    up to 3 times before failing.
    """
    for _ in range(3):
        port = faucet_mininet_test_util.find_free_port(
            self.ports_sock, self._test_name())
        iperf_base_cmd = 'iperf -f M -p %u' % port
        if server_ip.version == 6:
            # -V selects IPv6 mode.
            iperf_base_cmd += ' -V'
        iperf_server_cmd = '%s -s -B %s' % (iperf_base_cmd, server_ip)
        # Bound the server's lifetime so it cannot outlive the test.
        iperf_server_cmd = faucet_mininet_test_util.timeout_cmd(
            iperf_server_cmd, (seconds * 3) + 5)
        iperf_client_cmd = faucet_mininet_test_util.timeout_cmd(
            '%s -y c -c %s -t %u' % (iperf_base_cmd, server_ip, seconds),
            seconds + 5)
        server_start_exp = r'Server listening on TCP port %u' % port
        server_out = server_host.popen(
            iperf_server_cmd,
            stdin=faucet_mininet_test_util.DEVNULL,
            stderr=subprocess.STDOUT,
            close_fds=True)
        popens = {server_host: server_out}
        lines = []
        # Watch server output until the listening banner appears,
        # then run the client and kill the server.
        for host, line in pmonitor(popens):
            if host == server_host:
                lines.append(line)
                if re.search(server_start_exp, line):
                    self.wait_for_tcp_listen(
                        server_host, port, ipv=server_ip.version)
                    iperf_mbps = self.iperf_client(
                        client_host, iperf_client_cmd)
                    self._signal_proc_on_port(server_host, port, 9)
                    return iperf_mbps
        time.sleep(1)
    self.fail('%s never started (%s, %s)' % (
        iperf_server_cmd, server_start_exp, ' '.join(lines)))
def verify_ipv4_routing(self, first_host, first_host_routed_ip,
                        second_host, second_host_routed_ip,
                        with_group_table=False):
    """Verify one host can IPV4 route to another via FAUCET.

    Args:
        first_host, second_host: Mininet hosts.
        first_host_routed_ip, second_host_routed_ip: ip_interface
            alias addresses FAUCET routes between the hosts.
        with_group_table: expect route actions via the group table.
    """
    self.host_ipv4_alias(first_host, first_host_routed_ip)
    self.host_ipv4_alias(second_host, second_host_routed_ip)
    self.add_host_route(
        first_host, second_host_routed_ip, self.FAUCET_VIPV4.ip)
    self.add_host_route(
        second_host, first_host_routed_ip, self.FAUCET_VIPV4.ip)
    # Generate traffic so FAUCET learns/resolves before checking flows.
    self.net.ping(hosts=(first_host, second_host))
    self.wait_for_route_as_flow(
        first_host.MAC(), first_host_routed_ip.network,
        with_group_table=with_group_table)
    self.wait_for_route_as_flow(
        second_host.MAC(), second_host_routed_ip.network,
        with_group_table=with_group_table)
    self.one_ipv4_ping(first_host, second_host_routed_ip.ip)
    self.one_ipv4_ping(second_host, first_host_routed_ip.ip)
    self.verify_ipv4_host_learned_host(first_host, second_host)
    self.verify_ipv4_host_learned_host(second_host, first_host)
    # verify at least 1M iperf in each direction
    for client_host, server_host, server_ip in (
            (first_host, second_host, second_host_routed_ip.ip),
            (second_host, first_host, first_host_routed_ip.ip)):
        iperf_mbps = self.iperf(
            client_host, server_host, server_ip, 5)
        error('%s: %u mbps to %s\n' % (self._test_name(), iperf_mbps, server_ip))
        self.assertGreater(iperf_mbps, 1)
    # verify packets matched routing flows
    self.wait_for_route_as_flow(
        first_host.MAC(), first_host_routed_ip.network,
        with_group_table=with_group_table,
        nonzero_packets=True)
    self.wait_for_route_as_flow(
        second_host.MAC(), second_host_routed_ip.network,
        with_group_table=with_group_table,
        nonzero_packets=True)
def verify_ipv4_routing_mesh(self, with_group_table=False):
    """Verify hosts can route to each other via FAUCET."""
    first_host, second_host = self.net.hosts[:2]
    first_host_routed_ip = ipaddress.ip_interface(u'10.0.1.1/24')
    second_routed_ips = (
        ipaddress.ip_interface(u'10.0.2.1/24'),
        ipaddress.ip_interface(u'10.0.3.1/24'))
    # Verify routing to both subnets, then again after the hosts
    # swap MAC addresses.
    for round_no in range(2):
        if round_no:
            self.swap_host_macs(first_host, second_host)
        for second_routed_ip in second_routed_ips:
            self.verify_ipv4_routing(
                first_host, first_host_routed_ip,
                second_host, second_routed_ip,
                with_group_table=with_group_table)
def host_drop_all_ips(self, host):
    """Flush all IPv4 and IPv6 addresses from the host's default interface."""
    for ip_version in (4, 6):
        host.cmd('ip -%u addr flush dev %s' % (ip_version, host.defaultIntf()))
def setup_ipv6_hosts_addresses(self, first_host, first_host_ip,
                               first_host_routed_ip, second_host,
                               second_host_ip, second_host_routed_ip):
    """Configure host IPv6 addresses for testing."""
    for host in (first_host, second_host):
        host.cmd('ip -6 addr flush dev %s' % host.intf())
    # Add link addresses first, then routed alias addresses.
    for host, addr in (
            (first_host, first_host_ip),
            (second_host, second_host_ip),
            (first_host, first_host_routed_ip),
            (second_host, second_host_routed_ip)):
        self.add_host_ipv6_address(host, addr)
    for host in (first_host, second_host):
        self.require_host_learned(host)
def verify_ipv6_routing(self, first_host, first_host_ip,
                        first_host_routed_ip, second_host,
                        second_host_ip, second_host_routed_ip,
                        with_group_table=False):
    """Verify one host can IPV6 route to another via FAUCET.

    Args:
        first_host, second_host: Mininet hosts.
        first_host_ip, second_host_ip: ip_interface link addresses.
        first_host_routed_ip, second_host_routed_ip: ip_interface
            alias addresses FAUCET routes between the hosts.
        with_group_table: expect route actions via the group table.
    """
    # Direct connectivity must work before routing is checked.
    self.one_ipv6_ping(first_host, second_host_ip.ip)
    self.one_ipv6_ping(second_host, first_host_ip.ip)
    self.add_host_route(
        first_host, second_host_routed_ip, self.FAUCET_VIPV6.ip)
    self.add_host_route(
        second_host, first_host_routed_ip, self.FAUCET_VIPV6.ip)
    self.wait_for_route_as_flow(
        first_host.MAC(), first_host_routed_ip.network,
        with_group_table=with_group_table)
    self.wait_for_route_as_flow(
        second_host.MAC(), second_host_routed_ip.network,
        with_group_table=with_group_table)
    self.one_ipv6_controller_ping(first_host)
    self.one_ipv6_controller_ping(second_host)
    self.one_ipv6_ping(first_host, second_host_routed_ip.ip)
    # verify at least 1M iperf in each direction
    for client_host, server_host, server_ip in (
            (first_host, second_host, second_host_routed_ip.ip),
            (second_host, first_host, first_host_routed_ip.ip)):
        iperf_mbps = self.iperf(
            client_host, server_host, server_ip, 5)
        error('%s: %u mbps to %s\n' % (self._test_name(), iperf_mbps, server_ip))
        self.assertGreater(iperf_mbps, 1)
    self.one_ipv6_ping(first_host, second_host_ip.ip)
    self.verify_ipv6_host_learned_mac(
        first_host, second_host_ip.ip, second_host.MAC())
    self.one_ipv6_ping(second_host, first_host_ip.ip)
    self.verify_ipv6_host_learned_mac(
        second_host, first_host_ip.ip, first_host.MAC())
def verify_ipv6_routing_pair(self, first_host, first_host_ip,
                             first_host_routed_ip, second_host,
                             second_host_ip, second_host_routed_ip,
                             with_group_table=False):
    """Verify hosts can route IPv6 to each other via FAUCET."""
    host_args = (
        first_host, first_host_ip, first_host_routed_ip,
        second_host, second_host_ip, second_host_routed_ip)
    self.setup_ipv6_hosts_addresses(*host_args)
    self.verify_ipv6_routing(*host_args, with_group_table=with_group_table)
def verify_ipv6_routing_mesh(self, with_group_table=False):
    """Verify IPv6 routing between hosts and multiple subnets."""
    first_host, second_host = self.net.hosts[:2]
    first_host_ip = ipaddress.ip_interface(u'fc00::1:1/112')
    second_host_ip = ipaddress.ip_interface(u'fc00::1:2/112')
    first_host_routed_ip = ipaddress.ip_interface(u'fc00::10:1/112')
    second_routed_ips = (
        ipaddress.ip_interface(u'fc00::20:1/112'),
        ipaddress.ip_interface(u'fc00::30:1/112'))
    # Verify routing to both subnets, then again after the hosts
    # swap MAC addresses.
    for round_no in range(2):
        if round_no:
            self.swap_host_macs(first_host, second_host)
        for second_routed_ip in second_routed_ips:
            self.verify_ipv6_routing_pair(
                first_host, first_host_ip, first_host_routed_ip,
                second_host, second_host_ip, second_routed_ip,
                with_group_table=with_group_table)
def verify_invalid_bgp_route(self, pattern):
    """Check if we see the pattern in Faucet's log."""
    faucet_log = self.env['faucet']['FAUCET_LOG']
    matches = self.matching_lines_from_file(pattern, faucet_log)
    self.assertGreater(len(matches), 0, msg='%s not found' % pattern)
|
#!/usr/bin/env python
#
#
# set some variables
# NOTE(review): API credentials are hardcoded in source control --
# rotate this token and load it from an environment variable or a
# config file outside the repository instead.
base_url = 'https://cloud.skytap.com'
user = 'skynet@fulcrum.net'
token = '55b5e1a75ce9a122db3c976d161f96b15b5eb825'
working_dir = '/Users/thewellington/Development/skynet' # this is the path to the skynet.py file
control_dir = '/Users/thewellington/Development/skytap-control' # this is the path to the skytap-control directory
temp_dir = '/tmp'
# import necessary modules
import sys
import traceback
import getopt
import json
# requests is a third-party module; fail with a pointer to its docs.
try:
    import requests
except ImportError:
    sys.stderr.write("You do not have the 'requests' module installed. Please see http://docs.python-requests.org/en/latest/ for more information.")
    exit(1)
############################################################################
#### begin api interactions
def _api_get(url):
    """Issue an authenticated GET to the Skytap API; return (status, text)."""
    requisite_headers = {'Accept': 'application/json',
                         'Content-Type': 'application/json'}
    # Credentials come from the module-level user/token settings.
    response = requests.get(url, headers=requisite_headers, auth=(user, token))
    return response.status_code, response.text
def _api_put(argv):
    """Issue an authenticated PUT; argv is (url, name, passwd[, data_file])."""
    url, name, passwd = argv[0], argv[1], argv[2]
    requisite_headers = {'Accept': 'application/json',
                         'Content-Type': 'application/json'}
    data = load_file(argv[3]) if len(argv) > 3 else None
    response = requests.put(
        url, headers=requisite_headers, auth=(name, passwd), data=data)
    return response.status_code, response.text
def _api_post(argv):
    """Issue an authenticated POST; argv is (url, name, passwd[, data_file])."""
    url, name, passwd = argv[0], argv[1], argv[2]
    requisite_headers = {'Accept': 'application/json',
                         'Content-Type': 'application/json'}
    data = load_file(argv[3]) if len(argv) > 3 else None
    response = requests.post(
        url, headers=requisite_headers, auth=(name, passwd), data=data)
    return response.status_code, response.text
def _api_del(argv):
    """Issue an authenticated DELETE; argv is (url, name, passwd)."""
    url, name, passwd = argv[0], argv[1], argv[2]
    requisite_headers = {'Accept': 'application/json',
                         'Content-Type': 'application/json'}
    response = requests.delete(
        url, headers=requisite_headers, auth=(name, passwd))
    return response.status_code, response.text
def rest_usage():
    """Print rest() usage and abort with a failure status."""
    print("usage: rest [put|get|post|delete] url name passwd")
    sys.exit(-1)
# Dispatch table mapping HTTP verbs to API helpers.
# NOTE(review): _api_get takes a url string, while the PUT/POST/DELETE
# helpers expect an argv list -- rest() calls every entry with a url
# string, so only GET works as written; confirm intended convention.
cmds = {
    "GET": _api_get,
    "PUT": _api_put,
    "POST": _api_post,
    "DELETE": _api_del
}
def load_file(fname):
    """Return the full contents of fname as a string."""
    with open(fname) as file_handle:
        return file_handle.read()
def rest(req, url, user, token):
    """Dispatch an HTTP request by verb name and pretty-print the response.

    NOTE(review): the user/token parameters shadow the module globals but
    are never passed to the helpers (_api_get reads the globals directly),
    and the PUT/POST/DELETE helpers expect a list argument, so
    cmds[cmd](url) will fail for those verbs -- confirm intended usage.
    NOTE(review): this prints the response and returns None; callers that
    expect a returned body (e.g. get_configurations) will fail.
    """
    # if len(argv) < 4:
    #    rest_usage()
    if 'HTTPS' not in url.upper():
        print "Secure connection required: HTTP not valid, please use HTTPS or https"
        rest_usage()
    cmd = req.upper()
    if cmd not in cmds.keys():
        rest_usage()
    status,body=cmds[cmd](url)
    print
    if int(status) == 200:
        json_output = json.loads(body)
        print json.dumps(json_output, indent = 4)
    else:
        print "Oops! Error: status: %s\n%s" % (status, body)
    print
############################################################################
#### begin custom functions
############################################################################
#### begin interface items
def usage(exitcode=None):
    """Print command-line help and exit.

    Args:
        exitcode: exit status to use; defaults to -1 when not supplied.

    The original required exitcode and used a dead try/except NameError
    (a parameter is always bound); ui() also called usage() with no
    argument, which raised TypeError.  A default argument restores the
    intended "no exitcode given" behavior.
    """
    usage_text = """
########## Welcome to Skynet ##########
At this time Skynet takes two arguments at most. Here are your options
-h --help Produces this help file
-a --action Indicates the action you wish to take
-s --scope Defines the scope of the action(s)
EXAMPLES:
skynet -a suspend -s limited
"""
    print(usage_text)
    if exitcode is None:
        sys.exit(-1)
    sys.exit(exitcode)
# argument parser
def ui(argv):
    """Parse command-line options and dispatch the requested action.

    Args:
        argv: argument list (normally sys.argv[1:]).

    Fixes: parses the argv parameter instead of re-reading sys.argv;
    '-h' now calls usage(0) (the original printed the usage *function
    object*); the '-t' comparison uses == ('-t' alone in parentheses
    is a string, making the original a substring test).
    """
    # define variables to be used by getopts and set them to empty
    action = ''
    scope = ''
    try:
        options, remainder = getopt.gnu_getopt(
            argv, 'a:hs:t', ['help', 'action=', 'scope='])
    except getopt.GetoptError:
        # Print help and exit 2 on a malformed command line.
        usage(2)
    for opt, arg in options:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-a', '--action'):
            action = arg
        elif opt in ('-s', '--scope'):
            scope = arg
        elif opt == '-t':
            rest('get', base_url + '/configurations', user, token)
# call main
if __name__ == '__main__':
    ui(sys.argv[1:])
# (removed a stray top-level 'get_configurations' expression: that name
# is not defined in this script and raised NameError at import time)
#!/usr/bin/env python
#
#
# set some variables
# NOTE(review): API credentials are hardcoded in source control --
# rotate this token and load it from an environment variable or a
# config file outside the repository instead.
base_url = 'https://cloud.skytap.com'
user = 'skynet@fulcrum.net'
token = '55b5e1a75ce9a122db3c976d161f96b15b5eb825'
working_dir = '/Users/thewellington/Development/skynet' # this is the path to the skynet.py file
control_dir = '/Users/thewellington/Development/skytap-control' # this is the path to the skytap-control directory
temp_dir = '/tmp'
# import necessary modules
import sys
import traceback
import getopt
import json
# requests is a third-party module; fail with a pointer to its docs.
try:
    import requests
except ImportError:
    sys.stderr.write("You do not have the 'requests' module installed. Please see http://docs.python-requests.org/en/latest/ for more information.")
    exit(1)
############################################################################
#### begin api interactions
def _api_get(url):
    """Issue an authenticated GET to the Skytap API; return (status, text)."""
    requisite_headers = {'Accept': 'application/json',
                         'Content-Type': 'application/json'}
    # Credentials come from the module-level user/token settings.
    response = requests.get(url, headers=requisite_headers, auth=(user, token))
    return response.status_code, response.text
def _api_put(argv):
    """Issue an authenticated PUT; argv is (url, name, passwd[, data_file])."""
    url, name, passwd = argv[0], argv[1], argv[2]
    requisite_headers = {'Accept': 'application/json',
                         'Content-Type': 'application/json'}
    data = load_file(argv[3]) if len(argv) > 3 else None
    response = requests.put(
        url, headers=requisite_headers, auth=(name, passwd), data=data)
    return response.status_code, response.text
def _api_post(argv):
    """Issue an authenticated POST; argv is (url, name, passwd[, data_file])."""
    url, name, passwd = argv[0], argv[1], argv[2]
    requisite_headers = {'Accept': 'application/json',
                         'Content-Type': 'application/json'}
    data = load_file(argv[3]) if len(argv) > 3 else None
    response = requests.post(
        url, headers=requisite_headers, auth=(name, passwd), data=data)
    return response.status_code, response.text
def _api_del(argv):
    """Issue an authenticated DELETE; argv is (url, name, passwd)."""
    url, name, passwd = argv[0], argv[1], argv[2]
    requisite_headers = {'Accept': 'application/json',
                         'Content-Type': 'application/json'}
    response = requests.delete(
        url, headers=requisite_headers, auth=(name, passwd))
    return response.status_code, response.text
def rest_usage():
    """Print rest() usage and abort with a failure status."""
    print("usage: rest [put|get|post|delete] url name passwd")
    sys.exit(-1)
# Dispatch table mapping HTTP verbs to API helpers.
# NOTE(review): _api_get takes a url string, while the PUT/POST/DELETE
# helpers expect an argv list -- rest() calls every entry with a url
# string, so only GET works as written; confirm intended convention.
cmds = {
    "GET": _api_get,
    "PUT": _api_put,
    "POST": _api_post,
    "DELETE": _api_del
}
def load_file(fname):
    """Return the full contents of fname as a string."""
    with open(fname) as file_handle:
        return file_handle.read()
def rest(req, url, user, token):
    """Dispatch an HTTP request by verb name and pretty-print the response.

    NOTE(review): the user/token parameters shadow the module globals but
    are never passed to the helpers (_api_get reads the globals directly),
    and the PUT/POST/DELETE helpers expect a list argument, so
    cmds[cmd](url) will fail for those verbs -- confirm intended usage.
    NOTE(review): this prints the response and returns None, but
    get_configurations() below expects a returned body.
    """
    # if len(argv) < 4:
    #    rest_usage()
    if 'HTTPS' not in url.upper():
        print "Secure connection required: HTTP not valid, please use HTTPS or https"
        rest_usage()
    cmd = req.upper()
    if cmd not in cmds.keys():
        rest_usage()
    status,body=cmds[cmd](url)
    print
    if int(status) == 200:
        json_output = json.loads(body)
        print json.dumps(json_output, indent = 4)
    else:
        print "Oops! Error: status: %s\n%s" % (status, body)
    print
############################################################################
#### begin custom functions
def get_configurations():
    """Print the list of configuration IDs from the Skytap API.

    NOTE(review): rest() prints the response but returns None, so the
    json.loads(body) call below will raise TypeError as written; rest()
    would need to return the response body for this to work -- confirm.
    """
    body = rest('get', base_url+'/configurations', user, token)
    json_output = json.loads(body)
    l = []
    for j in json_output:
        l.append(j.get('id'))
    print l
############################################################################
#### begin interface items
def usage(exitcode=None):
    """Print command-line help and exit.

    Args:
        exitcode: exit status to use; defaults to -1 when not supplied.

    The original required exitcode and used a dead try/except NameError
    (a parameter is always bound); ui() also called usage() with no
    argument, which raised TypeError.  A default argument restores the
    intended "no exitcode given" behavior.
    """
    usage_text = """
########## Welcome to Skynet ##########
At this time Skynet takes two arguments at most. Here are your options
-h --help Produces this help file
-a --action Indicates the action you wish to take
-s --scope Defines the scope of the action(s)
EXAMPLES:
skynet -a suspend -s limited
"""
    print(usage_text)
    if exitcode is None:
        sys.exit(-1)
    sys.exit(exitcode)
# argument parser
def ui(argv):
    """Parse command-line options and dispatch the requested action.

    Args:
        argv: argument list (normally sys.argv[1:]).

    Fixes: parses the argv parameter instead of re-reading sys.argv;
    '-h' now calls usage(0) (the original printed the usage *function
    object*); the '-t' comparison uses == ('-t' alone in parentheses
    is a string, making the original a substring test).
    """
    # define variables to be used by getopts and set them to empty
    action = ''
    scope = ''
    try:
        options, remainder = getopt.gnu_getopt(
            argv, 'a:hs:t', ['help', 'action=', 'scope='])
    except getopt.GetoptError:
        # Print help and exit 2 on a malformed command line.
        usage(2)
    for opt, arg in options:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-a', '--action'):
            action = arg
        elif opt in ('-s', '--scope'):
            scope = arg
        elif opt == '-t':
            rest('get', base_url + '/configurations', user, token)
# call main: parse command-line arguments and dispatch when run as a script.
if __name__ == '__main__':
    ui(sys.argv[1:])
|
#!/usr/bin/env python
"""Mininet tests for FAUCET."""
# pylint: disable=missing-docstring
import os
import re
import shutil
import socket
import threading
import time
import unittest
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
import ipaddress
import scapy.all
import yaml
from mininet.net import Mininet
import faucet_mininet_test_base
import faucet_mininet_test_util
import faucet_mininet_test_topo
class QuietHTTPServer(HTTPServer):
    """HTTPServer that suppresses per-request error reporting."""

    def handle_error(self, _request, _client_address):
        """Silently ignore handler errors instead of printing tracebacks."""
        pass
class PostHandler(SimpleHTTPRequestHandler):
    """Request handler that appends POST bodies to an influx log file."""

    def _log_post(self, influx_log):
        """Read the POST body and append it (if non-empty) to influx_log."""
        content_len = int(self.headers.getheader('content-length', 0))
        content = self.rfile.read(content_len).strip()
        if content:
            # Use a context manager so the file handle is always closed
            # (the original leaked an open file per request).
            with open(influx_log, 'a') as log_file:
                log_file.write(content + '\n')
class FaucetTest(faucet_mininet_test_base.FaucetTestBase):
    """Shared base class for the tests in this module."""
    pass
@unittest.skip('currently flaky')
class FaucetAPITest(faucet_mininet_test_base.FaucetTestBase):
    """Test the Faucet API."""

    NUM_DPS = 0

    def setUp(self):
        """Start an API-test FAUCET controller and a 7-host topology."""
        self.tmpdir = self._tmpdir_name()
        name = 'faucet'
        self._set_var_path(name, 'FAUCET_CONFIG', 'config/testconfigv2-simple.yaml')
        self._set_var_path(name, 'FAUCET_LOG', 'faucet.log')
        self._set_var_path(name, 'FAUCET_EXCEPTION_LOG', 'faucet-exception.log')
        self._set_var_path(name, 'API_TEST_RESULT', 'result.txt')
        self.results_file = self.env[name]['API_TEST_RESULT']
        shutil.copytree('config', os.path.join(self.tmpdir, 'config'))
        self.dpid = str(0xcafef00d)
        self._set_prom_port(name)
        self.of_port, _ = faucet_mininet_test_util.find_free_port(
            self.ports_sock, self._test_name())
        self.topo = faucet_mininet_test_topo.FaucetSwitchTopo(
            self.ports_sock,
            dpid=self.dpid,
            n_untagged=7,
            test_name=self._test_name())
        self.net = Mininet(
            self.topo,
            controller=faucet_mininet_test_topo.FaucetAPI(
                name=name,
                tmpdir=self.tmpdir,
                env=self.env[name],
                port=self.of_port))
        self.net.start()
        self.reset_all_ipv4_prefix(prefix=24)
        self.wait_for_tcp_listen(self._get_controller(), self.of_port)

    def test_api(self):
        """Poll for the result file the API test controller writes."""
        for _ in range(10):
            try:
                with open(self.results_file, 'r') as results:
                    result = results.read().strip()
                # assertEqual: assertEquals is a deprecated alias.
                self.assertEqual('pass', result, result)
                return
            except IOError:
                time.sleep(1)
        self.fail('no result from API test')
class FaucetUntaggedTest(FaucetTest):
    """Basic untagged VLAN test."""

    N_UNTAGGED = 4
    N_TAGGED = 0
    # One untagged VLAN, all four ports native on it.
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def setUp(self):
        """Build the tagged/untagged switch topology and start the net."""
        super(FaucetUntaggedTest, self).setUp()
        self.topo = self.topo_class(
            self.ports_sock, dpid=self.dpid,
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED)
        self.start_net()

    def test_untagged(self):
        """All hosts on the same untagged VLAN should have connectivity."""
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        self.gauge_smoke_test()
        self.prometheus_smoke_test()
class FaucetUntaggedLogRotateTest(FaucetUntaggedTest):
    """Verify FAUCET recreates its log file after rotation."""

    def test_untagged(self):
        """Rotate FAUCET's log and check a new one appears after activity."""
        log_path = self.env['faucet']['FAUCET_LOG']
        rotated_path = log_path + '.old'
        self.assertTrue(os.path.exists(log_path))
        os.rename(log_path, rotated_path)
        self.assertTrue(os.path.exists(rotated_path))
        self.flap_all_switch_ports()
        self.assertTrue(os.path.exists(log_path))
class FaucetUntaggedMeterParseTest(FaucetUntaggedTest):
    """Verify FAUCET can parse meter and meter-ACL configuration."""

    # Datapath must support OpenFlow meters for this config to load.
    REQUIRES_METERS = True
    # A 1000 KBPS DROP meter plus an ACL that applies it.
    CONFIG_GLOBAL = """
meters:
    lossymeter:
        meter_id: 1
        entry:
            flags: "KBPS"
            bands:
                [
                    {
                        type: "DROP",
                        rate: 1000
                    }
                ]
acls:
    lossyacl:
        - rule:
            actions:
                meter: lossymeter
                allow: 1
vlans:
    100:
        description: "untagged"
"""
class FaucetUntaggedApplyMeterTest(FaucetUntaggedMeterParseTest):
    """Apply the lossy meter ACL to port 1 and run the untagged test."""

    CONFIG = """
interfaces:
    %(port_1)d:
        acl_in: lossyacl
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""
class FaucetUntaggedHairpinTest(FaucetUntaggedTest):
    """Test hairpin switching between macvlans on one hairpin-enabled port."""

    CONFIG = """
interfaces:
    %(port_1)d:
        hairpin: True
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def test_untagged(self):
        # Create macvlan interfaces, with one in a separate namespace,
        # to force traffic between them to be hairpinned via FAUCET.
        first_host, second_host = self.net.hosts[:2]
        macvlan1_intf = 'macvlan1'
        macvlan1_ipv4 = '10.0.0.100'
        macvlan2_intf = 'macvlan2'
        macvlan2_ipv4 = '10.0.0.101'
        netns = first_host.name
        self.add_macvlan(first_host, macvlan1_intf)
        first_host.cmd('ip address add %s/24 brd + dev %s' % (macvlan1_ipv4, macvlan1_intf))
        self.add_macvlan(first_host, macvlan2_intf)
        # NOTE(review): the base class in view defines get_mac_of_intf();
        # get_host_intf_mac is not visible here -- confirm it exists.
        macvlan2_mac = self.get_host_intf_mac(first_host, macvlan2_intf)
        first_host.cmd('ip netns add %s' % netns)
        first_host.cmd('ip link set %s netns %s' % (macvlan2_intf, netns))
        for exec_cmd in (
                ('ip address add %s/24 brd + dev %s' % (
                    macvlan2_ipv4, macvlan2_intf),
                 'ip link set %s up' % macvlan2_intf)):
            first_host.cmd('ip netns exec %s %s' % (netns, exec_cmd))
        self.one_ipv4_ping(first_host, macvlan2_ipv4, intf=macvlan1_intf)
        self.one_ipv4_ping(first_host, second_host.IP())
        first_host.cmd('ip netns del %s' % netns)
        # Verify OUTPUT:IN_PORT flood rules are exercised.
        self.wait_nonzero_packet_count_flow(
            {u'in_port': self.port_map['port_1'],
             u'dl_dst': u'ff:ff:ff:ff:ff:ff'},
            table_id=self.FLOOD_TABLE, actions=[u'OUTPUT:IN_PORT'])
        self.wait_nonzero_packet_count_flow(
            {u'in_port': self.port_map['port_1'], u'dl_dst': macvlan2_mac},
            table_id=self.ETH_DST_TABLE, actions=[u'OUTPUT:IN_PORT'])
class FaucetUntaggedGroupHairpinTest(FaucetUntaggedHairpinTest):
    """Hairpin test with the group table enabled."""

    # NOTE(review): unlike the sibling configs, port_4 here has no
    # description entry -- confirm whether that is intentional.
    CONFIG = """
group_table: True
interfaces:
    %(port_1)d:
        hairpin: True
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
"""
class FaucetUntaggedTcpIPv4IperfTest(FaucetUntaggedTest):
    """Verify minimum TCP iperf throughput over untagged IPv4."""

    def test_untagged(self):
        """Repeatedly ping, measure iperf and flap ports."""
        first_host, second_host = self.net.hosts[:2]
        second_host_ip = ipaddress.ip_address(unicode(second_host.IP()))
        host_ports = (
            (first_host, self.port_map['port_1']),
            (second_host, self.port_map['port_2']))
        for _ in range(3):
            self.ping_all_when_learned()
            self.one_ipv4_ping(first_host, second_host_ip)
            self.verify_iperf_min(host_ports, 1, second_host_ip)
            self.flap_all_switch_ports()
class FaucetUntaggedTcpIPv6IperfTest(FaucetUntaggedTest):
    """Verify minimum iperf TCP throughput over IPv6, across port flaps."""

    def test_untagged(self):
        client_host, server_host = self.net.hosts[:2]
        client_ip = ipaddress.ip_interface(u'fc00::1:1/112')
        server_ip = ipaddress.ip_interface(u'fc00::1:2/112')
        # Assign static IPv6 addresses to both hosts.
        for host, host_ip in (
                (client_host, client_ip), (server_host, server_ip)):
            self.add_host_ipv6_address(host, host_ip)
        hosts_with_ports = (
            (client_host, self.port_map['port_1']),
            (server_host, self.port_map['port_2']))
        # Three cycles: learn, ping, measure iperf, then flap all ports.
        for _ in (0, 1, 2):
            self.ping_all_when_learned()
            self.one_ipv6_ping(client_host, server_ip.ip)
            self.verify_iperf_min(hosts_with_ports, 1, server_ip.ip)
            self.flap_all_switch_ports()
class FaucetSanityTest(FaucetUntaggedTest):
"""Sanity test - make sure test environment is correct before running all tess."""
def test_portmap(self):
test_ports = self.N_TAGGED + self.N_UNTAGGED
for i, host in enumerate(self.net.hosts):
in_port = 'port_%u' % (i + 1)
print 'verifying host/port mapping for %s' % in_port
self.require_host_learned(host, in_port=self.port_map[in_port])
class FaucetUntaggedPrometheusGaugeTest(FaucetUntaggedTest):
    """Testing Gauge Prometheus"""

    def get_gauge_watcher_config(self):
        # Ship port stats to Gauge's Prometheus exporter every 5s.
        return """
    port_stats:
        dps: ['faucet-1']
        type: 'port_stats'
        interval: 5
        db: 'prometheus'
"""

    def test_untagged(self):
        self.wait_dp_status(1, controller='gauge')
        labels = {'port_name': '1', 'dp_id': '0x%x' % long(self.dpid)}
        last_p1_bytes_in = 0
        # Poll twice; port 1's RX byte counter must strictly increase
        # between polls (pings generate the traffic).
        for poll in range(2):
            self.ping_all_when_learned()
            updated_counters = False
            for _ in range(self.DB_TIMEOUT * 3):
                p1_bytes_in = self.scrape_prometheus_var(
                    'of_port_rx_bytes', labels=labels, controller='gauge', dpid=False)
                if p1_bytes_in is not None and p1_bytes_in > last_p1_bytes_in:
                    updated_counters = True
                    last_p1_bytes_in = p1_bytes_in
                    break
                time.sleep(1)
            if not updated_counters:
                self.fail(msg='Gauge Prometheus counters not increasing')
class FaucetUntaggedInfluxTest(FaucetUntaggedTest):
    """Basic untagged VLAN test with Influx."""

    # Fake InfluxDB HTTP server and its serving thread, started per test.
    server_thread = None
    server = None

    def get_gauge_watcher_config(self):
        # Ship port stats, port state and the flow table to Influx every 2s.
        return """
    port_stats:
        dps: ['faucet-1']
        type: 'port_stats'
        interval: 2
        db: 'influx'
    port_state:
        dps: ['faucet-1']
        type: 'port_state'
        interval: 2
        db: 'influx'
    flow_table:
        dps: ['faucet-1']
        type: 'flow_table'
        interval: 2
        db: 'influx'
"""

    def _wait_error_shipping(self, timeout=None):
        """Wait until Gauge's log notes an Influx shipping error, else fail."""
        if timeout is None:
            timeout = self.DB_TIMEOUT * 2
        gauge_log = self.env['gauge']['GAUGE_LOG']
        for _ in range(timeout):
            log_content = open(gauge_log).read()
            if re.search('error shipping', log_content):
                return
            time.sleep(1)
        self.fail('Influx error not noted in %s: %s' % (gauge_log, log_content))

    def _verify_influx_log(self, influx_log):
        """Parse the points captured by the fake Influx server and verify
        the expected set of variables was shipped."""
        self.assertTrue(os.path.exists(influx_log))
        observed_vars = set()
        for point_line in open(influx_log).readlines():
            # Influx line protocol: "<name,tag=..,tag=..> <field=value> <ts>"
            point_fields = point_line.strip().split()
            self.assertEquals(3, len(point_fields), msg=point_fields)
            ts_name, value_field, timestamp_str = point_fields
            timestamp = int(timestamp_str)
            value = float(value_field.split('=')[1])
            ts_name_fields = ts_name.split(',')
            self.assertGreater(len(ts_name_fields), 1)
            observed_vars.add(ts_name_fields[0])
            label_values = {}
            for label_value in ts_name_fields[1:]:
                label, value = label_value.split('=')
                label_values[label] = value
            if ts_name.startswith('flow'):
                self.assertTrue('inst_count' in label_values, msg=point_line)
                if 'vlan_vid' in label_values:
                    # 0x1000 is presumably the OFPVID_PRESENT bit being
                    # stripped from the matched VID - confirm against Gauge.
                    self.assertEquals(
                        int(label_values['vlan']), int(value) ^ 0x1000)
        self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
        self.assertEquals(set([
            'dropped_in', 'dropped_out', 'bytes_out', 'flow_packet_count',
            'errors_in', 'bytes_in', 'flow_byte_count', 'port_state_reason',
            'packets_in', 'packets_out']), observed_vars)

    def _wait_influx_log(self, influx_log):
        # Best-effort wait for the log file to appear; deliberately does
        # not fail on timeout (callers verify the log contents themselves).
        for _ in range(self.DB_TIMEOUT * 3):
            if os.path.exists(influx_log):
                return
            time.sleep(1)
        return

    def _start_influx(self, handler):
        """Start the fake Influx HTTP server with the given request handler."""
        # Retry the bind up to 3 times in case the port is temporarily
        # unavailable; fail the test if it never comes up.
        for _ in range(3):
            try:
                self.server = QuietHTTPServer(
                    ('127.0.0.1', self.influx_port), handler)
                break
            except socket.error:
                time.sleep(7)
        self.assertIsNotNone(
            self.server,
            msg='could not start test Influx server on %u' % self.influx_port)
        self.server_thread = threading.Thread(
            target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def _stop_influx(self):
        self.server.shutdown()
        self.server.socket.close()

    def test_untagged(self):
        influx_log = os.path.join(self.tmpdir, 'influx.log')

        class InfluxPostHandler(PostHandler):
            # Log each POSTed point and reply 204 (Influx write success).

            def do_POST(self):
                self._log_post(influx_log)
                return self.send_response(204)

        self._start_influx(InfluxPostHandler)
        self.ping_all_when_learned()
        self.wait_gauge_up()
        self.hup_gauge()
        self.flap_all_switch_ports()
        self._wait_influx_log(influx_log)
        self._stop_influx()
        self._verify_influx_log(influx_log)
class FaucetUntaggedInfluxDownTest(FaucetUntaggedInfluxTest):
    """With no Influx server running, Gauge must log a shipping error
    without raising an exception."""

    def test_untagged(self):
        # No fake Influx server is started, so shipping must fail cleanly.
        self.ping_all_when_learned()
        self._wait_error_shipping()
        self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetUntaggedInfluxUnreachableTest(FaucetUntaggedInfluxTest):
    """Gauge must log a shipping error when the Influx host is unreachable
    (config points at 127.0.0.2 where nothing listens)."""

    def get_gauge_config(self, faucet_config_file,
                         monitor_stats_file,
                         monitor_state_file,
                         monitor_flow_table_file,
                         prometheus_port,
                         influx_port):
        """Build Gauge config."""
        # Same as the parent config except influx_host is 127.0.0.2,
        # with a short influx_timeout so the error surfaces quickly.
        return """
faucet_configs:
    - %s
watchers:
%s
dbs:
    stats_file:
        type: 'text'
        file: %s
    state_file:
        type: 'text'
        file: %s
    flow_file:
        type: 'text'
        file: %s
    influx:
        type: 'influx'
        influx_db: 'faucet'
        influx_host: '127.0.0.2'
        influx_port: %u
        influx_user: 'faucet'
        influx_pwd: ''
        influx_timeout: 2
""" % (faucet_config_file,
       self.get_gauge_watcher_config(),
       monitor_stats_file,
       monitor_state_file,
       monitor_flow_table_file,
       influx_port)

    def test_untagged(self):
        # Route 127.0.0.2 via loopback on the Gauge controller so connection
        # attempts fail rather than being routed elsewhere.
        self.gauge_controller.cmd(
            'route add 127.0.0.2 gw 127.0.0.1 lo')
        self.ping_all_when_learned()
        self._wait_error_shipping()
        self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetUntaggedInfluxTooSlowTest(FaucetUntaggedInfluxTest):
    """Gauge must log a shipping error when the Influx server responds
    slower than its configured timeout."""

    def get_gauge_watcher_config(self):
        # Port stats and state only (no flow_table, unlike the parent).
        return """
    port_stats:
        dps: ['faucet-1']
        type: 'port_stats'
        interval: 2
        db: 'influx'
    port_state:
        dps: ['faucet-1']
        type: 'port_state'
        interval: 2
        db: 'influx'
"""

    def test_untagged(self):
        influx_log = os.path.join(self.tmpdir, 'influx.log')

        class InfluxPostHandler(PostHandler):
            # Copy DB_TIMEOUT onto the handler class: handler methods
            # have no access to the test instance's self.
            DB_TIMEOUT = self.DB_TIMEOUT

            def do_POST(self):
                self._log_post(influx_log)
                # Respond slower than Gauge's Influx timeout, then error.
                time.sleep(self.DB_TIMEOUT * 2)
                return self.send_response(500)

        self._start_influx(InfluxPostHandler)
        self.ping_all_when_learned()
        self.wait_gauge_up()
        self._wait_influx_log(influx_log)
        self._stop_influx()
        self.assertTrue(os.path.exists(influx_log))
        self._wait_error_shipping()
        self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetNailedForwardingTest(FaucetUntaggedTest):
    """Test forwarding driven entirely by ACL output actions: ports 1 and 2
    can only reach each other (unicast plus targeted ARP); ports 3 and 4
    drop everything."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_dst: "0e:00:00:00:02:02"
            actions:
                output:
                    port: b2
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.2"
            actions:
                output:
                    port: b2
        - rule:
            actions:
                allow: 0
    2:
        - rule:
            dl_dst: "0e:00:00:00:01:01"
            actions:
                output:
                    port: b1
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.1"
            actions:
                output:
                    port: b1
        - rule:
            actions:
                allow: 0
    3:
        - rule:
            actions:
                allow: 0
    4:
        - rule:
            actions:
                allow: 0
"""

    CONFIG = """
interfaces:
    b1:
        number: %(port_1)d
        native_vlan: 100
        acl_in: 1
    b2:
        number: %(port_2)d
        native_vlan: 100
        acl_in: 2
    b3:
        number: %(port_3)d
        native_vlan: 100
        acl_in: 3
    b4:
        number: %(port_4)d
        native_vlan: 100
        acl_in: 4
"""

    def test_untagged(self):
        first_host, second_host = self.net.hosts[0:2]
        # MACs must match the ones nailed into the ACLs above.
        first_host.setMAC('0e:00:00:00:01:01')
        second_host.setMAC('0e:00:00:00:02:02')
        # Learning is bypassed by the ACLs, so don't require it.
        self.one_ipv4_ping(
            first_host, second_host.IP(), require_host_learned=False)
        self.one_ipv4_ping(
            second_host, first_host.IP(), require_host_learned=False)
class FaucetUntaggedLLDPBlockedTest(FaucetUntaggedTest):
    """LLDP must be dropped by default (drop_lldp defaults on)."""

    def test_untagged(self):
        self.ping_all_when_learned()
        self.assertTrue(self.verify_lldp_blocked())
class FaucetUntaggedCDPTest(FaucetUntaggedTest):
    """CDP must be forwarded (not blocked) by default."""

    def test_untagged(self):
        self.ping_all_when_learned()
        self.assertFalse(self.is_cdp_blocked())
class FaucetUntaggedLLDPUnblockedTest(FaucetUntaggedTest):
    """With drop_lldp disabled, LLDP must be forwarded."""

    CONFIG = """
drop_lldp: False
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def test_untagged(self):
        self.ping_all_when_learned()
        self.assertFalse(self.verify_lldp_blocked())
class FaucetZodiacUntaggedTest(FaucetUntaggedTest):
    """Zodiac has only 3 ports available, and one controller so no Gauge."""

    RUN_GAUGE = False
    N_UNTAGGED = 3

    def test_untagged(self):
        """All hosts on the same untagged VLAN should have connectivity."""
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        self.ping_all_when_learned()
class FaucetTaggedAndUntaggedVlanTest(FaucetTest):
    """Test mixture of tagged and untagged hosts on the same VLAN."""

    N_TAGGED = 1
    N_UNTAGGED = 3
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "mixed"
"""

    CONFIG = """
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def setUp(self):
        """Build the mixed tagged/untagged topology and start the net."""
        super(FaucetTaggedAndUntaggedVlanTest, self).setUp()
        # Use the class constants rather than repeating the literals
        # (was: n_tagged=1, n_untagged=3), consistent with other setUps.
        self.topo = self.topo_class(
            self.ports_sock, dpid=self.dpid,
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED)
        self.start_net()

    def test_untagged(self):
        """Test connectivity including after port flapping."""
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        self.ping_all_when_learned()
class FaucetZodiacTaggedAndUntaggedVlanTest(FaucetUntaggedTest):
    """Mixed tagged/untagged VLAN test constrained to Zodiac's 3 ports,
    with no Gauge controller."""

    RUN_GAUGE = False
    N_TAGGED = 1
    N_UNTAGGED = 2
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "mixed"
"""

    CONFIG = """
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def test_untagged(self):
        """Test connectivity including after port flapping."""
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        self.ping_all_when_learned()
class FaucetUntaggedMaxHostsTest(FaucetUntaggedTest):
    """Test VLAN-level max_hosts: learning is capped and bans are counted."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        max_hosts: 2
"""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def test_untagged(self):
        self.net.pingAll()
        # Only max_hosts (2) of the 4 hosts may be learned.
        learned_hosts = [
            host for host in self.net.hosts if self.host_learned(host)]
        self.assertEquals(2, len(learned_hosts))
        self.assertEquals(2, self.scrape_prometheus_var(
            'vlan_hosts_learned', {'vlan': '100'}))
        # The over-limit hosts must have registered learn bans.
        self.assertGreater(
            self.scrape_prometheus_var(
                'vlan_learn_bans', {'vlan': '100'}), 0)
class FaucetMaxHostsPortTest(FaucetUntaggedTest):
    """Test port-level max_hosts: a port learns at most max_hosts MACs
    and registers learn bans beyond that."""

    MAX_HOSTS = 3
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
        max_hosts: 3
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def test_untagged(self):
        first_host, second_host = self.net.hosts[:2]
        self.ping_all_when_learned()
        # Present twice max_hosts MACs on port 2 via macvlans.
        for i in range(10, 10+(self.MAX_HOSTS*2)):
            mac_intf = 'mac%u' % i
            mac_ipv4 = '10.0.0.%u' % i
            self.add_macvlan(second_host, mac_intf)
            second_host.cmd('ip address add %s/24 brd + dev %s' % (
                mac_ipv4, mac_intf))
            second_host.cmd('ping -c1 -I%s %s &' % (mac_intf, first_host.IP()))
        # Only MAX_HOSTS source-MAC flows may exist for port 2.
        flows = self.get_matching_flows_on_dpid(
            self.dpid,
            {u'dl_vlan': u'100', u'in_port': int(self.port_map['port_2'])},
            table_id=self.ETH_SRC_TABLE)
        self.assertEquals(self.MAX_HOSTS, len(flows))
        self.assertEquals(
            self.MAX_HOSTS,
            len(self.scrape_prometheus_var(
                'learned_macs',
                {'port': self.port_map['port_2'], 'vlan': '100'},
                multiple=True)))
        # The excess MACs must have registered port learn bans.
        self.assertGreater(
            self.scrape_prometheus_var(
                'port_learn_bans', {'port': self.port_map['port_2']}), 0)
class FaucetHostsTimeoutPrometheusTest(FaucetUntaggedTest):
    """Test that learned hosts are exported via Prometheus.

    Hosts should time out, and the exported Prometheus values should be
    overwritten: if the maximum number of MACs at any one time is 5, then
    only 5 values should be exported, even if over 2 hours 100 MACs were
    learned.
    """

    TIMEOUT = 10
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""

    CONFIG = """
timeout: 10
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def mac_as_int(self, mac):
        """Return a colon-separated MAC address string as an integer."""
        return long(mac.replace(':', ''), 16)

    def macs_learned_on_port(self, port):
        """Return the MAC integers Prometheus reports learned on port."""
        port_learned_macs_prom = self.scrape_prometheus_var(
            'learned_macs', {'port': str(port), 'vlan': '100'},
            default=[], multiple=True)
        # Zero entries are empty learned_macs slots; skip them.
        macs_learned = []
        for _, mac_int in port_learned_macs_prom:
            if mac_int:
                macs_learned.append(mac_int)
        return macs_learned

    def verify_hosts_learned(self, hosts):
        """Check that hosts are learned by FAUCET on the expected ports.

        hosts: dict of MAC address string to expected port number.
        """
        mac_ints_on_port_learned = {}
        for mac, port in hosts.items():
            self.mac_learned(mac)
            if port not in mac_ints_on_port_learned:
                mac_ints_on_port_learned[port] = set()
            macs_learned = self.macs_learned_on_port(port)
            mac_ints_on_port_learned[port].update(macs_learned)
        for mac, port in hosts.items():
            mac_int = self.mac_as_int(mac)
            self.assertTrue(mac_int in mac_ints_on_port_learned[port])

    def test_untagged(self):
        first_host, second_host = self.net.hosts[:2]
        learned_mac_ports = {}
        learned_mac_ports[first_host.MAC()] = self.port_map['port_1']
        mac_ips = []
        # Learn 6 macvlan hosts on port 2, in two batches of 4 and 2,
        # letting the first batch time out in between.
        for i in range(10, 16):
            if i == 14:
                first_host.cmd('fping -c3 %s' % ' '.join(mac_ips))
                # check first 4 are learnt
                self.verify_hosts_learned(learned_mac_ports)
                learned_mac_ports = {}
                mac_ips = []
                # wait for first lot to time out.
                # Adding 11 covers the random variation when a rule is added
                time.sleep(self.TIMEOUT + 11)
            mac_intf = 'mac%u' % i
            mac_ipv4 = '10.0.0.%u' % i
            mac_ips.append(mac_ipv4)
            self.add_macvlan(second_host, mac_intf)
            # BUG FIX: was 'ip address add %s/24 dev brd + %s', which is
            # malformed - 'dev' must directly precede the interface name
            # (matches the usage elsewhere in this file).
            second_host.cmd('ip address add %s/24 brd + dev %s' % (
                mac_ipv4, mac_intf))
            # Read back the macvlan's assigned MAC address.
            address = second_host.cmd(
                '|'.join((
                    'ip link show %s' % mac_intf,
                    'grep -o "..:..:..:..:..:.."',
                    'head -1',
                    'xargs echo -n')))
            learned_mac_ports[address] = self.port_map['port_2']
        first_host.cmd('fping -c3 %s' % ' '.join(mac_ips))
        learned_mac_ports[first_host.MAC()] = self.port_map['port_1']
        self.verify_hosts_learned(learned_mac_ports)
        # Verify same or less number of hosts on a port reported by Prometheus
        self.assertTrue((
            len(self.macs_learned_on_port(self.port_map['port_1'])) <=
            len(learned_mac_ports)))
class FaucetLearn50MACsOnPortTest(FaucetUntaggedTest):
    """Learn 50 macvlan MACs on one port and verify connectivity and the
    Prometheus learned-host count."""

    MAX_HOSTS = 50
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def test_untagged(self):
        first_host, second_host = self.net.hosts[:2]
        self.ping_all_when_learned()
        mac_intf_ipv4s = []
        for i in range(10, 10+self.MAX_HOSTS):
            mac_intf_ipv4s.append(('mac%u' % i, '10.0.0.%u' % i))
        # configure macvlan interfaces and stimulate learning
        for mac_intf, mac_ipv4 in mac_intf_ipv4s:
            self.add_macvlan(second_host, mac_intf)
            second_host.cmd('ip address add %s/24 brd + dev %s' % (
                mac_ipv4, mac_intf))
            second_host.cmd('ping -c1 -I%s %s &' % (mac_intf, first_host.IP()))
        # verify connectivity
        for mac_intf, _ in mac_intf_ipv4s:
            self.one_ipv4_ping(
                second_host, first_host.IP(),
                require_host_learned=False, intf=mac_intf)
        # verify FAUCET thinks it learned this many hosts
        self.assertGreater(
            self.scrape_prometheus_var('vlan_hosts_learned', {'vlan': '100'}),
            self.MAX_HOSTS)
class FaucetUntaggedHUPTest(FaucetUntaggedTest):
    """Test handling HUP signal without config change."""

    def _configure_count_with_retry(self, expected_count):
        # Poll up to 3 times for the controller's configure count to
        # reach the expected value.
        for _ in range(3):
            configure_count = self.get_configure_count()
            if configure_count == expected_count:
                return
            time.sleep(1)
        self.fail('configure count %u != expected %u' % (
            configure_count, expected_count))

    def test_untagged(self):
        """Test that FAUCET receives HUP signal and keeps switching."""
        init_config_count = self.get_configure_count()
        # Each HUP must bump the configure count by exactly one.
        for i in range(init_config_count, init_config_count+3):
            self._configure_count_with_retry(i)
            self.verify_hup_faucet()
            self._configure_count_with_retry(i+1)
        # HUP with unchanged config must not cause the DP to reconnect.
        self.assertEqual(
            self.scrape_prometheus_var('of_dp_disconnections', default=0),
            0)
        self.assertEqual(
            self.scrape_prometheus_var('of_dp_connections', default=0),
            1)
        self.wait_until_controller_flow()
        self.ping_all_when_learned()
class FaucetConfigReloadTest(FaucetTest):
    """Test handling HUP signal with config change."""

    N_UNTAGGED = 4
    N_TAGGED = 0
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
    200:
        description: "untagged"
"""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    # ACL 1 blocks TCP/5001 and allows TCP/5002; ACL 2 is the inverse.
    ACL = """
acls:
    1:
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5001
            actions:
                allow: 0
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5002
            actions:
                allow: 1
        - rule:
            actions:
                allow: 1
    2:
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5001
            actions:
                allow: 1
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5002
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
"""

    def setUp(self):
        """Write the ACL include file, reference it from the FAUCET config,
        then build and start the topology."""
        super(FaucetConfigReloadTest, self).setUp()
        self.acl_config_file = '%s/acl.yaml' % self.tmpdir
        open(self.acl_config_file, 'w').write(self.ACL)
        open(self.faucet_config_path, 'a').write(
            'include:\n - %s' % self.acl_config_file)
        self.topo = self.topo_class(
            self.ports_sock, dpid=self.dpid,
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED)
        self.start_net()

    def _get_conf(self):
        # Parse the current FAUCET config file.
        # NOTE(review): yaml.load() without an explicit Loader is unsafe on
        # untrusted input; the config here is test-generated.
        return yaml.load(open(self.faucet_config_path, 'r').read())

    def _reload_conf(self, conf, restart, cold_start, change_expected=True):
        """Write conf, optionally HUP FAUCET, and verify via Prometheus
        whether a warm/cold reload occurred (or not) as expected."""
        open(self.faucet_config_path, 'w').write(yaml.dump(conf))
        if restart:
            var = 'faucet_config_reload_warm'
            if cold_start:
                var = 'faucet_config_reload_cold'
            old_count = int(
                self.scrape_prometheus_var(var, dpid=True, default=0))
            self.verify_hup_faucet()
            new_count = int(
                self.scrape_prometheus_var(var, dpid=True, default=0))
            if change_expected:
                self.assertEquals(
                    old_count + 1, new_count,
                    msg='%s did not increment: %u' % (var, new_count))
            else:
                self.assertEquals(
                    old_count, new_count,
                    msg='%s incremented: %u' % (var, new_count))

    def get_port_match_flow(self, port_no, table_id=None):
        """Return the flow matching in_port on the given table
        (ETH_SRC_TABLE by default)."""
        if table_id is None:
            table_id = self.ETH_SRC_TABLE
        flow = self.get_matching_flow_on_dpid(
            self.dpid, {u'in_port': int(port_no)}, table_id)
        return flow

    def test_add_unknown_dp(self):
        # Adding an unrelated DP must not trigger a reload of this DP.
        conf = self._get_conf()
        conf['dps']['unknown'] = {
            'dp_id': int(self.rand_dpid()),
            'hardware': 'Open vSwitch',
        }
        self._reload_conf(
            conf, restart=True, cold_start=False, change_expected=False)

    def change_port_config(self, port, config_name, config_value,
                           restart=True, conf=None, cold_start=False):
        """Set one port config key and reload."""
        if conf is None:
            conf = self._get_conf()
        conf['dps']['faucet-1']['interfaces'][port][config_name] = config_value
        self._reload_conf(conf, restart, cold_start)

    def change_vlan_config(self, vlan, config_name, config_value,
                           restart=True, conf=None, cold_start=False):
        """Set one VLAN config key and reload."""
        if conf is None:
            conf = self._get_conf()
        conf['vlans'][vlan][config_name] = config_value
        self._reload_conf(conf, restart, cold_start)

    def test_tabs_are_bad(self):
        # An invalid (tab-containing) config must be rejected and the
        # previous good config kept running.
        self.ping_all_when_learned()
        orig_conf = self._get_conf()
        self.force_faucet_reload('\t'.join(('tabs', 'are', 'bad')))
        self.ping_all_when_learned()
        self._reload_conf(
            orig_conf, restart=True, cold_start=False, change_expected=False)

    def test_port_change_vlan(self):
        first_host, second_host = self.net.hosts[:2]
        third_host, fourth_host = self.net.hosts[2:]
        self.ping_all_when_learned()
        # Move ports 1 and 2 to VLAN 200; second change forces a cold start.
        self.change_port_config(
            self.port_map['port_1'], 'native_vlan', 200, restart=False)
        self.change_port_config(
            self.port_map['port_2'], 'native_vlan', 200, restart=True, cold_start=True)
        # 4296 == 200 | 0x1000 (VID with the present bit set).
        for port_name in ('port_1', 'port_2'):
            self.wait_until_matching_flow(
                {u'in_port': int(self.port_map[port_name])},
                table_id=self.VLAN_TABLE,
                actions=[u'SET_FIELD: {vlan_vid:4296}'])
        self.one_ipv4_ping(first_host, second_host.IP(), require_host_learned=False)
        # hosts 1 and 2 now in VLAN 200, so they shouldn't see floods for 3 and 4.
        self.verify_vlan_flood_limited(
            third_host, fourth_host, first_host)

    def test_port_change_acl(self):
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        orig_conf = self._get_conf()
        # Apply ACL 1 (blocks 5001, allows 5002) on port 1, then revert.
        self.change_port_config(
            self.port_map['port_1'], 'acl_in', 1, cold_start=False)
        self.wait_until_matching_flow(
            {u'in_port': int(self.port_map['port_1']), u'tp_dst': 5001},
            table_id=self.PORT_ACL_TABLE)
        self.verify_tp_dst_blocked(5001, first_host, second_host)
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
        self._reload_conf(orig_conf, True, cold_start=False)
        self.verify_tp_dst_notblocked(
            5001, first_host, second_host, table_id=None)
        self.verify_tp_dst_notblocked(
            5002, first_host, second_host, table_id=None)

    def test_port_change_permanent_learn(self):
        first_host, second_host, third_host = self.net.hosts[0:3]
        self.change_port_config(
            self.port_map['port_1'], 'permanent_learn', True, cold_start=False)
        self.ping_all_when_learned()
        # A host impersonating port 1's permanently-learned MAC is ignored.
        original_third_host_mac = third_host.MAC()
        third_host.setMAC(first_host.MAC())
        self.assertEqual(100.0, self.net.ping((second_host, third_host)))
        self.assertEqual(0, self.net.ping((first_host, second_host)))
        third_host.setMAC(original_third_host_mac)
        self.ping_all_when_learned()
        # Applying an ACL must still work alongside permanent_learn.
        self.change_port_config(
            self.port_map['port_1'], 'acl_in', 1, cold_start=False)
        self.wait_until_matching_flow(
            {u'in_port': int(self.port_map['port_1']), u'tp_dst': 5001},
            table_id=self.PORT_ACL_TABLE)
        self.verify_tp_dst_blocked(5001, first_host, second_host)
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedBGPIPv4DefaultRouteTest(FaucetUntaggedTest):
    """Test IPv4 routing and import default route from BGP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
        bgp_port: %(bgp_port)d
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_addresses: ["127.0.0.1"]
        bgp_neighbor_as: 2
"""

    CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    # exabgp advertises a default route via first_host (10.0.0.1).
    exabgp_peer_conf = """
    static {
      route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
    }
"""

    exabgp_log = None

    def pre_start_net(self):
        # Start the exabgp peer before the net so FAUCET can connect to it.
        exabgp_conf = self.get_exabgp_conf('127.0.0.1', self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Test IPv4 routing, and BGP routes received."""
        first_host, second_host = self.net.hosts[:2]
        # Off-subnet alias on first_host, reachable only via the default route.
        first_host_alias_ip = ipaddress.ip_interface(u'10.99.99.99/24')
        first_host_alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(first_host_alias_ip.ip))
        self.host_ipv4_alias(first_host, first_host_alias_ip)
        self.wait_bgp_up('127.0.0.1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        self.add_host_route(
            second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
        self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
        self.one_ipv4_controller_ping(first_host)
class FaucetUntaggedBGPIPv4RouteTest(FaucetUntaggedTest):
    """Test IPv4 routing and import from BGP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
        bgp_port: %(bgp_port)d
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_addresses: ["127.0.0.1"]
        bgp_neighbor_as: 2
        routes:
            - route:
                ip_dst: 10.99.99.0/24
                ip_gw: 10.0.0.1
"""

    CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    # 10.0.1-3.0/24 are importable; 10.0.4.0/24 has a FAUCET VIP as next
    # hop and 10.0.5.0/24 has an unconnected next hop - both invalid.
    exabgp_peer_conf = """
    static {
      route 10.0.1.0/24 next-hop 10.0.0.1 local-preference 100;
      route 10.0.2.0/24 next-hop 10.0.0.2 local-preference 100;
      route 10.0.3.0/24 next-hop 10.0.0.2 local-preference 100;
      route 10.0.4.0/24 next-hop 10.0.0.254;
      route 10.0.5.0/24 next-hop 10.10.0.1;
    }
"""

    exabgp_log = None
    exabgp_err = None

    def pre_start_net(self):
        # Start the exabgp peer before the net so FAUCET can connect to it.
        exabgp_conf = self.get_exabgp_conf('127.0.0.1', self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Test IPv4 routing, and BGP routes received."""
        first_host, second_host = self.net.hosts[:2]
        # wait until 10.0.0.1 has been resolved
        self.wait_for_route_as_flow(
            first_host.MAC(), ipaddress.IPv4Network(u'10.99.99.0/24'))
        self.wait_bgp_up('127.0.0.1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        # BUG FIX: the rejected prefixes are the advertised 10.0.4.0/24 and
        # 10.0.5.0/24 (the checks previously looked for the transposed
        # strings 10.0.0.4/24 and 10.0.0.5/24, which are never advertised).
        self.verify_invalid_bgp_route('10.0.4.0/24 cannot be us')
        self.verify_invalid_bgp_route('10.0.5.0/24 is not a connected network')
        self.wait_for_route_as_flow(
            second_host.MAC(), ipaddress.IPv4Network(u'10.0.3.0/24'))
        self.verify_ipv4_routing_mesh()
        self.flap_all_switch_ports()
        self.verify_ipv4_routing_mesh()
        for host in first_host, second_host:
            self.one_ipv4_controller_ping(host)
class FaucetUntaggedIPv4RouteTest(FaucetUntaggedTest):
    """Test IPv4 routing and export to BGP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
        bgp_port: %(bgp_port)d
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_addresses: ["127.0.0.1"]
        bgp_neighbor_as: 2
        routes:
            - route:
                ip_dst: "10.0.1.0/24"
                ip_gw: "10.0.0.1"
            - route:
                ip_dst: "10.0.2.0/24"
                ip_gw: "10.0.0.2"
            - route:
                ip_dst: "10.0.3.0/24"
                ip_gw: "10.0.0.2"
"""

    CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    exabgp_log = None
    exabgp_err = None

    def pre_start_net(self):
        # Start an exabgp peer (no static routes) to receive FAUCET's exports.
        exabgp_conf = self.get_exabgp_conf('127.0.0.1')
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Test IPv4 routing, and BGP routes sent."""
        self.verify_ipv4_routing_mesh()
        self.flap_all_switch_ports()
        self.verify_ipv4_routing_mesh()
        self.wait_bgp_up('127.0.0.1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
            0)
        # exabgp should have received our BGP updates
        updates = self.exabgp_updates(self.exabgp_log)
        assert re.search('10.0.0.0/24 next-hop 10.0.0.254', updates)
        assert re.search('10.0.1.0/24 next-hop 10.0.0.1', updates)
        assert re.search('10.0.2.0/24 next-hop 10.0.0.2', updates)
        # BUG FIX: this line previously duplicated the 10.0.2.0/24 check,
        # leaving the exported 10.0.3.0/24 route unverified.
        assert re.search('10.0.3.0/24 next-hop 10.0.0.2', updates)
class FaucetZodiacUntaggedIPv4RouteTest(FaucetUntaggedIPv4RouteTest):
    """IPv4 routing/BGP-export test constrained to Zodiac's 3 ports,
    with no Gauge controller."""

    RUN_GAUGE = False
    N_UNTAGGED = 3
class FaucetUntaggedVLanUnicastFloodTest(FaucetUntaggedTest):
    """With VLAN unicast_flood enabled, unknown unicast is flooded."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: True
"""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def test_untagged(self):
        self.ping_all_when_learned()
        self.verify_port1_unicast(True)
        # Unknown unicast to a bogus MAC must be flooded to port 1.
        self.assertTrue(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoVLanUnicastFloodTest(FaucetUntaggedTest):
    """With VLAN unicast_flood disabled, unknown unicast is not flooded."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
"""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def test_untagged(self):
        self.verify_port1_unicast(False)
        self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedPortUnicastFloodTest(FaucetUntaggedTest):
    """Port-level unicast_flood cannot override a VLAN-level disable."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
"""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
        unicast_flood: True
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def test_untagged(self):
        self.verify_port1_unicast(False)
        # VLAN level config to disable flooding takes precedence,
        # cannot enable port-only flooding.
        self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoPortUnicastFloodTest(FaucetUntaggedTest):
    """Port-level unicast_flood disable suppresses flooding to that port
    even when the VLAN enables it."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: True
"""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
        unicast_flood: False
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def test_untagged(self):
        self.verify_port1_unicast(False)
        self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedHostMoveTest(FaucetUntaggedTest):
    """Verify learning tracks hosts whose MACs swap between ports."""

    def test_untagged(self):
        host_a, host_b = self.net.hosts[0:2]
        pair = (host_a, host_b)
        self.assertEqual(0, self.net.ping(pair))
        self.swap_host_macs(host_a, host_b)
        # First ping after the swap stimulates relearning; result ignored.
        self.net.ping(pair)
        for host, in_port in zip(
                pair, (self.port_map['port_1'], self.port_map['port_2'])):
            self.require_host_learned(host, in_port=in_port)
        # Once relearned, connectivity must be fully restored.
        self.assertEqual(0, self.net.ping(pair))
class FaucetUntaggedHostPermanentLearnTest(FaucetUntaggedTest):
    """A permanently-learned host's MAC cannot be stolen by another port."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
        permanent_learn: True
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def test_untagged(self):
        self.ping_all_when_learned()
        first_host, second_host, third_host = self.net.hosts[0:3]
        # 3rd host impersonates 1st, 3rd host breaks but 1st host still OK
        original_third_host_mac = third_host.MAC()
        third_host.setMAC(first_host.MAC())
        self.assertEqual(100.0, self.net.ping((second_host, third_host)))
        self.assertEqual(0, self.net.ping((first_host, second_host)))
        # 3rd host stops impersonating, now everything fine again.
        third_host.setMAC(original_third_host_mac)
        self.ping_all_when_learned()
class FaucetSingleUntaggedIPv4ControlPlaneTest(FaucetUntaggedTest):
    """Exercise the IPv4 control plane (FAUCET VIP) with ping, fping and
    fuzzed ICMP/ARP traffic."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
"""

    CONFIG = """
max_resolve_backoff_time: 1
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
"""

    def test_ping_controller(self):
        first_host, second_host = self.net.hosts[0:2]
        # Host-to-host and host-to-VIP pings must survive port flaps.
        for _ in range(5):
            self.one_ipv4_ping(first_host, second_host.IP())
            for host in first_host, second_host:
                self.one_ipv4_controller_ping(host)
            self.flap_all_switch_ports()

    def test_fping_controller(self):
        first_host = self.net.hosts[0]
        self.one_ipv4_controller_ping(first_host)
        self.verify_controller_fping(first_host, self.FAUCET_VIPV4)

    def test_fuzz_controller(self):
        """The VIP must still answer pings after fuzzed ICMP/ARP floods."""
        first_host = self.net.hosts[0]
        self.one_ipv4_controller_ping(first_host)
        packets = 1000
        # scapy fuzz: ICMP echo reply (type 0), echo request (type 8), ARP.
        for fuzz_cmd in (
                ('python -c \"from scapy.all import * ;'
                 'scapy.all.send(IP(dst=\'%s\')/'
                 'fuzz(%s(type=0)),count=%u)\"' % ('10.0.0.254', 'ICMP', packets)),
                ('python -c \"from scapy.all import * ;'
                 'scapy.all.send(IP(dst=\'%s\')/'
                 'fuzz(%s(type=8)),count=%u)\"' % ('10.0.0.254', 'ICMP', packets)),
                ('python -c \"from scapy.all import * ;'
                 'scapy.all.send(fuzz(%s(pdst=\'%s\')),'
                 'count=%u)\"' % ('ARP', '10.0.0.254', packets))):
            self.assertTrue(
                re.search('Sent %u packets' % packets, first_host.cmd(fuzz_cmd)))
        self.one_ipv4_controller_ping(first_host)
class FaucetUntaggedIPv6RATest(FaucetUntaggedTest):
    """Test FAUCET's IPv6 router advertisement / solicitation handling.

    Fix: replaced the deprecated ``assertEquals`` alias with ``assertEqual``.
    """

    FAUCET_MAC = "0e:00:00:00:00:99"

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fe80::1:254/64", "fc00::1:254/112", "fc00::2:254/112", "10.0.0.254/24"]
        faucet_mac: "%s"
""" % FAUCET_MAC

    CONFIG = """
        advertise_interval: 5
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_ndisc6(self):
        """ndisc6 resolves every IPv6 VIP to the FAUCET MAC."""
        first_host = self.net.hosts[0]
        for vip in ('fe80::1:254', 'fc00::1:254', 'fc00::2:254'):
            self.assertEqual(
                self.FAUCET_MAC.upper(),
                first_host.cmd('ndisc6 -q %s %s' % (vip, first_host.defaultIntf())).strip())

    def test_rdisc6(self):
        """rdisc6 reports exactly the advertised prefixes."""
        first_host = self.net.hosts[0]
        rdisc6_results = sorted(list(set(first_host.cmd(
            'rdisc6 -q %s' % first_host.defaultIntf()).splitlines())))
        self.assertEqual(
            ['fc00::1:0/112', 'fc00::2:0/112'],
            rdisc6_results)

    def test_ra_advertise(self):
        """Unsolicited RAs are periodically multicast with expected contents."""
        first_host = self.net.hosts[0]
        tcpdump_filter = ' and '.join((
            'ether dst 33:33:00:00:00:01',
            'ether src %s' % self.FAUCET_MAC,
            'icmp6',
            'ip6[40] == 134',
            'ip6 host fe80::1:254'))
        tcpdump_txt = self.tcpdump_helper(
            first_host, tcpdump_filter, [], timeout=30, vflags='-vv', packets=1)
        # The RA must carry both prefixes and the source link-layer option.
        for ra_required in (
                r'fe80::1:254 > ff02::1:.+ICMP6, router advertisement',
                r'fc00::1:0/112, Flags \[onlink, auto\]',
                r'fc00::2:0/112, Flags \[onlink, auto\]',
                r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
            self.assertTrue(
                re.search(ra_required, tcpdump_txt),
                msg='%s: %s' % (ra_required, tcpdump_txt))

    def test_rs_reply(self):
        """A router solicitation gets a unicast RA reply."""
        first_host = self.net.hosts[0]
        tcpdump_filter = ' and '.join((
            'ether src %s' % self.FAUCET_MAC,
            'ether dst %s' % first_host.MAC(),
            'icmp6',
            'ip6[40] == 134',
            'ip6 host fe80::1:254'))
        tcpdump_txt = self.tcpdump_helper(
            first_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'rdisc6 -1 %s' % first_host.defaultIntf())],
            timeout=30, vflags='-vv', packets=1)
        for ra_required in (
                r'fe80::1:254 > fe80::.+ICMP6, router advertisement',
                r'fc00::1:0/112, Flags \[onlink, auto\]',
                r'fc00::2:0/112, Flags \[onlink, auto\]',
                r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
            self.assertTrue(
                re.search(ra_required, tcpdump_txt),
                msg='%s: %s (%s)' % (ra_required, tcpdump_txt, tcpdump_filter))
class FaucetSingleUntaggedIPv6ControlPlaneTest(FaucetUntaggedTest):
    """Exercise FAUCET's IPv6 control plane (VIP ping/fping/fuzz) on an untagged VLAN.

    Fixes: ``scapy.all`` was referenced without ever being imported (the
    module import block has no scapy import, so ``dir(scapy.all)`` raised
    NameError), and the Python-2-only ``print`` statement is replaced with
    the call form that works on both Python 2 and 3.
    """

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
"""

    CONFIG = """
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_ping_controller(self):
        """Host-host and host-VIP IPv6 pings keep working across port flaps."""
        first_host, second_host = self.net.hosts[0:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        for _ in range(5):
            self.one_ipv6_ping(first_host, 'fc00::1:2')
            for host in first_host, second_host:
                self.one_ipv6_controller_ping(host)
            self.flap_all_switch_ports()

    def test_fping_controller(self):
        """fping against the IPv6 VIP is answered."""
        first_host = self.net.hosts[0]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.one_ipv6_controller_ping(first_host)
        self.verify_controller_fping(first_host, self.FAUCET_VIPV6)

    def test_fuzz_controller(self):
        """Fuzz the VIP with every scapy ICMPv6 class; control plane must survive."""
        # scapy is only needed here to enumerate ICMPv6 packet classes;
        # the fuzz traffic itself is generated by scapy run in a
        # subprocess on the host.
        import scapy.all  # pylint: disable=import-error
        first_host = self.net.hosts[0]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.one_ipv6_controller_ping(first_host)
        fuzz_success = False
        packets = 1000
        for fuzz_class in dir(scapy.all):
            if fuzz_class.startswith('ICMPv6'):
                fuzz_cmd = (
                    'python -c \"from scapy.all import * ;'
                    'scapy.all.send(IPv6(dst=\'%s\')/'
                    'fuzz(%s()),count=%u)\"' % ('fc00::1:254', fuzz_class, packets))
                if re.search('Sent %u packets' % packets, first_host.cmd(fuzz_cmd)):
                    # print as a call: valid on Python 2 and 3.
                    print(fuzz_class)
                    fuzz_success = True
        self.assertTrue(fuzz_success)
        # Controller must still answer after the fuzzing run.
        self.one_ipv6_controller_ping(first_host)
class FaucetTaggedAndUntaggedTest(FaucetTest):
    """Mixed tagged/untagged topology: flooding and pings stay within a VLAN.

    Fix: replaced the deprecated ``assertEquals`` alias with ``assertEqual``.
    """

    N_TAGGED = 2
    N_UNTAGGED = 4

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
    101:
        description: "untagged"
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                native_vlan: 101
                description: "b3"
            %(port_4)d:
                native_vlan: 101
                description: "b4"
"""

    def setUp(self):
        """Build a topology with 2 tagged and 2 untagged hosts."""
        super(FaucetTaggedAndUntaggedTest, self).setUp()
        self.topo = self.topo_class(
            self.ports_sock, dpid=self.dpid, n_tagged=2, n_untagged=2)
        self.start_net()

    # NOTE: method name keeps its historical spelling ("seperate") so the
    # externally visible test name does not change.
    def test_seperate_untagged_tagged(self):
        """Hosts can reach peers in their own VLAN but not the other VLAN."""
        tagged_host_pair = self.net.hosts[:2]
        untagged_host_pair = self.net.hosts[2:]
        self.verify_vlan_flood_limited(
            tagged_host_pair[0], tagged_host_pair[1], untagged_host_pair[0])
        self.verify_vlan_flood_limited(
            untagged_host_pair[0], untagged_host_pair[1], tagged_host_pair[0])
        # hosts within VLANs can ping each other
        self.assertEqual(0, self.net.ping(tagged_host_pair))
        self.assertEqual(0, self.net.ping(untagged_host_pair))
        # hosts cannot ping hosts in other VLANs
        self.assertEqual(
            100, self.net.ping([tagged_host_pair[0], untagged_host_pair[0]]))
class FaucetUntaggedACLTest(FaucetUntaggedTest):
    """Port ACL: TCP/5001 is dropped, TCP/5002 and all else is allowed."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5002
            actions:
                allow: 1
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5001
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_port5001_blocked(self):
        """Traffic to TCP port 5001 must be dropped by the ACL."""
        self.ping_all_when_learned()
        client, server = self.net.hosts[0:2]
        self.verify_tp_dst_blocked(5001, client, server)

    def test_port5002_notblocked(self):
        """Traffic to TCP port 5002 must pass the ACL."""
        self.ping_all_when_learned()
        client, server = self.net.hosts[0:2]
        self.verify_tp_dst_notblocked(5002, client, server)
class FaucetUntaggedACLTcpMaskTest(FaucetUntaggedACLTest):
    """Port ACL with a masked TCP destination port match (block ports > 1023)."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5002
            actions:
                allow: 1
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5001
            actions:
                allow: 0
        - rule:
            dl_type: 0x800
            nw_proto: 6
            # Match packets > 1023
            tp_dst: 1024/1024
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
"""

    def test_port_gt1023_blocked(self):
        """The 1024/1024 mask drops high ports but leaves 1023 reachable."""
        self.ping_all_when_learned()
        client, server = self.net.hosts[0:2]
        self.verify_tp_dst_blocked(1024, client, server, mask=1024)
        self.verify_tp_dst_notblocked(1023, client, server, table_id=None)
class FaucetUntaggedVLANACLTest(FaucetUntaggedTest):
    """Same ACL semantics as the port ACL test, but applied at the VLAN level."""

    CONFIG_GLOBAL = """
acls:
    1:
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5001
            actions:
                allow: 0
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5002
            actions:
                allow: 1
        - rule:
            actions:
                allow: 1
vlans:
    100:
        description: "untagged"
        acl_in: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_port5001_blocked(self):
        """TCP/5001 is dropped by the VLAN ACL table."""
        self.ping_all_when_learned()
        client, server = self.net.hosts[0:2]
        self.verify_tp_dst_blocked(
            5001, client, server, table_id=self.VLAN_ACL_TABLE)

    def test_port5002_notblocked(self):
        """TCP/5002 passes the VLAN ACL table."""
        self.ping_all_when_learned()
        client, server = self.net.hosts[0:2]
        self.verify_tp_dst_notblocked(
            5002, client, server, table_id=self.VLAN_ACL_TABLE)
class FaucetZodiacUntaggedACLTest(FaucetUntaggedACLTest):
    """Variant of the untagged ACL test with 3 untagged ports and Gauge disabled."""

    RUN_GAUGE = False
    N_UNTAGGED = 3

    def test_untagged(self):
        """All hosts on the same untagged VLAN should have connectivity."""
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        # Learning must recover after the flap.
        self.ping_all_when_learned()
class FaucetUntaggedACLMirrorTest(FaucetUntaggedTest):
    """ACL mirror action: allowed traffic is also copied to the mirror port."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            actions:
                allow: 1
                mirror: mirrorport
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                description: "b2"
                acl_in: 1
            mirrorport:
                number: %(port_3)d
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Pings between mirrored ports are seen on the mirror port."""
        ping_src, ping_dst, monitor = self.net.hosts[0:3]
        self.verify_ping_mirrored(ping_src, ping_dst, monitor)

    def test_eapol_mirrored(self):
        """EAPOL frames between mirrored ports are seen on the mirror port."""
        eapol_src, eapol_dst, monitor = self.net.hosts[0:3]
        self.verify_eapol_mirrored(eapol_src, eapol_dst, monitor)
class FaucetZodiacUntaggedACLMirrorTest(FaucetUntaggedACLMirrorTest):
    """ACL mirror test variant with 3 untagged ports and Gauge disabled."""

    RUN_GAUGE = False
    N_UNTAGGED = 3
class FaucetUntaggedACLMirrorDefaultAllowTest(FaucetUntaggedACLMirrorTest):
    """Mirror ACL rule with no explicit allow: traffic is still forwarded and mirrored."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            actions:
                mirror: mirrorport
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                description: "b2"
                acl_in: 1
            mirrorport:
                number: %(port_3)d
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""
class FaucetUntaggedOutputTest(FaucetUntaggedTest):
    """ACL output action: rewrite destination MAC and push a VLAN tag."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    dl_dst: "06:06:06:06:06:06"
                    vlan_vid: 123
                    port: acloutport
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            acloutport:
                number: %(port_2)d
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Ping via a static ARP entry; capture must show rewritten MAC and VLAN 123."""
        first_host, second_host = self.net.hosts[0:2]
        # we expected to see the rewritten address and VLAN
        tcpdump_filter = ('icmp and ether dst 06:06:06:06:06:06')
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd('ping -c1 %s' % second_host.IP())])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 123', tcpdump_txt))
class FaucetUntaggedMultiVlansOutputTest(FaucetUntaggedTest):
    """ACL output action pushing multiple VLAN tags (QinQ-style)."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    dl_dst: "06:06:06:06:06:06"
                    vlan_vids: [123, 456]
                    port: acloutport
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            acloutport:
                number: %(port_2)d
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    # Multiple VLAN push is not supported by the OVS versions commonly
    # installed, hence the skip.
    @unittest.skip('needs OVS dev or > v2.8')
    def test_untagged(self):
        """Capture must show both pushed VLAN tags (outer 456, inner 123)."""
        first_host, second_host = self.net.hosts[0:2]
        # we expected to see the rewritten address and VLAN
        tcpdump_filter = 'vlan'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd('ping -c1 %s' % second_host.IP())])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 456.+vlan 123', tcpdump_txt))
class FaucetUntaggedMirrorTest(FaucetUntaggedTest):
    """Port-level mirroring (no ACL): port 3 mirrors port 1."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
                mirror: %(port_1)d
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Pings between the mirrored port and a peer appear on the mirror port."""
        ping_src, ping_dst, monitor = self.net.hosts[0:3]
        self.verify_ping_mirrored(ping_src, ping_dst, monitor)
class FaucetTaggedTest(FaucetTest):
    """Base test for an all-tagged (VLAN 100) 4-port topology."""

    N_UNTAGGED = 0
    N_TAGGED = 4

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def setUp(self):
        """Build and start a topology with 4 tagged hosts."""
        super(FaucetTaggedTest, self).setUp()
        self.topo = self.topo_class(
            self.ports_sock, dpid=self.dpid, n_tagged=4)
        self.start_net()

    def test_tagged(self):
        """All tagged hosts can ping each other once learned."""
        self.ping_all_when_learned()
class FaucetTaggedSwapVidOutputTest(FaucetTaggedTest):
    """ACL output action that swaps the VLAN VID (100 -> 101)."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        unicast_flood: False
    101:
        description: "tagged"
        unicast_flood: False
acls:
    1:
        - rule:
            vlan_vid: 100
            actions:
                output:
                    swap_vid: 101
                    port: acloutport
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
                acl_in: 1
            acloutport:
                number: %(port_2)d
                tagged_vlans: [101]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_tagged(self):
        """Capture on the output port must show VID 101, not the original 100."""
        first_host, second_host = self.net.hosts[0:2]
        # we expected to see the swapped VLAN VID
        tcpdump_filter = 'vlan 101'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd('ping -c1 %s' % second_host.IP())], root_intf=True)
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 101', tcpdump_txt))
class FaucetTaggedPopVlansOutputTest(FaucetTaggedTest):
    """ACL output action that pops the VLAN tag and rewrites the destination MAC."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        unicast_flood: False
acls:
    1:
        - rule:
            vlan_vid: 100
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    dl_dst: "06:06:06:06:06:06"
                    pop_vlans: 1
                    port: acloutport
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
                acl_in: 1
            acloutport:
                tagged_vlans: [100]
                number: %(port_2)d
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_tagged(self):
        """Capture must show the packet untagged ('not vlan') with rewritten MAC."""
        first_host, second_host = self.net.hosts[0:2]
        tcpdump_filter = 'not vlan and icmp and ether dst 06:06:06:06:06:06'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(
                    'ping -c1 %s' % second_host.IP())], packets=10, root_intf=True)
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
class FaucetTaggedIPv4ControlPlaneTest(FaucetTaggedTest):
    """IPv4 control plane (VIP ping) on a tagged VLAN."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["10.0.0.254/24"]
"""

    CONFIG = """
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_ping_controller(self):
        """Tagged hosts can ping each other and the IPv4 VIP."""
        host_a, host_b = self.net.hosts[0:2]
        self.one_ipv4_ping(host_a, host_b.IP())
        for host in (host_a, host_b):
            self.one_ipv4_controller_ping(host)
class FaucetTaggedIPv6ControlPlaneTest(FaucetTaggedTest):
    """IPv6 control plane (VIP ping) on a tagged VLAN."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["fc00::1:254/112"]
"""

    CONFIG = """
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_ping_controller(self):
        """Tagged hosts can ping each other and the IPv6 VIP."""
        host_a, host_b = self.net.hosts[0:2]
        self.add_host_ipv6_address(host_a, 'fc00::1:1/112')
        self.add_host_ipv6_address(host_b, 'fc00::1:2/112')
        self.one_ipv6_ping(host_a, 'fc00::1:2')
        for host in (host_a, host_b):
            self.one_ipv6_controller_ping(host)
class FaucetTaggedICMPv6ACLTest(FaucetTaggedTest):
    """ACL matching ICMPv6 neighbor solicitation (type 135) by ND target."""

    CONFIG_GLOBAL = """
acls:
    1:
        - rule:
            dl_type: 0x86dd
            vlan_vid: 100
            ip_proto: 58
            icmpv6_type: 135
            ipv6_nd_target: "fc00::1:2"
            actions:
                output:
                    port: b2
        - rule:
            actions:
                allow: 1
vlans:
    100:
        description: "tagged"
        faucet_vips: ["fc00::1:254/112"]
"""

    CONFIG = """
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
                acl_in: 1
            b2:
                number: %(port_2)d
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_icmpv6_acl_match(self):
        """An IPv6 ping causes the ND-target ACL rule to match packets."""
        first_host, second_host = self.net.hosts[0:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        self.one_ipv6_ping(first_host, 'fc00::1:2')
        # The ACL rule's packet counter must be non-zero after the ping.
        self.wait_nonzero_packet_count_flow(
            {u'ipv6_nd_target': u'fc00::1:2'}, table_id=self.PORT_ACL_TABLE)
class FaucetTaggedIPv4RouteTest(FaucetTaggedTest):
    """Static IPv4 routing on a tagged VLAN, stable across host MAC swaps."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["10.0.0.254/24"]
        routes:
            - route:
                ip_dst: "10.0.1.0/24"
                ip_gw: "10.0.0.1"
            - route:
                ip_dst: "10.0.2.0/24"
                ip_gw: "10.0.0.2"
            - route:
                ip_dst: "10.0.3.0/24"
                ip_gw: "10.0.0.2"
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_tagged(self):
        """Routing keeps working while hosts repeatedly swap MAC addresses."""
        gateway_host, peer_host = self.net.hosts[:2]
        gateway_routed = ipaddress.ip_interface(u'10.0.1.1/24')
        peer_routed = ipaddress.ip_interface(u'10.0.2.1/24')
        for _attempt in range(3):
            self.verify_ipv4_routing(
                gateway_host, gateway_routed,
                peer_host, peer_routed)
            self.swap_host_macs(gateway_host, peer_host)
class FaucetTaggedProactiveNeighborIPv4RouteTest(FaucetTaggedTest):
    """Proactive IPv4 neighbor learning: FAUCET resolves a not-yet-seen host."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["10.0.0.254/24"]
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        proactive_learn: true
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_tagged(self):
        """Ping an alias address via the VIP; neighbor count must grow past 1."""
        host_a, host_b = self.net.hosts[:2]
        alias_ip = ipaddress.ip_interface(u'10.0.0.99/24')
        # Host route (/32) form of the alias, for host_b's routing table.
        alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(alias_ip.ip))
        self.host_ipv4_alias(host_a, alias_ip)
        self.add_host_route(host_b, alias_host_ip, self.FAUCET_VIPV4.ip)
        self.one_ipv4_ping(host_b, alias_ip.ip)
        self.assertGreater(
            self.scrape_prometheus_var(
                'vlan_neighbors', {'ipv': '4', 'vlan': '100'}),
            1)
class FaucetTaggedProactiveNeighborIPv6RouteTest(FaucetTaggedTest):
    """Proactive IPv6 neighbor learning: FAUCET resolves a not-yet-seen host."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["fc00::1:3/64"]
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        proactive_learn: true
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_tagged(self):
        """Ping an alias via the VIP; IPv6 neighbor count must grow past 1."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_alias_ip = ipaddress.ip_interface(u'fc00::1:99/64')
        faucet_vip_ip = ipaddress.ip_interface(u'fc00::1:3/126')
        # Host-route (/128) form of the alias for second_host's table.
        first_host_alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(first_host_alias_ip.ip))
        self.add_host_ipv6_address(first_host, ipaddress.ip_interface(u'fc00::1:1/64'))
        # We use a narrower mask to force second_host to use the /128 route,
        # since otherwise it would realize :99 is directly connected via ND and send direct.
        self.add_host_ipv6_address(second_host, ipaddress.ip_interface(u'fc00::1:2/126'))
        self.add_host_ipv6_address(first_host, first_host_alias_ip)
        self.add_host_route(second_host, first_host_alias_host_ip, faucet_vip_ip.ip)
        self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
        self.assertGreater(
            self.scrape_prometheus_var(
                'vlan_neighbors', {'ipv': '6', 'vlan': '100'}),
            1)
class FaucetUntaggedIPv4InterVLANRouteTest(FaucetUntaggedTest):
    """IPv4 routing between two VLANs, each with its own VIP and router MAC.

    Fix: replaced the deprecated ``assertEquals`` alias with ``assertEqual``.
    """

    FAUCET_MAC2 = '0e:00:00:00:00:02'

    CONFIG_GLOBAL = """
vlans:
    100:
        faucet_vips: ["10.100.0.254/24"]
    vlanb:
        vid: 200
        faucet_vips: ["10.200.0.254/24"]
        faucet_mac: "%s"
    vlanc:
        vid: 100
        description: "not used"
routers:
    router-1:
        vlans: [100, vlanb]
""" % FAUCET_MAC2

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        proactive_learn: True
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: vlanb
                description: "b2"
            %(port_3)d:
                native_vlan: vlanb
                description: "b3"
            %(port_4)d:
                native_vlan: vlanb
                description: "b4"
"""

    def test_untagged(self):
        """Hosts on VLAN 100 and vlanb reach each other via their own VIPs."""
        first_host_ip = ipaddress.ip_interface(u'10.100.0.1/24')
        first_faucet_vip = ipaddress.ip_interface(u'10.100.0.254/24')
        second_host_ip = ipaddress.ip_interface(u'10.200.0.1/24')
        second_faucet_vip = ipaddress.ip_interface(u'10.200.0.254/24')
        first_host, second_host = self.net.hosts[:2]
        first_host.setIP(str(first_host_ip.ip), prefixLen=24)
        second_host.setIP(str(second_host_ip.ip), prefixLen=24)
        self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
        self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
        self.one_ipv4_ping(first_host, second_host_ip.ip)
        self.one_ipv4_ping(second_host, first_host_ip.ip)
        # Each VLAN's VIP must resolve to that VLAN's configured router MAC.
        self.assertEqual(
            self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
        self.assertEqual(
            self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
class FaucetUntaggedIPv6InterVLANRouteTest(FaucetUntaggedTest):
    """IPv6 routing between two VLANs, each with its own VIP and router MAC."""

    FAUCET_MAC2 = '0e:00:00:00:00:02'

    CONFIG_GLOBAL = """
vlans:
    100:
        faucet_vips: ["fc00::1:254/64"]
    vlanb:
        vid: 200
        faucet_vips: ["fc01::1:254/64"]
        faucet_mac: "%s"
    vlanc:
        vid: 100
        description: "not used"
routers:
    router-1:
        vlans: [100, vlanb]
""" % FAUCET_MAC2

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        proactive_learn: True
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: vlanb
                description: "b2"
            %(port_3)d:
                native_vlan: vlanb
                description: "b3"
            %(port_4)d:
                native_vlan: vlanb
                description: "b4"
"""

    def test_untagged(self):
        """Hosts on VLAN 100 and vlanb reach each other via their VIPs."""
        vlan100_host, vlanb_host = self.net.hosts[:2]
        vlan100_net = ipaddress.ip_interface(u'fc00::1:1/64')
        vlanb_net = ipaddress.ip_interface(u'fc01::1:1/64')
        self.add_host_ipv6_address(vlan100_host, vlan100_net)
        self.add_host_ipv6_address(vlanb_host, vlanb_net)
        self.add_host_route(vlan100_host, vlanb_net, self.FAUCET_VIPV6.ip)
        self.add_host_route(vlanb_host, vlan100_net, self.FAUCET_VIPV6_2.ip)
        self.one_ipv6_ping(vlan100_host, vlanb_net.ip)
        self.one_ipv6_ping(vlanb_host, vlan100_net.ip)
class FaucetUntaggedIPv4PolicyRouteTest(FaucetUntaggedTest):
    """Policy-based routing: an ACL swap_vid steers a /32 onto a different VLAN."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "100"
        faucet_vips: ["10.0.0.254/24"]
        acl_in: pbr
    200:
        description: "200"
        faucet_vips: ["10.20.0.254/24"]
        routes:
            - route:
                ip_dst: "10.99.0.0/24"
                ip_gw: "10.20.0.2"
    300:
        description: "300"
        faucet_vips: ["10.30.0.254/24"]
        routes:
            - route:
                ip_dst: "10.99.0.0/24"
                ip_gw: "10.30.0.3"
acls:
    pbr:
        - rule:
            vlan_vid: 100
            dl_type: 0x800
            nw_dst: "10.99.0.2"
            actions:
                allow: 1
                output:
                    swap_vid: 300
        - rule:
            vlan_vid: 100
            dl_type: 0x800
            nw_dst: "10.99.0.0/24"
            actions:
                allow: 1
                output:
                    swap_vid: 200
        - rule:
            actions:
                allow: 1
routers:
    router-100-200:
        vlans: [100, 200]
    router-100-300:
        vlans: [100, 300]
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 200
                description: "b2"
            %(port_3)d:
                native_vlan: 300
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """PBR: 10.99.0.0/24 routes via b2, but 10.99.0.2/32 is steered to b3."""
        # 10.99.0.1 is on b2, and 10.99.0.2 is on b3
        # we want to route 10.99.0.0/24 to b2, but we want
        # want to PBR 10.99.0.2/32 to b3.
        first_host_ip = ipaddress.ip_interface(u'10.0.0.1/24')
        first_faucet_vip = ipaddress.ip_interface(u'10.0.0.254/24')
        second_host_ip = ipaddress.ip_interface(u'10.20.0.2/24')
        second_faucet_vip = ipaddress.ip_interface(u'10.20.0.254/24')
        third_host_ip = ipaddress.ip_interface(u'10.30.0.3/24')
        third_faucet_vip = ipaddress.ip_interface(u'10.30.0.254/24')
        first_host, second_host, third_host = self.net.hosts[:3]
        remote_ip = ipaddress.ip_interface(u'10.99.0.1/24')
        remote_ip2 = ipaddress.ip_interface(u'10.99.0.2/24')
        second_host.setIP(str(second_host_ip.ip), prefixLen=24)
        third_host.setIP(str(third_host_ip.ip), prefixLen=24)
        self.host_ipv4_alias(second_host, remote_ip)
        self.host_ipv4_alias(third_host, remote_ip2)
        self.add_host_route(first_host, remote_ip, first_faucet_vip.ip)
        self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
        self.add_host_route(third_host, first_host_ip, third_faucet_vip.ip)
        # ensure all nexthops resolved.
        self.one_ipv4_ping(first_host, first_faucet_vip.ip)
        self.one_ipv4_ping(second_host, second_faucet_vip.ip)
        self.one_ipv4_ping(third_host, third_faucet_vip.ip)
        self.wait_for_route_as_flow(
            second_host.MAC(), ipaddress.IPv4Network(u'10.99.0.0/24'), vlan_vid=200)
        self.wait_for_route_as_flow(
            third_host.MAC(), ipaddress.IPv4Network(u'10.99.0.0/24'), vlan_vid=300)
        # verify b1 can reach 10.99.0.1 and .2 on b2 and b3 respectively.
        self.one_ipv4_ping(first_host, remote_ip.ip)
        self.one_ipv4_ping(first_host, remote_ip2.ip)
class FaucetUntaggedMixedIPv4RouteTest(FaucetUntaggedTest):
    """Two IPv4 VIPs on one VLAN: hosts on different subnets route via FAUCET."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["172.16.0.254/24", "10.0.0.254/24"]
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Each host reaches its local VIP, then the other host's subnet."""
        host_a, host_b = self.net.hosts[:2]
        net_a = ipaddress.ip_interface(u'10.0.0.1/24')
        net_b = ipaddress.ip_interface(u'172.16.0.1/24')
        host_b.setIP(str(net_b.ip), prefixLen=24)
        # Local VIPs must answer before cross-subnet routing is attempted.
        self.one_ipv4_ping(host_a, self.FAUCET_VIPV4.ip)
        self.one_ipv4_ping(host_b, self.FAUCET_VIPV4_2.ip)
        self.add_host_route(host_a, net_b, self.FAUCET_VIPV4.ip)
        self.add_host_route(host_b, net_a, self.FAUCET_VIPV4_2.ip)
        self.one_ipv4_ping(host_a, net_b.ip)
        self.one_ipv4_ping(host_b, net_a.ip)
class FaucetUntaggedMixedIPv6RouteTest(FaucetUntaggedTest):
    """Two IPv6 VIPs on one VLAN: hosts on different prefixes route via FAUCET."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/64", "fc01::1:254/64"]
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Each host reaches its local VIP, then the other host's prefix."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_net = ipaddress.ip_interface(u'fc00::1:1/64')
        second_host_net = ipaddress.ip_interface(u'fc01::1:1/64')
        self.add_host_ipv6_address(first_host, first_host_net)
        self.one_ipv6_ping(first_host, self.FAUCET_VIPV6.ip)
        self.add_host_ipv6_address(second_host, second_host_net)
        self.one_ipv6_ping(second_host, self.FAUCET_VIPV6_2.ip)
        self.add_host_route(
            first_host, second_host_net, self.FAUCET_VIPV6.ip)
        self.add_host_route(
            second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
        self.one_ipv6_ping(first_host, second_host_net.ip)
        self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedBGPIPv6DefaultRouteTest(FaucetUntaggedTest):
    """Learn an IPv6 default route from an exabgp peer and forward through it."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
        bgp_port: %(bgp_port)d
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_addresses: ["::1"]
        bgp_neighbor_as: 2
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    # exabgp advertises ::/0 with first_host as the next hop.
    exabgp_peer_conf = """
    static {
      route ::/0 next-hop fc00::1:1 local-preference 100;
    }
"""
    # Populated by pre_start_net().
    exabgp_log = None

    def pre_start_net(self):
        """Start the exabgp speaker before the network comes up."""
        exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Traffic to an off-link alias follows the BGP-learned default route."""
        first_host, second_host = self.net.hosts[:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        first_host_alias_ip = ipaddress.ip_interface(u'fc00::50:1/112')
        first_host_alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(first_host_alias_ip.ip))
        self.add_host_ipv6_address(first_host, first_host_alias_ip)
        # BGP session must be established and routes received before testing.
        self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        self.add_host_route(
            second_host, first_host_alias_host_ip, self.FAUCET_VIPV6.ip)
        self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
        self.one_ipv6_controller_ping(first_host)
class FaucetUntaggedBGPIPv6RouteTest(FaucetUntaggedTest):
    """Learn IPv6 routes from exabgp, rejecting invalid ones."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
        bgp_port: %(bgp_port)d
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_addresses: ["::1"]
        bgp_neighbor_as: 2
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    # Last two routes are deliberately invalid: a next hop that is the VIP
    # itself, and a next hop on a non-connected network.
    exabgp_peer_conf = """
    static {
      route fc00::10:1/112 next-hop fc00::1:1 local-preference 100;
      route fc00::20:1/112 next-hop fc00::1:2 local-preference 100;
      route fc00::30:1/112 next-hop fc00::1:2 local-preference 100;
      route fc00::40:1/112 next-hop fc00::1:254;
      route fc00::50:1/112 next-hop fc00::2:2;
    }
"""
    # Populated by pre_start_net().
    exabgp_log = None

    def pre_start_net(self):
        """Start the exabgp speaker before the network comes up."""
        exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Valid BGP routes are installed; invalid ones are rejected."""
        first_host, second_host = self.net.hosts[:2]
        self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        self.verify_invalid_bgp_route('fc00::40:1/112 cannot be us')
        self.verify_invalid_bgp_route('fc00::50:1/112 is not a connected network')
        self.verify_ipv6_routing_mesh()
        self.flap_all_switch_ports()
        # Mesh must re-converge after the flap.
        self.verify_ipv6_routing_mesh()
        for host in first_host, second_host:
            self.one_ipv6_controller_ping(host)
class FaucetUntaggedSameVlanIPv6RouteTest(FaucetUntaggedTest):
    """Static IPv6 routes between two subnets that share one VLAN."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::10:1/112", "fc00::20:1/112"]
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::10:2"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::20:2"
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Hosts on both subnets reach each other and both VIPs."""
        host_a, host_b = self.net.hosts[:2]
        host_a_ip = ipaddress.ip_interface(u'fc00::10:2/112')
        host_a_vip = ipaddress.ip_address(u'fc00::10:1')
        host_b_ip = ipaddress.ip_interface(u'fc00::20:2/112')
        host_b_vip = ipaddress.ip_address(u'fc00::20:1')
        self.add_host_ipv6_address(host_a, host_a_ip)
        self.add_host_ipv6_address(host_b, host_b_ip)
        # Each host routes the other's subnet via its own VIP.
        self.add_host_route(host_a, host_b_ip, host_a_vip)
        self.add_host_route(host_b, host_a_ip, host_b_vip)
        # Both static routes must be installed as flows before pinging.
        self.wait_for_route_as_flow(host_a.MAC(), host_a_ip.network)
        self.wait_for_route_as_flow(host_b.MAC(), host_b_ip.network)
        self.one_ipv6_ping(host_a, host_b_ip.ip)
        self.one_ipv6_ping(host_a, host_b_vip)
        self.one_ipv6_ping(host_b, host_a_ip.ip)
        self.one_ipv6_ping(host_b, host_a_vip)
class FaucetUntaggedIPv6RouteTest(FaucetUntaggedTest):
    """Test IPv6 static routing, with routes exported to a BGP neighbor."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
        bgp_port: %(bgp_port)d
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_addresses: ["::1"]
        bgp_neighbor_as: 2
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::1:1"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::1:2"
            - route:
                ip_dst: "fc00::30:0/112"
                ip_gw: "fc00::1:2"
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    # Set by pre_start_net(); declared here for consistency (the original
    # declared only exabgp_log).
    exabgp_log = None
    exabgp_err = None

    def pre_start_net(self):
        """Start exabgp as the IPv6 BGP neighbor before the network comes up."""
        exabgp_conf = self.get_exabgp_conf('::1')
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Routing mesh works and all static routes are announced over BGP."""
        self.verify_ipv6_routing_mesh()
        second_host = self.net.hosts[1]
        self.flap_all_switch_ports()
        self.wait_for_route_as_flow(
            second_host.MAC(), ipaddress.IPv6Network(u'fc00::30:0/112'))
        self.verify_ipv6_routing_mesh()
        self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
            0)
        updates = self.exabgp_updates(self.exabgp_log)
        # Use unittest assertions instead of bare assert: bare asserts are
        # stripped under python -O and report no context on failure.
        self.assertTrue(
            re.search('fc00::1:0/112 next-hop fc00::1:254', updates), updates)
        self.assertTrue(
            re.search('fc00::10:0/112 next-hop fc00::1:1', updates), updates)
        self.assertTrue(
            re.search('fc00::20:0/112 next-hop fc00::1:2', updates), updates)
        self.assertTrue(
            re.search('fc00::30:0/112 next-hop fc00::1:2', updates), updates)
class FaucetTaggedIPv6RouteTest(FaucetTaggedTest):
    """Test basic IPv6 routing without BGP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["fc00::1:254/112"]
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::1:1"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::1:2"
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_tagged(self):
        """Test IPv6 routing works."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_ip = ipaddress.ip_interface(u'fc00::1:1/112')
        second_host_ip = ipaddress.ip_interface(u'fc00::1:2/112')
        first_host_routed_ip = ipaddress.ip_interface(u'fc00::10:1/112')
        second_host_routed_ip = ipaddress.ip_interface(u'fc00::20:1/112')
        # Verify routing keeps working across repeated MAC swaps (relearning).
        for _ in range(5):
            self.verify_ipv6_routing_pair(
                first_host, first_host_ip, first_host_routed_ip,
                second_host, second_host_ip, second_host_routed_ip)
            self.swap_host_macs(first_host, second_host)
class FaucetStringOfDPTest(FaucetTest):
    """Base for tests against a string (chain) of FAUCET datapaths."""

    NUM_HOSTS = 4
    VID = 100
    # Set by build_net().
    dpids = None

    def build_net(self, stack=False, n_dps=1,
                  n_tagged=0, tagged_vid=100,
                  n_untagged=0, untagged_vid=100,
                  include=None, include_optional=None, acls=None, acl_in_dp=None):
        """Set up Mininet and Faucet for the given topology.

        Defaults are None rather than mutable []/{} literals, which are
        shared between calls and a classic Python pitfall; get_config()
        normalizes them.
        """
        self.dpids = [str(self.rand_dpid()) for _ in range(n_dps)]
        self.dpid = self.dpids[0]
        self.CONFIG = self.get_config(
            self.dpids,
            stack,
            self.hardware,
            self.debug_log_path,
            n_tagged,
            tagged_vid,
            n_untagged,
            untagged_vid,
            include,
            include_optional,
            acls,
            acl_in_dp,
        )
        # Context manager guarantees the config is flushed and closed
        # before FAUCET is started against it.
        with open(self.faucet_config_path, 'w') as config_file:
            config_file.write(self.CONFIG)
        self.topo = faucet_mininet_test_topo.FaucetStringOfDPSwitchTopo(
            self.ports_sock,
            dpids=self.dpids,
            n_tagged=n_tagged,
            tagged_vid=tagged_vid,
            n_untagged=n_untagged,
            test_name=self._test_name(),
        )

    def get_config(self, dpids=None, stack=False, hardware=None, ofchannel_log=None,
                   n_tagged=0, tagged_vid=0, n_untagged=0, untagged_vid=0,
                   include=None, include_optional=None, acls=None, acl_in_dp=None):
        """Build a complete Faucet configuration for each datapath, using the given topology."""
        # Normalize None defaults (avoids shared mutable default arguments).
        dpids = dpids if dpids is not None else []
        include = include if include is not None else []
        include_optional = include_optional if include_optional is not None else []
        acls = acls if acls is not None else {}
        acl_in_dp = acl_in_dp if acl_in_dp is not None else {}

        def dp_name(i):
            """Return the config name of the i'th datapath."""
            return 'faucet-%i' % (i + 1)

        def add_vlans(n_tagged, tagged_vid, n_untagged, untagged_vid):
            """Return the vlans config block for the requested host mix."""
            vlans_config = {}
            if n_untagged:
                vlans_config[untagged_vid] = {
                    'description': 'untagged',
                }
            if ((n_tagged and not n_untagged) or
                    (n_tagged and n_untagged and tagged_vid != untagged_vid)):
                vlans_config[tagged_vid] = {
                    'description': 'tagged',
                }
            return vlans_config

        def add_acl_to_port(name, port, interfaces_config):
            """Attach acl_in to a port when acl_in_dp requests it for this DP."""
            if name in acl_in_dp and port in acl_in_dp[name]:
                interfaces_config[port]['acl_in'] = acl_in_dp[name][port]

        def add_dp_to_dp_ports(dp_config, port, interfaces_config, i,
                               dpid_count, stack, n_tagged, tagged_vid,
                               n_untagged, untagged_vid):
            # Add configuration for the switch-to-switch links
            # (0 for a single switch, 1 for an end switch, 2 for middle switches).
            first_dp = i == 0
            second_dp = i == 1
            last_dp = i == dpid_count - 1
            end_dp = first_dp or last_dp
            num_switch_links = 0
            if dpid_count > 1:
                if end_dp:
                    num_switch_links = 1
                else:
                    num_switch_links = 2
            if stack and first_dp:
                # First DP is the stack root.
                dp_config['stack'] = {
                    'priority': 1
                }
            first_stack_port = port
            for stack_dp_port in range(num_switch_links):
                tagged_vlans = None
                peer_dp = None
                if stack_dp_port == 0:
                    # First inter-DP link: towards the previous DP (or the
                    # next one, when this is the first DP in the string).
                    if first_dp:
                        peer_dp = i + 1
                    else:
                        peer_dp = i - 1
                    if first_dp or second_dp:
                        peer_port = first_stack_port
                    else:
                        peer_port = first_stack_port + 1
                else:
                    # Second inter-DP link (middle DPs only): towards the next DP.
                    peer_dp = i + 1
                    peer_port = first_stack_port
                description = 'to %s' % dp_name(peer_dp)
                interfaces_config[port] = {
                    'description': description,
                }
                if stack:
                    # Stack ports name the peer DP/port explicitly.
                    interfaces_config[port]['stack'] = {
                        'dp': dp_name(peer_dp),
                        'port': peer_port,
                    }
                else:
                    # Non-stack inter-DP links trunk whatever VLANs are in use.
                    if n_tagged and n_untagged and n_tagged != n_untagged:
                        tagged_vlans = [tagged_vid, untagged_vid]
                    elif ((n_tagged and not n_untagged) or
                          (n_tagged and n_untagged and tagged_vid == untagged_vid)):
                        tagged_vlans = [tagged_vid]
                    elif n_untagged and not n_tagged:
                        tagged_vlans = [untagged_vid]
                    if tagged_vlans:
                        interfaces_config[port]['tagged_vlans'] = tagged_vlans
                # NOTE: 'name' resolves to the enclosing loop's current DP name.
                add_acl_to_port(name, port, interfaces_config)
                port += 1

        def add_dp(name, dpid, i, dpid_count, stack,
                   n_tagged, tagged_vid, n_untagged, untagged_vid):
            """Build one DP's config: host ports first, then inter-DP ports."""
            dpid_ofchannel_log = ofchannel_log + str(i)
            dp_config = {
                'dp_id': int(dpid),
                'hardware': hardware,
                'ofchannel_log': dpid_ofchannel_log,
                'interfaces': {},
            }
            interfaces_config = dp_config['interfaces']
            port = 1
            for _ in range(n_tagged):
                interfaces_config[port] = {
                    'tagged_vlans': [tagged_vid],
                    'description': 'b%i' % port,
                }
                add_acl_to_port(name, port, interfaces_config)
                port += 1
            for _ in range(n_untagged):
                interfaces_config[port] = {
                    'native_vlan': untagged_vid,
                    'description': 'b%i' % port,
                }
                add_acl_to_port(name, port, interfaces_config)
                port += 1
            add_dp_to_dp_ports(
                dp_config, port, interfaces_config, i, dpid_count, stack,
                n_tagged, tagged_vid, n_untagged, untagged_vid)
            return dp_config

        config = {'version': 2}
        if include:
            config['include'] = list(include)
        if include_optional:
            config['include-optional'] = list(include_optional)
        config['vlans'] = add_vlans(
            n_tagged, tagged_vid, n_untagged, untagged_vid)
        config['acls'] = acls.copy()
        dpid_count = len(dpids)
        config['dps'] = {}
        for i, dpid in enumerate(dpids):
            name = dp_name(i)
            config['dps'][name] = add_dp(
                name, dpid, i, dpid_count, stack,
                n_tagged, tagged_vid, n_untagged, untagged_vid)
        return yaml.dump(config, default_flow_style=False)

    def matching_flow_present(self, match, timeout=10, table_id=None,
                              actions=None, match_exact=None):
        """Find the first DP that has a flow that matches match."""
        for dpid in self.dpids:
            if self.matching_flow_present_on_dpid(
                    dpid, match, timeout=timeout,
                    table_id=table_id, actions=actions,
                    match_exact=match_exact):
                return True
        return False

    def eventually_all_reachable(self, retries=3):
        """Allow time for distributed learning to happen."""
        # Initialize so a retries=0 call fails the assertion rather than
        # raising NameError.
        loss = None
        for _ in range(retries):
            loss = self.net.pingAll()
            if loss == 0:
                break
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(0, loss)
class FaucetStringOfDPUntaggedTest(FaucetStringOfDPTest):
    """String of 3 DPs, all hosts untagged on one VLAN."""

    NUM_DPS = 3

    def setUp(self):
        super(FaucetStringOfDPUntaggedTest, self).setUp()
        self.build_net(
            n_dps=self.NUM_DPS, n_untagged=self.NUM_HOSTS, untagged_vid=self.VID)
        self.start_net()

    def test_untagged(self):
        """All untagged hosts in multi switch topology can reach one another."""
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(0, self.net.pingAll())
class FaucetStringOfDPTaggedTest(FaucetStringOfDPTest):
    """String of 3 DPs, all hosts tagged on one VLAN."""

    NUM_DPS = 3

    def setUp(self):
        super(FaucetStringOfDPTaggedTest, self).setUp()
        self.build_net(
            n_dps=self.NUM_DPS, n_tagged=self.NUM_HOSTS, tagged_vid=self.VID)
        self.start_net()

    def test_tagged(self):
        """All tagged hosts in multi switch topology can reach one another."""
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(0, self.net.pingAll())
class FaucetStackStringOfDPTaggedTest(FaucetStringOfDPTest):
    """Test topology of stacked datapaths with tagged hosts."""

    NUM_DPS = 3

    def setUp(self):
        super(FaucetStackStringOfDPTaggedTest, self).setUp()
        self.build_net(
            stack=True,
            n_dps=self.NUM_DPS,
            n_tagged=self.NUM_HOSTS,
            tagged_vid=self.VID)
        self.start_net()

    def test_tagged(self):
        """All tagged hosts in stack topology can reach each other."""
        # Stacked learning is distributed, so allow retries.
        self.eventually_all_reachable()
class FaucetStackStringOfDPUntaggedTest(FaucetStringOfDPTest):
    """Test topology of stacked datapaths with untagged hosts."""
    # (Original docstring said "tagged hosts"; this test builds untagged hosts.)

    NUM_DPS = 2
    NUM_HOSTS = 2

    def setUp(self):
        super(FaucetStackStringOfDPUntaggedTest, self).setUp()
        self.build_net(
            stack=True,
            n_dps=self.NUM_DPS,
            n_untagged=self.NUM_HOSTS,
            untagged_vid=self.VID)
        self.start_net()

    def test_untagged(self):
        """All untagged hosts in stack topology can reach each other."""
        self.eventually_all_reachable()
class FaucetStringOfDPACLOverrideTest(FaucetStringOfDPTest):
    """ACLs loaded from an include-optional file override the base ACLs on reload."""

    NUM_DPS = 1
    NUM_HOSTS = 2

    # ACL rules which will get overridden.
    # 0x800 (hex literal) replaces the original int('0x800', 16) - same value.
    ACLS = {
        1: [
            {'rule': {
                'dl_type': 0x800,
                'nw_proto': 6,
                'tp_dst': 5001,
                'actions': {
                    'allow': 1,
                },
            }},
            {'rule': {
                'dl_type': 0x800,
                'nw_proto': 6,
                'tp_dst': 5002,
                'actions': {
                    'allow': 0,
                },
            }},
            {'rule': {
                'actions': {
                    'allow': 1,
                },
            }},
        ],
    }

    # ACL rules which get put into an include-optional
    # file, then reloaded into FAUCET.
    ACLS_OVERRIDE = {
        1: [
            {'rule': {
                'dl_type': 0x800,
                'nw_proto': 6,
                'tp_dst': 5001,
                'actions': {
                    'allow': 0,
                },
            }},
            {'rule': {
                'dl_type': 0x800,
                'nw_proto': 6,
                'tp_dst': 5002,
                'actions': {
                    'allow': 1,
                },
            }},
            {'rule': {
                'actions': {
                    'allow': 1,
                },
            }},
        ],
    }

    # DP-to-acl_in port mapping.
    ACL_IN_DP = {
        'faucet-1': {
            # Port 1, acl_in = 1
            1: 1,
        },
    }

    def setUp(self):
        super(FaucetStringOfDPACLOverrideTest, self).setUp()
        self.acls_config = os.path.join(self.tmpdir, 'acls.yaml')
        self.build_net(
            n_dps=self.NUM_DPS,
            n_untagged=self.NUM_HOSTS,
            untagged_vid=self.VID,
            include_optional=[self.acls_config],
            acls=self.ACLS,
            acl_in_dp=self.ACL_IN_DP,
        )
        self.start_net()

    def _reload_acls(self, acls):
        """Write overriding ACLs to the include-optional file, then HUP FAUCET.

        The context manager closes (flushes) the file before the reload,
        where the original relied on implicit close.
        """
        with open(self.acls_config, 'w') as acls_file:
            acls_file.write(self.get_config(acls=acls))
        self.verify_hup_faucet()

    def test_port5001_blocked(self):
        """Test that TCP port 5001 is blocked."""
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        self.verify_tp_dst_notblocked(5001, first_host, second_host)
        self._reload_acls(self.ACLS_OVERRIDE)
        self.verify_tp_dst_blocked(5001, first_host, second_host)

    def test_port5002_notblocked(self):
        """Test that TCP port 5002 is not blocked."""
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        self.verify_tp_dst_blocked(5002, first_host, second_host)
        self._reload_acls(self.ACLS_OVERRIDE)
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetGroupTableTest(FaucetUntaggedTest):
    """Untagged test with group_table enabled for flooding."""

    CONFIG = """
        group_table: True
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_group_exist(self):
        """The VLAN 100 broadcast flood flow must point at group 100."""
        self.assertEqual(
            100,
            self.get_group_id_for_matching_flow(
                {u'dl_vlan': u'100', u'dl_dst': u'ff:ff:ff:ff:ff:ff'},
                table_id=self.FLOOD_TABLE))
class FaucetTaggedGroupTableTest(FaucetTaggedTest):
    """Tagged test with group_table enabled for flooding."""

    CONFIG = """
        group_table: True
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_group_exist(self):
        """The VLAN 100 broadcast flood flow must point at group 100."""
        self.assertEqual(
            100,
            self.get_group_id_for_matching_flow(
                {u'dl_vlan': u'100', u'dl_dst': u'ff:ff:ff:ff:ff:ff'},
                table_id=self.FLOOD_TABLE))
class FaucetGroupTableUntaggedIPv4RouteTest(FaucetUntaggedTest):
    """Test IPv4 routing with group_table_routing enabled."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
        routes:
            - route:
                ip_dst: "10.0.1.0/24"
                ip_gw: "10.0.0.1"
            - route:
                ip_dst: "10.0.2.0/24"
                ip_gw: "10.0.0.2"
            - route:
                ip_dst: "10.0.3.0/24"
                ip_gw: "10.0.0.2"
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        group_table_routing: True
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """IPv4 routing works via groups, before and after a host MAC swap."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_routed_ip = ipaddress.ip_interface(u'10.0.1.1/24')
        second_host_routed_ip = ipaddress.ip_interface(u'10.0.2.1/24')
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip,
            with_group_table=True)
        self.swap_host_macs(first_host, second_host)
        # Routing must recover after relearning the swapped MACs.
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip,
            with_group_table=True)
class FaucetGroupUntaggedIPv6RouteTest(FaucetUntaggedTest):
    """Test IPv6 routing with group_table_routing enabled."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::1:1"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::1:2"
            - route:
                ip_dst: "fc00::30:0/112"
                ip_gw: "fc00::1:2"
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        group_table_routing: True
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """IPv6 routing works via groups, before and after a host MAC swap."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_ip = ipaddress.ip_interface(u'fc00::1:1/112')
        second_host_ip = ipaddress.ip_interface(u'fc00::1:2/112')
        first_host_routed_ip = ipaddress.ip_interface(u'fc00::10:1/112')
        second_host_routed_ip = ipaddress.ip_interface(u'fc00::20:1/112')
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip,
            with_group_table=True)
        self.swap_host_macs(first_host, second_host)
        # Routing must recover after relearning the swapped MACs.
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip,
            with_group_table=True)
class FaucetEthSrcMaskTest(FaucetUntaggedTest):
    """Test an ACL that matches on a masked Ethernet source address."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            eth_src: 0e:0d:00:00:00:00/ff:ff:00:00:00:00
            actions:
                allow: 1
        - rule:
            actions:
                allow: 0
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Host matching the masked eth_src is allowed; the ACL flow sees traffic."""
        first_host, second_host = self.net.hosts[0:2]
        # First two octets match the mask; the rest deliberately differ.
        first_host.setMAC('0e:0d:00:00:00:99')
        self.assertEqual(0, self.net.ping((first_host, second_host)))
        self.wait_nonzero_packet_count_flow(
            {u'dl_src': u'0e:0d:00:00:00:00/ff:ff:00:00:00:00'},
            table_id=self.PORT_ACL_TABLE)
class FaucetDestRewriteTest(FaucetUntaggedTest):
    """Test an ACL that rewrites the destination MAC address."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_dst: "00:00:00:00:00:02"
            actions:
                allow: 1
                output:
                    dl_dst: "00:00:00:00:00:03"
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Traffic sent to the overridden MAC arrives with the rewritten MAC."""
        first_host, second_host = self.net.hosts[0:2]
        # we expect to see the rewritten mac address.
        tcpdump_filter = ('icmp and ether dst 00:00:00:00:00:03')
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '00:00:00:00:00:02')),
                lambda: first_host.cmd('ping -c1 %s' % second_host.IP())])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))

    def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host):
        """Ping via the overridden MAC; packet must egress rewrite_host's port only.

        tcpdump_host listens for traffic that should NOT appear there after
        the rewrite (hence the final assertFalse).
        """
        overridden_host.setMAC('00:00:00:00:00:02')
        rewrite_host.setMAC('00:00:00:00:00:03')
        # Prime ARP/learning so FAUCET installs the eth_dst output flow.
        rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC()))
        rewrite_host.cmd('ping -c1 %s' % overridden_host.IP())
        self.wait_until_matching_flow(
            {u'dl_dst': u'00:00:00:00:00:03'},
            table_id=self.ETH_DST_TABLE,
            actions=[u'OUTPUT:%u' % self.port_map['port_3']])
        tcpdump_filter = ('icmp and ether src %s and ether dst %s' % (
            source_host.MAC(), rewrite_host.MAC()))
        tcpdump_txt = self.tcpdump_helper(
            tcpdump_host, tcpdump_filter, [
                lambda: source_host.cmd(
                    'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())),
                # this will fail if no reply
                lambda: self.one_ipv4_ping(
                    source_host, rewrite_host.IP(), require_host_learned=False)])
        # ping from h1 to h2.mac should appear in third host, and not second host, as
        # the acl should rewrite the dst mac.
        self.assertFalse(re.search(
            '%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt))

    def test_switching(self):
        """Tests that a acl can rewrite the destination mac address,
        and the packet will only go out the port of the new mac.
        (Continues through faucet pipeline)
        """
        source_host, overridden_host, rewrite_host = self.net.hosts[0:3]
        self.verify_dest_rewrite(
            source_host, overridden_host, rewrite_host, overridden_host)
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutTest(FaucetUntaggedTest):
    """Test expiry of idle-timed-out learned host flows."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""

    CONFIG = """
        timeout: 1
        use_idle_timeout: true
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def wait_for_host_removed(self, host, in_port, timeout=5):
        """Wait up to timeout seconds for host to be unlearned from in_port."""
        for _ in range(timeout):
            if not self.host_learned(host, in_port=in_port, timeout=1):
                return
        # Bug fix: the original formatted an undefined name 'match' here,
        # raising NameError instead of reporting a useful failure.
        self.fail('host %s still learned on port %u' % (host.MAC(), in_port))

    def wait_for_flowremoved_msg(self, src_mac=None, dst_mac=None, timeout=30):
        """Wait for an OFPFlowRemoved mentioning the MAC in any ofchannel log."""
        pattern = "OFPFlowRemoved"
        mac = None
        if src_mac:
            pattern = "OFPFlowRemoved(.*)'eth_src': '%s'" % src_mac
            mac = src_mac
        if dst_mac:
            # NOTE: dst_mac takes precedence if both MACs are supplied.
            pattern = "OFPFlowRemoved(.*)'eth_dst': '%s'" % dst_mac
            mac = dst_mac
        # Loop index was an unused 'i'; use '_' per convention.
        for _ in range(timeout):
            for _, debug_log in self._get_ofchannel_logs():
                match = re.search(pattern, open(debug_log).read())
                if match:
                    return
            time.sleep(1)
        self.fail('Not received OFPFlowRemoved for host %s' % mac)

    def wait_for_host_log_msg(self, host_mac, msg, timeout=15):
        """Wait for FAUCET's log to mention msg about host_mac."""
        controller = self._get_controller()
        count = 0
        for _ in range(timeout):
            count = controller.cmd('grep -c "%s %s" %s' % (
                msg, host_mac, self.env['faucet']['FAUCET_LOG']))
            if int(count) != 0:
                break
            time.sleep(1)
        self.assertGreaterEqual(
            int(count), 1,
            'log msg "%s" for host %s not found' % (msg, host_mac))

    def test_untagged(self):
        """Hosts whose flows expire can be relearned after a MAC swap."""
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[:2]
        self.swap_host_macs(first_host, second_host)
        for host, port in (
                (first_host, self.port_map['port_1']),
                (second_host, self.port_map['port_2'])):
            self.wait_for_flowremoved_msg(src_mac=host.MAC())
            self.require_host_learned(host, in_port=int(port))
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutRuleExpiredTest(FaucetWithUseIdleTimeoutTest):
    """Actively-sending host keeps its rules refreshed; idle hosts expire."""

    def test_untagged(self):
        """Host that is actively sending should have its dst rule renewed as the
        rule expires. Host that is not sending expires as usual.
        """
        self.ping_all_when_learned()
        first_host, second_host, third_host, fourth_host = self.net.hosts
        self.host_ipv4_alias(first_host, ipaddress.ip_interface(u'10.99.99.1/24'))
        first_host.cmd('arp -s %s %s' % (second_host.IP(), second_host.MAC()))
        # Keep first_host transmitting in the background for the test duration.
        first_host.cmd('timeout 120s ping -I 10.99.99.1 %s &' % second_host.IP())
        # Silence the other hosts so their flows idle out.
        for host in (second_host, third_host, fourth_host):
            self.host_drop_all_ips(host)
        self.wait_for_host_log_msg(first_host.MAC(), 'refreshing host')
        self.assertTrue(self.host_learned(
            first_host, in_port=int(self.port_map['port_1'])))
        for host, port in (
                (second_host, self.port_map['port_2']),
                (third_host, self.port_map['port_3']),
                (fourth_host, self.port_map['port_4'])):
            self.wait_for_flowremoved_msg(src_mac=host.MAC())
            self.wait_for_host_log_msg(host.MAC(), 'expiring host')
            self.wait_for_host_removed(host, in_port=int(port))
NOTE: wait_for_flowremoved_msg can wait unnecessarily long — on every one-second poll it re-reads each ofchannel debug log from the beginning instead of tailing only new content.
#!/usr/bin/env python
"""Mininet tests for FAUCET."""
# pylint: disable=missing-docstring
import os
import re
import shutil
import socket
import threading
import time
import unittest
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
import ipaddress
import scapy.all
import yaml
from mininet.net import Mininet
import faucet_mininet_test_base
import faucet_mininet_test_util
import faucet_mininet_test_topo
class QuietHTTPServer(HTTPServer):
    """HTTP server that suppresses per-request error reporting."""

    def handle_error(self, _request, _client_address):
        # Swallow request-handling errors silently instead of printing
        # tracebacks to stderr (the default HTTPServer behavior).
        pass
class PostHandler(SimpleHTTPRequestHandler):
    """HTTP request handler that appends POSTed content to a log file."""

    def _log_post(self, influx_log):
        """Append this request's POST body (if non-empty) to influx_log."""
        content_len = int(self.headers.getheader('content-length', 0))
        content = self.rfile.read(content_len).strip()
        if content:
            # Context manager ensures the line is flushed and the fd closed
            # (the original relied on implicit close of the open() result).
            with open(influx_log, 'a') as log_file:
                log_file.write(content + '\n')
class FaucetTest(faucet_mininet_test_base.FaucetTestBase):
    """Base class for tests in this module; all behavior is in FaucetTestBase."""

    pass
@unittest.skip('currently flaky')
class FaucetAPITest(faucet_mininet_test_base.FaucetTestBase):
    """Test the Faucet API."""

    NUM_DPS = 0

    def setUp(self):
        # Manual setup (no super().setUp()): this test runs the special
        # FaucetAPI controller against a static canned config rather than
        # generated CONFIG/CONFIG_GLOBAL.
        self.tmpdir = self._tmpdir_name()
        name = 'faucet'
        self._set_var_path(name, 'FAUCET_CONFIG', 'config/testconfigv2-simple.yaml')
        self._set_var_path(name, 'FAUCET_LOG', 'faucet.log')
        self._set_var_path(name, 'FAUCET_EXCEPTION_LOG', 'faucet-exception.log')
        self._set_var_path(name, 'API_TEST_RESULT', 'result.txt')
        self.results_file = self.env[name]['API_TEST_RESULT']
        shutil.copytree('config', os.path.join(self.tmpdir, 'config'))
        self.dpid = str(0xcafef00d)
        self._set_prom_port(name)
        self.of_port, _ = faucet_mininet_test_util.find_free_port(
            self.ports_sock, self._test_name())
        self.topo = faucet_mininet_test_topo.FaucetSwitchTopo(
            self.ports_sock,
            dpid=self.dpid,
            n_untagged=7,
            test_name=self._test_name())
        self.net = Mininet(
            self.topo,
            controller=faucet_mininet_test_topo.FaucetAPI(
                name=name,
                tmpdir=self.tmpdir,
                env=self.env[name],
                port=self.of_port))
        self.net.start()
        self.reset_all_ipv4_prefix(prefix=24)
        self.wait_for_tcp_listen(self._get_controller(), self.of_port)

    def test_api(self):
        """Poll for the result file written by the API test controller."""
        for _ in range(10):
            try:
                with open(self.results_file, 'r') as results:
                    result = results.read().strip()
                    self.assertEquals('pass', result, result)
                    return
            except IOError:
                # Result file not written yet; retry.
                time.sleep(1)
        self.fail('no result from API test')
class FaucetUntaggedTest(FaucetTest):
    """Basic untagged VLAN test."""

    N_UNTAGGED = 4
    N_TAGGED = 0
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def setUp(self):
        super(FaucetUntaggedTest, self).setUp()
        self.topo = self.topo_class(
            self.ports_sock, dpid=self.dpid,
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED)
        self.start_net()

    def test_untagged(self):
        """All hosts on the same untagged VLAN should have connectivity."""
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        # Smoke-test the monitoring paths as well.
        self.gauge_smoke_test()
        self.prometheus_smoke_test()
class FaucetUntaggedLogRotateTest(FaucetUntaggedTest):
    """Verify FAUCET re-creates its log file after rotation."""

    def test_untagged(self):
        faucet_log = self.env['faucet']['FAUCET_LOG']
        rotated_log = faucet_log + '.old'
        # Simulate logrotate: move the live log aside.
        self.assertTrue(os.path.exists(faucet_log))
        os.rename(faucet_log, rotated_log)
        self.assertTrue(os.path.exists(rotated_log))
        # Generate events; FAUCET should open a fresh log file.
        self.flap_all_switch_ports()
        self.assertTrue(os.path.exists(faucet_log))
class FaucetUntaggedMeterParseTest(FaucetUntaggedTest):
    """Parse (but do not apply) a meter and an ACL that references it."""

    # Meter support is hardware dependent.
    REQUIRES_METERS = True
    CONFIG_GLOBAL = """
meters:
    lossymeter:
        meter_id: 1
        entry:
            flags: "KBPS"
            bands:
                [
                    {
                        type: "DROP",
                        rate: 1000
                    }
                ]
acls:
    lossyacl:
        - rule:
            actions:
                meter: lossymeter
                allow: 1
vlans:
    100:
        description: "untagged"
"""
class FaucetUntaggedApplyMeterTest(FaucetUntaggedMeterParseTest):
    """Apply the parsed lossy meter ACL to port 1."""

    CONFIG = """
        interfaces:
            %(port_1)d:
                acl_in: lossyacl
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""
class FaucetUntaggedHairpinTest(FaucetUntaggedTest):
    """Test hairpin: traffic forwarded in and back out the same port."""

    CONFIG = """
        interfaces:
            %(port_1)d:
                hairpin: True
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        # Create macvlan interfaces, with one in a seperate namespace,
        # to force traffic between them to be hairpinned via FAUCET.
        first_host, second_host = self.net.hosts[:2]
        macvlan1_intf = 'macvlan1'
        macvlan1_ipv4 = '10.0.0.100'
        macvlan2_intf = 'macvlan2'
        macvlan2_ipv4 = '10.0.0.101'
        netns = first_host.name
        self.add_macvlan(first_host, macvlan1_intf)
        first_host.cmd('ip address add %s/24 brd + dev %s' % (macvlan1_ipv4, macvlan1_intf))
        self.add_macvlan(first_host, macvlan2_intf)
        macvlan2_mac = self.get_host_intf_mac(first_host, macvlan2_intf)
        # Move macvlan2 into its own namespace so its traffic must leave the host.
        first_host.cmd('ip netns add %s' % netns)
        first_host.cmd('ip link set %s netns %s' % (macvlan2_intf, netns))
        # Iterates a 2-tuple of commands; the outer parens are redundant.
        for exec_cmd in (
                ('ip address add %s/24 brd + dev %s' % (
                    macvlan2_ipv4, macvlan2_intf),
                 'ip link set %s up' % macvlan2_intf)):
            first_host.cmd('ip netns exec %s %s' % (netns, exec_cmd))
        self.one_ipv4_ping(first_host, macvlan2_ipv4, intf=macvlan1_intf)
        self.one_ipv4_ping(first_host, second_host.IP())
        first_host.cmd('ip netns del %s' % netns)
        # Verify OUTPUT:IN_PORT flood rules are exercised.
        self.wait_nonzero_packet_count_flow(
            {u'in_port': self.port_map['port_1'],
             u'dl_dst': u'ff:ff:ff:ff:ff:ff'},
            table_id=self.FLOOD_TABLE, actions=[u'OUTPUT:IN_PORT'])
        self.wait_nonzero_packet_count_flow(
            {u'in_port': self.port_map['port_1'], u'dl_dst': macvlan2_mac},
            table_id=self.ETH_DST_TABLE, actions=[u'OUTPUT:IN_PORT'])
class FaucetUntaggedGroupHairpinTest(FaucetUntaggedHairpinTest):
    """Hairpin test with group_table enabled."""

    # NOTE(review): unlike sibling configs, port_4 here has no description
    # entry — confirm whether that omission is intentional.
    CONFIG = """
        group_table: True
        interfaces:
            %(port_1)d:
                hairpin: True
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
"""
class FaucetUntaggedTcpIPv4IperfTest(FaucetUntaggedTest):
    """Verify minimum iperf TCP throughput between two untagged hosts."""

    def test_untagged(self):
        first_host, second_host = self.net.hosts[:2]
        # py2 unicode(): the ipaddress backport requires unicode text input.
        second_host_ip = ipaddress.ip_address(unicode(second_host.IP()))
        # Repeat across port flaps to ensure throughput survives relearning.
        for _ in range(3):
            self.ping_all_when_learned()
            self.one_ipv4_ping(first_host, second_host_ip)
            self.verify_iperf_min(
                ((first_host, self.port_map['port_1']),
                 (second_host, self.port_map['port_2'])),
                1, second_host_ip)
            self.flap_all_switch_ports()
class FaucetUntaggedTcpIPv6IperfTest(FaucetUntaggedTest):
    """Verify minimum iperf TCP throughput between two hosts over IPv6."""

    def test_untagged(self):
        first_host, second_host = self.net.hosts[:2]
        first_host_ip = ipaddress.ip_interface(u'fc00::1:1/112')
        second_host_ip = ipaddress.ip_interface(u'fc00::1:2/112')
        self.add_host_ipv6_address(first_host, first_host_ip)
        self.add_host_ipv6_address(second_host, second_host_ip)
        # Repeat across port flaps to ensure throughput survives relearning.
        for _ in range(3):
            self.ping_all_when_learned()
            self.one_ipv6_ping(first_host, second_host_ip.ip)
            self.verify_iperf_min(
                ((first_host, self.port_map['port_1']),
                 (second_host, self.port_map['port_2'])),
                1, second_host_ip.ip)
            self.flap_all_switch_ports()
class FaucetSanityTest(FaucetUntaggedTest):
"""Sanity test - make sure test environment is correct before running all tess."""
def test_portmap(self):
test_ports = self.N_TAGGED + self.N_UNTAGGED
for i, host in enumerate(self.net.hosts):
in_port = 'port_%u' % (i + 1)
print 'verifying host/port mapping for %s' % in_port
self.require_host_learned(host, in_port=self.port_map[in_port])
class FaucetUntaggedPrometheusGaugeTest(FaucetUntaggedTest):
    """Testing Gauge Prometheus"""

    def get_gauge_watcher_config(self):
        """Return a Gauge watcher config shipping port stats to Prometheus."""
        return """
    port_stats:
        dps: ['faucet-1']
        type: 'port_stats'
        interval: 5
        db: 'prometheus'
"""

    def test_untagged(self):
        """Gauge's Prometheus port rx byte counter increases across polls."""
        self.wait_dp_status(1, controller='gauge')
        # py2 long(): dpid is a decimal string, rendered as hex for the label.
        labels = {'port_name': '1', 'dp_id': '0x%x' % long(self.dpid)}
        last_p1_bytes_in = 0
        for poll in range(2):
            self.ping_all_when_learned()
            updated_counters = False
            # Allow several scrape intervals for the counter to advance.
            for _ in range(self.DB_TIMEOUT * 3):
                p1_bytes_in = self.scrape_prometheus_var(
                    'of_port_rx_bytes', labels=labels, controller='gauge', dpid=False)
                if p1_bytes_in is not None and p1_bytes_in > last_p1_bytes_in:
                    updated_counters = True
                    last_p1_bytes_in = p1_bytes_in
                    break
                time.sleep(1)
            if not updated_counters:
                self.fail(msg='Gauge Prometheus counters not increasing')
class FaucetUntaggedInfluxTest(FaucetUntaggedTest):
    """Basic untagged VLAN test with Influx."""

    # Fake Influx HTTP server state, populated by _start_influx().
    server_thread = None
    server = None

    def get_gauge_watcher_config(self):
        """Watchers shipping port stats, port state and the flow table to Influx."""
        return """
    port_stats:
        dps: ['faucet-1']
        type: 'port_stats'
        interval: 2
        db: 'influx'
    port_state:
        dps: ['faucet-1']
        type: 'port_state'
        interval: 2
        db: 'influx'
    flow_table:
        dps: ['faucet-1']
        type: 'flow_table'
        interval: 2
        db: 'influx'
"""

    def _wait_error_shipping(self, timeout=None):
        """Wait for Gauge's log to note an error shipping points to Influx."""
        if timeout is None:
            timeout = self.DB_TIMEOUT * 2
        gauge_log = self.env['gauge']['GAUGE_LOG']
        for _ in range(timeout):
            log_content = open(gauge_log).read()
            if re.search('error shipping', log_content):
                return
            time.sleep(1)
        self.fail('Influx error not noted in %s: %s' % (gauge_log, log_content))

    def _verify_influx_log(self, influx_log):
        """Check the fake Influx server logged well-formed line-protocol points."""
        self.assertTrue(os.path.exists(influx_log))
        observed_vars = set()
        for point_line in open(influx_log).readlines():
            # Influx line protocol: "<measurement,labels> <field=value> <timestamp>".
            point_fields = point_line.strip().split()
            self.assertEquals(3, len(point_fields), msg=point_fields)
            ts_name, value_field, timestamp_str = point_fields
            timestamp = int(timestamp_str)
            value = float(value_field.split('=')[1])
            ts_name_fields = ts_name.split(',')
            self.assertGreater(len(ts_name_fields), 1)
            observed_vars.add(ts_name_fields[0])
            label_values = {}
            for label_value in ts_name_fields[1:]:
                # NOTE(review): this rebinds 'value', clobbering the float
                # field value parsed above — confirm intended.
                label, value = label_value.split('=')
                label_values[label] = value
            if ts_name.startswith('flow'):
                self.assertTrue('inst_count' in label_values, msg=point_line)
                if 'vlan_vid' in label_values:
                    # NOTE(review): tests for key 'vlan_vid' but reads key
                    # 'vlan', and 'value' here is the last label's value from
                    # the loop above — confirm this check is what was meant.
                    self.assertEquals(
                        int(label_values['vlan']), int(value) ^ 0x1000)
        self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
        self.assertEquals(set([
            'dropped_in', 'dropped_out', 'bytes_out', 'flow_packet_count',
            'errors_in', 'bytes_in', 'flow_byte_count', 'port_state_reason',
            'packets_in', 'packets_out']), observed_vars)

    def _wait_influx_log(self, influx_log):
        """Wait (best-effort) for the fake Influx server to log a POST."""
        for _ in range(self.DB_TIMEOUT * 3):
            if os.path.exists(influx_log):
                return
            time.sleep(1)
        return

    def _start_influx(self, handler):
        """Start the fake Influx HTTP server on a daemon thread."""
        for _ in range(3):
            try:
                self.server = QuietHTTPServer(
                    ('127.0.0.1', self.influx_port), handler)
                break
            except socket.error:
                # Port may still be held (e.g. TIME_WAIT); retry after a pause.
                time.sleep(7)
        self.assertIsNotNone(
            self.server,
            msg='could not start test Influx server on %u' % self.influx_port)
        self.server_thread = threading.Thread(
            target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def _stop_influx(self):
        """Shut down the fake Influx server and release its socket."""
        self.server.shutdown()
        self.server.socket.close()

    def test_untagged(self):
        """Gauge ships valid stats to the fake Influx server while traffic runs."""
        influx_log = os.path.join(self.tmpdir, 'influx.log')

        class InfluxPostHandler(PostHandler):
            # Log each POST body and acknowledge with 204 (no content).

            def do_POST(self):
                self._log_post(influx_log)
                return self.send_response(204)

        self._start_influx(InfluxPostHandler)
        self.ping_all_when_learned()
        self.wait_gauge_up()
        self.hup_gauge()
        self.flap_all_switch_ports()
        self._wait_influx_log(influx_log)
        self._stop_influx()
        self._verify_influx_log(influx_log)
class FaucetUntaggedInfluxDownTest(FaucetUntaggedInfluxTest):
    """With no Influx server running, Gauge must log an error, not crash."""

    def test_untagged(self):
        self.ping_all_when_learned()
        self._wait_error_shipping()
        self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetUntaggedInfluxUnreachableTest(FaucetUntaggedInfluxTest):
    """Influx host unreachable: Gauge must log the shipping error, not crash."""

    def get_gauge_config(self, faucet_config_file,
                         monitor_stats_file,
                         monitor_state_file,
                         monitor_flow_table_file,
                         prometheus_port,
                         influx_port):
        """Build Gauge config."""
        # influx_host 127.0.0.2 is deliberately not served; prometheus_port
        # is accepted for interface compatibility but unused here.
        return """
faucet_configs:
    - %s
watchers:
%s
dbs:
    stats_file:
        type: 'text'
        file: %s
    state_file:
        type: 'text'
        file: %s
    flow_file:
        type: 'text'
        file: %s
    influx:
        type: 'influx'
        influx_db: 'faucet'
        influx_host: '127.0.0.2'
        influx_port: %u
        influx_user: 'faucet'
        influx_pwd: ''
        influx_timeout: 2
""" % (faucet_config_file,
       self.get_gauge_watcher_config(),
       monitor_stats_file,
       monitor_state_file,
       monitor_flow_table_file,
       influx_port)

    def test_untagged(self):
        # Route 127.0.0.2 via loopback so the connection attempt stays
        # on-host and times out.
        self.gauge_controller.cmd(
            'route add 127.0.0.2 gw 127.0.0.1 lo')
        self.ping_all_when_learned()
        self._wait_error_shipping()
        self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetUntaggedInfluxTooSlowTest(FaucetUntaggedInfluxTest):
    """Gauge must survive an Influx server that accepts but replies too slowly."""

    def get_gauge_watcher_config(self):
        return """
    port_stats:
        dps: ['faucet-1']
        type: 'port_stats'
        interval: 2
        db: 'influx'
    port_state:
        dps: ['faucet-1']
        type: 'port_state'
        interval: 2
        db: 'influx'
"""

    def test_untagged(self):
        influx_log = os.path.join(self.tmpdir, 'influx.log')

        class InfluxPostHandler(PostHandler):
            # Copy the timeout onto the handler class: the handler has no
            # reference back to the test instance at request time.
            DB_TIMEOUT = self.DB_TIMEOUT

            def do_POST(self):
                self._log_post(influx_log)
                # Respond slower than Gauge's influx_timeout, and with an
                # error status, to force the slow/error handling path.
                time.sleep(self.DB_TIMEOUT * 2)
                return self.send_response(500)

        self._start_influx(InfluxPostHandler)
        self.ping_all_when_learned()
        self.wait_gauge_up()
        self._wait_influx_log(influx_log)
        self._stop_influx()
        self.assertTrue(os.path.exists(influx_log))
        self._wait_error_shipping()
        self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetNailedForwardingTest(FaucetUntaggedTest):
    """Forwarding driven entirely by ACL output actions.

    Ports b1/b2 forward to each other by matching the peer's MAC (and
    broadcast ARP for the peer's IP); b3/b4 drop everything.  Hosts can
    therefore ping without FAUCET learning them.
    """

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_dst: "0e:00:00:00:02:02"
            actions:
                output:
                    port: b2
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.2"
            actions:
                output:
                    port: b2
        - rule:
            actions:
                allow: 0
    2:
        - rule:
            dl_dst: "0e:00:00:00:01:01"
            actions:
                output:
                    port: b1
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.1"
            actions:
                output:
                    port: b1
        - rule:
            actions:
                allow: 0
    3:
        - rule:
            actions:
                allow: 0
    4:
        - rule:
            actions:
                allow: 0
"""

    CONFIG = """
        interfaces:
            b1:
                number: %(port_1)d
                native_vlan: 100
                acl_in: 1
            b2:
                number: %(port_2)d
                native_vlan: 100
                acl_in: 2
            b3:
                number: %(port_3)d
                native_vlan: 100
                acl_in: 3
            b4:
                number: %(port_4)d
                native_vlan: 100
                acl_in: 4
"""

    def test_untagged(self):
        first_host, second_host = self.net.hosts[0:2]
        # The MACs must match the nailed ACL rules above.
        first_host.setMAC('0e:00:00:00:01:01')
        second_host.setMAC('0e:00:00:00:02:02')
        self.one_ipv4_ping(
            first_host, second_host.IP(), require_host_learned=False)
        self.one_ipv4_ping(
            second_host, first_host.IP(), require_host_learned=False)
class FaucetUntaggedLLDPBlockedTest(FaucetUntaggedTest):
    """LLDP frames should be blocked under the default configuration."""

    def test_untagged(self):
        self.ping_all_when_learned()
        self.assertTrue(self.verify_lldp_blocked())
class FaucetUntaggedCDPTest(FaucetUntaggedTest):
    """CDP frames should NOT be blocked under the default configuration."""

    def test_untagged(self):
        self.ping_all_when_learned()
        self.assertFalse(self.is_cdp_blocked())
class FaucetUntaggedLLDPUnblockedTest(FaucetUntaggedTest):
    """With drop_lldp: False, LLDP frames must be forwarded."""

    CONFIG = """
        drop_lldp: False
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        self.ping_all_when_learned()
        self.assertFalse(self.verify_lldp_blocked())
class FaucetZodiacUntaggedTest(FaucetUntaggedTest):
    """Zodiac has only 3 ports available, and one controller so no Gauge."""

    RUN_GAUGE = False
    N_UNTAGGED = 3

    def test_untagged(self):
        """All hosts on the same untagged VLAN should have connectivity."""
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        self.ping_all_when_learned()
class FaucetTaggedAndUntaggedVlanTest(FaucetTest):
    """Test mixture of tagged and untagged hosts on the same VLAN."""

    N_TAGGED = 1
    N_UNTAGGED = 3
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "mixed"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def setUp(self):
        """Build a topology of 1 tagged + 3 untagged hosts and start it."""
        super(FaucetTaggedAndUntaggedVlanTest, self).setUp()
        self.topo = self.topo_class(
            self.ports_sock, dpid=self.dpid, n_tagged=1, n_untagged=3)
        self.start_net()

    def test_untagged(self):
        """Test connectivity including after port flapping."""
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        self.ping_all_when_learned()
class FaucetZodiacTaggedAndUntaggedVlanTest(FaucetUntaggedTest):
    """Mixed tagged/untagged hosts on one VLAN, on Zodiac (3 ports, no Gauge)."""

    RUN_GAUGE = False
    N_TAGGED = 1
    N_UNTAGGED = 2
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "mixed"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Test connectivity including after port flapping."""
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        self.ping_all_when_learned()
class FaucetUntaggedMaxHostsTest(FaucetUntaggedTest):
    """With max_hosts: 2 on the VLAN, only two hosts may be learned."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        max_hosts: 2
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        self.net.pingAll()
        learned_hosts = [
            host for host in self.net.hosts if self.host_learned(host)]
        # Exactly the VLAN limit of hosts are learned, both on the switch
        # and as exported by Prometheus.
        self.assertEquals(2, len(learned_hosts))
        self.assertEquals(2, self.scrape_prometheus_var(
            'vlan_hosts_learned', {'vlan': '100'}))
        # Learning attempts beyond the limit are counted as bans.
        self.assertGreater(
            self.scrape_prometheus_var(
                'vlan_learn_bans', {'vlan': '100'}), 0)
class FaucetMaxHostsPortTest(FaucetUntaggedTest):
    """With max_hosts: 3 on a port, no more than 3 MACs are learned on it."""

    MAX_HOSTS = 3
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
                max_hosts: 3
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        first_host, second_host = self.net.hosts[:2]
        self.ping_all_when_learned()
        # Present twice the port limit of MACs via macvlan interfaces.
        for i in range(10, 10+(self.MAX_HOSTS*2)):
            mac_intf = 'mac%u' % i
            mac_ipv4 = '10.0.0.%u' % i
            self.add_macvlan(second_host, mac_intf)
            second_host.cmd('ip address add %s/24 brd + dev %s' % (
                mac_ipv4, mac_intf))
            second_host.cmd('ping -c1 -I%s %s &' % (mac_intf, first_host.IP()))
        flows = self.get_matching_flows_on_dpid(
            self.dpid,
            {u'dl_vlan': u'100', u'in_port': int(self.port_map['port_2'])},
            table_id=self.ETH_SRC_TABLE)
        # Only MAX_HOSTS learn flows on the port, and the same count is
        # exported by Prometheus.
        self.assertEquals(self.MAX_HOSTS, len(flows))
        self.assertEquals(
            self.MAX_HOSTS,
            len(self.scrape_prometheus_var(
                'learned_macs',
                {'port': self.port_map['port_2'], 'vlan': '100'},
                multiple=True)))
        self.assertGreater(
            self.scrape_prometheus_var(
                'port_learn_bans', {'port': self.port_map['port_2']}), 0)
class FaucetHostsTimeoutPrometheusTest(FaucetUntaggedTest):
    """Test for hosts that have been learnt are exported via prometheus.
    Hosts should timeout, and the exported prometheus values should
    be overwritten.
    If the maximum number of MACs at any one time is 5, then only 5 values
    should be exported, even if over 2 hours, there are 100 MACs learnt
    """

    TIMEOUT = 10
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""
    CONFIG = """
        timeout: 10
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def mac_as_int(self, mac):
        """Return colon-separated MAC address as an integer."""
        # int() auto-promotes to long on Python 2, so this is equivalent
        # to the old long() call and also works on Python 3.
        return int(mac.replace(':', ''), 16)

    def macs_learned_on_port(self, port):
        """Return the MAC integers Prometheus reports learned on a port."""
        port_learned_macs_prom = self.scrape_prometheus_var(
            'learned_macs', {'port': str(port), 'vlan': '100'},
            default=[], multiple=True)
        macs_learned = []
        for _, mac_int in port_learned_macs_prom:
            if mac_int:
                macs_learned.append(mac_int)
        return macs_learned

    def verify_hosts_learned(self, hosts):
        """Check that hosts are learned by FAUCET on the expected ports."""
        mac_ints_on_port_learned = {}
        for mac, port in hosts.items():
            self.mac_learned(mac)
            if port not in mac_ints_on_port_learned:
                mac_ints_on_port_learned[port] = set()
            macs_learned = self.macs_learned_on_port(port)
            mac_ints_on_port_learned[port].update(macs_learned)
        for mac, port in hosts.items():
            mac_int = self.mac_as_int(mac)
            self.assertTrue(mac_int in mac_ints_on_port_learned[port])

    def test_untagged(self):
        """Learn hosts, let them expire, learn new ones; check exports."""
        first_host, second_host = self.net.hosts[:2]
        learned_mac_ports = {}
        learned_mac_ports[first_host.MAC()] = self.port_map['port_1']
        mac_intfs = []
        mac_ips = []
        for i in range(10, 16):
            if i == 14:
                first_host.cmd('fping -c3 %s' % ' '.join(mac_ips))
                # check first 4 are learnt
                self.verify_hosts_learned(learned_mac_ports)
                learned_mac_ports = {}
                mac_intfs = []
                mac_ips = []
                # wait for first lot to time out.
                # Adding 11 covers the random variation when a rule is added
                time.sleep(self.TIMEOUT + 11)
            mac_intf = 'mac%u' % i
            mac_intfs.append(mac_intf)
            mac_ipv4 = '10.0.0.%u' % i
            mac_ips.append(mac_ipv4)
            self.add_macvlan(second_host, mac_intf)
            # Fixed: 'dev' must immediately precede the interface name
            # ('brd + dev IF'), matching the iproute2 syntax used by the
            # other tests in this file; 'dev brd + IF' is not a valid
            # ip-address command.
            second_host.cmd('ip address add %s/24 brd + dev %s' % (
                mac_ipv4, mac_intf))
            address = second_host.cmd(
                '|'.join((
                    'ip link show %s' % mac_intf,
                    'grep -o "..:..:..:..:..:.."',
                    'head -1',
                    'xargs echo -n')))
            learned_mac_ports[address] = self.port_map['port_2']
        first_host.cmd('fping -c3 %s' % ' '.join(mac_ips))
        learned_mac_ports[first_host.MAC()] = self.port_map['port_1']
        self.verify_hosts_learned(learned_mac_ports)
        # Verify same or less number of hosts on a port reported by Prometheus
        self.assertLessEqual(
            len(self.macs_learned_on_port(self.port_map['port_1'])),
            len(learned_mac_ports))
class FaucetLearn50MACsOnPortTest(FaucetUntaggedTest):
    """Learn 50 MACs on a single port and verify connectivity and exports."""

    MAX_HOSTS = 50
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        first_host, second_host = self.net.hosts[:2]
        self.ping_all_when_learned()
        mac_intf_ipv4s = []
        for i in range(10, 10+self.MAX_HOSTS):
            mac_intf_ipv4s.append(('mac%u' % i, '10.0.0.%u' % i))
        # configure macvlan interfaces and stimulate learning
        for mac_intf, mac_ipv4 in mac_intf_ipv4s:
            self.add_macvlan(second_host, mac_intf)
            second_host.cmd('ip address add %s/24 brd + dev %s' % (
                mac_ipv4, mac_intf))
            second_host.cmd('ping -c1 -I%s %s &' % (mac_intf, first_host.IP()))
        # verify connectivity
        for mac_intf, _ in mac_intf_ipv4s:
            self.one_ipv4_ping(
                second_host, first_host.IP(),
                require_host_learned=False, intf=mac_intf)
        # verify FAUCET thinks it learned this many hosts
        self.assertGreater(
            self.scrape_prometheus_var('vlan_hosts_learned', {'vlan': '100'}),
            self.MAX_HOSTS)
class FaucetUntaggedHUPTest(FaucetUntaggedTest):
    """Test handling HUP signal without config change."""

    def _configure_count_with_retry(self, expected_count):
        # Poll: the HUP is processed asynchronously by FAUCET.
        for _ in range(3):
            configure_count = self.get_configure_count()
            if configure_count == expected_count:
                return
            time.sleep(1)
        self.fail('configure count %u != expected %u' % (
            configure_count, expected_count))

    def test_untagged(self):
        """Test that FAUCET receives HUP signal and keeps switching."""
        init_config_count = self.get_configure_count()
        for i in range(init_config_count, init_config_count+3):
            self._configure_count_with_retry(i)
            self.verify_hup_faucet()
            self._configure_count_with_retry(i+1)
        # HUP with an unchanged config must not cause the DP to
        # disconnect/reconnect.
        self.assertEqual(
            self.scrape_prometheus_var('of_dp_disconnections', default=0),
            0)
        self.assertEqual(
            self.scrape_prometheus_var('of_dp_connections', default=0),
            1)
        self.wait_until_controller_flow()
        self.ping_all_when_learned()
class FaucetConfigReloadTest(FaucetTest):
    """Test handling HUP signal with config change."""

    N_UNTAGGED = 4
    N_TAGGED = 0
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
    200:
        description: "untagged"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""
    ACL = """
acls:
    1:
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5001
            actions:
                allow: 0
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5002
            actions:
                allow: 1
        - rule:
            actions:
                allow: 1
    2:
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5001
            actions:
                allow: 1
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5002
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
"""

    def setUp(self):
        """Write the ACL include file, append the include, start the net."""
        super(FaucetConfigReloadTest, self).setUp()
        self.acl_config_file = '%s/acl.yaml' % self.tmpdir
        # Use context managers so the file handles are closed promptly
        # instead of leaking until garbage collection.
        with open(self.acl_config_file, 'w') as acl_file:
            acl_file.write(self.ACL)
        with open(self.faucet_config_path, 'a') as config_file:
            config_file.write(
                'include:\n - %s' % self.acl_config_file)
        self.topo = self.topo_class(
            self.ports_sock, dpid=self.dpid,
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED)
        self.start_net()

    def _get_conf(self):
        """Return the current FAUCET config as a parsed YAML object."""
        with open(self.faucet_config_path, 'r') as config_file:
            return yaml.load(config_file.read())

    def _reload_conf(self, conf, restart, cold_start, change_expected=True):
        """Write conf and optionally HUP FAUCET.

        Verifies the warm/cold reload Prometheus counter incremented
        (or stayed put, per change_expected).
        """
        with open(self.faucet_config_path, 'w') as config_file:
            config_file.write(yaml.dump(conf))
        if restart:
            var = 'faucet_config_reload_warm'
            if cold_start:
                var = 'faucet_config_reload_cold'
            old_count = int(
                self.scrape_prometheus_var(var, dpid=True, default=0))
            self.verify_hup_faucet()
            new_count = int(
                self.scrape_prometheus_var(var, dpid=True, default=0))
            if change_expected:
                self.assertEqual(
                    old_count + 1, new_count,
                    msg='%s did not increment: %u' % (var, new_count))
            else:
                self.assertEqual(
                    old_count, new_count,
                    msg='%s incremented: %u' % (var, new_count))

    def get_port_match_flow(self, port_no, table_id=None):
        """Return the flow matching in_port in table_id (default eth_src)."""
        if table_id is None:
            table_id = self.ETH_SRC_TABLE
        flow = self.get_matching_flow_on_dpid(
            self.dpid, {u'in_port': int(port_no)}, table_id)
        return flow

    def test_add_unknown_dp(self):
        """Adding an unrelated DP must not restart this DP."""
        conf = self._get_conf()
        conf['dps']['unknown'] = {
            'dp_id': int(self.rand_dpid()),
            'hardware': 'Open vSwitch',
        }
        self._reload_conf(
            conf, restart=True, cold_start=False, change_expected=False)

    def change_port_config(self, port, config_name, config_value,
                           restart=True, conf=None, cold_start=False):
        """Set one port config key on faucet-1 and reload."""
        if conf is None:
            conf = self._get_conf()
        conf['dps']['faucet-1']['interfaces'][port][config_name] = config_value
        self._reload_conf(conf, restart, cold_start)

    def change_vlan_config(self, vlan, config_name, config_value,
                           restart=True, conf=None, cold_start=False):
        """Set one VLAN config key and reload."""
        if conf is None:
            conf = self._get_conf()
        conf['vlans'][vlan][config_name] = config_value
        self._reload_conf(conf, restart, cold_start)

    def test_tabs_are_bad(self):
        """A config containing tabs is rejected; the old config keeps running."""
        self.ping_all_when_learned()
        orig_conf = self._get_conf()
        self.force_faucet_reload('\t'.join(('tabs', 'are', 'bad')))
        self.ping_all_when_learned()
        self._reload_conf(
            orig_conf, restart=True, cold_start=False, change_expected=False)

    def test_port_change_vlan(self):
        """Moving two ports to a new VLAN cold-starts and isolates them."""
        first_host, second_host = self.net.hosts[:2]
        third_host, fourth_host = self.net.hosts[2:]
        self.ping_all_when_learned()
        self.change_port_config(
            self.port_map['port_1'], 'native_vlan', 200, restart=False)
        self.change_port_config(
            self.port_map['port_2'], 'native_vlan', 200, restart=True, cold_start=True)
        for port_name in ('port_1', 'port_2'):
            # 4296 == 0x1000 (OFPVID_PRESENT) | VLAN 200.
            self.wait_until_matching_flow(
                {u'in_port': int(self.port_map[port_name])},
                table_id=self.VLAN_TABLE,
                actions=[u'SET_FIELD: {vlan_vid:4296}'])
        self.one_ipv4_ping(first_host, second_host.IP(), require_host_learned=False)
        # hosts 1 and 2 now in VLAN 200, so they shouldn't see floods for 3 and 4.
        self.verify_vlan_flood_limited(
            third_host, fourth_host, first_host)

    def test_port_change_acl(self):
        """Applying then removing a port ACL takes effect on warm reload."""
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        orig_conf = self._get_conf()
        self.change_port_config(
            self.port_map['port_1'], 'acl_in', 1, cold_start=False)
        self.wait_until_matching_flow(
            {u'in_port': int(self.port_map['port_1']), u'tp_dst': 5001},
            table_id=self.PORT_ACL_TABLE)
        self.verify_tp_dst_blocked(5001, first_host, second_host)
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
        self._reload_conf(orig_conf, True, cold_start=False)
        self.verify_tp_dst_notblocked(
            5001, first_host, second_host, table_id=None)
        self.verify_tp_dst_notblocked(
            5002, first_host, second_host, table_id=None)

    def test_port_change_permanent_learn(self):
        """permanent_learn pins a MAC to its port; ACLs still apply after."""
        first_host, second_host, third_host = self.net.hosts[0:3]
        self.change_port_config(
            self.port_map['port_1'], 'permanent_learn', True, cold_start=False)
        self.ping_all_when_learned()
        # A host impersonating port 1's MAC elsewhere must not hijack it.
        original_third_host_mac = third_host.MAC()
        third_host.setMAC(first_host.MAC())
        self.assertEqual(100.0, self.net.ping((second_host, third_host)))
        self.assertEqual(0, self.net.ping((first_host, second_host)))
        third_host.setMAC(original_third_host_mac)
        self.ping_all_when_learned()
        self.change_port_config(
            self.port_map['port_1'], 'acl_in', 1, cold_start=False)
        self.wait_until_matching_flow(
            {u'in_port': int(self.port_map['port_1']), u'tp_dst': 5001},
            table_id=self.PORT_ACL_TABLE)
        self.verify_tp_dst_blocked(5001, first_host, second_host)
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedBGPIPv4DefaultRouteTest(FaucetUntaggedTest):
    """Test IPv4 routing and import default route from BGP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
        bgp_port: %(bgp_port)d
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_addresses: ["127.0.0.1"]
        bgp_neighbor_as: 2
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    # exabgp advertises only a default route via the first host.
    exabgp_peer_conf = """
    static {
      route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
    }
"""
    exabgp_log = None

    def pre_start_net(self):
        """Start the exabgp peer before the network comes up."""
        exabgp_conf = self.get_exabgp_conf('127.0.0.1', self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Test IPv4 routing, and BGP routes received."""
        first_host, second_host = self.net.hosts[:2]
        # Alias address is reachable only via the imported default route.
        first_host_alias_ip = ipaddress.ip_interface(u'10.99.99.99/24')
        first_host_alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(first_host_alias_ip.ip))
        self.host_ipv4_alias(first_host, first_host_alias_ip)
        self.wait_bgp_up('127.0.0.1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        self.add_host_route(
            second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
        self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
        self.one_ipv4_controller_ping(first_host)
class FaucetUntaggedBGPIPv4RouteTest(FaucetUntaggedTest):
    """Test IPv4 routing and import from BGP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
        bgp_port: %(bgp_port)d
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_addresses: ["127.0.0.1"]
        bgp_neighbor_as: 2
        routes:
            - route:
                ip_dst: 10.99.99.0/24
                ip_gw: 10.0.0.1
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    # The last two advertised routes are deliberately invalid (next hop
    # is us / next hop not on a connected network) and must be rejected.
    exabgp_peer_conf = """
    static {
      route 10.0.1.0/24 next-hop 10.0.0.1 local-preference 100;
      route 10.0.2.0/24 next-hop 10.0.0.2 local-preference 100;
      route 10.0.3.0/24 next-hop 10.0.0.2 local-preference 100;
      route 10.0.4.0/24 next-hop 10.0.0.254;
      route 10.0.5.0/24 next-hop 10.10.0.1;
    }
"""
    exabgp_log = None

    def pre_start_net(self):
        """Start the exabgp peer before the network comes up."""
        exabgp_conf = self.get_exabgp_conf('127.0.0.1', self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Test IPv4 routing, and BGP routes received."""
        first_host, second_host = self.net.hosts[:2]
        # wait until 10.0.0.1 has been resolved
        self.wait_for_route_as_flow(
            first_host.MAC(), ipaddress.IPv4Network(u'10.99.99.0/24'))
        self.wait_bgp_up('127.0.0.1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        self.verify_invalid_bgp_route('10.0.0.4/24 cannot be us')
        self.verify_invalid_bgp_route('10.0.0.5/24 is not a connected network')
        self.wait_for_route_as_flow(
            second_host.MAC(), ipaddress.IPv4Network(u'10.0.3.0/24'))
        self.verify_ipv4_routing_mesh()
        self.flap_all_switch_ports()
        self.verify_ipv4_routing_mesh()
        for host in first_host, second_host:
            self.one_ipv4_controller_ping(host)
class FaucetUntaggedIPv4RouteTest(FaucetUntaggedTest):
    """Test IPv4 routing and export to BGP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
        bgp_port: %(bgp_port)d
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_addresses: ["127.0.0.1"]
        bgp_neighbor_as: 2
        routes:
            - route:
                ip_dst: "10.0.1.0/24"
                ip_gw: "10.0.0.1"
            - route:
                ip_dst: "10.0.2.0/24"
                ip_gw: "10.0.0.2"
            - route:
                ip_dst: "10.0.3.0/24"
                ip_gw: "10.0.0.2"
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""
    exabgp_log = None

    def pre_start_net(self):
        """Start the exabgp peer (no static routes) before the net."""
        exabgp_conf = self.get_exabgp_conf('127.0.0.1')
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Test IPv4 routing, and BGP routes sent."""
        self.verify_ipv4_routing_mesh()
        self.flap_all_switch_ports()
        self.verify_ipv4_routing_mesh()
        self.wait_bgp_up('127.0.0.1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
            0)
        # exabgp should have received our BGP updates: the connected
        # network plus each configured static route.  Use assertTrue
        # rather than bare assert (stripped under python -O), and check
        # 10.0.3.0/24 instead of duplicating the 10.0.2.0/24 check.
        updates = self.exabgp_updates(self.exabgp_log)
        for expected_update in (
                '10.0.0.0/24 next-hop 10.0.0.254',
                '10.0.1.0/24 next-hop 10.0.0.1',
                '10.0.2.0/24 next-hop 10.0.0.2',
                '10.0.3.0/24 next-hop 10.0.0.2'):
            self.assertTrue(
                re.search(expected_update, updates),
                msg='missing BGP update %s' % expected_update)
class FaucetZodiacUntaggedIPv4RouteTest(FaucetUntaggedIPv4RouteTest):
    """IPv4 routing/BGP export on Zodiac (only 3 ports, so no Gauge)."""

    RUN_GAUGE = False
    N_UNTAGGED = 3
class FaucetUntaggedVLanUnicastFloodTest(FaucetUntaggedTest):
    """With unicast_flood: True on the VLAN, unknown unicast is flooded."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: True
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        self.ping_all_when_learned()
        self.verify_port1_unicast(True)
        # Traffic to an unknown MAC must reach port 1.
        self.assertTrue(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoVLanUnicastFloodTest(FaucetUntaggedTest):
    """With unicast_flood: False on the VLAN, unknown unicast is not flooded."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        self.verify_port1_unicast(False)
        self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedPortUnicastFloodTest(FaucetUntaggedTest):
    """Port-level unicast_flood cannot override a VLAN-level flood disable."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                unicast_flood: True
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        self.verify_port1_unicast(False)
        # VLAN level config to disable flooding takes precedence,
        # cannot enable port-only flooding.
        self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoPortUnicastFloodTest(FaucetUntaggedTest):
    """Port-level unicast_flood: False stops flooding to that port only."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: True
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                unicast_flood: False
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        self.verify_port1_unicast(False)
        self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedHostMoveTest(FaucetUntaggedTest):
    """A host moving between ports must be re-learned and keep connectivity."""

    def test_untagged(self):
        first_host, second_host = self.net.hosts[0:2]
        self.assertEqual(0, self.net.ping((first_host, second_host)))
        # Swap the hosts' MACs, which looks to FAUCET like each host
        # moved to the other's switch port.
        self.swap_host_macs(first_host, second_host)
        self.net.ping((first_host, second_host))
        for host, in_port in (
                (first_host, self.port_map['port_1']),
                (second_host, self.port_map['port_2'])):
            self.require_host_learned(host, in_port=in_port)
        # Use assertEqual consistently (assertEquals is a deprecated alias).
        self.assertEqual(0, self.net.ping((first_host, second_host)))
class FaucetUntaggedHostPermanentLearnTest(FaucetUntaggedTest):
    """permanent_learn on a port pins its MAC against impersonation."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                permanent_learn: True
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        self.ping_all_when_learned()
        first_host, second_host, third_host = self.net.hosts[0:3]
        # 3rd host impersonates 1st, 3rd host breaks but 1st host still OK
        original_third_host_mac = third_host.MAC()
        third_host.setMAC(first_host.MAC())
        self.assertEqual(100.0, self.net.ping((second_host, third_host)))
        self.assertEqual(0, self.net.ping((first_host, second_host)))
        # 3rd host stops impersonating, now everything fine again.
        third_host.setMAC(original_third_host_mac)
        self.ping_all_when_learned()
class FaucetSingleUntaggedIPv4ControlPlaneTest(FaucetUntaggedTest):
    """Exercise the IPv4 controller VIP (ping, fping, scapy fuzzing)."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
"""
    CONFIG = """
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_ping_controller(self):
        first_host, second_host = self.net.hosts[0:2]
        for _ in range(5):
            self.one_ipv4_ping(first_host, second_host.IP())
            for host in first_host, second_host:
                self.one_ipv4_controller_ping(host)
            self.flap_all_switch_ports()

    def test_fping_controller(self):
        first_host = self.net.hosts[0]
        self.one_ipv4_controller_ping(first_host)
        self.verify_controller_fping(first_host, self.FAUCET_VIPV4)

    def test_fuzz_controller(self):
        """Fuzz ICMP/ARP at the VIP; the controller must keep answering."""
        first_host = self.net.hosts[0]
        self.one_ipv4_controller_ping(first_host)
        packets = 1000
        for fuzz_cmd in (
                # ICMP echo reply, ICMP echo request, then ARP.
                ('python -c \"from scapy.all import * ;'
                 'scapy.all.send(IP(dst=\'%s\')/'
                 'fuzz(%s(type=0)),count=%u)\"' % ('10.0.0.254', 'ICMP', packets)),
                ('python -c \"from scapy.all import * ;'
                 'scapy.all.send(IP(dst=\'%s\')/'
                 'fuzz(%s(type=8)),count=%u)\"' % ('10.0.0.254', 'ICMP', packets)),
                ('python -c \"from scapy.all import * ;'
                 'scapy.all.send(fuzz(%s(pdst=\'%s\')),'
                 'count=%u)\"' % ('ARP', '10.0.0.254', packets))):
            self.assertTrue(
                re.search('Sent %u packets' % packets, first_host.cmd(fuzz_cmd)))
        self.one_ipv4_controller_ping(first_host)
class FaucetUntaggedIPv6RATest(FaucetUntaggedTest):
    """Test FAUCET's IPv6 router advertisements and neighbor discovery."""

    FAUCET_MAC = "0e:00:00:00:00:99"
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fe80::1:254/64", "fc00::1:254/112", "fc00::2:254/112", "10.0.0.254/24"]
        faucet_mac: "%s"
""" % FAUCET_MAC
    CONFIG = """
        advertise_interval: 5
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_ndisc6(self):
        """Neighbor solicitation for each VIP returns the FAUCET MAC."""
        first_host = self.net.hosts[0]
        for vip in ('fe80::1:254', 'fc00::1:254', 'fc00::2:254'):
            self.assertEquals(
                self.FAUCET_MAC.upper(),
                first_host.cmd('ndisc6 -q %s %s' % (vip, first_host.defaultIntf())).strip())

    def test_rdisc6(self):
        """Router solicitation returns both advertised prefixes."""
        first_host = self.net.hosts[0]
        rdisc6_results = sorted(list(set(first_host.cmd(
            'rdisc6 -q %s' % first_host.defaultIntf()).splitlines())))
        self.assertEquals(
            ['fc00::1:0/112', 'fc00::2:0/112'],
            rdisc6_results)

    def test_ra_advertise(self):
        """Unsolicited RAs are multicast with the expected contents."""
        first_host = self.net.hosts[0]
        # ip6[40] == 134 is the ICMPv6 router advertisement type.
        tcpdump_filter = ' and '.join((
            'ether dst 33:33:00:00:00:01',
            'ether src %s' % self.FAUCET_MAC,
            'icmp6',
            'ip6[40] == 134',
            'ip6 host fe80::1:254'))
        tcpdump_txt = self.tcpdump_helper(
            first_host, tcpdump_filter, [], timeout=30, vflags='-vv', packets=1)
        for ra_required in (
                r'fe80::1:254 > ff02::1:.+ICMP6, router advertisement',
                r'fc00::1:0/112, Flags \[onlink, auto\]',
                r'fc00::2:0/112, Flags \[onlink, auto\]',
                r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
            self.assertTrue(
                re.search(ra_required, tcpdump_txt),
                msg='%s: %s' % (ra_required, tcpdump_txt))

    def test_rs_reply(self):
        """A router solicitation gets a unicast RA reply."""
        first_host = self.net.hosts[0]
        tcpdump_filter = ' and '.join((
            'ether src %s' % self.FAUCET_MAC,
            'ether dst %s' % first_host.MAC(),
            'icmp6',
            'ip6[40] == 134',
            'ip6 host fe80::1:254'))
        tcpdump_txt = self.tcpdump_helper(
            first_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'rdisc6 -1 %s' % first_host.defaultIntf())],
            timeout=30, vflags='-vv', packets=1)
        for ra_required in (
                r'fe80::1:254 > fe80::.+ICMP6, router advertisement',
                r'fc00::1:0/112, Flags \[onlink, auto\]',
                r'fc00::2:0/112, Flags \[onlink, auto\]',
                r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
            self.assertTrue(
                re.search(ra_required, tcpdump_txt),
                msg='%s: %s (%s)' % (ra_required, tcpdump_txt, tcpdump_filter))
class FaucetSingleUntaggedIPv6ControlPlaneTest(FaucetUntaggedTest):
    """Exercise the IPv6 controller VIP (ping, fping, scapy fuzzing)."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
"""
    CONFIG = """
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_ping_controller(self):
        first_host, second_host = self.net.hosts[0:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        for _ in range(5):
            self.one_ipv6_ping(first_host, 'fc00::1:2')
            for host in first_host, second_host:
                self.one_ipv6_controller_ping(host)
            self.flap_all_switch_ports()

    def test_fping_controller(self):
        first_host = self.net.hosts[0]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.one_ipv6_controller_ping(first_host)
        self.verify_controller_fping(first_host, self.FAUCET_VIPV6)

    def test_fuzz_controller(self):
        """Fuzz every scapy ICMPv6 type at the VIP; controller must survive."""
        first_host = self.net.hosts[0]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.one_ipv6_controller_ping(first_host)
        fuzz_success = False
        packets = 1000
        for fuzz_class in dir(scapy.all):
            if fuzz_class.startswith('ICMPv6'):
                fuzz_cmd = (
                    'python -c \"from scapy.all import * ;'
                    'scapy.all.send(IPv6(dst=\'%s\')/'
                    'fuzz(%s()),count=%u)\"' % ('fc00::1:254', fuzz_class, packets))
                if re.search('Sent %u packets' % packets, first_host.cmd(fuzz_cmd)):
                    print fuzz_class
                    fuzz_success = True
        self.assertTrue(fuzz_success)
        self.one_ipv6_controller_ping(first_host)
class FaucetTaggedAndUntaggedTest(FaucetTest):
    """Tagged and untagged hosts on DIFFERENT VLANs must be isolated."""

    N_TAGGED = 2
    N_UNTAGGED = 4
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
    101:
        description: "untagged"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                native_vlan: 101
                description: "b3"
            %(port_4)d:
                native_vlan: 101
                description: "b4"
"""

    def setUp(self):
        """Build a topology of 2 tagged and 2 untagged hosts and start it."""
        super(FaucetTaggedAndUntaggedTest, self).setUp()
        self.topo = self.topo_class(
            self.ports_sock, dpid=self.dpid, n_tagged=2, n_untagged=2)
        self.start_net()

    # NOTE(review): method name keeps its historical 'seperate' spelling;
    # renaming would change the test ID used by runners.
    def test_seperate_untagged_tagged(self):
        tagged_host_pair = self.net.hosts[:2]
        untagged_host_pair = self.net.hosts[2:]
        self.verify_vlan_flood_limited(
            tagged_host_pair[0], tagged_host_pair[1], untagged_host_pair[0])
        self.verify_vlan_flood_limited(
            untagged_host_pair[0], untagged_host_pair[1], tagged_host_pair[0])
        # hosts within VLANs can ping each other
        self.assertEquals(0, self.net.ping(tagged_host_pair))
        self.assertEquals(0, self.net.ping(untagged_host_pair))
        # hosts cannot ping hosts in other VLANs
        self.assertEquals(
            100, self.net.ping([tagged_host_pair[0], untagged_host_pair[0]]))
class FaucetUntaggedACLTest(FaucetUntaggedTest):
    """Port ACL test: TCP/5001 is blocked, TCP/5002 allowed, on port 1."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5002
            actions:
                allow: 1
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5001
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_port5001_blocked(self):
        """Traffic to TCP port 5001 must be dropped by the ACL."""
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        self.verify_tp_dst_blocked(5001, first_host, second_host)

    def test_port5002_notblocked(self):
        """Traffic to TCP port 5002 must be allowed by the ACL."""
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedACLTcpMaskTest(FaucetUntaggedACLTest):
    """Port ACL test using a masked TCP destination port match (>1023)."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5002
            actions:
                allow: 1
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5001
            actions:
                allow: 0
        - rule:
            dl_type: 0x800
            nw_proto: 6
            # Match packets > 1023
            tp_dst: 1024/1024
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
"""

    def test_port_gt1023_blocked(self):
        """TCP ports above 1023 must be blocked by the masked match; 1023 allowed."""
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        self.verify_tp_dst_blocked(1024, first_host, second_host, mask=1024)
        self.verify_tp_dst_notblocked(1023, first_host, second_host, table_id=None)
class FaucetUntaggedVLANACLTest(FaucetUntaggedTest):
    """ACL applied at VLAN level (acl_in on the VLAN, matched in the VLAN ACL table)."""

    CONFIG_GLOBAL = """
acls:
    1:
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5001
            actions:
                allow: 0
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5002
            actions:
                allow: 1
        - rule:
            actions:
                allow: 1
vlans:
    100:
        description: "untagged"
        acl_in: 1
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_port5001_blocked(self):
        """TCP port 5001 must be dropped, via a flow in the VLAN ACL table."""
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        self.verify_tp_dst_blocked(
            5001, first_host, second_host, table_id=self.VLAN_ACL_TABLE)

    def test_port5002_notblocked(self):
        """TCP port 5002 must be allowed, via a flow in the VLAN ACL table."""
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        self.verify_tp_dst_notblocked(
            5002, first_host, second_host, table_id=self.VLAN_ACL_TABLE)
class FaucetZodiacUntaggedACLTest(FaucetUntaggedACLTest):
    """Run the port ACL tests on Zodiac hardware (3 untagged ports, no Gauge)."""

    RUN_GAUGE = False
    N_UNTAGGED = 3

    def test_untagged(self):
        """All hosts on the same untagged VLAN should have connectivity."""
        self.ping_all_when_learned()
        # Flap ports to verify learning recovers after link transitions.
        self.flap_all_switch_ports()
        self.ping_all_when_learned()
class FaucetUntaggedACLMirrorTest(FaucetUntaggedTest):
    """Mirroring via an ACL mirror action to a named mirror port."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            actions:
                allow: 1
                mirror: mirrorport
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                description: "b2"
                acl_in: 1
            mirrorport:
                number: %(port_3)d
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Pings between mirrored ports must be copied to the mirror host."""
        first_host, second_host, mirror_host = self.net.hosts[0:3]
        self.verify_ping_mirrored(first_host, second_host, mirror_host)

    def test_eapol_mirrored(self):
        """EAPOL frames must also be copied to the mirror host."""
        first_host, second_host, mirror_host = self.net.hosts[0:3]
        self.verify_eapol_mirrored(first_host, second_host, mirror_host)
class FaucetZodiacUntaggedACLMirrorTest(FaucetUntaggedACLMirrorTest):
    """Run the ACL mirror tests on Zodiac hardware (3 untagged ports, no Gauge)."""

    RUN_GAUGE = False
    N_UNTAGGED = 3
class FaucetUntaggedACLMirrorDefaultAllowTest(FaucetUntaggedACLMirrorTest):
    """ACL mirror action with no explicit allow - traffic must still be forwarded."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            actions:
                mirror: mirrorport
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                description: "b2"
                acl_in: 1
            mirrorport:
                number: %(port_3)d
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""
class FaucetUntaggedOutputTest(FaucetUntaggedTest):
    """ACL output action: rewrite dl_dst, push VLAN 123, output to a named port."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    dl_dst: "06:06:06:06:06:06"
                    vlan_vid: 123
                    port: acloutport
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            acloutport:
                number: %(port_2)d
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Ping traffic must arrive with the rewritten MAC and pushed VLAN tag."""
        first_host, second_host = self.net.hosts[0:2]
        # we expected to see the rewritten address and VLAN
        tcpdump_filter = ('icmp and ether dst 06:06:06:06:06:06')
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd('ping -c1 %s' % second_host.IP())])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 123', tcpdump_txt))
class FaucetUntaggedMultiVlansOutputTest(FaucetUntaggedTest):
    """ACL output action pushing multiple VLAN tags (QinQ-style vlan_vids list)."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    dl_dst: "06:06:06:06:06:06"
                    vlan_vids: [123, 456]
                    port: acloutport
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            acloutport:
                number: %(port_2)d
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    @unittest.skip('needs OVS dev or > v2.8')
    def test_untagged(self):
        """Ping traffic must arrive with both VLAN tags (outer 456, inner 123)."""
        first_host, second_host = self.net.hosts[0:2]
        # we expected to see the rewritten address and VLAN
        tcpdump_filter = 'vlan'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd('ping -c1 %s' % second_host.IP())])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 456.+vlan 123', tcpdump_txt))
class FaucetUntaggedMirrorTest(FaucetUntaggedTest):
    """Port-based mirroring: port 3 mirrors all traffic on port 1."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
                mirror: %(port_1)d
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Pings between first and second host must be copied to the mirror host."""
        first_host, second_host, mirror_host = self.net.hosts[0:3]
        self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedTest(FaucetTest):
    """Base test: four hosts, all tagged on VLAN 100."""

    N_UNTAGGED = 0
    N_TAGGED = 4
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def setUp(self):
        super(FaucetTaggedTest, self).setUp()
        self.topo = self.topo_class(
            self.ports_sock, dpid=self.dpid, n_tagged=4)
        self.start_net()

    def test_tagged(self):
        """All tagged hosts on the same VLAN should have connectivity."""
        self.ping_all_when_learned()
class FaucetTaggedSwapVidOutputTest(FaucetTaggedTest):
    """ACL output action swapping the VLAN VID from 100 to 101."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        unicast_flood: False
    101:
        description: "tagged"
        unicast_flood: False
acls:
    1:
        - rule:
            vlan_vid: 100
            actions:
                output:
                    swap_vid: 101
                    port: acloutport
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
                acl_in: 1
            acloutport:
                number: %(port_2)d
                tagged_vlans: [101]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_tagged(self):
        """Traffic from port 1 must arrive on port 2 re-tagged as VLAN 101."""
        first_host, second_host = self.net.hosts[0:2]
        # we expected to see the swapped VLAN VID
        tcpdump_filter = 'vlan 101'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd('ping -c1 %s' % second_host.IP())], root_intf=True)
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 101', tcpdump_txt))
class FaucetTaggedPopVlansOutputTest(FaucetTaggedTest):
    """ACL output action popping the VLAN tag and rewriting dl_dst."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        unicast_flood: False
acls:
    1:
        - rule:
            vlan_vid: 100
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    dl_dst: "06:06:06:06:06:06"
                    pop_vlans: 1
                    port: acloutport
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
                acl_in: 1
            acloutport:
                tagged_vlans: [100]
                number: %(port_2)d
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_tagged(self):
        """Traffic must arrive untagged with the rewritten destination MAC."""
        first_host, second_host = self.net.hosts[0:2]
        tcpdump_filter = 'not vlan and icmp and ether dst 06:06:06:06:06:06'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(
                    'ping -c1 %s' % second_host.IP())], packets=10, root_intf=True)
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
class FaucetTaggedIPv4ControlPlaneTest(FaucetTaggedTest):
    """Tagged VLAN hosts must be able to reach the IPv4 FAUCET VIP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["10.0.0.254/24"]
"""
    CONFIG = """
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_ping_controller(self):
        """Hosts can ping each other and the controller VIP."""
        host_a, host_b = self.net.hosts[:2]
        self.one_ipv4_ping(host_a, host_b.IP())
        for host in (host_a, host_b):
            self.one_ipv4_controller_ping(host)
class FaucetTaggedIPv6ControlPlaneTest(FaucetTaggedTest):
    """Tagged VLAN hosts must be able to reach the IPv6 FAUCET VIP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["fc00::1:254/112"]
"""
    CONFIG = """
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_ping_controller(self):
        """Hosts can ping each other and the controller VIP over IPv6."""
        host_a, host_b = self.net.hosts[:2]
        self.add_host_ipv6_address(host_a, 'fc00::1:1/112')
        self.add_host_ipv6_address(host_b, 'fc00::1:2/112')
        self.one_ipv6_ping(host_a, 'fc00::1:2')
        for host in (host_a, host_b):
            self.one_ipv6_controller_ping(host)
class FaucetTaggedICMPv6ACLTest(FaucetTaggedTest):
    """ACL matching ICMPv6 neighbour solicitation for a specific ND target."""

    CONFIG_GLOBAL = """
acls:
    1:
        - rule:
            dl_type: 0x86dd
            vlan_vid: 100
            ip_proto: 58
            icmpv6_type: 135
            ipv6_nd_target: "fc00::1:2"
            actions:
                output:
                    port: b2
        - rule:
            actions:
                allow: 1
vlans:
    100:
        description: "tagged"
        faucet_vips: ["fc00::1:254/112"]
"""
    CONFIG = """
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
                acl_in: 1
            b2:
                number: %(port_2)d
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_icmpv6_acl_match(self):
        """The ND-target ACL flow must match (nonzero packet count) after a ping."""
        first_host, second_host = self.net.hosts[0:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        self.one_ipv6_ping(first_host, 'fc00::1:2')
        self.wait_nonzero_packet_count_flow(
            {u'ipv6_nd_target': u'fc00::1:2'}, table_id=self.PORT_ACL_TABLE)
class FaucetTaggedIPv4RouteTest(FaucetTaggedTest):
    """Static IPv4 routing between tagged hosts."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["10.0.0.254/24"]
        routes:
            - route:
                ip_dst: "10.0.1.0/24"
                ip_gw: "10.0.0.1"
            - route:
                ip_dst: "10.0.2.0/24"
                ip_gw: "10.0.0.2"
            - route:
                ip_dst: "10.0.3.0/24"
                ip_gw: "10.0.0.2"
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_tagged(self):
        """Routing must keep working while host MACs are repeatedly swapped."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_routed_ip = ipaddress.ip_interface(u'10.0.1.1/24')
        second_host_routed_ip = ipaddress.ip_interface(u'10.0.2.1/24')
        for _ in range(3):
            self.verify_ipv4_routing(
                first_host, first_host_routed_ip,
                second_host, second_host_routed_ip)
            self.swap_host_macs(first_host, second_host)
class FaucetTaggedProactiveNeighborIPv4RouteTest(FaucetTaggedTest):
    """Proactive IPv4 neighbour learning: route to a host alias via the VIP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["10.0.0.254/24"]
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        proactive_learn: true
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_tagged(self):
        """Second host can reach first host's alias IP via the FAUCET VIP."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_alias_ip = ipaddress.ip_interface(u'10.0.0.99/24')
        # /32 host route for the alias address.
        first_host_alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(first_host_alias_ip.ip))
        self.host_ipv4_alias(first_host, first_host_alias_ip)
        self.add_host_route(second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
        self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
        self.assertGreater(
            self.scrape_prometheus_var(
                'vlan_neighbors', {'ipv': '4', 'vlan': '100'}),
            1)
class FaucetTaggedProactiveNeighborIPv6RouteTest(FaucetTaggedTest):
    """Proactive IPv6 neighbour learning: route to a host alias via the VIP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["fc00::1:3/64"]
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        proactive_learn: true
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_tagged(self):
        """Second host can reach first host's alias IP via the FAUCET VIP."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_alias_ip = ipaddress.ip_interface(u'fc00::1:99/64')
        faucet_vip_ip = ipaddress.ip_interface(u'fc00::1:3/126')
        # /128 host route for the alias address.
        first_host_alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(first_host_alias_ip.ip))
        self.add_host_ipv6_address(first_host, ipaddress.ip_interface(u'fc00::1:1/64'))
        # We use a narrower mask to force second_host to use the /128 route,
        # since otherwise it would realize :99 is directly connected via ND and send direct.
        self.add_host_ipv6_address(second_host, ipaddress.ip_interface(u'fc00::1:2/126'))
        self.add_host_ipv6_address(first_host, first_host_alias_ip)
        self.add_host_route(second_host, first_host_alias_host_ip, faucet_vip_ip.ip)
        self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
        self.assertGreater(
            self.scrape_prometheus_var(
                'vlan_neighbors', {'ipv': '6', 'vlan': '100'}),
            1)
class FaucetUntaggedIPv4InterVLANRouteTest(FaucetUntaggedTest):
    """IPv4 routing between two VLANs, each with its own FAUCET MAC."""

    # Distinct FAUCET MAC for vlanb, to verify per-VLAN MACs work.
    FAUCET_MAC2 = '0e:00:00:00:00:02'
    CONFIG_GLOBAL = """
vlans:
    100:
        faucet_vips: ["10.100.0.254/24"]
    vlanb:
        vid: 200
        faucet_vips: ["10.200.0.254/24"]
        faucet_mac: "%s"
    vlanc:
        vid: 100
        description: "not used"
routers:
    router-1:
        vlans: [100, vlanb]
""" % FAUCET_MAC2
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        proactive_learn: True
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: vlanb
                description: "b2"
            %(port_3)d:
                native_vlan: vlanb
                description: "b3"
            %(port_4)d:
                native_vlan: vlanb
                description: "b4"
"""

    def test_untagged(self):
        """Hosts in different VLANs can route via the VIPs; each VIP has its own MAC."""
        first_host_ip = ipaddress.ip_interface(u'10.100.0.1/24')
        first_faucet_vip = ipaddress.ip_interface(u'10.100.0.254/24')
        second_host_ip = ipaddress.ip_interface(u'10.200.0.1/24')
        second_faucet_vip = ipaddress.ip_interface(u'10.200.0.254/24')
        first_host, second_host = self.net.hosts[:2]
        first_host.setIP(str(first_host_ip.ip), prefixLen=24)
        second_host.setIP(str(second_host_ip.ip), prefixLen=24)
        self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
        self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
        self.one_ipv4_ping(first_host, second_host_ip.ip)
        self.one_ipv4_ping(second_host, first_host_ip.ip)
        # Each VLAN's VIP must resolve to that VLAN's FAUCET MAC.
        self.assertEquals(
            self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
        self.assertEquals(
            self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
class FaucetUntaggedIPv6InterVLANRouteTest(FaucetUntaggedTest):
    """IPv6 routing between two VLANs, each with its own FAUCET MAC."""

    # Distinct FAUCET MAC for vlanb, to verify per-VLAN MACs work.
    FAUCET_MAC2 = '0e:00:00:00:00:02'
    CONFIG_GLOBAL = """
vlans:
    100:
        faucet_vips: ["fc00::1:254/64"]
    vlanb:
        vid: 200
        faucet_vips: ["fc01::1:254/64"]
        faucet_mac: "%s"
    vlanc:
        vid: 100
        description: "not used"
routers:
    router-1:
        vlans: [100, vlanb]
""" % FAUCET_MAC2
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        proactive_learn: True
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: vlanb
                description: "b2"
            %(port_3)d:
                native_vlan: vlanb
                description: "b3"
            %(port_4)d:
                native_vlan: vlanb
                description: "b4"
"""

    def test_untagged(self):
        """Hosts in different VLANs can route to each other via the IPv6 VIPs."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_net = ipaddress.ip_interface(u'fc00::1:1/64')
        second_host_net = ipaddress.ip_interface(u'fc01::1:1/64')
        self.add_host_ipv6_address(first_host, first_host_net)
        self.add_host_ipv6_address(second_host, second_host_net)
        self.add_host_route(
            first_host, second_host_net, self.FAUCET_VIPV6.ip)
        self.add_host_route(
            second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
        self.one_ipv6_ping(first_host, second_host_net.ip)
        self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedIPv4PolicyRouteTest(FaucetUntaggedTest):
    """Policy-based routing: an ACL swap_vid overrides the normal route lookup."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "100"
        faucet_vips: ["10.0.0.254/24"]
        acl_in: pbr
    200:
        description: "200"
        faucet_vips: ["10.20.0.254/24"]
        routes:
            - route:
                ip_dst: "10.99.0.0/24"
                ip_gw: "10.20.0.2"
    300:
        description: "300"
        faucet_vips: ["10.30.0.254/24"]
        routes:
            - route:
                ip_dst: "10.99.0.0/24"
                ip_gw: "10.30.0.3"
acls:
    pbr:
        - rule:
            vlan_vid: 100
            dl_type: 0x800
            nw_dst: "10.99.0.2"
            actions:
                allow: 1
                output:
                    swap_vid: 300
        - rule:
            vlan_vid: 100
            dl_type: 0x800
            nw_dst: "10.99.0.0/24"
            actions:
                allow: 1
                output:
                    swap_vid: 200
        - rule:
            actions:
                allow: 1
routers:
    router-100-200:
        vlans: [100, 200]
    router-100-300:
        vlans: [100, 300]
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 200
                description: "b2"
            %(port_3)d:
                native_vlan: 300
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Traffic to 10.99.0.0/24 follows the route; 10.99.0.2/32 is policy-routed."""
        # 10.99.0.1 is on b2, and 10.99.0.2 is on b3
        # we want to route 10.99.0.0/24 to b2, but we
        # want to PBR 10.99.0.2/32 to b3.
        first_host_ip = ipaddress.ip_interface(u'10.0.0.1/24')
        first_faucet_vip = ipaddress.ip_interface(u'10.0.0.254/24')
        second_host_ip = ipaddress.ip_interface(u'10.20.0.2/24')
        second_faucet_vip = ipaddress.ip_interface(u'10.20.0.254/24')
        third_host_ip = ipaddress.ip_interface(u'10.30.0.3/24')
        third_faucet_vip = ipaddress.ip_interface(u'10.30.0.254/24')
        first_host, second_host, third_host = self.net.hosts[:3]
        remote_ip = ipaddress.ip_interface(u'10.99.0.1/24')
        remote_ip2 = ipaddress.ip_interface(u'10.99.0.2/24')
        second_host.setIP(str(second_host_ip.ip), prefixLen=24)
        third_host.setIP(str(third_host_ip.ip), prefixLen=24)
        self.host_ipv4_alias(second_host, remote_ip)
        self.host_ipv4_alias(third_host, remote_ip2)
        self.add_host_route(first_host, remote_ip, first_faucet_vip.ip)
        self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
        self.add_host_route(third_host, first_host_ip, third_faucet_vip.ip)
        # ensure all nexthops resolved.
        self.one_ipv4_ping(first_host, first_faucet_vip.ip)
        self.one_ipv4_ping(second_host, second_faucet_vip.ip)
        self.one_ipv4_ping(third_host, third_faucet_vip.ip)
        self.wait_for_route_as_flow(
            second_host.MAC(), ipaddress.IPv4Network(u'10.99.0.0/24'), vlan_vid=200)
        self.wait_for_route_as_flow(
            third_host.MAC(), ipaddress.IPv4Network(u'10.99.0.0/24'), vlan_vid=300)
        # verify b1 can reach 10.99.0.1 and .2 on b2 and b3 respectively.
        self.one_ipv4_ping(first_host, remote_ip.ip)
        self.one_ipv4_ping(first_host, remote_ip2.ip)
class FaucetUntaggedMixedIPv4RouteTest(FaucetUntaggedTest):
    """Routing between two IPv4 subnets configured as VIPs on the same VLAN."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["172.16.0.254/24", "10.0.0.254/24"]
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Hosts on different subnets of the same VLAN can route via the VIPs."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_net = ipaddress.ip_interface(u'10.0.0.1/24')
        second_host_net = ipaddress.ip_interface(u'172.16.0.1/24')
        second_host.setIP(str(second_host_net.ip), prefixLen=24)
        self.one_ipv4_ping(first_host, self.FAUCET_VIPV4.ip)
        self.one_ipv4_ping(second_host, self.FAUCET_VIPV4_2.ip)
        self.add_host_route(
            first_host, second_host_net, self.FAUCET_VIPV4.ip)
        self.add_host_route(
            second_host, first_host_net, self.FAUCET_VIPV4_2.ip)
        self.one_ipv4_ping(first_host, second_host_net.ip)
        self.one_ipv4_ping(second_host, first_host_net.ip)
class FaucetUntaggedMixedIPv6RouteTest(FaucetUntaggedTest):
    """Routing between two IPv6 subnets configured as VIPs on the same VLAN."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/64", "fc01::1:254/64"]
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Hosts on different IPv6 subnets of the same VLAN can route via the VIPs."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_net = ipaddress.ip_interface(u'fc00::1:1/64')
        second_host_net = ipaddress.ip_interface(u'fc01::1:1/64')
        self.add_host_ipv6_address(first_host, first_host_net)
        self.one_ipv6_ping(first_host, self.FAUCET_VIPV6.ip)
        self.add_host_ipv6_address(second_host, second_host_net)
        self.one_ipv6_ping(second_host, self.FAUCET_VIPV6_2.ip)
        self.add_host_route(
            first_host, second_host_net, self.FAUCET_VIPV6.ip)
        self.add_host_route(
            second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
        self.one_ipv6_ping(first_host, second_host_net.ip)
        self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedBGPIPv6DefaultRouteTest(FaucetUntaggedTest):
    """An IPv6 default route learned via BGP (exabgp) must be installed and used."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
        bgp_port: %(bgp_port)d
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_addresses: ["::1"]
        bgp_neighbor_as: 2
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    # Advertise a default route with first_host as nexthop.
    exabgp_peer_conf = """
    static {
      route ::/0 next-hop fc00::1:1 local-preference 100;
    }
"""
    exabgp_log = None

    def pre_start_net(self):
        """Start exabgp peering with FAUCET before the net comes up."""
        exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Traffic to an off-subnet alias must follow the BGP default route."""
        first_host, second_host = self.net.hosts[:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        first_host_alias_ip = ipaddress.ip_interface(u'fc00::50:1/112')
        first_host_alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(first_host_alias_ip.ip))
        self.add_host_ipv6_address(first_host, first_host_alias_ip)
        self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        self.add_host_route(
            second_host, first_host_alias_host_ip, self.FAUCET_VIPV6.ip)
        self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
        self.one_ipv6_controller_ping(first_host)
class FaucetUntaggedBGPIPv6RouteTest(FaucetUntaggedTest):
    """IPv6 routes learned via BGP must be installed; invalid routes rejected."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
        bgp_port: %(bgp_port)d
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_addresses: ["::1"]
        bgp_neighbor_as: 2
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    # The last two routes are deliberately invalid: a nexthop that is the
    # FAUCET VIP itself, and a nexthop on a non-connected network.
    exabgp_peer_conf = """
    static {
      route fc00::10:1/112 next-hop fc00::1:1 local-preference 100;
      route fc00::20:1/112 next-hop fc00::1:2 local-preference 100;
      route fc00::30:1/112 next-hop fc00::1:2 local-preference 100;
      route fc00::40:1/112 next-hop fc00::1:254;
      route fc00::50:1/112 next-hop fc00::2:2;
    }
"""
    exabgp_log = None

    def pre_start_net(self):
        """Start exabgp peering with FAUCET before the net comes up."""
        exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Valid BGP routes form a routing mesh; invalid routes are logged and dropped."""
        first_host, second_host = self.net.hosts[:2]
        self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        self.verify_invalid_bgp_route('fc00::40:1/112 cannot be us')
        self.verify_invalid_bgp_route('fc00::50:1/112 is not a connected network')
        self.verify_ipv6_routing_mesh()
        self.flap_all_switch_ports()
        self.verify_ipv6_routing_mesh()
        for host in first_host, second_host:
            self.one_ipv6_controller_ping(host)
class FaucetUntaggedSameVlanIPv6RouteTest(FaucetUntaggedTest):
    """Routing between two IPv6 subnets on the same VLAN via per-subnet VIPs."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::10:1/112", "fc00::20:1/112"]
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::10:2"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::20:2"
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Each host can reach the other's subnet and the other subnet's VIP."""
        first_host, second_host = self.net.hosts[:2]
        first_host_ip = ipaddress.ip_interface(u'fc00::10:2/112')
        first_host_ctrl_ip = ipaddress.ip_address(u'fc00::10:1')
        second_host_ip = ipaddress.ip_interface(u'fc00::20:2/112')
        second_host_ctrl_ip = ipaddress.ip_address(u'fc00::20:1')
        self.add_host_ipv6_address(first_host, first_host_ip)
        self.add_host_ipv6_address(second_host, second_host_ip)
        self.add_host_route(
            first_host, second_host_ip, first_host_ctrl_ip)
        self.add_host_route(
            second_host, first_host_ip, second_host_ctrl_ip)
        self.wait_for_route_as_flow(
            first_host.MAC(), first_host_ip.network)
        self.wait_for_route_as_flow(
            second_host.MAC(), second_host_ip.network)
        self.one_ipv6_ping(first_host, second_host_ip.ip)
        self.one_ipv6_ping(first_host, second_host_ctrl_ip)
        self.one_ipv6_ping(second_host, first_host_ip.ip)
        self.one_ipv6_ping(second_host, first_host_ctrl_ip)
class FaucetUntaggedIPv6RouteTest(FaucetUntaggedTest):
    """Static IPv6 routes must be redistributed into BGP to an exabgp peer."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
        bgp_port: %(bgp_port)d
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_addresses: ["::1"]
        bgp_neighbor_as: 2
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::1:1"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::1:2"
            - route:
                ip_dst: "fc00::30:0/112"
                ip_gw: "fc00::1:2"
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""
    exabgp_log = None

    def pre_start_net(self):
        """Start a passive exabgp peer before the net comes up."""
        exabgp_conf = self.get_exabgp_conf('::1')
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Routing mesh works, survives port flaps, and routes appear in BGP updates."""
        self.verify_ipv6_routing_mesh()
        second_host = self.net.hosts[1]
        self.flap_all_switch_ports()
        self.wait_for_route_as_flow(
            second_host.MAC(), ipaddress.IPv6Network(u'fc00::30:0/112'))
        self.verify_ipv6_routing_mesh()
        self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
            0)
        updates = self.exabgp_updates(self.exabgp_log)
        # Use unittest assertions rather than bare assert statements:
        # bare asserts are silently stripped when run under python -O.
        self.assertTrue(re.search('fc00::1:0/112 next-hop fc00::1:254', updates))
        self.assertTrue(re.search('fc00::10:0/112 next-hop fc00::1:1', updates))
        self.assertTrue(re.search('fc00::20:0/112 next-hop fc00::1:2', updates))
        self.assertTrue(re.search('fc00::30:0/112 next-hop fc00::1:2', updates))
class FaucetTaggedIPv6RouteTest(FaucetTaggedTest):
    """Test basic IPv6 routing without BGP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["fc00::1:254/112"]
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::1:1"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::1:2"
"""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_tagged(self):
        """Test IPv6 routing works."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_ip = ipaddress.ip_interface(u'fc00::1:1/112')
        second_host_ip = ipaddress.ip_interface(u'fc00::1:2/112')
        first_host_routed_ip = ipaddress.ip_interface(u'fc00::10:1/112')
        second_host_routed_ip = ipaddress.ip_interface(u'fc00::20:1/112')
        # Routing must keep working while host MACs are repeatedly swapped.
        for _ in range(5):
            self.verify_ipv6_routing_pair(
                first_host, first_host_ip, first_host_routed_ip,
                second_host, second_host_ip, second_host_routed_ip)
            self.swap_host_macs(first_host, second_host)
class FaucetStringOfDPTest(FaucetTest):
    """Base class for tests of multiple FAUCET DPs joined in a string topology."""

    NUM_HOSTS = 4
    VID = 100
    dpids = None

    def build_net(self, stack=False, n_dps=1,
                  n_tagged=0, tagged_vid=100,
                  n_untagged=0, untagged_vid=100,
                  include=None, include_optional=None, acls=None, acl_in_dp=None):
        """Set up Mininet and Faucet for the given topology."""
        # None defaults avoid the shared-mutable-default-argument pitfall;
        # behavior for callers passing nothing is unchanged.
        if include is None:
            include = []
        if include_optional is None:
            include_optional = []
        if acls is None:
            acls = {}
        if acl_in_dp is None:
            acl_in_dp = {}
        self.dpids = [str(self.rand_dpid()) for _ in range(n_dps)]
        self.dpid = self.dpids[0]
        self.CONFIG = self.get_config(
            self.dpids,
            stack,
            self.hardware,
            self.debug_log_path,
            n_tagged,
            tagged_vid,
            n_untagged,
            untagged_vid,
            include,
            include_optional,
            acls,
            acl_in_dp,
        )
        # Write with a context manager so the file handle is closed promptly
        # (the previous open().write() leaked the handle until GC).
        with open(self.faucet_config_path, 'w') as config_file:
            config_file.write(self.CONFIG)
        self.topo = faucet_mininet_test_topo.FaucetStringOfDPSwitchTopo(
            self.ports_sock,
            dpids=self.dpids,
            n_tagged=n_tagged,
            tagged_vid=tagged_vid,
            n_untagged=n_untagged,
            test_name=self._test_name(),
        )

    def get_config(self, dpids=None, stack=False, hardware=None, ofchannel_log=None,
                   n_tagged=0, tagged_vid=0, n_untagged=0, untagged_vid=0,
                   include=None, include_optional=None, acls=None, acl_in_dp=None):
        """Build a complete Faucet configuration for each datapath, using the given topology."""
        if dpids is None:
            dpids = []
        if include is None:
            include = []
        if include_optional is None:
            include_optional = []
        if acls is None:
            acls = {}
        if acl_in_dp is None:
            acl_in_dp = {}

        def dp_name(i):
            # DP names are 1-based: faucet-1, faucet-2, ...
            return 'faucet-%i' % (i + 1)

        def add_vlans(n_tagged, tagged_vid, n_untagged, untagged_vid):
            # VLAN section shared by all DPs.
            vlans_config = {}
            if n_untagged:
                vlans_config[untagged_vid] = {
                    'description': 'untagged',
                }
            if ((n_tagged and not n_untagged) or
                    (n_tagged and n_untagged and tagged_vid != untagged_vid)):
                vlans_config[tagged_vid] = {
                    'description': 'tagged',
                }
            return vlans_config

        def add_acl_to_port(name, port, interfaces_config):
            # Attach an input ACL only where the test requested one.
            if name in acl_in_dp and port in acl_in_dp[name]:
                interfaces_config[port]['acl_in'] = acl_in_dp[name][port]

        def add_dp_to_dp_ports(dp_config, port, interfaces_config, i,
                               dpid_count, stack, n_tagged, tagged_vid,
                               n_untagged, untagged_vid):
            # Add configuration for the switch-to-switch links
            # (0 for a single switch, 1 for an end switch, 2 for middle switches).
            first_dp = i == 0
            second_dp = i == 1
            last_dp = i == dpid_count - 1
            end_dp = first_dp or last_dp
            num_switch_links = 0
            if dpid_count > 1:
                if end_dp:
                    num_switch_links = 1
                else:
                    num_switch_links = 2
            if stack and first_dp:
                # The first DP in the string is the stack root.
                dp_config['stack'] = {
                    'priority': 1
                }
            first_stack_port = port
            for stack_dp_port in range(num_switch_links):
                tagged_vlans = None
                peer_dp = None
                if stack_dp_port == 0:
                    if first_dp:
                        peer_dp = i + 1
                    else:
                        peer_dp = i - 1
                    if first_dp or second_dp:
                        peer_port = first_stack_port
                    else:
                        peer_port = first_stack_port + 1
                else:
                    peer_dp = i + 1
                    peer_port = first_stack_port
                description = 'to %s' % dp_name(peer_dp)
                interfaces_config[port] = {
                    'description': description,
                }
                if stack:
                    interfaces_config[port]['stack'] = {
                        'dp': dp_name(peer_dp),
                        'port': peer_port,
                    }
                else:
                    # Non-stack inter-switch links trunk all VLANs in use.
                    if n_tagged and n_untagged and n_tagged != n_untagged:
                        tagged_vlans = [tagged_vid, untagged_vid]
                    elif ((n_tagged and not n_untagged) or
                          (n_tagged and n_untagged and tagged_vid == untagged_vid)):
                        tagged_vlans = [tagged_vid]
                    elif n_untagged and not n_tagged:
                        tagged_vlans = [untagged_vid]
                    if tagged_vlans:
                        interfaces_config[port]['tagged_vlans'] = tagged_vlans
                add_acl_to_port(name, port, interfaces_config)
                port += 1

        def add_dp(name, dpid, i, dpid_count, stack,
                   n_tagged, tagged_vid, n_untagged, untagged_vid):
            # One DP's config: host ports first, then inter-switch ports.
            dpid_ofchannel_log = ofchannel_log + str(i)
            dp_config = {
                'dp_id': int(dpid),
                'hardware': hardware,
                'ofchannel_log': dpid_ofchannel_log,
                'interfaces': {},
            }
            interfaces_config = dp_config['interfaces']
            port = 1
            for _ in range(n_tagged):
                interfaces_config[port] = {
                    'tagged_vlans': [tagged_vid],
                    'description': 'b%i' % port,
                }
                add_acl_to_port(name, port, interfaces_config)
                port += 1
            for _ in range(n_untagged):
                interfaces_config[port] = {
                    'native_vlan': untagged_vid,
                    'description': 'b%i' % port,
                }
                add_acl_to_port(name, port, interfaces_config)
                port += 1
            add_dp_to_dp_ports(
                dp_config, port, interfaces_config, i, dpid_count, stack,
                n_tagged, tagged_vid, n_untagged, untagged_vid)
            return dp_config

        config = {'version': 2}
        if include:
            config['include'] = list(include)
        if include_optional:
            config['include-optional'] = list(include_optional)
        config['vlans'] = add_vlans(
            n_tagged, tagged_vid, n_untagged, untagged_vid)
        config['acls'] = acls.copy()
        dpid_count = len(dpids)
        config['dps'] = {}
        for i, dpid in enumerate(dpids):
            name = dp_name(i)
            config['dps'][name] = add_dp(
                name, dpid, i, dpid_count, stack,
                n_tagged, tagged_vid, n_untagged, untagged_vid)
        return yaml.dump(config, default_flow_style=False)

    def matching_flow_present(self, match, timeout=10, table_id=None,
                              actions=None, match_exact=None):
        """Find the first DP that has a flow that matches match."""
        for dpid in self.dpids:
            if self.matching_flow_present_on_dpid(
                    dpid, match, timeout=timeout,
                    table_id=table_id, actions=actions,
                    match_exact=match_exact):
                return True
        return False

    def eventually_all_reachable(self, retries=3):
        """Allow time for distributed learning to happen."""
        for _ in range(retries):
            loss = self.net.pingAll()
            if loss == 0:
                break
        self.assertEquals(0, loss)
class FaucetStringOfDPUntaggedTest(FaucetStringOfDPTest):
    """Untagged hosts on a string of datapaths can all reach each other."""

    NUM_DPS = 3

    def setUp(self):
        super(FaucetStringOfDPUntaggedTest, self).setUp()
        self.build_net(
            n_dps=self.NUM_DPS, n_untagged=self.NUM_HOSTS,
            untagged_vid=self.VID)
        self.start_net()

    def test_untagged(self):
        """All untagged hosts in multi switch topology can reach one another."""
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(0, self.net.pingAll())
class FaucetStringOfDPTaggedTest(FaucetStringOfDPTest):
    """Tagged hosts on a string of datapaths can all reach each other."""

    NUM_DPS = 3

    def setUp(self):
        super(FaucetStringOfDPTaggedTest, self).setUp()
        self.build_net(
            n_dps=self.NUM_DPS, n_tagged=self.NUM_HOSTS,
            tagged_vid=self.VID)
        self.start_net()

    def test_tagged(self):
        """All tagged hosts in multi switch topology can reach one another."""
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(0, self.net.pingAll())
class FaucetStackStringOfDPTaggedTest(FaucetStringOfDPTest):
    """Test topology of stacked datapaths with tagged hosts."""

    NUM_DPS = 3

    def setUp(self):
        super(FaucetStackStringOfDPTaggedTest, self).setUp()
        # Build a linear string of stacked DPs with all hosts tagged on VID.
        self.build_net(
            stack=True,
            n_dps=self.NUM_DPS,
            n_tagged=self.NUM_HOSTS,
            tagged_vid=self.VID)
        self.start_net()

    def test_tagged(self):
        """All tagged hosts in stack topology can reach each other."""
        # Stacked learning is distributed, so allow retries.
        self.eventually_all_reachable()
class FaucetStackStringOfDPUntaggedTest(FaucetStringOfDPTest):
    """Test topology of stacked datapaths with untagged hosts."""
    # NOTE: docstring previously said "tagged hosts" — this class builds
    # an untagged topology.

    NUM_DPS = 2
    NUM_HOSTS = 2

    def setUp(self):
        super(FaucetStackStringOfDPUntaggedTest, self).setUp()
        self.build_net(
            stack=True,
            n_dps=self.NUM_DPS,
            n_untagged=self.NUM_HOSTS,
            untagged_vid=self.VID)
        self.start_net()

    def test_untagged(self):
        """All untagged hosts in stack topology can reach each other."""
        self.eventually_all_reachable()
class FaucetStringOfDPACLOverrideTest(FaucetStringOfDPTest):
    """ACLs in an include-optional file override the built-in ACLs on HUP."""

    NUM_DPS = 1
    NUM_HOSTS = 2

    # ACL rules which will get overridden: allow TCP/5001, block TCP/5002.
    # 0x800 (IPv4 ethertype) written as a hex literal rather than
    # int('0x800', 16).
    ACLS = {
        1: [
            {'rule': {
                'dl_type': 0x800,
                'nw_proto': 6,
                'tp_dst': 5001,
                'actions': {
                    'allow': 1,
                },
            }},
            {'rule': {
                'dl_type': 0x800,
                'nw_proto': 6,
                'tp_dst': 5002,
                'actions': {
                    'allow': 0,
                },
            }},
            {'rule': {
                'actions': {
                    'allow': 1,
                },
            }},
        ],
    }

    # ACL rules which get put into an include-optional
    # file, then reloaded into FAUCET (5001/5002 allow flags inverted).
    ACLS_OVERRIDE = {
        1: [
            {'rule': {
                'dl_type': 0x800,
                'nw_proto': 6,
                'tp_dst': 5001,
                'actions': {
                    'allow': 0,
                },
            }},
            {'rule': {
                'dl_type': 0x800,
                'nw_proto': 6,
                'tp_dst': 5002,
                'actions': {
                    'allow': 1,
                },
            }},
            {'rule': {
                'actions': {
                    'allow': 1,
                },
            }},
        ],
    }

    # DP-to-acl_in port mapping.
    ACL_IN_DP = {
        'faucet-1': {
            # Port 1, acl_in = 1
            1: 1,
        },
    }

    def setUp(self):
        super(FaucetStringOfDPACLOverrideTest, self).setUp()
        self.acls_config = os.path.join(self.tmpdir, 'acls.yaml')
        self.build_net(
            n_dps=self.NUM_DPS,
            n_untagged=self.NUM_HOSTS,
            untagged_vid=self.VID,
            include_optional=[self.acls_config],
            acls=self.ACLS,
            acl_in_dp=self.ACL_IN_DP,
        )
        self.start_net()

    def _write_acls_override(self):
        """Write the override ACLs, closing (and flushing) the file before
        FAUCET is HUPed — the original leaked the open file handle."""
        with open(self.acls_config, 'w') as acls_config_file:
            acls_config_file.write(self.get_config(acls=self.ACLS_OVERRIDE))

    def test_port5001_blocked(self):
        """Test that TCP port 5001 is blocked after the override."""
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        self.verify_tp_dst_notblocked(5001, first_host, second_host)
        self._write_acls_override()
        self.verify_hup_faucet()
        self.verify_tp_dst_blocked(5001, first_host, second_host)

    def test_port5002_notblocked(self):
        """Test that TCP port 5002 is not blocked after the override."""
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        self.verify_tp_dst_blocked(5002, first_host, second_host)
        self._write_acls_override()
        self.verify_hup_faucet()
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetGroupTableTest(FaucetUntaggedTest):
    """Untagged connectivity with OpenFlow group-table flooding enabled."""

    CONFIG = """
        group_table: True
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_group_exist(self):
        """The broadcast flood flow must reference group ID 100."""
        self.assertEqual(
            100,
            self.get_group_id_for_matching_flow(
                {u'dl_vlan': u'100', u'dl_dst': u'ff:ff:ff:ff:ff:ff'},
                table_id=self.FLOOD_TABLE))
class FaucetTaggedGroupTableTest(FaucetTaggedTest):
    """Tagged connectivity with OpenFlow group-table flooding enabled."""

    CONFIG = """
        group_table: True
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                description: "b1"
            %(port_2)d:
                tagged_vlans: [100]
                description: "b2"
            %(port_3)d:
                tagged_vlans: [100]
                description: "b3"
            %(port_4)d:
                tagged_vlans: [100]
                description: "b4"
"""

    def test_group_exist(self):
        """The broadcast flood flow must reference group ID 100."""
        self.assertEqual(
            100,
            self.get_group_id_for_matching_flow(
                {u'dl_vlan': u'100', u'dl_dst': u'ff:ff:ff:ff:ff:ff'},
                table_id=self.FLOOD_TABLE))
class FaucetGroupTableUntaggedIPv4RouteTest(FaucetUntaggedTest):
    """IPv4 static routing between untagged hosts with group_table_routing."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
        routes:
            - route:
                ip_dst: "10.0.1.0/24"
                ip_gw: "10.0.0.1"
            - route:
                ip_dst: "10.0.2.0/24"
                ip_gw: "10.0.0.2"
            - route:
                ip_dst: "10.0.3.0/24"
                ip_gw: "10.0.0.2"
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        group_table_routing: True
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Routed connectivity survives a host MAC swap (re-resolution)."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_routed_ip = ipaddress.ip_interface(u'10.0.1.1/24')
        second_host_routed_ip = ipaddress.ip_interface(u'10.0.2.1/24')
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip,
            with_group_table=True)
        # Swap MACs to force FAUCET to re-resolve the gateways.
        self.swap_host_macs(first_host, second_host)
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip,
            with_group_table=True)
class FaucetGroupUntaggedIPv6RouteTest(FaucetUntaggedTest):
    """IPv6 static routing between untagged hosts with group_table_routing."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::1:1"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::1:2"
            - route:
                ip_dst: "fc00::30:0/112"
                ip_gw: "fc00::1:2"
"""

    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        group_table_routing: True
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Routed IPv6 connectivity survives a host MAC swap."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_ip = ipaddress.ip_interface(u'fc00::1:1/112')
        second_host_ip = ipaddress.ip_interface(u'fc00::1:2/112')
        first_host_routed_ip = ipaddress.ip_interface(u'fc00::10:1/112')
        second_host_routed_ip = ipaddress.ip_interface(u'fc00::20:1/112')
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip,
            with_group_table=True)
        # Swap MACs to force FAUCET to re-resolve the gateways.
        self.swap_host_macs(first_host, second_host)
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip,
            with_group_table=True)
class FaucetEthSrcMaskTest(FaucetUntaggedTest):
    """An ACL with a masked eth_src match admits only matching source MACs."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            eth_src: 0e:0d:00:00:00:00/ff:ff:00:00:00:00
            actions:
                allow: 1
        - rule:
            actions:
                allow: 0
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """A MAC matching the first two masked octets can ping through."""
        first_host, second_host = self.net.hosts[0:2]
        # 0e:0d:... matches the ff:ff:00:00:00:00 mask on the allow rule.
        first_host.setMAC('0e:0d:00:00:00:99')
        self.assertEqual(0, self.net.ping((first_host, second_host)))
        # The masked ACL flow must have actually matched packets.
        self.wait_nonzero_packet_count_flow(
            {u'dl_src': u'0e:0d:00:00:00:00/ff:ff:00:00:00:00'},
            table_id=self.PORT_ACL_TABLE)
class FaucetDestRewriteTest(FaucetUntaggedTest):
    """An ACL output action rewrites the destination MAC of matching frames."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_dst: "00:00:00:00:00:02"
            actions:
                allow: 1
                output:
                    dl_dst: "00:00:00:00:00:03"
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def test_untagged(self):
        """Frames sent to :02 arrive with the rewritten :03 destination."""
        first_host, second_host = self.net.hosts[0:2]
        # we expect to see the rewritten mac address.
        tcpdump_filter = ('icmp and ether dst 00:00:00:00:00:03')
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                # Static ARP entry so the ping targets :02 without resolving.
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '00:00:00:00:00:02')),
                lambda: first_host.cmd('ping -c1 %s' % second_host.IP())])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))

    def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host):
        """Verify rewritten frames are switched to rewrite_host's port only.

        overridden_host takes the :02 MAC (matched and rewritten away);
        rewrite_host takes the :03 MAC (the rewrite target). tcpdump_host
        is where we listen; the rewritten traffic must NOT appear there
        when it is the overridden host.
        """
        overridden_host.setMAC('00:00:00:00:00:02')
        rewrite_host.setMAC('00:00:00:00:00:03')
        rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC()))
        # Prime learning so FAUCET installs an eth_dst flow for :03.
        rewrite_host.cmd('ping -c1 %s' % overridden_host.IP())
        self.wait_until_matching_flow(
            {u'dl_dst': u'00:00:00:00:00:03'},
            table_id=self.ETH_DST_TABLE,
            actions=[u'OUTPUT:%u' % self.port_map['port_3']])
        tcpdump_filter = ('icmp and ether src %s and ether dst %s' % (
            source_host.MAC(), rewrite_host.MAC()))
        tcpdump_txt = self.tcpdump_helper(
            tcpdump_host, tcpdump_filter, [
                lambda: source_host.cmd(
                    'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())),
                # this will fail if no reply
                lambda: self.one_ipv4_ping(
                    source_host, rewrite_host.IP(), require_host_learned=False)])
        # ping from h1 to h2.mac should appear in third host, and not second host, as
        # the acl should rewrite the dst mac.
        self.assertFalse(re.search(
            '%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt))

    def test_switching(self):
        """Tests that a acl can rewrite the destination mac address,
           and the packet will only go out the port of the new mac.
           (Continues through faucet pipeline)
        """
        source_host, overridden_host, rewrite_host = self.net.hosts[0:3]
        self.verify_dest_rewrite(
            source_host, overridden_host, rewrite_host, overridden_host)
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutTest(FaucetUntaggedTest):
    """Exercise host learning expiry driven by flow idle timeouts."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""

    CONFIG = """
        timeout: 1
        use_idle_timeout: true
        interfaces:
            %(port_1)d:
                native_vlan: 100
                description: "b1"
            %(port_2)d:
                native_vlan: 100
                description: "b2"
            %(port_3)d:
                native_vlan: 100
                description: "b3"
            %(port_4)d:
                native_vlan: 100
                description: "b4"
"""

    def wait_for_host_removed(self, host, in_port, timeout=5):
        """Poll until host is no longer learned on in_port, else fail."""
        for _ in range(timeout):
            if not self.host_learned(host, in_port=in_port, timeout=1):
                return
        # Bug fix: the original failure message referenced an undefined
        # name `match`, so this path raised NameError instead of failing
        # with a useful message.
        self.fail('host %s still learned on port %u' % (host.MAC(), in_port))

    def wait_for_flowremoved_msg(self, src_mac=None, dst_mac=None, timeout=30):
        """Wait for an OFPFlowRemoved for the given MAC in the ofchannel logs."""
        pattern = "OFPFlowRemoved"
        mac = None
        if src_mac:
            pattern = "OFPFlowRemoved(.*)'eth_src': '%s'" % src_mac
            mac = src_mac
        if dst_mac:
            pattern = "OFPFlowRemoved(.*)'eth_dst': '%s'" % dst_mac
            mac = dst_mac
        for _ in range(timeout):
            for _, debug_log in self._get_ofchannel_logs():
                # Close each log handle instead of leaking it via
                # open(...).read() every polling iteration.
                with open(debug_log) as debug_log_file:
                    if re.search(pattern, debug_log_file.read()):
                        return
            time.sleep(1)
        self.fail('Not received OFPFlowRemoved for host %s' % mac)

    def wait_for_host_log_msg(self, host_mac, msg, timeout=15):
        """Wait for FAUCET to log msg about host_mac."""
        controller = self._get_controller()
        count = 0
        for _ in range(timeout):
            count = controller.cmd('grep -c "%s %s" %s' % (
                msg, host_mac, self.env['faucet']['FAUCET_LOG']))
            if int(count) != 0:
                break
            time.sleep(1)
        self.assertGreaterEqual(
            int(count), 1,
            'log msg "%s" for host %s not found' % (msg, host_mac))

    def test_untagged(self):
        """After a MAC swap, old flows expire and hosts are re-learned."""
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[:2]
        self.swap_host_macs(first_host, second_host)
        for host, port in (
                (first_host, self.port_map['port_1']),
                (second_host, self.port_map['port_2'])):
            self.wait_for_flowremoved_msg(src_mac=host.MAC())
            self.require_host_learned(host, in_port=int(port))
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutRuleExpiredTest(FaucetWithUseIdleTimeoutTest):
    """Actively-sending hosts are refreshed; idle hosts expire."""

    def test_untagged(self):
        """Host that is actively sending should have its dst rule renewed as the
           rule expires. Host that is not sending expires as usual.
        """
        self.ping_all_when_learned()
        first_host, second_host, third_host, fourth_host = self.net.hosts
        # Keep first_host transmitting (from an alias IP) for 2 minutes.
        self.host_ipv4_alias(first_host, ipaddress.ip_interface(u'10.99.99.1/24'))
        first_host.cmd('arp -s %s %s' % (second_host.IP(), second_host.MAC()))
        first_host.cmd('timeout 120s ping -I 10.99.99.1 %s &' % second_host.IP())
        # Silence the other hosts so their learned entries can expire.
        for host in (second_host, third_host, fourth_host):
            self.host_drop_all_ips(host)
        self.wait_for_host_log_msg(first_host.MAC(), 'refreshing host')
        self.assertTrue(self.host_learned(
            first_host, in_port=int(self.port_map['port_1'])))
        for host, port in (
                (second_host, self.port_map['port_2']),
                (third_host, self.port_map['port_3']),
                (fourth_host, self.port_map['port_4'])):
            self.wait_for_flowremoved_msg(src_mac=host.MAC())
            self.wait_for_host_log_msg(host.MAC(), 'expiring host')
            self.wait_for_host_removed(host, in_port=int(port))
|
# -*- coding: utf-8 -*-
import json
from django.http import HttpResponse
from django.views.generic.edit import CreateView
from forms import FeedbackForm
from models import Feedback
from backends.base import BackendAggregator
backend = BackendAggregator()
class FeedbackView(CreateView):
    """Render and process the feedback form, answering AJAX posts with JSON."""

    template_name = 'feedback.html'
    model = Feedback
    form_class = FeedbackForm

    def get_context_data(self, *args, **kwargs):
        """Expose the bound form under the 'feedback_form' key as well."""
        context = super(FeedbackView, self).get_context_data(*args, **kwargs)
        context['feedback_form'] = context['form']
        return context

    def form_invalid(self, form):
        """Return validation errors as JSON for AJAX requests."""
        if not self.request.is_ajax():
            return super(FeedbackView, self).form_invalid(form)
        return self.render_to_json_response(form.errors)

    def form_valid(self, form):
        """Save the feedback, forward it to the backends, reply 'OK' for AJAX."""
        if not self.request.is_ajax():
            return super(FeedbackView, self).form_valid(form)
        instance = form.save()
        backend.post(instance)
        return self.render_to_json_response('OK')

    def render_to_json_response(self, context, **kwargs):
        """Serialize context to JSON and wrap it in an HttpResponse."""
        payload = json.dumps(context)
        kwargs['content_type'] = 'application/json'
        return HttpResponse(payload, **kwargs)
Add logger
# -*- coding: utf-8 -*-
import json
import logging
from django.http import HttpResponse
from django.views.generic.edit import CreateView
from forms import FeedbackForm
from models import Feedback
from backends.base import BackendAggregator
backend = BackendAggregator()
logger = logging.getLogger('djfeedback.' + __name__)
class FeedbackView(CreateView):
    """Render and process the feedback form, answering AJAX posts with JSON."""

    template_name = 'feedback.html'
    model = Feedback
    form_class = FeedbackForm

    def get_context_data(self, *args, **kwargs):
        """Expose the bound form under the 'feedback_form' key as well."""
        context = super(FeedbackView, self).get_context_data(*args, **kwargs)
        context['feedback_form'] = context['form']
        return context

    def form_invalid(self, form):
        """Return validation errors as JSON for AJAX requests."""
        if self.request.is_ajax():
            # The module-level logger was defined but never used; record
            # rejected submissions (lazy %-formatting for cheap logging).
            logger.info('feedback form invalid: %s', form.errors)
            return self.render_to_json_response(form.errors)
        return super(FeedbackView, self).form_invalid(form)

    def form_valid(self, form):
        """Save the feedback, forward it to the backends, reply 'OK' for AJAX."""
        if self.request.is_ajax():
            instance = form.save()
            logger.info('feedback %s saved; posting to backends', instance.pk)
            backend.post(instance)
            return self.render_to_json_response('OK')
        return super(FeedbackView, self).form_valid(form)

    def render_to_json_response(self, context, **kwargs):
        """Serialize context to JSON and wrap it in an HttpResponse."""
        data = json.dumps(context)
        kwargs['content_type'] = 'application/json'
        return HttpResponse(data, **kwargs)
|
from django.views import generic
from .models import Gallery, GalleryImage
class IndexView(generic.ListView):
    """List all galleries."""

    model = Gallery
    template_name = "gallery/index.html"
    # Template iterates the galleries under this name.
    context_object_name = "galleries"
class GalleryView(generic.DetailView):
    """Detail page for a single gallery."""

    model = Gallery
    template_name = "gallery/gallery.html"
class GalleryImageView(generic.DetailView):
    """Detail page for one image, scoped to the gallery named in the URL."""

    model = GalleryImage
    template_name = "gallery/gallery_image.html"

    def get_queryset(self):
        """Restrict lookup to images belonging to the URL's gallery."""
        gallery_slug = self.kwargs.get("gallery_slug")
        return super().get_queryset().filter(gallery__slug=gallery_slug)

    def get_context_data(self, **kwargs):
        """Add the previous/next images (by sort order) to the context."""
        context = super().get_context_data(**kwargs)
        gallery, sort_order = self.object.gallery, self.object.sort_order
        context["previous"] = get_previous_image(gallery, sort_order)
        context["next"] = get_next_image(gallery, sort_order)
        return context
def get_previous_image(gallery, sort_order):
    """Return the image immediately before sort_order in gallery, or None."""
    earlier = gallery.images.filter(sort_order__lt=sort_order)
    try:
        return earlier.order_by("-sort_order")[0]
    except IndexError:
        # Already at the first image.
        return None
def get_next_image(gallery, sort_order):
    """Return the image immediately after sort_order in gallery, or None."""
    later = gallery.images.filter(sort_order__gt=sort_order)
    try:
        return later.order_by("sort_order")[0]
    except IndexError:
        # Already at the last image.
        return None
Remove duplication
from django.views import generic
from .models import Gallery, GalleryImage
class IndexView(generic.ListView):
    """List all galleries."""

    model = Gallery
    template_name = "gallery/index.html"
    # Template iterates the galleries under this name.
    context_object_name = "galleries"
class GalleryView(generic.DetailView):
    """Detail page for a single gallery."""

    model = Gallery
    template_name = "gallery/gallery.html"
class GalleryImageView(generic.DetailView):
    """Detail page for one image, scoped to the gallery named in the URL."""

    model = GalleryImage
    template_name = "gallery/gallery_image.html"

    def get_queryset(self):
        """Restrict lookup to images belonging to the URL's gallery."""
        gallery_slug = self.kwargs.get("gallery_slug")
        return super().get_queryset().filter(gallery__slug=gallery_slug)

    def get_context_data(self, **kwargs):
        """Add the previous/next images (by sort order) to the context."""
        context = super().get_context_data(**kwargs)
        gallery, sort_order = self.object.gallery, self.object.sort_order
        context["previous"] = get_previous_image(gallery, sort_order)
        context["next"] = get_next_image(gallery, sort_order)
        return context
def get_previous_image(gallery, sort_order):
    """Return the image immediately before sort_order in gallery, or None."""
    return get_image_from_filtered_sorted_query_set(
        gallery, {"sort_order__lt": sort_order}, "-sort_order"
    )
def get_next_image(gallery, sort_order):
    """Return the image immediately after sort_order in gallery, or None."""
    return get_image_from_filtered_sorted_query_set(
        gallery, {"sort_order__gt": sort_order}, "sort_order"
    )
def get_image_from_filtered_sorted_query_set(gallery, filter_dict, sort_string):
    """Return the first of gallery's images matching filter_dict when ordered
    by sort_string, or None if nothing matches.

    The ** in front of the dict converts it to keyword args.
    """
    # QuerySet.first() returns None for an empty result, replacing the
    # manual [0]-indexing plus IndexError handling.
    return gallery.images.filter(**filter_dict).order_by(sort_string).first()
|
from __future__ import absolute_import, unicode_literals
import os
from django.utils.translation import ugettext_lazy as _
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "fb_browse"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.db.models.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
AUTHENTICATION_BACKENDS = (
"social.backends.steam.SteamOpenId",
"mezzanine.core.auth_backends.MezzanineBackend",
)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
# Placeholder database configuration; real credentials are expected to be
# supplied by local_settings.py, which is exec'd at the bottom of this module.
DATABASES = {
    "default": {
        # Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
        "ENGINE": "django.db.backends.",
        # DB name or path to database file if using sqlite3.
        "NAME": "",
        # Not used with sqlite3.
        "USER": "",
        # Not used with sqlite3.
        "PASSWORD": "",
        # Set to empty string for localhost. Not used with sqlite3.
        "HOST": "",
        # Set to empty string for default. Not used with sqlite3.
        "PORT": "",
    }
}
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
"mezzanine.twitter",
"mezzanine.accounts",
# "mezzanine.mobile",
"game_info",
"rest_framework",
"djangobb_forum",
"social.apps.django_app.default",
"donations",
"paypal.standard.ipn",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
"social.apps.django_app.context_processors.backends",
"social.apps.django_app.context_processors.login_redirect",
"donations.processors.donations",
"game_info.processors.servers",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
'djangobb_forum.middleware.LastLoginMiddleware',
'djangobb_forum.middleware.UsersOnline',
'djangobb_forum.middleware.TimezoneMiddleware',
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
"coverage",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
# Haystack settings
HAYSTACK_WHOOSH_PATH = os.path.join(PROJECT_ROOT, 'djangobb_index')
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'
}
}
DONATION_AMOUNTS = (
# Amount, Days of premium
(2, 7),
(5, 30),
(10, 90),
(17, 180),
)
# Target donation amount for sidebar
MONTHLY_DONATION_AMOUNT = 180
PREMIUM_GROUP_NAME = "SM_PREMIUM"
ACCOUNTS_PROFILE_FORM_EXCLUDE_FIELDS = ("first_name", "last_name")
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
# Optional per-machine overrides, kept out of version control. exec (rather
# than import) is used so local_settings sees and can rewrite everything
# defined in this module.
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
    # Close the file handle instead of leaking it via open(...).read().
    with open(f, "rb") as local_settings_file:
        exec(local_settings_file.read())
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
    from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
    # Mezzanine not installed (e.g. this module imported standalone from
    # the fabfile) — skip dynamic defaults.
    pass
else:
    # Rewrite module globals with Mezzanine's improved defaults.
    set_dynamic_settings(globals())
Send users to the front page after logging in / registering
# Django / Mezzanine project settings.
from __future__ import absolute_import, unicode_literals

import os

from django.utils.translation import ugettext_lazy as _

######################
# MEZZANINE SETTINGS #
######################

# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings

# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
#     ("Content", ("pages.Page", "blog.BlogPost",
#        "generic.ThreadedComment", (_("Media Library"), "fb_browse"),)),
#     ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
#     ("Users", ("auth.User", "auth.Group",)),
# )

# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
#     ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
#     ("comment_tags.recent_comments",),
#     ("mezzanine_tags.recent_actions",),
# )

# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.

# PAGE_MENU_TEMPLATES = (
#     (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
#     (2, _("Left-hand tree"), "pages/menus/tree.html"),
#     (3, _("Footer"), "pages/menus/footer.html"),
# )

# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
#     (
#         # Dotted path to field.
#         "mezzanine.blog.models.BlogPost.image",
#         # Dotted path to field class.
#         "somelib.fields.ImageField",
#         # Positional args for field class.
#         (_("Image"),),
#         # Keyword args for field class.
#         {"blank": True, "upload_to": "blog"},
#     ),
#     # Example of adding a field to *all* of Mezzanine's content types:
#     (
#         "mezzanine.pages.models.Page.another_field",
#         "IntegerField", # 'django.db.models.' is implied if path is omitted.
#         (_("Another name"),),
#         {"blank": True, "default": 1},
#     ),
# )

# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True

# If True, the django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False

########################
# MAIN DJANGO SETTINGS #
########################

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'

# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"

# Supported languages
LANGUAGES = (
    ('en', _('English')),
)

# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False

# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False

# Steam OpenID login first, then Mezzanine's standard auth backend.
AUTHENTICATION_BACKENDS = (
    "social.backends.steam.SteamOpenId",
    "mezzanine.core.auth_backends.MezzanineBackend",
)

# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644

#############
# DATABASES #
#############

# Empty by default; expected to be filled in by local_settings.py below.
DATABASES = {
    "default": {
        # Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
        "ENGINE": "django.db.backends.",
        # DB name or path to database file if using sqlite3.
        "NAME": "",
        # Not used with sqlite3.
        "USER": "",
        # Not used with sqlite3.
        "PASSWORD": "",
        # Set to empty string for localhost. Not used with sqlite3.
        "HOST": "",
        # Set to empty string for default. Not used with sqlite3.
        "PORT": "",
    }
}

#########
# PATHS #
#########

# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)

# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))

# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP

# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)

################
# APPLICATIONS #
################

INSTALLED_APPS = (
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.redirects",
    "django.contrib.sessions",
    "django.contrib.sites",
    "django.contrib.sitemaps",
    "django.contrib.staticfiles",
    "mezzanine.boot",
    "mezzanine.conf",
    "mezzanine.core",
    "mezzanine.generic",
    "mezzanine.pages",
    "mezzanine.blog",
    "mezzanine.forms",
    "mezzanine.galleries",
    "mezzanine.twitter",
    "mezzanine.accounts",
    # "mezzanine.mobile",
    "game_info",
    "rest_framework",
    "djangobb_forum",
    "social.apps.django_app.default",
    "donations",
    "paypal.standard.ipn",
)

# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.contrib.messages.context_processors.messages",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.static",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
    "django.core.context_processors.tz",
    "mezzanine.conf.context_processors.settings",
    "mezzanine.pages.context_processors.page",
    "social.apps.django_app.context_processors.backends",
    "social.apps.django_app.context_processors.login_redirect",
    "donations.processors.donations",
    "game_info.processors.servers",
)

# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
    "mezzanine.core.middleware.UpdateCacheMiddleware",
    'django.contrib.sessions.middleware.SessionMiddleware',
    # Uncomment if using internationalisation or localisation
    # 'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    "mezzanine.core.request.CurrentRequestMiddleware",
    "mezzanine.core.middleware.RedirectFallbackMiddleware",
    "mezzanine.core.middleware.TemplateForDeviceMiddleware",
    "mezzanine.core.middleware.TemplateForHostMiddleware",
    "mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
    "mezzanine.core.middleware.SitePermissionMiddleware",
    # Uncomment the following if using any of the SSL settings:
    # "mezzanine.core.middleware.SSLRedirectMiddleware",
    "mezzanine.pages.middleware.PageMiddleware",
    "mezzanine.core.middleware.FetchFromCacheMiddleware",
    'djangobb_forum.middleware.LastLoginMiddleware',
    'djangobb_forum.middleware.UsersOnline',
    'djangobb_forum.middleware.TimezoneMiddleware',
)

# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"

#########################
# OPTIONAL APPLICATIONS #
#########################

# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
    "debug_toolbar",
    "django_extensions",
    "compressor",
    "coverage",
    PACKAGE_NAME_FILEBROWSER,
    PACKAGE_NAME_GRAPPELLI,
)

# Django REST framework configuration.
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ]
}

# Haystack settings
HAYSTACK_WHOOSH_PATH = os.path.join(PROJECT_ROOT, 'djangobb_index')
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'
    }
}

# Donation tiers: (amount, days of premium membership granted).
DONATION_AMOUNTS = (
    # Amount, Days of premium
    (2, 7),
    (5, 30),
    (10, 90),
    (17, 180),
)
# Target donation amount for sidebar
MONTHLY_DONATION_AMOUNT = 180
# Group that premium members are placed in.
PREMIUM_GROUP_NAME = "SM_PREMIUM"
# Fields hidden from the Mezzanine accounts profile form.
ACCOUNTS_PROFILE_FORM_EXCLUDE_FIELDS = ("first_name", "last_name")

# Send users to the front page after logging in / registering.
LOGIN_REDIRECT_URL = "/"

##################
# LOCAL SETTINGS #
##################

# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
    # NOTE(review): exec() runs arbitrary code; local_settings.py must be
    # deploy-controlled and never user-supplied.
    exec(open(f, "rb").read())

####################
# DYNAMIC SETTINGS #
####################

# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
    from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
    pass
else:
    set_dynamic_settings(globals())
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Laplacian mesh smoothing driver built on the lar-cc library.
import logging
logger = logging.getLogger(__name__)
import argparse
import time
import pickle
import sys
import os

""" import modules from lar-cc/lib """
# Make the lar-cc library importable; paths are developer-machine specific.
sys.path.insert(0, os.path.expanduser('~/projects/lar-cc/lib/py'))
sys.path.insert(0, './py/computation')
# sys.path.insert(0, '/home/mjirik/projects/lar-cc/lib/py')
from larcc import * # noqa
from fileio import writeFile, writeFilePickle, readFile

# input of test file nrn100.py (with definetion of V and FV)
# V = vertex coordinates
# FV = lists of vertex indices of every face (1-based, as required by pyplasm)
#
# sys.path.insert(1, '/Users/paoluzzi/Documents/RICERCA/pilsen/ricerca/')
# from nrn100 import *
def triangulateSquares(F,
                       a=[0, 1, 2], b=[2, 3, 0],
                       c=[1, 0, 2], d=[3, 2, 0]
                       ):
    """
    Split every quad face in *F* into two triangles.

    The index patterns *a* and *b* pick the two triangles along one
    diagonal of the quad; *c* and *d* describe the opposite diagonal
    and are currently unused.
    """
    triangles = []
    for quad in F:
        triangles.append([quad[i] for i in a])
        triangles.append([quad[i] for i in b])
    return triangles
# scipy.sparse matrices required
# Computation of Vertex-to-vertex adjacency matrix
#
def adjacencyQuery(V, FV):
    """Return the sparse vertex-to-vertex adjacency matrix FV^T * FV.

    Entry (i, j) counts how many faces contain both vertex i and j.
    """
    incidence = csrCreate(FV)
    return matrixProduct(csrTranspose(incidence), incidence)
def adjacencyQuery0(dim, csrAdj, cell):
    """Cells adjacent to *cell* that share at least *dim* (and fewer than
    4) vertices with it, read directly from the CSR adjacency matrix."""
    nverts = 4
    start = csrAdj.indptr[cell]
    stop = csrAdj.indptr[cell + 1]
    neighbours = []
    for other in csrAdj.indices[start:stop]:
        if dim <= csrAdj[cell, other] < nverts:
            neighbours.append(other)
    return neighbours
# construction of the adjacency graph of vertices
# returns VV = adjacency lists (list of indices of vertices
# adjacent to a vertex) of vertices
#
def adjVerts(V, FV):
    """For each vertex, list the indices of vertices sharing an edge with
    it (adjacency count of exactly 2 in the vertex-to-vertex matrix)."""
    V2V = adjacencyQuery(V, FV).tocsr()
    VV = []
    for i in range(len(V)):
        coo = V2V[i].tocoo()
        neighbours = []
        for count, col in zip(coo.data, coo.col):
            if count == 2:
                neighbours.append(int(col))
        VV.append(neighbours)
    return VV
def main():
    """Command-line entry point: read a mesh, run two Laplacian smoothing
    iterations and write the smoothed mesh plus a pickle sidecar.

    Input faces are 1-based (pyplasm convention); they are shifted to
    0-based indices before smoothing, as required by LAR.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.WARNING)
    ch = logging.StreamHandler()
    logger.addHandler(ch)
    # input parser
    parser = argparse.ArgumentParser(
        description="Laplacian smoothing"
    )
    parser.add_argument(
        '-i', '--inputfile',
        default=None,
        required=True,
        help='input file'
    )
    parser.add_argument(
        '-o', '--outputfile',
        default='smooth.obj',
        # fixed: help text wrongly said 'input file'
        help='output file'
    )
    parser.add_argument(
        '-v', '--visualization', action='store_true',
        help='Use visualization')
    parser.add_argument(
        '-d', '--debug', action='store_true',
        help='Debug mode')
    args = parser.parse_args()
    if args.debug:
        logger.setLevel(logging.DEBUG)

    t0 = time.time()
    V, FV = readFile(args.inputfile)
    t1 = time.time()
    logger.info('Data imported %ss. #V: %i, #FV: %i' %
                (str(t1 - t0), len(V), len(FV)))

    # Timed build of the adjacency matrix; adjVerts() below recomputes it.
    csrAdj = adjacencyQuery(V, FV)
    t2 = time.time()
    # fixed: log message said 'Adjency'
    logger.info('Adjacency query %ss' %
                (str(t2 - t1)))

    # transformation of FV to 0-based indices (as required by LAR)
    FV = [[v - 1 for v in face] for face in FV]
    t3 = time.time()
    logger.info('FV transformation %ss' %
                (str(t3 - t2)))

    t4 = time.time()
    VV = adjVerts(V, FV)
    t5 = time.time()
    logger.info('adj verts %ss' %
                (str(t5 - t4)))

    # Iterative Laplacian smoothing: each vertex is moved to the convex
    # combination (CCOMB) of its edge-adjacent vertices.
    # input V = initial positions of vertices
    # output V1 = new positions of vertices
    V1 = AA(CCOMB)([[V[v] for v in adjs] for adjs in VV])
    t6 = time.time()
    logger.info('1st iteration %ss' %
                (str(t6 - t5)))

    # input V1
    # output V2 = new positions of vertices
    V2 = AA(CCOMB)([[V1[v] for v in adjs] for adjs in VV])
    t7 = time.time()
    # fixed: log message said '2st iteration'
    logger.info('2nd iteration %ss' %
                (str(t7 - t6)))

    if args.visualization:
        # FV = triangulateSquares(FV)
        tv1 = time.time()
        logger.info('triangulation %ss' %
                    (str(tv1 - t7)))
        VIEW(STRUCT(MKPOLS((V2, FV))))
        # fixed: removed leftover `import ipdb; ipdb.set_trace()` breakpoint
        # and the dead `if False:` preview block earlier in this function.

    writeFilePickle(args.outputfile + '.pkl', V2, FV)
    writeFile(args.outputfile, V2, FV)
    # fixed: stray quote in log message ("Data stored to ' %s")
    logger.info("Data stored to %s" % (args.outputfile))


if __name__ == "__main__":
    main()
move basis back
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Laplacian mesh smoothing driver built on the lar-cc library.
import logging
logger = logging.getLogger(__name__)
import argparse
import time
import pickle
import sys
import os

""" import modules from lar-cc/lib """
# Make the lar-cc library importable; paths are developer-machine specific.
sys.path.insert(0, os.path.expanduser('~/projects/lar-cc/lib/py'))
sys.path.insert(0, './py/computation')
# sys.path.insert(0, '/home/mjirik/projects/lar-cc/lib/py')
from larcc import * # noqa
from fileio import writeFile, readFile

# input of test file nrn100.py (with definetion of V and FV)
# V = vertex coordinates
# FV = lists of vertex indices of every face (1-based, as required by pyplasm)
#
# sys.path.insert(1, '/Users/paoluzzi/Documents/RICERCA/pilsen/ricerca/')
# from nrn100 import *
def triangulateSquares(F,
                       a=[0, 1, 2], b=[2, 3, 0],
                       c=[1, 0, 2], d=[3, 2, 0]
                       ):
    """
    Convert each quad face in *F* into a pair of triangles.

    Triangles are cut along one diagonal using index patterns *a* and
    *b*; patterns *c*/*d* (the other diagonal) are kept for reference
    but not used.
    """
    result = []
    for quad in F:
        first = [quad[a[0]], quad[a[1]], quad[a[2]]]
        second = [quad[b[0]], quad[b[1]], quad[b[2]]]
        result.extend([first, second])
    return result
# scipy.sparse matrices required
# Computation of Vertex-to-vertex adjacency matrix
#
def adjacencyQuery(V, FV):
    """Compute the sparse vertex-to-vertex adjacency matrix FV^T * FV;
    entry (i, j) counts the faces shared by vertices i and j."""
    csr_faces = csrCreate(FV)
    adjacency = matrixProduct(csrTranspose(csr_faces), csr_faces)
    return adjacency
def adjacencyQuery0(dim, csrAdj, cell):
    """Return cells adjacent to *cell* whose shared-vertex count lies in
    [dim, 4): neighbours read from the CSR row of *cell*."""
    nverts = 4
    row = slice(csrAdj.indptr[cell], csrAdj.indptr[cell + 1])
    return [other
            for other in csrAdj.indices[row]
            if nverts > csrAdj[cell, other] >= dim]
# construction of the adjacency graph of vertices
# returns VV = adjacency lists (list of indices of vertices
# adjacent to a vertex) of vertices
#
def adjVerts(V, FV):
    """Build edge-adjacency lists: vertex j is adjacent to vertex i when
    they appear together in exactly two faces (share an edge)."""
    V2V = adjacencyQuery(V, FV).tocsr()
    VV = []
    for i in range(len(V)):
        row = V2V[i].tocoo()
        VV.append([int(j) for count, j in zip(row.data, row.col)
                   if count == 2])
    return VV
def main():
    """Command-line entry point: read a mesh, run two Laplacian smoothing
    iterations and write the smoothed mesh.

    Input faces are 1-based (pyplasm convention); they are shifted to
    0-based for LAR processing and shifted back before writing.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.WARNING)
    ch = logging.StreamHandler()
    logger.addHandler(ch)
    # input parser
    parser = argparse.ArgumentParser(
        description="Laplacian smoothing"
    )
    parser.add_argument(
        '-i', '--inputfile',
        default=None,
        required=True,
        help='input file'
    )
    parser.add_argument(
        '-o', '--outputfile',
        default='smooth.obj',
        # fixed: help text wrongly said 'input file'
        help='output file'
    )
    parser.add_argument(
        '-v', '--visualization', action='store_true',
        help='Use visualization')
    parser.add_argument(
        '-d', '--debug', action='store_true',
        help='Debug mode')
    args = parser.parse_args()
    if args.debug:
        logger.setLevel(logging.DEBUG)

    t0 = time.time()
    V, FV = readFile(args.inputfile)
    t1 = time.time()
    logger.info('Data imported %ss. #V: %i, #FV: %i' %
                (str(t1 - t0), len(V), len(FV)))

    # Timed build of the adjacency matrix; adjVerts() below recomputes it.
    csrAdj = adjacencyQuery(V, FV)
    t2 = time.time()
    # fixed: log message said 'Adjency'
    logger.info('Adjacency query %ss' %
                (str(t2 - t1)))

    # transformation of FV to 0-based indices (as required by LAR)
    FV = [[v - 1 for v in face] for face in FV]
    t3 = time.time()
    logger.info('FV transformation %ss' %
                (str(t3 - t2)))

    t4 = time.time()
    VV = adjVerts(V, FV)
    t5 = time.time()
    logger.info('adj verts %ss' %
                (str(t5 - t4)))

    # Iterative Laplacian smoothing: each vertex is moved to the convex
    # combination (CCOMB) of its edge-adjacent vertices.
    # input V = initial positions of vertices
    # output V1 = new positions of vertices
    V1 = AA(CCOMB)([[V[v] for v in adjs] for adjs in VV])
    t6 = time.time()
    logger.info('1st iteration %ss' %
                (str(t6 - t5)))

    # input V1
    # output V2 = new positions of vertices
    V2 = AA(CCOMB)([[V1[v] for v in adjs] for adjs in VV])
    t7 = time.time()
    # fixed: log message said '2st iteration'
    logger.info('2nd iteration %ss' %
                (str(t7 - t6)))

    if args.visualization:
        # FV = triangulateSquares(FV)
        tv1 = time.time()
        logger.info('triangulation %ss' %
                    (str(tv1 - t7)))
        VIEW(STRUCT(MKPOLS((V2, FV))))
        # fixed: removed leftover `import ipdb; ipdb.set_trace()` breakpoint
        # and the dead `if False:` preview block earlier in this function.

    # move index basis back to 1-based before writing
    # NOTE(review): `np` is not imported here explicitly; presumably it is
    # re-exported by `from larcc import *` -- confirm.
    FV = (np.array(FV) + 1).tolist()
    # write outputs
    # NOTE(review): the .pkl sidecar is written with writeFile (the mesh
    # writer), not a pickle writer -- confirm this is intended.
    writeFile(args.outputfile + '.pkl', V2, FV)
    writeFile(args.outputfile, V2, FV)
    # fixed: stray quote in log message ("Data stored to ' %s")
    logger.info("Data stored to %s" % (args.outputfile))


if __name__ == "__main__":
    main()
|
from threading import Thread
from queue import Queue
class Connection(Thread):
    """Daemon worker thread bound to a ConnectionPool.

    Subclasses override ``thread()`` with the actual work. When the work
    finishes, the connection unregisters itself from the pool and asks the
    pool to start the next queued connection.
    """

    # Daemon threads never block interpreter shutdown.
    daemon = True

    def __init__(self, pool):
        Thread.__init__(self)
        # Owning ConnectionPool; maps thread ident -> running connection.
        self.pool = pool

    def run(self):
        # Do the work, then hand the pool slot back.
        self.thread()
        self.end()

    def end(self):
        """Remove this connection from the pool and start the next queued one."""
        del self.pool[self.ident]
        self.pool.next()

    def thread(self):
        """The connection's work; default is a no-op. Override in subclasses."""
        return
class ConnectionPool(dict):
    """Dictionary of running connections keyed by thread ident.

    Not-yet-started connections wait in a FIFO backlog queue; at most
    ``max`` connections run at the same time.
    """

    # Upper bound on concurrently running connections.
    max = 20

    def __init__(self, Conn):
        dict.__init__(self)
        # Connection factory: called as Conn(self) to build a new worker.
        self.Conn = Conn
        self.queue = Queue()

    def push(self, params=None):
        """Queue a new connection carrying *params*; start it immediately
        when a slot is free."""
        conn = self.Conn(self)
        conn.params = params
        self.queue.put(conn)
        if len(self) < self.max:
            self.next()

    def next(self):
        """Start the next queued connection, if any, and register it."""
        if not self.queue.empty():
            conn = self.queue.get()
            conn.start()
            self[conn.ident] = conn
Rename method
from threading import Thread
from queue import Queue
class Connection(Thread):
    """Daemon worker thread bound to a ConnectionPool.

    Subclasses override ``main()`` with the actual work. When the work
    finishes, the connection unregisters itself from the pool and asks the
    pool to start the next queued connection.
    """

    # Daemon threads never block interpreter shutdown.
    daemon = True

    def __init__(self, pool):
        Thread.__init__(self)
        # Owning ConnectionPool; maps thread ident -> running connection.
        self.pool = pool

    def run(self):
        # Do the work, then hand the pool slot back.
        self.main()
        self.end()

    def end(self):
        """Remove this connection from the pool and start the next queued one."""
        del self.pool[self.ident]
        self.pool.next()

    def main(self):
        """The connection's work; default is a no-op. Override in subclasses."""
        return
class ConnectionPool(dict):
    """Dictionary of running connections keyed by thread ident.

    Not-yet-started connections wait in a FIFO backlog queue; at most
    ``max`` connections run at the same time.
    """

    # Upper bound on concurrently running connections.
    max = 20

    def __init__(self, Conn):
        dict.__init__(self)
        # Connection factory: called as Conn(self) to build a new worker.
        self.Conn = Conn
        self.queue = Queue()

    def push(self, data=None):
        """Queue a new connection carrying *data*; start it immediately
        when a slot is free."""
        conn = self.Conn(self)
        conn.data = data
        self.queue.put(conn)
        if len(self) < self.max:
            self.next()

    def next(self):
        """Start the next queued connection, if any, and register it."""
        if not self.queue.empty():
            conn = self.queue.get()
            conn.start()
            self[conn.ident] = conn
|
# -*- coding: utf-8-*-
"""
Author: Marco Dinacci <dev@dinointeractive.com>
Copyright © 2008-2009
"""
# Panda3D racing-game module: game logic and application bootstrap.

# logging
from mdlib.log import ConsoleLogger, DEBUG,INFO
logger = ConsoleLogger("game", INFO)

# configuration -- loaded at import time, before the engine starts.
from mdlib.panda import config as cfg
cfg.loadFile("../res/config.prc")
cfg.loadFile("../res/conf/options.prc")
cfg.loadFile("../res/conf/high.prc")

from mdlib.native import SystemManager
from mdlib.panda import eventCallback
from mdlib.panda.data import GOM
from mdlib.panda.input import InputManager

from direct.showbase.DirectObject import DirectObject
# NOTE: importing DirectStart starts the Panda3D engine as a side effect.
import direct.directbase.DirectStart
from pandac.PandaModules import WindowProperties, VirtualFileSystem, Filename
from pandac.PandaModules import ClockObject
from pandac.PandaModules import BitMask32, AntialiasAttrib, Quat, Vec3, Point3
from pandac.PandaModules import CollisionHandlerQueue, CollisionTraverser
from direct.interval.MetaInterval import Sequence
from direct.interval.FunctionInterval import Wait, Func
from direct.showbase.PythonUtil import Functor

from gui import ScreenManager
from view import GameView
from data import TrackResult, GameMode
from state import GS, GameState
import utils
import pprofile as profile
import event, entity
import sys, time
class Game(object):
DUMMY_VALUE = -999
def __init__(self, view):
self._view = view
self._subscribeToEvents()
self._setupInput()
self._camGroundZ = self.DUMMY_VALUE
self._lastTile = ""
self._tileType = "neutral"
self._lastTileType = "neutral"
self._track = None
self._ball = None
self._controlInverted = False
@eventCallback
def endTrack(self):
self._ball.slowDown()
self._view.hud.timer.stop()
trackTime = self._view.hud.timer.time
info = GS.getTrackInfo(GS.selectedTrack)
result = TrackResult()
result.bestTime = utils.strTimeToTenths(trackTime)
result.bid = GS.selectedBall
result.tid = info.tid
result.trophy = None
if result.bestTime <= info.bronze:
result.trophy = entity.bronze_cup_params
if result.bestTime <= info.silver:
result.trophy = entity.silver_cup_params
if result.bestTime <= info.gold:
result.trophy = entity.gold_cup_params
GS.lastTrackResult = result
GS.profile.update(result, GS.mode)
def start(self):
self._loadTrack(GS.selectedTrack)
self._loadBall(GS.selectedBall)
self._setupCollisionDetection()
# HACK necessary to get the track's copy in the scenegraph
self._track.reparentTo(self._view.scene._rootNode)
self._view.scene.addEntity(self._track)
self._view.scene.addEntity(self._ball)
self._view.scene.addEntity(self._player)
self._view.cam.followTarget(self._ball)
self._view.showCursor(False)
self._view.show()
self._view.hud.timer.resetAndStart()
def update(self, task):
# steer
if self._keyMap["right"] == True:
if self._ball.physics.speed > 0:
if self._controlInverted:
self._ball.turnLeft()
else:
self._ball.turnRight()
if self._keyMap["left"] == True:
if self._ball.physics.speed > 0:
if self._controlInverted:
self._ball.turnRight()
else:
self._ball.turnLeft()
if self._keyMap["forward"] == True:
if self._controlInverted:
self._ball.brake()
else:
self._ball.accelerate()
else:
self._ball.decelerate()
if self._keyMap["backward"] == True:
if self._controlInverted:
self._ball.accelerate()
else:
self._ball.brake()
if self._keyMap["jump"] == True:
self._ball.jump()
self._keyMap["jump"] = False
# special actions
t = self._tileType
if t == "neutral" or t == "N":
self._ball.neutral()
elif t == "jump" or t == "J":
self._ball.jump()
elif t == "accelerate" or t == "A":
self._ball.sprint()
elif t == "slow" or t == "S":
self._ball.slowDown()
elif t == "freeze" or t == "F":
if not self._lastTileType == "F" and not self._lastTileType == "freeze":
self._ball.freeze()
else:
print "unknown type: " , self._tileType
self._ball.neutral()
self._lastTileType = self._tileType
# special items
if self._ball.hasSpecialItem():
item = self._ball.specialItem
if item == "M":
self._ball.minimize()
elif item == "I":
self._ball.invisibleMode()
elif item == "+":
self._view.hud.timer.addTime(30)
self._view.hud.timer.flash()
elif item == "-":
self._view.hud.timer.removeTime(30)
self._view.hud.timer.flash()
elif item == "?":
delay = Wait(1)
f = Func(self.__setattr__,"_controlInverted", True)
f1 = Func(self.__setattr__,"_controlInverted", False)
Sequence(f, delay, f1).start()
self._ball.specialItem = None
if self._ball.physics.speed < 0:
self._ball.physics.speed = 0
self._tileType = "neutral"
return task.cont
def simulationStep(self, task):
self._view.cam.update()
return task.cont
def collisionStep(self, task):
dt = globalClock.getDt()
self._camGroundZ = self.DUMMY_VALUE
ballIsCollidingWithGround = False
# keep the collision node perpendicular to the track, this is necessary
# since the ball rolls all the time
self._ballCollNodeNp.setQuat(self._track.nodepath,Quat(1,0,0,0))
# check track collisions
# TODO must optimise this, no need to check the whole track,
# but only the current segment
self._picker.traverse(self._track.nodepath)
if self._collQueue.getNumEntries() > 0:
self._collQueue.sortEntries()
firstGroundContact = self.DUMMY_VALUE
firstTile = None
for i in range(self._collQueue.getNumEntries()):
entry = self._collQueue.getEntry(i)
z = entry.getSurfacePoint(render).getZ()
# check camera collision. There can be more than one
if entry.getFromNodePath() == self._cameraCollNodeNp:
if z > firstGroundContact:
firstGroundContact = z
firstTile = entry.getIntoNodePath()
# check _ball's ray collision with ground
elif entry.getFromNodePath() == self._ballCollNodeNp:
np = entry.getIntoNodePath()
rootNode = np.getParent().getParent().getParent()
if rootNode.hasTag("effect"):
self._ball.setSpecialItem(rootNode.getTag("effect"))
rootNode.removeNode()
else:
# tell the _track which segment the _ball is on
#self._track.setCurrentTile(np)
#
if np.hasTag("type"):
self._tileType = np.getTag("type")
self._ball.rayGroundZ = z
ballIsCollidingWithGround = True
if entry != self._lastTile:
self._lastTile = entry
self._camGroundZ = firstGroundContact
if ballIsCollidingWithGround == False:
if self._ball.isJumping():
print "no _ball-ground contact but jumping"
else:
print "no _ball-ground contact, losing"
self._ball.getLost()
self._view.gameIsAlive = False
return task.done # automatically stop the task
# check for rays colliding with the _ball
self._picker.traverse(self._ball.nodepath)
if self._collQueue.getNumEntries() > 0:
self._collQueue.sortEntries()
if self._collQueue.getNumEntries() == 1:
entry = self._collQueue.getEntry(0)
if entry.getFromNodePath() == self._cameraCollNodeNp:
self.camBallZ = entry.getSurfacePoint(render).getZ()
else:
raise AssertionError("must always be 1")
#if self._camGroundZ > self.camBallZ:
# ground collision happened before _ball collision, this means
# that the _ball is descending a slope
# Get the row colliding with the cam's ray, get two rows after,
# set all of them transparent
# TODO store the rows in a list, as I have to set the transparency
# back to 0 after the _ball has passed
#pass
#row = firstTile.getParent()
#row.setSa(0.8)
#row.setTransparency(TransparencyAttrib.MAlpha)
# HACK
forward = self._view.scene._rootNode.getRelativeVector(
self._player.nodepath,
Vec3(0,1,0))
forward.setZ(0)
forward.normalize()
speedVec = forward * dt * self._ball.physics.speed
self._ball.forward = forward
self._ball.physics.speedVec = speedVec
self._player.nodepath.setPos(self._player.nodepath.getPos() + speedVec)
self._player.nodepath.setZ(self._ball.rayGroundZ +
self._ball.jumpZ + \
self._ball.physics.radius)
# rotate the _ball
self._ball.nodepath.setP(self._ball.nodepath.getP() -1 * dt * \
self._ball.physics.speed *
self._ball.physics.spinningFactor)
# set the _ball to the position of the controller node
self._ball.nodepath.setPos(self._player.nodepath.getPos())
# rotate the controller to follow the direction of the _ball
self._player.nodepath.setH(self._ball.nodepath.getH())
return task.cont
def _loadTrack(self, tid):
# TODO remove first existing track from scene
logger.info("Using track %s" % tid)
vfs = VirtualFileSystem.getGlobalPtr()
vfs.mount(Filename("../res/tracks/%s.track"% tid) ,".",
VirtualFileSystem.MFReadOnly)
if self._track is not None:
self._track.nodepath.removeNode()
self._track = GOM.getEntity(entity.new_track_params, False)
self._track.unfold()
# TODO this should be done in data
self._track.nodepath.setCollideMask(BitMask32(1))
def _loadBall(self, ballName):
# TODO remove first existing ball and player from scene
logger.info("Using ball %s" % ballName)
if self._ball is not None:
self._ball.nodepath.removeNode()
params = entity.ballsMap[ballName]
self._ball = GOM.getEntity(params, False)
self._ball.nodepath.setPos(Point3(params["position"]["x"],
params["position"]["y"],
params["position"]["z"]))
collSphere = self._ball.nodepath.find("**/ball")
collSphere.node().setIntoCollideMask(BitMask32(2))
collSphere.node().setFromCollideMask(BitMask32.allOff())
self._player = GOM.getEntity(entity.player_params)
self._player.nodepath.setPos(self._ball.nodepath.getPos())
self._player.nodepath.setQuat(self._track.nodepath,Quat(1,0,0,0))
self._ball.forward = Vec3(0,1,0)
def _setKey(self, key, value):
self._keyMap[key] = value
def _setupInput(self):
self._keyMap = {"left":False, "right":False, "forward":False, \
"backward":False, "jump": False}
self.inputMgr = InputManager()
self.inputMgr.createSchemeAndSwitch("game")
self.inputMgr.bindCallback(cfg.strValueForKey("options_steer_left"),
self._setKey, ["left",True], scheme="game")
self.inputMgr.bindCallback(cfg.strValueForKey("options_steer_right"),
self._setKey, ["right",True])
self.inputMgr.bindCallback(cfg.strValueForKey("options_accelerate")
, self._setKey, ["forward",True])
self.inputMgr.bindCallback(cfg.strValueForKey("options_brake"),
self._setKey, ["backward",True])
self.inputMgr.bindCallback(cfg.strValueForKey("options_jump"),
self._setKey, ["jump",True])
self.inputMgr.bindCallback("p", base.oobe)
key = cfg.strValueForKey("options_steer_left") + "-up"
self.inputMgr.bindCallback(key, self._setKey, ["left",False])
key = cfg.strValueForKey("options_steer_right") + "-up"
self.inputMgr.bindCallback(key, self._setKey, ["right",False])
key = cfg.strValueForKey("options_accelerate") + "-up"
self.inputMgr.bindCallback(key, self._setKey, ["forward",False])
key = cfg.strValueForKey("options_brake") + "-up"
self.inputMgr.bindCallback(key, self._setKey, ["backward",False])
def _setupCollisionDetection(self):
    """Create the ball-ground and camera-ball collision rays and register
    both with a shared traverser feeding a single collision queue."""
    self._collQueue = CollisionHandlerQueue();
    # ball-ground collision setup: a downward ray cast from above the ball
    self._ballCollNodeNp = self._ball.nodepath.attachCollisionRay(
                           "ball-ground",
                           0,0,10, # origin
                           0,0,-1, # direction
                           BitMask32(1),BitMask32.allOff())
    # keep the ray oriented relative to the track
    self._ballCollNodeNp.setQuat(self._track.nodepath, Quat(1,0,0,0))
    self._ballCollNodeNp.show()
    # camera-ball collision setup: a forward ray from the camera
    bmFrom = BitMask32(1); bmFrom.setBit(1)
    self._cameraCollNodeNp = self._view.cam.attachCollisionRay("camera-ball",
                             0,0,0,
                             0,1,0,
                             bmFrom,BitMask32.allOff())
    self._cameraCollNodeNp.setQuat(self._view.cam.getQuat() + Quat(.1,0,0,0))
    self._cameraCollNodeNp.show()
    self._picker = CollisionTraverser()
    self._picker.setRespectPrevTransform(True)
    self._picker.addCollider(self._ballCollNodeNp, self._collQueue)
    self._picker.addCollider(self._cameraCollNodeNp, self._collQueue)
def _subscribeToEvents(self):
    """Create this Game's event listener (no subscriptions active yet)."""
    self._listener = DirectObject()
    #self._listener.accept(event.END_TRACK, self.endTrack)
class GameApplication(object):
dta = 0
stepSize = 1/60.0
def __init__(self):
super(GameApplication, self).__init__()
self._listener = DirectObject()
if self._checkRequirements():
self._subscribeToEvents()
self._createWindow()
GS.state.request(GameState.INITIALISE)
else:
self._quitInDespair("Requirements not satisfied", -2)
def run(self):
if cfg.boolValueForKey("cap-framerate"):
FPS = 60
globalClock = ClockObject.getGlobalClock()
globalClock.setMode(ClockObject.MLimited)
globalClock.setFrameRate(FPS)
base.openDefaultWindow(startDirect=False, props=self._wp)
self._screenMgr = ScreenManager()
self._screenMgr.displayScreen("main")
self._createGameAndView()
taskMgr.run()
@eventCallback
def shutdown(self):
sys.exit(0)
@eventCallback
def startRace(self):
logger.info("Starting game")
self._screenMgr.destroyCurrent()
GS.state.request(GameState.PLAY)
self._game.start()
self._startProcesses()
@eventCallback
def exitGameRequest(self):
self._screenMgr.displayScreen("exit")
def endRace(self):
self._stopProcesses()
delay = Wait(1.0)
Sequence(Func(self._game.endTrack), delay,
Func(self._screenMgr.displayScreen, "next-track"),
Func(self._view.hud.hide),
Func(self._view.scene.hide),
Func(self._view.showCursor)).start()
def _createGameAndView(self):
self._view = GameView()
self._view.hide()
self._game = Game(self._view)
def _startProcesses(self):
taskMgr.add(self._game.inputMgr.update, "update-input")
taskMgr.add(self._game.collisionStep, "collision-step")
taskMgr.add(self._game.simulationStep, "world-simulation")
taskMgr.add(self._game.update, "update-objects")
def _stopProcesses(self):
taskMgr.remove("update-input")
taskMgr.remove("collision-step")
taskMgr.remove("world-simulation")
taskMgr.remove("update-objects")
def _quitInDespair(msg, status):
print msg
sys.exit(status)
def _subscribeToEvents(self):
self._listener.accept(event.GAME_EXIT_REQUEST, self.exitGameRequest)
self._listener.accept(event.GAME_DESTROY, self.shutdown)
self._listener.accept(event.GAME_START, self.startRace)
self._listener.accept(event.END_TRACK, self.endRace)
self._listener.accept("l", self.__wireframe)
a = 0
def __wireframe(self):
if self.a ==0:
base.wireframeOn()
self.a == 1
self._game._track.nodepath.hide()
else:
base.wireframeOff()
self.a == 0
self._game._track.nodepath.show()
def _createWindow(self):
self._wp = WindowProperties().getDefault()
self._wp.setOrigin(0,0)
self._wp.setFullscreen(cfg.boolValueForKey("options_fullscreen"))
if not self._wp.getFullscreen():
w,h = cfg.strValueForKey("options_resolution").split("x")
self._wp.setSize(int(w),int(h))
def _loadResources(self):
vfs = VirtualFileSystem.getGlobalPtr()
return vfs.mount(Filename("../res/scene.mf"),".",
VirtualFileSystem.MFReadOnly)
def _checkRequirements(self):
logger.debug("Checking system requirements")
sm = SystemManager()
dsNeeded = cfg.intValueForKey("required-space")
enoughDS = sm.checkDiskSpace(sm.getSystemDrive(), dsNeeded)
enoughRam = sm.checkRam(cfg.intValueForKey("required-ram"))
return enoughRam and enoughDS
if __name__ == '__main__':
    # script entry point: build the application and enter the main loop
    GameApplication().run()
Changed the collision detection mechanism: it now uses events for
special cases (the ball on a tile, or the ball touching bonuses).
Still a work in progress, though.
# -*- coding: utf-8-*-
"""
Author: Marco Dinacci <dev@dinointeractive.com>
Copyright © 2008-2009
"""
# logging
from mdlib.log import ConsoleLogger, DEBUG,INFO
logger = ConsoleLogger("game", INFO)
# configuration: base config plus user options and quality preset
from mdlib.panda import config as cfg
cfg.loadFile("../res/config.prc")
cfg.loadFile("../res/conf/options.prc")
cfg.loadFile("../res/conf/high.prc")
# mdlib framework imports
from mdlib.native import SystemManager
from mdlib.panda import eventCallback
from mdlib.panda.data import GOM
from mdlib.panda.input import InputManager, SafeDirectObject
from mdlib.patterns import singleton
# Panda3D engine imports (DirectStart opens the default ShowBase)
from direct.showbase.DirectObject import DirectObject
import direct.directbase.DirectStart
from pandac.PandaModules import WindowProperties, VirtualFileSystem, Filename
from pandac.PandaModules import ClockObject, RigidBodyCombiner, NodePath
from pandac.PandaModules import BitMask32, AntialiasAttrib, Quat, Vec3, Point3
from pandac.PandaModules import CollisionHandlerQueue, CollisionTraverser, \
     CollisionHandlerEvent, CollisionNode, CollisionSphere
from direct.interval.MetaInterval import Sequence
from direct.interval.FunctionInterval import Wait, Func
from direct.showbase.PythonUtil import Functor
# game-local modules
from gui import ScreenManager
from view import GameView
from data import TrackResult, GameMode
from state import GS, GameState
import utils
import pprofile as profile
import event, entity
import sys, time
class CheckpointDelegate(object):
    """Remembers where the ball should respawn after falling off the track."""
    # nodepath of the most recently crossed checkpoint (None until one is hit)
    latestCp = None
    # (x, y, z) start coordinates parsed from the track's start-point tag
    startPos = None
class Game(object):
    """Core gameplay logic.

    Drives the ball and its invisible 'player' controller node, applies
    tile/item effects, and runs the per-frame collision and update tasks.
    Collision notifications for checkpoints, special tiles and segments
    arrive as events (see _setupCollisionDetection/_subscribeToEvents).
    """

    # sentinel for "no valid value yet" (e.g. camera-ground height)
    DUMMY_VALUE = -999

    def __init__(self, view):
        self._view = view
        self._setupInput()
        self._subscribeToEvents()
        self._cpDelegate = CheckpointDelegate()
        self._camGroundZ = self.DUMMY_VALUE
        self._lastTile = ""
        self._tileType = "neutral"
        self._lastTileType = "neutral"
        self._currentSegment = None
        self._track = None
        self._ball = None
        self._controlInverted = False
        self._gameIsAlive = True

    def start(self):
        """Load the selected track and ball, wire collisions, show the view
        and start the HUD timer."""
        self._loadTrack(GS.selectedTrack)
        self._loadBall(GS.selectedBall)
        self._setupCollisionDetection()
        self._view.scene.addEntity(self._track)
        self._view.scene.addEntity(self._ball)
        self._view.scene.addEntity(self._player)
        self._view.cam.followTarget(self._player)
        self._view.showCursor(False)
        self._view.show()
        self._view.hud.timer.resetAndStart()

    def update(self, task):
        """Per-frame task: apply steering input, tile effects and special
        items to the ball; always returns task.cont."""
        # steer
        if self._keyMap["right"] == True:
            if self._ball.physics.speed > 0:
                if self._controlInverted:
                    self._ball.turnLeft()
                else:
                    self._ball.turnRight()
        if self._keyMap["left"] == True:
            if self._ball.physics.speed > 0:
                if self._controlInverted:
                    self._ball.turnRight()
                else:
                    self._ball.turnLeft()
        if self._keyMap["forward"] == True:
            if self._controlInverted:
                self._ball.brake()
            else:
                self._ball.accelerate()
        else:
            self._ball.decelerate()
        if self._keyMap["backward"] == True:
            if self._controlInverted:
                self._ball.accelerate()
            else:
                self._ball.brake()
        if self._keyMap["jump"] == True:
            self._ball.jump()
            # jump is a one-shot: clear the flag immediately
            self._keyMap["jump"] = False
        # special actions driven by the tile currently under the ball
        t = self._tileType
        if t == "neutral" or t == "N":
            self._ball.neutral()
        elif t == "jump" or t == "J":
            self._ball.jump()
        elif t == "accelerate" or t == "A":
            self._ball.sprint()
        elif t == "slow" or t == "S":
            self._ball.slowDown()
        elif t == "freeze" or t == "F":
            # freeze only on the first frame over a freeze tile
            if not self._lastTileType == "F" and not self._lastTileType == "freeze":
                self._ball.freeze()
        else:
            print "unknown type: " , self._tileType
            self._ball.neutral()
        self._lastTileType = self._tileType
        # special items picked up by the ball
        if self._ball.hasSpecialItem():
            item = self._ball.specialItem
            if item == "M":
                self._ball.minimize()
            elif item == "I":
                self._ball.invisibleMode()
            elif item == "+":
                self._view.hud.timer.addTime(30)
                self._view.hud.timer.flash()
            elif item == "-":
                self._view.hud.timer.removeTime(30)
                self._view.hud.timer.flash()
            elif item == "?":
                # invert the controls, then restore them after one second
                delay = Wait(1)
                f = Func(self.__setattr__,"_controlInverted", True)
                f1 = Func(self.__setattr__,"_controlInverted", False)
                Sequence(f, delay, f1).start()
            self._ball.specialItem = None
        # speed never goes negative
        if self._ball.physics.speed < 0:
            self._ball.physics.speed = 0
        # reset; collision events will set it again while on a special tile
        self._tileType = "neutral"
        return task.cont

    def simulationStep(self, task):
        """Per-frame task: update the camera."""
        self._view.cam.update()
        return task.cont

    def collisionStep(self, task):
        """Per-frame task: test the ball-ground ray against the track and
        move the controller/ball nodes along the current heading."""
        if not self._gameIsAlive:
            print "no coll step "
            return task.cont
        dt = globalClock.getDt()
        self._camGroundZ = self.DUMMY_VALUE
        ballIsCollidingWithGround = False
        # keep the collision node perpendicular to the track, this is necessary
        # since the ball rolls all the time
        self._ballCollNodeNp.setQuat(self._track.nodepath,Quat(1,0,0,0))
        # check track collisions
        # TODO must optimise this, no need to check the whole track,
        # but only the current segment
        self._picker.traverse(self._track.nodepath)
        #self._picker.traverse(self._currentSegment)
        if self._collQueue.getNumEntries() > 0:
            self._collQueue.sortEntries()
            for i in range(self._collQueue.getNumEntries()):
                entry = self._collQueue.getEntry(i)
                z = entry.getSurfacePoint(render).getZ()
                # check _ball's ray collision with ground
                if entry.getFromNodePath() == self._ballCollNodeNp:
                    np = entry.getIntoNodePath()
                    if np.getName() == "cp":
                        pass
                    self._ball.rayGroundZ = z
                    ballIsCollidingWithGround = True
                if entry != self._lastTile:
                    self._lastTile = entry
        if ballIsCollidingWithGround == False:
            if not self._ball.isJumping():
                # no ground below and not in a jump: the ball fell off
                print "no ball-ground contact, losing"
                self._playBallFallingSequence()
                return task.cont
                #return task.done # automatically stop the task
        # HACK
        forward = self._view.scene._rootNode.getRelativeVector(
                                                    self._player.nodepath,
                                                    Vec3(0,1,0))
        forward.setZ(0)
        forward.normalize()
        speedVec = forward * dt * self._ball.physics.speed
        self._ball.forward = forward
        self._ball.physics.speedVec = speedVec
        self._player.nodepath.setPos(self._player.nodepath.getPos() + speedVec)
        self._player.nodepath.setZ(self._ball.rayGroundZ + self._ball.jumpZ + \
                                   self._ball.physics.radius)
        # rotate the _ball
        self._ball.nodepath.setP(self._ball.nodepath.getP() -1 * dt * \
                                 self._ball.physics.speed *
                                 self._ball.physics.spinningFactor)
        # set the _ball to the position of the controller node
        self._ball.nodepath.setPos(self._player.nodepath.getPos())
        # rotate the controller to follow the direction of the _ball
        self._player.nodepath.setH(self._ball.nodepath.getH())
        return task.cont

    @eventCallback
    def endTrack(self):
        """Stop the timer and record the track result (and trophy, if any)
        in the global state/profile."""
        self._ball.slowDown()
        self._view.hud.timer.stop()
        trackTime = self._view.hud.timer.time
        info = GS.getTrackInfo(GS.selectedTrack)
        result = TrackResult()
        result.bestTime = utils.strTimeToTenths(trackTime)
        result.bid = GS.selectedBall
        result.tid = info.tid
        result.trophy = None
        # later assignments override earlier ones, so the best beaten
        # threshold wins (assumes gold <= silver <= bronze — TODO confirm)
        if result.bestTime <= info.bronze:
            result.trophy = entity.bronze_cup_params
        if result.bestTime <= info.silver:
            result.trophy = entity.silver_cup_params
        if result.bestTime <= info.gold:
            result.trophy = entity.gold_cup_params
        GS.lastTrackResult = result
        GS.profile.update(result, GS.mode)

    @eventCallback
    def _onBallIntoCheckpoint(self, entry):
        """Remember the crossed checkpoint as the respawn point."""
        logger.info("Checkpoint crossed")
        cp = entry.getIntoNodePath()
        self._cpDelegate.latestCp = cp

    @eventCallback
    def _onBallIntoSpecialTile(self, tile ,entry):
        """Record the special tile type; consumed by update()."""
        logger.info("Ball on special tile")
        self._tileType = tile

    @eventCallback
    def _onBallIntoSpecialItem(self, item ,entry):
        """Hand a picked-up special item to the ball."""
        logger.info("Ball on special item")
        # NOTE(review): method casing is 'setSpecialitem' — confirm it
        # matches the ball entity's API
        self._ball.setSpecialitem(item)
        # TODO remove also node, get it from entry

    def _onBallIntoSegment(self, entry):
        """Move the collidable mask from the previous track segment to the
        newly entered one."""
        logger.info("Rolling on segment")
        self._currentSegment.setCollideMask(BitMask32.allOff())
        self._currentSegment = entry.getIntoNodePath()
        self._currentSegment.setCollideMask(BitMask32(1))

    def _playBallFallingSequence(self):
        # play falling sequence and restart from latest
        # checkpoint (or start point)
        sp = self._cpDelegate.startPos
        startPos = Point3(sp[0], sp[1], sp[2])
        if self._cpDelegate.latestCp is not None:
            startPos = self._cpDelegate.latestCp.getPos()
        # disable gameplay while the "lost" animation runs, then re-enable
        # and snap the controller back onto the ball
        seq = Sequence(Func(self.__setattr__,"_gameIsAlive",False),
                       Func(self._ball.getLost, startPos),
                       Func(self.__setattr__,"_gameIsAlive",True),
                       Func(self._player.nodepath.setPos,
                            self._ball.nodepath.getPos()))
        seq.start()

    def _loadTrack(self, tid):
        """Mount the track archive for *tid* and load its entity, marking
        the start segment as collidable."""
        # TODO remove first existing track from scene
        logger.info("Using track %s" % tid)
        vfs = VirtualFileSystem.getGlobalPtr()
        vfs.mount(Filename("../res/tracks/%s.track"% tid) ,".",
                  VirtualFileSystem.MFReadOnly)
        if self._track is not None:
            self._track.nodepath.removeNode()
        self._track = GOM.getEntity(entity.new_track_params, False)
        # TODO this should be done in data
        #self._track.nodepath.getChild(0).getChild(0).setCollideMask(BitMask32(1))
        self._currentSegment = self._track.nodepath.find("**/=start-point")
        self._currentSegment.setCollideMask(BitMask32(1))
        #self._track.nodepath.ls()
        """
        rbc = RigidBodyCombiner("rbc")
        rbcnp = NodePath(rbc)
        rbcnp.reparentTo(render)
        self._track.nodepath.reparentTo(rbcnp)
        rbc.collect()
        """

    def _loadBall(self, ballName):
        """Create the ball entity named *ballName* and its 'player'
        controller, placed at the track's start point."""
        # TODO remove first existing ball and player from scene
        logger.info("Using ball %s" % ballName)
        if self._ball is not None:
            self._ball.nodepath.removeNode()
        params = entity.ballsMap[ballName]
        self._ball = GOM.getEntity(params, False)
        # place the ball at the beginning of the track
        t = self._track.nodepath.find("**/=start-point")
        pos = map(lambda x: float(x), t.getTag("start-point").split(","))
        # HACK 5 is half segment, unless the segment is a curve :/
        self._ball.nodepath.setPos(render,pos[0], pos[1]-5, pos[2])
        self._cpDelegate.startPos = pos
        self._player = GOM.getEntity(entity.player_params)
        self._player.nodepath.setPos(self._ball.nodepath.getPos())
        self._player.nodepath.setQuat(self._track.nodepath,Quat(1,0,0,0))
        self._ball.forward = Vec3(0,1,0)

    def _setKey(self, key, value):
        # record the pressed/released state of one control key
        self._keyMap[key] = value

    def _setupInput(self):
        """Initialise the key-state map and bind the configured control
        keys, plus 'p' (debug camera) and 'escape' (pause)."""
        self._keyMap = {"left":False, "right":False, "forward":False, \
                        "backward":False, "jump": False}
        self.inputMgr = InputManager()
        self.inputMgr.createSchemeAndSwitch("game")
        self.inputMgr.bindCallback(cfg.strValueForKey("options_steer_left"),
                                   self._setKey, ["left",True], scheme="game")
        self.inputMgr.bindCallback(cfg.strValueForKey("options_steer_right"),
                                   self._setKey, ["right",True])
        self.inputMgr.bindCallback(cfg.strValueForKey("options_accelerate")
                                   , self._setKey, ["forward",True])
        self.inputMgr.bindCallback(cfg.strValueForKey("options_brake"),
                                   self._setKey, ["backward",True])
        self.inputMgr.bindCallback(cfg.strValueForKey("options_jump"),
                                   self._setKey, ["jump",True])
        self.inputMgr.bindCallback("p", base.oobe)
        self.inputMgr.bindCallback("escape", GS.state.request, [GameState.PAUSE])
        # key-release ("-up") bindings reset the corresponding flags
        key = cfg.strValueForKey("options_steer_left") + "-up"
        self.inputMgr.bindCallback(key, self._setKey, ["left",False])
        key = cfg.strValueForKey("options_steer_right") + "-up"
        self.inputMgr.bindCallback(key, self._setKey, ["right",False])
        key = cfg.strValueForKey("options_accelerate") + "-up"
        self.inputMgr.bindCallback(key, self._setKey, ["forward",False])
        key = cfg.strValueForKey("options_brake") + "-up"
        self.inputMgr.bindCallback(key, self._setKey, ["backward",False])

    def _setupCollisionDetection(self):
        """Create the ball-ground ray (queue-based) and an event-based
        handler for the ball's own collision node."""
        self._collQueue = CollisionHandlerQueue();
        # ball-ground collision setup: a downward ray cast from above the ball
        self._ballCollNodeNp = self._ball.nodepath.attachCollisionRay(
                               "ball-ground",
                               0,0,10, # origin
                               0,0,-1, # direction
                               BitMask32(1),BitMask32.allOff())
        # orient it perpendicular to the track
        self._ballCollNodeNp.setQuat(self._track.nodepath, Quat(1,0,0,0))
        self._ballCollNodeNp.show()
        self._picker = CollisionTraverser()
        self._picker.addCollider(self._ballCollNodeNp, self._collQueue)
        #self._picker.setRespectPrevTransform(True)
        # fire BALL_INTO-patterned events when the ball hits something
        self._collHandler = CollisionHandlerEvent()
        self._collHandler.addInPattern(event.BALL_INTO)
        collNode = self._ball.nodepath.find("**/ball")
        self._picker.addCollider(collNode, self._collHandler)

    def _subscribeToEvents(self):
        """Listen for the collision events fired by the event handler."""
        do = DirectObject()
        do.accept("ball-into-segment", self._onBallIntoSegment)
        do.accept(event.BALL_INTO_CHECKPOINT, self._onBallIntoCheckpoint)
        do.accept(event.BALL_INTO_SLOW, self._onBallIntoSpecialTile, ["slow"])
        do.accept(event.BALL_INTO_ACCELERATE, self._onBallIntoSpecialTile,
                  ["accelerate"])
        do.accept(event.BALL_INTO_JUMP, self._onBallIntoSpecialTile, ["jump"])
        self._listener = do
class GameApplication(object):
dta = 0
stepSize = 1/60.0
def __init__(self):
singleton(self)
super(GameApplication, self).__init__()
GS.setApplication(self)
self._listener = DirectObject()
if self._checkRequirements():
self._subscribeToEvents()
self._createWindow()
GS.state.request(GameState.INITIALISE)
else:
self._quitInDespair("Requirements not satisfied", -2)
def run(self):
if cfg.boolValueForKey("cap-framerate"):
FPS = 60
globalClock = ClockObject.getGlobalClock()
globalClock.setMode(ClockObject.MLimited)
globalClock.setFrameRate(FPS)
base.openDefaultWindow(startDirect=False, props=self._wp)
self._screenMgr = ScreenManager()
self._screenMgr.displayScreen("main")
taskMgr.run()
@eventCallback
def shutdown(self):
sys.exit(0)
@eventCallback
def startRace(self):
logger.info("Starting game")
self._screenMgr.destroyCurrent()
GS.state.request(GameState.PLAY)
@eventCallback
def exitGameRequest(self):
self._screenMgr.displayScreen("exit")
@eventCallback
def endRace(self):
GS.state.request(GameState.NEXT_TRACK)
def startProcesses(self):
taskMgr.add(self._game.inputMgr.update, "update-input")
taskMgr.add(self._game.collisionStep, "collision-step")
taskMgr.add(self._game.update, "update-objects")
# nothing to do for now
#taskMgr.add(self._game.simulationStep, "world-simulation")
def stopProcesses(self):
taskMgr.remove("update-input")
taskMgr.remove("collision-step")
taskMgr.remove("world-simulation")
taskMgr.remove("update-objects")
def createGameAndView(self):
# TODO first destroy (or reset) previous game and view if they exists
self._view = GameView()
self._view.hide()
self._game = Game(self._view)
def _quitInDespair(msg, status):
print msg
sys.exit(status)
def _subscribeToEvents(self):
self._listener.accept(event.GAME_EXIT_REQUEST, self.exitGameRequest)
self._listener.accept(event.GAME_DESTROY, self.shutdown)
self._listener.accept(event.GAME_START, self.startRace)
self._listener.accept(event.RESTART_TRACK, self.startRace)
self._listener.accept(event.END_TRACK, self.endRace)
self._listener.accept(event.UNPAUSE_GAME, GS.state.request,
[GameState.NEUTRAL])
self._listener.accept("l", self.__wireframe)
a = 0
def __wireframe(self):
if self.a ==0:
base.wireframeOn()
self.a == 1
self._game._track.nodepath.hide()
else:
base.wireframeOff()
self.a == 0
self._game._track.nodepath.show()
def _createWindow(self):
self._wp = WindowProperties().getDefault()
self._wp.setOrigin(0,0)
self._wp.setFullscreen(cfg.boolValueForKey("options_fullscreen"))
if not self._wp.getFullscreen():
w,h = cfg.strValueForKey("options_resolution").split("x")
self._wp.setSize(int(w),int(h))
def _loadResources(self):
vfs = VirtualFileSystem.getGlobalPtr()
return vfs.mount(Filename("../res/scene.mf"),".",
VirtualFileSystem.MFReadOnly)
def _checkRequirements(self):
logger.debug("Checking system requirements")
sm = SystemManager()
dsNeeded = cfg.intValueForKey("required-space")
enoughDS = sm.checkDiskSpace(sm.getSystemDrive(), dsNeeded)
enoughRam = sm.checkRam(cfg.intValueForKey("required-ram"))
return enoughRam and enoughDS
game = property(fget = lambda self: self._game)
view = property(fget = lambda self: self._view)
screen = property(fget = lambda self: self._screenMgr)
if __name__ == '__main__':
    # script entry point: build the application and enter the main loop
    GameApplication().run()
|
# -*- coding: utf8 -*-
import json
from random import getrandbits
import pytest
import six
from tests.constants import GO_EP1_ID
from tests.utils import on_windows
@pytest.fixture
def dir_operations(run_line):
    """
    Given an input directory name, makes the directory to test inputs,
    ls's the directory to test outputs and output formats,
    attempts to remake the directory to test error outputs,
    and finally deletes the directory for cleanup.
    """

    def f(input_name, expected_name=None):
        # mkdir
        # name randomized to prevent collision
        rand = str(getrandbits(128))
        dir_name = input_name + rand
        expected = (expected_name or input_name) + rand
        # if given a unicode name, run a unicode string
        if isinstance(input_name, six.text_type):
            make_output = run_line(
                u"globus mkdir {}:~/{}".format(GO_EP1_ID, dir_name)
            ).output
        # if given a byte string name, run a byte string
        # (bytes.format only exists on Python 2, so this branch is py2-only)
        else:
            make_output = run_line(
                b"globus mkdir {}:~/{}".format(GO_EP1_ID, dir_name)
            ).output
        assert "The directory was created successfully" in make_output
        # confirm the dir can be seen. Confirms simple, long, verbose,
        # and json output all handle the encoding.
        ls_output = run_line(u"globus ls {}:~/".format(GO_EP1_ID)).output
        assert expected in ls_output
        long_output = run_line(u"globus ls -l {}:~/".format(GO_EP1_ID)).output
        assert expected in long_output
        assert "Filename" in long_output
        json_output = json.loads(
            run_line(u"globus ls -F json {}:~/".format(GO_EP1_ID)).output
        )
        assert expected in [i["name"] for i in json_output["DATA"]]
        # attempt to make the dir again to test error output:
        # if given a unicode name, run a unicode string
        if isinstance(input_name, six.text_type):
            make2_output = run_line(
                u"globus mkdir {}:~/{}".format(GO_EP1_ID, dir_name), assert_exit_code=1
            ).output
        # if given a byte string name, run a byte string
        else:
            make2_output = run_line(
                b"globus mkdir {}:~/{}".format(GO_EP1_ID, dir_name), assert_exit_code=1
            ).output
        assert "Path already exists" in make2_output
        assert expected in make2_output
        # delete for cleanup:
        # if given a unicode name, run a unicode string
        if isinstance(input_name, six.text_type):
            delete_output = run_line(
                u"globus delete -r {}:~/{}".format(GO_EP1_ID, dir_name)
            ).output
        # if given a byte string name, run a byte string
        else:
            delete_output = run_line(
                b"globus delete -r {}:~/{}".format(GO_EP1_ID, dir_name)
            ).output
        assert "The delete has been accepted" in delete_output

    return f
@pytest.fixture
def ep_operations(run_line):
    """
    Given an input_name, creates, updates, gets, and deletes an endpoint
    using the input_name as a display_name. If an expected_name is given,
    confirms output matches that name rather than the input_name.
    """

    def f(input_name, expected_name=None):
        # create
        # if given a unicode name, run a unicode string
        if isinstance(input_name, six.text_type):
            create_output = json.loads(
                run_line(
                    u"globus endpoint create -F json --server {}".format(input_name)
                ).output
            )
        # if given a byte string name, run a byte string
        else:
            create_output = json.loads(
                run_line(
                    b"globus endpoint create -F json --server {}".format(input_name)
                ).output
            )
        # FIX: this assertion was accidentally duplicated on two
        # consecutive lines; assert it once
        assert create_output["code"] == "Created"
        ep_id = create_output["id"]
        # confirm endpoint show sees ep
        show_output = run_line("globus endpoint show {}".format(ep_id)).output
        assert (expected_name or input_name) in show_output
        # update
        # if given a unicode name, run a unicode string
        if isinstance(input_name, six.text_type):
            update_output = run_line(
                u"globus endpoint update {} --description {}".format(ep_id, input_name)
            ).output
        # if given a byte string name, run a byte string
        else:
            update_output = run_line(
                b"globus endpoint update {} --description {}".format(ep_id, input_name)
            ).output
        assert "updated successfully" in update_output
        # confirm show sees updated description
        show_output = json.loads(
            run_line("globus endpoint show {} -F json".format(ep_id)).output
        )
        assert (expected_name or input_name) == show_output["description"]
        # delete
        delete_output = run_line("globus endpoint delete {}".format(ep_id)).output
        assert "deleted successfully" in delete_output

    return f
def test_quote_escaping(dir_operations, ep_operations):
"""
Tests operations with an escaped quote inside quotes that should be
seen as one literal quote character by the shell
"""
name = r'"\""'
dir_operations(name, expected_name='"')
ep_operations(name, expected_name='"')
def test_ascii_url_encoding(dir_operations, ep_operations):
"""
Tests operations with an ASCII name that includes ' ' and '%"
characters that will need to be encoded for use in a url.
"""
name = '"a% b"'
dir_operations(name, expected_name="a% b")
ep_operations(name, expected_name="a% b")
@pytest.mark.skipif(
    six.PY2 and on_windows(), reason="python2 Windows console issues (FIXME?)"
)
def test_non_ascii_utf8(dir_operations, ep_operations):
    """
    Tests operations with a UTF-8 name containing non ASCII characters with
    code points requiring multiple bytes.
    """
    multibyte_name = u"テスト"
    for operation in (dir_operations, ep_operations):
        operation(multibyte_name)
@pytest.mark.skipif(six.PY3, reason="test run with Python 3")
@pytest.mark.skipif(
    six.PY2 and on_windows(), reason="python2 Windows console issues (FIXME?)"
)
def test_non_ascii_utf8_bytes(dir_operations, ep_operations):
    """
    Tests operations with a byte string encoded from non ASCII UTF-8.
    This test is only run on Python 2 as bytes are not strings in Python 3.
    """
    unicode_name = u"テスト"
    encoded_name = unicode_name.encode("utf8")
    # we expect the unicode name back since the API returns unicode strings
    for operation in (dir_operations, ep_operations):
        operation(encoded_name, expected_name=unicode_name)
@pytest.mark.skipif(
    six.PY2 and on_windows(), reason="python2 Windows console issues (FIXME?)"
)
def test_latin1(dir_operations, ep_operations):
    """
    Tests operations with latin-1 name that is not valid UTF-8.
    """
    # the encoding for 'é' in latin-1 is a continuation byte in utf-8
    latin1_bytes = b"\xe9"  # é's latin-1 encoding
    decoded_name = latin1_bytes.decode("latin-1")
    # sanity check: the same byte is not decodable as UTF-8
    with pytest.raises(UnicodeDecodeError):
        latin1_bytes.decode("utf-8")
    for operation in (dir_operations, ep_operations):
        operation(decoded_name)
@pytest.mark.skipif(six.PY3, reason="test run with Python 3")
@pytest.mark.skipif(
    six.PY2 and on_windows(), reason="python2 Windows console issues (FIXME?)"
)
def test_invalid_utf8_bytes(run_line):
    r"""
    Tests operations with byte string that can be decoded with
    latin-1 but not with UTF-8. Confirms that this raises a
    UnicodeDecodeError, as the SDK/APIs can't handle decoding non UTF-8.
    This test is only run on Python 2 as bytes are not strings in Python 3.
    You can imitate this in the command-line using a `printf` subshell, e.g.
    globus mkdir "${GO_EP_1}:~/$(printf "\xe9")"
    """
    # the encoding for 'é' in latin-1 is a continuation byte in utf-8
    invalid_bytes = b"\xe9"  # é's latin-1 encoding
    mkdir_result = run_line(
        "globus mkdir {}:~/{}".format(GO_EP1_ID, invalid_bytes), assert_exit_code=1
    ).output
    assert "UnicodeDecodeError" in mkdir_result
    create_result = run_line(
        "globus endpoint create --server {}".format(invalid_bytes), assert_exit_code=1
    ).output
    assert "UnicodeDecodeError" in create_result
Remove test case for failure on click<7.1
In pallets/click#1468, string parameter types (the default) added a
catch-all case in which a value that fails to decode is run through
`decode('utf-8', 'replace')`. This succeeds even on things like
latin-1 characters that cannot be decoded as UTF-8. As a result, our
test case that relied on the value being passed through verbatim no
longer works. However, the case that uses our custom param type (the
mkdir test) still fails as expected.
We may still want to test our failure modes around invalid UTF-8
bytes like this, but for the purposes of being click 7.1 compatible,
just remove the failing test.
# -*- coding: utf8 -*-
import json
from random import getrandbits
import pytest
import six
from tests.constants import GO_EP1_ID
from tests.utils import on_windows
@pytest.fixture
def dir_operations(run_line):
    """
    Given an input directory name, makes the directory to test inputs,
    ls's the directory to test outputs and output formats,
    attempts to remake the directory to test error outputs,
    and finally deletes the directory for cleanup.
    """

    def f(input_name, expected_name=None):
        # mkdir
        # name randomized to prevent collision
        rand = str(getrandbits(128))
        dir_name = input_name + rand
        expected = (expected_name or input_name) + rand
        # if given a unicode name, run a unicode string
        if isinstance(input_name, six.text_type):
            make_output = run_line(
                u"globus mkdir {}:~/{}".format(GO_EP1_ID, dir_name)
            ).output
        # if given a byte string name, run a byte string
        # (bytes.format only exists on Python 2, so this branch is py2-only)
        else:
            make_output = run_line(
                b"globus mkdir {}:~/{}".format(GO_EP1_ID, dir_name)
            ).output
        assert "The directory was created successfully" in make_output
        # confirm the dir can be seen. Confirms simple, long, verbose,
        # and json output all handle the encoding.
        ls_output = run_line(u"globus ls {}:~/".format(GO_EP1_ID)).output
        assert expected in ls_output
        long_output = run_line(u"globus ls -l {}:~/".format(GO_EP1_ID)).output
        assert expected in long_output
        assert "Filename" in long_output
        json_output = json.loads(
            run_line(u"globus ls -F json {}:~/".format(GO_EP1_ID)).output
        )
        assert expected in [i["name"] for i in json_output["DATA"]]
        # attempt to make the dir again to test error output:
        # if given a unicode name, run a unicode string
        if isinstance(input_name, six.text_type):
            make2_output = run_line(
                u"globus mkdir {}:~/{}".format(GO_EP1_ID, dir_name), assert_exit_code=1
            ).output
        # if given a byte string name, run a byte string
        else:
            make2_output = run_line(
                b"globus mkdir {}:~/{}".format(GO_EP1_ID, dir_name), assert_exit_code=1
            ).output
        assert "Path already exists" in make2_output
        assert expected in make2_output
        # delete for cleanup:
        # if given a unicode name, run a unicode string
        if isinstance(input_name, six.text_type):
            delete_output = run_line(
                u"globus delete -r {}:~/{}".format(GO_EP1_ID, dir_name)
            ).output
        # if given a byte string name, run a byte string
        else:
            delete_output = run_line(
                b"globus delete -r {}:~/{}".format(GO_EP1_ID, dir_name)
            ).output
        assert "The delete has been accepted" in delete_output

    return f
@pytest.fixture
def ep_operations(run_line):
    """
    Given an input_name, creates, updates, gets, and deletes an endpoint
    using the input_name as a display_name. If an expected_name is given,
    confirms output matches that name rather than the input_name.
    """

    def f(input_name, expected_name=None):
        # create
        # if given a unicode name, run a unicode string
        if isinstance(input_name, six.text_type):
            create_output = json.loads(
                run_line(
                    u"globus endpoint create -F json --server {}".format(input_name)
                ).output
            )
        # if given a byte string name, run a byte string
        else:
            create_output = json.loads(
                run_line(
                    b"globus endpoint create -F json --server {}".format(input_name)
                ).output
            )
        # FIX: this assertion was accidentally duplicated on two
        # consecutive lines; assert it once
        assert create_output["code"] == "Created"
        ep_id = create_output["id"]
        # confirm endpoint show sees ep
        show_output = run_line("globus endpoint show {}".format(ep_id)).output
        assert (expected_name or input_name) in show_output
        # update
        # if given a unicode name, run a unicode string
        if isinstance(input_name, six.text_type):
            update_output = run_line(
                u"globus endpoint update {} --description {}".format(ep_id, input_name)
            ).output
        # if given a byte string name, run a byte string
        else:
            update_output = run_line(
                b"globus endpoint update {} --description {}".format(ep_id, input_name)
            ).output
        assert "updated successfully" in update_output
        # confirm show sees updated description
        show_output = json.loads(
            run_line("globus endpoint show {} -F json".format(ep_id)).output
        )
        assert (expected_name or input_name) == show_output["description"]
        # delete
        delete_output = run_line("globus endpoint delete {}".format(ep_id)).output
        assert "deleted successfully" in delete_output

    return f
def test_quote_escaping(dir_operations, ep_operations):
"""
Tests operations with an escaped quote inside quotes that should be
seen as one literal quote character by the shell
"""
name = r'"\""'
dir_operations(name, expected_name='"')
ep_operations(name, expected_name='"')
def test_ascii_url_encoding(dir_operations, ep_operations):
"""
Tests operations with an ASCII name that includes ' ' and '%"
characters that will need to be encoded for use in a url.
"""
name = '"a% b"'
dir_operations(name, expected_name="a% b")
ep_operations(name, expected_name="a% b")
@pytest.mark.skipif(
    six.PY2 and on_windows(), reason="python2 Windows console issues (FIXME?)"
)
def test_non_ascii_utf8(dir_operations, ep_operations):
    """
    Tests operations with a UTF-8 name containing non ASCII characters with
    code points requiring multiple bytes.
    """
    multibyte_name = u"テスト"
    for operation in (dir_operations, ep_operations):
        operation(multibyte_name)
@pytest.mark.skipif(six.PY3, reason="test run with Python 3")
@pytest.mark.skipif(
    six.PY2 and on_windows(), reason="python2 Windows console issues (FIXME?)"
)
def test_non_ascii_utf8_bytes(dir_operations, ep_operations):
    """
    Tests operations with a byte string encoded from non ASCII UTF-8.
    This test is only run on Python 2 as bytes are not strings in Python 3.
    """
    unicode_name = u"テスト"
    encoded_name = unicode_name.encode("utf8")
    # we expect the unicode name back since the API returns unicode strings
    for operation in (dir_operations, ep_operations):
        operation(encoded_name, expected_name=unicode_name)
@pytest.mark.skipif(
    six.PY2 and on_windows(), reason="python2 Windows console issues (FIXME?)"
)
def test_latin1(dir_operations, ep_operations):
    """Exercise operations on a latin-1 name that is not valid UTF-8."""
    # latin-1 encodes 'é' as 0xe9, which is a bare continuation byte in UTF-8
    raw_byte = b"\xe9"
    decoded_name = raw_byte.decode("latin-1")
    # sanity check: the byte really is invalid as UTF-8
    with pytest.raises(UnicodeDecodeError):
        raw_byte.decode("utf-8")
    for operation in (dir_operations, ep_operations):
        operation(decoded_name)
@pytest.mark.skipif(six.PY3, reason="test run with Python 3")
@pytest.mark.skipif(
    six.PY2 and on_windows(), reason="python2 Windows console issues (FIXME?)"
)
def test_invalid_utf8_bytes(run_line):
    r"""Exercise a byte string that decodes as latin-1 but not as UTF-8.

    The SDK/APIs cannot decode non-UTF-8 input, so the command must fail
    with a UnicodeDecodeError. Python 2 only: under Python 3 bytes are not
    strings. An equivalent command-line invocation uses a `printf` subshell:
        globus mkdir "${GO_EP_1}:~/$(printf "\xe9")"
    """
    # latin-1 encodes 'é' as 0xe9, which is invalid as UTF-8
    invalid_bytes = b"\xe9"
    result = run_line(
        "globus mkdir {}:~/{}".format(GO_EP1_ID, invalid_bytes), assert_exit_code=1
    ).output
    assert "UnicodeDecodeError" in result
|
import sys
#usage:
### first create connected components from disco (-A option)
#sh from_phased_alleles_to_clusters.sh phased_alleles_read_set_id_1.txt # creates file connected_components_phased_alleles_read_set_id_1.txt
### them from the .fa file, the id of the set your interested in (e.g. 1 for phased_alleles_read_set_id_1.txt, this will correspond to C1 coverage in the fa file), the file containing the connected components, and the phased_alleles_read_set_id_X.txt file, generate the fact file
#python format_phased_variants_for_haplotyping.py mapping_k_31_c_auto_D_100_P_10_b_0_coherent.fa 1 connected_components_phased_alleles_read_set_id_1.txt phased_alleles_read_set_id_1.txt > phased_alles_read_set_1_facts.txt

# Command-line handling: exactly four positional arguments are required.
if not len(sys.argv)==5:
    print ("usage: python format_phased_variants_for_haplotyping.py <file coherent.fa> <id number><connected_component_file><phased_allele_file>")
    print (" * coherent.fa file: the file generated by discoSnp")
    print (" * id number is the id of the read set, for which variants are phased. With i, this corresponds to Ci in the .fa file headers.")
    print (" * connected_component_file: file obtained from \"from_phased_alleles_to_clusters.sh phased_alleles_read_set_id_1.txt\" continaing connected component of phased alleles")
    print (" * phased_alleles_read_set_id_1.txt: file generated by discoSnp (with the hidden -A option. The value 1 here shoud correspond to \"id number\"")
    sys.exit(0)

# Input handles; closed implicitly at interpreter exit.
coherent_fa_file = open(sys.argv[1])     # discoSnp coherent .fa output
set_id = sys.argv[2]                     # read-set id; matches the Ci field in .fa headers
cc_file = open(sys.argv[3])              # connected components of phased alleles
phased_alleles_file = open(sys.argv[4])  # phased alleles (discoSnp -A output)
def store_abundances(coherent_fa_file, set_id):
    """Map each variant id (e.g. '991h') to its coverage in read set `set_id`.

    Scans .fa headers such as
    >SNP_higher_path_991|P_1:30_C/G|high|nb_pol_1|C1_38|C2_0|...
    and records, for every header, the coverage value from the C<set_id>
    field. The field position is located once on the first header; the
    program exits if no matching C field exists.
    """
    coverages = {}
    coverage_field = -1
    field_located = False
    for raw in coherent_fa_file:
        if raw[0] != '>':
            continue
        fields = raw.rstrip().split('|')
        # variant id = trailing number of the path name, plus the first
        # letter of 'higher'/'lower' (e.g. '991' + 'h')
        variant_id = fields[0].split('_')[-1] + fields[0].split('_')[1][0]
        if not field_located:
            # find the C<set_id>_<coverage> column, once
            for coverage_field in range(len(fields)):
                if fields[coverage_field][0] == 'C':
                    if fields[coverage_field][1:].split('_')[0] == set_id:
                        field_located = True
                        break
            if not field_located:
                print ("Set id", set_id, "not findable in header like ", raw.rstrip())
                print ("ciao")
                sys.exit(0)
        coverages[variant_id] = fields[coverage_field].split('_')[1]
    return coverages
def store_cc(cc_file):
    """Map each variant id to the index of its connected component.

    Each input line lists the ids of one connected component, separated by
    whitespace; the 0-based line number serves as the component index. A
    duplicated id is reported, but still reassigned to the later component.
    """
    component_of = {}
    for index, raw in enumerate(cc_file):
        for identifier in raw.rstrip().split():
            if identifier in component_of:
                # duplicate: warn, then overwrite with the latest component
                print("ERROR, idf is in more than one connected component")
            component_of[identifier] = index
    return component_of
def store_phased_alleles(phased_alleles_file):
    """Aggregate phased allele lists and their abundances.

    Input lines look like '-1187h;1001h;2178h; => 5': a ';'-terminated id
    list, then the abundance as the last space-separated token. A single
    leading '-' is stripped from each id, the list is canonicalized so the
    numerically smallest id comes first, and abundances of identical lists
    are summed. Lines starting with '#' are skipped.
    """
    aggregated = {}
    for raw in phased_alleles_file:
        raw = raw.rstrip()
        if raw[0] == '#':
            continue
        tokens = raw.split(' ')
        abundance = int(tokens[-1])
        # drop a single leading strand sign from each id
        canonical = [aid[1:] if aid[0] == '-' else aid for aid in tokens[0].split(';')[:-1]]
        # canonical orientation: numerically smallest id first
        if int(canonical[0][:-1]) > int(canonical[-1][:-1]):
            canonical.reverse()
        key = ''.join(aid + ';' for aid in canonical)
        aggregated[key] = aggregated.get(key, 0) + abundance
    return aggregated
def print_djack_formated_phased_variants(coverages, cc, phased_alleles):
    """Emit snp/fact/count facts for the haplotyper on stdout.

    * one 'snp(ccC,id,path,coverage).' line per variant belonging to a
      connected component;
    * for each phased allele list, one 'fact(ccC,i,id,path).' line per
      variant plus one 'count(i,abundance).' line.
    Exits the program if a list mixes variants from different components.
    """
    for aid in coverages:
        if aid[:-1] in cc:
            print("snp(cc"+str(cc[aid[:-1]])+","+aid[:-1]+","+aid[-1]+","+str(coverages[aid])+").")
    for i, list_as_string in enumerate(phased_alleles):  # '2686l;4324h;5375h;': 3
        # get the CC:
        ids = list_as_string.split(';')[:-1]
        abundance = phased_alleles[list_as_string]
        # BUGFIX: a phased list may reference variants absent from the
        # connected-components file; skip it instead of raising KeyError.
        if any(v[:-1] not in cc for v in ids):
            continue
        this_cc = cc[ids[0][:-1]]
        for j in range(1, len(ids)):
            if cc[ids[j][:-1]] != this_cc:
                print("impossible all variants from ",list_as_string, "are not in the same CC")
                sys.exit(0)
        for aid in ids:
            print("fact(cc"+str(this_cc)+","+str(i)+","+aid[:-1]+","+aid[-1]+").")
        print("count("+str(i)+","+str(abundance)+").")
# Driver: parse the three inputs, then emit the fact file on stdout.
coverages=store_abundances(coherent_fa_file,set_id)
cc=store_cc(cc_file)
phased_alleles=store_phased_alleles(phased_alleles_file)
print_djack_formated_phased_variants(coverages,cc,phased_alleles)
Bug fix in format_phased_variants_for_haplotyping.py: skip phased allele lists whose variants do not appear in the connected-components file, instead of crashing with a KeyError.
import sys
#usage:
### first create connected components from disco (-A option)
#sh from_phased_alleles_to_clusters.sh phased_alleles_read_set_id_1.txt # creates file connected_components_phased_alleles_read_set_id_1.txt
### them from the .fa file, the id of the set your interested in (e.g. 1 for phased_alleles_read_set_id_1.txt, this will correspond to C1 coverage in the fa file), the file containing the connected components, and the phased_alleles_read_set_id_X.txt file, generate the fact file
#python format_phased_variants_for_haplotyping.py mapping_k_31_c_auto_D_100_P_10_b_0_coherent.fa 1 connected_components_phased_alleles_read_set_id_1.txt phased_alleles_read_set_id_1.txt > phased_alles_read_set_1_facts.txt

# Command-line handling: exactly four positional arguments are required.
if not len(sys.argv)==5:
    print ("usage: python format_phased_variants_for_haplotyping.py <file coherent.fa> <id number><connected_component_file><phased_allele_file>")
    print (" * coherent.fa file: the file generated by discoSnp")
    print (" * id number is the id of the read set, for which variants are phased. With i, this corresponds to Ci in the .fa file headers.")
    print (" * connected_component_file: file obtained from \"from_phased_alleles_to_clusters.sh phased_alleles_read_set_id_1.txt\" continaing connected component of phased alleles")
    print (" * phased_alleles_read_set_id_1.txt: file generated by discoSnp (with the hidden -A option. The value 1 here shoud correspond to \"id number\"")
    sys.exit(0)

# Input handles; closed implicitly at interpreter exit.
coherent_fa_file = open(sys.argv[1])     # discoSnp coherent .fa output
set_id = sys.argv[2]                     # read-set id; matches the Ci field in .fa headers
cc_file = open(sys.argv[3])              # connected components of phased alleles
phased_alleles_file = open(sys.argv[4])  # phased alleles (discoSnp -A output)
def store_abundances(coherent_fa_file, set_id):
    """Map each variant id (e.g. '991h') to its coverage in read set `set_id`.

    Scans .fa headers such as
    >SNP_higher_path_991|P_1:30_C/G|high|nb_pol_1|C1_38|C2_0|...
    and records, for every header, the coverage value from the C<set_id>
    field. The field position is located once on the first header; the
    program exits if no matching C field exists.
    """
    coverages = {}
    coverage_field = -1
    field_located = False
    for raw in coherent_fa_file:
        if raw[0] != '>':
            continue
        fields = raw.rstrip().split('|')
        # variant id = trailing number of the path name, plus the first
        # letter of 'higher'/'lower' (e.g. '991' + 'h')
        variant_id = fields[0].split('_')[-1] + fields[0].split('_')[1][0]
        if not field_located:
            # find the C<set_id>_<coverage> column, once
            for coverage_field in range(len(fields)):
                if fields[coverage_field][0] == 'C':
                    if fields[coverage_field][1:].split('_')[0] == set_id:
                        field_located = True
                        break
            if not field_located:
                print ("Set id", set_id, "not findable in header like ", raw.rstrip())
                print ("ciao")
                sys.exit(0)
        coverages[variant_id] = fields[coverage_field].split('_')[1]
    return coverages
def store_cc(cc_file):
    """Map each variant id to the index of its connected component.

    Each input line lists the ids of one connected component, separated by
    whitespace; the 0-based line number serves as the component index. A
    duplicated id is reported, but still reassigned to the later component.
    """
    component_of = {}
    for index, raw in enumerate(cc_file):
        for identifier in raw.rstrip().split():
            if identifier in component_of:
                # duplicate: warn, then overwrite with the latest component
                print("ERROR, idf is in more than one connected component")
            component_of[identifier] = index
    return component_of
def store_phased_alleles(phased_alleles_file):
    """Aggregate phased allele lists and their abundances.

    Input lines look like '-1187h;1001h;2178h; => 5': a ';'-terminated id
    list, then the abundance as the last space-separated token. A single
    leading '-' is stripped from each id, the list is canonicalized so the
    numerically smallest id comes first, and abundances of identical lists
    are summed. Lines starting with '#' are skipped.
    """
    aggregated = {}
    for raw in phased_alleles_file:
        raw = raw.rstrip()
        if raw[0] == '#':
            continue
        tokens = raw.split(' ')
        abundance = int(tokens[-1])
        # drop a single leading strand sign from each id
        canonical = [aid[1:] if aid[0] == '-' else aid for aid in tokens[0].split(';')[:-1]]
        # canonical orientation: numerically smallest id first
        if int(canonical[0][:-1]) > int(canonical[-1][:-1]):
            canonical.reverse()
        key = ''.join(aid + ';' for aid in canonical)
        aggregated[key] = aggregated.get(key, 0) + abundance
    return aggregated
def print_djack_formated_phased_variants(coverages, cc, phased_alleles):
    """Emit snp/fact/count facts for the haplotyper on stdout.

    * one 'snp(ccC,id,path,coverage).' line per variant belonging to a
      connected component;
    * for each phased allele list, one 'fact(ccC,i,id,path).' line per
      variant plus one 'count(i,abundance).' line.
    Exits the program if a list mixes variants from different components.
    """
    for aid in coverages:
        if aid[:-1] in cc:
            print("snp(cc"+str(cc[aid[:-1]])+","+aid[:-1]+","+aid[-1]+","+str(coverages[aid])+").")
    for i, list_as_string in enumerate(phased_alleles):  # '2686l;4324h;5375h;': 3
        # get the CC:
        ids = list_as_string.split(';')[:-1]
        abundance = phased_alleles[list_as_string]
        # BUGFIX: a stray debug print of (list_as_string, abundance) was
        # removed here -- it corrupted the fact file this script writes to
        # stdout. The guard below also covers every id, not only the first,
        # so a list with any unknown variant is skipped rather than raising
        # KeyError further down.
        if any(v[:-1] not in cc for v in ids):
            continue
        this_cc = cc[ids[0][:-1]]
        for j in range(1, len(ids)):
            if cc[ids[j][:-1]] != this_cc:
                print("impossible all variants from ",list_as_string, "are not in the same CC")
                sys.exit(0)
        for aid in ids:
            print("fact(cc"+str(this_cc)+","+str(i)+","+aid[:-1]+","+aid[-1]+").")
        print("count("+str(i)+","+str(abundance)+").")
# Driver: parse the three inputs, then emit the fact file on stdout.
coverages=store_abundances(coherent_fa_file,set_id)
cc=store_cc(cc_file)
phased_alleles=store_phased_alleles(phased_alleles_file)
print_djack_formated_phased_variants(coverages,cc,phased_alleles)
|
"Parses and creates Grammar objects"
import os.path
import sys
from itertools import chain
import re
from ast import literal_eval
from copy import deepcopy
from .lexer import Token
from .parse_tree_builder import ParseTreeBuilder
from .parser_frontends import LALR_TraditionalLexer
from .common import LexerConf, ParserConf, PatternStr, PatternRE, TokenDef
from .grammar import RuleOptions, Rule, Terminal, NonTerminal, Symbol
from .utils import classify, suppress
from .exceptions import GrammarError, UnexpectedCharacters, UnexpectedToken
from .tree import Tree, SlottedTree as ST
from .visitors import Transformer, Visitor, v_args, Transformer_InPlace
# Decorator shorthand: pass a node's children as individual call arguments.
inline_args = v_args(inline=True)

# Bundled grammars (for %import from the library) live next to this module.
__path__ = os.path.dirname(__file__)
IMPORT_PATHS = [os.path.join(__path__, 'grammars')]

EXT = '.lark'          # file extension of imported grammar files
_RE_FLAGS = 'imslux'   # regexp flags accepted after a /.../ literal
def is_terminal(sym):
    """Terminal names are all-uppercase by convention; rule names are lowercase."""
    return sym.isupper()
# Human-readable names assigned to anonymous single-character terminals
# (used by PrepareAnonTerminals before falling back to __ANON_<n>).
_TERMINAL_NAMES = {
    '.' : 'DOT',
    ',' : 'COMMA',
    ':' : 'COLON',
    ';' : 'SEMICOLON',
    '+' : 'PLUS',
    '-' : 'MINUS',
    '*' : 'STAR',
    '/' : 'SLASH',
    '\\' : 'BACKSLASH',
    '|' : 'VBAR',
    '?' : 'QMARK',
    '!' : 'BANG',
    '@' : 'AT',
    '#' : 'HASH',
    '$' : 'DOLLAR',
    '%' : 'PERCENT',
    '^' : 'CIRCUMFLEX',
    '&' : 'AMPERSAND',
    '_' : 'UNDERSCORE',
    '<' : 'LESSTHAN',
    '>' : 'MORETHAN',
    '=' : 'EQUAL',
    '"' : 'DBLQUOTE',
    '\'' : 'QUOTE',
    '`' : 'BACKQUOTE',
    '~' : 'TILDE',
    '(' : 'LPAR',
    ')' : 'RPAR',
    '{' : 'LBRACE',
    '}' : 'RBRACE',
    '[' : 'LSQB',
    ']' : 'RSQB',
    '\n' : 'NEWLINE',
    '\r\n' : 'CRLF',
    '\t' : 'TAB',
    ' ' : 'SPACE',
}
# Grammar Parser
# Terminal definitions for lark's own grammar syntax (the "grammar of grammars").
TERMINALS = {
    '_LPAR': r'\(',
    '_RPAR': r'\)',
    '_LBRA': r'\[',
    '_RBRA': r'\]',
    'OP': '[+*][?]?|[?](?![a-z])',
    '_COLON': ':',
    '_COMMA': ',',
    '_OR': r'\|',
    '_DOT': r'\.',
    'TILDE': '~',
    'RULE': '!?[_?]?[a-z][_a-z0-9]*',
    'TERMINAL': '_?[A-Z][_A-Z0-9]*',
    'STRING': r'"(\\"|\\\\|[^"\n])*?"i?',
    'REGEXP': r'/(?!/)(\\/|\\\\|[^/\n])*?/[%s]*' % _RE_FLAGS,
    '_NL': r'(\r?\n)+\s*',
    'WS': r'[ \t]+',
    'COMMENT': r'//[^\n]*',
    '_TO': '->',
    '_IGNORE': r'%ignore',
    '_DECLARE': r'%declare',
    '_IMPORT': r'%import',
    'NUMBER': r'\d+',
}
# Rule definitions for lark's own grammar syntax, in the shorthand form
# consumed by GrammarLoader.__init__ (name -> list of expansions).
RULES = {
    'start': ['_list'],
    '_list':  ['_item', '_list _item'],
    '_item':  ['rule', 'token', 'statement', '_NL'],

    'rule': ['RULE _COLON expansions _NL',
             'RULE _DOT NUMBER _COLON expansions _NL'],
    'expansions': ['alias',
                   'expansions _OR alias',
                   'expansions _NL _OR alias'],

    '?alias':     ['expansion _TO RULE', 'expansion'],
    'expansion':  ['_expansion'],

    '_expansion': ['', '_expansion expr'],

    '?expr': ['atom',
              'atom OP',
              'atom TILDE NUMBER',
              'atom TILDE NUMBER _DOT _DOT NUMBER',
              ],

    '?atom': ['_LPAR expansions _RPAR',
              'maybe',
              'value'],

    'value': ['terminal',
              'nonterminal',
              'literal',
              'range'],

    'terminal': ['TERMINAL'],
    'nonterminal': ['RULE'],

    '?name': ['RULE', 'TERMINAL'],

    'maybe': ['_LBRA expansions _RBRA'],
    'range': ['STRING _DOT _DOT STRING'],

    'token': ['TERMINAL _COLON expansions _NL',
              'TERMINAL _DOT NUMBER _COLON expansions _NL'],
    'statement': ['ignore', 'import', 'declare'],
    'ignore': ['_IGNORE expansions _NL'],
    'declare': ['_DECLARE _declare_args _NL'],
    'import': ['_IMPORT _import_path _NL',
               '_IMPORT _import_path _LPAR name_list _RPAR _NL',
               '_IMPORT _import_path _TO TERMINAL _NL'],

    '_import_path': ['import_lib', 'import_rel'],
    'import_lib': ['_import_args'],
    'import_rel': ['_DOT _import_args'],
    '_import_args': ['name', '_import_args _DOT name'],

    'name_list': ['_name_list'],
    '_name_list': ['name', '_name_list _COMMA name'],

    '_declare_args': ['name', '_declare_args name'],
    'literal': ['REGEXP', 'STRING'],
}
@inline_args
class EBNF_to_BNF(Transformer_InPlace):
    """Rewrite EBNF operators (?, +, *, ~n, ~n..m) into plain BNF rules."""

    def __init__(self):
        self.new_rules = []
        self.rules_by_expr = {}
        self.prefix = 'anon'
        self.i = 0
        self.rule_options = None

    def _add_recurse_rule(self, type_, expr):
        # Reuse the generated recursive rule if we already made one for
        # this exact expression.
        if expr in self.rules_by_expr:
            return self.rules_by_expr[expr]

        rule_name = '__%s_%s_%d' % (self.prefix, type_, self.i)
        self.i += 1
        placeholder = NonTerminal(rule_name)
        # rule_name : expr | rule_name expr    (left recursion)
        recursion = ST('expansions', [ST('expansion', [expr]),
                                      ST('expansion', [placeholder, expr])])
        self.new_rules.append((rule_name, recursion, self.rule_options))
        self.rules_by_expr[expr] = placeholder
        return placeholder

    def expr(self, rule, op, *args):
        if op.value == '?':
            # x? ==> (x | <empty>)
            return ST('expansions', [rule, ST('expansion', [])])
        if op.value == '+':
            # a : b c+ d  ==>  a : b _c d  with  _c : _c c | c;
            return self._add_recurse_rule('plus', rule)
        if op.value == '*':
            # a : b c* d  ==>  a : b _c? d  with  _c : _c c | c;
            star_rule = self._add_recurse_rule('star', rule)
            return ST('expansions', [star_rule, ST('expansion', [])])
        if op.value == '~':
            # x~n or x~n..m ==> explicit alternatives of n..m repetitions
            if len(args) == 1:
                mn = mx = int(args[0])
            else:
                mn, mx = map(int, args)
                if mx < mn:
                    raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (rule, mn, mx))
            return ST('expansions', [ST('expansion', [rule] * n) for n in range(mn, mx+1)])
        assert False, op
class SimplifyRule_Visitor(Visitor):
    """In-place simplification of rule trees: flattening, alternative
    unpacking and alias distribution."""

    @staticmethod
    def _flatten(tree):
        # Splice children whose node type equals the parent's into the
        # parent, repeating until no such child remains.
        while True:
            to_expand = [i for i, child in enumerate(tree.children)
                         if isinstance(child, Tree) and child.data == tree.data]
            if not to_expand:
                break
            tree.expand_kids_by_index(*to_expand)

    def expansion(self, tree):
        # rules_list unpacking
        # a : b (c|d) e
        #  -->
        # a : b c e | b d e
        #
        # In AST terms:
        #   expansion(b, expansions(c, d), e)
        #    -->
        #   expansions( expansion(b, c, e), expansion(b, d, e) )
        self._flatten(tree)
        for i, child in enumerate(tree.children):
            if isinstance(child, Tree) and child.data == 'expansions':
                # Distribute the surrounding symbols over every alternative.
                tree.data = 'expansions'
                tree.children = [self.visit(ST('expansion', [option if i==j else other
                                                             for j, other in enumerate(tree.children)]))
                                 for option in set(child.children)]
                self._flatten(tree)
                break

    def alias(self, tree):
        rule, alias_name = tree.children
        if rule.data == 'expansions':
            # Push the alias down onto each alternative.
            aliases = []
            for child in tree.children[0].children:
                aliases.append(ST('alias', [child, alias_name]))
            tree.data = 'expansions'
            tree.children = aliases

    def expansions(self, tree):
        self._flatten(tree)
        # Deduplicate identical alternatives (set() discards ordering).
        tree.children = list(set(tree.children))
class RuleTreeToText(Transformer):
    """Flatten a simplified rule tree into (symbol-list, alias) pairs."""

    def expansions(self, children):
        return children

    def expansion(self, symbols):
        # no alias attached at this point
        return symbols, None

    def alias(self, children):
        (expansion, existing_alias), alias_token = children
        # Double alias not allowed
        assert existing_alias is None, (alias_token, expansion, '-', existing_alias)
        return expansion, alias_token.value
@inline_args
class CanonizeTree(Transformer_InPlace):
    """Normalize shorthand constructs in the raw grammar parse tree."""

    def maybe(self, expr):
        # [x] is canonicalized into x? (an 'expr' node carrying a '?' operator)
        return ST('expr', [expr, Token('OP', '?', -1)])

    def tokenmods(self, *args):
        if len(args) == 1:
            return list(args)
        mods, value = args
        return mods + [value]
class PrepareAnonTerminals(Transformer_InPlace):
    "Create a unique list of anonymous tokens. Attempt to give meaningful names to them when we add them"

    def __init__(self, tokens):
        self.tokens = tokens
        self.token_set = {td.name for td in self.tokens}
        self.token_reverse = {td.pattern: td for td in tokens}
        self.i = 0

    @inline_args
    def pattern(self, p):
        value = p.value
        if p in self.token_reverse and p.flags != self.token_reverse[p].pattern.flags:
            raise GrammarError(u'Conflicting flags for the same terminal: %s' % p)

        token_name = None
        if isinstance(p, PatternStr):
            try:
                # If already defined, use the user-defined token name
                token_name = self.token_reverse[p].name
            except KeyError:
                # Try to assign an indicative anon-token name
                try:
                    token_name = _TERMINAL_NAMES[value]
                except KeyError:
                    if value.isalnum() and value[0].isalpha() and value.upper() not in self.token_set:
                        with suppress(UnicodeEncodeError):
                            value.upper().encode('ascii')   # Make sure we don't have unicode in our token names
                            token_name = value.upper()
                # BUGFIX (issue #224): an automatically chosen name may collide
                # with a terminal the user already defined with a *different*
                # pattern; fall back to an anonymous name instead of reusing it.
                if token_name in self.token_set:
                    token_name = None
        elif isinstance(p, PatternRE):
            if p in self.token_reverse:  # reuse the user-defined name for this regexp
                token_name = self.token_reverse[p].name
        else:
            assert False, p

        if token_name is None:
            token_name = '__ANON_%d' % self.i
            self.i += 1

        if token_name not in self.token_set:
            assert p not in self.token_reverse
            self.token_set.add(token_name)
            tokendef = TokenDef(token_name, p)
            self.token_reverse[p] = tokendef
            self.tokens.append(tokendef)

        # String terminals are filtered out of the parse tree by default.
        return Terminal(token_name, filter_out=isinstance(p, PatternStr))
def _rfind(s, choices):
return max(s.rfind(c) for c in choices)
def _fix_escaping(s):
w = ''
i = iter(s)
for n in i:
w += n
if n == '\\':
n2 = next(i)
if n2 == '\\':
w += '\\\\'
elif n2 not in 'unftr':
w += '\\'
w += n2
w = w.replace('\\"', '"').replace("'", "\\'")
to_eval = "u'''%s'''" % w
try:
s = literal_eval(to_eval)
except SyntaxError as e:
raise ValueError(s, e)
return s
def _literal_to_pattern(literal):
    """Convert a STRING/REGEXP token into a PatternStr/PatternRE object."""
    v = literal.value
    # any regexp flags trail the closing quote/slash
    flag_start = _rfind(v, '/"') + 1
    assert flag_start > 0
    flags = v[flag_start:]
    assert all(f in _RE_FLAGS for f in flags), flags

    v = v[:flag_start]
    assert v[0] == v[-1] and v[0] in '"/'
    content = _fix_escaping(v[1:-1])

    if literal.type == 'STRING':
        content = content.replace('\\\\', '\\')

    pattern_cls = {'STRING': PatternStr, 'REGEXP': PatternRE}[literal.type]
    return pattern_cls(content, flags)
@inline_args
class PrepareLiterals(Transformer_InPlace):
    """Turn literal and range nodes into 'pattern' nodes."""

    def literal(self, literal):
        return ST('pattern', [_literal_to_pattern(literal)])

    def range(self, start, end):
        # "a".."z" becomes the character-class regexp [a-z]
        assert start.type == end.type == 'STRING'
        first = start.value[1:-1]
        last = end.value[1:-1]
        assert len(first) == len(last) == 1, (first, last, len(first), len(last))
        return ST('pattern', [PatternRE('[%s-%s]' % (first, last))])
class TokenTreeToPattern(Transformer):
    """Collapse a terminal's parse tree into a single Pattern object."""

    def pattern(self, ps):
        p ,= ps
        return p

    def expansion(self, items):
        assert items
        if len(items) == 1:
            return items[0]
        if len({i.flags for i in items}) > 1:
            raise GrammarError("Lark doesn't support joining tokens with conflicting flags!")
        joined = ''.join(i.to_regexp() for i in items)
        return PatternRE(joined, items[0].flags if items else ())

    def expansions(self, exps):
        if len(exps) == 1:
            return exps[0]
        if len({i.flags for i in exps}) > 1:
            raise GrammarError("Lark doesn't support joining tokens with conflicting flags!")
        alternatives = '|'.join(i.to_regexp() for i in exps)
        return PatternRE('(?:%s)' % (alternatives), exps[0].flags)

    def expr(self, args):
        inner, op = args[:2]
        if op == '~':
            # repetition operator: x~n or x~n..m
            if len(args) == 3:
                op = "{%d}" % int(args[2])
            else:
                mn, mx = map(int, args[2:])
                if mx < mn:
                    raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (inner, mn, mx))
                op = "{%d,%d}" % (mn, mx)
        else:
            assert len(args) == 2
        return PatternRE('(?:%s)%s' % (inner.to_regexp(), op), inner.flags)

    def alias(self, t):
        raise GrammarError("Aliasing not allowed in terminals (You used -> in the wrong place)")

    def value(self, v):
        return v[0]
class PrepareSymbols(Transformer_InPlace):
    """Replace RULE/TERMINAL tokens with NonTerminal/Terminal objects."""

    def value(self, v):
        v ,= v
        if isinstance(v, Tree):
            return v
        if v.type == 'RULE':
            return NonTerminal(v.value)
        if v.type == 'TERMINAL':
            # terminals starting with '_' are filtered out of the parse tree
            return Terminal(v.value, filter_out=v.startswith('_'))
        assert False
def _choice_of_rules(rules):
    """Build an 'expansions' tree offering each rule name as an alternative."""
    alternatives = [ST('expansion', [Token('RULE', name)]) for name in rules]
    return ST('expansions', alternatives)
class Grammar:
    """A parsed grammar: token trees, rule trees and the ignore list.

    compile() turns the trees into TokenDef and Rule objects ready for the
    parser frontends.
    """

    def __init__(self, rule_defs, token_defs, ignore):
        self.token_defs = token_defs
        self.rule_defs = rule_defs
        self.ignore = ignore

    def compile(self):
        # We change the trees in-place (to support huge grammars)
        # So deepcopy allows calling compile more than once.
        token_defs = deepcopy(list(self.token_defs))
        rule_defs = deepcopy(self.rule_defs)

        # =================
        #  Compile Tokens
        # =================

        # Convert token-trees to strings/regexps
        transformer = PrepareLiterals() * TokenTreeToPattern()
        for name, (token_tree, priority) in token_defs:
            if token_tree is None:  # Terminal added through %declare
                continue
            expansions = list(token_tree.find_data('expansion'))
            if len(expansions) == 1 and not expansions[0].children:
                raise GrammarError("Terminals cannot be empty (%s)" % name)

        tokens = [TokenDef(name, transformer.transform(token_tree), priority)
                  for name, (token_tree, priority) in token_defs if token_tree]

        # =================
        #  Compile Rules
        # =================

        # 1. Pre-process terminals
        transformer = PrepareLiterals() * PrepareSymbols() * PrepareAnonTerminals(tokens)   # Adds to tokens

        # 2. Convert EBNF to BNF (and apply step 1)
        ebnf_to_bnf = EBNF_to_BNF()
        rules = []
        for name, rule_tree, options in rule_defs:
            # keep_all_tokens must propagate into the generated helper rules
            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
            tree = transformer.transform(rule_tree)
            rules.append((name, ebnf_to_bnf.transform(tree), options))
        rules += ebnf_to_bnf.new_rules

        assert len(rules) == len({name for name, _t, _o in rules}), "Whoops, name collision"

        # 3. Compile tree to Rule objects
        rule_tree_to_text = RuleTreeToText()
        simplify_rule = SimplifyRule_Visitor()

        compiled_rules = []
        for name, tree, options in rules:
            simplify_rule.visit(tree)
            expansions = rule_tree_to_text.transform(tree)

            for expansion, alias in expansions:
                if alias and name.startswith('_'):
                    raise GrammarError("Rule %s is marked for expansion (it starts with an underscore) and isn't allowed to have aliases (alias=%s)" % (name, alias))

                assert all(isinstance(x, Symbol) for x in expansion), expansion

                rule = Rule(NonTerminal(name), expansion, alias, options)
                compiled_rules.append(rule)

        return tokens, compiled_rules, self.ignore
# Cache of grammars loaded through %import, keyed by grammar path.
_imported_grammars = {}

def import_grammar(grammar_path, base_paths=None):
    """Load (and cache) a grammar file, searching `base_paths` then IMPORT_PATHS.

    `base_paths` previously defaulted to a mutable `[]`; it now defaults to
    None (same behavior) to avoid the shared-mutable-default pitfall.
    Raises IOError if the grammar cannot be found in any search path.
    """
    if grammar_path not in _imported_grammars:
        import_paths = list(base_paths or ()) + IMPORT_PATHS
        for import_path in import_paths:
            # A missing file in this path is expected: try the next one.
            with suppress(IOError):
                with open(os.path.join(import_path, grammar_path)) as f:
                    text = f.read()
                grammar = load_grammar(text, grammar_path)
                _imported_grammars[grammar_path] = grammar
                break
        else:
            # Not found anywhere: let open() raise a descriptive IOError.
            # Close the handle in the unexpected case that it succeeds
            # (the original leaked it before hitting the assert).
            open(grammar_path).close()
            assert False
    return _imported_grammars[grammar_path]
def resolve_token_references(token_defs):
    """Replace TERMINAL references inside token trees with the referenced
    token's tree, iterating to a fixed point (handles chained references)."""
    # TODO Cycles detection
    # TODO Solve with transitive closure (maybe)

    token_dict = {k:t for k, (t,_p) in token_defs}
    assert len(token_dict) == len(token_defs), "Same name defined twice?"

    while True:
        changed = False
        for name, (token_tree, _p) in token_defs:
            if token_tree is None:  # Terminal added through %declare
                continue
            for exp in token_tree.find_data('value'):
                item ,= exp.children
                if isinstance(item, Token):
                    if item.type == 'RULE':
                        raise GrammarError("Rules aren't allowed inside terminals (%s in %s)" % (item, name))
                    if item.type == 'TERMINAL':
                        # splice the referenced token's tree in place of the name
                        exp.children[0] = token_dict[item]
                        changed = True
        # stop once a full pass makes no substitution
        if not changed:
            break
def options_from_rule(name, *x):
    """Split a rule's name-prefix modifiers and priority into RuleOptions.

    '!' keeps all tokens and '?' marks the rule for expansion; both prefixes
    are stripped from the returned name. Returns (name, expansions, options).
    """
    if len(x) > 1:
        priority_token, expansions = x
        priority = int(priority_token)
    else:
        expansions ,= x
        priority = None

    keep_all_tokens = name.startswith('!')
    name = name.lstrip('!')
    expand1 = name.startswith('?')
    name = name.lstrip('?')

    return name, expansions, RuleOptions(keep_all_tokens, expand1, priority=priority)
def symbols_from_strcase(expansion):
    """Map names to Symbol objects: uppercase -> Terminal, otherwise NonTerminal."""
    symbols = []
    for x in expansion:
        if is_terminal(x):
            symbols.append(Terminal(x, filter_out=x.startswith('_')))
        else:
            symbols.append(NonTerminal(x))
    return symbols
@inline_args
class PrepareGrammar(Transformer_InPlace):
    """Unwrap terminal/nonterminal nodes down to their bare name tokens."""

    def terminal(self, name):
        return name

    def nonterminal(self, name):
        return name
class GrammarLoader:
    """Parses lark's own grammar syntax and builds Grammar objects.

    __init__ bootstraps an LALR parser for the grammar-of-grammars from the
    TERMINALS and RULES tables above; load_grammar() then parses user
    grammar text, executes %import/%ignore/%declare statements and verifies
    that every referenced symbol is defined.
    """

    def __init__(self):
        tokens = [TokenDef(name, PatternRE(value)) for name, value in TERMINALS.items()]

        rules = [options_from_rule(name, x) for name, x in RULES.items()]
        rules = [Rule(NonTerminal(r), symbols_from_strcase(x.split()), None, o) for r, xs, o in rules for x in xs]
        callback = ParseTreeBuilder(rules, ST).create_callback()
        lexer_conf = LexerConf(tokens, ['WS', 'COMMENT'])

        parser_conf = ParserConf(rules, callback, 'start')
        self.parser = LALR_TraditionalLexer(lexer_conf, parser_conf)

        self.canonize_tree = CanonizeTree()

    def load_grammar(self, grammar_text, grammar_name='<?>'):
        "Parse grammar_text, verify, and create Grammar object. Display nice messages on error."

        try:
            tree = self.canonize_tree.transform( self.parser.parse(grammar_text+'\n') )
        except UnexpectedCharacters as e:
            context = e.get_context(grammar_text)
            raise GrammarError("Unexpected input at line %d column %d in %s: \n\n%s" %
                               (e.line, e.column, grammar_name, context))
        except UnexpectedToken as e:
            context = e.get_context(grammar_text)
            # Match the failure against known bad examples to produce a
            # human-friendly error message.
            error = e.match_examples(self.parser.parse, {
                'Unclosed parenthesis': ['a: (\n'],
                'Umatched closing parenthesis': ['a: )\n', 'a: [)\n', 'a: (]\n'],
                'Expecting rule or token definition (missing colon)': ['a\n', 'a->\n', 'A->\n', 'a A\n'],
                'Alias expects lowercase name': ['a: -> "a"\n'],
                'Unexpected colon': ['a::\n', 'a: b:\n', 'a: B:\n', 'a: "a":\n'],
                'Misplaced operator': ['a: b??', 'a: b(?)', 'a:+\n', 'a:?\n', 'a:*\n', 'a:|*\n'],
                'Expecting option ("|") or a new rule or token definition': ['a:a\n()\n'],
                '%import expects a name': ['%import "a"\n'],
                '%ignore expects a value': ['%ignore %import\n'],
            })
            if error:
                raise GrammarError("%s at line %s column %s\n\n%s" % (error, e.line, e.column, context))
            elif 'STRING' in e.expected:
                raise GrammarError("Expecting a value at line %s column %s\n\n%s" % (e.line, e.column, context))
            raise

        tree = PrepareGrammar().transform(tree)

        # Extract grammar items
        defs = classify(tree.children, lambda c: c.data, lambda c: c.children)
        token_defs = defs.pop('token', [])
        rule_defs = defs.pop('rule', [])
        statements = defs.pop('statement', [])
        assert not defs

        # Normalize token defs to (name, (tree, priority)); default priority is 1.
        token_defs = [td if len(td)==3 else (td[0], 1, td[1]) for td in token_defs]
        token_defs = [(name.value, (t, int(p))) for name, p, t in token_defs]

        # Execute statements
        ignore = []
        declared = []  # NOTE(review): collected nowhere below; appears unused
        for (stmt,) in statements:
            if stmt.data == 'ignore':
                t ,= stmt.children
                ignore.append(t)
            elif stmt.data == 'import':
                if len(stmt.children) > 1:
                    path_node, arg1 = stmt.children
                else:
                    path_node ,= stmt.children
                    arg1 = None
                dotted_path = path_node.children
                if isinstance(arg1, Tree):  # Multi import
                    names = arg1.children
                    aliases = names  # Can't have aliased multi import, so all aliases will be the same as names
                else:  # Single import
                    names = [dotted_path[-1]]  # Get name from dotted path
                    aliases = [arg1] if arg1 else names  # Aliases if exist
                    dotted_path = dotted_path[:-1]
                grammar_path = os.path.join(*dotted_path) + EXT
                if path_node.data == 'import_lib':  # Import from library
                    g = import_grammar(grammar_path)
                else:  # Relative import
                    if grammar_name == '<string>':  # Import relative to script file path if grammar is coded in script
                        base_file = os.path.abspath(sys.modules['__main__'].__file__)
                    else:
                        base_file = grammar_name  # Import relative to grammar file path if external grammar file
                    base_path = os.path.split(base_file)[0]
                    g = import_grammar(grammar_path, base_paths=[base_path])
                # Copy the imported terminal definitions under their aliases.
                for name, alias in zip(names, aliases):
                    token_options = dict(g.token_defs)[name]
                    assert isinstance(token_options, tuple) and len(token_options)==2
                    token_defs.append([alias.value, token_options])
            elif stmt.data == 'declare':
                for t in stmt.children:
                    # %declare: terminal exists but has no definition tree
                    token_defs.append([t.value, (None, None)])
            else:
                assert False, stmt

        # Verify correctness 1
        for name, _ in token_defs:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)

        # Handle ignore tokens
        # XXX A slightly hacky solution. Recognition of %ignore TERMINAL as separate comes from the lexer's
        #     inability to handle duplicate tokens (two names, one value)
        ignore_names = []
        for t in ignore:
            # A bare '%ignore TERMINAL' reuses the existing terminal name...
            if t.data=='expansions' and len(t.children) == 1:
                t2 ,= t.children
                if t2.data=='expansion' and len(t2.children) == 1:
                    item ,= t2.children
                    if item.data == 'value':
                        item ,= item.children
                        if isinstance(item, Token) and item.type == 'TERMINAL':
                            ignore_names.append(item.value)
                            continue
            # ...anything else becomes a fresh __IGNORE_<n> terminal.
            name = '__IGNORE_%d'% len(ignore_names)
            ignore_names.append(name)
            token_defs.append((name, (t, 0)))

        # Verify correctness 2
        token_names = set()
        for name, _ in token_defs:
            if name in token_names:
                raise GrammarError("Token '%s' defined more than once" % name)
            token_names.add(name)

        if set(ignore_names) > token_names:
            raise GrammarError("Tokens %s were marked to ignore but were not defined!" % (set(ignore_names) - token_names))

        # Resolve token references
        resolve_token_references(token_defs)

        rules = [options_from_rule(*x) for x in rule_defs]

        rule_names = set()
        for name, _x, _o in rules:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)
            if name in rule_names:
                raise GrammarError("Rule '%s' defined more than once" % name)
            rule_names.add(name)

        # Every symbol used in a rule body must be a defined token or rule.
        for name, expansions, _o in rules:
            used_symbols = {t for x in expansions.find_data('expansion')
                            for t in x.scan_values(lambda t: t.type in ('RULE', 'TERMINAL'))}
            for sym in used_symbols:
                if is_terminal(sym):
                    if sym not in token_names:
                        raise GrammarError("Token '%s' used but not defined (in rule %s)" % (sym, name))
                else:
                    if sym not in rule_names:
                        raise GrammarError("Rule '%s' used but not defined (in rule %s)" % (sym, name))

        # TODO don't include unused tokens, they can only cause trouble!

        return Grammar(rules, token_defs, ignore_names)
# Module-level convenience entry point bound to a shared loader instance.
load_grammar = GrammarLoader().load_grammar
BUGFIX: Automatic terminal names didn't respect existing terminals (Issue #224)
"Parses and creates Grammar objects"
import os.path
import sys
from itertools import chain
import re
from ast import literal_eval
from copy import deepcopy
from .lexer import Token
from .parse_tree_builder import ParseTreeBuilder
from .parser_frontends import LALR_TraditionalLexer
from .common import LexerConf, ParserConf, PatternStr, PatternRE, TokenDef
from .grammar import RuleOptions, Rule, Terminal, NonTerminal, Symbol
from .utils import classify, suppress
from .exceptions import GrammarError, UnexpectedCharacters, UnexpectedToken
from .tree import Tree, SlottedTree as ST
from .visitors import Transformer, Visitor, v_args, Transformer_InPlace
# Decorator shorthand: pass a node's children as individual call arguments.
inline_args = v_args(inline=True)

# Bundled grammars (for %import from the library) live next to this module.
__path__ = os.path.dirname(__file__)
IMPORT_PATHS = [os.path.join(__path__, 'grammars')]

EXT = '.lark'          # file extension of imported grammar files
_RE_FLAGS = 'imslux'   # regexp flags accepted after a /.../ literal
def is_terminal(sym):
    "A symbol names a terminal iff it is written in all-uppercase; rules are lowercase."
    return sym.isupper()
# Friendly names for punctuation terminals: when an anonymous string terminal
# such as "," appears in a grammar, PrepareAnonTerminals consults this table
# to name it (COMMA) instead of falling back to a generated __ANON_n name.
_TERMINAL_NAMES = {
    '.' : 'DOT',
    ',' : 'COMMA',
    ':' : 'COLON',
    ';' : 'SEMICOLON',
    '+' : 'PLUS',
    '-' : 'MINUS',
    '*' : 'STAR',
    '/' : 'SLASH',
    '\\' : 'BACKSLASH',
    '|' : 'VBAR',
    '?' : 'QMARK',
    '!' : 'BANG',
    '@' : 'AT',
    '#' : 'HASH',
    '$' : 'DOLLAR',
    '%' : 'PERCENT',
    '^' : 'CIRCUMFLEX',
    '&' : 'AMPERSAND',
    '_' : 'UNDERSCORE',
    '<' : 'LESSTHAN',
    '>' : 'MORETHAN',
    '=' : 'EQUAL',
    '"' : 'DBLQUOTE',
    '\'' : 'QUOTE',
    '`' : 'BACKQUOTE',
    '~' : 'TILDE',
    '(' : 'LPAR',
    ')' : 'RPAR',
    '{' : 'LBRACE',
    '}' : 'RBRACE',
    '[' : 'LSQB',
    ']' : 'RSQB',
    '\n' : 'NEWLINE',
    '\r\n' : 'CRLF',
    '\t' : 'TAB',
    ' ' : 'SPACE',
}
# Grammar Parser
# Terminal definitions (name -> regexp) of the meta-grammar used by
# GrammarLoader to parse .lark grammar files themselves.  Names starting
# with '_' are filtered out of the resulting parse tree.
TERMINALS = {
    '_LPAR': r'\(',
    '_RPAR': r'\)',
    '_LBRA': r'\[',
    '_RBRA': r'\]',
    'OP': '[+*][?]?|[?](?![a-z])',
    '_COLON': ':',
    '_COMMA': ',',
    '_OR': r'\|',
    '_DOT': r'\.',
    'TILDE': '~',
    'RULE': '!?[_?]?[a-z][_a-z0-9]*',
    'TERMINAL': '_?[A-Z][_A-Z0-9]*',
    'STRING': r'"(\\"|\\\\|[^"\n])*?"i?',
    'REGEXP': r'/(?!/)(\\/|\\\\|[^/\n])*?/[%s]*' % _RE_FLAGS,
    '_NL': r'(\r?\n)+\s*',
    'WS': r'[ \t]+',
    'COMMENT': r'//[^\n]*',
    '_TO': '->',
    '_IGNORE': r'%ignore',
    '_DECLARE': r'%declare',
    '_IMPORT': r'%import',
    'NUMBER': r'\d+',
}
# Rule definitions (name -> list of space-separated expansions) of the
# meta-grammar.  The names use lark's own conventions: a leading '_'
# inlines the rule, a leading '?' inlines it when it has a single child.
RULES = {
    'start': ['_list'],
    '_list':  ['_item', '_list _item'],
    '_item':  ['rule', 'token', 'statement', '_NL'],
    'rule': ['RULE _COLON expansions _NL',
             'RULE _DOT NUMBER _COLON expansions _NL'],
    'expansions': ['alias',
                   'expansions _OR alias',
                   'expansions _NL _OR alias'],
    '?alias':     ['expansion _TO RULE', 'expansion'],
    'expansion': ['_expansion'],
    '_expansion': ['', '_expansion expr'],
    '?expr': ['atom',
              'atom OP',
              'atom TILDE NUMBER',
              'atom TILDE NUMBER _DOT _DOT NUMBER',
              ],
    '?atom': ['_LPAR expansions _RPAR',
              'maybe',
              'value'],
    'value': ['terminal',
              'nonterminal',
              'literal',
              'range'],
    'terminal': ['TERMINAL'],
    'nonterminal': ['RULE'],
    '?name': ['RULE', 'TERMINAL'],
    'maybe': ['_LBRA expansions _RBRA'],
    'range': ['STRING _DOT _DOT STRING'],
    'token': ['TERMINAL _COLON expansions _NL',
              'TERMINAL _DOT NUMBER _COLON expansions _NL'],
    'statement': ['ignore', 'import', 'declare'],
    'ignore': ['_IGNORE expansions _NL'],
    'declare': ['_DECLARE _declare_args _NL'],
    'import': ['_IMPORT _import_path _NL',
               '_IMPORT _import_path _LPAR name_list _RPAR _NL',
               '_IMPORT _import_path _TO TERMINAL _NL'],
    '_import_path': ['import_lib', 'import_rel'],
    'import_lib': ['_import_args'],
    'import_rel': ['_DOT _import_args'],
    '_import_args': ['name', '_import_args _DOT name'],
    'name_list': ['_name_list'],
    '_name_list': ['name', '_name_list _COMMA name'],
    '_declare_args': ['name', '_declare_args name'],
    'literal': ['REGEXP', 'STRING'],
}
@inline_args
class EBNF_to_BNF(Transformer_InPlace):
    """Expand EBNF repetition operators (?, +, *, ~n, ~n..m) into plain BNF.

    Helper rules generated for + and * are collected in ``new_rules`` and
    named ``__<prefix>_<op>_<i>`` so later stages can recognise them.
    """
    def __init__(self):
        self.new_rules = []
        # Cache expr -> generated helper nonterminal, so each repeated
        # sub-expression is expanded only once.
        self.rules_by_expr = {}
        self.prefix = 'anon'
        self.i = 0
        # RuleOptions to attach to generated helper rules (set per input rule).
        self.rule_options = None

    def _add_recurse_rule(self, type_, expr):
        # Create (or reuse) a left-recursive helper rule matching expr+.
        if expr in self.rules_by_expr:
            return self.rules_by_expr[expr]

        new_name = '__%s_%s_%d' % (self.prefix, type_, self.i)
        self.i += 1
        t = NonTerminal(new_name)
        tree = ST('expansions', [ST('expansion', [expr]), ST('expansion', [t, expr])])
        self.new_rules.append((new_name, tree, self.rule_options))
        self.rules_by_expr[expr] = t
        return t

    def expr(self, rule, op, *args):
        if op.value == '?':
            # x? -> (x | <empty>)
            return ST('expansions', [rule, ST('expansion', [])])
        elif op.value == '+':
            # a : b c+ d
            #   -->
            # a : b _c d
            # _c : _c c | c;
            return self._add_recurse_rule('plus', rule)
        elif op.value == '*':
            # a : b c* d
            #   -->
            # a : b _c? d
            # _c : _c c | c;
            new_name = self._add_recurse_rule('star', rule)
            return ST('expansions', [new_name, ST('expansion', [])])
        elif op.value == '~':
            # Bounded repetition: expand x~n..m into explicit alternatives.
            if len(args) == 1:
                mn = mx = int(args[0])
            else:
                mn, mx = map(int, args)
                if mx < mn:
                    raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (rule, mn, mx))
            return ST('expansions', [ST('expansion', [rule] * n) for n in range(mn, mx+1)])
        assert False, op
class SimplifyRule_Visitor(Visitor):
    """Normalize rule trees in place: flatten nested same-type nodes and
    distribute inner alternations out to the top level."""

    @staticmethod
    def _flatten(tree):
        # Repeatedly splice children whose node type equals the parent's,
        # e.g. expansions(expansions(a, b), c) -> expansions(a, b, c).
        while True:
            to_expand = [i for i, child in enumerate(tree.children)
                         if isinstance(child, Tree) and child.data == tree.data]
            if not to_expand:
                break
            tree.expand_kids_by_index(*to_expand)

    def expansion(self, tree):
        # rules_list unpacking
        # a : b (c|d) e
        #  -->
        # a : b c e | b d e
        #
        # In AST terms:
        #   expansion(b, expansions(c, d), e)
        #    -->
        #   expansions( expansion(b, c, e), expansion(b, d, e) )
        self._flatten(tree)

        for i, child in enumerate(tree.children):
            if isinstance(child, Tree) and child.data == 'expansions':
                tree.data = 'expansions'
                tree.children = [self.visit(ST('expansion', [option if i == j else other
                                                             for j, other in enumerate(tree.children)]))
                                 for option in set(child.children)]
                self._flatten(tree)
                break

    def alias(self, tree):
        # An alias over an alternation applies to each alternative separately.
        rule, alias_name = tree.children
        if rule.data == 'expansions':
            aliases = []
            for child in tree.children[0].children:
                aliases.append(ST('alias', [child, alias_name]))
            tree.data = 'expansions'
            tree.children = aliases

    def expansions(self, tree):
        # Flatten and de-duplicate alternatives.
        self._flatten(tree)
        tree.children = list(set(tree.children))
class RuleTreeToText(Transformer):
    """Lower a simplified rule tree into plain data:
    a list of (symbol-list, alias-or-None) pairs, one per alternative."""

    def expansions(self, x):
        # Children are already (symbols, alias) pairs - pass the list through.
        return x

    def expansion(self, symbols):
        # A plain alternative carries no alias.
        return symbols, None

    def alias(self, x):
        (expansion, _alias), alias = x
        assert _alias is None, (alias, expansion, '-', _alias)  # Double alias not allowed
        return expansion, alias.value
@inline_args
class CanonizeTree(Transformer_InPlace):
    """Early normalization pass over the freshly parsed grammar tree."""

    def maybe(self, expr):
        # Rewrite the [x] form as the equivalent x? expression node.
        return ST('expr', [expr, Token('OP', '?', -1)])

    def tokenmods(self, *args):
        # Fold token modifiers into a flat list, one element at a time.
        if len(args) == 1:
            return list(args)
        mods, value = args
        return mods + [value]
class PrepareAnonTerminals(Transformer_InPlace):
    "Create a unique list of anonymous tokens. Attempt to give meaningful names to them when we add them"

    def __init__(self, tokens):
        self.tokens = tokens
        # All known terminal names, to avoid collisions with user terminals.
        self.token_set = {td.name for td in self.tokens}
        # Pattern -> TokenDef, to reuse an existing terminal for the same pattern.
        self.token_reverse = {td.pattern: td for td in tokens}
        # Counter for generated __ANON_n names.
        self.i = 0

    @inline_args
    def pattern(self, p):
        value = p.value
        if p in self.token_reverse and p.flags != self.token_reverse[p].pattern.flags:
            raise GrammarError(u'Conflicting flags for the same terminal: %s' % p)

        token_name = None

        if isinstance(p, PatternStr):
            try:
                # If already defined, use the user-defined token name
                token_name = self.token_reverse[p].name
            except KeyError:
                # Try to assign an indicative anon-token name
                try:
                    token_name = _TERMINAL_NAMES[value]
                except KeyError:
                    if value.isalnum() and value[0].isalpha() and value.upper() not in self.token_set:
                        with suppress(UnicodeEncodeError):
                            value.upper().encode('ascii')  # Make sure we don't have unicode in our token names
                            token_name = value.upper()

                # Guard against clashing with a user-defined terminal name.
                if token_name in self.token_set:
                    token_name = None

        elif isinstance(p, PatternRE):
            if p in self.token_reverse:  # Kind of a weird placement
                token_name = self.token_reverse[p].name
        else:
            assert False, p

        if token_name is None:
            token_name = '__ANON_%d' % self.i
            self.i += 1

        if token_name not in self.token_set:
            assert p not in self.token_reverse
            self.token_set.add(token_name)
            tokendef = TokenDef(token_name, p)
            self.token_reverse[p] = tokendef
            self.tokens.append(tokendef)

        # Anonymous string terminals are filtered out of the parse tree.
        return Terminal(token_name, filter_out=isinstance(p, PatternStr))
def _rfind(s, choices):
return max(s.rfind(c) for c in choices)
def _fix_escaping(s):
w = ''
i = iter(s)
for n in i:
w += n
if n == '\\':
n2 = next(i)
if n2 == '\\':
w += '\\\\'
elif n2 not in 'unftr':
w += '\\'
w += n2
w = w.replace('\\"', '"').replace("'", "\\'")
to_eval = "u'''%s'''" % w
try:
s = literal_eval(to_eval)
except SyntaxError as e:
raise ValueError(s, e)
return s
def _literal_to_pattern(literal):
    """Convert a STRING or REGEXP token into a PatternStr/PatternRE object,
    separating any trailing regexp flags from the quoted body."""
    raw = literal.value
    flag_start = _rfind(raw, '/"') + 1
    assert flag_start > 0
    flags = raw[flag_start:]
    assert all(f in _RE_FLAGS for f in flags), flags

    body = raw[:flag_start]
    assert body[0] == body[-1] and body[0] in '"/'
    inner = _fix_escaping(body[1:-1])

    if literal.type == 'STRING':
        # String literals keep backslashes literal, not as regexp escapes.
        inner = inner.replace('\\\\', '\\')

    pattern_cls = {'STRING': PatternStr,
                   'REGEXP': PatternRE}[literal.type]
    return pattern_cls(inner, flags)
@inline_args
class PrepareLiterals(Transformer_InPlace):
    """Replace literal and range nodes with 'pattern' nodes wrapping
    PatternStr/PatternRE objects."""

    def literal(self, literal):
        return ST('pattern', [_literal_to_pattern(literal)])

    def range(self, start, end):
        # "a".."z"  ->  /[a-z]/
        assert start.type == end.type == 'STRING'
        start = start.value[1:-1]
        end = end.value[1:-1]
        assert len(start) == len(end) == 1, (start, end, len(start), len(end))
        # NOTE(review): endpoints are interpolated unescaped; an endpoint like
        # "]" or "^" would yield an odd character class - confirm the grammar
        # forbids such ranges.
        regexp = '[%s-%s]' % (start, end)
        return ST('pattern', [PatternRE(regexp)])
class TokenTreeToPattern(Transformer):
    """Fold a terminal's expression tree into a single Pattern object by
    composing sub-patterns into one regexp."""

    def pattern(self, ps):
        p ,= ps
        return p

    def expansion(self, items):
        # Concatenation of patterns -> concatenated regexp.
        assert items
        if len(items) == 1:
            return items[0]
        if len({i.flags for i in items}) > 1:
            raise GrammarError("Lark doesn't support joining tokens with conflicting flags!")
        return PatternRE(''.join(i.to_regexp() for i in items), items[0].flags if items else ())

    def expansions(self, exps):
        # Alternation -> non-capturing group of alternatives.
        if len(exps) == 1:
            return exps[0]
        if len({i.flags for i in exps}) > 1:
            raise GrammarError("Lark doesn't support joining tokens with conflicting flags!")
        return PatternRE('(?:%s)' % ('|'.join(i.to_regexp() for i in exps)), exps[0].flags)

    def expr(self, args):
        # Apply a repetition operator (?, *, +, ~n, ~n..m) as a regexp quantifier.
        inner, op = args[:2]
        if op == '~':
            if len(args) == 3:
                op = "{%d}" % int(args[2])
            else:
                mn, mx = map(int, args[2:])
                if mx < mn:
                    raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (inner, mn, mx))
                op = "{%d,%d}" % (mn, mx)
        else:
            assert len(args) == 2
        return PatternRE('(?:%s)%s' % (inner.to_regexp(), op), inner.flags)

    def alias(self, t):
        raise GrammarError("Aliasing not allowed in terminals (You used -> in the wrong place)")

    def value(self, v):
        return v[0]
class PrepareSymbols(Transformer_InPlace):
    """Turn 'value' nodes into Symbol objects (Terminal / NonTerminal)."""

    def value(self, v):
        (item,) = v
        if isinstance(item, Tree):
            return item
        if item.type == 'RULE':
            return NonTerminal(item.value)
        if item.type == 'TERMINAL':
            # Underscore-prefixed terminals are filtered out of the parse tree.
            return Terminal(item.value, filter_out=item.startswith('_'))
        assert False
def _choice_of_rules(rules):
    """Build an 'expansions' tree offering each rule name as one alternative."""
    options = [ST('expansion', [Token('RULE', name)]) for name in rules]
    return ST('expansions', options)
class Grammar:
    """Container for one grammar's parsed rule/terminal definitions,
    with a compile() step that produces TokenDefs and Rule objects."""

    def __init__(self, rule_defs, token_defs, ignore):
        self.token_defs = token_defs
        self.rule_defs = rule_defs
        self.ignore = ignore

    def compile(self):
        """Compile the stored definitions.

        Returns (tokens, compiled_rules, ignore_names).
        Raises GrammarError on empty terminals or invalid aliases.
        """
        # We change the trees in-place (to support huge grammars)
        # So deepcopy allows calling compile more than once.
        token_defs = deepcopy(list(self.token_defs))
        rule_defs = deepcopy(self.rule_defs)

        # =================
        #  Compile Tokens
        # =================

        # Convert token-trees to strings/regexps
        transformer = PrepareLiterals() * TokenTreeToPattern()
        for name, (token_tree, priority) in token_defs:
            if token_tree is None:  # Terminal added through %declare
                continue
            expansions = list(token_tree.find_data('expansion'))
            if len(expansions) == 1 and not expansions[0].children:
                raise GrammarError("Terminals cannot be empty (%s)" % name)

        tokens = [TokenDef(name, transformer.transform(token_tree), priority)
                  for name, (token_tree, priority) in token_defs if token_tree]

        # =================
        #  Compile Rules
        # =================

        # 1. Pre-process terminals
        transformer = PrepareLiterals() * PrepareSymbols() * PrepareAnonTerminals(tokens)  # Adds to tokens

        # 2. Convert EBNF to BNF (and apply step 1)
        ebnf_to_bnf = EBNF_to_BNF()
        rules = []
        for name, rule_tree, options in rule_defs:
            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
            tree = transformer.transform(rule_tree)
            rules.append((name, ebnf_to_bnf.transform(tree), options))
        rules += ebnf_to_bnf.new_rules

        assert len(rules) == len({name for name, _t, _o in rules}), "Whoops, name collision"

        # 3. Compile tree to Rule objects
        rule_tree_to_text = RuleTreeToText()
        simplify_rule = SimplifyRule_Visitor()

        compiled_rules = []
        for name, tree, options in rules:
            simplify_rule.visit(tree)
            expansions = rule_tree_to_text.transform(tree)

            for expansion, alias in expansions:
                if alias and name.startswith('_'):
                    raise GrammarError("Rule %s is marked for expansion (it starts with an underscore) and isn't allowed to have aliases (alias=%s)" % (name, alias))

                assert all(isinstance(x, Symbol) for x in expansion), expansion

                rule = Rule(NonTerminal(name), expansion, alias, options)
                compiled_rules.append(rule)

        return tokens, compiled_rules, self.ignore
# Cache of grammars already imported via %import, keyed by grammar path.
_imported_grammars = {}

def import_grammar(grammar_path, base_paths=None):
    """Load, parse and cache the grammar file *grammar_path*.

    The file is searched first in *base_paths* (if given), then in the
    standard library grammar directories (IMPORT_PATHS).  Results are cached
    in ``_imported_grammars`` so each grammar is parsed only once.

    Raises IOError if the grammar cannot be found in any search path.
    """
    if grammar_path not in _imported_grammars:
        # The default used to be a mutable `base_paths=[]`; `None` is the
        # safe equivalent and keeps every existing call site working.
        import_paths = (base_paths or []) + IMPORT_PATHS
        for import_path in import_paths:
            with suppress(IOError):
                with open(os.path.join(import_path, grammar_path)) as f:
                    text = f.read()
                    grammar = load_grammar(text, grammar_path)
                    _imported_grammars[grammar_path] = grammar
                    break
        else:
            # Nothing matched: raise a descriptive IOError instead of the old
            # `open(grammar_path); assert False` trick, which leaked a file
            # descriptor (and raised AssertionError) if the bare path existed.
            raise IOError('Cannot find grammar %r in import paths %r' % (grammar_path, import_paths))

    return _imported_grammars[grammar_path]
def resolve_token_references(token_defs):
    """Inline terminal-name references inside terminal definitions.

    Mutates the definition trees in place, iterating until no reference
    remains (fixpoint).  Raises GrammarError if a rule name is used inside
    a terminal.
    """
    # TODO Cycles detection
    # TODO Solve with transitive closure (maybe)

    token_dict = {k:t for k, (t,_p) in token_defs}
    assert len(token_dict) == len(token_defs), "Same name defined twice?"

    while True:
        changed = False
        for name, (token_tree, _p) in token_defs:
            if token_tree is None:  # Terminal added through %declare
                continue
            for exp in token_tree.find_data('value'):
                item ,= exp.children
                if isinstance(item, Token):
                    if item.type == 'RULE':
                        raise GrammarError("Rules aren't allowed inside terminals (%s in %s)" % (item, name))
                    if item.type == 'TERMINAL':
                        # Splice in the referenced terminal's definition tree.
                        exp.children[0] = token_dict[item]
                        changed = True
        if not changed:
            break
def options_from_rule(name, *x):
    """Split a raw rule definition into (clean_name, expansions, RuleOptions).

    A leading '!' on the name requests keep-all-tokens; a leading '?' marks
    the rule inlinable (expand1).  An optional priority may precede the
    expansions in *x*.
    """
    if len(x) > 1:
        priority_token, expansions = x
        priority = int(priority_token)
    else:
        (expansions,) = x
        priority = None

    keep_all_tokens = name.startswith('!')
    name = name.lstrip('!')
    expand1 = name.startswith('?')
    name = name.lstrip('?')

    return name, expansions, RuleOptions(keep_all_tokens, expand1, priority=priority)
def symbols_from_strcase(expansion):
    """Map symbol-name strings to Symbol objects, deciding by case:
    uppercase -> Terminal (filtered out if '_'-prefixed), else NonTerminal."""
    symbols = []
    for x in expansion:
        if is_terminal(x):
            symbols.append(Terminal(x, filter_out=x.startswith('_')))
        else:
            symbols.append(NonTerminal(x))
    return symbols
@inline_args
class PrepareGrammar(Transformer_InPlace):
    """Unwrap terminal/nonterminal wrapper nodes down to their name tokens."""

    def terminal(self, name):
        return name

    def nonterminal(self, name):
        return name
class GrammarLoader:
    """Bootstrap loader: parses .lark grammar text with a LALR parser built
    from the hard-coded meta-grammar (TERMINALS/RULES) and produces Grammar
    objects, after validating names and executing %-statements."""

    def __init__(self):
        # Build the meta-grammar parser once; it is reused for every grammar.
        tokens = [TokenDef(name, PatternRE(value)) for name, value in TERMINALS.items()]

        rules = [options_from_rule(name, x) for name, x in RULES.items()]
        rules = [Rule(NonTerminal(r), symbols_from_strcase(x.split()), None, o) for r, xs, o in rules for x in xs]
        callback = ParseTreeBuilder(rules, ST).create_callback()
        lexer_conf = LexerConf(tokens, ['WS', 'COMMENT'])

        parser_conf = ParserConf(rules, callback, 'start')
        self.parser = LALR_TraditionalLexer(lexer_conf, parser_conf)

        self.canonize_tree = CanonizeTree()

    def load_grammar(self, grammar_text, grammar_name='<?>'):
        "Parse grammar_text, verify, and create Grammar object. Display nice messages on error."
        try:
            tree = self.canonize_tree.transform( self.parser.parse(grammar_text+'\n') )
        except UnexpectedCharacters as e:
            context = e.get_context(grammar_text)
            raise GrammarError("Unexpected input at line %d column %d in %s: \n\n%s" %
                               (e.line, e.column, grammar_name, context))
        except UnexpectedToken as e:
            context = e.get_context(grammar_text)
            # Map the raw parse error to a friendlier message by matching the
            # failure against known-bad example inputs.
            error = e.match_examples(self.parser.parse, {
                'Unclosed parenthesis': ['a: (\n'],
                'Umatched closing parenthesis': ['a: )\n', 'a: [)\n', 'a: (]\n'],
                'Expecting rule or token definition (missing colon)': ['a\n', 'a->\n', 'A->\n', 'a A\n'],
                'Alias expects lowercase name': ['a: -> "a"\n'],
                'Unexpected colon': ['a::\n', 'a: b:\n', 'a: B:\n', 'a: "a":\n'],
                'Misplaced operator': ['a: b??', 'a: b(?)', 'a:+\n', 'a:?\n', 'a:*\n', 'a:|*\n'],
                'Expecting option ("|") or a new rule or token definition': ['a:a\n()\n'],
                '%import expects a name': ['%import "a"\n'],
                '%ignore expects a value': ['%ignore %import\n'],
            })
            if error:
                raise GrammarError("%s at line %s column %s\n\n%s" % (error, e.line, e.column, context))
            elif 'STRING' in e.expected:
                raise GrammarError("Expecting a value at line %s column %s\n\n%s" % (e.line, e.column, context))
            raise

        tree = PrepareGrammar().transform(tree)

        # Extract grammar items
        defs = classify(tree.children, lambda c: c.data, lambda c: c.children)
        token_defs = defs.pop('token', [])
        rule_defs = defs.pop('rule', [])
        statements = defs.pop('statement', [])
        assert not defs

        # Normalize terminal defs to (name, (tree, priority)); default priority is 1.
        token_defs = [td if len(td)==3 else (td[0], 1, td[1]) for td in token_defs]
        token_defs = [(name.value, (t, int(p))) for name, p, t in token_defs]

        # Execute statements
        ignore = []
        declared = []
        for (stmt,) in statements:
            if stmt.data == 'ignore':
                t ,= stmt.children
                ignore.append(t)
            elif stmt.data == 'import':
                if len(stmt.children) > 1:
                    path_node, arg1 = stmt.children
                else:
                    path_node ,= stmt.children
                    arg1 = None
                dotted_path = path_node.children
                if isinstance(arg1, Tree):  # Multi import
                    names = arg1.children
                    aliases = names  # Can't have aliased multi import, so all aliases will be the same as names
                else:  # Single import
                    names = [dotted_path[-1]]  # Get name from dotted path
                    aliases = [arg1] if arg1 else names  # Aliases if exist
                    dotted_path = dotted_path[:-1]
                grammar_path = os.path.join(*dotted_path) + EXT
                if path_node.data == 'import_lib':  # Import from library
                    g = import_grammar(grammar_path)
                else:  # Relative import
                    if grammar_name == '<string>':  # Import relative to script file path if grammar is coded in script
                        base_file = os.path.abspath(sys.modules['__main__'].__file__)
                    else:
                        base_file = grammar_name  # Import relative to grammar file path if external grammar file
                    base_path = os.path.split(base_file)[0]
                    g = import_grammar(grammar_path, base_paths=[base_path])

                # Copy the imported terminal definitions under their (aliased) names.
                for name, alias in zip(names, aliases):
                    token_options = dict(g.token_defs)[name]
                    assert isinstance(token_options, tuple) and len(token_options)==2
                    token_defs.append([alias.value, token_options])
            elif stmt.data == 'declare':
                for t in stmt.children:
                    token_defs.append([t.value, (None, None)])
            else:
                assert False, stmt

        # Verify correctness 1
        for name, _ in token_defs:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)

        # Handle ignore tokens
        # XXX A slightly hacky solution. Recognition of %ignore TERMINAL as separate comes from the lexer's
        #     inability to handle duplicate tokens (two names, one value)
        ignore_names = []
        for t in ignore:
            if t.data=='expansions' and len(t.children) == 1:
                t2 ,= t.children
                if t2.data=='expansion' and len(t2.children) == 1:
                    item ,= t2.children
                    if item.data == 'value':
                        item ,= item.children
                        if isinstance(item, Token) and item.type == 'TERMINAL':
                            # %ignore of a named terminal: reference it directly.
                            ignore_names.append(item.value)
                            continue
            # Otherwise wrap the ignored expression in a fresh anonymous terminal.
            name = '__IGNORE_%d'% len(ignore_names)
            ignore_names.append(name)
            token_defs.append((name, (t, 0)))

        # Verify correctness 2
        token_names = set()
        for name, _ in token_defs:
            if name in token_names:
                raise GrammarError("Token '%s' defined more than once" % name)
            token_names.add(name)

        if set(ignore_names) > token_names:
            raise GrammarError("Tokens %s were marked to ignore but were not defined!" % (set(ignore_names) - token_names))

        # Resolve token references
        resolve_token_references(token_defs)

        rules = [options_from_rule(*x) for x in rule_defs]

        rule_names = set()
        for name, _x, _o in rules:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)
            if name in rule_names:
                raise GrammarError("Rule '%s' defined more than once" % name)
            rule_names.add(name)

        for name, expansions, _o in rules:
            used_symbols = {t for x in expansions.find_data('expansion')
                            for t in x.scan_values(lambda t: t.type in ('RULE', 'TERMINAL'))}
            for sym in used_symbols:
                if is_terminal(sym):
                    if sym not in token_names:
                        raise GrammarError("Token '%s' used but not defined (in rule %s)" % (sym, name))
                else:
                    if sym not in rule_names:
                        raise GrammarError("Rule '%s' used but not defined (in rule %s)" % (sym, name))

        # TODO don't include unused tokens, they can only cause trouble!

        return Grammar(rules, token_defs, ignore_names)
# Module-level convenience entry point: parse grammar text into a Grammar object.
load_grammar = GrammarLoader().load_grammar
|
#!/usr/bin/python3
import sys, math, argparse, re, json, os, subprocess, logging, random
from time import gmtime, strftime
# the difficulties for each technics
from parameters import Knows, Settings, isKnows, isSettings
from parameters import easy, medium, hard, harder, hardcore, mania, god, samus, impossibru, infinity, diff2text
# the helper functions
from smbool import SMBool
from smboolmanager import SMBoolManager
from helpers import Pickup, Bosses
from rom import RomLoader, RomPatcher, RomReader
from rom_patches import RomPatches
from itemrandomizerweb.Items import ItemManager
from graph_locations import locations as graphLocations
from graph import AccessGraph
from graph_access import vanillaTransitions, vanillaBossesTransitions, vanillaEscapeTransitions, accessPoints, GraphUtils, getAccessPoint
from utils import PresetLoader, removeChars
from vcr import VCR
import log, db
class Conf:
    """Global solver configuration knobs (class-level attributes)."""
    # keep getting majors of at most this difficulty before going for minors or changing area
    difficultyTarget = medium

    # display the generated path (spoilers!)
    displayGeneratedPath = False

    # choose how many items are required (possible value: minimal/all/any)
    itemsPickup = 'minimal'

    # the list of items to not pick up
    # NOTE(review): class-level mutable list shared by all users - confirm it is
    # only ever read (as in collectItem) and never mutated per-instance.
    itemsForbidden = []
class SolverState(object):
    """JSON-serializable snapshot of a solver session.

    fromSolver()/toSolver() convert between a live solver object and the
    self.state dict; fromJson()/toJson() persist that dict.  The *Web
    methods reshape the same data for the web UI (isolver).
    """

    def __init__(self, debug=False):
        # When True, include path/distance debug info in the web dicts.
        self.debug = debug

    def fromSolver(self, solver):
        """Capture the solver's current progress into self.state."""
        self.state = {}
        # string
        self.state["majorsSplit"] = solver.majorsSplit
        # bool
        self.state["areaRando"] = solver.areaRando
        # bool
        self.state["bossRando"] = solver.bossRando
        # bool
        self.state["escapeRando"] = solver.escapeRando
        # string "03:00"
        self.state["escapeTimer"] = solver.escapeTimer
        # list of active patches
        self.state["patches"] = RomPatches.ActivePatches
        # start ap
        self.state["startAP"] = solver.startAP
        # start area
        self.state["startArea"] = solver.startArea
        # dict {locName: {itemName: "xxx", "accessPoint": "xxx"}, ...}
        self.state["locsData"] = self.getLocsData(solver.locations)
        # list [(ap1, ap2), (ap3, ap4), ...]
        self.state["areaTransitions"] = solver.areaTransitions
        # list [(ap1, ap2), (ap3, ap4), ...]
        self.state["bossTransitions"] = solver.bossTransitions
        # list [(ap1, ap2), ...]
        self.state["curGraphTransitions"] = solver.curGraphTransitions
        # preset file name
        self.state["presetFileName"] = solver.presetFileName
        ## items collected / locs visited / bosses killed
        # list [item1, item2, ...]
        self.state["collectedItems"] = solver.collectedItems
        # dict {locName: {index: 0, difficulty: (bool, diff, ...), ...} with index being the position of the loc in visitedLocations
        self.state["visitedLocations"] = self.getVisitedLocations(solver.visitedLocations)
        # dict {locName: (bool, diff, [know1, ...], [item1, ...]), ...}
        self.state["availableLocations"] = self.getAvailableLocations(solver.majorLocations)
        # string of last access point
        self.state["lastAP"] = solver.lastAP
        # list of killed bosses: ["boss1", "boss2"]
        self.state["bosses"] = [boss for boss in Bosses.golden4Dead if Bosses.golden4Dead[boss] == True]
        # dict {locNameWeb: {infos}, ...}
        self.state["availableLocationsWeb"] = self.getAvailableLocationsWeb(solver.majorLocations)
        # dict {locNameWeb: {infos}, ...}
        self.state["visitedLocationsWeb"] = self.getAvailableLocationsWeb(solver.visitedLocations)
        # dict {locNameWeb: {infos}, ...}
        self.state["remainLocationsWeb"] = self.getRemainLocationsWeb(solver.majorLocations)
        # string: standard/seedless/plando
        self.state["mode"] = solver.mode
        # string:
        self.state["seed"] = solver.seed
        # dict {point: point, ...} / array of startPoints
        (self.state["linesWeb"], self.state["linesSeqWeb"]) = self.getLinesWeb(solver.curGraphTransitions)
        # bool
        self.state["allTransitions"] = len(solver.curGraphTransitions) == len(solver.areaTransitions) + len(solver.bossTransitions)
        self.state["errorMsg"] = solver.errorMsg
        if len(solver.visitedLocations) > 0:
            self.state["last"] = {"loc": solver.visitedLocations[-1]["Name"],
                                  "item": solver.visitedLocations[-1]["itemName"]}
        else:
            self.state["last"] = ""

    def toSolver(self, solver):
        """Restore a previously captured state into *solver*."""
        if 'majorsSplit' in self.state:
            solver.majorsSplit = self.state["majorsSplit"]
        else:
            # compatibility with existing sessions
            if self.state['fullRando'] == True:
                solver.majorsSplit = 'Full'
            else:
                solver.majorsSplit = 'Major'
        solver.areaRando = self.state["areaRando"]
        solver.bossRando = self.state["bossRando"]
        solver.escapeRando = self.state["escapeRando"]
        solver.escapeTimer = self.state["escapeTimer"]
        RomPatches.ActivePatches = self.state["patches"]
        solver.startAP = self.state["startAP"]
        solver.startArea = self.state["startArea"]
        self.setLocsData(solver.locations)
        solver.areaTransitions = self.state["areaTransitions"]
        solver.bossTransitions = self.state["bossTransitions"]
        solver.curGraphTransitions = self.state["curGraphTransitions"]
        # preset
        solver.presetFileName = self.state["presetFileName"]
        # items collected / locs visited / bosses killed
        solver.collectedItems = self.state["collectedItems"]
        (solver.visitedLocations, solver.majorLocations) = self.setLocations(self.state["visitedLocations"],
                                                                            self.state["availableLocations"],
                                                                            solver.locations)
        solver.lastAP = self.state["lastAP"]
        Bosses.reset()
        for boss in self.state["bosses"]:
            Bosses.beatBoss(boss)
        solver.mode = self.state["mode"]
        solver.seed = self.state["seed"]

    def getLocsData(self, locations):
        """Map location name -> {itemName, accessPoint?} for serialization."""
        ret = {}
        for loc in locations:
            ret[loc["Name"]] = {"itemName": loc["itemName"]}
            if "accessPoint" in loc:
                ret[loc["Name"]]["accessPoint"] = loc["accessPoint"]
        return ret

    def setLocsData(self, locations):
        """Write serialized item/access-point data back into *locations*."""
        for loc in locations:
            loc["itemName"] = self.state["locsData"][loc["Name"]]["itemName"]
            if "accessPoint" in self.state["locsData"][loc["Name"]]:
                loc["accessPoint"] = self.state["locsData"][loc["Name"]]["accessPoint"]

    def getVisitedLocations(self, visitedLocations):
        """Serialize visited locations, recording visit order as an index."""
        # need to keep the order (for cancelation)
        ret = {}
        i = 0
        for loc in visitedLocations:
            diff = loc["difficulty"]
            ret[loc["Name"]] = {"index": i,
                                "difficulty": (diff.bool, diff.difficulty, diff.knows, diff.items),
                                "Visibility": loc["Visibility"]}
            i += 1
        return ret

    def setLocations(self, visitedLocations, availableLocations, locations):
        """Rebuild location lists from serialized data.

        Returns (visited locations in original visit order, remaining majors).
        """
        retVis = []
        retMaj = []
        for loc in locations:
            if loc["Name"] in visitedLocations:
                # visitedLocations contains an index
                diff = visitedLocations[loc["Name"]]["difficulty"]
                loc["difficulty"] = SMBool(diff[0], diff[1], diff[2], diff[3])
                if "Visibility" in visitedLocations[loc["Name"]]:
                    loc["Visibility"] = visitedLocations[loc["Name"]]["Visibility"]
                retVis.append((visitedLocations[loc["Name"]]["index"], loc))
            else:
                if loc["Name"] in availableLocations:
                    diff = availableLocations[loc["Name"]]
                    loc["difficulty"] = SMBool(diff[0], diff[1], diff[2], diff[3])
                retMaj.append(loc)
        # restore the original visit order recorded in the index
        retVis.sort(key=lambda x: x[0])
        return ([loc for (i, loc) in retVis], retMaj)

    def diff4isolver(self, difficulty):
        """Map a numeric difficulty to the isolver difficulty-bucket name."""
        if difficulty == -1:
            return "break"
        elif difficulty < medium:
            return "easy"
        elif difficulty < hard:
            return "medium"
        elif difficulty < harder:
            return "hard"
        elif difficulty < hardcore:
            return "harder"
        elif difficulty < mania:
            return "hardcore"
        else:
            return "mania"

    def name4isolver(self, locName):
        """Location name -> web-safe identifier."""
        # remove space and special characters
        # sed -e 's+ ++g' -e 's+,++g' -e 's+(++g' -e 's+)++g' -e 's+-++g'
        return removeChars(locName, " ,()-")

    def knows2isolver(self, knows):
        """Map internal 'knows' names to their display names (deduplicated)."""
        result = []
        for know in knows:
            if know in Knows.desc:
                result.append(Knows.desc[know]['display'])
            else:
                result.append(know)
        return list(set(result))

    def transition2isolver(self, transition):
        """Access-point name -> camelCase web identifier."""
        transition = str(transition)
        return transition[0].lower() + removeChars(transition[1:], " ,()-")

    def getAvailableLocationsWeb(self, locations):
        """Web-UI dict for locations whose difficulty is currently reachable."""
        ret = {}
        for loc in locations:
            if "difficulty" in loc and loc["difficulty"].bool == True:
                diff = loc["difficulty"]
                locName = self.name4isolver(loc["Name"])
                ret[locName] = {"difficulty": self.diff4isolver(diff.difficulty),
                                "knows": self.knows2isolver(diff.knows),
                                "items": list(set(diff.items)),
                                "item": loc["itemName"],
                                "name": loc["Name"],
                                "canHidden": loc["CanHidden"],
                                "visibility": loc["Visibility"]}
                if "comeBack" in loc:
                    ret[locName]["comeBack"] = loc["comeBack"]
                # for debug purpose
                if self.debug == True:
                    if "path" in loc:
                        ret[locName]["path"] = [a.Name for a in loc["path"]]
                    if "distance" in loc:
                        ret[locName]["distance"] = loc["distance"]
        return ret

    def getRemainLocationsWeb(self, locations):
        """Web-UI dict for locations not currently reachable (sequence breaks)."""
        ret = {}
        for loc in locations:
            if "difficulty" not in loc or ("difficulty" in loc and loc["difficulty"].bool == False):
                locName = self.name4isolver(loc["Name"])
                ret[locName] = {"item": loc["itemName"],
                                "name": loc["Name"],
                                "knows": ["Sequence Break"],
                                "items": [],
                                "canHidden": loc["CanHidden"],
                                "visibility": loc["Visibility"]}
                if self.debug == True:
                    if "difficulty" in loc:
                        ret[locName]["difficulty"] = str(loc["difficulty"])
                    if "distance" in loc:
                        ret[locName]["distance"] = loc["distance"]
        return ret

    def getLinesWeb(self, transitions):
        """Return ({ap: ap} bidirectional map, [(ap, ap)] ordered list) for drawing."""
        lines = {}
        linesSeq = []
        for (start, end) in transitions:
            startWeb = self.transition2isolver(start)
            endWeb = self.transition2isolver(end)
            lines[startWeb] = endWeb
            lines[endWeb] = startWeb
            linesSeq.append((startWeb, endWeb))
        return (lines, linesSeq)

    def getAvailableLocations(self, locations):
        """Map location name -> difficulty tuple for reachable locations."""
        ret = {}
        for loc in locations:
            if "difficulty" in loc and loc["difficulty"].bool == True:
                diff = loc["difficulty"]
                ret[loc["Name"]] = (diff.bool, diff.difficulty, diff.knows, diff.items)
        return ret

    def fromJson(self, stateJsonFileName):
        """Load self.state from a JSON file."""
        with open(stateJsonFileName, 'r') as jsonFile:
            self.state = json.load(jsonFile)
        # print("Loaded Json State:")
        # for key in self.state:
        #     if key in ["availableLocationsWeb", "visitedLocationsWeb", "collectedItems", "availableLocations", "visitedLocations"]:
        #         print("{}: {}".format(key, self.state[key]))
        # print("")

    def toJson(self, outputFileName):
        """Dump self.state to a JSON file."""
        with open(outputFileName, 'w') as jsonFile:
            json.dump(self.state, jsonFile)
        # print("Dumped Json State:")
        # for key in self.state:
        #     if key in ["availableLocationsWeb", "visitedLocationsWeb", "collectedItems", "visitedLocations"]:
        #         print("{}: {}".format(key, self.state[key]))
        # print("")
class CommonSolver(object):
def loadRom(self, rom, interactive=False, magic=None, startAP=None):
# startAP param is only use for seedless
if rom == None:
self.romFileName = 'seedless'
self.majorsSplit = 'Full'
self.areaRando = True
self.bossRando = True
self.escapeRando = False
self.escapeTimer = "03:00"
self.startAP = startAP
RomPatches.setDefaultPatches(startAP)
self.startArea = getAccessPoint(startAP).Start['solveArea']
# in seedless load all the vanilla transitions
self.areaTransitions = vanillaTransitions[:]
self.bossTransitions = vanillaBossesTransitions[:]
self.escapeTransition = [vanillaEscapeTransitions[0]]
self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
for loc in self.locations:
loc['itemName'] = 'Nothing'
else:
self.romFileName = rom
self.romLoader = RomLoader.factory(rom, magic)
self.majorsSplit = self.romLoader.assignItems(self.locations)
(self.startAP, self.startArea, startPatches) = self.romLoader.getStartAP()
(self.areaRando, self.bossRando, self.escapeRando) = self.romLoader.loadPatches()
RomPatches.ActivePatches += startPatches
self.escapeTimer = self.romLoader.getEscapeTimer()
self.romLoader.readNothingId()
if interactive == False:
print("ROM {} majors: {} area: {} boss: {} escape: {} patches: {} activePatches: {}".format(rom, self.majorsSplit, self.areaRando, self.bossRando, self.escapeRando, sorted(self.romLoader.getPatches()), sorted(RomPatches.ActivePatches)))
else:
print("majors: {} area: {} boss: {} escape: {} activepatches: {}".format(self.majorsSplit, self.areaRando, self.bossRando, self.escapeRando, sorted(RomPatches.ActivePatches)))
(self.areaTransitions, self.bossTransitions, self.escapeTransition) = self.romLoader.getTransitions()
if interactive == True and self.debug == False:
# in interactive area mode we build the graph as we play along
if self.areaRando == True and self.bossRando == True:
self.curGraphTransitions = []
elif self.areaRando == True:
self.curGraphTransitions = self.bossTransitions[:]
elif self.bossRando == True:
self.curGraphTransitions = self.areaTransitions[:]
else:
self.curGraphTransitions = self.bossTransitions + self.areaTransitions
if self.escapeRando == False:
self.curGraphTransitions += self.escapeTransition
else:
self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
self.areaGraph = AccessGraph(accessPoints, self.curGraphTransitions)
if self.log.getEffectiveLevel() == logging.DEBUG:
self.log.debug("Display items at locations:")
for location in self.locations:
self.log.debug('{:>50}: {:>16}'.format(location["Name"], location['itemName']))
def loadPreset(self, presetFileName):
    """Load a skill preset file and rebuild the Knows* helper functions."""
    loader = PresetLoader.factory(presetFileName)
    loader.load()
    # the preset changes the Knows* flags, so the smbm helpers must be regenerated
    self.smbm.createKnowsFunctions()
    debugging = (self.log.getEffectiveLevel() == logging.DEBUG)
    if debugging:
        loader.printToScreen()
def getLoc(self, locName):
    """Return the location dict whose 'Name' is locName, or None if absent."""
    return next((loc for loc in self.locations if loc['Name'] == locName), None)
def computeLocationsDifficulty(self, locations):
    """Compute reachability/difficulty of the given locations from the current AP.

    Fills loc['difficulty'] (an SMBool) for every location, and for reachable
    ones also loc['comeBack'] (whether we can return to the current access
    point after picking the item up).
    """
    self.areaGraph.getAvailableLocations(locations, self.smbm, infinity, self.lastAP)
    # check post available functions too
    for loc in locations:
        if loc['difficulty'].bool == True:
            if 'PostAvailable' in loc:
                # temporarily grant the item to evaluate the post-pickup condition
                self.smbm.addItem(loc['itemName'])
                postAvailable = loc['PostAvailable'](self.smbm)
                self.smbm.removeItem(loc['itemName'])
                loc['difficulty'] = self.smbm.wand(loc['difficulty'], postAvailable)
            # also check if we can come back to landing site from the location
            loc['comeBack'] = self.areaGraph.canAccess(self.smbm, loc['accessPoint'], self.lastAP, infinity, loc['itemName'])
    if self.log.getEffectiveLevel() == logging.DEBUG:
        self.log.debug("available locs:")
        for loc in locations:
            if loc['difficulty'].bool == True:
                self.log.debug("{}: {}".format(loc['Name'], loc['difficulty']))
def collectMajor(self, loc, itemName=None):
    """Pick up a major location: move it to the visited list and collect its item."""
    self.visitedLocations.append(loc)
    self.majorLocations.remove(loc)
    self.collectItem(loc, itemName)
def collectMinor(self, loc):
    """Pick up a minor location: move it to the visited list and collect its item."""
    self.visitedLocations.append(loc)
    self.minorLocations.remove(loc)
    self.collectItem(loc)
def collectItem(self, loc, item=None):
    """Register the pickup of loc's item: update collected items, smbm and last AP.

    If item is None the location's own itemName is used. Forbidden items
    (Conf.itemsForbidden) are recorded with a '-name-' marker and NOT added
    to the smbm, so they don't contribute to logic.
    """
    if item == None:
        item = loc["itemName"]
    if self.vcr != None:
        self.vcr.addLocation(loc['Name'], item)
    if self.firstLogFile is not None:
        # log only the first pickup of each item type
        if item not in self.collectedItems:
            self.firstLogFile.write("{};{};{};{}\n".format(item, loc['Name'], loc['Area'], loc['GraphArea']))
    if item not in Conf.itemsForbidden:
        self.collectedItems.append(item)
        if self.checkDuplicateMajor == True:
            # stackable item types are allowed to appear several times
            if item not in ['Nothing', 'NoEnergy', 'Missile', 'Super', 'PowerBomb', 'ETank', 'Reserve']:
                if self.smbm.haveItem(item):
                    print("WARNING: {} has already been picked up".format(item))
        self.smbm.addItem(item)
    else:
        # update the name of the item
        item = "-{}-".format(item)
        loc["itemName"] = item
        self.collectedItems.append(item)
        # we still need the boss difficulty
        if 'Pickup' not in loc:
            loc["difficulty"] = SMBool(False)
    if 'Pickup' in loc:
        # boss locations run a side-effect hook (marks the boss dead)
        loc['Pickup']()
    self.log.debug("collectItem: {} at {}".format(item, loc['Name']))
    # last loc is used as root node for the graph
    self.lastAP = loc['accessPoint']
    self.lastArea = loc['SolveArea']
def cancelLastItems(self, count):
    """Roll back the last `count` pickups: restore locations, items, smbm and AP.

    Raises Exception if the visited-locations and collected-items stacks get
    out of sync (should not happen).
    """
    if self.vcr != None:
        self.vcr.addRollback(count)
    for _ in range(count):
        if len(self.visitedLocations) == 0:
            return
        loc = self.visitedLocations.pop()
        # NOTE(review): locations always go back to majorLocations, even minors —
        # presumably fine for Full split; verify for Major/Chozo splits.
        self.majorLocations.append(loc)
        # pickup func
        if 'Unpickup' in loc:
            loc['Unpickup']()
        # access point
        if len(self.visitedLocations) == 0:
            self.lastAP = self.startAP
            self.lastArea = self.startArea
        else:
            self.lastAP = self.visitedLocations[-1]["accessPoint"]
            self.lastArea = self.visitedLocations[-1]["SolveArea"]
        # item
        item = loc["itemName"]
        if item != self.collectedItems[-1]:
            raise Exception("Item of last collected loc {}: {} is different from last collected item: {}".format(loc["Name"], item, self.collectedItems[-1]))
        # in plando we have to remove the last added item,
        # else it could be used in computing the postAvailable of a location
        if self.mode in ['plando', 'seedless']:
            loc["itemName"] = 'Nothing'
        self.collectedItems.pop()
        # if multiple majors in plando mode, remove it from smbm only when it's the last occurence of it
        if self.smbm.isCountItem(item):
            self.smbm.removeItem(item)
        else:
            if item not in self.collectedItems:
                self.smbm.removeItem(item)
def getAvailableItemsList(self, locations, threshold):
    """Sort available locations by pickup priority.

    Returns `around + outside`: locations near the current area/AP first
    (sorted by area, distance, boss pickup, difficulty), then the remaining
    ones grouped by difficulty range and no-comeback status.
    """
    # locations without distance are not available
    locations = [loc for loc in locations if 'distance' in loc]
    if len(locations) == 0:
        return []
    # add nocomeback locations which has been selected by the comeback step (areaWeight == 1)
    around = [loc for loc in locations if( ('areaWeight' in loc and loc['areaWeight'] == 1)
                                           or ((loc['SolveArea'] == self.lastArea or loc['distance'] < 3)
                                               and loc['difficulty'].difficulty <= threshold
                                               and not Bosses.areaBossDead(self.lastArea)
                                               and 'comeBack' in loc and loc['comeBack'] == True) )]
    outside = [loc for loc in locations if not loc in around]
    self.log.debug("around1 = {}".format([(loc['Name'], loc['difficulty'], loc['distance'], loc['comeBack'], loc['SolveArea']) for loc in around]))
    self.log.debug("outside1 = {}".format([(loc['Name'], loc['difficulty'], loc['distance'], loc['comeBack'], loc['SolveArea']) for loc in outside]))
    around.sort(key=lambda loc: (
        # locs in the same area
        0 if loc['SolveArea'] == self.lastArea
        else 1,
        # nearest locs
        loc['distance'],
        # beating a boss
        0 if 'Pickup' in loc
        else 1,
        # easiest first
        loc['difficulty'].difficulty
    )
    )
    self.log.debug("around2: {}".format([(loc['Name'], 0 if loc['SolveArea'] == self.lastArea else 1, loc['distance'], 0 if 'Pickup' in loc else 1, loc['difficulty'].difficulty) for loc in around]))
    # we want to sort the outside locations by putting the ones in the same area first,
    # then we sort the remaining areas starting whith boss dead status.
    # we also want to sort by range of difficulty and not only with the difficulty threshold.
    ranged = {
        "areaWeight": [],
        "easy": [],
        "medium": [],
        "hard": [],
        "harder": [],
        "hardcore": [],
        "mania": [],
        "noComeBack": []
    }
    # bucket the outside locations by difficulty range / special status
    for loc in outside:
        if "areaWeight" in loc:
            ranged["areaWeight"].append(loc)
        elif "comeBack" not in loc or loc['comeBack'] == False:
            ranged["noComeBack"].append(loc)
        else:
            difficulty = loc['difficulty'].difficulty
            if difficulty < medium:
                ranged["easy"].append(loc)
            elif difficulty < hard:
                ranged["medium"].append(loc)
            elif difficulty < harder:
                ranged["hard"].append(loc)
            elif difficulty < hardcore:
                ranged["harder"].append(loc)
            elif difficulty < mania:
                ranged["hardcore"].append(loc)
            else:
                ranged["mania"].append(loc)
    for key in ranged:
        ranged[key].sort(key=lambda loc: (
            # first locs in the same area
            0 if loc['SolveArea'] == self.lastArea else 1,
            # first nearest locs
            loc['distance'],
            # beating a boss
            loc['difficulty'].difficulty if (not Bosses.areaBossDead(loc['Area'])
                                             and 'Pickup' in loc)
            else 100000,
            # areas with boss still alive
            loc['difficulty'].difficulty if (not Bosses.areaBossDead(loc['Area']))
            else 100000,
            loc['difficulty'].difficulty))
    self.log.debug("outside2: (threshold: {}) name, areaWeight, area, distance, boss, boss in area, difficulty".format(threshold))
    outside = []
    # concatenate buckets in fixed priority order
    for key in ["areaWeight", "easy", "medium", "hard", "harder", "hardcore", "mania", "noComeBack"]:
        outside += ranged[key]
        self.log.debug("outside2: {}: {}".format(key, [(loc['Name'], loc["areaWeight"] if "areaWeight" in loc else 0, 0 if loc['SolveArea'] == self.lastArea else 1, loc['distance'], loc['difficulty'].difficulty if (not Bosses.areaBossDead(loc['Area']) and 'Pickup' in loc) else 100000, loc['difficulty'].difficulty if not Bosses.areaBossDead(loc['Area']) else 100000,loc['difficulty'].difficulty) for loc in ranged[key]]))
    return around + outside
def nextDecision(self, majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold):
    """Choose and collect the next location among sorted majors/minors.

    Both input lists are expected to be pre-sorted by getAvailableItemsList,
    so index 0 is always the preferred candidate on each side. Raises
    Exception if no decision can be taken (should not happen).
    """
    # first take major items of acceptable difficulty in the current area
    if (len(majorsAvailable) > 0
        and majorsAvailable[0]['SolveArea'] == self.lastArea
        and majorsAvailable[0]['difficulty'].difficulty <= diffThreshold
        and majorsAvailable[0]['comeBack'] == True):
        return self.collectMajor(majorsAvailable.pop(0))
    # next item decision
    elif len(minorsAvailable) == 0 and len(majorsAvailable) > 0:
        self.log.debug('MAJOR')
        return self.collectMajor(majorsAvailable.pop(0))
    elif len(majorsAvailable) == 0 and len(minorsAvailable) > 0:
        # we don't check for hasEnoughMinors here, because we would be stuck, so pickup
        # what we can and hope it gets better
        self.log.debug('MINOR')
        return self.collectMinor(minorsAvailable.pop(0))
    elif len(majorsAvailable) > 0 and len(minorsAvailable) > 0:
        self.log.debug('BOTH|M=' + majorsAvailable[0]['Name'] + ', m=' + minorsAvailable[0]['Name'])
        # if both are available, decide based on area, difficulty and comeBack
        nextMajDifficulty = majorsAvailable[0]['difficulty'].difficulty
        nextMinArea = minorsAvailable[0]['SolveArea']
        nextMinDifficulty = minorsAvailable[0]['difficulty'].difficulty
        nextMajComeBack = majorsAvailable[0]['comeBack']
        nextMinComeBack = minorsAvailable[0]['comeBack']
        nextMajDistance = majorsAvailable[0]['distance']
        nextMinDistance = minorsAvailable[0]['distance']
        self.log.debug("diff area back dist - diff area back dist")
        self.log.debug("maj: {} '{}' {} {}, min: {} '{}' {} {}".format(nextMajDifficulty, majorsAvailable[0]['SolveArea'], nextMajComeBack, nextMajDistance, nextMinDifficulty, nextMinArea, nextMinComeBack, nextMinDistance))
        if hasEnoughMinors == True and self.haveAllMinorTypes() == True and self.smbm.haveItem('Charge'):
            # we have charge, no longer need minors
            return self.collectMajor(majorsAvailable.pop(0))
        else:
            # first take item from loc where you can come back
            if nextMajComeBack != nextMinComeBack:
                self.log.debug("!= combeback")
                if nextMajComeBack == True:
                    return self.collectMajor(majorsAvailable.pop(0))
                else:
                    return self.collectMinor(minorsAvailable.pop(0))
            # difficulty over area (this is a difficulty estimator, not a speedrunning simulator)
            elif nextMinDifficulty <= diffThreshold and nextMajDifficulty <= diffThreshold:
                # take the closer one
                if nextMajDistance != nextMinDistance:
                    self.log.debug("!= distance")
                    if nextMajDistance < nextMinDistance:
                        return self.collectMajor(majorsAvailable.pop(0))
                    else:
                        return self.collectMinor(minorsAvailable.pop(0))
                # take the easier
                elif nextMinDifficulty < nextMajDifficulty:
                    self.log.debug("min easier and not enough minors")
                    return self.collectMinor(minorsAvailable.pop(0))
                elif nextMajDifficulty < nextMinDifficulty:
                    self.log.debug("maj easier")
                    return self.collectMajor(majorsAvailable.pop(0))
                # same difficulty and distance for minor and major, take major first
                else:
                    return self.collectMajor(majorsAvailable.pop(0))
            # if not all the minors type are collected, start with minors
            elif nextMinDifficulty <= diffThreshold and not self.haveAllMinorTypes():
                self.log.debug("not all minors types")
                return self.collectMinor(minorsAvailable.pop(0))
            elif nextMinArea == self.lastArea and nextMinDifficulty <= diffThreshold:
                self.log.debug("not enough minors")
                return self.collectMinor(minorsAvailable.pop(0))
            elif nextMinDifficulty > diffThreshold and nextMajDifficulty > diffThreshold:
                # take the easier
                if nextMinDifficulty < nextMajDifficulty:
                    self.log.debug("min easier and not enough minors")
                    return self.collectMinor(minorsAvailable.pop(0))
                elif nextMajDifficulty < nextMinDifficulty:
                    self.log.debug("maj easier")
                    return self.collectMajor(majorsAvailable.pop(0))
                # take the closer one
                elif nextMajDistance != nextMinDistance:
                    self.log.debug("!= distance")
                    if nextMajDistance < nextMinDistance:
                        return self.collectMajor(majorsAvailable.pop(0))
                    else:
                        return self.collectMinor(minorsAvailable.pop(0))
                # same difficulty and distance for minor and major, take major first
                else:
                    return self.collectMajor(majorsAvailable.pop(0))
            else:
                if nextMinDifficulty < nextMajDifficulty:
                    self.log.debug("min easier and not enough minors")
                    return self.collectMinor(minorsAvailable.pop(0))
                else:
                    self.log.debug("maj easier")
                    return self.collectMajor(majorsAvailable.pop(0))
    raise Exception("Can't take a decision")
def checkMB(self, mbLoc):
    """Temporarily add the Mother Brain location and test its accessibility.

    Collects it and returns True when reachable, otherwise removes it again
    and returns False.
    """
    self.majorLocations.append(mbLoc)
    self.computeLocationsDifficulty(self.majorLocations)
    # keep the explicit '== True': difficulty is an SMBool, not a plain bool
    reachable = (mbLoc["difficulty"] == True)
    if not reachable:
        self.log.debug("MB loc not accessible")
        self.majorLocations.remove(mbLoc)
        return False
    self.log.debug("MB loc accessible")
    self.collectMajor(mbLoc)
    return True
def computeDifficulty(self):
    """Main solver loop: repeatedly pick the easiest reachable location.

    Loops until the game can be ended (or the solver is stuck even after
    rewinding), then returns (difficulty, itemsOk) from
    computeDifficultyValue().
    """
    # loop on the available locations depending on the collected items.
    # before getting a new item, loop on all of them and get their difficulty,
    # the next collected item is the one with the smallest difficulty,
    # if equality between major and minor, take major first.
    # remove mother brain location (there items pickup conditions on top of going to mother brain location)
    mbLoc = self.getLoc('Mother Brain')
    self.locations.remove(mbLoc)
    # split the location pool according to the majors split mode
    if self.majorsSplit == 'Major':
        self.majorLocations = [loc for loc in self.locations if "Major" in loc["Class"] or "Boss" in loc["Class"]]
        self.minorLocations = [loc for loc in self.locations if "Minor" in loc["Class"]]
    elif self.majorsSplit == 'Chozo':
        self.majorLocations = [loc for loc in self.locations if "Chozo" in loc["Class"] or "Boss" in loc["Class"]]
        self.minorLocations = [loc for loc in self.locations if "Chozo" not in loc["Class"] and "Boss" not in loc["Class"]]
    else:
        # Full
        self.majorLocations = self.locations[:] # copy
        self.minorLocations = self.majorLocations
    self.visitedLocations = []
    self.collectedItems = []
    # with the knowsXXX conditions some roms can be unbeatable, so we have to detect it
    previous = -1
    current = 0
    self.log.debug("{}: available major: {}, available minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
    isEndPossible = False
    endDifficulty = mania
    diffThreshold = self.getDiffThreshold()
    while True:
        # actual while condition
        hasEnoughMinors = self.pickup.enoughMinors(self.smbm, self.minorLocations)
        hasEnoughMajors = self.pickup.enoughMajors(self.smbm, self.majorLocations)
        hasEnoughItems = hasEnoughMajors and hasEnoughMinors
        canEndGame = self.canEndGame()
        (isEndPossible, endDifficulty) = (canEndGame.bool, canEndGame.difficulty)
        if isEndPossible and hasEnoughItems and endDifficulty <= diffThreshold:
            if self.checkMB(mbLoc):
                self.log.debug("END")
                break
            else:
                self.log.debug("canEnd but MB loc not accessible")
        #self.log.debug(str(self.collectedItems))
        self.log.debug("Current AP/Area: {}/{}".format(self.lastAP, self.lastArea))
        # check if we have collected an item in the last loop
        current = len(self.collectedItems)
        if current == previous:
            if not isEndPossible:
                self.log.debug("STUCK ALL")
                if self.comeBack.rewind(len(self.collectedItems)) == True:
                    # rewind ok
                    previous = len(self.collectedItems) - 1
                    continue
                else:
                    # we're really stucked
                    self.log.debug("STUCK CAN'T REWIND")
                    break
            else:
                # end is possible but above threshold: forced hard ending
                self.log.debug("HARD END 1")
                self.checkMB(mbLoc)
                break
        previous = current
        # compute the difficulty of all the locations
        self.computeLocationsDifficulty(self.majorLocations)
        if self.majorsSplit != 'Full':
            self.computeLocationsDifficulty(self.minorLocations)
        # keep only the available locations
        majorsAvailable = [loc for loc in self.majorLocations if 'difficulty' in loc and loc["difficulty"].bool == True]
        minorsAvailable = [loc for loc in self.minorLocations if 'difficulty' in loc and loc["difficulty"].bool == True]
        # check if we're stuck
        if len(majorsAvailable) == 0 and len(minorsAvailable) == 0:
            if not isEndPossible:
                self.log.debug("STUCK MAJORS and MINORS")
                if self.comeBack.rewind(len(self.collectedItems)) == True:
                    previous = len(self.collectedItems) - 1
                    continue
                else:
                    # we're really stucked
                    self.log.debug("STUCK CAN'T REWIND")
                    break
            else:
                self.log.debug("HARD END 2")
                self.checkMB(mbLoc)
                break
        # handle no comeback heuristic
        if self.majorsSplit == 'Full':
            locs = majorsAvailable
        else:
            locs = majorsAvailable+minorsAvailable
        rewindRequired = self.comeBack.handleNoComeBack(locs, len(self.collectedItems))
        if rewindRequired == True:
            if self.comeBack.rewind(len(self.collectedItems)) == True:
                previous = len(self.collectedItems) - 1
                continue
            else:
                # we're really stucked
                self.log.debug("STUCK CAN'T REWIND")
                break
        # sort them on difficulty and proximity
        majorsAvailable = self.getAvailableItemsList(majorsAvailable, diffThreshold)
        if self.majorsSplit == 'Full':
            minorsAvailable = majorsAvailable
        else:
            minorsAvailable = self.getAvailableItemsList(minorsAvailable, diffThreshold)
        self.comeBack.cleanNoComeBack(locs)
        # choose one to pick up
        self.nextDecision(majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold)
    # compute difficulty value
    (difficulty, itemsOk) = self.computeDifficultyValue()
    if self.log.getEffectiveLevel() == logging.DEBUG:
        self.log.debug("difficulty={}".format(difficulty))
        self.log.debug("itemsOk={}".format(itemsOk))
        self.log.debug("{}: remaining major: {}, remaining minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
        self.log.debug("remaining majors:")
        for loc in self.majorLocations:
            self.log.debug("{} ({})".format(loc['Name'], loc['itemName']))
        self.log.debug("bosses: {}".format(Bosses.golden4Dead))
    return (difficulty, itemsOk)
def haveAllMinorTypes(self):
    """Return True once at least one Missile, Super and PowerBomb are collected."""
    # the first minor of each type can be seen as a major, so check for them
    # before going too far in zebes
    return all(minor in self.collectedItems
               for minor in ('PowerBomb', 'Super', 'Missile'))
def canEndGame(self):
    """Return an SMBool telling whether the game can be finished.

    To finish the game you must:
      - beat golden 4 (we force pickup of the 4 items behind the bosses
        to ensure that)
      - defeat metroids
      - destroy/skip the zebetites
      - beat Mother Brain
    """
    bossesDead = Bosses.allBossesDead(self.smbm)
    tourianOk = self.smbm.enoughStuffTourian()
    return self.smbm.wand(bossesDead, tourianOk)
def computeDifficultyValue(self):
    """Return (difficulty, itemsOk) for the finished (or aborted) run.

    difficulty is the max difficulty over visited locations, or -1 when the
    game cannot be ended; itemsOk tells whether all requested items were taken.
    """
    if not self.canEndGame().bool:
        # we have aborted
        return (-1, False)
    # the run difficulty is the hardest step along the path
    difficulty = max((loc['difficulty'].difficulty for loc in self.visitedLocations),
                     default=0)
    # check if we have taken all the requested items
    if (self.pickup.enoughMinors(self.smbm, self.minorLocations)
        and self.pickup.enoughMajors(self.smbm, self.majorLocations)):
        return (difficulty, True)
    # can finish but can't take all the requested items
    return (difficulty, False)
class InteractiveSolver(CommonSolver):
def __init__(self, output):
self.errorMsg = ""
self.checkDuplicateMajor = False
self.vcr = None
self.log = log.get('Solver')
self.outputFileName = output
self.firstLogFile = None
self.locations = graphLocations
(self.locsAddressName, self.locsWeb2Internal) = self.initLocsAddressName()
self.transWeb2Internal = self.initTransitionsName()
def initLocsAddressName(self):
addressName = {}
web2Internal = {}
for loc in graphLocations:
webName = self.locNameInternal2Web(loc["Name"])
addressName[loc["Address"] % 0x10000] = webName
web2Internal[webName] = loc["Name"]
return (addressName, web2Internal)
def initTransitionsName(self):
web2Internal = {}
for (startPoint, endPoint) in vanillaTransitions + vanillaBossesTransitions + vanillaEscapeTransitions:
for point in [startPoint, endPoint]:
web2Internal[self.apNameInternal2Web(point)] = point
return web2Internal
def dumpState(self):
state = SolverState(self.debug)
state.fromSolver(self)
state.toJson(self.outputFileName)
def initialize(self, mode, rom, presetFileName, magic, debug, fill, startAP):
# load rom and preset, return first state
self.debug = debug
self.mode = mode
if self.mode != "seedless":
self.seed = os.path.basename(os.path.splitext(rom)[0])+'.sfc'
else:
self.seed = "seedless"
self.smbm = SMBoolManager()
self.presetFileName = presetFileName
self.loadPreset(self.presetFileName)
self.loadRom(rom, interactive=True, magic=magic, startAP=startAP)
if self.mode == 'plando':
# in plando always consider that we're doing full
self.majorsSplit = 'Full'
self.clearItems()
# in debug mode don't load plando locs/transitions
if self.mode == 'plando' and self.debug == False:
if fill == True:
# load the source seed transitions and items/locations
self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
self.areaGraph = AccessGraph(accessPoints, self.curGraphTransitions)
self.fillPlandoLocs()
else:
if self.areaRando == True or self.bossRando == True:
plandoTrans = self.loadPlandoTransitions()
if len(plandoTrans) > 0:
self.curGraphTransitions = plandoTrans
self.areaGraph = AccessGraph(accessPoints, self.curGraphTransitions)
self.loadPlandoLocs()
# compute new available locations
self.computeLocationsDifficulty(self.majorLocations)
self.dumpState()
def iterate(self, stateJson, scope, action, params):
self.debug = params["debug"]
self.smbm = SMBoolManager()
state = SolverState()
state.fromJson(stateJson)
state.toSolver(self)
self.loadPreset(self.presetFileName)
# add already collected items to smbm
self.smbm.addItems(self.collectedItems)
if scope == 'item':
if action == 'clear':
self.clearItems(True)
else:
if action == 'add':
if self.mode == 'plando' or self.mode == 'seedless':
self.setItemAt(params['loc'], params['item'], params['hide'])
else:
# pickup item at locName
self.pickItemAt(params['loc'])
elif action == 'remove':
# remove last collected item
self.cancelLastItems(params['count'])
elif action == 'replace':
self.replaceItemAt(params['loc'], params['item'], params['hide'])
elif scope == 'area':
if action == 'clear':
self.clearTransitions()
else:
if action == 'add':
startPoint = params['startPoint']
endPoint = params['endPoint']
self.addTransition(self.transWeb2Internal[startPoint], self.transWeb2Internal[endPoint])
elif action == 'remove':
if 'startPoint' in params:
self.cancelTransition(self.transWeb2Internal[params['startPoint']])
else:
# remove last transition
self.cancelLastTransition()
self.areaGraph = AccessGraph(accessPoints, self.curGraphTransitions)
if scope == 'common':
if action == 'save':
return self.savePlando(params['lock'], params['escapeTimer'])
elif action == 'randomize':
self.randoPlando(params)
# if last loc added was a sequence break, recompute its difficulty,
# as it may be available with the newly placed item.
if len(self.visitedLocations) > 0:
lastVisited = self.visitedLocations[-1]
if lastVisited['difficulty'].difficulty == -1:
self.visitedLocations.remove(lastVisited)
self.majorLocations.append(lastVisited)
else:
lastVisited = None
else:
lastVisited = None
# compute new available locations
self.clearLocs(self.majorLocations)
self.computeLocationsDifficulty(self.majorLocations)
# put back last visited location
if lastVisited != None:
self.majorLocations.remove(lastVisited)
self.visitedLocations.append(lastVisited)
if lastVisited["difficulty"] == False:
# if the loc is still sequence break, put it back as sequence break
lastVisited["difficulty"] = SMBool(True, -1)
# return them
self.dumpState()
def getLocNameFromAddress(self, address):
return self.locsAddressName[address]
def loadPlandoTransitions(self):
# add escape transition
transitionsAddr = self.romLoader.getPlandoTransitions(len(vanillaBossesTransitions) + len(vanillaTransitions) + 1)
return GraphUtils.getTransitions(transitionsAddr)
def loadPlandoLocs(self):
# get the addresses of the already filled locs, with the correct order
addresses = self.romLoader.getPlandoAddresses()
# create a copy of the locations to avoid removing locs from self.locations
self.majorLocations = self.locations[:]
for address in addresses:
# TODO::compute only the difficulty of the current loc
self.computeLocationsDifficulty(self.majorLocations)
locName = self.getLocNameFromAddress(address)
self.pickItemAt(locName)
def fillPlandoLocs(self):
self.pickup = Pickup("all")
self.comeBack = ComeBack(self)
# backup
mbLoc = self.getLoc("Mother Brain")
locationsBck = self.locations[:]
self.lastAP = self.startAP
self.lastArea = self.startArea
(self.difficulty, self.itemsOk) = self.computeDifficulty()
# put back mother brain location
if mbLoc not in self.majorLocations and mbLoc not in self.visitedLocations:
self.majorLocations.append(mbLoc)
if self.itemsOk == False:
# add remaining locs as sequence break
for loc in self.majorLocations[:]:
loc["difficulty"] = SMBool(True, -1)
if "accessPoint" not in loc:
# take first ap of the loc
loc["accessPoint"] = list(loc["AccessFrom"])[0]
self.collectMajor(loc)
self.locations = locationsBck
def fillGraph(self):
# add self looping transitions on unused acces points
usedAPs = {}
for (src, dst) in self.curGraphTransitions:
usedAPs[src] = True
usedAPs[dst] = True
singleAPs = []
for ap in accessPoints:
if ap.isInternal() == True:
continue
if ap.Name not in usedAPs:
singleAPs.append(ap.Name)
transitions = self.curGraphTransitions[:]
for apName in singleAPs:
transitions.append((apName, apName))
return AccessGraph(accessPoints, transitions)
def randoPlando(self, parameters):
# if all the locations are visited, do nothing
if len(self.majorLocations) == 0:
return
plandoLocsItems = {}
for loc in self.visitedLocations:
if "Boss" in loc["Class"]:
plandoLocsItems[loc["Name"]] = "Boss"
else:
plandoLocsItems[loc["Name"]] = loc["itemName"]
plandoCurrent = {
"locsItems": plandoLocsItems,
"transitions": self.curGraphTransitions,
"patches": RomPatches.ActivePatches
}
plandoCurrentJson = json.dumps(plandoCurrent)
pythonExec = "python{}.{}".format(sys.version_info.major, sys.version_info.minor)
params = [
pythonExec, os.path.expanduser("~/RandomMetroidSolver/randomizer.py"),
'--runtime', '10',
'--param', self.presetFileName,
'--output', self.outputFileName,
'--plandoRando', plandoCurrentJson,
'--progressionSpeed', parameters["progressionSpeed"],
'--minorQty', parameters["minorQty"],
'--maxDifficulty', 'hardcore',
'--energyQty', parameters["energyQty"]
]
subprocess.call(params)
with open(self.outputFileName, 'r') as jsonFile:
data = json.load(jsonFile)
self.errorMsg = data["errorMsg"]
# load the locations
if "itemLocs" in data:
self.clearItems(reload=True)
itemsLocs = data["itemLocs"]
# create a copy because we need self.locations to be full, else the state will be empty
self.majorLocations = self.locations[:]
for itemLoc in itemsLocs:
locName = itemLoc["Location"]["Name"]
loc = self.getLoc(locName)
difficulty = itemLoc["Location"]["difficulty"]
smbool = SMBool(difficulty["bool"], difficulty["difficulty"], difficulty["knows"], difficulty["items"])
loc["difficulty"] = smbool
itemName = itemLoc["Item"]["Type"]
if itemName == "Boss":
itemName = "Nothing"
loc["itemName"] = itemName
loc["accessPoint"] = itemLoc["Location"]["accessPoint"]
self.collectMajor(loc)
def savePlando(self, lock, escapeTimer):
# store filled locations addresses in the ROM for next creating session
locsItems = {}
itemLocs = []
for loc in self.visitedLocations:
locsItems[loc["Name"]] = loc["itemName"]
for loc in self.locations:
if loc["Name"] in locsItems:
itemLocs.append({'Location': loc, 'Item': ItemManager.getItem(locsItems[loc["Name"]])})
else:
# put nothing items in unused locations
itemLocs.append({'Location': loc, 'Item': ItemManager.getItem("Nothing")})
# patch the ROM
if lock == True:
magic = random.randint(1, 0xffff)
else:
magic = None
romPatcher = RomPatcher(magic=magic, plando=True)
patches = ['credits_varia.ips', 'tracking.ips']
if magic != None:
patches.insert(0, 'race_mode.ips')
patches.append('race_mode_credits.ips')
romPatcher.addIPSPatches(patches)
romPatcher.commitIPS()
romPatcher.writeItemsLocs(itemLocs)
romPatcher.writeItemsNumber()
romPatcher.writeSpoiler(itemLocs)
class FakeRandoSettings:
def __init__(self):
self.qty = {'energy': 'plando'}
self.progSpeed = 'plando'
self.progDiff = 'plando'
self.restrictions = {'Suits': False, 'Morph': 'plando'}
self.superFun = {}
randoSettings = FakeRandoSettings()
romPatcher.writeRandoSettings(randoSettings, itemLocs)
if magic != None:
romPatcher.writeMagic()
else:
romPatcher.writePlandoAddresses(self.visitedLocations)
if self.areaRando == True or self.bossRando == True:
doors = GraphUtils.getDoorConnections(self.fillGraph(), self.areaRando, self.bossRando)
romPatcher.writeDoorConnections(doors)
if magic == None:
doorsPtrs = GraphUtils.getAps2DoorsPtrs()
romPatcher.writePlandoTransitions(self.curGraphTransitions, doorsPtrs,
len(vanillaBossesTransitions) + len(vanillaTransitions))
if self.escapeRando == True and escapeTimer != None:
# convert from '03:00' to number of seconds
escapeTimer = int(escapeTimer[0:2]) * 60 + int(escapeTimer[3:])
romPatcher.writeEscapeTimer(escapeTimer)
romPatcher.end()
data = romPatcher.romFile.data
preset = os.path.splitext(os.path.basename(self.presetFileName))[0]
seedCode = 'FX'
if self.bossRando == True:
seedCode = 'B'+seedCode
if self.areaRando == True:
seedCode = 'A'+seedCode
fileName = 'VARIA_Plandomizer_{}{}_{}.sfc'.format(seedCode, strftime("%Y%m%d%H%M%S", gmtime()), preset)
data["fileName"] = fileName
# error msg in json to be displayed by the web site
data["errorMsg"] = ""
with open(self.outputFileName, 'w') as jsonFile:
json.dump(data, jsonFile)
def locNameInternal2Web(self, locName):
return removeChars(locName, " ,()-")
def locNameWeb2Internal(self, locNameWeb):
return self.locsWeb2Internal[locNameWeb]
def apNameInternal2Web(self, apName):
return apName[0].lower() + removeChars(apName[1:], " ")
def getWebLoc(self, locNameWeb):
locName = self.locNameWeb2Internal(locNameWeb)
for loc in self.locations:
if loc["Name"] == locName:
return loc
raise Exception("Location '{}' not found".format(locName))
def pickItemAt(self, locName):
# collect new item at newLoc
loc = self.getWebLoc(locName)
if "difficulty" not in loc or loc["difficulty"] == False:
# sequence break
loc["difficulty"] = SMBool(True, -1)
if "accessPoint" not in loc:
# take first ap of the loc
loc["accessPoint"] = list(loc["AccessFrom"])[0]
self.collectMajor(loc)
def setItemAt(self, locName, itemName, hide):
# set itemName at locName
loc = self.getWebLoc(locName)
# plando mode
loc["itemName"] = itemName
if "difficulty" not in loc:
# sequence break
loc["difficulty"] = SMBool(True, -1)
if "accessPoint" not in loc:
# take first ap of the loc
loc["accessPoint"] = list(loc["AccessFrom"])[0]
if hide == True:
loc["Visibility"] = 'Hidden'
self.collectMajor(loc, itemName)
def replaceItemAt(self, locName, itemName, hide):
# replace itemName at locName
loc = self.getWebLoc(locName)
oldItemName = loc["itemName"]
loc["itemName"] = itemName
# major item can be set multiple times in plando mode
count = self.collectedItems.count(oldItemName)
isCount = self.smbm.isCountItem(oldItemName)
# replace item at the old item spot in collectedItems
index = next(i for i, vloc in enumerate(self.visitedLocations) if vloc['Name'] == loc['Name'])
self.collectedItems[index] = itemName
# update smbm if count item or major was only there once
if isCount == True or count == 1:
self.smbm.removeItem(oldItemName)
if hide == True:
loc["Visibility"] = 'Hidden'
elif loc['CanHidden'] == True and loc['Visibility'] == 'Hidden':
# the loc was previously hidden, set it back to visible
loc["Visibility"] = 'Visible'
self.smbm.addItem(itemName)
def clearItems(self, reload=False):
self.collectedItems = []
self.visitedLocations = []
self.lastAP = self.startAP
self.lastArea = self.startArea
self.majorLocations = self.locations
if reload == True:
for loc in self.majorLocations:
if "difficulty" in loc:
del loc["difficulty"]
Bosses.reset()
self.smbm.resetItems()
def addTransition(self, startPoint, endPoint):
# already check in controller if transition is valid for seed
self.curGraphTransitions.append((startPoint, endPoint))
def cancelLastTransition(self):
if self.areaRando == True and self.bossRando == True:
if len(self.curGraphTransitions) > 0:
self.curGraphTransitions.pop()
elif self.areaRando == True:
if len(self.curGraphTransitions) > len(self.bossTransitions):
self.curGraphTransitions.pop()
elif self.bossRando == True:
if len(self.curGraphTransitions) > len(self.areaTransitions):
self.curGraphTransitions.pop()
def cancelTransition(self, startPoint):
# get end point
endPoint = None
for (i, (start, end)) in enumerate(self.curGraphTransitions):
if start == startPoint:
endPoint = end
break
elif end == startPoint:
endPoint = start
break
if endPoint == None:
# shouldn't happen
return
# check that transition is cancelable
if self.areaRando == True and self.bossRando == True:
if len(self.curGraphTransitions) == 0:
return
elif self.areaRando == True:
if len(self.curGraphTransitions) == len(self.bossTransitions):
return
elif [startPoint, endPoint] in self.bossTransitions or [endPoint, startPoint] in self.bossTransitions:
return
elif self.bossRando == True:
if len(self.curGraphTransitions) == len(self.areaTransitions):
return
elif [startPoint, endPoint] in self.areaTransitions or [endPoint, startPoint] in self.areaTransitions:
return
# remove transition
self.curGraphTransitions.pop(i)
def clearTransitions(self):
    """Reset the transitions to the seed's initial set for the current mode."""
    if self.areaRando == True and self.bossRando == True:
        # everything is user-placed in full rando: start empty
        base = []
    elif self.areaRando == True:
        # keep a copy so later edits don't touch the seed's list
        base = self.bossTransitions[:]
    elif self.bossRando == True:
        base = self.areaTransitions[:]
    else:
        base = self.bossTransitions + self.areaTransitions
    self.curGraphTransitions = base
def clearLocs(self, locs):
    """Drop the cached difficulty from every location dict in *locs*."""
    for loc in locs:
        # no-op when the key is absent, same as the guarded del
        loc.pop('difficulty', None)
def getDiffThreshold(self):
    """Difficulty cap used by the interactive solver.

    Interactive mode has no user-supplied max difficulty parameter, so
    cap just below the 'hard' tier.
    """
    return hard - 0.001
class StandardSolver(CommonSolver):
    """Non-interactive solver: given a ROM and solving parameters,
    estimate the seed's difficulty by solving it completely."""

    def __init__(self, rom, presetFileName, difficultyTarget, pickupStrategy, itemsForbidden=None, type='console',
                 firstItemsLog=None, extStatsFilename=None, displayGeneratedPath=False, outputFileName=None,
                 magic=None, checkDuplicateMajor=False, vcr=False):
        """Load the ROM and preset, and set up the solving configuration.

        rom: path to the randomized ROM to solve
        presetFileName: skill preset (knows/settings) to solve with
        difficultyTarget: difficulty the solver aims for
        pickupStrategy: item pickup strategy ('minimal'/'all'/'any')
        itemsForbidden: item types to never pick up (None -> none forbidden);
                        was a mutable default argument ([]), now a None
                        sentinel so the list is created fresh per call
        type: 'console' or 'web', selects the Out implementation
              (keeps its historic name; shadows the builtin on purpose)
        firstItemsLog: optional CSV path logging where each item type was
                       first found (spoilers!)
        extStatsFilename: optional file to append extended stats to
        magic: race-mode magic number, or None
        vcr: when True, record all solver actions for debugging
        """
        self.checkDuplicateMajor = checkDuplicateMajor
        self.vcr = VCR(rom, 'solver') if vcr == True else None
        # for compatibility with some common methods of the interactive solver
        self.mode = 'standard'
        self.log = log.get('Solver')
        if itemsForbidden is None:
            itemsForbidden = []
        self.setConf(difficultyTarget, pickupStrategy, itemsForbidden, displayGeneratedPath)
        self.firstLogFile = None
        if firstItemsLog is not None:
            self.firstLogFile = open(firstItemsLog, 'w')
            self.firstLogFile.write('Item;Location;Area\n')
        self.extStatsFilename = extStatsFilename
        # can be called from command line (console) or from web site (web)
        self.type = type
        self.output = Out.factory(self.type, self)
        self.outputFileName = outputFileName
        self.locations = graphLocations
        self.smbm = SMBoolManager()
        self.presetFileName = presetFileName
        self.loadPreset(self.presetFileName)
        self.loadRom(rom, magic=magic)
        self.pickup = Pickup(Conf.itemsPickup)
        self.comeBack = ComeBack(self)

    def setConf(self, difficultyTarget, pickupStrategy, itemsForbidden, displayGeneratedPath):
        """Store the solving parameters in the global Conf."""
        Conf.difficultyTarget = difficultyTarget
        Conf.itemsPickup = pickupStrategy
        Conf.displayGeneratedPath = displayGeneratedPath
        Conf.itemsForbidden = itemsForbidden

    def solveRom(self):
        """Solve the loaded ROM and emit the result through self.output."""
        self.lastAP = self.startAP
        self.lastArea = self.startArea
        (self.difficulty, self.itemsOk) = self.computeDifficulty()
        if self.firstLogFile is not None:
            self.firstLogFile.close()
        (self.knowsUsed, self.knowsKnown, knowsUsedList) = self.getKnowsUsed()
        if self.vcr != None:
            self.vcr.dump()
        if self.extStatsFilename != None:
            with open(self.extStatsFilename, 'a') as extStatsFile:
                db.DB.dumpExtStatsSolver(self.difficulty, knowsUsedList, extStatsFile)
        self.output.out()

    def getRemainMajors(self):
        """Major locations still unreachable (filler items excluded)."""
        return [loc for loc in self.majorLocations if loc['difficulty'].bool == False and loc['itemName'] not in ['Nothing', 'NoEnergy']]

    def getRemainMinors(self):
        """Minor locations still unreachable, or None in 'Full' split."""
        if self.majorsSplit == 'Full':
            return None
        else:
            return [loc for loc in self.minorLocations if loc['difficulty'].bool == False and loc['itemName'] not in ['Nothing', 'NoEnergy']]

    def getSkippedMajors(self):
        """Reachable major locations the solver chose not to pick up."""
        return [loc for loc in self.majorLocations if loc['difficulty'].bool == True and loc['itemName'] not in ['Nothing', 'NoEnergy']]

    def getUnavailMajors(self):
        """Unreachable major locations after a successful solve.

        NOTE(review): same expression as getRemainMajors(); they differ
        only in when they are called (after success vs after abort).
        """
        return [loc for loc in self.majorLocations if loc['difficulty'].bool == False and loc['itemName'] not in ['Nothing', 'NoEnergy']]

    def getDiffThreshold(self):
        """Return the upper difficulty bound for the configured target:
        just below the next named tier."""
        target = Conf.difficultyTarget
        threshold = target
        epsilon = 0.001
        if target <= easy:
            threshold = medium - epsilon
        elif target <= medium:
            threshold = hard - epsilon
        elif target <= hard:
            threshold = harder - epsilon
        elif target <= harder:
            threshold = hardcore - epsilon
        elif target <= hardcore:
            threshold = mania - epsilon
        return threshold

    def getKnowsUsed(self):
        """Return (count of knows used, count of knows known, used knows list)."""
        knowsUsed = []
        for loc in self.visitedLocations:
            knowsUsed += loc['difficulty'].knows
        # get unique knows
        knowsUsed = list(set(knowsUsed))
        knowsUsedCount = len(knowsUsed)
        # get total of known knows
        knowsKnownCount = len([knows for knows in Knows.__dict__ if isKnows(knows) and getattr(Knows, knows).bool == True])
        knowsKnownCount += len([hellRun for hellRun in Settings.hellRuns if Settings.hellRuns[hellRun] is not None])
        return (knowsUsedCount, knowsKnownCount, knowsUsed)

    def tryRemainingLocs(self):
        # use preset which knows every techniques to test the remaining locs to
        # find which technique could allow to continue the seed
        locations = self.majorLocations if self.majorsSplit == 'Full' else self.majorLocations + self.minorLocations
        presetFileName = os.path.expanduser('~/RandomMetroidSolver/standard_presets/solution.json')
        presetLoader = PresetLoader.factory(presetFileName)
        presetLoader.load()
        self.smbm.createKnowsFunctions()
        self.areaGraph.getAvailableLocations(locations, self.smbm, infinity, self.lastAP)
        return [loc for loc in locations if loc['difficulty'].bool == True]
class ComeBack(object):
    """Decide which area to explore next when every available location has
    the "no comeback" flag, and rewind to try another area when stuck.

    One ComeBackStep object is created each time the no-comeback heuristic
    has to be used; the stack of steps is what rewind() walks back through.
    """
    def __init__(self, solver):
        # stack of ComeBackStep, most recent last
        self.comeBackSteps = []
        # used to rewind
        self.solver = solver
        self.log = log.get('Rewind')

    def handleNoComeBack(self, locations, cur):
        """Apply the no-comeback heuristic at solver step *cur*.

        Returns True when the solver is stuck and a rewind is required,
        False when solving can simply continue.
        """
        # the heuristic only applies when every location carries the flag
        # and the locations span more than one graph area
        graphAreas = {}
        for loc in locations:
            if "comeBack" not in loc:
                return False
            if loc["comeBack"] == True:
                return False
            if loc["GraphArea"] in graphAreas:
                graphAreas[loc["GraphArea"]] += 1
            else:
                graphAreas[loc["GraphArea"]] = 1
        if len(graphAreas) == 1:
            return False
        self.log.debug("WARNING: use no come back heuristic for {} locs in {} graph areas ({})".format(len(locations), len(graphAreas), graphAreas))
        # check if we can use existing step
        if len(self.comeBackSteps) > 0:
            lastStep = self.comeBackSteps[-1]
            if lastStep.cur == cur:
                self.log.debug("Use last step at {}".format(cur))
                return lastStep.next(locations)
        # create a step
        self.log.debug("Create new step at {}".format(cur))
        step = ComeBackStep(graphAreas, cur)
        self.comeBackSteps.append(step)
        return step.next(locations)

    def cleanNoComeBack(self, locations):
        """Remove the area weights set by a previous ComeBackStep.next()."""
        for loc in locations:
            if "areaWeight" in loc:
                del loc["areaWeight"]

    def rewind(self, cur):
        """Cancel collected items back to the previous decision step.

        Returns False when there is no step left to rewind to (truly
        stuck), True when the rewind was performed.
        """
        # come back to the previous step
        # if no more rewinds available: tell we're stuck by returning False
        if len(self.comeBackSteps) == 0:
            self.log.debug("No more steps to rewind")
            return False
        self.log.debug("Start rewind, current: {}".format(cur))
        lastStep = self.comeBackSteps[-1]
        if lastStep.cur == cur:
            # need to go up one more time
            self.comeBackSteps.pop()
            if len(self.comeBackSteps) == 0:
                self.log.debug("No more steps to rewind")
                return False
            lastStep = self.comeBackSteps[-1]
            self.log.debug("Rewind previous step at {}".format(lastStep.cur))
        count = cur - lastStep.cur
        self.solver.cancelLastItems(count)
        self.log.debug("Rewind {} items to {}".format(count, lastStep.cur))
        return True
class ComeBackStep(object):
    """One 'no come back' decision point, used for rewinding."""

    def __init__(self, graphAreas, cur):
        # areas already tried from this step
        self.visitedGraphAreas = []
        # graph area name -> number of available locations at this step
        self.graphAreas = graphAreas
        # index of the solver step where the decision was taken
        self.cur = cur
        self.log = log.get('RewindStep')

    def next(self, locations):
        """Pick the next unvisited area and weight *locations* toward it.

        Returns True when every area has already been tried (stuck),
        else False.
        """
        if len(self.visitedGraphAreas) == len(self.graphAreas):
            self.log.debug("all areas have been visited, stuck")
            return True
        self.log.debug("graphAreas: {} visitedGraphAreas: {}".format(self.graphAreas, self.visitedGraphAreas))
        # choose the unvisited area with the most available locations,
        # iterating in sorted order for determinism
        bestArea = ""
        bestWeight = 0
        for areaName in sorted(self.graphAreas):
            if areaName in self.visitedGraphAreas:
                continue
            if self.graphAreas[areaName] > bestWeight:
                bestWeight = self.graphAreas[areaName]
                bestArea = areaName
        self.visitedGraphAreas.append(bestArea)
        self.log.debug("next area: {}".format(bestArea))
        # the chosen area gets weight 1, every other one a huge penalty
        outWeight = 10000
        weights = {area: (1 if area == bestArea else outWeight) for area in self.graphAreas}
        for loc in locations:
            areaName = loc["GraphArea"]
            if areaName in weights:
                loc["areaWeight"] = weights[areaName]
                self.log.debug("{} areaWeight: {}".format(loc["Name"], loc["areaWeight"]))
            else:
                # can happen if going to the first area unlocks new areas
                loc["areaWeight"] = outWeight
                self.log.debug("loc {} from area {} not in original areas".format(loc["Name"], areaName))
        return False
class Out(object):
    """Base class for solver result output (web or console)."""

    @staticmethod
    def factory(output, solver):
        """Instantiate the Out subclass matching *output* ('web'/'console')."""
        if output == 'web':
            return OutWeb(solver)
        if output == 'console':
            return OutConsole(solver)
        raise Exception("Wrong output type for the Solver: {}".format(output))

    def fixEnergy(self, items):
        """Collapse the '<n>-ETank[ - <m>-Reserve]' entries of *items*
        (in place) into one entry for the max ETank count plus, when
        present, one for the matching Reserve count."""
        energies = [item for item in items if item.find('ETank') != -1]
        if len(energies) == 0:
            return
        bestETank = bestReserve = bestTotal = 0
        for energy in energies:
            etanks = int(energy[0:energy.find('-ETank')])
            if energy.find('-Reserve') != -1:
                reserves = int(energy[energy.find(' - ')+len(' - '):energy.find('-Reserve')])
            else:
                reserves = 0
            # keep the combination giving the highest total energy
            if etanks + reserves > bestTotal:
                bestTotal = etanks + reserves
                bestETank = etanks
                bestReserve = reserves
            items.remove(energy)
        items.append('{}-ETank'.format(bestETank))
        if bestReserve > 0:
            items.append('{}-Reserve'.format(bestReserve))
class OutWeb(Out):
    """Write the solver result as a JSON file consumed by the web site,
    plus the area-graph image when area rando is on."""
    def __init__(self, solver):
        self.solver = solver

    def out(self):
        """Collect all result data and dump it as JSON to solver.outputFileName."""
        s = self.solver
        if s.areaRando == True:
            # generate the area graph image (and its thumbnail) in the
            # web app's static directory
            dotFileName = os.path.basename(os.path.splitext(s.romFileName)[0])+'.json'
            dotFileName = os.path.join(os.path.expanduser('~/web2py/applications/solver/static/graph'), dotFileName)
            s.areaGraph.toDot(dotFileName)
            (pngFileName, pngThumbFileName) = self.generatePng(dotFileName)
            if pngFileName is not None and pngThumbFileName is not None:
                # the web site only needs the base names
                pngFileName = os.path.basename(pngFileName)
                pngThumbFileName = os.path.basename(pngThumbFileName)
        else:
            pngFileName = None
            pngThumbFileName = None
        randomizedRom = os.path.basename(os.path.splitext(s.romFileName)[0])+'.sfc'
        diffPercent = DifficultyDisplayer(s.difficulty).percent()
        generatedPath = self.getPath(s.visitedLocations)
        collectedItems = s.smbm.getItems()
        if s.difficulty == -1:
            # aborted solve: report what remains and what extra
            # techniques would have unlocked
            remainTry = self.getPath(s.tryRemainingLocs())
            remainMajors = self.getPath(s.getRemainMajors())
            remainMinors = self.getPath(s.getRemainMinors())
            skippedMajors = None
            unavailMajors = None
        else:
            remainTry = None
            remainMajors = None
            remainMinors = None
            skippedMajors = self.getPath(s.getSkippedMajors())
            unavailMajors = self.getPath(s.getUnavailMajors())
        result = dict(randomizedRom=randomizedRom, difficulty=s.difficulty,
                      generatedPath=generatedPath, diffPercent=diffPercent,
                      knowsUsed=(s.knowsUsed, s.knowsKnown), itemsOk=s.itemsOk, patches=s.romLoader.getPatches(),
                      pngFileName=pngFileName, pngThumbFileName=pngThumbFileName,
                      remainTry=remainTry, remainMajors=remainMajors, remainMinors=remainMinors,
                      skippedMajors=skippedMajors, unavailMajors=unavailMajors,
                      collectedItems=collectedItems)
        with open(s.outputFileName, 'w') as jsonFile:
            json.dump(result, jsonFile)

    def getPath(self, locations):
        """Serialize *locations* into rows for the web result table
        (or None when *locations* is None)."""
        if locations is None:
            return None
        out = []
        for loc in locations:
            self.fixEnergy(loc['difficulty'].items)
            out.append([(loc['Name'], loc['Room']), loc['Area'], loc['SolveArea'], loc['itemName'],
                        '{0:.2f}'.format(loc['difficulty'].difficulty),
                        sorted(loc['difficulty'].knows),
                        sorted(list(set(loc['difficulty'].items))),
                        [ap.Name for ap in loc['path']] if 'path' in loc else None,
                        loc['Class']])
        return out

    def generatePng(self, dotFileName):
        """Render the dot file to png and a thumbnail; returns
        (pngFileName, pngThumbFileName) or (None, None) on failure."""
        # use dot to generate the graph's image .png
        # use convert to generate the thumbnail
        # dotFileName: the /directory/image.dot
        # the png and thumbnails are generated in the same directory as the dot
        # requires that graphviz is installed
        splited = os.path.splitext(dotFileName)
        pngFileName = splited[0] + '.png'
        pngThumbFileName = splited[0] + '_thumbnail.png'
        # dot -Tpng VARIA_Randomizer_AFX5399_noob.dot -oVARIA_Randomizer_AFX5399_noob.png
        params = ['dot', '-Tpng', dotFileName, '-o'+pngFileName]
        ret = subprocess.call(params)
        if ret != 0:
            print("Error calling dot {}: {}".format(params, ret))
            return (None, None)
        params = ['convert', pngFileName, '-resize', '1024', pngThumbFileName]
        ret = subprocess.call(params)
        if ret != 0:
            print("Error calling convert {}: {}".format(params, ret))
            # don't keep a png without its thumbnail
            os.remove(pngFileName)
            return (None, None)
        return (pngFileName, pngThumbFileName)
class OutConsole(Out):
    """Print the solver result to stdout and exit with a status code."""
    def __init__(self, solver):
        self.solver = solver

    def out(self):
        """Print the full report; exits 0 on success, 1 on aborted solve."""
        s = self.solver
        self.displayOutput()
        print("({}, {}): diff : {}".format(round(float(s.difficulty), 3), s.itemsOk, s.romFileName))
        print("{}/{}: knows Used : {}".format(s.knowsUsed, s.knowsKnown, s.romFileName))
        if s.difficulty >= 0:
            sys.exit(0)
        else:
            sys.exit(1)

    def printPath(self, message, locations, displayAPs=True):
        """Print a formatted table of *locations*; when *displayAPs* is
        True, also print the access-point path leading to each one."""
        print("")
        print(message)
        print('{} {:>48} {:>12} {:>34} {:>8} {:>16} {:>14} {} {}'.format("Z", "Location Name", "Area", "Sub Area", "Distance", "Item", "Difficulty", "Knows used", "Items used"))
        print('-'*150)
        lastAP = None
        for loc in locations:
            if displayAPs == True and 'path' in loc:
                path = [ap.Name for ap in loc['path']]
                lastAP = path[-1]
                # skip printing a trivial single-AP path
                if not (len(path) == 1 and path[0] == lastAP):
                    path = " -> ".join(path)
                    print('{:>50}: {}'.format('Path', path))
            line = '{} {:>48}: {:>12} {:>34} {:>8} {:>16} {:>14} {} {}'
            self.fixEnergy(loc['difficulty'].items)
            # 'Z' marks Chozo locations; 'nc' means "not computed"
            print(line.format('Z' if 'Chozo' in loc['Class'] else ' ',
                              loc['Name'],
                              loc['Area'],
                              loc['SolveArea'],
                              loc['distance'] if 'distance' in loc else 'nc',
                              loc['itemName'],
                              round(float(loc['difficulty'].difficulty), 2) if 'difficulty' in loc else 'nc',
                              sorted(loc['difficulty'].knows) if 'difficulty' in loc else 'nc',
                              sorted(list(set(loc['difficulty'].items))) if 'difficulty' in loc else 'nc'))

    def displayOutput(self):
        """Print path, remaining/skipped/unavailable locations, item
        counts and the difficulty scale."""
        s = self.solver
        print("all patches: {}".format(s.romLoader.getAllPatches()))
        # print generated path
        if Conf.displayGeneratedPath == True:
            self.printPath("Generated path ({}/101):".format(len(s.visitedLocations)), s.visitedLocations)
        # if we've aborted, display missing techniques and remaining locations
        if s.difficulty == -1:
            self.printPath("Next locs which could have been available if more techniques were known:", s.tryRemainingLocs())
            remainMajors = s.getRemainMajors()
            if len(remainMajors) > 0:
                self.printPath("Remaining major locations:", remainMajors, displayAPs=False)
            remainMinors = s.getRemainMinors()
            if remainMinors is not None and len(remainMinors) > 0:
                self.printPath("Remaining minor locations:", remainMinors, displayAPs=False)
        else:
            # if some locs are not picked up display those which are available
            # and those which are not
            skippedMajors = s.getSkippedMajors()
            if len(skippedMajors) > 0:
                self.printPath("Skipped major locations:", skippedMajors, displayAPs=False)
            else:
                print("No skipped major locations")
            unavailMajors = s.getUnavailMajors()
            if len(unavailMajors) > 0:
                self.printPath("Unaccessible major locations:", unavailMajors, displayAPs=False)
            else:
                print("No unaccessible major locations")
        items = s.smbm.getItems()
        print("ETank: {}, Reserve: {}, Missile: {}, Super: {}, PowerBomb: {}".format(items['ETank'], items['Reserve'], items['Missile'], items['Super'], items['PowerBomb']))
        print("Majors: {}".format(sorted([item for item in items if items[item] == True])))
        # display difficulty scale
        self.displayDifficulty(s.difficulty)

    def displayDifficulty(self, difficulty):
        """Print the textual difficulty scale, or the abort message."""
        if difficulty >= 0:
            text = DifficultyDisplayer(difficulty).scale()
            print("Estimated difficulty: {}".format(text))
        else:
            print("Aborted run, can't finish the game with the given prerequisites")
class DifficultyDisplayer:
    """Render a numeric difficulty as a textual scale or a percentage."""

    def __init__(self, difficulty):
        self.difficulty = difficulty

    def scale(self):
        """Return an ASCII gauge locating the difficulty between the two
        nearest named tiers (e.g. 'easy --^--- medium')."""
        if self.difficulty >= impossibru:
            return "IMPOSSIBRU!"
        else:
            previous = 0
            displayString = None
            for d in sorted(diff2text):
                if self.difficulty >= d:
                    previous = d
                else:
                    displayString = diff2text[previous]
                    displayString += ' '
                    scale = d - previous
                    pos = int(self.difficulty - previous)
                    displayString += '-' * pos
                    displayString += '^'
                    displayString += '-' * (scale - pos)
                    displayString += ' '
                    displayString += diff2text[d]
                    break
            if displayString is None:
                # BUGFIX: when difficulty >= the highest diff2text tier but
                # still < impossibru, the loop never breaks and displayString
                # used to be unbound (UnboundLocalError); fall back to the
                # highest tier's name alone.
                displayString = diff2text[previous]
            return displayString

    def percent(self):
        """Return the difficulty as a percentage of the scale (0-100),
        or -1 for an aborted solve."""
        if self.difficulty == -1:
            return -1
        elif self.difficulty in [0, easy]:
            return 0
        elif self.difficulty >= mania:
            return 100
        difficultiesPercent = {
            easy: 0,
            medium: 20,
            hard: 40,
            harder: 60,
            hardcore: 80,
            mania: 100
        }
        difficulty = self.difficulty
        lower = 0
        percent = 100
        # linear interpolation between the two named tiers surrounding the value
        # NOTE(review): assumes every key of diff2text is also a key of
        # difficultiesPercent, and that difficulty >= the lowest tier --
        # verify if diff2text ever changes
        for upper in sorted(diff2text):
            if self.difficulty >= upper:
                lower = upper
            else:
                lowerPercent = difficultiesPercent[lower]
                upperPercent = difficultiesPercent[upper]
                a = (upperPercent-lowerPercent)/float(upper-lower)
                b = lowerPercent - a * lower
                percent = int(difficulty * a + b)
                break
        return percent
def interactiveSolver(args):
    """Entry point for the interactive solver (driven by the web site).

    A first call with action 'init' creates the solver state; later calls
    iterate on that state with a scope (common/item/area) and an action.
    """
    # to init, requires interactive/romFileName/presetFileName/output parameters in standard/plando mode
    # to init, requires interactive/presetFileName/output parameters in seedless mode
    # to iterate, requires interactive/state/[loc]/[item]/action/output parameters in item scope
    # to iterate, requires interactive/state/[startPoint]/[endPoint]/action/output parameters in area scope
    if args.action == 'init':
        # init
        if args.mode != 'seedless' and args.romFileName == None:
            print("Missing romFileName parameter for {} mode".format(args.mode))
            sys.exit(1)
        if args.presetFileName == None or args.output == None:
            print("Missing preset or output parameter")
            sys.exit(1)
        solver = InteractiveSolver(args.output)
        solver.initialize(args.mode, args.romFileName, args.presetFileName, magic=args.raceMagic, debug=args.vcr, fill=args.fill, startAP=args.startAP)
    else:
        # iterate: build the params dict expected by the chosen scope/action
        params = {}
        if args.scope == 'common':
            if args.action == "save":
                params["lock"] = args.lock
                params["escapeTimer"] = args.escapeTimer
            elif args.action == "randomize":
                params["progressionSpeed"] = args.progressionSpeed
                params["minorQty"] = args.minorQty
                params["energyQty"] = args.energyQty
        elif args.scope == 'item':
            if args.state == None or args.action == None or args.output == None:
                print("Missing state/action/output parameter")
                sys.exit(1)
            if args.action in ["add", "replace"]:
                if args.loc == None:
                    print("Missing loc parameter when using action add for item")
                    sys.exit(1)
                if args.mode != 'standard':
                    # in plando/seedless the item to place must be given too
                    if args.item == None:
                        print("Missing item parameter when using action add in plando/suitless mode")
                        sys.exit(1)
                params = {'loc': args.loc, 'item': args.item, 'hide': args.hide}
            elif args.action == "remove":
                params = {'count': args.count}
        elif args.scope == 'area':
            if args.state == None or args.action == None or args.output == None:
                print("Missing state/action/output parameter")
                sys.exit(1)
            if args.action == "add":
                if args.startPoint == None or args.endPoint == None:
                    print("Missing start or end point parameter when using action add for item")
                    sys.exit(1)
                params = {'startPoint': args.startPoint, 'endPoint': args.endPoint}
            # removing a specific transition only needs its start point
            if args.action == "remove" and args.startPoint != None:
                params = {'startPoint': args.startPoint}
        params["debug"] = args.vcr
        solver = InteractiveSolver(args.output)
        solver.iterate(args.state, args.scope, args.action, params)
def standardSolver(args):
    """Run the standard (non-interactive) solver from parsed CLI *args*."""
    if args.romFileName is None:
        print("Parameter --romFileName mandatory when not in interactive mode")
        sys.exit(1)
    # fall back to the configuration defaults when not given on the command line
    difficultyTarget = Conf.difficultyTarget if args.difficultyTarget is None else args.difficultyTarget
    pickupStrategy = Conf.itemsPickup if args.pickupStrategy is None else args.pickupStrategy
    # itemsForbidden is like that: [['Varia'], ['Reserve'], ['Gravity']], fix it
    args.itemsForbidden = [item[0] for item in args.itemsForbidden]
    solver = StandardSolver(args.romFileName, args.presetFileName, difficultyTarget,
                            pickupStrategy, args.itemsForbidden, type=args.type,
                            firstItemsLog=args.firstItemsLog, extStatsFilename=args.extStatsFilename,
                            displayGeneratedPath=args.displayGeneratedPath,
                            outputFileName=args.output, magic=args.raceMagic,
                            checkDuplicateMajor=args.checkDuplicateMajor, vcr=args.vcr)
    solver.solveRom()
if __name__ == "__main__":
    # command line interface: either a one-shot standard solve, or one step
    # of an interactive (web-driven) solving session
    parser = argparse.ArgumentParser(description="Random Metroid Solver")
    parser.add_argument('--romFileName', '-r', help="the input rom", nargs='?',
                        default=None, dest="romFileName")
    parser.add_argument('--preset', '-p', help="the preset file", nargs='?',
                        default=None, dest='presetFileName')
    parser.add_argument('--difficultyTarget', '-t',
                        help="the difficulty target that the solver will aim for",
                        dest='difficultyTarget', nargs='?', default=None, type=int)
    parser.add_argument('--pickupStrategy', '-s', help="Pickup strategy for the Solver",
                        dest='pickupStrategy', nargs='?', default=None,
                        choices=['minimal', 'all', 'any'])
    parser.add_argument('--itemsForbidden', '-f', help="Item not picked up during solving",
                        dest='itemsForbidden', nargs='+', default=[], action='append')
    parser.add_argument('--type', '-y', help="web or console", dest='type', nargs='?',
                        default='console', choices=['web', 'console'])
    parser.add_argument('--checkDuplicateMajor', dest="checkDuplicateMajor", action='store_true',
                        help="print a warning if the same major is collected more than once")
    parser.add_argument('--debug', '-d', help="activate debug logging", dest='debug', action='store_true')
    parser.add_argument('--firstItemsLog', '-1',
                        help="path to file where for each item type the first time it was found and where will be written (spoilers!)",
                        nargs='?', default=None, type=str, dest='firstItemsLog')
    parser.add_argument('--ext_stats', help="Generate extended stats",
                        nargs='?', default=None, dest='extStatsFilename')
    parser.add_argument('--displayGeneratedPath', '-g', help="display the generated path (spoilers!)",
                        dest='displayGeneratedPath', action='store_true')
    parser.add_argument('--race', help="Race mode magic number", dest='raceMagic', type=int)
    parser.add_argument('--vcr', help="Generate VCR output file (in isolver it means debug mode: load all the transitions/add path info for locs)", dest='vcr', action='store_true')
    # standard/interactive, web site
    parser.add_argument('--output', '-o', help="When called from the website, contains the result of the solver",
                        dest='output', nargs='?', default=None)
    # interactive, web site
    parser.add_argument('--interactive', '-i', help="Activate interactive mode for the solver",
                        dest='interactive', action='store_true')
    parser.add_argument('--state', help="JSON file of the Solver state (used in interactive mode)",
                        dest="state", nargs='?', default=None)
    parser.add_argument('--loc', help="Name of the location to action on (used in interactive mode)",
                        dest="loc", nargs='?', default=None)
    parser.add_argument('--action', help="Pickup item at location, remove last pickedup location, clear all (used in interactive mode)",
                        dest="action", nargs="?", default=None, choices=['init', 'add', 'remove', 'clear', 'get', 'save', 'replace', 'randomize'])
    parser.add_argument('--item', help="Name of the item to place in plando mode (used in interactive mode)",
                        dest="item", nargs='?', default=None)
    parser.add_argument('--hide', help="Hide the item to place in plando mode (used in interactive mode)",
                        dest="hide", action='store_true')
    parser.add_argument('--startPoint', help="The start AP to connect (used in interactive mode)",
                        dest="startPoint", nargs='?', default=None)
    parser.add_argument('--endPoint', help="The destination AP to connect (used in interactive mode)",
                        dest="endPoint", nargs='?', default=None)
    parser.add_argument('--mode', help="Solver mode: standard/seedless/plando (used in interactive mode)",
                        dest="mode", nargs="?", default=None, choices=['standard', 'seedless', 'plando'])
    parser.add_argument('--scope', help="Scope for the action: common/area/item (used in interactive mode)",
                        dest="scope", nargs="?", default=None, choices=['common', 'area', 'item'])
    parser.add_argument('--count', help="Number of item rollback (used in interactive mode)",
                        dest="count", type=int)
    parser.add_argument('--lock', help="lock the plando seed (used in interactive mode)",
                        dest="lock", action='store_true')
    parser.add_argument('--escapeTimer', help="escape timer like 03:00", dest="escapeTimer", default=None)
    parser.add_argument('--fill', help="in plando load all the source seed locations/transitions as a base (used in interactive mode)",
                        dest="fill", action='store_true')
    parser.add_argument('--startAP', help="in plando/seedless: the start location", dest="startAP", default="Landing Site")
    parser.add_argument('--progressionSpeed', help="rando plando (used in interactive mode)",
                        dest="progressionSpeed", nargs="?", default=None, choices=["slowest", "slow", "medium", "fast", "fastest", "basic", "VARIAble"])
    parser.add_argument('--minorQty', help="rando plando (used in interactive mode)",
                        dest="minorQty", nargs="?", default=None, choices=[str(i) for i in range(0,101)])
    parser.add_argument('--energyQty', help="rando plando (used in interactive mode)",
                        dest="energyQty", nargs="?", default=None, choices=["sparse", "medium", "vanilla"])
    args = parser.parse_args()
    if args.presetFileName is None:
        args.presetFileName = 'standard_presets/regular.json'
    # sanity check the optional numeric parameters
    if args.raceMagic != None:
        if args.raceMagic <= 0 or args.raceMagic >= 0x10000:
            print("Invalid magic")
            sys.exit(-1)
    if args.count != None:
        if args.count < 1 or args.count > 0x80:
            print("Invalid count")
            sys.exit(-1)
    log.init(args.debug)
    if args.interactive == True:
        interactiveSolver(args)
    else:
        standardSolver(args)
solver: fix rewind when multiple steps are involved.
#!/usr/bin/python3
import sys, math, argparse, re, json, os, subprocess, logging, random
from time import gmtime, strftime
# the difficulties for each technics
from parameters import Knows, Settings, isKnows, isSettings
from parameters import easy, medium, hard, harder, hardcore, mania, god, samus, impossibru, infinity, diff2text
# the helper functions
from smbool import SMBool
from smboolmanager import SMBoolManager
from helpers import Pickup, Bosses
from rom import RomLoader, RomPatcher, RomReader
from rom_patches import RomPatches
from itemrandomizerweb.Items import ItemManager
from graph_locations import locations as graphLocations
from graph import AccessGraph
from graph_access import vanillaTransitions, vanillaBossesTransitions, vanillaEscapeTransitions, accessPoints, GraphUtils, getAccessPoint
from utils import PresetLoader, removeChars
from vcr import VCR
import log, db
class Conf:
    """Default solver configuration; overridden by the CLI parameters."""
    # keep getting majors of at most this difficulty before going for minors or changing area
    difficultyTarget = medium
    # display the generated path (spoilers!)
    displayGeneratedPath = False
    # choose how many items are required (possible value: minimal/all/any)
    itemsPickup = 'minimal'
    # the list of items to not pick up
    itemsForbidden = []
class SolverState(object):
    """(De)serializable snapshot of an interactive solver session."""
    def __init__(self, debug=False):
        # when True, extra info (paths, distances) is included in the web dicts
        self.debug = debug
def fromSolver(self, solver):
    """Snapshot *solver* into self.state, a JSON-serializable dict."""
    self.state = {}
    # string
    self.state["majorsSplit"] = solver.majorsSplit
    # bool
    self.state["areaRando"] = solver.areaRando
    # bool
    self.state["bossRando"] = solver.bossRando
    # bool
    self.state["escapeRando"] = solver.escapeRando
    # string "03:00"
    self.state["escapeTimer"] = solver.escapeTimer
    # list of active patches
    self.state["patches"] = RomPatches.ActivePatches
    # start ap
    self.state["startAP"] = solver.startAP
    # start area
    self.state["startArea"] = solver.startArea
    # dict {locName: {itemName: "xxx", "accessPoint": "xxx"}, ...}
    self.state["locsData"] = self.getLocsData(solver.locations)
    # list [(ap1, ap2), (ap3, ap4), ...]
    self.state["areaTransitions"] = solver.areaTransitions
    # list [(ap1, ap2), (ap3, ap4), ...]
    self.state["bossTransitions"] = solver.bossTransitions
    # list [(ap1, ap2), ...]
    self.state["curGraphTransitions"] = solver.curGraphTransitions
    # preset file name
    self.state["presetFileName"] = solver.presetFileName
    ## items collected / locs visited / bosses killed
    # list [item1, item2, ...]
    self.state["collectedItems"] = solver.collectedItems
    # dict {locName: {index: 0, difficulty: (bool, diff, ...), ...} with index being the position of the loc in visitedLocations
    self.state["visitedLocations"] = self.getVisitedLocations(solver.visitedLocations)
    # dict {locName: (bool, diff, [know1, ...], [item1, ...]), ...}
    self.state["availableLocations"] = self.getAvailableLocations(solver.majorLocations)
    # string of last access point
    self.state["lastAP"] = solver.lastAP
    # list of killed bosses: ["boss1", "boss2"]
    self.state["bosses"] = [boss for boss in Bosses.golden4Dead if Bosses.golden4Dead[boss] == True]
    # dict {locNameWeb: {infos}, ...}
    self.state["availableLocationsWeb"] = self.getAvailableLocationsWeb(solver.majorLocations)
    # dict {locNameWeb: {infos}, ...}
    self.state["visitedLocationsWeb"] = self.getAvailableLocationsWeb(solver.visitedLocations)
    # dict {locNameWeb: {infos}, ...}
    self.state["remainLocationsWeb"] = self.getRemainLocationsWeb(solver.majorLocations)
    # string: standard/seedless/plando
    self.state["mode"] = solver.mode
    # string: seed identifier
    self.state["seed"] = solver.seed
    # dict {point: point, ...} / array of startPoints
    (self.state["linesWeb"], self.state["linesSeqWeb"]) = self.getLinesWeb(solver.curGraphTransitions)
    # bool: True when every seed transition has been placed
    self.state["allTransitions"] = len(solver.curGraphTransitions) == len(solver.areaTransitions) + len(solver.bossTransitions)
    self.state["errorMsg"] = solver.errorMsg
    # last visited location, for display purposes
    if len(solver.visitedLocations) > 0:
        self.state["last"] = {"loc": solver.visitedLocations[-1]["Name"],
                              "item": solver.visitedLocations[-1]["itemName"]}
    else:
        self.state["last"] = ""
def toSolver(self, solver):
    """Restore *solver*'s attributes from self.state (inverse of fromSolver)."""
    if 'majorsSplit' in self.state:
        solver.majorsSplit = self.state["majorsSplit"]
    else:
        # compatibility with existing sessions
        if self.state['fullRando'] == True:
            solver.majorsSplit = 'Full'
        else:
            solver.majorsSplit = 'Major'
    solver.areaRando = self.state["areaRando"]
    solver.bossRando = self.state["bossRando"]
    solver.escapeRando = self.state["escapeRando"]
    solver.escapeTimer = self.state["escapeTimer"]
    RomPatches.ActivePatches = self.state["patches"]
    solver.startAP = self.state["startAP"]
    solver.startArea = self.state["startArea"]
    self.setLocsData(solver.locations)
    solver.areaTransitions = self.state["areaTransitions"]
    solver.bossTransitions = self.state["bossTransitions"]
    solver.curGraphTransitions = self.state["curGraphTransitions"]
    # preset
    solver.presetFileName = self.state["presetFileName"]
    # items collected / locs visited / bosses killed
    solver.collectedItems = self.state["collectedItems"]
    (solver.visitedLocations, solver.majorLocations) = self.setLocations(self.state["visitedLocations"],
                                                                        self.state["availableLocations"],
                                                                        solver.locations)
    solver.lastAP = self.state["lastAP"]
    # replay the killed bosses into the global Bosses state
    Bosses.reset()
    for boss in self.state["bosses"]:
        Bosses.beatBoss(boss)
    solver.mode = self.state["mode"]
    solver.seed = self.state["seed"]
def getLocsData(self, locations):
    """Return {locName: {"itemName": ..., ["accessPoint": ...]}} for *locations*."""
    ret = {}
    for loc in locations:
        entry = {"itemName": loc["itemName"]}
        if "accessPoint" in loc:
            entry["accessPoint"] = loc["accessPoint"]
        ret[loc["Name"]] = entry
    return ret
def setLocsData(self, locations):
    """Restore itemName/accessPoint on *locations* from the saved state."""
    locsData = self.state["locsData"]
    for loc in locations:
        data = locsData[loc["Name"]]
        loc["itemName"] = data["itemName"]
        if "accessPoint" in data:
            loc["accessPoint"] = data["accessPoint"]
def getVisitedLocations(self, visitedLocations):
    """Serialize the visited locations, keeping their visit order via an
    index (required to cancel pickups in the right order)."""
    ret = {}
    for index, loc in enumerate(visitedLocations):
        diff = loc["difficulty"]
        ret[loc["Name"]] = {
            "index": index,
            "difficulty": (diff.bool, diff.difficulty, diff.knows, diff.items),
            "Visibility": loc["Visibility"]
        }
    return ret
def setLocations(self, visitedLocations, availableLocations, locations):
    """Split *locations* into (visited-in-order, remaining-major) lists,
    restoring the SMBool difficulty saved in the state dicts."""
    visited = []
    majors = []
    for loc in locations:
        name = loc["Name"]
        if name in visitedLocations:
            saved = visitedLocations[name]
            diff = saved["difficulty"]
            loc["difficulty"] = SMBool(diff[0], diff[1], diff[2], diff[3])
            if "Visibility" in saved:
                loc["Visibility"] = saved["Visibility"]
            # visitedLocations contains an index, keep it for ordering
            visited.append((saved["index"], loc))
        else:
            if name in availableLocations:
                diff = availableLocations[name]
                loc["difficulty"] = SMBool(diff[0], diff[1], diff[2], diff[3])
            majors.append(loc)
    visited.sort(key=lambda entry: entry[0])
    return ([loc for (index, loc) in visited], majors)
def diff4isolver(self, difficulty):
    """Map a numeric difficulty to the difficulty-scale name used by the web isolver.

    -1 is the sequence-break marker and maps to "break".
    """
    if difficulty == -1:
        return "break"
    # check the thresholds in ascending order, same as the original if/elif chain
    for (bound, label) in [(medium, "easy"), (hard, "medium"), (harder, "hard"),
                           (hardcore, "harder"), (mania, "hardcore")]:
        if difficulty < bound:
            return label
    return "mania"
def name4isolver(self, locName):
    """Strip spaces and special characters (" ,()-") from a location name for the web isolver."""
    # equivalent of: sed -e 's+ ++g' -e 's+,++g' -e 's+(++g' -e 's+)++g' -e 's+-++g'
    return locName.translate(str.maketrans("", "", " ,()-"))
def knows2isolver(self, knows):
    """Translate internal knows names to their display names, deduplicated.

    Note: returned list order is unspecified (built from a set), as before.
    """
    displayed = [Knows.desc[know]['display'] if know in Knows.desc else know
                 for know in knows]
    return list(set(displayed))
def transition2isolver(self, transition):
    """Lower-case the first letter and strip " ,()-" from a transition name for the web isolver."""
    transition = str(transition)
    return transition[0].lower() + transition[1:].translate(str.maketrans("", "", " ,()-"))
def getAvailableLocationsWeb(self, locations):
    """Build the web payload for the locations currently reachable (difficulty.bool is True)."""
    available = {}
    for location in locations:
        # skip locations with no computed difficulty or not reachable
        if "difficulty" not in location or location["difficulty"].bool != True:
            continue
        smbool = location["difficulty"]
        webName = self.name4isolver(location["Name"])
        entry = {"difficulty": self.diff4isolver(smbool.difficulty),
                 "knows": self.knows2isolver(smbool.knows),
                 "items": list(set(smbool.items)),
                 "item": location["itemName"],
                 "name": location["Name"],
                 "canHidden": location["CanHidden"],
                 "visibility": location["Visibility"]}
        if "comeBack" in location:
            entry["comeBack"] = location["comeBack"]
        # for debug purpose
        if self.debug == True:
            if "path" in location:
                entry["path"] = [ap.Name for ap in location["path"]]
            if "distance" in location:
                entry["distance"] = location["distance"]
        available[webName] = entry
    return available
def getRemainLocationsWeb(self, locations):
    """Build the web payload for the locations not yet reachable (shown as sequence breaks)."""
    remaining = {}
    for location in locations:
        # include only locations with no difficulty, or an explicitly False one
        hasDiff = "difficulty" in location
        if hasDiff and location["difficulty"].bool != False:
            continue
        webName = self.name4isolver(location["Name"])
        entry = {"item": location["itemName"],
                 "name": location["Name"],
                 "knows": ["Sequence Break"],
                 "items": [],
                 "canHidden": location["CanHidden"],
                 "visibility": location["Visibility"]}
        if self.debug == True:
            if hasDiff:
                entry["difficulty"] = str(location["difficulty"])
            if "distance" in location:
                entry["distance"] = location["distance"]
        remaining[webName] = entry
    return remaining
def getLinesWeb(self, transitions):
    """Return (lines, linesSeq): a bidirectional transition mapping plus the
    ordered list of transition pairs, both using web names."""
    lines = {}
    linesSeq = []
    for (start, end) in transitions:
        webStart = self.transition2isolver(start)
        webEnd = self.transition2isolver(end)
        # map both directions so either side resolves to its twin
        lines[webStart] = webEnd
        lines[webEnd] = webStart
        linesSeq.append((webStart, webEnd))
    return (lines, linesSeq)
def getAvailableLocations(self, locations):
    """Serialize the SMBool difficulty of every currently reachable location, keyed by name."""
    return {location["Name"]: (location["difficulty"].bool,
                               location["difficulty"].difficulty,
                               location["difficulty"].knows,
                               location["difficulty"].items)
            for location in locations
            if "difficulty" in location and location["difficulty"].bool == True}
def fromJson(self, stateJsonFileName):
    """Load the solver state dict from a json file."""
    with open(stateJsonFileName, 'r') as src:
        self.state = json.load(src)
def toJson(self, outputFileName):
    """Dump the solver state dict to a json file."""
    with open(outputFileName, 'w') as dst:
        json.dump(self.state, dst)
class CommonSolver(object):
    """Logic shared by the solvers: ROM/preset loading, item collection and
    cancellation, and the main difficulty-computation loop.

    NOTE(review): relies on attributes set by subclasses before use
    (self.locations, self.smbm, self.log, self.debug, self.mode, self.vcr,
    self.pickup, self.comeBack, ...) — confirm against the subclass __init__s.
    """
    def loadRom(self, rom, interactive=False, magic=None, startAP=None):
        """Load a ROM (or set up a seedless session when rom is None) and
        initialize locations, transitions and the access graph.

        startAP param is only used for seedless.
        """
        if rom == None:
            self.romFileName = 'seedless'
            self.majorsSplit = 'Full'
            self.areaRando = True
            self.bossRando = True
            self.escapeRando = False
            self.escapeTimer = "03:00"
            self.startAP = startAP
            RomPatches.setDefaultPatches(startAP)
            self.startArea = getAccessPoint(startAP).Start['solveArea']
            # in seedless load all the vanilla transitions
            self.areaTransitions = vanillaTransitions[:]
            self.bossTransitions = vanillaBossesTransitions[:]
            self.escapeTransition = [vanillaEscapeTransitions[0]]
            self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
            # no items are placed in seedless mode
            for loc in self.locations:
                loc['itemName'] = 'Nothing'
        else:
            self.romFileName = rom
            self.romLoader = RomLoader.factory(rom, magic)
            self.majorsSplit = self.romLoader.assignItems(self.locations)
            (self.startAP, self.startArea, startPatches) = self.romLoader.getStartAP()
            (self.areaRando, self.bossRando, self.escapeRando) = self.romLoader.loadPatches()
            RomPatches.ActivePatches += startPatches
            self.escapeTimer = self.romLoader.getEscapeTimer()
            self.romLoader.readNothingId()
            if interactive == False:
                print("ROM {} majors: {} area: {} boss: {} escape: {} patches: {} activePatches: {}".format(rom, self.majorsSplit, self.areaRando, self.bossRando, self.escapeRando, sorted(self.romLoader.getPatches()), sorted(RomPatches.ActivePatches)))
            else:
                print("majors: {} area: {} boss: {} escape: {} activepatches: {}".format(self.majorsSplit, self.areaRando, self.bossRando, self.escapeRando, sorted(RomPatches.ActivePatches)))
            (self.areaTransitions, self.bossTransitions, self.escapeTransition) = self.romLoader.getTransitions()
            if interactive == True and self.debug == False:
                # in interactive area mode we build the graph as we play along
                if self.areaRando == True and self.bossRando == True:
                    self.curGraphTransitions = []
                elif self.areaRando == True:
                    self.curGraphTransitions = self.bossTransitions[:]
                elif self.bossRando == True:
                    self.curGraphTransitions = self.areaTransitions[:]
                else:
                    self.curGraphTransitions = self.bossTransitions + self.areaTransitions
                if self.escapeRando == False:
                    self.curGraphTransitions += self.escapeTransition
            else:
                self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
        self.areaGraph = AccessGraph(accessPoints, self.curGraphTransitions)
        if self.log.getEffectiveLevel() == logging.DEBUG:
            self.log.debug("Display items at locations:")
            for location in self.locations:
                self.log.debug('{:>50}: {:>16}'.format(location["Name"], location['itemName']))
    def loadPreset(self, presetFileName):
        """Load a skill preset and (re)create the knows functions on the smbm."""
        presetLoader = PresetLoader.factory(presetFileName)
        presetLoader.load()
        self.smbm.createKnowsFunctions()
        if self.log.getEffectiveLevel() == logging.DEBUG:
            presetLoader.printToScreen()
    def getLoc(self, locName):
        """Return the location dict named locName (implicitly None if not found)."""
        for loc in self.locations:
            if loc['Name'] == locName:
                return loc
    def computeLocationsDifficulty(self, locations):
        """Compute reachability/difficulty of the given locations from the current AP."""
        self.areaGraph.getAvailableLocations(locations, self.smbm, infinity, self.lastAP)
        # check post available functions too
        for loc in locations:
            if loc['difficulty'].bool == True:
                if 'PostAvailable' in loc:
                    # temporarily add the loc's item, as PostAvailable may depend on it
                    self.smbm.addItem(loc['itemName'])
                    postAvailable = loc['PostAvailable'](self.smbm)
                    self.smbm.removeItem(loc['itemName'])
                    loc['difficulty'] = self.smbm.wand(loc['difficulty'], postAvailable)
                # also check if we can come back to landing site from the location
                loc['comeBack'] = self.areaGraph.canAccess(self.smbm, loc['accessPoint'], self.lastAP, infinity, loc['itemName'])
        if self.log.getEffectiveLevel() == logging.DEBUG:
            self.log.debug("available locs:")
            for loc in locations:
                if loc['difficulty'].bool == True:
                    self.log.debug("{}: {}".format(loc['Name'], loc['difficulty']))
    def collectMajor(self, loc, itemName=None):
        """Pick up a major location: move it to visited and collect its item."""
        self.majorLocations.remove(loc)
        self.visitedLocations.append(loc)
        self.collectItem(loc, itemName)
    def collectMinor(self, loc):
        """Pick up a minor location: move it to visited and collect its item."""
        self.minorLocations.remove(loc)
        self.visitedLocations.append(loc)
        self.collectItem(loc)
    def collectItem(self, loc, item=None):
        """Collect the item at loc: update collectedItems/smbm, run the loc's
        Pickup hook and move the current access point to the loc."""
        if item == None:
            item = loc["itemName"]
        if self.vcr != None:
            self.vcr.addLocation(loc['Name'], item)
        if self.firstLogFile is not None:
            if item not in self.collectedItems:
                self.firstLogFile.write("{};{};{};{}\n".format(item, loc['Name'], loc['Area'], loc['GraphArea']))
        if item not in Conf.itemsForbidden:
            self.collectedItems.append(item)
            if self.checkDuplicateMajor == True:
                if item not in ['Nothing', 'NoEnergy', 'Missile', 'Super', 'PowerBomb', 'ETank', 'Reserve']:
                    if self.smbm.haveItem(item):
                        print("WARNING: {} has already been picked up".format(item))
            self.smbm.addItem(item)
        else:
            # update the name of the item
            item = "-{}-".format(item)
            loc["itemName"] = item
            self.collectedItems.append(item)
            # we still need the boss difficulty
            if 'Pickup' not in loc:
                loc["difficulty"] = SMBool(False)
        if 'Pickup' in loc:
            loc['Pickup']()
        self.log.debug("collectItem: {} at {}".format(item, loc['Name']))
        # last loc is used as root node for the graph
        self.lastAP = loc['accessPoint']
        self.lastArea = loc['SolveArea']
    def cancelLastItems(self, count):
        """Undo the last `count` collected locations (rollback support)."""
        if self.vcr != None:
            self.vcr.addRollback(count)
        for _ in range(count):
            if len(self.visitedLocations) == 0:
                return
            loc = self.visitedLocations.pop()
            self.majorLocations.append(loc)
            # pickup func
            if 'Unpickup' in loc:
                loc['Unpickup']()
            # access point
            if len(self.visitedLocations) == 0:
                self.lastAP = self.startAP
                self.lastArea = self.startArea
            else:
                self.lastAP = self.visitedLocations[-1]["accessPoint"]
                self.lastArea = self.visitedLocations[-1]["SolveArea"]
            # item
            item = loc["itemName"]
            if item != self.collectedItems[-1]:
                raise Exception("Item of last collected loc {}: {} is different from last collected item: {}".format(loc["Name"], item, self.collectedItems[-1]))
            # in plando we have to remove the last added item,
            # else it could be used in computing the postAvailable of a location
            if self.mode in ['plando', 'seedless']:
                loc["itemName"] = 'Nothing'
            self.collectedItems.pop()
            # if multiple majors in plando mode, remove it from smbm only when it's the last occurrence of it
            if self.smbm.isCountItem(item):
                self.smbm.removeItem(item)
            else:
                if item not in self.collectedItems:
                    self.smbm.removeItem(item)
    def getAvailableItemsList(self, locations, threshold):
        """Sort available locations for the decision step: first the ones
        'around' the current position, then the outside ones grouped by
        difficulty range and area/boss heuristics."""
        # locations without distance are not available
        locations = [loc for loc in locations if 'distance' in loc]
        if len(locations) == 0:
            return []
        # add nocomeback locations which has been selected by the comeback step (areaWeight == 1)
        around = [loc for loc in locations if( ('areaWeight' in loc and loc['areaWeight'] == 1)
                                               or ((loc['SolveArea'] == self.lastArea or loc['distance'] < 3)
                                                   and loc['difficulty'].difficulty <= threshold
                                                   and not Bosses.areaBossDead(self.lastArea)
                                                   and 'comeBack' in loc and loc['comeBack'] == True) )]
        outside = [loc for loc in locations if not loc in around]
        self.log.debug("around1 = {}".format([(loc['Name'], loc['difficulty'], loc['distance'], loc['comeBack'], loc['SolveArea']) for loc in around]))
        self.log.debug("outside1 = {}".format([(loc['Name'], loc['difficulty'], loc['distance'], loc['comeBack'], loc['SolveArea']) for loc in outside]))
        around.sort(key=lambda loc: (
            # locs in the same area
            0 if loc['SolveArea'] == self.lastArea
            else 1,
            # nearest locs
            loc['distance'],
            # beating a boss
            0 if 'Pickup' in loc
            else 1,
            # easiest first
            loc['difficulty'].difficulty
        )
        )
        self.log.debug("around2: {}".format([(loc['Name'], 0 if loc['SolveArea'] == self.lastArea else 1, loc['distance'], 0 if 'Pickup' in loc else 1, loc['difficulty'].difficulty) for loc in around]))
        # we want to sort the outside locations by putting the ones in the same area first,
        # then we sort the remaining areas starting with boss dead status.
        # we also want to sort by range of difficulty and not only with the difficulty threshold.
        ranged = {
            "areaWeight": [],
            "easy": [],
            "medium": [],
            "hard": [],
            "harder": [],
            "hardcore": [],
            "mania": [],
            "noComeBack": []
        }
        for loc in outside:
            if "areaWeight" in loc:
                ranged["areaWeight"].append(loc)
            elif "comeBack" not in loc or loc['comeBack'] == False:
                ranged["noComeBack"].append(loc)
            else:
                difficulty = loc['difficulty'].difficulty
                if difficulty < medium:
                    ranged["easy"].append(loc)
                elif difficulty < hard:
                    ranged["medium"].append(loc)
                elif difficulty < harder:
                    ranged["hard"].append(loc)
                elif difficulty < hardcore:
                    ranged["harder"].append(loc)
                elif difficulty < mania:
                    ranged["hardcore"].append(loc)
                else:
                    ranged["mania"].append(loc)
        for key in ranged:
            ranged[key].sort(key=lambda loc: (
                # first locs in the same area
                0 if loc['SolveArea'] == self.lastArea else 1,
                # first nearest locs
                loc['distance'],
                # beating a boss
                loc['difficulty'].difficulty if (not Bosses.areaBossDead(loc['Area'])
                                                 and 'Pickup' in loc)
                else 100000,
                # areas with boss still alive
                loc['difficulty'].difficulty if (not Bosses.areaBossDead(loc['Area']))
                else 100000,
                loc['difficulty'].difficulty))
        self.log.debug("outside2: (threshold: {}) name, areaWeight, area, distance, boss, boss in area, difficulty".format(threshold))
        outside = []
        for key in ["areaWeight", "easy", "medium", "hard", "harder", "hardcore", "mania", "noComeBack"]:
            outside += ranged[key]
            self.log.debug("outside2: {}: {}".format(key, [(loc['Name'], loc["areaWeight"] if "areaWeight" in loc else 0, 0 if loc['SolveArea'] == self.lastArea else 1, loc['distance'], loc['difficulty'].difficulty if (not Bosses.areaBossDead(loc['Area']) and 'Pickup' in loc) else 100000, loc['difficulty'].difficulty if not Bosses.areaBossDead(loc['Area']) else 100000,loc['difficulty'].difficulty) for loc in ranged[key]]))
        return around + outside
    def nextDecision(self, majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold):
        """Choose and collect the next location among the (already sorted)
        majors/minors lists.

        Raises an Exception if no decision can be taken.
        """
        # first take major items of acceptable difficulty in the current area
        if (len(majorsAvailable) > 0
            and majorsAvailable[0]['SolveArea'] == self.lastArea
            and majorsAvailable[0]['difficulty'].difficulty <= diffThreshold
            and majorsAvailable[0]['comeBack'] == True):
            return self.collectMajor(majorsAvailable.pop(0))
        # next item decision
        elif len(minorsAvailable) == 0 and len(majorsAvailable) > 0:
            self.log.debug('MAJOR')
            return self.collectMajor(majorsAvailable.pop(0))
        elif len(majorsAvailable) == 0 and len(minorsAvailable) > 0:
            # we don't check for hasEnoughMinors here, because we would be stuck, so pickup
            # what we can and hope it gets better
            self.log.debug('MINOR')
            return self.collectMinor(minorsAvailable.pop(0))
        elif len(majorsAvailable) > 0 and len(minorsAvailable) > 0:
            self.log.debug('BOTH|M=' + majorsAvailable[0]['Name'] + ', m=' + minorsAvailable[0]['Name'])
            # if both are available, decide based on area, difficulty and comeBack
            nextMajDifficulty = majorsAvailable[0]['difficulty'].difficulty
            nextMinArea = minorsAvailable[0]['SolveArea']
            nextMinDifficulty = minorsAvailable[0]['difficulty'].difficulty
            nextMajComeBack = majorsAvailable[0]['comeBack']
            nextMinComeBack = minorsAvailable[0]['comeBack']
            nextMajDistance = majorsAvailable[0]['distance']
            nextMinDistance = minorsAvailable[0]['distance']
            self.log.debug("diff area back dist - diff area back dist")
            self.log.debug("maj: {} '{}' {} {}, min: {} '{}' {} {}".format(nextMajDifficulty, majorsAvailable[0]['SolveArea'], nextMajComeBack, nextMajDistance, nextMinDifficulty, nextMinArea, nextMinComeBack, nextMinDistance))
            if hasEnoughMinors == True and self.haveAllMinorTypes() == True and self.smbm.haveItem('Charge'):
                # we have charge, no longer need minors
                return self.collectMajor(majorsAvailable.pop(0))
            else:
                # first take item from loc where you can come back
                if nextMajComeBack != nextMinComeBack:
                    self.log.debug("!= combeback")
                    if nextMajComeBack == True:
                        return self.collectMajor(majorsAvailable.pop(0))
                    else:
                        return self.collectMinor(minorsAvailable.pop(0))
                # difficulty over area (this is a difficulty estimator, not a speedrunning simulator)
                elif nextMinDifficulty <= diffThreshold and nextMajDifficulty <= diffThreshold:
                    # take the closer one
                    if nextMajDistance != nextMinDistance:
                        self.log.debug("!= distance")
                        if nextMajDistance < nextMinDistance:
                            return self.collectMajor(majorsAvailable.pop(0))
                        else:
                            return self.collectMinor(minorsAvailable.pop(0))
                    # take the easier
                    elif nextMinDifficulty < nextMajDifficulty:
                        self.log.debug("min easier and not enough minors")
                        return self.collectMinor(minorsAvailable.pop(0))
                    elif nextMajDifficulty < nextMinDifficulty:
                        self.log.debug("maj easier")
                        return self.collectMajor(majorsAvailable.pop(0))
                    # same difficulty and distance for minor and major, take major first
                    else:
                        return self.collectMajor(majorsAvailable.pop(0))
                # if not all the minors type are collected, start with minors
                elif nextMinDifficulty <= diffThreshold and not self.haveAllMinorTypes():
                    self.log.debug("not all minors types")
                    return self.collectMinor(minorsAvailable.pop(0))
                elif nextMinArea == self.lastArea and nextMinDifficulty <= diffThreshold:
                    self.log.debug("not enough minors")
                    return self.collectMinor(minorsAvailable.pop(0))
                elif nextMinDifficulty > diffThreshold and nextMajDifficulty > diffThreshold:
                    # take the easier
                    if nextMinDifficulty < nextMajDifficulty:
                        self.log.debug("min easier and not enough minors")
                        return self.collectMinor(minorsAvailable.pop(0))
                    elif nextMajDifficulty < nextMinDifficulty:
                        self.log.debug("maj easier")
                        return self.collectMajor(majorsAvailable.pop(0))
                    # take the closer one
                    elif nextMajDistance != nextMinDistance:
                        self.log.debug("!= distance")
                        if nextMajDistance < nextMinDistance:
                            return self.collectMajor(majorsAvailable.pop(0))
                        else:
                            return self.collectMinor(minorsAvailable.pop(0))
                    # same difficulty and distance for minor and major, take major first
                    else:
                        return self.collectMajor(majorsAvailable.pop(0))
                else:
                    if nextMinDifficulty < nextMajDifficulty:
                        self.log.debug("min easier and not enough minors")
                        return self.collectMinor(minorsAvailable.pop(0))
                    else:
                        self.log.debug("maj easier")
                        return self.collectMajor(majorsAvailable.pop(0))
        raise Exception("Can't take a decision")
    def checkMB(self, mbLoc):
        """Check whether the Mother Brain location is accessible; collect it
        and return True if so, else put it back and return False."""
        # add mother brain loc and check if it's accessible
        self.majorLocations.append(mbLoc)
        self.computeLocationsDifficulty(self.majorLocations)
        if mbLoc["difficulty"] == True:
            self.log.debug("MB loc accessible")
            self.collectMajor(mbLoc)
            return True
        else:
            self.log.debug("MB loc not accessible")
            self.majorLocations.remove(mbLoc)
            return False
    def computeDifficulty(self):
        """Main solver loop: repeatedly compute location difficulties and
        collect items until the game can be finished or we're stuck, then
        return (difficulty, itemsOk)."""
        # loop on the available locations depending on the collected items.
        # before getting a new item, loop on all of them and get their difficulty,
        # the next collected item is the one with the smallest difficulty,
        # if equality between major and minor, take major first.
        # remove mother brain location (there items pickup conditions on top of going to mother brain location)
        mbLoc = self.getLoc('Mother Brain')
        self.locations.remove(mbLoc)
        if self.majorsSplit == 'Major':
            self.majorLocations = [loc for loc in self.locations if "Major" in loc["Class"] or "Boss" in loc["Class"]]
            self.minorLocations = [loc for loc in self.locations if "Minor" in loc["Class"]]
        elif self.majorsSplit == 'Chozo':
            self.majorLocations = [loc for loc in self.locations if "Chozo" in loc["Class"] or "Boss" in loc["Class"]]
            self.minorLocations = [loc for loc in self.locations if "Chozo" not in loc["Class"] and "Boss" not in loc["Class"]]
        else:
            # Full
            self.majorLocations = self.locations[:] # copy
            self.minorLocations = self.majorLocations
        self.visitedLocations = []
        self.collectedItems = []
        # with the knowsXXX conditions some roms can be unbeatable, so we have to detect it
        previous = -1
        current = 0
        self.log.debug("{}: available major: {}, available minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
        isEndPossible = False
        endDifficulty = mania
        diffThreshold = self.getDiffThreshold()
        while True:
            # actual while condition
            hasEnoughMinors = self.pickup.enoughMinors(self.smbm, self.minorLocations)
            hasEnoughMajors = self.pickup.enoughMajors(self.smbm, self.majorLocations)
            hasEnoughItems = hasEnoughMajors and hasEnoughMinors
            canEndGame = self.canEndGame()
            (isEndPossible, endDifficulty) = (canEndGame.bool, canEndGame.difficulty)
            if isEndPossible and hasEnoughItems and endDifficulty <= diffThreshold:
                if self.checkMB(mbLoc):
                    self.log.debug("END")
                    break
                else:
                    self.log.debug("canEnd but MB loc not accessible")
            #self.log.debug(str(self.collectedItems))
            self.log.debug("Current AP/Area: {}/{}".format(self.lastAP, self.lastArea))
            # check if we have collected an item in the last loop
            current = len(self.collectedItems)
            if current == previous:
                if not isEndPossible:
                    self.log.debug("STUCK ALL")
                    if self.comeBack.rewind(len(self.collectedItems)) == True:
                        # rewind ok
                        previous = len(self.collectedItems) - 1
                        continue
                    else:
                        # we're really stuck
                        self.log.debug("STUCK CAN'T REWIND")
                        break
                else:
                    self.log.debug("HARD END 1")
                    self.checkMB(mbLoc)
                    break
            previous = current
            # compute the difficulty of all the locations
            self.computeLocationsDifficulty(self.majorLocations)
            if self.majorsSplit != 'Full':
                self.computeLocationsDifficulty(self.minorLocations)
            # keep only the available locations
            majorsAvailable = [loc for loc in self.majorLocations if 'difficulty' in loc and loc["difficulty"].bool == True]
            minorsAvailable = [loc for loc in self.minorLocations if 'difficulty' in loc and loc["difficulty"].bool == True]
            # check if we're stuck
            if len(majorsAvailable) == 0 and len(minorsAvailable) == 0:
                if not isEndPossible:
                    self.log.debug("STUCK MAJORS and MINORS")
                    if self.comeBack.rewind(len(self.collectedItems)) == True:
                        previous = len(self.collectedItems) - 1
                        continue
                    else:
                        # we're really stuck
                        self.log.debug("STUCK CAN'T REWIND")
                        break
                else:
                    self.log.debug("HARD END 2")
                    self.checkMB(mbLoc)
                    break
            # handle no comeback heuristic
            if self.majorsSplit == 'Full':
                locs = majorsAvailable
            else:
                locs = majorsAvailable+minorsAvailable
            rewindRequired = self.comeBack.handleNoComeBack(locs, len(self.collectedItems))
            if rewindRequired == True:
                if self.comeBack.rewind(len(self.collectedItems)) == True:
                    previous = len(self.collectedItems) - 1
                    continue
                else:
                    # we're really stuck
                    self.log.debug("STUCK CAN'T REWIND")
                    break
            # sort them on difficulty and proximity
            majorsAvailable = self.getAvailableItemsList(majorsAvailable, diffThreshold)
            if self.majorsSplit == 'Full':
                minorsAvailable = majorsAvailable
            else:
                minorsAvailable = self.getAvailableItemsList(minorsAvailable, diffThreshold)
            self.comeBack.cleanNoComeBack(locs)
            # choose one to pick up
            self.nextDecision(majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold)
        # compute difficulty value
        (difficulty, itemsOk) = self.computeDifficultyValue()
        if self.log.getEffectiveLevel() == logging.DEBUG:
            self.log.debug("difficulty={}".format(difficulty))
            self.log.debug("itemsOk={}".format(itemsOk))
            self.log.debug("{}: remaining major: {}, remaining minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
            self.log.debug("remaining majors:")
            for loc in self.majorLocations:
                self.log.debug("{} ({})".format(loc['Name'], loc['itemName']))
            self.log.debug("bosses: {}".format(Bosses.golden4Dead))
        return (difficulty, itemsOk)
    def haveAllMinorTypes(self):
        """Return True once at least one of each minor type has been collected."""
        # the first minor of each type can be seen as a major, so check for them first before going to far in zebes
        hasPB = 'PowerBomb' in self.collectedItems
        hasSuper = 'Super' in self.collectedItems
        hasMissile = 'Missile' in self.collectedItems
        return (hasPB and hasSuper and hasMissile)
    def canEndGame(self):
        """Return an SMBool telling whether the game can currently be finished."""
        # to finish the game you must :
        # - beat golden 4 : we force pickup of the 4 items
        #   behind the bosses to ensure that
        # - defeat metroids
        # - destroy/skip the zebetites
        # - beat Mother Brain
        return self.smbm.wand(Bosses.allBossesDead(self.smbm), self.smbm.enoughStuffTourian())
    def computeDifficultyValue(self):
        """Return (difficulty, itemsOk): the max difficulty over visited locs,
        and whether all requested items could be taken (-1/False on abort)."""
        if not self.canEndGame().bool:
            # we have aborted
            return (-1, False)
        else:
            # return the maximum difficulty
            difficultyMax = 0
            for loc in self.visitedLocations:
                difficultyMax = max(difficultyMax, loc['difficulty'].difficulty)
            difficulty = difficultyMax
            # check if we have taken all the requested items
            if (self.pickup.enoughMinors(self.smbm, self.minorLocations)
                and self.pickup.enoughMajors(self.smbm, self.majorLocations)):
                return (difficulty, True)
            else:
                # can finish but can't take all the requested items
                return (difficulty, False)
class InteractiveSolver(CommonSolver):
def __init__(self, output):
    """Create an interactive solver writing its state to the given output file name."""
    self.errorMsg = ""
    # interactive mode doesn't warn about duplicate majors
    self.checkDuplicateMajor = False
    self.vcr = None
    self.log = log.get('Solver')
    self.outputFileName = output
    self.firstLogFile = None
    self.locations = graphLocations
    (self.locsAddressName, self.locsWeb2Internal) = self.initLocsAddressName()
    self.transWeb2Internal = self.initTransitionsName()
def initLocsAddressName(self):
    """Build the (rom address -> web name) and (web name -> internal name) location mappings."""
    addressName = {}
    web2Internal = {}
    for loc in graphLocations:
        webName = self.locNameInternal2Web(loc["Name"])
        # keep only the low 16 bits of the location address
        addressName[loc["Address"] % 0x10000] = webName
        web2Internal[webName] = loc["Name"]
    return (addressName, web2Internal)
def initTransitionsName(self):
    """Build the (web name -> internal name) mapping for all vanilla transition access points."""
    web2Internal = {}
    for (startPoint, endPoint) in vanillaTransitions + vanillaBossesTransitions + vanillaEscapeTransitions:
        for point in [startPoint, endPoint]:
            web2Internal[self.apNameInternal2Web(point)] = point
    return web2Internal
def dumpState(self):
    """Serialize the current solver state to the output json file."""
    state = SolverState(self.debug)
    state.fromSolver(self)
    state.toJson(self.outputFileName)
def initialize(self, mode, rom, presetFileName, magic, debug, fill, startAP):
    """Load rom and preset, compute the initial available locations and dump
    the first state to the output file."""
    self.debug = debug
    self.mode = mode
    if self.mode != "seedless":
        self.seed = os.path.basename(os.path.splitext(rom)[0])+'.sfc'
    else:
        self.seed = "seedless"
    self.smbm = SMBoolManager()
    self.presetFileName = presetFileName
    self.loadPreset(self.presetFileName)
    self.loadRom(rom, interactive=True, magic=magic, startAP=startAP)
    if self.mode == 'plando':
        # in plando always consider that we're doing full
        self.majorsSplit = 'Full'
    self.clearItems()
    # in debug mode don't load plando locs/transitions
    if self.mode == 'plando' and self.debug == False:
        if fill == True:
            # load the source seed transitions and items/locations
            self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
            self.areaGraph = AccessGraph(accessPoints, self.curGraphTransitions)
            self.fillPlandoLocs()
        else:
            if self.areaRando == True or self.bossRando == True:
                plandoTrans = self.loadPlandoTransitions()
                if len(plandoTrans) > 0:
                    self.curGraphTransitions = plandoTrans
                self.areaGraph = AccessGraph(accessPoints, self.curGraphTransitions)
            self.loadPlandoLocs()
    # compute new available locations
    self.computeLocationsDifficulty(self.majorLocations)
    self.dumpState()
def iterate(self, stateJson, scope, action, params):
    """Apply one interactive action (scope: item/area/common) on a state
    loaded from json, recompute the available locations, then dump the new
    state to the output file."""
    self.debug = params["debug"]
    self.smbm = SMBoolManager()
    state = SolverState()
    state.fromJson(stateJson)
    state.toSolver(self)
    self.loadPreset(self.presetFileName)
    # add already collected items to smbm
    self.smbm.addItems(self.collectedItems)
    if scope == 'item':
        if action == 'clear':
            self.clearItems(True)
        else:
            if action == 'add':
                if self.mode == 'plando' or self.mode == 'seedless':
                    # in plando/seedless the user chooses the item to place
                    self.setItemAt(params['loc'], params['item'], params['hide'])
                else:
                    # pickup item at locName
                    self.pickItemAt(params['loc'])
            elif action == 'remove':
                # remove last collected item
                self.cancelLastItems(params['count'])
            elif action == 'replace':
                self.replaceItemAt(params['loc'], params['item'], params['hide'])
    elif scope == 'area':
        if action == 'clear':
            self.clearTransitions()
        else:
            if action == 'add':
                startPoint = params['startPoint']
                endPoint = params['endPoint']
                self.addTransition(self.transWeb2Internal[startPoint], self.transWeb2Internal[endPoint])
            elif action == 'remove':
                if 'startPoint' in params:
                    self.cancelTransition(self.transWeb2Internal[params['startPoint']])
                else:
                    # remove last transition
                    self.cancelLastTransition()
        # transitions changed: rebuild the access graph
        self.areaGraph = AccessGraph(accessPoints, self.curGraphTransitions)
    if scope == 'common':
        if action == 'save':
            return self.savePlando(params['lock'], params['escapeTimer'])
        elif action == 'randomize':
            self.randoPlando(params)
    # if last loc added was a sequence break, recompute its difficulty,
    # as it may be available with the newly placed item.
    if len(self.visitedLocations) > 0:
        lastVisited = self.visitedLocations[-1]
        if lastVisited['difficulty'].difficulty == -1:
            self.visitedLocations.remove(lastVisited)
            self.majorLocations.append(lastVisited)
        else:
            lastVisited = None
    else:
        lastVisited = None
    # compute new available locations
    self.clearLocs(self.majorLocations)
    self.computeLocationsDifficulty(self.majorLocations)
    # put back last visited location
    if lastVisited != None:
        self.majorLocations.remove(lastVisited)
        self.visitedLocations.append(lastVisited)
        if lastVisited["difficulty"] == False:
            # if the loc is still sequence break, put it back as sequence break
            lastVisited["difficulty"] = SMBool(True, -1)
    # return them
    self.dumpState()
def getLocNameFromAddress(self, address):
    """Return the location name registered for the given rom address."""
    names = self.locsAddressName
    return names[address]
def loadPlandoTransitions(self):
    """Read the plando transitions stored in the ROM and return them as a transitions list."""
    # add escape transition
    transitionsAddr = self.romLoader.getPlandoTransitions(len(vanillaBossesTransitions) + len(vanillaTransitions) + 1)
    return GraphUtils.getTransitions(transitionsAddr)
def loadPlandoLocs(self):
    """Replay the item pickups stored in the ROM (plando), in their original order."""
    # get the addresses of the already filled locs, with the correct order
    addresses = self.romLoader.getPlandoAddresses()
    # create a copy of the locations to avoid removing locs from self.locations
    self.majorLocations = self.locations[:]
    for address in addresses:
        # TODO::compute only the difficulty of the current loc
        self.computeLocationsDifficulty(self.majorLocations)
        locName = self.getLocNameFromAddress(address)
        self.pickItemAt(locName)
def fillPlandoLocs(self):
    """Run the solver on the plando seed; any location it couldn't reach is
    collected anyway, flagged as a sequence break (difficulty -1)."""
    self.pickup = Pickup("all")
    self.comeBack = ComeBack(self)
    # backup
    mbLoc = self.getLoc("Mother Brain")
    locationsBck = self.locations[:]
    self.lastAP = self.startAP
    self.lastArea = self.startArea
    (self.difficulty, self.itemsOk) = self.computeDifficulty()
    # put back mother brain location
    if mbLoc not in self.majorLocations and mbLoc not in self.visitedLocations:
        self.majorLocations.append(mbLoc)
    if self.itemsOk == False:
        # add remaining locs as sequence break
        for loc in self.majorLocations[:]:
            loc["difficulty"] = SMBool(True, -1)
            if "accessPoint" not in loc:
                # take first ap of the loc
                loc["accessPoint"] = list(loc["AccessFrom"])[0]
            self.collectMajor(loc)
    self.locations = locationsBck
def fillGraph(self):
    """Return an AccessGraph where every unused external access point gets a
    self-looping transition added."""
    used = set()
    for (src, dst) in self.curGraphTransitions:
        used.add(src)
        used.add(dst)
    transitions = self.curGraphTransitions[:]
    for ap in accessPoints:
        if ap.isInternal() == True:
            continue
        if ap.Name not in used:
            # self loop keeps the unused access point connected
            transitions.append((ap.Name, ap.Name))
    return AccessGraph(accessPoints, transitions)
def randoPlando(self, parameters):
    """Complete the plando by running the randomizer in a subprocess on the
    remaining locations, then load the generated item locations back in."""
    # if all the locations are visited, do nothing
    if len(self.majorLocations) == 0:
        return
    plandoLocsItems = {}
    for loc in self.visitedLocations:
        if "Boss" in loc["Class"]:
            plandoLocsItems[loc["Name"]] = "Boss"
        else:
            plandoLocsItems[loc["Name"]] = loc["itemName"]
    plandoCurrent = {
        "locsItems": plandoLocsItems,
        "transitions": self.curGraphTransitions,
        "patches": RomPatches.ActivePatches
    }
    plandoCurrentJson = json.dumps(plandoCurrent)
    # run the randomizer with the same python version as the current process
    pythonExec = "python{}.{}".format(sys.version_info.major, sys.version_info.minor)
    params = [
        pythonExec, os.path.expanduser("~/RandomMetroidSolver/randomizer.py"),
        '--runtime', '10',
        '--param', self.presetFileName,
        '--output', self.outputFileName,
        '--plandoRando', plandoCurrentJson,
        '--progressionSpeed', parameters["progressionSpeed"],
        '--minorQty', parameters["minorQty"],
        '--maxDifficulty', 'hardcore',
        '--energyQty', parameters["energyQty"]
    ]
    subprocess.call(params)
    with open(self.outputFileName, 'r') as jsonFile:
        data = json.load(jsonFile)
    self.errorMsg = data["errorMsg"]
    # load the locations
    if "itemLocs" in data:
        self.clearItems(reload=True)
        itemsLocs = data["itemLocs"]
        # create a copy because we need self.locations to be full, else the state will be empty
        self.majorLocations = self.locations[:]
        for itemLoc in itemsLocs:
            locName = itemLoc["Location"]["Name"]
            loc = self.getLoc(locName)
            difficulty = itemLoc["Location"]["difficulty"]
            smbool = SMBool(difficulty["bool"], difficulty["difficulty"], difficulty["knows"], difficulty["items"])
            loc["difficulty"] = smbool
            itemName = itemLoc["Item"]["Type"]
            if itemName == "Boss":
                itemName = "Nothing"
            loc["itemName"] = itemName
            loc["accessPoint"] = itemLoc["Location"]["accessPoint"]
            self.collectMajor(loc)
def savePlando(self, lock, escapeTimer):
    """Write the finished plando into the ROM and dump the patch data as JSON.

    lock: when True, a random magic number enables race-mode protection so
          the seed cannot be inspected/reloaded.
    escapeTimer: escape timer as an 'MM:SS' string, or None to keep default.
    """
    # store filled locations addresses in the ROM for next creating session
    locsItems = {}
    itemLocs = []
    for loc in self.visitedLocations:
        locsItems[loc["Name"]] = loc["itemName"]
    for loc in self.locations:
        if loc["Name"] in locsItems:
            itemLocs.append({'Location': loc, 'Item': ItemManager.getItem(locsItems[loc["Name"]])})
        else:
            # put nothing items in unused locations
            itemLocs.append({'Location': loc, 'Item': ItemManager.getItem("Nothing")})
    # patch the ROM
    if lock == True:
        # 16-bit magic number used by race mode to obfuscate the seed
        magic = random.randint(1, 0xffff)
    else:
        magic = None
    romPatcher = RomPatcher(magic=magic, plando=True)
    patches = ['credits_varia.ips', 'tracking.ips']
    if magic != None:
        patches.insert(0, 'race_mode.ips')
        patches.append('race_mode_credits.ips')
    romPatcher.addIPSPatches(patches)
    romPatcher.commitIPS()
    romPatcher.writeItemsLocs(itemLocs)
    romPatcher.writeItemsNumber()
    romPatcher.writeSpoiler(itemLocs)

    # real rando settings are not available in plando mode: write placeholders
    class FakeRandoSettings:
        def __init__(self):
            self.qty = {'energy': 'plando'}
            self.progSpeed = 'plando'
            self.progDiff = 'plando'
            self.restrictions = {'Suits': False, 'Morph': 'plando'}
            self.superFun = {}
    randoSettings = FakeRandoSettings()
    romPatcher.writeRandoSettings(randoSettings, itemLocs)
    if magic != None:
        romPatcher.writeMagic()
    else:
        # without race protection the plando can be reloaded later
        romPatcher.writePlandoAddresses(self.visitedLocations)
    if self.areaRando == True or self.bossRando == True:
        doors = GraphUtils.getDoorConnections(self.fillGraph(), self.areaRando, self.bossRando)
        romPatcher.writeDoorConnections(doors)
        if magic == None:
            doorsPtrs = GraphUtils.getAps2DoorsPtrs()
            romPatcher.writePlandoTransitions(self.curGraphTransitions, doorsPtrs,
                                              len(vanillaBossesTransitions) + len(vanillaTransitions))
    if self.escapeRando == True and escapeTimer != None:
        # convert from '03:00' to number of seconds
        escapeTimer = int(escapeTimer[0:2]) * 60 + int(escapeTimer[3:])
        romPatcher.writeEscapeTimer(escapeTimer)
    romPatcher.end()
    # NOTE(review): data appears to be a dict-like payload of patched bytes
    # returned by the ROM patcher (extra string keys are added below) — confirm
    data = romPatcher.romFile.data
    preset = os.path.splitext(os.path.basename(self.presetFileName))[0]
    # seed code prefix: A = area rando, B = boss rando
    seedCode = 'FX'
    if self.bossRando == True:
        seedCode = 'B'+seedCode
    if self.areaRando == True:
        seedCode = 'A'+seedCode
    fileName = 'VARIA_Plandomizer_{}{}_{}.sfc'.format(seedCode, strftime("%Y%m%d%H%M%S", gmtime()), preset)
    data["fileName"] = fileName
    # error msg in json to be displayed by the web site
    data["errorMsg"] = ""
    with open(self.outputFileName, 'w') as jsonFile:
        json.dump(data, jsonFile)
def locNameInternal2Web(self, locName):
    """Turn an internal location name into its web identifier by stripping
    separator characters (spaces, commas, parentheses, dashes)."""
    webName = removeChars(locName, " ,()-")
    return webName
def locNameWeb2Internal(self, locNameWeb):
    """Map a web location identifier back to its internal location name."""
    web2internal = self.locsWeb2Internal
    return web2internal[locNameWeb]
def apNameInternal2Web(self, apName):
    """Turn an internal access-point name into its web identifier: first
    letter lower-cased, spaces removed from the remainder."""
    head = apName[0].lower()
    tail = removeChars(apName[1:], " ")
    return head + tail
def getWebLoc(self, locNameWeb):
    """Return the location dict matching a web location identifier.

    Raises Exception when no location carries the translated name.
    """
    locName = self.locNameWeb2Internal(locNameWeb)
    found = next((loc for loc in self.locations if loc["Name"] == locName), None)
    if found is None:
        raise Exception("Location '{}' not found".format(locName))
    return found
def pickItemAt(self, locName):
    """Pick up the item already sitting at the given web location name.

    When the solver never computed a difficulty for the location, the pickup
    is treated as a sequence break (difficulty forced to a True SMBool).
    """
    location = self.getWebLoc(locName)
    noDifficulty = "difficulty" not in location or location["difficulty"] == False
    if noDifficulty:
        # sequence break
        location["difficulty"] = SMBool(True, -1)
    if "accessPoint" not in location:
        # default to the first declared access point of the location
        location["accessPoint"] = list(location["AccessFrom"])[0]
    self.collectMajor(location)
def setItemAt(self, locName, itemName, hide):
    """Place itemName at the location named locName (plando mode).

    hide: when True the item is marked hidden in the location.
    """
    location = self.getWebLoc(locName)
    # plando mode: overwrite whatever was there
    location["itemName"] = itemName
    if "difficulty" not in location:
        # no solver-computed difficulty yet: mark as sequence break
        location["difficulty"] = SMBool(True, -1)
    if "accessPoint" not in location:
        # default to the first declared access point of the location
        location["accessPoint"] = list(location["AccessFrom"])[0]
    if hide == True:
        location["Visibility"] = 'Hidden'
    self.collectMajor(location, itemName)
def replaceItemAt(self, locName, itemName, hide):
    """Replace the item currently placed at locName with itemName (plando mode).

    Keeps the solver bookkeeping consistent: collectedItems, the SMBool item
    manager and the location visibility are all updated.
    hide: when True the new item is written as hidden.
    """
    # replace itemName at locName
    loc = self.getWebLoc(locName)
    oldItemName = loc["itemName"]
    loc["itemName"] = itemName
    # major item can be set multiple times in plando mode
    count = self.collectedItems.count(oldItemName)
    isCount = self.smbm.isCountItem(oldItemName)
    # replace item at the old item spot in collectedItems
    # NOTE(review): assumes collectedItems is parallel to visitedLocations
    # (same index for the same pickup) — confirm against collectMajor
    index = next(i for i, vloc in enumerate(self.visitedLocations) if vloc['Name'] == loc['Name'])
    self.collectedItems[index] = itemName
    # update smbm if count item or major was only there once
    if isCount == True or count == 1:
        self.smbm.removeItem(oldItemName)
    if hide == True:
        loc["Visibility"] = 'Hidden'
    elif loc['CanHidden'] == True and loc['Visibility'] == 'Hidden':
        # the loc was previously hidden, set it back to visible
        loc["Visibility"] = 'Visible'
    self.smbm.addItem(itemName)
def clearItems(self, reload=False):
    """Reset the solver state: no collected items, no visited locations,
    back to the start access point/area.

    reload: when True also drop the cached 'difficulty' entries of the
            locations so they get recomputed.
    """
    self.collectedItems = []
    self.visitedLocations = []
    self.lastAP = self.startAP
    self.lastArea = self.startArea
    # NOTE(review): this aliases self.locations instead of copying it;
    # randoPlando() re-assigns a fresh copy right after calling clearItems
    # for that reason — confirm every other caller expects the aliasing.
    self.majorLocations = self.locations
    if reload == True:
        for loc in self.majorLocations:
            if "difficulty" in loc:
                del loc["difficulty"]
    Bosses.reset()
    self.smbm.resetItems()
def addTransition(self, startPoint, endPoint):
    """Record a new transition between two access points.

    Validity of the transition has already been checked by the controller.
    """
    transition = (startPoint, endPoint)
    self.curGraphTransitions.append(transition)
def cancelLastTransition(self):
    """Drop the most recently added transition.

    The vanilla base transitions loaded at startup (boss transitions when
    only area rando is on, area transitions when only boss rando is on) are
    protected: only transitions added on top of them can be cancelled.
    """
    if self.areaRando == True and self.bossRando == True:
        protected = 0
    elif self.areaRando == True:
        protected = len(self.bossTransitions)
    elif self.bossRando == True:
        protected = len(self.areaTransitions)
    else:
        # neither rando is active: nothing is cancellable
        return
    if len(self.curGraphTransitions) > protected:
        self.curGraphTransitions.pop()
def cancelTransition(self, startPoint):
    """Remove the transition that involves the given access point.

    startPoint may be either end of the stored transition. Transitions that
    belong to the vanilla base set (boss transitions when only area rando is
    on, area transitions when only boss rando is on) are not cancellable.
    """
    # get end point
    endPoint = None
    for (i, (start, end)) in enumerate(self.curGraphTransitions):
        if start == startPoint:
            endPoint = end
            break
        elif end == startPoint:
            endPoint = start
            break
    if endPoint == None:
        # shouldn't happen
        return
    # check that transition is cancelable
    if self.areaRando == True and self.bossRando == True:
        if len(self.curGraphTransitions) == 0:
            return
    elif self.areaRando == True:
        if len(self.curGraphTransitions) == len(self.bossTransitions):
            return
        # NOTE(review): addTransition stores tuples; these list-literal
        # membership tests only match if the base transition lists contain
        # lists — confirm the element type of bossTransitions/areaTransitions.
        elif [startPoint, endPoint] in self.bossTransitions or [endPoint, startPoint] in self.bossTransitions:
            return
    elif self.bossRando == True:
        if len(self.curGraphTransitions) == len(self.areaTransitions):
            return
        elif [startPoint, endPoint] in self.areaTransitions or [endPoint, startPoint] in self.areaTransitions:
            return
    # remove transition (i is the index found by the search loop above)
    self.curGraphTransitions.pop(i)
def clearTransitions(self):
    """Reset the transitions to the seed's base state.

    Randomized transitions are cleared entirely; vanilla ones are restored
    as copies so later edits never mutate the reference lists.
    """
    if self.areaRando == True and self.bossRando == True:
        base = []
    elif self.areaRando == True:
        base = self.bossTransitions[:]
    elif self.bossRando == True:
        base = self.areaTransitions[:]
    else:
        base = self.bossTransitions + self.areaTransitions
    self.curGraphTransitions = base
def clearLocs(self, locs):
    """Drop the cached solver difficulty from each of the given locations."""
    for loc in locs:
        loc.pop('difficulty', None)
def getDiffThreshold(self):
    """Difficulty threshold used by the interactive solver.

    There is no user-provided max difficulty parameter in interactive mode,
    so the threshold is pinned just below the 'hard' level.
    """
    return hard - 0.001
class StandardSolver(CommonSolver):
    """Non-interactive solver: given a ROM and parameters, estimate its difficulty.

    Used both from the command line ('console' type) and from the web site
    ('web' type); the output backend is chosen accordingly via Out.factory.
    """
    def __init__(self, rom, presetFileName, difficultyTarget, pickupStrategy, itemsForbidden=None, type='console',
                 firstItemsLog=None, extStatsFilename=None, displayGeneratedPath=False, outputFileName=None,
                 magic=None, checkDuplicateMajor=False, vcr=False):
        # None sentinel instead of a mutable default list ([]): a shared
        # default list would leak state between solver instances; passing []
        # explicitly still behaves exactly as before.
        if itemsForbidden is None:
            itemsForbidden = []
        self.checkDuplicateMajor = checkDuplicateMajor
        # record a VCR trace of the solve when requested
        self.vcr = VCR(rom, 'solver') if vcr == True else None
        # for compatibility with some common methods of the interactive solver
        self.mode = 'standard'
        self.log = log.get('Solver')

        self.setConf(difficultyTarget, pickupStrategy, itemsForbidden, displayGeneratedPath)

        self.firstLogFile = None
        if firstItemsLog is not None:
            self.firstLogFile = open(firstItemsLog, 'w')
            self.firstLogFile.write('Item;Location;Area\n')
        self.extStatsFilename = extStatsFilename

        # can be called from command line (console) or from web site (web)
        self.type = type
        self.output = Out.factory(self.type, self)
        self.outputFileName = outputFileName

        self.locations = graphLocations
        self.smbm = SMBoolManager()
        self.presetFileName = presetFileName
        self.loadPreset(self.presetFileName)
        self.loadRom(rom, magic=magic)
        self.pickup = Pickup(Conf.itemsPickup)
        self.comeBack = ComeBack(self)

    def setConf(self, difficultyTarget, pickupStrategy, itemsForbidden, displayGeneratedPath):
        """Store the solving parameters in the global Conf object."""
        Conf.difficultyTarget = difficultyTarget
        Conf.itemsPickup = pickupStrategy
        Conf.displayGeneratedPath = displayGeneratedPath
        Conf.itemsForbidden = itemsForbidden

    def solveRom(self):
        """Run the solver on the loaded ROM and emit the result through the output backend."""
        self.lastAP = self.startAP
        self.lastArea = self.startArea
        (self.difficulty, self.itemsOk) = self.computeDifficulty()
        if self.firstLogFile is not None:
            self.firstLogFile.close()
        (self.knowsUsed, self.knowsKnown, knowsUsedList) = self.getKnowsUsed()
        if self.vcr != None:
            self.vcr.dump()
        if self.extStatsFilename != None:
            with open(self.extStatsFilename, 'a') as extStatsFile:
                db.DB.dumpExtStatsSolver(self.difficulty, knowsUsedList, extStatsFile)
        self.output.out()

    def getRemainMajors(self):
        """Major locations left unreachable (holding a real item)."""
        return [loc for loc in self.majorLocations if loc['difficulty'].bool == False and loc['itemName'] not in ['Nothing', 'NoEnergy']]

    def getRemainMinors(self):
        """Minor locations left unreachable, or None in full split (no separate minors)."""
        if self.majorsSplit == 'Full':
            return None
        else:
            return [loc for loc in self.minorLocations if loc['difficulty'].bool == False and loc['itemName'] not in ['Nothing', 'NoEnergy']]

    def getSkippedMajors(self):
        """Major locations that were reachable but deliberately not picked up."""
        return [loc for loc in self.majorLocations if loc['difficulty'].bool == True and loc['itemName'] not in ['Nothing', 'NoEnergy']]

    def getUnavailMajors(self):
        """Major locations that could not be reached.

        NOTE(review): currently the exact same filter as getRemainMajors —
        confirm whether a different predicate was intended.
        """
        return [loc for loc in self.majorLocations if loc['difficulty'].bool == False and loc['itemName'] not in ['Nothing', 'NoEnergy']]

    def getDiffThreshold(self):
        """Map the target difficulty to the top of its band (just below the next scale step)."""
        target = Conf.difficultyTarget
        threshold = target
        epsilon = 0.001
        if target <= easy:
            threshold = medium - epsilon
        elif target <= medium:
            threshold = hard - epsilon
        elif target <= hard:
            threshold = harder - epsilon
        elif target <= harder:
            threshold = hardcore - epsilon
        elif target <= hardcore:
            threshold = mania - epsilon
        return threshold

    def getKnowsUsed(self):
        """Return (count of knows used, count of knows known, list of used knows)."""
        knowsUsed = []
        for loc in self.visitedLocations:
            knowsUsed += loc['difficulty'].knows
        # get unique knows
        knowsUsed = list(set(knowsUsed))
        knowsUsedCount = len(knowsUsed)
        # get total of known knows (enabled techniques + configured hell runs)
        knowsKnownCount = len([knows for knows in Knows.__dict__ if isKnows(knows) and getattr(Knows, knows).bool == True])
        knowsKnownCount += len([hellRun for hellRun in Settings.hellRuns if Settings.hellRuns[hellRun] is not None])
        return (knowsUsedCount, knowsKnownCount, knowsUsed)

    def tryRemainingLocs(self):
        # use preset which knows every techniques to test the remaining locs to
        # find which technique could allow to continue the seed
        locations = self.majorLocations if self.majorsSplit == 'Full' else self.majorLocations + self.minorLocations
        presetFileName = os.path.expanduser('~/RandomMetroidSolver/standard_presets/solution.json')
        presetLoader = PresetLoader.factory(presetFileName)
        presetLoader.load()
        self.smbm.createKnowsFunctions()
        self.areaGraph.getAvailableLocations(locations, self.smbm, infinity, self.lastAP)
        return [loc for loc in locations if loc['difficulty'].bool == True]
class ComeBack(object):
    # object to handle the decision to choose the next area when all locations have the "no comeback" flag.
    # handle rewinding to try the next area in case of a stuck.
    # one ComebackStep object is created each time we have to use the no comeback heuristic b, used for rewinding.
    def __init__(self, solver):
        # stack of ComeBackStep decision points, used to rewind
        self.comeBackSteps = []
        # used to rewind
        self.solver = solver
        self.log = log.get('Rewind')

    def handleNoComeBack(self, locations, cur):
        """Apply the no-comeback heuristic at solver step 'cur'.

        Returns what ComeBackStep.next() returns: True when every candidate
        area has already been tried (stuck, rewind required), else False.
        Bails out early (False) when any location can be come back from,
        lacks the flag, or when only one graph area is involved.
        """
        # return true if a rewind is required
        graphAreas = {}
        for loc in locations:
            if "comeBack" not in loc:
                return False
            if loc["comeBack"] == True:
                return False
            # count the candidate locations per graph area
            if loc["GraphArea"] in graphAreas:
                graphAreas[loc["GraphArea"]] += 1
            else:
                graphAreas[loc["GraphArea"]] = 1
        # a single area means there is no choice to make
        if len(graphAreas) == 1:
            return False
        self.log.debug("WARNING: use no come back heuristic for {} locs in {} graph areas ({})".format(len(locations), len(graphAreas), graphAreas))
        # check if we can use existing step
        if len(self.comeBackSteps) > 0:
            lastStep = self.comeBackSteps[-1]
            if lastStep.cur == cur:
                self.log.debug("Use last step at {}".format(cur))
                return lastStep.next(locations)
        # create a step
        self.log.debug("Create new step at {}".format(cur))
        step = ComeBackStep(graphAreas, cur)
        self.comeBackSteps.append(step)
        return step.next(locations)

    def cleanNoComeBack(self, locations):
        """Remove the per-location area weights set by ComeBackStep.next()."""
        for loc in locations:
            if "areaWeight" in loc:
                del loc["areaWeight"]

    def rewind(self, cur):
        """Rewind the solver to the previous decision point.

        cur: current solver step number, used to compute how many collected
             items have to be cancelled.
        Returns False when there is nothing left to rewind to (truly stuck).
        """
        # come back to the previous step
        # if no more rewinds available: tell we're stuck by returning False
        if len(self.comeBackSteps) == 0:
            self.log.debug("No more steps to rewind")
            return False
        self.log.debug("Start rewind, current: {}".format(cur))
        lastStep = self.comeBackSteps[-1]
        if not lastStep.moreAvailable():
            # need to go up one more time
            self.comeBackSteps.pop()
            if len(self.comeBackSteps) == 0:
                self.log.debug("No more steps to rewind")
                return False
            lastStep = self.comeBackSteps[-1]
        self.log.debug("Rewind previous step at {}".format(lastStep.cur))
        count = cur - lastStep.cur
        self.solver.cancelLastItems(count)
        self.log.debug("Rewind {} items to {}".format(count, lastStep.cur))
        return True
class ComeBackStep(object):
    # one case of no come back decision
    def __init__(self, graphAreas, cur):
        # graph areas already tried from this decision point
        self.visitedGraphAreas = []
        # dict {graphArea: number of available locations in it}
        self.graphAreas = graphAreas
        # solver step number at which this decision was taken
        self.cur = cur
        self.log = log.get('RewindStep')
        self.log.debug("create rewind step: {} {}".format(cur, graphAreas))

    def moreAvailable(self):
        """True while at least one graph area has not been tried yet."""
        return len(self.visitedGraphAreas) < len(self.graphAreas)

    def next(self, locations):
        """Pick the untried area with the most available locations and bias
        the location weights toward it.

        Returns True when every area has already been visited (stuck), else
        False. Side effect: sets loc["areaWeight"] on every location
        (1 inside the chosen area, a large weight everywhere else).
        """
        # use next available area, if all areas have been visited return True (stuck), else False
        if not self.moreAvailable():
            self.log.debug("rewind: all areas have been visited, stuck")
            return True
        self.log.debug("rewind next, graphAreas: {} visitedGraphAreas: {}".format(self.graphAreas, self.visitedGraphAreas))
        # get area with max available locs
        maxAreaWeigth = 0
        maxAreaName = ""
        # sorted() makes the tie-break deterministic (alphabetical order)
        for graphArea in sorted(self.graphAreas):
            if graphArea in self.visitedGraphAreas:
                continue
            else:
                if self.graphAreas[graphArea] > maxAreaWeigth:
                    maxAreaWeigth = self.graphAreas[graphArea]
                    maxAreaName = graphArea
        self.visitedGraphAreas.append(maxAreaName)
        self.log.debug("rewind next area: {}".format(maxAreaName))
        # big weight pushing the solver away from the non-chosen areas
        outWeight = 10000
        retGraphAreas = {}
        for graphArea in self.graphAreas:
            if graphArea == maxAreaName:
                retGraphAreas[graphArea] = 1
            else:
                retGraphAreas[graphArea] = outWeight
        # update locs
        for loc in locations:
            graphArea = loc["GraphArea"]
            if graphArea in retGraphAreas:
                loc["areaWeight"] = retGraphAreas[loc["GraphArea"]]
                self.log.debug("rewind loc {} new areaWeight: {}".format(loc["Name"], loc["areaWeight"]))
            else:
                # can happen if going to the first area unlocks new areas
                loc["areaWeight"] = outWeight
                self.log.debug("rewind loc {} from area {} not in original areas".format(loc["Name"], graphArea))
        return False
class Out(object):
    """Base class for the solver output backends (web site / console)."""

    @staticmethod
    def factory(output, solver):
        """Instantiate the output backend matching the given type string."""
        if output == 'web':
            return OutWeb(solver)
        if output == 'console':
            return OutConsole(solver)
        raise Exception("Wrong output type for the Solver: {}".format(output))

    def fixEnergy(self, items):
        """Collapse every energy entry of *items* into a single display entry.

        Entries look like '3-ETank' or '4-ETank - 1-Reserve'; the one with
        the highest total energy wins (first seen on ties) and is re-appended
        as separate '<n>-ETank' / '<n>-Reserve' entries. Mutates *items*.
        """
        tanks = [entry for entry in items if entry.find('ETank') != -1]
        if not tanks:
            return
        # (total energy, etank count, reserve count) of the best entry so far
        best = (0, 0, 0)
        for entry in tanks:
            etanks = int(entry[:entry.find('-ETank')])
            if entry.find('-Reserve') != -1:
                start = entry.find(' - ') + len(' - ')
                reserves = int(entry[start:entry.find('-Reserve')])
            else:
                reserves = 0
            # strict '>' keeps the first entry seen on equal totals
            if etanks + reserves > best[0]:
                best = (etanks + reserves, etanks, reserves)
            items.remove(entry)
        items.append('{}-ETank'.format(best[1]))
        if best[2] > 0:
            items.append('{}-Reserve'.format(best[2]))
class OutWeb(Out):
    """Solver output backend for the web site: dumps the results (and the
    area graph images when area rando is on) as JSON for web2py."""
    def __init__(self, solver):
        self.solver = solver

    def out(self):
        """Gather the results from the solver and write them to s.outputFileName as JSON."""
        s = self.solver
        if s.areaRando == True:
            # generate the area graph image for display on the web site
            dotFileName = os.path.basename(os.path.splitext(s.romFileName)[0])+'.json'
            dotFileName = os.path.join(os.path.expanduser('~/web2py/applications/solver/static/graph'), dotFileName)
            s.areaGraph.toDot(dotFileName)
            (pngFileName, pngThumbFileName) = self.generatePng(dotFileName)
            if pngFileName is not None and pngThumbFileName is not None:
                # the web page references the images by base name only
                pngFileName = os.path.basename(pngFileName)
                pngThumbFileName = os.path.basename(pngThumbFileName)
        else:
            pngFileName = None
            pngThumbFileName = None

        randomizedRom = os.path.basename(os.path.splitext(s.romFileName)[0])+'.sfc'
        diffPercent = DifficultyDisplayer(s.difficulty).percent()

        generatedPath = self.getPath(s.visitedLocations)
        collectedItems = s.smbm.getItems()

        # -1 difficulty means the solver aborted: report the remaining
        # locations instead of the skipped/unreachable ones
        if s.difficulty == -1:
            remainTry = self.getPath(s.tryRemainingLocs())
            remainMajors = self.getPath(s.getRemainMajors())
            remainMinors = self.getPath(s.getRemainMinors())
            skippedMajors = None
            unavailMajors = None
        else:
            remainTry = None
            remainMajors = None
            remainMinors = None
            skippedMajors = self.getPath(s.getSkippedMajors())
            unavailMajors = self.getPath(s.getUnavailMajors())

        result = dict(randomizedRom=randomizedRom, difficulty=s.difficulty,
                      generatedPath=generatedPath, diffPercent=diffPercent,
                      knowsUsed=(s.knowsUsed, s.knowsKnown), itemsOk=s.itemsOk, patches=s.romLoader.getPatches(),
                      pngFileName=pngFileName, pngThumbFileName=pngThumbFileName,
                      remainTry=remainTry, remainMajors=remainMajors, remainMinors=remainMinors,
                      skippedMajors=skippedMajors, unavailMajors=unavailMajors,
                      collectedItems=collectedItems)

        with open(s.outputFileName, 'w') as jsonFile:
            json.dump(result, jsonFile)

    def getPath(self, locations):
        """Serialize the locations into JSON-friendly rows for the web site.

        Each row: (name, room), area, solve area, item, difficulty, knows
        used, items used, access point path (or None), location class.
        Returns None when locations is None.
        """
        if locations is None:
            return None
        out = []
        for loc in locations:
            # merge the energy items into a single entry before display
            self.fixEnergy(loc['difficulty'].items)
            out.append([(loc['Name'], loc['Room']), loc['Area'], loc['SolveArea'], loc['itemName'],
                        '{0:.2f}'.format(loc['difficulty'].difficulty),
                        sorted(loc['difficulty'].knows),
                        sorted(list(set(loc['difficulty'].items))),
                        [ap.Name for ap in loc['path']] if 'path' in loc else None,
                        loc['Class']])
        return out

    def generatePng(self, dotFileName):
        """Render the dot file to a png plus a thumbnail.

        Returns (pngFileName, pngThumbFileName), or (None, None) when either
        external tool fails.
        """
        # use dot to generate the graph's image .png
        # use convert to generate the thumbnail
        # dotFileName: the /directory/image.dot
        # the png and thumbnails are generated in the same directory as the dot
        # requires that graphviz is installed
        splited = os.path.splitext(dotFileName)
        pngFileName = splited[0] + '.png'
        pngThumbFileName = splited[0] + '_thumbnail.png'
        # dot -Tpng VARIA_Randomizer_AFX5399_noob.dot -oVARIA_Randomizer_AFX5399_noob.png
        params = ['dot', '-Tpng', dotFileName, '-o'+pngFileName]
        ret = subprocess.call(params)
        if ret != 0:
            print("Error calling dot {}: {}".format(params, ret))
            return (None, None)
        params = ['convert', pngFileName, '-resize', '1024', pngThumbFileName]
        ret = subprocess.call(params)
        if ret != 0:
            print("Error calling convert {}: {}".format(params, ret))
            # don't leave a png without its thumbnail behind
            os.remove(pngFileName)
            return (None, None)
        return (pngFileName, pngThumbFileName)
class OutConsole(Out):
    """Solver output backend for the command line: prints the results then
    exits the process (0 when solvable, 1 when aborted)."""
    def __init__(self, solver):
        self.solver = solver

    def out(self):
        """Print the summary and exit (status 0 if difficulty >= 0, else 1)."""
        s = self.solver
        self.displayOutput()

        print("({}, {}): diff : {}".format(round(float(s.difficulty), 3), s.itemsOk, s.romFileName))
        print("{}/{}: knows Used : {}".format(s.knowsUsed, s.knowsKnown, s.romFileName))

        if s.difficulty >= 0:
            sys.exit(0)
        else:
            # can't solve
            sys.exit(1)

    def printPath(self, message, locations, displayAPs=True):
        """Print a formatted table of locations, optionally preceded by the
        access point path walked to reach each one."""
        print("")
        print(message)
        print('{} {:>48} {:>12} {:>34} {:>8} {:>16} {:>14} {} {}'.format("Z", "Location Name", "Area", "Sub Area", "Distance", "Item", "Difficulty", "Knows used", "Items used"))
        print('-'*150)
        lastAP = None
        for loc in locations:
            if displayAPs == True and 'path' in loc:
                path = [ap.Name for ap in loc['path']]
                lastAP = path[-1]
                # skip printing trivial single-step paths
                if not (len(path) == 1 and path[0] == lastAP):
                    path = " -> ".join(path)
                    print('{:>50}: {}'.format('Path', path))
            line = '{} {:>48}: {:>12} {:>34} {:>8} {:>16} {:>14} {} {}'
            # merge the energy items into a single entry before display
            self.fixEnergy(loc['difficulty'].items)
            # 'Z' flags Chozo locations
            print(line.format('Z' if 'Chozo' in loc['Class'] else ' ',
                              loc['Name'],
                              loc['Area'],
                              loc['SolveArea'],
                              loc['distance'] if 'distance' in loc else 'nc',
                              loc['itemName'],
                              round(float(loc['difficulty'].difficulty), 2) if 'difficulty' in loc else 'nc',
                              sorted(loc['difficulty'].knows) if 'difficulty' in loc else 'nc',
                              sorted(list(set(loc['difficulty'].items))) if 'difficulty' in loc else 'nc'))

    def displayOutput(self):
        """Print the generated path, remaining/skipped locations, final item
        counts and the difficulty scale."""
        s = self.solver
        print("all patches: {}".format(s.romLoader.getAllPatches()))
        # print generated path
        if Conf.displayGeneratedPath == True:
            self.printPath("Generated path ({}/101):".format(len(s.visitedLocations)), s.visitedLocations)
        # if we've aborted, display missing techniques and remaining locations
        if s.difficulty == -1:
            self.printPath("Next locs which could have been available if more techniques were known:", s.tryRemainingLocs())
            remainMajors = s.getRemainMajors()
            if len(remainMajors) > 0:
                self.printPath("Remaining major locations:", remainMajors, displayAPs=False)
            remainMinors = s.getRemainMinors()
            if remainMinors is not None and len(remainMinors) > 0:
                self.printPath("Remaining minor locations:", remainMinors, displayAPs=False)
        else:
            # if some locs are not picked up display those which are available
            # and those which are not
            skippedMajors = s.getSkippedMajors()
            if len(skippedMajors) > 0:
                self.printPath("Skipped major locations:", skippedMajors, displayAPs=False)
            else:
                print("No skipped major locations")
            unavailMajors = s.getUnavailMajors()
            if len(unavailMajors) > 0:
                self.printPath("Unaccessible major locations:", unavailMajors, displayAPs=False)
            else:
                print("No unaccessible major locations")
        items = s.smbm.getItems()
        print("ETank: {}, Reserve: {}, Missile: {}, Super: {}, PowerBomb: {}".format(items['ETank'], items['Reserve'], items['Missile'], items['Super'], items['PowerBomb']))
        print("Majors: {}".format(sorted([item for item in items if items[item] == True])))
        # display difficulty scale
        self.displayDifficulty(s.difficulty)

    def displayDifficulty(self, difficulty):
        """Print the human-readable difficulty gauge, or the abort message."""
        if difficulty >= 0:
            text = DifficultyDisplayer(difficulty).scale()
            print("Estimated difficulty: {}".format(text))
        else:
            print("Aborted run, can't finish the game with the given prerequisites")
class DifficultyDisplayer:
    """Format a numeric difficulty for human display (ASCII gauge or percent)."""
    def __init__(self, difficulty):
        self.difficulty = difficulty

    def scale(self):
        """Return a gauge like 'medium ---^--- hard' showing where the
        difficulty sits between the two nearest named levels."""
        if self.difficulty >= impossibru:
            return "IMPOSSIBRU!"
        else:
            previous = 0
            # find the first named level above the difficulty, then draw the
            # caret between the previous and that level
            # NOTE(review): assumes the difficulty is below max(diff2text)
            # here (otherwise displayString stays unbound) — confirm that
            # impossibru is the top diff2text key.
            for d in sorted(diff2text):
                if self.difficulty >= d:
                    previous = d
                else:
                    displayString = diff2text[previous]
                    displayString += ' '
                    scale = d - previous
                    pos = int(self.difficulty - previous)
                    displayString += '-' * pos
                    displayString += '^'
                    displayString += '-' * (scale - pos)
                    displayString += ' '
                    displayString += diff2text[d]
                    break
            return displayString

    def percent(self):
        """Return the difficulty as a percentage of the named scale using
        linear interpolation between the surrounding levels; -1 when the
        solve was aborted."""
        # return the difficulty as a percent
        if self.difficulty == -1:
            return -1
        elif self.difficulty in [0, easy]:
            return 0
        elif self.difficulty >= mania:
            return 100
        # percent value assigned to each named level
        difficultiesPercent = {
            easy: 0,
            medium: 20,
            hard: 40,
            harder: 60,
            hardcore: 80,
            mania: 100
        }
        difficulty = self.difficulty
        lower = 0
        percent = 100
        for upper in sorted(diff2text):
            if self.difficulty >= upper:
                lower = upper
            else:
                # linear interpolation between the two surrounding levels
                # NOTE(review): a difficulty strictly between 0 and easy would
                # look up difficultiesPercent[0] (KeyError) — confirm such
                # values cannot reach this point.
                lowerPercent = difficultiesPercent[lower]
                upperPercent = difficultiesPercent[upper]
                a = (upperPercent-lowerPercent)/float(upper-lower)
                b = lowerPercent - a * lower
                percent = int(difficulty * a + b)
                break
        return percent
def interactiveSolver(args):
    """Entry point for the interactive solver (driven by the web site).

    'init' action: create the solver state from the ROM/preset files.
    Any other action: build the params dict for the given scope ('common',
    'item' or 'area') and apply one iteration on the saved state.
    Exits the process with status 1 on missing parameters.
    """
    # to init, requires interactive/romFileName/presetFileName/output parameters in standard/plando mode
    # to init, requires interactive/presetFileName/output parameters in seedless mode
    # to iterate, requires interactive/state/[loc]/[item]/action/output parameters in item scope
    # to iterate, requires interactive/state/[startPoint]/[endPoint]/action/output parameters in area scope
    if args.action == 'init':
        # init
        if args.mode != 'seedless' and args.romFileName == None:
            print("Missing romFileName parameter for {} mode".format(args.mode))
            sys.exit(1)
        if args.presetFileName == None or args.output == None:
            print("Missing preset or output parameter")
            sys.exit(1)
        solver = InteractiveSolver(args.output)
        solver.initialize(args.mode, args.romFileName, args.presetFileName, magic=args.raceMagic, debug=args.vcr, fill=args.fill, startAP=args.startAP)
    else:
        # iterate
        params = {}
        if args.scope == 'common':
            if args.action == "save":
                params["lock"] = args.lock
                params["escapeTimer"] = args.escapeTimer
            elif args.action == "randomize":
                params["progressionSpeed"] = args.progressionSpeed
                params["minorQty"] = args.minorQty
                params["energyQty"] = args.energyQty
        elif args.scope == 'item':
            if args.state == None or args.action == None or args.output == None:
                print("Missing state/action/output parameter")
                sys.exit(1)
            if args.action in ["add", "replace"]:
                if args.loc == None:
                    print("Missing loc parameter when using action add for item")
                    sys.exit(1)
                # in plando/suitless the item to place must be given explicitly
                if args.mode != 'standard':
                    if args.item == None:
                        print("Missing item parameter when using action add in plando/suitless mode")
                        sys.exit(1)
                params = {'loc': args.loc, 'item': args.item, 'hide': args.hide}
            elif args.action == "remove":
                params = {'count': args.count}
        elif args.scope == 'area':
            if args.state == None or args.action == None or args.output == None:
                print("Missing state/action/output parameter")
                sys.exit(1)
            if args.action == "add":
                if args.startPoint == None or args.endPoint == None:
                    print("Missing start or end point parameter when using action add for item")
                    sys.exit(1)
                params = {'startPoint': args.startPoint, 'endPoint': args.endPoint}
            if args.action == "remove" and args.startPoint != None:
                params = {'startPoint': args.startPoint}
        params["debug"] = args.vcr
        solver = InteractiveSolver(args.output)
        solver.iterate(args.state, args.scope, args.action, params)
def standardSolver(args):
    """Build a StandardSolver from the parsed CLI arguments and solve the ROM.

    Exits with status 1 when no ROM file name was given.
    """
    if args.romFileName is None:
        print("Parameter --romFileName mandatory when not in interactive mode")
        sys.exit(1)

    # fall back to the configured defaults when not given on the command line
    difficultyTarget = Conf.difficultyTarget if args.difficultyTarget is None else args.difficultyTarget
    pickupStrategy = Conf.itemsPickup if args.pickupStrategy is None else args.pickupStrategy

    # itemsForbidden is like that: [['Varia'], ['Reserve'], ['Gravity']], fix it
    args.itemsForbidden = [group[0] for group in args.itemsForbidden]

    solver = StandardSolver(args.romFileName, args.presetFileName, difficultyTarget,
                            pickupStrategy, args.itemsForbidden, type=args.type,
                            firstItemsLog=args.firstItemsLog, extStatsFilename=args.extStatsFilename,
                            displayGeneratedPath=args.displayGeneratedPath,
                            outputFileName=args.output, magic=args.raceMagic,
                            checkDuplicateMajor=args.checkDuplicateMajor, vcr=args.vcr)
    solver.solveRom()
if __name__ == "__main__":
    # command line interface: parse the arguments then dispatch either to the
    # interactive solver (web site driven) or to the standard solver.
    parser = argparse.ArgumentParser(description="Random Metroid Solver")
    parser.add_argument('--romFileName', '-r', help="the input rom", nargs='?',
                        default=None, dest="romFileName")
    parser.add_argument('--preset', '-p', help="the preset file", nargs='?',
                        default=None, dest='presetFileName')
    parser.add_argument('--difficultyTarget', '-t',
                        help="the difficulty target that the solver will aim for",
                        dest='difficultyTarget', nargs='?', default=None, type=int)
    parser.add_argument('--pickupStrategy', '-s', help="Pickup strategy for the Solver",
                        dest='pickupStrategy', nargs='?', default=None,
                        choices=['minimal', 'all', 'any'])
    parser.add_argument('--itemsForbidden', '-f', help="Item not picked up during solving",
                        dest='itemsForbidden', nargs='+', default=[], action='append')
    parser.add_argument('--type', '-y', help="web or console", dest='type', nargs='?',
                        default='console', choices=['web', 'console'])
    parser.add_argument('--checkDuplicateMajor', dest="checkDuplicateMajor", action='store_true',
                        help="print a warning if the same major is collected more than once")
    parser.add_argument('--debug', '-d', help="activate debug logging", dest='debug', action='store_true')
    parser.add_argument('--firstItemsLog', '-1',
                        help="path to file where for each item type the first time it was found and where will be written (spoilers!)",
                        nargs='?', default=None, type=str, dest='firstItemsLog')
    parser.add_argument('--ext_stats', help="Generate extended stats",
                        nargs='?', default=None, dest='extStatsFilename')
    parser.add_argument('--displayGeneratedPath', '-g', help="display the generated path (spoilers!)",
                        dest='displayGeneratedPath', action='store_true')
    parser.add_argument('--race', help="Race mode magic number", dest='raceMagic', type=int)
    parser.add_argument('--vcr', help="Generate VCR output file (in isolver it means debug mode: load all the transitions/add path info for locs)", dest='vcr', action='store_true')
    # standard/interactive, web site
    parser.add_argument('--output', '-o', help="When called from the website, contains the result of the solver",
                        dest='output', nargs='?', default=None)
    # interactive, web site
    parser.add_argument('--interactive', '-i', help="Activate interactive mode for the solver",
                        dest='interactive', action='store_true')
    parser.add_argument('--state', help="JSON file of the Solver state (used in interactive mode)",
                        dest="state", nargs='?', default=None)
    parser.add_argument('--loc', help="Name of the location to action on (used in interactive mode)",
                        dest="loc", nargs='?', default=None)
    parser.add_argument('--action', help="Pickup item at location, remove last pickedup location, clear all (used in interactive mode)",
                        dest="action", nargs="?", default=None, choices=['init', 'add', 'remove', 'clear', 'get', 'save', 'replace', 'randomize'])
    parser.add_argument('--item', help="Name of the item to place in plando mode (used in interactive mode)",
                        dest="item", nargs='?', default=None)
    parser.add_argument('--hide', help="Hide the item to place in plando mode (used in interactive mode)",
                        dest="hide", action='store_true')
    parser.add_argument('--startPoint', help="The start AP to connect (used in interactive mode)",
                        dest="startPoint", nargs='?', default=None)
    parser.add_argument('--endPoint', help="The destination AP to connect (used in interactive mode)",
                        dest="endPoint", nargs='?', default=None)
    parser.add_argument('--mode', help="Solver mode: standard/seedless/plando (used in interactive mode)",
                        dest="mode", nargs="?", default=None, choices=['standard', 'seedless', 'plando'])
    parser.add_argument('--scope', help="Scope for the action: common/area/item (used in interactive mode)",
                        dest="scope", nargs="?", default=None, choices=['common', 'area', 'item'])
    parser.add_argument('--count', help="Number of item rollback (used in interactive mode)",
                        dest="count", type=int)
    parser.add_argument('--lock', help="lock the plando seed (used in interactive mode)",
                        dest="lock", action='store_true')
    parser.add_argument('--escapeTimer', help="escape timer like 03:00", dest="escapeTimer", default=None)
    parser.add_argument('--fill', help="in plando load all the source seed locations/transitions as a base (used in interactive mode)",
                        dest="fill", action='store_true')
    parser.add_argument('--startAP', help="in plando/seedless: the start location", dest="startAP", default="Landing Site")
    parser.add_argument('--progressionSpeed', help="rando plando (used in interactive mode)",
                        dest="progressionSpeed", nargs="?", default=None, choices=["slowest", "slow", "medium", "fast", "fastest", "basic", "VARIAble"])
    parser.add_argument('--minorQty', help="rando plando (used in interactive mode)",
                        dest="minorQty", nargs="?", default=None, choices=[str(i) for i in range(0,101)])
    parser.add_argument('--energyQty', help="rando plando (used in interactive mode)",
                        dest="energyQty", nargs="?", default=None, choices=["sparse", "medium", "vanilla"])
    args = parser.parse_args()

    if args.presetFileName is None:
        args.presetFileName = 'standard_presets/regular.json'

    # the race magic must fit in 16 bits (it is written into the ROM)
    if args.raceMagic != None:
        if args.raceMagic <= 0 or args.raceMagic >= 0x10000:
            print("Invalid magic")
            sys.exit(-1)

    if args.count != None:
        if args.count < 1 or args.count > 0x80:
            print("Invalid count")
            sys.exit(-1)

    log.init(args.debug)

    if args.interactive == True:
        interactiveSolver(args)
    else:
        standardSolver(args)
# --- end of solver module ---
# -*- coding: utf-8 -*-
"""
Main server factory.
We create all the components here!
"""
from __future__ import division, absolute_import
from . import myself, lic, get_logger
# from flask.ext.security import SQLAlchemyUserDatastore # , Security
# from .models import db, User, Role
from confs import config
__author__ = myself
__copyright__ = myself
__license__ = lic
logger = get_logger(__name__)
# ####################################
# # Security
# udstore = SQLAlchemyUserDatastore(db, User, Role)
# # security = Security(datastore=udstore)
####################################
# DB init for security
# THIS WORKS ONLY WITH SQLALCHEMY
def db_auth():
    """Seed the auth tables when empty: create admin/citizen roles and one admin user.

    Commits once at the end only if something was actually created.

    NOTE(review): depends on ``Role``, ``User``, ``udstore`` and ``db`` whose
    imports are commented out at the top of this module — as written, calling
    this raises NameError until those imports are restored. ``flask.ext.*`` is
    also a removed import namespace in modern Flask; verify before enabling.
    """
    missing_role = not Role.query.first()
    if missing_role:
        # Log only when roles are actually absent (was previously logged
        # unconditionally, which was misleading).
        logger.debug("Missing role")
        udstore.create_role(name=config.ROLE_ADMIN, description='King')
        udstore.create_role(name=config.ROLE_USER, description='Citizen')
        logger.debug("Created roles")
    missing_user = not User.query.first()
    if missing_user:
        logger.debug("Missing user")
        import datetime
        now = datetime.datetime.utcnow()
        from flask.ext.security.utils import encrypt_password
        udstore.create_user(first_name='TheOnlyUser', last_name='IAm',
                            email=config.USER, confirmed_at=now,
                            password=encrypt_password(config.PWD))
        udstore.add_role_to_user(config.USER, config.ROLE_ADMIN)
        logger.debug("Created user")
    # Single commit covering both role and user creation.
    if missing_user or missing_role:
        db.session.commit()
        logger.info("Database init with user/roles from conf")
Bug fix for the relational-DB authentication module
# -*- coding: utf-8 -*-
"""
Main server factory.
We create all the components here!
"""
from __future__ import division, absolute_import
from . import myself, lic, get_logger
# from flask.ext.security import SQLAlchemyUserDatastore # , Security
# from .models import db, User, Role
from confs import config
from .generic import BaseAuthentication
__author__ = myself
__copyright__ = myself
__license__ = lic
logger = get_logger(__name__)
class Authentication(BaseAuthentication):
    """Relational-database authentication backend (currently a stub)."""

    def __init__(self):
        # The relational backend is not implemented yet; only the graph
        # database backend is usable at the moment.
        message = "Use the 'graphdb' authentication for now"
        raise NotImplementedError(message)
# ####################################
# # Security
# udstore = SQLAlchemyUserDatastore(db, User, Role)
# # security = Security(datastore=udstore)
####################################
# DB init for security
# THIS WORKS ONLY WITH SQLALCHEMY
def db_auth():
    """Seed the auth tables when empty: create admin/citizen roles and one admin user.

    Commits once at the end only if something was actually created.

    NOTE(review): depends on ``Role``, ``User``, ``udstore`` and ``db`` whose
    imports are commented out at the top of this module — as written, calling
    this raises NameError until those imports are restored. ``flask.ext.*`` is
    also a removed import namespace in modern Flask; verify before enabling.
    """
    missing_role = not Role.query.first()
    if missing_role:
        # Log only when roles are actually absent (was previously logged
        # unconditionally, which was misleading).
        logger.debug("Missing role")
        udstore.create_role(name=config.ROLE_ADMIN, description='King')
        udstore.create_role(name=config.ROLE_USER, description='Citizen')
        logger.debug("Created roles")
    missing_user = not User.query.first()
    if missing_user:
        logger.debug("Missing user")
        import datetime
        now = datetime.datetime.utcnow()
        from flask.ext.security.utils import encrypt_password
        udstore.create_user(first_name='TheOnlyUser', last_name='IAm',
                            email=config.USER, confirmed_at=now,
                            password=encrypt_password(config.PWD))
        udstore.add_role_to_user(config.USER, config.ROLE_ADMIN)
        logger.debug("Created user")
    # Single commit covering both role and user creation.
    if missing_user or missing_role:
        db.session.commit()
        logger.info("Database init with user/roles from conf")
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.gcp.gce_virtual_machine"""
import unittest
import mock
from perfkitbenchmarker import pkb # noqa
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.gcp import gce_virtual_machine
class GCEPreemptibleVMFlagTestCase(unittest.TestCase):
    """Verifies --gce_preemptible_vms adds --preemptible to the create command."""

    def testPreemptibleVMFlag(self):
        # Patch out command execution, file I/O and flags so _Create() only
        # assembles the gcloud command line instead of touching real GCE.
        with mock.patch(vm_util.__name__ + '.IssueCommand') as issue_command, \
                mock.patch('__builtin__.open'), \
                mock.patch(vm_util.__name__ + '.NamedTemporaryFile'), \
                mock.patch(gce_virtual_machine.__name__ + '.FLAGS') as gvm_flags:
            gvm_flags.gce_preemptible_vms = True
            gvm_flags.gcloud_scopes = None
            vm_spec = virtual_machine.BaseVirtualMachineSpec('proj',
                                                             'zone',
                                                             'n1-standard-1',
                                                             'image')
            vm = gce_virtual_machine.GceVirtualMachine(vm_spec)
            vm._Create()
            # Exactly one gcloud invocation, carrying the preemptible flag.
            # (assertEquals is a deprecated alias; use assertEqual.)
            self.assertEqual(issue_command.call_count, 1)
            self.assertIn('--preemptible', issue_command.call_args[0][0])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
Add comment explaining why the pkb module is imported
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.gcp.gce_virtual_machine"""
import unittest
import mock
from perfkitbenchmarker import pkb # noqa. Imported to create needed flags.
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.gcp import gce_virtual_machine
class GCEPreemptibleVMFlagTestCase(unittest.TestCase):
    """Verifies --gce_preemptible_vms adds --preemptible to the create command."""

    def testPreemptibleVMFlag(self):
        # Patch out command execution, file I/O and flags so _Create() only
        # assembles the gcloud command line instead of touching real GCE.
        with mock.patch(vm_util.__name__ + '.IssueCommand') as issue_command, \
                mock.patch('__builtin__.open'), \
                mock.patch(vm_util.__name__ + '.NamedTemporaryFile'), \
                mock.patch(gce_virtual_machine.__name__ + '.FLAGS') as gvm_flags:
            gvm_flags.gce_preemptible_vms = True
            gvm_flags.gcloud_scopes = None
            vm_spec = virtual_machine.BaseVirtualMachineSpec('proj',
                                                             'zone',
                                                             'n1-standard-1',
                                                             'image')
            vm = gce_virtual_machine.GceVirtualMachine(vm_spec)
            vm._Create()
            # Exactly one gcloud invocation, carrying the preemptible flag.
            # (assertEquals is a deprecated alias; use assertEqual.)
            self.assertEqual(issue_command.call_count, 1)
            self.assertIn('--preemptible', issue_command.call_args[0][0])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
import json
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
import selenium.webdriver.support.expected_conditions as EC
import selenium.webdriver.support.ui as ui
import unittest
import time
import logging
class AWSDeployer(object):
    """Automate end-to-end deployment of the `django-docker-starter` demo.

    Drives a Firefox browser through the GitHub, Docker Hub, AWS and Tutum
    web UIs with Selenium: forks the starter repository, creates an automated
    Docker Hub build, provisions an AWS IAM `tutum` user, links it to Tutum,
    launches a Tutum node and service, then opens the running application.

    Credentials and resource names are read from ``config.json`` in the
    working directory.
    """

    # Class-level default; replaced per-instance in __init__.
    __logger = None

    def __init__(self):
        """ Init driver and read json config file
        """
        # init logger
        self.__logger = logging.getLogger("Deployer")
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        logger_formatter = logging.Formatter("%(asctime)s - %(name)s - "
                                             "%(levelname)s - %(message)s", "%m/%d/%Y %I:%M:%S %p")
        console_handler.setFormatter(logger_formatter)
        self.__logger.addHandler(console_handler)
        self.__logger.setLevel(logging.DEBUG)
        # init web driver
        self.driver = webdriver.Firefox()
        # implicit wait: every element lookup retries for up to 30 seconds
        self.driver.implicitly_wait(30)
        self.verificationErrors = []
        self.accept_next_alert = True
        self.config = None
        # read config file (note: `file` shadows the Python 2 builtin)
        with open("config.json") as file:
            self.config = json.load(file)

    def launch_deployement(self):
        """Run the whole deployment pipeline, one step after another."""
        # go on the `django-docker-starter` GitHub repository and fork repository
        self.fork_github_repo()
        # create automated build repository on DockerHub
        self.create_dockerhub_build_repo()
        # create `tutum` user on AWS
        tutum_access_key_id, tutum_secret_access_key = self.create_tutum_user_on_aws()
        # link AWS account to Tutum (keys are None when the user already existed)
        if tutum_access_key_id != None and tutum_secret_access_key != None:
            self.link_aws_account_to_tutum(tutum_access_key_id, tutum_secret_access_key)
        # create tutum node on Tutum
        self.create_tutum_node()
        # create tutum service on Tutum
        app_ip = self.create_tutum_service()
        # Watch application
        self.watch_app(app_ip)

    def login_into_github(self):
        """ Log into GitHub (skipped when a session is already active)
        """
        driver = self.driver
        driver.get(self.config["gitHub"]["url"])
        # the login link is only present when nobody is logged in
        if self.is_element_present_by_css_selector("a[href=\"/login\"]"):
            driver.find_element_by_css_selector("a[href=\"/login\"]").click()
            driver.find_element_by_id("login_field").clear()
            driver.find_element_by_id("login_field").send_keys(self.config["gitHub"]["credentials"]["name"])
            driver.find_element_by_id("password").clear()
            driver.find_element_by_id("password").send_keys(self.config["gitHub"]["credentials"]["password"])
            driver.find_element_by_name("commit").click()

    def fork_github_repo(self):
        """ Fork the `django-docker-starter`
        """
        driver = self.driver
        # login into GitHub
        self.login_into_github()
        # fork the `django-docker-starter` repository if it's not the case
        if not self.is_element_present_by_css_selector("#repo_listing .fork a[href=\"/" + self.config["gitHub"]["credentials"]["name"] + "/" + self.config["gitHub"]["starterRepository"][
                "name"] + "\"]"):
            driver.get(self.config["gitHub"]["url"] + self.config["gitHub"]["starterRepository"]["owner"] + "/" + self.config["gitHub"]["starterRepository"]["name"] + ".git")
            driver.find_element_by_xpath("//button[@type='submit']").click()

    def login_into_dockerhub(self):
        """ Login into Docker Hub
        """
        driver = self.driver
        driver.get(self.config["dockerHub"]["url"])
        # the account login link is only present when no session is active
        if self.is_element_present_by_css_selector("a[href=\"/account/login/\"]"):
            driver.find_element_by_css_selector("a[href=\"/account/login/\"]").click()
            driver.find_element_by_id("id_username").clear()
            driver.find_element_by_id("id_username").send_keys(self.config["dockerHub"]["credentials"]["name"])
            driver.find_element_by_id("id_password").clear()
            driver.find_element_by_id("id_password").send_keys(self.config["dockerHub"]["credentials"]["password"])
            driver.find_element_by_css_selector("input.btn.btn-primary").click()

    def create_dockerhub_build_repo(self):
        """
        Create an automated build repository on DockerHub with the forked repository
        and wait build of container
        """
        driver = self.driver
        # login into DockerHub
        self.login_into_dockerhub()
        # create an automated build repository if it doesn't already exist
        if not self.is_element_present_by_css_selector("#rightcol .row a[href=\"/u/" + self.config["dockerHub"]["credentials"]["name"] + "/" + self.config["dockerHub"]["repository"][
                "name"] + "/\"]"):
            driver.get(self.config["dockerHub"]["url"] + "/builds/add/")
            driver.find_element_by_css_selector(".content .add-build .github a[href=\"/builds/github/select/\"]").click()
            driver.find_element_by_link_text(self.config["gitHub"]["credentials"]["name"]).click()
            driver.find_element_by_css_selector("[href=\"https://registry.hub.docker.com/builds/github/" +
                                                self.config["gitHub"]["credentials"]["name"] + "/" + self.config["gitHub"]["starterRepository"]["name"] + "/\"]").click()
            driver.find_element_by_id("id_repo_name").clear()
            driver.find_element_by_id("id_repo_name").send_keys(self.config["dockerHub"]["repository"]["name"])
            # change visibility of repository
            if self.config["dockerHub"]["repository"]["visibility"] == "private":
                driver.find_element_by_id("id_repo_visibility_1").click()
            driver.find_element_by_name("action").click()
            # wait during initialization of container
            driver.get(self.config["dockerHub"]["url"])
        # wait until docker image be built
        # while not self._is_visible("#rightcol .row a[href=\"/u/" + self.config["dockerHub"]["credentials"]["name"] + "/" + self.config["dockerHub"]["repository"]["name"] + "/\"] .stars-and-downloads-container"):
        #     driver.get(self.config["dockerHub"]["url"])

    def login_into_tutum(self):
        """ Login into Tutum
        """
        driver = self.driver
        driver.get(self.config["tutum"]["url"])
        driver.find_element_by_link_text("Login").click()
        # fill the form only when the login fields are actually shown
        if self.is_element_present("id", "id_username"):
            driver.find_element_by_id("id_username").clear()
            driver.find_element_by_id("id_username").send_keys(self.config["tutum"]["credentials"]["email"])
            driver.find_element_by_id("id_password").clear()
            driver.find_element_by_id("id_password").send_keys(self.config["tutum"]["credentials"]["password"])
            driver.find_element_by_xpath("//button[@type='submit']").click()

    def link_aws_account_to_tutum(self, tutum_access_key_id, tutum_secret_access_key):
        """
        Link AWS account to Tutum
        :param tutum_access_key_id: access key id of AWS tutum user
        :param tutum_secret_access_key: secret access key of AWS tutum user
        """
        driver = self.driver
        # login into tutum
        self.login_into_tutum()
        # open the account menu and navigate to the cloud-provider settings
        driver.find_element_by_css_selector("span.user-info").click()
        driver.find_element_by_xpath("//div[@id='navbar-container']/div[2]/ul/li[3]/ul/li/a/i").click()
        # link AWS account if there is no one
        if self.is_element_present_by_css_selector("div.aws-not-linked > #aws-link"):
            driver.find_element_by_css_selector("div.aws-not-linked > #aws-link").click()
            driver.find_element_by_id("access-key").clear()
            driver.find_element_by_id("access-key").send_keys(tutum_access_key_id)
            driver.find_element_by_id("secret-access-key").clear()
            driver.find_element_by_id("secret-access-key").send_keys(tutum_secret_access_key)
            driver.find_element_by_id("aws-save-credentials").click()
            # give the save operation time to complete
            time.sleep(5)

    def create_tutum_node(self):
        """ Create a Tutum node based on AWS
        """
        driver = self.driver
        # login into tutum
        self.login_into_tutum()
        driver.find_element_by_css_selector("a[href=\"/node/cluster/list/\"]").click()
        time.sleep(5)
        # create a node if it doesn't exist
        if not self.is_element_present_by_link_text(self.config["tutum"]["node"]["name"]):
            driver.find_element_by_css_selector("a[href=\"/node/launch/\"]").click()
            driver.find_element_by_id("node-cluster-name").clear()
            driver.find_element_by_id("node-cluster-name").send_keys(self.config["tutum"]["node"]["name"])
            # short delay to load javascript functions
            time.sleep(5)
            driver.find_element_by_id("btn-finish-node-cluster").click()
            # wait until docker image be built (busy-wait on the green status)
            while not self._is_visible(".main-container-inner .status-container .status .green"):
                pass

    def create_tutum_service(self):
        """ Create a Tutum service based on the docker container previously built
        """
        driver = self.driver
        # login into Tutum
        self.login_into_tutum()
        driver.find_element_by_link_text("Services").click()
        # strip child markup from cluster links so link_text lookups can match
        driver.execute_script("$(\".cluster-link a\").text($(\".cluster-link a\").clone().children().remove().end().text())")
        # create a service if it doesn't exist
        if not self.is_element_present_by_link_text(self.config["tutum"]["service"]["name"]):
            driver.find_element_by_css_selector("a[href=\"/container/launch/\"]").click()
            driver.find_element_by_link_text("Public images").click()
            driver.find_element_by_link_text("Search Docker hub").click()
            driver.find_element_by_id("search").clear()
            driver.find_element_by_id("search").send_keys(self.config["dockerHub"]["repository"]["name"])
            # wait until docker image be available (re-issue the search each try)
            while not self._is_visible("#community-search-result button[data-image-name*=\"" + self.config["dockerHub"]["repository"]["name"] + "\"]"):
                driver.find_element_by_id("search").clear()
                driver.find_element_by_id("search").send_keys(self.config["dockerHub"]["repository"]["name"])
            driver.find_element_by_css_selector("button[data-image-name*=\"" + self.config["dockerHub"]["repository"]["name"] + "\"]").click()
            driver.find_element_by_id("app-name").clear()
            driver.find_element_by_id("app-name").send_keys(self.config["tutum"]["service"]["name"])
            # short delay to load javascript functions
            time.sleep(3)
            driver.find_element_by_css_selector("div.overlay.overlay-override").click()
            driver.find_element_by_css_selector("input[type=\"checkbox\"]").click()
            driver.find_element_by_xpath("//div[@id='image-ports-wrapper']/div/div/div/table/tbody/tr/td[4]/span").click()
            driver.find_element_by_css_selector("input.form-control.input-sm").clear()
            driver.find_element_by_css_selector("input.form-control.input-sm").send_keys(self.config["tutum"]["service"]["port"])
            driver.find_element_by_id("step-container").click()
            driver.find_element_by_id("btn-deploy-services").click()
            # short delay to launch the service
            time.sleep(5)
            # wait until container is running (busy-wait on the green status)
            while not self._is_visible("#cluster-status .green"):
                pass
        else:
            driver.find_element_by_link_text(self.config["tutum"]["service"]["name"]).click()
        # drill into the container, then its node, to read the node IP
        driver.find_element_by_css_selector("td.container-link.sortable.renderable > a").click()
        driver.find_element_by_css_selector("#node > a").click()
        # remove the icon link so only the bare IP text remains in the info bar
        driver.execute_script("document.getElementsByClassName('info-bar')[0].getElementsByClassName('icon-link')[0].remove()")
        node_ip = driver.find_element_by_xpath("//div[@class='info-bar']/div[@class='app-info'][1]").text
        # strip quotes and spaces from the scraped text before returning it
        return node_ip.replace("\"", "").replace(" ", "")

    def watch_app(self, ip):
        """
        Go on node ip to watch application in live
        :param ip: ip of an application
        :return:
        """
        driver = self.driver
        driver.get("http://" + ip)
        # keep the page open for 20 seconds
        time.sleep(20)

    def login_into_aws(self):
        """ Login into AWS
        """
        driver = self.driver
        driver.get(self.config["aws"]["url"])
        # sign in only when the login form is shown (i.e. no active session)
        if self.is_element_present("id", "ap_email") and self.is_element_present("id", "ap_password"):
            driver.find_element_by_id("ap_email").clear()
            driver.find_element_by_id("ap_email").send_keys(self.config["aws"]["credentials"]["email"])
            driver.find_element_by_id("ap_password").clear()
            driver.find_element_by_id("ap_password").send_keys(self.config["aws"]["credentials"]["password"])
            driver.find_element_by_id("signInSubmit-input").click()

    def create_tutum_user_on_aws(self):
        """ Create a user (name: tutum) on AWS

        Returns an (access_key_id, secret_access_key) pair for the new user,
        or (None, None) when the `tutum` user already exists.
        """
        driver = self.driver
        # login into AWS
        self.login_into_aws()
        driver.find_element_by_css_selector("a.service[data-service-id=\"iam\"]").click()
        driver.find_element_by_link_text("Users").click()
        # create a `tutum` user if he doesn't exist
        if not self.is_element_present_by_css_selector("table[data-table=\"resource\"] td[title=\"tutum\"]"):
            driver.find_element_by_css_selector("button.create_user").click()
            driver.find_element_by_css_selector("li > input").clear()
            driver.find_element_by_css_selector("li > input").send_keys("tutum")
            driver.find_element_by_xpath("//div[@id='c']/div/div[2]/div/div[2]/div[3]/div/button").click()
            driver.find_element_by_link_text("Show User Security Credentials").click()
            # Get information of `tutum` user
            tutum_access_key_id = driver.find_elements_by_class_name("attrValue")[0].text
            tutum_secret_access_key = driver.find_elements_by_class_name("attrValue")[1].text
        else:
            tutum_access_key_id = None
            tutum_secret_access_key = None
        driver.find_element_by_link_text("Policies").click()
        # dismiss the first-time "get started" screen if it appears
        if self.is_element_present_by_css_selector("button.getStarted"):
            driver.find_element_by_css_selector("button.getStarted").click()
        driver.find_element_by_css_selector("td[title=\"AmazonEC2FullAccess\"]").click()
        # Attach policy (full access to EC2) to `tutum` user if its not
        if not self.is_element_present("text", "tutum"):
            driver.find_element_by_css_selector("button.attach").click()
            # short delay to load javascript functions
            time.sleep(5)
            driver.find_element_by_css_selector("div.tableField").click()
            driver.find_element_by_css_selector("button.submit").click()
        return tutum_access_key_id, tutum_secret_access_key

    def is_element_present(self, how, what):
        """
        Check if an element exist
        :param how: how to select it
        :param what: what to select
        :return: Boolean
        """
        try:
            self.driver.find_element(by = how, value = what)
        except NoSuchElementException as e:
            return False
        return True

    def is_element_present_by_css_selector(self, css_selector):
        """
        Check if an element exist by css selector
        :param css_selector: css selector
        :return: Boolean
        """
        try:
            self.driver.find_element_by_css_selector(css_selector)
        except NoSuchElementException as e:
            return False
        return True

    def is_element_present_by_link_text(self, link_text):
        """
        Check if an element exist by link text
        :param link_text: link text
        :return: Boolean
        """
        try:
            self.driver.find_element_by_link_text(link_text)
        except NoSuchElementException as e:
            return False
        return True

    def _is_visible(self, locator, timeout = 2):
        """Return True when the CSS-located element becomes visible within timeout seconds."""
        try:
            ui.WebDriverWait(self.driver, timeout).until(EC.visibility_of_element_located((By.CSS_SELECTOR, locator)))
            return True
        except TimeoutException:
            return False
Add logger messages to the GitHub and DockerHub deployment steps
# -*- coding: utf-8 -*-
import json
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
import selenium.webdriver.support.expected_conditions as EC
import selenium.webdriver.support.ui as ui
import unittest
import time
import logging
class AWSDeployer(object):
__logger = None
def __init__(self):
""" Init driver and read json config file
"""
# init logger
self.__logger = logging.getLogger("Deployer")
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
logger_formatter = logging.Formatter("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s", "%m/%d/%Y %I:%M:%S %p")
console_handler.setFormatter(logger_formatter)
self.__logger.addHandler(console_handler)
self.__logger.setLevel(logging.DEBUG)
# init web driver
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.verificationErrors = []
self.accept_next_alert = True
self.config = None
# read config file
with open("config.json") as file:
self.config = json.load(file)
def launch_deployement(self):
""" Login into Github account and fork the "django-docker-started" repository
"""
# go on the `django-docker-starter` GitHub repository and fork repository
self.fork_github_repo()
# create automated build repository on DockerHub
self.create_dockerhub_build_repo()
# create `tutum` user on AWS
tutum_access_key_id, tutum_secret_access_key = self.create_tutum_user_on_aws()
# link AWS account to Tutum
if tutum_access_key_id != None and tutum_secret_access_key != None:
self.link_aws_account_to_tutum(tutum_access_key_id, tutum_secret_access_key)
# create tutum node on Tutum
self.create_tutum_node()
# create tutum service on Tutum
app_ip = self.create_tutum_service()
# Watch application
self.watch_app(app_ip)
    def login_into_github(self):
        """ Log into GitHub (skipped when a session is already active)
        """
        self.__logger.debug("Logging into GitHub...")
        driver = self.driver
        driver.get(self.config["gitHub"]["url"])
        # the login link is only present when nobody is logged in
        if self.is_element_present_by_css_selector("a[href=\"/login\"]"):
            driver.find_element_by_css_selector("a[href=\"/login\"]").click()
            driver.find_element_by_id("login_field").clear()
            driver.find_element_by_id("login_field").send_keys(self.config["gitHub"]["credentials"]["name"])
            driver.find_element_by_id("password").clear()
            driver.find_element_by_id("password").send_keys(self.config["gitHub"]["credentials"]["password"])
            driver.find_element_by_name("commit").click()
            self.__logger.debug("Logged into GitHub")
        else:
            self.__logger.debug("Already logged into GitHub")
def fork_github_repo(self):
""" Fork the `django-docker-starter`
"""
driver = self.driver
# login into GitHub
self.login_into_github()
self.__logger.debug("forking repository : %s/%s ...", self.config["gitHub"]['starterRepository']["owner"], self.config["gitHub"]['starterRepository']["owner"])
# fork the `django-docker-starter` repository if it's not the case
if not self.is_element_present_by_css_selector("#repo_listing .fork a[href=\"/" + self.config["gitHub"]["credentials"]["name"] + "/" + self.config["gitHub"]["starterRepository"][
"name"] + "\"]"):
self.__logger.debug("Repository forked")
driver.get(self.config["gitHub"]["url"] + self.config["gitHub"]["starterRepository"]["owner"] + "/" + self.config["gitHub"]["starterRepository"]["name"] + ".git")
driver.find_element_by_xpath("//button[@type='submit']").click()
else:
self.__logger.debug("Repository already forked")
def login_into_dockerhub(self):
""" Login into Docker Hub
"""
self.__logger.debug("Logging into DockerHub...")
driver = self.driver
driver.get(self.config["dockerHub"]["url"])
if self.is_element_present_by_css_selector("a[href=\"/account/login/\"]"):
driver.find_element_by_css_selector("a[href=\"/account/login/\"]").click()
driver.find_element_by_id("id_username").clear()
driver.find_element_by_id("id_username").send_keys(self.config["dockerHub"]["credentials"]["name"])
driver.find_element_by_id("id_password").clear()
driver.find_element_by_id("id_password").send_keys(self.config["dockerHub"]["credentials"]["password"])
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.__logger.debug("Logged into DockerHub")
else:
self.__logger.debug("Already logger into GitHub")
def create_dockerhub_build_repo(self):
"""
Create an automated build repository on DockerHub with the forked repository
and wait build of container
"""
driver = self.driver
# login into DockerHub
self.login_into_dockerhub()
self.__logger.debug("Creating automated build repository on DockerHub...")
# create an automated build repository if it doesn't already exist
if not self.is_element_present_by_css_selector("#rightcol .row a[href=\"/u/" + self.config["dockerHub"]["credentials"]["name"] + "/" + self.config["dockerHub"]["repository"][
"name"] + "/\"]"):
driver.get(self.config["dockerHub"]["url"] + "/builds/add/")
driver.find_element_by_css_selector(".content .add-build .github a[href=\"/builds/github/select/\"]").click()
driver.find_element_by_link_text(self.config["gitHub"]["credentials"]["name"]).click()
driver.find_element_by_css_selector("[href=\"https://registry.hub.docker.com/builds/github/" +
self.config["gitHub"]["credentials"]["name"] + "/" + self.config["gitHub"]["starterRepository"]["name"] + "/\"]").click()
driver.find_element_by_id("id_repo_name").clear()
driver.find_element_by_id("id_repo_name").send_keys(self.config["dockerHub"]["repository"]["name"])
# change visibility of repository
if self.config["dockerHub"]["repository"]["visibility"] == "private":
driver.find_element_by_id("id_repo_visibility_1").click()
driver.find_element_by_name("action").click()
# wait during initialization of container
driver.get(self.config["dockerHub"]["url"])
self.__logger.debug("Automated build repository created")
else:
self.__logger.debug("Automated build repository already created")
# wait until docker image be built
# while not self._is_visible("#rightcol .row a[href=\"/u/" + self.config["dockerHub"]["credentials"]["name"] + "/" + self.config["dockerHub"]["repository"]["name"] + "/\"] .stars-and-downloads-container"):
# driver.get(self.config["dockerHub"]["url"])
def login_into_tutum(self):
""" Login into Tutum
"""
driver = self.driver
driver.get(self.config["tutum"]["url"])
driver.find_element_by_link_text("Login").click()
if self.is_element_present("id", "id_username"):
driver.find_element_by_id("id_username").clear()
driver.find_element_by_id("id_username").send_keys(self.config["tutum"]["credentials"]["email"])
driver.find_element_by_id("id_password").clear()
driver.find_element_by_id("id_password").send_keys(self.config["tutum"]["credentials"]["password"])
driver.find_element_by_xpath("//button[@type='submit']").click()
def link_aws_account_to_tutum(self, tutum_access_key_id, tutum_secret_access_key):
"""
Link AWS account to Tutum
:param tutum_access_key_id: access key id of AWS tutum user
:param tutum_secret_access_key: secret access key of AWS tutum user
"""
driver = self.driver
# login into tutum
self.login_into_tutum()
driver.find_element_by_css_selector("span.user-info").click()
driver.find_element_by_xpath("//div[@id='navbar-container']/div[2]/ul/li[3]/ul/li/a/i").click()
# link AWS account if there is no one
if self.is_element_present_by_css_selector("div.aws-not-linked > #aws-link"):
driver.find_element_by_css_selector("div.aws-not-linked > #aws-link").click()
driver.find_element_by_id("access-key").clear()
driver.find_element_by_id("access-key").send_keys(tutum_access_key_id)
driver.find_element_by_id("secret-access-key").clear()
driver.find_element_by_id("secret-access-key").send_keys(tutum_secret_access_key)
driver.find_element_by_id("aws-save-credentials").click()
time.sleep(5)
def create_tutum_node(self):
""" Create a Tutum node based on AWS
"""
driver = self.driver
# login into tutum
self.login_into_tutum()
driver.find_element_by_css_selector("a[href=\"/node/cluster/list/\"]").click()
time.sleep(5)
# create a node if it doesn't exist
if not self.is_element_present_by_link_text(self.config["tutum"]["node"]["name"]):
driver.find_element_by_css_selector("a[href=\"/node/launch/\"]").click()
driver.find_element_by_id("node-cluster-name").clear()
driver.find_element_by_id("node-cluster-name").send_keys(self.config["tutum"]["node"]["name"])
# short delay to load javascript functions
time.sleep(5)
driver.find_element_by_id("btn-finish-node-cluster").click()
# wait until docker image be built
while not self._is_visible(".main-container-inner .status-container .status .green"):
pass
def create_tutum_service(self):
""" Create a Tutum service based on the docker container previously built
"""
driver = self.driver
# login into Tutum
self.login_into_tutum()
driver.find_element_by_link_text("Services").click()
driver.execute_script("$(\".cluster-link a\").text($(\".cluster-link a\").clone().children().remove().end().text())")
# create a service if it doesn't exist
if not self.is_element_present_by_link_text(self.config["tutum"]["service"]["name"]):
driver.find_element_by_css_selector("a[href=\"/container/launch/\"]").click()
driver.find_element_by_link_text("Public images").click()
driver.find_element_by_link_text("Search Docker hub").click()
driver.find_element_by_id("search").clear()
driver.find_element_by_id("search").send_keys(self.config["dockerHub"]["repository"]["name"])
# wait until docker image be available
while not self._is_visible("#community-search-result button[data-image-name*=\"" + self.config["dockerHub"]["repository"]["name"] + "\"]"):
driver.find_element_by_id("search").clear()
driver.find_element_by_id("search").send_keys(self.config["dockerHub"]["repository"]["name"])
driver.find_element_by_css_selector("button[data-image-name*=\"" + self.config["dockerHub"]["repository"]["name"] + "\"]").click()
driver.find_element_by_id("app-name").clear()
driver.find_element_by_id("app-name").send_keys(self.config["tutum"]["service"]["name"])
# short delay to load javascript functions
time.sleep(3)
driver.find_element_by_css_selector("div.overlay.overlay-override").click()
driver.find_element_by_css_selector("input[type=\"checkbox\"]").click()
driver.find_element_by_xpath("//div[@id='image-ports-wrapper']/div/div/div/table/tbody/tr/td[4]/span").click()
driver.find_element_by_css_selector("input.form-control.input-sm").clear()
driver.find_element_by_css_selector("input.form-control.input-sm").send_keys(self.config["tutum"]["service"]["port"])
driver.find_element_by_id("step-container").click()
driver.find_element_by_id("btn-deploy-services").click()
# short delay to launch the service
time.sleep(5)
# wait until container is running
while not self._is_visible("#cluster-status .green"):
pass
else:
driver.find_element_by_link_text(self.config["tutum"]["service"]["name"]).click()
driver.find_element_by_css_selector("td.container-link.sortable.renderable > a").click()
driver.find_element_by_css_selector("#node > a").click()
driver.execute_script("document.getElementsByClassName('info-bar')[0].getElementsByClassName('icon-link')[0].remove()")
node_ip = driver.find_element_by_xpath("//div[@class='info-bar']/div[@class='app-info'][1]").text
return node_ip.replace("\"", "").replace(" ", "")
def watch_app(self, ip):
"""
Go on node ip to watch application in live
:param ip: ip of an application
:return:
"""
driver = self.driver
driver.get("http://" + ip)
time.sleep(20)
def login_into_aws(self):
""" Login into AWS
"""
driver = self.driver
driver.get(self.config["aws"]["url"])
if self.is_element_present("id", "ap_email") and self.is_element_present("id", "ap_password"):
driver.find_element_by_id("ap_email").clear()
driver.find_element_by_id("ap_email").send_keys(self.config["aws"]["credentials"]["email"])
driver.find_element_by_id("ap_password").clear()
driver.find_element_by_id("ap_password").send_keys(self.config["aws"]["credentials"]["password"])
driver.find_element_by_id("signInSubmit-input").click()
def create_tutum_user_on_aws(self):
    """ Create a user (name: tutum) on AWS

    Drives the AWS IAM console: creates an IAM user called `tutum` if it
    does not exist yet, then attaches the AmazonEC2FullAccess policy to it.

    :return: (access_key_id, secret_access_key) of the freshly created
        user, or (None, None) when the user already existed (AWS only
        shows credentials once, at creation time).
    """
    driver = self.driver
    # login into AWS
    self.login_into_aws()
    # Open the IAM service, then its Users listing.
    driver.find_element_by_css_selector("a.service[data-service-id=\"iam\"]").click()
    driver.find_element_by_link_text("Users").click()
    # create a `tutum` user if he doesn't exist
    if not self.is_element_present_by_css_selector("table[data-table=\"resource\"] td[title=\"tutum\"]"):
        driver.find_element_by_css_selector("button.create_user").click()
        driver.find_element_by_css_selector("li > input").clear()
        driver.find_element_by_css_selector("li > input").send_keys("tutum")
        driver.find_element_by_xpath("//div[@id='c']/div/div[2]/div/div[2]/div[3]/div/button").click()
        driver.find_element_by_link_text("Show User Security Credentials").click()
        # Get information of `tutum` user; the credentials are only
        # displayed at creation time, in two .attrValue cells.
        tutum_access_key_id = driver.find_elements_by_class_name("attrValue")[0].text
        tutum_secret_access_key = driver.find_elements_by_class_name("attrValue")[1].text
    else:
        tutum_access_key_id = None
        tutum_secret_access_key = None
    # Attach the EC2 policy regardless of whether the user was just created.
    driver.find_element_by_link_text("Policies").click()
    # First visit to the Policies page shows a "Get Started" interstitial.
    if self.is_element_present_by_css_selector("button.getStarted"):
        driver.find_element_by_css_selector("button.getStarted").click()
    driver.find_element_by_css_selector("td[title=\"AmazonEC2FullAccess\"]").click()
    # Attach policy (full access to EC2) to `tutum` user if it's not attached yet.
    if not self.is_element_present("text", "tutum"):
        driver.find_element_by_css_selector("button.attach").click()
        # short delay to load javascript functions
        time.sleep(5)
        driver.find_element_by_css_selector("div.tableField").click()
        driver.find_element_by_css_selector("button.submit").click()
    return tutum_access_key_id, tutum_secret_access_key
def is_element_present(self, how, what):
    """
    Check if an element exist
    :param how: how to select it
    :param what: what to select
    :return: Boolean
    """
    try:
        self.driver.find_element(by=how, value=what)
    except NoSuchElementException:
        return False
    else:
        return True
def is_element_present_by_css_selector(self, css_selector):
    """
    Check if an element exist by css selector
    :param css_selector: css selector
    :return: Boolean
    """
    try:
        self.driver.find_element_by_css_selector(css_selector)
    except NoSuchElementException:
        return False
    else:
        return True
def is_element_present_by_link_text(self, link_text):
    """
    Check if an element exist by link text
    :param link_text: link text
    :return: Boolean
    """
    try:
        self.driver.find_element_by_link_text(link_text)
    except NoSuchElementException:
        return False
    else:
        return True
def _is_visible(self, locator, timeout=2):
    """Return True if the CSS-located element becomes visible within *timeout* seconds."""
    wait = ui.WebDriverWait(self.driver, timeout)
    try:
        wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, locator)))
    except TimeoutException:
        return False
    return True
|
import argparse
import datetime
import math
import os
import shutil
import sys
import tarfile
import subprocess
import re
from bs4 import BeautifulSoup
from latex_utils import remove_accented_characters, read_latex_file, write_latex_file, get_relevant_warnings, open_webpage
from reference_utils import Reference, concatenate_authors, extract_bibtex_items, remove_arxiv_id_version
from reference_formatter import format_references
from paths import LATEX_SKELETON_PATH, PRODUCTION_PATH
def extract_submission_content(latex_source):
    """Extract the submission content from a LaTeX source.

    Returns everything from the first \\section up to (but excluding) the
    start of the bibliography (\\bibliography or \\begin{thebibliography}).
    """
    pattern = r"(\\section{.*?}.*)(?=\\bibliography|\\begin{thebibliography})"
    match = re.search(pattern, latex_source, re.DOTALL)
    return match.group(0)
def extract_packages(latex_source):
    """Extract packages and package options from a LaTeX source."""
    # Packages the production skeleton replaces with its own choices.
    REPLACED = ("amssymb", "a4wide")
    # Packages the skeleton already loads, so they must not be duplicated.
    PROVIDED = ("amsmath", "doi", "fancyhdr", "geometry", "graphicx", "hyperref", "inputenc", "lineno", "titlesec", "tocloft", "nottoc", "notlot", "notlof", "xcolor")
    kept = []
    for statement in re.findall(r"(?<!%)\\usepackage.*?{.*?}", latex_source):
        print(statement)
        # First brace group, first comma-separated entry, is the package name.
        name = re.search(r"{(.*?)}", statement).group(1).split(",")[0]
        print(name)
        if name in REPLACED:
            kept.append("% REMOVED IN PROD " + statement)
        elif name in PROVIDED:
            continue
        else:
            kept.append(statement)
    kept.append("\\usepackage{amsfonts}")
    return "".join(line + "\n" for line in kept)
def extract_commands(latex_source):
    """Collect author-defined macros (\\newcommand, \\def, \\DeclareMathOperator) from the source."""
    macro_patterns = (
        r"(?<=\n)\\newcommand.*",
        r"(?<=\n)\\def.*",
        r"(?<=\n)\\DeclareMathOperator.*",
    )
    found = []
    for pattern in macro_patterns:
        found.extend(re.findall(pattern, latex_source))
    return "\n".join(found)
def calculate_current_volume(date):
    """
    Calculate the current SciPost volume.
    A new volume is started every six months, in January and July.
    First volume ran from July 2016 to (end of) December 2016.

    date: any object with .month and .year (datetime.date/datetime).
    Returns the volume number as an int.
    """
    month = date.month
    year = date.year
    years_running = year - 2016
    months_running = years_running * 12 + 6  # First six months of operation in 2016.
    # BUG FIX: divide by the six-month volume period; the original floored
    # the raw month count, inflating the volume number roughly six-fold.
    volume = math.floor(months_running / 6)
    if month < 7:
        volume -= 1
    return volume
class LatexPreparer:
    """Prepare a SciPost production LaTeX file for a submission.

    Workflow: scrape the submission page on scipost.org, fetch arXiv
    metadata, create a production folder from the LaTeX skeleton, download
    and unpack the arXiv source, splice the authors' content into the
    skeleton, and compile the result with latexmk.
    """

    def __init__(self, submission_address):
        # URL of the SciPost submission page to process.
        self.submission_address = submission_address
        now = datetime.datetime.now()
        self.issue = math.floor((now.month-0.1)/2) + 1 # Slight offset so even months are correctly converted.
        self.volume = calculate_current_volume(now)
        self.arxiv_id = None
        self.submission_date = None
        self.title = None
        self.full_authors = None
        self.abbreviated_authors = None
        self.first_author_last_name = None
        self.abstract = None
        self.tex_source_zip = None
        self.original_tex_text = None
        self.publication_production_folder = None
        self.publication_tex_filename = None
        self.year = now.year
        self.references = None
        self.content = None
        self.packages = None
        self.commands = None

    def main(self):
        """Run the complete preparation pipeline in order."""
        self.retrieve_scipost_submission_data()
        self.retrieve_arxiv_metadata()
        self.prepare_production_folder()
        self.download_arxiv_source()
        self.prepare_paper_data()
        self.edit_tex_file()
        self.run_pdflatex()

    def retrieve_scipost_submission_data(self):
        """Retrieve a submission's webpage and extract metadata."""
        print("Retrieving SciPost submission data...")
        _, submission_page = open_webpage(self.submission_address)
        submission_page = BeautifulSoup(submission_page.text, "html5lib")
        # Check that the latest version for the submission is retrieved.
        submission_version = submission_page.find(text="SciPost Submission Page").parent.find_next("h3").text
        if submission_version == "This is not the current version.":
            sys.exit("Not the current version.")
        # BUG FIX: str.strip(chars) treats its argument as a character *set*
        # and can eat characters of the identifier itself (e.g. the leading
        # "h" of an old-style id such as "hep-th/9901001"). Remove the URL
        # prefix explicitly instead.
        arxiv_link = submission_page.find(text="arxiv Link:").parent.find_next("td").text.strip()
        self.arxiv_id = re.sub(r"^https?://arxiv\.org/abs/", "", arxiv_link)
        # Extract submission date (date that first version was submitted).
        if submission_page.find(text="Other versions of this Submission (with Reports) exist:"):
            oldest_version = submission_page.find(class_="pubtitleli")["href"] # First instance is first version.
            _, oldest_version_page = open_webpage(f"https://www.scipost.org{oldest_version}")
            oldest_version_page = BeautifulSoup(oldest_version_page.text, "html5lib")
            submission_date = oldest_version_page.find(text="Date submitted:").parent.find_next("td").text.strip()
        else:
            submission_date = submission_page.find(text="Date submitted:").parent.find_next("td").text.strip()
        # Change from YYYY-MM-DD to DD-MM-YYYY.
        self.submission_date = "-".join(submission_date.split("-")[::-1])

    def retrieve_arxiv_metadata(self):
        """Retrieve the arXiv data (title, authors, abstract) for a submission."""
        reference = Reference(f"arXiv:{self.arxiv_id}")
        reference.main()
        self.title = re.sub(r"[ ]{2,}", " ", reference.title) # Remove occurrences of more than one space.
        self.full_authors = reference.full_authors
        self.abbreviated_authors = reference.abbreviated_authors
        self.first_author_last_name = remove_accented_characters(reference.first_author_last_name.replace(" ", "_"))
        self.abstract = reference.abstract

    def prepare_production_folder(self, production_path=PRODUCTION_PATH):
        """
        Prepare the production folder for the submission.
        production_path: top level folder in which the submission folder should be placed
        """
        print("Preparing production folder...")
        self.publication_production_folder = os.path.join(production_path, f"SciPost_Phys_{self.arxiv_id}_{self.first_author_last_name}")
        if not os.path.exists(self.publication_production_folder):
            os.makedirs(self.publication_production_folder)
        else:
            sys.exit("Folder already exists! Aborting...") # Better safe than sorry, so no overwriting.
        # Seed the folder with the skeleton files, then clone the skeleton
        # .tex under the publication's own file name.
        for file_name in os.listdir(LATEX_SKELETON_PATH):
            shutil.copy2(os.path.join(LATEX_SKELETON_PATH, file_name), self.publication_production_folder)
        arxiv_id_without_dots = self.arxiv_id.replace(".", "_")
        self.publication_tex_filename = f"SciPost_Phys_{arxiv_id_without_dots}_{self.first_author_last_name}.tex"
        shutil.copy(os.path.join(self.publication_production_folder, "SciPost_Phys_Skeleton.tex"), os.path.join(self.publication_production_folder, self.publication_tex_filename))

    def download_arxiv_source(self):
        """Download the LaTeX source for a submission from arXiv."""
        print("Downloading LaTeX source from arXiv...")
        # Note that we use src/ID instead of e-print/ID, since then the source is always returned as a tarfile, even if it's a single file.
        _, tex_source_zip = open_webpage(f"https://arxiv.org/src/{self.arxiv_id}")
        # Save the tar file.
        with open(os.path.join(self.publication_production_folder, f"{self.arxiv_id}.tar.gz"), "wb") as zip_file:
            for chunk in tex_source_zip:
                zip_file.write(chunk)
        # Extract the tar file.
        # NOTE(review): extractall on a downloaded archive is vulnerable to
        # path traversal for malicious tars; arXiv is trusted here.
        with tarfile.open(os.path.join(self.publication_production_folder, f"{self.arxiv_id}.tar.gz")) as tar_file:
            # Single file submission.
            if len(tar_file.getmembers()) == 1:
                tar_file.extractall(path=self.publication_production_folder)
                # arXiv names the single file by the versionless id; give it a .tex extension.
                arxiv_id_without_version = remove_arxiv_id_version(self.arxiv_id)
                os.rename(os.path.join(self.publication_production_folder, arxiv_id_without_version), os.path.join(self.publication_production_folder, f"{self.arxiv_id}.tex"))
            else:
                tar_file.extractall(path=os.path.join(self.publication_production_folder, self.arxiv_id))
                # Copy the files and directories one level up.
                for file_name in os.listdir(os.path.join(self.publication_production_folder, self.arxiv_id)):
                    # Exclude any class files the authors may have bundled.
                    if not os.path.splitext(file_name)[-1] in [".bst", ".cls"]:
                        # Copy directories and their contents.
                        if os.path.isdir(os.path.join(self.publication_production_folder, self.arxiv_id, file_name)):
                            shutil.copytree(os.path.join(self.publication_production_folder, self.arxiv_id, file_name), os.path.join(self.publication_production_folder, file_name))
                        # Copy individual files.
                        else:
                            shutil.copy2(os.path.join(self.publication_production_folder, self.arxiv_id, file_name), self.publication_production_folder)

    def prepare_paper_data(self):
        """Prepare and extract data from the LaTeX source file of a submission."""
        for file_name in os.listdir(self.publication_production_folder):
            print(file_name)
            # BibTeX output, if present, is the authoritative reference list.
            if os.path.splitext(file_name)[-1] == ".bbl":
                print("Found bbl")
                references = read_latex_file(os.path.join(self.publication_production_folder, file_name))
                print(references)
                self.references = "\n\n".join(extract_bibtex_items(references))
            # TODO: Handle multiple tex files in a submission.
            elif os.path.splitext(file_name)[-1] == ".tex" and file_name not in ["SciPost_Phys_Skeleton.tex", self.publication_tex_filename]:
                self.original_tex_source = read_latex_file(os.path.join(self.publication_production_folder, file_name))
                self.content = extract_submission_content(self.original_tex_source)
                self.packages = extract_packages(self.original_tex_source)
                self.commands = extract_commands(self.original_tex_source)
                # Fall back to a thebibliography embedded in the .tex itself.
                if not self.references:
                    self.references = "\n\n".join(extract_bibtex_items(self.original_tex_source))

    def edit_tex_file(self):
        """Splice the extracted metadata and content into the skeleton copy."""
        self.production_tex_source = read_latex_file(os.path.join(self.publication_production_folder, self.publication_tex_filename))
        old_citation = "%%%%%%%%%% TODO: PAPER CITATION\n\\rhead{\\small \\href{https://scipost.org/SciPostPhys.?.?.???}{SciPost Phys. ?, ??? (20??)}}\n%%%%%%%%%% END TODO: PAPER CITATION"
        new_citation = f"%%%%%%%%%% TODO: PAPER CITATION\n\\rhead{{\\small \\href{{https://scipost.org/SciPostPhys.{self.volume}.{self.issue}.???}}{{SciPost Phys. {self.volume}, ??? ({self.year})}}}}\n%%%%%%%%%% END TODO: PAPER CITATION"
        self.production_tex_source = self.production_tex_source.replace(old_citation, new_citation)
        old_packages = "%%%%%%%%%% TODO: PACKAGES include extra packages used by authors:\n\n% ADDED IN PRODUCTION"
        new_packages = f"%%%%%%%%%% TODO: PACKAGES include extra packages used by authors:\n\n{self.packages}\n\n% ADDED IN PRODUCTION"
        self.production_tex_source = self.production_tex_source.replace(old_packages, new_packages)
        old_commands = "%%%%%%%%%% TODO: COMMANDS\n\n%%%%%%%%%% END TODO: COMMANDS"
        new_commands = f"%%%%%%%%%% TODO: COMMANDS\n\n{self.commands}\n%%%%%%%%%% END TODO: COMMANDS"
        self.production_tex_source = self.production_tex_source.replace(old_commands, new_commands)
        old_title = "% multiline titles: end with a \\\\ to regularize line spacing"
        new_title = f"% multiline titles: end with a \\\\ to regularize line spacing\n{self.title}\\\\"
        self.production_tex_source = self.production_tex_source.replace(old_title, new_title)
        old_authors = "A. Bee\\textsuperscript{1,2},\nC. Dee\\textsuperscript{1} and\nE. Eff\\textsuperscript{3$\star$}"
        new_authors = concatenate_authors(self.full_authors)
        self.production_tex_source = self.production_tex_source.replace(old_authors, new_authors)
        old_abstract = "%%%%%%%%%% TODO: ABSTRACT Paste abstract here\n%%%%%%%%%% END TODO: ABSTRACT"
        new_abstract = f"%%%%%%%%%% TODO: ABSTRACT Paste abstract here\n{self.abstract}\n%%%%%%%%%% END TODO: ABSTRACT"
        self.production_tex_source = self.production_tex_source.replace(old_abstract, new_abstract)
        old_copyright = "{\\small Copyright A. Bee {\\it et al}."
        new_copyright = "{\\small Copyright DA COPY SQUAD."
        self.production_tex_source = self.production_tex_source.replace(old_copyright, new_copyright)
        old_received_date = "\\small Received ??-??-20??"
        new_received_date = f"\\small Received {self.submission_date}"
        self.production_tex_source = self.production_tex_source.replace(old_received_date, new_received_date)
        old_doi = "\\doi{10.21468/SciPostPhys.?.?.???}"
        new_doi = f"\\doi{{10.21468/SciPostPhys.{self.volume}.{self.issue}.???}}"
        self.production_tex_source = self.production_tex_source.replace(old_doi, new_doi)
        old_contents = "%%%%%%%%% TODO: CONTENTS Contents come here, starting from first \\section\n\n\n\n%%%%%%%%% END TODO: CONTENTS"
        new_contents = f"%%%%%%%%% TODO: CONTENTS Contents come here, starting from first \\section\n\n{self.content}\n\n%%%%%%%%% END TODO: CONTENTS"
        self.production_tex_source = self.production_tex_source.replace(old_contents, new_contents)
        if self.references:
            # BUG FIX: "contenst" -> "contents"; the misspelled anchor did not
            # match the skeleton text, so the references were never inserted
            # (fixed the same way in the later revision of this script).
            old_references = "TODO: BBL IF BiBTeX was used: paste the contents of the .bbl file here"
            new_references = f"TODO: BBL IF BiBTeX was used: paste the contents of the .bbl file here\n\n{self.references}"
            self.production_tex_source = self.production_tex_source.replace(old_references, new_references)
            print("Processing references...")
            self.production_tex_source = format_references(self.production_tex_source)
        write_latex_file(os.path.join(self.publication_production_folder, self.publication_tex_filename), self.production_tex_source)

    def run_pdflatex(self):
        """Compile the production .tex with latexmk, report warnings, open the PDF."""
        os.chdir(self.publication_production_folder)
        subprocess.check_output(["latexmk", "-pdf", os.path.join(self.publication_production_folder, self.publication_tex_filename)])
        print("The following warning were generated:")
        # BUG FIX: escape the dot so the pattern matches the ".tex" extension
        # literally instead of any character followed by "tex".
        for warning in get_relevant_warnings(read_latex_file(re.sub(r"\.tex$", ".log", self.publication_tex_filename))):
            print(warning)
        # NOTE(review): "open" is macOS-specific.
        subprocess.run(["open", os.path.join(self.publication_production_folder, self.publication_tex_filename.replace(".tex", ".pdf"))])
if __name__ == "__main__":
    # Command-line entry point: takes the SciPost submission page URL.
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('submission_address')
    parsed_args = argument_parser.parse_args()
    LatexPreparer(parsed_args.submission_address).main()
Fix typos and add whitespace
import argparse
import datetime
import math
import os
import shutil
import sys
import tarfile
import subprocess
import re
from bs4 import BeautifulSoup
from latex_utils import remove_accented_characters, read_latex_file, write_latex_file, get_relevant_warnings, open_webpage
from reference_utils import Reference, concatenate_authors, extract_bibtex_items, remove_arxiv_id_version
from reference_formatter import format_references
from paths import LATEX_SKELETON_PATH, PRODUCTION_PATH
def extract_submission_content(latex_source):
    """Extract the submission content from a LaTeX source.

    Everything from the first \\section up to (but excluding) the start of
    the bibliography (\\bibliography or \\begin{thebibliography}).
    """
    body_pattern = r"(\\section{.*?}.*)(?=\\bibliography|\\begin{thebibliography})"
    return re.search(body_pattern, latex_source, re.DOTALL).group(0)
def extract_packages(latex_source):
    """Extract packages and package options from a LaTeX source."""
    # Skeleton-provided packages that must not be duplicated.
    skeleton_packages = {"amsmath", "doi", "fancyhdr", "geometry", "graphicx", "hyperref", "inputenc", "lineno", "titlesec", "tocloft", "nottoc", "notlot", "notlof", "xcolor"}
    # Packages the production skeleton replaces with its own choices.
    replaced_packages = {"amssymb", "a4wide"}
    collected = []
    for statement in re.findall(r"(?<!%)\\usepackage.*?{.*?}", latex_source):
        print(statement)
        # First brace group, first comma-separated entry, is the package name.
        name = re.search(r"{(.*?)}", statement).group(1).split(",")[0]
        print(name)
        if name in replaced_packages:
            collected.append("% REMOVED IN PROD " + statement)
        elif name not in skeleton_packages:
            collected.append(statement)
    collected.append("\\usepackage{amsfonts}")
    return "".join(line + "\n" for line in collected)
def extract_commands(latex_source):
    """Collect author-defined macros (\\newcommand, \\def, \\DeclareMathOperator) from the source."""
    collected = []
    for macro in (r"\\newcommand", r"\\def", r"\\DeclareMathOperator"):
        collected.extend(re.findall(r"(?<=\n)" + macro + r".*", latex_source))
    return "\n".join(collected)
def calculate_current_volume(date):
    """
    Calculate the current SciPost volume.
    A new volume is started every six months, in January and July.
    First volume ran from July 2016 to (end of) December 2016.
    """
    # Months elapsed since operations began; July 2016 contributes 6 months.
    elapsed_months = (date.year - 2016) * 12 + 6  # First six months of operation in 2016.
    current_volume = elapsed_months // 6
    # January-June still belongs to the volume opened the previous July.
    if date.month < 7:
        current_volume -= 1
    return current_volume
class LatexPreparer:
    """Prepare a SciPost production LaTeX file for a submission.

    Workflow: scrape the submission page on scipost.org, fetch arXiv
    metadata, create a production folder from the LaTeX skeleton, download
    and unpack the arXiv source, splice the authors' content into the
    skeleton, and compile the result with latexmk.
    """

    def __init__(self, submission_address):
        # URL of the SciPost submission page to process.
        self.submission_address = submission_address
        now = datetime.datetime.now()
        self.issue = math.floor((now.month-0.1)/2) + 1 # Slight offset so even months are correctly converted.
        self.volume = calculate_current_volume(now)
        self.arxiv_id = None
        self.submission_date = None
        self.title = None
        self.full_authors = None
        self.abbreviated_authors = None
        self.first_author_last_name = None
        self.abstract = None
        self.tex_source_zip = None
        self.original_tex_text = None
        self.publication_production_folder = None
        self.publication_tex_filename = None
        self.year = now.year
        self.references = None
        self.content = None
        self.packages = None
        self.commands = None

    def main(self):
        """Run the complete preparation pipeline in order."""
        self.retrieve_scipost_submission_data()
        self.retrieve_arxiv_metadata()
        self.prepare_production_folder()
        self.download_arxiv_source()
        self.prepare_paper_data()
        self.edit_tex_file()
        self.run_pdflatex()

    def retrieve_scipost_submission_data(self):
        """Retrieve a submission's webpage and extract metadata."""
        print("Retrieving SciPost submission data...")
        _, submission_page = open_webpage(self.submission_address, exit_on_error=True)
        submission_page = BeautifulSoup(submission_page.text, "html5lib")
        # Check that the latest version for the submission is retrieved.
        submission_version = submission_page.find(text="SciPost Submission Page").parent.find_next("h3").text
        if submission_version == "This is not the current version.":
            sys.exit("Not the current version.")
        # BUG FIX: str.strip(chars) treats its argument as a character *set*
        # and can eat characters of the identifier itself (e.g. the leading
        # "h" of an old-style id such as "hep-th/9901001"). Remove the URL
        # prefix explicitly instead.
        arxiv_link = submission_page.find(text="arxiv Link:").parent.find_next("td").text.strip()
        self.arxiv_id = re.sub(r"^https?://arxiv\.org/abs/", "", arxiv_link)
        # Extract submission date (date that first version was submitted).
        if submission_page.find(text="Other versions of this Submission (with Reports) exist:"):
            oldest_version = submission_page.find(class_="pubtitleli")["href"] # First instance is first version on SciPost.
            _, oldest_version_page = open_webpage(f"https://www.scipost.org{oldest_version}", exit_on_error=True)
            oldest_version_page = BeautifulSoup(oldest_version_page.text, "html5lib")
            submission_date = oldest_version_page.find(text="Date submitted:").parent.find_next("td").text.strip()
        else:
            submission_date = submission_page.find(text="Date submitted:").parent.find_next("td").text.strip()
        # Change from YYYY-MM-DD to DD-MM-YYYY.
        self.submission_date = "-".join(submission_date.split("-")[::-1])

    def retrieve_arxiv_metadata(self):
        """Retrieve the arXiv data (title, authors, abstract) for a submission."""
        reference = Reference(f"arXiv:{self.arxiv_id}")
        reference.main()
        self.title = re.sub(r"[ ]{2,}", " ", reference.title) # Remove occurrences of more than one space.
        self.full_authors = reference.full_authors
        self.abbreviated_authors = reference.abbreviated_authors
        self.first_author_last_name = remove_accented_characters(reference.first_author_last_name.replace(" ", "_"))
        self.abstract = reference.abstract

    def prepare_production_folder(self, production_path=PRODUCTION_PATH):
        """
        Prepare the production folder for the submission.
        production_path: top level folder in which the submission folder should be placed
        """
        print("Preparing production folder...")
        self.publication_production_folder = os.path.join(production_path, f"SciPost_Phys_{self.arxiv_id}_{self.first_author_last_name}")
        if not os.path.exists(self.publication_production_folder):
            os.makedirs(self.publication_production_folder)
        else:
            sys.exit("Folder already exists! Aborting...") # Better safe than sorry, so no overwriting.
        # Seed the folder with the skeleton files, then clone the skeleton
        # .tex under the publication's own file name.
        for file_name in os.listdir(LATEX_SKELETON_PATH):
            shutil.copy2(os.path.join(LATEX_SKELETON_PATH, file_name), self.publication_production_folder)
        arxiv_id_without_dots = self.arxiv_id.replace(".", "_")
        self.publication_tex_filename = f"SciPost_Phys_{arxiv_id_without_dots}_{self.first_author_last_name}.tex"
        shutil.copy(os.path.join(self.publication_production_folder, "SciPost_Phys_Skeleton.tex"), os.path.join(self.publication_production_folder, self.publication_tex_filename))

    def download_arxiv_source(self):
        """Download the LaTeX source for a submission from arXiv."""
        print("Downloading LaTeX source from arXiv...")
        # Note that we use src/ID instead of e-print/ID, since then the source is always returned as a tarfile, even if it's a single file.
        _, tex_source_zip = open_webpage(f"https://arxiv.org/src/{self.arxiv_id}", exit_on_error=True)
        # Save the tar file.
        with open(os.path.join(self.publication_production_folder, f"{self.arxiv_id}.tar.gz"), "wb") as zip_file:
            for chunk in tex_source_zip:
                zip_file.write(chunk)
        # Extract the tar file.
        # NOTE(review): extractall on a downloaded archive is vulnerable to
        # path traversal for malicious tars; arXiv is trusted here.
        with tarfile.open(os.path.join(self.publication_production_folder, f"{self.arxiv_id}.tar.gz")) as tar_file:
            # Single file submission.
            if len(tar_file.getmembers()) == 1:
                tar_file.extractall(path=self.publication_production_folder)
                # arXiv names the single file by the versionless id; give it a .tex extension.
                arxiv_id_without_version = remove_arxiv_id_version(self.arxiv_id)
                os.rename(os.path.join(self.publication_production_folder, arxiv_id_without_version), os.path.join(self.publication_production_folder, f"{self.arxiv_id}.tex"))
            else:
                tar_file.extractall(path=os.path.join(self.publication_production_folder, self.arxiv_id))
                # Copy the files and directories one level up.
                for file_name in os.listdir(os.path.join(self.publication_production_folder, self.arxiv_id)):
                    # Exclude any class files the authors may have bundled.
                    if not os.path.splitext(file_name)[-1] in [".bst", ".cls"]:
                        # Copy directories and their contents.
                        if os.path.isdir(os.path.join(self.publication_production_folder, self.arxiv_id, file_name)):
                            shutil.copytree(os.path.join(self.publication_production_folder, self.arxiv_id, file_name), os.path.join(self.publication_production_folder, file_name))
                        # Copy individual files.
                        else:
                            shutil.copy2(os.path.join(self.publication_production_folder, self.arxiv_id, file_name), self.publication_production_folder)

    def prepare_paper_data(self):
        """Prepare and extract data from the LaTeX source file of a submission."""
        for file_name in os.listdir(self.publication_production_folder):
            print(file_name)
            # BibTeX output, if present, is the authoritative reference list.
            if os.path.splitext(file_name)[-1] == ".bbl":
                print("Found bbl")
                references = read_latex_file(os.path.join(self.publication_production_folder, file_name))
                self.references = "\n\n".join(extract_bibtex_items(references))
            # TODO: Handle multiple tex files in a submission.
            elif os.path.splitext(file_name)[-1] == ".tex" and file_name not in ["SciPost_Phys_Skeleton.tex", self.publication_tex_filename]:
                self.original_tex_source = read_latex_file(os.path.join(self.publication_production_folder, file_name))
                self.content = extract_submission_content(self.original_tex_source)
                self.packages = extract_packages(self.original_tex_source)
                self.commands = extract_commands(self.original_tex_source)
                # Fall back to a thebibliography embedded in the .tex itself.
                if not self.references:
                    self.references = "\n\n".join(extract_bibtex_items(self.original_tex_source))

    def edit_tex_file(self):
        """Splice the extracted metadata and content into the skeleton copy."""
        self.production_tex_source = read_latex_file(os.path.join(self.publication_production_folder, self.publication_tex_filename))
        old_citation = "%%%%%%%%%% TODO: PAPER CITATION\n\\rhead{\\small \\href{https://scipost.org/SciPostPhys.?.?.???}{SciPost Phys. ?, ??? (20??)}}\n%%%%%%%%%% END TODO: PAPER CITATION"
        new_citation = f"%%%%%%%%%% TODO: PAPER CITATION\n\\rhead{{\\small \\href{{https://scipost.org/SciPostPhys.{self.volume}.{self.issue}.???}}{{SciPost Phys. {self.volume}, ??? ({self.year})}}}}\n%%%%%%%%%% END TODO: PAPER CITATION"
        self.production_tex_source = self.production_tex_source.replace(old_citation, new_citation)
        old_packages = "%%%%%%%%%% TODO: PACKAGES include extra packages used by authors:\n\n% ADDED IN PRODUCTION"
        new_packages = f"%%%%%%%%%% TODO: PACKAGES include extra packages used by authors:\n\n{self.packages}\n\n% ADDED IN PRODUCTION"
        self.production_tex_source = self.production_tex_source.replace(old_packages, new_packages)
        old_commands = "%%%%%%%%%% TODO: COMMANDS\n\n%%%%%%%%%% END TODO: COMMANDS"
        new_commands = f"%%%%%%%%%% TODO: COMMANDS\n\n{self.commands}\n%%%%%%%%%% END TODO: COMMANDS"
        self.production_tex_source = self.production_tex_source.replace(old_commands, new_commands)
        old_title = "% multiline titles: end with a \\\\ to regularize line spacing"
        new_title = f"% multiline titles: end with a \\\\ to regularize line spacing\n{self.title}\\\\"
        self.production_tex_source = self.production_tex_source.replace(old_title, new_title)
        old_authors = "A. Bee\\textsuperscript{1,2},\nC. Dee\\textsuperscript{1} and\nE. Eff\\textsuperscript{3$\star$}"
        new_authors = concatenate_authors(self.full_authors)
        self.production_tex_source = self.production_tex_source.replace(old_authors, new_authors)
        old_abstract = "%%%%%%%%%% TODO: ABSTRACT Paste abstract here\n%%%%%%%%%% END TODO: ABSTRACT"
        new_abstract = f"%%%%%%%%%% TODO: ABSTRACT Paste abstract here\n{self.abstract}\n%%%%%%%%%% END TODO: ABSTRACT"
        self.production_tex_source = self.production_tex_source.replace(old_abstract, new_abstract)
        old_copyright = "{\\small Copyright A. Bee {\\it et al}."
        new_copyright = "{\\small Copyright DA COPY SQUAD."
        self.production_tex_source = self.production_tex_source.replace(old_copyright, new_copyright)
        old_received_date = "\\small Received ??-??-20??"
        new_received_date = f"\\small Received {self.submission_date}"
        self.production_tex_source = self.production_tex_source.replace(old_received_date, new_received_date)
        old_doi = "\\doi{10.21468/SciPostPhys.?.?.???}"
        new_doi = f"\\doi{{10.21468/SciPostPhys.{self.volume}.{self.issue}.???}}"
        self.production_tex_source = self.production_tex_source.replace(old_doi, new_doi)
        old_contents = "%%%%%%%%% TODO: CONTENTS Contents come here, starting from first \\section\n\n\n\n%%%%%%%%% END TODO: CONTENTS"
        new_contents = f"%%%%%%%%% TODO: CONTENTS Contents come here, starting from first \\section\n\n{self.content}\n\n%%%%%%%%% END TODO: CONTENTS"
        self.production_tex_source = self.production_tex_source.replace(old_contents, new_contents)
        if self.references:
            old_references = "TODO: BBL IF BiBTeX was used: paste the contents of the .bbl file here"
            new_references = f"TODO: BBL IF BiBTeX was used: paste the contents of the .bbl file here\n\n{self.references}"
            self.production_tex_source = self.production_tex_source.replace(old_references, new_references)
            print("Processing references...")
            self.production_tex_source = format_references(self.production_tex_source)
        write_latex_file(os.path.join(self.publication_production_folder, self.publication_tex_filename), self.production_tex_source)

    def run_pdflatex(self):
        """Compile the production .tex with latexmk, report warnings, open the PDF."""
        os.chdir(self.publication_production_folder)
        subprocess.check_output(["latexmk", "-pdf", os.path.join(self.publication_production_folder, self.publication_tex_filename)])
        print("The following warning were generated:")
        # BUG FIX: escape the dot so the pattern matches the ".tex" extension
        # literally instead of any character followed by "tex".
        for warning in get_relevant_warnings(read_latex_file(re.sub(r"\.tex$", ".log", self.publication_tex_filename))):
            print(warning)
        # NOTE(review): "open" is macOS-specific.
        subprocess.run(["open", os.path.join(self.publication_production_folder, self.publication_tex_filename.replace(".tex", ".pdf"))])
if __name__ == "__main__":
    # Command-line entry point: takes the SciPost submission page URL.
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('submission_address')
    parsed_args = argument_parser.parse_args()
    LatexPreparer(parsed_args.submission_address).main()
|
#!/usr/bin/env python3
import pyglet, time
from . import dispatcher
"""
Check http://cwru-hackers.googlecode.com/svn-history/r233/splatterboard/trunk/draw.py
"""
MAX_OBJECTS = 20
class Context(dispatcher.Dispatcher):
def __init__(self, width=800, height=600, background=(1.0, 1.0, 1.0, 1.0), fullscreen=False, title="animation", chrome=True, screen=0, smooth=True, _3d=False):
    """Store window/rendering options; the actual window is created in start().

    width, height: window size in pixels
    background: RGBA clear color (floats in 0..1)
    fullscreen: request fullscreen mode
    title: window caption
    chrome: when False, create a borderless window
    screen: index of the display to open the window on
    smooth: enable blending and line antialiasing
    _3d: enable depth testing, culling and lighting for 3D drawing
    """
    self._width = width
    self._height = height
    self._background = background
    self._fullscreen = fullscreen
    self._title = title
    self._screen = screen
    self._chrome = chrome
    # Created lazily by start(); None until then.
    self.window = None
    self._3d = _3d
    self.last_frame = 0
    self.smooth = smooth
    # Drawable objects rendered each frame after the user draw callback.
    self.objects = []
    dispatcher.Dispatcher.__init__(self)
@property
def width(self):
    """Window width in pixels (read-only)."""
    return self._width
@property
def height(self):
    """Window height in pixels (read-only)."""
    return self._height
@property
def fps(self):
    """Current frame rate as reported by pyglet's clock."""
    return pyglet.clock.get_fps()
def start(self, draw_func, update_func=None):
    """Create the window, configure OpenGL state, and enter pyglet's main loop.

    draw_func: called every frame from draw_loop()
    update_func: scheduled with pyglet's clock each tick; defaults to a no-op

    Blocks in pyglet.app.run() until the window is closed.
    """
    # 4x multisampled, double-buffered GL context with a depth buffer.
    config = pyglet.gl.Config(sample_buffers=1, samples=4, depth_size=24, double_buffer=True)
    style = pyglet.window.Window.WINDOW_STYLE_DEFAULT if self._chrome else pyglet.window.Window.WINDOW_STYLE_BORDERLESS
    screens = pyglet.window.get_platform().get_default_display().get_screens()
    # Clamp the requested screen index to the screens actually present.
    screen_index = min(self._screen, len(screens) - 1)
    screen = screens[screen_index]
    if not (self._fullscreen and screen_index !=0):
        self.window = pyglet.window.Window(config=config, width=self.width, height=self.height, resizable=False, fullscreen=self._fullscreen, caption=self._title, style=style, screen=screen)
    else: # hack because pyglet fullscreen doesn't work on a secondary screen:
        # fake fullscreen by sizing a non-fullscreen window to the screen
        # and moving it to the screen's origin.
        self._width = screen.width
        self._height = screen.height
        self.window = pyglet.window.Window(config=config, width=self.width, height=self.height, resizable=False, fullscreen=False, caption=self._title, style=style, screen=screen)
        self.window.set_location(screen.x, screen.y)
    # Wire mouse handlers (expected to be defined on this object elsewhere).
    self.window.on_mouse_press = self.on_mouse_press
    self.window.on_mouse_release = self.on_mouse_release
    self.draw_func = draw_func
    # Default update callback is an identity no-op.
    self.update_func = update_func if update_func is not None else lambda x: x
    pyglet.gl.glClearColor(*self._background)
    if self._3d:
        pyglet.gl.glColor3f(1, 0, 0)
        pyglet.gl.glEnable(pyglet.gl.GL_DEPTH_TEST)
        pyglet.gl.glEnable(pyglet.gl.GL_CULL_FACE)
        # pyglet.gl.glPolygonMode(pyglet.gl.GL_FRONT_AND_BACK, pyglet.gl.GL_LINE)   # Uncomment this line for a wireframe view
        pyglet.gl.glEnable(pyglet.gl.GL_LIGHTING)
        pyglet.gl.glEnable(pyglet.gl.GL_LIGHT0)
        pyglet.gl.glEnable(pyglet.gl.GL_LIGHT1)
    if self.smooth:
        # Standard alpha blending plus antialiased lines.
        pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
        pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
        pyglet.gl.glEnable(pyglet.gl.GL_LINE_SMOOTH)
        pyglet.gl.glHint(pyglet.gl.GL_LINE_SMOOTH_HINT, pyglet.gl.GL_NICEST)
    self.window.on_draw = self.draw_loop
    if self._3d:
        self.window.on_resize = self.on_resize
    pyglet.clock.schedule(self.update_func)
    pyglet.app.run()
def on_resize(self, width, height):
    """Rebuild the perspective projection for the new window size (3D only).

    Returns EVENT_HANDLED to stop pyglet's default 2D ortho projection.
    """
    if self._3d:
        pyglet.gl.glViewport(0, 0, width, height)
        pyglet.gl.glMatrixMode(pyglet.gl.GL_PROJECTION)
        pyglet.gl.glLoadIdentity()
        # NOTE(review): on Python 2 `width / height` is integer division, which
        # would distort the aspect ratio -- confirm the target interpreter.
        pyglet.gl.gluPerspective(60.0, width / height, 0.1, 1000.0)
        pyglet.gl.glMatrixMode(pyglet.gl.GL_MODELVIEW)
    return pyglet.event.EVENT_HANDLED
def draw_loop(self):
    """Per-frame handler: clear, run the user draw callback, draw retained objects."""
    self.window.clear()
    pyglet.gl.glLoadIdentity()
    self.draw_func()
    # Retained-mode objects (e.g. labels created via label()).
    for o in self.objects:
        o.draw()
    fps = pyglet.clock.get_fps()
    # Warn on the console when the frame rate dips below 30 fps.
    if fps < 30:
        print("%f fps" % fps)
def line(self, x1, y1, x2, y2, color=(0., 0., 0., 1.), thickness=1.0):
    """Draw a line between two points given in normalized (0-1) coordinates."""
    pyglet.gl.glColor4f(*color)
    pyglet.gl.glLineWidth(thickness)
    # Scale normalized coords to window pixels.
    pyglet.graphics.draw(2, pyglet.gl.GL_LINES,
        ('v2f', (x1 * self.width, y1 * self.height, x2 * self.width, y2 * self.height))
    )
def line3(self, x1, y1, z1, x2, y2, z2, color=(0., 0., 0., 1.), thickness=1.0):
    """Draw a 3D line; z is scaled by the window height (stored as self.depth)."""
    # NOTE(review): depth is reset to the window height on every call -- the z
    # scale is therefore tied to window size; confirm this is intended.
    self.depth = self.height
    pyglet.gl.glColor4f(*color)
    pyglet.gl.glLineWidth(thickness)
    pyglet.graphics.draw(2, pyglet.gl.GL_LINES,
        ('v3f', (x1 * self.width, y1 * self.height, z1 * self.depth, x2 * self.width, y2 * self.height, z2 * self.depth))
    )
def lines(self, points, color=(0., 0., 0., 1.), thickness=1.0):
    """Draw a connected line strip through (x, y) pairs in normalized coords."""
    pyglet.gl.glColor4f(*color)
    pyglet.gl.glLineWidth(thickness)
    # Flatten [(x, y), ...] into [x*w, y*h, ...]; even positions are x, odd are y.
    points = [(item * self.width) if (i % 2 == 0) else (item * self.height) for sublist in points for (i, item) in enumerate(sublist)] # flatten
    pyglet.graphics.draw(len(points) // 2, pyglet.gl.GL_LINE_STRIP, ('v2f', points))
def plot(self, signal, color=(0., 0., 0., 1.), thickness=1.0):
    """Plot a 1D signal as a line strip; x positions are sample_index / width.

    NOTE(review): x spans 0-1 only when len(signal) == self.width; sample
    values are used directly as normalized y -- confirm callers pre-scale.
    """
    points = [(float(s) / self.width, sample) for (s, sample) in enumerate(signal)]
    self.lines(points, color=color, thickness=thickness)
def rect(self, x, y, width, height, color=(0., 0., 0., 1.), thickness=1.0):
    """Draw a filled quad with origin and size in normalized (0-1) coordinates."""
    pyglet.gl.glColor4f(*color)
    pyglet.gl.glLineWidth(thickness)
    # Convert to pixel space.
    x *= self.width
    y *= self.height
    width *= self.width
    height *= self.height
    pyglet.graphics.draw(4, pyglet.gl.GL_QUADS,
        ('v2f', (x, y, x, y + height, x + width, y + height, x + width, y)),
        # ('c3b', (color, color, color, color))
    )
def arc(self):
    """Draw an arc. Not implemented yet; placeholder in the drawing API.

    Bug fix: the original was declared `def arc():` without `self`, so any
    instance call (ctx.arc()) raised TypeError before reaching the body.
    """
    pass
def curve(self):
    """Draw a curve. Not implemented yet; placeholder in the drawing API.

    Bug fix: the original was declared `def curve():` without `self`, so any
    instance call (ctx.curve()) raised TypeError before reaching the body.
    """
    pass
def label(self, x, y, text="", font="Helvetica", size=36, width=400, color=(0., 0., 0., 1.), center=False):
    """Create an HTML text label at normalized (x, y), retain it, and draw it.

    The label is appended to self.objects so draw_loop re-renders it every
    frame; the created pyglet label is returned for later mutation.
    """
    # why is the antialiasing so awful
    # pyglet expects 0-255 integer channels rather than 0-1 floats.
    color = [int(c * 255) for c in color] # why?
    l = pyglet.text.HTMLLabel(text, x=x * self.width, y=y * self.height, width=width, multiline=True)
    l.font_name = font
    l.font_size = size
    l.color = color
    if center:
        l.anchor_x = 'center'
    self.objects.append(l)
    l.draw()
    return l
def on_mouse_press(self, x, y, button, modifiers):
    """Normalize pixel coords to 0-1 and dispatch a 'mouse_press' event.

    Bug fix: y was divided by self.width instead of self.height, so the
    normalized y coordinate was wrong for non-square windows (the later 3D
    revision of this class divides y by height).
    """
    self.fire('mouse_press', (x / self.width, y / self.height, button, modifiers))
def on_mouse_release(self, x, y, button, modifiers):
    """Normalize pixel coords to 0-1 and dispatch a 'mouse_release' event.

    Bug fix: y was divided by self.width instead of self.height, so the
    normalized y coordinate was wrong for non-square windows (the later 3D
    revision of this class divides y by height).
    """
    self.fire('mouse_release', (x / self.width, y / self.height, button, modifiers))
def rgb_to_html(rgb_tuple):
    """Convert the first three 0-255 integer channels of *rgb_tuple* to '#rrggbb'."""
    r, g, b = rgb_tuple[:3]
    return '#%02x%02x%02x' % (r, g, b)
if __name__ == "__main__":
    # Demo: draw a random line each frame on a light-grey 1200x600 canvas.
    from random import random
    ctx = Context(1200, 600, background=(0.9, 0.9, 0.9, 1.), fullscreen=False)
    def draw():
        ctx.line(random(), random(), random(), random(), thickness=2.0)#, color=(1., 1., 1., 1.))
    ctx.start(draw)
Adapted the animation context for 3D rendering.
#!/usr/bin/env python3
import pyglet, time
from . import dispatcher
"""
Check http://cwru-hackers.googlecode.com/svn-history/r233/splatterboard/trunk/draw.py
"""
MAX_OBJECTS = 20
class Context(dispatcher.Dispatcher):
    """Pyglet-backed drawing canvas with optional 3D (frustum) projection.

    In 2D mode all drawing coordinates are normalized (0.0-1.0) and scaled
    to window pixels; in 3D mode coordinates are passed to GL unscaled.
    Mouse events are re-fired through the Dispatcher base class.
    """
    def __init__(self, width=800, height=600, background=(1.0, 1.0, 1.0, 1.0), fullscreen=False, title="animation", chrome=True, screen=0, smooth=True, _3d=False):
        self._width = width
        self._height = height
        self._background = background
        self._fullscreen = fullscreen
        self._title = title
        self._screen = screen
        self._chrome = chrome
        self.window = None
        self._3d = _3d
        self.last_frame = 0
        self.smooth = smooth
        self.objects = []
        dispatcher.Dispatcher.__init__(self)
    @property
    def width(self):
        return self._width
    @property
    def height(self):
        return self._height
    @property
    def fps(self):
        # Current frame rate as measured by pyglet's clock.
        return pyglet.clock.get_fps()
    def start(self, draw_func, update_func=None):
        """Create the window, set up GL/projection state, and run the app loop.

        draw_func runs every frame; update_func (optional) is scheduled on
        the pyglet clock. Blocks until the window closes.
        """
        config = pyglet.gl.Config(sample_buffers=1, samples=4, depth_size=24, double_buffer=True)
        style = pyglet.window.Window.WINDOW_STYLE_DEFAULT if self._chrome else pyglet.window.Window.WINDOW_STYLE_BORDERLESS
        screens = pyglet.window.get_platform().get_default_display().get_screens()
        # Clamp the requested screen index to the attached screens.
        screen_index = min(self._screen, len(screens) - 1)
        screen = screens[screen_index]
        if not (self._fullscreen and screen_index !=0):
            self.window = pyglet.window.Window(config=config, width=self.width, height=self.height, resizable=False, fullscreen=self._fullscreen, caption=self._title, style=style, screen=screen)
        else: # hack because pyglet fullscreen doesnt work on secondary screen
            self._width = screen.width
            self._height = screen.height
            self.window = pyglet.window.Window(config=config, width=self.width, height=self.height, resizable=False, fullscreen=False, caption=self._title, style=style, screen=screen)
            self.window.set_location(screen.x, screen.y)
        self.window.on_mouse_press = self.on_mouse_press
        self.window.on_mouse_release = self.on_mouse_release
        self.window.on_mouse_drag = self.on_mouse_drag
        self.draw_func = draw_func
        # Default to a no-op so the clock schedule below is unconditional.
        self.update_func = update_func if update_func is not None else lambda x: x
        pyglet.gl.glClearColor(*self._background)
        if self._3d:
            pyglet.gl.glViewport(0, 0, self.width, self.height)
            pyglet.gl.glMatrixMode(pyglet.gl.GL_PROJECTION)
            pyglet.gl.glLoadIdentity()
            # pyglet.gl.gluPerspective(60.0, self.width / self.height, 0.1, 1000.0) # on-axis projection
            # use Frustum to do off-axis projection (if the coords arent symmetrical). Equiv to gluPerspective above is left, right, bottom, top = -0.082479, 0.082479, -0.057735, 0.057735
            left, right, bottom, top = -0.082479, 0.082479, -0.057735, 0.057735
            # left, right, bottom, top = -0.001, 0.001, -0.001, 0.001
            pyglet.gl.glFrustum(left, right, bottom, top, 0.1, 1000.0)
            pyglet.gl.glMatrixMode(pyglet.gl.GL_MODELVIEW)
            pyglet.gl.glEnable(pyglet.gl.GL_DEPTH_TEST)
            # for 3d shapes / shading
            # pyglet.gl.glEnable(pyglet.gl.GL_CULL_FACE)
            # pyglet.gl.glPolygonMode(pyglet.gl.GL_FRONT_AND_BACK, pyglet.gl.GL_LINE) # Uncomment this line for a wireframe view
            # pyglet.gl.glEnable(pyglet.gl.GL_LIGHTING)
            # pyglet.gl.glEnable(pyglet.gl.GL_LIGHT0)
            # pyglet.gl.glEnable(pyglet.gl.GL_LIGHT1)
        if self.smooth:
            # Line antialiasing via alpha blending.
            pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
            pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
            pyglet.gl.glEnable(pyglet.gl.GL_LINE_SMOOTH)
            pyglet.gl.glHint(pyglet.gl.GL_LINE_SMOOTH_HINT, pyglet.gl.GL_NICEST)
        self.window.on_draw = self.draw_loop
        if self._3d:
            self.window.on_resize = lambda w,h: True # stop the default 2d projection
        pyglet.clock.schedule(self.update_func)
        pyglet.app.run()
    def translate(self, *matrix):
        # Thin wrapper over glTranslatef.
        pyglet.gl.glTranslatef(*matrix)
    def rotate(self, *matrix):
        # Thin wrapper over glRotatef.
        pyglet.gl.glRotatef(*matrix)
    def draw_loop(self):
        """Per-frame handler: clear, run the user draw callback, draw retained objects."""
        self.window.clear()
        pyglet.gl.glLoadIdentity()
        self.draw_func()
        for o in self.objects:
            o.draw()
        fps = pyglet.clock.get_fps()
        # Warn on the console when the frame rate dips below 30 fps.
        if fps < 30:
            print("%f fps" % fps)
    def line(self, x1, y1, x2, y2, color=(0., 0., 0., 1.), thickness=1.0):
        """Draw a line; normalized coords in 2D mode, raw coords in 3D mode."""
        pyglet.gl.glColor4f(*color)
        pyglet.gl.glLineWidth(thickness)
        if not self._3d:
            pyglet.graphics.draw(2, pyglet.gl.GL_LINES,
                ('v2f', (x1 * self.width, y1 * self.height, x2 * self.width, y2 * self.height))
            )
        else:
            pyglet.graphics.draw(2, pyglet.gl.GL_LINES,
                ('v2f', (x1, y1, x2, y2))
            )
    def line3D(self, x1, y1, z1, x2, y2, z2, color=(0., 0., 0., 1.), thickness=1.0):
        """Draw a 3D line with unscaled coordinates."""
        self.depth = self.height
        pyglet.gl.glColor4f(*color)
        pyglet.gl.glLineWidth(thickness)
        pyglet.graphics.draw(2, pyglet.gl.GL_LINES,
            ('v3f', (x1, y1, z1, x2, y2, z2)) # dont multiply by canvas -- but by what?
        )
    def lines(self, points, color=(0., 0., 0., 1.), thickness=1.0):
        """Draw a connected strip through (x, y) pairs; scaled only in 2D mode."""
        pyglet.gl.glColor4f(*color)
        pyglet.gl.glLineWidth(thickness)
        if not self._3d:
            points = [(item * self.width) if (i % 2 == 0) else (item * self.height) for sublist in points for (i, item) in enumerate(sublist)] # flatten
        else:
            points = [item for sublist in points for (i, item) in enumerate(sublist)]
        pyglet.graphics.draw(len(points) // 2, pyglet.gl.GL_LINE_STRIP, ('v2f', points))
    def lines3D(self, points, color=(0., 0., 0., 1.), thickness=1.0):
        """Draw a connected 3D strip through (x, y, z) triples (unscaled)."""
        pyglet.gl.glColor4f(*color)
        pyglet.gl.glLineWidth(thickness)
        points = [item for sublist in points for (i, item) in enumerate(sublist)] # flatten
        pyglet.graphics.draw(len(points) // 3, pyglet.gl.GL_LINE_STRIP, ('v3f', points))
    def plot(self, signal, color=(0., 0., 0., 1.), thickness=1.0):
        """Plot a 1D signal as a line strip; x positions are sample_index / width."""
        points = [(float(s) / self.width, sample) for (s, sample) in enumerate(signal)]
        self.lines(points, color=color, thickness=thickness)
    def rect(self, x, y, width, height, color=(0., 0., 0., 1.), thickness=1.0):
        """Draw a filled quad; normalized coords are scaled only in 2D mode."""
        pyglet.gl.glColor4f(*color)
        pyglet.gl.glLineWidth(thickness)
        if not self._3d:
            x *= self.width
            y *= self.height
            width *= self.width
            height *= self.height
        pyglet.graphics.draw(4, pyglet.gl.GL_QUADS,
            ('v2f', (x, y, x, y + height, x + width, y + height, x + width, y)),
            # ('c3b', (color, color, color, color))
        )
    # NOTE(review): arc/curve are declared without `self`; calling them on an
    # instance would raise TypeError. They appear to be unimplemented stubs.
    def arc():
        pass
    def curve():
        pass
    def label(self, x, y, text="", font="Helvetica", size=36, width=400, color=(0., 0., 0., 1.), center=False):
        """Create an HTML text label at normalized (x, y), retain it, and draw it."""
        # why is the antialiasing so awful
        # pyglet expects 0-255 integer channels rather than 0-1 floats.
        color = [int(c * 255) for c in color] # why?
        l = pyglet.text.HTMLLabel(text, x=x * self.width, y=y * self.height, width=width, multiline=True)
        l.font_name = font
        l.font_size = size
        l.color = color
        if center:
            l.anchor_x = 'center'
        self.objects.append(l)
        l.draw()
        return l
    def on_mouse_press(self, x, y, button, modifiers):
        """Dispatch 'mouse_press'; coords normalized to 0-1 in 2D mode only."""
        if not self._3d:
            x /= self.width
            y /= self.height
        self.fire('mouse_press', (x, y, button, modifiers))
    def on_mouse_release(self, x, y, button, modifiers):
        """Dispatch 'mouse_release'; coords normalized to 0-1 in 2D mode only."""
        if not self._3d:
            x /= self.width
            y /= self.height
        self.fire('mouse_release', (x, y, button, modifiers))
    def on_mouse_drag(self, x, y, dx, dy, button, modifiers):
        """Dispatch 'mouse_drag'; position and deltas normalized in 2D mode only."""
        if not self._3d:
            x /= self.width
            y /= self.height
            dx /= self.width
            dy /= self.height
        self.fire('mouse_drag', (x, y, dx, dy, button, modifiers))
def rgb_to_html(rgb_tuple):
    """Format the first three 0-255 integer channels of *rgb_tuple* as '#rrggbb'."""
    return '#' + ''.join('%02x' % channel for channel in rgb_tuple[:3])
if __name__ == "__main__":
    # Demo: draw a random line each frame on a light-grey 1200x600 canvas.
    from random import random
    ctx = Context(1200, 600, background=(0.9, 0.9, 0.9, 1.), fullscreen=False)
    def draw():
        ctx.line(random(), random(), random(), random(), thickness=2.0)#, color=(1., 1., 1., 1.))
    ctx.start(draw)
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An Example of a custom Estimator for the Iris dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tensorflow as tf
import iris_data
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=1000, type=int,
help='number of training steps')
def my_model(features, labels, mode, params):
    """DNN with three hidden layers and learning_rate=0.1.

    Custom Estimator model_fn. Expects params to contain 'feature_columns',
    'hidden_units' and 'n_classes'. (Docstring corrected: the original
    claimed a 0.1 dropout probability, but no dropout layer is applied.)
    """
    # Create the fully connected layers.
    net = tf.feature_column.input_layer(features, params['feature_columns'])
    for units in params['hidden_units']:
        net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
    # Compute logits (1 per class).
    logits = tf.layers.dense(net, params['n_classes'], activation=None)
    # Compute predictions.
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
    # Compute loss.
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Compute evaluation metrics.
    accuracy = tf.metrics.accuracy(labels=labels,
                                   predictions=predicted_classes,
                                   name='acc_op')
    metrics = {'accuracy': accuracy}
    tf.summary.scalar('accuracy', accuracy[1])
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode, loss=loss, eval_metric_ops=metrics)
    # Create training op.
    assert mode == tf.estimator.ModeKeys.TRAIN
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
def main(argv):
    """Train, evaluate and run predictions with the custom iris Estimator."""
    args = parser.parse_args(argv[1:])
    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()
    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))
    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.Estimator(
        model_fn=my_model,
        params={
            'feature_columns': my_feature_columns,
            # Two hidden layers of 10 nodes each.
            'hidden_units': [10, 10],
            # The model must choose between 3 classes.
            'n_classes': 3,
        })
    # Train the Model.
    classifier.train(
        input_fn=lambda:iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)
    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda:iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }
    predictions = classifier.predict(
        input_fn=lambda:iris_data.eval_input_fn(predict_x,
                                                labels=None,
                                                batch_size=args.batch_size))
    # Report each prediction next to the species we expected.
    for pred_dict, expec in zip(predictions, expected):
        template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id],
                              100 * probability, expec))
Fix #5814: the comments wrongly said dropout was used, but no dropout layer is applied.
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An Example of a custom Estimator for the Iris dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tensorflow as tf
import iris_data
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=1000, type=int,
help='number of training steps')
def my_model(features, labels, mode, params):
    """DNN with three hidden layers and learning_rate=0.1.

    Custom Estimator model_fn. Expects params to contain 'feature_columns',
    'hidden_units' and 'n_classes'.
    """
    # Create three fully connected layers.
    net = tf.feature_column.input_layer(features, params['feature_columns'])
    for units in params['hidden_units']:
        net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
    # Compute logits (1 per class).
    logits = tf.layers.dense(net, params['n_classes'], activation=None)
    # Compute predictions.
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
    # Compute loss.
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Compute evaluation metrics.
    accuracy = tf.metrics.accuracy(labels=labels,
                                   predictions=predicted_classes,
                                   name='acc_op')
    metrics = {'accuracy': accuracy}
    tf.summary.scalar('accuracy', accuracy[1])
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode, loss=loss, eval_metric_ops=metrics)
    # Create training op.
    assert mode == tf.estimator.ModeKeys.TRAIN
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
def main(argv):
    """Train, evaluate and run predictions with the custom iris Estimator."""
    args = parser.parse_args(argv[1:])
    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()
    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))
    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.Estimator(
        model_fn=my_model,
        params={
            'feature_columns': my_feature_columns,
            # Two hidden layers of 10 nodes each.
            'hidden_units': [10, 10],
            # The model must choose between 3 classes.
            'n_classes': 3,
        })
    # Train the Model.
    classifier.train(
        input_fn=lambda:iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)
    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda:iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }
    predictions = classifier.predict(
        input_fn=lambda:iris_data.eval_input_fn(predict_x,
                                                labels=None,
                                                batch_size=args.batch_size))
    # Report each prediction next to the species we expected.
    for pred_dict, expec in zip(predictions, expected):
        template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id],
                              100 * probability, expec))
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_int_id
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
class ServersAdminTestJSON(base.BaseComputeAdminTest):
    """
    Tests Servers API using admin privileges
    """
    _interface = 'json'
    @classmethod
    def setUpClass(cls):
        # Create two ACTIVE servers with random names so list tests have data.
        super(ServersAdminTestJSON, cls).setUpClass()
        cls.client = cls.os_adm.servers_client
        cls.flavors_client = cls.os_adm.flavors_client
        cls.admin_client = cls._get_identity_admin_client()
        tenant = cls.admin_client.get_tenant_by_name(
            cls.client.tenant_name)
        cls.tenant_id = tenant['id']
        cls.s1_name = rand_name('server')
        resp, server = cls.create_server(name=cls.s1_name,
                                         wait_until='ACTIVE')
        cls.s2_name = rand_name('server')
        resp, server = cls.create_server(name=cls.s2_name,
                                         wait_until='ACTIVE')
    def _get_unused_flavor_id(self):
        """Return a random flavor id that does not currently exist."""
        flavor_id = rand_int_id(start=1000)
        while True:
            try:
                resp, body = self.flavors_client.get_flavor_details(flavor_id)
            except exceptions.NotFound:
                # Id is unused; safe to return.
                break
            flavor_id = rand_int_id(start=1000)
        return flavor_id
    @attr(type='gate')
    def test_list_servers_by_admin(self):
        # Listing servers by admin user returns empty list by default
        resp, body = self.client.list_servers_with_detail()
        servers = body['servers']
        self.assertEqual('200', resp['status'])
        self.assertEqual([], servers)
    @attr(type='gate')
    def test_list_servers_by_admin_with_all_tenants(self):
        # Listing servers by admin user with all tenants parameter
        # Here should be listed all servers
        params = {'all_tenants': ''}
        resp, body = self.client.list_servers_with_detail(params)
        servers = body['servers']
        servers_name = map(lambda x: x['name'], servers)
        self.assertIn(self.s1_name, servers_name)
        self.assertIn(self.s2_name, servers_name)
    @attr(type=['negative', 'gate'])
    def test_resize_server_using_overlimit_ram(self):
        # Resizing to a flavor whose RAM exceeds the quota must fail.
        flavor_name = rand_name("flavor-")
        flavor_id = self._get_unused_flavor_id()
        resp, quota_set = self.quotas_client.get_default_quota_set(
            self.tenant_id)
        ram = int(quota_set['ram']) + 1
        vcpus = 8
        disk = 10
        resp, flavor_ref = self.flavors_client.create_flavor(flavor_name,
                                                             ram, vcpus, disk,
                                                             flavor_id)
        self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
        self.assertRaises(exceptions.OverLimit,
                          self.client.resize,
                          self.servers[0]['id'],
                          flavor_ref['id'])
    @attr(type=['negative', 'gate'])
    def test_resize_server_using_overlimit_vcpus(self):
        # Resizing to a flavor whose vCPU count exceeds the quota must fail.
        flavor_name = rand_name("flavor-")
        flavor_id = self._get_unused_flavor_id()
        ram = 512
        resp, quota_set = self.quotas_client.get_default_quota_set(
            self.tenant_id)
        vcpus = int(quota_set['cores']) + 1
        disk = 10
        resp, flavor_ref = self.flavors_client.create_flavor(flavor_name,
                                                             ram, vcpus, disk,
                                                             flavor_id)
        self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
        self.assertRaises(exceptions.OverLimit,
                          self.client.resize,
                          self.servers[0]['id'],
                          flavor_ref['id'])
class ServersAdminTestXML(ServersAdminTestJSON):
    """Run the same admin server tests against the XML interface."""
    _interface = 'xml'
Add test for admin deleting servers of others
Administrator can delete servers of other users.
Change-Id: I08a9de69dae18a743b872ceb7b070400c1ac39a8
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_int_id
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
class ServersAdminTestJSON(base.BaseComputeAdminTest):
    """
    Tests Servers API using admin privileges
    """
    _interface = 'json'
    @classmethod
    def setUpClass(cls):
        # Create two ACTIVE servers with random names so list tests have data.
        super(ServersAdminTestJSON, cls).setUpClass()
        cls.client = cls.os_adm.servers_client
        cls.flavors_client = cls.os_adm.flavors_client
        cls.admin_client = cls._get_identity_admin_client()
        tenant = cls.admin_client.get_tenant_by_name(
            cls.client.tenant_name)
        cls.tenant_id = tenant['id']
        cls.s1_name = rand_name('server')
        resp, server = cls.create_server(name=cls.s1_name,
                                         wait_until='ACTIVE')
        cls.s2_name = rand_name('server')
        resp, server = cls.create_server(name=cls.s2_name,
                                         wait_until='ACTIVE')
    def _get_unused_flavor_id(self):
        """Return a random flavor id that does not currently exist."""
        flavor_id = rand_int_id(start=1000)
        while True:
            try:
                resp, body = self.flavors_client.get_flavor_details(flavor_id)
            except exceptions.NotFound:
                # Id is unused; safe to return.
                break
            flavor_id = rand_int_id(start=1000)
        return flavor_id
    @attr(type='gate')
    def test_list_servers_by_admin(self):
        # Listing servers by admin user returns empty list by default
        resp, body = self.client.list_servers_with_detail()
        servers = body['servers']
        self.assertEqual('200', resp['status'])
        self.assertEqual([], servers)
    @attr(type='gate')
    def test_list_servers_by_admin_with_all_tenants(self):
        # Listing servers by admin user with all tenants parameter
        # Here should be listed all servers
        params = {'all_tenants': ''}
        resp, body = self.client.list_servers_with_detail(params)
        servers = body['servers']
        servers_name = map(lambda x: x['name'], servers)
        self.assertIn(self.s1_name, servers_name)
        self.assertIn(self.s2_name, servers_name)
    @attr(type='gate')
    def test_admin_delete_servers_of_others(self):
        # Administrator can delete servers of others
        _, server = self.create_server()
        resp, _ = self.client.delete_server(server['id'])
        self.assertEqual('204', resp['status'])
        self.servers_client.wait_for_server_termination(server['id'])
    @attr(type=['negative', 'gate'])
    def test_resize_server_using_overlimit_ram(self):
        # Resizing to a flavor whose RAM exceeds the quota must fail.
        flavor_name = rand_name("flavor-")
        flavor_id = self._get_unused_flavor_id()
        resp, quota_set = self.quotas_client.get_default_quota_set(
            self.tenant_id)
        ram = int(quota_set['ram']) + 1
        vcpus = 8
        disk = 10
        resp, flavor_ref = self.flavors_client.create_flavor(flavor_name,
                                                             ram, vcpus, disk,
                                                             flavor_id)
        self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
        self.assertRaises(exceptions.OverLimit,
                          self.client.resize,
                          self.servers[0]['id'],
                          flavor_ref['id'])
    @attr(type=['negative', 'gate'])
    def test_resize_server_using_overlimit_vcpus(self):
        # Resizing to a flavor whose vCPU count exceeds the quota must fail.
        flavor_name = rand_name("flavor-")
        flavor_id = self._get_unused_flavor_id()
        ram = 512
        resp, quota_set = self.quotas_client.get_default_quota_set(
            self.tenant_id)
        vcpus = int(quota_set['cores']) + 1
        disk = 10
        resp, flavor_ref = self.flavors_client.create_flavor(flavor_name,
                                                             ram, vcpus, disk,
                                                             flavor_id)
        self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
        self.assertRaises(exceptions.OverLimit,
                          self.client.resize,
                          self.servers[0]['id'],
                          flavor_ref['id'])
class ServersAdminTestXML(ServersAdminTestJSON):
    """Run the same admin server tests against the XML interface."""
    _interface = 'xml'
|
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from searchlight.elasticsearch.plugins import base
from searchlight.elasticsearch.plugins.nova import serialize_nova_server
from searchlight.elasticsearch.plugins.nova \
import servers_notification_handler
from searchlight.elasticsearch.plugins import openstack_clients
# TODO(sjmc7): Parameterize once we have plugin configs
LIST_LIMIT = 100
class ServerIndex(base.IndexBase):
    """Searchlight index plugin for Nova servers (OS::Nova::Server)."""
    # Will be combined with 'unsearchable_fields' from config
    UNSEARCHABLE_FIELDS = ['OS-EXT-SRV-ATTR:*']
    def __init__(self):
        super(ServerIndex, self).__init__()
    # NOTE(review): declared @classmethod but the first parameter is named
    # `self`; it behaves as a classmethod regardless.
    @classmethod
    def get_document_type(self):
        return 'OS::Nova::Server'
    def get_mapping(self):
        """Return the Elasticsearch mapping for server documents."""
        return {
            'dynamic': True,
            'properties': {
                'id': {'type': 'string', 'index': 'not_analyzed'},
                'name': {
                    'type': 'string',
                    'index': 'not_analyzed',
                    'fields': {
                        # Raw sub-field for exact matching/sorting.
                        'raw': {'type': 'string', 'index': 'not_analyzed'}
                    }
                },
                'flavor': {
                    'type': 'nested',
                    'properties': {
                        'id': {'type': 'string', 'index': 'not_analyzed'}
                    }
                },
                'owner': {'type': 'string', 'index': 'not_analyzed'},
                'tenant_id': {'type': 'string', 'index': 'not_analyzed'},
                'user_id': {'type': 'string', 'index': 'not_analyzed'},
                'created': {'type': 'date'},
                'updated': {'type': 'date'},
                'created_at': {'type': 'date'},
                'updated_at': {'type': 'date'},
                'networks': {
                    'type': 'nested',
                    'properties': {
                        'name': {'type': 'string'},
                        'version': {'type': 'short'},
                        'OS-EXT-IPS-MAC:mac_addr': {
                            'type': 'string',
                            'index': 'not_analyzed'
                        },
                        'OS-EXT-IPS:type': {
                            'type': 'string',
                            'index': 'not_analyzed'
                        },
                        'ipv4_addr': {'type': 'ip'},
                        'ipv6_addr': {
                            'type': 'string',
                            'index': 'not_analyzed'
                        }
                    }
                },
                'image': {
                    'type': 'nested',
                    'properties': {
                        'id': {'type': 'string', 'index': 'not_analyzed'}
                    }
                },
                'OS-EXT-AZ:availability_zone': {
                    'type': 'string',
                    'index': 'not_analyzed'
                },
                'security_groups': {
                    'type': 'nested',
                    'properties': {
                        'name': {'type': 'string'}
                    }
                },
                'status': {'type': 'string', 'index': 'not_analyzed'},
            },
        }
    @property
    def unsearchable_fields(self):
        # Class-level exclusions combined with config-provided ones.
        from_conf = super(ServerIndex, self).unsearchable_fields
        return ServerIndex.UNSEARCHABLE_FIELDS + from_conf
    @property
    def facets_with_options(self):
        """Fields whose facet values should be listed as options."""
        return ('OS-EXT-AZ:availability_zone',
                'status', 'image.id', 'flavor.id', 'networks.name',
                'networks.OS-EXT-IPS:type', 'networks.version',
                'security_groups.name')
    @property
    def facets_excluded(self):
        """A map of {name: allow_admin} that indicate which
        fields should not be offered as facet options, or those that should
        only be available to administrators.
        """
        return {'tenant_id': True,
                'created': False, 'updated': False}
    def _get_rbac_field_filters(self, request_context):
        """Return any RBAC field filters to be injected into an indices
        query. Document type will be added to this list.
        """
        return [
            {'term': {'tenant_id': request_context.owner}}
        ]
    def get_objects(self):
        """Generator that lists all nova servers owned by all tenants."""
        has_more = True
        marker = None
        while has_more:
            servers = openstack_clients.get_novaclient().servers.list(
                limit=LIST_LIMIT,
                search_opts={'all_tenants': True},
                marker=marker
            )
            if not servers:
                # Definitely no more; break straight away
                break
            # servers.list always returns a list so we can grab the last id
            has_more = len(servers) == LIST_LIMIT
            marker = servers[-1].id
            for server in servers:
                yield server
    def serialize(self, server):
        """Convert a novaclient server object into an indexable document."""
        return serialize_nova_server(server)
    @classmethod
    def get_notification_exchanges(cls):
        # Servers change via both nova and neutron (port) notifications.
        return ['nova', 'neutron']
    def get_notification_handler(self):
        """Build the notification handler bound to this index."""
        return servers_notification_handler.InstanceHandler(
            self.engine,
            self.get_index_name(),
            self.get_document_type()
        )
    def get_notification_supported_events(self):
        # TODO(sjmc7): DRY
        # Most events are duplicated by instance.update
        return [
            'compute.instance.update', 'compute.instance.exists',
            'compute.instance.create.end', 'compute.instance.delete.end',
            'compute.instance.power_on.end', 'compute.instance.power_off.end',
            'port.delete.end', 'port.create.end',
        ]
Remove 'not_analyzed' from server.name
Remove 'not_analyzed' from server name. In general,
not_analyzed should only be used for id fields and those where it
doesn't make sense to tokenize values.
Change-Id: I35bff3a422d1a809b461e3f1fdf162e2fb1c5f24
Closes-Bug: #1505358
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from searchlight.elasticsearch.plugins import base
from searchlight.elasticsearch.plugins.nova import serialize_nova_server
from searchlight.elasticsearch.plugins.nova \
import servers_notification_handler
from searchlight.elasticsearch.plugins import openstack_clients
# TODO(sjmc7): Parameterize once we have plugin configs
LIST_LIMIT = 100
class ServerIndex(base.IndexBase):
    """Search index plugin for nova servers (OS::Nova::Server).

    Defines the Elasticsearch mapping, facet/RBAC policy and the
    listing/notification hooks used to (re)index server documents.
    """
    # Will be combined with 'unsearchable_fields' from config
    UNSEARCHABLE_FIELDS = ['OS-EXT-SRV-ATTR:*']

    def __init__(self):
        super(ServerIndex, self).__init__()

    @classmethod
    def get_document_type(cls):
        # Fixed: was declared with `self` despite being a @classmethod;
        # the conventional first argument for classmethods is `cls`.
        return 'OS::Nova::Server'

    def get_mapping(self):
        """Elasticsearch mapping for server documents.

        'not_analyzed' is used only for id-like fields and those whose
        values should not be tokenized; 'name' keeps an analyzed form
        plus a 'raw' (not_analyzed) sub-field for exact matching.
        """
        return {
            'dynamic': True,
            'properties': {
                'id': {'type': 'string', 'index': 'not_analyzed'},
                'name': {
                    'type': 'string',
                    'fields': {
                        'raw': {'type': 'string', 'index': 'not_analyzed'}
                    }
                },
                'flavor': {
                    'type': 'nested',
                    'properties': {
                        'id': {'type': 'string', 'index': 'not_analyzed'}
                    }
                },
                'owner': {'type': 'string', 'index': 'not_analyzed'},
                'tenant_id': {'type': 'string', 'index': 'not_analyzed'},
                'user_id': {'type': 'string', 'index': 'not_analyzed'},
                'created': {'type': 'date'},
                'updated': {'type': 'date'},
                'created_at': {'type': 'date'},
                'updated_at': {'type': 'date'},
                'networks': {
                    'type': 'nested',
                    'properties': {
                        'name': {'type': 'string'},
                        'version': {'type': 'short'},
                        'OS-EXT-IPS-MAC:mac_addr': {
                            'type': 'string',
                            'index': 'not_analyzed'
                        },
                        'OS-EXT-IPS:type': {
                            'type': 'string',
                            'index': 'not_analyzed'
                        },
                        'ipv4_addr': {'type': 'ip'},
                        'ipv6_addr': {
                            'type': 'string',
                            'index': 'not_analyzed'
                        }
                    }
                },
                'image': {
                    'type': 'nested',
                    'properties': {
                        'id': {'type': 'string', 'index': 'not_analyzed'}
                    }
                },
                'OS-EXT-AZ:availability_zone': {
                    'type': 'string',
                    'index': 'not_analyzed'
                },
                'security_groups': {
                    'type': 'nested',
                    'properties': {
                        'name': {'type': 'string'}
                    }
                },
                'status': {'type': 'string', 'index': 'not_analyzed'},
            },
        }

    @property
    def unsearchable_fields(self):
        # Class-level exclusions combined with config-driven ones.
        from_conf = super(ServerIndex, self).unsearchable_fields
        return ServerIndex.UNSEARCHABLE_FIELDS + from_conf

    @property
    def facets_with_options(self):
        # Fields offered as faceted (bucketed) search options.
        return ('OS-EXT-AZ:availability_zone',
                'status', 'image.id', 'flavor.id', 'networks.name',
                'networks.OS-EXT-IPS:type', 'networks.version',
                'security_groups.name')

    @property
    def facets_excluded(self):
        """A map of {name: allow_admin} that indicate which
        fields should not be offered as facet options, or those that should
        only be available to administrators.
        """
        return {'tenant_id': True,
                'created': False, 'updated': False}

    def _get_rbac_field_filters(self, request_context):
        """Return any RBAC field filters to be injected into an indices
        query. Document type will be added to this list.
        """
        return [
            {'term': {'tenant_id': request_context.owner}}
        ]

    def get_objects(self):
        """Generator that lists all nova servers owned by all tenants."""
        has_more = True
        marker = None
        while has_more:
            servers = openstack_clients.get_novaclient().servers.list(
                limit=LIST_LIMIT,
                search_opts={'all_tenants': True},
                marker=marker
            )
            if not servers:
                # Definitely no more; break straight away
                break
            # servers.list always returns a list so we can grab the last id
            has_more = len(servers) == LIST_LIMIT
            marker = servers[-1].id
            for server in servers:
                yield server

    def serialize(self, server):
        # Delegate document construction to the shared serializer.
        return serialize_nova_server(server)

    @classmethod
    def get_notification_exchanges(cls):
        # Servers react to both nova and neutron (port) notifications.
        return ['nova', 'neutron']

    def get_notification_handler(self):
        return servers_notification_handler.InstanceHandler(
            self.engine,
            self.get_index_name(),
            self.get_document_type()
        )

    def get_notification_supported_events(self):
        # TODO(sjmc7): DRY
        # Most events are duplicated by instance.update
        return [
            'compute.instance.update', 'compute.instance.exists',
            'compute.instance.create.end', 'compute.instance.delete.end',
            'compute.instance.power_on.end', 'compute.instance.power_off.end',
            'port.delete.end', 'port.create.end',
        ]
|
#!/usr/bin/env python
import logging
import os
import sys
import json
import re
from gluon.http import HTTP
# Per-application cache of parsed config objects.
_CONF_OBJ_DICT = {}
def get_conf(request):
    """Return a (cached) config parser for this app, reading
    private/localconfig when present, else private/config."""
    global _CONF_OBJ_DICT
    app_name = request.application
    parser = _CONF_OBJ_DICT.get(app_name)
    if parser is None:
        from ConfigParser import SafeConfigParser
        parser = SafeConfigParser({})
        # DON'T convert property names to lower-case!
        parser.optionxform = str
        local_path = "applications/%s/private/localconfig" % app_name
        if os.path.isfile(local_path):
            parser.read(local_path)
        else:
            parser.read("applications/%s/private/config" % app_name)
        _CONF_OBJ_DICT[app_name] = parser
    return parser
def get_logging_level(request):
    '''
    Map the [logging] section's ``level`` option to a ``logging`` module
    constant. Unrecognized names yield logging.NOTSET; a missing section,
    missing option or unreadable config falls back to logging.INFO.
    '''
    conf = get_conf(request)
    try:
        level_str = conf.get("logging", "level").upper()
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any config problem means "use the default"
        return logging.INFO
    # table lookup replaces the original six-way if/elif chain
    level_names = {
        "NOTSET": logging.NOTSET,
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    return level_names.get(level_str, logging.NOTSET)
def get_logger(request, name):
    '''
    Return a named logger whose level comes from the app config; the
    handler/formatter setup runs only once per logger instance.
    '''
    logger = logging.getLogger(name)
    # a flag stashed on the logger marks ones we've already configured
    if not getattr(logger, 'is_configured', False):
        level = get_logging_level(request)
        formatter = logging.Formatter("%(levelname) 8s: %(message)s")
        formatter.datefmt = '%H:%M:%S'
        logger.setLevel(level)
        handler = logging.StreamHandler()
        handler.setLevel(level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.is_configured = True
    return logger
def get_opentree_services_domains(request):
    '''
    Build a dict of service domains from the [domains] config section,
    keyed "<name>_domain" (treemachine_domain, taxomachine_domain,
    oti_domain, opentree_api_domain); values may include a port.
    Mainly useful for debugging, since developers can point at local
    service instances by tweaking private/conf (see private/conf.example).
    '''
    conf = get_conf(request)
    return dict(("%s_domain" % name, url)
                for name, url in conf.items('domains'))
def get_maintenance_info(request):
    '''
    Report whether scheduled maintenance (e.g. migration to a newer
    system) is in progress, in which case study editing should be blocked
    and a notice shown to the user. Missing or unreadable [maintenance]
    config falls back to "no maintenance".
    '''
    conf = get_conf(request)
    minfo = dict()
    try:
        minfo['maintenance_in_progress'] = conf.getboolean(
            "maintenance", "maintenance_in_progress")
        minfo['maintenance_notice'] = conf.get(
            "maintenance", "maintenance_notice")
    except:
        minfo['maintenance_in_progress'] = False
        minfo['maintenance_notice'] = ""
    return minfo
def get_opentree_services_method_urls(request):
    '''
    Return the domains dict plus one entry per service method, where each
    method URL template has its "{<name>_domain}" tokens expanded to the
    configured domain. Useful for debugging and for deployments that
    proxy all services through a single domain (see private/conf.example).
    '''
    domains = get_opentree_services_domains(request)
    conf = get_conf(request)
    method_urls = domains.copy()
    for method_name, template in conf.items('method_urls'):
        # expand any domain tokens, eg, 'treemachine_domain'
        expanded = template
        for domain_name, domain_url in domains.items():
            expanded = expanded.replace('{%s}' % domain_name, domain_url)
        method_urls[method_name] = expanded
    return method_urls
def get_user_display_name():
    """Best display name for the logged-in user, for UI display and
    credit in study Nexson. The fallback chain exists because GitHub's
    name fields are varied and optional."""
    from gluon import current
    auth = current.session.auth or None
    if (not auth) or (not auth.get('user', None)):
        return 'ANONYMOUS'
    user = auth.user
    if user.name:
        # a preset display name wins
        return user.name
    if user.first_name and user.last_name:
        # combined first and last is also good
        return '%s %s' % (user.first_name, user.last_name,)
    if user.username:
        # compact userid is our last resort
        return user.username
    # no name or id found (this should never happen)
    return 'UNKNOWN'
def get_user_login():
    """Return the current user's login (username); 'ANONYMOUS' when
    nobody is logged in, 'UNKNOWN' if the record has no username."""
    from gluon import current
    auth = current.session.auth or None
    if (not auth) or (not auth.get('user', None)):
        return 'ANONYMOUS'
    username = auth.user.username
    if username:
        return username
    # no name or id found (this should never happen)
    return 'UNKNOWN'
def get_domain_banner_text(request):
    # Label test domains with a CSS banner; production gets no banner.
    banners = {
        'devtree.opentreeoflife.org': 'DEVELOPMENT',
        'stagingtree.opentreeoflife.org': 'STAGING',
    }
    return banners.get(request.env.http_host, '')
def get_domain_banner_hovertext(request):
    # Hover-text explaining the test-domain banner; empty on production.
    hovertext = {
        'devtree.opentreeoflife.org':
            'This is the development site for Open Tree of Life. Data and services may not be up to date, or may be untested. Production version at tree.opentreeoflife.org',
        'stagingtree.opentreeoflife.org':
            'This is the staging site for Open Tree of Life. Data and services may not be up to date, or may be untested. Production version at tree.opentreeoflife.org',
    }
    return hovertext.get(request.env.http_host, '')
# Matches TreeBASE dataDeposit URLs and captures the numeric study id.
# Fixed: the pattern is now a raw string; '\d' in a plain string is an
# invalid escape sequence (DeprecationWarning on Python >= 3.6).
treebase_deposit_doi = re.compile(r'//purl.org/phylo/treebase/phylows/study/TB2:S(?P<treebase_id>\d+)')
def get_data_deposit_message(raw_deposit_doi):
    """Return a *compact* hyperlink (HTML) to study data, or an empty
    string if no DOI/URL is found. Some cryptic dataDeposit URLs require
    more explanation or a modified URL to be more web-friendly.

    NOTE that we maintain a client-side counterpart in
    curator/static/js/study-editor.js > getDataDepositMessage
    """
    raw_deposit_doi = raw_deposit_doi.strip()
    if raw_deposit_doi == '':
        return ''
    # TreeBASE URLs should point to a web page (vs RDF)
    # EXAMPLE: http://purl.org/phylo/treebase/phylows/study/TB2:S13451
    #   => http://treebase.org/treebase-web/search/study/summary.html?id=13451
    treebase_match = treebase_deposit_doi.search(raw_deposit_doi)
    if treebase_match:
        return ('<a href="http://treebase.org/treebase-web/search/study/summary.html?'+
                'id=%s" target="_blank">Data in Treebase</a>' % treebase_match.group('treebase_id'))
    # TODO: Add other substitutions?
    return ('<a target="_blank" href="%s">Data deposit DOI/URL</a>' % raw_deposit_doi)
def fetch_current_TNRS_context_names(request):
    """Fetch the latest TNRS contextName values (as JSON) from the
    configured remote service and return a list of UTF-8 encoded names,
    with the LIFE/PLANTS/ANIMALS groups ordered first. On any failure,
    return the tuple ('ERROR', <message>) instead of raising."""
    try:
        # fetch the latest contextName values as JSON from remote site
        from gluon.tools import fetch
        import simplejson
        method_dict = get_opentree_services_method_urls(request)
        fetch_url = method_dict['getContextsJSON_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url
        # as usual, this needs to be a POST (pass empty fetch_args)
        contextnames_response = fetch(fetch_url, data='')
        contextnames_json = simplejson.loads(contextnames_response)
        # start with LIFE group (incl. 'All life'), and add any other ordered suggestions
        ordered_group_names = unique_ordered_list(
            ['LIFE', 'PLANTS', 'ANIMALS'] + [g for g in contextnames_json])
        context_names = []
        for gname in ordered_group_names:
            # allow for eventual removal or renaming of expected groups
            if gname in contextnames_json:
                context_names += [n.encode('utf-8') for n in contextnames_json[gname]]
        return (context_names)
    except Exception as e:
        # Fixed: `except Exception, e` is Python 2-only syntax; `as` is
        # valid on Python >= 2.6 and 3.x. BaseException.message is also
        # Python 2-only, so fall back to str(e) when it's absent.
        return ('ERROR', getattr(e, 'message', str(e)))
def unique_ordered_list(seq):
    """Return seq's items in first-seen order with duplicates dropped."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
# adapted from phylesystem-api/controllers/default.py (__extract_nexson_from_http_call)
def extract_nexson_from_http_call(request, **kwargs):
    """Returns the nexson blob from `kwargs` or the request.body.

    Accepts either a 'nexson' kwarg (already-parsed dict or JSON text) or
    the raw request body; a top-level {'nexson': ...} envelope is
    unwrapped. Raises HTTP 400 if the payload is not valid JSON.
    """
    try:
        # check for kwarg 'nexson', or load the full request body
        if 'nexson' in kwargs:
            nexson = kwargs.get('nexson', {})
        else:
            nexson = request.body.read()
        if not isinstance(nexson, dict):
            nexson = json.loads(nexson)
        if 'nexson' in nexson:
            nexson = nexson['nexson']
    except Exception:
        # Fixed: was a bare `except:`; narrowed so SystemExit and
        # KeyboardInterrupt are not converted into a 400 response.
        # TODO: _LOG.exception('Exception getting nexson content in extract_nexson_from_http_call')
        raise HTTP(400, json.dumps({"error": 1, "description": 'NexSON must be valid JSON'}))
    return nexson
def get_currently_deployed_opentree_branch(request):
    """Read local git configuration and return the current branch.

    Looks for '../.git/HEAD' relative to the app's real (symlink-resolved)
    folder; returns a diagnostic string if it can't be read or parsed.
    """
    # Backtrack to the real (vs. symlinked) filesystem path for this app
    real_app_path = os.path.realpath(request.folder)
    infilepath = os.path.join(real_app_path, '..', '.git', 'HEAD')
    branch_name = 'NOT FOUND (app is not inside a git repo?)'
    try:
        # Fixed: `with` guarantees the handle is closed (the original
        # leaked it if an exception fired mid-read) and the bare
        # `except:` is narrowed to filesystem errors.
        with open(infilepath) as infile:
            for line in infile:
                if 'ref:' in line:
                    # FOR EXAMPLE:
                    #   ref: refs/heads/mystery-branch\n
                    branch_name = line.split('/')[-1].strip()
                    break
    except (IOError, OSError):
        # no .git directory (or unreadable HEAD); keep the fallback name
        pass
    return branch_name
Ignore first_name and last_name fields for display.
These fields are included strictly for web2py compatibility, but we
defer to the single `name` field used by GitHub instead.
#!/usr/bin/env python
import logging
import os
import sys
import json
import re
from gluon.http import HTTP
# Per-application cache of parsed config objects.
_CONF_OBJ_DICT = {}
def get_conf(request):
    """Return a (cached) config parser for this app, reading
    private/localconfig when present, else private/config."""
    global _CONF_OBJ_DICT
    app_name = request.application
    parser = _CONF_OBJ_DICT.get(app_name)
    if parser is None:
        from ConfigParser import SafeConfigParser
        parser = SafeConfigParser({})
        # DON'T convert property names to lower-case!
        parser.optionxform = str
        local_path = "applications/%s/private/localconfig" % app_name
        if os.path.isfile(local_path):
            parser.read(local_path)
        else:
            parser.read("applications/%s/private/config" % app_name)
        _CONF_OBJ_DICT[app_name] = parser
    return parser
def get_logging_level(request):
    '''
    Map the [logging] section's ``level`` option to a ``logging`` module
    constant. Unrecognized names yield logging.NOTSET; a missing section,
    missing option or unreadable config falls back to logging.INFO.
    '''
    conf = get_conf(request)
    try:
        level_str = conf.get("logging", "level").upper()
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any config problem means "use the default"
        return logging.INFO
    # table lookup replaces the original six-way if/elif chain
    level_names = {
        "NOTSET": logging.NOTSET,
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    return level_names.get(level_str, logging.NOTSET)
def get_logger(request, name):
    '''
    Return a named logger whose level comes from the app config; the
    handler/formatter setup runs only once per logger instance.
    '''
    logger = logging.getLogger(name)
    # a flag stashed on the logger marks ones we've already configured
    if not getattr(logger, 'is_configured', False):
        level = get_logging_level(request)
        formatter = logging.Formatter("%(levelname) 8s: %(message)s")
        formatter.datefmt = '%H:%M:%S'
        logger.setLevel(level)
        handler = logging.StreamHandler()
        handler.setLevel(level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.is_configured = True
    return logger
def get_opentree_services_domains(request):
    '''
    Build a dict of service domains from the [domains] config section,
    keyed "<name>_domain" (treemachine_domain, taxomachine_domain,
    oti_domain, opentree_api_domain); values may include a port.
    Mainly useful for debugging, since developers can point at local
    service instances by tweaking private/conf (see private/conf.example).
    '''
    conf = get_conf(request)
    return dict(("%s_domain" % name, url)
                for name, url in conf.items('domains'))
def get_maintenance_info(request):
    '''
    Report whether scheduled maintenance (e.g. migration to a newer
    system) is in progress, in which case study editing should be blocked
    and a notice shown to the user. Missing or unreadable [maintenance]
    config falls back to "no maintenance".
    '''
    conf = get_conf(request)
    minfo = dict()
    try:
        minfo['maintenance_in_progress'] = conf.getboolean(
            "maintenance", "maintenance_in_progress")
        minfo['maintenance_notice'] = conf.get(
            "maintenance", "maintenance_notice")
    except:
        minfo['maintenance_in_progress'] = False
        minfo['maintenance_notice'] = ""
    return minfo
def get_opentree_services_method_urls(request):
    '''
    Return the domains dict plus one entry per service method, where each
    method URL template has its "{<name>_domain}" tokens expanded to the
    configured domain. Useful for debugging and for deployments that
    proxy all services through a single domain (see private/conf.example).
    '''
    domains = get_opentree_services_domains(request)
    conf = get_conf(request)
    method_urls = domains.copy()
    for method_name, template in conf.items('method_urls'):
        # expand any domain tokens, eg, 'treemachine_domain'
        expanded = template
        for domain_name, domain_url in domains.items():
            expanded = expanded.replace('{%s}' % domain_name, domain_url)
        method_urls[method_name] = expanded
    return method_urls
def get_user_display_name():
    """Best display name for the logged-in user, for UI display and
    credit in study Nexson.

    N.B. auth.user.first_name and auth.user.last_name are not reliable in
    our apps! They exist for web2py compatibility only; we defer to the
    GitHub User API's 'name' field instead.
    """
    from gluon import current
    auth = current.session.auth or None
    if (not auth) or (not auth.get('user', None)):
        return 'ANONYMOUS'
    user = auth.user
    if user.name:
        # a preset display name wins
        return user.name
    if user.username:
        # compact userid is our last resort
        return user.username
    # no name or id found (this should never happen)
    return 'UNKNOWN'
def get_user_login():
    """Return the current user's login (username); 'ANONYMOUS' when
    nobody is logged in, 'UNKNOWN' if the record has no username."""
    from gluon import current
    auth = current.session.auth or None
    if (not auth) or (not auth.get('user', None)):
        return 'ANONYMOUS'
    username = auth.user.username
    if username:
        return username
    # no name or id found (this should never happen)
    return 'UNKNOWN'
def get_domain_banner_text(request):
    # Label test domains with a CSS banner; production gets no banner.
    banners = {
        'devtree.opentreeoflife.org': 'DEVELOPMENT',
        'stagingtree.opentreeoflife.org': 'STAGING',
    }
    return banners.get(request.env.http_host, '')
def get_domain_banner_hovertext(request):
    # Hover-text explaining the test-domain banner; empty on production.
    hovertext = {
        'devtree.opentreeoflife.org':
            'This is the development site for Open Tree of Life. Data and services may not be up to date, or may be untested. Production version at tree.opentreeoflife.org',
        'stagingtree.opentreeoflife.org':
            'This is the staging site for Open Tree of Life. Data and services may not be up to date, or may be untested. Production version at tree.opentreeoflife.org',
    }
    return hovertext.get(request.env.http_host, '')
# Matches TreeBASE dataDeposit URLs and captures the numeric study id.
# Fixed: the pattern is now a raw string; '\d' in a plain string is an
# invalid escape sequence (DeprecationWarning on Python >= 3.6).
treebase_deposit_doi = re.compile(r'//purl.org/phylo/treebase/phylows/study/TB2:S(?P<treebase_id>\d+)')
def get_data_deposit_message(raw_deposit_doi):
    """Return a *compact* hyperlink (HTML) to study data, or an empty
    string if no DOI/URL is found. Some cryptic dataDeposit URLs require
    more explanation or a modified URL to be more web-friendly.

    NOTE that we maintain a client-side counterpart in
    curator/static/js/study-editor.js > getDataDepositMessage
    """
    raw_deposit_doi = raw_deposit_doi.strip()
    if raw_deposit_doi == '':
        return ''
    # TreeBASE URLs should point to a web page (vs RDF)
    # EXAMPLE: http://purl.org/phylo/treebase/phylows/study/TB2:S13451
    #   => http://treebase.org/treebase-web/search/study/summary.html?id=13451
    treebase_match = treebase_deposit_doi.search(raw_deposit_doi)
    if treebase_match:
        return ('<a href="http://treebase.org/treebase-web/search/study/summary.html?'+
                'id=%s" target="_blank">Data in Treebase</a>' % treebase_match.group('treebase_id'))
    # TODO: Add other substitutions?
    return ('<a target="_blank" href="%s">Data deposit DOI/URL</a>' % raw_deposit_doi)
def fetch_current_TNRS_context_names(request):
    """Fetch the latest TNRS contextName values (as JSON) from the
    configured remote service and return a list of UTF-8 encoded names,
    with the LIFE/PLANTS/ANIMALS groups ordered first. On any failure,
    return the tuple ('ERROR', <message>) instead of raising."""
    try:
        # fetch the latest contextName values as JSON from remote site
        from gluon.tools import fetch
        import simplejson
        method_dict = get_opentree_services_method_urls(request)
        fetch_url = method_dict['getContextsJSON_url']
        if fetch_url.startswith('//'):
            # Prepend scheme to a scheme-relative URL
            fetch_url = "https:%s" % fetch_url
        # as usual, this needs to be a POST (pass empty fetch_args)
        contextnames_response = fetch(fetch_url, data='')
        contextnames_json = simplejson.loads(contextnames_response)
        # start with LIFE group (incl. 'All life'), and add any other ordered suggestions
        ordered_group_names = unique_ordered_list(
            ['LIFE', 'PLANTS', 'ANIMALS'] + [g for g in contextnames_json])
        context_names = []
        for gname in ordered_group_names:
            # allow for eventual removal or renaming of expected groups
            if gname in contextnames_json:
                context_names += [n.encode('utf-8') for n in contextnames_json[gname]]
        return (context_names)
    except Exception as e:
        # Fixed: `except Exception, e` is Python 2-only syntax; `as` is
        # valid on Python >= 2.6 and 3.x. BaseException.message is also
        # Python 2-only, so fall back to str(e) when it's absent.
        return ('ERROR', getattr(e, 'message', str(e)))
def unique_ordered_list(seq):
    """Return seq's items in first-seen order with duplicates dropped."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
# adapted from phylesystem-api/controllers/default.py (__extract_nexson_from_http_call)
def extract_nexson_from_http_call(request, **kwargs):
    """Returns the nexson blob from `kwargs` or the request.body.

    Accepts either a 'nexson' kwarg (already-parsed dict or JSON text) or
    the raw request body; a top-level {'nexson': ...} envelope is
    unwrapped. Raises HTTP 400 if the payload is not valid JSON.
    """
    try:
        # check for kwarg 'nexson', or load the full request body
        if 'nexson' in kwargs:
            nexson = kwargs.get('nexson', {})
        else:
            nexson = request.body.read()
        if not isinstance(nexson, dict):
            nexson = json.loads(nexson)
        if 'nexson' in nexson:
            nexson = nexson['nexson']
    except Exception:
        # Fixed: was a bare `except:`; narrowed so SystemExit and
        # KeyboardInterrupt are not converted into a 400 response.
        # TODO: _LOG.exception('Exception getting nexson content in extract_nexson_from_http_call')
        raise HTTP(400, json.dumps({"error": 1, "description": 'NexSON must be valid JSON'}))
    return nexson
def get_currently_deployed_opentree_branch(request):
    """Read local git configuration and return the current branch.

    Looks for '../.git/HEAD' relative to the app's real (symlink-resolved)
    folder; returns a diagnostic string if it can't be read or parsed.
    """
    # Backtrack to the real (vs. symlinked) filesystem path for this app
    real_app_path = os.path.realpath(request.folder)
    infilepath = os.path.join(real_app_path, '..', '.git', 'HEAD')
    branch_name = 'NOT FOUND (app is not inside a git repo?)'
    try:
        # Fixed: `with` guarantees the handle is closed (the original
        # leaked it if an exception fired mid-read) and the bare
        # `except:` is narrowed to filesystem errors.
        with open(infilepath) as infile:
            for line in infile:
                if 'ref:' in line:
                    # FOR EXAMPLE:
                    #   ref: refs/heads/mystery-branch\n
                    branch_name = line.split('/')[-1].strip()
                    break
    except (IOError, OSError):
        # no .git directory (or unreadable HEAD); keep the fallback name
        pass
    return branch_name
|
#!/usr/bin/python
import sys, math, argparse, re, json, os, subprocess, logging
# the difficulties for each technics
from parameters import Knows, Settings, isKnows, isSettings
from parameters import easy, medium, hard, harder, hardcore, mania, god, samus, impossibru, infinity, diff2text
# the helper functions
from smbool import SMBool
from smboolmanager import SMBoolManager
from helpers import Pickup, Bosses
from rom import RomLoader
from graph_locations import locations as graphLocations
from graph import AccessGraph
from graph_access import vanillaTransitions, accessPoints
from utils import PresetLoader
import log
class Conf:
    """Default solver configuration knobs (read as class attributes)."""
    # keep getting majors of at most this difficulty before going for minors or changing area
    difficultyTarget = medium
    # display the generated path (spoilers!)
    displayGeneratedPath = False
    # choose how many items are required (possible value: minimal/all/any)
    itemsPickup = 'minimal'
    # the list of items to not pick up
    itemsForbidden = []
class SolverState(object):
    """Serializable snapshot of a solver session.

    fromSolver()/toSolver() convert between a live solver object and the
    plain-dict ``self.state``; fromJson()/toJson() persist that dict.
    The *Web helpers build display-oriented views keyed by sanitized
    location names for the web UI (isolver).

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; statement nesting follows the most plausible reading.
    """
    def fromSolver(self, solver):
        # Capture everything needed to later resume this solver.
        self.state = {}
        # bool
        self.state["fullRando"] = solver.fullRando
        # bool
        self.state["areaRando"] = solver.areaRando
        # dict of raw patches
        self.state["patches"] = solver.patches
        # dict {locName: {itemName: "xxx", "accessPoint": "xxx"}, ...}
        self.state["locsData"] = self.getLocsData(solver.locations)
        # list [(ap1, ap2), (ap3, ap4), ...]
        self.state["graphTransitions"] = solver.graphTransitions
        # preset file name
        self.state["presetFileName"] = solver.presetFileName
        ## items collected / locs visited / bosses killed
        # list [item1, item2, ...]
        self.state["collectedItems"] = solver.collectedItems
        # dict {locName: {index: 0, difficulty: (bool, diff, ...), ...} with index being the position of the loc in visitedLocations
        self.state["visitedLocations"] = self.getVisitedLocations(solver.visitedLocations)
        # dict {locName: (bool, diff, [know1, ...], [item1, ...]), ...}
        self.state["availableLocations"] = self.getAvailableLocations(solver.majorLocations)
        # string of last access point
        self.state["lastLoc"] = solver.lastLoc
        # list of killed bosses: ["boss1", "boss2"]
        self.state["bosses"] = [boss for boss in Bosses.golden4Dead if Bosses.golden4Dead[boss] == True]
        # dict {locNameWeb: {infos}, ...}
        self.state["availableLocationsWeb"] = self.getAvailableLocationsWeb(solver.majorLocations)
        # dict {locNameWeb: {infos}, ...}
        self.state["visitedLocationsWeb"] = self.getAvailableLocationsWeb(solver.visitedLocations)
        # dict {locNameWeb: {infos}, ...}
        self.state["remainLocationsWeb"] = self.getRemainLocationsWeb(solver.majorLocations)
    def toSolver(self, solver):
        # Restore a solver from a previously captured state dict.
        solver.fullRando = self.state["fullRando"]
        solver.areaRando = self.state["areaRando"]
        solver.patches = self.setPatches(self.state["patches"])
        self.setLocsData(solver.locations)
        solver.graphTransitions = self.state["graphTransitions"]
        # preset
        solver.presetFileName = self.state["presetFileName"]
        # items collected / locs visited / bosses killed
        solver.collectedItems = self.state["collectedItems"]
        (solver.visitedLocations, solver.majorLocations) = self.setLocations(self.state["visitedLocations"],
                                                                             self.state["availableLocations"],
                                                                             solver.locations)
        solver.lastLoc = self.state["lastLoc"]
        # rebuild global boss-kill state from the saved list
        Bosses.reset()
        for boss in self.state["bosses"]:
            Bosses.beatBoss(boss)
    def getLocsData(self, locations):
        # Per-location item (and optional access point) for serialization.
        ret = {}
        for loc in locations:
            ret[loc["Name"]] = {"itemName": loc["itemName"]}
            if "accessPoint" in loc:
                ret[loc["Name"]]["accessPoint"] = loc["accessPoint"]
        return ret
    def setLocsData(self, locations):
        # Inverse of getLocsData: write items/access points back onto locs.
        for loc in locations:
            loc["itemName"] = self.state["locsData"][loc["Name"]]["itemName"]
            if "accessPoint" in self.state["locsData"][loc["Name"]]:
                loc["accessPoint"] = self.state["locsData"][loc["Name"]]["accessPoint"]
    def getVisitedLocations(self, visitedLocations):
        # need to keep the order (for cancelation)
        ret = {}
        i = 0
        for loc in visitedLocations:
            diff = loc["difficulty"]
            ret[loc["Name"]] = {"index": i, "difficulty": (diff.bool, diff.difficulty, diff.knows, diff.items)}
            i += 1
        return ret
    def setLocations(self, visitedLocations, availableLocations, locations):
        # Rebuild (visited, major) location lists from the saved dicts;
        # visited order is recovered from the stored per-loc index.
        retVis = []
        retMaj = []
        for loc in locations:
            if loc["Name"] in visitedLocations:
                # visitedLocations contains an index
                diff = visitedLocations[loc["Name"]]["difficulty"]
                loc["difficulty"] = SMBool(diff[0], diff[1], diff[2], diff[3])
                retVis.append((visitedLocations[loc["Name"]]["index"], loc))
            else:
                if loc["Name"] in availableLocations:
                    diff = availableLocations[loc["Name"]]
                    loc["difficulty"] = SMBool(diff[0], diff[1], diff[2], diff[3])
                retMaj.append(loc)
        retVis.sort(key=lambda x: x[0])
        return ([loc for (i, loc) in retVis], retMaj)
    def diff4isolver(self, difficulty):
        # Map a numeric difficulty to the web UI's bucket name.
        if difficulty == -1:
            return "break"
        elif difficulty < medium:
            return "easy"
        elif difficulty < hard:
            return "medium"
        elif difficulty < harder:
            return "hard"
        elif difficulty < hardcore:
            return "harder"
        elif difficulty < mania:
            return "hardcore"
        else:
            return "mania"
    def locName4isolver(self, locName):
        # remove space and special characters
        # sed -e 's+ ++g' -e 's+,++g' -e 's+(++g' -e 's+)++g' -e 's+-++g'
        return locName.translate(None, " ,()-")
    def knows2isolver(self, knows):
        # Translate internal technique names to their display names.
        result = []
        for know in knows:
            if know in Knows.desc:
                result.append(Knows.desc[know]['display'])
            else:
                result.append(know)
        return list(set(result))
    def getAvailableLocationsWeb(self, locations):
        # Web view of reachable locations (difficulty bucket, techniques,
        # items required, item present, original name, optional comeBack).
        ret = {}
        for loc in locations:
            if "difficulty" in loc and loc["difficulty"].bool == True:
                diff = loc["difficulty"]
                locName = self.locName4isolver(loc["Name"])
                ret[locName] = {"difficulty": self.diff4isolver(diff.difficulty),
                                "knows": self.knows2isolver(diff.knows),
                                "items": list(set(diff.items)),
                                "item": loc["itemName"],
                                "name": loc["Name"]}
                if "comeBack" in loc:
                    ret[locName]["comeBack"] = loc["comeBack"]
        return ret
    def getRemainLocationsWeb(self, locations):
        # Web view of not-yet-reachable locations ("Sequence Break").
        ret = {}
        for loc in locations:
            if "difficulty" not in loc or ("difficulty" in loc and loc["difficulty"].bool == False):
                locName = self.locName4isolver(loc["Name"])
                ret[locName] = {"item": loc["itemName"],
                                "name": loc["Name"],
                                "knows": ["Sequence Break"],
                                "items": []}
        return ret
    def getAvailableLocations(self, locations):
        # Compact (bool, difficulty, knows, items) tuples for reachable locs.
        ret = {}
        for loc in locations:
            if "difficulty" in loc and loc["difficulty"].bool == True:
                diff = loc["difficulty"]
                ret[loc["Name"]] = (diff.bool, diff.difficulty, diff.knows, diff.items)
        return ret
    def setPatches(self, patchesData):
        # json's dicts keys are strings
        ret = {}
        for address in patchesData:
            ret[int(address)] = patchesData[address]
        return ret
    def fromJson(self, stateJsonFileName):
        # Load a previously dumped state dict from disk.
        with open(stateJsonFileName, 'r') as jsonFile:
            self.state = json.load(jsonFile)
        # print("Loaded Json State:")
        # for key in self.state:
        #     if key in ["availableLocationsWeb", "visitedLocationsWeb", "collectedItems", "visitedLocations"]:
        #         print("{}: {}".format(key, self.state[key]))
        # print("")
    def toJson(self, outputFileName):
        # Persist the state dict to disk as JSON.
        with open(outputFileName, 'w') as jsonFile:
            json.dump(self.state, jsonFile)
        # print("Dumped Json State:")
        # for key in self.state:
        #     if key in ["availableLocationsWeb", "visitedLocationsWeb", "collectedItems", "visitedLocations"]:
        #         print("{}: {}".format(key, self.state[key]))
        # print("")
class CommonSolver(object):
    """Logic shared by solver front-ends: ROM/preset loading,
    location-difficulty computation and item collection.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; statement nesting follows the most plausible reading.
    """
    def loadRom(self, rom, interactive=False):
        # Load the ROM, assign items to locations and read its patches.
        self.romFileName = rom
        self.romLoader = RomLoader.factory(rom)
        self.fullRando = self.romLoader.assignItems(self.locations)
        self.areaRando = self.romLoader.loadPatches()
        # interactive mode keeps the raw patches so they can be serialized
        if interactive == False:
            self.patches = self.romLoader.getPatches()
        else:
            self.patches = self.romLoader.getRawPatches()
        print("ROM {} full: {} area: {} patches: {}".format(rom, self.fullRando,
                                                            self.areaRando, self.patches))
        # a ROM may carry custom transitions; otherwise use vanilla ones
        self.graphTransitions = self.romLoader.getTransitions()
        if self.graphTransitions is None:
            self.graphTransitions = vanillaTransitions
        self.areaGraph = AccessGraph(accessPoints, self.graphTransitions)
        if self.log.getEffectiveLevel() == logging.DEBUG:
            self.log.debug("Display items at locations:")
            for location in self.locations:
                self.log.debug('{:>50}: {:>16}'.format(location["Name"], location['itemName']))
    def loadPreset(self, presetFileName):
        # Load the player-skill preset and rebuild the knows* functions.
        presetLoader = PresetLoader.factory(presetFileName)
        presetLoader.load()
        self.smbm.createKnowsFunctions()
        if self.log.getEffectiveLevel() == logging.DEBUG:
            presetLoader.printToScreen()
    def computeLocationsDifficulty(self, locations):
        """Annotate each location with its access difficulty from the
        current access point, honoring PostAvailable constraints and
        whether we can come back afterwards."""
        self.areaGraph.getAvailableLocations(locations, self.smbm, infinity, self.lastLoc)
        # check post available functions too
        for loc in locations:
            if 'PostAvailable' in loc:
                # temporarily grant the item to evaluate the post condition
                self.smbm.addItem(loc['itemName'])
                postAvailable = loc['PostAvailable'](self.smbm)
                self.smbm.removeItem(loc['itemName'])
                loc['difficulty'] = self.smbm.wand(loc['difficulty'], postAvailable)
            # also check if we can come back to landing site from the location
            if loc['difficulty'].bool == True:
                loc['comeBack'] = self.areaGraph.canAccess(self.smbm, loc['accessPoint'], self.lastLoc, infinity, loc['itemName'])
        if self.log.getEffectiveLevel() == logging.DEBUG:
            self.log.debug("available locs:")
            for loc in locations:
                if loc['difficulty'].bool == True:
                    self.log.debug("{}: {}".format(loc['Name'], loc['difficulty']))
    def collectMajor(self, loc):
        # Move loc from the major pool to visited and pick up its item.
        self.majorLocations.remove(loc)
        self.visitedLocations.append(loc)
        area = self.collectItem(loc)
        return area
    def collectMinor(self, loc):
        # Move loc from the minor pool to visited and pick up its item.
        self.minorLocations.remove(loc)
        self.visitedLocations.append(loc)
        area = self.collectItem(loc)
        return area
    def collectItem(self, loc):
        """Collect the item at loc (honoring Conf.itemsForbidden), update
        solver state and return the location's solve area."""
        item = loc["itemName"]
        if item not in Conf.itemsForbidden:
            self.collectedItems.append(item)
            self.smbm.addItem(item)
        else:
            # update the name of the item
            item = "-{}-".format(item)
            loc["itemName"] = item
            self.collectedItems.append(item)
            # we still need the boss difficulty
            if 'Pickup' not in loc:
                loc["difficulty"] = SMBool(False)
        if 'Pickup' in loc:
            loc['Pickup']()
        if self.firstLogFile is not None:
            if item not in self.collectedItems:
                self.firstLogFile.write("{};{};{};{}\n".format(item, loc['Name'], loc['Area'], loc['GraphArea']))
        self.log.debug("collectItem: {} at {}".format(item, loc['Name']))
        # last loc is used as root node for the graph
        self.lastLoc = loc['accessPoint']
        return loc['SolveArea']
class InteractiveSolver(CommonSolver):
def __init__(self, output):
    # Dedicated solver logger, output path for state dumps; interactive
    # mode never writes a first-item log file.
    self.log = log.get('Solver')
    self.outputFileName = output
    self.firstLogFile = None
def dumpState(self):
state = SolverState()
state.fromSolver(self)
state.toJson(self.outputFileName)
def initialize(self, rom, presetFileName):
# load rom and preset, return first state
self.locations = graphLocations
self.smbm = SMBoolManager()
self.presetFileName = presetFileName
self.loadPreset(self.presetFileName)
self.loadRom(rom, interactive=True)
self.locations = self.addMotherBrainLoc(self.locations)
self.clear()
# compute new available locations
self.computeLocationsDifficulty(self.majorLocations)
self.dumpState()
def iterate(self, stateJson, locName, action):
self.locations = self.addMotherBrainLoc(graphLocations)
self.smbm = SMBoolManager()
state = SolverState()
state.fromJson(stateJson)
state.toSolver(self)
RomLoader.factory(self.patches).loadPatches()
self.loadPreset(self.presetFileName)
self.areaGraph = AccessGraph(accessPoints, self.graphTransitions)
if action == 'clear':
self.clear(True)
else:
# add already collected items to smbm
self.smbm.addItems(self.collectedItems)
if action == 'add':
# pickup item at locName
self.pickItemAt(locName)
elif action == 'remove':
# remove last collected item
self.cancelLast()
# compute new available locations
self.computeLocationsDifficulty(self.majorLocations)
# return them
self.dumpState()
def locNameWeb2Internal(self, locNameWeb):
locs = {
"EnergyTankGauntlet": "Energy Tank, Gauntlet",
"Bomb": "Bomb",
"EnergyTankTerminator": "Energy Tank, Terminator",
"ReserveTankBrinstar": "Reserve Tank, Brinstar",
"ChargeBeam": "Charge Beam",
"MorphingBall": "Morphing Ball",
"EnergyTankBrinstarCeiling": "Energy Tank, Brinstar Ceiling",
"EnergyTankEtecoons": "Energy Tank, Etecoons",
"EnergyTankWaterway": "Energy Tank, Waterway",
"EnergyTankBrinstarGate": "Energy Tank, Brinstar Gate",
"XRayScope": "X-Ray Scope",
"Spazer": "Spazer",
"EnergyTankKraid": "Energy Tank, Kraid",
"VariaSuit": "Varia Suit",
"IceBeam": "Ice Beam",
"EnergyTankCrocomire": "Energy Tank, Crocomire",
"HiJumpBoots": "Hi-Jump Boots",
"GrappleBeam": "Grapple Beam",
"ReserveTankNorfair": "Reserve Tank, Norfair",
"SpeedBooster": "Speed Booster",
"WaveBeam": "Wave Beam",
"EnergyTankRidley": "Energy Tank, Ridley",
"ScrewAttack": "Screw Attack",
"EnergyTankFirefleas": "Energy Tank, Firefleas",
"ReserveTankWreckedShip": "Reserve Tank, Wrecked Ship",
"EnergyTankWreckedShip": "Energy Tank, Wrecked Ship",
"RightSuperWreckedShip": "Right Super, Wrecked Ship",
"GravitySuit": "Gravity Suit",
"EnergyTankMamaturtle": "Energy Tank, Mama turtle",
"PlasmaBeam": "Plasma Beam",
"ReserveTankMaridia": "Reserve Tank, Maridia",
"SpringBall": "Spring Ball",
"EnergyTankBotwoon": "Energy Tank, Botwoon",
"SpaceJump": "Space Jump",
"PowerBombCrateriasurface": "Power Bomb (Crateria surface)",
"MissileoutsideWreckedShipbottom": "Missile (outside Wrecked Ship bottom)",
"MissileoutsideWreckedShiptop": "Missile (outside Wrecked Ship top)",
"MissileoutsideWreckedShipmiddle": "Missile (outside Wrecked Ship middle)",
"MissileCrateriamoat": "Missile (Crateria moat)",
"MissileCrateriabottom": "Missile (Crateria bottom)",
"MissileCrateriagauntletright": "Missile (Crateria gauntlet right)",
"MissileCrateriagauntletleft": "Missile (Crateria gauntlet left)",
"SuperMissileCrateria": "Super Missile (Crateria)",
"MissileCrateriamiddle": "Missile (Crateria middle)",
"PowerBombgreenBrinstarbottom": "Power Bomb (green Brinstar bottom)",
"SuperMissilepinkBrinstar": "Super Missile (pink Brinstar)",
"MissilegreenBrinstarbelowsupermissile": "Missile (green Brinstar below super missile)",
"SuperMissilegreenBrinstartop": "Super Missile (green Brinstar top)",
"MissilegreenBrinstarbehindmissile": "Missile (green Brinstar behind missile)",
"MissilegreenBrinstarbehindreservetank": "Missile (green Brinstar behind reserve tank)",
"MissilepinkBrinstartop": "Missile (pink Brinstar top)",
"MissilepinkBrinstarbottom": "Missile (pink Brinstar bottom)",
"PowerBombpinkBrinstar": "Power Bomb (pink Brinstar)",
"MissilegreenBrinstarpipe": "Missile (green Brinstar pipe)",
"PowerBombblueBrinstar": "Power Bomb (blue Brinstar)",
"MissileblueBrinstarmiddle": "Missile (blue Brinstar middle)",
"SuperMissilegreenBrinstarbottom": "Super Missile (green Brinstar bottom)",
"MissileblueBrinstarbottom": "Missile (blue Brinstar bottom)",
"MissileblueBrinstartop": "Missile (blue Brinstar top)",
"MissileblueBrinstarbehindmissile": "Missile (blue Brinstar behind missile)",
"PowerBombredBrinstarsidehopperroom": "Power Bomb (red Brinstar sidehopper room)",
"PowerBombredBrinstarspikeroom": "Power Bomb (red Brinstar spike room)",
"MissileredBrinstarspikeroom": "Missile (red Brinstar spike room)",
"MissileKraid": "Missile (Kraid)",
"Missilelavaroom": "Missile (lava room)",
"MissilebelowIceBeam": "Missile (below Ice Beam)",
"MissileaboveCrocomire": "Missile (above Crocomire)",
"MissileHiJumpBoots": "Missile (Hi-Jump Boots)",
"EnergyTankHiJumpBoots": "Energy Tank (Hi-Jump Boots)",
"PowerBombCrocomire": "Power Bomb (Crocomire)",
"MissilebelowCrocomire": "Missile (below Crocomire)",
"MissileGrappleBeam": "Missile (Grapple Beam)",
"MissileNorfairReserveTank": "Missile (Norfair Reserve Tank)",
"MissilebubbleNorfairgreendoor": "Missile (bubble Norfair green door)",
"MissilebubbleNorfair": "Missile (bubble Norfair)",
"MissileSpeedBooster": "Missile (Speed Booster)",
"MissileWaveBeam": "Missile (Wave Beam)",
"MissileGoldTorizo": "Missile (Gold Torizo)",
"SuperMissileGoldTorizo": "Super Missile (Gold Torizo)",
"MissileMickeyMouseroom": "Missile (Mickey Mouse room)",
"MissilelowerNorfairabovefireflearoom": "Missile (lower Norfair above fire flea room)",
"PowerBomblowerNorfairabovefireflearoom": "Power Bomb (lower Norfair above fire flea room)",
"PowerBombPowerBombsofshame": "Power Bomb (Power Bombs of shame)",
"MissilelowerNorfairnearWaveBeam": "Missile (lower Norfair near Wave Beam)",
"MissileWreckedShipmiddle": "Missile (Wrecked Ship middle)",
"MissileGravitySuit": "Missile (Gravity Suit)",
"MissileWreckedShiptop": "Missile (Wrecked Ship top)",
"SuperMissileWreckedShipleft": "Super Missile (Wrecked Ship left)",
"MissilegreenMaridiashinespark": "Missile (green Maridia shinespark)",
"SuperMissilegreenMaridia": "Super Missile (green Maridia)",
"MissilegreenMaridiatatori": "Missile (green Maridia tatori)",
"SuperMissileyellowMaridia": "Super Missile (yellow Maridia)",
"MissileyellowMaridiasupermissile": "Missile (yellow Maridia super missile)",
"MissileyellowMaridiafalsewall": "Missile (yellow Maridia false wall)",
"MissileleftMaridiasandpitroom": "Missile (left Maridia sand pit room)",
"MissilerightMaridiasandpitroom": "Missile (right Maridia sand pit room)",
"PowerBombrightMaridiasandpitroom": "Power Bomb (right Maridia sand pit room)",
"MissilepinkMaridia": "Missile (pink Maridia)",
"SuperMissilepinkMaridia": "Super Missile (pink Maridia)",
"MissileDraygon": "Missile (Draygon)",
"MotherBrain": "Mother Brain"
}
return locs[locNameWeb]
def getLoc(self, locNameWeb):
locName = self.locNameWeb2Internal(locNameWeb)
for loc in self.majorLocations:
if loc["Name"] == locName:
return loc
raise Exception("Location '{}' not found in remaining locations".format(locName))
def pickItemAt(self, locName):
# collect new item at newLoc
loc = self.getLoc(locName)
if "difficulty" not in loc:
# sequence break
loc["difficulty"] = SMBool(True, -1)
# take first ap of the loc
loc["accessPoint"] = loc["AccessFrom"].keys()[0]
self.collectMajor(loc)
def cancelLast(self):
# loc
if len(self.visitedLocations) == 0:
return
loc = self.visitedLocations.pop()
self.majorLocations.append(loc)
# pickup func
if 'Unpickup' in loc:
loc['Unpickup']()
# access point
if len(self.visitedLocations) == 0:
self.lastLoc = "Landing Site"
else:
self.lastLoc = self.visitedLocations[-1]["accessPoint"]
# item
item = loc["itemName"]
if item != self.collectedItems[-1]:
raise Exception("Item of last collected loc {}: {} is different from last collected item: {}".format(loc["Name"], item, self.collectedItems[-1]))
self.smbm.removeItem(item)
self.collectedItems.pop()
def clear(self, reload=False):
self.collectedItems = []
self.visitedLocations = []
self.lastLoc = 'Landing Site'
self.majorLocations = self.locations
if reload == True:
for loc in self.majorLocations:
if "difficulty" in loc:
del loc["difficulty"]
Bosses.reset()
self.smbm.resetItems()
def addMotherBrainLoc(self, locations):
# in the interactive solver mother brain is a new loc
locations.append({
'Area': "Tourian",
'GraphArea': "Tourian",
'SolveArea': "Tourian",
'Name': "Mother Brain",
'Visibility': "Visible",
'Room': 'Mother Brain Room',
'itemName': "Nothing",
'AccessFrom' : {
'Statues Hallway Left': lambda sm: SMBool(True)
},
'Available': lambda sm: sm.wand(Bosses.allBossesDead(sm), sm.enoughStuffTourian())
})
return locations
class StandardSolver(CommonSolver):
    """One-shot solver: given a rom and parameters, estimate the seed difficulty."""
    # given a rom and parameters returns the estimated difficulty
    def __init__(self, rom, presetFileName, difficultyTarget, pickupStrategy, itemsForbidden=None, type='console', firstItemsLog=None, displayGeneratedPath=False, outputFileName=None):
        # None sentinel instead of a mutable [] default: a shared default
        # list would leak state between instantiations.
        if itemsForbidden is None:
            itemsForbidden = []
        self.log = log.get('Solver')
        self.setConf(difficultyTarget, pickupStrategy, itemsForbidden, displayGeneratedPath)
        self.firstLogFile = None
        if firstItemsLog is not None:
            self.firstLogFile = open(firstItemsLog, 'w')
            # header must match the four fields written by collectItem()
            self.firstLogFile.write('Item;Location;Area;GraphArea\n')
        # can be called from command line (console) or from web site (web)
        self.type = type
        self.output = Out.factory(self.type, self)
        self.outputFileName = outputFileName
        self.locations = graphLocations
        self.smbm = SMBoolManager()
        self.presetFileName = presetFileName
        self.loadPreset(self.presetFileName)
        self.loadRom(rom)
        self.pickup = Pickup(Conf.itemsPickup)
    def setConf(self, difficultyTarget, pickupStrategy, itemsForbidden, displayGeneratedPath):
        """Store the run parameters into the global Conf."""
        Conf.difficultyTarget = difficultyTarget
        Conf.itemsPickup = pickupStrategy
        Conf.displayGeneratedPath = displayGeneratedPath
        Conf.itemsForbidden = itemsForbidden
    def solveRom(self):
        """Solve the loaded rom and emit the result through the output writer."""
        self.lastLoc = 'Landing Site'
        (self.difficulty, self.itemsOk) = self.computeDifficulty()
        if self.firstLogFile is not None:
            self.firstLogFile.close()
        (self.knowsUsed, self.knowsKnown) = self.getKnowsUsed()
        self.output.out()
    def getRemainMajors(self):
        """Major locations holding a real item which are still unreachable."""
        return [loc for loc in self.majorLocations if loc['difficulty'].bool == False and loc['itemName'] not in ['Nothing', 'NoEnergy']]
    def getRemainMinors(self):
        """Minor locations holding a real item which are still unreachable (None in full rando)."""
        if self.fullRando == True:
            return None
        else:
            return [loc for loc in self.minorLocations if loc['difficulty'].bool == False and loc['itemName'] not in ['Nothing', 'NoEnergy']]
    def getSkippedMajors(self):
        """Major locations which were reachable but deliberately not picked up."""
        return [loc for loc in self.majorLocations if loc['difficulty'].bool == True and loc['itemName'] not in ['Nothing', 'NoEnergy']]
    def getUnavailMajors(self):
        # NOTE(review): currently identical to getRemainMajors(); kept as a
        # separate method because the two are used in different reports.
        return [loc for loc in self.majorLocations if loc['difficulty'].bool == False and loc['itemName'] not in ['Nothing', 'NoEnergy']]
    def getDiffThreshold(self):
        """Return the upper bound of the difficulty band containing the target."""
        target = Conf.difficultyTarget
        threshold = target
        epsilon = 0.001
        if target <= easy:
            threshold = medium - epsilon
        elif target <= medium:
            threshold = hard - epsilon
        elif target <= hard:
            threshold = harder - epsilon
        elif target <= harder:
            threshold = hardcore - epsilon
        elif target <= hardcore:
            threshold = mania - epsilon
        return threshold
    def computeDifficulty(self):
        """Main solve loop; returns (difficulty, itemsOk)."""
        # loop on the available locations depending on the collected items.
        # before getting a new item, loop on all of them and get their difficulty,
        # the next collected item is the one with the smallest difficulty,
        # if equality between major and minor, take major first.
        if not self.fullRando:
            self.majorLocations = [loc for loc in self.locations if loc["Class"] == "Major"]
            self.minorLocations = [loc for loc in self.locations if loc["Class"] == "Minor"]
        else:
            # in full rando the same list serves as both majors and minors
            self.majorLocations = self.locations[:] # copy
            self.minorLocations = self.majorLocations
        self.visitedLocations = []
        self.collectedItems = []
        # with the knowsXXX conditions some roms can be unbeatable, so we have to detect it
        previous = -1
        current = 0
        self.log.debug("{}: available major: {}, available minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
        isEndPossible = False
        endDifficulty = mania
        area = 'Crateria Landing Site'
        diffThreshold = self.getDiffThreshold()
        while True:
            # actual while condition
            hasEnoughMinors = self.pickup.enoughMinors(self.smbm, self.minorLocations)
            hasEnoughMajors = self.pickup.enoughMajors(self.smbm, self.majorLocations)
            hasEnoughItems = hasEnoughMajors and hasEnoughMinors
            canEndGame = self.canEndGame()
            (isEndPossible, endDifficulty) = (canEndGame.bool, canEndGame.difficulty)
            if isEndPossible and hasEnoughItems and endDifficulty <= diffThreshold:
                self.log.debug("END")
                break
            #self.log.debug(str(self.collectedItems))
            self.log.debug("Current Area : " + area)
            # check if we have collected an item in the last loop
            current = len(self.collectedItems)
            if current == previous:
                if not isEndPossible:
                    self.log.debug("STUCK ALL")
                else:
                    self.log.debug("HARD END")
                break
            previous = current
            # compute the difficulty of all the locations
            self.computeLocationsDifficulty(self.majorLocations)
            if self.fullRando == False:
                self.computeLocationsDifficulty(self.minorLocations)
            # keep only the available locations
            majorsAvailable = [loc for loc in self.majorLocations if 'difficulty' in loc and loc["difficulty"].bool == True]
            minorsAvailable = [loc for loc in self.minorLocations if 'difficulty' in loc and loc["difficulty"].bool == True]
            # check if we're stuck
            if len(majorsAvailable) == 0 and len(minorsAvailable) == 0:
                if not isEndPossible:
                    self.log.debug("STUCK MAJORS and MINORS")
                else:
                    self.log.debug("HARD END")
                break
            # sort them on difficulty and proximity
            majorsAvailable = self.getAvailableItemsList(majorsAvailable, area, diffThreshold)
            if self.fullRando == True:
                minorsAvailable = majorsAvailable
            else:
                minorsAvailable = self.getAvailableItemsList(minorsAvailable, area, diffThreshold)
            # choose one to pick up
            area = self.nextDecision(majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold, area)
        # main loop end
        if isEndPossible:
            # add a pseudo-location for the endgame so it shows in the path
            self.visitedLocations.append({
                'item' : 'The End',
                'itemName' : 'The End',
                'Name' : 'The End',
                'Area' : 'The End',
                'SolveArea' : 'The End',
                'Room': 'Mother Brain Room',
                'distance': 0,
                'difficulty' : SMBool(True, endDifficulty)
            })
        # compute difficulty value
        (difficulty, itemsOk) = self.computeDifficultyValue()
        self.log.debug("difficulty={}".format(difficulty))
        self.log.debug("itemsOk={}".format(itemsOk))
        self.log.debug("{}: remaining major: {}, remaining minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
        self.log.debug("remaining majors:")
        for loc in self.majorLocations:
            self.log.debug("{} ({})".format(loc['Name'], loc['itemName']))
        self.log.debug("bosses: {}".format(Bosses.golden4Dead))
        return (difficulty, itemsOk)
    def handleNoComeBack(self, locations):
        """If every available location has comeBack == False, weight each by
        1/(number of locs in its graph area) so the solver empties one area
        before committing to another. Returns True when weights were set."""
        # check if all the available locations have the no come back flag
        # if so add a new parameter with the number of locations in each graph area
        graphLocs = {}
        for loc in locations:
            if "comeBack" not in loc:
                return False
            if loc["comeBack"] == True:
                return False
            if loc["GraphArea"] in graphLocs:
                graphLocs[loc["GraphArea"]] += 1
            else:
                graphLocs[loc["GraphArea"]] = 1
        for graphLoc in graphLocs:
            graphLocs[graphLoc] = 1.0/graphLocs[graphLoc]
        for loc in locations:
            loc["areaWeight"] = graphLocs[loc["GraphArea"]]
        if len(graphLocs) > 1:
            print("WARNING: use no come back heuristic")
        return True
    def getAvailableItemsList(self, locations, area, threshold):
        """Sort the available locations, most desirable first.

        'around' holds the near/safe locations (same solve area or close,
        within the difficulty threshold, boss alive, come-back possible),
        everything else goes to 'outside'; each group is sorted on its own
        criteria and the concatenation is returned.
        """
        # locations without distance are not available
        locations = [loc for loc in locations if 'distance' in loc]
        cleanAreaWeight = self.handleNoComeBack(locations)
        around = [loc for loc in locations if (loc['SolveArea'] == area or loc['distance'] < 3) and loc['difficulty'].difficulty <= threshold and not Bosses.areaBossDead(area) and 'comeBack' in loc and loc['comeBack'] == True]
        # pickup action means beating a boss, so do that first if possible
        around.sort(key=lambda loc: (loc["areaWeight"] if "areaWeight" in loc
                                     else 0,
                                     0 if 'Pickup' in loc
                                     else 1,
                                     0 if 'comeBack' in loc and loc['comeBack'] == True
                                     else 1,
                                     0 if loc['SolveArea'] == area and loc['difficulty'].difficulty <= threshold
                                     else 1,
                                     loc['distance'] if loc['difficulty'].difficulty <= threshold
                                     else 100000,
                                     loc['difficulty'].difficulty))
        outside = [loc for loc in locations if not loc in around]
        self.log.debug("around1 = " + str([(loc['Name'], loc['difficulty'], loc['distance'], loc['comeBack'], loc['SolveArea']) for loc in around]))
        self.log.debug("outside1 = " + str([(loc['Name'], loc['difficulty'], loc['distance'], loc['comeBack'], loc['SolveArea']) for loc in outside]))
        # we want to sort the outside locations by putting the ones is the same
        # area first if we don't have enough items,
        # then we sort the remaining areas starting whith boss dead status
        outside.sort(key=lambda loc: (loc["areaWeight"] if "areaWeight" in loc
                                      else 0,
                                      0 if loc['SolveArea'] == area and loc['difficulty'].difficulty <= threshold
                                      else 1,
                                      0 if 'comeBack' in loc and loc['comeBack'] == True
                                      else 1,
                                      loc['distance'] if loc['difficulty'].difficulty <= threshold
                                      else 100000,
                                      loc['difficulty'].difficulty if not Bosses.areaBossDead(loc['Area'])
                                      and loc['difficulty'].difficulty <= threshold
                                      and 'Pickup' in loc
                                      else 100000,
                                      loc['difficulty'].difficulty if not Bosses.areaBossDead(loc['Area'])
                                      and loc['difficulty'].difficulty <= threshold
                                      else 100000,
                                      loc['difficulty'].difficulty))
        self.log.debug("around2 = " + str([(loc['Name'], loc['difficulty'], loc['distance'], loc['comeBack'], loc['SolveArea']) for loc in around]))
        self.log.debug("outside2 = " + str([(loc['Name'], loc['difficulty'], loc['distance'], loc['comeBack'], loc['SolveArea']) for loc in outside]))
        if cleanAreaWeight == True:
            for loc in locations:
                del loc["areaWeight"]
        return around + outside
    def nextDecision(self, majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold, area):
        """Pick up the next location (major or minor) and return its solve area."""
        # first take major items of acceptable difficulty in the current area
        if (len(majorsAvailable) > 0
            and majorsAvailable[0]['SolveArea'] == area
            and majorsAvailable[0]['difficulty'].difficulty <= diffThreshold
            and majorsAvailable[0]['comeBack'] == True):
            return self.collectMajor(majorsAvailable.pop(0))
        # next item decision
        if len(minorsAvailable) == 0 and len(majorsAvailable) > 0:
            self.log.debug('MAJOR')
            return self.collectMajor(majorsAvailable.pop(0))
        elif len(majorsAvailable) == 0 and len(minorsAvailable) > 0:
            # we don't check for hasEnoughMinors here, because we would be stuck, so pickup
            # what we can and hope it gets better
            self.log.debug('MINOR')
            return self.collectMinor(minorsAvailable.pop(0))
        elif len(majorsAvailable) > 0 and len(minorsAvailable) > 0:
            self.log.debug('BOTH|M=' + majorsAvailable[0]['Name'] + ', m=' + minorsAvailable[0]['Name'])
            # if both are available, decide based on area, difficulty and comeBack
            nextMajDifficulty = majorsAvailable[0]['difficulty'].difficulty
            nextMinArea = minorsAvailable[0]['SolveArea']
            nextMinDifficulty = minorsAvailable[0]['difficulty'].difficulty
            nextMajComeBack = majorsAvailable[0]['comeBack']
            nextMinComeBack = minorsAvailable[0]['comeBack']
            nextMajDistance = majorsAvailable[0]['distance']
            nextMinDistance = minorsAvailable[0]['distance']
            self.log.debug("diff area back dist - diff area back dist")
            self.log.debug("maj: {} '{}' {} {}, min: {} '{}' {} {}".format(nextMajDifficulty, majorsAvailable[0]['SolveArea'], nextMajComeBack, nextMajDistance, nextMinDifficulty, nextMinArea, nextMinComeBack, nextMinDistance))
            if hasEnoughMinors == True and self.haveAllMinorTypes() == True and self.smbm.haveItem('Charge'):
                # we have charge, no longer need minors
                return self.collectMajor(majorsAvailable.pop(0))
            else:
                # first take item from loc where you can come back
                if nextMajComeBack != nextMinComeBack:
                    self.log.debug("!= combeback")
                    if nextMajComeBack == True:
                        return self.collectMajor(majorsAvailable.pop(0))
                    else:
                        return self.collectMinor(minorsAvailable.pop(0))
                # if not all the minors type are collected, start with minors
                elif nextMinDifficulty <= diffThreshold and not self.haveAllMinorTypes():
                    self.log.debug("not all minors types")
                    return self.collectMinor(minorsAvailable.pop(0))
                elif nextMinArea == area and nextMinDifficulty <= diffThreshold:
                    self.log.debug("not enough minors")
                    return self.collectMinor(minorsAvailable.pop(0))
                # difficulty over area (this is a difficulty estimator, not a speedrunning simulator).
                # compare the major's *difficulty* (not its distance) against the
                # difficulty threshold, mirroring the minor side of the condition.
                elif nextMinDifficulty <= diffThreshold and nextMajDifficulty <= diffThreshold:
                    # take the closer one
                    if nextMajDistance != nextMinDistance:
                        self.log.debug("!= distance")
                        if nextMajDistance < nextMinDistance:
                            return self.collectMajor(majorsAvailable.pop(0))
                        else:
                            return self.collectMinor(minorsAvailable.pop(0))
                    # take the easier
                    elif nextMinDifficulty < nextMajDifficulty:
                        self.log.debug("min easier and not enough minors")
                        return self.collectMinor(minorsAvailable.pop(0))
                    elif nextMajDifficulty < nextMinDifficulty:
                        self.log.debug("maj easier")
                        return self.collectMajor(majorsAvailable.pop(0))
                    # same difficulty and distance for minor and major, take major first
                    else:
                        return self.collectMajor(majorsAvailable.pop(0))
                elif nextMinDifficulty > diffThreshold and nextMajDifficulty > diffThreshold:
                    # take the easier
                    if nextMinDifficulty < nextMajDifficulty:
                        self.log.debug("min easier and not enough minors")
                        return self.collectMinor(minorsAvailable.pop(0))
                    elif nextMajDifficulty < nextMinDifficulty:
                        self.log.debug("maj easier")
                        return self.collectMajor(majorsAvailable.pop(0))
                    # take the closer one
                    elif nextMajDistance != nextMinDistance:
                        self.log.debug("!= distance")
                        if nextMajDistance < nextMinDistance:
                            return self.collectMajor(majorsAvailable.pop(0))
                        else:
                            return self.collectMinor(minorsAvailable.pop(0))
                    # same difficulty and distance for minor and major, take major first
                    else:
                        return self.collectMajor(majorsAvailable.pop(0))
                else:
                    if nextMinDifficulty < nextMajDifficulty:
                        self.log.debug("min easier and not enough minors")
                        return self.collectMinor(minorsAvailable.pop(0))
                    else:
                        self.log.debug("maj easier")
                        return self.collectMajor(majorsAvailable.pop(0))
        raise Exception("Can't take a decision")
    def computeDifficultyValue(self):
        """Return (maxDifficulty, allRequestedItemsTaken), or (-1, False) on abort."""
        if not self.canEndGame().bool:
            # we have aborted
            return (-1, False)
        else:
            # return the maximum difficulty
            difficultyMax = 0
            for loc in self.visitedLocations:
                difficultyMax = max(difficultyMax, loc['difficulty'].difficulty)
            difficulty = difficultyMax
            # check if we have taken all the requested items
            if (self.pickup.enoughMinors(self.smbm, self.minorLocations)
                and self.pickup.enoughMajors(self.smbm, self.majorLocations)):
                return (difficulty, True)
            else:
                # can finish but can't take all the requested items
                return (difficulty, False)
    def getKnowsUsed(self):
        """Return (number of distinct knows used on the path, number of knows known)."""
        knowsUsed = []
        for loc in self.visitedLocations:
            knowsUsed += loc['difficulty'].knows
        # get unique knows
        knowsUsed = len(list(set(knowsUsed)))
        # get total of known knows
        knowsKnown = len([knows for knows in Knows.__dict__ if isKnows(knows) and getattr(Knows, knows)[0] == True])
        knowsKnown += len([hellRun for hellRun in Settings.hellRuns if Settings.hellRuns[hellRun] is not None])
        return (knowsUsed, knowsKnown)
    def tryRemainingLocs(self):
        # use preset which knows every techniques to test the remaining locs to
        # find which technique could allow to continue the seed
        locations = self.majorLocations if self.fullRando == True else self.majorLocations + self.minorLocations
        presetFileName = os.path.expanduser('~/RandomMetroidSolver/standard_presets/solution.json')
        presetLoader = PresetLoader.factory(presetFileName)
        presetLoader.load()
        self.smbm.createKnowsFunctions()
        self.areaGraph.getAvailableLocations(locations, self.smbm, infinity, self.lastLoc)
        return [loc for loc in locations if loc['difficulty'].bool == True]
    def haveAllMinorTypes(self):
        # the first minor of each type can be seen as a major, so check for them first before going to far in zebes
        hasPB = 'PowerBomb' in self.collectedItems
        hasSuper = 'Super' in self.collectedItems
        hasMissile = 'Missile' in self.collectedItems
        return (hasPB and hasSuper and hasMissile)
    def canEndGame(self):
        # to finish the game you must :
        # - beat golden 4 : we force pickup of the 4 items
        #   behind the bosses to ensure that
        # - defeat metroids
        # - destroy/skip the zebetites
        # - beat Mother Brain
        return self.smbm.wand(Bosses.allBossesDead(self.smbm), self.smbm.enoughStuffTourian())
class Out(object):
    """Factory for the solver output writers."""
    @staticmethod
    def factory(output, solver):
        """Return the writer matching output ('web' or 'console'); raise otherwise."""
        if output == 'web':
            return OutWeb(solver)
        if output == 'console':
            return OutConsole(solver)
        raise Exception("Wrong output type for the Solver: {}".format(output))
class OutWeb:
    """Solver output writer for the web site: dumps the result as JSON."""
    def __init__(self, solver):
        self.solver = solver
    def out(self):
        """Write the solver result (path, stats, optional area graph png) to the output JSON file."""
        s = self.solver
        if s.areaRando == True:
            # area rando: render the transitions graph as a png + thumbnail
            dotFileName = os.path.basename(os.path.splitext(s.romFileName)[0])+'.json'
            dotFileName = os.path.join(os.path.expanduser('~/web2py/applications/solver/static/graph'), dotFileName)
            s.areaGraph.toDot(dotFileName)
            (pngFileName, pngThumbFileName) = self.generatePng(dotFileName)
            if pngFileName is not None and pngThumbFileName is not None:
                pngFileName = os.path.basename(pngFileName)
                pngThumbFileName = os.path.basename(pngThumbFileName)
        else:
            pngFileName = None
            pngThumbFileName = None
        randomizedRom = os.path.basename(os.path.splitext(s.romFileName)[0])+'.sfc'
        diffPercent = DifficultyDisplayer(s.difficulty).percent()
        generatedPath = self.getPath(s.visitedLocations)
        collectedItems = s.smbm.getItems()
        if s.difficulty == -1:
            # aborted run: report what remains and what could have unlocked it
            remainTry = self.getPath(s.tryRemainingLocs())
            remainMajors = self.getPath(s.getRemainMajors())
            remainMinors = self.getPath(s.getRemainMinors())
            skippedMajors = None
            unavailMajors = None
        else:
            remainTry = None
            remainMajors = None
            remainMinors = None
            skippedMajors = self.getPath(s.getSkippedMajors())
            unavailMajors = self.getPath(s.getUnavailMajors())
        result = dict(randomizedRom=randomizedRom, difficulty=s.difficulty,
                      generatedPath=generatedPath, diffPercent=diffPercent,
                      knowsUsed=(s.knowsUsed, s.knowsKnown), itemsOk=s.itemsOk, patches=s.patches,
                      pngFileName=pngFileName, pngThumbFileName=pngThumbFileName,
                      remainTry=remainTry, remainMajors=remainMajors, remainMinors=remainMinors,
                      skippedMajors=skippedMajors, unavailMajors=unavailMajors,
                      collectedItems=collectedItems)
        with open(s.outputFileName, 'w') as jsonFile:
            json.dump(result, jsonFile)
    def getPath(self, locations):
        """Convert a list of locations into JSON-friendly rows (or None)."""
        if locations is None:
            return None
        out = []
        for loc in locations:
            out.append([(loc['Name'], loc['Room']), loc['Area'], loc['SolveArea'], loc['itemName'],
                        '{0:.2f}'.format(loc['difficulty'].difficulty),
                        ', '.join(sorted(loc['difficulty'].knows)),
                        ', '.join(sorted(list(set(loc['difficulty'].items)))),
                        [ap.Name for ap in loc['path']] if 'path' in loc else None])
        return out
    def generatePng(self, dotFileName):
        """Render dotFileName to png + thumbnail; returns (png, thumb) or (None, None) on failure."""
        # use dot to generate the graph's image .png
        # use convert to generate the thumbnail
        # dotFileName: the /directory/image.dot
        # the png and thumbnails are generated in the same directory as the dot
        splited = os.path.splitext(dotFileName)
        pngFileName = splited[0] + '.png'
        pngThumbFileName = splited[0] + '_thumbnail.png'
        # dot -Tpng VARIA_Randomizer_AFX5399_noob.dot -oVARIA_Randomizer_AFX5399_noob.png
        params = ['dot', '-Tpng', dotFileName, '-o'+pngFileName]
        ret = subprocess.call(params)
        if ret != 0:
            print("Error calling dot {}: {}".format(params, ret))
            return (None, None)
        params = ['convert', pngFileName, '-resize', '1024', pngThumbFileName]
        ret = subprocess.call(params)
        if ret != 0:
            print("Error calling convert {}: {}".format(params, ret))
            # don't leave a png without its thumbnail behind
            os.remove(pngFileName)
            return (None, None)
        return (pngFileName, pngThumbFileName)
class OutConsole:
    """Solver output writer for the command line."""
    def __init__(self, solver):
        self.solver = solver
    def out(self):
        """Print the solver result, then exit: status 0 on solved, 1 on aborted run."""
        s = self.solver
        self.displayOutput()
        print("({}, {}): diff : {}".format(s.difficulty, s.itemsOk, s.romFileName))
        print("{}/{}: knows Used : {}".format(s.knowsUsed, s.knowsKnown, s.romFileName))
        if s.difficulty >= 0:
            sys.exit(0)
        else:
            sys.exit(1)
    def printPath(self, message, locations, displayAPs=True):
        """Pretty-print a list of locations, optionally preceded by their access point path."""
        print("")
        print(message)
        print('{:>50} {:>12} {:>34} {:>8} {:>16} {:>14} {} {}'.format("Location Name", "Area", "Sub Area", "Distance", "Item", "Difficulty", "Knows used", "Items used"))
        print('-'*150)
        lastAP = None
        for loc in locations:
            if displayAPs == True and 'path' in loc:
                path = [ap.Name for ap in loc['path']]
                lastAP = path[-1]
                # skip the path line when it contains a single access point
                if not (len(path) == 1 and path[0] == lastAP):
                    path = " -> ".join(path)
                    print('{:>50}: {}'.format('Path', path))
            # 'nc' (not computed) when the loc has no distance/difficulty yet
            print('{:>50}: {:>12} {:>34} {:>8} {:>16} {:>14} {} {}'.format(loc['Name'],
                                                                           loc['Area'],
                                                                           loc['SolveArea'],
                                                                           loc['distance'] if 'distance' in loc else 'nc',
                                                                           loc['itemName'],
                                                                           round(loc['difficulty'].difficulty, 2) if 'difficulty' in loc else 'nc',
                                                                           sorted(loc['difficulty'].knows) if 'difficulty' in loc else 'nc',
                                                                           list(set(loc['difficulty'].items)) if 'difficulty' in loc else 'nc'))
    def displayOutput(self):
        """Print the generated path, the remaining/skipped locations and the difficulty scale."""
        s = self.solver
        # print generated path
        if Conf.displayGeneratedPath == True:
            self.printPath("Generated path ({}/101):".format(len(s.visitedLocations)), s.visitedLocations)
        # if we've aborted, display missing techniques and remaining locations
        if s.difficulty == -1:
            self.printPath("Next locs which could have been available if more techniques were known:", s.tryRemainingLocs())
            remainMajors = s.getRemainMajors()
            if len(remainMajors) > 0:
                self.printPath("Remaining major locations:", remainMajors, displayAPs=False)
            remainMinors = s.getRemainMinors()
            if remainMinors is not None and len(remainMinors) > 0:
                self.printPath("Remaining minor locations:", remainMinors, displayAPs=False)
        else:
            # if some locs are not picked up display those which are available
            # and those which are not
            skippedMajors = s.getSkippedMajors()
            if len(skippedMajors) > 0:
                self.printPath("Skipped major locations:", skippedMajors, displayAPs=False)
            else:
                print("No skipped major locations")
            unavailMajors = s.getUnavailMajors()
            if len(unavailMajors) > 0:
                self.printPath("Unaccessible major locations:", unavailMajors, displayAPs=False)
            else:
                print("No unaccessible major locations")
        items = s.smbm.getItems()
        print("ETank: {}, Reserve: {}, Missile: {}, Super: {}, PowerBomb: {}".format(items['ETank'], items['Reserve'], items['Missile'], items['Super'], items['PowerBomb']))
        print("Majors: {}".format(sorted([item for item in items if items[item] == True])))
        # display difficulty scale
        self.displayDifficulty(s.difficulty)
    def displayDifficulty(self, difficulty):
        """Print the human readable difficulty scale, or an abort message."""
        if difficulty >= 0:
            text = DifficultyDisplayer(difficulty).scale()
            print("Estimated difficulty: {}".format(text))
        else:
            print("Aborted run, can't finish the game with the given prerequisites")
class DifficultyDisplayer:
    """Convert a numeric difficulty into a human readable scale or a percentage."""
    def __init__(self, difficulty):
        # difficulty: numeric difficulty value (-1 means aborted run)
        self.difficulty = difficulty
    def scale(self):
        """Return an ASCII gauge like 'easy --^-- medium' locating the difficulty."""
        if self.difficulty >= impossibru:
            return "IMPOSSIBRU!"
        else:
            previous = 0
            for d in sorted(diff2text):
                if self.difficulty >= d:
                    previous = d
                else:
                    # difficulty sits between 'previous' and 'd': draw the gauge
                    displayString = diff2text[previous]
                    displayString += ' '
                    scale = d - previous
                    pos = int(self.difficulty - previous)
                    displayString += '-' * pos
                    displayString += '^'
                    displayString += '-' * (scale - pos)
                    displayString += ' '
                    displayString += diff2text[d]
                    break
            # NOTE(review): assumes diff2text contains a key above any
            # difficulty < impossibru; otherwise the loop never breaks and
            # displayString is unbound -- confirm diff2text's contents.
            return displayString
    def percent(self):
        # return the difficulty as a percent
        if self.difficulty == -1:
            return -1
        elif self.difficulty in [0, easy]:
            return 0
        elif self.difficulty >= mania:
            return 100
        # linear interpolation between the two difficulty bands surrounding the value
        difficultiesPercent = {
            easy: 0,
            medium: 20,
            hard: 40,
            harder: 60,
            hardcore: 80,
            mania: 100
        }
        difficulty = self.difficulty
        lower = 0
        percent = 100
        for upper in sorted(diff2text):
            if self.difficulty >= upper:
                lower = upper
            else:
                # NOTE(review): if difficulty < easy, lower stays 0 which is not a
                # key of difficultiesPercent -- presumably ruled out by the guards
                # above; confirm against the easy constant's value.
                lowerPercent = difficultiesPercent[lower]
                upperPercent = difficultiesPercent[upper]
                a = (upperPercent-lowerPercent)/float(upper-lower)
                b = lowerPercent - a * lower
                percent = int(difficulty * a + b)
                break
        return percent
def interactiveSolver(args):
    """Entry point for the web site's step-by-step solver.

    init:    requires interactive/romFileName/presetFileName/output parameters
    iterate: requires interactive/state/loc/action/output parameters

    Exits the process with status 1 on an invalid parameter combination.
    """
    # idiom fix: compare against None with is/is not, not ==/!=
    if args.romFileName is not None and args.presetFileName is not None and args.output is not None:
        # init: load rom + preset and dump the first state
        solver = InteractiveSolver(args.output)
        solver.initialize(args.romFileName, args.presetFileName)
    elif args.state is not None and args.action is not None and args.output is not None:
        # iterate: apply one action on an existing state
        if args.action == "add" and args.loc is None:
            print("Missing loc parameter when using action add")
            sys.exit(1)
        solver = InteractiveSolver(args.output)
        solver.iterate(args.state, args.loc, args.action)
    else:
        print("Wrong parameters for interactive mode")
        sys.exit(1)
def standardSolver(args):
    """Run the non-interactive (console/web one-shot) solver on a rom.

    Falls back on the Conf defaults when difficulty target or pickup
    strategy are not given on the command line.
    """
    if args.romFileName is None:
        print("Parameter --romFileName mandatory when not in interactive mode")
        sys.exit(1)

    # command line values take precedence over the Conf defaults
    difficultyTarget = Conf.difficultyTarget if args.difficultyTarget is None else args.difficultyTarget
    pickupStrategy = Conf.itemsPickup if args.pickupStrategy is None else args.pickupStrategy

    # itemsForbidden is like that: [['Varia'], ['Reserve'], ['Gravity']], fix it
    args.itemsForbidden = [forbidden[0] for forbidden in args.itemsForbidden]

    solver = StandardSolver(args.romFileName, args.presetFileName, difficultyTarget,
                            pickupStrategy, args.itemsForbidden, type=args.type,
                            firstItemsLog=args.firstItemsLog,
                            displayGeneratedPath=args.displayGeneratedPath,
                            outputFileName=args.output)
    solver.solveRom()
if __name__ == "__main__":
    # Command line front-end: parse the arguments then dispatch to either the
    # interactive (web site, one action per invocation) or the standard solver.
    parser = argparse.ArgumentParser(description="Random Metroid Solver")
    parser.add_argument('--romFileName', '-r', help="the input rom", nargs='?',
                        default=None, dest="romFileName")
    parser.add_argument('--preset', '-p', help="the preset file", nargs='?',
                        default=None, dest='presetFileName')
    parser.add_argument('--difficultyTarget', '-t',
                        help="the difficulty target that the solver will aim for",
                        dest='difficultyTarget', nargs='?', default=None, type=int)
    parser.add_argument('--pickupStrategy', '-s', help="Pickup strategy for the Solver",
                        dest='pickupStrategy', nargs='?', default=None,
                        choices=['minimal', 'all', 'any'])
    # note: nargs='+' with action='append' yields a list of lists,
    # flattened later in standardSolver()
    parser.add_argument('--itemsForbidden', '-f', help="Item not picked up during solving",
                        dest='itemsForbidden', nargs='+', default=[], action='append')
    parser.add_argument('--type', '-y', help="web or console", dest='type', nargs='?',
                        default='console', choices=['web', 'console'])
    parser.add_argument('--debug', '-d', help="activate debug logging", dest='debug', action='store_true')
    parser.add_argument('--firstItemsLog', '-1',
                        help="path to file where for each item type the first time it was found and where will be written (spoilers!)",
                        nargs='?', default=None, type=str, dest='firstItemsLog')
    parser.add_argument('--displayGeneratedPath', '-g', help="display the generated path (spoilers!)",
                        dest='displayGeneratedPath', action='store_true')
    # standard/interactive, web site
    parser.add_argument('--output', '-o', help="When called from the website, contains the result of the solver",
                        dest='output', nargs='?', default=None)
    # interactive, web site
    parser.add_argument('--interactive', '-i', help="Activate interactive mode for the solver",
                        dest='interactive', action='store_true')
    parser.add_argument('--state', help="JSON file of the Solver state (used in interactive mode)",
                        dest="state", nargs='?', default=None)
    parser.add_argument('--loc', help="Name of the location to action on (used in interactive mode)",
                        dest="loc", nargs='?', default=None)
    parser.add_argument('--action', help="Pickup item at location, remove last pickedup location, clear all (used in interactive mode)",
                        dest="action", nargs="?", default=None, choices=['init', 'add', 'remove', 'clear', 'get'])
    args = parser.parse_args()

    # default to the regular preset when none is given
    if args.presetFileName is None:
        args.presetFileName = 'standard_presets/regular.json'

    log.init(args.debug)

    if args.interactive == True:
        interactiveSolver(args)
    else:
        standardSolver(args)
solver: always choose locations where you can come back first
#!/usr/bin/python
import sys, math, argparse, re, json, os, subprocess, logging
# the difficulties for each technics
from parameters import Knows, Settings, isKnows, isSettings
from parameters import easy, medium, hard, harder, hardcore, mania, god, samus, impossibru, infinity, diff2text
# the helper functions
from smbool import SMBool
from smboolmanager import SMBoolManager
from helpers import Pickup, Bosses
from rom import RomLoader
from graph_locations import locations as graphLocations
from graph import AccessGraph
from graph_access import vanillaTransitions, accessPoints
from utils import PresetLoader
import log
class Conf:
    """Global solver configuration; defaults here, overridden from the
    command line by StandardSolver.setConf()."""
    # keep getting majors of at most this difficulty before going for minors or changing area
    difficultyTarget = medium
    # display the generated path (spoilers!)
    displayGeneratedPath = False
    # choose how many items are required (possible value: minimal/all/any)
    itemsPickup = 'minimal'
    # the list of items to not pick up
    itemsForbidden = []
class SolverState(object):
    """Serializable snapshot of an InteractiveSolver.

    fromSolver()/toSolver() convert between a solver instance and a plain
    dict (self.state); fromJson()/toJson() persist that dict, which lets the
    web site drive the solver one action at a time across processes.
    """
    def fromSolver(self, solver):
        """Capture the solver's current progress into self.state."""
        self.state = {}
        # bool
        self.state["fullRando"] = solver.fullRando
        # bool
        self.state["areaRando"] = solver.areaRando
        # dict of raw patches
        self.state["patches"] = solver.patches
        # dict {locName: {itemName: "xxx", "accessPoint": "xxx"}, ...}
        self.state["locsData"] = self.getLocsData(solver.locations)
        # list [(ap1, ap2), (ap3, ap4), ...]
        self.state["graphTransitions"] = solver.graphTransitions
        # preset file name
        self.state["presetFileName"] = solver.presetFileName
        ## items collected / locs visited / bosses killed
        # list [item1, item2, ...]
        self.state["collectedItems"] = solver.collectedItems
        # dict {locName: {index: 0, difficulty: (bool, diff, ...), ...} with index being the position of the loc in visitedLocations
        self.state["visitedLocations"] = self.getVisitedLocations(solver.visitedLocations)
        # dict {locName: (bool, diff, [know1, ...], [item1, ...]), ...}
        self.state["availableLocations"] = self.getAvailableLocations(solver.majorLocations)
        # string of last access point
        self.state["lastLoc"] = solver.lastLoc
        # list of killed bosses: ["boss1", "boss2"]
        self.state["bosses"] = [boss for boss in Bosses.golden4Dead if Bosses.golden4Dead[boss] == True]
        # dict {locNameWeb: {infos}, ...}
        self.state["availableLocationsWeb"] = self.getAvailableLocationsWeb(solver.majorLocations)
        # dict {locNameWeb: {infos}, ...}
        self.state["visitedLocationsWeb"] = self.getAvailableLocationsWeb(solver.visitedLocations)
        # dict {locNameWeb: {infos}, ...}
        self.state["remainLocationsWeb"] = self.getRemainLocationsWeb(solver.majorLocations)

    def toSolver(self, solver):
        """Restore a solver instance from self.state (inverse of fromSolver)."""
        solver.fullRando = self.state["fullRando"]
        solver.areaRando = self.state["areaRando"]
        solver.patches = self.setPatches(self.state["patches"])
        self.setLocsData(solver.locations)
        solver.graphTransitions = self.state["graphTransitions"]
        # preset
        solver.presetFileName = self.state["presetFileName"]
        # items collected / locs visited / bosses killed
        solver.collectedItems = self.state["collectedItems"]
        (solver.visitedLocations, solver.majorLocations) = self.setLocations(self.state["visitedLocations"],
                                                                             self.state["availableLocations"],
                                                                             solver.locations)
        solver.lastLoc = self.state["lastLoc"]
        Bosses.reset()
        for boss in self.state["bosses"]:
            Bosses.beatBoss(boss)

    def getLocsData(self, locations):
        """Extract per-location item name (and access point when present)."""
        ret = {}
        for loc in locations:
            ret[loc["Name"]] = {"itemName": loc["itemName"]}
            if "accessPoint" in loc:
                ret[loc["Name"]]["accessPoint"] = loc["accessPoint"]
        return ret

    def setLocsData(self, locations):
        """Write the saved item/access point data back into the locations."""
        for loc in locations:
            loc["itemName"] = self.state["locsData"][loc["Name"]]["itemName"]
            if "accessPoint" in self.state["locsData"][loc["Name"]]:
                loc["accessPoint"] = self.state["locsData"][loc["Name"]]["accessPoint"]

    def getVisitedLocations(self, visitedLocations):
        # need to keep the order (for cancelation)
        ret = {}
        i = 0
        for loc in visitedLocations:
            diff = loc["difficulty"]
            ret[loc["Name"]] = {"index": i, "difficulty": (diff.bool, diff.difficulty, diff.knows, diff.items)}
            i += 1
        return ret

    def setLocations(self, visitedLocations, availableLocations, locations):
        """Split locations into (visited-in-pickup-order, remaining-major) lists,
        rebuilding SMBool difficulties from their serialized tuples."""
        retVis = []
        retMaj = []
        for loc in locations:
            if loc["Name"] in visitedLocations:
                # visitedLocations contains an index
                diff = visitedLocations[loc["Name"]]["difficulty"]
                loc["difficulty"] = SMBool(diff[0], diff[1], diff[2], diff[3])
                retVis.append((visitedLocations[loc["Name"]]["index"], loc))
            else:
                if loc["Name"] in availableLocations:
                    diff = availableLocations[loc["Name"]]
                    loc["difficulty"] = SMBool(diff[0], diff[1], diff[2], diff[3])
                retMaj.append(loc)
        # restore the original pickup order (kept for cancelation)
        retVis.sort(key=lambda x: x[0])
        return ([loc for (i, loc) in retVis], retMaj)

    def diff4isolver(self, difficulty):
        """Map a numeric difficulty to the text bucket used by the web site."""
        if difficulty == -1:
            return "break"
        elif difficulty < medium:
            return "easy"
        elif difficulty < hard:
            return "medium"
        elif difficulty < harder:
            return "hard"
        elif difficulty < hardcore:
            return "harder"
        elif difficulty < mania:
            return "hardcore"
        else:
            return "mania"

    def locName4isolver(self, locName):
        # remove space and special characters
        # sed -e 's+ ++g' -e 's+,++g' -e 's+(++g' -e 's+)++g' -e 's+-++g'
        # NOTE(review): this is the Python 2 form of str.translate; Python 3
        # would need str.maketrans instead.
        return locName.translate(None, " ,()-")

    def knows2isolver(self, knows):
        """Replace internal knows names by their display names, deduplicated."""
        result = []
        for know in knows:
            if know in Knows.desc:
                result.append(Knows.desc[know]['display'])
            else:
                result.append(know)
        return list(set(result))

    def getAvailableLocationsWeb(self, locations):
        """Web-keyed dict of the locations whose difficulty check passed."""
        ret = {}
        for loc in locations:
            if "difficulty" in loc and loc["difficulty"].bool == True:
                diff = loc["difficulty"]
                locName = self.locName4isolver(loc["Name"])
                ret[locName] = {"difficulty": self.diff4isolver(diff.difficulty),
                                "knows": self.knows2isolver(diff.knows),
                                "items": list(set(diff.items)),
                                "item": loc["itemName"],
                                "name": loc["Name"]}
                if "comeBack" in loc:
                    ret[locName]["comeBack"] = loc["comeBack"]
        return ret

    def getRemainLocationsWeb(self, locations):
        """Web-keyed dict of locations that are (so far) only reachable via
        sequence breaks."""
        ret = {}
        for loc in locations:
            if "difficulty" not in loc or ("difficulty" in loc and loc["difficulty"].bool == False):
                locName = self.locName4isolver(loc["Name"])
                ret[locName] = {"item": loc["itemName"],
                                "name": loc["Name"],
                                "knows": ["Sequence Break"],
                                "items": []}
        return ret

    def getAvailableLocations(self, locations):
        """Serialize the SMBool difficulty of each available location."""
        ret = {}
        for loc in locations:
            if "difficulty" in loc and loc["difficulty"].bool == True:
                diff = loc["difficulty"]
                ret[loc["Name"]] = (diff.bool, diff.difficulty, diff.knows, diff.items)
        return ret

    def setPatches(self, patchesData):
        # json's dicts keys are strings
        ret = {}
        for address in patchesData:
            ret[int(address)] = patchesData[address]
        return ret

    def fromJson(self, stateJsonFileName):
        """Load self.state from a json file."""
        with open(stateJsonFileName, 'r') as jsonFile:
            self.state = json.load(jsonFile)
        # print("Loaded Json State:")
        # for key in self.state:
        # if key in ["availableLocationsWeb", "visitedLocationsWeb", "collectedItems", "visitedLocations"]:
        # print("{}: {}".format(key, self.state[key]))
        # print("")

    def toJson(self, outputFileName):
        """Dump self.state to a json file."""
        with open(outputFileName, 'w') as jsonFile:
            json.dump(self.state, jsonFile)
        # print("Dumped Json State:")
        # for key in self.state:
        # if key in ["availableLocationsWeb", "visitedLocationsWeb", "collectedItems", "visitedLocations"]:
        # print("{}: {}".format(key, self.state[key]))
        # print("")
class CommonSolver(object):
    """Plumbing shared by the standard and interactive solvers: rom/preset
    loading, location difficulty computation and item collection."""

    def loadRom(self, rom, interactive=False):
        """Load the rom, its patches and the area graph transitions."""
        self.romFileName = rom
        self.romLoader = RomLoader.factory(rom)
        self.fullRando = self.romLoader.assignItems(self.locations)
        self.areaRando = self.romLoader.loadPatches()
        # the interactive solver needs the raw patches to rebuild a loader
        # on the next invocation
        if interactive == False:
            self.patches = self.romLoader.getPatches()
        else:
            self.patches = self.romLoader.getRawPatches()
        print("ROM {} full: {} area: {} patches: {}".format(rom, self.fullRando,
                                                            self.areaRando, self.patches))
        self.graphTransitions = self.romLoader.getTransitions()
        if self.graphTransitions is None:
            self.graphTransitions = vanillaTransitions
        self.areaGraph = AccessGraph(accessPoints, self.graphTransitions)
        if self.log.getEffectiveLevel() == logging.DEBUG:
            self.log.debug("Display items at locations:")
            for location in self.locations:
                self.log.debug('{:>50}: {:>16}'.format(location["Name"], location['itemName']))

    def loadPreset(self, presetFileName):
        """Load a knows/settings preset and refresh the smbm helpers."""
        presetLoader = PresetLoader.factory(presetFileName)
        presetLoader.load()
        self.smbm.createKnowsFunctions()
        if self.log.getEffectiveLevel() == logging.DEBUG:
            presetLoader.printToScreen()

    def computeLocationsDifficulty(self, locations):
        """Compute/refresh in place the difficulty of the given locations."""
        self.areaGraph.getAvailableLocations(locations, self.smbm, infinity, self.lastLoc)
        # check post available functions too
        for loc in locations:
            if 'PostAvailable' in loc:
                # the item must be owned for the post check, then rolled back
                self.smbm.addItem(loc['itemName'])
                postAvailable = loc['PostAvailable'](self.smbm)
                self.smbm.removeItem(loc['itemName'])
                loc['difficulty'] = self.smbm.wand(loc['difficulty'], postAvailable)
            # also check if we can come back to landing site from the location
            if loc['difficulty'].bool == True:
                loc['comeBack'] = self.areaGraph.canAccess(self.smbm, loc['accessPoint'], self.lastLoc, infinity, loc['itemName'])
        if self.log.getEffectiveLevel() == logging.DEBUG:
            self.log.debug("available locs:")
            for loc in locations:
                if loc['difficulty'].bool == True:
                    self.log.debug("{}: {}".format(loc['Name'], loc['difficulty']))

    def collectMajor(self, loc):
        """Move a major location to the visited list and collect its item."""
        self.majorLocations.remove(loc)
        self.visitedLocations.append(loc)
        area = self.collectItem(loc)
        return area

    def collectMinor(self, loc):
        """Move a minor location to the visited list and collect its item."""
        self.minorLocations.remove(loc)
        self.visitedLocations.append(loc)
        area = self.collectItem(loc)
        return area

    def collectItem(self, loc):
        """Collect the item at loc, update the solver state and return the
        loc's solve area (used as the new current area)."""
        item = loc["itemName"]
        if item not in Conf.itemsForbidden:
            # BUGFIX: record whether this is the first item of this type
            # BEFORE appending it; the original tested membership after the
            # append, which was always true, so firstItemsLog never logged.
            isFirstItem = item not in self.collectedItems
            self.collectedItems.append(item)
            self.smbm.addItem(item)
        else:
            # update the name of the item
            item = "-{}-".format(item)
            loc["itemName"] = item
            isFirstItem = item not in self.collectedItems
            self.collectedItems.append(item)
            # we still need the boss difficulty
            if 'Pickup' not in loc:
                loc["difficulty"] = SMBool(False)
        if 'Pickup' in loc:
            loc['Pickup']()
        if self.firstLogFile is not None:
            if isFirstItem:
                self.firstLogFile.write("{};{};{};{}\n".format(item, loc['Name'], loc['Area'], loc['GraphArea']))
        self.log.debug("collectItem: {} at {}".format(item, loc['Name']))
        # last loc is used as root node for the graph
        self.lastLoc = loc['accessPoint']
        return loc['SolveArea']
class InteractiveSolver(CommonSolver):
    """Solver driven one action at a time by the web site.

    Each invocation reloads the previous state from json, applies an action
    (add/remove/clear), recomputes the available locations and dumps the new
    state back to json.
    """
    # web location name (spaces/special chars stripped) -> internal name.
    # Hoisted to a class attribute so it is built once instead of on every
    # locNameWeb2Internal() call.
    locsWeb2Internal = {
        "EnergyTankGauntlet": "Energy Tank, Gauntlet",
        "Bomb": "Bomb",
        "EnergyTankTerminator": "Energy Tank, Terminator",
        "ReserveTankBrinstar": "Reserve Tank, Brinstar",
        "ChargeBeam": "Charge Beam",
        "MorphingBall": "Morphing Ball",
        "EnergyTankBrinstarCeiling": "Energy Tank, Brinstar Ceiling",
        "EnergyTankEtecoons": "Energy Tank, Etecoons",
        "EnergyTankWaterway": "Energy Tank, Waterway",
        "EnergyTankBrinstarGate": "Energy Tank, Brinstar Gate",
        "XRayScope": "X-Ray Scope",
        "Spazer": "Spazer",
        "EnergyTankKraid": "Energy Tank, Kraid",
        "VariaSuit": "Varia Suit",
        "IceBeam": "Ice Beam",
        "EnergyTankCrocomire": "Energy Tank, Crocomire",
        "HiJumpBoots": "Hi-Jump Boots",
        "GrappleBeam": "Grapple Beam",
        "ReserveTankNorfair": "Reserve Tank, Norfair",
        "SpeedBooster": "Speed Booster",
        "WaveBeam": "Wave Beam",
        "EnergyTankRidley": "Energy Tank, Ridley",
        "ScrewAttack": "Screw Attack",
        "EnergyTankFirefleas": "Energy Tank, Firefleas",
        "ReserveTankWreckedShip": "Reserve Tank, Wrecked Ship",
        "EnergyTankWreckedShip": "Energy Tank, Wrecked Ship",
        "RightSuperWreckedShip": "Right Super, Wrecked Ship",
        "GravitySuit": "Gravity Suit",
        "EnergyTankMamaturtle": "Energy Tank, Mama turtle",
        "PlasmaBeam": "Plasma Beam",
        "ReserveTankMaridia": "Reserve Tank, Maridia",
        "SpringBall": "Spring Ball",
        "EnergyTankBotwoon": "Energy Tank, Botwoon",
        "SpaceJump": "Space Jump",
        "PowerBombCrateriasurface": "Power Bomb (Crateria surface)",
        "MissileoutsideWreckedShipbottom": "Missile (outside Wrecked Ship bottom)",
        "MissileoutsideWreckedShiptop": "Missile (outside Wrecked Ship top)",
        "MissileoutsideWreckedShipmiddle": "Missile (outside Wrecked Ship middle)",
        "MissileCrateriamoat": "Missile (Crateria moat)",
        "MissileCrateriabottom": "Missile (Crateria bottom)",
        "MissileCrateriagauntletright": "Missile (Crateria gauntlet right)",
        "MissileCrateriagauntletleft": "Missile (Crateria gauntlet left)",
        "SuperMissileCrateria": "Super Missile (Crateria)",
        "MissileCrateriamiddle": "Missile (Crateria middle)",
        "PowerBombgreenBrinstarbottom": "Power Bomb (green Brinstar bottom)",
        "SuperMissilepinkBrinstar": "Super Missile (pink Brinstar)",
        "MissilegreenBrinstarbelowsupermissile": "Missile (green Brinstar below super missile)",
        "SuperMissilegreenBrinstartop": "Super Missile (green Brinstar top)",
        "MissilegreenBrinstarbehindmissile": "Missile (green Brinstar behind missile)",
        "MissilegreenBrinstarbehindreservetank": "Missile (green Brinstar behind reserve tank)",
        "MissilepinkBrinstartop": "Missile (pink Brinstar top)",
        "MissilepinkBrinstarbottom": "Missile (pink Brinstar bottom)",
        "PowerBombpinkBrinstar": "Power Bomb (pink Brinstar)",
        "MissilegreenBrinstarpipe": "Missile (green Brinstar pipe)",
        "PowerBombblueBrinstar": "Power Bomb (blue Brinstar)",
        "MissileblueBrinstarmiddle": "Missile (blue Brinstar middle)",
        "SuperMissilegreenBrinstarbottom": "Super Missile (green Brinstar bottom)",
        "MissileblueBrinstarbottom": "Missile (blue Brinstar bottom)",
        "MissileblueBrinstartop": "Missile (blue Brinstar top)",
        "MissileblueBrinstarbehindmissile": "Missile (blue Brinstar behind missile)",
        "PowerBombredBrinstarsidehopperroom": "Power Bomb (red Brinstar sidehopper room)",
        "PowerBombredBrinstarspikeroom": "Power Bomb (red Brinstar spike room)",
        "MissileredBrinstarspikeroom": "Missile (red Brinstar spike room)",
        "MissileKraid": "Missile (Kraid)",
        "Missilelavaroom": "Missile (lava room)",
        "MissilebelowIceBeam": "Missile (below Ice Beam)",
        "MissileaboveCrocomire": "Missile (above Crocomire)",
        "MissileHiJumpBoots": "Missile (Hi-Jump Boots)",
        "EnergyTankHiJumpBoots": "Energy Tank (Hi-Jump Boots)",
        "PowerBombCrocomire": "Power Bomb (Crocomire)",
        "MissilebelowCrocomire": "Missile (below Crocomire)",
        "MissileGrappleBeam": "Missile (Grapple Beam)",
        "MissileNorfairReserveTank": "Missile (Norfair Reserve Tank)",
        "MissilebubbleNorfairgreendoor": "Missile (bubble Norfair green door)",
        "MissilebubbleNorfair": "Missile (bubble Norfair)",
        "MissileSpeedBooster": "Missile (Speed Booster)",
        "MissileWaveBeam": "Missile (Wave Beam)",
        "MissileGoldTorizo": "Missile (Gold Torizo)",
        "SuperMissileGoldTorizo": "Super Missile (Gold Torizo)",
        "MissileMickeyMouseroom": "Missile (Mickey Mouse room)",
        "MissilelowerNorfairabovefireflearoom": "Missile (lower Norfair above fire flea room)",
        "PowerBomblowerNorfairabovefireflearoom": "Power Bomb (lower Norfair above fire flea room)",
        "PowerBombPowerBombsofshame": "Power Bomb (Power Bombs of shame)",
        "MissilelowerNorfairnearWaveBeam": "Missile (lower Norfair near Wave Beam)",
        "MissileWreckedShipmiddle": "Missile (Wrecked Ship middle)",
        "MissileGravitySuit": "Missile (Gravity Suit)",
        "MissileWreckedShiptop": "Missile (Wrecked Ship top)",
        "SuperMissileWreckedShipleft": "Super Missile (Wrecked Ship left)",
        "MissilegreenMaridiashinespark": "Missile (green Maridia shinespark)",
        "SuperMissilegreenMaridia": "Super Missile (green Maridia)",
        "MissilegreenMaridiatatori": "Missile (green Maridia tatori)",
        "SuperMissileyellowMaridia": "Super Missile (yellow Maridia)",
        "MissileyellowMaridiasupermissile": "Missile (yellow Maridia super missile)",
        "MissileyellowMaridiafalsewall": "Missile (yellow Maridia false wall)",
        "MissileleftMaridiasandpitroom": "Missile (left Maridia sand pit room)",
        "MissilerightMaridiasandpitroom": "Missile (right Maridia sand pit room)",
        "PowerBombrightMaridiasandpitroom": "Power Bomb (right Maridia sand pit room)",
        "MissilepinkMaridia": "Missile (pink Maridia)",
        "SuperMissilepinkMaridia": "Super Missile (pink Maridia)",
        "MissileDraygon": "Missile (Draygon)",
        "MotherBrain": "Mother Brain"
    }

    def __init__(self, output):
        self.log = log.get('Solver')
        self.outputFileName = output
        self.firstLogFile = None

    def dumpState(self):
        """Serialize the current solver state to the output json file."""
        state = SolverState()
        state.fromSolver(self)
        state.toJson(self.outputFileName)

    def initialize(self, rom, presetFileName):
        # load rom and preset, return first state
        self.locations = graphLocations
        self.smbm = SMBoolManager()
        self.presetFileName = presetFileName
        self.loadPreset(self.presetFileName)
        self.loadRom(rom, interactive=True)
        # NOTE(review): addMotherBrainLoc appends in place, so the
        # module-level graphLocations list is extended too; fine for one-shot
        # CLI invocations, verify before reusing a process.
        self.locations = self.addMotherBrainLoc(self.locations)
        self.clear()
        # compute new available locations
        self.computeLocationsDifficulty(self.majorLocations)
        self.dumpState()

    def iterate(self, stateJson, locName, action):
        """Reload a state, apply one action and dump the resulting state."""
        self.locations = self.addMotherBrainLoc(graphLocations)
        self.smbm = SMBoolManager()
        state = SolverState()
        state.fromJson(stateJson)
        state.toSolver(self)
        RomLoader.factory(self.patches).loadPatches()
        self.loadPreset(self.presetFileName)
        self.areaGraph = AccessGraph(accessPoints, self.graphTransitions)
        if action == 'clear':
            self.clear(True)
        else:
            # add already collected items to smbm
            self.smbm.addItems(self.collectedItems)
            if action == 'add':
                # pickup item at locName
                self.pickItemAt(locName)
            elif action == 'remove':
                # remove last collected item
                self.cancelLast()
        # compute new available locations
        self.computeLocationsDifficulty(self.majorLocations)
        # return them
        self.dumpState()

    def locNameWeb2Internal(self, locNameWeb):
        """Translate a web location name back to its internal name."""
        return self.locsWeb2Internal[locNameWeb]

    def getLoc(self, locNameWeb):
        """Return the remaining major location matching the given web name."""
        locName = self.locNameWeb2Internal(locNameWeb)
        for loc in self.majorLocations:
            if loc["Name"] == locName:
                return loc
        raise Exception("Location '{}' not found in remaining locations".format(locName))

    def pickItemAt(self, locName):
        """Collect the item at the given (web) location name."""
        loc = self.getLoc(locName)
        if "difficulty" not in loc:
            # sequence break
            loc["difficulty"] = SMBool(True, -1)
            # take first ap of the loc; list() keeps this working on
            # python 3 where dict.keys() is not indexable
            loc["accessPoint"] = list(loc["AccessFrom"].keys())[0]
        self.collectMajor(loc)

    def cancelLast(self):
        """Undo the last pickup: restore the loc, the access point and the item."""
        if len(self.visitedLocations) == 0:
            return
        loc = self.visitedLocations.pop()
        self.majorLocations.append(loc)
        # pickup func
        if 'Unpickup' in loc:
            loc['Unpickup']()
        # access point
        if len(self.visitedLocations) == 0:
            self.lastLoc = "Landing Site"
        else:
            self.lastLoc = self.visitedLocations[-1]["accessPoint"]
        # item
        item = loc["itemName"]
        if item != self.collectedItems[-1]:
            raise Exception("Item of last collected loc {}: {} is different from last collected item: {}".format(loc["Name"], item, self.collectedItems[-1]))
        self.smbm.removeItem(item)
        self.collectedItems.pop()

    def clear(self, reload=False):
        """Reset the solver to its start-of-game state; with reload=True also
        wipe the cached difficulties."""
        self.collectedItems = []
        self.visitedLocations = []
        self.lastLoc = 'Landing Site'
        # NOTE(review): aliases self.locations (no copy) -- collectMajor()
        # removes entries from majorLocations and thus from locations too;
        # looks intended, verify.
        self.majorLocations = self.locations
        if reload == True:
            for loc in self.majorLocations:
                if "difficulty" in loc:
                    del loc["difficulty"]
        Bosses.reset()
        self.smbm.resetItems()

    def addMotherBrainLoc(self, locations):
        # in the interactive solver mother brain is a new loc
        locations.append({
            'Area': "Tourian",
            'GraphArea': "Tourian",
            'SolveArea': "Tourian",
            'Name': "Mother Brain",
            'Visibility': "Visible",
            'Room': 'Mother Brain Room',
            'itemName': "Nothing",
            'AccessFrom' : {
                'Statues Hallway Left': lambda sm: SMBool(True)
            },
            'Available': lambda sm: sm.wand(Bosses.allBossesDead(sm), sm.enoughStuffTourian())
        })
        return locations
class StandardSolver(CommonSolver):
# given a rom and parameters returns the estimated difficulty
def __init__(self, rom, presetFileName, difficultyTarget, pickupStrategy,
             itemsForbidden=None, type='console', firstItemsLog=None,
             displayGeneratedPath=False, outputFileName=None):
    """Given a rom and parameters, prepare a solver that estimates the rom's
    difficulty (run solveRom() afterwards).

    type: 'console' or 'web' output backend; firstItemsLog: optional csv
    recording where each item type was first found (spoilers!).
    """
    self.log = log.get('Solver')
    # avoid the shared-mutable-default-argument pitfall
    if itemsForbidden is None:
        itemsForbidden = []
    self.setConf(difficultyTarget, pickupStrategy, itemsForbidden, displayGeneratedPath)
    self.firstLogFile = None
    if firstItemsLog is not None:
        self.firstLogFile = open(firstItemsLog, 'w')
        # BUGFIX: collectItem() writes four ';'-separated fields per line
        # (item, name, area, graph area); the header previously declared
        # only three columns.
        self.firstLogFile.write('Item;Location;Area;GraphArea\n')
    # can be called from command line (console) or from web site (web)
    self.type = type
    self.output = Out.factory(self.type, self)
    self.outputFileName = outputFileName
    self.locations = graphLocations
    self.smbm = SMBoolManager()
    self.presetFileName = presetFileName
    self.loadPreset(self.presetFileName)
    self.loadRom(rom)
    self.pickup = Pickup(Conf.itemsPickup)
def setConf(self, difficultyTarget, pickupStrategy, itemsForbidden, displayGeneratedPath):
    """Push the run parameters into the global Conf object."""
    settings = {
        'difficultyTarget': difficultyTarget,
        'itemsPickup': pickupStrategy,
        'displayGeneratedPath': displayGeneratedPath,
        'itemsForbidden': itemsForbidden,
    }
    for attr, value in settings.items():
        setattr(Conf, attr, value)
def solveRom(self):
    """Solve the loaded rom then hand the result to the output backend."""
    self.lastLoc = 'Landing Site'
    self.difficulty, self.itemsOk = self.computeDifficulty()
    # close the spoiler log opened in __init__, if any
    if self.firstLogFile is not None:
        self.firstLogFile.close()
    self.knowsUsed, self.knowsKnown = self.getKnowsUsed()
    self.output.out()
def getRemainMajors(self):
    """Majors still not reachable that hold a real item."""
    remaining = []
    for loc in self.majorLocations:
        if loc['difficulty'].bool == False and loc['itemName'] not in ['Nothing', 'NoEnergy']:
            remaining.append(loc)
    return remaining
def getRemainMinors(self):
    """Minors still not reachable that hold a real item; None in full rando
    (majors and minors share the same list there)."""
    if self.fullRando == True:
        return None
    remaining = []
    for loc in self.minorLocations:
        if loc['difficulty'].bool == False and loc['itemName'] not in ['Nothing', 'NoEnergy']:
            remaining.append(loc)
    return remaining
def getSkippedMajors(self):
    """Majors that were reachable but deliberately left behind."""
    skipped = []
    for loc in self.majorLocations:
        if loc['difficulty'].bool == True and loc['itemName'] not in ['Nothing', 'NoEnergy']:
            skipped.append(loc)
    return skipped
def getUnavailMajors(self):
    """Majors that could not be reached at all.

    The filter is byte-identical to getRemainMajors(); delegate to it so the
    two cannot drift apart (deduplication, behavior unchanged).
    """
    return self.getRemainMajors()
def getDiffThreshold(self):
    """Return the difficulty ceiling used when sorting/postponing locations:
    just below the next named level above the target."""
    target = Conf.difficultyTarget
    epsilon = 0.001
    # (band ceiling, next named level) pairs, in ascending order
    bands = [(easy, medium),
             (medium, hard),
             (hard, harder),
             (harder, hardcore),
             (hardcore, mania)]
    for ceiling, nextLevel in bands:
        if target <= ceiling:
            return nextLevel - epsilon
    # target is hardcore+ already: use it as-is
    return target
def computeDifficulty(self):
    """Main solver loop: repeatedly compute the difficulty of every reachable
    location and collect the best next item until the game can be finished or
    no further progress is possible. Returns (difficulty, itemsOk)."""
    # loop on the available locations depending on the collected items.
    # before getting a new item, loop on all of them and get their difficulty,
    # the next collected item is the one with the smallest difficulty,
    # if equality between major and minor, take major first.
    if not self.fullRando:
        self.majorLocations = [loc for loc in self.locations if loc["Class"] == "Major"]
        self.minorLocations = [loc for loc in self.locations if loc["Class"] == "Minor"]
    else:
        # in full rando majors and minors are the same (aliased) list
        self.majorLocations = self.locations[:] # copy
        self.minorLocations = self.majorLocations
    self.visitedLocations = []
    self.collectedItems = []
    # with the knowsXXX conditions some roms can be unbeatable, so we have to detect it
    previous = -1
    current = 0
    self.log.debug("{}: available major: {}, available minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
    isEndPossible = False
    endDifficulty = mania
    area = 'Crateria Landing Site'
    diffThreshold = self.getDiffThreshold()
    while True:
        # actual while condition
        hasEnoughMinors = self.pickup.enoughMinors(self.smbm, self.minorLocations)
        hasEnoughMajors = self.pickup.enoughMajors(self.smbm, self.majorLocations)
        hasEnoughItems = hasEnoughMajors and hasEnoughMinors
        canEndGame = self.canEndGame()
        (isEndPossible, endDifficulty) = (canEndGame.bool, canEndGame.difficulty)
        if isEndPossible and hasEnoughItems and endDifficulty <= diffThreshold:
            self.log.debug("END")
            break
        #self.log.debug(str(self.collectedItems))
        self.log.debug("Current Area : " + area)
        # check if we have collected an item in the last loop
        current = len(self.collectedItems)
        if current == previous:
            # no progress since last iteration: either stuck, or only a
            # harder-than-threshold ending remains
            if not isEndPossible:
                self.log.debug("STUCK ALL")
            else:
                self.log.debug("HARD END")
            break
        previous = current
        # compute the difficulty of all the locations
        self.computeLocationsDifficulty(self.majorLocations)
        if self.fullRando == False:
            self.computeLocationsDifficulty(self.minorLocations)
        # keep only the available locations
        majorsAvailable = [loc for loc in self.majorLocations if 'difficulty' in loc and loc["difficulty"].bool == True]
        minorsAvailable = [loc for loc in self.minorLocations if 'difficulty' in loc and loc["difficulty"].bool == True]
        # check if we're stuck
        if len(majorsAvailable) == 0 and len(minorsAvailable) == 0:
            if not isEndPossible:
                self.log.debug("STUCK MAJORS and MINORS")
            else:
                self.log.debug("HARD END")
            break
        # sort them on difficulty and proximity
        majorsAvailable = self.getAvailableItemsList(majorsAvailable, area, diffThreshold)
        if self.fullRando == True:
            minorsAvailable = majorsAvailable
        else:
            minorsAvailable = self.getAvailableItemsList(minorsAvailable, area, diffThreshold)
        # choose one to pick up
        area = self.nextDecision(majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold, area)
    # main loop end
    if isEndPossible:
        # pseudo location carrying the end-game difficulty for the output
        self.visitedLocations.append({
            'item' : 'The End',
            'itemName' : 'The End',
            'Name' : 'The End',
            'Area' : 'The End',
            'SolveArea' : 'The End',
            'Room': 'Mother Brain Room',
            'distance': 0,
            'difficulty' : SMBool(True, endDifficulty)
        })
    # compute difficulty value
    (difficulty, itemsOk) = self.computeDifficultyValue()
    self.log.debug("difficulty={}".format(difficulty))
    self.log.debug("itemsOk={}".format(itemsOk))
    self.log.debug("{}: remaining major: {}, remaining minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
    self.log.debug("remaining majors:")
    for loc in self.majorLocations:
        self.log.debug("{} ({})".format(loc['Name'], loc['itemName']))
    self.log.debug("bosses: {}".format(Bosses.golden4Dead))
    return (difficulty, itemsOk)
def handleNoComeBack(self, locations):
    """When every available location has the "no come back" flag unset to
    False, weight each location by the inverse of how many locations share
    its graph area.

    Returns True when the areaWeight parameter has been added to the
    locations (the caller must remove it afterwards), False otherwise.
    """
    areaCount = {}
    for loc in locations:
        # bail out as soon as one location can be come back from
        if "comeBack" not in loc:
            return False
        if loc["comeBack"] == True:
            return False
        areaCount[loc["GraphArea"]] = areaCount.get(loc["GraphArea"], 0) + 1
    # with a single area the weights would all be equal, hence useless
    if len(areaCount) == 1:
        return False
    for loc in locations:
        loc["areaWeight"] = 1.0 / areaCount[loc["GraphArea"]]
    print("WARNING: use no come back heuristic")
    return True
def getAvailableItemsList(self, locations, area, threshold):
    """Order candidate locations: nearby/within-threshold/come-back-able ones
    ("around") first, then the rest ("outside"), each group sorted by a
    multi-key priority tuple."""
    # locations without distance are not available
    locations = [loc for loc in locations if 'distance' in loc]
    cleanAreaWeight = self.handleNoComeBack(locations)
    around = [loc for loc in locations if (loc['SolveArea'] == area or loc['distance'] < 3) and loc['difficulty'].difficulty <= threshold and not Bosses.areaBossDead(area) and 'comeBack' in loc and loc['comeBack'] == True]
    # pickup action means beating a boss, so do that first if possible
    around.sort(key=lambda loc: (loc["areaWeight"] if "areaWeight" in loc
                                 else 0,
                                 0 if 'Pickup' in loc
                                 else 1,
                                 0 if 'comeBack' in loc and loc['comeBack'] == True
                                 else 1,
                                 0 if loc['SolveArea'] == area and loc['difficulty'].difficulty <= threshold
                                 else 1,
                                 loc['distance'] if loc['difficulty'].difficulty <= threshold
                                 else 100000,
                                 loc['difficulty'].difficulty))
    outside = [loc for loc in locations if not loc in around]
    self.log.debug("around1 = " + str([(loc['Name'], loc['difficulty'], loc['distance'], loc['comeBack'], loc['SolveArea']) for loc in around]))
    self.log.debug("outside1 = " + str([(loc['Name'], loc['difficulty'], loc['distance'], loc['comeBack'], loc['SolveArea']) for loc in outside]))
    # we want to sort the outside locations by putting the ones in the same
    # area first if we don't have enough items,
    # then we sort the remaining areas starting with boss dead status
    outside.sort(key=lambda loc: (loc["areaWeight"] if "areaWeight" in loc
                                  else 0,
                                  0 if 'comeBack' in loc and loc['comeBack'] == True
                                  else 1,
                                  0 if loc['SolveArea'] == area and loc['difficulty'].difficulty <= threshold
                                  else 1,
                                  loc['distance'] if loc['difficulty'].difficulty <= threshold
                                  else 100000,
                                  loc['difficulty'].difficulty if not Bosses.areaBossDead(loc['Area'])
                                  and loc['difficulty'].difficulty <= threshold
                                  and 'Pickup' in loc
                                  else 100000,
                                  loc['difficulty'].difficulty if not Bosses.areaBossDead(loc['Area'])
                                  and loc['difficulty'].difficulty <= threshold
                                  else 100000,
                                  loc['difficulty'].difficulty))
    self.log.debug("around2 = " + str([(loc['Name'], loc['difficulty'], loc['distance'], loc['comeBack'], loc['SolveArea']) for loc in around]))
    self.log.debug("outside2 = " + str([(loc['Name'], loc['difficulty'], loc['distance'], loc['comeBack'], loc['SolveArea']) for loc in outside]))
    # remove the temporary weights added by handleNoComeBack()
    if cleanAreaWeight == True:
        for loc in locations:
            del loc["areaWeight"]
    return around + outside
def nextDecision(self, majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold, area):
    """Choose and collect the next location (major or minor).

    majorsAvailable / minorsAvailable are sorted candidate lists (best
    candidate first); the chosen location is popped from its list and
    handed to collectMajor/collectMinor, whose result is returned.

    hasEnoughMinors: whether the minor pickup quota is already satisfied.
    diffThreshold:   maximum acceptable difficulty for a location.
    area:            the solver's current solve area.

    Raises Exception when no decision can be taken (both lists empty).
    """
    # first take a major item of acceptable difficulty in the current area
    if (len(majorsAvailable) > 0
        and majorsAvailable[0]['SolveArea'] == area
        and majorsAvailable[0]['difficulty'].difficulty <= diffThreshold
        and majorsAvailable[0]['comeBack'] == True):
        return self.collectMajor(majorsAvailable.pop(0))
    # next item decision
    if len(minorsAvailable) == 0 and len(majorsAvailable) > 0:
        self.log.debug('MAJOR')
        return self.collectMajor(majorsAvailable.pop(0))
    elif len(majorsAvailable) == 0 and len(minorsAvailable) > 0:
        # we don't check for hasEnoughMinors here, because we would be stuck,
        # so pickup what we can and hope it gets better
        self.log.debug('MINOR')
        return self.collectMinor(minorsAvailable.pop(0))
    elif len(majorsAvailable) > 0 and len(minorsAvailable) > 0:
        self.log.debug('BOTH|M=' + majorsAvailable[0]['Name'] + ', m=' + minorsAvailable[0]['Name'])
        # if both are available, decide based on area, difficulty and comeBack
        nextMajDifficulty = majorsAvailable[0]['difficulty'].difficulty
        nextMinArea = minorsAvailable[0]['SolveArea']
        nextMinDifficulty = minorsAvailable[0]['difficulty'].difficulty
        nextMajComeBack = majorsAvailable[0]['comeBack']
        nextMinComeBack = minorsAvailable[0]['comeBack']
        nextMajDistance = majorsAvailable[0]['distance']
        nextMinDistance = minorsAvailable[0]['distance']
        self.log.debug("diff area back dist - diff area back dist")
        self.log.debug("maj: {} '{}' {} {}, min: {} '{}' {} {}".format(nextMajDifficulty, majorsAvailable[0]['SolveArea'], nextMajComeBack, nextMajDistance, nextMinDifficulty, nextMinArea, nextMinComeBack, nextMinDistance))
        if hasEnoughMinors == True and self.haveAllMinorTypes() == True and self.smbm.haveItem('Charge'):
            # we have charge, no longer need minors
            return self.collectMajor(majorsAvailable.pop(0))
        else:
            # first take an item from a loc where you can come back
            if nextMajComeBack != nextMinComeBack:
                self.log.debug("!= combeback")
                if nextMajComeBack == True:
                    return self.collectMajor(majorsAvailable.pop(0))
                else:
                    return self.collectMinor(minorsAvailable.pop(0))
            # if not all the minor types are collected, start with minors
            elif nextMinDifficulty <= diffThreshold and not self.haveAllMinorTypes():
                self.log.debug("not all minors types")
                return self.collectMinor(minorsAvailable.pop(0))
            elif nextMinArea == area and nextMinDifficulty <= diffThreshold:
                self.log.debug("not enough minors")
                return self.collectMinor(minorsAvailable.pop(0))
            # difficulty over area (this is a difficulty estimator,
            # not a speedrunning simulator)
            # BUGFIX: both branches below used to compare the major's
            # *distance* against diffThreshold; by symmetry of the
            # both-doable / both-too-hard / mixed trichotomy it must be
            # the major's *difficulty*.
            elif nextMinDifficulty <= diffThreshold and nextMajDifficulty <= diffThreshold:
                # both are doable: take the closer one
                if nextMajDistance != nextMinDistance:
                    self.log.debug("!= distance")
                    if nextMajDistance < nextMinDistance:
                        return self.collectMajor(majorsAvailable.pop(0))
                    else:
                        return self.collectMinor(minorsAvailable.pop(0))
                # take the easier
                elif nextMinDifficulty < nextMajDifficulty:
                    self.log.debug("min easier and not enough minors")
                    return self.collectMinor(minorsAvailable.pop(0))
                elif nextMajDifficulty < nextMinDifficulty:
                    self.log.debug("maj easier")
                    return self.collectMajor(majorsAvailable.pop(0))
                # same difficulty and distance for minor and major, take major first
                else:
                    return self.collectMajor(majorsAvailable.pop(0))
            elif nextMinDifficulty > diffThreshold and nextMajDifficulty > diffThreshold:
                # both beyond the threshold: take the easier
                if nextMinDifficulty < nextMajDifficulty:
                    self.log.debug("min easier and not enough minors")
                    return self.collectMinor(minorsAvailable.pop(0))
                elif nextMajDifficulty < nextMinDifficulty:
                    self.log.debug("maj easier")
                    return self.collectMajor(majorsAvailable.pop(0))
                # take the closer one
                elif nextMajDistance != nextMinDistance:
                    self.log.debug("!= distance")
                    if nextMajDistance < nextMinDistance:
                        return self.collectMajor(majorsAvailable.pop(0))
                    else:
                        return self.collectMinor(minorsAvailable.pop(0))
                # same difficulty and distance for minor and major, take major first
                else:
                    return self.collectMajor(majorsAvailable.pop(0))
            else:
                # exactly one of the two is within the threshold
                if nextMinDifficulty < nextMajDifficulty:
                    self.log.debug("min easier and not enough minors")
                    return self.collectMinor(minorsAvailable.pop(0))
                else:
                    self.log.debug("maj easier")
                    return self.collectMajor(majorsAvailable.pop(0))
    raise Exception("Can't take a decision")
def computeDifficultyValue(self):
    """Summarize the solve as a (difficulty, itemsOk) tuple.

    difficulty is -1 when the game cannot be finished, otherwise the
    maximum difficulty among all visited locations. itemsOk tells
    whether every requested minor and major could also be collected.
    """
    if not self.canEndGame().bool:
        # aborted run
        return (-1, False)
    # the run is as hard as its hardest visited location
    difficulty = max([0] + [loc['difficulty'].difficulty
                            for loc in self.visitedLocations])
    # check if we have taken all the requested items
    if (self.pickup.enoughMinors(self.smbm, self.minorLocations)
            and self.pickup.enoughMajors(self.smbm, self.majorLocations)):
        return (difficulty, True)
    # can finish but can't take all the requested items
    return (difficulty, False)
def getKnowsUsed(self):
    """Return (distinct knows used on the path, total knows known)."""
    # gather every technique used by the visited locations, deduplicated
    used = set()
    for loc in self.visitedLocations:
        used.update(loc['difficulty'].knows)
    # count the techniques the player knows...
    knowsKnown = sum(1 for knows in Knows.__dict__
                     if isKnows(knows) and getattr(Knows, knows)[0] == True)
    # ...plus the enabled hell runs
    knowsKnown += sum(1 for hellRun in Settings.hellRuns
                      if Settings.hellRuns[hellRun] is not None)
    return (len(used), knowsKnown)
def tryRemainingLocs(self):
    """Re-check remaining locations with an all-knowing preset.

    Loads a preset where every technique is known to discover which
    techniques could allow the seed to be continued; returns the
    locations that become reachable.
    """
    if self.fullRando == True:
        locations = self.majorLocations
    else:
        locations = self.majorLocations + self.minorLocations
    presetPath = os.path.expanduser('~/RandomMetroidSolver/standard_presets/solution.json')
    loader = PresetLoader.factory(presetPath)
    loader.load()
    self.smbm.createKnowsFunctions()
    self.areaGraph.getAvailableLocations(locations, self.smbm, infinity, self.lastLoc)
    return [loc for loc in locations if loc['difficulty'].bool == True]
def haveAllMinorTypes(self):
    """True once one minor of each ammo type has been collected.

    The first minor of each type acts like a major pickup, so require
    one of each before venturing deeper into Zebes.
    """
    return all(ammo in self.collectedItems
               for ammo in ('PowerBomb', 'Super', 'Missile'))
def canEndGame(self):
    """Return an SMBool telling whether the game can be finished.

    To finish the game you must:
      - beat the golden 4 (pickup of the 4 items behind the bosses is
        forced to ensure that)
      - have enough stuff for Tourian: defeat the metroids, destroy or
        skip the zebetites, and beat Mother Brain
    """
    bossesDead = Bosses.allBossesDead(self.smbm)
    tourianOk = self.smbm.enoughStuffTourian()
    return self.smbm.wand(bossesDead, tourianOk)
class Out(object):
    """Factory selecting the solver output backend."""
    @staticmethod
    def factory(output, solver):
        # output: 'web' or 'console'; anything else is a fatal error.
        # Backend names are resolved lazily so the error path does not
        # depend on the backend classes.
        if output == 'web':
            backend = OutWeb
        elif output == 'console':
            backend = OutConsole
        else:
            raise Exception("Wrong output type for the Solver: {}".format(output))
        return backend(solver)
class OutWeb:
    """Solver output backend used by the web site.

    Writes the solver results (path, difficulty, remaining locations,
    optional area-graph pictures) as JSON to ``solver.outputFileName``.
    """
    def __init__(self, solver):
        # solver: the finished solver whose results will be serialized
        self.solver = solver

    def out(self):
        # Serialize the solver results as JSON for the web site.
        s = self.solver
        if s.areaRando == True:
            # area rando: dump the area graph and render it as png + thumbnail
            dotFileName = os.path.basename(os.path.splitext(s.romFileName)[0])+'.json'
            dotFileName = os.path.join(os.path.expanduser('~/web2py/applications/solver/static/graph'), dotFileName)
            s.areaGraph.toDot(dotFileName)
            (pngFileName, pngThumbFileName) = self.generatePng(dotFileName)
            if pngFileName is not None and pngThumbFileName is not None:
                # keep only the basenames; the web page knows the directory
                pngFileName = os.path.basename(pngFileName)
                pngThumbFileName = os.path.basename(pngThumbFileName)
        else:
            pngFileName = None
            pngThumbFileName = None

        randomizedRom = os.path.basename(os.path.splitext(s.romFileName)[0])+'.sfc'
        diffPercent = DifficultyDisplayer(s.difficulty).percent()

        generatedPath = self.getPath(s.visitedLocations)
        collectedItems = s.smbm.getItems()

        if s.difficulty == -1:
            # aborted run: report what could remain and what could unlock it
            remainTry = self.getPath(s.tryRemainingLocs())
            remainMajors = self.getPath(s.getRemainMajors())
            remainMinors = self.getPath(s.getRemainMinors())
            skippedMajors = None
            unavailMajors = None
        else:
            remainTry = None
            remainMajors = None
            remainMinors = None
            skippedMajors = self.getPath(s.getSkippedMajors())
            unavailMajors = self.getPath(s.getUnavailMajors())

        result = dict(randomizedRom=randomizedRom, difficulty=s.difficulty,
                      generatedPath=generatedPath, diffPercent=diffPercent,
                      knowsUsed=(s.knowsUsed, s.knowsKnown), itemsOk=s.itemsOk, patches=s.patches,
                      pngFileName=pngFileName, pngThumbFileName=pngThumbFileName,
                      remainTry=remainTry, remainMajors=remainMajors, remainMinors=remainMinors,
                      skippedMajors=skippedMajors, unavailMajors=unavailMajors,
                      collectedItems=collectedItems)

        with open(s.outputFileName, 'w') as jsonFile:
            json.dump(result, jsonFile)

    def getPath(self, locations):
        # Turn a list of locations into plain JSON-serializable rows;
        # returns None when locations is None so callers can pass it through.
        if locations is None:
            return None
        out = []
        for loc in locations:
            out.append([(loc['Name'], loc['Room']), loc['Area'], loc['SolveArea'], loc['itemName'],
                        '{0:.2f}'.format(loc['difficulty'].difficulty),
                        ', '.join(sorted(loc['difficulty'].knows)),
                        ', '.join(sorted(list(set(loc['difficulty'].items)))),
                        [ap.Name for ap in loc['path']] if 'path' in loc else None])
        return out

    def generatePng(self, dotFileName):
        # use dot to generate the graph's image .png
        # use convert to generate the thumbnail
        # dotFileName: the /directory/image.dot
        # the png and thumbnails are generated in the same directory as the dot
        # Returns (pngPath, thumbnailPath), or (None, None) on failure.
        splited = os.path.splitext(dotFileName)
        pngFileName = splited[0] + '.png'
        pngThumbFileName = splited[0] + '_thumbnail.png'

        # dot -Tpng VARIA_Randomizer_AFX5399_noob.dot -oVARIA_Randomizer_AFX5399_noob.png
        params = ['dot', '-Tpng', dotFileName, '-o'+pngFileName]
        ret = subprocess.call(params)
        if ret != 0:
            print("Error calling dot {}: {}".format(params, ret))
            return (None, None)

        params = ['convert', pngFileName, '-resize', '1024', pngThumbFileName]
        ret = subprocess.call(params)
        if ret != 0:
            print("Error calling convert {}: {}".format(params, ret))
            # the thumbnail failed: don't keep a png without its thumbnail
            os.remove(pngFileName)
            return (None, None)

        return (pngFileName, pngThumbFileName)
class OutConsole:
    """Solver output backend that prints the results to the terminal."""
    def __init__(self, solver):
        self.solver = solver

    def out(self):
        # Print everything, then exit 0 on success / 1 on an aborted run.
        s = self.solver
        self.displayOutput()
        print("({}, {}): diff : {}".format(s.difficulty, s.itemsOk, s.romFileName))
        print("{}/{}: knows Used : {}".format(s.knowsUsed, s.knowsKnown, s.romFileName))
        sys.exit(0 if s.difficulty >= 0 else 1)

    def printPath(self, message, locations, displayAPs=True):
        # Print a table of locations, optionally preceded by the access-point
        # path that leads to each of them.
        print("")
        print(message)
        print('{:>50} {:>12} {:>34} {:>8} {:>16} {:>14} {} {}'.format(
            "Location Name", "Area", "Sub Area", "Distance", "Item",
            "Difficulty", "Knows used", "Items used"))
        print('-'*150)
        lastAP = None
        for loc in locations:
            if displayAPs == True and 'path' in loc:
                path = [ap.Name for ap in loc['path']]
                lastAP = path[-1]
                # only show multi-step paths
                if not (len(path) == 1 and path[0] == lastAP):
                    print('{:>50}: {}'.format('Path', " -> ".join(path)))
            hasDiff = 'difficulty' in loc
            print('{:>50}: {:>12} {:>34} {:>8} {:>16} {:>14} {} {}'.format(
                loc['Name'],
                loc['Area'],
                loc['SolveArea'],
                loc['distance'] if 'distance' in loc else 'nc',
                loc['itemName'],
                round(loc['difficulty'].difficulty, 2) if hasDiff else 'nc',
                sorted(loc['difficulty'].knows) if hasDiff else 'nc',
                list(set(loc['difficulty'].items)) if hasDiff else 'nc'))

    def displayOutput(self):
        s = self.solver
        # generated path (spoilers, so behind a config flag)
        if Conf.displayGeneratedPath == True:
            self.printPath("Generated path ({}/101):".format(len(s.visitedLocations)), s.visitedLocations)
        if s.difficulty == -1:
            # aborted: show missing techniques and remaining locations
            self.printPath("Next locs which could have been available if more techniques were known:", s.tryRemainingLocs())
            remainMajors = s.getRemainMajors()
            if len(remainMajors) > 0:
                self.printPath("Remaining major locations:", remainMajors, displayAPs=False)
            remainMinors = s.getRemainMinors()
            if remainMinors is not None and len(remainMinors) > 0:
                self.printPath("Remaining minor locations:", remainMinors, displayAPs=False)
        else:
            # solved: show which locations were skipped or unreachable
            skippedMajors = s.getSkippedMajors()
            if len(skippedMajors) > 0:
                self.printPath("Skipped major locations:", skippedMajors, displayAPs=False)
            else:
                print("No skipped major locations")
            unavailMajors = s.getUnavailMajors()
            if len(unavailMajors) > 0:
                self.printPath("Unaccessible major locations:", unavailMajors, displayAPs=False)
            else:
                print("No unaccessible major locations")
            items = s.smbm.getItems()
            print("ETank: {}, Reserve: {}, Missile: {}, Super: {}, PowerBomb: {}".format(items['ETank'], items['Reserve'], items['Missile'], items['Super'], items['PowerBomb']))
            print("Majors: {}".format(sorted([item for item in items if items[item] == True])))
        # difficulty scale
        self.displayDifficulty(s.difficulty)

    def displayDifficulty(self, difficulty):
        if difficulty < 0:
            print("Aborted run, can't finish the game with the given prerequisites")
        else:
            print("Estimated difficulty: {}".format(DifficultyDisplayer(difficulty).scale()))
class DifficultyDisplayer:
    """Render a numeric difficulty as an ASCII gauge or as a percentage.

    NOTE(review): relies on module-level globals from the parameters
    module: impossibru, easy..hardcore, mania and the diff2text mapping
    (difficulty value -> name) -- confirm they are imported at file top.
    """
    def __init__(self, difficulty):
        # difficulty: numeric difficulty; -1 means an aborted run
        self.difficulty = difficulty

    def scale(self):
        # Render the difficulty as an ASCII gauge placed between the two
        # nearest named levels, e.g. "easy ---^-- medium".
        if self.difficulty >= impossibru:
            return "IMPOSSIBRU!"
        else:
            previous = 0
            for d in sorted(diff2text):
                if self.difficulty >= d:
                    previous = d
                else:
                    # self.difficulty sits in [previous, d): draw the gauge
                    displayString = diff2text[previous]
                    displayString += ' '
                    scale = d - previous
                    pos = int(self.difficulty - previous)
                    displayString += '-' * pos
                    displayString += '^'
                    displayString += '-' * (scale - pos)
                    displayString += ' '
                    displayString += diff2text[d]
                    break
            # NOTE(review): if self.difficulty >= max(diff2text) but below
            # impossibru the loop never breaks and displayString is unbound
            # -- confirm impossibru <= max(diff2text).
            return displayString

    def percent(self):
        # return the difficulty as a percent
        if self.difficulty == -1:
            return -1
        elif self.difficulty in [0, easy]:
            return 0
        elif self.difficulty >= mania:
            return 100

        # percentage anchor for each named difficulty level
        difficultiesPercent = {
            easy: 0,
            medium: 20,
            hard: 40,
            harder: 60,
            hardcore: 80,
            mania: 100
        }
        difficulty = self.difficulty
        lower = 0
        percent = 100
        for upper in sorted(diff2text):
            if self.difficulty >= upper:
                lower = upper
            else:
                # piecewise-linear interpolation between the two anchors:
                # percent = a * difficulty + b
                lowerPercent = difficultiesPercent[lower]
                upperPercent = difficultiesPercent[upper]
                a = (upperPercent-lowerPercent)/float(upper-lower)
                b = lowerPercent - a * lower
                percent = int(difficulty * a + b)
                break
        return percent
def interactiveSolver(args):
    """Entry point for the web site's interactive solver mode.

    Two call shapes:
      * init:    requires romFileName, presetFileName and output
      * iterate: requires state, action and output (plus loc for "add")
    Exits with status 1 on an invalid parameter combination.
    """
    # PEP 8: compare against None with `is` / `is not`, not ==/!=
    if args.romFileName is not None and args.presetFileName is not None and args.output is not None:
        # init
        solver = InteractiveSolver(args.output)
        solver.initialize(args.romFileName, args.presetFileName)
    elif args.state is not None and args.action is not None and args.output is not None:
        # iterate
        if args.action == "add" and args.loc is None:
            print("Missing loc parameter when using action add")
            sys.exit(1)
        solver = InteractiveSolver(args.output)
        solver.iterate(args.state, args.loc, args.action)
    else:
        print("Wrong parameters for interactive mode")
        sys.exit(1)
def standardSolver(args):
    """Run the solver once over a rom and output the results.

    Exits with status 1 when no rom is given; otherwise delegates to
    StandardSolver.solveRom().
    """
    if args.romFileName is None:
        print("Parameter --romFileName mandatory when not in interactive mode")
        sys.exit(1)

    # fall back on the configuration for unset command line options
    difficultyTarget = Conf.difficultyTarget if args.difficultyTarget is None else args.difficultyTarget
    pickupStrategy = Conf.itemsPickup if args.pickupStrategy is None else args.pickupStrategy

    # argparse's action='append' nests every -f value in its own list
    # ([['Varia'], ['Reserve'], ['Gravity']]), flatten it
    args.itemsForbidden = [item[0] for item in args.itemsForbidden]

    solver = StandardSolver(args.romFileName, args.presetFileName, difficultyTarget,
                            pickupStrategy, args.itemsForbidden, type=args.type,
                            firstItemsLog=args.firstItemsLog,
                            displayGeneratedPath=args.displayGeneratedPath,
                            outputFileName=args.output)
    solver.solveRom()
if __name__ == "__main__":
    # Command line entry point: parse the options, then dispatch to the
    # interactive (web site) or standard solver.
    parser = argparse.ArgumentParser(description="Random Metroid Solver")
    parser.add_argument('--romFileName', '-r', help="the input rom", nargs='?',
                        default=None, dest="romFileName")
    parser.add_argument('--preset', '-p', help="the preset file", nargs='?',
                        default=None, dest='presetFileName')
    parser.add_argument('--difficultyTarget', '-t',
                        help="the difficulty target that the solver will aim for",
                        dest='difficultyTarget', nargs='?', default=None, type=int)
    parser.add_argument('--pickupStrategy', '-s', help="Pickup strategy for the Solver",
                        dest='pickupStrategy', nargs='?', default=None,
                        choices=['minimal', 'all', 'any'])
    # action='append' + nargs='+' yields a list of lists; flattened in
    # standardSolver()
    parser.add_argument('--itemsForbidden', '-f', help="Item not picked up during solving",
                        dest='itemsForbidden', nargs='+', default=[], action='append')
    parser.add_argument('--type', '-y', help="web or console", dest='type', nargs='?',
                        default='console', choices=['web', 'console'])
    parser.add_argument('--debug', '-d', help="activate debug logging", dest='debug', action='store_true')
    parser.add_argument('--firstItemsLog', '-1',
                        help="path to file where for each item type the first time it was found and where will be written (spoilers!)",
                        nargs='?', default=None, type=str, dest='firstItemsLog')
    parser.add_argument('--displayGeneratedPath', '-g', help="display the generated path (spoilers!)",
                        dest='displayGeneratedPath', action='store_true')
    # standard/interactive, web site
    parser.add_argument('--output', '-o', help="When called from the website, contains the result of the solver",
                        dest='output', nargs='?', default=None)
    # interactive, web site
    parser.add_argument('--interactive', '-i', help="Activate interactive mode for the solver",
                        dest='interactive', action='store_true')
    parser.add_argument('--state', help="JSON file of the Solver state (used in interactive mode)",
                        dest="state", nargs='?', default=None)
    parser.add_argument('--loc', help="Name of the location to action on (used in interactive mode)",
                        dest="loc", nargs='?', default=None)
    parser.add_argument('--action', help="Pickup item at location, remove last pickedup location, clear all (used in interactive mode)",
                        dest="action", nargs="?", default=None, choices=['init', 'add', 'remove', 'clear', 'get'])
    args = parser.parse_args()

    # default preset when none is given on the command line
    if args.presetFileName is None:
        args.presetFileName = 'standard_presets/regular.json'

    log.init(args.debug)

    if args.interactive == True:
        interactiveSolver(args)
    else:
        standardSolver(args)
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from operator import add
import random
from pyglet.gl import *
# Python 2 module: relies on the builtin `reduce` and on `range` returning a
# list (shuffled in place below).
n_vertices = 42

# Flat 3-component vertex data in [0, 1); v2f drops the z of each triple.
v3f_data = [v/float(n_vertices*3 + 10) for v in range(n_vertices * 3)]
v2f_data = reduce(add, zip(v3f_data[::3], v3f_data[1::3]))

# 4-component colors; c3f drops the alpha channel.
c4f_data = [v/float(n_vertices*4) for v in range(n_vertices * 4)]
c3f_data = reduce(add, zip(c4f_data[::4], c4f_data[1::4], c4f_data[2::4]))

# 4-, 3- and 2-component texture coordinates derived the same way.
t4f_data = [v/float(n_vertices*4 + 5) for v in range(n_vertices * 4)]
t3f_data = reduce(add, zip(t4f_data[::4], t4f_data[1::4], t4f_data[2::4]))
t2f_data = reduce(add, zip(t3f_data[::3], t3f_data[1::3]))

# Deterministically shuffled index order used by get_ordered_data().
index_data = range(n_vertices)
random.seed(1)
random.shuffle(index_data)
def get_ordered_data(data, dimensions):
    # Rearrange `data` (flat list of `dimensions`-sized attribute tuples)
    # into the shuffled order given by the module-level `index_data`.
    ordered = []
    for index in index_data:
        start = index * dimensions
        ordered.extend(data[start:start + dimensions])
    return ordered
# Shared GL feedback buffer (8096 floats) used by get_feedback().
feedback_buffer = (GLfloat * 8096)()

def get_feedback(func):
    '''Render `func` in GL feedback mode and return the rasterized geometry.

    Returns (vertices, colors, tex_coords) as flat float lists, 4 values
    per vertex for each stream (GL_4D_COLOR_TEXTURE layout). Only
    GL_POLYGON_TOKEN records are expected in the feedback stream.
    '''
    # Project in clip coords
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glOrtho(0, 1, 0, 1, -1, 1)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()

    glFeedbackBuffer(len(feedback_buffer), GL_4D_COLOR_TEXTURE, feedback_buffer)
    glRenderMode(GL_FEEDBACK)
    func()
    # Switching back to GL_RENDER returns the number of values written.
    size = glRenderMode(GL_RENDER)
    buffer = feedback_buffer[:size]  # ctypes slice -> plain Python list
    vertices = []
    colors = []
    tex_coords = []
    while buffer:
        token = int(buffer.pop(0))
        assert token == GL_POLYGON_TOKEN
        n = int(buffer.pop(0))
        # each polygon vertex record is 12 floats: xyzw, rgba, strq
        for i in range(n):
            vertices.extend(buffer[:4])
            colors.extend(buffer[4:8])
            tex_coords.extend(buffer[8:12])
            del buffer[:12]
    return vertices, colors, tex_coords
Note that the graphics unit tests no longer work with recent NVIDIA drivers.
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from operator import add
import random
from pyglet.gl import *
# Python 2 module: relies on the builtin `reduce` and on `range` returning a
# list (shuffled in place below).
n_vertices = 42

# Flat 3-component vertex data in [0, 1); v2f drops the z of each triple.
v3f_data = [v/float(n_vertices*3 + 10) for v in range(n_vertices * 3)]
v2f_data = reduce(add, zip(v3f_data[::3], v3f_data[1::3]))

# 4-component colors; c3f drops the alpha channel.
c4f_data = [v/float(n_vertices*4) for v in range(n_vertices * 4)]
c3f_data = reduce(add, zip(c4f_data[::4], c4f_data[1::4], c4f_data[2::4]))

# 4-, 3- and 2-component texture coordinates derived the same way.
t4f_data = [v/float(n_vertices*4 + 5) for v in range(n_vertices * 4)]
t3f_data = reduce(add, zip(t4f_data[::4], t4f_data[1::4], t4f_data[2::4]))
t2f_data = reduce(add, zip(t3f_data[::3], t3f_data[1::3]))

# Deterministically shuffled index order used by get_ordered_data().
index_data = range(n_vertices)
random.seed(1)
random.shuffle(index_data)
def get_ordered_data(data, dimensions):
    # Rearrange `data` (flat list of `dimensions`-sized attribute tuples)
    # into the shuffled order given by the module-level `index_data`.
    ordered = []
    for index in index_data:
        start = index * dimensions
        ordered.extend(data[start:start + dimensions])
    return ordered
# Shared GL feedback buffer (8096 floats) used by get_feedback().
feedback_buffer = (GLfloat * 8096)()

def get_feedback(func):
    '''Render `func` in GL feedback mode and return the rasterized geometry.

    Returns (vertices, colors, tex_coords) as flat float lists, 4 values
    per vertex for each stream (GL_4D_COLOR_TEXTURE layout). Only
    GL_POLYGON_TOKEN records are expected in the feedback stream.
    '''
    # Project in clip coords
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glOrtho(0, 1, 0, 1, -1, 1)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()

    glFeedbackBuffer(len(feedback_buffer), GL_4D_COLOR_TEXTURE, feedback_buffer)
    glRenderMode(GL_FEEDBACK)
    func()
    # Switching back to GL_RENDER returns the number of values written.
    size = glRenderMode(GL_RENDER)
    buffer = feedback_buffer[:size]  # ctypes slice -> plain Python list
    vertices = []
    colors = []
    tex_coords = []
    while buffer:
        token = int(buffer.pop(0))
        assert token == GL_POLYGON_TOKEN
        n = int(buffer.pop(0))
        # each polygon vertex record is 12 floats: xyzw, rgba, strq
        for i in range(n):
            vertices.extend(buffer[:4])
            colors.extend(buffer[4:8])
            tex_coords.extend(buffer[8:12])
            del buffer[:12]
    return vertices, colors, tex_coords
import sys
print >> sys.stderr, 'Note: Graphics tests fail with recent nvidia drivers'
print >> sys.stderr, ' due to reordering and optimisation of vertices'
print >> sys.stderr, ' before they are placed in the feedback queue.'
|
from logging import warn
from robot.libraries.BuiltIn import BuiltIn
# Shared Selenium2Library instance, fetched once at import time.
s2l = BuiltIn().get_library_instance('Selenium2Library')

# Module-level flag: True while a "wait until"-style keyword is running, so
# the failure-screenshot hook can skip expected intermediate retry failures.
currently_waiting_for_keyword_to_succeed = False
# A decorator that sets and unsets a special flag while "Wait until"-style
# keywords run, enforcing that screenshots are only taken when the failure
# results from a genuine test failure (not from an expected retry).
def setting_wait_until_flag(func):
    def decorator(*args, **kwargs):
        global currently_waiting_for_keyword_to_succeed
        currently_waiting_for_keyword_to_succeed = True
        try:
            # forward keyword arguments too; the original wrapper only
            # forwarded *args, so keyword-argument calls raised TypeError
            result = func(*args, **kwargs)
        except:
            # a failure escaping the wrapped keyword is genuine: capture
            # the screenshot here, then propagate
            capture_large_screenshot()
            raise
        finally:
            # always clear the flag, whatever happened
            currently_waiting_for_keyword_to_succeed = False
        return result
    return decorator
# Thin wrappers around the matching BuiltIn / Selenium2Library keywords; each
# runs its keyword with the wait-until flag set (see setting_wait_until_flag)
# so that intermediate retry failures do not trigger screenshots.

@setting_wait_until_flag
def wait_until_keyword_succeeds_without_screenshots(retry, retry_interval, keyword, *args):
    return BuiltIn().wait_until_keyword_succeeds(retry, retry_interval, keyword, *args)

@setting_wait_until_flag
def wait_until_element_is_visible_without_screenshots(locator, timeout=None, error=None):
    return s2l.wait_until_element_is_visible(locator, timeout, error)

@setting_wait_until_flag
def wait_until_element_is_not_visible_without_screenshots(locator, timeout=None, error=None):
    return s2l.wait_until_element_is_not_visible(locator, timeout, error)

@setting_wait_until_flag
def wait_until_page_contains_without_screenshots(text, timeout=None, error=None):
    return s2l.wait_until_page_contains(text, timeout, error)

@setting_wait_until_flag
def wait_until_page_contains_element_without_screenshots(locator, timeout=None, error=None):
    return s2l.wait_until_page_contains_element(locator, timeout, error)

@setting_wait_until_flag
def wait_until_page_does_not_contain_without_screenshots(text, timeout=None, error=None):
    return s2l.wait_until_page_does_not_contain(text, timeout, error)

@setting_wait_until_flag
def wait_until_element_contains_without_screenshots(locator, text, timeout=None, error=None):
    return s2l.wait_until_element_contains(locator, text, timeout, error)

@setting_wait_until_flag
def wait_until_element_does_not_contain_without_screenshots(locator, text, timeout=None, error=None):
    return s2l.wait_until_element_does_not_contain(locator, text, timeout, error)

@setting_wait_until_flag
def wait_until_element_is_enabled_without_screenshots(locator, timeout=None, error=None):
    return s2l.wait_until_element_is_enabled(locator, timeout, error)

@setting_wait_until_flag
def run_keyword_and_ignore_error_without_screenshots(keyword, *args):
    return BuiltIn().run_keyword_and_ignore_error(keyword, *args)

@setting_wait_until_flag
def run_keyword_and_return_status_without_screenshots(keyword, *args):
    return BuiltIn().run_keyword_and_return_status(keyword, *args)
def capture_page_screenshot_on_failure():
    # Skip screenshots for expected intermediate failures inside "wait
    # until" keywords; only genuine failures get captured.
    if currently_waiting_for_keyword_to_succeed:
        return
    capture_large_screenshot()
def capture_large_screenshot():
    # Temporarily grow the browser window to the full page height so the
    # screenshot captures the whole page, then restore the original height.
    currentWindow = s2l.get_window_size()
    # full document height via JS; falls back to 1080 when the body reports 0
    page_height = s2l._current_browser().execute_script("return document.body.clientHeight > 0 ? document.body.clientHeight : 1080;")
    # NOTE(review): assumes get_window_size() returns (width, height) -- confirm
    page_width = currentWindow[0]
    original_height = currentWindow[1]
    s2l.set_window_size(page_width, page_height)
    warn("Capturing a screenshot at URL " + s2l.get_location())
    s2l.capture_page_screenshot()
    s2l.set_window_size(page_width, original_height)
IFS-238: revert to the original code for computing the page height.
from logging import warn
from robot.libraries.BuiltIn import BuiltIn
# Shared Selenium2Library instance, fetched once at import time.
s2l = BuiltIn().get_library_instance('Selenium2Library')

# Module-level flag: True while a "wait until"-style keyword is running, so
# the failure-screenshot hook can skip expected intermediate retry failures.
currently_waiting_for_keyword_to_succeed = False
# A decorator that sets and unsets a special flag while "Wait until"-style
# keywords run, enforcing that screenshots are only taken when the failure
# results from a genuine test failure (not from an expected retry).
def setting_wait_until_flag(func):
    def decorator(*args, **kwargs):
        global currently_waiting_for_keyword_to_succeed
        currently_waiting_for_keyword_to_succeed = True
        try:
            # forward keyword arguments too; the original wrapper only
            # forwarded *args, so keyword-argument calls raised TypeError
            result = func(*args, **kwargs)
        except:
            # a failure escaping the wrapped keyword is genuine: capture
            # the screenshot here, then propagate
            capture_large_screenshot()
            raise
        finally:
            # always clear the flag, whatever happened
            currently_waiting_for_keyword_to_succeed = False
        return result
    return decorator
# Thin wrappers around the matching BuiltIn / Selenium2Library keywords; each
# runs its keyword with the wait-until flag set (see setting_wait_until_flag)
# so that intermediate retry failures do not trigger screenshots.

@setting_wait_until_flag
def wait_until_keyword_succeeds_without_screenshots(retry, retry_interval, keyword, *args):
    return BuiltIn().wait_until_keyword_succeeds(retry, retry_interval, keyword, *args)

@setting_wait_until_flag
def wait_until_element_is_visible_without_screenshots(locator, timeout=None, error=None):
    return s2l.wait_until_element_is_visible(locator, timeout, error)

@setting_wait_until_flag
def wait_until_element_is_not_visible_without_screenshots(locator, timeout=None, error=None):
    return s2l.wait_until_element_is_not_visible(locator, timeout, error)

@setting_wait_until_flag
def wait_until_page_contains_without_screenshots(text, timeout=None, error=None):
    return s2l.wait_until_page_contains(text, timeout, error)

@setting_wait_until_flag
def wait_until_page_contains_element_without_screenshots(locator, timeout=None, error=None):
    return s2l.wait_until_page_contains_element(locator, timeout, error)

@setting_wait_until_flag
def wait_until_page_does_not_contain_without_screenshots(text, timeout=None, error=None):
    return s2l.wait_until_page_does_not_contain(text, timeout, error)

@setting_wait_until_flag
def wait_until_element_contains_without_screenshots(locator, text, timeout=None, error=None):
    return s2l.wait_until_element_contains(locator, text, timeout, error)

@setting_wait_until_flag
def wait_until_element_does_not_contain_without_screenshots(locator, text, timeout=None, error=None):
    return s2l.wait_until_element_does_not_contain(locator, text, timeout, error)

@setting_wait_until_flag
def wait_until_element_is_enabled_without_screenshots(locator, timeout=None, error=None):
    return s2l.wait_until_element_is_enabled(locator, timeout, error)

@setting_wait_until_flag
def run_keyword_and_ignore_error_without_screenshots(keyword, *args):
    return BuiltIn().run_keyword_and_ignore_error(keyword, *args)

@setting_wait_until_flag
def run_keyword_and_return_status_without_screenshots(keyword, *args):
    return BuiltIn().run_keyword_and_return_status(keyword, *args)
def capture_page_screenshot_on_failure():
    # Skip screenshots for expected intermediate failures inside "wait
    # until" keywords; only genuine failures get captured.
    if currently_waiting_for_keyword_to_succeed:
        return
    capture_large_screenshot()
def capture_large_screenshot():
    # Temporarily grow the browser window to the full page height so the
    # screenshot captures the whole page, then restore the original height.
    currentWindow = s2l.get_window_size()
    # full document height via jQuery; falls back to 1080 when jQuery is absent
    page_height = s2l._current_browser().execute_script("return jQuery ? jQuery(document).height() : 1080;")
    # NOTE(review): assumes get_window_size() returns (width, height) -- confirm
    page_width = currentWindow[0]
    original_height = currentWindow[1]
    s2l.set_window_size(page_width, page_height)
    warn("Capturing a screenshot at URL " + s2l.get_location())
    s2l.capture_page_screenshot()
    s2l.set_window_size(page_width, original_height)
|
#!/usr/bin/python
# Copyright (c) Oskar Skog, 2016
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# This software is provided by the copyright holders and contributors "as is"
# and any express or implied warranties, including, but not limited to, the
# implied warranties of merchantability and fitness for a particular purpose
# are disclaimed. In no event shall the copyright holder or contributors be
# liable for any direct, indirect, incidental, special, exemplary, or
# consequential damages (including, but not limited to, procurement of
# substitute goods or services; loss of use, data, or profits; or business
# interruption) however caused and on any theory of liability, whether in
# contract, strict liability, or tort (including negligence or otherwise)
# arising in any way out of the use of this software, even if advised of the
# possibility of such damage.
'''A minesweeper that can be solved without guessing
Copyright (c) Oskar Skog, 2016
Released under the FreeBSD license.
A minesweeper that can be solved without guessing
=================================================
This script contains a curses based interface class, a command line
setup function, a line mode setup function and glue.
The engine is in a separate module (anonymine_engine).
The game support three different gametypes:
Moore neighbourhoods: each cell has eight neighbours.
Hexagonal cells: each cell has six neighbours.
Von Neumann neighbourhoods: each cell has four neighbours.
'''
import curses
import os
import sys
# Allow module names to be changed later.
import anonymine_engine as game_engine
# argparse: Losing the ability to take command line options is no biggy.
# traceback: Not needed unless shit happens.
# signal: Required to fix one resizing bug.
import traceback # Not required.
import signal # Not required.
# argparse is new in 2.7 and 3.2.
try:
import argparse
except:
pass
GAME_NAME = 'Anonymine'
# Filesystem-friendly variant of the name: lowercase, spaces become hyphens.
GAME_FILENAME = GAME_NAME.lower().replace(' ', '-')
GAME_VERSION = (0, 2, 17)
# GAME_VERSION MAY lag behind the version of the package when no change has
# been made to this file.
# Short copyright/version banner shown by the game.
GAME_CRAPTEXT = """{0} version {1}.{2}.{3}
Copyright (c) Oskar Skog, 2016
Released under the Simplified BSD license (2 clause).
\n""".format(GAME_NAME, GAME_VERSION[0], GAME_VERSION[1], GAME_VERSION[2])
class curses_game():
    '''Class for interface object for `engine.play_game(interface)`.

    This is a large part of the curses mode interface for Anonymine.
    The "engine" is in the module `anonymine_engine` and could be used
    by a different interface.

    The engine is currently (as of version 0.0.13) responsible for
    creating the "field", initialization of the field (filling it with
    mines) and the main loop of a game.
        anonymine_engine.game_engine(**params).play_game(interface)
    `interface` is an object that needs to provide these methods:
        input(self, field)
        output(self, field)
        anykey_cont(self)
    It is recommended that you read the documentation for the engine
    as well.

    Coordinates
    ===========

    The "cursor" marks the selected cell on the field.  It is a
    "field coordinate".

    A "virtual coordinate" is a coordinate on an imaginary screen.
    A virtual coordinate doesn't need to be in the "visible area".
    A virtual coordinate can easily be generated from a field
    coordinate.

    The visible area are those virtual coordinates that exist on
    the screen.  The visible area can be moved with
    `self.window_start`.

    A "real coordinate" is a coordinate on the screen and can be
    sent to the methods of `self.window`.
        real[0] = virtual[0] - window_start[0]
        real[1] = virtual[1] - window_start[1]

    The screen is `self.window`, not the real one.

    Externally used methods
    =======================

    interface = curses_game(cfgfile, gametype)
        `cfgfile` is the path to the cursescfg configuration file
            (the configuration file with the key bindings and
            textics.)
        `gametype` is either 'moore', 'hex' or 'neumann'.
        NOTICE: `curses_game.__init__` will enter curses mode.
        WARNING: `curses_game.__init__` MAY raise an exception
            while in curses mode if something is wrong with the
            configuration or if bad parameters are given.

    interface.leave()
        Leave curses mode.

    interface.input(engine)
        (This method is called by `engine.play_game`.)
        Take command from user and act on `engine.field`.

    interface.output(engine)
        (This method is called by `engine.play_game`.)
        Print the field (`engine.field`) to the screen.
        Prints the flags left text, invokes `self.print_square` or
        `self.print_hex`, and finally, `self.window.refresh`.

    interface.anykey_cont()
        (This method is called by `engine.play_game`.)
        Pause until input from user.
        "Press any key to continue..."

    Attributes
    ==========

    interface.window = curses.initscr()
        The screen.

    interface.cursor = (0, 0)
        The *field* coordinate.

    interface.window_start = [0, 0]
        Needed for translating virtual coordinates into real
        coordinates.

    Internally used methods
    =======================

    char, attributes = self.curses_output_cfg(key)
        "Parse" the configuration (cursescfg) and pick the most
        appropriate mode for the property `key`.  The color pair
        is properly attached to `attributes`.

    self.message(msg)
        Print `msg` at the bottom of the screen while in curses
        mode.  Invokes `self.window.refresh`.
        Used for printing the initialization message and by
        `self.anykey_cont`.

    self.travel(engine, direction)
        Modify `self.cursor` to select a new cell in the
        specified direction.

    self.print_char(x, y, cfg, char=None)
        Print a character at the virtual coordinate (x, y) using
        the textics property `cfg`.  `char` will override the
        character specified in the textics directive.

    self.move_visible_area(virtual_x, virtual_y, x_border, y_border)
        Modify `self.window_start` so (virtual_x, virtual_y) is
        visible on the screen, and not too close to any edge.

    self.print_square(field)
        Print Moore and Neumann fields and the "cursor" to the
        screen.  Does not invoke `self.window.refresh` and does
        not print the flags left text.

    self.print_hex(field)
        Print hexagonal fields and the "cursor" to the screen.
        Does not invoke `self.window.refresh` and does not print
        the flags left text.

    Constants
    =========

    self.travel_diffs
        A dictionary of dictionaries to translate a direction
        into DELTA-x and DELTA-y.  Used by `self.travel`.

    self.direction_keys
        A dictionary of lists representing the valid directions
        for a certain gametype.  This allows square and hex
        directions to share the same keys.
        Used by `self.input`.

    self.specials
        The method `get` of the field object may return one of
        these value or a normal number.  (Normal numbers will be
        printed using their digit as character and the textics
        directive 'number'.)
        This dictionary maps the special return values into
        textics properties.
    '''
    def __init__(self, cfgfile, gametype):
        '''Create interface object and enter curses mode.

        `cfgfile` is the path to the cursescfg file.
        `gametype` must be 'moore', 'hex' or 'neumann'.

        WARNING: This does not leave curses mode on exceptions!
        '''
        # Constants
        self.travel_diffs = {
            'square': {
                'up': (0, -1),
                'right': (1, 0),
                'down': (0, 1),
                'left': (-1, 0),
                'NE': (1, -1),
                'SE': (1, 1),
                'SW': (-1, 1),
                'NW': (-1, -1),
            },
            'hex-even': {
                'hex0': (0, -1),        #  5 0
                'hex1': (1, 0),         # 4   1
                'hex2': (0, 1),         #  3 2
                'hex3': (-1, 1),
                'hex4': (-1, 0),        # x - 1 and x on even
                'hex5': (-1, -1),       # rows.
            },
            'hex-odd': {
                'hex0': (1, -1),
                'hex1': (1, 0),         # x and x + 1 on odd
                'hex2': (1, 1),         # rows.
                'hex3': (0, 1),
                'hex4': (-1, 0),
                'hex5': (0, -1),
            }
        }
        self.direction_keys = {
            'hex': ['hex0', 'hex1', 'hex2', 'hex3', 'hex4', 'hex5'],
            'square': ['up', 'NE', 'right', 'SE', 'down', 'SW', 'left', 'NW'],
        }
        self.specials = {
            0: 'zero',
            None: 'free',
            'F': 'flag',
            'X': 'mine',
        }
        # Initialize...
        self.gametype = gametype
        self.window_start = [0, 0]      # Item assignment
        self.cursor = (0, 0)
        # Initialize curses.
        self.window = curses.initscr()
        curses.cbreak()
        curses.noecho()
        self.window.keypad(True)
        try:
            # Hide the cursor; remember the old visibility so `leave` can
            # restore it.  curs_set raises on terminals without support.
            self.old_cursor = curses.curs_set(0)
        except:
            pass
        curses.def_prog_mode()      # BUG #3 (See the file "BUGS".)
        # Check that we have a reasonable size on the window.
        height, width = self.window.getmaxyx()
        def toosmall():
            # Leave curses mode before complaining so the message is
            # actually readable, then exit.
            self.leave()
            sys.stdout.flush()
            output(sys.stderr, '\nSCREEN TOO SMALL\n')
            sys.stderr.flush()
            sys.exit(1)
        if self.gametype == 'hex' and (width < 10 or height < 8):
            toosmall()
        if self.gametype != 'hex' and (width < 7 or height < 4):
            toosmall()
        # Read the configuration.
        # SECURITY NOTE(review): cursescfg is eval()'d, so the file must be
        # trusted; never point `cfgfile` at untrusted input.
        self.cfg = eval(open(cfgfile).read())
        # Apply ord() automatically to the keys in 'curses-input'.
        for key in self.cfg['curses-input']:
            for index in range(len(self.cfg['curses-input'][key])):
                value = self.cfg['curses-input'][key][index]
                if isinstance(value, str):
                    self.cfg['curses-input'][key][index] = ord(value)
        # Initialize the color pairs.
        if curses.has_colors():
            self.use_color = True
            # TODO: Check that enough pairs are available.
            curses.start_color()
            for key in self.cfg['curses-output']:
                value = self.cfg['curses-output'][key]
                pair, ch, foreground, background, attr = value
                # The color names in the config are looked up as
                # curses.COLOR_* constants.
                foreground = eval('curses.COLOR_' + foreground)
                background = eval('curses.COLOR_' + background)
                curses.init_pair(pair, foreground, background)
        else:
            self.use_color = False
    def leave(self):
        '''Leave curses mode.'''
        curses.nocbreak()
        curses.echo()
        self.window.keypad(False)
        try:
            # `self.old_cursor` only exists if curs_set succeeded in
            # `__init__`; the bare except also covers that AttributeError.
            curses.curs_set(self.old_cursor)
        except:
            pass
        curses.endwin()
    def curses_output_cfg(self, key):
        '''Retrieve textics directive from cursescfg.

        char, attributes = self.curses_output_cfg(key)

        `key` is the property in cursescfg ('curses-output').
        `char` is a character and needs to be converted before passed
        to a curses function.
        `attributes` is an integer to be passed directly to a curses
        function.  Color is or'ed in.

        Retrieve a textics directive from the configuration file
        (cursescfg).  This function is responsible to choose the
        key with the correct property and best available mode.

            gametype    Best available mode     2nd best    worst
            'moore':    ':moore'                ':square'   ''
            'hex':      ':hex'                  ''
            'neumann'   ':neumann'              ':square'   ''

        This function will automatically convert the directive line
        into two directly useful parts:
            `char`: The character to be printed or `None`.
            `attributes`: The attributes to be used (curses).
                The color pair is also or'ed in.

        See also: the configuration file.
        '''
        cfg = self.cfg['curses-output']
        # Choose gametype specific entries if available
        if self.gametype == 'neumann':
            if key + ':neumann' in cfg:
                key += ':neumann'
            elif key + ':square' in cfg:
                key += ':square'
        elif self.gametype == 'hex':
            if key + ':hex' in cfg:
                key += ':hex'
        elif self.gametype == 'moore':
            if key + ':moore' in cfg:
                key += ':moore'
            elif key + ':square' in cfg:
                key += ':square'
        # Translate the key into (char, attributes)
        pair, char, ign1, ign2, attributes = cfg[key]
        if self.use_color:
            attributes |= curses.color_pair(pair)
        return char, attributes
    def message(self, msg):
        '''Print `msg` at the bottom of the screen while in curses mode.

        Invokes `self.window.refresh`.
        Used for printing the initialization message and by
        `self.anykey_cont`.
        '''
        height, width = self.window.getmaxyx()
        ign, attributes = self.curses_output_cfg('text')
        text_width = width - 4      # Pretty margin on the left.
        # Number of screen lines needed to wrap `msg`.
        lines = len(msg)//text_width + 1
        if lines <= height:
            for line in range(lines):
                self.window.addstr(
                    height - lines + line, 3,
                    msg[line*text_width:(line+1)*text_width],
                    attributes
                )
        else:
            pass        # A screen this small? Seriously?
        self.window.refresh()
    def anykey_cont(self):
        '''Press any key to continue...

        Wait for input from the user, discard the input.
        (This method is called by `engine.play_game`.)
        '''
        self.message('Press the "any" key to continue...')
        self.window.getch()
    def output(self, engine):
        '''This method is called by `engine.play_game`.

        It erases the window, prints the flags left message if it would
        fit on the screen, invokes the appropriate field printer and
        refreshes the screen.  (In that order.)
        '''
        # TODO: The background gets set ridiculously often.
        # Set the appropriate background.
        char, attributes = self.curses_output_cfg('background')
        self.window.bkgdset(32, attributes)     # 32 instead of `char`.
        # BUG #7: window.bkgdset causes a nasty issue when the background
        # character is not ' ' and color is unavailable.
        # Print the screen.
        self.window.erase()
        # Screen could resized at any time.
        self.height, self.width = self.window.getmaxyx()
        chunks = []
        if engine.game_status == 'pre-game':
            chunks.append('Choose your starting point.')
        if engine.game_status == 'play-game':
            if engine.field.flags_left is not None:
                chunks.append("Flags left: {0}".format(
                    engine.field.flags_left
                ))
        msg = ' '.join(chunks)
        # Only print the status line if it fits (3 column margin + 1).
        if len(msg) + 4 <= self.width:
            ign, attributes = self.curses_output_cfg('text')
            self.window.addstr(self.height - 1, 3, msg, attributes)
        # (Keeping the following outside the loop magically solves a resizing
        # bug that traces back to an `addch` in `self.print_char`.)
        # Lie to the field printer functions to preserve the text.
        self.height -= 1
        # Print the field.
        if self.gametype == 'hex':
            self.print_hex(engine.field)
        else:
            self.print_square(engine.field)
        # Remember that self.height has already been decremented by one.
        self.window.move(self.height, 0)        # BUG #3 (See the file "BUGS".)
        self.window.refresh()
    def input(self, engine):
        '''This method is called by `engine.play_game`.

        It receives a character from the user and interprets it.
        Invokes `self.travel` for the steering of the cursor.
        It doesn't do any output except for printing the field
        initialization message, and forcing the entire screen to be
        redrawn on unrecognised input (to de-fuck-up the screen).
        '''
        if self.gametype == 'hex':
            direction_keys = self.direction_keys['hex']
        else:
            direction_keys = self.direction_keys['square']
        look_for = ['reveal', 'flag'] + direction_keys
        # Receive input from player.
        ch = self.window.getch()
        # Interpret.
        command = None
        for key in look_for:
            if ch in self.cfg['curses-input'][key]:
                command = key
        # Act.
        if command == 'flag':
            engine.flag(self.cursor)
        elif command == 'reveal':
            if engine.game_status == 'pre-game':
                self.message('Initializing field... This may take a while.')
            curses.reset_shell_mode()       # BUG #3 (See the file "BUGS".)
            engine.reveal(self.cursor)
            curses.reset_prog_mode()        # BUG #3 (See the file "BUGS".)
            self.window.redrawwin()         # BUG #3 (See the file "BUGS".)
        elif command in direction_keys:
            self.travel(engine.field, command)
        else:
            # Unrecognised input: force a full redraw.
            self.window.redrawwin()
    def travel(self, field, direction):
        '''Move the cursor in the specified direction.

        It will not move past an edge (or in an otherwise impossible
        direction).  This is why the `field` argument is required.

        Valid directions when self.gametype == 'moore':
            'up', 'NE', 'right', 'SE', 'down', 'SW', 'left', 'NW'
        Valid directions when self.gametype == 'hex':
            'hex0', 'hex1', 'hex2', 'hex3', 'hex4', 'hex5'
        Valid directions when self.gametype == 'neumann':
            'up', 'right', 'down', 'left'

        The hexagonal directions are:
             5 0
            4   1
             3 2
        '''
        x, y = self.cursor
        # Find the appropriate dictionary of direction to DELTA-x and DELTA-y.
        if self.gametype != 'hex':
            key = 'square'
        elif y % 2:
            key = 'hex-odd'
        else:
            key = 'hex-even'
        # Move in the specified direction.
        x_diff, y_diff = self.travel_diffs[key][direction]
        new = x + x_diff, y + y_diff
        # Do nothing if it is impossible to move in the specified direction.
        x, y = new
        if x >= 0 and x < field.dimensions[0]:
            if y >= 0 and y < field.dimensions[1]:
                self.cursor = new
    def print_char(self, x, y, cfg, char=None):
        '''Print a character at a virtual coordinate with the right attributes.

        Print a character at the virtual coordinate (`x`, `y`)
        using the textics directive `cfg`.
        `char` is used to override the default character of the
        textics directive.
        '''
        real_x = x - self.window_start[0]
        real_y = y - self.window_start[1]
        # Verify that the coordinate is printable.
        if 0 <= real_x < self.width:
            if 0 <= real_y < self.height:
                cfg_char, attributes = self.curses_output_cfg(cfg)
                if char is None:
                    char = cfg_char
                self.window.addch(real_y, real_x, ord(char), attributes)
    def move_visible_area(self, virtual_x, virtual_y, x_border, y_border):
        '''Move the area that will be printed by `self.print_char`.

        Move the visible area (as printed by `self.print_char`) by
        modifying `self.window_start`, which is used for translating
        virtual coordinates (a step between field coordinates and
        screen coordinates.)

        `virtual_x` and `virtual_y` is the virtual coordinate.
        `x_border` is the minimal allowed border between the virtual
        coordinate and the left or the right side of the screen.
        `y_border` is the minimal allowed border between the virtual
        coordinate and the top or the bottom of the screen.
        '''
        real_x = virtual_x - self.window_start[0]
        real_y = virtual_y - self.window_start[1]
        # Push the visible area right/down if the coordinate is too close
        # to the right/bottom edge, and left/up for the other edges.
        if real_x + x_border > self.width - 1:
            self.window_start[0] = virtual_x - self.width + x_border + 1
        if real_x - x_border < 0:
            self.window_start[0] = virtual_x - x_border
        if real_y + y_border > self.height - 1:
            self.window_start[1] = virtual_y - self.height + y_border + 1
        if real_y - y_border < 0:
            self.window_start[1] = virtual_y - y_border
    def print_square(self, field):
        '''Helper function for `self.output` for non-hexagonal gametypes.

        Print a non-hexagonal field in the area
        0 to self.width-1 by 0 to self.height-2.
        Also prints the "cursor".
        It does not print the flags left text.

        It will invoke `self.move_visible_area` to keep the "cursor" on
        the screen.  It will use `self.print_char` to print characters
        on the screen.

             _______
            | X X X |
            | X(*)X |
            | X X X |
             -------
        '''
        # Move the visible area.
        # Compute the virtual locations on the screen and real locations.
        # Adjust the virtual coordinate of the visible area.
        #
        # Border = 1 cell.
        x, y = self.cursor
        self.move_visible_area(2*x+1, y, 3, 1)
        # Print all cells in a field.
        for cell in field.all_cells():
            x, y = cell
            # Print blank grid .
            self.print_char(2*x, y, 'grid', ' ')
            self.print_char(2*x+2, y, 'grid', ' ')
            # Print the actual cell.
            value = field.get(cell)
            if value not in self.specials:
                self.print_char(2*x+1, y, 'number', str(value))
            else:
                self.print_char(2*x+1, y, self.specials[value])
        # Print the "cursor".
        x, y = self.cursor
        self.print_char(2*x, y, 'cursor-l')
        self.print_char(2*x+2, y, 'cursor-r')
    def print_hex(self, field):
        r'''Helper function for `self.output` for the hexagonal gametype.

        Print a hexagonal field in the area
        0 to self.width-1 by 0 to self.height-2.
        Also prints the "cursor".
        It does not print the flags left text.

        It will invoke `self.move_visible_area` to keep the "cursor" on
        the screen.  It will use `self.print_char` to print characters
        on the screen.

               0000000000111111111122222222223
               0123456789012345678901234567890
            00  / \ / \ / \ / \ / \ / \ / \
            01 | X | X | X | X | X | X | X |
            02  \ / \ / \ / \ / \ / \ / \ / \
            03   | X | X | X | X | X | X | X |
            04  / \ / \ / \ / \ / \ / \ / \ /
            05 | X | X | X |(X)| X | X | X |
            06  \ / \ / \ / \ / \ / \ / \ / \
            07   | X | X | X | X | X | X | X |
            08    \ / \ / \ / \ / \ / \ / \ /
        '''
        # Define functions that translates field coordinates into
        # virtual screen coordinates.
        def fx(x, y): return 2 * (2*x + 1 + (y % 2))
        def fy(x, y): return 2*y + 1
        # Move the visible area.
        #
        # Compute the virtual locations on the screen and real locations.
        # Adjust the virtual coordinate of the visible area.
        # Border = 1 cell.
        x, y = self.cursor
        self.move_visible_area(fx(x, y), fy(x, y), 6, 3)
        # Print all cells in a field.
        for cell in field.all_cells():
            x = 2 * (2*cell[0] + 1 + (cell[1] % 2))
            y = 2*cell[1] + 1
            # Print blank grid.
            # Roof:
            self.print_char(x - 1, y - 1, 'grid', '/')
            self.print_char(x, y - 1, 'grid', ' ')
            self.print_char(x + 1, y - 1, 'grid', '\\')
            # Left wall:
            self.print_char(x - 2, y, 'grid', '|')
            self.print_char(x - 1, y, 'grid', ' ')
            # Right wall:
            self.print_char(x + 2, y, 'grid', '|')
            self.print_char(x + 1, y, 'grid', ' ')
            # Floor:
            self.print_char(x - 1, y + 1, 'grid', '\\')
            self.print_char(x, y + 1, 'grid', ' ')
            self.print_char(x + 1, y + 1, 'grid', '/')
            # Print the actual cell.
            value = field.get(cell)
            if value not in self.specials:
                self.print_char(x, y, 'number', str(value))
            else:
                self.print_char(x, y, self.specials[value])
        # Print the "cursor".
        x, y = self.cursor
        self.print_char(fx(x, y) - 1, fy(x, y), 'cursor-l')
        self.print_char(fx(x, y) + 1, fy(x, y), 'cursor-r')
def output(stream, content):
    '''Write `content` to `stream` and flush() without crashing.

    Due to BUG #9 syscalls may fail with EINTR after leaving curses mode,
    so both the write and the flush are retried until they succeed.

    Example:
        output(sys.stdout, 'Hello world!\n')
    '''
    def write():
        stream.write(content)
    def flush():
        stream.flush()
    for function in (write, flush):
        while True:
            try:
                function()
            except IOError as e:
                # BUGFIX: this used to catch the Python-3-only
                # `InterruptedError` in a separate clause, which raised
                # NameError on Python 2 (which the argparse fallback shows
                # this script still supports).  On Python 3, IOError is
                # OSError and therefore also covers InterruptedError, so
                # the EINTR check below handles both major versions.
                if 'EINTR' in dir(errno) and e.errno == errno.EINTR:
                    continue
                raise
            break
def convert_param(paramtype, s):
    '''Convert user input (potentially incorrect text) to the proper type.

    Convert the string `s` to the proper type.
    Raises ValueError if `s` cannot be converted (after printing an
    explanation to stderr), and TypeError if `paramtype` itself is not
    one of the recognised values.

    `paramtype` MUST be one of the recognised values:
        'str':          `s` is returned.
        'yesno':        "Yes" is True and "no" is False.
        'dimension':    An integer >= 4
        'minecount':    Two modes (automatic selection):
                            An integer >= 1 returned as an integer.
                            Or a percentage `str(float)+'%'` in
                            ]0%, 100%[ returned as a float:
        'gametype':     Mapping with case-insensitive keys and
                        lower-case values:
                            'a', 'neumann' and '4' to 'neumann'
                            'b', 'hex', 'hexagonal' and '6' to 'hex'
                            'c', 'moore' and '8' to 'moore'
        'reverse-minecount':    `s` is an integer or a float and the
                                returned value is a string that can be
                                converted back to `s` with 'minecount'.
    '''
    if paramtype == 'str':
        return s
    elif paramtype == 'yesno':
        if s.upper() in ('Y', 'YES'):
            return True
        elif s.upper() in ('N', 'NO'):
            return False
        else:
            output(sys.stderr, '"Yes" or "no" please. (WITHOUT quotes.)\n')
            raise ValueError
    elif paramtype == 'dimension':
        try:
            value = int(s)
        except ValueError:
            # Easter egg.
            #
            # ~85.6% of English words contain 'a', 'c', 'm' or 'p'.
            # All numbers under one thousand belongs to the ~14.4%.
            #
            # 194 of the numbers between 0 and 200 contain one or more of
            # the letters 'n', 'f' and 'h'.
            #
            # But zero, two, six and twelve aren't included.
            # So check for X and startswith('TW').
            #
            # Only ~10.2% of the words in my word list could be mistaken
            # for numbers.
            S = s.upper()
            if (
                ('A' not in S) and ('C' not in S) and
                ('N' in S or 'F' in S or 'H' in S or 'X' in S) or
                S.startswith('TW')
            ):
                output(sys.stderr, "Use digits.\n")
            else:
                output(sys.stderr,
                    'Invalid width or height;'
                    ' "{0}" is not an integer.\n'.format(s)
                )
            raise ValueError
        if value < 4:
            output(sys.stderr, 'Lowest allowed width or height is 4.\n')
            raise ValueError
        return value
    elif paramtype == 'minecount':
        if len(s) == 0:
            output(sys.stderr, 'No (empty) amount of mines specified.\n')
            raise ValueError
        if s[-1] == '%':
            # Percentage mode: returned as a float in ]0.0, 1.0[.
            try:
                value = float(s[:-1])/100
            except ValueError:
                output(sys.stderr,
                    "You can't have {0} percent of the cells to be mines;"
                    " {0} is not a number.\n".format(s)
                )
                raise ValueError
            if value >= 1.0 or value <= 0.0:
                output(sys.stderr,
                    'Percentage of the cells that will be mines'
                    ' must be in ]0%, 100%[.\n'
                )
                raise ValueError
        else:
            # Absolute mode: returned as a positive integer.
            try:
                value = int(s)
            except ValueError:
                output(sys.stderr,
                    "You can't have {0} mines;"
                    " {0} is not an integer\n".format(s)
                )
                raise ValueError
            if value <= 0:
                output(sys.stderr, 'You must have at least ONE mine.\n')
                raise ValueError
        return value
    elif paramtype == 'gametype':
        if s.upper() in ('A', 'NEUMANN', '4'):
            return 'neumann'
        elif s.upper() in ('B', 'HEX', 'HEXAGONAL', '6'):
            return 'hex'
        elif s.upper() in ('C', 'MOORE', '8'):
            return 'moore'
        else:
            output(sys.stderr, 'Invalid gametype. TODO: explain\n')
            raise ValueError
    elif paramtype == 'reverse-minecount':
        if isinstance(s, float):
            return '{0}%'.format(100 * s)
        else:
            return str(s)
    else:
        # BUGFIX: an unrecognised paramtype used to hang the program in an
        # infinite `while True: invalid_paramtype = True` loop.  Raise
        # TypeError (not ValueError, which `ask` silently swallows) so the
        # programming error surfaces immediately.
        raise TypeError('Unrecognised paramtype: {0!r}'.format(paramtype))
def ask(question, paramtype, default):
    '''Ask the user a question in line mode. (Not curses mode.)

    Ask the user a question (line mode; not curses mode) and return the
    answer after it has been converted.

    It will invoke `convert_param` to convert the string into the
    proper type and check that the user didn't say something stupid.
    `default` is what will be sent to `convert_param` if the user
    hits enter.

    `paramtype` will be sent to `convert_param`.  See the doc-string
    for `convert_param` to know what values of `paramtype` are permitted.

    NOTICE: This function will cause the program to exit if a
    KeyboardInterrupt is raised.
    '''
    while True:
        output(sys.stdout, '{0} [{1}]: '.format(question, default))
        try:
            # Due to BUG #9 syscalls may fail with EINTR after leaving
            # curses mode, so retry the read until it succeeds.
            while True:
                try:
                    reply = sys.stdin.readline().strip()
                except InterruptedError:
                    continue
                except IOError as exc:
                    if 'EINTR' in dir(errno):
                        if exc.errno == errno.EINTR:
                            continue
                    raise
                break
            # An empty reply means "use the default".
            return convert_param(paramtype, reply or default)
        except ValueError:
            # convert_param has already explained the problem; re-ask.
            continue
        except KeyboardInterrupt:
            output(sys.stdout, '\n')
            sys.exit(0)
def arg_input(default):
    '''Get configuration filepaths and game parameters from `sys.argv`.

    user_input_required, params = arg_input(default)

    This function will retrieve the game parameters, and paths to the
    configuration files, from `sys.argv`.  If `sys.argv` contains no
    game parameters, `user_input_required` will be True.

    `default` is a dictionary that MUST contain these keys:
        'width', 'height', 'mines', 'gametype', 'flagcount', 'guessless'
        and 'insult'.
    Their types are specified in the doc-string for `play_game`.

    `params` is a dictionary that contains either all of the keys that
    are required for `default`, or none of them.  It may also contain
    'enginecfg' and/or 'cursescfg' for specifying configuration files
    that are not in the ordinary search path.

    The program will exit if bogus parameters are given.

    NOTICE: If argparse couldn't be imported, this function will exit
    the program if there are any command line arguments.  If there
    aren't, it will return `True, {}`.
    '''
    # argparse is new in Python 2.7. Allow this to be run by the obsolete 2.6.
    try:
        dir(argparse)
    except NameError:
        if len(sys.argv) == 1:
            return True, {}
        else:
            output(sys.stderr, 'Cannot parse the arguments without argparse!\n')
            sys.exit(1)
    # argparse exists:
    default_s = {
        True: ' (Default)',
        False: ' (Not default)'
    }
    # Get the arguments sent on the command line.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='''
{0} is a curses mode minesweeper that checks if the field can be solved
without guessing and supports three different game types:
    Traditional; Moore neighbourhoods; 8 neighbours {1}
    Hexagonal; 6 neighbours {2}
    von Neumann neighbourhoods; 4 neighbours {3}
{4}
'''.format(
            GAME_NAME,
            default_s[default['gametype'] == 'moore'],
            default_s[default['gametype'] == 'hex'],
            default_s[default['gametype'] == 'neumann'],
            {
                True: 'By default, it will insult you for both winning and\n'
                    'losing a game that has been proven to be 100% winnable.',
                False: 'By default, it will not insult you for either winning '
                    'or losing.',
            }[default['insult']],
        )
    )
    # Configuration files.
    parser.add_argument(
        '-c', '--cursescfg', dest='cursescfg',
        help=(
            'The path to the configuration file for the key bindings '
            'and textics directives.\n'
            'Default is "~/.{0}/cursescfg" or "/etc/{0}/cursescfg".'.format(
                GAME_FILENAME
            )
        )
    )
    parser.add_argument(
        '-e', '--enginecfg', dest='enginecfg',
        help=(
            'The path to the configuration file for field '
            'initialization and misc. game engine functions.\n'
            'Default is "~/.{0}/enginecfg" or "/etc/{0}/enginecfg".'.format(
                GAME_FILENAME
            )
        )
    )
    # Dimensions and minecount.
    parser.add_argument(
        '-s', '--size', dest='size',
        help=(
            "The size of the field width+'x'+height. Ex. 30x16\n"
            "Default is {0}x{1}.".format(
                default['width'], default['height']
            )
        )
    )
    parser.add_argument(
        '-m', '--mines', dest='mines',
        help=(
            "The number of mines. OR the percentage.\n"
            "Default is {0}.".format(
                convert_param(
                    'reverse-minecount',
                    default['mines']
                ).replace('%', '%%')
            )
        )
    )
    # Gametype
    gametype = parser.add_mutually_exclusive_group()
    gametype.add_argument(
        '-4', '--neumann',
        action='store_const', dest='gametype', const='neumann',
        help=(
            "Use von Neumann neighbourhoods. (4 neighbours.)" +
            default_s[
                default['gametype'] == 'neumann'
            ]
        )
    )
    gametype.add_argument(
        '-6', '--hex', '--hexagonal',
        action='store_const', dest='gametype', const='hex',
        help=(
            "Use a hexagonal field. (6 neighbours.)" +
            default_s[
                default['gametype'] == 'hex'
            ]
        )
    )
    gametype.add_argument(
        '-8', '--moore', '--traditional',
        action='store_const', dest='gametype', const='moore',
        help=(
            "Traditional minesweeper; Moore neighbourhoods. (8)" +
            default_s[
                default['gametype'] == 'moore'
            ]
        )
    )
    # Bools.
    flagcount = parser.add_mutually_exclusive_group()
    flagcount.add_argument(
        '-f', '--flagcount', dest='flagcount', action='store_true',
        help=(
            "Show how many flags are left." + default_s[
                default['flagcount']
            ]
        )
    )
    flagcount.add_argument(
        '-F', '--no-flagcount', dest='noflagcount', action='store_true',
        help=(
            "Don't Show how many flags are left." + default_s[
                not default['flagcount']
            ]
        )
    )
    guessless = parser.add_mutually_exclusive_group()
    guessless.add_argument(
        '-g', '--guessless', dest='guessless', action='store_true',
        help=(
            "Play a minesweeper that can be solved without guessing." +
            default_s[
                default['guessless']
            ]
        )
    )
    guessless.add_argument(
        '-G', '--no-guessless', dest='noguessless', action='store_true',
        help=(
            "Play with the risk of having to guess. " +
            # BUGFIX: the "(Default)" marker for -G used to be computed
            # from default['flagcount'] (copy-paste error); it must
            # reflect the guessless default.
            "Large fields will be initialized much faster." + default_s[
                not default['guessless']
            ]
        )
    )
    insult = parser.add_mutually_exclusive_group()
    insult.add_argument(
        '-r', '--rude', dest='insult', action='store_true',
        help=(
            "<std>" + default_s[
                default['insult']
            ]
        )
    )
    insult.add_argument(
        '-n', '--nice', dest='noinsult', action='store_true',
        help=(
            "(more polite setting)" + default_s[
                not default['insult']
            ]
        )
    )
    #
    # Parse the args and store the params.
    args = parser.parse_args()
    params = {}
    user_input_required = True
    error = False
    # Size, mines and gametype.
    if args.size:
        user_input_required = False
        try:
            params['width'], params['height'] = map(
                lambda x: convert_param('dimension', x),
                args.size.split('x')
            )
        except ValueError:
            error = True
            output(sys.stderr,
                'Error with "--size": Explanation above.\n'
            )
        except:
            error = True
            output(sys.stderr, 'Error with "--size": UNKNOWN\n')
            raise
    if args.mines:
        user_input_required = False
        try:
            params['mines'] = convert_param('minecount', args.mines)
        except ValueError:
            error = True
            output(sys.stderr,
                'Error with "--mines": Explanation above.\n'
            )
        except:
            error = True
            output(sys.stderr, 'Error with "--mines": UNKNOWN\n')
            raise
    if args.gametype:
        user_input_required = False
        assert args.gametype in ('moore', 'hex', 'neumann')
        params['gametype'] = args.gametype
    # flagcount, guessless, insult
    if args.flagcount:
        user_input_required = False
        params['flagcount'] = True
    if args.noflagcount:
        user_input_required = False
        params['flagcount'] = False
    if args.guessless:
        user_input_required = False
        params['guessless'] = True
    if args.noguessless:
        user_input_required = False
        params['guessless'] = False
    if args.insult:
        user_input_required = False
        params['insult'] = True
    if args.noinsult:
        user_input_required = False
        params['insult'] = False
    # Configuration
    if args.cursescfg:
        params['cursescfg'] = args.cursescfg
    if args.enginecfg:
        params['enginecfg'] = args.enginecfg
    # Deal with error and user_input_required.
    if error:
        sys.exit(1)
    if not user_input_required:
        # Any game parameter on the command line implies non-interactive
        # mode; fill in defaults for everything else.
        for key in default:
            if key not in params:
                params[key] = default[key]
    return user_input_required, params
def user_input(default, cursescfg_path):
    '''Retrieve game parameters from the user.

    `cursescfg_path` is the path to the configuration file that happens
    to contain the key bindings and their documentation, which might be
    displayed by this function.

    `default` is a dictionary that MUST contain these keys:
        'width', 'height', 'mines', 'gametype', 'flagcount', 'guessless'
        and 'insult'.
    Their types are specified in the doc-string for `play_game`.

    `user_input` will return dictionary containing the same keys.
    '''
    parameters = {}
    booldefault = {True: 'Yes', False: 'No'}
    parameters['width'] = ask(
        'Width of the playing field',
        'dimension',
        default['width']
    )
    parameters['height'] = ask(
        'Height of the playing field',
        'dimension',
        default['height']
    )
    # MUST ask for dimensions before for the # of mines.
    # Default is 16% here
    parameters['mines'] = ask(
        'Mines (# or %)',
        'minecount',
        convert_param('reverse-minecount', default['mines'])
    )
    parameters['gametype'] = ask(
        'A: Neumann, B: Hexagonal or C: Moore',
        'gametype',
        default['gametype']
    )
    parameters['flagcount'] = ask(
        'Do you want to know how many flags you have left?',
        'yesno',
        booldefault[default['flagcount']]
    )
    parameters['guessless'] = ask(
        '100% solvable field (no guessing required)',
        'yesno',
        booldefault[default['guessless']]
    )
    # MUST ask for guessless mode before polite mode.
    if parameters['guessless']:
        # BUGFIX: the default shown for "Polite mode?" used to be derived
        # from default['guessless'] (copy-paste error); polite mode is the
        # inverse of 'insult', so the default must come from
        # default['insult'].
        parameters['insult'] = not ask(
            'Polite mode?',
            'yesno',
            booldefault[not default['insult']]
        )
    else:
        # BUGFIX: always return the full set of documented keys.  Insults
        # are only fair for guaranteed-solvable games, so force polite
        # mode here (play_game enforces the same rule).
        parameters['insult'] = False
    # Ask if the user wants to know the key bindings.
    if ask('Show key bindings?', 'yesno', 'No'):
        # NOTE: cursescfg is eval()'d; the file must be trusted.
        cursescfg = eval(open(cursescfg_path).read())
        try:
            output(sys.stdout, cursescfg['pre-doc'])
            if parameters['gametype'] == 'hex':
                output(sys.stdout, cursescfg['doc-hex'])
            else:
                output(sys.stdout, cursescfg['doc-square'])
        except KeyError:
            output(sys.stdout,
                "The configuration file format for cursescfg"
                " has been updated since version 0.0.5.\n"
                "You'll have to guess what keys to press or"
                " update the configuration files.\n"
            )
        output(sys.stdout,
            "\nPressing an unrecognised key will refresh the screen.\n"
            "^C (Ctrl-c) to quit a game or the game.\n\n"
        )
        ask('Press enter to continue...', 'str', '')
    return parameters
def highscores_add_entry(title, prompt):
    '''Input callback for `game_engine.hiscores.add_entry`.

    Prints `title`, then keeps prompting until the player's name can be
    read without a decoding error.
    '''
    output(sys.stdout, title + '\n')
    prompt_text = prompt + ': '
    while True:
        output(sys.stdout, prompt_text)
        sys.stdout.flush()
        try:
            return ask(prompt, 'str', '')
        except UnicodeDecodeError:
            # Bad bytes on stdin; complain and re-prompt.
            output(sys.stderr, 'Decoding error.\n')
def highscores_display(title, headers, rows):
    '''Output formatter function for `game_engine.hiscores.display`.

    Renders `title` over an underline, then `headers` and `rows` as a
    left-aligned table whose column widths fit their longest cell.
    '''
    # Create all rows to be displayed: headers, a '=' underline row, a
    # blank spacer row, then the score rows.
    underline = ['=' * len(col) for col in headers]
    spacer = ['' for col in headers]
    table = [headers, underline, spacer] + rows
    # Width of each column: longest cell in that column plus one space.
    widths = [
        max(len(cell) for cell in column) + 1
        for column in zip(*table)
    ]
    # Print the title with an underscore line above it.
    output(sys.stdout, '\n' + '_' * len(title) + '\n')
    output(sys.stdout, title + '\n\n')
    # Print each row, padding every cell to its column width.
    for row in table:
        for col_index, width in enumerate(widths):
            cell = row[col_index]
            output(sys.stdout, cell)
            output(sys.stdout, ' ' * (width - len(cell)))
            output(sys.stdout, ' ')
        output(sys.stdout, '\n')
    output(sys.stdout, '\n')
def play_game(parameters):
    '''Play a custom game of minesweeper.

    When called with all required parameters,
    one game of minesweeper will be played.

    NOTICE: This function does not expect incorrect parameters!
    WARNING: If anything, except a KeyboardInterrupt, happens during an
        actual game, this function will raise an exception without
        leaving curses mode.

    `parameters` is a dictionary which MUST contain all these keys:
        'width'         Integer >= 4
        'height'        Integer >= 4
        'mines'         Integer >= 1 or float in ]0.0, 1.0[
        'gametype'      'moore', 'hex' or 'neumann'
        'flagcount'     A boolean (Show flag count)
        'guessless'     A boolean (no guessing required)
        'insult'        A boolean (!polite mode)
        'enginecfg'     The path to the configuration file for the
                        game engine.
        'cursescfg'     The path to the configuration file for key
                        bindings and textics customisation.
    '''
    # A float minecount is a fraction of the field area; convert it to an
    # absolute number of mines, rounded to nearest.
    if isinstance(parameters['mines'], float):
        area = parameters['width'] * parameters['height']
        mines = int(parameters['mines'] * area + 0.5)
        parameters['mines'] = mines
    # WORKAROUND for a special bug.
    # BUGFIX: this used to read `parameters['mines'] == 1` -- a comparison
    # with no effect -- so a rounded-to-zero minecount was never corrected.
    if parameters['mines'] == 0: # Test: 42
        parameters['mines'] = 1 # Test: 0
    # Don't blame the player when it's not the players fault.
    if not parameters['guessless']:
        parameters['insult'] = False
    engine = game_engine.game_engine(parameters['enginecfg'], **parameters)
    interface = curses_game(
        parameters['cursescfg'],
        parameters['gametype'],
    )
    try:
        win, highscores = engine.play_game(interface)
    except KeyboardInterrupt:
        # The player aborted; leave curses mode and skip highscores.
        interface.leave()
        return
    interface.leave()
    if parameters['insult']:
        if win:
            output(sys.stdout,
                '\n\n"Congratulations", you won the unlosable game.\n')
        else:
            output(sys.stdout, '\n\nYou moron, you lost the unlosable game!\n')
    ask('Press enter to continue...', 'str', '')
    highscores.add_entry(highscores_add_entry)
    title, headers, rows = highscores.display()
    highscores_display(title, headers, rows)
def main():
    '''Entry point: find configuration, ask for parameters, play.
    Invoke `arg_input` to find configuration files with non-standard
    paths.
    Find the remaining configuration files.
    NOTICE: As of version 0.0.13, it does not check if the
        configuration file is valid, only if it exists.
    Invoke `user_input` if `arg_input` didn't return game parameters.
    Invoke `play_game`.
    Invoke `ask` in a while loop to ask the player if [s]he wants to
    play again. And while True, invoke `play_game`.
    WARNING: MAY raise an exception while in curses mode.
    '''
    default = {
        'width': 20,
        'height': 20,
        'mines': .2,
        'gametype': 'moore',
        'flagcount': True,
        'guessless': True,
        'insult': True,
    }
    interactive, parameters = arg_input(default)
    # Handle the configuration filepaths.
    cfgfiles = {
        'enginecfg': None,
        'cursescfg': None,
    }
    error = False
    for cfgfile in cfgfiles:
        if cfgfile in parameters:
            # Path explicitly given on the command line.
            cfgfiles[cfgfile] = parameters[cfgfile]
        else:
            # Search the standard locations in priority order.
            locations = (
                os.path.expanduser('~/.' + GAME_FILENAME + '/' + cfgfile),
                sys.prefix + '/etc/' + GAME_FILENAME + '/' + cfgfile,
                '/etc/' + GAME_FILENAME + '/' + cfgfile,
            )
            for location in locations:
                try:
                    # Existence/readability probe only.
                    # BUGFIX: close the handle immediately instead of
                    # leaking an open file object.
                    open(location).close()
                    cfgfiles[cfgfile] = location
                    break
                except IOError:
                    pass
            else:
                output(sys.stderr,'{0} not found.\n'.format(cfgfile))
                error = True
    if error:
        sys.exit(1)
    if interactive:
        output(sys.stdout,GAME_CRAPTEXT)
        output(sys.stdout,
            'You can quit the game with the interrupt signal. (Ctrl + c)\n\n'
        )
        output(sys.stdout,
            'How do you want your game to be set up? Write in the values'
            ' and press Enter.\nLeave blank to use the default.\n\n'
        )
        parameters = user_input(default, cfgfiles['cursescfg'])
    parameters.update(cfgfiles)
    play_game(parameters)
    while ask('Play again?', 'yesno', 'Yes'):
        parameters = user_input(default, cfgfiles['cursescfg'])
        parameters.update(cfgfiles)
        play_game(parameters)
    # NOTE: `assert` is stripped under `python -O`; kept as the original
    # tongue-in-cheek root check rather than a real security measure.
    assert os.geteuid(), "Why the-fuck(7) are you playing games as root?"
if __name__ == '__main__':
    # Force InterruptedError to be defined.
    # InterruptedError is new in Python 3.3; on older interpreters the
    # name is aliased so the EINTR-retry loops elsewhere can still
    # reference it in their `except` clauses.
    try:
        InterruptedError
    except NameError:
        # Both are not going to be expected at the same time.
        InterruptedError = SystemExit
    try:
        main()
    except SystemExit as e:
        # Cause the interpreter to exit with the expected status.
        # NOTE(review): `e.code` may be None or a str for a bare
        # `sys.exit()`/`sys.exit('msg')` — `os._exit` needs an int;
        # presumably every exit in this program passes an int. Confirm.
        os._exit(e.code)
    except game_engine.security_alert as e:
        # Engine detected tampering: restore the terminal, report, die.
        try:
            curses.endwin()
        except:
            pass
        # NOTE(review): `e.message` is a Python 2 idiom; verify the
        # exception class defines `.message` on Python 3.
        output(sys.stderr,'Security alert: ' + e.message + '\n')
        os._exit(1)
    except:
        # Get the traceback without fucking up the terminal.
        # (Capture first, leave curses mode, then print.)
        exception = sys.exc_info()
        try:
            curses.endwin()
        except:
            pass
        traceback.print_exception(*exception)
        os._exit(1)
    # os._exit skips atexit/cleanup; used deliberately throughout this
    # block so the exit status survives any interpreter-level handlers.
    os._exit(0)
Forgot to import errno
#!/usr/bin/python
# Copyright (c) Oskar Skog, 2016
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# This software is provided by the copyright holders and contributors "as is"
# and any express or implied warranties, including, but not limited to, the
# implied warranties of merchantability and fitness for a particular purpose
# are disclaimed. In no event shall the copyright holder or contributors be
# liable for any direct, indirect, incidental, special, exemplary, or
# consequential damages (including, but not limited to, procurement of
# substitute goods or services; loss of use, data, or profits; or business
# interruption) however caused and on any theory of liability, whether in
# contract, strict liability, or tort (including negligence or otherwise)
# arising in any way out of the use of this software, even if advised of the
# possibility of such damage.
'''A minesweeper that can be solved without guessing
Copyright (c) Oskar Skog, 2016
Released under the FreeBSD license.
A minesweeper that can be solved without guessing
=================================================
This script contains a curses based interface class, a command line
setup function, a line mode setup function and glue.
The engine is in a separate module (anonymine_engine).
The game support three different gametypes:
Moore neighbourhoods: each cell has eight neighbours.
Hexagonal cells: each cell has six neighbours.
Von Neumann neighbourhoods: each cell has four neighbours.
'''
import curses
import os
import sys
import errno
# Allow module names to be changed later.
import anonymine_engine as game_engine
# argparse: Losing the ability to take command line options is no biggy.
# traceback: Not needed unless shit happens.
import traceback # Not required.
# argparse is new in 2.7 and 3.2.
try:
import argparse
except:
pass
# Human-readable name, and the name used for installed files/directories
# (lower-case, spaces replaced by hyphens).
GAME_NAME = 'Anonymine'
GAME_FILENAME = GAME_NAME.lower().replace(' ', '-')
GAME_VERSION = (0, 2, 17)
# GAME_VERSION MAY lag behind the version of the package when no change has
# been made to this file.
# Banner printed at interactive startup (name, version, license blurb).
GAME_CRAPTEXT = """{0} version {1}.{2}.{3}
Copyright (c) Oskar Skog, 2016
Released under the Simplified BSD license (2 clause).
\n""".format(GAME_NAME, GAME_VERSION[0], GAME_VERSION[1], GAME_VERSION[2])
class curses_game():
    '''Class for interface object for `engine.play_game(interface)`.
    This is a large part of the curses mode interface for Anonymine.
    The "engine" is in the module `anonymine_engine` and could be used
    by a different interface.
    The engine is currently (as of version 0.0.13) responsible for
    creating the "field", initialization of the field (filling it with
    mines) and the main loop of a game.
        anonymine_engine.game_engine(**params).play_game(interface)
    `interface` is an object that needs to provide these methods:
        input(self, field)
        output(self, field)
        anykey_cont(self)
    It is recommended that you read the documentation for the engine
    as well.
    Coordinates
    ===========
    The "cursor" marks the selected cell on the field. It is a
    "field coordinate".
    A "virtual coordinate" is a coordinate on an imaginary screen.
    A virtual coordinate doesn't need to be in the "visible area".
    A virtual coordinate can easily be generated from a field
    coordinate.
    The visible area are those virtual coordinates that exist on
    the screen. The visible area can be moved with
    `self.window_start`.
    A "real coordinate" is a coordinate on the screen and can be
    sent to the methods of `self.window`.
        real[0] = virtual[0] - window_start[0]
        real[1] = virtual[1] - window_start[1]
    The screen is `self.window`, not the real one.
    Externally used methods
    =======================
    interface = curses_game(cfgfile, gametype)
        `cfgfile` is the path to the cursescfg configuration file
            (the configuration file with the key bindings and
            textics.)
        `gametype` is either 'moore', 'hex' or 'neumann'.
        NOTICE: `curses_game.__init__` will enter curses mode.
        WARNING: `curses_game.__init__` MAY raise an exception
            while in curses mode if something is wrong with the
            configuration or if bad parameters are given.
    interface.leave()
        Leave curses mode.
    interface.input(engine)
        (This method is called by `engine.play_game`.)
        Take command from user and act on `engine.field`.
    interface.output(engine)
        (This method is called by `engine.play_game`.)
        Print the field (`engine.field`) to the screen.
        Prints the flags left text, invokes `self.print_square` or
        `self.print_hex`, and finally, `self.window.refresh`.
    interface.anykey_cont()
        (This method is called by `engine.play_game`.)
        Pause until input from user.
        "Press any key to continue..."
    Attributes
    ==========
    interface.window = curses.initscr()
        The screen.
    interface.cursor = (0, 0)
        The *field* coordinate.
    interface.window_start = [0, 0]
        Needed for translating virtual coordinates into real
        coordinates.
    Internally used methods
    =======================
    char, attributes = self.curses_output_cfg(key)
        "Parse" the configuration (cursescfg) and pick the most
        appropriate mode for the property `key`. The color pair
        is properly attached to `attributes`.
    self.message(msg)
        Print `msg` at the bottom of the screen while in curses
        mode. Invokes `self.window.refresh`.
        Used for printing the initialization message and by
        `self.anykey_cont`.
    self.travel(engine, direction)
        Modify `self.cursor` to select a new cell in the
        specified direction.
    self.print_char(x, y, cfg, char=None)
        Print a character at the virtual coordinate (x, y) using
        the textics property `cfg`. `char` will override the
        character specified in the textics directive.
    self.move_visible_area(virtual_x,virtual_y,x_border,y_border)
        Modify `self.window_start` so (virtual_x, virtual_y) is
        visible on the screen, and not too close to any edge.
    self.print_square(field)
        Print Moore and Neumann fields and the "cursor" to the
        screen. Does not invoke `self.window.refresh` and does
        not print the flags left text.
    self.print_hex(field)
        Print hexagonal fields and the "cursor" to the screen.
        Does not invoke `self.window.refresh` and does not print
        the flags left text.
    Constants
    =========
    self.travel_diffs
        A dictionary of dictionaries to translate a direction
        into DELTA-x and DELTA-y. Used by `self.travel`.
    self.direction_keys
        A dictionary of lists representing the valid directions
        for a certain gametype. This allows square and hex
        directions to share the same keys.
        Used by `self.input`.
    self.specials
        The method `get` of the field object may return one of
        these value or a normal number. (Normal numbers will be
        printed using their digit as character and the textics
        directive 'number'.)
        This dictionary maps the special return values into
        textics properties.
    '''
    def __init__(self, cfgfile, gametype):
        '''Create interface object and enter curses mode.
        `cfgfile` is the path to the cursescfg file.
        `gametype` must be 'moore', 'hex' or 'neumann'.
        WARNING: This does not leave curses mode on exceptions!
        '''
        # Constants
        # (DELTA-x, DELTA-y) per direction. Hexagonal fields need two
        # tables because odd and even rows are offset half a cell.
        self.travel_diffs = {
            'square': {
                'up': (0, -1),
                'right': (1, 0),
                'down': (0, 1),
                'left': (-1, 0),
                'NE': (1, -1),
                'SE': (1, 1),
                'SW': (-1, 1),
                'NW': (-1, -1),
            },
            'hex-even': {
                'hex0': (0, -1),        # 5 0
                'hex1': (1, 0),         # 4   1
                'hex2': (0, 1),         # 3 2
                'hex3': (-1, 1),
                'hex4': (-1, 0),        # x - 1 and x on even
                'hex5': (-1, -1),       # rows.
            },
            'hex-odd': {
                'hex0': (1, -1),
                'hex1': (1, 0),         # x and x + 1 on odd
                'hex2': (1, 1),         # rows.
                'hex3': (0, 1),
                'hex4': (-1, 0),
                'hex5': (0, -1),
            }
        }
        self.direction_keys = {
            'hex': ['hex0', 'hex1', 'hex2', 'hex3', 'hex4', 'hex5'],
            'square': ['up', 'NE', 'right', 'SE', 'down', 'SW', 'left', 'NW'],
        }
        # Special `field.get` return values -> textics property names.
        self.specials = {
            0: 'zero',
            None: 'free',
            'F': 'flag',
            'X': 'mine',
        }
        # Initialize...
        self.gametype = gametype
        self.window_start = [0, 0]      # Item assignment
        self.cursor = (0, 0)
        # Initialize curses.
        self.window = curses.initscr()
        curses.cbreak()
        curses.noecho()
        self.window.keypad(True)
        try:
            # curs_set may be unsupported by the terminal; remember the
            # old visibility so `leave` can restore it.
            self.old_cursor = curses.curs_set(0)
        except:
            pass
        curses.def_prog_mode()          # BUG #3 (See the file "BUGS".)
        # Check that we have a reasonable size on the window.
        height, width = self.window.getmaxyx()
        def toosmall():
            # Bail out cleanly: leave curses mode before complaining.
            self.leave()
            sys.stdout.flush()
            output(sys.stderr,'\nSCREEN TOO SMALL\n')
            sys.stderr.flush()
            sys.exit(1)
        if self.gametype == 'hex' and (width < 10 or height < 8):
            toosmall()
        if self.gametype != 'hex' and (width < 7 or height < 4):
            toosmall()
        # Read the configuration.
        # NOTE(review): `eval` on the config file assumes it is trusted
        # local data; do not point this at untrusted input.
        self.cfg = eval(open(cfgfile).read())
        # Apply ord() automatically to the keys in 'curses-input'.
        for key in self.cfg['curses-input']:
            for index in range(len(self.cfg['curses-input'][key])):
                value = self.cfg['curses-input'][key][index]
                if isinstance(value, str):
                    self.cfg['curses-input'][key][index] = ord(value)
        # Initialize the color pairs.
        if curses.has_colors():
            self.use_color = True
            # TODO: Check that enough pairs are available.
            curses.start_color()
            for key in self.cfg['curses-output']:
                value = self.cfg['curses-output'][key]
                pair, ch, foreground, background, attr = value
                foreground = eval('curses.COLOR_' + foreground)
                background = eval('curses.COLOR_' + background)
                curses.init_pair(pair, foreground, background)
        else:
            self.use_color = False
    def leave(self):
        '''Leave curses mode.'''
        # Undo everything `__init__` changed, in reverse order.
        curses.nocbreak()
        curses.echo()
        self.window.keypad(False)
        try:
            curses.curs_set(self.old_cursor)
        except:
            pass
        curses.endwin()
    def curses_output_cfg(self, key):
        '''Retrieve textics directive from cursescfg.
        char, attributes = self.curses_output_cfg(key)
        `key` is the property in cursescfg ('curses-output').
        `char` is a character and needs to be converted before passed
        to a curses function.
        `attributes` is an integer to be passed directly to a curses
        function. Color is or'ed in.
        Retrieve a textics directive from the configuration file
        (cursescfg). This function is responsible to choose the
        key with the correct property and best available mode.
            gametype    Best available mode     2nd best        worst
            'moore':    ':moore'                ':square'       ''
            'hex':      ':hex'                  ''
            'neumann'   ':neumann'              ':square'       ''
        This function will automatically convert the directive line
        into two directly useful parts:
            `char`: The character to be printed or `None`.
            `attributes`: The attributes to be used (curses).
                The color pair is also or'ed in.
        See also: the configuration file.
        '''
        cfg = self.cfg['curses-output']
        # Choose gametype specific entries if available
        if self.gametype == 'neumann':
            if key + ':neumann' in cfg:
                key += ':neumann'
            elif key + ':square' in cfg:
                key += ':square'
        elif self.gametype == 'hex':
            if key + ':hex' in cfg:
                key += ':hex'
        elif self.gametype == 'moore':
            if key + ':moore' in cfg:
                key += ':moore'
            elif key + ':square' in cfg:
                key += ':square'
        # Translate the key into (char, attributes)
        pair, char, ign1, ign2, attributes = cfg[key]
        if self.use_color:
            attributes |= curses.color_pair(pair)
        return char, attributes
    def message(self, msg):
        '''Print `msg` at the bottom of the screen while in curses mode.
        Invokes `self.window.refresh`.
        Used for printing the initialization message and by
        `self.anykey_cont`.
        '''
        height, width = self.window.getmaxyx()
        ign, attributes = self.curses_output_cfg('text')
        text_width = width - 4      # Pretty margin on the left.
        # Word-blind wrap: split `msg` into `text_width` sized slices.
        lines = len(msg)//text_width + 1
        if lines <= height:
            for line in range(lines):
                self.window.addstr(
                    height - lines + line, 3,
                    msg[line*text_width:(line+1)*text_width],
                    attributes
                )
        else:
            pass        # A screen this small? Seriously?
        self.window.refresh()
    def anykey_cont(self):
        '''Press any key to continue...
        Wait for input from the user, discard the input.
        (This method is called by `engine.play_game`.)
        '''
        self.message('Press the "any" key to continue...')
        self.window.getch()
    def output(self, engine):
        '''This method is called by `engine.play_game`.
        It erases the window, prints the flags left message if it would
        fit on the screen, invokes the appropriate field printer and
        refreshes the screen. (In that order.)
        '''
        # TODO: The background gets set ridiculously often.
        # Set the appropriate background.
        char, attributes = self.curses_output_cfg('background')
        self.window.bkgdset(32, attributes)     # 32 instead of `char`.
        # BUG #7: window.bkgdset causes a nasty issue when the background
        # character is not ' ' and color is unavailable.
        # Print the screen.
        self.window.erase()
        # Screen could resized at any time.
        self.height, self.width = self.window.getmaxyx()
        # Bottom status line: pre-game hint and/or flag counter.
        chunks = []
        if engine.game_status == 'pre-game':
            chunks.append('Choose your starting point.')
        if engine.game_status == 'play-game':
            if engine.field.flags_left is not None:
                chunks.append("Flags left: {0}".format(
                    engine.field.flags_left
                ))
        msg = ' '.join(chunks)
        if len(msg) + 4 <= self.width:
            ign, attributes = self.curses_output_cfg('text')
            self.window.addstr(self.height - 1, 3, msg, attributes)
        # (Keeping the following outside the loop magically solves a resizing
        # bug that traces back to an `addch` in `self.print_char`.)
        # Lie to the field printer functions to preserve the text.
        self.height -= 1
        # Print the field.
        if self.gametype == 'hex':
            self.print_hex(engine.field)
        else:
            self.print_square(engine.field)
        # Remember that self.height has already been decremented by one.
        self.window.move(self.height, 0)        # BUG #3 (See the file "BUGS".)
        self.window.refresh()
    def input(self, engine):
        '''This method is called by `engine.play_game`.
        It receives a character from the user and interprets it.
        Invokes `self.travel` for the steering of the cursor.
        It doesn't do any output except for printing the field
        initialization message, and forcing the entire screen to be
        redrawn on unrecognised input (to de-fuck-up the screen).
        '''
        if self.gametype == 'hex':
            direction_keys = self.direction_keys['hex']
        else:
            direction_keys = self.direction_keys['square']
        look_for = ['reveal', 'flag'] + direction_keys
        # Receive input from player.
        ch = self.window.getch()
        # Interpret.
        command = None
        for key in look_for:
            if ch in self.cfg['curses-input'][key]:
                command = key
        # Act.
        if command == 'flag':
            engine.flag(self.cursor)
        elif command == 'reveal':
            if engine.game_status == 'pre-game':
                # First reveal triggers (slow) field initialization.
                self.message('Initializing field... This may take a while.')
            curses.reset_shell_mode()       # BUG #3 (See the file "BUGS".)
            engine.reveal(self.cursor)
            curses.reset_prog_mode()        # BUG #3 (See the file "BUGS".)
            self.window.redrawwin()         # BUG #3 (See the file "BUGS".)
        elif command in direction_keys:
            self.travel(engine.field, command)
        else:
            self.window.redrawwin()
    def travel(self, field, direction):
        '''Move the cursor in the specified direction.
        It will not move past an edge (or in an otherwise impossible
        direction). This is why the `field` argument is required.
        Valid directions when self.gametype == 'moore':
            'up', 'NE', 'right', 'SE', 'down', 'SW', 'left', 'NW'
        Valid directions when self.gametype == 'hex':
            'hex0', 'hex1', 'hex2', 'hex3', 'hex4', 'hex5'
        Valid directions when self.gametype == 'neumann':
            'up', 'right', 'down', 'left'
        The hexagonal directions are:
            5 0
            4   1
            3 2
        '''
        x, y = self.cursor
        # Find the appropriate dictionary of direction to DELTA-x and DELTA-y.
        if self.gametype != 'hex':
            key = 'square'
        elif y % 2:
            key = 'hex-odd'
        else:
            key = 'hex-even'
        # Move in the specified direction.
        x_diff, y_diff = self.travel_diffs[key][direction]
        new = x + x_diff, y + y_diff
        # Do nothing if it is impossible to move in the specified direction.
        x, y = new
        if x >= 0 and x < field.dimensions[0]:
            if y >= 0 and y < field.dimensions[1]:
                self.cursor = new
    def print_char(self, x, y, cfg, char=None):
        '''Print a character at a virtual coordinate with the right attributes.
        Print a character at the virtual coordinate (`x`, `y`)
        using the textics directive `cfg`.
        `char` is used to override the default character of the
        textics directive.
        '''
        real_x = x - self.window_start[0]
        real_y = y - self.window_start[1]
        # Verify that the coordinate is printable.
        # (Coordinates outside the visible area are silently dropped.)
        if 0 <= real_x < self.width:
            if 0 <= real_y < self.height:
                cfg_char, attributes = self.curses_output_cfg(cfg)
                if char is None:
                    char = cfg_char
                self.window.addch(real_y, real_x, ord(char), attributes)
    def move_visible_area(self, virtual_x, virtual_y, x_border, y_border):
        '''Move the area that will be printed by `self.print_char`.
        Move the visible area (as printed by `self.print_char`) by
        modifying `self.window_start`, which is used for translating
        virtual coordinates (a step between field coordinates and
        screen coordinates.)
        `virtual_x` and `virtual_y` is the virtual coordinate.
        `x_border` is the minimal allowed border between the virtual
        coordinate and the left or the right side of the screen.
        `y_border` is the minimal allowed border between the virtual
        coordinate and the top or the bottom of the screen.
        '''
        real_x = virtual_x - self.window_start[0]
        real_y = virtual_y - self.window_start[1]
        # Scroll just far enough that the coordinate sits `*_border`
        # cells inside the nearest violated edge.
        if real_x + x_border > self.width - 1:
            self.window_start[0] = virtual_x - self.width + x_border + 1
        if real_x - x_border < 0:
            self.window_start[0] = virtual_x - x_border
        if real_y + y_border > self.height - 1:
            self.window_start[1] = virtual_y - self.height + y_border + 1
        if real_y - y_border < 0:
            self.window_start[1] = virtual_y - y_border
    def print_square(self, field):
        '''Helper function for `self.output` for non-hexagonal gametypes.
        Print a non-hexagonal field in the area
        0 to self.width-1 by 0 to self.height-2.
        Also prints the "cursor".
        It does not print the flags left text.
        It will invoke `self.move_visible_area` to keep the "cursor" on
        the screen. It will use `self.print_char` to print characters
        on the screen.
         _______
        | X X X |
        | X(*)X |
        | X X X |
         -------
        '''
        # Move the visible area.
        # Compute the virtual locations on the screen and real locations.
        # Adjust the virtual coordinate of the visible area.
        #
        # Border = 1 cell.
        x, y = self.cursor
        # Virtual x is 2*x+1: one screen column of padding between cells.
        self.move_visible_area(2*x+1, y, 3, 1)
        # Print all cells in a field.
        for cell in field.all_cells():
            x, y = cell
            # Print blank grid .
            self.print_char(2*x, y, 'grid', ' ')
            self.print_char(2*x+2, y, 'grid', ' ')
            # Print the actual cell.
            value = field.get(cell)
            if value not in self.specials:
                self.print_char(2*x+1, y, 'number', str(value))
            else:
                self.print_char(2*x+1, y, self.specials[value])
        # Print the "cursor".
        x, y = self.cursor
        self.print_char(2*x, y, 'cursor-l')
        self.print_char(2*x+2, y, 'cursor-r')
    def print_hex(self, field):
        r'''Helper function for `self.output` for the hexagonal gametype.
        Print a hexagonal field in the area
        0 to self.width-1 by 0 to self.height-2.
        Also prints the "cursor".
        It does not print the flags left text.
        It will invoke `self.move_visible_area` to keep the "cursor" on
        the screen. It will use `self.print_char` to print characters
        on the screen.
               0000000000111111111122222222223
               0123456789012345678901234567890
            00  / \ / \ / \ / \ / \ / \ / \
            01 | X | X | X | X | X | X | X |
            02  \ / \ / \ / \ / \ / \ / \ / \
            03   | X | X | X | X | X | X | X |
            04  / \ / \ / \ / \ / \ / \ / \ /
            05 | X | X | X |(X)| X | X | X |
            06  \ / \ / \ / \ / \ / \ / \ / \
            07   | X | X | X | X | X | X | X |
            08    \ / \ / \ / \ / \ / \ / \ /
        '''
        # Define functions that translates field coordinates into
        # virtual screen coordinates.
        # Odd rows are shifted right by half a hexagon (two columns).
        def fx(x, y): return 2 * (2*x + 1 + (y % 2))
        def fy(x, y): return 2*y + 1
        # Move the visible area.
        #
        # Compute the virtual locations on the screen and real locations.
        # Adjust the virtual coordinate of the visible area.
        # Border = 1 cell.
        x, y = self.cursor
        self.move_visible_area(fx(x, y), fy(x, y), 6, 3)
        # Print all cells in a field.
        for cell in field.all_cells():
            x = 2 * (2*cell[0] + 1 + (cell[1] % 2))
            y = 2*cell[1] + 1
            # Print blank grid.
            # Roof:
            self.print_char(x - 1, y - 1, 'grid', '/')
            self.print_char(x, y - 1, 'grid', ' ')
            self.print_char(x + 1, y - 1, 'grid', '\\')
            # Left wall:
            self.print_char(x - 2, y, 'grid', '|')
            self.print_char(x - 1, y, 'grid', ' ')
            # Right wall:
            self.print_char(x + 2, y, 'grid', '|')
            self.print_char(x + 1, y, 'grid', ' ')
            # Floor:
            self.print_char(x - 1, y + 1, 'grid', '\\')
            self.print_char(x, y + 1, 'grid', ' ')
            self.print_char(x + 1, y + 1, 'grid', '/')
            # Print the actual cell.
            value = field.get(cell)
            if value not in self.specials:
                self.print_char(x, y, 'number', str(value))
            else:
                self.print_char(x, y, self.specials[value])
        # Print the "cursor".
        x, y = self.cursor
        self.print_char(fx(x, y) - 1, fy(x, y), 'cursor-l')
        self.print_char(fx(x, y) + 1, fy(x, y), 'cursor-r')
def output(stream, content):
    '''
    Due to BUG #9 syscalls may fail with EINTR after leaving curses mode.
    Write `content` to `stream` and flush() without crashing.
    Example:
        output(sys.stdout, 'Hello world!\n')
    '''
    # Perform the write and then the flush, retrying each step for as
    # long as it keeps getting interrupted by a signal (EINTR).
    steps = (
        lambda: stream.write(content),
        lambda: stream.flush(),
    )
    for step in steps:
        done = False
        while not done:
            try:
                step()
                done = True
            except InterruptedError:
                pass    # Interrupted; retry this step.
            except IOError as e:
                # Pre-3.3 interpreters report EINTR as a plain IOError
                # (and some platforms may lack errno.EINTR entirely).
                if 'EINTR' in dir(errno) and e.errno == errno.EINTR:
                    pass
                else:
                    raise
def convert_param(paramtype, s):
    '''Convert user input (potentially incorrect text) to the proper type.
    Convert the string `s` to the proper type.
    Raises ValueError if `s` cannot be converted, and also if
    `paramtype` itself is not one of the recognised values.
    `paramtype` MUST be one of the recognised values:
        'str':          `s` is returned.
        'yesno':        "Yes" is True and "no" is False.
        'dimension':    An integer >= 4
        'minecount':    Two modes (automatic selection):
                            An integer >= 1 returned as an integer.
                            Or a percentage `str(float)+'%'` in
                            ]0%, 100%[ returned as a float:
        'gametype':     Mapping with case-insensitive keys and
                        lower-case values:
                            'a', 'neumann' and '4' to 'neumann'
                            'b', 'hex', 'hexagonal' and '6' to 'hex'
                            'c', 'moore' and '8' to 'moore'
        'reverse-minecount':    `s` is an integer or a float and the
                                returned value is a string that can be
                                converted back to `s` with 'minecount'.
    '''
    if paramtype == 'str':
        return s
    elif paramtype == 'yesno':
        if s.upper() in ('Y', 'YES'):
            return True
        elif s.upper() in ('N', 'NO'):
            return False
        else:
            output(sys.stderr,'"Yes" or "no" please. (WITHOUT quotes.)\n')
            raise ValueError
    elif paramtype == 'dimension':
        try:
            value = int(s)
        except ValueError:
            # Easter egg.
            #
            # ~85.6% of English words contain 'a', 'c', 'm' or 'p'.
            # All numbers under one thousand belongs to the ~14.4%.
            #
            # 194 of the numbers between 0 and 200 contain one or more of
            # the letters 'n', 'f' and 'h'.
            #
            # But zero, two, six and twelve aren't included.
            # So check for X and startswith('TW').
            #
            # Only ~10.2% of the words in my word list could be mistaken
            # for numbers.
            S = s.upper()
            if (
                ('A' not in S) and ('C' not in S) and
                ('N' in S or 'F' in S or 'H' in S or 'X' in S) or
                S.startswith('TW')
            ):
                # The player spelled the number out in English.
                output(sys.stderr, "Use digits.\n")
            else:
                output(sys.stderr,
                    'Invalid width or height;'
                    ' "{0}" is not an integer.\n'.format(s)
                )
            raise ValueError
        if value < 4:
            output(sys.stderr, 'Lowest allowed width or height is 4.\n')
            raise ValueError
        return value
    elif paramtype == 'minecount':
        if len(s) == 0:
            output(sys.stderr, 'No (empty) amount of mines specified.\n')
            raise ValueError
        if s[-1] == '%':
            # Percentage mode: returned as a float in ]0.0, 1.0[.
            try:
                value = float(s[:-1])/100
            except ValueError:
                output(sys.stderr,
                    "You can't have {0} percent of the cells to be mines;"
                    " {0} is not a number.\n".format(s)
                )
                raise ValueError
            if value >= 1.0 or value <= 0.0:
                output(sys.stderr,
                    'Percentage of the cells that will be mines'
                    ' must be in ]0%, 100%[.\n'
                )
                raise ValueError
        else:
            # Absolute mode: returned as an integer >= 1.
            try:
                value = int(s)
            except ValueError:
                output(sys.stderr,
                    "You can't have {0} mines;"
                    " {0} is not an integer\n".format(s)
                )
                raise ValueError
            if value <= 0:
                output(sys.stderr,'You must have at least ONE mine.\n')
                raise ValueError
        return value
    elif paramtype == 'gametype':
        if s.upper() in ('A', 'NEUMANN', '4'):
            return 'neumann'
        elif s.upper() in ('B', 'HEX', 'HEXAGONAL', '6'):
            return 'hex'
        elif s.upper() in ('C', 'MOORE', '8'):
            return 'moore'
        else:
            output(sys.stderr,'Invalid gametype. TODO: explain\n')
            raise ValueError
    elif paramtype == 'reverse-minecount':
        if isinstance(s, float):
            return '{0}%'.format(100 * s)
        else:
            return str(s)
    else:
        # BUGFIX: an unrecognised `paramtype` used to fall into
        # `while True: invalid_paramtype = True` — an infinite loop.
        # It is a programming error; fail loudly instead.
        raise ValueError('Unrecognised paramtype: {0!r}'.format(paramtype))
def ask(question, paramtype, default):
    '''Ask the user a question in line mode. (Not curses mode.)
    Ask the user a question (line mode; not curses mode) and return the
    answer after it has been converted.
    It will invoke `convert_param` to convert the string into the
    proper type and check that the user didn't say something stupid.
    `default` is what will be sent to `convert_param` if the user
    hits enter.
    `paramtype` will be sent to `convert_param`. See the doc-string
    for `convert_param` to know what values of `paramtype` are permitted.
    NOTICE: This function will cause the program to exit if a
        KeyboardInterrupt is raised.
    '''
    # Re-ask until `convert_param` accepts the answer.
    while True:
        output(sys.stdout, '{0} [{1}]: '.format(question, default))
        try:
            # Due to BUG #9 syscalls may fail with EINTR after leaving
            # curses mode: retry the read until it completes.
            reply = None
            while reply is None:
                try:
                    reply = sys.stdin.readline().strip()
                except InterruptedError:
                    pass
                except IOError as e:
                    if 'EINTR' in dir(errno) and e.errno == errno.EINTR:
                        pass
                    else:
                        raise
            # A blank answer means "use the default".
            if not reply:
                reply = default
            return convert_param(paramtype, reply)
        except ValueError:
            # convert_param already explained the problem; ask again.
            continue
        except KeyboardInterrupt:
            output(sys.stdout,'\n')
            sys.exit(0)
def arg_input(default):
    '''Get configuration filepaths and game parameters from `sys.argv`.
    user_input_required, params = arg_input(default)
    This function will retrieve the game parameters, and paths to the
    configuration files, from `sys.argv`. If `sys.argv` contains no
    game parameters, `user_input` will be True.
    `default` is a dictionary that MUST contain these keys:
        'width', 'height', 'mines', 'gametype', 'flagcount', 'guessless'
        and 'insult'.
        Their types are specified in the doc-string for `play_game`.
    `params` is a dictionary that contains either all of the keys that
    are required for `default`, or none of them. It may also contain
    'enginecfg' and/or 'cursescfg' for specifying configuration files
    that are not in the ordinary search path.
    The program will exit if bogus parameters are given.
    NOTICE: If argparse couldn't be imported, this function will exit
        the program if there are any command line arguments. If there
        aren't, it will return `True, {}`.
    '''
    # argparse is new in Python 2.7. Allow this to be run by the obsolete 2.6.
    try:
        dir(argparse)
    except NameError:
        if len(sys.argv) == 1:
            return True, {}
        else:
            output(sys.stderr,'Cannot parse the arguments without argparse!\n')
            sys.exit(1)
    # argparse exists:
    # Marker appended to each option's help text.
    default_s = {
        True: ' (Default)',
        False: ' (Not default)'
    }
    # Get the arguments sent on the command line.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='''
{0} is a curses mode minesweeper that checks if the field can be solved
without guessing and supports three different game types:
    Traditional; Moore neighbourhoods; 8 neighbours {1}
    Hexagonal; 6 neighbours {2}
    von Neumann neighbourhoods; 4 neighbours {3}
{4}
        '''.format(
            GAME_NAME,
            default_s[default['gametype'] == 'moore'],
            default_s[default['gametype'] == 'hex'],
            default_s[default['gametype'] == 'neumann'],
            {
                True: 'By default, it will insult you for both winning and\n'
                    'losing a game that has been proven to be 100% winnable.',
                False: 'By default, it will not insult you for either winning '
                    'or losing.',
            }[default['insult']],
        )
    )
    # Configuration files.
    parser.add_argument(
        '-c', '--cursescfg', dest='cursescfg',
        help=(
            'The path to the configuration file for the key bindings '
            'and textics directives.\n'
            'Default is "~/.{0}/cursescfg" or "/etc/{0}/cursescfg".'.format(
                GAME_FILENAME
            )
        )
    )
    parser.add_argument(
        '-e', '--enginecfg', dest='enginecfg',
        help=(
            'The path to the configuration file for field '
            'initialization and misc. game engine functions.\n'
            'Default is "~/.{0}/enginecfg" or "/etc/{0}/enginecfg".'.format(
                GAME_FILENAME
            )
        )
    )
    # Dimensions and minecount.
    parser.add_argument(
        '-s', '--size', dest='size',
        help=(
            "The size of the field width+'x'+height. Ex. 30x16\n"
            "Default is {0}x{1}.".format(
                default['width'], default['height']
            )
        )
    )
    parser.add_argument(
        '-m', '--mines', dest='mines',
        help=(
            "The number of mines. OR the percentage.\n"
            "Default is {0}.".format(
                # '%' must be doubled or argparse's formatter chokes.
                convert_param(
                    'reverse-minecount',
                    default['mines']
                ).replace('%', '%%')
            )
        )
    )
    # Gametype
    gametype = parser.add_mutually_exclusive_group()
    gametype.add_argument(
        '-4', '--neumann',
        action='store_const', dest='gametype', const='neumann',
        help=(
            "Use von Neumann neighbourhoods. (4 neighbours.)" +
            default_s[
                default['gametype'] == 'neumann'
            ]
        )
    )
    gametype.add_argument(
        '-6', '--hex', '--hexagonal',
        action='store_const', dest='gametype', const='hex',
        help=(
            "Use a hexagonal field. (6 neighbours.)" +
            default_s[
                default['gametype'] == 'hex'
            ]
        )
    )
    gametype.add_argument(
        '-8', '--moore', '--traditional',
        action='store_const', dest='gametype', const='moore',
        help=(
            "Traditional minesweeper; Moore neighbourhoods. (8)" +
            default_s[
                default['gametype'] == 'moore'
            ]
        )
    )
    # Bools.
    flagcount = parser.add_mutually_exclusive_group()
    flagcount.add_argument(
        '-f', '--flagcount', dest='flagcount', action='store_true',
        help=(
            "Show how many flags are left." + default_s[
                default['flagcount']
            ]
        )
    )
    flagcount.add_argument(
        '-F', '--no-flagcount', dest='noflagcount', action='store_true',
        help=(
            "Don't Show how many flags are left." + default_s[
                not default['flagcount']
            ]
        )
    )
    guessless = parser.add_mutually_exclusive_group()
    guessless.add_argument(
        '-g', '--guessless', dest='guessless', action='store_true',
        help=(
            "Play a minesweeper that can be solved without guessing." +
            default_s[
                default['guessless']
            ]
        )
    )
    guessless.add_argument(
        '-G', '--no-guessless', dest='noguessless', action='store_true',
        help=(
            "Play with the risk of having to guess. " +
            "Large fields will be initialized much faster." + default_s[
                # BUGFIX: this used to read `default['flagcount']`
                # (copy-paste error), mislabelling -G's default marker.
                not default['guessless']
            ]
        )
    )
    insult = parser.add_mutually_exclusive_group()
    insult.add_argument(
        '-r', '--rude', dest='insult', action='store_true',
        help=(
            "<std>" + default_s[
                default['insult']
            ]
        )
    )
    insult.add_argument(
        '-n', '--nice', dest='noinsult', action='store_true',
        help=(
            "(more polite setting)" + default_s[
                not default['insult']
            ]
        )
    )
    #
    # Parse the args and store the params.
    args = parser.parse_args()
    params = {}
    user_input_required = True
    error = False
    # Size, mines and gametype.
    if args.size:
        user_input_required = False
        try:
            params['width'], params['height'] = map(
                lambda x: convert_param('dimension', x),
                args.size.split('x')
            )
        except ValueError:
            # convert_param (or the unpack) already explained.
            error = True
            output(sys.stderr,
                'Error with "--size": Explanation above.\n'
            )
        except:
            error = True
            output(sys.stderr,'Error with "--size": UNKNOWN\n')
            raise
    if args.mines:
        user_input_required = False
        try:
            params['mines'] = convert_param('minecount', args.mines)
        except ValueError:
            error = True
            output(sys.stderr,
                'Error with "--mines": Explanation above.\n'
            )
        except:
            error = True
            output(sys.stderr,'Error with "--mines": UNKNOWN\n')
            raise
    if args.gametype:
        user_input_required = False
        # argparse store_const guarantees one of these three values.
        assert args.gametype in ('moore', 'hex', 'neumann')
        params['gametype'] = args.gametype
    # flagcount, guessless, insult
    if args.flagcount:
        user_input_required = False
        params['flagcount'] = True
    if args.noflagcount:
        user_input_required = False
        params['flagcount'] = False
    if args.guessless:
        user_input_required = False
        params['guessless'] = True
    if args.noguessless:
        user_input_required = False
        params['guessless'] = False
    if args.insult:
        user_input_required = False
        params['insult'] = True
    if args.noinsult:
        user_input_required = False
        params['insult'] = False
    # Configuration
    if args.cursescfg:
        params['cursescfg'] = args.cursescfg
    if args.enginecfg:
        params['enginecfg'] = args.enginecfg
    # Deal with error and user_input_required.
    if error:
        sys.exit(1)
    if not user_input_required:
        # Backfill any unspecified game parameter from `default` so the
        # caller always receives a complete set.
        for key in default:
            if key not in params:
                params[key] = default[key]
    return user_input_required, params
def user_input(default, cursescfg_path):
    '''Retrieve game parameters from the user.
    `cursescfg_path` is the path to the configuration file that happens
    to contain the key bindings and their documentation, which might be
    displayed by this function.
    `default` is a dictionary that MUST contain these keys:
    'width', 'height', 'mines', 'gametype', 'flagcount', 'guessless'
    and 'insult'.
    Their types are specified in the doc-string for `play_game`.
    `user_input` will return a dictionary containing the same keys.
    '''
    parameters = {}
    booldefault = {True: 'Yes', False: 'No'}
    parameters['width'] = ask(
        'Width of the playing field',
        'dimension',
        default['width']
    )
    parameters['height'] = ask(
        'Height of the playing field',
        'dimension',
        default['height']
    )
    # MUST ask for dimensions before the # of mines: the count may be
    # given as a percentage of the (now known) playing-field area.
    parameters['mines'] = ask(
        'Mines (# or %)',
        'minecount',
        convert_param('reverse-minecount', default['mines'])
    )
    parameters['gametype'] = ask(
        'A: Neumann, B: Hexagonal or C: Moore',
        'gametype',
        default['gametype']
    )
    parameters['flagcount'] = ask(
        'Do you want to know how many flags you have left?',
        'yesno',
        booldefault[default['flagcount']]
    )
    parameters['guessless'] = ask(
        '100% solvable field (no guessing required)',
        'yesno',
        booldefault[default['guessless']]
    )
    # MUST ask for guessless mode before polite mode.
    if parameters['guessless']:
        parameters['insult'] = not ask(
            'Polite mode?',
            'yesno',
            # BUGFIX: the displayed default used to be derived from
            # default['guessless'] instead of default['insult'].
            booldefault[not default['insult']]
        )
    else:
        # Honour the documented contract that the returned dict always
        # contains 'insult'.  (play_game forces it to False anyway when
        # guessless mode is off.)
        parameters['insult'] = False
    # Ask if the user wants to know the key bindings.
    if ask('Show key bindings?', 'yesno', 'No'):
        # SECURITY NOTE: the configuration file is eval()'d, so it must
        # only ever be loaded from trusted locations.
        with open(cursescfg_path) as cfg_fh:
            cursescfg = eval(cfg_fh.read())
        try:
            output(sys.stdout,cursescfg['pre-doc'])
            if parameters['gametype'] == 'hex':
                output(sys.stdout,cursescfg['doc-hex'])
            else:
                output(sys.stdout,cursescfg['doc-square'])
        except KeyError:
            output(sys.stdout,
                "The configuration file format for cursescfg"
                " has been updated since version 0.0.5.\n"
                "You'll have to guess what keys to press or"
                " update the configuration files.\n"
            )
        output(sys.stdout,
            "\nPressing an unrecognised key will refresh the screen.\n"
            "^C (Ctrl-c) to quit a game or the game.\n\n"
        )
        ask('Press enter to continue...', 'str', '')
    return parameters
def highscores_add_entry(title, prompt):
    '''
    Input callback for `game_engine.hiscores.add_entry`.
    Prints `title` once, then prompts repeatedly until the player's
    answer decodes cleanly; returns the entered string.
    '''
    output(sys.stdout,title + '\n')
    while True:
        output(sys.stdout,prompt + ': ')
        sys.stdout.flush()
        try:
            # `ask` returns the raw string typed by the player.
            return ask(prompt, 'str', '')
        except UnicodeDecodeError:
            # Undecodable input: report and re-prompt instead of crashing.
            output(sys.stderr, 'Decoding error.\n')
def highscores_display(title, headers, rows):
    '''
    Output formatter function for `game_engine.hiscores.display`.
    '''
    # Table body: headers, an '=' underline row, a blank spacer row,
    # then the score rows themselves.
    table = (
        [headers]
        + [['=' * len(col) for col in headers]]
        + [['' for col in headers]]
        + rows
    )
    # Each column is one character wider than its widest cell.
    widths = [max(len(cell) for cell in column) + 1
              for column in zip(*table)]
    # Print the underlined title, then the padded table.
    output(sys.stdout, '\n' + '_' * len(title) + '\n')
    output(sys.stdout, title + '\n\n')
    for row in table:
        for cell, width in zip(row, widths):
            output(sys.stdout, cell.ljust(width) + ' ')
        output(sys.stdout, '\n')
    output(sys.stdout, '\n')
def play_game(parameters):
    '''Play a custom game of minesweeper.
    When called with all required parameters,
    one game of minesweeper will be played.
    NOTICE: This function does not expect incorrect parameters!
    WARNING: If anything, except a KeyboardInterrupt, happens during an
    actual game, this function will raise an exception without
    leaving curses mode.
    `parameters` is a dictionary which MUST contain all these keys:
    'width'      Integer >= 4
    'height'     Integer >= 4
    'mines'      Integer >= 1 or float in ]0.0, 1.0[
    'gametype'   'moore', 'hex' or 'neumann'
    'flagcount'  A boolean (Show flag count)
    'guessless'  A boolean (no guessing required)
    'insult'     A boolean (!polite mode)
    'enginecfg'  The path to the configuration file for the
                 game engine.
    'cursescfg'  The path to the configuration file for key
                 bindings and textics customisation.
    '''
    # A float mine count is a density; convert to a rounded number of
    # mines for the chosen field area.
    if isinstance(parameters['mines'], float):
        area = parameters['width'] * parameters['height']
        mines = int(parameters['mines'] * area + 0.5)
        parameters['mines'] = mines
    # A small field with a low density can round down to zero mines,
    # so force at least one mine.
    # BUGFIX: this line used to be `parameters['mines'] == 1`, a no-op
    # comparison instead of an assignment.
    if parameters['mines'] == 0:
        parameters['mines'] = 1
    # Don't blame the player when it's not the player's fault.
    if not parameters['guessless']:
        parameters['insult'] = False
    engine = game_engine.game_engine(parameters['enginecfg'], **parameters)
    interface = curses_game(
        parameters['cursescfg'],
        parameters['gametype'],
    )
    try:
        win, highscores = engine.play_game(interface)
    except KeyboardInterrupt:
        # The player aborted mid-game: restore the terminal and bail out.
        interface.leave()
        return
    interface.leave()
    if parameters['insult']:
        if win:
            output(sys.stdout,
                '\n\n"Congratulations", you won the unlosable game.\n')
        else:
            output(sys.stdout,'\n\nYou moron, you lost the unlosable game!\n')
        ask('Press enter to continue...', 'str', '')
    highscores.add_entry(highscores_add_entry)
    title, headers, rows = highscores.display()
    highscores_display(title, headers, rows)
def main():
    '''Top-level driver: configure a game, then play until the user quits.
    Invoke `arg_input` to find configuration files with non-standard
    paths.
    Find the remaining configuration files.
    NOTICE: As of version 0.0.13, it does not check if the
    configuration file is valid, only if it exists.
    Invoke `user_input` if `arg_input` didn't return game parameters.
    Invoke `play_game`.
    Invoke `ask` in a while loop to ask the player if [s]he wants to
    play again. And while True, invoke `play_game`.
    WARNING: MAY raise an exception while in curses mode.
    '''
    # Built-in defaults, used when neither the CLI nor the user
    # overrides a value.
    default = {
        'width': 20,
        'height': 20,
        'mines': .2,
        'gametype': 'moore',
        'flagcount': True,
        'guessless': True,
        'insult': True,
    }
    interactive, parameters = arg_input(default)
    # Handle the configuration filepaths.
    cfgfiles = {
        'enginecfg': None,
        'cursescfg': None,
    }
    error = False
    for cfgfile in cfgfiles:
        if cfgfile in parameters:
            # Path explicitly supplied on the command line.
            cfgfiles[cfgfile] = parameters[cfgfile]
        else:
            # Search the usual locations: per-user dotdir, the install
            # prefix, then the system-wide /etc.
            locations = (
                os.path.expanduser('~/.' + GAME_FILENAME + '/' + cfgfile),
                sys.prefix + '/etc/' + GAME_FILENAME + '/' + cfgfile,
                '/etc/' + GAME_FILENAME + '/' + cfgfile,
            )
            for location in locations:
                try:
                    # Existence check only; validity is NOT verified.
                    open(location)
                    cfgfiles[cfgfile] = location
                    break
                except IOError:
                    pass
            else:
                output(sys.stderr,'{0} not found.\n'.format(cfgfile))
                error = True
    if error:
        sys.exit(1)
    if interactive:
        output(sys.stdout,GAME_CRAPTEXT)
        output(sys.stdout,
            'You can quit the game with the interrupt signal. (Ctrl + c)\n\n'
        )
        output(sys.stdout,
            'How do you want your game to be set up? Write in the values'
            ' and press Enter.\nLeave blank to use the default.\n\n'
        )
        parameters = user_input(default, cfgfiles['cursescfg'])
    parameters.update(cfgfiles)
    play_game(parameters)
    # Replay loop: parameters are re-asked interactively for each game.
    while ask('Play again?', 'yesno', 'Yes'):
        parameters = user_input(default, cfgfiles['cursescfg'])
        parameters.update(cfgfiles)
        play_game(parameters)
    assert os.geteuid(), "Why the-fuck(7) are you playing games as root?"
if __name__ == '__main__':
    # Force InterruptedError to be defined (older interpreters lack it).
    try:
        InterruptedError
    except NameError:
        # Both are not going to be expected at the same time.
        InterruptedError = SystemExit
    try:
        main()
    except SystemExit as e:
        # Cause the interpreter to exit with the expected status.
        os._exit(e.code)
    except game_engine.security_alert as e:
        # Leave curses mode (best effort) before reporting the alert.
        try:
            curses.endwin()
        except:
            pass
        output(sys.stderr,'Security alert: ' + e.message + '\n')
        os._exit(1)
    except:
        # Get the traceback without messing up the terminal: save the
        # exception, leave curses mode, then print it.
        exception = sys.exc_info()
        try:
            curses.endwin()
        except:
            pass
        traceback.print_exception(*exception)
        os._exit(1)
    os._exit(0)
|
#!/usr/bin/env python
"""
sage_server.py -- unencrypted forking TCP server.
Note: I wrote functionality so this can run as root, create accounts on the fly,
and serve sage as those accounts. Doing this is horrendous from a security point of
view, and I'm definitely not doing this.
None of that functionality is actually used in https://cocalc.com!
For debugging, this may help:
killemall sage_server.py && sage --python sage_server.py -p 6000
"""
# NOTE: This file is GPL'd
# because it imports the Sage library. This file is not directly
# imported by anything else in CoCalc; the Python process it runs is
# used over a TCP connection.
#########################################################################################
# Copyright (C) 2016, Sagemath Inc.
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
# Add the path that contains this file to the Python load path, so we
# can import other files from there.
from __future__ import absolute_import
import six
import os, sys, time, operator
import __future__ as future
from functools import reduce
def is_string(s):
    """Return True when *s* is a text string (str, or unicode on py2)."""
    return isinstance(s, six.string_types)
def unicode8(s):
    """Best-effort conversion of *s* to a printable (utf-8 friendly) string.
    Falls back to plain str(s), and finally to returning *s* unchanged,
    so it never raises.
    """
    # TODO: see http://stackoverflow.com/questions/21897664/why-does-unicodeu-passed-an-errors-parameter-raise-typeerror for how to fix.
    try:
        if six.PY2:
            return str(s).encode('utf-8')
        else:
            # Raises TypeError when s is already a str; handled below.
            return str(s, 'utf-8')
    except Exception:
        # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        try:
            return str(s)
        except Exception:
            return s
LOGFILE = os.path.realpath(__file__)[:-3] + ".log"
PID = os.getpid()
from datetime import datetime
def log(*args):
    """Append a PID- and timestamp-prefixed message to LOGFILE.
    Never raises: any failure is reported on stdout and ignored.
    """
    try:
        # Use a context manager so the descriptor is closed even when
        # the write or flush fails (the original leaked the handle).
        with open(LOGFILE, 'a') as debug_log:
            mesg = "%s (%s): %s\n" % (PID, datetime.utcnow().strftime(
                '%Y-%m-%d %H:%M:%S.%f')[:-3], ' '.join([unicode8(x)
                                                        for x in args]))
            debug_log.write(mesg)
            debug_log.flush()
    except Exception as err:
        print(("an error writing a log message (ignoring) -- %s" % err, args))
# used for clearing pylab figure
pylab = None
# Maximum number of distinct (non-once) output messages per cell; when this number is
# exceeded, an exception is raised; this reduces the chances of the user creating
# a huge unusable worksheet.
MAX_OUTPUT_MESSAGES = 256
# stdout, stderr, html, etc. that exceeds this many characters will be truncated to avoid
# killing the client.
MAX_STDOUT_SIZE = MAX_STDERR_SIZE = MAX_CODE_SIZE = MAX_HTML_SIZE = MAX_MD_SIZE = MAX_TEX_SIZE = 40000
MAX_OUTPUT = 150000
# Standard imports.
import json, resource, shutil, signal, socket, struct, \
tempfile, time, traceback, pwd, re
# for "3x^2 + 4xy - 5(1+x) - 3 abc4ok", this pattern matches "3x", "5(" and "4xy" but not "abc4ok"
# to understand it, see https://regex101.com/ or https://www.debuggex.com/
RE_POSSIBLE_IMPLICIT_MUL = re.compile(r'(?:(?<=[^a-zA-Z])|^)(\d+[a-zA-Z\(]+)')
try:
from . import sage_parsing, sage_salvus
except:
import sage_parsing, sage_salvus
uuid = sage_salvus.uuid
reload_attached_files_if_mod_smc_available = True
def reload_attached_files_if_mod_smc():
    """Re-load any %attach'd Sage files that changed on disk.
    Best-effort: silently returns when Sage's attach machinery has not
    been used yet or is unavailable.
    """
    # CRITICAL: do NOT import sage.repl.attach!! That will import IPython, wasting several seconds and
    # killing the user experience for no reason.
    try:
        import sage.repl
        sage.repl.attach
    except:
        # nothing to do -- attach has not been used and is not yet available.
        return
    global reload_attached_files_if_mod_smc_available
    if not reload_attached_files_if_mod_smc_available:
        return
    try:
        from sage.repl.attach import load_attach_path, modified_file_iterator
    except:
        # Remember the failure so we don't retry (and re-log) every call.
        print("sage_server: attach not available")
        reload_attached_files_if_mod_smc_available = False
        return
    # see sage/src/sage/repl/attach.py reload_attached_files_if_modified()
    for filename, mtime in modified_file_iterator():
        basename = os.path.basename(filename)
        timestr = time.strftime('%T', mtime)
        log('reloading attached file {0} modified at {1}'.format(
            basename, timestr))
        from .sage_salvus import load
        load(filename)
# Determine the info object, if available. There's no good reason
# it wouldn't be available, unless a user explicitly deleted it, but
# we may as well try to be robust to this, especially if somebody
# were to try to use this server outside of cloud.sagemath.com.
_info_path = os.path.join(os.environ['SMC'], 'info.json')
if os.path.exists(_info_path):
    try:
        INFO = json.loads(open(_info_path).read())
    except:
        # This will fail, e.g., if info.json is invalid (maybe a blank file).
        # We definitely don't want sage server startup to be completely broken
        # in this case, so we fall back to "no info".
        INFO = {}
else:
    INFO = {}
# Guarantee the 'base_url' key always exists (presumably consumed by
# URL construction elsewhere -- TODO confirm against callers).
if 'base_url' not in INFO:
    INFO['base_url'] = ''
# Configure logging
#logging.basicConfig()
#log = logging.getLogger('sage_server')
#log.setLevel(logging.INFO)
# A CoffeeScript version of this function is in misc_node.coffee.
import hashlib
def uuidsha1(data):
    """Derive a v4-shaped UUID string deterministically from SHA-1(*data*).
    *data* must be bytes.  Equal inputs always produce equal UUIDs.
    """
    digest = hashlib.sha1(data).hexdigest()
    template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'
    pieces = []
    pos = 0  # next unconsumed hex digit of the digest
    for ch in template:
        if ch == 'x':
            pieces.append(digest[pos])
            pos += 1
        elif ch == 'y':
            # take 8 + low order 3 bits of hex number.
            pieces.append(hex((int(digest[pos], 16) & 0x3) | 0x8)[-1])
            pos += 1
        else:
            # literal template character ('-' or the version '4')
            pieces.append(ch)
    return ''.join(pieces)
# A tcp connection with support for sending various types of messages, especially JSON.
class ConnectionJSON(object):
    """Length-prefixed message framing over a TCP socket.
    Wire format: a 4-byte big-endian length header, then a payload whose
    first byte tags the type -- 'j' for JSON text, 'b' for a blob
    (36-char sha1-derived uuid followed by the raw bytes).
    """

    def __init__(self, conn):
        # avoid common mistake -- conn is supposed to be from socket.socket...
        assert not isinstance(conn, ConnectionJSON)
        self._conn = conn

    def close(self):
        self._conn.close()

    def _send(self, s):
        # Encode str payloads on py3 so the length header counts bytes.
        if six.PY3 and type(s) == str:
            s = s.encode('utf8')
        length_header = struct.pack(">L", len(s))
        # py3: TypeError: can't concat str to bytes
        self._conn.send(length_header + s)

    def send_json(self, m):
        """JSON-encode dict *m* and send it; returns the encoded length."""
        m = json.dumps(m)
        if '\\u0000' in m:
            # NUL bytes are rejected because output must be valid
            # PostgreSQL TEXT (see BufferedOutputStream.write).
            raise RuntimeError("NULL bytes not allowed")
        log("sending message '", truncate_text(m, 256), "'")
        self._send('j' + m)
        return len(m)

    def send_blob(self, blob):
        """Send *blob* tagged with its content-derived uuid; returns the uuid."""
        if six.PY3 and type(blob) == str:
            # unicode objects must be encoded before hashing
            blob = blob.encode('utf8')
        s = uuidsha1(blob)
        if six.PY3 and type(blob) == bytes:
            # we convert all to bytes first, to avoid unnecessary conversions
            self._send(('b' + s).encode('utf8') + blob)
        else:
            # old sage py2 code
            self._send('b' + s + blob)
        return s

    def send_file(self, filename):
        """Send the file's entire contents as a blob; returns its uuid."""
        log("sending file '%s'" % filename)
        f = open(filename, 'rb')
        data = f.read()
        f.close()
        return self.send_blob(data)

    def _recv(self, n):
        # Retry a bounded number of times when the read is interrupted
        # by a signal (errno 4 == EINTR); anything else propagates.
        # see http://stackoverflow.com/questions/3016369/catching-blocking-sigint-during-system-call
        for i in range(20):
            try:
                r = self._conn.recv(n)
                return r
            except OSError as e:
                if e.errno != 4:
                    raise
        raise EOFError

    def recv(self):
        """Read one framed message; return ('json', obj) or ('blob', data).
        Raises EOFError when the peer closes mid-message.
        """
        n = self._recv(4)
        if len(n) < 4:
            raise EOFError
        n = struct.unpack('>L', n)[0]  # big endian 32 bits
        s = self._recv(n)
        while len(s) < n:
            # recv may return short reads; loop until the payload is
            # complete (an empty read means the peer closed).
            t = self._recv(n - len(s))
            if len(t) == 0:
                raise EOFError
            s += t
        if six.PY3:
            # bystream to string, in particular s[0] will be e.g. 'j' and not 106
            if type(s) == bytes:
                s = s.decode('utf8')
        if s[0] == 'j':
            try:
                return 'json', json.loads(s[1:])
            except Exception as msg:
                log("Unable to parse JSON '%s'" % s[1:])
                raise
        elif s[0] == 'b':
            return 'blob', s[1:]
        raise ValueError("unknown message type '%s'" % s[0])
def truncate_text(s, max_size):
    """Clip *s* to *max_size* characters; return (text, was_truncated)."""
    if len(s) <= max_size:
        return s, False
    return s[:max_size] + "[...]", True
def truncate_text_warn(s, max_size, name):
    r"""
    Truncate text if too long and format a warning message.

    INPUT:

    - ``s`` -- string to be truncated
    - ``max_size`` -- integer truncation limit
    - ``name`` -- string, name of limiting parameter

    OUTPUT:

    a triple:

    - string -- possibly truncated input string
    - boolean -- true if input string was truncated
    - string -- warning message if input string was truncated
    """
    size = len(s)
    if size <= max_size:
        return s, False, ''
    warning = (
        "WARNING: Output: %s truncated by %s to %s. Type 'smc?' to learn how to raise the output limit."
        % (size, name, max_size))
    return s[:max_size] + "[...]", True, warning
class Message(object):
    """Builders for the JSON messages exchanged with the hub.
    Every method returns a plain dict with an 'event' key; most pass
    their locals() straight through _new(), which drops 'self'.
    """

    def _new(self, event, props=None):
        # props=None instead of a shared mutable {} default.
        m = {'event': event}
        for key, val in (props or {}).items():
            if key != 'self':
                m[key] = val
        return m

    def start_session(self):
        return self._new('start_session')

    def session_description(self, pid):
        return self._new('session_description', {'pid': pid})

    def send_signal(self, pid, signal=signal.SIGINT):
        return self._new('send_signal', locals())

    def terminate_session(self, done=True):
        return self._new('terminate_session', locals())

    def execute_code(self, id, code, preparse=True):
        return self._new('execute_code', locals())

    def execute_javascript(self, code, obj=None, coffeescript=False):
        return self._new('execute_javascript', locals())

    def output(
            self,
            id,
            stdout=None,
            stderr=None,
            code=None,
            html=None,
            javascript=None,
            coffeescript=None,
            interact=None,
            md=None,
            tex=None,
            d3=None,
            file=None,
            raw_input=None,
            obj=None,
            once=None,
            hide=None,
            show=None,
            events=None,
            clear=None,
            delete_last=None,
            done=False  # CRITICAL: done must be specified for multi-response; this is assumed by sage_session.coffee; otherwise response assumed single.
    ):
        """Build an 'output' message, truncating oversized fields.
        Fields exceeding their sage_server.MAX_* limit are clipped and a
        warning per truncated field is appended to the message's stderr.
        """
        m = self._new('output')
        m['id'] = id
        t = truncate_text_warn
        # BUGFIX: collect one warning per truncated field.  Previously a
        # single did_truncate/tmsg pair was overwritten by every check,
        # so a later non-truncated field silently discarded an earlier
        # field's truncation warning.
        warnings = []
        from . import sage_server  # we do this so that the user can customize the MAX's below.
        if code is not None:
            code['source'], truncated, tmsg = t(code['source'],
                                                sage_server.MAX_CODE_SIZE,
                                                'MAX_CODE_SIZE')
            if truncated:
                warnings.append(tmsg)
            m['code'] = code
        if stderr is not None and len(stderr) > 0:
            m['stderr'], truncated, tmsg = t(stderr,
                                             sage_server.MAX_STDERR_SIZE,
                                             'MAX_STDERR_SIZE')
            if truncated:
                warnings.append(tmsg)
        if stdout is not None and len(stdout) > 0:
            m['stdout'], truncated, tmsg = t(stdout,
                                             sage_server.MAX_STDOUT_SIZE,
                                             'MAX_STDOUT_SIZE')
            if truncated:
                warnings.append(tmsg)
        if html is not None and len(html) > 0:
            m['html'], truncated, tmsg = t(html, sage_server.MAX_HTML_SIZE,
                                           'MAX_HTML_SIZE')
            if truncated:
                warnings.append(tmsg)
        if md is not None and len(md) > 0:
            m['md'], truncated, tmsg = t(md, sage_server.MAX_MD_SIZE,
                                         'MAX_MD_SIZE')
            if truncated:
                warnings.append(tmsg)
        if tex is not None and len(tex) > 0:
            tex['tex'], truncated, tmsg = t(tex['tex'],
                                            sage_server.MAX_TEX_SIZE,
                                            'MAX_TEX_SIZE')
            if truncated:
                warnings.append(tmsg)
            m['tex'] = tex
        if javascript is not None: m['javascript'] = javascript
        if coffeescript is not None: m['coffeescript'] = coffeescript
        if interact is not None: m['interact'] = interact
        if d3 is not None: m['d3'] = d3
        if obj is not None: m['obj'] = json.dumps(obj)
        if file is not None: m['file'] = file  # = {'filename':..., 'uuid':...}
        if raw_input is not None: m['raw_input'] = raw_input
        if done is not None: m['done'] = done
        if once is not None: m['once'] = once
        if hide is not None: m['hide'] = hide
        if show is not None: m['show'] = show
        if events is not None: m['events'] = events
        if clear is not None: m['clear'] = clear
        if delete_last is not None: m['delete_last'] = delete_last
        if warnings:
            if 'stderr' in m:
                m['stderr'] += '\n' + '\n'.join(warnings)
            else:
                m['stderr'] = '\n' + '\n'.join(warnings)
        return m

    def introspect_completions(self, id, completions, target):
        m = self._new('introspect_completions', locals())
        m['id'] = id
        return m

    def introspect_docstring(self, id, docstring, target):
        m = self._new('introspect_docstring', locals())
        m['id'] = id
        return m

    def introspect_source_code(self, id, source_code, target):
        m = self._new('introspect_source_code', locals())
        m['id'] = id
        return m
message = Message()
whoami = os.environ['USER']
def client1(port, hostname):
    """Minimal interactive command-line client for testing the server.
    Connects to (hostname, port), starts a session, then loops reading
    input and printing the server's streamed output until EOF.
    """
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((hostname, int(port)))
    conn = ConnectionJSON(conn)
    conn.send_json(message.start_session())
    typ, mesg = conn.recv()
    pid = mesg['pid']
    print(("PID = %s" % pid))
    id = 0
    while True:
        try:
            code = sage_parsing.get_input('sage [%s]: ' % id)
            if code is None:  # EOF
                break
            conn.send_json(message.execute_code(code=code, id=id))
            # Drain output messages until this cell reports done.
            while True:
                typ, mesg = conn.recv()
                if mesg['event'] == 'terminate_session':
                    return
                elif mesg['event'] == 'output':
                    if 'stdout' in mesg:
                        sys.stdout.write(mesg['stdout'])
                        sys.stdout.flush()
                    if 'stderr' in mesg:
                        # Prefix each stderr line with "! " for visibility.
                        print(('! ' +
                               '\n! '.join(mesg['stderr'].splitlines())))
                    if 'done' in mesg and mesg['id'] >= id:
                        break
            id += 1
        except KeyboardInterrupt:
            # Interrupt the remote computation: open a second connection
            # and ask the server to signal the worker process.
            print("Sending interrupt signal")
            conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn2.connect((hostname, int(port)))
            conn2 = ConnectionJSON(conn2)
            conn2.send_json(message.send_signal(pid))
            del conn2
            id += 1
    conn.send_json(message.terminate_session())
    print("\nExiting Sage client.")
class BufferedOutputStream(object):
    """File-like sink that batches writes and forwards them to a callback.
    The callback *f* is invoked as f(text, done=...) whenever the buffer
    reaches *flush_size* characters, *flush_interval* seconds have passed
    since the last flush, or flush() is called explicitly.
    """

    def __init__(self, f, flush_size=4096, flush_interval=.1):
        self._f = f
        self._buf = ''
        self._flush_size = flush_size
        self._flush_interval = flush_interval
        self.reset()

    def reset(self):
        # Restart the flush-interval clock.
        self._last_flush_time = time.time()

    def fileno(self):
        return 0

    def write(self, output):
        # CRITICAL: we need output to be valid PostgreSQL TEXT, so no
        # null bytes. This only affects text destined to be *rendered*
        # in the browser -- a partial but safe solution.
        try:
            cleaned = output.replace('\x00', '')
        except UnicodeDecodeError:
            cleaned = output.decode('utf-8').replace('\x00', '')
        self._buf += cleaned
        now = time.time()
        buffer_full = len(self._buf) >= self._flush_size
        interval_elapsed = now - self._last_flush_time >= self._flush_interval
        if buffer_full or interval_elapsed:
            self.flush()
            self._last_flush_time = now

    def flush(self, done=False):
        if not self._buf and not done:
            # no point in sending an empty message
            return
        try:
            self._f(self._buf, done=done)
        except UnicodeDecodeError:
            if six.PY2:  # str doesn't have errors option in python2!
                self._f(unicode(self._buf, errors='replace'), done=done)
            else:
                self._f(str(self._buf, errors='replace'), done=done)
        self._buf = ''

    def isatty(self):
        return False
# This will *have* to be re-done using Cython for speed.
class Namespace(dict):
    """Dictionary that notifies registered listeners on change/delete.
    Listeners are registered per key with on(event, key, callback) where
    event is 'change' or 'del'; registering under the key None subscribes
    the callback to that event for *every* key (wildcard listeners get
    the key as an extra argument).
    """

    def __init__(self, x):
        self._on_change = {}
        self._on_del = {}
        dict.__init__(self, x)

    def on(self, event, x, f):
        """Register callback f for 'change' or 'del' events on key x."""
        if event == 'change':
            if x not in self._on_change:
                self._on_change[x] = []
            self._on_change[x].append(f)
        elif event == 'del':
            if x not in self._on_del:
                self._on_del[x] = []
            self._on_del[x].append(f)

    def remove(self, event, x, f):
        """Unregister callback f; drop the key's list once it empties.
        BUGFIX: this used to call list.find(), which does not exist, so
        every removal raised AttributeError.
        """
        if event == 'change' and x in self._on_change:
            v = self._on_change[x]
            if f in v:
                v.remove(f)
            if len(v) == 0:
                del self._on_change[x]
        elif event == 'del' and x in self._on_del:
            v = self._on_del[x]
            if f in v:
                v.remove(f)
            if len(v) == 0:
                del self._on_del[x]

    def __setitem__(self, x, y):
        dict.__setitem__(self, x, y)
        try:
            # Per-key listeners receive the new value; wildcard (None)
            # listeners receive (key, value).
            if x in self._on_change:
                for f in self._on_change[x]:
                    f(y)
            if None in self._on_change:
                for f in self._on_change[None]:
                    f(x, y)
        except Exception as mesg:
            # A broken listener must not corrupt the namespace itself.
            print(mesg)

    def __delitem__(self, x):
        try:
            if x in self._on_del:
                for f in self._on_del[x]:
                    f()
            if None in self._on_del:
                for f in self._on_del[None]:
                    f(x)
        except Exception as mesg:
            print(mesg)
        dict.__delitem__(self, x)

    def set(self, x, y, do_not_trigger=None):
        """Like __setitem__, but suppress callbacks in do_not_trigger."""
        dict.__setitem__(self, x, y)
        if x in self._on_change:
            if do_not_trigger is None:
                do_not_trigger = []
            for f in self._on_change[x]:
                if f not in do_not_trigger:
                    f(y)
        if None in self._on_change:
            for f in self._on_change[None]:
                f(x, y)
class TemporaryURL:
    """Value object pairing a blob URL with its time-to-live.
    ttl is in seconds; a ttl of 0 means the blob is permanent.
    """

    def __init__(self, url, ttl):
        self.url = url
        self.ttl = ttl

    def __str__(self):
        return self.url

    def __repr__(self):
        return repr(self.url)
namespace = Namespace({})
class Salvus(object):
"""
Cell execution state object and wrapper for access to special CoCalc Server functionality.
An instance of this object is created each time you execute a cell. It has various methods
for sending different types of output messages, links to files, etc. Type 'help(smc)' for
more details.
OUTPUT LIMITATIONS -- There is an absolute limit on the number of messages output for a given
cell, and also the size of the output message for each cell. You can access or change
those limits dynamically in a worksheet as follows by viewing or changing any of the
following variables::
sage_server.MAX_STDOUT_SIZE # max length of each stdout output message
sage_server.MAX_STDERR_SIZE # max length of each stderr output message
sage_server.MAX_MD_SIZE # max length of each md (markdown) output message
sage_server.MAX_HTML_SIZE # max length of each html output message
sage_server.MAX_TEX_SIZE # max length of tex output message
sage_server.MAX_OUTPUT_MESSAGES # max number of messages output for a cell.
And::
sage_server.MAX_OUTPUT # max total character output for a single cell; computation
# terminated/truncated if sum of above exceeds this.
"""
Namespace = Namespace
_prefix = ''
_postfix = ''
_default_mode = 'sage'
_py_features = {}
    def _flush_stdio(self):
        """
        Flush the standard output streams. This should be called before sending any message
        that produces output.
        """
        sys.stdout.flush()
        sys.stderr.flush()
    def __repr__(self):
        # Intentionally the empty string representation.
        return ''
    def __init__(self, conn, id, data=None, cell_id=None, message_queue=None):
        # conn: the ConnectionJSON used to send output messages;
        # id: id of the execute_code message this cell execution answers.
        self._conn = conn
        self._num_output_messages = 0
        self._total_output_length = 0
        self._output_warning_sent = False
        self._id = id
        self._done = True  # done=self._done when last execute message is sent; e.g., set self._done = False to not close cell on code term.
        self.data = data
        self.cell_id = cell_id
        self.namespace = namespace
        self.message_queue = message_queue
        self.code_decorators = []  # gets reset if there are code decorators
        # Alias: someday remove all references to "salvus" and instead use smc.
        # For now this alias is easier to think of and use.
        namespace['smc'] = namespace[
            'salvus'] = self  # beware of circular ref?
        # Monkey patch in our "require" command.
        namespace['require'] = self.require
        # Make the salvus object itself available when doing "from sage.all import *".
        import sage.all
        sage.all.salvus = self
    def _send_output(self, *args, **kwds):
        """Send one output message to the hub, enforcing per-cell limits.
        Raises KeyboardInterrupt to abort the computation once either
        MAX_OUTPUT_MESSAGES or MAX_OUTPUT has been exceeded.
        """
        if self._output_warning_sent:
            # A limit warning was already sent; keep terminating.
            raise KeyboardInterrupt
        mesg = message.output(*args, **kwds)
        if not mesg.get('once', False):
            # 'once' messages don't count against the message limit.
            self._num_output_messages += 1
        from . import sage_server
        if self._num_output_messages > sage_server.MAX_OUTPUT_MESSAGES:
            self._output_warning_sent = True
            err = "\nToo many output messages: %s (at most %s per cell -- type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
                self._num_output_messages, sage_server.MAX_OUTPUT_MESSAGES)
            self._conn.send_json(
                message.output(stderr=err, id=self._id, once=False, done=True))
            raise KeyboardInterrupt
        n = self._conn.send_json(mesg)
        self._total_output_length += n
        if self._total_output_length > sage_server.MAX_OUTPUT:
            self._output_warning_sent = True
            err = "\nOutput too long: %s -- MAX_OUTPUT (=%s) exceeded (type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
                self._total_output_length, sage_server.MAX_OUTPUT)
            self._conn.send_json(
                message.output(stderr=err, id=self._id, once=False, done=True))
            raise KeyboardInterrupt
    def obj(self, obj, done=False):
        # Send a Python object to the cell (JSON-encoded by message.output).
        self._send_output(obj=obj, id=self._id, done=done)
        return self
    def link(self, filename, label=None, foreground=True, cls=''):
        """
        Output a clickable link to a file somewhere in this project. The filename
        path must be relative to the current working directory of the Python process.
        The simplest way to use this is
            salvus.link("../name/of/file")  # any relative path to any file
        This creates a link, which when clicked on, opens that file in the foreground.
        If the filename is the name of a directory, clicking will instead
        open the file browser on that directory:
            salvus.link("../name/of/directory")  # clicking on the resulting link opens a directory
        If you would like a button instead of a link, pass cls='btn'. You can use any of
        the standard Bootstrap button classes, e.g., btn-small, btn-large, btn-success, etc.
        If you would like to change the text in the link (or button) to something
        besides the default (filename), just pass arbitrary HTML to the label= option.
        INPUT:
        - filename -- a relative path to a file or directory
        - label -- (default: the filename) html label for the link
        - foreground -- (default: True); if True, opens link in the foreground
        - cls -- (default: '') optional CSS classes, such as 'btn'.
        EXAMPLES:
        Use as a line decorator::
            %salvus.link name/of/file.foo
        Make a button::
            salvus.link("foo/bar/", label="The Bar Directory", cls='btn')
        Make two big blue buttons with plots in them::
            plot(sin, 0, 20).save('sin.png')
            plot(cos, 0, 20).save('cos.png')
            for img in ['sin.png', 'cos.png']:
                salvus.link(img, label="<img width='150px' src='%s'>"%salvus.file(img, show=False), cls='btn btn-large btn-primary')
        """
        # Make the path relative to $HOME (strip the home-dir prefix).
        path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
        if label is None:
            label = filename
        id = uuid()
        # Emit an empty anchor, then attach the label and click handler
        # to it via javascript (the obj payload carries the data).
        self.html("<a class='%s' style='cursor:pointer'; id='%s'></a>" %
                  (cls, id))
        s = "$('#%s').html(obj.label).click(function() {%s; return false;});" % (
            id, self._action(path, foreground))
        self.javascript(s,
                        obj={
                            'label': label,
                            'path': path,
                            'foreground': foreground
                        },
                        once=False)
    def _action(self, path, foreground):
        # Pick the client-side javascript snippet for opening `path`:
        # directories open the file browser, files open an editor tab.
        if os.path.isdir(path):
            if foreground:
                action = "worksheet.project_page.open_directory(obj.path);"
            else:
                action = "worksheet.project_page.set_current_path(obj.path);"
        else:
            action = "worksheet.project_page.open_file({'path':obj.path, 'foreground': obj.foreground});"
        return action
    def open_tab(self, filename, foreground=True):
        """
        Open a new file (or directory) document in another tab.
        See the documentation for salvus.link.
        """
        # Path relative to $HOME, as with salvus.link.
        path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
        self.javascript(self._action(path, foreground),
                        obj={
                            'path': path,
                            'foreground': foreground
                        },
                        once=True)
    def close_tab(self, filename):
        """
        Close an open file tab. The filename is relative to the current working directory.
        """
        self.javascript("worksheet.project_page.close_file(obj)",
                        obj=filename,
                        once=True)
    def threed(
            self,
            g,  # sage Graphic3d object.
            width=None,
            height=None,
            frame=True,  # True/False or {'color':'black', 'thickness':.4, 'labels':True, 'fontsize':14, 'draw':True,
            # 'xmin':?, 'xmax':?, 'ymin':?, 'ymax':?, 'zmin':?, 'zmax':?}
            background=None,
            foreground=None,
            spin=False,
            aspect_ratio=None,
            frame_aspect_ratio=None,  # synonym for aspect_ratio
            done=False,
            renderer=None,  # None, 'webgl', or 'canvas'
    ):
        """Render a Sage 3d graphics object in the cell.
        The scene is serialized to JSON, stored as a blob, and referenced
        from the output message by uuid (scenes can be large).
        """
        from .graphics import graphics3d_to_jsonable, json_float as f
        # process options, combining ones set explicitly above with ones inherited from 3d scene
        opts = {
            'width': width,
            'height': height,
            'background': background,
            'foreground': foreground,
            'spin': spin,
            'aspect_ratio': aspect_ratio,
            'renderer': renderer
        }
        extra_kwds = {} if g._extra_kwds is None else g._extra_kwds
        # clean up and normalize aspect_ratio option
        if aspect_ratio is None:
            # precedence: explicit arg > frame_aspect_ratio synonym >
            # values inherited from the graphics object itself.
            if frame_aspect_ratio is not None:
                aspect_ratio = frame_aspect_ratio
            elif 'frame_aspect_ratio' in extra_kwds:
                aspect_ratio = extra_kwds['frame_aspect_ratio']
            elif 'aspect_ratio' in extra_kwds:
                aspect_ratio = extra_kwds['aspect_ratio']
        if aspect_ratio is not None:
            if aspect_ratio == 1 or aspect_ratio == "automatic":
                aspect_ratio = None
            elif not (isinstance(aspect_ratio,
                                 (list, tuple)) and len(aspect_ratio) == 3):
                raise TypeError(
                    "aspect_ratio must be None, 1 or a 3-tuple, but it is '%s'"
                    % (aspect_ratio, ))
            else:
                aspect_ratio = [f(x) for x in aspect_ratio]
        opts['aspect_ratio'] = aspect_ratio
        # fill in remaining unset options from the graphics object
        for k in [
                'spin',
                'height',
                'width',
                'background',
                'foreground',
                'renderer',
        ]:
            if k in extra_kwds and not opts.get(k, None):
                opts[k] = extra_kwds[k]
        if not isinstance(opts['spin'], bool):
            opts['spin'] = f(opts['spin'])
        opts['width'] = f(opts['width'])
        opts['height'] = f(opts['height'])
        # determine the frame
        b = g.bounding_box()
        xmin, xmax, ymin, ymax, zmin, zmax = b[0][0], b[1][0], b[0][1], b[1][
            1], b[0][2], b[1][2]
        fr = opts['frame'] = {
            'xmin': f(xmin),
            'xmax': f(xmax),
            'ymin': f(ymin),
            'ymax': f(ymax),
            'zmin': f(zmin),
            'zmax': f(zmax)
        }
        if isinstance(frame, dict):
            # explicit frame dict overrides the computed bounding box
            for k in list(fr.keys()):
                if k in frame:
                    fr[k] = f(frame[k])
            fr['draw'] = frame.get('draw', True)
            fr['color'] = frame.get('color', None)
            fr['thickness'] = f(frame.get('thickness', None))
            fr['labels'] = frame.get('labels', None)
            if 'fontsize' in frame:
                fr['fontsize'] = int(frame['fontsize'])
        elif isinstance(frame, bool):
            fr['draw'] = frame
        # convert the Sage graphics object to a JSON object that can be rendered
        scene = {'opts': opts, 'obj': graphics3d_to_jsonable(g)}
        # Store that object in the database, rather than sending it directly as an output message.
        # We do this since obj can easily be quite large/complicated, and managing it as part of the
        # document is too slow and doesn't scale.
        blob = json.dumps(scene, separators=(',', ':'))
        uuid = self._conn.send_blob(blob)
        # flush output (so any text appears before 3d graphics, in case they are interleaved)
        self._flush_stdio()
        # send message pointing to the 3d 'file', which will get downloaded from database
        self._send_output(id=self._id,
                          file={
                              'filename': unicode8("%s.sage3d" % uuid),
                              'uuid': uuid
                          },
                          done=done)
def d3_graph(self, g, **kwds):
from .graphics import graph_to_d3_jsonable
self._send_output(id=self._id,
d3={
"viewer": "graph",
"data": graph_to_d3_jsonable(g, **kwds)
})
def file(self,
filename,
show=True,
done=False,
download=False,
once=False,
events=None,
raw=False,
text=None):
"""
Display or provide a link to the given file. Raises a RuntimeError if this
is not possible, e.g, if the file is too large.
If show=True (the default), the browser will show the file,
or provide a clickable link to it if there is no way to show it.
If text is also given that will be used instead of the path to the file.
If show=False, this function returns an object T such that
T.url (or str(t)) is a string of the form "/blobs/filename?uuid=the_uuid"
that can be used to access the file even if the file is immediately
deleted after calling this function (the file is stored in a database).
Also, T.ttl is the time to live (in seconds) of the object. A ttl of
0 means the object is permanently available.
raw=False (the default):
If you use the URL
/blobs/filename?uuid=the_uuid&download
then the server will include a header that tells the browser to
download the file to disk instead of displaying it. Only relatively
small files can be made available this way. However, they remain
available (for a day) even *after* the file is deleted.
NOTE: It is safe to delete the file immediately after this
function (salvus.file) returns.
raw=True:
Instead, the URL is to the raw file, which is served directly
from the project:
/project-id/raw/path/to/filename
This will only work if the file is not deleted; however, arbitrarily
large files can be streamed this way.
This function creates an output message {file:...}; if the user saves
a worksheet containing this message, then any referenced blobs are made
permanent in the database.
The uuid is based on the Sha-1 hash of the file content (it is computed using the
function sage_server.uuidsha1). Any two files with the same content have the
same Sha1 hash.
"""
filename = unicode8(filename)
if raw:
info = self.project_info()
path = os.path.abspath(filename)
home = os.environ['HOME'] + '/'
if path.startswith(home):
path = path[len(home):]
else:
raise ValueError(
"can only send raw files in your home directory")
url = os.path.join('/', info['base_url'].strip('/'),
info['project_id'], 'raw', path.lstrip('/'))
if show:
self._flush_stdio()
self._send_output(id=self._id,
once=once,
file={
'filename': filename,
'url': url,
'show': show,
'text': text
},
events=events,
done=done)
return
else:
return TemporaryURL(url=url, ttl=0)
file_uuid = self._conn.send_file(filename)
mesg = None
while mesg is None:
self.message_queue.recv()
for i, (typ, m) in enumerate(self.message_queue.queue):
if typ == 'json' and m.get('event') == 'save_blob' and m.get(
'sha1') == file_uuid:
mesg = m
del self.message_queue[i]
break
if 'error' in mesg:
raise RuntimeError("error saving blob -- %s" % mesg['error'])
self._flush_stdio()
self._send_output(id=self._id,
once=once,
file={
'filename': filename,
'uuid': file_uuid,
'show': show,
'text': text
},
events=events,
done=done)
if not show:
info = self.project_info()
url = "%s/blobs/%s?uuid=%s" % (info['base_url'], filename,
file_uuid)
if download:
url += '?download'
return TemporaryURL(url=url, ttl=mesg.get('ttl', 0))
def python_future_feature(self, feature=None, enable=None):
"""
Allow users to enable, disable, and query the features in the python __future__ module.
"""
if feature is None:
if enable is not None:
raise ValueError(
"enable may not be specified when feature is None")
return sorted(Salvus._py_features.keys())
attr = getattr(future, feature, None)
if (feature not in future.all_feature_names) or (
attr is None) or not isinstance(attr, future._Feature):
raise RuntimeError("future feature %.50r is not defined" %
(feature, ))
if enable is None:
return feature in Salvus._py_features
if enable:
Salvus._py_features[feature] = attr
else:
try:
del Salvus._py_features[feature]
except KeyError:
pass
def default_mode(self, mode=None):
"""
Set the default mode for cell evaluation. This is equivalent
to putting %mode at the top of any cell that does not start
with %. Use salvus.default_mode() to return the current mode.
Use salvus.default_mode("") to have no default mode.
This is implemented using salvus.cell_prefix.
"""
if mode is None:
return Salvus._default_mode
Salvus._default_mode = mode
if mode == "sage":
self.cell_prefix("")
else:
self.cell_prefix("%" + mode)
def cell_prefix(self, prefix=None):
"""
Make it so that the given prefix code is textually
prepending to the input before evaluating any cell, unless
the first character of the cell is a %.
To append code at the end, use cell_postfix.
INPUT:
- ``prefix`` -- None (to return prefix) or a string ("" to disable)
EXAMPLES:
Make it so every cell is timed:
salvus.cell_prefix('%time')
Make it so cells are typeset using latex, and latex comments are allowed even
as the first line.
salvus.cell_prefix('%latex')
%sage salvus.cell_prefix('')
Evaluate each cell using GP (Pari) and display the time it took:
salvus.cell_prefix('%time\n%gp')
%sage salvus.cell_prefix('') # back to normal
"""
if prefix is None:
return Salvus._prefix
else:
Salvus._prefix = prefix
def cell_postfix(self, postfix=None):
"""
Make it so that the given code is textually
appended to the input before evaluating a cell.
To prepend code at the beginning, use cell_prefix.
INPUT:
- ``postfix`` -- None (to return postfix) or a string ("" to disable)
EXAMPLES:
Print memory usage after evaluating each cell:
salvus.cell_postfix('print("%s MB used"%int(get_memory_usage()))')
Return to normal
salvus.set_cell_postfix('')
"""
if postfix is None:
return Salvus._postfix
else:
Salvus._postfix = postfix
    def execute(self, code, namespace=None, preparse=True, locals=None):
        """
        Evaluate ``code`` block-by-block in ``namespace`` (default:
        self.namespace), sending output through the cell's streams.

        - ``preparse`` -- run each block through the Sage preparser first,
          unless the user disabled preparsing (sage.repl.interpreter).
        - ``locals`` -- optional locals mapping passed to exec.

        Blocks ending in '?' / '??' are introspected (docstring / source)
        instead of executed.  On the first exception, an error report is
        written to stderr and the remaining blocks are skipped.
        """
        ascii_warn = False
        code_error = False  # NOTE(review): never set in this method -- appears vestigial
        # only warn about non-ascii input when the default encoding cannot
        # represent it (i.e. Python 2 with ascii default encoding)
        if sys.getdefaultencoding() == 'ascii':
            for c in code:
                if ord(c) >= 128:
                    ascii_warn = True
                    break
        if namespace is None:
            namespace = self.namespace
        # clear pylab figure (takes a few microseconds)
        if pylab is not None:
            pylab.clf()
        # OR together the compiler flags of every __future__ feature the
        # user has enabled so far (see python_future_feature)
        compile_flags = reduce(operator.or_,
                               (feature.compiler_flag
                                for feature in Salvus._py_features.values()),
                               0)
        #code = sage_parsing.strip_leading_prompts(code) # broken -- wrong on "def foo(x):\n print(x)"
        blocks = sage_parsing.divide_into_blocks(code)
        try:
            import sage.repl
            # CRITICAL -- we do NOT import sage.repl.interpreter!!!!!!!
            # That would waste several seconds importing ipython and much more, which is just dumb.
            # The only reason this is needed below is if the user has run preparser(False), which
            # would cause sage.repl.interpreter to be imported at that point (as preparser is
            # lazy imported.)
            sage_repl_interpreter = sage.repl.interpreter
        except:
            pass  # expected behavior usually, since sage.repl.interpreter usually not imported (only used by command line...)

        import sage.misc.session

        for start, stop, block in blocks:
            # if import sage.repl.interpreter fails, sag_repl_interpreter is unreferenced
            try:
                do_pp = getattr(sage_repl_interpreter, '_do_preparse', True)
            except:
                do_pp = True
            if preparse and do_pp:
                block = sage_parsing.preparse_code(block)
            sys.stdout.reset()
            sys.stderr.reset()
            try:
                b = block.rstrip()
                # get rid of comments at the end of the line -- issue #1835
                #from ushlex import shlex
                #s = shlex(b)
                #s.commenters = '#'
                #s.quotes = '"\''
                #b = ''.join(s)
                # e.g. now a line like 'x = test? # bar' becomes 'x=test?'
                if b.endswith('??'):
                    # '??' -> show source code of the object
                    p = sage_parsing.introspect(b,
                                                namespace=namespace,
                                                preparse=False)
                    self.code(source=p['result'], mode="python")
                elif b.endswith('?'):
                    # '?' -> show the docstring
                    p = sage_parsing.introspect(b,
                                                namespace=namespace,
                                                preparse=False)
                    self.code(source=p['result'], mode="text/x-rst")
                else:
                    reload_attached_files_if_mod_smc()
                    if execute.count < 2:
                        execute.count += 1
                        if execute.count == 2:
                            # this fixup has to happen after first block has executed (os.chdir etc)
                            # but before user assigns any variable in worksheet
                            # sage.misc.session.init() is not called until first call of show_identifiers
                            # BUGFIX: be careful to *NOT* assign to _!! see https://github.com/sagemathinc/cocalc/issues/1107
                            block2 = "sage.misc.session.state_at_init = dict(globals());sage.misc.session._dummy=sage.misc.session.show_identifiers();\n"
                            exec(compile(block2, '', 'single'), namespace,
                                 locals)
                        # load the user's startup file (if any) early in the
                        # worksheet's life
                        b2a = """
if 'SAGE_STARTUP_FILE' in os.environ and os.path.isfile(os.environ['SAGE_STARTUP_FILE']):
    try:
        load(os.environ['SAGE_STARTUP_FILE'])
    except:
        sys.stdout.flush()
        sys.stderr.write('\\nException loading startup file: {}\\n'.format(os.environ['SAGE_STARTUP_FILE']))
        sys.stderr.flush()
        raise
"""
                        exec(compile(b2a, '', 'exec'), namespace, locals)
                    # per-block __future__ statements also affect later blocks
                    features = sage_parsing.get_future_features(
                        block, 'single')
                    if features:
                        compile_flags = reduce(
                            operator.or_, (feature.compiler_flag
                                           for feature in features.values()),
                            compile_flags)
                    exec(
                        compile(block + '\n',
                                '',
                                'single',
                                flags=compile_flags), namespace, locals)
                    if features:
                        Salvus._py_features.update(features)
                sys.stdout.flush()
                sys.stderr.flush()
            except:
                if ascii_warn:
                    sys.stderr.write(
                        '\n\n*** WARNING: Code contains non-ascii characters ***\n'
                    )
                    # smart quotes are a common culprit when pasting code
                    for c in '\u201c\u201d':
                        if c in code:
                            sys.stderr.write(
                                '*** Maybe the character < %s > should be replaced by < " > ? ***\n'
                                % c)
                            break
                    sys.stderr.write('\n\n')

                if six.PY2:
                    from exceptions import SyntaxError, TypeError
                # py3: all standard errors are available by default via "builtin", not available here for some reason ...
                if six.PY3:
                    from builtins import SyntaxError, TypeError

                exc_type, _, _ = sys.exc_info()
                if exc_type in [SyntaxError, TypeError]:
                    from .sage_parsing import strip_string_literals
                    # strip string literals so e.g. "5x" inside a string does
                    # not trigger the implicit-multiplication hint
                    code0, _, _ = strip_string_literals(code)
                    implicit_mul = RE_POSSIBLE_IMPLICIT_MUL.findall(code0)
                    if len(implicit_mul) > 0:
                        implicit_mul_list = ', '.join(
                            str(_) for _ in implicit_mul)
                        # we know there is a SyntaxError and there could be an implicit multiplication
                        sys.stderr.write(
                            '\n\n*** WARNING: Code contains possible implicit multiplication ***\n'
                        )
                        sys.stderr.write(
                            '*** Check if any of [ %s ] need a "*" sign for multiplication, e.g. 5x should be 5*x ! ***\n\n'
                            % implicit_mul_list)

                sys.stdout.flush()
                sys.stderr.write('Error in lines %s-%s\n' %
                                 (start + 1, stop + 1))
                traceback.print_exc()
                sys.stderr.flush()
                break
    def execute_with_code_decorators(self,
                                     code_decorators,
                                     code,
                                     preparse=True,
                                     namespace=None,
                                     locals=None):
        """
        salvus.execute_with_code_decorators is used when evaluating
        code blocks that are set to any non-default code_decorator.

        Each name in ``code_decorators`` is evaluated in self.namespace and
        given a chance to transform ``code`` -- via a legacy ``eval`` method,
        ``before``/``after`` hooks, or by being called on the code -- after
        which whatever code remains is run with self.execute.
        """
        import sage  # used below as a code decorator
        if is_string(code_decorators):
            code_decorators = [code_decorators]

        if preparse:
            code_decorators = list(
                map(sage_parsing.preparse_code, code_decorators))

        # resolve decorator names to the actual decorator objects
        code_decorators = [
            eval(code_decorator, self.namespace)
            for code_decorator in code_decorators
        ]

        # The code itself may want to know exactly what code decorators are in effect.
        # For example, r.eval can do extra things when being used as a decorator.
        self.code_decorators = code_decorators

        # give each decorator's before-hook a chance to replace the decorator
        for i, code_decorator in enumerate(code_decorators):
            # eval is for backward compatibility
            if not hasattr(code_decorator, 'eval') and hasattr(
                    code_decorator, 'before'):
                code_decorators[i] = code_decorator.before(code)

        # apply the decorators innermost-first (hence reversed)
        for code_decorator in reversed(code_decorators):
            # eval is for backward compatibility
            if hasattr(code_decorator, 'eval'):
                print(code_decorator.eval(
                    code, locals=self.namespace))  # removed , end=' '
                code = ''
            elif code_decorator is sage:
                # special case -- the sage module (i.e., %sage) should do nothing.
                pass
            else:
                code = code_decorator(code)
            if code is None:
                code = ''

        if code != '' and is_string(code):
            self.execute(code,
                         preparse=preparse,
                         namespace=namespace,
                         locals=locals)

        # finally run any after-hooks on the (possibly transformed) code
        for code_decorator in code_decorators:
            if not hasattr(code_decorator, 'eval') and hasattr(
                    code_decorator, 'after'):
                code_decorator.after(code)
def html(self, html, done=False, once=None):
"""
Display html in the output stream.
EXAMPLE:
salvus.html("<b>Hi</b>")
"""
self._flush_stdio()
self._send_output(html=unicode8(html),
id=self._id,
done=done,
once=once)
def md(self, md, done=False, once=None):
"""
Display markdown in the output stream.
EXAMPLE:
salvus.md("**Hi**")
"""
self._flush_stdio()
self._send_output(md=unicode8(md), id=self._id, done=done, once=once)
def pdf(self, filename, **kwds):
sage_salvus.show_pdf(filename, **kwds)
def tex(self, obj, display=False, done=False, once=None, **kwds):
"""
Display obj nicely using TeX rendering.
INPUT:
- obj -- latex string or object that is automatically be converted to TeX
- display -- (default: False); if True, typeset as display math (so centered, etc.)
"""
self._flush_stdio()
tex = obj if is_string(obj) else self.namespace['latex'](obj, **kwds)
self._send_output(tex={
'tex': tex,
'display': display
},
id=self._id,
done=done,
once=once)
return self
def start_executing(self):
self._send_output(done=False, id=self._id)
def clear(self, done=False):
self._send_output(clear=True, id=self._id, done=done)
def delete_last_output(self, done=False):
self._send_output(delete_last=True, id=self._id, done=done)
def stdout(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard output stream of the compute cell.
INPUT:
- output -- string or object
"""
stdout = output if is_string(output) else unicode8(output)
self._send_output(stdout=stdout, done=done, id=self._id, once=once)
return self
def stderr(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard error stream of the compute cell.
INPUT:
- output -- string or object
"""
stderr = output if is_string(output) else unicode8(output)
self._send_output(stderr=stderr, done=done, id=self._id, once=once)
return self
def code(
self,
source, # actual source code
mode=None, # the syntax highlight codemirror mode
filename=None, # path of file it is contained in (if applicable)
lineno=-1, # line number where source starts (0-based)
done=False,
once=None):
"""
Send a code message, which is to be rendered as code by the client, with
appropriate syntax highlighting, maybe a link to open the source file, etc.
"""
source = source if is_string(source) else unicode8(source)
code = {
'source': source,
'filename': filename,
'lineno': int(lineno),
'mode': mode
}
self._send_output(code=code, done=done, id=self._id, once=once)
return self
def _execute_interact(self, id, vals):
if id not in sage_salvus.interacts:
print("(Evaluate this cell to use this interact.)")
#raise RuntimeError("Error: No interact with id %s"%id)
else:
sage_salvus.interacts[id](vals)
def interact(self, f, done=False, once=None, **kwds):
I = sage_salvus.InteractCell(f, **kwds)
self._flush_stdio()
self._send_output(interact=I.jsonable(),
id=self._id,
done=done,
once=once)
return sage_salvus.InteractFunction(I)
def javascript(self,
code,
once=False,
coffeescript=False,
done=False,
obj=None):
"""
Execute the given Javascript code as part of the output
stream. This same code will be executed (at exactly this
point in the output stream) every time the worksheet is
rendered.
See the docs for the top-level javascript function for more details.
INPUT:
- code -- a string
- once -- boolean (default: FAlse); if True the Javascript is
only executed once, not every time the cell is loaded. This
is what you would use if you call salvus.stdout, etc. Use
once=False, e.g., if you are using javascript to make a DOM
element draggable (say). WARNING: If once=True, then the
javascript is likely to get executed before other output to
a given cell is even rendered.
- coffeescript -- boolean (default: False); if True, the input
code is first converted from CoffeeScript to Javascript.
At least the following Javascript objects are defined in the
scope in which the code is evaluated::
- cell -- jQuery wrapper around the current compute cell
- salvus.stdout, salvus.stderr, salvus.html, salvus.tex -- all
allow you to write additional output to the cell
- worksheet - jQuery wrapper around the current worksheet DOM object
- obj -- the optional obj argument, which is passed via JSON serialization
"""
if obj is None:
obj = {}
self._send_output(javascript={
'code': code,
'coffeescript': coffeescript
},
id=self._id,
done=done,
obj=obj,
once=once)
def coffeescript(self, *args, **kwds):
"""
This is the same as salvus.javascript, but with coffeescript=True.
See the docs for the top-level javascript function for more details.
"""
kwds['coffeescript'] = True
self.javascript(*args, **kwds)
    def raw_input(self,
                  prompt='',
                  default='',
                  placeholder='',
                  input_width=None,
                  label_width=None,
                  done=False,
                  type=None):  # done is ignored here
        """
        Show an input control in the cell output, block until the client
        replies with a 'sage_raw_input' message, and return the entered
        value.  If ``type`` is given, the value is converted: type='sage'
        uses sage_salvus.sage_eval, otherwise ``type`` is called on the
        value (falling back to type(str(value)) on TypeError).  Raises
        KeyboardInterrupt if a different message arrives first.
        """
        self._flush_stdio()
        m = {'prompt': unicode8(prompt)}
        if input_width is not None:
            m['input_width'] = unicode8(input_width)
        if label_width is not None:
            m['label_width'] = unicode8(label_width)
        if default:
            m['value'] = unicode8(default)
        if placeholder:
            m['placeholder'] = unicode8(placeholder)
        self._send_output(raw_input=m, id=self._id)
        # block until the client responds
        typ, mesg = self.message_queue.next_mesg()
        log("handling raw input message ", truncate_text(unicode8(mesg), 400))
        if typ == 'json' and mesg['event'] == 'sage_raw_input':
            # everything worked out perfectly
            self.delete_last_output()
            m['value'] = mesg['value']  # as unicode!
            m['submitted'] = True
            # re-send the control marked as submitted so the UI shows the
            # final value instead of an editable box
            self._send_output(raw_input=m, id=self._id)
            value = mesg['value']
            if type is not None:
                if type == 'sage':
                    value = sage_salvus.sage_eval(value)
                else:
                    try:
                        value = type(value)
                    except TypeError:
                        # Some things in Sage are clueless about unicode for some reason...
                        # Let's at least try, in case the unicode can convert to a string.
                        value = type(str(value))
            return value
        else:
            raise KeyboardInterrupt(
                "raw_input interrupted by another action: event='%s' (expected 'sage_raw_input')"
                % mesg['event'])
def _check_component(self, component):
if component not in ['input', 'output']:
raise ValueError("component must be 'input' or 'output'")
def hide(self, component):
"""
Hide the given component ('input' or 'output') of the cell.
"""
self._check_component(component)
self._send_output(self._id, hide=component)
def show(self, component):
"""
Show the given component ('input' or 'output') of the cell.
"""
self._check_component(component)
self._send_output(self._id, show=component)
def notify(self, **kwds):
"""
Display a graphical notification using the alert_message Javascript function.
INPUTS:
- `type: "default"` - Type of the notice. "default", "warning", "info", "success", or "error".
- `title: ""` - The notice's title.
- `message: ""` - The notice's text.
- `timeout: ?` - Delay in seconds before the notice is automatically removed.
EXAMPLE:
salvus.notify(type="warning", title="This warning", message="This is a quick message.", timeout=3)
"""
obj = {}
for k, v in kwds.items():
if k == 'text': # backward compat
k = 'message'
elif k == 'type' and v == 'notice': # backward compat
v = 'default'
obj[k] = sage_salvus.jsonable(v)
if k == 'delay': # backward compat
obj['timeout'] = v / 1000.0 # units are in seconds now.
self.javascript("alert_message(obj)", once=True, obj=obj)
def execute_javascript(self, code, coffeescript=False, obj=None):
"""
Tell the browser to execute javascript. Basically the same as
salvus.javascript with once=True (the default), except this
isn't tied to a particular cell. There is a worksheet object
defined in the scope of the evaluation.
See the docs for the top-level javascript function for more details.
"""
self._conn.send_json(
message.execute_javascript(code,
coffeescript=coffeescript,
obj=json.dumps(obj,
separators=(',', ':'))))
def execute_coffeescript(self, *args, **kwds):
"""
This is the same as salvus.execute_javascript, but with coffeescript=True.
See the docs for the top-level javascript function for more details.
"""
kwds['coffeescript'] = True
self.execute_javascript(*args, **kwds)
def _cython(self, filename, **opts):
"""
Return module obtained by compiling the Cython code in the
given file.
INPUT:
- filename -- name of a Cython file
- all other options are passed to sage.misc.cython.cython unchanged,
except for use_cache which defaults to True (instead of False)
OUTPUT:
- a module
"""
if 'use_cache' not in opts:
opts['use_cache'] = True
import sage.misc.cython
modname, path = sage.misc.cython.cython(filename, **opts)
try:
sys.path.insert(0, path)
module = __import__(modname)
finally:
del sys.path[0]
return module
def _import_code(self, content, **opts):
while True:
py_file_base = uuid().replace('-', '_')
if not os.path.exists(py_file_base + '.py'):
break
try:
open(py_file_base + '.py', 'w').write(content)
try:
sys.path.insert(0, os.path.abspath('.'))
mod = __import__(py_file_base)
finally:
del sys.path[0]
finally:
os.unlink(py_file_base + '.py')
os.unlink(py_file_base + '.pyc')
return mod
def _sage(self, filename, **opts):
import sage.misc.preparser
content = "from sage.all import *\n" + sage.misc.preparser.preparse_file(
open(filename).read())
return self._import_code(content, **opts)
def _spy(self, filename, **opts):
import sage.misc.preparser
content = "from sage.all import Integer, RealNumber, PolynomialRing\n" + sage.misc.preparser.preparse_file(
open(filename).read())
return self._import_code(content, **opts)
def _py(self, filename, **opts):
return __import__(filename)
def require(self, filename, **opts):
if not os.path.exists(filename):
raise ValueError("file '%s' must exist" % filename)
base, ext = os.path.splitext(filename)
if ext == '.pyx' or ext == '.spyx':
return self._cython(filename, **opts)
if ext == ".sage":
return self._sage(filename, **opts)
if ext == ".spy":
return self._spy(filename, **opts)
if ext == ".py":
return self._py(filename, **opts)
raise NotImplementedError("require file of type %s not implemented" %
ext)
def typeset_mode(self, on=True):
sage_salvus.typeset_mode(on)
def project_info(self):
"""
Return a dictionary with information about the project in which this code is running.
EXAMPLES::
sage: salvus.project_info()
{"stdout":"{u'project_id': u'...', u'location': {u'username': u'teaAuZ9M', u'path': u'.', u'host': u'localhost', u'port': 22}, u'base_url': u'/...'}\n"}
"""
return INFO
# Copy docstrings from the sage_salvus implementations onto the Salvus
# methods that merely delegate to them.  Unbound methods expose __doc__
# differently on Python 2 (via __func__), hence the branch.
if six.PY2:
    Salvus.pdf.__func__.__doc__ = sage_salvus.show_pdf.__doc__
    Salvus.raw_input.__func__.__doc__ = sage_salvus.raw_input.__doc__
    Salvus.clear.__func__.__doc__ = sage_salvus.clear.__doc__
    Salvus.delete_last_output.__func__.__doc__ = sage_salvus.delete_last_output.__doc__
else:
    Salvus.pdf.__doc__ = sage_salvus.show_pdf.__doc__
    Salvus.raw_input.__doc__ = sage_salvus.raw_input.__doc__
    Salvus.clear.__doc__ = sage_salvus.clear.__doc__
    Salvus.delete_last_output.__doc__ = sage_salvus.delete_last_output.__doc__
def execute(conn, id, code, data, cell_id, preparse, message_queue):
    """
    Entry point for an 'execute_code' message: build a Salvus object for the
    cell, swap stdout/stderr for buffered streams that forward output
    messages, apply the configured cell prefix/postfix, run the code, and
    guarantee exactly one 'done' message (unless salvus._done is False).
    """
    salvus = Salvus(conn=conn,
                    id=id,
                    data=data,
                    message_queue=message_queue,
                    cell_id=cell_id)

    #salvus.start_executing() # with our new mainly client-side execution this isn't needed; not doing this makes evaluation roundtrip around 100ms instead of 200ms too, which is a major win.

    try:
        # initialize the salvus output streams
        streams = (sys.stdout, sys.stderr)
        sys.stdout = BufferedOutputStream(salvus.stdout)
        sys.stderr = BufferedOutputStream(salvus.stderr)
        try:
            # initialize more salvus functionality
            sage_salvus.set_salvus(salvus)
            namespace['sage_salvus'] = sage_salvus
        except:
            traceback.print_exc()

        # a cell that starts with % opts out of the configured prefix
        if salvus._prefix:
            if not code.startswith("%"):
                code = salvus._prefix + '\n' + code

        if salvus._postfix:
            code += '\n' + salvus._postfix

        salvus.execute(code, namespace=namespace, preparse=preparse)

    finally:
        # there must be exactly one done message, unless salvus._done is False.
        # flush whichever buffered stream is last so it carries the done flag.
        if sys.stderr._buf:
            if sys.stdout._buf:
                sys.stdout.flush()
            sys.stderr.flush(done=salvus._done)
        else:
            sys.stdout.flush(done=salvus._done)

        # restore the real process streams
        (sys.stdout, sys.stderr) = streams


# execute.count goes from 0 to 2
# used for show_identifiers()
execute.count = 0
def drop_privileges(id, home, transient, username):
    """
    Permanently drop root privileges in the forked child: optionally chown
    the transient home directory, setgid/setuid to ``id``, repoint the
    HOME-related environment variables at ``home``, and chdir there.
    """
    gid = id
    uid = id
    if transient:
        os.chown(home, uid, gid)
    # order matters: the gid must be changed while we still have the
    # privileges to do so, i.e. before setuid.
    os.setgid(gid)
    os.setuid(uid)
    os.environ['DOT_SAGE'] = home
    mpl = os.environ['MPLCONFIGDIR']
    # relocate MPLCONFIGDIR under the new home; assumes the old value has a
    # 5-character prefix being stripped (e.g. '/tmp/') -- TODO confirm
    os.environ['MPLCONFIGDIR'] = home + mpl[5:]
    os.environ['HOME'] = home
    os.environ['IPYTHON_DIR'] = home
    os.environ['USERNAME'] = username
    os.environ['USER'] = username
    os.chdir(home)

    # Monkey patch the Sage library and anything else that does not
    # deal well with changing user. This sucks, but it is work that
    # simply must be done because we're not importing the library from
    # scratch (which would take a long time).
    import sage.misc.misc
    sage.misc.misc.DOT_SAGE = home + '/.sage/'
class MessageQueue(list):
    """
    FIFO queue of messages from the hub connection.

    ``recv`` pushes newly received messages at index 0 and ``next_mesg``
    pops from the end, so the oldest message is always returned first.
    """

    def __init__(self, conn):
        self.queue = []
        self.conn = conn

    def __repr__(self):
        return "Sage Server Message Queue"

    def __getitem__(self, i):
        return self.queue[i]

    def __delitem__(self, i):
        del self.queue[i]

    def next_mesg(self):
        """
        Remove oldest message from the queue and return it.
        If the queue is empty, wait for a message to arrive
        and return it (does not place it in the queue).
        """
        return self.queue.pop() if self.queue else self.conn.recv()

    def recv(self):
        """
        Wait until one message is received and enqueue it.
        Also returns the mesg.
        """
        mesg = self.conn.recv()
        self.queue.insert(0, mesg)
        return mesg
def session(conn):
    """
    This is run by the child process that is forked off on each new
    connection.  It drops privileges, then handles the complete
    compute session: an endless loop dispatching 'terminate_session',
    'execute_code' and 'introspect' messages from the hub.

    INPUT:

    - ``conn`` -- the TCP connection
    """
    mq = MessageQueue(conn)

    pid = os.getpid()

    # seed the random number generator(s)
    import sage.all
    sage.all.set_random_seed()
    import random
    random.seed(sage.all.initial_seed())

    # get_memory_usage is not aware of being forked...
    import sage.misc.getusage
    sage.misc.getusage._proc_status = "/proc/%s/status" % os.getpid()

    cnt = 0

    while True:
        try:
            typ, mesg = mq.next_mesg()

            #print('INFO:child%s: received message "%s"'%(pid, mesg))
            log("handling message ", truncate_text(unicode8(mesg), 400))
            event = mesg['event']
            if event == 'terminate_session':
                return
            elif event == 'execute_code':
                try:
                    execute(conn=conn,
                            id=mesg['id'],
                            code=mesg['code'],
                            data=mesg.get('data', None),
                            cell_id=mesg.get('cell_id', None),
                            preparse=mesg.get('preparse', True),
                            message_queue=mq)
                except Exception as err:
                    log("ERROR -- exception raised '%s' when executing '%s'" %
                        (err, mesg['code']))
            elif event == 'introspect':
                try:
                    # check for introspect from jupyter cell
                    prefix = Salvus._default_mode
                    if 'top' in mesg:
                        top = mesg['top']
                        log('introspect cell top line %s' % top)
                        if top.startswith("%"):
                            prefix = top[1:]
                    try:
                        # see if prefix is the name of a jupyter kernel function
                        kc = eval(prefix + "(get_kernel_client=True)",
                                  namespace, locals())
                        kn = eval(prefix + "(get_kernel_name=True)", namespace,
                                  locals())
                        log("jupyter introspect prefix %s kernel %s" %
                            (prefix, kn))  # e.g. "p2", "python2"
                        jupyter_introspect(conn=conn,
                                           id=mesg['id'],
                                           line=mesg['line'],
                                           preparse=mesg.get('preparse', True),
                                           kc=kc)
                    except:
                        # not a jupyter cell -- fall back to ordinary sage
                        # introspection
                        import traceback
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        lines = traceback.format_exception(
                            exc_type, exc_value, exc_traceback)
                        log(lines)
                        introspect(conn=conn,
                                   id=mesg['id'],
                                   line=mesg['line'],
                                   preparse=mesg.get('preparse', True))
                except:
                    pass
            else:
                raise RuntimeError("invalid message '%s'" % mesg)
        except:
            # When hub connection dies, loop goes crazy.
            # Unfortunately, just catching SIGINT doesn't seem to
            # work, and leads to random exits during a
            # session. Howeer, when connection dies, 10000 iterations
            # happen almost instantly. Ugly, but it works.
            cnt += 1
            if cnt > 10000:
                sys.exit(0)
            else:
                pass
def jupyter_introspect(conn, id, line, preparse, kc):
    """
    Tab-completion for a cell backed by a jupyter kernel: send a complete
    request through kernel client ``kc``, drain the iopub channel until the
    kernel reports idle, then read the complete_reply from the shell channel
    and forward the trimmed completions to the hub.  All failures are
    logged, never raised.
    """
    import jupyter_client
    from queue import Empty

    try:
        salvus = Salvus(conn=conn, id=id)
        msg_id = kc.complete(line)
        shell = kc.shell_channel
        iopub = kc.iopub_channel

        # handle iopub responses
        while True:
            try:
                msg = iopub.get_msg(timeout=1)
                msg_type = msg['msg_type']
                content = msg['content']
            except Empty:
                # shouldn't happen
                log("jupyter iopub channel empty")
                break

            # skip messages belonging to other requests
            if msg['parent_header'].get('msg_id') != msg_id:
                continue

            log("jupyter iopub recv %s %s" % (msg_type, str(content)))
            if msg_type == 'status' and content['execution_state'] == 'idle':
                break

        # handle shell responses
        while True:
            try:
                msg = shell.get_msg(timeout=10)
                msg_type = msg['msg_type']
                content = msg['content']
            except:
                # shouldn't happen
                log("jupyter shell channel empty")
                break

            if msg['parent_header'].get('msg_id') != msg_id:
                continue

            log("jupyter shell recv %s %s" % (msg_type, str(content)))
            if msg_type == 'complete_reply' and content['status'] == 'ok':
                # jupyter kernel returns matches like "xyz.append" and smc wants just "append"
                matches = content['matches']
                offset = content['cursor_end'] - content['cursor_start']
                completions = [s[offset:] for s in matches]
                mesg = message.introspect_completions(id=id,
                                                      completions=completions,
                                                      target=line[-offset:])
                conn.send_json(mesg)
                break
    except:
        log("jupyter completion exception: %s" % sys.exc_info()[0])
def introspect(conn, id, line, preparse):
    """
    Answer an introspection request for ``line``: send completions, a
    docstring, or source code back to the hub, whichever sage_parsing
    determined was asked for.
    """
    # Instantiating Salvus makes salvus.[tab] work -- note that Salvus(...)
    # modifies namespace.
    salvus = Salvus(conn=conn, id=id)
    z = sage_parsing.introspect(line, namespace=namespace, preparse=preparse)
    if z['get_completions']:
        mesg = message.introspect_completions(
            id=id, completions=z['result'], target=z['target'])
    elif z['get_help']:
        mesg = message.introspect_docstring(
            id=id, docstring=z['result'], target=z['expr'])
    elif z['get_source']:
        mesg = message.introspect_source_code(
            id=id, source_code=z['result'], target=z['expr'])
    conn.send_json(mesg)
def handle_session_term(signum, frame):
    """Reap every finished child process without blocking (SIGCHLD-style handler)."""
    while True:
        try:
            pid, exit_status = os.waitpid(-1, os.WNOHANG)
        except:
            # no children left (or waitpid otherwise failed) -- done
            return
        if not pid:
            # children exist but none have exited yet
            return
# Shared secret used to authenticate incoming connections; the token itself
# is read lazily from secret_token_path in unlock_conn.
secret_token = None
if 'COCALC_SECRET_TOKEN' in os.environ:
    secret_token_path = os.environ['COCALC_SECRET_TOKEN']
else:
    secret_token_path = os.path.join(os.environ['SMC'], 'secret_token')
def unlock_conn(conn):
    """
    Perform the shared-secret handshake on a newly accepted socket.

    Reads (and caches) the secret token from ``secret_token_path``, then
    receives the client's token and compares.  Sends 'y' and returns True
    on success; sends 'n' plus an explanation, closes the connection, and
    returns False on failure.
    """
    global secret_token
    if secret_token is None:
        try:
            secret_token = open(secret_token_path).read().strip()
        except:
            conn.send(six.b('n'))
            conn.send(
                six.b(
                    "Unable to accept connection, since Sage server doesn't yet know the secret token; unable to read from '%s'"
                    % secret_token_path))
            conn.close()
            # BUGFIX: without this return we fell through with secret_token
            # still None, raising TypeError on len(None) below and trying to
            # recv on a connection we just closed.
            return False

    n = len(secret_token)
    token = six.b('')
    while len(token) < n:
        token += conn.recv(n)
        if token != secret_token[:len(token)]:
            break  # definitely not right -- don't try anymore
    if token != six.b(secret_token):
        log("token='%s'; secret_token='%s'" % (token, secret_token))
        conn.send(six.b('n'))  # no -- invalid login
        conn.send(six.b("Invalid secret token."))
        conn.close()
        return False
    else:
        conn.send(six.b('y'))  # yes -- valid login
        return True
def serve_connection(conn):
    """
    Handle one freshly accepted client socket: authenticate it with the
    secret token, then either deliver a requested signal or run a full
    compute session.
    """
    global PID
    PID = os.getpid()

    # First the client *must* send the secret shared token. If they
    # don't, we return (and the connection will have been destroyed by
    # unlock_conn).
    log("Serving a connection")
    log("Waiting for client to unlock the connection...")
    # TODO -- put in a timeout (?)
    if not unlock_conn(conn):
        log("Client failed to unlock connection. Dumping them.")
        return
    log("Connection unlocked.")

    try:
        conn = ConnectionJSON(conn)
        typ, mesg = conn.recv()
        log("Received message %s" % mesg)
    except Exception as err:
        log("Error receiving message: %s (connection terminated)" % str(err))
        raise

    event = mesg['event']
    if event == 'send_signal':
        if mesg['pid'] == 0:
            log("invalid signal mesg (pid=0)")
        else:
            log("Sending a signal")
            os.kill(mesg['pid'], mesg['signal'])
        return
    if event != 'start_session':
        log("Received an unknown message event = %s; terminating session." %
            mesg['event'])
        return

    log("Starting a session")
    desc = message.session_description(os.getpid())
    log("child sending session description back: %s" % desc)
    conn.send_json(desc)
    session(conn=conn)
def serve(port, host, extra_imports=False):
    """Run the forking sage server.

    Binds a listening socket on (host, port), pre-imports the Sage library
    into the global ``namespace``, then accepts connections forever, forking
    one child process per client.  Only returns if the accept loop raises;
    the listening socket is always closed on the way out.

    - ``port``/``host`` -- where to listen
    - ``extra_imports`` -- if True, also warm up scipy/sympy and plotting

    Bug fix: when reaping a finished child we now close *that child's* stored
    connection (``children[pid]``) instead of whatever socket the local name
    ``conn`` happened to reference (which could be a newer, live connection).
    """
    #log.info('opening connection on port %s', port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # check for children that have finished every few seconds, so
    # we don't end up with zombies.
    s.settimeout(5)
    s.bind((host, port))
    log('Sage server %s:%s' % (host, port))

    # Enabling the following signal completely breaks subprocess pexpect in many cases, which is
    # obviously totally unacceptable.
    #signal.signal(signal.SIGCHLD, handle_session_term)

    def init_library():
        """Import the Sage library once, apply the Salvus monkey patches and
        populate the global ``namespace`` that worksheet cells execute in."""
        tm = time.time()
        log("pre-importing the sage library...")

        # FOR testing purposes.
        ##log("fake 40 second pause to slow things down for testing....")
        ##time.sleep(40)
        ##log("done with pause")

        # Actually import sage now.  This must happen after the interact
        # import because of library interacts.
        log("import sage...")
        import sage.all
        log("imported sage.")

        # Monkey patching interact using the new and improved Salvus
        # implementation of interact.
        sage.all.interact = sage_salvus.interact

        # Monkey patch the html command.
        try:
            # need the following for sage_server to start with sage-8.0
            # or `import sage.interacts.library` will fail (not really important below, as we don't do that).
            import sage.repl.user_globals
            sage.repl.user_globals.set_globals(globals())
            log("initialized user_globals")
        except RuntimeError:
            # may happen with sage version < 8.0
            log("user_globals.set_globals failed, continuing", sys.exc_info())

        sage.all.html = sage.misc.html.html = sage_salvus.html

        # CRITICAL: look, we are just going to not do this, and have sage.interacts.library
        # be broken.  It's **really slow** to do this, and I don't think sage.interacts.library
        # ever ended up going anywhere!  People use wiki.sagemath.org/interact instead...
        #import sage.interacts.library
        #sage.interacts.library.html = sage_salvus.html

        # Set a useful figsize default; the matplotlib one is not notebook friendly.
        import sage.plot.graphics
        sage.plot.graphics.Graphics.SHOW_OPTIONS['figsize'] = [8, 4]

        # Monkey patch latex.eval, so that %latex works in worksheets
        sage.misc.latex.latex.eval = sage_salvus.latex0

        # Plot, integrate, etc., -- so startup time of worksheets is minimal.
        cmds = [
            'from sage.all import *', 'from sage.calculus.predefined import x',
            'import pylab'
        ]
        if extra_imports:
            cmds.extend([
                'import scipy', 'import sympy',
                "plot(sin).save('%s/a.png'%os.environ['SMC'], figsize=2)",
                'integrate(sin(x**2),x)'
            ])
        for cmd in cmds:
            log(cmd)
            exec(cmd, namespace)

        global pylab
        pylab = namespace['pylab']  # used for clearing

        log('imported sage library and other components in %s seconds' %
            (time.time() - tm))

        for k, v in sage_salvus.interact_functions.items():
            namespace[k] = v
        # See above -- not doing this, since it is REALLY SLOW to import.
        # This does mean that some old code that tries to use interact might break (?).
        #namespace[k] = sagenb.notebook.interact.__dict__[k] = v

        namespace['_salvus_parsing'] = sage_parsing

        # Expose every cell-mode/decorator command implemented in sage_salvus.
        for name in [
                'anaconda', 'asy', 'attach', 'auto', 'capture', 'cell',
                'clear', 'coffeescript', 'cython', 'default_mode',
                'delete_last_output', 'dynamic', 'exercise', 'fork', 'fortran',
                'go', 'help', 'hide', 'hideall', 'input', 'java', 'javascript',
                'julia', 'jupyter', 'license', 'load', 'md', 'mediawiki',
                'modes', 'octave', 'pandoc', 'perl', 'plot3d_using_matplotlib',
                'prun', 'python_future_feature', 'py3print_mode', 'python',
                'python3', 'r', 'raw_input', 'reset', 'restore', 'ruby',
                'runfile', 'sage_eval', 'scala', 'scala211',
                'script', 'search_doc', 'search_src', 'sh', 'show',
                'show_identifiers', 'singular_kernel', 'time', 'timeit',
                'typeset_mode', 'var', 'wiki'
        ]:
            namespace[name] = getattr(sage_salvus, name)

        namespace['sage_server'] = sys.modules[
            __name__]  # http://stackoverflow.com/questions/1676835/python-how-do-i-get-a-reference-to-a-module-inside-the-module-itself
        # alias pretty_print_default to typeset_mode, since sagenb has/uses that.
        namespace['pretty_print_default'] = namespace['typeset_mode']
        # and monkey patch it
        sage.misc.latex.pretty_print_default = namespace[
            'pretty_print_default']

        sage_salvus.default_namespace = dict(namespace)
        log("setup namespace with extra functions")

        # Sage's pretty_print and view are both ancient and a mess
        sage.all.pretty_print = sage.misc.latex.pretty_print = namespace[
            'pretty_print'] = namespace['view'] = namespace['show']

        # this way client code can tell it is running as a Sage Worksheet.
        namespace['__SAGEWS__'] = True

    log("Initialize sage library.")
    init_library()

    s.listen(128)
    i = 0  # connection counter; only used by the commented-out debug print below
    children = {}  # child pid -> that child's connection object (parent side)
    log("Starting server listening for connections")
    try:
        while True:
            i += 1
            #print i, time.time()-t, 'cps: ', int(i/(time.time()-t))
            # do not use log.info(...) in the server loop; threads = race conditions that hang server every so often!!
            try:
                if children:
                    # Reap any finished children so we don't accumulate zombies.
                    for pid in list(children.keys()):
                        if os.waitpid(pid, os.WNOHANG) != (0, 0):
                            log("subprocess %s terminated, closing connection"
                                % pid)
                            # close the DEAD child's connection -- not `conn`,
                            # which references the most recently accepted one.
                            children[pid].close()
                            del children[pid]
                try:
                    conn, addr = s.accept()
                    log("Accepted a connection from", addr)
                except:
                    # this will happen periodically since we did s.settimeout above, so
                    # that we wait for children above periodically.
                    continue
            except socket.error:
                continue
            child_pid = os.fork()
            if child_pid:  # parent
                log("forked off child with pid %s to handle this connection" %
                    child_pid)
                children[child_pid] = conn
            else:
                # child
                global PID
                PID = os.getpid()
                log("child process, will now serve this new connection")
                serve_connection(conn)
        # end while
    except Exception as err:
        log("Error taking connection: ", err)
        # use a context manager so the log handle is not leaked
        with open(LOGFILE, 'a') as fh:
            traceback.print_exc(file=fh)
        #log.error("error: %s %s", type(err), str(err))
    finally:
        log("closing socket")
        #s.shutdown(0)
        s.close()
def run_server(port, host, pidfile, logfile=None):
    """Record our pid in *pidfile* (if given), then run ``serve`` on
    (host, port).

    - ``logfile`` -- if given, replaces the module-level LOGFILE first.

    The pidfile is always removed on the way out, even if ``serve`` raises.
    """
    global LOGFILE
    if logfile:
        LOGFILE = logfile
    if pidfile:
        pid = str(os.getpid())
        print("os.getpid() = %s" % pid)
        # close the handle promptly instead of leaking it for the
        # lifetime of the server
        with open(pidfile, 'w') as f:
            f.write(pid)
    log("run_server: port=%s, host=%s, pidfile='%s', logfile='%s'" %
        (port, host, pidfile, LOGFILE))
    try:
        serve(port, host)
    finally:
        if pidfile:
            os.unlink(pidfile)
if __name__ == "__main__":
    # Command-line entry point: parse options, resolve port/pidfile/logfile,
    # then either run the test client (-c) or start the server (optionally
    # daemonized with -d).
    import argparse
    parser = argparse.ArgumentParser(description="Run Sage server")
    parser.add_argument(
        "-p",
        dest="port",
        type=int,
        default=0,
        help=
        "port to listen on (default: 0); 0 = automatically allocated; saved to $SMC/data/sage_server.port"
    )
    parser.add_argument(
        "-l",
        dest='log_level',
        type=str,
        default='INFO',
        help=
        "log level (default: INFO) useful options include WARNING and DEBUG")
    parser.add_argument("-d",
                        dest="daemon",
                        default=False,
                        action="store_const",
                        const=True,
                        help="daemon mode (default: False)")
    parser.add_argument(
        "--host",
        dest="host",
        type=str,
        default='127.0.0.1',
        help="host interface to bind to -- default is 127.0.0.1")
    parser.add_argument("--pidfile",
                        dest="pidfile",
                        type=str,
                        default='',
                        help="store pid in this file")
    parser.add_argument(
        "--logfile",
        dest="logfile",
        type=str,
        default='',
        help="store log in this file (default: '' = don't log to a file)")
    parser.add_argument("-c",
                        dest="client",
                        default=False,
                        action="store_const",
                        const=True,
                        help="run in test client mode number 1 (command line)")
    parser.add_argument("--hostname",
                        dest="hostname",
                        type=str,
                        default='',
                        help="hostname to connect to in client mode")
    parser.add_argument("--portfile",
                        dest="portfile",
                        type=str,
                        default='',
                        help="write port to this file")
    args = parser.parse_args()

    if args.daemon and not args.pidfile:
        print(("%s: must specify pidfile in daemon mode" % sys.argv[0]))
        sys.exit(1)

    if args.log_level:
        pass
        #level = getattr(logging, args.log_level.upper())
        #log.setLevel(level)

    if args.client:
        # Explicit -p wins; otherwise read the port the server wrote to the
        # portfile.  Use `with` so the handle is not leaked.
        if args.port:
            client_port = args.port
        else:
            with open(args.portfile) as f:
                client_port = int(f.read())
        client1(port=client_port, hostname=args.hostname)
        sys.exit(0)

    if not args.port:
        # ask the kernel for a free port, then release it for serve() to
        # re-bind (small inherent race, as before)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))  # pick a free port
        args.port = s.getsockname()[1]
        s.close()
        del s

    if args.portfile:
        with open(args.portfile, 'w') as f:
            f.write(str(args.port))

    pidfile = os.path.abspath(args.pidfile) if args.pidfile else ''
    logfile = os.path.abspath(args.logfile) if args.logfile else ''
    if logfile:
        LOGFILE = logfile
        # for now we clear it on restart...
        with open(LOGFILE, 'w'):
            pass
        log("setting logfile to %s" % LOGFILE)
    main = lambda: run_server(port=args.port, host=args.host, pidfile=pidfile)
    if args.daemon and args.pidfile:
        from . import daemon
        daemon.daemonize(args.pidfile)
        main()
    else:
        main()
# NOTE: get rid of a memory usage monkey patch, which no longer works in sage-9.5 (and probably isn't that important)
#!/usr/bin/env python
"""
sage_server.py -- unencrypted forking TCP server.
Note: I wrote functionality so this can run as root, create accounts on the fly,
and serve sage as those accounts. Doing this is horrendous from a security point of
view, and I'm definitely not doing this.
None of that functionality is actually used in https://cocalc.com!
For debugging, this may help:
killemall sage_server.py && sage --python sage_server.py -p 6000
"""
# NOTE: This file is GPL'd
# because it imports the Sage library. This file is not directly
# imported by anything else in CoCalc; the Python process it runs is
# used over a TCP connection.
#########################################################################################
# Copyright (C) 2016, Sagemath Inc.
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
# Add the path that contains this file to the Python load path, so we
# can import other files from there.
from __future__ import absolute_import
import six
import os, sys, time, operator
import __future__ as future
from functools import reduce
def is_string(s):
    """Return True when *s* is a string (bytes-or-unicode on py2, str on py3)."""
    string_like = six.string_types
    return isinstance(s, string_like)
def unicode8(s):
    """Best-effort conversion of *s* to text; falls back to ``str(s)`` and,
    failing even that, returns *s* unchanged."""
    # I evidently don't understand Python unicode...  Do the following for now:
    # TODO: see http://stackoverflow.com/questions/21897664/why-does-unicodeu-passed-an-errors-parameter-raise-typeerror for how to fix.
    try:
        if six.PY2:
            return str(s).encode('utf-8')
        # py3: decodes bytes; raises TypeError for anything else -> fall back
        return str(s, 'utf-8')
    except:
        pass
    try:
        return str(s)
    except:
        return s
# Default log destination: this source file's path with ".py" replaced by ".log".
LOGFILE = os.path.realpath(__file__)[:-3] + ".log"
# Pid stamped on every log line; forked children re-assign this to their own pid.
PID = os.getpid()
from datetime import datetime
def log(*args):
    """Append one timestamped line built from *args* to LOGFILE.

    Best-effort: any failure is printed and otherwise ignored, so logging can
    never take down the server.
    """
    try:
        mesg = "%s (%s): %s\n" % (PID, datetime.utcnow().strftime(
            '%Y-%m-%d %H:%M:%S.%f')[:-3], ' '.join([unicode8(x)
                                                    for x in args]))
        # open/close per call so the handle is never leaked across forks
        with open(LOGFILE, 'a') as debug_log:
            debug_log.write(mesg)
            debug_log.flush()
    except Exception as err:
        print(("an error writing a log message (ignoring) -- %s" % err, args))
# used for clearing pylab figure
pylab = None  # bound in serve()'s init_library once 'import pylab' has run

# Maximum number of distinct (non-once) output messages per cell; when this number is
# exceeded, an exception is raised; this reduces the chances of the user creating
# a huge unusable worksheet.
MAX_OUTPUT_MESSAGES = 256
# stdout, stderr, html, etc. that exceeds this many characters will be truncated to avoid
# killing the client.
MAX_STDOUT_SIZE = MAX_STDERR_SIZE = MAX_CODE_SIZE = MAX_HTML_SIZE = MAX_MD_SIZE = MAX_TEX_SIZE = 40000
# Hard cap on the *total* character output of a single cell.
MAX_OUTPUT = 150000
# Standard imports.
import json, resource, shutil, signal, socket, struct, \
tempfile, time, traceback, pwd, re
# for "3x^2 + 4xy - 5(1+x) - 3 abc4ok", this pattern matches "3x", "5(" and "4xy" but not "abc4ok"
# to understand it, see https://regex101.com/ or https://www.debuggex.com/
RE_POSSIBLE_IMPLICIT_MUL = re.compile(r'(?:(?<=[^a-zA-Z])|^)(\d+[a-zA-Z\(]+)')
try:
from . import sage_parsing, sage_salvus
except:
import sage_parsing, sage_salvus
uuid = sage_salvus.uuid
# Latched to False the first time we detect that sage's attach machinery is missing.
reload_attached_files_if_mod_smc_available = True


def reload_attached_files_if_mod_smc():
    """Re-load any %attach'ed files that changed on disk since last check.

    Deliberately does nothing (and imports nothing) until sage.repl.attach has
    already been pulled in by actual use of attach.
    """
    # CRITICAL: do NOT import sage.repl.attach!! That will import IPython, wasting several seconds and
    # killing the user experience for no reason.
    try:
        import sage.repl
        sage.repl.attach
    except:
        # nothing to do -- attach has not been used and is not yet available.
        return
    global reload_attached_files_if_mod_smc_available
    if not reload_attached_files_if_mod_smc_available:
        return
    try:
        # NOTE: load_attach_path is imported but unused here; kept as-is.
        from sage.repl.attach import load_attach_path, modified_file_iterator
    except:
        print("sage_server: attach not available")
        reload_attached_files_if_mod_smc_available = False
        return
    # see sage/src/sage/repl/attach.py reload_attached_files_if_modified()
    for filename, mtime in modified_file_iterator():
        basename = os.path.basename(filename)
        timestr = time.strftime('%T', mtime)
        log('reloading attached file {0} modified at {1}'.format(
            basename, timestr))
        from .sage_salvus import load
        load(filename)
# Determine the info object, if available. There's no good reason
# it wouldn't be available, unless a user explicitly deleted it, but
# we may as well try to be robust to this, especially if somebody
# were to try to use this server outside of cloud.sagemath.com.
# Project metadata written by the hub; $SMC is assumed to be set in this
# environment (it is, under cloud.sagemath.com).
_info_path = os.path.join(os.environ['SMC'], 'info.json')
if os.path.exists(_info_path):
    try:
        INFO = json.loads(open(_info_path).read())
    except:
        # This will fail, e.g., if info.json is invalid (maybe a blank file).
        # We definitely don't want sage server startup to be completely broken
        # in this case, so we fall back to "no info".
        INFO = {}
else:
    INFO = {}
# Downstream code assumes base_url always exists.
if 'base_url' not in INFO:
    INFO['base_url'] = ''
# Configure logging
#logging.basicConfig()
#log = logging.getLogger('sage_server')
#log.setLevel(logging.INFO)
# A CoffeeScript version of this function is in misc_node.coffee.
import hashlib
def uuidsha1(data):
    """Deterministically derive a v4-shaped UUID string from the SHA-1 of
    *data* (bytes).  Equal inputs always yield the same uuid.

    A CoffeeScript version of this function is in misc_node.coffee.
    """
    digest = hashlib.sha1(data).hexdigest()
    template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'
    out = []
    pos = 0
    for ch in template:
        if ch == 'x':
            out.append(digest[pos])
            pos += 1
        elif ch == 'y':
            # variant nibble: 8 + low order 3 bits of the hex digit
            out.append(hex((int(digest[pos], 16) & 0x3) | 0x8)[-1])
            pos += 1
        else:
            out.append(ch)
    return ''.join(out)
# A tcp connection with support for sending various types of messages, especially JSON.
class ConnectionJSON(object):
    """Length-prefixed message framing over a TCP socket.

    Wire format: a big-endian 32-bit length header followed by the payload;
    the first payload byte tags the type ('j' = JSON, 'b' = blob, where a
    blob is a 36-char uuid followed by raw data).

    Fixes vs. the original:
      - uses ``sendall`` instead of ``send`` (``send`` may transmit only part
        of the buffer, corrupting the framing);
      - tolerates a partially-read length header instead of raising EOFError;
      - isinstance-based py2/py3 checks instead of ``six`` + ``type() ==``.
    """

    def __init__(self, conn):
        # avoid common mistake -- conn is supposed to be from socket.socket...
        assert not isinstance(conn, ConnectionJSON)
        self._conn = conn

    def close(self):
        self._conn.close()

    def _send(self, s):
        """Frame and transmit one message (header + payload)."""
        # py3 text -> bytes (on py2, str IS bytes so this branch is skipped)
        if isinstance(s, str) and not isinstance(s, bytes):
            s = s.encode('utf8')
        length_header = struct.pack(">L", len(s))
        # sendall, not send: send() may write only part of the buffer.
        self._conn.sendall(length_header + s)

    def send_json(self, m):
        """Serialize *m* to JSON and send it; returns the serialized length."""
        m = json.dumps(m)
        if '\\u0000' in m:
            raise RuntimeError("NULL bytes not allowed")
        log("sending message '", truncate_text(m, 256), "'")
        self._send('j' + m)
        return len(m)

    def send_blob(self, blob):
        """Send *blob* tagged with a content-derived uuid; returns that uuid."""
        if isinstance(blob, str) and not isinstance(blob, bytes):
            # unicode objects must be encoded before hashing
            blob = blob.encode('utf8')
        s = uuidsha1(blob)
        if isinstance(blob, bytes) and not isinstance(blob, str):
            # py3: convert all to bytes first, to avoid unnecessary conversions
            self._send(('b' + s).encode('utf8') + blob)
        else:
            # old sage py2 code (str is bytes)
            self._send('b' + s + blob)
        return s

    def send_file(self, filename):
        """Send the contents of *filename* as a blob; returns the blob uuid."""
        log("sending file '%s'" % filename)
        with open(filename, 'rb') as f:
            data = f.read()
        return self.send_blob(data)

    def _recv(self, n):
        """recv up to *n* bytes, retrying a bounded number of EINTRs."""
        # see http://stackoverflow.com/questions/3016369/catching-blocking-sigint-during-system-call
        for i in range(20):
            try:
                return self._conn.recv(n)
            except OSError as e:
                if e.errno != 4:  # 4 == EINTR: interrupted, just retry
                    raise
        raise EOFError

    def recv(self):
        """Receive one complete message; returns ('json', obj) or ('blob', data).

        Raises EOFError if the peer closes mid-message."""
        header = self._recv(4)
        while len(header) < 4:
            # a short read of the header is not EOF unless 0 bytes arrive
            more = self._recv(4 - len(header))
            if len(more) == 0:
                raise EOFError
            header += more
        n = struct.unpack('>L', header)[0]  # big endian 32 bits
        s = self._recv(n)
        while len(s) < n:
            t = self._recv(n - len(s))
            if len(t) == 0:
                raise EOFError
            s += t
        # py3: bytestream to string, so s[0] is e.g. 'j' and not 106
        # (on py2, bytes IS str, so this is a no-op)
        if isinstance(s, bytes) and bytes is not str:
            s = s.decode('utf8')
        if s[0] == 'j':
            try:
                return 'json', json.loads(s[1:])
            except Exception as msg:
                log("Unable to parse JSON '%s'" % s[1:])
                raise
        elif s[0] == 'b':
            return 'blob', s[1:]
        raise ValueError("unknown message type '%s'" % s[0])
def truncate_text(s, max_size):
    """Return (text, truncated): *s* cut to *max_size* chars plus a "[...]"
    marker when it was too long, unchanged otherwise."""
    if len(s) <= max_size:
        return s, False
    return s[:max_size] + "[...]", True
def truncate_text_warn(s, max_size, name):
    r"""
    Truncate text if too long and format a warning message.

    INPUT:

    - ``s`` -- string to be truncated
    - ``max_size`` -- integer truncation limit
    - ``name`` -- string, name of limiting parameter

    OUTPUT:

    a triple:

    - string -- possibly truncated input string
    - boolean -- true if input string was truncated
    - string -- warning message if input string was truncated
    """
    template = "WARNING: Output: %s truncated by %s to %s. Type 'smc?' to learn how to raise the output limit."
    size = len(s)
    if size <= max_size:
        return s, False, ''
    return s[:max_size] + "[...]", True, template % (size, name, max_size)
class Message(object):
    """Factory for the JSON messages of the sage-server wire protocol.

    Every method returns a plain dict with at least an 'event' key; the
    module-level singleton ``message`` is the conventional way to use it.
    """

    def _new(self, event, props=None):
        """Build a message dict for *event* from *props* (typically a
        ``locals()`` snapshot), dropping the 'self' entry.

        Default changed from a (shared) mutable ``{}`` to None."""
        m = {'event': event}
        if props:
            for key, val in props.items():
                if key != 'self':
                    m[key] = val
        return m

    def start_session(self):
        return self._new('start_session')

    def session_description(self, pid):
        return self._new('session_description', {'pid': pid})

    def send_signal(self, pid, signal=signal.SIGINT):
        return self._new('send_signal', locals())

    def terminate_session(self, done=True):
        return self._new('terminate_session', locals())

    def execute_code(self, id, code, preparse=True):
        return self._new('execute_code', locals())

    def execute_javascript(self, code, obj=None, coffeescript=False):
        return self._new('execute_javascript', locals())

    def output(
            self,
            id,
            stdout=None,
            stderr=None,
            code=None,
            html=None,
            javascript=None,
            coffeescript=None,
            interact=None,
            md=None,
            tex=None,
            d3=None,
            file=None,
            raw_input=None,
            obj=None,
            once=None,
            hide=None,
            show=None,
            events=None,
            clear=None,
            delete_last=None,
            done=False  # CRITICAL: done must be specified for multi-response; this is assumed by sage_session.coffee; otherwise response assumed single.
    ):
        """Build an 'output' message, truncating oversized fields.

        Bug fix: previously each field's truncation check overwrote
        ``did_truncate``/``tmsg``, so only the *last* field checked could
        report a warning.  All truncation warnings are now accumulated and
        appended to stderr.
        """
        m = self._new('output')
        m['id'] = id
        t = truncate_text_warn
        warnings = []  # accumulated truncation warnings
        from . import sage_server  # we do this so that the user can customize the MAX's below.
        if code is not None:
            code['source'], did, w = t(code['source'],
                                       sage_server.MAX_CODE_SIZE,
                                       'MAX_CODE_SIZE')
            if did:
                warnings.append(w)
            m['code'] = code
        if stderr is not None and len(stderr) > 0:
            m['stderr'], did, w = t(stderr, sage_server.MAX_STDERR_SIZE,
                                    'MAX_STDERR_SIZE')
            if did:
                warnings.append(w)
        if stdout is not None and len(stdout) > 0:
            m['stdout'], did, w = t(stdout, sage_server.MAX_STDOUT_SIZE,
                                    'MAX_STDOUT_SIZE')
            if did:
                warnings.append(w)
        if html is not None and len(html) > 0:
            m['html'], did, w = t(html, sage_server.MAX_HTML_SIZE,
                                  'MAX_HTML_SIZE')
            if did:
                warnings.append(w)
        if md is not None and len(md) > 0:
            m['md'], did, w = t(md, sage_server.MAX_MD_SIZE, 'MAX_MD_SIZE')
            if did:
                warnings.append(w)
        if tex is not None and len(tex) > 0:
            tex['tex'], did, w = t(tex['tex'], sage_server.MAX_TEX_SIZE,
                                   'MAX_TEX_SIZE')
            if did:
                warnings.append(w)
            m['tex'] = tex
        if javascript is not None: m['javascript'] = javascript
        if coffeescript is not None: m['coffeescript'] = coffeescript
        if interact is not None: m['interact'] = interact
        if d3 is not None: m['d3'] = d3
        if obj is not None: m['obj'] = json.dumps(obj)
        if file is not None: m['file'] = file  # = {'filename':..., 'uuid':...}
        if raw_input is not None: m['raw_input'] = raw_input
        if done is not None: m['done'] = done
        if once is not None: m['once'] = once
        if hide is not None: m['hide'] = hide
        if show is not None: m['show'] = show
        if events is not None: m['events'] = events
        if clear is not None: m['clear'] = clear
        if delete_last is not None: m['delete_last'] = delete_last
        if warnings:
            tail = '\n' + '\n'.join(warnings)
            if 'stderr' in m:
                m['stderr'] += tail
            else:
                m['stderr'] = tail
        return m

    def introspect_completions(self, id, completions, target):
        m = self._new('introspect_completions', locals())
        m['id'] = id
        return m

    def introspect_docstring(self, id, docstring, target):
        m = self._new('introspect_docstring', locals())
        m['id'] = id
        return m

    def introspect_source_code(self, id, source_code, target):
        m = self._new('introspect_source_code', locals())
        m['id'] = id
        return m


# Singleton used throughout this module to build protocol messages.
message = Message()
whoami = os.environ['USER']  # login name of the account this server runs as
def client1(port, hostname):
    """Minimal command-line test client (mode "-c").

    Connects to a sage server at (hostname, port), starts a session, then
    reads input with sage_parsing.get_input and prints the server's output
    until EOF.  Ctrl-C opens a second connection just to ask the server to
    signal the session process.
    """
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((hostname, int(port)))
    conn = ConnectionJSON(conn)
    conn.send_json(message.start_session())
    typ, mesg = conn.recv()
    # pid of the remote session process -- the target for interrupt signals
    pid = mesg['pid']
    print(("PID = %s" % pid))

    id = 0
    while True:
        try:
            code = sage_parsing.get_input('sage [%s]: ' % id)
            if code is None:  # EOF
                break
            conn.send_json(message.execute_code(code=code, id=id))
            # drain output messages until the one for this id says done
            while True:
                typ, mesg = conn.recv()
                if mesg['event'] == 'terminate_session':
                    return
                elif mesg['event'] == 'output':
                    if 'stdout' in mesg:
                        sys.stdout.write(mesg['stdout'])
                        sys.stdout.flush()
                    if 'stderr' in mesg:
                        print(('! ' +
                               '\n! '.join(mesg['stderr'].splitlines())))
                    if 'done' in mesg and mesg['id'] >= id:
                        break
            id += 1

        except KeyboardInterrupt:
            # a fresh one-shot connection delivers the interrupt request
            print("Sending interrupt signal")
            conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn2.connect((hostname, int(port)))
            conn2 = ConnectionJSON(conn2)
            conn2.send_json(message.send_signal(pid))
            del conn2
            id += 1

    conn.send_json(message.terminate_session())
    print("\nExiting Sage client.")
class BufferedOutputStream(object):
    """File-like object that buffers writes and forwards them to a callback.

    The callback ``f(text, done=...)`` is invoked whenever the buffer reaches
    ``flush_size`` characters, whenever ``flush_interval`` seconds have
    elapsed since the last flush, or on an explicit ``flush()``.
    """

    def __init__(self, f, flush_size=4096, flush_interval=.1):
        self._f = f
        self._buf = ''
        self._flush_size = flush_size
        self._flush_interval = flush_interval
        self.reset()

    def reset(self):
        # Restart the flush-interval clock.
        self._last_flush_time = time.time()

    def fileno(self):
        return 0

    def write(self, output):
        # CRITICAL: we need output to be valid PostgreSQL TEXT, so strip any
        # null bytes.  This only affects text destined to be *rendered* in
        # the browser -- a partial but safe solution to a general problem.
        try:
            self._buf += output.replace('\x00', '')
        except UnicodeDecodeError:
            self._buf += output.decode('utf-8').replace('\x00', '')
        now = time.time()
        buffer_full = len(self._buf) >= self._flush_size
        interval_elapsed = now - self._last_flush_time >= self._flush_interval
        if buffer_full or interval_elapsed:
            self.flush()
            self._last_flush_time = now

    def flush(self, done=False):
        # An empty buffer is still worth sending when done=True, since the
        # callback uses `done` as an end-of-output marker.
        if not (self._buf or done):
            return
        try:
            self._f(self._buf, done=done)
        except UnicodeDecodeError:
            if six.PY2:  # str doesn't have errors option in python2!
                self._f(unicode(self._buf, errors='replace'), done=done)
            else:
                self._f(str(self._buf, errors='replace'), done=done)
        self._buf = ''

    def isatty(self):
        return False
# This will *have* to be re-done using Cython for speed.
class Namespace(dict):
def __init__(self, x):
self._on_change = {}
self._on_del = {}
dict.__init__(self, x)
def on(self, event, x, f):
if event == 'change':
if x not in self._on_change:
self._on_change[x] = []
self._on_change[x].append(f)
elif event == 'del':
if x not in self._on_del:
self._on_del[x] = []
self._on_del[x].append(f)
def remove(self, event, x, f):
if event == 'change' and x in self._on_change:
v = self._on_change[x]
i = v.find(f)
if i != -1:
del v[i]
if len(v) == 0:
del self._on_change[x]
elif event == 'del' and x in self._on_del:
v = self._on_del[x]
i = v.find(f)
if i != -1:
del v[i]
if len(v) == 0:
del self._on_del[x]
def __setitem__(self, x, y):
dict.__setitem__(self, x, y)
try:
if x in self._on_change:
for f in self._on_change[x]:
f(y)
if None in self._on_change:
for f in self._on_change[None]:
f(x, y)
except Exception as mesg:
print(mesg)
def __delitem__(self, x):
try:
if x in self._on_del:
for f in self._on_del[x]:
f()
if None in self._on_del:
for f in self._on_del[None]:
f(x)
except Exception as mesg:
print(mesg)
dict.__delitem__(self, x)
def set(self, x, y, do_not_trigger=None):
dict.__setitem__(self, x, y)
if x in self._on_change:
if do_not_trigger is None:
do_not_trigger = []
for f in self._on_change[x]:
if f not in do_not_trigger:
f(y)
if None in self._on_change:
for f in self._on_change[None]:
f(x, y)
class TemporaryURL:
    """Value object for a (possibly time-limited) blob URL.

    ``url`` is the access path; ``ttl`` is the time-to-live in seconds,
    with 0 meaning permanently available.
    """

    def __init__(self, url, ttl):
        self.url, self.ttl = url, ttl

    def __repr__(self):
        return repr(self.url)

    def __str__(self):
        return self.url
# The single global namespace that all worksheet cells execute in.
namespace = Namespace({})
class Salvus(object):
"""
Cell execution state object and wrapper for access to special CoCalc Server functionality.
An instance of this object is created each time you execute a cell. It has various methods
for sending different types of output messages, links to files, etc. Type 'help(smc)' for
more details.
OUTPUT LIMITATIONS -- There is an absolute limit on the number of messages output for a given
cell, and also the size of the output message for each cell. You can access or change
those limits dynamically in a worksheet as follows by viewing or changing any of the
following variables::
sage_server.MAX_STDOUT_SIZE # max length of each stdout output message
sage_server.MAX_STDERR_SIZE # max length of each stderr output message
sage_server.MAX_MD_SIZE # max length of each md (markdown) output message
sage_server.MAX_HTML_SIZE # max length of each html output message
sage_server.MAX_TEX_SIZE # max length of tex output message
sage_server.MAX_OUTPUT_MESSAGES # max number of messages output for a cell.
And::
sage_server.MAX_OUTPUT # max total character output for a single cell; computation
# terminated/truncated if sum of above exceeds this.
"""
Namespace = Namespace
_prefix = ''
_postfix = ''
_default_mode = 'sage'
_py_features = {}
def _flush_stdio(self):
"""
Flush the standard output streams. This should be called before sending any message
that produces output.
"""
sys.stdout.flush()
sys.stderr.flush()
    def __repr__(self):
        # Render as nothing: evaluating `salvus` alone in a cell should not
        # dump object internals into the worksheet output.
        return ''
    def __init__(self, conn, id, data=None, cell_id=None, message_queue=None):
        """Bind this execution-state object to a connection and a cell.

        - ``conn`` -- the ConnectionJSON used to talk to the client
        - ``id`` -- id of the execute_code message being handled
        - ``data``/``cell_id``/``message_queue`` -- per-cell context supplied
          by the session loop
        """
        self._conn = conn
        self._num_output_messages = 0
        self._total_output_length = 0
        self._output_warning_sent = False
        self._id = id
        self._done = True  # done=self._done when last execute message is sent; e.g., set self._done = False to not close cell on code term.
        self.data = data
        self.cell_id = cell_id
        self.namespace = namespace
        self.message_queue = message_queue
        self.code_decorators = []  # gets reset if there are code decorators
        # Alias: someday remove all references to "salvus" and instead use smc.
        # For now this alias is easier to think of and use.
        namespace['smc'] = namespace[
            'salvus'] = self  # beware of circular ref?
        # Monkey patch in our "require" command.
        namespace['require'] = self.require
        # Make the salvus object itself available when doing "from sage.all import *".
        import sage.all
        sage.all.salvus = self
    def _send_output(self, *args, **kwds):
        """Send one output message for this cell, enforcing the per-cell
        message-count and total-size limits.

        When a limit is exceeded, a warning is sent once and KeyboardInterrupt
        is raised (here and on every later call) to terminate the computation.
        Messages marked once=True don't count against the message limit.
        """
        if self._output_warning_sent:
            raise KeyboardInterrupt
        mesg = message.output(*args, **kwds)
        if not mesg.get('once', False):
            self._num_output_messages += 1
        # imported so the user can tune sage_server.MAX_* at runtime
        from . import sage_server
        if self._num_output_messages > sage_server.MAX_OUTPUT_MESSAGES:
            self._output_warning_sent = True
            err = "\nToo many output messages: %s (at most %s per cell -- type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
                self._num_output_messages, sage_server.MAX_OUTPUT_MESSAGES)
            self._conn.send_json(
                message.output(stderr=err, id=self._id, once=False, done=True))
            raise KeyboardInterrupt
        n = self._conn.send_json(mesg)
        self._total_output_length += n
        if self._total_output_length > sage_server.MAX_OUTPUT:
            self._output_warning_sent = True
            err = "\nOutput too long: %s -- MAX_OUTPUT (=%s) exceeded (type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
                self._total_output_length, sage_server.MAX_OUTPUT)
            self._conn.send_json(
                message.output(stderr=err, id=self._id, once=False, done=True))
            raise KeyboardInterrupt
def obj(self, obj, done=False):
self._send_output(obj=obj, id=self._id, done=done)
return self
    def link(self, filename, label=None, foreground=True, cls=''):
        """
        Output a clickable link to a file somewhere in this project.  The filename
        path must be relative to the current working directory of the Python process.

        The simplest way to use this is

             salvus.link("../name/of/file")  # any relative path to any file

        This creates a link, which when clicked on, opens that file in the foreground.

        If the filename is the name of a directory, clicking will instead
        open the file browser on that directory:

             salvus.link("../name/of/directory")  # clicking on the resulting link opens a directory

        If you would like a button instead of a link, pass cls='btn'.  You can use any of
        the standard Bootstrap button classes, e.g., btn-small, btn-large, btn-success, etc.

        If you would like to change the text in the link (or button) to something
        besides the default (filename), just pass arbitrary HTML to the label= option.

        INPUT:

        - filename -- a relative path to a file or directory
        - label -- (default: the filename) html label for the link
        - foreground -- (default: True); if True, opens link in the foreground
        - cls -- (default: '') optional CSS classes, such as 'btn'.

        EXAMPLES:

        Use as a line decorator::

            %salvus.link name/of/file.foo

        Make a button::

            salvus.link("foo/bar/", label="The Bar Directory", cls='btn')

        Make two big blue buttons with plots in them::

            plot(sin, 0, 20).save('sin.png')
            plot(cos, 0, 20).save('cos.png')
            for img in ['sin.png', 'cos.png']:
                salvus.link(img, label="<img width='150px' src='%s'>"%salvus.file(img, show=False), cls='btn btn-large btn-primary')
        """
        # project-relative path: absolute path with the $HOME prefix stripped
        path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
        if label is None:
            label = filename
        id = uuid()
        # an empty anchor first; the label and click handler are attached by
        # the javascript below so the label may be arbitrary HTML
        self.html("<a class='%s' style='cursor:pointer'; id='%s'></a>" %
                  (cls, id))
        s = "$('#%s').html(obj.label).click(function() {%s; return false;});" % (
            id, self._action(path, foreground))
        self.javascript(s,
                        obj={
                            'label': label,
                            'path': path,
                            'foreground': foreground
                        },
                        once=False)
def _action(self, path, foreground):
if os.path.isdir(path):
if foreground:
action = "worksheet.project_page.open_directory(obj.path);"
else:
action = "worksheet.project_page.set_current_path(obj.path);"
else:
action = "worksheet.project_page.open_file({'path':obj.path, 'foreground': obj.foreground});"
return action
def open_tab(self, filename, foreground=True):
"""
Open a new file (or directory) document in another tab.
See the documentation for salvus.link.
"""
path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
self.javascript(self._action(path, foreground),
obj={
'path': path,
'foreground': foreground
},
once=True)
def close_tab(self, filename):
"""
Close an open file tab. The filename is relative to the current working directory.
"""
self.javascript("worksheet.project_page.close_file(obj)",
obj=filename,
once=True)
    def threed(
            self,
            g,  # sage Graphic3d object.
            width=None,
            height=None,
            frame=True,  # True/False or {'color':'black', 'thickness':.4, 'labels':True, 'fontsize':14, 'draw':True,
            # 'xmin':?, 'xmax':?, 'ymin':?, 'ymax':?, 'zmin':?, 'zmax':?}
            background=None,
            foreground=None,
            spin=False,
            aspect_ratio=None,
            frame_aspect_ratio=None,  # synonym for aspect_ratio
            done=False,
            renderer=None,  # None, 'webgl', or 'canvas'
    ):
        """Render the Sage 3d graphics object *g* in the worksheet.

        The scene is converted to JSON, stored as a blob (scenes are too
        large to ship inline in the document), and a small file-reference
        output message is sent instead.  Explicit keyword options take
        precedence over options inherited from g._extra_kwds.
        """
        from .graphics import graphics3d_to_jsonable, json_float as f

        # process options, combining ones set explicitly above with ones inherited from 3d scene
        opts = {
            'width': width,
            'height': height,
            'background': background,
            'foreground': foreground,
            'spin': spin,
            'aspect_ratio': aspect_ratio,
            'renderer': renderer
        }

        extra_kwds = {} if g._extra_kwds is None else g._extra_kwds

        # clean up and normalize aspect_ratio option
        if aspect_ratio is None:
            if frame_aspect_ratio is not None:
                aspect_ratio = frame_aspect_ratio
            elif 'frame_aspect_ratio' in extra_kwds:
                aspect_ratio = extra_kwds['frame_aspect_ratio']
            elif 'aspect_ratio' in extra_kwds:
                aspect_ratio = extra_kwds['aspect_ratio']
        if aspect_ratio is not None:
            # 1 / "automatic" both mean the default, i.e., no explicit ratio
            if aspect_ratio == 1 or aspect_ratio == "automatic":
                aspect_ratio = None
            elif not (isinstance(aspect_ratio,
                                 (list, tuple)) and len(aspect_ratio) == 3):
                raise TypeError(
                    "aspect_ratio must be None, 1 or a 3-tuple, but it is '%s'"
                    % (aspect_ratio, ))
            else:
                aspect_ratio = [f(x) for x in aspect_ratio]

        opts['aspect_ratio'] = aspect_ratio

        # options not set explicitly fall back to the scene's extra kwds
        for k in [
                'spin',
                'height',
                'width',
                'background',
                'foreground',
                'renderer',
        ]:
            if k in extra_kwds and not opts.get(k, None):
                opts[k] = extra_kwds[k]

        # normalize numeric-ish options to JSON-safe floats
        if not isinstance(opts['spin'], bool):
            opts['spin'] = f(opts['spin'])
        opts['width'] = f(opts['width'])
        opts['height'] = f(opts['height'])

        # determine the frame
        b = g.bounding_box()
        xmin, xmax, ymin, ymax, zmin, zmax = b[0][0], b[1][0], b[0][1], b[1][
            1], b[0][2], b[1][2]
        fr = opts['frame'] = {
            'xmin': f(xmin),
            'xmax': f(xmax),
            'ymin': f(ymin),
            'ymax': f(ymax),
            'zmin': f(zmin),
            'zmax': f(zmax)
        }

        # frame may be a bool (draw or not) or a dict of overrides
        if isinstance(frame, dict):
            for k in list(fr.keys()):
                if k in frame:
                    fr[k] = f(frame[k])
            fr['draw'] = frame.get('draw', True)
            fr['color'] = frame.get('color', None)
            fr['thickness'] = f(frame.get('thickness', None))
            fr['labels'] = frame.get('labels', None)
            if 'fontsize' in frame:
                fr['fontsize'] = int(frame['fontsize'])
        elif isinstance(frame, bool):
            fr['draw'] = frame

        # convert the Sage graphics object to a JSON object that can be rendered
        scene = {'opts': opts, 'obj': graphics3d_to_jsonable(g)}

        # Store that object in the database, rather than sending it directly as an output message.
        # We do this since obj can easily be quite large/complicated, and managing it as part of the
        # document is too slow and doesn't scale.
        blob = json.dumps(scene, separators=(',', ':'))
        uuid = self._conn.send_blob(blob)

        # flush output (so any text appears before 3d graphics, in case they are interleaved)
        self._flush_stdio()

        # send message pointing to the 3d 'file', which will get downloaded from database
        self._send_output(id=self._id,
                          file={
                              'filename': unicode8("%s.sage3d" % uuid),
                              'uuid': uuid
                          },
                          done=done)
def d3_graph(self, g, **kwds):
from .graphics import graph_to_d3_jsonable
self._send_output(id=self._id,
d3={
"viewer": "graph",
"data": graph_to_d3_jsonable(g, **kwds)
})
def file(self,
filename,
show=True,
done=False,
download=False,
once=False,
events=None,
raw=False,
text=None):
"""
Display or provide a link to the given file. Raises a RuntimeError if this
is not possible, e.g, if the file is too large.
If show=True (the default), the browser will show the file,
or provide a clickable link to it if there is no way to show it.
If text is also given that will be used instead of the path to the file.
If show=False, this function returns an object T such that
T.url (or str(t)) is a string of the form "/blobs/filename?uuid=the_uuid"
that can be used to access the file even if the file is immediately
deleted after calling this function (the file is stored in a database).
Also, T.ttl is the time to live (in seconds) of the object. A ttl of
0 means the object is permanently available.
raw=False (the default):
If you use the URL
/blobs/filename?uuid=the_uuid&download
then the server will include a header that tells the browser to
download the file to disk instead of displaying it. Only relatively
small files can be made available this way. However, they remain
available (for a day) even *after* the file is deleted.
NOTE: It is safe to delete the file immediately after this
function (salvus.file) returns.
raw=True:
Instead, the URL is to the raw file, which is served directly
from the project:
/project-id/raw/path/to/filename
This will only work if the file is not deleted; however, arbitrarily
large files can be streamed this way.
This function creates an output message {file:...}; if the user saves
a worksheet containing this message, then any referenced blobs are made
permanent in the database.
The uuid is based on the Sha-1 hash of the file content (it is computed using the
function sage_server.uuidsha1). Any two files with the same content have the
same Sha1 hash.
"""
filename = unicode8(filename)
if raw:
info = self.project_info()
path = os.path.abspath(filename)
home = os.environ['HOME'] + '/'
if path.startswith(home):
path = path[len(home):]
else:
raise ValueError(
"can only send raw files in your home directory")
url = os.path.join('/', info['base_url'].strip('/'),
info['project_id'], 'raw', path.lstrip('/'))
if show:
self._flush_stdio()
self._send_output(id=self._id,
once=once,
file={
'filename': filename,
'url': url,
'show': show,
'text': text
},
events=events,
done=done)
return
else:
return TemporaryURL(url=url, ttl=0)
file_uuid = self._conn.send_file(filename)
mesg = None
while mesg is None:
self.message_queue.recv()
for i, (typ, m) in enumerate(self.message_queue.queue):
if typ == 'json' and m.get('event') == 'save_blob' and m.get(
'sha1') == file_uuid:
mesg = m
del self.message_queue[i]
break
if 'error' in mesg:
raise RuntimeError("error saving blob -- %s" % mesg['error'])
self._flush_stdio()
self._send_output(id=self._id,
once=once,
file={
'filename': filename,
'uuid': file_uuid,
'show': show,
'text': text
},
events=events,
done=done)
if not show:
info = self.project_info()
url = "%s/blobs/%s?uuid=%s" % (info['base_url'], filename,
file_uuid)
if download:
url += '?download'
return TemporaryURL(url=url, ttl=mesg.get('ttl', 0))
def python_future_feature(self, feature=None, enable=None):
"""
Allow users to enable, disable, and query the features in the python __future__ module.
"""
if feature is None:
if enable is not None:
raise ValueError(
"enable may not be specified when feature is None")
return sorted(Salvus._py_features.keys())
attr = getattr(future, feature, None)
if (feature not in future.all_feature_names) or (
attr is None) or not isinstance(attr, future._Feature):
raise RuntimeError("future feature %.50r is not defined" %
(feature, ))
if enable is None:
return feature in Salvus._py_features
if enable:
Salvus._py_features[feature] = attr
else:
try:
del Salvus._py_features[feature]
except KeyError:
pass
def default_mode(self, mode=None):
"""
Set the default mode for cell evaluation. This is equivalent
to putting %mode at the top of any cell that does not start
with %. Use salvus.default_mode() to return the current mode.
Use salvus.default_mode("") to have no default mode.
This is implemented using salvus.cell_prefix.
"""
if mode is None:
return Salvus._default_mode
Salvus._default_mode = mode
if mode == "sage":
self.cell_prefix("")
else:
self.cell_prefix("%" + mode)
def cell_prefix(self, prefix=None):
"""
Make it so that the given prefix code is textually
prepending to the input before evaluating any cell, unless
the first character of the cell is a %.
To append code at the end, use cell_postfix.
INPUT:
- ``prefix`` -- None (to return prefix) or a string ("" to disable)
EXAMPLES:
Make it so every cell is timed:
salvus.cell_prefix('%time')
Make it so cells are typeset using latex, and latex comments are allowed even
as the first line.
salvus.cell_prefix('%latex')
%sage salvus.cell_prefix('')
Evaluate each cell using GP (Pari) and display the time it took:
salvus.cell_prefix('%time\n%gp')
%sage salvus.cell_prefix('') # back to normal
"""
if prefix is None:
return Salvus._prefix
else:
Salvus._prefix = prefix
def cell_postfix(self, postfix=None):
"""
Make it so that the given code is textually
appended to the input before evaluating a cell.
To prepend code at the beginning, use cell_prefix.
INPUT:
- ``postfix`` -- None (to return postfix) or a string ("" to disable)
EXAMPLES:
Print memory usage after evaluating each cell:
salvus.cell_postfix('print("%s MB used"%int(get_memory_usage()))')
Return to normal
salvus.set_cell_postfix('')
"""
if postfix is None:
return Salvus._postfix
else:
Salvus._postfix = postfix
    def execute(self, code, namespace=None, preparse=True, locals=None):
        """
        Evaluate ``code`` block-by-block in ``namespace`` (default:
        self.namespace), sending output/errors to the cell's streams.
        Stops at the first block that raises.

        - preparse: run the Sage preparser on each block first.
        - Blocks ending in '?'/'??' are introspected instead of executed.
        """
        ascii_warn = False
        code_error = False  # NOTE(review): assigned but never used
        # Pre-scan for non-ascii characters so we can warn on failure below.
        if sys.getdefaultencoding() == 'ascii':
            for c in code:
                if ord(c) >= 128:
                    ascii_warn = True
                    break
        if namespace is None:
            namespace = self.namespace
        # clear pylab figure (takes a few microseconds)
        if pylab is not None:
            pylab.clf()
        # OR together compiler flags of all currently enabled __future__ features.
        compile_flags = reduce(operator.or_,
                               (feature.compiler_flag
                                for feature in Salvus._py_features.values()),
                               0)
        #code = sage_parsing.strip_leading_prompts(code) # broken -- wrong on "def foo(x):\n print(x)"
        blocks = sage_parsing.divide_into_blocks(code)
        try:
            import sage.repl
            # CRITICAL -- we do NOT import sage.repl.interpreter!!!!!!!
            # That would waste several seconds importing ipython and much more, which is just dumb.
            # The only reason this is needed below is if the user has run preparser(False), which
            # would cause sage.repl.interpreter to be imported at that point (as preparser is
            # lazy imported.)
            sage_repl_interpreter = sage.repl.interpreter
        except:
            pass  # expected behavior usually, since sage.repl.interpreter usually not imported (only used by command line...)
        import sage.misc.session
        for start, stop, block in blocks:
            # if import sage.repl.interpreter fails, sage_repl_interpreter is unreferenced
            try:
                do_pp = getattr(sage_repl_interpreter, '_do_preparse', True)
            except:
                do_pp = True
            if preparse and do_pp:
                block = sage_parsing.preparse_code(block)
            # Reset the buffered output streams before running this block.
            sys.stdout.reset()
            sys.stderr.reset()
            try:
                b = block.rstrip()
                # get rid of comments at the end of the line -- issue #1835
                #from ushlex import shlex
                #s = shlex(b)
                #s.commenters = '#'
                #s.quotes = '"\''
                #b = ''.join(s)
                # e.g. now a line like 'x = test? # bar' becomes 'x=test?'
                if b.endswith('??'):
                    # '??' -> show source code of the object
                    p = sage_parsing.introspect(b,
                                                namespace=namespace,
                                                preparse=False)
                    self.code(source=p['result'], mode="python")
                elif b.endswith('?'):
                    # '?' -> show the docstring of the object
                    p = sage_parsing.introspect(b,
                                                namespace=namespace,
                                                preparse=False)
                    self.code(source=p['result'], mode="text/x-rst")
                else:
                    reload_attached_files_if_mod_smc()
                    if execute.count < 2:
                        execute.count += 1
                        if execute.count == 2:
                            # this fixup has to happen after first block has executed (os.chdir etc)
                            # but before user assigns any variable in worksheet
                            # sage.misc.session.init() is not called until first call of show_identifiers
                            # BUGFIX: be careful to *NOT* assign to _!! see https://github.com/sagemathinc/cocalc/issues/1107
                            block2 = "sage.misc.session.state_at_init = dict(globals());sage.misc.session._dummy=sage.misc.session.show_identifiers();\n"
                            exec(compile(block2, '', 'single'), namespace,
                                 locals)
                            # Also load the user's startup file, if configured.
                            b2a = """
if 'SAGE_STARTUP_FILE' in os.environ and os.path.isfile(os.environ['SAGE_STARTUP_FILE']):
    try:
        load(os.environ['SAGE_STARTUP_FILE'])
    except:
        sys.stdout.flush()
        sys.stderr.write('\\nException loading startup file: {}\\n'.format(os.environ['SAGE_STARTUP_FILE']))
        sys.stderr.flush()
        raise
"""
                            exec(compile(b2a, '', 'exec'), namespace, locals)
                    # Honor any 'from __future__ import ...' in this block for
                    # this and subsequent blocks.
                    features = sage_parsing.get_future_features(
                        block, 'single')
                    if features:
                        compile_flags = reduce(
                            operator.or_, (feature.compiler_flag
                                           for feature in features.values()),
                            compile_flags)
                    exec(
                        compile(block + '\n',
                                '',
                                'single',
                                flags=compile_flags), namespace, locals)
                    if features:
                        Salvus._py_features.update(features)
                sys.stdout.flush()
                sys.stderr.flush()
            except:
                if ascii_warn:
                    sys.stderr.write(
                        '\n\n*** WARNING: Code contains non-ascii characters ***\n'
                    )
                    # Smart quotes are a common cause of the failure.
                    for c in '\u201c\u201d':
                        if c in code:
                            sys.stderr.write(
                                '*** Maybe the character < %s > should be replaced by < " > ? ***\n'
                                % c)
                            break
                    sys.stderr.write('\n\n')
                if six.PY2:
                    from exceptions import SyntaxError, TypeError
                # py3: all standard errors are available by default via "builtin", not available here for some reason ...
                if six.PY3:
                    from builtins import SyntaxError, TypeError
                exc_type, _, _ = sys.exc_info()
                if exc_type in [SyntaxError, TypeError]:
                    # Heuristic hint: a SyntaxError/TypeError may be caused by
                    # implicit multiplication like '5x' instead of '5*x'.
                    from .sage_parsing import strip_string_literals
                    code0, _, _ = strip_string_literals(code)
                    implicit_mul = RE_POSSIBLE_IMPLICIT_MUL.findall(code0)
                    if len(implicit_mul) > 0:
                        implicit_mul_list = ', '.join(
                            str(_) for _ in implicit_mul)
                        # we know there is a SyntaxError and there could be an implicit multiplication
                        sys.stderr.write(
                            '\n\n*** WARNING: Code contains possible implicit multiplication ***\n'
                        )
                        sys.stderr.write(
                            '*** Check if any of [ %s ] need a "*" sign for multiplication, e.g. 5x should be 5*x ! ***\n\n'
                            % implicit_mul_list)
                sys.stdout.flush()
                sys.stderr.write('Error in lines %s-%s\n' %
                                 (start + 1, stop + 1))
                traceback.print_exc()
                sys.stderr.flush()
                break
    def execute_with_code_decorators(self,
                                     code_decorators,
                                     code,
                                     preparse=True,
                                     namespace=None,
                                     locals=None):
        """
        salvus.execute_with_code_decorators is used when evaluating
        code blocks that are set to any non-default code_decorator.

        Decorators are evaluated in self.namespace; each may provide
        ``eval``/``before``/``after`` hooks, or be a plain callable that
        transforms the code string.
        """
        import sage  # used below as a code decorator
        if is_string(code_decorators):
            code_decorators = [code_decorators]
        if preparse:
            code_decorators = list(
                map(sage_parsing.preparse_code, code_decorators))
        # Turn decorator name strings into the actual objects.
        code_decorators = [
            eval(code_decorator, self.namespace)
            for code_decorator in code_decorators
        ]
        # The code itself may want to know exactly what code decorators are in effect.
        # For example, r.eval can do extra things when being used as a decorator.
        self.code_decorators = code_decorators
        # 'before' hooks run first (in listed order) and may replace the decorator.
        for i, code_decorator in enumerate(code_decorators):
            # eval is for backward compatibility
            if not hasattr(code_decorator, 'eval') and hasattr(
                    code_decorator, 'before'):
                code_decorators[i] = code_decorator.before(code)
        # Apply the decorators innermost-first (reversed order).
        for code_decorator in reversed(code_decorators):
            # eval is for backward compatibility
            if hasattr(code_decorator, 'eval'):
                print(code_decorator.eval(
                    code, locals=self.namespace))  # removed , end=' '
                code = ''
            elif code_decorator is sage:
                # special case -- the sage module (i.e., %sage) should do nothing.
                pass
            else:
                code = code_decorator(code)
            if code is None:
                code = ''
        # Whatever code is left after decoration gets executed normally.
        if code != '' and is_string(code):
            self.execute(code,
                         preparse=preparse,
                         namespace=namespace,
                         locals=locals)
        # 'after' hooks run last, in listed order.
        for code_decorator in code_decorators:
            if not hasattr(code_decorator, 'eval') and hasattr(
                    code_decorator, 'after'):
                code_decorator.after(code)
def html(self, html, done=False, once=None):
"""
Display html in the output stream.
EXAMPLE:
salvus.html("<b>Hi</b>")
"""
self._flush_stdio()
self._send_output(html=unicode8(html),
id=self._id,
done=done,
once=once)
def md(self, md, done=False, once=None):
"""
Display markdown in the output stream.
EXAMPLE:
salvus.md("**Hi**")
"""
self._flush_stdio()
self._send_output(md=unicode8(md), id=self._id, done=done, once=once)
    def pdf(self, filename, **kwds):
        # Thin wrapper around sage_salvus.show_pdf; its docstring is copied
        # onto this method at module scope (see the six.PY2/PY3 block).
        sage_salvus.show_pdf(filename, **kwds)
def tex(self, obj, display=False, done=False, once=None, **kwds):
"""
Display obj nicely using TeX rendering.
INPUT:
- obj -- latex string or object that is automatically be converted to TeX
- display -- (default: False); if True, typeset as display math (so centered, etc.)
"""
self._flush_stdio()
tex = obj if is_string(obj) else self.namespace['latex'](obj, **kwds)
self._send_output(tex={
'tex': tex,
'display': display
},
id=self._id,
done=done,
once=once)
return self
    def start_executing(self):
        # Emit an empty, not-done output message marking this cell as running.
        self._send_output(done=False, id=self._id)
    def clear(self, done=False):
        # Ask the client to clear this cell's output; docstring is copied
        # from sage_salvus.clear at module scope (six.PY2/PY3 block).
        self._send_output(clear=True, id=self._id, done=done)
    def delete_last_output(self, done=False):
        # Ask the client to delete the most recent output message of this cell;
        # docstring is copied from sage_salvus.delete_last_output at module scope.
        self._send_output(delete_last=True, id=self._id, done=done)
def stdout(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard output stream of the compute cell.
INPUT:
- output -- string or object
"""
stdout = output if is_string(output) else unicode8(output)
self._send_output(stdout=stdout, done=done, id=self._id, once=once)
return self
def stderr(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard error stream of the compute cell.
INPUT:
- output -- string or object
"""
stderr = output if is_string(output) else unicode8(output)
self._send_output(stderr=stderr, done=done, id=self._id, once=once)
return self
def code(
self,
source, # actual source code
mode=None, # the syntax highlight codemirror mode
filename=None, # path of file it is contained in (if applicable)
lineno=-1, # line number where source starts (0-based)
done=False,
once=None):
"""
Send a code message, which is to be rendered as code by the client, with
appropriate syntax highlighting, maybe a link to open the source file, etc.
"""
source = source if is_string(source) else unicode8(source)
code = {
'source': source,
'filename': filename,
'lineno': int(lineno),
'mode': mode
}
self._send_output(code=code, done=done, id=self._id, once=once)
return self
def _execute_interact(self, id, vals):
if id not in sage_salvus.interacts:
print("(Evaluate this cell to use this interact.)")
#raise RuntimeError("Error: No interact with id %s"%id)
else:
sage_salvus.interacts[id](vals)
def interact(self, f, done=False, once=None, **kwds):
I = sage_salvus.InteractCell(f, **kwds)
self._flush_stdio()
self._send_output(interact=I.jsonable(),
id=self._id,
done=done,
once=once)
return sage_salvus.InteractFunction(I)
def javascript(self,
code,
once=False,
coffeescript=False,
done=False,
obj=None):
"""
Execute the given Javascript code as part of the output
stream. This same code will be executed (at exactly this
point in the output stream) every time the worksheet is
rendered.
See the docs for the top-level javascript function for more details.
INPUT:
- code -- a string
- once -- boolean (default: FAlse); if True the Javascript is
only executed once, not every time the cell is loaded. This
is what you would use if you call salvus.stdout, etc. Use
once=False, e.g., if you are using javascript to make a DOM
element draggable (say). WARNING: If once=True, then the
javascript is likely to get executed before other output to
a given cell is even rendered.
- coffeescript -- boolean (default: False); if True, the input
code is first converted from CoffeeScript to Javascript.
At least the following Javascript objects are defined in the
scope in which the code is evaluated::
- cell -- jQuery wrapper around the current compute cell
- salvus.stdout, salvus.stderr, salvus.html, salvus.tex -- all
allow you to write additional output to the cell
- worksheet - jQuery wrapper around the current worksheet DOM object
- obj -- the optional obj argument, which is passed via JSON serialization
"""
if obj is None:
obj = {}
self._send_output(javascript={
'code': code,
'coffeescript': coffeescript
},
id=self._id,
done=done,
obj=obj,
once=once)
def coffeescript(self, *args, **kwds):
"""
This is the same as salvus.javascript, but with coffeescript=True.
See the docs for the top-level javascript function for more details.
"""
kwds['coffeescript'] = True
self.javascript(*args, **kwds)
    def raw_input(self,
                  prompt='',
                  default='',
                  placeholder='',
                  input_width=None,
                  label_width=None,
                  done=False,
                  type=None):  # done is ignored here
        """
        Show an input box in the cell and block until the user submits a
        value.  The submitted value is returned, converted by ``type`` if
        given ('sage' means sage_eval; any other callable is applied).
        Raises KeyboardInterrupt if a non-input message arrives first.
        Docstring is replaced from sage_salvus.raw_input at module scope.
        """
        self._flush_stdio()
        m = {'prompt': unicode8(prompt)}
        if input_width is not None:
            m['input_width'] = unicode8(input_width)
        if label_width is not None:
            m['label_width'] = unicode8(label_width)
        if default:
            m['value'] = unicode8(default)
        if placeholder:
            m['placeholder'] = unicode8(placeholder)
        # Send the input request, then block waiting for the reply message.
        self._send_output(raw_input=m, id=self._id)
        typ, mesg = self.message_queue.next_mesg()
        log("handling raw input message ", truncate_text(unicode8(mesg), 400))
        if typ == 'json' and mesg['event'] == 'sage_raw_input':
            # everything worked out perfectly
            # Replace the pending input box with a submitted (read-only) one.
            self.delete_last_output()
            m['value'] = mesg['value']  # as unicode!
            m['submitted'] = True
            self._send_output(raw_input=m, id=self._id)
            value = mesg['value']
            if type is not None:
                if type == 'sage':
                    value = sage_salvus.sage_eval(value)
                else:
                    try:
                        value = type(value)
                    except TypeError:
                        # Some things in Sage are clueless about unicode for some reason...
                        # Let's at least try, in case the unicode can convert to a string.
                        value = type(str(value))
            return value
        else:
            raise KeyboardInterrupt(
                "raw_input interrupted by another action: event='%s' (expected 'sage_raw_input')"
                % mesg['event'])
def _check_component(self, component):
if component not in ['input', 'output']:
raise ValueError("component must be 'input' or 'output'")
    def hide(self, component):
        """
        Hide the given component ('input' or 'output') of the cell.
        """
        self._check_component(component)
        # NOTE(review): unlike sibling methods, the id is passed positionally
        # here -- presumably _send_output's first parameter is the id; confirm.
        self._send_output(self._id, hide=component)
    def show(self, component):
        """
        Show the given component ('input' or 'output') of the cell.
        """
        self._check_component(component)
        # NOTE(review): id passed positionally, unlike most _send_output calls
        # in this class -- confirm against _send_output's signature.
        self._send_output(self._id, show=component)
def notify(self, **kwds):
"""
Display a graphical notification using the alert_message Javascript function.
INPUTS:
- `type: "default"` - Type of the notice. "default", "warning", "info", "success", or "error".
- `title: ""` - The notice's title.
- `message: ""` - The notice's text.
- `timeout: ?` - Delay in seconds before the notice is automatically removed.
EXAMPLE:
salvus.notify(type="warning", title="This warning", message="This is a quick message.", timeout=3)
"""
obj = {}
for k, v in kwds.items():
if k == 'text': # backward compat
k = 'message'
elif k == 'type' and v == 'notice': # backward compat
v = 'default'
obj[k] = sage_salvus.jsonable(v)
if k == 'delay': # backward compat
obj['timeout'] = v / 1000.0 # units are in seconds now.
self.javascript("alert_message(obj)", once=True, obj=obj)
def execute_javascript(self, code, coffeescript=False, obj=None):
"""
Tell the browser to execute javascript. Basically the same as
salvus.javascript with once=True (the default), except this
isn't tied to a particular cell. There is a worksheet object
defined in the scope of the evaluation.
See the docs for the top-level javascript function for more details.
"""
self._conn.send_json(
message.execute_javascript(code,
coffeescript=coffeescript,
obj=json.dumps(obj,
separators=(',', ':'))))
def execute_coffeescript(self, *args, **kwds):
"""
This is the same as salvus.execute_javascript, but with coffeescript=True.
See the docs for the top-level javascript function for more details.
"""
kwds['coffeescript'] = True
self.execute_javascript(*args, **kwds)
def _cython(self, filename, **opts):
"""
Return module obtained by compiling the Cython code in the
given file.
INPUT:
- filename -- name of a Cython file
- all other options are passed to sage.misc.cython.cython unchanged,
except for use_cache which defaults to True (instead of False)
OUTPUT:
- a module
"""
if 'use_cache' not in opts:
opts['use_cache'] = True
import sage.misc.cython
modname, path = sage.misc.cython.cython(filename, **opts)
try:
sys.path.insert(0, path)
module = __import__(modname)
finally:
del sys.path[0]
return module
def _import_code(self, content, **opts):
while True:
py_file_base = uuid().replace('-', '_')
if not os.path.exists(py_file_base + '.py'):
break
try:
open(py_file_base + '.py', 'w').write(content)
try:
sys.path.insert(0, os.path.abspath('.'))
mod = __import__(py_file_base)
finally:
del sys.path[0]
finally:
os.unlink(py_file_base + '.py')
os.unlink(py_file_base + '.pyc')
return mod
def _sage(self, filename, **opts):
import sage.misc.preparser
content = "from sage.all import *\n" + sage.misc.preparser.preparse_file(
open(filename).read())
return self._import_code(content, **opts)
def _spy(self, filename, **opts):
import sage.misc.preparser
content = "from sage.all import Integer, RealNumber, PolynomialRing\n" + sage.misc.preparser.preparse_file(
open(filename).read())
return self._import_code(content, **opts)
    def _py(self, filename, **opts):
        # NOTE(review): require() passes the filename *including* the '.py'
        # extension, but __import__ expects a dotted module name -- presumably
        # callers are expected to pass the bare module name; confirm.
        return __import__(filename)
def require(self, filename, **opts):
if not os.path.exists(filename):
raise ValueError("file '%s' must exist" % filename)
base, ext = os.path.splitext(filename)
if ext == '.pyx' or ext == '.spyx':
return self._cython(filename, **opts)
if ext == ".sage":
return self._sage(filename, **opts)
if ext == ".spy":
return self._spy(filename, **opts)
if ext == ".py":
return self._py(filename, **opts)
raise NotImplementedError("require file of type %s not implemented" %
ext)
    def typeset_mode(self, on=True):
        # Thin wrapper; delegates directly to sage_salvus.typeset_mode.
        sage_salvus.typeset_mode(on)
    def project_info(self):
        """
        Return a dictionary with information about the project in which this code is running.

        EXAMPLES::

            sage: salvus.project_info()
            {"stdout":"{u'project_id': u'...', u'location': {u'username': u'teaAuZ9M', u'path': u'.', u'host': u'localhost', u'port': 22}, u'base_url': u'/...'}\n"}
        """
        # INFO is a module-level dict populated at server startup.
        return INFO
# Copy the user-facing docstrings from the sage_salvus implementations onto
# the corresponding Salvus wrapper methods.  On Python 2 an unbound method's
# __doc__ is read-only, so the underlying function (__func__) is used.
for _method_name, _doc_source in (
        ('pdf', sage_salvus.show_pdf),
        ('raw_input', sage_salvus.raw_input),
        ('clear', sage_salvus.clear),
        ('delete_last_output', sage_salvus.delete_last_output)):
    _target = getattr(Salvus, _method_name)
    if six.PY2:
        _target = _target.__func__
    _target.__doc__ = _doc_source.__doc__
def execute(conn, id, code, data, cell_id, preparse, message_queue):
    """
    Top-level entry point for an 'execute_code' message: build a Salvus
    object for the cell, redirect stdout/stderr into buffered streams that
    forward to the client, apply the cell prefix/postfix, run the code,
    and finally flush exactly one 'done' message (unless salvus._done is
    False) and restore the real streams.
    """
    salvus = Salvus(conn=conn,
                    id=id,
                    data=data,
                    message_queue=message_queue,
                    cell_id=cell_id)
    #salvus.start_executing() # with our new mainly client-side execution this isn't needed; not doing this makes evaluation roundtrip around 100ms instead of 200ms too, which is a major win.
    try:
        # initialize the salvus output streams
        streams = (sys.stdout, sys.stderr)
        sys.stdout = BufferedOutputStream(salvus.stdout)
        sys.stderr = BufferedOutputStream(salvus.stderr)
        try:
            # initialize more salvus functionality
            sage_salvus.set_salvus(salvus)
            namespace['sage_salvus'] = sage_salvus
        except:
            traceback.print_exc()
        # Cells starting with '%' opt out of the configured prefix.
        if salvus._prefix:
            if not code.startswith("%"):
                code = salvus._prefix + '\n' + code
        if salvus._postfix:
            code += '\n' + salvus._postfix
        salvus.execute(code, namespace=namespace, preparse=preparse)
    finally:
        # there must be exactly one done message, unless salvus._done is False.
        # Whichever stream flushes last carries the 'done' flag.
        if sys.stderr._buf:
            if sys.stdout._buf:
                sys.stdout.flush()
            sys.stderr.flush(done=salvus._done)
        else:
            sys.stdout.flush(done=salvus._done)
        (sys.stdout, sys.stderr) = streams


# execute.count goes from 0 to 2
# used for show_identifiers()
execute.count = 0
def drop_privileges(id, home, transient, username):
    """
    Switch this process to run as the (transient) user with uid/gid ``id``
    and home directory ``home``: chown the home dir if transient, drop
    group then user privileges, repoint the relevant environment variables
    at the new home, and chdir into it.
    """
    gid = id
    uid = id
    if transient:
        os.chown(home, uid, gid)
    # Order matters: the gid must be set while we still have the privilege
    # to do so, i.e. before setuid.
    os.setgid(gid)
    os.setuid(uid)
    os.environ['DOT_SAGE'] = home
    mpl = os.environ['MPLCONFIGDIR']
    # NOTE(review): mpl[5:] drops the first 5 characters of the old
    # MPLCONFIGDIR -- presumably stripping a known prefix such as '/tmp/';
    # TODO confirm against how MPLCONFIGDIR is set at startup.
    os.environ['MPLCONFIGDIR'] = home + mpl[5:]
    os.environ['HOME'] = home
    os.environ['IPYTHON_DIR'] = home
    os.environ['USERNAME'] = username
    os.environ['USER'] = username
    os.chdir(home)
    # Monkey patch the Sage library and anything else that does not
    # deal well with changing user. This sucks, but it is work that
    # simply must be done because we're not importing the library from
    # scratch (which would take a long time).
    import sage.misc.misc
    sage.misc.misc.DOT_SAGE = home + '/.sage/'
class MessageQueue(list):
    """
    FIFO queue of messages received on a connection.

    ``recv`` enqueues the newest message at the front of ``self.queue``;
    ``next_mesg`` consumes from the back, so the oldest message is always
    returned first.
    """

    def __init__(self, conn):
        self.queue = []
        self.conn = conn

    def __repr__(self):
        return "Sage Server Message Queue"

    def __getitem__(self, i):
        return self.queue[i]

    def __delitem__(self, i):
        del self.queue[i]

    def next_mesg(self):
        """
        Remove the oldest message from the queue and return it.
        If the queue is empty, wait for a message to arrive
        and return it (does not place it in the queue).
        """
        return self.queue.pop() if self.queue else self.conn.recv()

    def recv(self):
        """
        Wait until one message is received, enqueue it (newest at the
        front), and return it.
        """
        mesg = self.conn.recv()
        self.queue.insert(0, mesg)
        return mesg
def session(conn):
    """
    This is run by the child process that is forked off on each new
    connection. It drops privileges, then handles the complete
    compute session.

    INPUT:

    - ``conn`` -- the TCP connection
    """
    mq = MessageQueue(conn)
    pid = os.getpid()
    # seed the random number generator(s)
    import sage.all
    sage.all.set_random_seed()
    import random
    random.seed(sage.all.initial_seed())
    cnt = 0
    # Main message loop: dispatch on the 'event' field of each message.
    while True:
        try:
            typ, mesg = mq.next_mesg()
            #print('INFO:child%s: received message "%s"'%(pid, mesg))
            log("handling message ", truncate_text(unicode8(mesg), 400))
            event = mesg['event']
            if event == 'terminate_session':
                return
            elif event == 'execute_code':
                try:
                    execute(conn=conn,
                            id=mesg['id'],
                            code=mesg['code'],
                            data=mesg.get('data', None),
                            cell_id=mesg.get('cell_id', None),
                            preparse=mesg.get('preparse', True),
                            message_queue=mq)
                except Exception as err:
                    log("ERROR -- exception raised '%s' when executing '%s'" %
                        (err, mesg['code']))
            elif event == 'introspect':
                try:
                    # check for introspect from jupyter cell
                    prefix = Salvus._default_mode
                    if 'top' in mesg:
                        top = mesg['top']
                        log('introspect cell top line %s' % top)
                        if top.startswith("%"):
                            prefix = top[1:]
                    try:
                        # see if prefix is the name of a jupyter kernel function
                        kc = eval(prefix + "(get_kernel_client=True)",
                                  namespace, locals())
                        kn = eval(prefix + "(get_kernel_name=True)", namespace,
                                  locals())
                        log("jupyter introspect prefix %s kernel %s" %
                            (prefix, kn))  # e.g. "p2", "python2"
                        jupyter_introspect(conn=conn,
                                           id=mesg['id'],
                                           line=mesg['line'],
                                           preparse=mesg.get('preparse', True),
                                           kc=kc)
                    except:
                        # Not a jupyter cell (or the kernel lookup failed):
                        # fall back to native Sage introspection.
                        import traceback
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        lines = traceback.format_exception(
                            exc_type, exc_value, exc_traceback)
                        log(lines)
                        introspect(conn=conn,
                                   id=mesg['id'],
                                   line=mesg['line'],
                                   preparse=mesg.get('preparse', True))
                except:
                    pass
            else:
                raise RuntimeError("invalid message '%s'" % mesg)
        except:
            # When hub connection dies, loop goes crazy.
            # Unfortunately, just catching SIGINT doesn't seem to
            # work, and leads to random exits during a
            # session. Howeer, when connection dies, 10000 iterations
            # happen almost instantly. Ugly, but it works.
            cnt += 1
            if cnt > 10000:
                sys.exit(0)
            else:
                pass
def jupyter_introspect(conn, id, line, preparse, kc):
    """
    Tab-completion for a cell backed by a jupyter kernel: send a
    'complete' request via the kernel client ``kc``, drain the iopub
    channel until the kernel is idle, then read the completion reply from
    the shell channel and forward the matches to the client.
    """
    import jupyter_client
    from queue import Empty
    try:
        salvus = Salvus(conn=conn, id=id)
        msg_id = kc.complete(line)
        shell = kc.shell_channel
        iopub = kc.iopub_channel
        # handle iopub responses
        while True:
            try:
                msg = iopub.get_msg(timeout=1)
                msg_type = msg['msg_type']
                content = msg['content']
            except Empty:
                # shouldn't happen
                log("jupyter iopub channel empty")
                break
            # Skip messages that belong to some other request.
            if msg['parent_header'].get('msg_id') != msg_id:
                continue
            log("jupyter iopub recv %s %s" % (msg_type, str(content)))
            if msg_type == 'status' and content['execution_state'] == 'idle':
                break
        # handle shell responses
        while True:
            try:
                msg = shell.get_msg(timeout=10)
                msg_type = msg['msg_type']
                content = msg['content']
            except:
                # shouldn't happen
                log("jupyter shell channel empty")
                break
            if msg['parent_header'].get('msg_id') != msg_id:
                continue
            log("jupyter shell recv %s %s" % (msg_type, str(content)))
            if msg_type == 'complete_reply' and content['status'] == 'ok':
                # jupyter kernel returns matches like "xyz.append" and smc wants just "append"
                matches = content['matches']
                offset = content['cursor_end'] - content['cursor_start']
                completions = [s[offset:] for s in matches]
                mesg = message.introspect_completions(id=id,
                                                      completions=completions,
                                                      target=line[-offset:])
                conn.send_json(mesg)
                break
    except:
        log("jupyter completion exception: %s" % sys.exc_info()[0])
def introspect(conn, id, line, preparse):
    """
    Native Sage introspection for ``line``: compute completions, a
    docstring, or source code (whichever sage_parsing.introspect reports)
    and send the corresponding message back on ``conn``.
    """
    salvus = Salvus(
        conn=conn, id=id
    )  # so salvus.[tab] works -- note that Salvus(...) modifies namespace.
    z = sage_parsing.introspect(line, namespace=namespace, preparse=preparse)
    # NOTE(review): if none of the three flags below is set, mesg is never
    # bound and conn.send_json raises NameError -- presumably
    # sage_parsing.introspect always sets exactly one; confirm.
    if z['get_completions']:
        mesg = message.introspect_completions(id=id,
                                              completions=z['result'],
                                              target=z['target'])
    elif z['get_help']:
        mesg = message.introspect_docstring(id=id,
                                            docstring=z['result'],
                                            target=z['expr'])
    elif z['get_source']:
        mesg = message.introspect_source_code(id=id,
                                              source_code=z['result'],
                                              target=z['expr'])
    conn.send_json(mesg)
def handle_session_term(signum, frame):
    """SIGCHLD handler: reap every exited child so zombies don't accumulate."""
    reaping = True
    while reaping:
        try:
            child_pid, _exit_status = os.waitpid(-1, os.WNOHANG)
        except:
            # no children left (waitpid raised) -- nothing more to reap
            return
        # waitpid returns pid 0 when children exist but none have exited yet.
        reaping = bool(child_pid)
# Location of the shared secret used to authenticate incoming connections.
# The token itself is read lazily by unlock_conn (stays None until needed).
secret_token = None
try:
    secret_token_path = os.environ['COCALC_SECRET_TOKEN']
except KeyError:
    secret_token_path = os.path.join(os.environ['SMC'], 'secret_token')
def unlock_conn(conn):
    """
    Authenticate a freshly accepted connection: read the shared secret
    token from the client and compare it with the server's secret.

    Returns True (and sends 'y') on success; on any failure sends 'n'
    plus an explanation, closes the connection, and returns False.
    """
    global secret_token
    if secret_token is None:
        try:
            secret_token = open(secret_token_path).read().strip()
        except:
            conn.send(six.b('n'))
            conn.send(
                six.
                b("Unable to accept connection, since Sage server doesn't yet know the secret token; unable to read from '%s'"
                  % secret_token_path))
            conn.close()
            # BUGFIX: the original fell through here with secret_token still
            # None, then crashed on len(secret_token) below.
            return False
    n = len(secret_token)
    token = six.b('')
    # Read until we have the full token, bailing out as soon as the prefix
    # stops matching.
    # NOTE(review): on Python 3, token is bytes while secret_token is str,
    # so the prefix comparison below never matches across multiple recv
    # calls -- presumably the client sends the token in one packet; confirm.
    # NOTE(security): this is not a constant-time comparison, and the log
    # line below records the secret on mismatch.
    while len(token) < n:
        token += conn.recv(n)
        if token != secret_token[:len(token)]:
            break  # definitely not right -- don't try anymore
    if token != six.b(secret_token):
        log("token='%s'; secret_token='%s'" % (token, secret_token))
        conn.send(six.b('n'))  # no -- invalid login
        conn.send(six.b("Invalid secret token."))
        conn.close()
        return False
    else:
        conn.send(six.b('y'))  # yes -- valid login
        return True
def serve_connection(conn):
    """
    Handle one freshly accepted client socket: authenticate it via
    unlock_conn, then either forward a signal ('send_signal'), reject an
    unknown event, or start a full compute session ('start_session').
    """
    global PID
    PID = os.getpid()
    # First the client *must* send the secret shared token. If they
    # don't, we return (and the connection will have been destroyed by
    # unlock_conn).
    log("Serving a connection")
    log("Waiting for client to unlock the connection...")
    # TODO -- put in a timeout (?)
    if not unlock_conn(conn):
        log("Client failed to unlock connection. Dumping them.")
        return
    log("Connection unlocked.")
    try:
        # From here on, speak the JSON message protocol over the socket.
        conn = ConnectionJSON(conn)
        typ, mesg = conn.recv()
        log("Received message %s" % mesg)
    except Exception as err:
        log("Error receiving message: %s (connection terminated)" % str(err))
        raise
    if mesg['event'] == 'send_signal':
        if mesg['pid'] == 0:
            # pid 0 would signal our whole process group -- refuse it.
            log("invalid signal mesg (pid=0)")
        else:
            log("Sending a signal")
            os.kill(mesg['pid'], mesg['signal'])
        return
    if mesg['event'] != 'start_session':
        log("Received an unknown message event = %s; terminating session." %
            mesg['event'])
        return
    log("Starting a session")
    desc = message.session_description(os.getpid())
    log("child sending session description back: %s" % desc)
    conn.send_json(desc)
    session(conn=conn)
def serve(port, host, extra_imports=False):
    """Main Sage server loop.

    Binds a TCP socket to (host, port), pre-imports and monkey-patches the
    Sage library into the global execution `namespace`, then accepts
    connections forever, forking one child per connection
    (`serve_connection`). Finished children are reaped periodically (the
    accept timeout doubles as the reap interval).

    Parameters
    ----------
    port, host :
        Address to listen on.
    extra_imports : bool
        If True, also pre-import scipy/sympy and warm up plotting and
        symbolic integration so worksheet startup is faster.
    """
    #log.info('opening connection on port %s', port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # check for children that have finished every few seconds, so
    # we don't end up with zombies.
    s.settimeout(5)
    s.bind((host, port))
    log('Sage server %s:%s' % (host, port))

    # Enabling the following signal completely breaks subprocess pexpect in
    # many cases, which is obviously totally unacceptable.
    #signal.signal(signal.SIGCHLD, handle_session_term)

    def init_library():
        """Import sage.all, monkey-patch it for Salvus worksheets, and fill
        the global `namespace` used by worksheet sessions."""
        tm = time.time()
        log("pre-importing the sage library...")
        # Actually import sage now. This must happen after the interact
        # import because of library interacts.
        log("import sage...")
        import sage.all
        log("imported sage.")
        # Monkey patching interact using the new and improved Salvus
        # implementation of interact.
        sage.all.interact = sage_salvus.interact
        # Monkey patch the html command.
        try:
            # need the following for sage_server to start with sage-8.0
            # or `import sage.interacts.library` will fail (not really
            # important below, as we don't do that).
            import sage.repl.user_globals
            sage.repl.user_globals.set_globals(globals())
            log("initialized user_globals")
        except RuntimeError:
            # may happen with sage version < 8.0
            log("user_globals.set_globals failed, continuing", sys.exc_info())
        sage.all.html = sage.misc.html.html = sage_salvus.html
        # CRITICAL: we deliberately do NOT import sage.interacts.library --
        # it is **really slow** and essentially unused.
        #import sage.interacts.library
        #sage.interacts.library.html = sage_salvus.html
        # Set a useful figsize default; the matplotlib one is not notebook
        # friendly.
        import sage.plot.graphics
        sage.plot.graphics.Graphics.SHOW_OPTIONS['figsize'] = [8, 4]
        # Monkey patch latex.eval, so that %latex works in worksheets
        sage.misc.latex.latex.eval = sage_salvus.latex0
        # Plot, integrate, etc., -- so startup time of worksheets is minimal.
        cmds = [
            'from sage.all import *', 'from sage.calculus.predefined import x',
            'import pylab'
        ]
        if extra_imports:
            cmds.extend([
                'import scipy', 'import sympy',
                "plot(sin).save('%s/a.png'%os.environ['SMC'], figsize=2)",
                'integrate(sin(x**2),x)'
            ])
        tm0 = time.time()
        for cmd in cmds:
            log(cmd)
            exec(cmd, namespace)
        global pylab
        pylab = namespace['pylab']  # used for clearing
        log('imported sage library and other components in %s seconds' %
            (time.time() - tm))
        # Expose the Salvus interact helpers to worksheet code.
        for k, v in sage_salvus.interact_functions.items():
            namespace[k] = v
        namespace['_salvus_parsing'] = sage_parsing
        # Worksheet cell "mode" commands and misc helpers.
        for name in [
                'anaconda', 'asy', 'attach', 'auto', 'capture', 'cell',
                'clear', 'coffeescript', 'cython', 'default_mode',
                'delete_last_output', 'dynamic', 'exercise', 'fork', 'fortran',
                'go', 'help', 'hide', 'hideall', 'input', 'java', 'javascript',
                'julia', 'jupyter', 'license', 'load', 'md', 'mediawiki',
                'modes', 'octave', 'pandoc', 'perl', 'plot3d_using_matplotlib',
                'prun', 'python_future_feature', 'py3print_mode', 'python',
                'python3', 'r', 'raw_input', 'reset', 'restore', 'ruby',
                'runfile', 'sage_eval', 'scala', 'scala211',
                'script', 'search_doc', 'search_src', 'sh', 'show',
                'show_identifiers', 'singular_kernel', 'time', 'timeit',
                'typeset_mode', 'var', 'wiki'
        ]:
            namespace[name] = getattr(sage_salvus, name)
        namespace['sage_server'] = sys.modules[
            __name__]  # http://stackoverflow.com/questions/1676835/python-how-do-i-get-a-reference-to-a-module-inside-the-module-itself
        # alias pretty_print_default to typeset_mode, since sagenb has/uses that.
        namespace['pretty_print_default'] = namespace['typeset_mode']
        # and monkey patch it
        sage.misc.latex.pretty_print_default = namespace[
            'pretty_print_default']
        sage_salvus.default_namespace = dict(namespace)
        log("setup namespace with extra functions")
        # Sage's pretty_print and view are both ancient and a mess
        sage.all.pretty_print = sage.misc.latex.pretty_print = namespace[
            'pretty_print'] = namespace['view'] = namespace['show']
        # this way client code can tell it is running as a Sage Worksheet.
        namespace['__SAGEWS__'] = True

    log("Initialize sage library.")
    init_library()

    t = time.time()
    s.listen(128)
    i = 0
    children = {}
    log("Starting server listening for connections")
    try:
        while True:
            i += 1
            # do not use log.info(...) in the server loop; threads = race
            # conditions that hang server every so often!!
            try:
                if children:
                    # Reap finished children so we don't accumulate zombies.
                    for pid in list(children.keys()):
                        if os.waitpid(pid, os.WNOHANG) != (0, 0):
                            log("subprocess %s terminated, closing connection"
                                % pid)
                            # BUG FIX: close *that child's* connection;
                            # previously this closed whatever socket `conn`
                            # last referred to (NameError on the first pass).
                            children[pid].close()
                            del children[pid]
                try:
                    conn, addr = s.accept()
                    log("Accepted a connection from", addr)
                except:
                    # this will happen periodically since we did s.settimeout
                    # above, so that we wait for children above periodically.
                    continue
            except socket.error:
                continue
            child_pid = os.fork()
            if child_pid:  # parent
                log("forked off child with pid %s to handle this connection" %
                    child_pid)
                children[child_pid] = conn
            else:
                # child -- serve exactly one connection then exit.
                global PID
                PID = os.getpid()
                log("child process, will now serve this new connection")
                try:
                    serve_connection(conn)
                finally:
                    # BUG FIX: without _exit the child fell back into the
                    # accept loop and competed with the parent for new
                    # connections.
                    os._exit(0)
        # end while
    except Exception as err:
        log("Error taking connection: ", err)
        traceback.print_exc(file=open(LOGFILE, 'a'))
        #log.error("error: %s %s", type(err), str(err))
    finally:
        log("closing socket")
        #s.shutdown(0)
        s.close()
def run_server(port, host, pidfile, logfile=None):
    """Run the Sage server, recording our pid and cleaning it up on exit.

    Parameters
    ----------
    port, host :
        Passed straight through to `serve`.
    pidfile : str
        If non-empty, this process's pid is written there and the file is
        removed when `serve` returns (even on error).
    logfile : str or None
        If given, overrides the module-level LOGFILE destination.
    """
    global LOGFILE
    if logfile:
        LOGFILE = logfile
    if pidfile:
        pid = str(os.getpid())
        print("os.getpid() = %s" % pid)
        # Use a context manager so the handle is closed (and the pid flushed)
        # immediately, instead of leaking an open file.
        with open(pidfile, 'w') as f:
            f.write(pid)
    log("run_server: port=%s, host=%s, pidfile='%s', logfile='%s'" %
        (port, host, pidfile, LOGFILE))
    try:
        serve(port, host)
    finally:
        if pidfile:
            os.unlink(pidfile)
if __name__ == "__main__":
    # Command-line entry point: parse options, optionally run as a test
    # client or daemonize, then start the server.
    import argparse
    parser = argparse.ArgumentParser(description="Run Sage server")
    parser.add_argument(
        "-p",
        dest="port",
        type=int,
        default=0,
        help=
        "port to listen on (default: 0); 0 = automatically allocated; saved to $SMC/data/sage_server.port"
    )
    parser.add_argument(
        "-l",
        dest='log_level',
        type=str,
        default='INFO',
        help=
        "log level (default: INFO) useful options include WARNING and DEBUG")
    parser.add_argument("-d",
                        dest="daemon",
                        default=False,
                        action="store_const",
                        const=True,
                        help="daemon mode (default: False)")
    parser.add_argument(
        "--host",
        dest="host",
        type=str,
        default='127.0.0.1',
        help="host interface to bind to -- default is 127.0.0.1")
    parser.add_argument("--pidfile",
                        dest="pidfile",
                        type=str,
                        default='',
                        help="store pid in this file")
    parser.add_argument(
        "--logfile",
        dest="logfile",
        type=str,
        default='',
        help="store log in this file (default: '' = don't log to a file)")
    parser.add_argument("-c",
                        dest="client",
                        default=False,
                        action="store_const",
                        const=True,
                        help="run in test client mode number 1 (command line)")
    parser.add_argument("--hostname",
                        dest="hostname",
                        type=str,
                        default='',
                        help="hostname to connect to in client mode")
    parser.add_argument("--portfile",
                        dest="portfile",
                        type=str,
                        default='',
                        help="write port to this file")
    args = parser.parse_args()

    if args.daemon and not args.pidfile:
        print(("%s: must specify pidfile in daemon mode" % sys.argv[0]))
        sys.exit(1)

    if args.log_level:
        pass
        #level = getattr(logging, args.log_level.upper())
        #log.setLevel(level)

    if args.client:
        client1(
            port=args.port if args.port else int(open(args.portfile).read()),
            hostname=args.hostname)
        sys.exit(0)

    if not args.port:
        # Ask the OS for a free port by binding to port 0, then release it.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))  # pick a free port
        args.port = s.getsockname()[1]
        # BUG FIX: close the socket explicitly before the server re-binds the
        # port -- `del s` only released it via CPython refcounting.
        s.close()

    if args.portfile:
        # Close promptly so other processes see the port immediately.
        with open(args.portfile, 'w') as f:
            f.write(str(args.port))

    pidfile = os.path.abspath(args.pidfile) if args.pidfile else ''
    logfile = os.path.abspath(args.logfile) if args.logfile else ''
    if logfile:
        LOGFILE = logfile
        # Truncate the log on restart without leaking an open handle.
        open(LOGFILE, 'w').close()  # for now we clear it on restart...
        log("setting logfile to %s" % LOGFILE)

    main = lambda: run_server(port=args.port, host=args.host, pidfile=pidfile)
    if args.daemon and args.pidfile:
        from . import daemon
        daemon.daemonize(args.pidfile)
        main()
    else:
        main()
# ---- end of sage_server segment ----
import numpy as np
from selection.algorithms.softmax import nonnegative_softmax
import regreg.api as rr
from selection.bayesian.selection_probability_rr import cube_barrier_scaled, cube_gradient_scaled, cube_hessian_scaled
from selection.algorithms.softmax import nonnegative_softmax
def cube_subproblem_fs_linear(argument,
                              c,
                              randomization_CGF_conjugate,
                              nstep=100,
                              initial=None,
                              lipschitz=0,
                              tol=1.e-10):
    '''
    Solve the subproblem
    $$
    \text{minimize}_{z} \Lambda_{-E}^*(u + z_{-E}) + b_{-E}(z; c)
    $$
    where $u$ is `argument`, $\Lambda_{-E}^*$ is the
    convex conjugate of the $-E$ coordinates of the
    randomization (assumes that randomization has independent
    coordinates) and
    $b_{-E}$ is a barrier approximation to
    the cube $\prod_{j \in -E} [-\lambda_j,\lambda_j]$ with
    $\lambda$ being `lagrange`.

    Uses a damped, coordinatewise-scaled Newton iteration with a
    feasibility line search and a descent line search.

    Returns the maximizer and the value of the convex conjugate.
    '''
    k = argument.shape[0]
    lagrange = c * np.ones(k)

    if initial is None:
        # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `float` is the documented replacement.
        current = np.zeros(k, dtype=float)
    else:
        current = initial  # no copy -- caller's array is updated in place

    current_value = np.inf

    conj_value = lambda x: randomization_CGF_conjugate.smooth_objective(x, 'func')
    conj_grad = lambda x: randomization_CGF_conjugate.smooth_objective(x, 'grad')

    # per-coordinate step sizes (halved independently per failing coordinate)
    step = np.ones(k, dtype=float)
    objective = lambda u: cube_barrier_scaled(u, lagrange) + conj_value(argument + u)

    for itercount in range(nstep):
        # diagonal (Hessian-scaled) Newton direction
        newton_step = ((cube_gradient_scaled(current, lagrange) +
                        (conj_grad(argument + current))) /
                       (cube_hessian_scaled(current, lagrange) + lipschitz))

        # make sure proposal is feasible (inside the cube)
        count = 0
        while True:
            count += 1
            proposal = current - step * newton_step
            failing = (proposal > lagrange) + (proposal < -lagrange)
            if not failing.sum():
                break
            # halve the step only on the coordinates that left the cube
            step *= 0.5 ** failing

            if count >= 40:
                raise ValueError('not finding a feasible point')

        # make sure proposal is a descent
        while True:
            proposal = current - step * newton_step
            proposed_value = objective(proposal)
            if proposed_value <= current_value:
                break
            step *= 0.5

        # stop if relative decrease is small
        if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value):
            current = proposal
            current_value = proposed_value
            break

        current = proposal
        current_value = proposed_value

        # cautiously grow the step again every few iterations
        if itercount % 4 == 0:
            step *= 2

    value = objective(current)
    return current, value
class cube_objective_fs_linear(rr.smooth_atom):
    """Smooth atom whose value/gradient come from solving the cube
    subproblem (`cube_subproblem_fs_linear`) at the given argument.

    The argument is split as [z (all but last coord), c (last coord)];
    c plays the role of the cube half-width (lagrange) -- TODO confirm
    against callers.
    """

    def __init__(self,
                 randomization_CGF_conjugate,
                 nstep=10,
                 tol=1.e-10,
                 initial=None,
                 coef=1.,
                 offset=None,
                 quadratic=None):
        """Store the conjugate of the randomization CGF and the solver
        settings (nstep, tol) used by the inner Newton iteration."""
        (self.randomization_CGF_conjugate,
         self.nstep,
         self.tol) = (randomization_CGF_conjugate,
                      nstep,
                      tol)
        rr.smooth_atom.__init__(self,
                                randomization_CGF_conjugate.shape,
                                initial=initial,
                                coef=coef,
                                offset=offset,
                                quadratic=quadratic)

    def smooth_objective(self, arg, mode='both', check_feasibility=False):
        """Return value and/or gradient of the cube objective at `arg`.

        mode is one of 'func', 'grad', 'both'. The gradient has one entry
        per z coordinate plus a final entry for c (appended last).
        """
        arg = self.apply_offset(arg)
        arg_shape = arg.shape[0]
        # boolean mask selecting the final coordinate (c); the rest is z
        c_bool = np.zeros(arg_shape, bool)
        c_bool[(arg_shape-1):] = 1
        z = arg[~c_bool]
        c = arg[c_bool]
        # inner solve: optimizer is the minimizer, value the optimal value
        optimizer, value = cube_subproblem_fs_linear(z,
                                                     c,
                                                     self.randomization_CGF_conjugate,
                                                     nstep=self.nstep,
                                                     tol=self.tol)
        # envelope-theorem gradient in z -- TODO confirm derivation
        gradient_z = z + optimizer
        lagrange = c * np.ones(z.shape[0])
        _diff_c = optimizer - lagrange  # z - c < 0
        _sum_c = optimizer + lagrange  # z + c > 0
        # derivative of the barrier terms with respect to c (chain rule
        # through the inner optimizer) -- NOTE(review): verify algebra
        _num = -2. / ((_diff_c - lagrange)**2) - 1. / (_diff_c**2) + 2. / ((_sum_c + lagrange)**2) \
               + 1. / (_sum_c**2)
        _den = 1. - (1. / _diff_c ** 2 + 1. / ((_diff_c - lagrange) ** 2) + 1. / _sum_c ** 2 + \
                     1. / ((_sum_c + lagrange) ** 2))
        gradient_c_max = np.true_divide(_num, _den)
        vec = 2. / (_diff_c - lagrange) + 2. / (_sum_c + lagrange) - 1. / _diff_c - 1. / _sum_c
        gradient_c = (z + optimizer + vec).T.dot(gradient_c_max)
        #print gradient_z.shape, gradient_c.shape
        # full gradient: z part first, scalar c part appended last
        gradient = np.append(gradient_z, gradient_c)
        if mode == 'func':
            return self.scale(value)
        elif mode == 'grad':
            return self.scale(gradient)
        elif mode == 'both':
            return self.scale(value), self.scale(gradient)
        else:
            raise ValueError("mode incorrectly specified")
class selection_probability_objective_fs(rr.smooth_atom):
    """Selection-probability objective for one step of forward stepwise
    (E = 1 active variable), built from an active conjugate loss, a cube
    loss over the inactive coordinates, a Gaussian likelihood term and a
    non-negativity barrier on the optimization variable."""

    def __init__(self,
                 X,
                 feasible_point,
                 active,
                 active_sign,
                 mean_parameter,  # in R^n
                 noise_variance,
                 randomizer,
                 coef=1.,
                 offset=None,
                 quadratic=None,
                 nstep=10):
        """
        Objective function for $\beta_E$ (i.e. active) with $E$ the `active_set` optimization
        variables, and data $z \in \mathbb{R}^n$ (i.e. response).
        NEEDS UPDATING

        Above, $\beta_E^*$ is the `parameter`, $b_{\geq}$ is the softmax of the non-negative constraint,

        $$
        B_E = X^TX_E
        $$

        and

        $$
        \gamma_E = \begin{pmatrix} \lambda s_E\\ 0\end{pmatrix}
        $$

        with $\lambda$ being `lagrange`.

        Parameters
        ----------
        X : np.float
            Design matrix of shape (n,p)
        active : np.bool
            Boolean indicator of active set of shape (p,).
        active_signs : np.float
            Signs of active coefficients, of shape (active.sum(),).
        lagrange : np.float
            Array of lagrange penalties for LASSO of shape (p,)
        parameter : np.float
            Parameter $\beta_E^*$ for which we want to
            approximate the selection probability.
            Has shape (active_set.sum(),)
        randomization : np.float
            Variance of IID Gaussian noise
            that was added before selection.
        """
        n, p = X.shape
        # forward stepwise adds exactly one variable per step
        E = 1
        self._X = X
        self.active = active
        self.noise_variance = noise_variance
        self.randomization = randomizer

        self.inactive_conjugate = self.active_conjugate = randomizer.CGF_conjugate
        if self.active_conjugate is None:
            raise ValueError(
                'randomization must know its CGF_conjugate -- currently only isotropic_gaussian and laplace are implemented and are assumed to be randomization with IID coordinates')

        # optimization variable layout: [response part (n), active opt var (E)]
        initial = np.zeros(n + E, )
        initial[n:] = feasible_point

        rr.smooth_atom.__init__(self,
                                (n + E,),
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial,
                                coef=coef)

        self.coefs[:] = initial

        nonnegative = nonnegative_softmax(E)
        opt_vars = np.zeros(n + E, bool)
        opt_vars[n:] = 1

        self._opt_selector = rr.selector(opt_vars, (n + E,))
        self.nonnegative_barrier = nonnegative.linear(self._opt_selector)
        self._response_selector = rr.selector(~opt_vars, (n + E,))

        sign_array = np.zeros((E, E))
        sign_array[0:, :] = active_sign
        #print sign_array.shape, X[:, active].T.shape, X[:, ~active].T.shape, np.zeros(p-E).shape

        # affine maps feeding the active conjugate and the cube objective
        self.A_active = np.hstack([-X[:, active].T, sign_array])
        self.A_inactive_1 = np.hstack([-X[:, ~active].T, np.zeros((p - E, 1))])
        self.A_inactive_2 = np.hstack([np.zeros((n, E)).T, np.ones((E, E)).T])
        self.A_inactive = np.vstack([self.A_inactive_1, self.A_inactive_2])
        #print self.A_active.shape, self.A_inactive.shape

        # defines \gamma and likelihood loss
        self.set_parameter(mean_parameter, noise_variance)

        self.active_conj_loss = rr.affine_smooth(self.active_conjugate, self.A_active)

        # BUG FIX: the original referenced the undefined name
        # `cube_objective_fs`; the class defined in this module is
        # `cube_objective_fs_linear` (a NameError at construction time).
        cube_obj = cube_objective_fs_linear(self.inactive_conjugate)

        self.cube_loss = rr.affine_smooth(cube_obj, self.A_inactive)

        self.total_loss = rr.smooth_sum([self.active_conj_loss,
                                         self.cube_loss,
                                         self.likelihood_loss,
                                         self.nonnegative_barrier])

    def set_parameter(self, mean_parameter, noise_variance):
        """
        Set $\beta_E^*$.
        """
        mean_parameter = np.squeeze(mean_parameter)
        likelihood_loss = rr.signal_approximator(mean_parameter, coef=1. / noise_variance)
        self.likelihood_loss = rr.affine_smooth(likelihood_loss, self._response_selector)

    def smooth_objective(self, param, mode='both', check_feasibility=False):
        """
        Evaluate the smooth objective, computing its value, gradient or both.

        Parameters
        ----------
        mean_param : ndarray
            The current parameter values.
        mode : str
            One of ['func', 'grad', 'both'].
        check_feasibility : bool
            If True, return `np.inf` when
            point is not feasible, i.e. when `mean_param` is not
            in the domain.

        Returns
        -------
        If `mode` is 'func' returns just the objective value
        at `mean_param`, else if `mode` is 'grad' returns the gradient
        else returns both.
        """
        param = self.apply_offset(param)

        if mode == 'func':
            f = self.total_loss.smooth_objective(param, 'func')
            return self.scale(f)
        elif mode == 'grad':
            g = self.total_loss.smooth_objective(param, 'grad')
            return self.scale(g)
        elif mode == 'both':
            f, g = self.total_loss.smooth_objective(param, 'both')
            return self.scale(f), self.scale(g)
        else:
            raise ValueError("mode incorrectly specified")

    def minimize(self, initial=None, min_its=10, max_its=50, tol=1.e-10):
        """Minimize the objective with regreg's solver, under a
        non-negativity constraint on the optimization variable."""
        nonneg_con = self._opt_selector.output_shape[0]
        constraint = rr.separable(self.shape,
                                  [rr.nonnegative((nonneg_con,), offset=1.e-12 * np.ones(nonneg_con))],
                                  [self._opt_selector.index_obj])

        problem = rr.separable_problem.fromatom(constraint, self)
        problem.coefs[:] = 0.5
        soln = problem.solve(max_its=max_its, min_its=min_its, tol=tol)
        value = problem.objective(soln)
        return soln, value

    def minimize2(self, step=1, nstep=30, tol=1.e-8):
        """Hand-rolled damped gradient descent (scaled by noise_variance)
        keeping the optimization coordinates strictly positive.

        Returns the final iterate and its objective value."""
        n, p = self._X.shape

        current = self.coefs
        current_value = np.inf

        objective = lambda u: self.smooth_objective(u, 'func')
        grad = lambda u: self.smooth_objective(u, 'grad')

        for itercount in range(nstep):
            newton_step = grad(current) * self.noise_variance

            # make sure proposal is feasible (opt coordinates stay positive)
            count = 0
            while True:
                count += 1
                proposal = current - step * newton_step
                if np.all(proposal[n:] > 0):
                    break
                step *= 0.5
                if count >= 40:
                    raise ValueError('not finding a feasible point')

            # make sure proposal is a descent
            count = 0
            while True:
                proposal = current - step * newton_step
                proposed_value = objective(proposal)
                # print(current_value, proposed_value, 'minimize')
                if proposed_value <= current_value:
                    break
                step *= 0.5

            # stop if relative decrease is small
            if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value):
                current = proposal
                current_value = proposed_value
                break

            current = proposal
            current_value = proposed_value
            if itercount % 4 == 0:
                step *= 2
            #print('iter', itercount)
        value = objective(current)
        return current, value
# small edit in name: cube_objective_fs -> cube_objective_fs_linear (revised copy below)
import numpy as np
from selection.algorithms.softmax import nonnegative_softmax
import regreg.api as rr
from selection.bayesian.selection_probability_rr import cube_barrier_scaled, cube_gradient_scaled, cube_hessian_scaled
from selection.algorithms.softmax import nonnegative_softmax
def cube_subproblem_fs_linear(argument,
                              c,
                              randomization_CGF_conjugate,
                              nstep=100,
                              initial=None,
                              lipschitz=0,
                              tol=1.e-10):
    '''
    Solve the subproblem
    $$
    \text{minimize}_{z} \Lambda_{-E}^*(u + z_{-E}) + b_{-E}(z; c)
    $$
    where $u$ is `argument`, $\Lambda_{-E}^*$ is the
    convex conjugate of the $-E$ coordinates of the
    randomization (assumes that randomization has independent
    coordinates) and
    $b_{-E}$ is a barrier approximation to
    the cube $\prod_{j \in -E} [-\lambda_j,\lambda_j]$ with
    $\lambda$ being `lagrange`.

    Uses a damped, coordinatewise-scaled Newton iteration with a
    feasibility line search and a descent line search.

    Returns the maximizer and the value of the convex conjugate.
    '''
    k = argument.shape[0]
    lagrange = c * np.ones(k)

    if initial is None:
        # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `float` is the documented replacement.
        current = np.zeros(k, dtype=float)
    else:
        current = initial  # no copy -- caller's array is updated in place

    current_value = np.inf

    conj_value = lambda x: randomization_CGF_conjugate.smooth_objective(x, 'func')
    conj_grad = lambda x: randomization_CGF_conjugate.smooth_objective(x, 'grad')

    # per-coordinate step sizes (halved independently per failing coordinate)
    step = np.ones(k, dtype=float)
    objective = lambda u: cube_barrier_scaled(u, lagrange) + conj_value(argument + u)

    for itercount in range(nstep):
        # diagonal (Hessian-scaled) Newton direction
        newton_step = ((cube_gradient_scaled(current, lagrange) +
                        (conj_grad(argument + current))) /
                       (cube_hessian_scaled(current, lagrange) + lipschitz))

        # make sure proposal is feasible (inside the cube)
        count = 0
        while True:
            count += 1
            proposal = current - step * newton_step
            failing = (proposal > lagrange) + (proposal < -lagrange)
            if not failing.sum():
                break
            # halve the step only on the coordinates that left the cube
            step *= 0.5 ** failing

            if count >= 40:
                raise ValueError('not finding a feasible point')

        # make sure proposal is a descent
        while True:
            proposal = current - step * newton_step
            proposed_value = objective(proposal)
            if proposed_value <= current_value:
                break
            step *= 0.5

        # stop if relative decrease is small
        if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value):
            current = proposal
            current_value = proposed_value
            break

        current = proposal
        current_value = proposed_value

        # cautiously grow the step again every few iterations
        if itercount % 4 == 0:
            step *= 2

    value = objective(current)
    return current, value
class cube_objective_fs_linear(rr.smooth_atom):
    """Smooth atom whose value/gradient come from solving the cube
    subproblem (`cube_subproblem_fs_linear`) at the given argument.

    The argument is split as [z (all but last coord), c (last coord)];
    c plays the role of the cube half-width (lagrange) -- TODO confirm
    against callers.
    """

    def __init__(self,
                 randomization_CGF_conjugate,
                 nstep=10,
                 tol=1.e-10,
                 initial=None,
                 coef=1.,
                 offset=None,
                 quadratic=None):
        """Store the conjugate of the randomization CGF and the solver
        settings (nstep, tol) used by the inner Newton iteration."""
        (self.randomization_CGF_conjugate,
         self.nstep,
         self.tol) = (randomization_CGF_conjugate,
                      nstep,
                      tol)
        rr.smooth_atom.__init__(self,
                                randomization_CGF_conjugate.shape,
                                initial=initial,
                                coef=coef,
                                offset=offset,
                                quadratic=quadratic)

    def smooth_objective(self, arg, mode='both', check_feasibility=False):
        """Return value and/or gradient of the cube objective at `arg`.

        mode is one of 'func', 'grad', 'both'. The gradient has one entry
        per z coordinate plus a final entry for c (appended last).
        """
        arg = self.apply_offset(arg)
        arg_shape = arg.shape[0]
        # boolean mask selecting the final coordinate (c); the rest is z
        c_bool = np.zeros(arg_shape, bool)
        c_bool[(arg_shape-1):] = 1
        z = arg[~c_bool]
        c = arg[c_bool]
        # inner solve: optimizer is the minimizer, value the optimal value
        optimizer, value = cube_subproblem_fs_linear(z,
                                                     c,
                                                     self.randomization_CGF_conjugate,
                                                     nstep=self.nstep,
                                                     tol=self.tol)
        # envelope-theorem gradient in z -- TODO confirm derivation
        gradient_z = z + optimizer
        lagrange = c * np.ones(z.shape[0])
        _diff_c = optimizer - lagrange  # z - c < 0
        _sum_c = optimizer + lagrange  # z + c > 0
        # derivative of the barrier terms with respect to c (chain rule
        # through the inner optimizer) -- NOTE(review): verify algebra
        _num = -2. / ((_diff_c - lagrange)**2) - 1. / (_diff_c**2) + 2. / ((_sum_c + lagrange)**2) \
               + 1. / (_sum_c**2)
        _den = 1. - (1. / _diff_c ** 2 + 1. / ((_diff_c - lagrange) ** 2) + 1. / _sum_c ** 2 + \
                     1. / ((_sum_c + lagrange) ** 2))
        gradient_c_max = np.true_divide(_num, _den)
        vec = 2. / (_diff_c - lagrange) + 2. / (_sum_c + lagrange) - 1. / _diff_c - 1. / _sum_c
        gradient_c = (z + optimizer + vec).T.dot(gradient_c_max)
        #print gradient_z.shape, gradient_c.shape
        # full gradient: z part first, scalar c part appended last
        gradient = np.append(gradient_z, gradient_c)
        if mode == 'func':
            return self.scale(value)
        elif mode == 'grad':
            return self.scale(gradient)
        elif mode == 'both':
            return self.scale(value), self.scale(gradient)
        else:
            raise ValueError("mode incorrectly specified")
class selection_probability_objective_fs(rr.smooth_atom):
    """Selection-probability objective for one step of forward stepwise
    (E = 1 active variable), built from an active conjugate loss, a cube
    loss over the inactive coordinates, a Gaussian likelihood term and a
    non-negativity barrier on the optimization variable."""

    def __init__(self,
                 X,
                 feasible_point,
                 active,
                 active_sign,
                 mean_parameter,  # in R^n
                 noise_variance,
                 randomizer,
                 coef=1.,
                 offset=None,
                 quadratic=None,
                 nstep=10):
        """
        Objective function for $\beta_E$ (i.e. active) with $E$ the `active_set` optimization
        variables, and data $z \in \mathbb{R}^n$ (i.e. response).
        NEEDS UPDATING

        Above, $\beta_E^*$ is the `parameter`, $b_{\geq}$ is the softmax of the non-negative constraint,

        $$
        B_E = X^TX_E
        $$

        and

        $$
        \gamma_E = \begin{pmatrix} \lambda s_E\\ 0\end{pmatrix}
        $$

        with $\lambda$ being `lagrange`.

        Parameters
        ----------
        X : np.float
            Design matrix of shape (n,p)
        active : np.bool
            Boolean indicator of active set of shape (p,).
        active_signs : np.float
            Signs of active coefficients, of shape (active.sum(),).
        lagrange : np.float
            Array of lagrange penalties for LASSO of shape (p,)
        parameter : np.float
            Parameter $\beta_E^*$ for which we want to
            approximate the selection probability.
            Has shape (active_set.sum(),)
        randomization : np.float
            Variance of IID Gaussian noise
            that was added before selection.
        """
        n, p = X.shape
        # forward stepwise adds exactly one variable per step
        E = 1
        self._X = X
        self.active = active
        self.noise_variance = noise_variance
        self.randomization = randomizer

        self.inactive_conjugate = self.active_conjugate = randomizer.CGF_conjugate
        if self.active_conjugate is None:
            raise ValueError(
                'randomization must know its CGF_conjugate -- currently only isotropic_gaussian and laplace are implemented and are assumed to be randomization with IID coordinates')

        # optimization variable layout: [response part (n), active opt var (E)]
        initial = np.zeros(n + E, )
        initial[n:] = feasible_point

        rr.smooth_atom.__init__(self,
                                (n + E,),
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial,
                                coef=coef)

        self.coefs[:] = initial

        nonnegative = nonnegative_softmax(E)
        opt_vars = np.zeros(n + E, bool)
        opt_vars[n:] = 1

        self._opt_selector = rr.selector(opt_vars, (n + E,))
        self.nonnegative_barrier = nonnegative.linear(self._opt_selector)
        self._response_selector = rr.selector(~opt_vars, (n + E,))

        sign_array = np.zeros((E, E))
        sign_array[0:, :] = active_sign
        #print sign_array.shape, X[:, active].T.shape, X[:, ~active].T.shape, np.zeros(p-E).shape

        # affine maps feeding the active conjugate and the cube objective
        self.A_active = np.hstack([-X[:, active].T, sign_array])
        self.A_inactive_1 = np.hstack([-X[:, ~active].T, np.zeros((p - E, 1))])
        self.A_inactive_2 = np.hstack([np.zeros((n, E)).T, np.ones((E, E)).T])
        self.A_inactive = np.vstack([self.A_inactive_1, self.A_inactive_2])
        #print self.A_active.shape, self.A_inactive.shape

        # defines \gamma and likelihood loss
        self.set_parameter(mean_parameter, noise_variance)

        self.active_conj_loss = rr.affine_smooth(self.active_conjugate, self.A_active)

        cube_obj = cube_objective_fs_linear(self.inactive_conjugate)

        self.cube_loss = rr.affine_smooth(cube_obj, self.A_inactive)

        self.total_loss = rr.smooth_sum([self.active_conj_loss,
                                         self.cube_loss,
                                         self.likelihood_loss,
                                         self.nonnegative_barrier])

    def set_parameter(self, mean_parameter, noise_variance):
        """
        Set $\beta_E^*$.
        """
        mean_parameter = np.squeeze(mean_parameter)
        likelihood_loss = rr.signal_approximator(mean_parameter, coef=1. / noise_variance)
        self.likelihood_loss = rr.affine_smooth(likelihood_loss, self._response_selector)

    def smooth_objective(self, param, mode='both', check_feasibility=False):
        """
        Evaluate the smooth objective, computing its value, gradient or both.

        Parameters
        ----------
        mean_param : ndarray
            The current parameter values.
        mode : str
            One of ['func', 'grad', 'both'].
        check_feasibility : bool
            If True, return `np.inf` when
            point is not feasible, i.e. when `mean_param` is not
            in the domain.

        Returns
        -------
        If `mode` is 'func' returns just the objective value
        at `mean_param`, else if `mode` is 'grad' returns the gradient
        else returns both.
        """
        param = self.apply_offset(param)

        if mode == 'func':
            f = self.total_loss.smooth_objective(param, 'func')
            return self.scale(f)
        elif mode == 'grad':
            g = self.total_loss.smooth_objective(param, 'grad')
            return self.scale(g)
        elif mode == 'both':
            f, g = self.total_loss.smooth_objective(param, 'both')
            return self.scale(f), self.scale(g)
        else:
            raise ValueError("mode incorrectly specified")

    def minimize(self, initial=None, min_its=10, max_its=50, tol=1.e-10):
        """Minimize the objective with regreg's solver, under a
        non-negativity constraint on the optimization variable."""
        nonneg_con = self._opt_selector.output_shape[0]
        constraint = rr.separable(self.shape,
                                  [rr.nonnegative((nonneg_con,), offset=1.e-12 * np.ones(nonneg_con))],
                                  [self._opt_selector.index_obj])

        problem = rr.separable_problem.fromatom(constraint, self)
        problem.coefs[:] = 0.5
        soln = problem.solve(max_its=max_its, min_its=min_its, tol=tol)
        value = problem.objective(soln)
        return soln, value

    def minimize2(self, step=1, nstep=30, tol=1.e-8):
        """Hand-rolled damped gradient descent (scaled by noise_variance)
        keeping the optimization coordinates strictly positive.

        Returns the final iterate and its objective value."""
        n, p = self._X.shape

        current = self.coefs
        current_value = np.inf

        objective = lambda u: self.smooth_objective(u, 'func')
        grad = lambda u: self.smooth_objective(u, 'grad')

        for itercount in range(nstep):
            newton_step = grad(current) * self.noise_variance

            # make sure proposal is feasible (opt coordinates stay positive)
            count = 0
            while True:
                count += 1
                proposal = current - step * newton_step
                if np.all(proposal[n:] > 0):
                    break
                step *= 0.5
                if count >= 40:
                    raise ValueError('not finding a feasible point')

            # make sure proposal is a descent
            count = 0
            while True:
                proposal = current - step * newton_step
                proposed_value = objective(proposal)
                # print(current_value, proposed_value, 'minimize')
                if proposed_value <= current_value:
                    break
                step *= 0.5

            # stop if relative decrease is small
            if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value):
                current = proposal
                current_value = proposed_value
                break

            current = proposal
            current_value = proposed_value
            if itercount % 4 == 0:
                step *= 2
            #print('iter', itercount)
        value = objective(current)
        return current, value
#!/usr/bin/env python
import logging
import os
import sys
import json
# Per-application cache of parsed config objects; populated by get_conf().
_CONF_OBJ_DICT = {}
def get_conf(request):
    """Return the parsed config for the current web2py application.

    Parsers are cached per application name in the module-level
    _CONF_OBJ_DICT, so the config file on disk is only read once per app.
    Prefers private/localconfig over private/config when it exists.
    """
    app_name = request.application
    conf = _CONF_OBJ_DICT.get(app_name)
    if conf is not None:
        return conf
    from ConfigParser import SafeConfigParser
    conf = SafeConfigParser({})
    # DON'T convert property names to lower-case!
    conf.optionxform = str
    local_path = "applications/%s/private/localconfig" % app_name
    if os.path.isfile(local_path):
        conf.read(local_path)
    else:
        conf.read("applications/%s/private/config" % app_name)
    _CONF_OBJ_DICT[app_name] = conf
    return conf
def get_logging_level(request):
    '''
    Converts a config file's [logging] section `level` attribute to a
    logging-module constant.

    Unknown level names map to logging.NOTSET; if the config cannot be
    read at all (missing section/option or unreadable file) the
    historical default logging.INFO is returned.
    '''
    conf = get_conf(request)
    try:
        level_str = conf.get("logging", "level").upper()
    except Exception:
        # keep the original best-effort behavior, but don't use a bare
        # except: that would also swallow KeyboardInterrupt/SystemExit
        return logging.INFO
    # map validated names via getattr instead of a six-way if/elif chain
    if level_str in ("NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"):
        return getattr(logging, level_str)
    return logging.NOTSET
def get_logger(request, name):
    '''
    Return the logger registered under `name`, configuring it on first use.

    Configuration (level from the app config, a stream handler and a
    simple formatter) happens only once per logger; an `is_configured`
    flag on the logger instance guards against duplicate handlers.
    '''
    logger = logging.getLogger(name)
    if not getattr(logger, 'is_configured', False):
        level = get_logging_level(request)
        formatter = logging.Formatter("%(levelname) 8s: %(message)s")
        formatter.datefmt = '%H:%M:%S'
        logger.setLevel(level)
        handler = logging.StreamHandler()
        handler.setLevel(level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.is_configured = True
    return logger
def get_opentree_services_domains(request):
    '''
    Reads the local configuration to get the domains and returns a dictionary
    with keys:
        treemachine_domain
        taxomachine_domain
        oti_domain
        opentree_api_domain
    the values of the domain will contain the port (when needed)
    This is mainly useful for debugging because it lets developers use local
    instances of the service by tweaking private/conf (see private/conf.example)
    '''
    conf = get_conf(request)
    # one "<name>_domain" key per entry in the [domains] section
    return dict(("%s_domain" % name, url)
                for name, url in conf.items('domains'))
def get_opentree_services_method_urls(request):
    '''
    Reads the local configuration to build on domains and return a dictionary
    with keys for all domains AND their service methods, whose values are
    URLs combining domain and partial paths
    This is useful for debugging and for adapting to different ways of
    configuring services, eg, proxied through a single domain
    (see private/conf.example)
    '''
    domains = get_opentree_services_domains(request)
    conf = get_conf(request)
    method_urls = domains.copy()
    for method_name, template in conf.items('method_urls'):
        # expand any '{<name>_domain}' tokens embedded in the URL template
        expanded = template
        for domain_name, domain_url in domains.items():
            expanded = expanded.replace('{%s}' % domain_name, domain_url)
        method_urls[method_name] = expanded
    return method_urls
def fetch_current_TNRS_context_names(request):
    """Fetch the current TNRS context names from the remote service.

    Returns a list of UTF-8 encoded names (LIFE/PLANTS/ANIMALS groups
    first, then any other groups in server order), or a tuple
    ('ERROR', message) on any failure.
    """
    try:
        # fetch the latest contextName values as JSON from remote site
        from gluon.tools import fetch
        import simplejson
        method_dict = get_opentree_services_method_urls(request)
        fetch_url = method_dict['getContextsJSON_url']
        # as usual, this needs to be a POST (pass empty fetch_args)
        contextnames_response = fetch(fetch_url, data='')
        contextnames_json = simplejson.loads(contextnames_response)
        # start with LIFE group (incl. 'All life'), and add any other ordered suggestions
        ordered_group_names = unique_ordered_list(
            ['LIFE', 'PLANTS', 'ANIMALS'] + [g for g in contextnames_json])
        context_names = []
        for gname in ordered_group_names:
            # allow for eventual removal or renaming of expected groups
            if gname in contextnames_json:
                context_names += [n.encode('utf-8') for n in contextnames_json[gname]]
        return context_names
    except Exception as e:
        # BUG FIX: `except Exception, e` is Python-2-only syntax and
        # `e.message` does not exist on many exception types; use the
        # portable `as e` form and str(e) instead.
        return ('ERROR', str(e))
def unique_ordered_list(seq):
    """Return the items of `seq` with duplicates removed, preserving the
    order of first occurrence. Items must be hashable."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
# adapted from api.opentreeoflife.org/controllers/default.py (__extract_nexson_from_http_call)
def extract_nexson_from_http_call(request, **kwargs):
    """Returns the nexson blob from `kwargs` or the request.body.

    The payload may arrive as a dict or a JSON string; a top-level
    'nexson' wrapper key, if present, is unwrapped. Raises HTTP 400 if
    the payload is not valid JSON.
    """
    try:
        # check for kwarg 'nexson', or load the full request body
        if 'nexson' in kwargs:
            nexson = kwargs.get('nexson', {})
        else:
            nexson = request.body.read()

        if not isinstance(nexson, dict):
            nexson = json.loads(nexson)
        if 'nexson' in nexson:
            nexson = nexson['nexson']
    except Exception:
        # TODO: _LOG.exception('Exception getting nexson content in extract_nexson_from_http_call')
        # BUG FIX: HTTP was never imported in this module, so the error path
        # itself raised NameError; import it locally before raising.
        from gluon.http import HTTP
        raise HTTP(400, json.dumps({"error": 1, "description": 'NexSON must be valid JSON'}))
    return nexson
# Fix missing import (HTTP exceptions) -- revised copy below adds `from gluon.http import HTTP`
#!/usr/bin/env python
import logging
import os
import sys
import json
from gluon.http import HTTP
# Per-application cache of parsed config objects; populated by get_conf().
_CONF_OBJ_DICT = {}
def get_conf(request):
    """Return the parsed config for the current web2py application.

    Parsers are cached per application name in the module-level
    _CONF_OBJ_DICT, so the config file on disk is only read once per app.
    Prefers private/localconfig over private/config when it exists.
    """
    app_name = request.application
    conf = _CONF_OBJ_DICT.get(app_name)
    if conf is not None:
        return conf
    from ConfigParser import SafeConfigParser
    conf = SafeConfigParser({})
    # DON'T convert property names to lower-case!
    conf.optionxform = str
    local_path = "applications/%s/private/localconfig" % app_name
    if os.path.isfile(local_path):
        conf.read(local_path)
    else:
        conf.read("applications/%s/private/config" % app_name)
    _CONF_OBJ_DICT[app_name] = conf
    return conf
def get_logging_level(request):
    '''
    Converts a config files logging section, level attribute to a logging modules'
    value (default is logging.INFO)
    '''
    # Unknown names map to NOTSET; any failure reading the config yields INFO.
    level_by_name = {
        "NOTSET": logging.NOTSET,
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    conf = get_conf(request)
    try:
        level_str = conf.get("logging", "level").upper()
        return level_by_name.get(level_str, logging.NOTSET)
    except:
        return logging.INFO
def get_logger(request, name):
    '''
    Returns a logger object with the level set based on the config file
    '''
    logger = logging.getLogger(name)
    # configure each named logger only once; the flag lives on the logger itself
    if not getattr(logger, 'is_configured', False):
        level = get_logging_level(request)
        formatter = logging.Formatter("%(levelname) 8s: %(message)s")
        formatter.datefmt = '%H:%M:%S'
        logger.setLevel(level)
        handler = logging.StreamHandler()
        handler.setLevel(level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.is_configured = True
    return logger
def get_opentree_services_domains(request):
    '''
    Reads the local configuration to get the domains and returns a dictionary
    with keys:
        treemachine_domain
        taxomachine_domain
        oti_domain
        opentree_api_domain
    the values of the domain will contain the port (when needed)
    This is mainly useful for debugging because it lets developers use local
    instances of the service by tweaking private/conf (see private/conf.example)
    '''
    conf = get_conf(request)
    return dict(("%s_domain" % name, url) for name, url in conf.items('domains'))
def get_opentree_services_method_urls(request):
    '''
    Reads the local configuration to build on domains and return a dictionary
    with keys for all domains AND their service methods, whose values are
    URLs combining domain and partial paths
    This is useful for debugging and for adapting to different ways of
    configuring services, eg, proxied through a single domain
    (see private/conf.example)
    '''
    domains = get_opentree_services_domains(request)
    conf = get_conf(request)
    method_urls = domains.copy()
    for mname, murl in conf.items('method_urls'):
        # substitute any domain tokens, eg, '{treemachine_domain}'
        for dname, durl in domains.items():
            murl = murl.replace('{%s}' % dname, durl)
        method_urls[mname] = murl
    return method_urls
def fetch_current_TNRS_context_names(request):
    """Return an ordered, de-duplicated list of TNRS context names.

    Fetches the contexts JSON from the configured service URL and flattens
    its groups, LIFE/PLANTS/ANIMALS first. On any failure the tuple
    ('ERROR', message) is returned instead of raising.
    """
    try:
        # fetch the latest contextName values as JSON from remote site
        from gluon.tools import fetch
        import simplejson
        method_dict = get_opentree_services_method_urls(request)
        fetch_url = method_dict['getContextsJSON_url']
        # as usual, this needs to be a POST (pass empty fetch_args)
        contextnames_response = fetch(fetch_url, data='')
        contextnames_json = simplejson.loads( contextnames_response )
        # start with LIFE group (incl. 'All life'), and add any other ordered suggestions
        ordered_group_names = unique_ordered_list(['LIFE','PLANTS','ANIMALS'] + [g for g in contextnames_json])
        context_names = [ ]
        for gname in ordered_group_names:
            # allow for eventual removal or renaming of expected groups
            if gname in contextnames_json:
                context_names += [n.encode('utf-8') for n in contextnames_json[gname] ]
        # draftTreeName = str(ids_json['draftTreeName']).encode('utf-8')
        return (context_names)
    except Exception, e:
        # throw 403 or 500 or just leave it
        return ('ERROR', e.message)
def unique_ordered_list(seq):
    """Return the items of `seq` in first-seen order, with duplicates dropped."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
# adapted from api.opentreeoflife.org/controllers/default.py (__extract_nexson_from_http_call)
def extract_nexson_from_http_call(request, **kwargs):
    """Returns the nexson blob from `kwargs` or the request.body"""
    try:
        # prefer an explicit 'nexson' kwarg; otherwise read the raw request body
        if 'nexson' in kwargs:
            blob = kwargs.get('nexson', {})
        else:
            blob = request.body.read()
        # parse JSON text into a dict, then unwrap an outer {'nexson': ...} layer
        if not isinstance(blob, dict):
            blob = json.loads(blob)
        if 'nexson' in blob:
            blob = blob['nexson']
    except:
        # TODO: _LOG.exception('Exception getting nexson content in extract_nexson_from_http_call')
        raise HTTP(400, json.dumps({"error": 1, "description": 'NexSON must be valid JSON'}))
    return blob
|
#!/usr/bin/env python
###############################################################################
# Copyright (c) 2015 Jamis Hoo
# Project:
# Filename: uploader.py
# Version: 1.0
# Author: Jamis Hoo
# E-mail: hoojamis@gmail.com
# Date: Aug 3, 2015
# Time: 13:29:39
# Description:
###############################################################################
from __future__ import print_function
import tencentyun
class Uploader(object):
    """Thin wrapper around tencentyun.ImageV2 for uploading images.

    Methods return shell-style status codes: 0 on success, 1 on failure.
    Failed responses are printed for debugging.
    """

    def __init__(self, appid, bucket, secret_id, secret_key):
        self.appid = appid
        self.bucket = bucket
        self.secret_id = secret_id
        self.secret_key = secret_key
        self.image_obj = tencentyun.ImageV2(self.appid, self.secret_id, self.secret_key)

    def upload_filename(self, filename, fileid):
        """Upload the file at `filename` as `fileid`; return 0 on success, 1 on failure."""
        response_obj = self.image_obj.upload(filename, self.bucket, fileid)
        succeeded = "code" in response_obj and response_obj["code"] == 0
        if not succeeded:
            print(fileid, response_obj)
        return 0 if succeeded else 1

    def upload_binary(self, binary, fileid):
        """Upload in-memory `binary` data as `fileid`; return 0 on success, 1 on failure."""
        response_obj = self.image_obj.upload_binary(binary, self.bucket, fileid)
        succeeded = "code" in response_obj and response_obj["code"] == 0
        if not succeeded:
            print(fileid, response_obj)
        return 0 if succeeded else 1
Add debug output: print the returned download_url after a successful file upload
#!/usr/bin/env python
###############################################################################
# Copyright (c) 2015 Jamis Hoo
# Project:
# Filename: uploader.py
# Version: 1.0
# Author: Jamis Hoo
# E-mail: hoojamis@gmail.com
# Date: Aug 3, 2015
# Time: 13:29:39
# Description:
###############################################################################
from __future__ import print_function
import tencentyun
class Uploader(object):
    """Thin wrapper around tencentyun.ImageV2 for uploading images.

    Methods return shell-style status codes: 0 on success, 1 on failure.
    Failures are printed for debugging; a successful file upload also
    prints the resulting download URL.
    """

    def __init__(self, appid, bucket, secret_id, secret_key):
        self.appid = appid
        self.bucket = bucket
        self.secret_id = secret_id
        self.secret_key = secret_key
        self.image_obj = tencentyun.ImageV2(self.appid, self.secret_id, self.secret_key)

    def upload_filename(self, filename, fileid):
        """Upload the file at `filename` as `fileid`; return 0 on success, 1 on failure."""
        response_obj = self.image_obj.upload(filename, self.bucket, fileid)
        succeeded = "code" in response_obj and response_obj["code"] == 0
        if succeeded:
            # debug aid: show where the uploaded image can be fetched
            print(response_obj["data"]["download_url"])
        else:
            print(fileid, response_obj)
        return 0 if succeeded else 1

    def upload_binary(self, binary, fileid):
        """Upload in-memory `binary` data as `fileid`; return 0 on success, 1 on failure."""
        response_obj = self.image_obj.upload_binary(binary, self.bucket, fileid)
        succeeded = "code" in response_obj and response_obj["code"] == 0
        if not succeeded:
            print(fileid, response_obj)
        return 0 if succeeded else 1
|
from Song import Song
# http://code.google.com/p/leveldb/
# http://code.google.com/p/py-leveldb/
import leveldb
import appinfo
import utils
# see <https://github.com/albertz/binstruct/> for documentation
import binstruct
def dbRepr(o):
	"""Serialize an object to the byte string stored in LevelDB."""
	return binstruct.varEncode(o).tostring()

def dbUnRepr(s):
	"""Inverse of dbRepr: decode a stored byte string back into an object."""
	return binstruct.varDecode(s)
# Structure of the database:
# There is the main song db songs.db:
# songId -> song dict
# songId is any random string, not too long but long enough to avoid >99.999% collisions.
# song dict can contains (specified in code by global Attribs dict later):
# artist: str
# title: str
# album: str
# tags: weighted tagmap, dict tag->[0,1]
# rating: float in [0,1]
# files: dict filename -> dict with entries:
# sha1: str
# metadata: dict
# fingerprint_AcoustId: str
# gain: float
# values should only be stored if they are certain with best accurary
class DB:
	"""Key/value store on top of LevelDB; keys and values are arbitrary
	binstruct-serializable objects (encoded via dbRepr/dbUnRepr).
	Read-modify-write cycles must hold `writelock` (see SongEntry.__setattr__)."""
	def __init__(self, dir):
		import threading
		self.writelock = threading.Lock()
		self.db = leveldb.LevelDB(appinfo.userdir + "/" + dir, max_open_files=200)
	def __getitem__(self, item):
		# missing keys raise KeyError (relied on by getSongId's handler)
		return dbUnRepr(self.db.Get(dbRepr(item)))
	def __setitem__(self, key, value):
		self.db.Put(dbRepr(key), dbRepr(value))
	def __delitem__(self, key):
		self.db.Delete(dbRepr(key))
	def setdefault(self, key, value):
		# NOTE(review): `key in self` relies on Python's __getitem__-based
		# fallback containment protocol (no __contains__/__iter__ defined
		# here) — confirm this behaves as intended for missing keys.
		if key in self:
			return self[key]
		else:
			self[key] = value
			return self[key]
	def rangeIter(self, key_from = None, key_to = None, include_value = True):
		# Iterate decoded (key, value) pairs (or just keys) over a key range;
		# entries that fail to decode are yielded as None.
		def saveDbUnRepr(v):
			try: return dbUnRepr(v)
			except: return None # not/broken binstruct data
		if include_value:
			mapFunc = lambda value: (saveDbUnRepr(value[0]), saveDbUnRepr(value[1]))
		else:
			mapFunc = saveDbUnRepr
		return map(mapFunc, self.db.RangeIter(key_from, key_to, include_value))
def init():
	"""Open both databases: the main song DB and the hash -> songId index."""
	global songDb, songHashDb
	songDb = DB("songs.db")
	songHashDb = DB("songHashs.db")
def normalizedFilename(fn):
	"""Normalize a path and abbreviate the user's home directory prefix to '~'."""
	import os
	fn = os.path.normpath(fn)
	home = os.path.expanduser("~")
	if fn.startswith(home):
		fn = "~" + fn[len(home):]
	return fn
def hash(s):
	"""Return the SHA-1 digest of `s`.

	SHA-1 (not CRC32 or a fast non-crypto hash) because the code largely
	ignores collisions, so they must be practically impossible.
	"""
	import hashlib
	return hashlib.sha1(s).digest()
# Read files in 10 KiB chunks so large files are hashed without loading fully.
HashFileBufferSize = 1024 * 10
def hashFile(f):
	"""Return the SHA-1 digest of a file's contents; `f` is a filename or file object."""
	# NOTE(review): a file opened here is never explicitly closed
	if isinstance(f, (str,unicode)): f = open(f)
	import hashlib
	h = hashlib.sha1()
	while True:
		s = f.read(HashFileBufferSize)
		if not s: break
		h.update(s)
	return h.digest()
# Entries (hash-prefix, attrib, func).
# The function should either return some False value or some non-empty string.
# If an attrib is specified and no func, we just use getattr(song, attrib, None).
# Sources are tried in list order; calcNewSongId uses the first that yields a value.
SongHashSources = [
	("a", "fingerprint_AcoustId", None),
	("h", "sha1", None),
	("p", None, lambda song: normalizedFilename(song.url)),
]
def mapHash(value):
	"""Map a source value to a short (<= 32 byte) byte-string hash key.

	Unicode values are UTF-8 encoded first: the hash/DB layer needs byte
	strings, and sha1() would raise on non-ASCII unicode input.
	"""
	if isinstance(value, unicode): value = value.encode("utf-8")
	if len(value) > 32: value = hash(value)
	return value
def getSongHashSources(song):
	"""Yield every available 'prefix + hash' key for `song` (see SongHashSources)."""
	for prefix, attrib, func in SongHashSources:
		getter = func or (lambda song: getattr(song, attrib, None))
		value = getter(song)
		if value:
			yield prefix + mapHash(value)

def maybeUpdateHashAfterAttribUpdate(song, attrib, value):
	"""If `attrib` is one of the hash sources, index its new value to this song's id."""
	for prefix, attr, _func in SongHashSources:
		if attr == attrib:
			songHashDb[prefix + mapHash(value)] = song.id
			return

def getSongId(song):
	"""Return the stored song id matching any hash source of `song`, else None."""
	for key in getSongHashSources(song):
		try:
			return songHashDb[key]
		except KeyError:
			pass
	return None

def updateHashDb(song, songId):
	"""Index every available hash key of `song` to `songId`."""
	for key in getSongHashSources(song):
		songHashDb[key] = songId

def calcNewSongId(song):
	"""Returns a new unique (in hopefully almost all cases) id for a song.

	Different files with the same song might return the same id.
	"""
	# Use the first available hash source (AcoustID fingerprint, sha1, path),
	# register it in the hash DB, and use it as the id itself.
	for value in getSongHashSources(song):
		value = mapHash(value)
		updateHashDb(song, value)
		return value
	assert False # should not happen. if there are such cases later, extend SongHashSources!
class SongFileEntry(object):
	"""Attribute-style view of one file's sub-dict inside a song's DB entry."""
	def __init__(self, songEntry, url):
		# object.__setattr__ bypasses our own __setattr__ defined below
		object.__setattr__(self, "songEntry", songEntry)
		object.__setattr__(self, "url", url)
	@property
	def _dbDict(self):
		# Note: If this raises an AttributeError for some reason,
		# you will get a *very* strange inf recursion loop in
		# getattr(self, "_dbDict").
		return self.songEntry.files.filesDict.get(self.url, {})
	def __getattr__(self, attr):
		try: return self._dbDict[attr]
		except KeyError: raise AttributeError, "no attrib " + attr
	def __setattr__(self, attr, value):
		# read-modify-write of the whole song dict under the DB write lock
		global songDb
		with songDb.writelock:
			d = self.songEntry._dbDict
			d.setdefault("files",{}).setdefault(self.url,{})[attr] = value
			songDb[self.songEntry.id] = d
class SongFilesDict:
	"""Maps normalized filenames to SongFileEntry views for one song."""
	def __init__(self, songEntry):
		self.songEntry = songEntry
	@property
	def filesDict(self):
		return self.songEntry._dbDict.get("files", {})
	def __getitem__(self, url):
		url = normalizedFilename(url)
		# probe first so unknown files raise (KeyError), then hand out a view
		self.filesDict[url]
		return SongFileEntry(self.songEntry, url)
	def get(self, url):
		# unlike __getitem__, never raises: the view is created unconditionally
		url = normalizedFilename(url)
		return SongFileEntry(self.songEntry, url)
class SongEntry(object):
	"""Attribute-style view of a song's dict in songDb, keyed by the Song's id."""
	def __init__(self, song):
		# bypass __setattr__ (which would write to the DB)
		object.__setattr__(self, "songObj", song)
	@property
	def id(self):
		return self.songObj.id
	@property
	def files(self):
		return SongFilesDict(self)
	@property
	def _dbDict(self):
		# missing songs read as an empty dict
		global songDb
		try: return songDb[self.id]
		except KeyError: return {}
	def __getattr__(self, attr):
		try: return self._dbDict[attr]
		except KeyError: raise AttributeError, "no attrib " + attr
	def __setattr__(self, attr, value):
		# read-modify-write of the whole song dict under the DB write lock
		global songDb
		with songDb.writelock:
			d = self._dbDict
			d[attr] = value
			songDb[self.id] = d
def getSong(song):
	"""Wrap a Song object in its DB-backed SongEntry view."""
	return SongEntry(song)

class Attrib:
	"""Descriptor for one persisted song attribute."""
	def __init__(self, fileSpecific=False):
		# file-specific attribs are stored per file URL under files/<url>
		self.fileSpecific = fileSpecific
	def getObject(self, song):
		"""Return the view (song-level or file-level) that holds this attrib."""
		if self.fileSpecific:
			return getSong(song).files.get(song.url)
		return getSong(song)
# Attribute name -> Attrib descriptor; drives updateSongAttribValue/getSongAttrib.
Attribs = {
	"id": Attrib(), # This is the SongId used here by the DB.
	"artist": Attrib(),
	"title": Attrib(),
	"album": Attrib(),
	"tags": Attrib(),
	"rating": Attrib(),
	"sha1": Attrib(fileSpecific=True),
	"metadata": Attrib(fileSpecific=True),
	"fingerprint_AcoustId": Attrib(fileSpecific=True),
	"gain": Attrib(fileSpecific=True),
	# Note that bmpThumbnail is not here. I think it's to heavy
	# to be stored for each song in the DB. Let's just calculate it
	# on the fly when needed...
	# The Song handling code should not assume that all attribs are
	# defined here by the DB.
	}
def updateSongAttribValue(song, attrib, value):
	"""Persist one attribute value for `song` and refresh the hash index."""
	target = Attribs[attrib].getObject(song)
	setattr(target, attrib, value)
	maybeUpdateHashAfterAttribUpdate(song, attrib, value)

def getSongAttrib(song, attrib):
	"""Read one persisted attribute of `song` (AttributeError if unset)."""
	return getattr(Attribs[attrib].getObject(song), attrib)
# Do that right on first import so that all functions here work.
# NOTE: opening the LevelDB files is an import-time side effect.
init()
def songdbMain():
	"""Module main hook; currently a no-op.

	Later, we might scan through the disc and fill the DB and do updates here.
	"""
	pass
# For debugging
def dumpDatabases():
	"""Pretty-print the full contents of both databases to stdout (debug aid)."""
	global songDb, songHashDb
	import sys
	from pprint import pprint
	print "Main DB:"
	for key,value in songDb.rangeIter():
		sys.stdout.write("%r: \n" % key)
		pprint(value, indent=2)
	print "\nHashes:"
	for key,value in songHashDb.rangeIter():
		sys.stdout.write("%r: " % key)
		pprint(value, indent=2)
Fix mapHash: encode unicode values as UTF-8 before hashing/storing (sha1 and the DB need byte strings)
from Song import Song
# http://code.google.com/p/leveldb/
# http://code.google.com/p/py-leveldb/
import leveldb
import appinfo
import utils
# see <https://github.com/albertz/binstruct/> for documentation
import binstruct
def dbRepr(o):
	"""Serialize an object to the byte string stored in LevelDB."""
	return binstruct.varEncode(o).tostring()

def dbUnRepr(s):
	"""Inverse of dbRepr: decode a stored byte string back into an object."""
	return binstruct.varDecode(s)
# Structure of the database:
# There is the main song db songs.db:
# songId -> song dict
# songId is any random string, not too long but long enough to avoid >99.999% collisions.
# song dict can contains (specified in code by global Attribs dict later):
# artist: str
# title: str
# album: str
# tags: weighted tagmap, dict tag->[0,1]
# rating: float in [0,1]
# files: dict filename -> dict with entries:
# sha1: str
# metadata: dict
# fingerprint_AcoustId: str
# gain: float
# values should only be stored if they are certain with best accurary
class DB:
	"""Key/value store on top of LevelDB; keys and values are arbitrary
	binstruct-serializable objects (encoded via dbRepr/dbUnRepr).
	Read-modify-write cycles must hold `writelock` (see SongEntry.__setattr__)."""
	def __init__(self, dir):
		import threading
		self.writelock = threading.Lock()
		self.db = leveldb.LevelDB(appinfo.userdir + "/" + dir, max_open_files=200)
	def __getitem__(self, item):
		# missing keys raise KeyError (relied on by getSongId's handler)
		return dbUnRepr(self.db.Get(dbRepr(item)))
	def __setitem__(self, key, value):
		self.db.Put(dbRepr(key), dbRepr(value))
	def __delitem__(self, key):
		self.db.Delete(dbRepr(key))
	def setdefault(self, key, value):
		# NOTE(review): `key in self` relies on Python's __getitem__-based
		# fallback containment protocol (no __contains__/__iter__ defined
		# here) — confirm this behaves as intended for missing keys.
		if key in self:
			return self[key]
		else:
			self[key] = value
			return self[key]
	def rangeIter(self, key_from = None, key_to = None, include_value = True):
		# Iterate decoded (key, value) pairs (or just keys) over a key range;
		# entries that fail to decode are yielded as None.
		def saveDbUnRepr(v):
			try: return dbUnRepr(v)
			except: return None # not/broken binstruct data
		if include_value:
			mapFunc = lambda value: (saveDbUnRepr(value[0]), saveDbUnRepr(value[1]))
		else:
			mapFunc = saveDbUnRepr
		return map(mapFunc, self.db.RangeIter(key_from, key_to, include_value))
def init():
	"""Open both databases: the main song DB and the hash -> songId index."""
	global songDb, songHashDb
	songDb = DB("songs.db")
	songHashDb = DB("songHashs.db")
def normalizedFilename(fn):
	"""Normalize a path and abbreviate the user's home directory prefix to '~'."""
	import os
	fn = os.path.normpath(fn)
	home = os.path.expanduser("~")
	if fn.startswith(home):
		fn = "~" + fn[len(home):]
	return fn
def hash(s):
	"""Return the SHA-1 digest of `s`.

	SHA-1 (not CRC32 or a fast non-crypto hash) because the code largely
	ignores collisions, so they must be practically impossible.
	"""
	import hashlib
	return hashlib.sha1(s).digest()
# Read files in 10 KiB chunks so large files are hashed without loading fully.
HashFileBufferSize = 1024 * 10
def hashFile(f):
	"""Return the SHA-1 digest of a file's contents; `f` is a filename or file object."""
	# NOTE(review): a file opened here is never explicitly closed
	if isinstance(f, (str,unicode)): f = open(f)
	import hashlib
	h = hashlib.sha1()
	while True:
		s = f.read(HashFileBufferSize)
		if not s: break
		h.update(s)
	return h.digest()
# Entries (hash-prefix, attrib, func).
# The function should either return some False value or some non-empty string.
# If an attrib is specified and no func, we just use getattr(song, attrib, None).
# Sources are tried in list order; calcNewSongId uses the first that yields a value.
SongHashSources = [
	("a", "fingerprint_AcoustId", None),
	("h", "sha1", None),
	("p", None, lambda song: normalizedFilename(song.url)),
]
def mapHash(value):
	"""Map a source value to a short (<= 32 byte) byte-string hash key."""
	# hashlib and the DB layer need byte strings, so encode unicode first
	if isinstance(value, unicode): value = value.encode("utf-8")
	if len(value) > 32: value = hash(value)
	return value
def getSongHashSources(song):
	"""Yield every available 'prefix + hash' key for `song` (see SongHashSources)."""
	for prefix, attrib, func in SongHashSources:
		getter = func or (lambda song: getattr(song, attrib, None))
		value = getter(song)
		if value:
			yield prefix + mapHash(value)

def maybeUpdateHashAfterAttribUpdate(song, attrib, value):
	"""If `attrib` is one of the hash sources, index its new value to this song's id."""
	for prefix, attr, _func in SongHashSources:
		if attr == attrib:
			songHashDb[prefix + mapHash(value)] = song.id
			return

def getSongId(song):
	"""Return the stored song id matching any hash source of `song`, else None."""
	for key in getSongHashSources(song):
		try:
			return songHashDb[key]
		except KeyError:
			pass
	return None

def updateHashDb(song, songId):
	"""Index every available hash key of `song` to `songId`."""
	for key in getSongHashSources(song):
		songHashDb[key] = songId

def calcNewSongId(song):
	"""Returns a new unique (in hopefully almost all cases) id for a song.

	Different files with the same song might return the same id.
	"""
	# Use the first available hash source (AcoustID fingerprint, sha1, path),
	# register it in the hash DB, and use it as the id itself.
	for value in getSongHashSources(song):
		value = mapHash(value)
		updateHashDb(song, value)
		return value
	assert False # should not happen. if there are such cases later, extend SongHashSources!
class SongFileEntry(object):
	"""Attribute-style view of one file's sub-dict inside a song's DB entry."""
	def __init__(self, songEntry, url):
		# object.__setattr__ bypasses our own __setattr__ defined below
		object.__setattr__(self, "songEntry", songEntry)
		object.__setattr__(self, "url", url)
	@property
	def _dbDict(self):
		# Note: If this raises an AttributeError for some reason,
		# you will get a *very* strange inf recursion loop in
		# getattr(self, "_dbDict").
		return self.songEntry.files.filesDict.get(self.url, {})
	def __getattr__(self, attr):
		try: return self._dbDict[attr]
		except KeyError: raise AttributeError, "no attrib " + attr
	def __setattr__(self, attr, value):
		# read-modify-write of the whole song dict under the DB write lock
		global songDb
		with songDb.writelock:
			d = self.songEntry._dbDict
			d.setdefault("files",{}).setdefault(self.url,{})[attr] = value
			songDb[self.songEntry.id] = d
class SongFilesDict:
	"""Maps normalized filenames to SongFileEntry views for one song."""
	def __init__(self, songEntry):
		self.songEntry = songEntry
	@property
	def filesDict(self):
		return self.songEntry._dbDict.get("files", {})
	def __getitem__(self, url):
		url = normalizedFilename(url)
		# probe first so unknown files raise (KeyError), then hand out a view
		self.filesDict[url]
		return SongFileEntry(self.songEntry, url)
	def get(self, url):
		# unlike __getitem__, never raises: the view is created unconditionally
		url = normalizedFilename(url)
		return SongFileEntry(self.songEntry, url)
class SongEntry(object):
	"""Attribute-style view of a song's dict in songDb, keyed by the Song's id."""
	def __init__(self, song):
		# bypass __setattr__ (which would write to the DB)
		object.__setattr__(self, "songObj", song)
	@property
	def id(self):
		return self.songObj.id
	@property
	def files(self):
		return SongFilesDict(self)
	@property
	def _dbDict(self):
		# missing songs read as an empty dict
		global songDb
		try: return songDb[self.id]
		except KeyError: return {}
	def __getattr__(self, attr):
		try: return self._dbDict[attr]
		except KeyError: raise AttributeError, "no attrib " + attr
	def __setattr__(self, attr, value):
		# read-modify-write of the whole song dict under the DB write lock
		global songDb
		with songDb.writelock:
			d = self._dbDict
			d[attr] = value
			songDb[self.id] = d
def getSong(song):
	"""Wrap a Song object in its DB-backed SongEntry view."""
	return SongEntry(song)

class Attrib:
	"""Descriptor for one persisted song attribute."""
	def __init__(self, fileSpecific=False):
		# file-specific attribs are stored per file URL under files/<url>
		self.fileSpecific = fileSpecific
	def getObject(self, song):
		"""Return the view (song-level or file-level) that holds this attrib."""
		if self.fileSpecific:
			return getSong(song).files.get(song.url)
		return getSong(song)
# Attribute name -> Attrib descriptor; drives updateSongAttribValue/getSongAttrib.
Attribs = {
	"id": Attrib(), # This is the SongId used here by the DB.
	"artist": Attrib(),
	"title": Attrib(),
	"album": Attrib(),
	"tags": Attrib(),
	"rating": Attrib(),
	"sha1": Attrib(fileSpecific=True),
	"metadata": Attrib(fileSpecific=True),
	"fingerprint_AcoustId": Attrib(fileSpecific=True),
	"gain": Attrib(fileSpecific=True),
	# Note that bmpThumbnail is not here. I think it's to heavy
	# to be stored for each song in the DB. Let's just calculate it
	# on the fly when needed...
	# The Song handling code should not assume that all attribs are
	# defined here by the DB.
	}
def updateSongAttribValue(song, attrib, value):
	"""Persist one attribute value for `song` and refresh the hash index."""
	target = Attribs[attrib].getObject(song)
	setattr(target, attrib, value)
	maybeUpdateHashAfterAttribUpdate(song, attrib, value)

def getSongAttrib(song, attrib):
	"""Read one persisted attribute of `song` (AttributeError if unset)."""
	return getattr(Attribs[attrib].getObject(song), attrib)
# Do that right on first import so that all functions here work.
# NOTE: opening the LevelDB files is an import-time side effect.
init()
def songdbMain():
	"""Module main hook; currently a no-op.

	Later, we might scan through the disc and fill the DB and do updates here.
	"""
	pass
# For debugging
def dumpDatabases():
	"""Pretty-print the full contents of both databases to stdout (debug aid)."""
	global songDb, songHashDb
	import sys
	from pprint import pprint
	print "Main DB:"
	for key,value in songDb.rangeIter():
		sys.stdout.write("%r: \n" % key)
		pprint(value, indent=2)
	print "\nHashes:"
	for key,value in songHashDb.rangeIter():
		sys.stdout.write("%r: " % key)
		pprint(value, indent=2)
|
from distutils import ccompiler
from distutils import sysconfig
import unittest
import pytest
from install import build
class TestCheckVersion(unittest.TestCase):
    """Smoke tests for the CUDA/cuDNN availability checks in install.build."""
    def setUp(self):
        # configure a compiler the same way the actual build does
        self.compiler = ccompiler.new_compiler()
        sysconfig.customize_compiler(self.compiler)
        self.settings = build.get_compiler_setting()
    @pytest.mark.gpu
    def test_check_cuda_version(self):
        self.assertTrue(build.check_cuda_version(
            self.compiler, self.settings))
    @pytest.mark.gpu
    @pytest.mark.cudnn
    def test_check_cudnn_version(self):
        self.assertTrue(build.check_cudnn_version(
            self.compiler, self.settings))
Add tests for get_cuda_version/get_cudnn_version: they raise before the check runs and return cached int/str values afterwards
from distutils import ccompiler
from distutils import sysconfig
import unittest
import pytest
from install import build
class TestCheckVersion(unittest.TestCase):
    """Tests for version detection in install.build.

    As exercised below, get_*_version() raises until the corresponding
    check_*_version() has run, after which it returns an int (or a str
    when called with True) — presumably the check caches the detected
    version; confirm against install.build.
    """
    def setUp(self):
        # configure a compiler the same way the actual build does
        self.compiler = ccompiler.new_compiler()
        sysconfig.customize_compiler(self.compiler)
        self.settings = build.get_compiler_setting()
    @pytest.mark.gpu
    def test_check_cuda_version(self):
        # version must be unavailable before the check has run
        with self.assertRaises(Exception):
            build.get_cuda_version()
        self.assertTrue(build.check_cuda_version(
            self.compiler, self.settings))
        # after the check: numeric version, or a string form on request
        self.assertIsInstance(build.get_cuda_version(), int)
        self.assertIsInstance(build.get_cuda_version(True), str)
    @pytest.mark.gpu
    @pytest.mark.cudnn
    def test_check_cudnn_version(self):
        # version must be unavailable before the check has run
        with self.assertRaises(Exception):
            build.get_cudnn_version()
        self.assertTrue(build.check_cudnn_version(
            self.compiler, self.settings))
        # after the check: numeric version, or a string form on request
        self.assertIsInstance(build.get_cudnn_version(), int)
        self.assertIsInstance(build.get_cudnn_version(True), str)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import astropy.units as u
from astropy.extern import six
from astropy import log
from astropy import table
import emcee
from .utils import sed_conversion, validate_data_table
__all__ = ["plot_chain", "plot_fit", "plot_data", "plot_blob"]
def plot_chain(sampler, p=None, **kwargs):
    """Generate a diagnostic plot of the sampler chains.

    Parameters
    ----------
    sampler : `emcee.EnsembleSampler`
        Sampler containing the chains to be plotted.
    p : int (optional)
        Index of the parameter to plot. If omitted, all chains are plotted.
    last_step : bool (optional)
        Whether to plot the last step of the chain or the complete chain (default).

    Returns
    -------
    figure : `matplotlib.figure.Figure`
        Figure (None when all parameters are plotted).
    """
    # `is None` rather than `== None`: identity is the correct None test
    # (PEP 8) and avoids surprises with array-like p values.
    if p is None:
        npars = sampler.chain.shape[-1]
        for pp in six.moves.range(npars):
            _plot_chain_func(sampler, pp, **kwargs)
        fig = None
    else:
        fig = _plot_chain_func(sampler, p, **kwargs)
    return fig
def _latex_float(f, format=".3g"):
""" http://stackoverflow.com/a/13490601
"""
float_str = "{{0:{0}}}".format(format).format(f)
if "e" in float_str:
base, exponent = float_str.split("e")
return r"{0}\times 10^{{{1}}}".format(base, int(exponent))
else:
return float_str
def _plot_chain_func(sampler, p, last_step=False):
    """Plot walker traces and the posterior distribution for parameter `p`.

    Builds a two-panel figure (traces on the left, histogram + KDE on the
    right) and annotates it with chain statistics. Requires the full
    (non-flattened) chain; returns the figure, or None for a flatchain.
    """
    chain = sampler.chain
    label = sampler.labels[p]
    import matplotlib.pyplot as plt
    # Plot everything in serif to match math exponents
    plt.rc('font', family='serif')
    from scipy import stats
    if len(chain.shape) > 2:
        traces = chain[:,:, p]
        if last_step == True:
            # keep only last step
            dist = traces[:, -1]
        else:
            # convert chain to flatchain
            dist = traces.flatten()
    else:
        log.warning('we need the full chain to plot the traces, not a flatchain!')
        return None
    nwalkers = traces.shape[0]
    nsteps = traces.shape[1]
    f = plt.figure()
    ax1 = f.add_subplot(221)
    ax2 = f.add_subplot(122)
    f.subplots_adjust(left=0.1, bottom=0.15, right=0.95, top=0.9)
    # plot five percent of the traces darker
    colors = np.where(np.arange(nwalkers)/float(nwalkers) > 0.95, '#550000', '0.5')
    ax1.set_rasterization_zorder(1)
    for t, c in zip(traces, colors): # range(nwalkers):
        ax1.plot(t, c=c, lw=1, alpha=0.9, zorder=0)
    ax1.set_xlabel('step number')
    #[l.set_rotation(45) for l in ax1.get_yticklabels()]
    ax1.set_ylabel(label)
    ax1.yaxis.set_label_coords(-0.15, 0.5)
    ax1.set_title('Walker traces')
    # nbins=25 if last_step else 100
    # bin count scales with sample size, clamped to [25, 100]
    nbins = min(max(25, int(len(dist)/100.)), 100)
    xlabel = label
    n, x, patch = ax2.hist(dist, nbins, histtype='stepfilled', color='#CC0000', lw=0, normed=1)
    kde = stats.kde.gaussian_kde(dist)
    ax2.plot(x, kde(x), c='k', label='KDE')
    # for m,ls,lab in zip([np.mean(dist),np.median(dist)],('--','-.'),('mean: {0:.4g}','median: {0:.4g}')):
    # ax2.axvline(m,ls=ls,c='k',alpha=0.5,lw=2,label=lab.format(m))
    quant = [16, 50, 84]
    xquant = np.percentile(dist, quant)
    quantiles = dict(six.moves.zip(quant, xquant))
    ax2.axvline(quantiles[50], ls='--', c='k', alpha=0.5, lw=2,
                label='50% quantile')
    ax2.axvspan(quantiles[16], quantiles[84], color='0.5', alpha=0.25,
                label='68% CI')
    # ax2.legend()
    [l.set_rotation(45) for l in ax2.get_xticklabels()]
    #[l.set_rotation(45) for l in ax2.get_yticklabels()]
    ax2.set_xlabel(xlabel)
    ax2.xaxis.set_label_coords(0.5, -0.1)
    ax2.set_title('posterior distribution')
    ax2.set_ylim(top=n.max() * 1.05)
    # Print distribution parameters on lower-left
    mean, median, std = np.mean(dist), np.median(dist), np.std(dist)
    # mode estimated as the KDE maximum within ~sqrt(3) std of the mean
    xmode = np.linspace(mean-np.sqrt(3)*std, mean+np.sqrt(3)*std, 100)
    mode = xmode[np.argmax(kde(xmode))]
    median = np.median(dist)
    try:
        # EnsembleSample.get_autocorr_time was only added in the
        # recently released emcee 2.1.0 (2014-05-22), so make it optional
        autocorr = sampler.get_autocorr_time(window=chain.shape[1]/4.)[p]
        autocorr_message = '{0:.1f}'.format(autocorr)
    except AttributeError:
        autocorr_message = 'Not available. Update to emcee 2.1 or later.'
    if last_step:
        clen = 'last ensemble'
    else:
        clen = 'whole chain'
    maxlen = np.max([len(ilabel) for ilabel in sampler.labels])
    vartemplate = '{{2:>{0}}}: {{0:>8.3g}} +/- {{1:<8.3g}}\n'.format(maxlen)
    chain_props = 'Walkers: {0} \nSteps in chain: {1} \n'.format(nwalkers, nsteps) + \
        'Autocorrelation time: {0}\n'.format(autocorr_message) +\
        'Mean acceptance fraction: {0:.3f}\n'.format(np.mean(sampler.acceptance_fraction)) +\
        'Distribution properties for the {clen}:\n \
    - median: ${median}$ \n \
    - std: ${std}$ \n' .format(median=_latex_float(quantiles[50]), std=_latex_float(std), clen=clen) +\
        ' - Median with uncertainties based on \n \
      the 16th and 84th percentiles ($\sim$1$\sigma$):\n'
    info_line = ' '*10 + '{label} = ${{{median}}}^{{+{uncs[1]}}}_{{-{uncs[0]}}}$'.format(
        label=label, median=_latex_float(quantiles[50]),
        uncs=(_latex_float(quantiles[50] - quantiles[16]),
              _latex_float(quantiles[84] - quantiles[50])))
    chain_props += info_line
    # For log-parameters also report the quantiles of the linear quantity
    if 'log10(' in label or 'log(' in label:
        nlabel = label.split('(')[-1].split(')')[0]
        ltype = label.split('(')[0]
        if ltype == 'log10':
            new_dist = 10**dist
        elif ltype == 'log':
            new_dist = np.exp(dist)
        quant = [16, 50, 84]
        quantiles = dict(six.moves.zip(quant, np.percentile(new_dist, quant)))
        label_template = '\n'+' '*10+'{{label:>{0}}}'.format(len(label))
        new_line = label_template.format(label=nlabel)
        new_line += ' = ${{{median}}}^{{+{uncs[1]}}}_{{-{uncs[0]}}}$'.format(
            label=nlabel, median=_latex_float(quantiles[50]),
            uncs=(_latex_float(quantiles[50] - quantiles[16]),
                  _latex_float(quantiles[84] - quantiles[50])))
        chain_props += new_line
        info_line += new_line
    log.info('{0:-^50}\n'.format(label) + info_line)
    f.text(0.05, 0.45, chain_props, ha='left', va='top')
    return f
def _collect_blob_models(sampler, modelidx, last_step, item=None):
    """Gather blob values for model `modelidx` into one Quantity array.

    With last_step only the final ensemble contributes; otherwise every
    step and walker does. If `item` is not None, that element of each
    walker's blob entry is taken (used for (modelx, model) tuples).
    """
    if last_step:
        if item is None:
            return u.Quantity([m[modelidx] for m in sampler.blobs[-1]])
        return u.Quantity([m[modelidx][item] for m in sampler.blobs[-1]])
    model = []
    for step in sampler.blobs:
        for walkerblob in step:
            entry = walkerblob[modelidx]
            model.append(entry if item is None else entry[item])
    return u.Quantity(model)


def _process_blob(sampler, modelidx, last_step=True):
    """
    Process binary blob in sampler. If blob in position modelidx is:

    - a Quantity array of len(blob[i])=len(data['energy']: use blob as model,
      data['energy'] as modelx
    - a tuple: use first item as modelx, second as model
    - a Quantity scalar: return array of scalars

    Returns (modelx, model); raises TypeError for unrecognized blob formats.
    """
    # Inspect the first walker's blob to decide the format; all walkers are
    # assumed to store the same structure.
    blob0 = sampler.blobs[-1][0][modelidx]
    if isinstance(blob0, u.Quantity):
        if blob0.size == sampler.data['energy'].size:
            # Energy array for blob is not provided, use data['energy']
            modelx = sampler.data['energy']
        elif blob0.size == 1:
            modelx = None
        # NOTE(review): a Quantity blob whose size is neither 1 nor
        # len(data['energy']) leaves modelx unset and fails at the return —
        # kept as in the original; consider raising TypeError here instead.
        model = _collect_blob_models(sampler, modelidx, last_step)
    elif np.isscalar(blob0):
        modelx = None
        model = _collect_blob_models(sampler, modelidx, last_step)
    elif isinstance(blob0, (list, tuple)):
        if (len(blob0) == 2 and isinstance(blob0[0], u.Quantity)
                and isinstance(blob0[1], u.Quantity)):
            # Energy array for model is item 0 in blob, model flux is item 1
            modelx = blob0[0]
            model = _collect_blob_models(sampler, modelidx, last_step, item=1)
        else:
            raise TypeError('Model {0} has wrong blob format'.format(modelidx))
    else:
        raise TypeError('Model {0} has wrong blob format'.format(modelidx))
    return modelx, model
def _get_model_pt(sampler, modelidx):
    """Return the physical type of model `modelidx`, read from the last blob."""
    blob0 = sampler.blobs[-1][0][modelidx]
    if isinstance(blob0, u.Quantity):
        return blob0.unit.physical_type
    if len(blob0) == 2:
        # (modelx, model) tuple: the flux is item 1
        return blob0[1].unit.physical_type
    raise TypeError('Model {0} has wrong blob format'.format(modelidx))
def calc_CI(sampler, modelidx=0, confs=[3, 1], last_step=True):
    """Calculate confidence interval.

    For each sigma level in `confs`, returns per-energy lower/upper model
    curves taken from the sorted walker models at the corresponding
    normal-CDF fractions.
    """
    from scipy import stats
    modelx, model = _process_blob(sampler, modelidx, last_step=last_step)
    nwalkers = len(model) - 1
    CI = []
    for conf in confs:
        fracs = (stats.norm.cdf(-conf), stats.norm.cdf(conf))
        bounds = ([], [])
        for frac, bound in zip(fracs, bounds):
            nf = int(frac * nwalkers)
            for i in range(len(modelx)):
                bound.append(np.sort(model[:, i])[nf])
        # create Quantity arrays from the collected bounds, preserving units
        CI.append((u.Quantity(bounds[0]), u.Quantity(bounds[1])))
    return modelx, CI
def plot_CI(ax, sampler, modelidx=0, sed=True, confs=[3, 1, 0.5], e_unit=u.eV, **kwargs):
    """Plot confidence interval.

    Parameters
    ----------
    ax : `matplotlib.Axes`
        Axes to plot on.
    sampler : `emcee.EnsembleSampler`
        Sampler
    modelidx : int, optional
        Model index. Default is 0
    sed : bool, optional
        Whether to plot SED or differential spectrum. If `None`, the units of
        the observed spectrum will be used.
    confs : list, optional
        List of confidence levels (in sigma) to use for generating the
        confidence intervals. Default is `[3,1,0.5]`
    e_unit : :class:`~astropy.units.Unit` or str parseable to unit
        Unit in which to plot energy axis.
    last_step : bool, optional
        Whether to only use the positions in the final step of the run (True,
        default) or the whole chain (False).
    """
    modelx, CI = calc_CI(sampler, modelidx=modelidx, confs=confs, **kwargs)
    # pick first confidence interval curve for units
    f_unit, sedf = sed_conversion(modelx, CI[0][0].unit, sed)
    energies = modelx.to(e_unit).value
    for (ymin, ymax), conf in zip(CI, confs):
        # wider (higher-sigma) bands are drawn in lighter gray
        gray = np.log(conf) / np.log(20) + 0.4
        ax.fill_between(energies,
                        (ymax * sedf).to(f_unit).value,
                        (ymin * sedf).to(f_unit).value,
                        lw=0., color='{0}'.format(gray),
                        alpha=0.6, zorder=-10)
def plot_samples(ax, sampler, modelidx=0, sed=True, n_samples=100, e_unit=u.eV, last_step=False):
    """Plot a number of samples from the sampler chain.

    Parameters
    ----------
    ax : `matplotlib.Axes`
        Axes to plot on.
    sampler : `emcee.EnsembleSampler`
        Sampler
    modelidx : int, optional
        Model index. Default is 0
    sed : bool, optional
        Whether to plot SED or differential spectrum. If `None`, the units of
        the observed spectrum will be used.
    n_samples : int, optional
        Number of samples to plot. Default is 100.
    e_unit : :class:`~astropy.units.Unit` or str parseable to unit
        Unit in which to plot energy axis.
    last_step : bool, optional
        Whether to only use the positions in the final step of the run
        (default is False, i.e. use the whole chain).
    """
    modelx, model = _process_blob(sampler, modelidx, last_step=last_step)
    # pick first model curve for units
    f_unit, sedf = sed_conversion(modelx, model[0].unit, sed)
    # draw n_samples random curves (with replacement) from the chain
    for my in model[np.random.randint(len(model), size=n_samples)]:
        ax.plot(modelx.to(e_unit).value, (my * sedf).to(f_unit).value,
                color='k', alpha=0.1, lw=1)
    # overlay the maximum-likelihood model in red
    ML, MLp, MLerr, ML_model = find_ML(sampler, modelidx)
    ax.plot(ML_model[0].to(e_unit).value, (ML_model[1] * sedf).to(f_unit).value,
            color='r', lw=1.5, alpha=0.8)
def find_ML(sampler, modelidx):
    """
    Find Maximum Likelihood parameters as those in the chain with a highest log
    probability.
    """
    lnprob = sampler.lnprobability
    # (walker, step) index of the highest-probability sample in the chain
    idx = np.unravel_index(np.argmax(lnprob), lnprob.shape)
    MLp = sampler.chain[idx]
    blob = sampler.blobs[idx[1]][idx[0]][modelidx]
    if isinstance(blob, u.Quantity):
        modelx = sampler.data['energy'].copy()
        model_ML = blob.copy()
    elif len(blob) == 2:
        modelx = blob[0].copy()
        model_ML = blob[1].copy()
    else:
        raise TypeError('Model {0} has wrong blob format'.format(modelidx))
    # Half the 16th-84th percentile spread of each parameter serves as its
    # 1-sigma uncertainty estimate
    MLerr = []
    for par_samples in sampler.flatchain.T:
        lo, hi = np.percentile(par_samples, [16., 84.])
        MLerr.append((hi - lo) / 2.)
    return lnprob[idx], MLp, MLerr, (modelx, model_ML)
def _latex_unit(unit):
""" Hack to get a single line latex representation of a unit
Will be obsolete with format='latex_inline' in astropy 0.4.1
"""
l = unit.to_string('cds').split('.')
out = ''
for uni in l:
try:
int(uni[-1])
if uni[-2] == '-':
out += ' {0}$^{{{1}}}$'.format(uni[:-2], uni[-2:])
else:
out += ' {0}$^{1}$'.format(uni[:-1], uni[-1:])
except ValueError:
out += ' ' + uni
return out[1:]
def plot_blob(sampler, blobidx=0, label=None, last_step=False, **kwargs):
    """
    Plot a metadata blob as a fit to spectral data or value distribution

    Additional ``kwargs`` are passed to `plot_fit`.

    Parameters
    ----------
    sampler : `emcee.EnsembleSampler`
        Sampler with a stored chain.
    blobidx : int, optional
        Metadata blob index to plot.
    label : str, optional
        Label for the value distribution. Labels for the fit plot can be passed
        as ``xlabel`` and ``ylabel`` and will be passed to `plot_fit`.

    Returns
    -------
    figure : `matplotlib.pyplot.Figure`
        `matplotlib` figure instance containing the plot.
    """
    modelx, model = _process_blob(sampler, blobidx, last_step)
    if modelx is None:
        # Scalar blob: show the distribution of its values
        return plot_distribution(model, label)
    # Spectral blob: delegate to the full fit plot
    return plot_fit(sampler, modelidx=blobidx, last_step=last_step,
                    label=label, **kwargs)
def plot_fit(sampler, modelidx=0,label=None,xlabel=None,ylabel=None,confs=[3, 1, 0.5],
             n_samples=None, sed=False, figure=None, residualCI=True, plotdata=None,
             e_unit=None, **kwargs):
    """
    Plot data with fit confidence regions.

    Additional ``kwargs`` are passed to `plot_CI`.

    Parameters
    ----------
    sampler : `emcee.EnsembleSampler`
        Sampler with a stored chain.
    modelidx : int, optional
        Model index to plot.
    label : str, optional
        Label for the title of the plot.
    xlabel : str, optional
        Label for the ``x`` axis of the plot.
    ylabel : str, optional
        Label for the ``y`` axis of the plot.
    sed : bool, optional
        Whether to plot SED or differential spectrum.
    confs : list, optional
        List of confidence levels (in sigma) to use for generating the
        confidence intervals. Default is ``[3,1,0.5]``
    n_samples : int, optional
        If not ``None``, number of sample models to plot. ``n_samples=100`` is a
        good starting point to see the behaviour of the model. Default is
        ``None``: plot confidence bands instead of samples.
    figure : `matplotlib.figure.Figure`, optional
        `matplotlib` figure to plot on. If omitted a new one will be generated.
    residualCI : bool, optional
        Whether to plot the confidence interval bands in the residuals subplot.
    plotdata : bool, optional
        Whether to plot data on top of model confidence intervals. Default is
        True if the physical types of the data and the model match.
    e_unit : `~astropy.units.Unit`
        Units for the energy axis of the plot. The default is to use the units
        of the energy array of the observed data.
    """
    import matplotlib.pyplot as plt
    # Plot everything in serif to match math exponents
    plt.rc('font', family='serif')
    # Maximum-likelihood sample summary, used both for the residuals
    # reference curve and for the info box
    ML, MLp, MLerr, model_ML = find_ML(sampler, modelidx)
    infostr = 'Maximum log probability: {0:.3g}\n'.format(ML)
    infostr += 'Maximum Likelihood values:\n'
    maxlen = np.max([len(ilabel) for ilabel in sampler.labels])
    vartemplate = '{{2:>{0}}}: {{0:>8.3g}} +/- {{1:<8.3g}}\n'.format(maxlen)
    for p, v, ilabel in zip(MLp, MLerr, sampler.labels):
        infostr += vartemplate.format(p, v, ilabel)
    # log.info(infostr)
    data = sampler.data
    plotresiduals = False
    # By default data (and residuals, when bands are drawn) are only shown
    # for the first model blob
    if modelidx == 0 and plotdata is None:
        plotdata = True
        if confs is not None:
            plotresiduals = True
    elif plotdata is None:
        plotdata = False
    if plotdata:
        # Check that physical types of data and model match
        model_pt = _get_model_pt(sampler, modelidx)
        data_pt = data['flux'].unit.physical_type
        if data_pt != model_pt:
            log.info('Model physical type ({0}) and spectral data physical'
                     ' type ({1}) do not match for blob {2}! Not plotting data.'.format(model_pt, data_pt, modelidx))
            plotdata = False
    # NOTE(review): prefer ``figure is None``
    if figure == None:
        f = plt.figure()
    else:
        f = figure
    if plotdata and plotresiduals:
        # main spectrum on top, residual strip below sharing the x axis
        ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=3)
        ax2 = plt.subplot2grid((4, 1), (3, 0), sharex=ax1)
        for subp in [ax1, ax2]:
            f.add_subplot(subp)
    else:
        ax1 = f.add_subplot(111)
    datacol = 'r'
    if e_unit is None:
        e_unit = data['energy'].unit
    if confs is not None and n_samples is None:
        plot_CI(ax1, sampler,modelidx,sed=sed,confs=confs,e_unit=e_unit,**kwargs)
    elif n_samples is not None:
        plot_samples(ax1, sampler, modelidx, sed=sed, n_samples=n_samples, e_unit=e_unit)
    else:
        # nothing drawn from the chain, so there is no residual CI either
        residualCI = False

    def plot_ulims(ax, x, y, xerr):
        """
        Plot upper limits as arrows with cap at value of upper limit.
        """
        ax.errorbar(x, y, xerr=xerr, ls='',
                    color=datacol, elinewidth=2, capsize=0)
        ax.errorbar(x, 0.75 * y, yerr=0.25*y, ls='', lolims=True,
                    color=datacol, elinewidth=2, capsize=5, zorder=10)

    if plotdata:
        f_unit, sedf = sed_conversion(data['energy'], data['flux'].unit, sed)
        ul = data['ul']
        # NOTE(review): unary minus on a boolean mask is rejected by modern
        # numpy; ``~ul`` is the safe spelling — confirm numpy version in use
        notul = -ul
        # Hack to show y errors compatible with 0 in loglog plot
        yerr = data['dflux'][:, notul]
        y = data['flux'][notul].to(yerr.unit)
        bad_err = np.where((y-yerr[0]) <= 0.)
        yerr[0][bad_err] = y[bad_err]*(1.-1e-7)
        ax1.errorbar(data['energy'][notul].to(e_unit).value,
                     (data['flux'][notul] * sedf[notul]).to(f_unit).value,
                     yerr=(yerr * sedf[notul]).to(f_unit).value,
                     xerr=(data['dene'][:, notul]).to(e_unit).value,
                     zorder=100, marker='o', ls='', elinewidth=2, capsize=0,
                     mec='w', mew=0, ms=6, color=datacol)
        if np.any(ul):
            plot_ulims(ax1, data['energy'][ul].to(e_unit).value,
                       (data['flux'][ul] * sedf[ul]).to(f_unit).value,
                       (data['dene'][:, ul]).to(e_unit).value)
        if plotresiduals:
            # Residuals in units of the mean flux uncertainty; interpolate
            # the ML model onto the data energies when grids differ
            if len(model_ML) != len(data['energy']):
                from scipy.interpolate import interp1d
                modelfunc = interp1d(model_ML[0].to(e_unit).value, model_ML[1].value)
                difference = data['flux'][notul].value-modelfunc(data['energy'][notul])
                difference *= data['flux'].unit
            else:
                difference = data['flux'][notul]-model_ML[1][notul]
            dflux = np.mean(data['dflux'][:, notul], axis=0)
            ax2.errorbar(data['energy'][notul].to(e_unit).value,
                         (difference / dflux).decompose().value,
                         yerr=(dflux / dflux).decompose().value,
                         xerr=data['dene'][:, notul].to(e_unit).value,
                         zorder=100, marker='o', ls='', elinewidth=2, capsize=0,
                         mec='w', mew=0, ms=6, color=datacol)
            ax2.axhline(0, c='k', lw=2, ls='--')
            from matplotlib.ticker import MaxNLocator
            ax2.yaxis.set_major_locator(MaxNLocator(integer='True', prune='upper'))
            ax2.set_ylabel(r'$\Delta\sigma$')
            if len(model_ML) == len(data['energy']) and residualCI:
                # shade the same confidence bands in residual space
                modelx, CI = calc_CI(sampler, modelidx=modelidx,
                                     confs=confs, **kwargs)
                for (ymin, ymax), conf in zip(CI, confs):
                    if conf < 100:
                        color = np.log(conf)/np.log(20)+0.4
                        ax2.fill_between(modelx[notul].to(e_unit).value,
                                         ((ymax[notul]-model_ML[1][notul])
                                          / dflux).decompose().value,
                                         ((ymin[notul]-model_ML[1][notul])
                                          / dflux).decompose().value,
                                         lw=0., color='{0}'.format(color), alpha=0.6, zorder=-10)
                # ax.plot(modelx,model_ML,c='k',lw=3,zorder=-5)
    ax1.set_xscale('log')
    ax1.set_yscale('log')
    if plotdata:
        if plotresiduals:
            ax2.set_xscale('log')
            # hide the duplicated x tick labels of the shared axis
            for tl in ax1.get_xticklabels():
                tl.set_visible(False)
        # round x limits to the decade enclosing the data with errors
        xmin = 10 ** np.floor(np.log10(np.min(data['energy'] - data['dene'][0]).value))
        xmax = 10 ** np.ceil(np.log10(np.max(data['energy'] + data['dene'][1]).value))
        ax1.set_xlim(xmin, xmax)
        # avoid autoscaling to errorbars to 0
        if np.any(data['dflux'][:, notul][0] >= data['flux'][notul]):
            elo = ((data['flux'][notul] * sedf[notul]).to(f_unit).value -
                   (data['dflux'][0][notul] * sedf[notul]).to(f_unit).value)
            gooderr = np.where(data['dflux'][0][notul] < data['flux'][notul])
            ymin = 10 ** np.floor(np.log10(np.min(elo[gooderr])))
            ax1.set_ylim(bottom=ymin)
    else:
        if sed:
            ndecades = 10
        else:
            ndecades = 20
        # restrict y axis to ndecades to avoid autoscaling deep exponentials
        xmin, xmax, ymin, ymax = ax1.axis()
        ymin = max(ymin, ymax/10**ndecades)
        ax1.set_ylim(bottom=ymin)
        # scale x axis to largest model_ML x point within ndecades decades of
        # maximum
        f_unit, sedf = sed_conversion(model_ML[0], model_ML[1].unit, sed)
        hi = np.where((model_ML[1]*sedf).to(f_unit).value > ymin)
        xmax = np.max(model_ML[0][hi])
        ax1.set_xlim(right=10 ** np.ceil(np.log10(xmax.to(e_unit).value)))
    if confs is not None:
        ax1.text(0.05, 0.05, infostr, ha='left', va='bottom',
                 transform=ax1.transAxes, family='monospace')
    if label is not None:
        ax1.set_title(label)
    if ylabel is None:
        if sed:
            ax1.set_ylabel(r'$E^2\mathsf{{d}}N/\mathsf{{d}}E$'
                           ' [{0}]'.format(_latex_unit(u.Unit(f_unit))))
        else:
            ax1.set_ylabel(r'$\mathsf{{d}}N/\mathsf{{d}}E$'
                           ' [{0}]'.format(_latex_unit(u.Unit(f_unit))))
    else:
        ax1.set_ylabel(ylabel)
    # the x label goes on the bottom-most axes
    if plotdata and plotresiduals:
        xlaxis = ax2
    else:
        xlaxis = ax1
    if xlabel is None:
        xlaxis.set_xlabel('Energy [{0}]'.format(_latex_unit(e_unit)))
    else:
        xlaxis.set_xlabel(xlabel)
    f.subplots_adjust(hspace=0)
    return f
def plot_data(input_data, xlabel=None, ylabel=None,
              sed=True, figure=None, e_unit=None, **kwargs):
    """
    Plot spectral data.

    Additional ``kwargs`` are passed to `plot_fit`, except ``confs`` and
    ``plotdata``.

    Parameters
    ----------
    input_data : `emcee.EnsembleSampler`, `astropy.table.Table`, or `dict`
        Spectral data to plot. Can be given as a data table, a dict generated
        with `validate_data_table` or a `emcee.EnsembleSampler` with a data
        property.
    xlabel : str, optional
        Label for the ``x`` axis of the plot.
    ylabel : str, optional
        Label for the ``y`` axis of the plot.
    sed : bool, optional
        Whether to plot SED or differential spectrum.
    figure : `matplotlib.figure.Figure`, optional
        `matplotlib` figure to plot on. If omitted a new one will be generated.
    e_unit : `~astropy.units.Unit`, optional
        Unit in which to plot the energy axis. Defaults to the unit of the
        data energy array.
    """
    import matplotlib.pyplot as plt
    # Plot everything in serif to match math exponents
    plt.rc('font', family='serif')

    # Accept a data table, a sampler carrying data, or a validated dict
    if isinstance(input_data, table.Table):
        data = validate_data_table(input_data)
    elif isinstance(input_data, emcee.EnsembleSampler):
        data = input_data.data
    elif isinstance(input_data, dict) and 'energy' in input_data.keys():
        data = input_data
    else:
        log.warning('input_data format not known, no plotting data!')
        return None

    datacol = 'r'
    if e_unit is None:
        e_unit = data['energy'].unit
    if figure is None:
        f = plt.figure()
    else:
        f = figure
    ax1 = f.add_subplot(111)

    def plot_ulims(ax, x, y, xerr):
        """
        Plot upper limits as arrows with cap at value of upper limit.
        """
        ax.errorbar(x, y, xerr=xerr, ls='',
                    color=datacol, elinewidth=2, capsize=0)
        ax.errorbar(x, 0.75 * y, yerr=0.25*y, ls='', lolims=True,
                    color=datacol, elinewidth=2, capsize=5, zorder=10)

    f_unit, sedf = sed_conversion(data['energy'], data['flux'].unit, sed)
    ul = data['ul']
    # Invert the upper-limit boolean mask with ``~``; unary minus on boolean
    # arrays is an error in modern numpy (was equivalent in old releases)
    notul = ~ul
    # Hack to show y errors compatible with 0 in loglog plot
    yerr = data['dflux'][:, notul]
    y = data['flux'][notul].to(yerr.unit)
    bad_err = np.where((y-yerr[0]) <= 0.)
    yerr[0][bad_err] = y[bad_err]*(1.-1e-7)
    ax1.errorbar(data['energy'][notul].to(e_unit).value,
                 (data['flux'][notul] * sedf[notul]).to(f_unit).value,
                 yerr=(yerr * sedf[notul]).to(f_unit).value,
                 xerr=(data['dene'][:, notul]).to(e_unit).value,
                 zorder=100, marker='o', ls='', elinewidth=2, capsize=0,
                 mec='w', mew=0, ms=6, color=datacol)
    if np.any(ul):
        plot_ulims(ax1, data['energy'][ul].to(e_unit).value,
                   (data['flux'][ul] * sedf[ul]).to(f_unit).value,
                   (data['dene'][:, ul]).to(e_unit).value)

    ax1.set_xscale('log')
    ax1.set_yscale('log')
    # round x limits to the decade enclosing the data with errors
    xmin = 10 ** np.floor(np.log10(np.min(data['energy'] - data['dene'][0]).value))
    xmax = 10 ** np.ceil(np.log10(np.max(data['energy'] + data['dene'][1]).value))
    ax1.set_xlim(xmin, xmax)
    # avoid autoscaling to errorbars to 0
    if np.any(data['dflux'][:, notul][0] >= data['flux'][notul]):
        elo = ((data['flux'][notul] * sedf[notul]).to(f_unit).value -
               (data['dflux'][0][notul] * sedf[notul]).to(f_unit).value)
        gooderr = np.where(data['dflux'][0][notul] < data['flux'][notul])
        ymin = 10 ** np.floor(np.log10(np.min(elo[gooderr])))
        ax1.set_ylim(bottom=ymin)

    if ylabel is None:
        if sed:
            ax1.set_ylabel(r'$E^2\mathsf{{d}}N/\mathsf{{d}}E$'
                           ' [{0}]'.format(_latex_unit(u.Unit(f_unit))))
        else:
            ax1.set_ylabel(r'$\mathsf{{d}}N/\mathsf{{d}}E$'
                           ' [{0}]'.format(_latex_unit(u.Unit(f_unit))))
    else:
        ax1.set_ylabel(ylabel)
    if xlabel is None:
        ax1.set_xlabel('Energy [{0}]'.format(_latex_unit(e_unit)))
    else:
        ax1.set_xlabel(xlabel)
    return f
def plot_distribution(samples, label):
    """Plot the posterior distribution of a scalar blob.

    Shows a histogram with a KDE overlay, the median and the 68% CI band,
    plus a text summary of the distribution properties.

    Parameters
    ----------
    samples : array-like or `~astropy.units.Quantity`
        Scalar samples from the chain.
    label : str
        Label naming the plotted quantity.

    Returns
    -------
    figure : `matplotlib.pyplot.Figure`
        `matplotlib` figure instance containing the plot.
    """
    from scipy import stats
    import matplotlib.pyplot as plt

    quant = [16, 50, 84]
    quantiles = dict(six.moves.zip(quant, np.percentile(samples, quant)))
    std = np.std(samples)

    if isinstance(samples[0], u.Quantity):
        unit = samples[0].unit
    else:
        unit = ''
    if isinstance(std, u.Quantity):
        std = std.value

    dist_props = '{label} distribution properties:\n \
    - median: ${median}$ {unit}, std: ${std}$ {unit}\n \
    - Median with uncertainties based on \n \
      the 16th and 84th percentiles ($\sim$1$\sigma$):\n\
          {label} = ${{{median}}}^{{+{uncs[1]}}}_{{-{uncs[0]}}}$ {unit}'.format(
        label=label, median=_latex_float(quantiles[50]),
        uncs=(_latex_float(quantiles[50] - quantiles[16]),
              _latex_float(quantiles[84] - quantiles[50])),
        std=_latex_float(std), unit=unit)

    f = plt.figure()
    f.text(0.1, 0.23, dist_props, ha='left', va='top')
    ax = f.add_subplot(111)
    f.subplots_adjust(bottom=0.35)
    histnbins = min(max(25, int(len(samples)/100.)), 100)
    xlabel = label
    n, x, patch = ax.hist(samples, histnbins, histtype='stepfilled',
                          color='#CC0000', lw=0, normed=1)
    # Strip units before handing the samples to scipy's KDE
    if isinstance(samples, u.Quantity):
        samples_nounit = samples.value
    else:
        # bugfix: this branch previously assigned to a misspelled name
        # (``samples_nunit``), raising NameError for plain arrays
        samples_nounit = samples
    kde = stats.kde.gaussian_kde(samples_nounit)
    ax.plot(x, kde(x), c='k', label='KDE')
    ax.axvline(quantiles[50], ls='--', c='k', alpha=0.5, lw=2,
               label='50% quantile')
    ax.axvspan(quantiles[16], quantiles[84], color='0.5', alpha=0.25,
               label='68% CI')
    # ax.legend()
    # rotate tick labels so long numbers do not overlap
    [l.set_rotation(45) for l in ax.get_xticklabels()]
    #[l.set_rotation(45) for l in ax.get_yticklabels()]
    if unit != '':
        xlabel += ' [{0}]'.format(unit)
    ax.set_xlabel(xlabel)
    ax.xaxis.set_label_coords(0.5, -0.1)
    ax.set_title('posterior distribution of {0}'.format(label))
    ax.set_ylim(top=n.max() * 1.05)
    return f
# TODO: remove duplication -- the module below repeats the definitions above
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import astropy.units as u
from astropy.extern import six
from astropy import log
from astropy import table
from .utils import sed_conversion, validate_data_table
__all__ = ["plot_chain", "plot_fit", "plot_data", "plot_blob"]
def plot_chain(sampler, p=None, **kwargs):
    """Generate a diagnostic plot of the sampler chains.

    Parameters
    ----------
    sampler : `emcee.EnsembleSampler`
        Sampler containing the chains to be plotted.
    p : int (optional)
        Index of the parameter to plot. If omitted, all chains are plotted.
    last_step : bool (optional)
        Whether to plot the last step of the chain or the complete chain (default).

    Returns
    -------
    figure : `matplotlib.figure.Figure`
        Figure
    """
    # Use identity comparison with None (PEP 8) instead of ``==``
    if p is None:
        npars = sampler.chain.shape[-1]
        # one figure per parameter; the unused zip over labels was removed
        for pp in six.moves.range(npars):
            _plot_chain_func(sampler, pp, **kwargs)
        fig = None
    else:
        fig = _plot_chain_func(sampler, p, **kwargs)
    return fig
def _latex_float(f, format=".3g"):
""" http://stackoverflow.com/a/13490601
"""
float_str = "{{0:{0}}}".format(format).format(f)
if "e" in float_str:
base, exponent = float_str.split("e")
return r"{0}\times 10^{{{1}}}".format(base, int(exponent))
else:
return float_str
def _plot_chain_func(sampler, p, last_step=False):
    """Plot walker traces and the posterior distribution of parameter ``p``.

    Returns the `matplotlib` figure, or None if the sampler only holds a
    flat chain (traces cannot be reconstructed from it).
    """
    chain = sampler.chain
    label = sampler.labels[p]
    import matplotlib.pyplot as plt
    # Plot everything in serif to match math exponents
    plt.rc('font', family='serif')
    from scipy import stats
    if len(chain.shape) > 2:
        traces = chain[:,:, p]
        # NOTE(review): prefer ``if last_step:``
        if last_step == True:
            # keep only last step
            dist = traces[:, -1]
        else:
            # convert chain to flatchain
            dist = traces.flatten()
    else:
        log.warning('we need the full chain to plot the traces, not a flatchain!')
        return None
    nwalkers = traces.shape[0]
    nsteps = traces.shape[1]
    f = plt.figure()
    # traces on the left, posterior histogram on the right
    ax1 = f.add_subplot(221)
    ax2 = f.add_subplot(122)
    f.subplots_adjust(left=0.1, bottom=0.15, right=0.95, top=0.9)
    # plot five percent of the traces darker
    colors = np.where(np.arange(nwalkers)/float(nwalkers) > 0.95, '#550000', '0.5')
    ax1.set_rasterization_zorder(1)
    for t, c in zip(traces, colors):  # range(nwalkers):
        ax1.plot(t, c=c, lw=1, alpha=0.9, zorder=0)
    ax1.set_xlabel('step number')
    #[l.set_rotation(45) for l in ax1.get_yticklabels()]
    ax1.set_ylabel(label)
    ax1.yaxis.set_label_coords(-0.15, 0.5)
    ax1.set_title('Walker traces')
    # nbins=25 if last_step else 100
    nbins = min(max(25, int(len(dist)/100.)), 100)
    xlabel = label
    n, x, patch = ax2.hist(dist, nbins, histtype='stepfilled', color='#CC0000', lw=0, normed=1)
    kde = stats.kde.gaussian_kde(dist)
    ax2.plot(x, kde(x), c='k', label='KDE')
    # for m,ls,lab in zip([np.mean(dist),np.median(dist)],('--','-.'),('mean: {0:.4g}','median: {0:.4g}')):
    # ax2.axvline(m,ls=ls,c='k',alpha=0.5,lw=2,label=lab.format(m))
    quant = [16, 50, 84]
    xquant = np.percentile(dist, quant)
    quantiles = dict(six.moves.zip(quant, xquant))
    ax2.axvline(quantiles[50], ls='--', c='k', alpha=0.5, lw=2,
                label='50% quantile')
    ax2.axvspan(quantiles[16], quantiles[84], color='0.5', alpha=0.25,
                label='68% CI')
    # ax2.legend()
    # rotate tick labels so long numbers do not overlap
    [l.set_rotation(45) for l in ax2.get_xticklabels()]
    #[l.set_rotation(45) for l in ax2.get_yticklabels()]
    ax2.set_xlabel(xlabel)
    ax2.xaxis.set_label_coords(0.5, -0.1)
    ax2.set_title('posterior distribution')
    ax2.set_ylim(top=n.max() * 1.05)
    # Print distribution parameters on lower-left
    mean, median, std = np.mean(dist), np.median(dist), np.std(dist)
    # mode estimated as the KDE maximum within sqrt(3)*std of the mean
    # NOTE(review): ``mode`` (and the re-assignment of ``median``) are not
    # used below — confirm whether this is dead code
    xmode = np.linspace(mean-np.sqrt(3)*std, mean+np.sqrt(3)*std, 100)
    mode = xmode[np.argmax(kde(xmode))]
    median = np.median(dist)
    try:
        # EnsembleSample.get_autocorr_time was only added in the
        # recently released emcee 2.1.0 (2014-05-22), so make it optional
        autocorr = sampler.get_autocorr_time(window=chain.shape[1]/4.)[p]
        autocorr_message = '{0:.1f}'.format(autocorr)
    except AttributeError:
        autocorr_message = 'Not available. Update to emcee 2.1 or later.'
    if last_step:
        clen = 'last ensemble'
    else:
        clen = 'whole chain'
    maxlen = np.max([len(ilabel) for ilabel in sampler.labels])
    vartemplate = '{{2:>{0}}}: {{0:>8.3g}} +/- {{1:<8.3g}}\n'.format(maxlen)
    chain_props = 'Walkers: {0} \nSteps in chain: {1} \n'.format(nwalkers, nsteps) + \
        'Autocorrelation time: {0}\n'.format(autocorr_message) +\
        'Mean acceptance fraction: {0:.3f}\n'.format(np.mean(sampler.acceptance_fraction)) +\
        'Distribution properties for the {clen}:\n \
    - median: ${median}$ \n \
    - std: ${std}$ \n' .format(median=_latex_float(quantiles[50]), std=_latex_float(std), clen=clen) +\
        ' - Median with uncertainties based on \n \
      the 16th and 84th percentiles ($\sim$1$\sigma$):\n'
    info_line = ' '*10 + '{label} = ${{{median}}}^{{+{uncs[1]}}}_{{-{uncs[0]}}}$'.format(
        label=label, median=_latex_float(quantiles[50]),
        uncs=(_latex_float(quantiles[50] - quantiles[16]),
              _latex_float(quantiles[84] - quantiles[50])))
    chain_props += info_line
    # For log-scaled parameters, also report quantiles in linear space
    if 'log10(' in label or 'log(' in label:
        nlabel = label.split('(')[-1].split(')')[0]
        ltype = label.split('(')[0]
        if ltype == 'log10':
            new_dist = 10**dist
        elif ltype == 'log':
            new_dist = np.exp(dist)
        quant = [16, 50, 84]
        quantiles = dict(six.moves.zip(quant, np.percentile(new_dist, quant)))
        label_template = '\n'+' '*10+'{{label:>{0}}}'.format(len(label))
        new_line = label_template.format(label=nlabel)
        new_line += ' = ${{{median}}}^{{+{uncs[1]}}}_{{-{uncs[0]}}}$'.format(
            label=nlabel, median=_latex_float(quantiles[50]),
            uncs=(_latex_float(quantiles[50] - quantiles[16]),
                  _latex_float(quantiles[84] - quantiles[50])))
        chain_props += new_line
        info_line += new_line
    log.info('{0:-^50}\n'.format(label) + info_line)
    f.text(0.05, 0.45, chain_props, ha='left', va='top')
    return f
def _process_blob(sampler, modelidx,last_step=True):
    """
    Process binary blob in sampler. If blob in position modelidx is:

    - a Quantity array of len(blob[i])=len(data['energy']: use blob as model,
      data['energy'] as modelx
    - a tuple: use first item as modelx, second as model
    - a Quantity scalar: return array of scalars

    Returns ``(modelx, model)``; ``modelx`` is None for scalar blobs.
    """
    # Inspect the first walker's blob from the last step to decide the format
    blob0 = sampler.blobs[-1][0][modelidx]
    if isinstance(blob0, u.Quantity):
        if blob0.size == sampler.data['energy'].size:
            # Energy array for blob is not provided, use data['energy']
            modelx = sampler.data['energy']
        elif blob0.size == 1:
            modelx = None
        # NOTE(review): a Quantity whose size matches neither case leaves
        # ``modelx`` unbound and raises NameError on return — confirm intended
        if last_step:
            model = u.Quantity([m[modelidx] for m in sampler.blobs[-1]])
        else:
            # flatten all steps and walkers into a single Quantity array
            nsteps = len(sampler.blobs)
            model = []
            for step in sampler.blobs:
                for walkerblob in step:
                    model.append(walkerblob[modelidx])
            model = u.Quantity(model)
    elif np.isscalar(blob0):
        # plain scalar blob: collect values, no energy axis
        modelx = None
        if last_step:
            model = u.Quantity([m[modelidx] for m in sampler.blobs[-1]])
        else:
            nsteps = len(sampler.blobs)
            model = []
            for step in sampler.blobs:
                for walkerblob in step:
                    model.append(walkerblob[modelidx])
            model = u.Quantity(model)
    elif (isinstance(blob0, list) or isinstance(blob0, tuple)):
        if (len(blob0) == 2 and isinstance(blob0[0], u.Quantity)
                and isinstance(blob0[1], u.Quantity)):
            # Energy array for model is item 0 in blob, model flux is item 1
            modelx = blob0[0]
            if last_step:
                model = u.Quantity([m[modelidx][1] for m in sampler.blobs[-1]])
            else:
                nsteps = len(sampler.blobs)
                model = []
                for step in sampler.blobs:
                    for walkerblob in step:
                        model.append(walkerblob[modelidx][1])
                model = u.Quantity(model)
        else:
            raise TypeError('Model {0} has wrong blob format'.format(modelidx))
    else:
        raise TypeError('Model {0} has wrong blob format'.format(modelidx))
    return modelx, model
def _get_model_pt(sampler, modelidx):
    """Return the physical type of the model blob at index ``modelidx``."""
    # Inspect the first walker's blob from the last step
    sample_blob = sampler.blobs[-1][0][modelidx]
    if isinstance(sample_blob, u.Quantity):
        return sample_blob.unit.physical_type
    if len(sample_blob) == 2:
        # (energy, flux) tuple: the flux carries the physical type
        return sample_blob[1].unit.physical_type
    raise TypeError('Model {0} has wrong blob format'.format(modelidx))
def calc_CI(sampler, modelidx=0, confs=None, last_step=True):
    """Calculate confidence interval curves for a model blob.

    Parameters
    ----------
    sampler : `emcee.EnsembleSampler`
        Sampler with a stored chain and blobs.
    modelidx : int, optional
        Model blob index. Default is 0.
    confs : list, optional
        Confidence levels (in sigma) to compute. Default is ``[3, 1]``.
    last_step : bool, optional
        Whether to only use the positions in the final step of the run
        (True, default) or the whole chain (False).

    Returns
    -------
    modelx : `~astropy.units.Quantity`
        Energy axis of the model.
    CI : list
        One ``(ymin, ymax)`` pair of `~astropy.units.Quantity` arrays per
        confidence level in ``confs``.
    """
    from scipy import stats
    # Avoid the mutable-default-argument pitfall
    if confs is None:
        confs = [3, 1]
    modelx, model = _process_blob(sampler, modelidx, last_step=last_step)
    nwalkers = len(model) - 1
    # Sort each energy bin once and reuse the sorted columns for every
    # confidence level, instead of re-sorting inside the confs loop.
    sorted_cols = [np.sort(model[:, i]) for i in range(len(modelx))]
    CI = []
    for conf in confs:
        # fractional positions of the lower/upper bounds of this level
        fmin = stats.norm.cdf(-conf)
        fmax = stats.norm.cdf(conf)
        ymin, ymax = [], []
        for fr, y in ((fmin, ymin), (fmax, ymax)):
            nf = int(fr * nwalkers)
            for col in sorted_cols:
                y.append(col[nf])
        # create an array from lists ymin and ymax preserving units
        CI.append((u.Quantity(ymin), u.Quantity(ymax)))
    return modelx, CI
def plot_CI(ax, sampler, modelidx=0, sed=True,confs=[3, 1, 0.5],e_unit=u.eV,**kwargs):
    """Plot confidence interval.

    Parameters
    ----------
    ax : `matplotlib.Axes`
        Axes to plot on.
    sampler : `emcee.EnsembleSampler`
        Sampler
    modelidx : int, optional
        Model index. Default is 0
    sed : bool, optional
        Whether to plot SED or differential spectrum. If `None`, the units of
        the observed spectrum will be used.
    confs : list, optional
        List of confidence levels (in sigma) to use for generating the confidence intervals. Default is `[3,1,0.5]`
    e_unit : :class:`~astropy.units.Unit` or str parseable to unit
        Unit in which to plot energy axis.
    last_step : bool, optional
        Whether to only use the positions in the final step of the run (True, default) or the whole chain (False).
    """
    # extra kwargs (e.g. last_step) are forwarded to calc_CI
    modelx, CI = calc_CI(sampler, modelidx=modelidx,confs=confs,**kwargs)
    # pick first confidence interval curve for units
    f_unit, sedf = sed_conversion(modelx, CI[0][0].unit, sed)
    for (ymin, ymax), conf in zip(CI, confs):
        # grayscale shade: tighter confidence levels are drawn darker
        color = np.log(conf)/np.log(20)+0.4
        ax.fill_between(modelx.to(e_unit).value,
                        (ymax * sedf).to(f_unit).value,
                        (ymin * sedf).to(f_unit).value,
                        lw=0., color='{0}'.format(color),
                        alpha=0.6, zorder=-10)
    # ax.plot(modelx,model_ML,c='k',lw=3,zorder=-5)
def plot_samples(ax, sampler, modelidx=0, sed=True, n_samples=100, e_unit=u.eV, last_step=False):
    """Plot a number of samples from the sampler chain.

    Parameters
    ----------
    ax : `matplotlib.Axes`
        Axes to plot on.
    sampler : `emcee.EnsembleSampler`
        Sampler
    modelidx : int, optional
        Model index. Default is 0
    sed : bool, optional
        Whether to plot SED or differential spectrum. If `None`, the units of
        the observed spectrum will be used.
    n_samples : int, optional
        Number of samples to plot. Default is 100.
    e_unit : :class:`~astropy.units.Unit` or str parseable to unit
        Unit in which to plot energy axis.
    last_step : bool, optional
        Whether to only use the positions in the final step of the run
        (default is False, i.e. use the whole chain).
    """
    modelx, model = _process_blob(sampler, modelidx, last_step=last_step)
    # pick first model curve for units
    f_unit, sedf = sed_conversion(modelx, model[0].unit, sed)
    # draw n_samples random curves (with replacement) from the chain
    for my in model[np.random.randint(len(model), size=n_samples)]:
        ax.plot(modelx.to(e_unit).value, (my * sedf).to(f_unit).value,
                color='k', alpha=0.1, lw=1)
    # overlay the maximum-likelihood model in red
    ML, MLp, MLerr, ML_model = find_ML(sampler, modelidx)
    ax.plot(ML_model[0].to(e_unit).value, (ML_model[1] * sedf).to(f_unit).value,
            color='r', lw=1.5, alpha=0.8)
def find_ML(sampler, modelidx):
    """
    Find Maximum Likelihood parameters as those in the chain with a highest log
    probability.
    """
    lnprob = sampler.lnprobability
    # (walker, step) index of the highest-probability sample in the chain
    idx = np.unravel_index(np.argmax(lnprob), lnprob.shape)
    MLp = sampler.chain[idx]
    blob = sampler.blobs[idx[1]][idx[0]][modelidx]
    if isinstance(blob, u.Quantity):
        modelx = sampler.data['energy'].copy()
        model_ML = blob.copy()
    elif len(blob) == 2:
        modelx = blob[0].copy()
        model_ML = blob[1].copy()
    else:
        raise TypeError('Model {0} has wrong blob format'.format(modelidx))
    # Half the 16th-84th percentile spread of each parameter serves as its
    # 1-sigma uncertainty estimate
    MLerr = []
    for par_samples in sampler.flatchain.T:
        lo, hi = np.percentile(par_samples, [16., 84.])
        MLerr.append((hi - lo) / 2.)
    return lnprob[idx], MLp, MLerr, (modelx, model_ML)
def _latex_unit(unit):
""" Hack to get a single line latex representation of a unit
Will be obsolete with format='latex_inline' in astropy 0.4.1
"""
l = unit.to_string('cds').split('.')
out = ''
for uni in l:
try:
int(uni[-1])
if uni[-2] == '-':
out += ' {0}$^{{{1}}}$'.format(uni[:-2], uni[-2:])
else:
out += ' {0}$^{1}$'.format(uni[:-1], uni[-1:])
except ValueError:
out += ' ' + uni
return out[1:]
def plot_blob(sampler, blobidx=0, label=None, last_step=False, **kwargs):
    """
    Plot a metadata blob as a fit to spectral data or value distribution

    Additional ``kwargs`` are passed to `plot_fit`.

    Parameters
    ----------
    sampler : `emcee.EnsembleSampler`
        Sampler with a stored chain.
    blobidx : int, optional
        Metadata blob index to plot.
    label : str, optional
        Label for the value distribution. Labels for the fit plot can be passed
        as ``xlabel`` and ``ylabel`` and will be passed to `plot_fit`.

    Returns
    -------
    figure : `matplotlib.pyplot.Figure`
        `matplotlib` figure instance containing the plot.
    """
    modelx, model = _process_blob(sampler, blobidx, last_step)
    if modelx is None:
        # Scalar blob: show the distribution of its values
        return plot_distribution(model, label)
    # Spectral blob: delegate to the full fit plot
    return plot_fit(sampler, modelidx=blobidx, last_step=last_step,
                    label=label, **kwargs)
def plot_fit(sampler, modelidx=0,label=None,xlabel=None,ylabel=None,confs=[3, 1, 0.5],
n_samples=None, sed=True, figure=None, residualCI=True, plotdata=None,
e_unit=None, data_color='r', **kwargs):
"""
Plot data with fit confidence regions.
Additional ``kwargs`` are passed to `plot_CI`.
Parameters
----------
sampler : `emcee.EnsembleSampler`
Sampler with a stored chain.
modelidx : int, optional
Model index to plot.
label : str, optional
Label for the title of the plot.
xlabel : str, optional
Label for the ``x`` axis of the plot.
ylabel : str, optional
Label for the ``y`` axis of the plot.
sed : bool, optional
Whether to plot SED or differential spectrum.
confs : list, optional
List of confidence levels (in sigma) to use for generating the
confidence intervals. Default is ``[3,1,0.5]``
n_samples : int, optional
If not ``None``, number of sample models to plot. ``n_samples=100`` is a
good starting point to see the behaviour of the model. Default is
``None``: plot confidence bands instead of samples.
figure : `matplotlib.figure.Figure`, optional
`matplotlib` figure to plot on. If omitted a new one will be generated.
residualCI : bool, optional
Whether to plot the confidence interval bands in the residuals subplot.
plotdata : bool, optional
Wheter to plot data on top of model confidence intervals. Default is
True if the physical types of the data and the model match.
e_unit : `~astropy.units.Unit`
Units for the energy axis of the plot. The default is to use the units
of the energy array of the observed data.
data_color : str
Matplotlib color for the data points.
"""
import matplotlib.pyplot as plt
# Plot everything in serif to match math exponents
plt.rc('font', family='serif')
ML, MLp, MLerr, model_ML = find_ML(sampler, modelidx)
infostr = 'Maximum log probability: {0:.3g}\n'.format(ML)
infostr += 'Maximum Likelihood values:\n'
maxlen = np.max([len(ilabel) for ilabel in sampler.labels])
vartemplate = '{{2:>{0}}}: {{0:>8.3g}} +/- {{1:<8.3g}}\n'.format(maxlen)
for p, v, ilabel in zip(MLp, MLerr, sampler.labels):
infostr += vartemplate.format(p, v, ilabel)
# log.info(infostr)
data = sampler.data
ul = data['ul']
notul = -ul
plotresiduals = False
if modelidx == 0 and plotdata is None:
plotdata = True
if confs is not None:
plotresiduals = True
elif plotdata is None:
plotdata = False
if plotdata:
# Check that physical types of data and model match
model_pt = _get_model_pt(sampler, modelidx)
data_pt = data['flux'].unit.physical_type
if data_pt != model_pt:
log.info('Model physical type ({0}) and spectral data physical'
' type ({1}) do not match for blob {2}! Not plotting data.'.format(model_pt, data_pt, modelidx))
plotdata = False
if figure == None:
f = plt.figure()
else:
f = figure
if plotdata and plotresiduals:
ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=3)
ax2 = plt.subplot2grid((4, 1), (3, 0), sharex=ax1)
for subp in [ax1, ax2]:
f.add_subplot(subp)
else:
ax1 = f.add_subplot(111)
if e_unit is None:
e_unit = data['energy'].unit
if confs is not None and n_samples is None:
plot_CI(ax1, sampler,modelidx,sed=sed,confs=confs,e_unit=e_unit,**kwargs)
elif n_samples is not None:
plot_samples(ax1, sampler, modelidx, sed=sed, n_samples=n_samples, e_unit=e_unit)
else:
residualCI = False
if plotdata:
_plot_data_to_ax(data, ax1, e_unit=e_unit, sed=sed,
data_color=data_color, ylabel=ylabel)
if plotresiduals:
if len(model_ML) != len(data['energy']):
from scipy.interpolate import interp1d
modelfunc = interp1d(model_ML[0].to(e_unit).value, model_ML[1].value)
difference = data['flux'][notul].value-modelfunc(data['energy'][notul])
difference *= data['flux'].unit
else:
difference = data['flux'][notul]-model_ML[1][notul]
dflux = np.mean(data['dflux'][:, notul], axis=0)
ax2.errorbar(data['energy'][notul].to(e_unit).value,
(difference / dflux).decompose().value,
yerr=(dflux / dflux).decompose().value,
xerr=data['dene'][:, notul].to(e_unit).value,
zorder=100, marker='o', ls='', elinewidth=2, capsize=0,
mec='w', mew=0, ms=6, color=data_color)
ax2.axhline(0, c='k', lw=2, ls='--')
from matplotlib.ticker import MaxNLocator
ax2.yaxis.set_major_locator(MaxNLocator(integer='True', prune='upper'))
ax2.set_ylabel(r'$\Delta\sigma$')
if len(model_ML) == len(data['energy']) and residualCI:
modelx, CI = calc_CI(sampler, modelidx=modelidx,
confs=confs, **kwargs)
for (ymin, ymax), conf in zip(CI, confs):
if conf < 100:
color = np.log(conf)/np.log(20)+0.4
ax2.fill_between(modelx[notul].to(e_unit).value,
((ymax[notul]-model_ML[1][notul])
/ dflux).decompose().value,
((ymin[notul]-model_ML[1][notul])
/ dflux).decompose().value,
lw=0., color='{0}'.format(color), alpha=0.6, zorder=-10)
# ax.plot(modelx,model_ML,c='k',lw=3,zorder=-5)
ax1.set_xscale('log')
ax1.set_yscale('log')
if plotdata and plotresiduals:
ax2.set_xscale('log')
for tl in ax1.get_xticklabels():
tl.set_visible(False)
else:
if sed:
ndecades = 10
else:
ndecades = 20
# restrict y axis to ndecades to avoid autoscaling deep exponentials
xmin, xmax, ymin, ymax = ax1.axis()
ymin = max(ymin, ymax/10**ndecades)
ax1.set_ylim(bottom=ymin)
# scale x axis to largest model_ML x point within ndecades decades of
# maximum
f_unit, sedf = sed_conversion(model_ML[0], model_ML[1].unit, sed)
hi = np.where((model_ML[1]*sedf).to(f_unit).value > ymin)
xmax = np.max(model_ML[0][hi])
ax1.set_xlim(right=10 ** np.ceil(np.log10(xmax.to(e_unit).value)))
if confs is not None:
ax1.text(0.05, 0.05, infostr, ha='left', va='bottom',
transform=ax1.transAxes, family='monospace')
if label is not None:
ax1.set_title(label)
if plotdata and plotresiduals:
xlaxis = ax2
else:
xlaxis = ax1
if xlabel is None:
xlaxis.set_xlabel('Energy [{0}]'.format(_latex_unit(e_unit)))
else:
xlaxis.set_xlabel(xlabel)
f.subplots_adjust(hspace=0)
return f
def plot_data(input_data, xlabel=None, ylabel=None,
        sed=True, figure=None, e_unit=None, data_color='r', **kwargs):
    """
    Plot spectral data.

    Additional ``kwargs`` are passed to `plot_fit`, except ``confs`` and
    ``plotdata``.

    Parameters
    ----------
    input_data : `emcee.EnsembleSampler`, `astropy.table.Table`, or `dict`
        Spectral data to plot. Can be given as a data table, a dict generated
        with `validate_data_table` or a `emcee.EnsembleSampler` with a data
        property.
    xlabel : str, optional
        Label for the ``x`` axis of the plot.
    ylabel : str, optional
        Label for the ``y`` axis of the plot.
    sed : bool, optional
        Whether to plot SED or differential spectrum.
    figure : `matplotlib.figure.Figure`, optional
        `matplotlib` figure to plot on. If omitted a new one will be generated.
    e_unit : `astropy.unit.Unit`, optional
        Units for energy axis. Defaults to those of the data.
    data_color : str
        Matplotlib color for the data points.

    Returns
    -------
    figure : `matplotlib.figure.Figure` or None
        The figure plotted on, or None if the input format was not recognized.
    """
    import matplotlib.pyplot as plt
    # Plot everything in serif to match math exponents
    plt.rc('font', family='serif')

    # Normalize the accepted input formats to a validated data dict.
    if isinstance(input_data, table.Table):
        data = validate_data_table(input_data)
    elif hasattr(input_data, 'data'):
        # e.g. an emcee.EnsembleSampler carrying a ``data`` attribute
        data = input_data.data
    elif isinstance(input_data, dict) and 'energy' in input_data:
        data = input_data
    else:
        # fixed typo in the warning message ("not know" -> "not known")
        log.warning('input_data format not known, no plotting data!')
        return None

    # identity comparison with None instead of "== None"
    if figure is None:
        f = plt.figure()
    else:
        f = figure

    if e_unit is None:
        e_unit = data['energy'].unit

    ax1 = f.add_subplot(111)
    _plot_data_to_ax(data, ax1, e_unit=e_unit, sed=sed, data_color=data_color,
            ylabel=ylabel)

    if xlabel is None:
        ax1.set_xlabel('Energy [{0}]'.format(_latex_unit(e_unit)))
    else:
        ax1.set_xlabel(xlabel)

    return f
def _plot_data_to_ax(data, ax1, e_unit=None, sed=True, data_color='r',
        ylabel=None):
    """Plot data errorbars and upper limits onto ``ax1``.

    The X label is left to `plot_data` and `plot_fit` because it depends on
    whether residuals are plotted.

    Parameters
    ----------
    data : dict
        Validated data dict with ``energy``, ``dene``, ``flux``, ``dflux``
        and ``ul`` entries.
    ax1 : `matplotlib.axes.Axes`
        Axes to draw on.
    e_unit : `~astropy.units.Unit`, optional
        Unit for the energy axis; defaults to that of ``data['energy']``.
    sed : bool, optional
        Whether to plot SED or differential spectrum.
    data_color : str, optional
        Matplotlib color for the data points.
    ylabel : str, optional
        Custom Y-axis label; a default is derived from ``sed`` if omitted.
    """
    if e_unit is None:
        e_unit = data['energy'].unit

    def plot_ulims(ax, x, y, xerr):
        """
        Plot upper limits as arrows with cap at value of upper limit.
        """
        ax.errorbar(x, y, xerr=xerr, ls='',
                color=data_color, elinewidth=2, capsize=0)
        ax.errorbar(x, 0.75 * y, yerr=0.25*y, ls='', lolims=True,
                color=data_color, elinewidth=2, capsize=5, zorder=10)

    f_unit, sedf = sed_conversion(data['energy'], data['flux'].unit, sed)

    ul = data['ul']
    # Use bitwise NOT to invert the boolean mask: unary minus ("-ul") on
    # boolean arrays is an error in modern NumPy.
    notul = ~ul

    # Hack to show y errors compatible with 0 in loglog plot
    yerr = data['dflux'][:, notul]
    y = data['flux'][notul].to(yerr.unit)
    bad_err = np.where((y-yerr[0]) <= 0.)
    yerr[0][bad_err] = y[bad_err]*(1.-1e-7)

    ax1.errorbar(data['energy'][notul].to(e_unit).value,
            (data['flux'][notul] * sedf[notul]).to(f_unit).value,
            yerr=(yerr * sedf[notul]).to(f_unit).value,
            xerr=(data['dene'][:, notul]).to(e_unit).value,
            zorder=100, marker='o', ls='', elinewidth=2, capsize=0,
            mec='w', mew=0, ms=6, color=data_color)

    if np.any(ul):
        plot_ulims(ax1, data['energy'][ul].to(e_unit).value,
                (data['flux'][ul] * sedf[ul]).to(f_unit).value,
                (data['dene'][:, ul]).to(e_unit).value)

    ax1.set_xscale('log')
    ax1.set_yscale('log')
    xmin = 10 ** np.floor(np.log10(np.min(data['energy'] - data['dene'][0]).value))
    xmax = 10 ** np.ceil(np.log10(np.max(data['energy'] + data['dene'][1]).value))
    ax1.set_xlim(xmin, xmax)

    # avoid autoscaling to errorbars to 0
    if np.any(data['dflux'][:, notul][0] >= data['flux'][notul]):
        elo = ((data['flux'][notul] * sedf[notul]).to(f_unit).value -
                (data['dflux'][0][notul] * sedf[notul]).to(f_unit).value)
        gooderr = np.where(data['dflux'][0][notul] < data['flux'][notul])
        ymin = 10 ** np.floor(np.log10(np.min(elo[gooderr])))
        ax1.set_ylim(bottom=ymin)

    if ylabel is None:
        if sed:
            ax1.set_ylabel(r'$E^2\mathsf{{d}}N/\mathsf{{d}}E$'
                    ' [{0}]'.format(_latex_unit(u.Unit(f_unit))))
        else:
            ax1.set_ylabel(r'$\mathsf{{d}}N/\mathsf{{d}}E$'
                    ' [{0}]'.format(_latex_unit(u.Unit(f_unit))))
    else:
        ax1.set_ylabel(ylabel)
def plot_distribution(samples, label):
    """Plot the posterior distribution of a scalar parameter.

    Shows a histogram with a Gaussian KDE overlay, the median, the
    16th--84th percentile interval, and a text summary of the statistics.

    Parameters
    ----------
    samples : array-like or `~astropy.units.Quantity`
        Posterior samples of the parameter.
    label : str
        Human-readable parameter name used for labels and the title.

    Returns
    -------
    figure : `matplotlib.figure.Figure`
    """
    from scipy import stats
    import matplotlib.pyplot as plt

    quant = [16, 50, 84]
    quantiles = dict(six.moves.zip(quant, np.percentile(samples, quant)))
    std = np.std(samples)

    if isinstance(samples[0], u.Quantity):
        unit = samples[0].unit
    else:
        unit = ''
    if isinstance(std, u.Quantity):
        std = std.value

    dist_props = '{label} distribution properties:\n \
- median: ${median}$ {unit}, std: ${std}$ {unit}\n \
- Median with uncertainties based on \n \
the 16th and 84th percentiles ($\sim$1$\sigma$):\n\
{label} = ${{{median}}}^{{+{uncs[1]}}}_{{-{uncs[0]}}}$ {unit}'.format(
        label=label, median=_latex_float(quantiles[50]),
        uncs=(_latex_float(quantiles[50] - quantiles[16]),
              _latex_float(quantiles[84] - quantiles[50])), std=_latex_float(std), unit=unit)

    f = plt.figure()
    f.text(0.1, 0.23, dist_props, ha='left', va='top')
    ax = f.add_subplot(111)
    f.subplots_adjust(bottom=0.35)

    histnbins = min(max(25, int(len(samples)/100.)), 100)
    xlabel = label
    # NOTE(review): ``normed`` was removed from newer matplotlib in favor of
    # ``density`` -- kept as-is for compatibility with the file's era.
    n, x, patch = ax.hist(samples, histnbins, histtype='stepfilled', color='#CC0000', lw=0, normed=1)

    # Strip units before handing the samples to the KDE.  Fixed: the
    # unitless branch assigned to a misspelled name (``samples_nunit``),
    # which raised NameError below for plain arrays.
    if isinstance(samples, u.Quantity):
        samples_nounit = samples.value
    else:
        samples_nounit = samples

    kde = stats.kde.gaussian_kde(samples_nounit)
    ax.plot(x, kde(x), c='k', label='KDE')

    ax.axvline(quantiles[50], ls='--', c='k', alpha=0.5, lw=2,
            label='50% quantile')
    ax.axvspan(quantiles[16], quantiles[84], color='0.5', alpha=0.25,
            label='68% CI')

    # Plain loop instead of a side-effect list comprehension.
    for tl in ax.get_xticklabels():
        tl.set_rotation(45)

    if unit != '':
        xlabel += ' [{0}]'.format(unit)
    ax.set_xlabel(xlabel)
    ax.xaxis.set_label_coords(0.5, -0.1)
    ax.set_title('posterior distribution of {0}'.format(label))
    ax.set_ylim(top=n.max() * 1.05)
    return f
|
import sqlite3
import re
class Query(object):
    """Lazily-built SELECT query for a lazy_record model class.

    Instances accumulate restrictions (``where``) and joins (``joins``)
    and only hit the database when iterated or when ``first()`` is called.
    """

    # Shared sqlite3 connection; set via Query.connect_db().
    db = None
    # When True, every SQL command is echoed before execution.
    echo_commands = False

    def __init__(self, model):
        self.model = model
        self.where_query = {}
        self.joiners = []
        # Bare attribute names: these become the keyword arguments passed
        # to model.from_dict() when records are materialized.
        self.attributes = ["id", "created_at"] + \
            list(self.model.__attributes__)
        # Table-qualified column names for the SELECT clause so joined
        # tables cannot shadow our columns.  (Previously the qualified
        # names were also used as from_dict() keywords, producing keys
        # like "books.id" instead of "id".)
        self.db_attributes = [
            "{table}.{attr}".format(
                table=Query.table_name(self.model),
                attr=attr
            ) for attr in self.attributes
        ]

    def all(self):
        """Return the (lazy) query for all matching records."""
        return self

    def first(self):
        """Return the first matching record, or None if there is none."""
        record = self._do_query().fetchone()
        # fetchone() yields None on an empty result set; previously this
        # fell through to zip() and crashed.
        if record is None:
            return None
        args = dict(zip(self.attributes, record))
        return self.model.from_dict(**args)

    def where(self, **restrictions):
        """Add equality restrictions; returns self for chaining."""
        for attr, value in restrictions.items():
            self.where_query[attr] = value
        return self

    def joins(self, table):
        """Register an inner join with ``table``; returns self for chaining."""
        self.joiners.insert(0, table)
        return self

    def _build_where(self):
        """Flatten the restriction dict into (table, attr, value) triples."""
        def builder(where_dict, default_table):
            for key, value in where_dict.items():
                if type(value) is dict:
                    # Nested dict: the key names the table to restrict on.
                    for entry in builder(value, key):
                        yield entry
                else:
                    yield (default_table, key, value)
        return list(builder(self.where_query, Query.table_name(self.model)))

    def _do_query(self):
        """Build and execute the SELECT; returns the sqlite3 cursor."""
        if self.where_query:
            ordered_items = self._build_where()
            where_clause = "where {query}".format(
                query=" and ".join("{table}.{attr} == ?".format(
                    table=pair[0],
                    attr=pair[1]
                ) for pair in ordered_items)
            )
        else:
            ordered_items = []
            where_clause = ""
        if self.joiners:
            # currently only supports 1 deep
            # looking for what multiple deep would mean
            join_clause = ("inner join {joined_table} on "
                           "{joined_table}.{our_record}_id == "
                           "{our_table}.id ").format(
                joined_table=self.joiners[0],
                our_record=Query.table_name(self.model)[:-1],
                our_table=Query.table_name(self.model)
            )
        else:
            join_clause = ""
        cmd = 'select {attrs} from {table} {join_clause}{where_clause}'.format(
            table=Query.table_name(self.model),
            attrs=", ".join(self.db_attributes),
            where_clause=where_clause,
            join_clause=join_clause
        ).rstrip()
        if Query.echo_commands:
            # Single pre-formatted argument keeps this working under both
            # the Python 2 print statement and Python 3's print().
            print("SQL: {0} {1}".format(
                cmd, tuple(pair[2] for pair in ordered_items)))
        return Query.db.execute(cmd, [pair[2] for pair in ordered_items])

    def __iter__(self):
        result = self._do_query().fetchall()
        for record in result:
            args = dict(zip(self.attributes, record))
            yield self.model.from_dict(**args)

    def __repr__(self):
        return "<{name}({model}):{records}>".format(
            name="lazy_record.Query",
            model=self.model.__name__,
            records=list(self)
        )

    @staticmethod
    def table_name(model):
        """CamelCase -> snake_case pluralization, e.g. BookNote -> book_notes."""
        underscore_regex = re.compile(
            '((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
        return underscore_regex.sub(r'_\1', model.__name__).lower() + "s"

    @classmethod
    def connect_db(cls, database=":memory:"):
        """Open (or replace) the shared sqlite3 connection."""
        cls.db = sqlite3.connect(database,
                                 detect_types=sqlite3.PARSE_DECLTYPES)

    class __metaclass__(type):
        # Python 2 metaclass hook; customizes repr(Query) itself.
        def __repr__(self):
            return "<class 'lazy_record.Query'>"
updated query to better support Base
import sqlite3
import re
class Query(object):
    """Lazily-built SELECT query for a lazy_record model class.

    Instances accumulate restrictions (``where``) and joins (``joins``)
    and only hit the database when iterated or when ``first()`` is called.
    """

    # Shared sqlite3 connection; set via Query.connect_db().
    db = None
    # When True, every SQL command is echoed before execution.
    echo_commands = False

    def __init__(self, model):
        self.model = model
        self.where_query = {}
        self.joiners = []
        # Bare attribute names: keyword arguments for model.from_dict().
        self.attributes = ["id", "created_at"] + \
            list(self.model.__attributes__)
        # Table-qualified column names for the SELECT clause so joined
        # tables cannot shadow our columns.
        self.db_attributes = [
            "{table}.{attr}".format(
                table=Query.table_name(self.model),
                attr=attr
            ) for attr in self.attributes
        ]

    def all(self):
        """Return the (lazy) query for all matching records."""
        return self

    def first(self):
        """Return the first matching record, or None if there is none."""
        record = self._do_query().fetchone()
        if not record:
            return None
        args = dict(zip(self.attributes, record))
        return self.model.from_dict(**args)

    def where(self, **restrictions):
        """Add equality restrictions; returns self for chaining."""
        for attr, value in restrictions.items():
            self.where_query[attr] = value
        return self

    def joins(self, table):
        """Register an inner join with ``table``; returns self for chaining."""
        self.joiners.insert(0, table)
        return self

    def _build_where(self):
        """Flatten the restriction dict into (table, attr, value) triples."""
        def builder(where_dict, default_table):
            for key, value in where_dict.items():
                if type(value) is dict:
                    # Nested dict: the key names the table to restrict on.
                    for entry in builder(value, key):
                        yield entry
                else:
                    yield (default_table, key, value)
        return list(builder(self.where_query, Query.table_name(self.model)))

    def _do_query(self):
        """Build and execute the SELECT; returns the sqlite3 cursor."""
        if self.where_query:
            ordered_items = self._build_where()
            where_clause = "where {query}".format(
                query=" and ".join("{table}.{attr} == ?".format(
                    table=pair[0],
                    attr=pair[1]
                ) for pair in ordered_items)
            )
        else:
            ordered_items = []
            where_clause = ""
        if self.joiners:
            # currently only supports 1 deep
            # looking for what multiple deep would mean
            join_clause = ("inner join {joined_table} on "
                           "{joined_table}.{our_record}_id == "
                           "{our_table}.id ").format(
                joined_table=self.joiners[0],
                our_record=Query.table_name(self.model)[:-1],
                our_table=Query.table_name(self.model)
            )
        else:
            join_clause = ""
        cmd = 'select {attrs} from {table} {join_clause}{where_clause}'.format(
            table=Query.table_name(self.model),
            attrs=", ".join(self.db_attributes),
            where_clause=where_clause,
            join_clause=join_clause
        ).rstrip()
        if Query.echo_commands:
            # Single pre-formatted argument keeps this working under both
            # the Python 2 print statement and Python 3's print() --
            # previously a Python-2-only print statement.
            print("SQL: {0} {1}".format(
                cmd, tuple(pair[2] for pair in ordered_items)))
        return Query.db.execute(cmd, [pair[2] for pair in ordered_items])

    def __iter__(self):
        result = self._do_query().fetchall()
        for record in result:
            args = dict(zip(self.attributes, record))
            yield self.model.from_dict(**args)

    def __repr__(self):
        return "<{name} {records}>".format(
            name="lazy_record.Query",
            records=list(self)
        )

    @staticmethod
    def table_name(model):
        """CamelCase -> snake_case pluralization, e.g. BookNote -> book_notes."""
        underscore_regex = re.compile(
            '((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
        return underscore_regex.sub(r'_\1', model.__name__).lower() + "s"

    @classmethod
    def connect_db(cls, database=":memory:"):
        """Open (or replace) the shared sqlite3 connection.

        First parameter renamed from ``Query`` to the conventional ``cls``
        to avoid shadowing the class name.
        """
        cls.db = sqlite3.connect(database,
                                 detect_types=sqlite3.PARSE_DECLTYPES)

    class __metaclass__(type):
        # Python 2 metaclass hook; customizes repr(Query) itself.
        def __repr__(self):
            return "<class 'lazy_record.Query'>"
|
"""
Various Version Control System management abstraction layer for Python.
"""
# Version info as a tuple: (major, minor, patch, tag).
VERSION = (0, 1, 8, 'dev')
# Full dotted version string, e.g. "0.1.8.dev".
__version__ = '.'.join(str(part) for part in VERSION[:4])
__all__ = [
'get_version', 'get_repo', 'get_backend', 'BACKENDS',
'VCSError', 'RepositoryError', 'ChangesetError']
from vcs.backends import get_repo, get_backend, BACKENDS
from vcs.exceptions import VCSError, RepositoryError, ChangesetError
def get_version():
    """Return only the numeric version components as a dotted string."""
    return '.'.join(map(str, VERSION[:3]))
Removed 'dev' from version
"""
Various Version Control System management abstraction layer for Python.
"""
# Version info as a tuple: (major, minor, patch).  The 'dev' tag was removed.
VERSION = (0, 1, 8)
# Full dotted version string.  Joins all components -- the old "[:4]" slice
# was a leftover from when VERSION carried a fourth 'dev' element.
__version__ = '.'.join(str(each) for each in VERSION)
__all__ = [
'get_version', 'get_repo', 'get_backend', 'BACKENDS',
'VCSError', 'RepositoryError', 'ChangesetError']
from vcs.backends import get_repo, get_backend, BACKENDS
from vcs.exceptions import VCSError, RepositoryError, ChangesetError
def get_version():
    """Return only the numeric version components as a dotted string."""
    digits = (str(part) for part in VERSION[:3])
    return '.'.join(digits)
|
"""
byceps.blueprints.admin.shop.email.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
from decimal import Decimal
from flask import abort
from .....config import ConfigurationError
from .....database import generate_uuid
from .....services.party import service as party_service
from .....services.shop.order.email import service as email_service
from .....services.shop.order.transfer.models import Order, PaymentMethod, \
PaymentState
from .....services.shop.sequence import service as sequence_service
from .....services.shop.shop import service as shop_service
from .....util.framework.blueprint import create_blueprint
from .....util.framework.templating import templated
from .....util.views import textified
from ....authorization.decorators import permission_required
from ....authorization.registry import permission_registry
from ..shop.authorization import ShopPermission
blueprint = create_blueprint('shop_email_admin', __name__)
permission_registry.register_enum(ShopPermission)
@blueprint.route('/for_shop/<shop_id>')
@permission_required(ShopPermission.view)
@templated
def view_for_shop(shop_id):
    """Show e-mail examples."""
    shop = _get_shop_or_404(shop_id)
    party = party_service.find_party(shop.party_id)

    return {
        'shop': shop,
        'party': party,
    }
@blueprint.route('/for_shop/<shop_id>/example/order_placed')
@permission_required(ShopPermission.view)
@textified
def view_example_order_placed(shop_id):
    """Show example of order placed e-mail."""
    shop = _get_shop_or_404(shop_id)
    party = party_service.find_party(shop.party_id)

    order = _build_order(shop.id, PaymentState.open, is_open=True)
    email_data = _build_email_data(order, party)

    message = email_service._assemble_email_for_incoming_order_to_orderer(
        email_data)

    yield from _render_message(message)
@blueprint.route('/for_shop/<shop_id>/example/order_paid')
@permission_required(ShopPermission.view)
@textified
def view_example_order_paid(shop_id):
    """Show example of order paid e-mail."""
    shop = _get_shop_or_404(shop_id)
    party = party_service.find_party(shop.party_id)

    order = _build_order(shop.id, PaymentState.paid, is_paid=True)
    email_data = _build_email_data(order, party)

    message = email_service._assemble_email_for_paid_order_to_orderer(
        email_data)

    yield from _render_message(message)
@blueprint.route('/for_shop/<shop_id>/example/order_canceled')
@permission_required(ShopPermission.view)
@textified
def view_example_order_canceled(shop_id):
    """Show example of order canceled e-mail."""
    shop = _get_shop_or_404(shop_id)
    party = party_service.find_party(shop.party_id)

    reason = 'Kein fristgerechter Geldeingang feststellbar'
    order = _build_order(shop.id, PaymentState.canceled_before_paid,
                         is_canceled=True, cancelation_reason=reason)
    email_data = _build_email_data(order, party)

    message = email_service._assemble_email_for_canceled_order_to_orderer(
        email_data)

    yield from _render_message(message)
def _get_shop_or_404(shop_id):
    """Look up the shop; abort the request with 404 if it does not exist."""
    shop = shop_service.find_shop(shop_id)

    if shop is not None:
        return shop

    abort(404)
def _build_order(shop_id, payment_state, *, is_open=False, is_canceled=False,
                 is_paid=False, cancelation_reason=None):
    """Create an example order (not persisted) for e-mail previews."""
    number_sequence = sequence_service.find_order_number_sequence(shop_id)
    order_number = sequence_service.format_order_number(number_sequence)

    return Order(
        generate_uuid(),              # order ID
        shop_id,
        order_number,
        datetime.utcnow(),            # created_at
        None,                         # placed_by_id
        'Bella-Bernadine',            # first names
        'Ballerwurm',                 # last name
        None,                         # address
        Decimal('42.95'),             # total amount
        [],                           # items
        PaymentMethod.bank_transfer,  # payment method
        payment_state,
        is_open,
        is_canceled,
        is_paid,
        False,  # is_invoiced
        False,  # is_shipping_required
        False,  # is_shipped
        cancelation_reason,
    )
def _build_email_data(order, party):
    """Wrap the order in the data structure the e-mail service expects."""
    orderer_screen_name = 'Besteller'
    orderer_email_address = 'besteller@example.com'

    return email_service.OrderEmailData(
        order=order,
        email_config_id=party.brand_id,
        orderer_screen_name=orderer_screen_name,
        orderer_email_address=orderer_email_address,
    )
def _render_message(message):
if not message.sender:
raise ConfigurationError(
'No e-mail sender address configured for message.')
yield f'From: {message.sender}\n'
yield f'To: {message.recipients}\n'
yield f'Subject: {message.subject}\n'
yield f'\n\n{message.body}\n'
Clarify service name on import to avoid confusion, import struct directly
"""
byceps.blueprints.admin.shop.email.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
from decimal import Decimal
from flask import abort
from .....config import ConfigurationError
from .....database import generate_uuid
from .....services.party import service as party_service
from .....services.shop.order.email import service as shop_order_email_service
from .....services.shop.order.email.service import OrderEmailData
from .....services.shop.order.transfer.models import Order, PaymentMethod, \
PaymentState
from .....services.shop.sequence import service as sequence_service
from .....services.shop.shop import service as shop_service
from .....util.framework.blueprint import create_blueprint
from .....util.framework.templating import templated
from .....util.views import textified
from ....authorization.decorators import permission_required
from ....authorization.registry import permission_registry
from ..shop.authorization import ShopPermission
blueprint = create_blueprint('shop_email_admin', __name__)
permission_registry.register_enum(ShopPermission)
@blueprint.route('/for_shop/<shop_id>')
@permission_required(ShopPermission.view)
@templated
def view_for_shop(shop_id):
    """Show e-mail examples."""
    shop = _get_shop_or_404(shop_id)

    return {
        'party': party_service.find_party(shop.party_id),
        'shop': shop,
    }
@blueprint.route('/for_shop/<shop_id>/example/order_placed')
@permission_required(ShopPermission.view)
@textified
def view_example_order_placed(shop_id):
    """Show example of order placed e-mail."""
    shop = _get_shop_or_404(shop_id)
    party = party_service.find_party(shop.party_id)

    order = _build_order(shop.id, PaymentState.open, is_open=True)
    data = _build_email_data(order, party)

    message = (shop_order_email_service
               ._assemble_email_for_incoming_order_to_orderer(data))

    yield from _render_message(message)
@blueprint.route('/for_shop/<shop_id>/example/order_paid')
@permission_required(ShopPermission.view)
@textified
def view_example_order_paid(shop_id):
    """Show example of order paid e-mail."""
    shop = _get_shop_or_404(shop_id)
    party = party_service.find_party(shop.party_id)

    order = _build_order(shop.id, PaymentState.paid, is_paid=True)
    data = _build_email_data(order, party)

    message = (shop_order_email_service
               ._assemble_email_for_paid_order_to_orderer(data))

    yield from _render_message(message)
@blueprint.route('/for_shop/<shop_id>/example/order_canceled')
@permission_required(ShopPermission.view)
@textified
def view_example_order_canceled(shop_id):
    """Show example of order canceled e-mail."""
    shop = _get_shop_or_404(shop_id)
    party = party_service.find_party(shop.party_id)

    order = _build_order(
        shop.id, PaymentState.canceled_before_paid, is_canceled=True,
        cancelation_reason='Kein fristgerechter Geldeingang feststellbar')
    data = _build_email_data(order, party)

    message = (shop_order_email_service
               ._assemble_email_for_canceled_order_to_orderer(data))

    yield from _render_message(message)
def _get_shop_or_404(shop_id):
    """Look up the shop; abort the request with 404 if it does not exist."""
    shop = shop_service.find_shop(shop_id)

    if shop is not None:
        return shop

    abort(404)
def _build_order(shop_id, payment_state, *, is_open=False, is_canceled=False,
                 is_paid=False, cancelation_reason=None):
    """Create an example order (not persisted) for e-mail previews."""
    number_sequence = sequence_service.find_order_number_sequence(shop_id)
    order_number = sequence_service.format_order_number(number_sequence)

    return Order(
        generate_uuid(),              # order ID
        shop_id,
        order_number,
        datetime.utcnow(),            # created_at
        None,                         # placed_by_id
        'Bella-Bernadine',            # first names
        'Ballerwurm',                 # last name
        None,                         # address
        Decimal('42.95'),             # total amount
        [],                           # items
        PaymentMethod.bank_transfer,  # payment method
        payment_state,
        is_open,
        is_canceled,
        is_paid,
        False,  # is_invoiced
        False,  # is_shipping_required
        False,  # is_shipped
        cancelation_reason,
    )
def _build_email_data(order, party):
    """Wrap the order in the data structure the e-mail service expects."""
    orderer = {
        'orderer_screen_name': 'Besteller',
        'orderer_email_address': 'besteller@example.com',
    }

    return OrderEmailData(
        order=order,
        email_config_id=party.brand_id,
        **orderer,
    )
def _render_message(message):
if not message.sender:
raise ConfigurationError(
'No e-mail sender address configured for message.')
yield f'From: {message.sender}\n'
yield f'To: {message.recipients}\n'
yield f'Subject: {message.subject}\n'
yield f'\n\n{message.body}\n'
|
# -*- coding: utf-8 -*-
'''
Tests for the MySQL states
'''
# Import python libs
import logging
# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import (
destructiveTest,
ensure_in_syspath
)
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
from salt.modules import mysql as mysqlmod
log = logging.getLogger(__name__)
# Probe for the MySQLdb bindings; NO_MYSQL feeds the @skipIf decorators on
# the test classes below so the whole module is skipped gracefully when the
# bindings are missing.
NO_MYSQL = False
try:
    import MySQLdb
except Exception:
    NO_MYSQL = True
@skipIf(
    NO_MYSQL,
    'Please install MySQL bindings and a MySQL Server before running'
    'MySQL integration tests.'
)
class MysqlDatabaseStateTest(integration.ModuleCase,
                             integration.SaltReturnAssertsMixIn):
    '''
    Validate the mysql_database state
    '''

    # credentials used for the root connection in every test
    user = 'root'
    password = 'poney'

    @destructiveTest
    def setUp(self):
        '''
        Test presence of MySQL server, enforce a root password
        '''
        super(MysqlDatabaseStateTest, self).setUp()
        NO_MYSQL_SERVER = True
        # now ensure we know the mysql root password
        # one of theses two at least should work
        ret1 = self.run_state(
            'cmd.run',
            name='mysqladmin --host="localhost" -u '
                 + self.user
                 + ' flush-privileges password "'
                 + self.password
                 + '"'
        )
        ret2 = self.run_state(
            'cmd.run',
            name='mysqladmin --host="localhost" -u '
                 + self.user
                 + ' --password="'
                 + self.password
                 + '" flush-privileges password "'
                 + self.password
                 + '"'
        )
        key, value = ret2.popitem()
        if value['result']:
            NO_MYSQL_SERVER = False
        else:
            self.skipTest('No MySQL Server running, or no root access on it.')

    def _test_database(self, db_name, second_db_name, test_conn, **kwargs):
        '''
        Create db two times, test conn, remove it two times
        '''
        # In case of...
        ret = self.run_state('mysql_database.absent',
                             name=db_name,
                             **kwargs)
        ret = self.run_state('mysql_database.present',
                             name=db_name,
                             **kwargs)
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'The database ' + db_name + ' has been created',
            ret
        )
        # 2nd run
        ret = self.run_state('mysql_database.present',
                             name=second_db_name,
                             **kwargs)
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Database ' + db_name + ' is already present',
            ret
        )
        if test_conn:
            # test root connection
            ret = self.run_function(
                'mysql.query',
                database=db_name,
                query='SELECT 1',
                **kwargs
            )
            # idiomatic membership test ("'x' not in d" vs "not 'x' in d")
            if not isinstance(ret, dict) or 'results' not in ret:
                raise AssertionError(
                    ('Unexpected result while testing connection'
                     ' on db {0!r}: {1}').format(
                        db_name,
                        repr(ret)
                    )
                )
            self.assertEqual([['1']], ret['results'])
        # Now removing databases
        kwargs.pop('character_set')
        kwargs.pop('collate')
        ret = self.run_state('mysql_database.absent',
                             name=db_name,
                             **kwargs)
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Database ' + db_name + ' has been removed',
            ret
        )
        # 2nd run
        ret = self.run_state('mysql_database.absent',
                             name=second_db_name,
                             **kwargs)
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Database ' + db_name + ' is not present, so it cannot be removed',
            ret
        )
        self.assertSaltStateChangesEqual(ret, {})

    @destructiveTest
    def test_present_absent(self):
        '''
        mysql_database.present
        '''
        self._test_database(
            'testdb1',
            'testdb1',
            test_conn=True,
            character_set='utf8',
            collate='utf8_general_ci',
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8'
        )

    # TODO: test with variations on collate and charset, check for db alter
    # once it will be done in mysql_database.present state
    @destructiveTest
    def test_present_absent_fuzzy(self):
        '''
        mysql_database.present with utf-8 andf fuzzy db name
        '''
        # this is : ":() ;,?@=`&'\
        dbname_fuzzy = '":() ;,?@=`&/\'\\'
        # \xe6\xa8\x99\ = \u6a19 = 標
        # this is : "();,?:@=`&/標'\
        dbname_utf8 = '"();,?@=`&//\xe6\xa8\x99\'\\'
        dbname_unicode = u'"();,?@=`&//\u6a19\'\\'

        self._test_database(
            dbname_fuzzy,
            dbname_fuzzy,
            test_conn=True,
            character_set='utf8',
            collate='utf8_general_ci',
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8'
        )
        # FIXME: MySQLdb bugs on dbnames with utf-8?
        self._test_database(
            dbname_utf8,
            dbname_unicode,
            test_conn=False,
            character_set='utf8',
            collate='utf8_general_ci',
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8',
            #saltenv={"LC_ALL": "en_US.utf8"}
        )

    @destructiveTest
    @skipIf(True, 'This tests needs issue #8947 to be fixed first')
    def test_utf8_from_sls_file(self):
        '''
        Try to create/destroy an utf-8 database name from an sls file #8947
        '''
        expected_result = {
            'mysql_database_|-A_|-foo \xe6\xba\x96`bar_|-present': {
                '__run_num__': 0,
                'comment': 'The database foo \xe6\xba\x96`bar has been created',
                'result': True},
            'mysql_database_|-B_|-foo \xe6\xba\x96`bar_|-absent': {
                '__run_num__': 1,
                'comment': 'Database foo \xe6\xba\x96`bar has been removed',
                'result': True},
        }
        result = {}
        ret = self.run_function('state.sls', mods='mysql_utf8')
        if not isinstance(ret, dict):
            raise AssertionError(
                ('Unexpected result while testing external mysql utf8 sls'
                 ': {0}').format(
                    repr(ret)
                )
            )
        # items() instead of the Python-2-only iteritems(): works on both
        # Python 2 and 3
        for item, descr in ret.items():
            result[item] = {
                '__run_num__': descr['__run_num__'],
                'comment': descr['comment'],
                'result': descr['result']
            }
        self.assertEqual(expected_result, result)
@skipIf(
NO_MYSQL,
'Please install MySQL bindings and a MySQL Server before running'
'MySQL integration tests.'
)
class MysqlGrantsStateTest(integration.ModuleCase,
integration.SaltReturnAssertsMixIn):
'''
Validate the mysql_grants states
'''
user = 'root'
password = 'poney'
# yep, theses are valid MySQL db names
# very special chars are _ % and .
testdb1 = 'tes.t\'"saltdb'
testdb2 = 't_st `(:=salt%b)'
testdb3 = 'test `(:=salteeb)'
table1 = 'foo'
table2 = "foo `\'%_bar"
users = {
'user1': {
'name': 'foo',
'pwd': 'bar',
},
'user2': {
'name': 'user ";--,?:&/\\',
'pwd': '";--(),?:@=&/\\',
},
# this is : passwd 標標
'user3': {
'name': 'user( @ )=foobar',
'pwd': '\xe6\xa8\x99\xe6\xa8\x99',
},
# this is : user/password containing 標標
'user4': {
'name': 'user \xe6\xa8\x99',
'pwd': '\xe6\xa8\x99\xe6\xa8\x99',
},
}
@destructiveTest
def setUp(self):
    '''
    Test presence of MySQL server, enforce a root password
    '''
    super(MysqlGrantsStateTest, self).setUp()
    NO_MYSQL_SERVER = True
    # now ensure we know the mysql root password
    # one of theses two at least should work
    ret1 = self.run_state(
        'cmd.run',
        name='mysqladmin --host="localhost" -u '
             + self.user
             + ' flush-privileges password "'
             + self.password
             + '"'
    )
    ret2 = self.run_state(
        'cmd.run',
        name='mysqladmin --host="localhost" -u '
             + self.user
             + ' --password="'
             + self.password
             + '" flush-privileges password "'
             + self.password
             + '"'
    )
    key, value = ret2.popitem()
    if value['result']:
        NO_MYSQL_SERVER = False
    else:
        self.skipTest('No MySQL Server running, or no root access on it.')
    # Create some users and a test db
    # items() instead of the Python-2-only iteritems(): works on both
    # Python 2 and 3
    for user, userdef in self.users.items():
        self._userCreation(uname=userdef['name'], password=userdef['pwd'])
    self.run_state(
        'mysql_database.present',
        name=self.testdb1,
        character_set='utf8',
        collate='utf8_general_ci',
        connection_user=self.user,
        connection_pass=self.password,
    )
    self.run_state(
        'mysql_database.present',
        name=self.testdb2,
        character_set='utf8',
        collate='utf8_general_ci',
        connection_user=self.user,
        connection_pass=self.password,
    )
    create_query = ('CREATE TABLE %(tblname)s ('
                    ' id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,'
                    ' data VARCHAR(100)) ENGINE=%(engine)s;') % dict(
        tblname=mysqlmod.quote_identifier(self.table1),
        engine='MYISAM',
    )
    log.info('Adding table {0!r}'.format(self.table1,))
    self.run_function(
        'mysql.query',
        database=self.testdb2,
        query=create_query,
        connection_user=self.user,
        connection_pass=self.password
    )
    create_query = ('CREATE TABLE %(tblname)s ('
                    ' id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,'
                    ' data VARCHAR(100)) ENGINE=%(engine)s;') % dict(
        tblname=mysqlmod.quote_identifier(self.table2),
        engine='MYISAM',
    )
    log.info('Adding table {0!r}'.format(self.table2,))
    self.run_function(
        'mysql.query',
        database=self.testdb2,
        query=create_query,
        connection_user=self.user,
        connection_pass=self.password
    )
@destructiveTest
def tearDown(self):
    '''
    Removes created users and db
    '''
    # items() instead of the Python-2-only iteritems(): works on both
    # Python 2 and 3
    for user, userdef in self.users.items():
        self._userRemoval(uname=userdef['name'], password=userdef['pwd'])
    self.run_state(
        'mysql_database.absent',
        name=self.testdb1,
        connection_user=self.user,
        connection_pass=self.password,
    )
    # NOTE(review): this call uses run_function with a *state* name
    # ('mysql_database.absent'), while the testdb1 cleanup above uses
    # run_state -- this looks like it should be run_state as well; confirm
    # before changing, as the test may rely on the lenient behavior.
    self.run_function(
        'mysql_database.absent',
        name=self.testdb2,
        connection_user=self.user,
        connection_pass=self.password,
    )
def _userCreation(self, uname, password=None):
    '''
    Create a test user via the mysql_user.present state
    '''
    self.run_state(
        'mysql_user.present',
        name=uname,
        host='localhost',
        password=password,
        connection_user=self.user,
        connection_pass=self.password,
        connection_charset='utf8',
        saltenv={"LC_ALL": "en_US.utf8"}
    )
def _userRemoval(self, uname, password=None):
    '''
    Remove a test user via the mysql_user.absent state
    '''
    self.run_state(
        'mysql_user.absent',
        name=uname,
        host='localhost',
        connection_user=self.user,
        connection_pass=self.password,
        connection_charset='utf8',
        saltenv={"LC_ALL": "en_US.utf8"}
    )
    def _test_grant(self,
                    user,
                    host,
                    grant,
                    target,
                    **kwargs):
        '''
        Create db two times, test conn, remove it two times

        NOTE(review): this body appears to be a copy/paste of
        ``_test_database``: it never uses its ``user``/``host``/``grant``/
        ``target`` parameters, and it references ``db_name``,
        ``second_db_name`` and ``test_conn``, none of which are defined in
        this scope -- any call would raise ``NameError``.  The intended
        grant-testing logic is unknown, so the body is left untouched;
        it must be rewritten (or deleted) before this helper is used.
        '''
        # In case of...
        ret = self.run_state('mysql_database.absent',
                             name=db_name,
                             **kwargs
                             )
        ret = self.run_state('mysql_database.present',
                             name=db_name,
                             **kwargs
                             )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'The database ' + db_name + ' has been created',
            ret
        )
        #2nd run
        ret = self.run_state('mysql_database.present',
                             name=second_db_name,
                             **kwargs
                             )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Database ' + db_name + ' is already present',
            ret
        )
        if test_conn:
            # test root connection
            ret = self.run_function(
                'mysql.query',
                database=db_name,
                query='SELECT 1',
                **kwargs
            )
            if not isinstance(ret, dict) or not 'results' in ret:
                raise AssertionError(
                    ('Unexpected result while testing connection'
                     ' on db {0!r}: {1}').format(
                         db_name,
                         repr(ret)
                     )
                )
            self.assertEqual([['1']], ret['results'])
        # Now removing databases
        # mysql_database.absent does not take charset/collation arguments.
        kwargs.pop('character_set')
        kwargs.pop('collate')
        ret = self.run_state('mysql_database.absent',
                             name=db_name,
                             **kwargs
                             )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Database ' + db_name + ' has been removed',
            ret
        )
        #2nd run
        ret = self.run_state('mysql_database.absent',
                             name=second_db_name,
                             **kwargs
                             )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Database ' + db_name + ' is not present, so it cannot be removed',
            ret
        )
        self.assertSaltStateChangesEqual(ret, {})
@destructiveTest
def test_grant_present_absent(self):
'''
mysql_database.present
'''
ret = self.run_state(
'mysql_grants.present',
name='grant test 1',
grant='SELECT, INSERT',
database=self.testdb1 + '.*',
user=self.users['user1']['name'],
host='localhost',
grant_option=True,
revoke_first=True,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8'
)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'mysql_grants.present',
name='grant test 2',
grant='SELECT, ALTER,CREATE TEMPORARY tables, execute',
database=self.testdb1 + '.*',
user=self.users['user1']['name'],
host='localhost',
grant_option=True,
revoke_first=True,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8'
)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'mysql_grants.present',
name='grant test 3',
grant='SELECT, INSERT',
database=self.testdb2 + '.' + self.table2,
user=self.users['user2']['name'],
host='localhost',
grant_option=True,
revoke_first=True,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8'
)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'mysql_grants.present',
name='grant test 4',
grant='SELECT, INSERT',
database=self.testdb2 + '.' + self.table2,
user=self.users['user2']['name'],
host='localhost',
grant_option=True,
revoke_first=True,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8'
)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'mysql_grants.present',
name='grant test 5',
grant='SELECT, UPDATE',
database=self.testdb2 + '.*',
user=self.users['user1']['name'],
host='localhost',
grant_option=True,
revoke_first=False,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8'
)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'mysql_grants.absent',
name='grant test 6',
grant='SELECT,update',
database=self.testdb2 + '.*',
user=self.users['user1']['name'],
host='localhost',
grant_option=True,
revoke_first=False,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8'
)
self.assertSaltTrueReturn(ret)
if __name__ == '__main__':
    # Allow running this module directly through the integration test runner.
    from integration import run_tests
    run_tests(MysqlDatabaseStateTest, MysqlGrantsStateTest)
# some pylint fixes for mysql tests -- stray commit-message text commented out;
# everything below this line is a second copy of this test module.
# -*- coding: utf-8 -*-
'''
Tests for the MySQL states
'''
# Import python libs
import logging
# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import (
destructiveTest,
ensure_in_syspath
)
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
from salt.modules import mysql as mysqlmod
log = logging.getLogger(__name__)
NO_MYSQL = False
try:
import MySQLdb
except Exception:
NO_MYSQL = True
@skipIf(
    NO_MYSQL,
    'Please install MySQL bindings and a MySQL Server before running'
    'MySQL integration tests.'
)
class MysqlDatabaseStateTest(integration.ModuleCase,
                             integration.SaltReturnAssertsMixIn):
    '''
    Validate the mysql_database state: creation and removal of databases,
    including names containing quotes, spaces and non-ASCII characters.
    '''
    # Root credentials used by every test to reach the MySQL server.
    user = 'root'
    password = 'poney'

    @destructiveTest
    def setUp(self):
        '''
        Test presence of MySQL server, enforce a root password
        '''
        super(MysqlDatabaseStateTest, self).setUp()
        NO_MYSQL_SERVER = True
        # now ensure we know the mysql root password
        # one of theses two at least should work
        # NOTE(review): ret1 is never inspected; only ret2 (which retries
        # with --password set) decides whether the server is usable.
        ret1 = self.run_state(
            'cmd.run',
            name='mysqladmin --host="localhost" -u '
            + self.user
            + ' flush-privileges password "'
            + self.password
            + '"'
        )
        ret2 = self.run_state(
            'cmd.run',
            name='mysqladmin --host="localhost" -u '
            + self.user
            + ' --password="'
            + self.password
            + '" flush-privileges password "'
            + self.password
            + '"'
        )
        key, value = ret2.popitem()
        if value['result']:
            NO_MYSQL_SERVER = False
        else:
            self.skipTest('No MySQL Server running, or no root access on it.')

    def _test_database(self, db_name, second_db_name, test_conn, **kwargs):
        '''
        Create db two times, test conn, remove it two times

        ``second_db_name`` lets callers exercise "already present" /
        "cannot be removed" paths with an alternate spelling of the same
        database name (e.g. utf8 bytes vs unicode).
        '''
        # In case of...
        ret = self.run_state('mysql_database.absent',
                             name=db_name,
                             **kwargs
                             )
        ret = self.run_state('mysql_database.present',
                             name=db_name,
                             **kwargs
                             )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'The database ' + db_name + ' has been created',
            ret
        )
        #2nd run
        ret = self.run_state('mysql_database.present',
                             name=second_db_name,
                             **kwargs
                             )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Database ' + db_name + ' is already present',
            ret
        )
        if test_conn:
            # test root connection
            ret = self.run_function(
                'mysql.query',
                database=db_name,
                query='SELECT 1',
                **kwargs
            )
            # NOTE(review): pylint prefers "'results' not in ret" here.
            if not isinstance(ret, dict) or not 'results' in ret:
                raise AssertionError(
                    ('Unexpected result while testing connection'
                     ' on db {0!r}: {1}').format(
                         db_name,
                         repr(ret)
                     )
                )
            self.assertEqual([['1']], ret['results'])
        # Now removing databases
        # mysql_database.absent does not take charset/collation arguments.
        kwargs.pop('character_set')
        kwargs.pop('collate')
        ret = self.run_state('mysql_database.absent',
                             name=db_name,
                             **kwargs
                             )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Database ' + db_name + ' has been removed',
            ret
        )
        #2nd run
        ret = self.run_state('mysql_database.absent',
                             name=second_db_name,
                             **kwargs
                             )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Database ' + db_name + ' is not present, so it cannot be removed',
            ret
        )
        self.assertSaltStateChangesEqual(ret, {})

    @destructiveTest
    def test_present_absent(self):
        '''
        mysql_database.present
        '''
        self._test_database(
            'testdb1',
            'testdb1',
            test_conn=True,
            character_set='utf8',
            collate='utf8_general_ci',
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8'
        )

    # TODO: test with variations on collate and charset, check for db alter
    # once it will be done in mysql_database.present state

    @destructiveTest
    def test_present_absent_fuzzy(self):
        '''
        mysql_database.present with utf-8 andf fuzzy db name
        '''
        # this is : ":() ;,?@=`&'\
        dbname_fuzzy = '":() ;,?@=`&/\'\\'
        # \xe6\xa8\x99\ = \u6a19 = 標
        # this is : "();,?:@=`&/標'\
        dbname_utf8 = '"();,?@=`&//\xe6\xa8\x99\'\\'
        dbname_unicode = u'"();,?@=`&//\u6a19\'\\'

        self._test_database(
            dbname_fuzzy,
            dbname_fuzzy,
            test_conn=True,
            character_set='utf8',
            collate='utf8_general_ci',
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8'
        )
        # FIXME: MySQLdb bugs on dbnames with utf-8?
        self._test_database(
            dbname_utf8,
            dbname_unicode,
            test_conn=False,
            character_set='utf8',
            collate='utf8_general_ci',
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8',
            #saltenv={"LC_ALL": "en_US.utf8"}
        )

    @destructiveTest
    @skipIf(True, 'This tests needs issue #8947 to be fixed first')
    def test_utf8_from_sls_file(self):
        '''
        Try to create/destroy an utf-8 database name from an sls file #8947
        '''
        expected_result = {
            'mysql_database_|-A_|-foo \xe6\xba\x96`bar_|-present': {
                '__run_num__': 0,
                'comment': 'The database foo \xe6\xba\x96`bar has been created',
                'result': True},
            'mysql_database_|-B_|-foo \xe6\xba\x96`bar_|-absent': {
                '__run_num__': 1,
                'comment': 'Database foo \xe6\xba\x96`bar has been removed',
                'result': True},
        }
        result = {}
        ret = self.run_function('state.sls', mods='mysql_utf8')
        if not isinstance(ret, dict):
            raise AssertionError(
                ('Unexpected result while testing external mysql utf8 sls'
                 ': {0}').format(
                     repr(ret)
                 )
            )
        # Keep only the keys we want to compare against expected_result.
        for item, descr in ret.iteritems():
            result[item] = {
                '__run_num__': descr['__run_num__'],
                'comment': descr['comment'],
                'result': descr['result']
            }
        self.assertEqual(expected_result, result)
@skipIf(
    NO_MYSQL,
    'Please install MySQL bindings and a MySQL Server before running'
    'MySQL integration tests.'
)
class MysqlGrantsStateTest(integration.ModuleCase,
                           integration.SaltReturnAssertsMixIn):
    '''
    Validate the mysql_grants states
    '''
    # Root credentials used by every test to reach the MySQL server.
    user = 'root'
    password = 'poney'
    # yep, theses are valid MySQL db names
    # very special chars are _ % and .
    testdb1 = 'tes.t\'"saltdb'
    testdb2 = 't_st `(:=salt%b)'
    testdb3 = 'test `(:=salteeb)'
    table1 = 'foo'
    table2 = "foo `\'%_bar"
    users = {
        'user1': {
            'name': 'foo',
            'pwd': 'bar',
        },
        'user2': {
            'name': 'user ";--,?:&/\\',
            'pwd': '";--(),?:@=&/\\',
        },
        # this is : passwd 標標
        'user3': {
            'name': 'user( @ )=foobar',
            'pwd': '\xe6\xa8\x99\xe6\xa8\x99',
        },
        # this is : user/password containing 標標
        'user4': {
            'name': 'user \xe6\xa8\x99',
            'pwd': '\xe6\xa8\x99\xe6\xa8\x99',
        },
    }

    @destructiveTest
    def setUp(self):
        '''
        Test presence of MySQL server, enforce a root password
        '''
        super(MysqlGrantsStateTest, self).setUp()
        NO_MYSQL_SERVER = True
        # now ensure we know the mysql root password
        # one of theses two at least should work
        # NOTE(review): ret1 is never inspected; only ret2 decides.
        ret1 = self.run_state(
            'cmd.run',
            name='mysqladmin --host="localhost" -u '
            + self.user
            + ' flush-privileges password "'
            + self.password
            + '"'
        )
        ret2 = self.run_state(
            'cmd.run',
            name='mysqladmin --host="localhost" -u '
            + self.user
            + ' --password="'
            + self.password
            + '" flush-privileges password "'
            + self.password
            + '"'
        )
        key, value = ret2.popitem()
        if value['result']:
            NO_MYSQL_SERVER = False
        else:
            self.skipTest('No MySQL Server running, or no root access on it.')
        # Create some users and a test db
        for user, userdef in self.users.iteritems():
            self._userCreation(uname=userdef['name'], password=userdef['pwd'])
        self.run_state(
            'mysql_database.present',
            name=self.testdb1,
            character_set='utf8',
            collate='utf8_general_ci',
            connection_user=self.user,
            connection_pass=self.password,
        )
        self.run_state(
            'mysql_database.present',
            name=self.testdb2,
            character_set='utf8',
            collate='utf8_general_ci',
            connection_user=self.user,
            connection_pass=self.password,
        )
        # NOTE(review): both tables below are created in testdb2; confirm
        # table1 was not meant to live in testdb1.
        create_query = ('CREATE TABLE %(tblname)s ('
                        ' id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,'
                        ' data VARCHAR(100)) ENGINE=%(engine)s;') % dict(
                            tblname=mysqlmod.quote_identifier(self.table1),
                            engine='MYISAM',
                        )
        log.info('Adding table {0!r}'.format(self.table1,))
        self.run_function(
            'mysql.query',
            database=self.testdb2,
            query=create_query,
            connection_user=self.user,
            connection_pass=self.password
        )
        create_query = ('CREATE TABLE %(tblname)s ('
                        ' id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,'
                        ' data VARCHAR(100)) ENGINE=%(engine)s;') % dict(
                            tblname=mysqlmod.quote_identifier(self.table2),
                            engine='MYISAM',
                        )
        log.info('Adding table {0!r}'.format(self.table2,))
        self.run_function(
            'mysql.query',
            database=self.testdb2,
            query=create_query,
            connection_user=self.user,
            connection_pass=self.password
        )

    @destructiveTest
    def tearDown(self):
        '''
        Removes created users and db
        '''
        for user, userdef in self.users.iteritems():
            self._userRemoval(uname=userdef['name'], password=userdef['pwd'])
        self.run_state(
            'mysql_database.absent',
            name=self.testdb1,
            connection_user=self.user,
            connection_pass=self.password,
        )
        # NOTE(review): 'mysql_database.absent' is a *state*, not an
        # execution-module function -- this run_function call cannot drop
        # testdb2; it should be run_state like the testdb1 call above.
        self.run_function(
            'mysql_database.absent',
            name=self.testdb2,
            connection_user=self.user,
            connection_pass=self.password,
        )

    def _userCreation(self,
                      uname,
                      password=None):
        '''
        Create a test user
        '''
        self.run_state(
            'mysql_user.present',
            name=uname,
            host='localhost',
            password=password,
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8',
            saltenv={"LC_ALL": "en_US.utf8"}
        )

    def _userRemoval(self,
                     uname,
                     password=None):
        '''
        Removes a test user
        '''
        self.run_state(
            'mysql_user.absent',
            name=uname,
            host='localhost',
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8',
            saltenv={"LC_ALL": "en_US.utf8"}
        )

    def _test_grant(self,
                    user,
                    host,
                    grant,
                    target,
                    **kwargs):
        '''
        Create db two times, test conn, remove it two times

        NOTE(review): copy/paste of ``_test_database`` -- the
        ``user``/``host``/``grant``/``target`` parameters are unused and
        ``db_name``/``second_db_name``/``test_conn`` are undefined here,
        so any call raises ``NameError``.  Left untouched pending
        clarification of the intended grant-testing logic.
        '''
        # In case of...
        ret = self.run_state('mysql_database.absent',
                             name=db_name,
                             **kwargs
                             )
        ret = self.run_state('mysql_database.present',
                             name=db_name,
                             **kwargs
                             )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'The database ' + db_name + ' has been created',
            ret
        )
        #2nd run
        ret = self.run_state('mysql_database.present',
                             name=second_db_name,
                             **kwargs
                             )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Database ' + db_name + ' is already present',
            ret
        )
        if test_conn:
            # test root connection
            ret = self.run_function(
                'mysql.query',
                database=db_name,
                query='SELECT 1',
                **kwargs
            )
            if not isinstance(ret, dict) or not 'results' in ret:
                raise AssertionError(
                    ('Unexpected result while testing connection'
                     ' on db {0!r}: {1}').format(
                         db_name,
                         repr(ret)
                     )
                )
            self.assertEqual([['1']], ret['results'])
        # Now removing databases
        kwargs.pop('character_set')
        kwargs.pop('collate')
        ret = self.run_state('mysql_database.absent',
                             name=db_name,
                             **kwargs
                             )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Database ' + db_name + ' has been removed',
            ret
        )
        #2nd run
        ret = self.run_state('mysql_database.absent',
                             name=second_db_name,
                             **kwargs
                             )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Database ' + db_name + ' is not present, so it cannot be removed',
            ret
        )
        self.assertSaltStateChangesEqual(ret, {})

    @destructiveTest
    def test_grant_present_absent(self):
        '''
        mysql_database.present
        '''
        ret = self.run_state(
            'mysql_grants.present',
            name='grant test 1',
            grant='SELECT, INSERT',
            database=self.testdb1 + '.*',
            user=self.users['user1']['name'],
            host='localhost',
            grant_option=True,
            revoke_first=True,
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8'
        )
        self.assertSaltTrueReturn(ret)
        ret = self.run_state(
            'mysql_grants.present',
            name='grant test 2',
            grant='SELECT, ALTER,CREATE TEMPORARY tables, execute',
            database=self.testdb1 + '.*',
            user=self.users['user1']['name'],
            host='localhost',
            grant_option=True,
            revoke_first=True,
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8'
        )
        self.assertSaltTrueReturn(ret)
        ret = self.run_state(
            'mysql_grants.present',
            name='grant test 3',
            grant='SELECT, INSERT',
            database=self.testdb2 + '.' + self.table2,
            user=self.users['user2']['name'],
            host='localhost',
            grant_option=True,
            revoke_first=True,
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8'
        )
        self.assertSaltTrueReturn(ret)
        ret = self.run_state(
            'mysql_grants.present',
            name='grant test 4',
            grant='SELECT, INSERT',
            database=self.testdb2 + '.' + self.table2,
            user=self.users['user2']['name'],
            host='localhost',
            grant_option=True,
            revoke_first=True,
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8'
        )
        self.assertSaltTrueReturn(ret)
        ret = self.run_state(
            'mysql_grants.present',
            name='grant test 5',
            grant='SELECT, UPDATE',
            database=self.testdb2 + '.*',
            user=self.users['user1']['name'],
            host='localhost',
            grant_option=True,
            revoke_first=False,
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8'
        )
        self.assertSaltTrueReturn(ret)
        ret = self.run_state(
            'mysql_grants.absent',
            name='grant test 6',
            grant='SELECT,update',
            database=self.testdb2 + '.*',
            user=self.users['user1']['name'],
            host='localhost',
            grant_option=True,
            revoke_first=False,
            connection_user=self.user,
            connection_pass=self.password,
            connection_charset='utf8'
        )
        self.assertSaltTrueReturn(ret)
if __name__ == '__main__':
    # Allow running this module directly through the integration test runner.
    from integration import run_tests
    run_tests(MysqlDatabaseStateTest, MysqlGrantsStateTest)
# ---- file boundary artifact ('|') commented out; a different source file
# (sage_server.py) follows below. ----
#!/usr/bin/env python
"""
sage_server.py -- unencrypted forking TCP server.
Note: I wrote functionality so this can run as root, create accounts on the fly,
and serve sage as those accounts. Doing this is horrendous from a security point of
view, and I'm definitely not doing this.
None of that functionality is actually used in https://cocalc.com!
For debugging, this may help:
killemall sage_server.py && sage --python sage_server.py -p 6000
"""
# NOTE: This file is GPL'd
# because it imports the Sage library. This file is not directly
# imported by anything else in CoCalc; the Python process it runs is
# used over a TCP connection.
#########################################################################################
# Copyright (C) 2016, Sagemath Inc.
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
# Add the path that contains this file to the Python load path, so we
# can import other files from there.
from __future__ import print_function, absolute_import
import six
import os, sys, time, operator
import __future__ as future
from functools import reduce
def unicode8(s):
    """Best-effort conversion of ``s`` to a text (unicode) string.

    - ``bytes`` are decoded as UTF-8; undecodable byte sequences fall back
      to ``errors='replace'`` instead of silently degrading to the
      ``"b'...'"`` repr the old code produced.
    - anything else goes through ``str``; if even that raises, the object
      is returned unchanged (this function must never raise -- it is used
      by ``log``).

    The original used bare ``except:`` clauses, which also swallowed
    ``KeyboardInterrupt``/``SystemExit``; the handlers are now explicit.
    """
    if isinstance(s, bytes):
        try:
            return s.decode('utf8')
        except UnicodeDecodeError:
            return s.decode('utf8', errors='replace')
    try:
        return str(s)
    except Exception:
        return s
LOGFILE = os.path.realpath(__file__)[:-3] + ".log"
PID = os.getpid()
from datetime import datetime
def log(*args):
    """Append a timestamped message to LOGFILE.

    All args are converted with :func:`unicode8` and space-joined.  Any
    error is printed and otherwise ignored -- logging must never take the
    server down.

    Fix: the original opened LOGFILE on every call and never closed it,
    leaking one file descriptor per log line; use a ``with`` block so the
    handle is always released.
    """
    try:
        mesg = "%s (%s): %s\n" % (PID, datetime.utcnow().strftime(
            '%Y-%m-%d %H:%M:%S.%f')[:-3], ' '.join([unicode8(x)
                                                    for x in args]))
        with open(LOGFILE, 'a') as debug_log:
            debug_log.write(mesg)
            debug_log.flush()
    except Exception as err:
        print(("an error writing a log message (ignoring) -- %s" % err, args))
# used for clearing pylab figure
pylab = None
# Maximum number of distinct (non-once) output messages per cell; when this number is
# exceeded, an exception is raised; this reduces the chances of the user creating
# a huge unusable worksheet.
MAX_OUTPUT_MESSAGES = 256
# stdout, stderr, html, etc. that exceeds this many characters will be truncated to avoid
# killing the client.
MAX_STDOUT_SIZE = MAX_STDERR_SIZE = MAX_CODE_SIZE = MAX_HTML_SIZE = MAX_MD_SIZE = MAX_TEX_SIZE = 40000
MAX_OUTPUT = 150000
# Standard imports.
import json, resource, shutil, signal, socket, struct, \
tempfile, time, traceback, pwd, re
# for "3x^2 + 4xy - 5(1+x) - 3 abc4ok", this pattern matches "3x", "5(" and "4xy" but not "abc4ok"
# to understand it, see https://regex101.com/ or https://www.debuggex.com/
RE_POSSIBLE_IMPLICIT_MUL = re.compile(r'(?:(?<=[^a-zA-Z])|^)(\d+[a-zA-Z\(]+)')
try:
from . import sage_parsing, sage_salvus
except:
import sage_parsing, sage_salvus
uuid = sage_salvus.uuid
# Flag cleared permanently once sage's attach machinery proves unavailable,
# so we do not retry (and re-log) on every cell execution.
reload_attached_files_if_mod_smc_available = True


def reload_attached_files_if_mod_smc():
    """Re-load any %attach'ed files that were modified on disk.

    Deliberately avoids importing ``sage.repl.attach`` itself: if the user
    never used attach, ``sage.repl.attach`` is absent and there is nothing
    to do.  Mirrors sage's own ``reload_attached_files_if_modified()``.
    """
    # CRITICAL: do NOT import sage.repl.attach!! That will import IPython,
    # wasting several seconds and killing the user experience for no reason.
    try:
        import sage.repl
        sage.repl.attach
    except:
        # nothing to do -- attach has not been used and is not yet available.
        return
    global reload_attached_files_if_mod_smc_available
    if not reload_attached_files_if_mod_smc_available:
        return
    try:
        from sage.repl.attach import load_attach_path, modified_file_iterator
    except:
        print("sage_server: attach not available")
        reload_attached_files_if_mod_smc_available = False
        return
    # see sage/src/sage/repl/attach.py reload_attached_files_if_modified()
    for filename, mtime in modified_file_iterator():
        basename = os.path.basename(filename)
        timestr = time.strftime('%T', mtime)
        log('reloading attached file {0} modified at {1}'.format(
            basename, timestr))
        from .sage_salvus import load
        load(filename)
# Determine the info object, if available. There's no good reason
# it wouldn't be available, unless a user explicitly deleted it, but
# we may as well try to be robust to this, especially if somebody
# were to try to use this server outside of cloud.sagemath.com.
# Project metadata file written by the CoCalc project server; $SMC must be
# set in the environment for this module to import.
_info_path = os.path.join(os.environ['SMC'], 'info.json')
if os.path.exists(_info_path):
    INFO = json.loads(open(_info_path).read())
else:
    # Fall back to an empty config (e.g. info.json deleted, or running
    # outside cloud.sagemath.com).
    INFO = {}
if 'base_url' not in INFO:
    INFO['base_url'] = ''
# Configure logging
#logging.basicConfig()
#log = logging.getLogger('sage_server')
#log.setLevel(logging.INFO)
# A CoffeeScript version of this function is in misc_node.coffee.
import hashlib
def uuidsha1(data):
    """Return a deterministic, v4-shaped UUID string derived from ``data``.

    The hex digits of ``SHA1(data)`` are laid into the canonical
    ``xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx`` template: each 'x' consumes
    the next digest digit, and the 'y' position is forced into the RFC 4122
    variant range (8, 9, a or b).  A CoffeeScript version of this function
    is in misc_node.coffee.

    Backward-compatible generalization: ``str`` input is UTF-8 encoded
    first, since ``hashlib`` requires bytes on Python 3; bytes input
    behaves exactly as before.
    """
    if isinstance(data, str):
        data = data.encode('utf8')
    s = hashlib.sha1(data).hexdigest()
    template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'
    out = []
    j = 0
    for ch in template:
        if ch == 'x':
            out.append(s[j])
            j += 1
        elif ch == 'y':
            # take 8 + low order 3 bits of hex number.
            out.append(hex((int(s[j], 16) & 0x3) | 0x8)[-1])
            j += 1
        else:
            # literal template characters: '-' separators and the '4'.
            out.append(ch)
    return ''.join(out)
# A tcp connection with support for sending various types of messages, especially JSON.
class ConnectionJSON(object):
    """Length-prefixed message framing over a TCP socket.

    Wire format: a 4-byte big-endian length header, then a one-character
    type tag ('j' = JSON, 'b' = blob prefixed by its sha1-derived uuid),
    then the payload.
    """

    def __init__(self, conn):
        # avoid common mistake -- conn is supposed to be from socket.socket...
        assert not isinstance(conn, ConnectionJSON)
        self._conn = conn

    def close(self):
        self._conn.close()

    def _send(self, s):
        # Frame and send one message: 4-byte big-endian length, then payload.
        if six.PY3 and type(s) == str:
            s = s.encode('utf8')
        length_header = struct.pack(">L", len(s))
        # py3: TypeError: can't concat str to bytes
        self._conn.send(length_header + s)

    def send_json(self, m):
        """Serialize m as JSON and send it; returns the serialized length."""
        m = json.dumps(m)
        if '\\u0000' in m:
            raise RuntimeError("NULL bytes not allowed")
        # NOTE(review): truncate_text returns a (text, truncated) tuple, so
        # this log line prints the tuple -- harmless but probably unintended.
        log("sending message '", truncate_text(m, 256), "'")
        self._send('j' + m)
        return len(m)

    def send_blob(self, blob):
        """Send a binary blob; returns the uuid used to identify it."""
        if six.PY3 and type(blob) == str:
            # unicode objects must be encoded before hashing
            blob = blob.encode('utf8')
        s = uuidsha1(blob)
        if six.PY3 and type(blob) == bytes:
            # we convert all to bytes first, to avoid unnecessary conversions
            self._send(('b' + s).encode('utf8') + blob)
        else:
            # old sage py2 code
            self._send('b' + s + blob)
        return s

    def send_file(self, filename):
        """Send the contents of a file as a blob; returns its uuid."""
        log("sending file '%s'" % filename)
        f = open(filename, 'rb')
        data = f.read()
        f.close()
        return self.send_blob(data)

    def _recv(self, n):
        # Read up to n bytes, retrying when the recv syscall is interrupted.
        # see http://stackoverflow.com/questions/3016369/catching-blocking-sigint-during-system-call
        for i in range(20):
            try:
                r = self._conn.recv(n)
                return r
            except OSError as e:
                # errno 4 == EINTR (interrupted system call): retry.
                if e.errno != 4:
                    raise
        raise EOFError

    def recv(self):
        """Receive one message; returns ('json', obj) or ('blob', data)."""
        n = self._recv(4)
        if len(n) < 4:
            raise EOFError
        n = struct.unpack('>L', n)[0]  # big endian 32 bits
        s = self._recv(n)
        # Keep reading until the full n-byte payload has arrived.
        while len(s) < n:
            t = self._recv(n - len(s))
            if len(t) == 0:
                raise EOFError
            s += t
        if six.PY3:
            # bytestream to string, so s[0] is e.g. 'j' and not 106.
            # NOTE(review): this decodes the entire payload as utf8, which
            # assumes incoming blobs are utf8-clean -- confirm senders.
            if type(s) == bytes:
                s = s.decode('utf8')
        if s[0] == 'j':
            try:
                return 'json', json.loads(s[1:])
            except Exception as msg:
                log("Unable to parse JSON '%s'" % s[1:])
                raise
        elif s[0] == 'b':
            return 'blob', s[1:]
        raise ValueError("unknown message type '%s'" % s[0])
def truncate_text(s, max_size):
    """Return ``(text, truncated)``: *s* cut to *max_size* characters with
    a ``[...]`` suffix when it was too long, otherwise unchanged."""
    truncated = len(s) > max_size
    return (s[:max_size] + "[...]" if truncated else s), truncated
def truncate_text_warn(s, max_size, name):
    r"""
    Truncate text if too long and format a warning message.

    INPUT:

    - ``s`` -- string to be truncated
    - ``max_size`` -- integer truncation limit
    - ``name`` -- string, name of the limiting parameter (for the warning)

    OUTPUT: a triple ``(text, truncated, warning)`` where ``warning`` is
    ``''`` when nothing was truncated.
    """
    template = ("WARNING: Output: %s truncated by %s to %s. Type 'smc?' "
                "to learn how to raise the output limit.")
    size = len(s)
    if size <= max_size:
        return s, False, ''
    return s[:max_size] + "[...]", True, template % (size, name, max_size)
class Message(object):
    """Factory for the JSON messages exchanged with the CoCalc client.

    Each method returns a plain ``dict`` with an ``'event'`` key plus the
    event-specific payload; ``ConnectionJSON.send_json`` serializes it.
    """

    def _new(self, event, props=None):
        """Return ``{'event': event}`` merged with ``props`` (minus 'self').

        Fix: ``props`` previously defaulted to a shared mutable ``{}``
        (the classic mutable-default-argument pitfall); it now defaults to
        ``None``.  Callers commonly pass ``locals()``, hence the 'self'
        filtering.
        """
        m = {'event': event}
        if props:
            for key, val in props.items():
                if key != 'self':
                    m[key] = val
        return m

    def start_session(self):
        return self._new('start_session')

    def session_description(self, pid):
        return self._new('session_description', {'pid': pid})

    def send_signal(self, pid, signal=signal.SIGINT):
        return self._new('send_signal', locals())

    def terminate_session(self, done=True):
        return self._new('terminate_session', locals())

    def execute_code(self, id, code, preparse=True):
        return self._new('execute_code', locals())

    def execute_javascript(self, code, obj=None, coffeescript=False):
        return self._new('execute_javascript', locals())

    def output(
            self,
            id,
            stdout=None,
            stderr=None,
            code=None,
            html=None,
            javascript=None,
            coffeescript=None,
            interact=None,
            md=None,
            tex=None,
            d3=None,
            file=None,
            raw_input=None,
            obj=None,
            once=None,
            hide=None,
            show=None,
            events=None,
            clear=None,
            delete_last=None,
            done=False  # CRITICAL: done must be specified for multi-response; this is assumed by sage_session.coffee; otherwise response assumed single.
    ):
        """Build an 'output' message for cell ``id``.

        Oversized stdout/stderr/code/html/md/tex fields are truncated to
        the module MAX_* limits and a warning is appended to stderr.
        """
        m = self._new('output')
        m['id'] = id
        t = truncate_text_warn
        did_truncate = False
        from . import sage_server  # so the user can customize the MAX's below
        if code is not None:
            code['source'], did_truncate, tmsg = t(code['source'],
                                                   sage_server.MAX_CODE_SIZE,
                                                   'MAX_CODE_SIZE')
            m['code'] = code
        if stderr is not None and len(stderr) > 0:
            m['stderr'], did_truncate, tmsg = t(stderr,
                                                sage_server.MAX_STDERR_SIZE,
                                                'MAX_STDERR_SIZE')
        if stdout is not None and len(stdout) > 0:
            m['stdout'], did_truncate, tmsg = t(stdout,
                                                sage_server.MAX_STDOUT_SIZE,
                                                'MAX_STDOUT_SIZE')
        if html is not None and len(html) > 0:
            m['html'], did_truncate, tmsg = t(html, sage_server.MAX_HTML_SIZE,
                                              'MAX_HTML_SIZE')
        if md is not None and len(md) > 0:
            m['md'], did_truncate, tmsg = t(md, sage_server.MAX_MD_SIZE,
                                            'MAX_MD_SIZE')
        if tex is not None and len(tex) > 0:
            tex['tex'], did_truncate, tmsg = t(tex['tex'],
                                               sage_server.MAX_TEX_SIZE,
                                               'MAX_TEX_SIZE')
            m['tex'] = tex
        if javascript is not None: m['javascript'] = javascript
        if coffeescript is not None: m['coffeescript'] = coffeescript
        if interact is not None: m['interact'] = interact
        if d3 is not None: m['d3'] = d3
        if obj is not None: m['obj'] = json.dumps(obj)
        if file is not None: m['file'] = file  # = {'filename':..., 'uuid':...}
        if raw_input is not None: m['raw_input'] = raw_input
        if done is not None: m['done'] = done
        if once is not None: m['once'] = once
        if hide is not None: m['hide'] = hide
        if show is not None: m['show'] = show
        if events is not None: m['events'] = events
        if clear is not None: m['clear'] = clear
        if delete_last is not None: m['delete_last'] = delete_last
        if did_truncate:
            # Surface (the last) truncation warning on stderr.
            if 'stderr' in m:
                m['stderr'] += '\n' + tmsg
            else:
                m['stderr'] = '\n' + tmsg
        return m

    def introspect_completions(self, id, completions, target):
        m = self._new('introspect_completions', locals())
        m['id'] = id
        return m

    def introspect_docstring(self, id, docstring, target):
        m = self._new('introspect_docstring', locals())
        m['id'] = id
        return m

    def introspect_source_code(self, id, source_code, target):
        m = self._new('introspect_source_code', locals())
        m['id'] = id
        return m
message = Message()
whoami = os.environ['USER']
def client1(port, hostname):
    """Minimal interactive test client: REPL against a sage_server instance.

    Connects, starts a session, then loops reading input with
    ``sage_parsing.get_input`` and streaming the server's output messages
    until EOF.  Ctrl-C opens a second connection just to deliver an
    interrupt signal to the server-side process.
    """
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((hostname, int(port)))
    conn = ConnectionJSON(conn)
    conn.send_json(message.start_session())
    typ, mesg = conn.recv()
    pid = mesg['pid']
    print(("PID = %s" % pid))
    id = 0
    while True:
        try:
            code = sage_parsing.get_input('sage [%s]: ' % id)
            if code is None:  # EOF
                break
            conn.send_json(message.execute_code(code=code, id=id))
            # Drain output messages for this cell until 'done' arrives.
            while True:
                typ, mesg = conn.recv()
                if mesg['event'] == 'terminate_session':
                    return
                elif mesg['event'] == 'output':
                    if 'stdout' in mesg:
                        sys.stdout.write(mesg['stdout'])
                        sys.stdout.flush()
                    if 'stderr' in mesg:
                        print(('! ' +
                               '\n! '.join(mesg['stderr'].splitlines())))
                if 'done' in mesg and mesg['id'] >= id:
                    break
            id += 1
        except KeyboardInterrupt:
            print("Sending interrupt signal")
            # A fresh connection is used purely to deliver the signal.
            conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn2.connect((hostname, int(port)))
            conn2 = ConnectionJSON(conn2)
            conn2.send_json(message.send_signal(pid))
            del conn2
            id += 1
    conn.send_json(message.terminate_session())
    print("\nExiting Sage client.")
class BufferedOutputStream(object):
    """File-like object that buffers writes and forwards them to a callback.

    ``f(text, done=...)`` is invoked whenever the buffer exceeds
    ``flush_size`` characters or ``flush_interval`` seconds have elapsed
    since the last flush.  Used to stand in for sys.stdout/sys.stderr
    during cell execution.
    """

    def __init__(self, f, flush_size=4096, flush_interval=.1):
        self._f = f
        self._buf = ''
        self._flush_size = flush_size
        self._flush_interval = flush_interval
        self.reset()

    def reset(self):
        # Restart the flush-interval clock.
        self._last_flush_time = time.time()

    def fileno(self):
        # Pretend to be stdin's descriptor; some callers require a fileno().
        return 0

    def write(self, output):
        # CRITICAL: we need output to valid PostgreSQL TEXT, so no null bytes
        # This is not going to silently corrupt anything -- it's just output that
        # is destined to be *rendered* in the browser. This is only a partial
        # solution to a more general problem, but it is safe.
        # NOTE(review): on Python 3, concatenating bytes to the str buffer
        # raises TypeError, not UnicodeDecodeError, so the fallback below
        # only helps on py2 -- verify writers always pass str on py3.
        try:
            self._buf += output.replace('\x00', '')
        except UnicodeDecodeError:
            self._buf += output.decode('utf-8').replace('\x00', '')
        #self.flush()
        t = time.time()
        if ((len(self._buf) >= self._flush_size)
                or (t - self._last_flush_time >= self._flush_interval)):
            self.flush()
            self._last_flush_time = t

    def flush(self, done=False):
        if not self._buf and not done:
            # no point in sending an empty message
            return
        try:
            self._f(self._buf, done=done)
        except UnicodeDecodeError:
            self._f(str(self._buf, errors='replace'), done=done)
        self._buf = ''

    def isatty(self):
        return False
# This will *have* to be re-done using Cython for speed.
class Namespace(dict):
    """dict subclass with per-key 'change' and 'del' event callbacks.

    ``on('change', key, f)`` registers ``f(new_value)`` to fire whenever
    ``key`` is assigned; ``on('del', key, f)`` registers ``f()`` to fire
    when ``key`` is deleted.  Registering under key ``None`` subscribes to
    events on *every* key, with callbacks ``f(key, value)`` / ``f(key)``.
    (Comment in the original: this will *have* to be re-done using Cython
    for speed.)
    """

    def __init__(self, x):
        self._on_change = {}
        self._on_del = {}
        dict.__init__(self, x)

    def on(self, event, x, f):
        """Register callback ``f`` for ``event`` ('change' or 'del') on key ``x``."""
        if event == 'change':
            if x not in self._on_change:
                self._on_change[x] = []
            self._on_change[x].append(f)
        elif event == 'del':
            if x not in self._on_del:
                self._on_del[x] = []
            self._on_del[x].append(f)

    def remove(self, event, x, f):
        """Unregister callback ``f`` for ``event`` on key ``x`` (no-op if absent).

        Bug fix: the original called ``v.find(f)``, but lists have no
        ``find`` method, so every call raised ``AttributeError``.  Use a
        membership test plus ``list.remove`` (same find-by-equality
        semantics the ``find``/``del v[i]`` code intended).
        """
        if event == 'change' and x in self._on_change:
            v = self._on_change[x]
            if f in v:
                v.remove(f)
            if len(v) == 0:
                del self._on_change[x]
        elif event == 'del' and x in self._on_del:
            v = self._on_del[x]
            if f in v:
                v.remove(f)
            if len(v) == 0:
                del self._on_del[x]

    def __setitem__(self, x, y):
        dict.__setitem__(self, x, y)
        # Fire callbacks after the store; callback errors are printed, not
        # propagated, so a bad listener cannot corrupt the namespace.
        try:
            if x in self._on_change:
                for f in self._on_change[x]:
                    f(y)
            if None in self._on_change:
                for f in self._on_change[None]:
                    f(x, y)
        except Exception as mesg:
            print(mesg)

    def __delitem__(self, x):
        try:
            if x in self._on_del:
                for f in self._on_del[x]:
                    f()
            if None in self._on_del:
                for f in self._on_del[None]:
                    f(x)
        except Exception as mesg:
            print(mesg)
        dict.__delitem__(self, x)

    def set(self, x, y, do_not_trigger=None):
        """Assign ``x = y`` firing change callbacks except those listed in
        ``do_not_trigger`` (wildcard ``None`` listeners always fire)."""
        dict.__setitem__(self, x, y)
        if x in self._on_change:
            if do_not_trigger is None:
                do_not_trigger = []
            for f in self._on_change[x]:
                if f not in do_not_trigger:
                    f(y)
        if None in self._on_change:
            for f in self._on_change[None]:
                f(x, y)
class TemporaryURL:
    """Lightweight wrapper pairing a URL string with its time-to-live, so it
    prints as the URL itself."""

    def __init__(self, url, ttl):
        self.url, self.ttl = url, ttl

    def __str__(self):
        return self.url

    def __repr__(self):
        return repr(self.url)
namespace = Namespace({})
class Salvus(object):
"""
Cell execution state object and wrapper for access to special CoCalc Server functionality.
An instance of this object is created each time you execute a cell. It has various methods
for sending different types of output messages, links to files, etc. Type 'help(smc)' for
more details.
OUTPUT LIMITATIONS -- There is an absolute limit on the number of messages output for a given
cell, and also the size of the output message for each cell. You can access or change
those limits dynamically in a worksheet as follows by viewing or changing any of the
following variables::
sage_server.MAX_STDOUT_SIZE # max length of each stdout output message
sage_server.MAX_STDERR_SIZE # max length of each stderr output message
sage_server.MAX_MD_SIZE # max length of each md (markdown) output message
sage_server.MAX_HTML_SIZE # max length of each html output message
sage_server.MAX_TEX_SIZE # max length of tex output message
sage_server.MAX_OUTPUT_MESSAGES # max number of messages output for a cell.
And::
sage_server.MAX_OUTPUT # max total character output for a single cell; computation
# terminated/truncated if sum of above exceeds this.
"""
Namespace = Namespace
_prefix = ''
_postfix = ''
_default_mode = 'sage'
_py_features = {}
def _flush_stdio(self):
"""
Flush the standard output streams. This should be called before sending any message
that produces output.
"""
sys.stdout.flush()
sys.stderr.flush()
def __repr__(self):
return ''
    def __init__(self, conn, id, data=None, cell_id=None, message_queue=None):
        """
        Create the per-cell execution state.

        INPUT:

        - ``conn`` -- connection object used to send output/blob messages
        - ``id`` -- id of the execute request this cell answers
        - ``data`` -- optional extra data attached to the request
        - ``cell_id`` -- id of the worksheet cell being evaluated
        - ``message_queue`` -- queue of incoming messages from the hub
        """
        self._conn = conn
        # Output accounting, enforced by _send_output.
        self._num_output_messages = 0
        self._total_output_length = 0
        self._output_warning_sent = False
        self._id = id
        self._done = True  # done=self._done when last execute message is sent; e.g., set self._done = False to not close cell on code term.
        self.data = data
        self.cell_id = cell_id
        self.namespace = namespace
        self.message_queue = message_queue
        self.code_decorators = []  # gets reset if there are code decorators
        # Alias: someday remove all references to "salvus" and instead use smc.
        # For now this alias is easier to think of and use.
        namespace['smc'] = namespace[
            'salvus'] = self  # beware of circular ref?
        # Monkey patch in our "require" command.
        namespace['require'] = self.require
        # Make the salvus object itself available when doing "from sage.all import *".
        import sage.all
        sage.all.salvus = self
def _send_output(self, *args, **kwds):
if self._output_warning_sent:
raise KeyboardInterrupt
mesg = message.output(*args, **kwds)
if not mesg.get('once', False):
self._num_output_messages += 1
from . import sage_server
if self._num_output_messages > sage_server.MAX_OUTPUT_MESSAGES:
self._output_warning_sent = True
err = "\nToo many output messages: %s (at most %s per cell -- type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
self._num_output_messages, sage_server.MAX_OUTPUT_MESSAGES)
self._conn.send_json(
message.output(stderr=err, id=self._id, once=False, done=True))
raise KeyboardInterrupt
n = self._conn.send_json(mesg)
self._total_output_length += n
if self._total_output_length > sage_server.MAX_OUTPUT:
self._output_warning_sent = True
err = "\nOutput too long: %s -- MAX_OUTPUT (=%s) exceeded (type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
self._total_output_length, sage_server.MAX_OUTPUT)
self._conn.send_json(
message.output(stderr=err, id=self._id, once=False, done=True))
raise KeyboardInterrupt
def obj(self, obj, done=False):
self._send_output(obj=obj, id=self._id, done=done)
return self
    def link(self, filename, label=None, foreground=True, cls=''):
        """
        Output a clickable link to a file somewhere in this project. The filename
        path must be relative to the current working directory of the Python process.
        The simplest way to use this is
            salvus.link("../name/of/file")  # any relative path to any file
        This creates a link, which when clicked on, opens that file in the foreground.
        If the filename is the name of a directory, clicking will instead
        open the file browser on that directory:
            salvus.link("../name/of/directory")  # clicking on the resulting link opens a directory
        If you would like a button instead of a link, pass cls='btn'. You can use any of
        the standard Bootstrap button classes, e.g., btn-small, btn-large, btn-success, etc.
        If you would like to change the text in the link (or button) to something
        besides the default (filename), just pass arbitrary HTML to the label= option.
        INPUT:
        - filename -- a relative path to a file or directory
        - label -- (default: the filename) html label for the link
        - foreground -- (default: True); if True, opens link in the foreground
        - cls -- (default: '') optional CSS classes, such as 'btn'.
        EXAMPLES:
        Use as a line decorator::
            %salvus.link name/of/file.foo
        Make a button::
            salvus.link("foo/bar/", label="The Bar Directory", cls='btn')
        Make two big blue buttons with plots in them::
            plot(sin, 0, 20).save('sin.png')
            plot(cos, 0, 20).save('cos.png')
            for img in ['sin.png', 'cos.png']:
                salvus.link(img, label="<img width='150px' src='%s'>"%salvus.file(img, show=False), cls='btn btn-large btn-primary')
        """
        # Make the path project-relative (strip the $HOME prefix).
        path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
        if label is None:
            label = filename
        id = uuid()
        # Emit an empty anchor now, then fill in the label client-side so it
        # may contain arbitrary HTML.
        self.html("<a class='%s' style='cursor:pointer'; id='%s'></a>" %
                  (cls, id))
        s = "$('#%s').html(obj.label).click(function() {%s; return false;});" % (
            id, self._action(path, foreground))
        self.javascript(s,
                        obj={
                            'label': label,
                            'path': path,
                            'foreground': foreground
                        },
                        once=False)
def _action(self, path, foreground):
if os.path.isdir(path):
if foreground:
action = "worksheet.project_page.open_directory(obj.path);"
else:
action = "worksheet.project_page.set_current_path(obj.path);"
else:
action = "worksheet.project_page.open_file({'path':obj.path, 'foreground': obj.foreground});"
return action
def open_tab(self, filename, foreground=True):
"""
Open a new file (or directory) document in another tab.
See the documentation for salvus.link.
"""
path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
self.javascript(self._action(path, foreground),
obj={
'path': path,
'foreground': foreground
},
once=True)
def close_tab(self, filename):
"""
Close an open file tab. The filename is relative to the current working directory.
"""
self.javascript("worksheet.project_page.close_file(obj)",
obj=filename,
once=True)
    def threed(
            self,
            g,  # sage Graphic3d object.
            width=None,
            height=None,
            frame=True,  # True/False or {'color':'black', 'thickness':.4, 'labels':True, 'fontsize':14, 'draw':True,
            # 'xmin':?, 'xmax':?, 'ymin':?, 'ymax':?, 'zmin':?, 'zmax':?}
            background=None,
            foreground=None,
            spin=False,
            aspect_ratio=None,
            frame_aspect_ratio=None,  # synonym for aspect_ratio
            done=False,
            renderer=None,  # None, 'webgl', or 'canvas'
    ):
        """
        Render a Sage 3d graphics object in the cell output.

        The scene is serialized to JSON and stored as a blob on the server;
        the output message references it by uuid (scenes can be far too
        large to inline in the message stream).
        """
        from .graphics import graphics3d_to_jsonable, json_float as f
        # process options, combining ones set explicitly above with ones inherited from 3d scene
        opts = {
            'width': width,
            'height': height,
            'background': background,
            'foreground': foreground,
            'spin': spin,
            'aspect_ratio': aspect_ratio,
            'renderer': renderer
        }
        extra_kwds = {} if g._extra_kwds is None else g._extra_kwds
        # clean up and normalize aspect_ratio option; explicit argument wins,
        # then frame_aspect_ratio, then values stored on the graphics object
        if aspect_ratio is None:
            if frame_aspect_ratio is not None:
                aspect_ratio = frame_aspect_ratio
            elif 'frame_aspect_ratio' in extra_kwds:
                aspect_ratio = extra_kwds['frame_aspect_ratio']
            elif 'aspect_ratio' in extra_kwds:
                aspect_ratio = extra_kwds['aspect_ratio']
        if aspect_ratio is not None:
            # 1 / "automatic" both mean "no explicit aspect ratio"
            if aspect_ratio == 1 or aspect_ratio == "automatic":
                aspect_ratio = None
            elif not (isinstance(aspect_ratio,
                                 (list, tuple)) and len(aspect_ratio) == 3):
                raise TypeError(
                    "aspect_ratio must be None, 1 or a 3-tuple, but it is '%s'"
                    % (aspect_ratio, ))
            else:
                aspect_ratio = [f(x) for x in aspect_ratio]
        opts['aspect_ratio'] = aspect_ratio
        # inherit any remaining unset options from the graphics object
        for k in [
                'spin',
                'height',
                'width',
                'background',
                'foreground',
                'renderer',
        ]:
            if k in extra_kwds and not opts.get(k, None):
                opts[k] = extra_kwds[k]
        # non-bool spin is a numeric rotation speed; coerce to JSON float
        if not isinstance(opts['spin'], bool):
            opts['spin'] = f(opts['spin'])
        opts['width'] = f(opts['width'])
        opts['height'] = f(opts['height'])
        # determine the frame from the scene's bounding box
        b = g.bounding_box()
        xmin, xmax, ymin, ymax, zmin, zmax = b[0][0], b[1][0], b[0][1], b[1][
            1], b[0][2], b[1][2]
        fr = opts['frame'] = {
            'xmin': f(xmin),
            'xmax': f(xmax),
            'ymin': f(ymin),
            'ymax': f(ymax),
            'zmin': f(zmin),
            'zmax': f(zmax)
        }
        # frame may be a dict of overrides, or a bool toggling drawing
        if isinstance(frame, dict):
            for k in list(fr.keys()):
                if k in frame:
                    fr[k] = f(frame[k])
            fr['draw'] = frame.get('draw', True)
            fr['color'] = frame.get('color', None)
            fr['thickness'] = f(frame.get('thickness', None))
            fr['labels'] = frame.get('labels', None)
            if 'fontsize' in frame:
                fr['fontsize'] = int(frame['fontsize'])
        elif isinstance(frame, bool):
            fr['draw'] = frame
        # convert the Sage graphics object to a JSON object that can be rendered
        scene = {'opts': opts, 'obj': graphics3d_to_jsonable(g)}
        # Store that object in the database, rather than sending it directly as an output message.
        # We do this since obj can easily be quite large/complicated, and managing it as part of the
        # document is too slow and doesn't scale.
        blob = json.dumps(scene, separators=(',', ':'))
        uuid = self._conn.send_blob(blob)
        # flush output (so any text appears before 3d graphics, in case they are interleaved)
        self._flush_stdio()
        # send message pointing to the 3d 'file', which will get downloaded from database
        self._send_output(id=self._id,
                          file={
                              'filename': unicode8("%s.sage3d" % uuid),
                              'uuid': uuid
                          },
                          done=done)
def d3_graph(self, g, **kwds):
from .graphics import graph_to_d3_jsonable
self._send_output(id=self._id,
d3={
"viewer": "graph",
"data": graph_to_d3_jsonable(g, **kwds)
})
def file(self,
filename,
show=True,
done=False,
download=False,
once=False,
events=None,
raw=False,
text=None):
"""
Display or provide a link to the given file. Raises a RuntimeError if this
is not possible, e.g, if the file is too large.
If show=True (the default), the browser will show the file,
or provide a clickable link to it if there is no way to show it.
If text is also given that will be used instead of the path to the file.
If show=False, this function returns an object T such that
T.url (or str(t)) is a string of the form "/blobs/filename?uuid=the_uuid"
that can be used to access the file even if the file is immediately
deleted after calling this function (the file is stored in a database).
Also, T.ttl is the time to live (in seconds) of the object. A ttl of
0 means the object is permanently available.
raw=False (the default):
If you use the URL
/blobs/filename?uuid=the_uuid&download
then the server will include a header that tells the browser to
download the file to disk instead of displaying it. Only relatively
small files can be made available this way. However, they remain
available (for a day) even *after* the file is deleted.
NOTE: It is safe to delete the file immediately after this
function (salvus.file) returns.
raw=True:
Instead, the URL is to the raw file, which is served directly
from the project:
/project-id/raw/path/to/filename
This will only work if the file is not deleted; however, arbitrarily
large files can be streamed this way.
This function creates an output message {file:...}; if the user saves
a worksheet containing this message, then any referenced blobs are made
permanent in the database.
The uuid is based on the Sha-1 hash of the file content (it is computed using the
function sage_server.uuidsha1). Any two files with the same content have the
same Sha1 hash.
"""
filename = unicode8(filename)
if raw:
info = self.project_info()
path = os.path.abspath(filename)
home = os.environ['HOME'] + '/'
if path.startswith(home):
path = path[len(home):]
else:
raise ValueError(
"can only send raw files in your home directory")
url = os.path.join('/', info['base_url'].strip('/'),
info['project_id'], 'raw', path.lstrip('/'))
if show:
self._flush_stdio()
self._send_output(id=self._id,
once=once,
file={
'filename': filename,
'url': url,
'show': show,
'text': text
},
events=events,
done=done)
return
else:
return TemporaryURL(url=url, ttl=0)
file_uuid = self._conn.send_file(filename)
mesg = None
while mesg is None:
self.message_queue.recv()
for i, (typ, m) in enumerate(self.message_queue.queue):
if typ == 'json' and m.get('event') == 'save_blob' and m.get(
'sha1') == file_uuid:
mesg = m
del self.message_queue[i]
break
if 'error' in mesg:
raise RuntimeError("error saving blob -- %s" % mesg['error'])
self._flush_stdio()
self._send_output(id=self._id,
once=once,
file={
'filename': filename,
'uuid': file_uuid,
'show': show,
'text': text
},
events=events,
done=done)
if not show:
info = self.project_info()
url = "%s/blobs/%s?uuid=%s" % (info['base_url'], filename,
file_uuid)
if download:
url += '?download'
return TemporaryURL(url=url, ttl=mesg.get('ttl', 0))
    def python_future_feature(self, feature=None, enable=None):
        """
        Allow users to enable, disable, and query the features in the python __future__ module.

        - no arguments: return the sorted list of currently enabled feature names.
        - ``feature`` only: return whether that feature is currently enabled.
        - ``feature`` with ``enable``: turn the feature on (truthy) or off (falsy).

        Raises ValueError if ``enable`` is given without ``feature`` and
        RuntimeError if ``feature`` is not a real __future__ feature.
        """
        if feature is None:
            if enable is not None:
                raise ValueError(
                    "enable may not be specified when feature is None")
            return sorted(Salvus._py_features.keys())
        attr = getattr(future, feature, None)
        # Validate against the official name list AND require a genuine
        # _Feature object (guards against unrelated module attributes).
        if (feature not in future.all_feature_names) or (
                attr is None) or not isinstance(attr, future._Feature):
            raise RuntimeError("future feature %.50r is not defined" %
                               (feature, ))
        if enable is None:
            return feature in Salvus._py_features
        if enable:
            Salvus._py_features[feature] = attr
        else:
            # disabling a feature that was never enabled is a no-op
            try:
                del Salvus._py_features[feature]
            except KeyError:
                pass
def default_mode(self, mode=None):
"""
Set the default mode for cell evaluation. This is equivalent
to putting %mode at the top of any cell that does not start
with %. Use salvus.default_mode() to return the current mode.
Use salvus.default_mode("") to have no default mode.
This is implemented using salvus.cell_prefix.
"""
if mode is None:
return Salvus._default_mode
Salvus._default_mode = mode
if mode == "sage":
self.cell_prefix("")
else:
self.cell_prefix("%" + mode)
def cell_prefix(self, prefix=None):
"""
Make it so that the given prefix code is textually
prepending to the input before evaluating any cell, unless
the first character of the cell is a %.
To append code at the end, use cell_postfix.
INPUT:
- ``prefix`` -- None (to return prefix) or a string ("" to disable)
EXAMPLES:
Make it so every cell is timed:
salvus.cell_prefix('%time')
Make it so cells are typeset using latex, and latex comments are allowed even
as the first line.
salvus.cell_prefix('%latex')
%sage salvus.cell_prefix('')
Evaluate each cell using GP (Pari) and display the time it took:
salvus.cell_prefix('%time\n%gp')
%sage salvus.cell_prefix('') # back to normal
"""
if prefix is None:
return Salvus._prefix
else:
Salvus._prefix = prefix
def cell_postfix(self, postfix=None):
"""
Make it so that the given code is textually
appended to the input before evaluating a cell.
To prepend code at the beginning, use cell_prefix.
INPUT:
- ``postfix`` -- None (to return postfix) or a string ("" to disable)
EXAMPLES:
Print memory usage after evaluating each cell:
salvus.cell_postfix('print("%s MB used"%int(get_memory_usage()))')
Return to normal
salvus.set_cell_postfix('')
"""
if postfix is None:
return Salvus._postfix
else:
Salvus._postfix = postfix
    def execute(self, code, namespace=None, preparse=True, locals=None):
        """
        Evaluate ``code`` in the worksheet namespace.

        The code is split into blocks; each block is (optionally) preparsed,
        compiled with any enabled __future__ feature flags, and executed.
        Trailing '?'/'??' trigger introspection instead of execution.  On
        error a traceback plus the offending line range is written to
        stderr and evaluation of the remaining blocks stops.
        """
        ascii_warn = False
        # NOTE(review): code_error is assigned but never read in this method.
        code_error = False
        # Under an ascii default encoding, non-ascii input commonly fails;
        # remember that so the except-branch can print a hint.
        if sys.getdefaultencoding() == 'ascii':
            for c in code:
                if ord(c) >= 128:
                    ascii_warn = True
                    break
        if namespace is None:
            namespace = self.namespace
        # clear pylab figure (takes a few microseconds)
        if pylab is not None:
            pylab.clf()
        # Compiler flags for the __future__ features enabled session-wide.
        compile_flags = reduce(operator.or_,
                               (feature.compiler_flag
                                for feature in Salvus._py_features.values()),
                               0)
        #code = sage_parsing.strip_leading_prompts(code)  # broken -- wrong on "def foo(x):\n print(x)"
        blocks = sage_parsing.divide_into_blocks(code)
        try:
            import sage.repl
            # CRITICAL -- we do NOT import sage.repl.interpreter!!!!!!!
            # That would waste several seconds importing ipython and much more, which is just dumb.
            # The only reason this is needed below is if the user has run preparser(False), which
            # would cause sage.repl.interpreter to be imported at that point (as preparser is
            # lazy imported.)
            sage_repl_interpreter = sage.repl.interpreter
        except:
            pass  # expected behavior usually, since sage.repl.interpreter usually not imported (only used by command line...)
        import sage.misc.session
        for start, stop, block in blocks:
            # if import sage.repl.interpreter fails, sage_repl_interpreter is unreferenced
            try:
                do_pp = getattr(sage_repl_interpreter, '_do_preparse', True)
            except:
                do_pp = True
            if preparse and do_pp:
                block = sage_parsing.preparse_code(block)
            # reset the buffered output streams before each block
            sys.stdout.reset()
            sys.stderr.reset()
            try:
                b = block.rstrip()
                # get rid of comments at the end of the line -- issue #1835
                #from ushlex import shlex
                #s = shlex(b)
                #s.commenters = '#'
                #s.quotes = '"\''
                #b = ''.join(s)
                # e.g. now a line like 'x = test? # bar' becomes 'x=test?'
                if b.endswith('??'):
                    # '??' -> show source
                    p = sage_parsing.introspect(b,
                                                namespace=namespace,
                                                preparse=False)
                    self.code(source=p['result'], mode="python")
                elif b.endswith('?'):
                    # '?' -> show documentation
                    p = sage_parsing.introspect(b,
                                                namespace=namespace,
                                                preparse=False)
                    self.code(source=p['result'], mode="text/x-rst")
                else:
                    reload_attached_files_if_mod_smc()
                    if execute.count < 2:
                        execute.count += 1
                        if execute.count == 2:
                            # this fixup has to happen after first block has executed (os.chdir etc)
                            # but before user assigns any variable in worksheet
                            # sage.misc.session.init() is not called until first call of show_identifiers
                            # BUGFIX: be careful to *NOT* assign to _!! see https://github.com/sagemathinc/cocalc/issues/1107
                            block2 = "sage.misc.session.state_at_init = dict(globals());sage.misc.session._dummy=sage.misc.session.show_identifiers();\n"
                            exec(compile(block2, '', 'single'), namespace,
                                 locals)
                            # run the user's optional startup file exactly once
                            b2a = """
if 'SAGE_STARTUP_FILE' in os.environ and os.path.isfile(os.environ['SAGE_STARTUP_FILE']):
    try:
        load(os.environ['SAGE_STARTUP_FILE'])
    except:
        sys.stdout.flush()
        sys.stderr.write('\\nException loading startup file: {}\\n'.format(os.environ['SAGE_STARTUP_FILE']))
        sys.stderr.flush()
        raise
"""
                            exec(compile(b2a, '', 'exec'), namespace, locals)
                    # pick up __future__ statements appearing in this block
                    features = sage_parsing.get_future_features(
                        block, 'single')
                    if features:
                        compile_flags = reduce(
                            operator.or_, (feature.compiler_flag
                                           for feature in features.values()),
                            compile_flags)
                    exec(
                        compile(block + '\n',
                                '',
                                'single',
                                flags=compile_flags), namespace, locals)
                    # features persist for the rest of the session
                    if features:
                        Salvus._py_features.update(features)
                sys.stdout.flush()
                sys.stderr.flush()
            except:
                if ascii_warn:
                    sys.stderr.write(
                        '\n\n*** WARNING: Code contains non-ascii characters ***\n'
                    )
                    # smart quotes are the most common culprit
                    for c in '\u201c\u201d':
                        if c in code:
                            sys.stderr.write(
                                '*** Maybe the character < %s > should be replaced by < " > ? ***\n'
                                % c)
                            break
                    sys.stderr.write('\n\n')
                if six.PY2:
                    from exceptions import SyntaxError, TypeError
                # py3: all standard errors are available by default via "builtin", not available here for some reason ...
                if six.PY3:
                    from builtins import SyntaxError, TypeError
                exc_type, _, _ = sys.exc_info()
                if exc_type in [SyntaxError, TypeError]:
                    # heuristic: warn about likely implicit multiplication (5x -> 5*x)
                    from .sage_parsing import strip_string_literals
                    code0, _, _ = strip_string_literals(code)
                    implicit_mul = RE_POSSIBLE_IMPLICIT_MUL.findall(code0)
                    if len(implicit_mul) > 0:
                        implicit_mul_list = ', '.join(
                            str(_) for _ in implicit_mul)
                        # we know there is a SyntaxError and there could be an implicit multiplication
                        sys.stderr.write(
                            '\n\n*** WARNING: Code contains possible implicit multiplication ***\n'
                        )
                        sys.stderr.write(
                            '*** Check if any of [ %s ] need a "*" sign for multiplication, e.g. 5x should be 5*x ! ***\n\n'
                            % implicit_mul_list)
                sys.stdout.flush()
                sys.stderr.write('Error in lines %s-%s\n' %
                                 (start + 1, stop + 1))
                traceback.print_exc()
                sys.stderr.flush()
                break
    def execute_with_code_decorators(self,
                                     code_decorators,
                                     code,
                                     preparse=True,
                                     namespace=None,
                                     locals=None):
        """
        salvus.execute_with_code_decorators is used when evaluating
        code blocks that are set to any non-default code_decorator.
        """
        import sage  # used below as a code decorator
        if isinstance(code_decorators, str):
            code_decorators = [code_decorators]
        if preparse:
            code_decorators = list(
                map(sage_parsing.preparse_code, code_decorators))
        # decorator names are resolved in the worksheet namespace
        code_decorators = [
            eval(code_decorator, self.namespace)
            for code_decorator in code_decorators
        ]
        # The code itself may want to know exactly what code decorators are in effect.
        # For example, r.eval can do extra things when being used as a decorator.
        self.code_decorators = code_decorators
        # 'before' hooks run first, in declaration order, and may replace
        # the decorator with what they return
        for i, code_decorator in enumerate(code_decorators):
            # eval is for backward compatibility
            if not hasattr(code_decorator, 'eval') and hasattr(
                    code_decorator, 'before'):
                code_decorators[i] = code_decorator.before(code)
        # then the decorators themselves are applied innermost-first
        for code_decorator in reversed(code_decorators):
            # eval is for backward compatibility
            if hasattr(code_decorator, 'eval'):
                print((code_decorator.eval(code, locals=self.namespace)),
                      end=' ')
                code = ''
            elif code_decorator is sage:
                # special case -- the sage module (i.e., %sage) should do nothing.
                pass
            else:
                code = code_decorator(code)
            if code is None:
                code = ''
        # whatever string code remains is executed normally
        if code != '' and isinstance(code, str):
            self.execute(code,
                         preparse=preparse,
                         namespace=namespace,
                         locals=locals)
        # 'after' hooks run once execution is finished
        for code_decorator in code_decorators:
            if not hasattr(code_decorator, 'eval') and hasattr(
                    code_decorator, 'after'):
                code_decorator.after(code)
def html(self, html, done=False, once=None):
"""
Display html in the output stream.
EXAMPLE:
salvus.html("<b>Hi</b>")
"""
self._flush_stdio()
self._send_output(html=unicode8(html),
id=self._id,
done=done,
once=once)
def md(self, md, done=False, once=None):
"""
Display markdown in the output stream.
EXAMPLE:
salvus.md("**Hi**")
"""
self._flush_stdio()
self._send_output(md=unicode8(md), id=self._id, done=done, once=once)
    def pdf(self, filename, **kwds):
        # Thin wrapper around sage_salvus.show_pdf; its docstring is copied
        # onto this method by the six.PY2/PY3 block after this class.
        sage_salvus.show_pdf(filename, **kwds)
    def tex(self, obj, display=False, done=False, once=None, **kwds):
        """
        Display obj nicely using TeX rendering.
        INPUT:
        - obj -- latex string or object that is automatically be converted to TeX
        - display -- (default: False); if True, typeset as display math (so centered, etc.)
        """
        self._flush_stdio()
        # Non-strings are converted via the worksheet's latex() function.
        tex = obj if isinstance(obj, str) else self.namespace['latex'](obj, **
                                                                      kwds)
        self._send_output(tex={
            'tex': tex,
            'display': display
        },
                          id=self._id,
                          done=done,
                          once=once)
        return self
    def start_executing(self):
        # Tell the client the cell is running (done=False) without any payload.
        self._send_output(done=False, id=self._id)
    def clear(self, done=False):
        # Clear all output of the current cell; the user-facing docstring is
        # copied from sage_salvus.clear by the six.PY2/PY3 block below.
        self._send_output(clear=True, id=self._id, done=done)
    def delete_last_output(self, done=False):
        # Remove the most recent output message of the cell; docstring is
        # copied from sage_salvus.delete_last_output below.
        self._send_output(delete_last=True, id=self._id, done=done)
def stdout(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard output stream of the compute cell.
INPUT:
- output -- string or object
"""
stdout = output if isinstance(output, str) else unicode8(output)
self._send_output(stdout=stdout, done=done, id=self._id, once=once)
return self
def stderr(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard error stream of the compute cell.
INPUT:
- output -- string or object
"""
stderr = output if isinstance(output, str) else unicode8(output)
self._send_output(stderr=stderr, done=done, id=self._id, once=once)
return self
def code(
self,
source, # actual source code
mode=None, # the syntax highlight codemirror mode
filename=None, # path of file it is contained in (if applicable)
lineno=-1, # line number where source starts (0-based)
done=False,
once=None):
"""
Send a code message, which is to be rendered as code by the client, with
appropriate syntax highlighting, maybe a link to open the source file, etc.
"""
source = source if isinstance(source, str) else unicode8(source)
code = {
'source': source,
'filename': filename,
'lineno': int(lineno),
'mode': mode
}
self._send_output(code=code, done=done, id=self._id, once=once)
return self
def _execute_interact(self, id, vals):
if id not in sage_salvus.interacts:
print("(Evaluate this cell to use this interact.)")
#raise RuntimeError("Error: No interact with id %s"%id)
else:
sage_salvus.interacts[id](vals)
def interact(self, f, done=False, once=None, **kwds):
I = sage_salvus.InteractCell(f, **kwds)
self._flush_stdio()
self._send_output(interact=I.jsonable(),
id=self._id,
done=done,
once=once)
return sage_salvus.InteractFunction(I)
    def javascript(self,
                   code,
                   once=False,
                   coffeescript=False,
                   done=False,
                   obj=None):
        """
        Execute the given Javascript code as part of the output
        stream. This same code will be executed (at exactly this
        point in the output stream) every time the worksheet is
        rendered.
        See the docs for the top-level javascript function for more details.
        INPUT:
        - code -- a string
        - once -- boolean (default: False); if True the Javascript is
          only executed once, not every time the cell is loaded. This
          is what you would use if you call salvus.stdout, etc. Use
          once=False, e.g., if you are using javascript to make a DOM
          element draggable (say). WARNING: If once=True, then the
          javascript is likely to get executed before other output to
          a given cell is even rendered.
        - coffeescript -- boolean (default: False); if True, the input
          code is first converted from CoffeeScript to Javascript.
        At least the following Javascript objects are defined in the
        scope in which the code is evaluated::
        - cell -- jQuery wrapper around the current compute cell
        - salvus.stdout, salvus.stderr, salvus.html, salvus.tex -- all
          allow you to write additional output to the cell
        - worksheet - jQuery wrapper around the current worksheet DOM object
        - obj -- the optional obj argument, which is passed via JSON serialization
        """
        if obj is None:
            obj = {}
        self._send_output(javascript={
            'code': code,
            'coffeescript': coffeescript
        },
                          id=self._id,
                          done=done,
                          obj=obj,
                          once=once)
def coffeescript(self, *args, **kwds):
"""
This is the same as salvus.javascript, but with coffeescript=True.
See the docs for the top-level javascript function for more details.
"""
kwds['coffeescript'] = True
self.javascript(*args, **kwds)
    def raw_input(self,
                  prompt='',
                  default='',
                  placeholder='',
                  input_width=None,
                  label_width=None,
                  done=False,
                  type=None):  # done is ignored here
        """
        Show an input box in the cell output and block until the user's
        answer arrives.

        The submitted value (unicode) is returned, optionally coerced via
        ``type``: the string 'sage' evaluates it with sage_eval; any other
        callable is applied to the value directly.  Raises
        KeyboardInterrupt when a different message interrupts the wait.
        """
        self._flush_stdio()
        m = {'prompt': unicode8(prompt)}
        if input_width is not None:
            m['input_width'] = unicode8(input_width)
        if label_width is not None:
            m['label_width'] = unicode8(label_width)
        if default:
            m['value'] = unicode8(default)
        if placeholder:
            m['placeholder'] = unicode8(placeholder)
        self._send_output(raw_input=m, id=self._id)
        # Block until the client answers (or some other message arrives).
        typ, mesg = self.message_queue.next_mesg()
        log("handling raw input message ", truncate_text(unicode8(mesg), 400))
        if typ == 'json' and mesg['event'] == 'sage_raw_input':
            # everything worked out perfectly
            self.delete_last_output()
            m['value'] = mesg['value']  # as unicode!
            m['submitted'] = True
            # Re-render the prompt with the submitted value filled in.
            self._send_output(raw_input=m, id=self._id)
            value = mesg['value']
            if type is not None:
                if type == 'sage':
                    value = sage_salvus.sage_eval(value)
                else:
                    try:
                        value = type(value)
                    except TypeError:
                        # Some things in Sage are clueless about unicode for some reason...
                        # Let's at least try, in case the unicode can convert to a string.
                        value = type(str(value))
            return value
        else:
            raise KeyboardInterrupt(
                "raw_input interrupted by another action: event='%s' (expected 'sage_raw_input')"
                % mesg['event'])
def _check_component(self, component):
if component not in ['input', 'output']:
raise ValueError("component must be 'input' or 'output'")
    def hide(self, component):
        """
        Hide the given component ('input' or 'output') of the cell.
        """
        self._check_component(component)
        # NOTE(review): the id is passed positionally here, unlike the
        # keyword form id=self._id used everywhere else in this class --
        # confirm message.output takes the id as its first positional arg.
        self._send_output(self._id, hide=component)
    def show(self, component):
        """
        Show the given component ('input' or 'output') of the cell.
        """
        self._check_component(component)
        # NOTE(review): id passed positionally; see the same note on hide().
        self._send_output(self._id, show=component)
    def notify(self, **kwds):
        """
        Display a graphical notification using the alert_message Javascript function.
        INPUTS:
        - `type: "default"` - Type of the notice. "default", "warning", "info", "success", or "error".
        - `title: ""` - The notice's title.
        - `message: ""` - The notice's text.
        - `timeout: ?` - Delay in seconds before the notice is automatically removed.
        EXAMPLE:
        salvus.notify(type="warning", title="This warning", message="This is a quick message.", timeout=3)
        """
        obj = {}
        for k, v in kwds.items():
            if k == 'text':  # backward compat
                k = 'message'
            elif k == 'type' and v == 'notice':  # backward compat
                v = 'default'
            obj[k] = sage_salvus.jsonable(v)
            # NOTE(review): a legacy 'delay' kwarg stays in obj *and* is
            # converted to 'timeout' -- the conversion uses the raw value,
            # not the jsonable one.
            if k == 'delay':  # backward compat
                obj['timeout'] = v / 1000.0  # units are in seconds now.
        self.javascript("alert_message(obj)", once=True, obj=obj)
def execute_javascript(self, code, coffeescript=False, obj=None):
"""
Tell the browser to execute javascript. Basically the same as
salvus.javascript with once=True (the default), except this
isn't tied to a particular cell. There is a worksheet object
defined in the scope of the evaluation.
See the docs for the top-level javascript function for more details.
"""
self._conn.send_json(
message.execute_javascript(code,
coffeescript=coffeescript,
obj=json.dumps(obj,
separators=(',', ':'))))
def execute_coffeescript(self, *args, **kwds):
"""
This is the same as salvus.execute_javascript, but with coffeescript=True.
See the docs for the top-level javascript function for more details.
"""
kwds['coffeescript'] = True
self.execute_javascript(*args, **kwds)
def _cython(self, filename, **opts):
"""
Return module obtained by compiling the Cython code in the
given file.
INPUT:
- filename -- name of a Cython file
- all other options are passed to sage.misc.cython.cython unchanged,
except for use_cache which defaults to True (instead of False)
OUTPUT:
- a module
"""
if 'use_cache' not in opts:
opts['use_cache'] = True
import sage.misc.cython
modname, path = sage.misc.cython.cython(filename, **opts)
try:
sys.path.insert(0, path)
module = __import__(modname)
finally:
del sys.path[0]
return module
def _import_code(self, content, **opts):
while True:
py_file_base = uuid().replace('-', '_')
if not os.path.exists(py_file_base + '.py'):
break
try:
open(py_file_base + '.py', 'w').write(content)
try:
sys.path.insert(0, os.path.abspath('.'))
mod = __import__(py_file_base)
finally:
del sys.path[0]
finally:
os.unlink(py_file_base + '.py')
os.unlink(py_file_base + '.pyc')
return mod
    def _sage(self, filename, **opts):
        # Import a .sage file: run it through the Sage preparser with
        # "from sage.all import *" prepended, then load it as a module.
        import sage.misc.preparser
        content = "from sage.all import *\n" + sage.misc.preparser.preparse_file(
            open(filename).read())
        return self._import_code(content, **opts)
    def _spy(self, filename, **opts):
        # Import a .spy file: like _sage, but only pulls in the minimal
        # preparser support names rather than all of sage.all.
        import sage.misc.preparser
        content = "from sage.all import Integer, RealNumber, PolynomialRing\n" + sage.misc.preparser.preparse_file(
            open(filename).read())
        return self._import_code(content, **opts)
    def _py(self, filename, **opts):
        # NOTE(review): opts are accepted for signature symmetry with the
        # other loaders but are ignored by __import__ here.  ``filename``
        # is expected to be a module name (no .py extension).
        return __import__(filename)
def require(self, filename, **opts):
if not os.path.exists(filename):
raise ValueError("file '%s' must exist" % filename)
base, ext = os.path.splitext(filename)
if ext == '.pyx' or ext == '.spyx':
return self._cython(filename, **opts)
if ext == ".sage":
return self._sage(filename, **opts)
if ext == ".spy":
return self._spy(filename, **opts)
if ext == ".py":
return self._py(filename, **opts)
raise NotImplementedError("require file of type %s not implemented" %
ext)
    def typeset_mode(self, on=True):
        # Toggle automatic LaTeX typesetting of output; delegates to
        # sage_salvus.typeset_mode.
        sage_salvus.typeset_mode(on)
    def project_info(self):
        """
        Return a dictionary with information about the project in which this code is running.
        EXAMPLES::
            sage: salvus.project_info()
            {"stdout":"{u'project_id': u'...', u'location': {u'username': u'teaAuZ9M', u'path': u'.', u'host': u'localhost', u'port': 22}, u'base_url': u'/...'}\n"}
        """
        # INFO is a module-level dict defined elsewhere in this file --
        # presumably populated at server startup.  It is returned by
        # reference, so callers should not mutate it.
        return INFO
# Copy the user-facing docstrings from the sage_salvus implementations onto
# the corresponding thin Salvus wrapper methods.  Under Python 2 the function
# object behind an unbound method is only writable via __func__.
if six.PY2:
    Salvus.pdf.__func__.__doc__ = sage_salvus.show_pdf.__doc__
    Salvus.raw_input.__func__.__doc__ = sage_salvus.raw_input.__doc__
    Salvus.clear.__func__.__doc__ = sage_salvus.clear.__doc__
    Salvus.delete_last_output.__func__.__doc__ = sage_salvus.delete_last_output.__doc__
else:
    Salvus.pdf.__doc__ = sage_salvus.show_pdf.__doc__
    Salvus.raw_input.__doc__ = sage_salvus.raw_input.__doc__
    Salvus.clear.__doc__ = sage_salvus.clear.__doc__
    Salvus.delete_last_output.__doc__ = sage_salvus.delete_last_output.__doc__
def execute(conn, id, code, data, cell_id, preparse, message_queue):
    """
    Evaluate ``code`` for a single execute-request message.

    Creates a fresh Salvus state object, temporarily swaps
    sys.stdout/sys.stderr for buffered streams that forward to the cell's
    output, applies the session-wide prefix/postfix, and guarantees that
    exactly one done message is sent (unless salvus._done is False).
    """
    salvus = Salvus(conn=conn,
                    id=id,
                    data=data,
                    message_queue=message_queue,
                    cell_id=cell_id)
    #salvus.start_executing() # with our new mainly client-side execution this isn't needed; not doing this makes evaluation roundtrip around 100ms instead of 200ms too, which is a major win.
    try:
        # initialize the salvus output streams
        streams = (sys.stdout, sys.stderr)
        sys.stdout = BufferedOutputStream(salvus.stdout)
        sys.stderr = BufferedOutputStream(salvus.stderr)
        try:
            # initialize more salvus functionality
            sage_salvus.set_salvus(salvus)
            namespace['sage_salvus'] = sage_salvus
        except:
            traceback.print_exc()
        if salvus._prefix:
            # A leading '%' means the cell already chose an explicit mode,
            # so the default prefix must not be applied.
            if not code.startswith("%"):
                code = salvus._prefix + '\n' + code
        if salvus._postfix:
            code += '\n' + salvus._postfix
        salvus.execute(code, namespace=namespace, preparse=preparse)
    finally:
        # there must be exactly one done message, unless salvus._done is False.
        if sys.stderr._buf:
            if sys.stdout._buf:
                sys.stdout.flush()
            sys.stderr.flush(done=salvus._done)
        else:
            sys.stdout.flush(done=salvus._done)
        # restore the real streams
        (sys.stdout, sys.stderr) = streams


# execute.count goes from 0 to 2
# used for show_identifiers()
execute.count = 0
def drop_privileges(id, home, transient, username):
    """
    Drop root privileges: switch this process to run as the user with
    uid/gid ``id`` and home directory ``home``, and point the relevant
    environment variables at that home.

    INPUT:

    - ``id`` -- int; used as both the uid and the gid
    - ``home`` -- string; home directory to chdir into and use for DOT_SAGE etc.
    - ``transient`` -- bool; if True, chown ``home`` to the target user first
    - ``username`` -- string; value for the USER/USERNAME environment variables

    NOTE: setgid must be called before setuid -- once we drop the uid we no
    longer have permission to change the gid.
    """
    gid = id
    uid = id
    if transient:
        os.chown(home, uid, gid)
    os.setgid(gid)
    os.setuid(uid)
    os.environ['DOT_SAGE'] = home
    mpl = os.environ['MPLCONFIGDIR']
    # relocate MPLCONFIGDIR under the new home
    # NOTE(review): assumes the first 5 characters of the old MPLCONFIGDIR are
    # a prefix to strip -- TODO confirm against how MPLCONFIGDIR is set up.
    os.environ['MPLCONFIGDIR'] = home + mpl[5:]
    os.environ['HOME'] = home
    os.environ['IPYTHON_DIR'] = home
    os.environ['USERNAME'] = username
    os.environ['USER'] = username
    os.chdir(home)
    # Monkey patch the Sage library and anything else that does not
    # deal well with changing user. This sucks, but it is work that
    # simply must be done because we're not importing the library from
    # scratch (which would take a long time).
    import sage.misc.misc
    sage.misc.misc.DOT_SAGE = home + '/.sage/'
class MessageQueue(list):
    """
    FIFO of messages received from the hub connection.

    Messages pulled off the connection by ``recv`` are stored oldest-last,
    so ``next_mesg`` (which pops from the end) always hands back the oldest
    pending message first.
    """

    def __init__(self, conn):
        self.queue = []
        self.conn = conn

    def __repr__(self):
        return "Sage Server Message Queue"

    def __getitem__(self, i):
        return self.queue[i]

    def __delitem__(self, i):
        del self.queue[i]

    def next_mesg(self):
        """
        Remove oldest message from the queue and return it.
        If the queue is empty, wait for a message to arrive
        and return it (does not place it in the queue).
        """
        if not self.queue:
            # nothing buffered -- block on the connection directly
            return self.conn.recv()
        return self.queue.pop()

    def recv(self):
        """
        Wait until one message is received and enqueue it.
        Also returns the mesg.
        """
        incoming = self.conn.recv()
        self.queue.insert(0, incoming)
        return incoming
def session(conn):
    """
    This is run by the child process that is forked off on each new
    connection.  It drops privileges, then handles the complete
    compute session.

    INPUT:

    - ``conn`` -- the TCP connection

    The loop below dispatches on the incoming message's ``event`` field:
    ``terminate_session`` ends the session, ``execute_code`` runs worksheet
    code, and ``introspect`` answers tab-completion/help requests.
    """
    mq = MessageQueue(conn)
    pid = os.getpid()
    # seed the random number generator(s)
    import sage.all
    sage.all.set_random_seed()
    import random
    random.seed(sage.all.initial_seed())
    # get_memory_usage is not aware of being forked...
    import sage.misc.getusage
    sage.misc.getusage._proc_status = "/proc/%s/status" % os.getpid()
    # counts consecutive iterations of the loop that ended in the outer
    # except -- used to detect a dead hub connection (see comment below)
    cnt = 0
    while True:
        try:
            typ, mesg = mq.next_mesg()
            #print('INFO:child%s: received message "%s"'%(pid, mesg))
            log("handling message ", truncate_text(unicode8(mesg), 400))
            event = mesg['event']
            if event == 'terminate_session':
                return
            elif event == 'execute_code':
                try:
                    execute(conn=conn,
                            id=mesg['id'],
                            code=mesg['code'],
                            data=mesg.get('data', None),
                            cell_id=mesg.get('cell_id', None),
                            preparse=mesg.get('preparse', True),
                            message_queue=mq)
                except Exception as err:
                    log("ERROR -- exception raised '%s' when executing '%s'" %
                        (err, mesg['code']))
            elif event == 'introspect':
                try:
                    # check for introspect from jupyter cell: if the cell's
                    # top line is a %magic, that magic may name a jupyter
                    # kernel mode, in which case the kernel answers.
                    prefix = Salvus._default_mode
                    if 'top' in mesg:
                        top = mesg['top']
                        log('introspect cell top line %s' % top)
                        if top.startswith("%"):
                            prefix = top[1:]
                    try:
                        # see if prefix is the name of a jupyter kernel function
                        kc = eval(prefix + "(get_kernel_client=True)",
                                  namespace, locals())
                        kn = eval(prefix + "(get_kernel_name=True)", namespace,
                                  locals())
                        log("jupyter introspect prefix %s kernel %s" %
                            (prefix, kn))  # e.g. "p2", "python2"
                        jupyter_introspect(conn=conn,
                                           id=mesg['id'],
                                           line=mesg['line'],
                                           preparse=mesg.get('preparse', True),
                                           kc=kc)
                    except:
                        # not a jupyter mode (or the kernel failed) -- fall
                        # back to native Sage introspection
                        import traceback
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        lines = traceback.format_exception(
                            exc_type, exc_value, exc_traceback)
                        log(lines)
                        introspect(conn=conn,
                                   id=mesg['id'],
                                   line=mesg['line'],
                                   preparse=mesg.get('preparse', True))
                except:
                    pass
            else:
                raise RuntimeError("invalid message '%s'" % mesg)
        except:
            # When hub connection dies, loop goes crazy.
            # Unfortunately, just catching SIGINT doesn't seem to
            # work, and leads to random exits during a
            # session. Howeer, when connection dies, 10000 iterations
            # happen almost instantly. Ugly, but it works.
            cnt += 1
            if cnt > 10000:
                sys.exit(0)
            else:
                pass
def jupyter_introspect(conn, id, line, preparse, kc):
    """
    Answer a tab-completion request by delegating to a running Jupyter
    kernel, translating its ``complete_reply`` into an
    ``introspect_completions`` message sent back on ``conn``.

    INPUT:

    - ``conn`` -- the ConnectionJSON to the hub
    - ``id`` -- id of the introspect message being answered
    - ``line`` -- the (partial) input line to complete
    - ``preparse`` -- unused here; kept for signature parity with introspect()
    - ``kc`` -- jupyter kernel client with shell/iopub channels
    """
    import jupyter_client
    # NOTE(review): `from queue import Empty` is the Python 3 module name;
    # on Python 2 this import would fail -- presumably this code path is
    # py3-only.  Verify against supported Python versions.
    from queue import Empty
    try:
        salvus = Salvus(conn=conn, id=id)
        msg_id = kc.complete(line)
        shell = kc.shell_channel
        iopub = kc.iopub_channel
        # handle iopub responses: drain until the kernel reports it is idle
        # for our request
        while True:
            try:
                msg = iopub.get_msg(timeout=1)
                msg_type = msg['msg_type']
                content = msg['content']
            except Empty:
                # shouldn't happen
                log("jupyter iopub channel empty")
                break
            # ignore traffic belonging to other requests
            if msg['parent_header'].get('msg_id') != msg_id:
                continue
            log("jupyter iopub recv %s %s" % (msg_type, str(content)))
            if msg_type == 'status' and content['execution_state'] == 'idle':
                break
        # handle shell responses: wait for the complete_reply for our msg_id
        while True:
            try:
                msg = shell.get_msg(timeout=10)
                msg_type = msg['msg_type']
                content = msg['content']
            except:
                # shouldn't happen
                log("jupyter shell channel empty")
                break
            if msg['parent_header'].get('msg_id') != msg_id:
                continue
            log("jupyter shell recv %s %s" % (msg_type, str(content)))
            if msg_type == 'complete_reply' and content['status'] == 'ok':
                # jupyter kernel returns matches like "xyz.append" and smc wants just "append"
                matches = content['matches']
                offset = content['cursor_end'] - content['cursor_start']
                completions = [s[offset:] for s in matches]
                mesg = message.introspect_completions(id=id,
                                                      completions=completions,
                                                      target=line[-offset:])
                conn.send_json(mesg)
                break
    except:
        log("jupyter completion exception: %s" % sys.exc_info()[0])
def introspect(conn, id, line, preparse):
    """
    Answer an introspection request (tab completion, docstring help, or
    source code) using Sage's own parser, and send the resulting message
    back on ``conn``.

    INPUT:

    - ``conn`` -- the ConnectionJSON to the hub
    - ``id`` -- id of the introspect message being answered
    - ``line`` -- the input line being introspected
    - ``preparse`` -- bool; whether to preparse ``line`` as Sage code
    """
    salvus = Salvus(
        conn=conn, id=id
    )  # so salvus.[tab] works -- note that Salvus(...) modifies namespace.
    z = sage_parsing.introspect(line, namespace=namespace, preparse=preparse)
    if z['get_completions']:
        mesg = message.introspect_completions(id=id,
                                              completions=z['result'],
                                              target=z['target'])
    elif z['get_help']:
        mesg = message.introspect_docstring(id=id,
                                            docstring=z['result'],
                                            target=z['expr'])
    elif z['get_source']:
        mesg = message.introspect_source_code(id=id,
                                              source_code=z['result'],
                                              target=z['expr'])
    else:
        # BUGFIX: previously this fell through with mesg unbound, so
        # conn.send_json(mesg) raised a confusing UnboundLocalError.
        # Fail with an explicit error instead.
        raise ValueError("introspect: unknown request %r" % (z,))
    conn.send_json(mesg)
def handle_session_term(signum, frame):
    """
    SIGCHLD handler: reap every child process that has already exited, so
    terminated sessions do not linger as zombies.  Returns as soon as no
    more children can be reaped.
    """
    while True:
        try:
            child_pid, _exit_status = os.waitpid(-1, os.WNOHANG)
        except:
            # no children left at all (or waitpid failed) -- done
            return
        if not child_pid:
            # children exist but none have exited yet
            return
# Shared secret used to authenticate incoming connections; read lazily by
# unlock_conn() the first time a client connects.
secret_token = None
# Path of the file holding the secret token: either given explicitly via
# $COCALC_SECRET_TOKEN, or the default location under $SMC.
if 'COCALC_SECRET_TOKEN' in os.environ:
    secret_token_path = os.environ['COCALC_SECRET_TOKEN']
else:
    secret_token_path = os.path.join(os.environ['SMC'], 'secret_token')
def unlock_conn(conn):
    """
    Perform the secret-token handshake on a freshly accepted socket.

    The client must send the secret token as its very first bytes.  On
    success we reply with ``'y'`` and return True; on any failure we reply
    with ``'n'`` plus an explanation, close the socket, and return False.

    INPUT:

    - ``conn`` -- a plain socket.socket (not yet wrapped in ConnectionJSON)
    """
    global secret_token
    if secret_token is None:
        try:
            secret_token = open(secret_token_path).read().strip()
        except:
            conn.send(six.b('n'))
            conn.send(
                six.
                b("Unable to accept connection, since Sage server doesn't yet know the secret token; unable to read from '%s'"
                  % secret_token_path))
            conn.close()
            # BUGFIX: previously execution fell through here and crashed on
            # len(secret_token) with secret_token still None.
            return False
    # Compare bytes with bytes: on Python 3, conn.recv returns bytes while
    # secret_token is str, so the old prefix comparison against the str
    # always failed and broke out of the loop after the first recv.
    expected = six.b(secret_token)
    n = len(expected)
    token = six.b('')
    while len(token) < n:
        token += conn.recv(n)
        if token != expected[:len(token)]:
            break  # definitely not right -- don't try anymore
    if token != expected:
        log("token='%s'; secret_token='%s'" % (token, secret_token))
        conn.send(six.b('n'))  # no -- invalid login
        conn.send(six.b("Invalid secret token."))
        conn.close()
        return False
    else:
        conn.send(six.b('y'))  # yes -- valid login
        return True
def serve_connection(conn):
    """
    Handle one freshly accepted client socket (running in the forked child):
    authenticate it via the secret token, then either deliver a signal to a
    running session or start a full compute session on it.

    INPUT:

    - ``conn`` -- a plain socket.socket; wrapped in ConnectionJSON below
    """
    global PID
    PID = os.getpid()
    # First the client *must* send the secret shared token. If they
    # don't, we return (and the connection will have been destroyed by
    # unlock_conn).
    log("Serving a connection")
    log("Waiting for client to unlock the connection...")
    # TODO -- put in a timeout (?)
    if not unlock_conn(conn):
        log("Client failed to unlock connection. Dumping them.")
        return
    log("Connection unlocked.")
    try:
        conn = ConnectionJSON(conn)
        typ, mesg = conn.recv()
        log("Received message %s" % mesg)
    except Exception as err:
        log("Error receiving message: %s (connection terminated)" % str(err))
        raise
    # A 'send_signal' connection exists only to deliver a signal to an
    # already-running session process, then disconnect.
    if mesg['event'] == 'send_signal':
        if mesg['pid'] == 0:
            log("invalid signal mesg (pid=0)")
        else:
            log("Sending a signal")
            os.kill(mesg['pid'], mesg['signal'])
        return
    if mesg['event'] != 'start_session':
        log("Received an unknown message event = %s; terminating session." %
            mesg['event'])
        return
    log("Starting a session")
    # tell the client our pid so it can target signals at this session
    desc = message.session_description(os.getpid())
    log("child sending session description back: %s" % desc)
    conn.send_json(desc)
    session(conn=conn)
def serve(port, host, extra_imports=False):
    """
    Main server loop: pre-import the Sage library once, then accept TCP
    connections forever, forking one child process per connection (the
    child runs serve_connection, the parent keeps accepting).

    INPUT:

    - ``port`` -- int; TCP port to listen on
    - ``host`` -- string; interface to bind to
    - ``extra_imports`` -- bool; if True, also pre-import scipy/sympy and
      warm up plotting/integration
    """
    #log.info('opening connection on port %s', port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # check for children that have finished every few seconds, so
    # we don't end up with zombies.
    s.settimeout(5)
    s.bind((host, port))
    log('Sage server %s:%s' % (host, port))

    # Enabling the following signal completely breaks subprocess pexpect in many cases, which is
    # obviously totally unacceptable.
    #signal.signal(signal.SIGCHLD, handle_session_term)

    def init_library():
        # One-time (pre-fork) import and monkey-patching of the Sage library,
        # so that forked session children start instantly.
        tm = time.time()
        log("pre-importing the sage library...")

        # FOR testing purposes.
        ##log("fake 40 second pause to slow things down for testing....")
        ##time.sleep(40)
        ##log("done with pause")

        # Monkey patching interact using the new and improved Salvus
        # implementation of interact.
        import sagenb.notebook.interact
        sagenb.notebook.interact.interact = sage_salvus.interact

        # Actually import sage now. This must happen after the interact
        # import because of library interacts.
        log("import sage...")
        import sage.all
        log("imported sage.")

        # Monkey patch the html command.
        try:
            # need the following for sage_server to start with sage-8.0
            # or `import sage.interacts.library` will fail (not really important below, as we don't do that).
            import sage.repl.user_globals
            sage.repl.user_globals.set_globals(globals())
            log("initialized user_globals")
        except RuntimeError:
            # may happen with sage version < 8.0
            log("user_globals.set_globals failed, continuing", sys.exc_info())
        sage.all.html = sage.misc.html.html = sage_salvus.html

        # CRITICAL: look, we are just going to not do this, and have sage.interacts.library
        # be broken. It's **really slow** to do this, and I don't think sage.interacts.library
        # ever ended up going anywhere! People use wiki.sagemath.org/interact instead...
        #import sage.interacts.library
        #sage.interacts.library.html = sage_salvus.html

        # Set a useful figsize default; the matplotlib one is not notebook friendly.
        import sage.plot.graphics
        sage.plot.graphics.Graphics.SHOW_OPTIONS['figsize'] = [8, 4]
        # Monkey patch latex.eval, so that %latex works in worksheets
        sage.misc.latex.latex.eval = sage_salvus.latex0
        # Plot, integrate, etc., -- so startup time of worksheets is minimal.
        cmds = [
            'from sage.all import *', 'from sage.calculus.predefined import x',
            'import pylab'
        ]
        if extra_imports:
            cmds.extend([
                'import scipy', 'import sympy',
                "plot(sin).save('%s/a.png'%os.environ['SMC'], figsize=2)",
                'integrate(sin(x**2),x)'
            ])
        tm0 = time.time()
        for cmd in cmds:
            log(cmd)
            exec(cmd, namespace)
        global pylab
        pylab = namespace['pylab']  # used for clearing
        log('imported sage library and other components in %s seconds' %
            (time.time() - tm))
        for k, v in sage_salvus.interact_functions.items():
            namespace[k] = v
            # See above -- not doing this, since it is REALLY SLOW to import.
            # This does mean that some old code that tries to use interact might break (?).
            #namespace[k] = sagenb.notebook.interact.__dict__[k] = v
        namespace['_salvus_parsing'] = sage_parsing
        # expose the sage_salvus worksheet commands by name in the namespace
        for name in [
                'anaconda', 'asy', 'attach', 'auto', 'capture', 'cell',
                'clear', 'coffeescript', 'cython', 'default_mode',
                'delete_last_output', 'dynamic', 'exercise', 'fork', 'fortran',
                'go', 'help', 'hide', 'hideall', 'input', 'java', 'javascript',
                'julia', 'jupyter', 'license', 'load', 'md', 'mediawiki',
                'modes', 'octave', 'pandoc', 'perl', 'plot3d_using_matplotlib',
                'prun', 'python_future_feature', 'py3print_mode', 'python',
                'python3', 'r', 'raw_input', 'reset', 'restore', 'ruby',
                'runfile', 'sage_chat', 'sage_eval', 'scala', 'scala211',
                'script', 'search_doc', 'search_src', 'sh', 'show',
                'show_identifiers', 'singular_kernel', 'time', 'timeit',
                'typeset_mode', 'var', 'wiki'
        ]:
            namespace[name] = getattr(sage_salvus, name)
        namespace['sage_server'] = sys.modules[
            __name__]  # http://stackoverflow.com/questions/1676835/python-how-do-i-get-a-reference-to-a-module-inside-the-module-itself
        # alias pretty_print_default to typeset_mode, since sagenb has/uses that.
        namespace['pretty_print_default'] = namespace['typeset_mode']
        # and monkey patch it
        sage.misc.latex.pretty_print_default = namespace[
            'pretty_print_default']
        sage_salvus.default_namespace = dict(namespace)
        log("setup namespace with extra functions")
        # Sage's pretty_print and view are both ancient and a mess
        sage.all.pretty_print = sage.misc.latex.pretty_print = namespace[
            'pretty_print'] = namespace['view'] = namespace['show']
        # this way client code can tell it is running as a Sage Worksheet.
        namespace['__SAGEWS__'] = True

    log("Initialize sage library.")
    init_library()

    t = time.time()
    s.listen(128)
    i = 0
    children = {}
    log("Starting server listening for connections")
    try:
        while True:
            i += 1
            #print i, time.time()-t, 'cps: ', int(i/(time.time()-t))
            # do not use log.info(...) in the server loop; threads = race conditions that hang server every so often!!
            try:
                if children:
                    for pid in list(children.keys()):
                        if os.waitpid(pid, os.WNOHANG) != (0, 0):
                            log("subprocess %s terminated, closing connection"
                                % pid)
                            # BUGFIX: close the terminated child's own stored
                            # connection.  The old code did conn.close(),
                            # which closed the most recently accepted socket
                            # instead and leaked the dead child's fd.
                            children[pid].close()
                            del children[pid]
                try:
                    conn, addr = s.accept()
                    log("Accepted a connection from", addr)
                except:
                    # this will happen periodically since we did s.settimeout above, so
                    # that we wait for children above periodically.
                    continue
            except socket.error:
                continue
            child_pid = os.fork()
            if child_pid:  # parent
                log("forked off child with pid %s to handle this connection" %
                    child_pid)
                # keep our copy of the socket so we can close it when the
                # child terminates (see the reaping loop above)
                children[child_pid] = conn
            else:
                # child
                global PID
                PID = os.getpid()
                log("child process, will now serve this new connection")
                serve_connection(conn)
        # end while
    except Exception as err:
        log("Error taking connection: ", err)
        traceback.print_exc(file=open(LOGFILE, 'a'))
        #log.error("error: %s %s", type(err), str(err))
    finally:
        log("closing socket")
        #s.shutdown(0)
        s.close()
def run_server(port, host, pidfile, logfile=None):
    """
    Record our pid, configure the log file, and run serve() until it exits,
    removing the pidfile on the way out.

    INPUT:

    - ``port`` -- int; TCP port to listen on
    - ``host`` -- string; interface to bind to
    - ``pidfile`` -- string; path to write our pid to ('' = don't)
    - ``logfile`` -- optional string; overrides the module-level LOGFILE
    """
    global LOGFILE
    if logfile:
        LOGFILE = logfile
    if pidfile:
        pid = str(os.getpid())
        print("os.getpid() = %s" % pid)
        # use a context manager so the handle is closed (and the pid flushed
        # to disk) deterministically, not whenever the GC gets around to it
        with open(pidfile, 'w') as f:
            f.write(pid)
    log("run_server: port=%s, host=%s, pidfile='%s', logfile='%s'" %
        (port, host, pidfile, LOGFILE))
    try:
        serve(port, host)
    finally:
        if pidfile:
            os.unlink(pidfile)
# Command-line entry point: parse arguments, then run either the test
# client (-c) or the forking server (optionally daemonized with -d).
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Run Sage server")
    parser.add_argument(
        "-p",
        dest="port",
        type=int,
        default=0,
        help=
        "port to listen on (default: 0); 0 = automatically allocated; saved to $SMC/data/sage_server.port"
    )
    parser.add_argument(
        "-l",
        dest='log_level',
        type=str,
        default='INFO',
        help=
        "log level (default: INFO) useful options include WARNING and DEBUG")
    parser.add_argument("-d",
                        dest="daemon",
                        default=False,
                        action="store_const",
                        const=True,
                        help="daemon mode (default: False)")
    parser.add_argument(
        "--host",
        dest="host",
        type=str,
        default='127.0.0.1',
        help="host interface to bind to -- default is 127.0.0.1")
    parser.add_argument("--pidfile",
                        dest="pidfile",
                        type=str,
                        default='',
                        help="store pid in this file")
    parser.add_argument(
        "--logfile",
        dest="logfile",
        type=str,
        default='',
        help="store log in this file (default: '' = don't log to a file)")
    parser.add_argument("-c",
                        dest="client",
                        default=False,
                        action="store_const",
                        const=True,
                        help="run in test client mode number 1 (command line)")
    parser.add_argument("--hostname",
                        dest="hostname",
                        type=str,
                        default='',
                        help="hostname to connect to in client mode")
    parser.add_argument("--portfile",
                        dest="portfile",
                        type=str,
                        default='',
                        help="write port to this file")
    args = parser.parse_args()

    if args.daemon and not args.pidfile:
        print(("%s: must specify pidfile in daemon mode" % sys.argv[0]))
        sys.exit(1)

    # log level is currently ignored (the logging-module based logger is
    # disabled; see the commented-out configuration near the top of the file)
    if args.log_level:
        pass
        #level = getattr(logging, args.log_level.upper())
        #log.setLevel(level)

    if args.client:
        client1(
            port=args.port if args.port else int(open(args.portfile).read()),
            hostname=args.hostname)
        sys.exit(0)

    # port 0 means "pick any free port": bind a throwaway socket to learn one
    if not args.port:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))  # pick a free port
        args.port = s.getsockname()[1]
        del s

    if args.portfile:
        open(args.portfile, 'w').write(str(args.port))

    pidfile = os.path.abspath(args.pidfile) if args.pidfile else ''
    logfile = os.path.abspath(args.logfile) if args.logfile else ''
    if logfile:
        LOGFILE = logfile
        open(LOGFILE, 'w')  # for now we clear it on restart...
        log("setting logfile to %s" % LOGFILE)

    main = lambda: run_server(port=args.port, host=args.host, pidfile=pidfile)

    if args.daemon and args.pidfile:
        from . import daemon
        daemon.daemonize(args.pidfile)
        main()
    else:
        main()
# We might as well make the Sage server robust against a corrupted info.json file.
#!/usr/bin/env python
"""
sage_server.py -- unencrypted forking TCP server.
Note: I wrote functionality so this can run as root, create accounts on the fly,
and serve sage as those accounts. Doing this is horrendous from a security point of
view, and I'm definitely not doing this.
None of that functionality is actually used in https://cocalc.com!
For debugging, this may help:
killemall sage_server.py && sage --python sage_server.py -p 6000
"""
# NOTE: This file is GPL'd
# because it imports the Sage library. This file is not directly
# imported by anything else in CoCalc; the Python process it runs is
# used over a TCP connection.
#########################################################################################
# Copyright (C) 2016, Sagemath Inc.
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
# Add the path that contains this file to the Python load path, so we
# can import other files from there.
from __future__ import print_function, absolute_import
import six
import os, sys, time, operator
import __future__ as future
from functools import reduce
def unicode8(s):
    """
    Best-effort coercion of ``s`` to a unicode string.

    Bytes are decoded as UTF-8; anything else is passed through str(); if
    even that fails, ``s`` is returned unchanged.

    TODO: see http://stackoverflow.com/questions/21897664/why-does-unicodeu-passed-an-errors-parameter-raise-typeerror for how to fix.
    """
    for convert in (lambda v: str(v, 'utf8'), str):
        try:
            return convert(s)
        except:
            pass
    return s
# Default log destination: <this source file>.log next to the module; may be
# overridden by run_server(..., logfile=...) or the --logfile flag.
LOGFILE = os.path.realpath(__file__)[:-3] + ".log"
# Pid recorded in every log line; reassigned after fork in child processes.
PID = os.getpid()
from datetime import datetime
def log(*args):
    """
    Append a timestamped line built from ``args`` to LOGFILE.

    Never raises: any failure (unwritable file, bad argument, ...) is
    reported on stdout and otherwise ignored, since logging must not be
    able to take down the server.
    """
    try:
        mesg = "%s (%s): %s\n" % (PID, datetime.utcnow().strftime(
            '%Y-%m-%d %H:%M:%S.%f')[:-3], ' '.join([unicode8(x)
                                                    for x in args]))
        # BUGFIX: use a context manager so the file handle is closed after
        # every call instead of leaking until garbage collection.
        with open(LOGFILE, 'a') as debug_log:
            debug_log.write(mesg)
            debug_log.flush()
    except Exception as err:
        print(("an error writing a log message (ignoring) -- %s" % err, args))
# used for clearing pylab figure
pylab = None
# Maximum number of distinct (non-once) output messages per cell; when this number is
# exceeded, an exception is raised; this reduces the chances of the user creating
# a huge unusable worksheet.
MAX_OUTPUT_MESSAGES = 256
# stdout, stderr, html, etc. that exceeds this many characters will be truncated to avoid
# killing the client.
MAX_STDOUT_SIZE = MAX_STDERR_SIZE = MAX_CODE_SIZE = MAX_HTML_SIZE = MAX_MD_SIZE = MAX_TEX_SIZE = 40000
MAX_OUTPUT = 150000
# Standard imports.
import json, resource, shutil, signal, socket, struct, \
tempfile, time, traceback, pwd, re
# for "3x^2 + 4xy - 5(1+x) - 3 abc4ok", this pattern matches "3x", "5(" and "4xy" but not "abc4ok"
# to understand it, see https://regex101.com/ or https://www.debuggex.com/
RE_POSSIBLE_IMPLICIT_MUL = re.compile(r'(?:(?<=[^a-zA-Z])|^)(\d+[a-zA-Z\(]+)')
try:
from . import sage_parsing, sage_salvus
except:
import sage_parsing, sage_salvus
uuid = sage_salvus.uuid
# Set to False permanently once we discover sage.repl.attach is unavailable,
# so we do not retry the failing import on every cell execution.
reload_attached_files_if_mod_smc_available = True


def reload_attached_files_if_mod_smc():
    """
    Re-load any %attach'ed files that were modified on disk since they were
    attached (mirrors sage/src/sage/repl/attach.py
    reload_attached_files_if_modified()).  No-op if attach was never used.
    """
    # CRITICAL: do NOT import sage.repl.attach!! That will import IPython, wasting several seconds and
    # killing the user experience for no reason.
    try:
        import sage.repl
        sage.repl.attach
    except:
        # nothing to do -- attach has not been used and is not yet available.
        return
    global reload_attached_files_if_mod_smc_available
    if not reload_attached_files_if_mod_smc_available:
        return
    try:
        from sage.repl.attach import load_attach_path, modified_file_iterator
    except:
        print("sage_server: attach not available")
        reload_attached_files_if_mod_smc_available = False
        return
    # see sage/src/sage/repl/attach.py reload_attached_files_if_modified()
    for filename, mtime in modified_file_iterator():
        basename = os.path.basename(filename)
        timestr = time.strftime('%T', mtime)
        log('reloading attached file {0} modified at {1}'.format(
            basename, timestr))
        # BUGFIX: fall back to the absolute import, matching how
        # sage_parsing/sage_salvus are imported at the top of this module;
        # the bare relative import broke when running this file as a script.
        try:
            from .sage_salvus import load
        except ImportError:
            from sage_salvus import load
        load(filename)
# Determine the info object, if available. There's no good reason
# it wouldn't be available, unless a user explicitly deleted it, but
# we may as well try to be robust to this, especially if somebody
# were to try to use this server outside of cloud.sagemath.com.
_info_path = os.path.join(os.environ['SMC'], 'info.json')
if os.path.exists(_info_path):
    try:
        INFO = json.loads(open(_info_path).read())
    except:
        # This will fail, e.g., if info.json is invalid (maybe a blank file).
        # We definitely don't want sage server startup to be completely broken
        # in this case, so we fall back to "no info".
        INFO = {}
else:
    INFO = {}
# salvus.file/links rely on base_url being present, so guarantee the key.
if 'base_url' not in INFO:
    INFO['base_url'] = ''
# Configure logging
#logging.basicConfig()
#log = logging.getLogger('sage_server')
#log.setLevel(logging.INFO)
# A CoffeeScript version of this function is in misc_node.coffee.
import hashlib
def uuidsha1(data):
    """
    Return a deterministic, UUID4-shaped identifier derived from the SHA1
    digest of ``data`` (bytes).

    The hex digits of the digest are poured into the template
    'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'; the 'y' slot gets 8 plus the
    low-order 3 bits of the corresponding digit, as in a real UUID4.
    """
    digest = hashlib.sha1(data).hexdigest()
    template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'
    out = []
    j = 0
    for ch in template:
        if ch == 'x':
            out.append(digest[j])
            j += 1
        elif ch == 'y':
            # take 8 + low order 3 bits of hex number.
            out.append(hex((int(digest[j], 16) & 0x3) | 0x8)[-1])
            j += 1
        else:
            out.append(ch)
    return ''.join(out)
# A tcp connection with support for sending various types of messages, especially JSON.
class ConnectionJSON(object):
    """
    A TCP connection speaking this server's length-prefixed wire protocol.

    Every frame is a 4-byte big-endian length followed by a one-character
    type tag: 'j' for a JSON message, 'b' for a binary blob (whose first 36
    characters are a uuidsha1 of the payload).
    """

    def __init__(self, conn):
        # avoid common mistake -- conn is supposed to be from socket.socket...
        assert not isinstance(conn, ConnectionJSON)
        self._conn = conn

    def close(self):
        self._conn.close()

    def _send(self, s):
        # frame = 4-byte big-endian length + payload bytes
        if six.PY3 and type(s) == str:
            s = s.encode('utf8')
        length_header = struct.pack(">L", len(s))
        # py3: TypeError: can't concat str to bytes
        self._conn.send(length_header + s)

    def send_json(self, m):
        """Serialize dict ``m`` and send it as a 'j' frame; returns len of the JSON."""
        m = json.dumps(m)
        if '\\u0000' in m:
            raise RuntimeError("NULL bytes not allowed")
        log("sending message '", truncate_text(m, 256), "'")
        self._send('j' + m)
        return len(m)

    def send_blob(self, blob):
        """Send ``blob`` as a 'b' frame prefixed with its uuidsha1; returns that uuid."""
        if six.PY3 and type(blob) == str:
            # unicode objects must be encoded before hashing
            blob = blob.encode('utf8')
        s = uuidsha1(blob)
        if six.PY3 and type(blob) == bytes:
            # we convert all to bytes first, to avoid unnecessary conversions
            self._send(('b' + s).encode('utf8') + blob)
        else:
            # old sage py2 code
            self._send('b' + s + blob)
        return s

    def send_file(self, filename):
        """Send the contents of ``filename`` as a blob; returns the blob uuid."""
        log("sending file '%s'" % filename)
        f = open(filename, 'rb')
        data = f.read()
        f.close()
        return self.send_blob(data)

    def _recv(self, n):
        # Read up to n bytes, retrying (up to 20 times) when the recv system
        # call is interrupted by a signal (EINTR == errno 4).
        #print("_recv(%s)"%n)
        # see http://stackoverflow.com/questions/3016369/catching-blocking-sigint-during-system-call
        for i in range(20):
            try:
                #print "blocking recv (i = %s), pid=%s"%(i, os.getpid())
                r = self._conn.recv(n)
                #log("n=%s; received: '%s' of len %s"%(n,r, len(r)))
                return r
            except OSError as e:
                #print("socket.error, msg=%s"%msg)
                if e.errno != 4:
                    raise
        raise EOFError

    def recv(self):
        """
        Receive one complete frame; returns ('json', decoded_dict) or
        ('blob', payload).  Raises EOFError on a truncated stream.
        """
        n = self._recv(4)
        if len(n) < 4:
            raise EOFError
        n = struct.unpack('>L', n)[0]  # big endian 32 bits
        # keep reading until the whole n-byte payload has arrived
        s = self._recv(n)
        while len(s) < n:
            t = self._recv(n - len(s))
            if len(t) == 0:
                raise EOFError
            s += t
        if six.PY3:
            # bytestream to string, in particular s[0] will be e.g. 'j' and not 106
            #log("ConnectionJSON::recv s=%s... (type %s)" % (s[:5], type(s)))
            # is s always of type bytes?
            if type(s) == bytes:
                s = s.decode('utf8')
        if s[0] == 'j':
            try:
                return 'json', json.loads(s[1:])
            except Exception as msg:
                log("Unable to parse JSON '%s'" % s[1:])
                raise
        elif s[0] == 'b':
            return 'blob', s[1:]
        raise ValueError("unknown message type '%s'" % s[0])
def truncate_text(s, max_size):
    """
    Return ``(text, truncated)``: ``s`` unchanged (and False) when it fits
    in ``max_size`` characters, otherwise the first ``max_size`` characters
    with '[...]' appended (and True).
    """
    if len(s) <= max_size:
        return s, False
    return s[:max_size] + "[...]", True
def truncate_text_warn(s, max_size, name):
    r"""
    Truncate text if too long and format a warning message.

    INPUT:

    - ``s`` -- string to be truncated
    - ``max-size`` - integer truncation limit
    - ``name`` - string, name of limiting parameter

    OUTPUT:

    a triple:

    - string -- possibly truncated input string
    - boolean -- true if input string was truncated
    - string -- warning message if input string was truncated
    """
    warning = "WARNING: Output: %s truncated by %s to %s. Type 'smc?' to learn how to raise the output limit."
    size = len(s)
    if size <= max_size:
        return s, False, ''
    return s[:max_size] + "[...]", True, warning % (size, name, max_size)
class Message(object):
    """
    Factory for the JSON-serializable message dicts exchanged with the hub.

    Each method returns a plain dict with an 'event' key plus the method's
    keyword arguments; the dicts are sent over ConnectionJSON.send_json.
    """

    def _new(self, event, props=None):
        # Build the message dict from props (typically locals() of the
        # caller); 'self' must not leak into the message.  props defaults to
        # None rather than a mutable {} per Python best practice.
        m = {'event': event}
        for key, val in (props or {}).items():
            if key != 'self':
                m[key] = val
        return m

    def start_session(self):
        return self._new('start_session')

    def session_description(self, pid):
        return self._new('session_description', {'pid': pid})

    def send_signal(self, pid, signal=signal.SIGINT):
        return self._new('send_signal', locals())

    def terminate_session(self, done=True):
        return self._new('terminate_session', locals())

    def execute_code(self, id, code, preparse=True):
        return self._new('execute_code', locals())

    def execute_javascript(self, code, obj=None, coffeescript=False):
        return self._new('execute_javascript', locals())

    def output(
            self,
            id,
            stdout=None,
            stderr=None,
            code=None,
            html=None,
            javascript=None,
            coffeescript=None,
            interact=None,
            md=None,
            tex=None,
            d3=None,
            file=None,
            raw_input=None,
            obj=None,
            once=None,
            hide=None,
            show=None,
            events=None,
            clear=None,
            delete_last=None,
            done=False  # CRITICAL: done must be specified for multi-response; this is assumed by sage_session.coffee; otherwise response assumed single.
    ):
        """
        Build an 'output' message.  Oversized text fields are truncated to
        the sage_server.MAX_* limits; if anything was truncated, a warning
        is appended to the message's stderr field.
        """
        m = self._new('output')
        m['id'] = id
        t = truncate_text_warn
        # BUGFIX: accumulate the truncation flag across all fields.  The old
        # code reassigned did_truncate per field, so an earlier truncation
        # was forgotten (and its warning lost) whenever a later field fit.
        did_truncate = False
        tmsg = ''
        from . import sage_server  # we do this so that the user can customize the MAX's below.
        if code is not None:
            code['source'], truncated, warn = t(code['source'],
                                                sage_server.MAX_CODE_SIZE,
                                                'MAX_CODE_SIZE')
            if truncated:
                did_truncate, tmsg = True, warn
            m['code'] = code
        if stderr is not None and len(stderr) > 0:
            m['stderr'], truncated, warn = t(stderr,
                                             sage_server.MAX_STDERR_SIZE,
                                             'MAX_STDERR_SIZE')
            if truncated:
                did_truncate, tmsg = True, warn
        if stdout is not None and len(stdout) > 0:
            m['stdout'], truncated, warn = t(stdout,
                                             sage_server.MAX_STDOUT_SIZE,
                                             'MAX_STDOUT_SIZE')
            if truncated:
                did_truncate, tmsg = True, warn
        if html is not None and len(html) > 0:
            m['html'], truncated, warn = t(html, sage_server.MAX_HTML_SIZE,
                                           'MAX_HTML_SIZE')
            if truncated:
                did_truncate, tmsg = True, warn
        if md is not None and len(md) > 0:
            m['md'], truncated, warn = t(md, sage_server.MAX_MD_SIZE,
                                         'MAX_MD_SIZE')
            if truncated:
                did_truncate, tmsg = True, warn
        if tex is not None and len(tex) > 0:
            tex['tex'], truncated, warn = t(tex['tex'],
                                            sage_server.MAX_TEX_SIZE,
                                            'MAX_TEX_SIZE')
            if truncated:
                did_truncate, tmsg = True, warn
            m['tex'] = tex
        if javascript is not None: m['javascript'] = javascript
        if coffeescript is not None: m['coffeescript'] = coffeescript
        if interact is not None: m['interact'] = interact
        if d3 is not None: m['d3'] = d3
        if obj is not None: m['obj'] = json.dumps(obj)
        if file is not None: m['file'] = file  # = {'filename':..., 'uuid':...}
        if raw_input is not None: m['raw_input'] = raw_input
        if done is not None: m['done'] = done
        if once is not None: m['once'] = once
        if hide is not None: m['hide'] = hide
        if show is not None: m['show'] = show
        if events is not None: m['events'] = events
        if clear is not None: m['clear'] = clear
        if delete_last is not None: m['delete_last'] = delete_last
        if did_truncate:
            # surface the (most recent) truncation warning to the user
            if 'stderr' in m:
                m['stderr'] += '\n' + tmsg
            else:
                m['stderr'] = '\n' + tmsg
        return m

    def introspect_completions(self, id, completions, target):
        m = self._new('introspect_completions', locals())
        m['id'] = id
        return m

    def introspect_docstring(self, id, docstring, target):
        m = self._new('introspect_docstring', locals())
        m['id'] = id
        return m

    def introspect_source_code(self, id, source_code, target):
        m = self._new('introspect_source_code', locals())
        m['id'] = id
        return m
# Singleton message factory used throughout this module.
message = Message()
# Unix account name this server process is running as.
whoami = os.environ['USER']
def client1(port, hostname):
    """
    Minimal interactive command-line test client (run with -c): connect to
    a sage server, start a session, and REPL until EOF, printing stdout
    inline and prefixing stderr lines with '! '.

    INPUT:

    - ``port`` -- port the server listens on
    - ``hostname`` -- host the server listens on
    """
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((hostname, int(port)))
    conn = ConnectionJSON(conn)
    conn.send_json(message.start_session())
    typ, mesg = conn.recv()
    # remember the session pid so Ctrl-C can signal it (see below)
    pid = mesg['pid']
    print(("PID = %s" % pid))
    id = 0
    while True:
        try:
            code = sage_parsing.get_input('sage [%s]: ' % id)
            if code is None:  # EOF
                break
            conn.send_json(message.execute_code(code=code, id=id))
            # drain output messages until the 'done' message for this id
            while True:
                typ, mesg = conn.recv()
                if mesg['event'] == 'terminate_session':
                    return
                elif mesg['event'] == 'output':
                    if 'stdout' in mesg:
                        sys.stdout.write(mesg['stdout'])
                        sys.stdout.flush()
                    if 'stderr' in mesg:
                        print(('! ' +
                               '\n! '.join(mesg['stderr'].splitlines())))
                    if 'done' in mesg and mesg['id'] >= id:
                        break
            id += 1
        except KeyboardInterrupt:
            # interrupt the running computation by opening a second
            # connection whose only job is to deliver SIGINT to the session
            print("Sending interrupt signal")
            conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn2.connect((hostname, int(port)))
            conn2 = ConnectionJSON(conn2)
            conn2.send_json(message.send_signal(pid))
            del conn2
            id += 1
    conn.send_json(message.terminate_session())
    print("\nExiting Sage client.")
class BufferedOutputStream(object):
    """
    File-like object that accumulates writes and forwards them in batches
    to the callback ``f(text, done=...)``, flushing whenever the buffer
    reaches ``flush_size`` characters or ``flush_interval`` seconds have
    passed since the last flush.  Used to replace sys.stdout/sys.stderr
    while worksheet code runs.
    """

    def __init__(self, f, flush_size=4096, flush_interval=.1):
        self._f = f
        self._buf = ''
        self._flush_size = flush_size
        self._flush_interval = flush_interval
        self.reset()

    def reset(self):
        # restart the flush-interval clock
        self._last_flush_time = time.time()

    def fileno(self):
        return 0

    def write(self, output):
        # CRITICAL: output must be valid PostgreSQL TEXT, so strip null
        # bytes.  This only affects text destined to be *rendered* in the
        # browser -- a partial but safe fix for a more general problem.
        try:
            self._buf += output.replace('\x00', '')
        except UnicodeDecodeError:
            self._buf += output.decode('utf-8').replace('\x00', '')
        #self.flush()
        now = time.time()
        overdue = now - self._last_flush_time >= self._flush_interval
        if len(self._buf) >= self._flush_size or overdue:
            self.flush()
            self._last_flush_time = now

    def flush(self, done=False):
        if not self._buf and not done:
            # no point in sending an empty message
            return
        try:
            self._f(self._buf, done=done)
        except UnicodeDecodeError:
            self._f(str(self._buf, errors='replace'), done=done)
        self._buf = ''

    def isatty(self):
        return False
# This will *have* to be re-done using Cython for speed.
class Namespace(dict):
    """
    dict with per-key event hooks: callbacks can be registered for 'change'
    (a key is set) and 'del' (a key is deleted) events, either on a specific
    key or on the wildcard key None (fires for every key).  Callback errors
    are printed, never raised, so user hooks cannot break assignment.
    """

    def __init__(self, x):
        self._on_change = {}
        self._on_del = {}
        dict.__init__(self, x)

    def on(self, event, x, f):
        """Register callback ``f`` for ``event`` ('change' or 'del') on key ``x`` (None = all keys)."""
        if event == 'change':
            if x not in self._on_change:
                self._on_change[x] = []
            self._on_change[x].append(f)
        elif event == 'del':
            if x not in self._on_del:
                self._on_del[x] = []
            self._on_del[x].append(f)

    def remove(self, event, x, f):
        """Unregister callback ``f`` for ``event`` on key ``x``; no-op if absent."""
        # BUGFIX: the original called v.find(f), but lists have no find()
        # method, so remove() always raised AttributeError.
        if event == 'change' and x in self._on_change:
            v = self._on_change[x]
            if f in v:
                v.remove(f)
            if len(v) == 0:
                del self._on_change[x]
        elif event == 'del' and x in self._on_del:
            v = self._on_del[x]
            if f in v:
                v.remove(f)
            if len(v) == 0:
                del self._on_del[x]

    def __setitem__(self, x, y):
        dict.__setitem__(self, x, y)
        try:
            # key-specific hooks get the new value; wildcard hooks get (key, value)
            if x in self._on_change:
                for f in self._on_change[x]:
                    f(y)
            if None in self._on_change:
                for f in self._on_change[None]:
                    f(x, y)
        except Exception as mesg:
            print(mesg)

    def __delitem__(self, x):
        try:
            if x in self._on_del:
                for f in self._on_del[x]:
                    f()
            if None in self._on_del:
                for f in self._on_del[None]:
                    f(x)
        except Exception as mesg:
            print(mesg)
        dict.__delitem__(self, x)

    def set(self, x, y, do_not_trigger=None):
        """Like ``self[x] = y`` but skips the change callbacks listed in ``do_not_trigger``."""
        dict.__setitem__(self, x, y)
        if x in self._on_change:
            if do_not_trigger is None:
                do_not_trigger = []
            for f in self._on_change[x]:
                if f not in do_not_trigger:
                    f(y)
        if None in self._on_change:
            for f in self._on_change[None]:
                f(x, y)
class TemporaryURL:
    """
    Value object pairing a temporary URL with its time-to-live.

    str() yields the bare URL; repr() yields the quoted URL, so the object
    displays nicely in a worksheet.
    """

    def __init__(self, url, ttl):
        self.url = url
        self.ttl = ttl

    def __repr__(self):
        return repr(self.url)

    def __str__(self):
        return self.url
# The single global namespace in which all worksheet code is evaluated;
# its change/del hooks power features such as typeset_mode.
namespace = Namespace({})
class Salvus(object):
    """
    Cell execution state object and wrapper for access to special CoCalc Server functionality.
    An instance of this object is created each time you execute a cell. It has various methods
    for sending different types of output messages, links to files, etc. Type 'help(smc)' for
    more details.
    OUTPUT LIMITATIONS -- There is an absolute limit on the number of messages output for a given
    cell, and also the size of the output message for each cell. You can access or change
    those limits dynamically in a worksheet as follows by viewing or changing any of the
    following variables::
        sage_server.MAX_STDOUT_SIZE   # max length of each stdout output message
        sage_server.MAX_STDERR_SIZE   # max length of each stderr output message
        sage_server.MAX_MD_SIZE       # max length of each md (markdown) output message
        sage_server.MAX_HTML_SIZE     # max length of each html output message
        sage_server.MAX_TEX_SIZE      # max length of tex output message
        sage_server.MAX_OUTPUT_MESSAGES   # max number of messages output for a cell.
    And::
        sage_server.MAX_OUTPUT        # max total character output for a single cell; computation
                                      # terminated/truncated if sum of above exceeds this.
    """
    # Expose the Namespace class to user code via the salvus object.
    Namespace = Namespace
    # Code textually prepended/appended to every cell that does not start
    # with '%' -- see cell_prefix()/cell_postfix().  Class-level, so shared
    # by all cells of the worksheet session.
    _prefix = ''
    _postfix = ''
    # Default %mode applied to cells (see default_mode()).
    _default_mode = 'sage'
    # Currently enabled __future__ features (name -> _Feature instance).
    _py_features = {}
def _flush_stdio(self):
    """
    Flush the standard output streams. This should be called before sending any message
    that produces output.
    """
    for stream in (sys.stdout, sys.stderr):
        stream.flush()
def __repr__(self):
    # Evaluating the salvus object itself should print nothing in the cell.
    return ''
def __init__(self, conn, id, data=None, cell_id=None, message_queue=None):
    """
    Create the per-cell execution state.

    - ``conn`` -- connection used to send output/blob messages to the hub
    - ``id`` -- id of the execute_code message this cell answers
    - ``data``, ``cell_id`` -- opaque client data identifying the cell
    - ``message_queue`` -- incoming message queue (used e.g. by raw_input
      and file to wait for replies)
    """
    self._conn = conn
    self._num_output_messages = 0
    self._total_output_length = 0
    self._output_warning_sent = False
    self._id = id
    self._done = True  # done=self._done when last execute message is sent; e.g., set self._done = False to not close cell on code term.
    self.data = data
    self.cell_id = cell_id
    self.namespace = namespace
    self.message_queue = message_queue
    self.code_decorators = []  # gets reset if there are code decorators
    # Alias: someday remove all references to "salvus" and instead use smc.
    # For now this alias is easier to think of and use.
    namespace['smc'] = namespace['salvus'] = self  # beware of circular ref?
    # Monkey patch in our "require" command.
    namespace['require'] = self.require
    # Make the salvus object itself available when doing "from sage.all import *".
    import sage.all
    sage.all.salvus = self
def _send_output(self, *args, **kwds):
    """
    Send one output message for this cell, enforcing the per-cell limits on
    message count (MAX_OUTPUT_MESSAGES) and total output size (MAX_OUTPUT).

    Raises KeyboardInterrupt once a limit is exceeded (and keeps raising on
    subsequent calls) so that the running cell is actually terminated.
    """
    if self._output_warning_sent:
        # A limit warning was already emitted; keep interrupting until the
        # cell's execution stops for real.
        raise KeyboardInterrupt
    mesg = message.output(*args, **kwds)
    if not mesg.get('once', False):
        # 'once' messages are transient, so they do not count toward the
        # message limit.
        self._num_output_messages += 1
    from . import sage_server
    if self._num_output_messages > sage_server.MAX_OUTPUT_MESSAGES:
        self._output_warning_sent = True
        err = "\nToo many output messages: %s (at most %s per cell -- type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
            self._num_output_messages, sage_server.MAX_OUTPUT_MESSAGES)
        self._conn.send_json(
            message.output(stderr=err, id=self._id, once=False, done=True))
        raise KeyboardInterrupt
    # send_json returns the number of bytes sent; accumulate for the size cap.
    n = self._conn.send_json(mesg)
    self._total_output_length += n
    if self._total_output_length > sage_server.MAX_OUTPUT:
        self._output_warning_sent = True
        err = "\nOutput too long: %s -- MAX_OUTPUT (=%s) exceeded (type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
            self._total_output_length, sage_server.MAX_OUTPUT)
        self._conn.send_json(
            message.output(stderr=err, id=self._id, once=False, done=True))
        raise KeyboardInterrupt
def obj(self, obj, done=False):
    """Send *obj* to the client as an 'obj' output message; returns self."""
    self._send_output(obj=obj, id=self._id, done=done)
    return self
def link(self, filename, label=None, foreground=True, cls=''):
    """
    Output a clickable link to a file somewhere in this project. The filename
    path must be relative to the current working directory of the Python process.
    The simplest way to use this is
        salvus.link("../name/of/file")   # any relative path to any file
    This creates a link, which when clicked on, opens that file in the foreground.
    If the filename is the name of a directory, clicking will instead
    open the file browser on that directory:
        salvus.link("../name/of/directory")   # clicking on the resulting link opens a directory
    If you would like a button instead of a link, pass cls='btn'. You can use any of
    the standard Bootstrap button classes, e.g., btn-small, btn-large, btn-success, etc.
    If you would like to change the text in the link (or button) to something
    besides the default (filename), just pass arbitrary HTML to the label= option.
    INPUT:
    - filename -- a relative path to a file or directory
    - label -- (default: the filename) html label for the link
    - foreground -- (default: True); if True, opens link in the foreground
    - cls -- (default: '') optional CSS classes, such as 'btn'.
    EXAMPLES:
    Use as a line decorator::
        %salvus.link name/of/file.foo
    Make a button::
        salvus.link("foo/bar/", label="The Bar Directory", cls='btn')
    Make two big blue buttons with plots in them::
        plot(sin, 0, 20).save('sin.png')
        plot(cos, 0, 20).save('cos.png')
        for img in ['sin.png', 'cos.png']:
            salvus.link(img, label="<img width='150px' src='%s'>"%salvus.file(img, show=False), cls='btn btn-large btn-primary')
    """
    # Convert to a path relative to $HOME, which is what the client-side
    # open/browse actions expect.
    path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
    if label is None:
        label = filename
    id = uuid()
    # Emit an empty anchor first, then attach the label and click handler
    # via javascript (the handler comes from _action).
    self.html("<a class='%s' style='cursor:pointer'; id='%s'></a>" %
              (cls, id))
    s = "$('#%s').html(obj.label).click(function() {%s; return false;});" % (
        id, self._action(path, foreground))
    self.javascript(s,
                    obj={
                        'label': label,
                        'path': path,
                        'foreground': foreground
                    },
                    once=False)
def _action(self, path, foreground):
    """
    Return the client-side javascript snippet that opens *path*: the file
    browser for a directory, otherwise the file itself.  The obj.path /
    obj.foreground placeholders are filled in from the caller's obj payload.
    """
    if not os.path.isdir(path):
        return "worksheet.project_page.open_file({'path':obj.path, 'foreground': obj.foreground});"
    if foreground:
        return "worksheet.project_page.open_directory(obj.path);"
    return "worksheet.project_page.set_current_path(obj.path);"
def open_tab(self, filename, foreground=True):
    """
    Open a new file (or directory) document in another tab.
    See the documentation for salvus.link.
    """
    # Path relative to $HOME, as expected by the client-side action.
    path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
    self.javascript(self._action(path, foreground),
                    obj={
                        'path': path,
                        'foreground': foreground
                    },
                    once=True)
def close_tab(self, filename):
    """
    Close an open file tab. The filename is relative to the current working directory.
    """
    self.javascript("worksheet.project_page.close_file(obj)",
                    obj=filename,
                    once=True)
def threed(
        self,
        g,  # sage Graphic3d object.
        width=None,
        height=None,
        frame=True,  # True/False or {'color':'black', 'thickness':.4, 'labels':True, 'fontsize':14, 'draw':True,
        # 'xmin':?, 'xmax':?, 'ymin':?, 'ymax':?, 'zmin':?, 'zmax':?}
        background=None,
        foreground=None,
        spin=False,
        aspect_ratio=None,
        frame_aspect_ratio=None,  # synonym for aspect_ratio
        done=False,
        renderer=None,  # None, 'webgl', or 'canvas'
):
    """
    Render a Sage 3d graphics object as output of this cell.

    The scene is converted to JSON, stored as a blob in the database (3d
    scenes are typically too large to inline in the document), and a file
    message pointing at the blob ("<uuid>.sage3d") is sent instead.
    """
    from .graphics import graphics3d_to_jsonable, json_float as f
    # process options, combining ones set explicitly above with ones inherited from 3d scene
    opts = {
        'width': width,
        'height': height,
        'background': background,
        'foreground': foreground,
        'spin': spin,
        'aspect_ratio': aspect_ratio,
        'renderer': renderer
    }
    extra_kwds = {} if g._extra_kwds is None else g._extra_kwds
    # clean up and normalize aspect_ratio option
    if aspect_ratio is None:
        if frame_aspect_ratio is not None:
            aspect_ratio = frame_aspect_ratio
        elif 'frame_aspect_ratio' in extra_kwds:
            aspect_ratio = extra_kwds['frame_aspect_ratio']
        elif 'aspect_ratio' in extra_kwds:
            aspect_ratio = extra_kwds['aspect_ratio']
    if aspect_ratio is not None:
        # 1 / "automatic" both mean "no explicit aspect ratio".
        if aspect_ratio == 1 or aspect_ratio == "automatic":
            aspect_ratio = None
        elif not (isinstance(aspect_ratio,
                             (list, tuple)) and len(aspect_ratio) == 3):
            raise TypeError(
                "aspect_ratio must be None, 1 or a 3-tuple, but it is '%s'"
                % (aspect_ratio, ))
        else:
            aspect_ratio = [f(x) for x in aspect_ratio]
    opts['aspect_ratio'] = aspect_ratio
    # Options not given explicitly fall back to the scene's extra kwds.
    for k in [
            'spin',
            'height',
            'width',
            'background',
            'foreground',
            'renderer',
    ]:
        if k in extra_kwds and not opts.get(k, None):
            opts[k] = extra_kwds[k]
    # Non-bool spin is a numeric rate; normalize to a JSON-safe float.
    if not isinstance(opts['spin'], bool):
        opts['spin'] = f(opts['spin'])
    opts['width'] = f(opts['width'])
    opts['height'] = f(opts['height'])
    # determine the frame
    b = g.bounding_box()
    xmin, xmax, ymin, ymax, zmin, zmax = b[0][0], b[1][0], b[0][1], b[1][
        1], b[0][2], b[1][2]
    fr = opts['frame'] = {
        'xmin': f(xmin),
        'xmax': f(xmax),
        'ymin': f(ymin),
        'ymax': f(ymax),
        'zmin': f(zmin),
        'zmax': f(zmax)
    }
    if isinstance(frame, dict):
        # Explicit frame settings override the computed bounding box.
        for k in list(fr.keys()):
            if k in frame:
                fr[k] = f(frame[k])
        fr['draw'] = frame.get('draw', True)
        fr['color'] = frame.get('color', None)
        fr['thickness'] = f(frame.get('thickness', None))
        fr['labels'] = frame.get('labels', None)
        if 'fontsize' in frame:
            fr['fontsize'] = int(frame['fontsize'])
    elif isinstance(frame, bool):
        fr['draw'] = frame
    # convert the Sage graphics object to a JSON object that can be rendered
    scene = {'opts': opts, 'obj': graphics3d_to_jsonable(g)}
    # Store that object in the database, rather than sending it directly as an output message.
    # We do this since obj can easily be quite large/complicated, and managing it as part of the
    # document is too slow and doesn't scale.
    blob = json.dumps(scene, separators=(',', ':'))
    uuid = self._conn.send_blob(blob)
    # flush output (so any text appears before 3d graphics, in case they are interleaved)
    self._flush_stdio()
    # send message pointing to the 3d 'file', which will get downloaded from database
    self._send_output(id=self._id,
                      file={
                          'filename': unicode8("%s.sage3d" % uuid),
                          'uuid': uuid
                      },
                      done=done)
def d3_graph(self, g, **kwds):
    """Render a Sage graph *g* with the client-side d3 graph viewer."""
    from .graphics import graph_to_d3_jsonable
    self._send_output(id=self._id,
                      d3={
                          "viewer": "graph",
                          "data": graph_to_d3_jsonable(g, **kwds)
                      })
def file(self,
         filename,
         show=True,
         done=False,
         download=False,
         once=False,
         events=None,
         raw=False,
         text=None):
    """
    Display or provide a link to the given file. Raises a RuntimeError if this
    is not possible, e.g, if the file is too large.
    If show=True (the default), the browser will show the file,
    or provide a clickable link to it if there is no way to show it.
    If text is also given that will be used instead of the path to the file.
    If show=False, this function returns an object T such that
    T.url (or str(t)) is a string of the form "/blobs/filename?uuid=the_uuid"
    that can be used to access the file even if the file is immediately
    deleted after calling this function (the file is stored in a database).
    Also, T.ttl is the time to live (in seconds) of the object. A ttl of
    0 means the object is permanently available.
    raw=False (the default):
        If you use the URL
            /blobs/filename?uuid=the_uuid&download
        then the server will include a header that tells the browser to
        download the file to disk instead of displaying it. Only relatively
        small files can be made available this way. However, they remain
        available (for a day) even *after* the file is deleted.
        NOTE: It is safe to delete the file immediately after this
        function (salvus.file) returns.
    raw=True:
        Instead, the URL is to the raw file, which is served directly
        from the project:
            /project-id/raw/path/to/filename
        This will only work if the file is not deleted; however, arbitrarily
        large files can be streamed this way.
    This function creates an output message {file:...}; if the user saves
    a worksheet containing this message, then any referenced blobs are made
    permanent in the database.
    The uuid is based on the Sha-1 hash of the file content (it is computed using the
    function sage_server.uuidsha1). Any two files with the same content have the
    same Sha1 hash.
    """
    filename = unicode8(filename)
    if raw:
        info = self.project_info()
        path = os.path.abspath(filename)
        home = os.environ['HOME'] + '/'
        # Raw URLs are served relative to the project root, so the file
        # must live under $HOME.
        if path.startswith(home):
            path = path[len(home):]
        else:
            raise ValueError(
                "can only send raw files in your home directory")
        url = os.path.join('/', info['base_url'].strip('/'),
                           info['project_id'], 'raw', path.lstrip('/'))
        if show:
            self._flush_stdio()
            self._send_output(id=self._id,
                              once=once,
                              file={
                                  'filename': filename,
                                  'url': url,
                                  'show': show,
                                  'text': text
                              },
                              events=events,
                              done=done)
            return
        else:
            # Raw files are served directly, so never expire (ttl 0).
            return TemporaryURL(url=url, ttl=0)
    # Non-raw: upload the file content as a blob, then wait for the hub's
    # save_blob acknowledgement (which carries the ttl).
    file_uuid = self._conn.send_file(filename)
    mesg = None
    while mesg is None:
        self.message_queue.recv()
        for i, (typ, m) in enumerate(self.message_queue.queue):
            if typ == 'json' and m.get('event') == 'save_blob' and m.get(
                    'sha1') == file_uuid:
                mesg = m
                # NOTE(review): this deletes from the MessageQueue list
                # itself while the pairs were read from .queue -- presumably
                # MessageQueue.__delitem__ delegates to queue; confirm.
                del self.message_queue[i]
                break
    if 'error' in mesg:
        raise RuntimeError("error saving blob -- %s" % mesg['error'])
    self._flush_stdio()
    self._send_output(id=self._id,
                      once=once,
                      file={
                          'filename': filename,
                          'uuid': file_uuid,
                          'show': show,
                          'text': text
                      },
                      events=events,
                      done=done)
    if not show:
        info = self.project_info()
        url = "%s/blobs/%s?uuid=%s" % (info['base_url'], filename,
                                       file_uuid)
        if download:
            # BUGFIX: the url already contains '?uuid=...', so the download
            # flag must be appended with '&' (the docstring above documents
            # "?uuid=the_uuid&download"); the original appended a second '?',
            # producing a malformed query string.
            url += '&download'
        return TemporaryURL(url=url, ttl=mesg.get('ttl', 0))
def python_future_feature(self, feature=None, enable=None):
    """
    Allow users to enable, disable, and query the features in the python __future__ module.

    - feature is None: return the sorted list of currently enabled feature names
      (enable must not be given in this case).
    - enable is None: return whether *feature* is currently enabled.
    - otherwise: enable or disable *feature* for subsequent cell executions.
    Raises RuntimeError if *feature* is not a real __future__ feature.
    """
    if feature is None:
        if enable is not None:
            raise ValueError(
                "enable may not be specified when feature is None")
        return sorted(Salvus._py_features.keys())
    attr = getattr(future, feature, None)
    # Validate against the __future__ module itself: the name must be listed
    # in all_feature_names and resolve to an actual _Feature instance.
    if (feature not in future.all_feature_names) or (
            attr is None) or not isinstance(attr, future._Feature):
        raise RuntimeError("future feature %.50r is not defined" %
                          (feature, ))
    if enable is None:
        return feature in Salvus._py_features
    if enable:
        Salvus._py_features[feature] = attr
    else:
        # Disabling an already-disabled feature is a no-op.
        try:
            del Salvus._py_features[feature]
        except KeyError:
            pass
def default_mode(self, mode=None):
    """
    Set the default mode for cell evaluation. This is equivalent
    to putting %mode at the top of any cell that does not start
    with %. Use salvus.default_mode() to return the current mode.
    Use salvus.default_mode("") to have no default mode.
    This is implemented using salvus.cell_prefix.
    """
    if mode is None:
        return Salvus._default_mode
    Salvus._default_mode = mode
    # "sage" is the native mode, so it needs no prefix at all.
    self.cell_prefix("" if mode == "sage" else "%" + mode)
def cell_prefix(self, prefix=None):
    """
    Make it so that the given prefix code is textually
    prepending to the input before evaluating any cell, unless
    the first character of the cell is a %.
    To append code at the end, use cell_postfix.
    INPUT:
    - ``prefix`` -- None (to return prefix) or a string ("" to disable)
    EXAMPLES:
    Make it so every cell is timed:
        salvus.cell_prefix('%time')
    Make it so cells are typeset using latex, and latex comments are allowed even
    as the first line.
        salvus.cell_prefix('%latex')
        %sage salvus.cell_prefix('')
    Evaluate each cell using GP (Pari) and display the time it took:
        salvus.cell_prefix('%time\n%gp')
        %sage salvus.cell_prefix('')  # back to normal
    """
    if prefix is None:
        # Query mode: just return the current prefix.
        return Salvus._prefix
    else:
        # Stored on the class so it applies to all subsequent cells.
        Salvus._prefix = prefix
def cell_postfix(self, postfix=None):
    """
    Make it so that the given code is textually
    appended to the input before evaluating a cell.
    To prepend code at the beginning, use cell_prefix.
    INPUT:
    - ``postfix`` -- None (to return postfix) or a string ("" to disable)
    EXAMPLES:
    Print memory usage after evaluating each cell:
        salvus.cell_postfix('print("%s MB used"%int(get_memory_usage()))')
    Return to normal
        salvus.set_cell_postfix('')
    """
    if postfix is None:
        # Query mode: return the current postfix.
        return Salvus._postfix
    # Stored on the class so it applies to all subsequent cells.
    Salvus._postfix = postfix
def execute(self, code, namespace=None, preparse=True, locals=None):
    """
    Execute *code* in *namespace*, block by block.

    Handles Sage preparsing, introspection (blocks ending in ? / ??),
    enabled __future__ features, the one-time session/startup-file fixup,
    and error reporting (including hints about smart quotes and implicit
    multiplication).  Stops at the first block that raises.
    """
    ascii_warn = False
    # NOTE(review): code_error is assigned but never read in this method.
    code_error = False
    if sys.getdefaultencoding() == 'ascii':
        # Non-ascii input under an ascii default encoding is a common source
        # of confusing errors; remember it so we can warn on failure below.
        for c in code:
            if ord(c) >= 128:
                ascii_warn = True
                break
    if namespace is None:
        namespace = self.namespace
    # clear pylab figure (takes a few microseconds)
    if pylab is not None:
        pylab.clf()
    # Compiler flags for all currently enabled __future__ features.
    compile_flags = reduce(operator.or_,
                           (feature.compiler_flag
                            for feature in Salvus._py_features.values()),
                           0)
    #code = sage_parsing.strip_leading_prompts(code)  # broken -- wrong on "def foo(x):\n print(x)"
    blocks = sage_parsing.divide_into_blocks(code)
    try:
        import sage.repl
        # CRITICAL -- we do NOT import sage.repl.interpreter!!!!!!!
        # That would waste several seconds importing ipython and much more, which is just dumb.
        # The only reason this is needed below is if the user has run preparser(False), which
        # would cause sage.repl.interpreter to be imported at that point (as preparser is
        # lazy imported.)
        sage_repl_interpreter = sage.repl.interpreter
    except:
        pass  # expected behavior usually, since sage.repl.interpreter usually not imported (only used by command line...)
    import sage.misc.session
    for start, stop, block in blocks:
        # if import sage.repl.interpreter fails, sag_repl_interpreter is unreferenced
        try:
            do_pp = getattr(sage_repl_interpreter, '_do_preparse', True)
        except:
            do_pp = True
        if preparse and do_pp:
            block = sage_parsing.preparse_code(block)
        # Reset the buffered streams so per-block output starts clean.
        sys.stdout.reset()
        sys.stderr.reset()
        try:
            b = block.rstrip()
            # get rid of comments at the end of the line -- issue #1835
            #from ushlex import shlex
            #s = shlex(b)
            #s.commenters = '#'
            #s.quotes = '"\''
            #b = ''.join(s)
            # e.g. now a line like 'x = test? # bar' becomes 'x=test?'
            if b.endswith('??'):
                # Double ? -- show the source code of the object.
                p = sage_parsing.introspect(b,
                                            namespace=namespace,
                                            preparse=False)
                self.code(source=p['result'], mode="python")
            elif b.endswith('?'):
                # Single ? -- show the docstring of the object.
                p = sage_parsing.introspect(b,
                                            namespace=namespace,
                                            preparse=False)
                self.code(source=p['result'], mode="text/x-rst")
            else:
                reload_attached_files_if_mod_smc()
                if execute.count < 2:
                    execute.count += 1
                    if execute.count == 2:
                        # this fixup has to happen after first block has executed (os.chdir etc)
                        # but before user assigns any variable in worksheet
                        # sage.misc.session.init() is not called until first call of show_identifiers
                        # BUGFIX: be careful to *NOT* assign to _!! see https://github.com/sagemathinc/cocalc/issues/1107
                        block2 = "sage.misc.session.state_at_init = dict(globals());sage.misc.session._dummy=sage.misc.session.show_identifiers();\n"
                        exec(compile(block2, '', 'single'), namespace,
                             locals)
                        b2a = """
if 'SAGE_STARTUP_FILE' in os.environ and os.path.isfile(os.environ['SAGE_STARTUP_FILE']):
    try:
        load(os.environ['SAGE_STARTUP_FILE'])
    except:
        sys.stdout.flush()
        sys.stderr.write('\\nException loading startup file: {}\\n'.format(os.environ['SAGE_STARTUP_FILE']))
        sys.stderr.flush()
        raise
"""
                        exec(compile(b2a, '', 'exec'), namespace, locals)
                # Pick up __future__ imports declared in this block and add
                # their compiler flags for this and later blocks.
                features = sage_parsing.get_future_features(
                    block, 'single')
                if features:
                    compile_flags = reduce(
                        operator.or_, (feature.compiler_flag
                                       for feature in features.values()),
                        compile_flags)
                exec(
                    compile(block + '\n',
                            '',
                            'single',
                            flags=compile_flags), namespace, locals)
                if features:
                    Salvus._py_features.update(features)
            sys.stdout.flush()
            sys.stderr.flush()
        except:
            if ascii_warn:
                sys.stderr.write(
                    '\n\n*** WARNING: Code contains non-ascii characters ***\n'
                )
                # Smart quotes are the most common culprit; point them out.
                for c in '\u201c\u201d':
                    if c in code:
                        sys.stderr.write(
                            '*** Maybe the character < %s > should be replaced by < " > ? ***\n'
                            % c)
                        break
                sys.stderr.write('\n\n')
            if six.PY2:
                from exceptions import SyntaxError, TypeError
            # py3: all standard errors are available by default via "builtin", not available here for some reason ...
            if six.PY3:
                from builtins import SyntaxError, TypeError
            exc_type, _, _ = sys.exc_info()
            if exc_type in [SyntaxError, TypeError]:
                from .sage_parsing import strip_string_literals
                code0, _, _ = strip_string_literals(code)
                implicit_mul = RE_POSSIBLE_IMPLICIT_MUL.findall(code0)
                if len(implicit_mul) > 0:
                    implicit_mul_list = ', '.join(
                        str(_) for _ in implicit_mul)
                    # we know there is a SyntaxError and there could be an implicit multiplication
                    sys.stderr.write(
                        '\n\n*** WARNING: Code contains possible implicit multiplication ***\n'
                    )
                    sys.stderr.write(
                        '*** Check if any of [ %s ] need a "*" sign for multiplication, e.g. 5x should be 5*x ! ***\n\n'
                        % implicit_mul_list)
            sys.stdout.flush()
            sys.stderr.write('Error in lines %s-%s\n' %
                             (start + 1, stop + 1))
            traceback.print_exc()
            sys.stderr.flush()
            # Stop at the first failing block.
            break
def execute_with_code_decorators(self,
                                 code_decorators,
                                 code,
                                 preparse=True,
                                 namespace=None,
                                 locals=None):
    """
    salvus.execute_with_code_decorators is used when evaluating
    code blocks that are set to any non-default code_decorator.

    Each decorator may transform the code (callable), consume it entirely
    (object with an ``eval`` method), or hook in via ``before``/``after``
    callbacks; whatever code remains is executed with self.execute.
    """
    import sage  # used below as a code decorator
    if isinstance(code_decorators, str):
        code_decorators = [code_decorators]
    if preparse:
        code_decorators = list(
            map(sage_parsing.preparse_code, code_decorators))
    # Decorator names are evaluated in the worksheet namespace to get the
    # actual decorator objects.
    code_decorators = [
        eval(code_decorator, self.namespace)
        for code_decorator in code_decorators
    ]
    # The code itself may want to know exactly what code decorators are in effect.
    # For example, r.eval can do extra things when being used as a decorator.
    self.code_decorators = code_decorators
    for i, code_decorator in enumerate(code_decorators):
        # eval is for backward compatibility
        if not hasattr(code_decorator, 'eval') and hasattr(
                code_decorator, 'before'):
            code_decorators[i] = code_decorator.before(code)
    # Apply decorators innermost-first (reversed), possibly rewriting or
    # consuming the code.
    for code_decorator in reversed(code_decorators):
        # eval is for backward compatibility
        if hasattr(code_decorator, 'eval'):
            print((code_decorator.eval(code, locals=self.namespace)),
                  end=' ')
            code = ''
        elif code_decorator is sage:
            # special case -- the sage module (i.e., %sage) should do nothing.
            pass
        else:
            code = code_decorator(code)
        if code is None:
            code = ''
    if code != '' and isinstance(code, str):
        self.execute(code,
                     preparse=preparse,
                     namespace=namespace,
                     locals=locals)
    for code_decorator in code_decorators:
        if not hasattr(code_decorator, 'eval') and hasattr(
                code_decorator, 'after'):
            code_decorator.after(code)
def html(self, html, done=False, once=None):
    """
    Display html in the output stream.
    EXAMPLE:
        salvus.html("<b>Hi</b>")
    """
    # Flush text output first so it appears before the html in the cell.
    self._flush_stdio()
    self._send_output(html=unicode8(html),
                      id=self._id,
                      done=done,
                      once=once)
def md(self, md, done=False, once=None):
    """
    Display markdown in the output stream.
    EXAMPLE:
        salvus.md("**Hi**")
    """
    # Flush text output first so it appears before the markdown in the cell.
    self._flush_stdio()
    self._send_output(md=unicode8(md), id=self._id, done=done, once=once)
def pdf(self, filename, **kwds):
    # Display a PDF file in the cell; see sage_salvus.show_pdf for the
    # supported keyword options (the docstring is copied onto this method
    # at module load time).
    sage_salvus.show_pdf(filename, **kwds)
def tex(self, obj, display=False, done=False, once=None, **kwds):
    """
    Display obj nicely using TeX rendering.
    INPUT:
    - obj -- latex string or object that is automatically be converted to TeX
    - display -- (default: False); if True, typeset as display math (so centered, etc.)
    Returns self so calls can be chained.
    """
    self._flush_stdio()
    # Strings are assumed to already be latex; anything else is converted
    # via the worksheet namespace's latex() function.
    tex = obj if isinstance(obj, str) else self.namespace['latex'](obj,
                                                                   **kwds)
    self._send_output(tex={
        'tex': tex,
        'display': display
    },
                      id=self._id,
                      done=done,
                      once=once)
    return self
def start_executing(self):
    # Mark the cell as running (done=False) with no payload.
    self._send_output(done=False, id=self._id)

def clear(self, done=False):
    # Tell the client to clear all output of this cell.
    self._send_output(clear=True, id=self._id, done=done)

def delete_last_output(self, done=False):
    # Tell the client to delete the most recent output message of this cell.
    self._send_output(delete_last=True, id=self._id, done=done)
def stdout(self, output, done=False, once=None):
    """
    Send the string output (or unicode8(output) if output is not a
    string) to the standard output stream of the compute cell.
    INPUT:
    - output -- string or object
    Returns self so calls can be chained.
    """
    stdout = output if isinstance(output, str) else unicode8(output)
    self._send_output(stdout=stdout, done=done, id=self._id, once=once)
    return self
def stderr(self, output, done=False, once=None):
    """
    Send the string output (or unicode8(output) if output is not a
    string) to the standard error stream of the compute cell.
    INPUT:
    - output -- string or object
    Returns self so calls can be chained.
    """
    if not isinstance(output, str):
        output = unicode8(output)
    self._send_output(stderr=output, done=done, id=self._id, once=once)
    return self
def code(
        self,
        source,  # actual source code
        mode=None,  # the syntax highlight codemirror mode
        filename=None,  # path of file it is contained in (if applicable)
        lineno=-1,  # line number where source starts (0-based)
        done=False,
        once=None):
    """
    Send a code message, which is to be rendered as code by the client, with
    appropriate syntax highlighting, maybe a link to open the source file, etc.
    Returns self so calls can be chained.
    """
    source = source if isinstance(source, str) else unicode8(source)
    code = {
        'source': source,
        'filename': filename,
        'lineno': int(lineno),
        'mode': mode
    }
    self._send_output(code=code, done=done, id=self._id, once=once)
    return self
def _execute_interact(self, id, vals):
    # Callback invoked when the client manipulates an interact control:
    # dispatch the new control values to the registered interact handler.
    if id not in sage_salvus.interacts:
        # The interact no longer exists server side (e.g., after a restart
        # of the Sage session), so ask the user to re-evaluate the cell.
        print("(Evaluate this cell to use this interact.)")
        #raise RuntimeError("Error: No interact with id %s"%id)
    else:
        sage_salvus.interacts[id](vals)
def interact(self, f, done=False, once=None, **kwds):
    """
    Create an interactive control panel from function *f*: send its JSON
    description to the client and return the wrapped InteractFunction.
    """
    I = sage_salvus.InteractCell(f, **kwds)
    self._flush_stdio()
    self._send_output(interact=I.jsonable(),
                      id=self._id,
                      done=done,
                      once=once)
    return sage_salvus.InteractFunction(I)
def javascript(self,
               code,
               once=False,
               coffeescript=False,
               done=False,
               obj=None):
    """
    Execute the given Javascript code as part of the output
    stream. This same code will be executed (at exactly this
    point in the output stream) every time the worksheet is
    rendered.
    See the docs for the top-level javascript function for more details.
    INPUT:
    - code -- a string
    - once -- boolean (default: False); if True the Javascript is
      only executed once, not every time the cell is loaded. This
      is what you would use if you call salvus.stdout, etc. Use
      once=False, e.g., if you are using javascript to make a DOM
      element draggable (say). WARNING: If once=True, then the
      javascript is likely to get executed before other output to
      a given cell is even rendered.
    - coffeescript -- boolean (default: False); if True, the input
      code is first converted from CoffeeScript to Javascript.
    At least the following Javascript objects are defined in the
    scope in which the code is evaluated::
    - cell -- jQuery wrapper around the current compute cell
    - salvus.stdout, salvus.stderr, salvus.html, salvus.tex -- all
      allow you to write additional output to the cell
    - worksheet - jQuery wrapper around the current worksheet DOM object
    - obj -- the optional obj argument, which is passed via JSON serialization
    """
    if obj is None:
        obj = {}
    self._send_output(javascript={
        'code': code,
        'coffeescript': coffeescript
    },
                      id=self._id,
                      done=done,
                      obj=obj,
                      once=once)
def coffeescript(self, *args, **kwds):
    """
    This is the same as salvus.javascript, but with coffeescript=True.
    See the docs for the top-level javascript function for more details.
    """
    kwds['coffeescript'] = True
    self.javascript(*args, **kwds)
def raw_input(self,
              prompt='',
              default='',
              placeholder='',
              input_width=None,
              label_width=None,
              done=False,
              type=None):  # done is ignored here
    """
    Prompt the user with an input box in the cell output, block until the
    answer arrives on the message queue, and return the entered value
    (optionally converted via *type*, or Sage-evaluated for type='sage').
    Raises KeyboardInterrupt if some other event interrupts the wait.
    """
    self._flush_stdio()
    m = {'prompt': unicode8(prompt)}
    if input_width is not None:
        m['input_width'] = unicode8(input_width)
    if label_width is not None:
        m['label_width'] = unicode8(label_width)
    if default:
        m['value'] = unicode8(default)
    if placeholder:
        m['placeholder'] = unicode8(placeholder)
    self._send_output(raw_input=m, id=self._id)
    # Block until the client replies (or something else arrives).
    typ, mesg = self.message_queue.next_mesg()
    log("handling raw input message ", truncate_text(unicode8(mesg), 400))
    if typ == 'json' and mesg['event'] == 'sage_raw_input':
        # everything worked out perfectly
        # Replace the live input box with a static, submitted one.
        self.delete_last_output()
        m['value'] = mesg['value']  # as unicode!
        m['submitted'] = True
        self._send_output(raw_input=m, id=self._id)
        value = mesg['value']
        if type is not None:
            if type == 'sage':
                value = sage_salvus.sage_eval(value)
            else:
                try:
                    value = type(value)
                except TypeError:
                    # Some things in Sage are clueless about unicode for some reason...
                    # Let's at least try, in case the unicode can convert to a string.
                    value = type(str(value))
        return value
    else:
        raise KeyboardInterrupt(
            "raw_input interrupted by another action: event='%s' (expected 'sage_raw_input')"
            % mesg['event'])
def _check_component(self, component):
    """Raise ValueError unless *component* names a cell component."""
    if component not in ('input', 'output'):
        raise ValueError("component must be 'input' or 'output'")
def hide(self, component):
    """
    Hide the given component ('input' or 'output') of the cell.
    """
    self._check_component(component)
    # Pass the id as a keyword argument for consistency with every other
    # _send_output call site (message.output is built from keyword args;
    # the original passed self._id positionally here only).
    self._send_output(id=self._id, hide=component)

def show(self, component):
    """
    Show the given component ('input' or 'output') of the cell.
    """
    self._check_component(component)
    # Keyword id for consistency -- see hide() above.
    self._send_output(id=self._id, show=component)
def notify(self, **kwds):
    """
    Display a graphical notification using the alert_message Javascript function.
    INPUTS:
    - `type: "default"` - Type of the notice. "default", "warning", "info", "success", or "error".
    - `title: ""` - The notice's title.
    - `message: ""` - The notice's text.
    - `timeout: ?` - Delay in seconds before the notice is automatically removed.
    EXAMPLE:
        salvus.notify(type="warning", title="This warning", message="This is a quick message.", timeout=3)
    """
    obj = {}
    # Translate legacy option names/values before forwarding to the client.
    for k, v in kwds.items():
        if k == 'text':  # backward compat
            k = 'message'
        elif k == 'type' and v == 'notice':  # backward compat
            v = 'default'
        obj[k] = sage_salvus.jsonable(v)
        if k == 'delay':  # backward compat
            obj['timeout'] = v / 1000.0  # units are in seconds now.
    self.javascript("alert_message(obj)", once=True, obj=obj)
def execute_javascript(self, code, coffeescript=False, obj=None):
    """
    Tell the browser to execute javascript. Basically the same as
    salvus.javascript with once=True (the default), except this
    isn't tied to a particular cell. There is a worksheet object
    defined in the scope of the evaluation.
    See the docs for the top-level javascript function for more details.
    """
    # obj is serialized here (compact separators) rather than on the hub.
    self._conn.send_json(
        message.execute_javascript(code,
                                   coffeescript=coffeescript,
                                   obj=json.dumps(obj,
                                                  separators=(',', ':'))))
def execute_coffeescript(self, *args, **kwds):
    """
    This is the same as salvus.execute_javascript, but with coffeescript=True.
    See the docs for the top-level javascript function for more details.
    """
    # Force coffeescript=True, overriding any caller-supplied value
    # (same behavior as assigning into kwds before the call).
    kwds = dict(kwds, coffeescript=True)
    self.execute_javascript(*args, **kwds)
def _cython(self, filename, **opts):
    """
    Return module obtained by compiling the Cython code in the
    given file.
    INPUT:
    - filename -- name of a Cython file
    - all other options are passed to sage.misc.cython.cython unchanged,
      except for use_cache which defaults to True (instead of False)
    OUTPUT:
    - a module
    """
    if 'use_cache' not in opts:
        opts['use_cache'] = True
    import sage.misc.cython
    modname, path = sage.misc.cython.cython(filename, **opts)
    try:
        # Temporarily put the build directory first on sys.path so the
        # compiled module is importable; always restore sys.path after.
        sys.path.insert(0, path)
        module = __import__(modname)
    finally:
        del sys.path[0]
    return module
def _import_code(self, content, **opts):
    """
    Write *content* to a uniquely named .py file in the current directory,
    import it as a module, and return the module; the temporary source (and
    any stale compiled file) is removed afterwards.

    NOTE(review): **opts are accepted but not used here -- kept for
    signature compatibility with the other _xxx importers.
    """
    # Pick a module name that cannot collide with an existing file.
    while True:
        py_file_base = uuid().replace('-', '_')
        if not os.path.exists(py_file_base + '.py'):
            break
    try:
        # BUGFIX: close the file handle deterministically; the original
        # used open(...).write(content) and leaked the handle.
        with open(py_file_base + '.py', 'w') as src:
            src.write(content)
        try:
            sys.path.insert(0, os.path.abspath('.'))
            mod = __import__(py_file_base)
        finally:
            del sys.path[0]
    finally:
        os.unlink(py_file_base + '.py')
        # BUGFIX: on Python 3 the bytecode lives in __pycache__, so a
        # sibling .pyc usually does not exist; the original unconditional
        # unlink raised OSError here and masked the import result.
        try:
            os.unlink(py_file_base + '.pyc')
        except OSError:
            pass
    return mod
def _sage(self, filename, **opts):
    """Import a .sage file: run it through the Sage preparser, prepend the
    full sage.all import, then import the result as a module."""
    import sage.misc.preparser
    content = "from sage.all import *\n" + sage.misc.preparser.preparse_file(
        open(filename).read())
    return self._import_code(content, **opts)
def _spy(self, filename, **opts):
    """Import a .spy file: preparsed like .sage, but with only a minimal
    set of Sage names (Integer, RealNumber, PolynomialRing) imported."""
    import sage.misc.preparser
    content = "from sage.all import Integer, RealNumber, PolynomialRing\n" + sage.misc.preparser.preparse_file(
        open(filename).read())
    return self._import_code(content, **opts)
def _py(self, filename, **opts):
    # Plain Python: filename is used directly as the module name to import
    # (so it should not include the .py extension).
    # NOTE(review): opts are accepted but ignored here -- confirm intended.
    return __import__(filename)
def require(self, filename, **opts):
    """
    Import the given file, compiling (.pyx/.spyx via Cython) or preparsing
    (.sage/.spy) it as appropriate, and return the resulting module.
    Raises ValueError if the file does not exist, NotImplementedError for
    unsupported extensions.
    """
    if not os.path.exists(filename):
        raise ValueError("file '%s' must exist" % filename)
    ext = os.path.splitext(filename)[1]
    importers = {
        '.pyx': self._cython,
        '.spyx': self._cython,
        '.sage': self._sage,
        '.spy': self._spy,
        '.py': self._py,
    }
    importer = importers.get(ext)
    if importer is not None:
        return importer(filename, **opts)
    raise NotImplementedError("require file of type %s not implemented" %
                              ext)
def typeset_mode(self, on=True):
    # Toggle automatic LaTeX typesetting of cell output (delegates to
    # sage_salvus.typeset_mode).
    sage_salvus.typeset_mode(on)
def project_info(self):
    """
    Return a dictionary with information about the project in which this code is running.
    EXAMPLES::
        sage: salvus.project_info()
        {"stdout":"{u'project_id': u'...', u'location': {u'username': u'teaAuZ9M', u'path': u'.', u'host': u'localhost', u'port': 22}, u'base_url': u'/...'}\n"}
    """
    # INFO is the module-level project-info dict populated at server start.
    return INFO
# Copy the detailed docstrings from the underlying sage_salvus
# implementations onto the thin Salvus wrapper methods.  On Python 2 the
# docstring of an unbound method must be set through __func__; on Python 3
# methods are plain functions, so it is set directly.
if six.PY2:
    Salvus.pdf.__func__.__doc__ = sage_salvus.show_pdf.__doc__
    Salvus.raw_input.__func__.__doc__ = sage_salvus.raw_input.__doc__
    Salvus.clear.__func__.__doc__ = sage_salvus.clear.__doc__
    Salvus.delete_last_output.__func__.__doc__ = sage_salvus.delete_last_output.__doc__
else:
    Salvus.pdf.__doc__ = sage_salvus.show_pdf.__doc__
    Salvus.raw_input.__doc__ = sage_salvus.raw_input.__doc__
    Salvus.clear.__doc__ = sage_salvus.clear.__doc__
    Salvus.delete_last_output.__doc__ = sage_salvus.delete_last_output.__doc__
def execute(conn, id, code, data, cell_id, preparse, message_queue):
    """Execute one cell's *code* in the shared module-level ``namespace``,
    streaming stdout/stderr back over *conn* through a Salvus object and
    guaranteeing exactly one 'done' flush at the end (unless salvus._done
    is False)."""
    salvus = Salvus(conn=conn,
                    id=id,
                    data=data,
                    message_queue=message_queue,
                    cell_id=cell_id)
    #salvus.start_executing() # with our new mainly client-side execution this isn't needed; not doing this makes evaluation roundtrip around 100ms instead of 200ms too, which is a major win.
    try:
        # initialize the salvus output streams
        streams = (sys.stdout, sys.stderr)
        sys.stdout = BufferedOutputStream(salvus.stdout)
        sys.stderr = BufferedOutputStream(salvus.stderr)
        try:
            # initialize more salvus functionality
            sage_salvus.set_salvus(salvus)
            namespace['sage_salvus'] = sage_salvus
        except:
            traceback.print_exc()
        # Wrap the cell in the current mode's prefix/postfix, unless the cell
        # itself starts with an explicit % mode directive.
        if salvus._prefix:
            if not code.startswith("%"):
                code = salvus._prefix + '\n' + code
        if salvus._postfix:
            code += '\n' + salvus._postfix
        salvus.execute(code, namespace=namespace, preparse=preparse)
    finally:
        # there must be exactly one done message, unless salvus._done is False.
        if sys.stderr._buf:
            if sys.stdout._buf:
                sys.stdout.flush()
            sys.stderr.flush(done=salvus._done)
        else:
            sys.stdout.flush(done=salvus._done)
        # restore the real streams for the next cell
        (sys.stdout, sys.stderr) = streams
# execute.count goes from 0 to 2
# used for show_identifiers()
execute.count = 0
def drop_privileges(id, home, transient, username):
    """Switch this forked worker process to run as uid/gid *id* with
    $HOME=*home*, updating every relevant environment variable and chdir'ing
    into the new home.  Must be called in the child, before any user code."""
    gid = id
    uid = id
    if transient:
        # transient accounts: claim ownership of the home directory first
        os.chown(home, uid, gid)
    os.setgid(gid)
    os.setuid(uid)
    os.environ['DOT_SAGE'] = home
    # Rebase MPLCONFIGDIR onto the new home.
    # NOTE(review): assumes the old value starts with a 5-character prefix
    # (e.g. '/home') that is being replaced -- confirm against parent env.
    mpl = os.environ['MPLCONFIGDIR']
    os.environ['MPLCONFIGDIR'] = home + mpl[5:]
    os.environ['HOME'] = home
    os.environ['IPYTHON_DIR'] = home
    os.environ['USERNAME'] = username
    os.environ['USER'] = username
    os.chdir(home)
    # Monkey patch the Sage library and anything else that does not
    # deal well with changing user. This sucks, but it is work that
    # simply must be done because we're not importing the library from
    # scratch (which would take a long time).
    import sage.misc.misc
    sage.misc.misc.DOT_SAGE = home + '/.sage/'
class MessageQueue(list):
    """FIFO of messages received from the hub connection.

    New messages are inserted at index 0 and consumed from the tail, so the
    oldest message is always at the end of the internal list.  (Subclasses
    ``list`` for historical reasons only; all state lives in ``self.queue``.)
    """

    def __init__(self, conn):
        self.conn = conn
        self.queue = []

    def __repr__(self):
        return "Sage Server Message Queue"

    def __getitem__(self, i):
        return self.queue[i]

    def __delitem__(self, i):
        del self.queue[i]

    def next_mesg(self):
        """Pop and return the oldest queued message.

        If nothing is queued, block on the connection and return the next
        incoming message directly (it is never placed in the queue).
        """
        return self.queue.pop() if self.queue else self.conn.recv()

    def recv(self):
        """Block until one message arrives, enqueue it, and return it."""
        mesg = self.conn.recv()
        self.queue.insert(0, mesg)
        return mesg
def session(conn):
    """
    This is run by the child process that is forked off on each new
    connection. It drops privileges, then handles the complete
    compute session.
    INPUT:
    - ``conn`` -- the TCP connection
    """
    mq = MessageQueue(conn)
    pid = os.getpid()
    # seed the random number generator(s)
    import sage.all
    sage.all.set_random_seed()
    import random
    random.seed(sage.all.initial_seed())
    # get_memory_usage is not aware of being forked...
    import sage.misc.getusage
    sage.misc.getusage._proc_status = "/proc/%s/status" % os.getpid()
    cnt = 0
    # Main message loop: dispatch on the 'event' field of each incoming
    # message until a terminate_session arrives or the connection dies.
    while True:
        try:
            typ, mesg = mq.next_mesg()
            #print('INFO:child%s: received message "%s"'%(pid, mesg))
            log("handling message ", truncate_text(unicode8(mesg), 400))
            event = mesg['event']
            if event == 'terminate_session':
                return
            elif event == 'execute_code':
                try:
                    execute(conn=conn,
                            id=mesg['id'],
                            code=mesg['code'],
                            data=mesg.get('data', None),
                            cell_id=mesg.get('cell_id', None),
                            preparse=mesg.get('preparse', True),
                            message_queue=mq)
                except Exception as err:
                    log("ERROR -- exception raised '%s' when executing '%s'" %
                        (err, mesg['code']))
            elif event == 'introspect':
                try:
                    # check for introspect from jupyter cell
                    prefix = Salvus._default_mode
                    if 'top' in mesg:
                        top = mesg['top']
                        log('introspect cell top line %s' % top)
                        if top.startswith("%"):
                            prefix = top[1:]
                    try:
                        # see if prefix is the name of a jupyter kernel function
                        kc = eval(prefix + "(get_kernel_client=True)",
                                  namespace, locals())
                        kn = eval(prefix + "(get_kernel_name=True)", namespace,
                                  locals())
                        log("jupyter introspect prefix %s kernel %s" %
                            (prefix, kn))  # e.g. "p2", "python2"
                        jupyter_introspect(conn=conn,
                                           id=mesg['id'],
                                           line=mesg['line'],
                                           preparse=mesg.get('preparse', True),
                                           kc=kc)
                    except:
                        # not a jupyter-mode cell: fall back to Sage introspection
                        import traceback
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        lines = traceback.format_exception(
                            exc_type, exc_value, exc_traceback)
                        log(lines)
                        introspect(conn=conn,
                                   id=mesg['id'],
                                   line=mesg['line'],
                                   preparse=mesg.get('preparse', True))
                except:
                    pass
            else:
                raise RuntimeError("invalid message '%s'" % mesg)
        except:
            # When hub connection dies, loop goes crazy.
            # Unfortunately, just catching SIGINT doesn't seem to
            # work, and leads to random exits during a
            # session. However, when connection dies, 10000 iterations
            # happen almost instantly. Ugly, but it works.
            cnt += 1
            if cnt > 10000:
                sys.exit(0)
            else:
                pass
def jupyter_introspect(conn, id, line, preparse, kc):
    """Tab-completion via a running jupyter kernel.

    Sends a complete request for *line* to the kernel client *kc*, drains
    the iopub channel until the kernel reports 'idle', then reads the
    complete_reply from the shell channel and forwards the matches to the
    client as an introspect_completions message.  Any failure is logged
    and swallowed.
    """
    import jupyter_client
    from queue import Empty
    try:
        salvus = Salvus(conn=conn, id=id)
        msg_id = kc.complete(line)
        shell = kc.shell_channel
        iopub = kc.iopub_channel
        # handle iopub responses
        while True:
            try:
                msg = iopub.get_msg(timeout=1)
                msg_type = msg['msg_type']
                content = msg['content']
            except Empty:
                # shouldn't happen
                log("jupyter iopub channel empty")
                break
            if msg['parent_header'].get('msg_id') != msg_id:
                # not a reply to our request; skip it
                continue
            log("jupyter iopub recv %s %s" % (msg_type, str(content)))
            if msg_type == 'status' and content['execution_state'] == 'idle':
                break
        # handle shell responses
        while True:
            try:
                msg = shell.get_msg(timeout=10)
                msg_type = msg['msg_type']
                content = msg['content']
            except:
                # shouldn't happen
                log("jupyter shell channel empty")
                break
            if msg['parent_header'].get('msg_id') != msg_id:
                continue
            log("jupyter shell recv %s %s" % (msg_type, str(content)))
            if msg_type == 'complete_reply' and content['status'] == 'ok':
                # jupyter kernel returns matches like "xyz.append" and smc wants just "append"
                matches = content['matches']
                offset = content['cursor_end'] - content['cursor_start']
                completions = [s[offset:] for s in matches]
                mesg = message.introspect_completions(id=id,
                                                      completions=completions,
                                                      target=line[-offset:])
                conn.send_json(mesg)
                break
    except:
        log("jupyter completion exception: %s" % sys.exc_info()[0])
def introspect(conn, id, line, preparse):
    """Sage-side introspection: send completions, a docstring, or source
    code for *line* back to the client over *conn*, depending on which
    request flag sage_parsing.introspect sets."""
    salvus = Salvus(
        conn=conn, id=id
    )  # so salvus.[tab] works -- note that Salvus(...) modifies namespace.
    z = sage_parsing.introspect(line, namespace=namespace, preparse=preparse)
    if z['get_completions']:
        mesg = message.introspect_completions(id=id,
                                              completions=z['result'],
                                              target=z['target'])
    elif z['get_help']:
        mesg = message.introspect_docstring(id=id,
                                            docstring=z['result'],
                                            target=z['expr'])
    elif z['get_source']:
        mesg = message.introspect_source_code(id=id,
                                              source_code=z['result'],
                                              target=z['expr'])
    # NOTE(review): if none of the three flags is set, ``mesg`` is unbound
    # and the next line raises NameError -- presumably sage_parsing.introspect
    # always sets exactly one; confirm before relying on this.
    conn.send_json(mesg)
def handle_session_term(signum, frame):
    """SIGCHLD-style handler: reap every finished child process.

    Loops over os.waitpid(-1, WNOHANG) until either no zombie remains
    (pid 0) or waitpid raises (no children at all).
    """
    while True:
        try:
            child_pid, _exit_status = os.waitpid(-1, os.WNOHANG)
        except:
            return
        if not child_pid:
            return
# Shared-secret authentication state: ``secret_token`` is read lazily (see
# unlock_conn) from the file at ``secret_token_path``, which clients must
# present before a connection is served.
secret_token = None
if 'COCALC_SECRET_TOKEN' in os.environ:
    secret_token_path = os.environ['COCALC_SECRET_TOKEN']
else:
    secret_token_path = os.path.join(os.environ['SMC'], 'secret_token')
def unlock_conn(conn):
    """Authenticate a freshly-accepted socket via the shared secret token.

    Reads the token from the client and compares it with the one stored at
    ``secret_token_path``.  Sends b'y' and returns True on success; sends
    b'n' plus a reason, closes the socket, and returns False otherwise.
    """
    global secret_token
    if secret_token is None:
        try:
            secret_token = open(secret_token_path).read().strip()
        except:
            conn.send(six.b('n'))
            conn.send(
                six.
                b("Unable to accept connection, since Sage server doesn't yet know the secret token; unable to read from '%s'"
                  % secret_token_path))
            conn.close()
            # BUG FIX: previously this path fell through with secret_token
            # still None and crashed below on len(None); reject instead.
            return False
    # Compare in bytes space: conn.recv returns bytes, while the token read
    # from disk is text.  (The old code compared the bytes received so far
    # against a *str* slice, which under Python 3 never matches and so broke
    # any login whose token arrived fragmented across recv calls.)
    expected = six.b(secret_token)
    n = len(expected)
    token = six.b('')
    while len(token) < n:
        token += conn.recv(n)
        if token != expected[:len(token)]:
            break  # definitely not right -- don't try anymore
    if token != expected:
        log("token='%s'; secret_token='%s'" % (token, secret_token))
        conn.send(six.b('n'))  # no -- invalid login
        conn.send(six.b("Invalid secret token."))
        conn.close()
        return False
    else:
        conn.send(six.b('y'))  # yes -- valid login
        return True
def serve_connection(conn):
    """Handle one freshly-accepted client socket in this (child) process.

    The client must first present the secret token (unlock_conn); it may
    then either request a signal be sent to a pid, or start a compute
    session, which runs until terminated.
    """
    global PID
    PID = os.getpid()
    # First the client *must* send the secret shared token. If they
    # don't, we return (and the connection will have been destroyed by
    # unlock_conn).
    log("Serving a connection")
    log("Waiting for client to unlock the connection...")
    # TODO -- put in a timeout (?)
    if not unlock_conn(conn):
        log("Client failed to unlock connection. Dumping them.")
        return
    log("Connection unlocked.")
    try:
        # from here on, speak length-prefixed JSON over the socket
        conn = ConnectionJSON(conn)
        typ, mesg = conn.recv()
        log("Received message %s" % mesg)
    except Exception as err:
        log("Error receiving message: %s (connection terminated)" % str(err))
        raise
    if mesg['event'] == 'send_signal':
        if mesg['pid'] == 0:
            log("invalid signal mesg (pid=0)")
        else:
            log("Sending a signal")
            os.kill(mesg['pid'], mesg['signal'])
        return
    if mesg['event'] != 'start_session':
        log("Received an unknown message event = %s; terminating session." %
            mesg['event'])
        return
    log("Starting a session")
    desc = message.session_description(os.getpid())
    log("child sending session description back: %s" % desc)
    conn.send_json(desc)
    session(conn=conn)
def serve(port, host, extra_imports=False):
    """Pre-import the Sage library, then accept connections on host:port,
    forking one child per connection (serve_connection runs in the child).

    The parent periodically reaps finished children (the listening socket
    has a 5s timeout for that purpose).  ``extra_imports`` additionally
    warms up scipy/sympy/plotting in the shared namespace.
    """
    #log.info('opening connection on port %s', port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # check for children that have finished every few seconds, so
    # we don't end up with zombies.
    s.settimeout(5)
    s.bind((host, port))
    log('Sage server %s:%s' % (host, port))

    # Enabling the following signal completely breaks subprocess pexpect in many cases, which is
    # obviously totally unacceptable.
    #signal.signal(signal.SIGCHLD, handle_session_term)

    def init_library():
        # One-time, expensive initialization of the Sage library and of the
        # shared worksheet ``namespace`` inherited by every forked child.
        tm = time.time()
        log("pre-importing the sage library...")

        # FOR testing purposes.
        ##log("fake 40 second pause to slow things down for testing....")
        ##time.sleep(40)
        ##log("done with pause")

        # Monkey patching interact using the new and improved Salvus
        # implementation of interact.
        import sagenb.notebook.interact
        sagenb.notebook.interact.interact = sage_salvus.interact

        # Actually import sage now. This must happen after the interact
        # import because of library interacts.
        log("import sage...")
        import sage.all
        log("imported sage.")

        # Monkey patch the html command.
        try:
            # need the following for sage_server to start with sage-8.0
            # or `import sage.interacts.library` will fail (not really important below, as we don't do that).
            import sage.repl.user_globals
            sage.repl.user_globals.set_globals(globals())
            log("initialized user_globals")
        except RuntimeError:
            # may happen with sage version < 8.0
            log("user_globals.set_globals failed, continuing", sys.exc_info())
        sage.all.html = sage.misc.html.html = sage_salvus.html

        # CRITICAL: look, we are just going to not do this, and have sage.interacts.library
        # be broken. It's **really slow** to do this, and I don't think sage.interacts.library
        # ever ended up going anywhere! People use wiki.sagemath.org/interact instead...
        #import sage.interacts.library
        #sage.interacts.library.html = sage_salvus.html

        # Set a useful figsize default; the matplotlib one is not notebook friendly.
        import sage.plot.graphics
        sage.plot.graphics.Graphics.SHOW_OPTIONS['figsize'] = [8, 4]

        # Monkey patch latex.eval, so that %latex works in worksheets
        sage.misc.latex.latex.eval = sage_salvus.latex0

        # Plot, integrate, etc., -- so startup time of worksheets is minimal.
        cmds = [
            'from sage.all import *', 'from sage.calculus.predefined import x',
            'import pylab'
        ]
        if extra_imports:
            cmds.extend([
                'import scipy', 'import sympy',
                "plot(sin).save('%s/a.png'%os.environ['SMC'], figsize=2)",
                'integrate(sin(x**2),x)'
            ])
        tm0 = time.time()
        for cmd in cmds:
            log(cmd)
            exec(cmd, namespace)
        global pylab
        pylab = namespace['pylab']  # used for clearing
        log('imported sage library and other components in %s seconds' %
            (time.time() - tm))

        for k, v in sage_salvus.interact_functions.items():
            namespace[k] = v
            # See above -- not doing this, since it is REALLY SLOW to import.
            # This does mean that some old code that tries to use interact might break (?).
            #namespace[k] = sagenb.notebook.interact.__dict__[k] = v

        namespace['_salvus_parsing'] = sage_parsing

        # Expose all of the %mode / helper commands to worksheet code.
        for name in [
                'anaconda', 'asy', 'attach', 'auto', 'capture', 'cell',
                'clear', 'coffeescript', 'cython', 'default_mode',
                'delete_last_output', 'dynamic', 'exercise', 'fork', 'fortran',
                'go', 'help', 'hide', 'hideall', 'input', 'java', 'javascript',
                'julia', 'jupyter', 'license', 'load', 'md', 'mediawiki',
                'modes', 'octave', 'pandoc', 'perl', 'plot3d_using_matplotlib',
                'prun', 'python_future_feature', 'py3print_mode', 'python',
                'python3', 'r', 'raw_input', 'reset', 'restore', 'ruby',
                'runfile', 'sage_chat', 'sage_eval', 'scala', 'scala211',
                'script', 'search_doc', 'search_src', 'sh', 'show',
                'show_identifiers', 'singular_kernel', 'time', 'timeit',
                'typeset_mode', 'var', 'wiki'
        ]:
            namespace[name] = getattr(sage_salvus, name)

        namespace['sage_server'] = sys.modules[
            __name__]  # http://stackoverflow.com/questions/1676835/python-how-do-i-get-a-reference-to-a-module-inside-the-module-itself

        # alias pretty_print_default to typeset_mode, since sagenb has/uses that.
        namespace['pretty_print_default'] = namespace['typeset_mode']
        # and monkey patch it
        sage.misc.latex.pretty_print_default = namespace[
            'pretty_print_default']

        sage_salvus.default_namespace = dict(namespace)
        log("setup namespace with extra functions")

        # Sage's pretty_print and view are both ancient and a mess
        sage.all.pretty_print = sage.misc.latex.pretty_print = namespace[
            'pretty_print'] = namespace['view'] = namespace['show']

        # this way client code can tell it is running as a Sage Worksheet.
        namespace['__SAGEWS__'] = True

    log("Initialize sage library.")
    init_library()

    t = time.time()
    s.listen(128)
    i = 0
    children = {}
    log("Starting server listening for connections")
    try:
        while True:
            i += 1
            #print i, time.time()-t, 'cps: ', int(i/(time.time()-t))
            # do not use log.info(...) in the server loop; threads = race conditions that hang server every so often!!
            try:
                if children:
                    for pid in list(children.keys()):
                        if os.waitpid(pid, os.WNOHANG) != (0, 0):
                            log("subprocess %s terminated, closing connection"
                                % pid)
                            # BUG FIX: close the terminated child's *own*
                            # stored connection.  The old code closed
                            # ``conn`` -- whatever socket was accepted most
                            # recently -- leaking the dead child's socket
                            # (and possibly killing a live connection).
                            children[pid].close()
                            del children[pid]
                try:
                    conn, addr = s.accept()
                    log("Accepted a connection from", addr)
                except:
                    # this will happen periodically since we did s.settimeout above, so
                    # that we wait for children above periodically.
                    continue
            except socket.error:
                continue
            child_pid = os.fork()
            if child_pid:  # parent
                log("forked off child with pid %s to handle this connection" %
                    child_pid)
                children[child_pid] = conn
            else:
                # child
                global PID
                PID = os.getpid()
                log("child process, will now serve this new connection")
                serve_connection(conn)
                # NOTE(review): the child falls back into the accept loop
                # after serve_connection returns; it appears to rely on
                # session()'s sys.exit for termination -- confirm.
        # end while
    except Exception as err:
        log("Error taking connection: ", err)
        traceback.print_exc(file=open(LOGFILE, 'a'))
        #log.error("error: %s %s", type(err), str(err))
    finally:
        log("closing socket")
        #s.shutdown(0)
        s.close()
def run_server(port, host, pidfile, logfile=None):
    """Run serve() on host:port, optionally recording our pid.

    If *pidfile* is given it is written before serving and removed on exit;
    *logfile*, if given, replaces the module-level LOGFILE first.
    """
    global LOGFILE
    if logfile:
        LOGFILE = logfile
    if pidfile:
        pid = str(os.getpid())
        print("os.getpid() = %s" % pid)
        # Use a context manager so the pidfile handle is closed
        # deterministically (the original relied on GC to close it).
        with open(pidfile, 'w') as f:
            f.write(pid)
    log("run_server: port=%s, host=%s, pidfile='%s', logfile='%s'" %
        (port, host, pidfile, LOGFILE))
    try:
        serve(port, host)
    finally:
        # always clean up the pidfile, even if serve() raised
        if pidfile:
            os.unlink(pidfile)
if __name__ == "__main__":
    # Command-line entry point: parse options, then run either a test client
    # (-c) against an existing server, or the server itself (optionally
    # daemonized with -d, which requires --pidfile).
    import argparse
    parser = argparse.ArgumentParser(description="Run Sage server")
    parser.add_argument(
        "-p",
        dest="port",
        type=int,
        default=0,
        help=
        "port to listen on (default: 0); 0 = automatically allocated; saved to $SMC/data/sage_server.port"
    )
    parser.add_argument(
        "-l",
        dest='log_level',
        type=str,
        default='INFO',
        help=
        "log level (default: INFO) useful options include WARNING and DEBUG")
    parser.add_argument("-d",
                        dest="daemon",
                        default=False,
                        action="store_const",
                        const=True,
                        help="daemon mode (default: False)")
    parser.add_argument(
        "--host",
        dest="host",
        type=str,
        default='127.0.0.1',
        help="host interface to bind to -- default is 127.0.0.1")
    parser.add_argument("--pidfile",
                        dest="pidfile",
                        type=str,
                        default='',
                        help="store pid in this file")
    parser.add_argument(
        "--logfile",
        dest="logfile",
        type=str,
        default='',
        help="store log in this file (default: '' = don't log to a file)")
    parser.add_argument("-c",
                        dest="client",
                        default=False,
                        action="store_const",
                        const=True,
                        help="run in test client mode number 1 (command line)")
    parser.add_argument("--hostname",
                        dest="hostname",
                        type=str,
                        default='',
                        help="hostname to connect to in client mode")
    parser.add_argument("--portfile",
                        dest="portfile",
                        type=str,
                        default='',
                        help="write port to this file")
    args = parser.parse_args()
    if args.daemon and not args.pidfile:
        print(("%s: must specify pidfile in daemon mode" % sys.argv[0]))
        sys.exit(1)
    if args.log_level:
        pass
        #level = getattr(logging, args.log_level.upper())
        #log.setLevel(level)
    if args.client:
        # test-client mode: connect to a running server, then exit
        client1(
            port=args.port if args.port else int(open(args.portfile).read()),
            hostname=args.hostname)
        sys.exit(0)
    if not args.port:
        # ask the OS for a free port by binding port 0, then release it
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))  # pick a free port
        args.port = s.getsockname()[1]
        del s
    if args.portfile:
        open(args.portfile, 'w').write(str(args.port))
    pidfile = os.path.abspath(args.pidfile) if args.pidfile else ''
    logfile = os.path.abspath(args.logfile) if args.logfile else ''
    if logfile:
        LOGFILE = logfile
        open(LOGFILE, 'w')  # for now we clear it on restart...
        log("setting logfile to %s" % LOGFILE)
    main = lambda: run_server(port=args.port, host=args.host, pidfile=pidfile)
    if args.daemon and args.pidfile:
        # detach from the controlling terminal before serving
        from . import daemon
        daemon.daemonize(args.pidfile)
        main()
    else:
        main()
|
from .base import TestCase
import os
import mock
import shutil
import time
from django.conf import settings
import whisper
import gzip
from graphite.readers import WhisperReader, GzippedWhisperReader
from graphite.wsgi import application # NOQA makes sure we have a working WSGI app
class WhisperReadersTests(TestCase):
    """Exercise WhisperReader and GzippedWhisperReader against real whisper
    files created under settings.WHISPER_DIR."""
    # Timestamp of the single datapoint written by create_whisper_hosts.
    start_ts = 0

    # Create/wipe test whisper files (worker4 is a gzipped copy of worker1).
    hostcpu = os.path.join(settings.WHISPER_DIR, 'hosts/hostname/cpu.wsp')
    worker1 = hostcpu.replace('hostname', 'worker1')
    worker2 = hostcpu.replace('hostname', 'worker2')
    worker3 = hostcpu.replace('hostname', 'worker3')
    worker4 = hostcpu.replace('hostname', 'worker4')
    worker4 = worker4.replace('cpu.wsp', 'cpu.wsp.gz')

    def create_whisper_hosts(self):
        """Create fixtures: worker1/worker2 with one datapoint each,
        worker3 empty (broken), worker4 a gzipped copy of worker1."""
        self.start_ts = int(time.time())
        try:
            os.makedirs(self.worker1.replace('cpu.wsp', ''))
            os.makedirs(self.worker2.replace('cpu.wsp', ''))
            os.makedirs(self.worker3.replace('cpu.wsp', ''))
            os.makedirs(self.worker4.replace('cpu.wsp.gz', ''))
        except OSError:
            # directories may already exist from a previous test run
            pass
        whisper.create(self.worker1, [(1, 60)])
        whisper.create(self.worker2, [(1, 60)])
        open(self.worker3, 'a').close()
        whisper.update(self.worker1, 1, self.start_ts)
        whisper.update(self.worker2, 2, self.start_ts)
        with open(self.worker1, 'rb') as f_in, gzip.open(self.worker4, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)

    def wipe_whisper_hosts(self):
        """Remove all fixture files and the hosts directory tree."""
        try:
            os.remove(self.worker1)
            os.remove(self.worker2)
            os.remove(self.worker3)
            os.remove(self.worker4)
            shutil.rmtree(os.path.join(settings.WHISPER_DIR, 'hosts'))
        except OSError:
            pass

    #
    # GzippedWhisper Reader tests
    #

    # Confirm the reader object is not none
    def test_GzippedWhisperReader_init(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = GzippedWhisperReader(self.worker4, 'hosts.worker4.cpu')
        self.assertIsNotNone(reader)

    # Confirm the intervals
    # Because the intervals returned from Whisper are subsecond,
    # we truncate to int for this comparison, otherwise it's impossible
    def test_GzippedWhisperReader_get_intervals(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = GzippedWhisperReader(self.worker4, 'hosts.worker4.cpu')
        intervals = reader.get_intervals()
        for interval in intervals:
            self.assertEqual(int(interval.start), self.start_ts-60)
            self.assertEqual(int(interval.end), self.start_ts)

    # Confirm fetch works.
    def test_GzippedWhisperReader_fetch(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = GzippedWhisperReader(self.worker4, 'hosts.worker4.cpu')
        (_, values) = reader.fetch(self.start_ts-5, self.start_ts)
        self.assertEqual(values, [None, None, None, None, 1.0])

    #
    # Whisper Reader tests
    #

    # Confirm the reader object is not none
    def test_WhisperReader_init(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        self.assertIsNotNone(reader)

    # Confirm the intervals
    # Because the intervals returned from Whisper are subsecond,
    # we truncate to int for this comparison, otherwise it's impossible
    def test_WhisperReader_get_intervals(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        intervals = reader.get_intervals()
        for interval in intervals:
            self.assertEqual(int(interval.start), self.start_ts-60)
            self.assertEqual(int(interval.end), self.start_ts)

    # Confirm fetch works.
    def test_WhisperReader_fetch(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        (_, values) = reader.fetch(self.start_ts-5, self.start_ts)
        self.assertEqual(values, [None, None, None, None, 1.0])

    # fetch propagates whisper's "no data" result as None
    @mock.patch('whisper.fetch')
    def test_WhisperReader_fetch_returns_no_data(self, whisper_fetch):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        whisper_fetch.return_value = None
        self.assertEqual(reader.fetch(self.start_ts-5, self.start_ts), None)

    # Whisper Reader broken file
    def test_WhisperReader_broken_file(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        # Corrupt the whisper header in place.  BUG FIX: the file is opened
        # in binary mode, so the payload must be bytes -- the old str
        # literal raised TypeError on Python 3 before the reader was ever
        # exercised.  A context manager also guarantees the handle closes.
        with open(self.worker2, 'rb+') as f:
            f.seek(10)
            f.write(b'Bad Data')
        reader = WhisperReader(self.worker2, 'hosts.worker2.cpu')
        with self.assertRaises(Exception):
            reader.fetch(self.start_ts-5, self.start_ts)

    # Whisper Reader CarbonLink Query returns a dict
    @mock.patch('graphite.carbonlink.CarbonLinkPool.query')
    def test_WhisperReader_CarbonLinkQuery(self, carbonlink_query):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        carbonlink_query.return_value = {}
        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        (_, values) = reader.fetch(self.start_ts-5, self.start_ts)
        self.assertEqual(values, [None, None, None, None, 1.0])
Make whisper tests more reliable.
from .base import TestCase
import os
import mock
import shutil
import time
from django.conf import settings
import whisper
import gzip
from graphite.readers import WhisperReader, GzippedWhisperReader
from graphite.wsgi import application # NOQA makes sure we have a working WSGI app
class WhisperReadersTests(TestCase):
    """Exercise WhisperReader and GzippedWhisperReader against real whisper
    files created under settings.WHISPER_DIR."""
    # Timestamp of the single datapoint written by create_whisper_hosts.
    start_ts = 0

    # Create/wipe test whisper files (worker4 is a gzipped copy of worker1).
    hostcpu = os.path.join(settings.WHISPER_DIR, 'hosts/hostname/cpu.wsp')
    worker1 = hostcpu.replace('hostname', 'worker1')
    worker2 = hostcpu.replace('hostname', 'worker2')
    worker3 = hostcpu.replace('hostname', 'worker3')
    worker4 = hostcpu.replace('hostname', 'worker4')
    worker4 = worker4.replace('cpu.wsp', 'cpu.wsp.gz')

    def create_whisper_hosts(self):
        """Create fixtures: worker1/worker2 with one datapoint each,
        worker3 empty (broken), worker4 a gzipped copy of worker1."""
        self.start_ts = int(time.time())
        try:
            os.makedirs(self.worker1.replace('cpu.wsp', ''))
            os.makedirs(self.worker2.replace('cpu.wsp', ''))
            os.makedirs(self.worker3.replace('cpu.wsp', ''))
            os.makedirs(self.worker4.replace('cpu.wsp.gz', ''))
        except OSError:
            # directories may already exist from a previous test run
            pass
        whisper.create(self.worker1, [(1, 60)])
        whisper.create(self.worker2, [(1, 60)])
        open(self.worker3, 'a').close()
        whisper.update(self.worker1, 1, self.start_ts)
        whisper.update(self.worker2, 2, self.start_ts)
        with open(self.worker1, 'rb') as f_in, gzip.open(self.worker4, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)

    def wipe_whisper_hosts(self):
        """Remove all fixture files and the hosts directory tree."""
        try:
            os.remove(self.worker1)
            os.remove(self.worker2)
            os.remove(self.worker3)
            os.remove(self.worker4)
            shutil.rmtree(os.path.join(settings.WHISPER_DIR, 'hosts'))
        except OSError:
            pass

    #
    # GzippedWhisper Reader tests
    #

    # Confirm the reader object is not none
    def test_GzippedWhisperReader_init(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = GzippedWhisperReader(self.worker4, 'hosts.worker4.cpu')
        self.assertIsNotNone(reader)

    # Confirm the intervals
    # Because the intervals returned from Whisper are subsecond,
    # we truncate to int for this comparison, otherwise it's impossible
    def test_GzippedWhisperReader_get_intervals(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = GzippedWhisperReader(self.worker4, 'hosts.worker4.cpu')
        # sample "now" right before asking for intervals to avoid flakiness
        ts = int(time.time())
        intervals = reader.get_intervals()
        for interval in intervals:
            self.assertEqual(int(interval.start), ts-60)
            self.assertEqual(int(interval.end), ts)

    # Confirm fetch works.
    def test_GzippedWhisperReader_fetch(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = GzippedWhisperReader(self.worker4, 'hosts.worker4.cpu')
        (_, values) = reader.fetch(self.start_ts-5, self.start_ts)
        self.assertEqual(values, [None, None, None, None, 1.0])

    #
    # Whisper Reader tests
    #

    # Confirm the reader object is not none
    def test_WhisperReader_init(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        self.assertIsNotNone(reader)

    # Confirm the intervals
    # Because the intervals returned from Whisper are subsecond,
    # we truncate to int for this comparison, otherwise it's impossible
    def test_WhisperReader_get_intervals(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        # sample "now" right before asking for intervals to avoid flakiness
        ts = int(time.time())
        intervals = reader.get_intervals()
        for interval in intervals:
            self.assertEqual(int(interval.start), ts-60)
            self.assertEqual(int(interval.end), ts)

    # Confirm fetch works.
    def test_WhisperReader_fetch(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        (_, values) = reader.fetch(self.start_ts-5, self.start_ts)
        self.assertEqual(values, [None, None, None, None, 1.0])

    # fetch propagates whisper's "no data" result as None
    @mock.patch('whisper.fetch')
    def test_WhisperReader_fetch_returns_no_data(self, whisper_fetch):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        whisper_fetch.return_value = None
        self.assertEqual(reader.fetch(self.start_ts-5, self.start_ts), None)

    # Whisper Reader broken file
    def test_WhisperReader_broken_file(self):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        # Corrupt the whisper header in place.  BUG FIX: the file is opened
        # in binary mode, so the payload must be bytes -- the old str
        # literal raised TypeError on Python 3 before the reader was ever
        # exercised.  A context manager also guarantees the handle closes.
        with open(self.worker2, 'rb+') as f:
            f.seek(10)
            f.write(b'Bad Data')
        reader = WhisperReader(self.worker2, 'hosts.worker2.cpu')
        with self.assertRaises(Exception):
            reader.fetch(self.start_ts-5, self.start_ts)

    # Whisper Reader CarbonLink Query returns a dict
    @mock.patch('graphite.carbonlink.CarbonLinkPool.query')
    def test_WhisperReader_CarbonLinkQuery(self, carbonlink_query):
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        carbonlink_query.return_value = {}
        reader = WhisperReader(self.worker1, 'hosts.worker1.cpu')
        (_, values) = reader.fetch(self.start_ts-5, self.start_ts)
        self.assertEqual(values, [None, None, None, None, 1.0])
|
'''source.py - Compute the source term integral given first order results
This should be run as a standalone script.
Created on 6 Jul 2010
@author: Ian Huston
'''
from __future__ import division
import time
import helpers
import os.path
import numpy as N
import cosmomodels as c
import run_config
from sourceterm import sosource
from sourceterm import srcmerge
import sohelpers
import logging
import sys
import optparse
#Set logging of debug messages on or off
from run_config import _debug
def runsource(fofile, ninit=0, nfinal=-1, sourcefile=None,
              ntheta=run_config.ntheta, numsoks=run_config.numsoks, taskarray=None, srcclass=None):
    """Run parallel source integrand and second order calculation.

    Parameters:
        fofile      -- filename of first order results to wrap.
        ninit       -- first timestep index to calculate.
        nfinal      -- last timestep index; -1 means all available steps.
        sourcefile  -- output file; defaults to run_config.srcstub + task id + ".hf5".
        ntheta, numsoks -- resolution parameters for the source integrator.
        taskarray   -- dict with keys "min", "max", "step", "id"; the
                       timestep range is partitioned across the task array.
        srcclass    -- source-term class; defaults to run_config.srcclass.

    Returns the filename the source term was saved to.
    """
    # local name avoids shadowing the ``id`` builtin
    task_id = taskarray["id"]
    ntasks = (taskarray["max"] - taskarray["min"]) // taskarray["step"] + 1
    try:
        m = c.make_wrapper_model(fofile)
    except Exception:
        # BUG FIX: re-raise here.  The old code only logged and fell
        # through, then crashed later with an unrelated NameError because
        # ``m`` was never bound.
        log.exception("Error wrapping first order file.")
        raise
    if sourcefile is None:
        sourcefile = run_config.srcstub + str(task_id) + ".hf5"
    if nfinal == -1:
        nfinal = m.tresult.shape[0]
    nfostart = min(m.fotstartindex).astype(int)
    nstar = max(nfostart, ninit)
    totalnrange = len(m.tresult[nstar:nfinal])
    # number of timesteps handled by each task in the array
    nrange = N.ceil(totalnrange/ntasks)
    #Change myninit to match task id
    if task_id == 1:
        myninit = ninit
    else:
        myninit = nstar + (task_id-1)*nrange
    mynend = nstar + task_id*nrange
    if mynend > nfinal:
        mynend = nfinal
    log.info("Process rank: %d, ninit: %d, nend: %d", task_id, myninit, mynend)
    #Set source class using run_config
    if srcclass is None:
        srcclass = run_config.srcclass
    #get source integrand and save to file
    try:
        # BUG FIX: forward srcclass -- it was computed above but never
        # passed, so the configured source class was silently ignored.
        filesaved = sosource.getsourceandintegrate(m, sourcefile, ninit=myninit, nfinal=mynend,
                                                   ntheta=ntheta, numks=numsoks,
                                                   srcclass=srcclass)
        log.info("Source term saved as " + filesaved)
    except Exception:
        log.exception("Error getting source term.")
        raise
    #Destroy model instance to save memory
    if _debug:
        log.debug("Destroying model instance to reclaim memory...")
    try:
        del m
    except IOError:
        log.exception("Error closing model file!")
        raise
    return filesaved
def main(argv=None):
    """Main function: deal with command line arguments and start calculation as reqd.

    Parses task-array, timestep and logging options, configures logging,
    then calls runsource().  Returns 0 on success, 1 on failure (suitable
    for sys.exit)."""
    if not argv:
        argv = sys.argv
    #Parse command line options
    parser = optparse.OptionParser()
    parser.add_option("-f", "--filename", action="store", dest="foresults",
                      default=run_config.foresults, type="string",
                      metavar="FILE", help="first order results file, default=%default")
    arraygroup = optparse.OptionGroup(parser, "Task Array Options",
                                      "These options specify a task array to work inside. "
                                      "The array is the range taskmin:taskmax with step taskstep. "
                                      "The current process should be given a taskid in the range specified. "
                                      "The default is an array of 1:1, step 1 with id 1.")
    arraygroup.add_option("--taskmin", action="store", dest="taskmin", default=1,
                          type="int", help="start of task array range", metavar="NUM")
    arraygroup.add_option("--taskmax", action="store", dest="taskmax", default=1,
                          type="int", help="end of task array range", metavar="NUM")
    arraygroup.add_option("--taskstep", action="store", dest="taskstep", default=1,
                          type="int", help="step size of task array range", metavar="NUM")
    arraygroup.add_option("--taskid", action="store", dest="taskid", default=1,
                          type="int", help="task id of current process", metavar="NUM")
    parser.add_option_group(arraygroup)
    timegroup = optparse.OptionGroup(parser, "Timestep Options",
                                     "These options affect which timesteps the source term is calculated for.")
    timegroup.add_option("--tstart", action="store", dest="tstart", default=0,
                         type="int", help="first time step to calculate, default=%default")
    timegroup.add_option("--tend", action="store", dest="tend", default=-1,
                         type="int", help="last time step to calculate, use -1 for the last value, default=%default")
    parser.add_option_group(timegroup)
    loggroup = optparse.OptionGroup(parser, "Log Options",
                                    "These options affect the verbosity of the log files generated.")
    loggroup.add_option("-q", "--quiet",
                        action="store_const", const=logging.FATAL, dest="loglevel",
                        help="only print fatal error messages")
    loggroup.add_option("-v", "--verbose",
                        action="store_const", const=logging.INFO, dest="loglevel",
                        help="print informative messages")
    loggroup.add_option("--debug",
                        action="store_const", const=logging.DEBUG, dest="loglevel",
                        help="log lots of debugging information",
                        default=run_config.LOGLEVEL)
    loggroup.add_option("--console", action="store_true", dest="console",
                        default=False, help="if selected matches console log level "
                        "to selected file log level, otherwise only warnings are shown.")
    parser.add_option_group(loggroup)
    (options, args) = parser.parse_args(args=argv[1:])
    #Start the logging module
    if options.console:
        consolelevel = options.loglevel
    else:
        consolelevel = logging.WARN
    #Change logger to add task id
    if options.taskmax != options.taskmin:
        # running inside a task array: tag log records with our task id
        log.name = "src-" + str(options.taskid)
        sosource.set_log_name()
    logfile = os.path.join(run_config.LOGDIR, "src.log")
    helpers.startlogging(log, logfile, options.loglevel, consolelevel)
    if (not _debug) and (options.loglevel == logging.DEBUG):
        log.warn("Debugging information will not be stored due to setting in run_config.")
    taskarray = dict(min=options.taskmin,
                     max=options.taskmax,
                     step=options.taskstep,
                     id=options.taskid)
    if not os.path.isfile(options.foresults):
        raise IOError("First order file %s does not exist! Please run firstorder.py." % options.foresults)
    try:
        runsource(fofile=options.foresults, ninit=options.tstart,
                  nfinal=options.tend, taskarray=taskarray)
    except Exception:
        log.exception("Error getting source integral!")
        return 1
    return 0
# When run as a script: reset the root logger's handlers and hand control to
# main(); when imported as a module: log under the "src" name instead.
if __name__ == "__main__":
    log = logging.getLogger()
    log.handlers = []
    sys.exit(main())
else:
    log = logging.getLogger("src")
# TODO: Add srcclass argument to call.
'''source.py - Compute the source term integral given first order results
This should be run as a standalone script.
Created on 6 Jul 2010
@author: Ian Huston
'''
from __future__ import division
import time
import helpers
import os.path
import numpy as N
import cosmomodels as c
import run_config
from sourceterm import sosource
from sourceterm import srcmerge
import sohelpers
import logging
import sys
import optparse
#Set logging of debug messages on or off
from run_config import _debug
def runsource(fofile, ninit=0, nfinal=-1, sourcefile=None,
              ntheta=run_config.ntheta, numsoks=run_config.numsoks, taskarray=None, srcclass=None):
    """Run parallel source integrand and second order calculation.

    Parameters
    ----------
    fofile : str
        Filename of the first order results file to wrap.
    ninit : int, optional
        First timestep to calculate, default 0.
    nfinal : int, optional
        Last timestep to calculate; -1 means the last available step.
    sourcefile : str, optional
        Output filename; derived from run_config.srcstub and the task id
        when None.
    ntheta, numsoks : int, optional
        Integration parameters, taken from run_config by default.
    taskarray : dict, optional
        Task array description with keys "min", "max", "step" and "id";
        defaults to the single-task array 1:1 step 1, id 1.
    srcclass : class, optional
        Source term class; defaults to run_config.srcclass.

    Returns
    -------
    filesaved : str
        Name of the file the source term was saved to.
    """
    if taskarray is None:
        # Match the defaults used by main(): a single-task array.
        taskarray = dict(min=1, max=1, step=1, id=1)
    task_id = taskarray["id"]  # renamed from `id` to avoid shadowing the builtin
    ntasks = (taskarray["max"] - taskarray["min"]) // taskarray["step"] + 1
    try:
        m = c.make_wrapper_model(fofile)
    except Exception:
        # Re-raise: the original only logged here, after which the code
        # below failed with a confusing NameError on the undefined `m`.
        log.exception("Error wrapping first order file.")
        raise
    if sourcefile is None:
        sourcefile = run_config.srcstub + str(task_id) + ".hf5"
    if nfinal == -1:
        nfinal = m.tresult.shape[0]
    nfostart = min(m.fotstartindex).astype(int)
    nstar = max(nfostart, ninit)
    totalnrange = len(m.tresult[nstar:nfinal])
    # N.ceil returns a float; cast so the index arithmetic below stays integral.
    nrange = int(N.ceil(totalnrange / ntasks))
    # Change myninit to match task id
    if task_id == 1:
        myninit = ninit
    else:
        myninit = nstar + (task_id - 1) * nrange
    mynend = nstar + task_id * nrange
    if mynend > nfinal:
        mynend = nfinal
    log.info("Process rank: %d, ninit: %d, nend: %d", task_id, myninit, mynend)
    # Set source class using run_config
    if srcclass is None:
        srcclass = run_config.srcclass
    # Get source integrand and save to file
    try:
        filesaved = sosource.getsourceandintegrate(m, sourcefile, ninit=myninit, nfinal=mynend,
                                                   ntheta=ntheta, numks=numsoks, srcclass=srcclass)
        log.info("Source term saved as " + filesaved)
    except Exception:
        log.exception("Error getting source term.")
        raise
    # Destroy model instance to save memory
    if _debug:
        log.debug("Destroying model instance to reclaim memory...")
    try:
        del m
    except IOError:
        log.exception("Error closing model file!")
        raise
    return filesaved
def main(argv=None):
    """Parse command line arguments and start the source term calculation.

    Parameters
    ----------
    argv : list of str, optional
        Argument vector; defaults to sys.argv.

    Returns
    -------
    int
        0 on success, 1 on error (suitable for sys.exit).

    Raises
    ------
    IOError
        If the first order results file does not exist.
    """
    if not argv:
        argv = sys.argv
    # Parse command line options
    parser = optparse.OptionParser()
    parser.add_option("-f", "--filename", action="store", dest="foresults",
                      default=run_config.foresults, type="string",
                      metavar="FILE", help="first order results file, default=%default")
    arraygroup = optparse.OptionGroup(parser, "Task Array Options",
                                      "These options specify a task array to work inside. "
                                      "The array is the range taskmin:taskmax with step taskstep. "
                                      "The current process should be given a taskid in the range specified. "
                                      "The default is an array of 1:1, step 1 with id 1.")
    arraygroup.add_option("--taskmin", action="store", dest="taskmin", default=1,
                          type="int", help="start of task array range", metavar="NUM")
    arraygroup.add_option("--taskmax", action="store", dest="taskmax", default=1,
                          type="int", help="end of task array range", metavar="NUM")
    arraygroup.add_option("--taskstep", action="store", dest="taskstep", default=1,
                          type="int", help="step size of task array range", metavar="NUM")
    arraygroup.add_option("--taskid", action="store", dest="taskid", default=1,
                          type="int", help="task id of current process", metavar="NUM")
    parser.add_option_group(arraygroup)
    timegroup = optparse.OptionGroup(parser, "Timestep Options",
                                     "These options affect which timesteps the source term is calculated for.")
    timegroup.add_option("--tstart", action="store", dest="tstart", default=0,
                         type="int", help="first time step to calculate, default=%default")
    timegroup.add_option("--tend", action="store", dest="tend", default=-1,
                         type="int", help="last time step to calculate, use -1 for the last value, default=%default")
    parser.add_option_group(timegroup)
    loggroup = optparse.OptionGroup(parser, "Log Options",
                                    "These options affect the verbosity of the log files generated.")
    loggroup.add_option("-q", "--quiet",
                        action="store_const", const=logging.FATAL, dest="loglevel",
                        help="only print fatal error messages")
    loggroup.add_option("-v", "--verbose",
                        action="store_const", const=logging.INFO, dest="loglevel",
                        help="print informative messages")
    loggroup.add_option("--debug",
                        action="store_const", const=logging.DEBUG, dest="loglevel",
                        help="log lots of debugging information",
                        default=run_config.LOGLEVEL)
    loggroup.add_option("--console", action="store_true", dest="console",
                        default=False, help="if selected matches console log level "
                        "to selected file log level, otherwise only warnings are shown.")
    parser.add_option_group(loggroup)
    # Positional arguments are not used by this script, only the options.
    options, _ = parser.parse_args(args=argv[1:])
    # Start the logging module
    if options.console:
        consolelevel = options.loglevel
    else:
        consolelevel = logging.WARN
    # Change logger to add task id
    if options.taskmax != options.taskmin:
        log.name = "src-" + str(options.taskid)
        sosource.set_log_name()
    logfile = os.path.join(run_config.LOGDIR, "src.log")
    helpers.startlogging(log, logfile, options.loglevel, consolelevel)
    if (not _debug) and (options.loglevel == logging.DEBUG):
        # logger.warn is a deprecated alias of warning
        log.warning("Debugging information will not be stored due to setting in run_config.")
    taskarray = dict(min=options.taskmin,
                     max=options.taskmax,
                     step=options.taskstep,
                     id=options.taskid)
    if not os.path.isfile(options.foresults):
        raise IOError("First order file %s does not exist! Please run firstorder.py." % options.foresults)
    try:
        runsource(fofile=options.foresults, ninit=options.tstart,
                  nfinal=options.tend, taskarray=taskarray)
    except Exception:
        log.exception("Error getting source integral!")
        return 1
    return 0
# When run as a script: reset the root logger's handlers and hand control to
# main(); when imported as a module: log under the "src" name instead.
if __name__ == "__main__":
    log = logging.getLogger()
    log.handlers = []
    sys.exit(main())
else:
    log = logging.getLogger("src")
|
from __future__ import print_function
try:
from queue import Empty # Python 3
except ImportError:
from Queue import Empty # Python 2
import time
import io
from ipykernel.kernelbase import Kernel
from datetime import datetime
import os
import os.path
import tempfile
from jupyter_client.manager import KernelManager
from jupyter_client.ioloop import IOLoopKernelManager
from jupyter_core.application import JupyterApp
import re
import json
from threading import (Thread, Event, Timer)
try:
from os import getcwdu as getcwd # Python 2
except ImportError:
from os import getcwd # Python 3
import pickle
import dateutil
from .log import ExecutionInfo
from traitlets.config.configurable import LoggingConfigurable, MultipleInstanceError
from traitlets import (
Unicode, List, default
)
from ipython_genutils import py3compat
from ipython_genutils.py3compat import PY3
from types import MethodType
from fluent import sender
# Cell-metadata / environment keys used to configure the wrapper
# (consumed by _load_env() and is_summarize_on()):
SUMMARIZE_KEY = 'lc_wrapper'                # "start:header:exec:footer" line counts
IGNORE_SUMMARIZE_KEY = 'lc_wrapper_regex'   # keyword regex, or "file:<name>" to load a pattern file
FORCE_SUMMARIZE_KEY = 'lc_wrapper_force'    # "on"/"off" override for summarize detection
# File name searched for in the notebook directory and $HOME for patterns.
IPYTHON_DEFAULT_PATTERN_FILE = '.lc_wrapper_regex.txt'
# Default keyword patterns (one regex per line), written to the file above
# when no pattern file exists yet (see _generate_default_keyword_pattern_file).
IPYTHON_DEFAULT_PATTERN = '''ERROR|error|Error|Panic|panic|Invalid|invalid|Warning|warning|Bad|bad
FAIL|Fail|fail
(Not|not) (Found|found)
(Device)? not ready
out of (Memory|memory)
interrupt(ed)?|abort(ed)?|stop(ped)?
insecure|inaccessible|Forbidden|forbidden|Denied|denied
Unauthorised|unauthorised|Unauthorized|unauthorized
(No|no|Low|low) (.+ )?(Capacity|capacity|Space|space)
has (encountered|stopped)
is not
initialize(d)?|initialise(d)?|start(ed)?|restart(ed)?|spawn(ed)?|complete(d)?
finish(ed)?|resume(d)?|begin|attach(ed)?|detach(ed)?|reboot(ed)?|suspend(ed)?
done|terminate(d)?|open(ed)?|close(d)?|(dis)?connect(ed)?|establish(ed)?
allocate(d)?|assign(ed)?|load(ed)?|(in|re)?activate(d)?|block(ed)?|kill(ed)?
refuse(d)?|insufficient|lack
link(ed)? (up|down)'''
class ChannelReaderThread(Thread, LoggingConfigurable):
    """Forwards messages from one wrapped-kernel channel to the frontend.

    Reads messages from a single channel (iopub or stdin) of the wrapped
    kernel's client and re-sends them on the corresponding wrapper stream,
    re-parenting them so the frontend sees the wrapper's own message ids.
    """

    _exiting = False  # set by stop() to make run() exit its loop

    def __init__(self, kernel, client, stream, session, channel, **kwargs):
        """Bind the thread to one channel of the wrapped kernel's client.

        Parameters
        ----------
        kernel : BufferedKernelBase
            The wrapper kernel that owns this reader.
        client : jupyter_client client
            Client connected to the wrapped kernel.
        stream : zmq stream/socket
            Wrapper-side socket to re-send messages on.
        session : jupyter_client.Session
            Session used to serialize outgoing messages.
        channel : str
            Channel name, e.g. 'iopub' or 'stdin'.
        """
        Thread.__init__(self, **kwargs)
        LoggingConfigurable.__init__(self, **kwargs)
        self.daemon = True
        self.channel_name = channel
        self.channel = getattr(client, channel + "_channel")
        self.kernel = kernel
        self.client = client
        self.stream = stream
        self.session = session
        self.log.debug("init ChannelReaderThread: channel_name=%s",
                       self.channel_name)

    def run(self):
        """Poll the channel and relay every message until stop() is called."""
        self.log.debug("start ChannelReaderThread: channel_name=%s",
                       self.channel_name)
        while True:
            try:
                msg = self.channel.get_msg(block=True, timeout=0.2)
                self.log.debug("Received %s message: %s",
                               self.channel_name, str(msg))
                msg_type = msg['msg_type']
                idle = False
                status_msg = False
                if self.channel_name == 'iopub':
                    content = msg['content']
                    if msg_type == 'status':
                        status_msg = True
                        if content['execution_state'] == 'idle':
                            # Remember which request went idle so the kernel's
                            # _wait_for_idle() can match on it.
                            self.kernel.idle_parent_header = msg['parent_header']
                            idle = True
                            if msg['parent_header']['msg_type'] == 'shutdown_request':
                                continue
                msg_id = msg['parent_header']['msg_id']
                parent_header = self.kernel.parent_headers.get(msg_id)
                self.log.debug("parent_header: %s", str(parent_header))
                if self.channel_name == 'iopub':
                    ident = self.kernel._topic(msg_type)
                    msg_content = self.kernel._hook_iopub_msg(parent_header, msg)
                else:
                    ident = self.kernel._parent_ident
                    msg_content = msg['content']
                if not status_msg:
                    # Re-send under the original (wrapper-side) parent header.
                    self.session.send(self.stream,
                                      msg_type,
                                      msg_content,
                                      parent=parent_header,
                                      ident=ident,
                                      header=msg['header'],
                                      metadata=msg['metadata'],
                                      buffers=msg['buffers'])
                if self.channel_name == 'stdin' and msg_type == 'input_request':
                    self.log.debug("do input_request")
                    self.input_request()
                if idle:
                    self.kernel.idle_event.set()
                    parent_msg_id = msg['parent_header'].get('msg_id')
                    if parent_msg_id is not None:
                        self.kernel._remove_parent_header(parent_msg_id)
            except Empty:
                # Timeout with no message; loop again (and honour _exiting).
                pass
            except Exception as e:
                self.log.error(e, exc_info=True)
            finally:
                if self._exiting:
                    break
        self.log.debug("exit ChannelReaderThread: %s", self.channel_name)

    def input_request(self):
        """Wait for the frontend's input_reply and forward it to the wrapped kernel."""
        self.log.debug("wait input_reply")
        while True:
            try:
                ident, reply = self.session.recv(self.stream, 0)
            except Exception:
                # logger.warn is a deprecated alias of warning
                self.log.warning("Invalid Message:", exc_info=True)
            except KeyboardInterrupt:
                # re-raise KeyboardInterrupt, to truncate traceback
                raise KeyboardInterrupt
            else:
                break
        self.log.debug("input_reply: %s", str(reply))
        msg = self.client.session.msg(reply['msg_type'],
                                      content=reply['content'],
                                      parent=reply['parent_header'],
                                      header=reply['header'],
                                      metadata=reply['metadata'])
        self.client.stdin_channel.send(msg)

    def stop(self):
        """Ask the reader loop to finish and wait for the thread to exit."""
        # Thread.isAlive() was removed in Python 3.9; use is_alive().
        if self.is_alive():
            self._exiting = True
            self.join()
class BufferedKernelBase(Kernel):
# Shell message types forwarded synchronously: the wrapper blocks for the
# wrapped kernel's reply before returning to the frontend.
blocking_msg_types = [
    'execute_request',
    'history_request',
    'complete_request',
    'inspect_request',
    'kernel_info_request',
    'comm_info_request',
    'shutdown_request'
]
# Channels mirrored by ChannelReaderThread instances.  (Name kept as-is
# for compatibility although it misspells "channels".)
proxy_channles = ['iopub', 'stdin']
# NOTE(review): the mutable class attributes below are shared across
# instances; presumably only one kernel instance exists per process — confirm.
threads = {}                    # channel name -> ChannelReaderThread
parent_headers = {}             # wrapped-kernel msg_id -> original parent header
idle_event = Event()            # set when the wrapped kernel reports 'idle'
idle_parent_header = None       # parent header of the last idle status message
execute_request_msg_id = None   # msg_id of the in-flight execute_request
log_file_object = None          # currently open per-cell log file, if any
data_dir = Unicode()            # Jupyter data directory (see _data_dir_default)
@default('data_dir')
def _data_dir_default(self):
    """Resolve the Jupyter data dir from the running JupyterApp, or a private one."""
    app = None
    try:
        if JupyterApp.initialized():
            app = JupyterApp.instance()
    except MultipleInstanceError:
        # More than one app type instantiated; fall through to a private app.
        pass
    if app is None:
        # create an app, without the global instance
        app = JupyterApp()
        app.initialize(argv=[])
    return app.data_dir
# Path of the server signature file; configurable via traitlets config.
server_signature_file = Unicode(
    help="""The file where the server signature is stored."""
).tag(config=True)

@default('server_signature_file')
def _server_signature_file_default(self):
    """Default path: env override, else <data_dir>/server_signature."""
    # Environment variable (set by lc_nblineage) takes precedence.
    if 'lc_nblineage_server_signature_path' in os.environ:
        return os.environ['lc_nblineage_server_signature_path']
    if not self.data_dir:
        return ''
    return os.path.join(self.data_dir, 'server_signature')
# Candidate locations of the keyword pattern file, in search order.
keyword_pattern_file_paths = List()

@default('keyword_pattern_file_paths')
def _keyword_pattern_file_paths_default(self):
    """Search the notebook directory first, then the home directory."""
    return [
        os.path.join(self.get_notebook_path(), IPYTHON_DEFAULT_PATTERN_FILE),
        os.path.join(os.path.expanduser('~/'), IPYTHON_DEFAULT_PATTERN_FILE)
    ]
# Candidate log output directories, tried in order (see start_ipython_kernel).
log_dirs = List()

@default('log_dirs')
def _log_dirs_default(self):
    """Prefer a .log dir beside the notebook, falling back to ~/.log."""
    return [
        os.path.join(self.get_notebook_path(), '.log'),
        os.path.expanduser('~/.log')
    ]
# Candidate .lc_wrapper config file locations, in search order.
configfile_paths = List()

@default('configfile_paths')
def _configfile_paths_default(self):
    """Search the notebook directory first, then the home directory."""
    return [
        os.path.join(self.get_notebook_path(), '.lc_wrapper'),
        os.path.join(os.path.expanduser('~/'), '.lc_wrapper')
    ]
def __init__(self, **kwargs):
    """Initialize the wrapper kernel.

    Sets up an optional fluentd sender (driven by environment variables),
    installs the proxy shell message handlers and starts the wrapped kernel.
    """
    Kernel.__init__(self, **kwargs)
    if 'lc_wrapper_fluentd_host' in os.environ:
        # Fluentd forwarding is enabled only when the host variable is set;
        # port and tag have defaults.
        fluentd_host = os.environ['lc_wrapper_fluentd_host']
        fluentd_port = int(os.environ.get('lc_wrapper_fluentd_port', '24224'))
        fluentd_tag = os.environ.get('lc_wrapper_fluentd_tag', 'lc_wrapper')
        self.sender = sender.FluentSender(fluentd_tag,
                                          host=fluentd_host,
                                          port=fluentd_port)
        self.log.info('lc_wrapper: Enabled fluent logger: host=%s, port=%s, tag=%s',
                      fluentd_host, fluentd_port, fluentd_tag)
    else:
        self.sender = None
    self._init_message_handler()
    self.start_ipython_kernel()
def _init_message_handler(self):
    """Install a proxy handler for (almost) every shell message type.

    The handler forwards each request to the wrapped kernel, blocks for
    its reply for blocking_msg_types, relays the reply back to the
    frontend, then waits for the wrapped kernel to go idle.
    kernel_info_request and shutdown_request keep their default handlers.
    """
    def handler(self, stream, ident, parent):
        # Generic shell-channel proxy, bound below for each msg_type.
        self.log.debug("Received shell message: %s", str(parent))
        msg_type = parent['msg_type']
        content = parent['content']
        self._hook_request_msg(parent)
        self.idle_event.clear()
        msg = self.kc.session.msg(msg_type, content)
        msgid = msg['header']['msg_id']
        self.log.debug("save parent_header: %s => %s", msgid, str(parent['header']))
        # Remember the original parent so iopub traffic can be re-parented.
        self.parent_headers[msgid] = parent['header']
        self.kc.shell_channel.send(msg)
        reply_msg = None
        if msg_type in self.blocking_msg_types:
            while True:
                try:
                    reply_msg = self.kc._recv_reply(msgid, timeout=None)
                    break
                except KeyboardInterrupt:
                    self.log.debug("KeyboardInterrupt", exc_info=True)
                    # propagate SIGINT to wrapped kernel
                    self.km.interrupt_kernel()
                    # this timer fire when the ipython kernel didnot interrupt within 5.0 sec.
                    self.timer = Timer(5.0, self.close_files)
                    self.log.debug('>>>>> close files: timer fired')
                    self.timer.start()
            reply_msg_content = self._hook_reply_msg(reply_msg)
            self.log.debug('reply: %s', reply_msg)
            reply_msg = self.session.send(stream,
                                          reply_msg['msg_type'],
                                          reply_msg_content,
                                          parent, ident,
                                          header=reply_msg['header'],
                                          metadata=reply_msg['metadata'],
                                          buffers=reply_msg['buffers'])
            self._post_send_reply_msg(parent, reply_msg)
        self._wait_for_idle(msgid)
        self._post_wait_for_idle(parent, reply_msg)
    for msg_type in self.msg_types:
        if msg_type == 'kernel_info_request':
            continue
        if msg_type == 'shutdown_request':
            continue
        self.log.debug('override shell message handler: msg_type=%s', msg_type)
        if PY3:
            setattr(self, msg_type, MethodType(handler, self))
        else:
            # Python 2 MethodType needs the class as third argument.
            setattr(self, msg_type, MethodType(handler, self, type(self)))
        self.shell_handlers[msg_type] = getattr(self, msg_type)
    comm_msg_types = ['comm_open', 'comm_msg', 'comm_close']
    for msg_type in comm_msg_types:
        self.log.debug('init shell comm message handler: msg_type=%s', msg_type)
        if PY3:
            setattr(self, msg_type, MethodType(handler, self))
        else:
            setattr(self, msg_type, MethodType(handler, self, type(self)))
        self.shell_handlers[msg_type] = getattr(self, msg_type)
def start_ipython_kernel(self):
    """Start the wrapped kernel, connect to it and spawn the reader threads."""
    kernel_name = self._get_wrapped_kernel_name()
    self.km = KernelManager(kernel_name=kernel_name,
                            client_class='jupyter_client.blocking.BlockingKernelClient')
    self.log.debug('kernel_manager: %s', str(self.km))
    self.log.info('start wrapped kernel: %s', kernel_name)
    self.km.start_kernel()
    self.kc = self.km.client()
    self.log.debug('kernel_client: %s', str(self.kc))
    self.log.debug('start_channels')
    self.kc.start_channels()
    try:
        self.log.debug('wait for ready of wrapped kernel')
        self.kc.wait_for_ready(timeout=None)
    except RuntimeError:
        # Clean up the half-started kernel before propagating the failure.
        self.kc.stop_channels()
        self.km.shutdown_kernel()
        raise
    # One reader thread per proxied channel (iopub, stdin).
    for channel in self.proxy_channles:
        stream = getattr(self, channel + '_socket')
        thread = ChannelReaderThread(self, self.kc, stream, self.session, channel)
        thread.start()
        self.threads[channel] = thread
    # Pick the first writable candidate log directory.
    # NOTE(review): if no directory in log_dirs is writable, self.log_path is
    # never assigned and the debug call below raises AttributeError — confirm.
    for log_dir in self.log_dirs:
        if self._is_writable_dir(log_dir):
            self.log_path = log_dir
            break
    self.log.debug('log output directory: %s', self.log_path)
    if self._find_default_keyword_pattern_file() is None:
        self.log.info('default keyword pattern file "%s" not found', IPYTHON_DEFAULT_PATTERN_FILE)
        try:
            self._generate_default_keyword_pattern_file()
        except Exception as e:
            self.log.exception("failed to generate default keyword pattern file: %s", e)
    self.exec_info = None
    self.notebook_path = self.get_notebook_path()
    self.log.debug('notebook_path: %s', self.notebook_path)
def _is_writable_dir(self, path):
    """Return True if `path` exists (or can be created) and allows writes.

    Probes by creating and removing a temporary directory inside `path`.
    """
    probe = None
    try:
        if not os.path.exists(path):
            os.makedirs(path)
        probe = tempfile.mkdtemp(dir=path)
    except (OSError, IOError) as exc:
        self.log.debug("_is_writable_dir: %s", exc)
        return False
    else:
        return True
    finally:
        # Always clean up the probe directory, success or not.
        if probe is not None:
            os.rmdir(probe)
def _get_wrapped_kernel_name(self, km=None):
    """Return the kernel spec name of the kernel to wrap.

    Subclasses must override this.  `km` is now optional because
    start_ipython_kernel() calls this method with no arguments; the
    original required parameter made every such call a TypeError.
    """
    raise NotImplementedError()
def _remove_parent_header(self, msg_id):
    """Forget the stored parent header for `msg_id`, if one exists."""
    if msg_id not in self.parent_headers:
        return
    parent_header = self.parent_headers.pop(msg_id)
    self.log.debug("remove parent_header: %s => %s", msg_id, str(parent_header))
def _hook_request_msg(self, parent):
    """Run request-side preprocessing; only execute_request needs any."""
    if parent['msg_type'] == 'execute_request':
        self._hook_execute_request_msg(parent)
def _hook_execute_request_msg(self, parent):
    """Prepare logging/summarizing state before forwarding an execute_request.

    Opens the per-cell log, decides whether summarizing is on (possibly
    stripping the leading '!!' marker from the code) and stores the
    rewritten code back into the request content.
    """
    try:
        content = parent[u'content']
        code = py3compat.cast_unicode_py2(content[u'code'])
        silent = content[u'silent']
        allow_stdin = content.get('allow_stdin', False)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; narrowed to Exception.
        self.log.error("Got bad msg: ")
        self.log.error("%s", parent)
        return
    self.execute_request_msg_id = parent['header']['msg_id']
    if not silent:
        self.execution_count += 1
    cell_log_id = self._get_cell_id(parent)
    if cell_log_id is not None:
        self.log_history_file_path = os.path.join(self.log_path,
                                                  cell_log_id,
                                                  cell_log_id + u'.json')
    else:
        self.log_history_file_path = None
    self.log_history_id = cell_log_id
    self.log_history_data = self._read_log_history_file()
    notebook_data = self._get_notebook_data(parent)
    self.exec_info = ExecutionInfo(code, self.get_server_signature(), notebook_data)
    if not silent:
        env = self._get_config()
        self.summarize_on, new_code = self.is_summarize_on(code, env)
        self._init_default_config()
        self._start_log()
        if self.summarize_on:
            self._start_summarize()
        self._load_env(env)
        if not self.log_history_id is None:
            # Record the cell meme so logs can be correlated with the cell.
            meme = {'lc_cell_meme': {'current': self.log_history_id}}
            self.log_buff_append(u'{}\n----\n'.format(json.dumps(meme)))
        self.log_buff_append(u'{}\n----\n'.format(code))  # code
        self._log_buff_flush()
        self.log_buff_append(self.exec_info.to_logfile_header() + u'----\n')
        content[u'code'] = new_code
        self._allow_stdin = allow_stdin
def _hook_reply_msg(self, reply_msg):
    """Return the content to relay for a shell reply.

    execute_reply content is rewritten; everything else passes through.
    """
    if reply_msg['msg_type'] != 'execute_reply':
        return reply_msg['content']
    return self._hook_execute_reply_msg(reply_msg)
def _hook_execute_reply_msg(self, reply):
    """Rewrite an execute_reply: cancel the interrupt timer, substitute the
    wrapper's execution count and attach the wrapper's log path."""
    if hasattr(self, "timer"):
        self.timer.cancel()
        self.log.debug('>>>>> close files: timer cancelled')
    content = reply['content']
    # Report the wrapper's own counter, not the wrapped kernel's.
    content['execution_count'] = self.execution_count
    content['lc_wrapper'] = {
        'log_path': self.file_full_path
    }
    self.exec_info.execute_reply_status = content['status']
    return content
def _post_send_reply_msg(self, parent, reply_msg):
    """After relaying an execute_reply, abort queued requests on error
    (mirrors the stop_on_error behaviour of the request content)."""
    msg_type = parent['msg_type']
    if msg_type == 'execute_request':
        content = parent['content']
        silent = content['silent']
        stop_on_error = content.get('stop_on_error', True)
        if not silent and reply_msg['content']['status'] == u'error' and stop_on_error:
            self._abort_queues()
def _post_wait_for_idle(self, parent, reply_msg):
    """After idle: flush the final (summarized) stdout for execute replies."""
    if reply_msg is None:
        # Non-blocking message types produce no reply to post-process.
        return
    if reply_msg['msg_type'] == 'execute_reply':
        self.log.debug('flushing stdout stream')
        self._send_last_stdout_stream_text()
        self.log.debug('flushed stdout stream')
        self.execute_request_msg_id = None
def _hook_iopub_msg(self, parent_header, msg):
    """Rewrite an iopub message so it appears to answer the wrapper's request."""
    msg_id = parent_header['msg_id']
    content = msg['content']
    # replace msg_id in the content
    self._replace_msg_id(msg_id, msg['parent_header']['msg_id'], content)
    if self.execute_request_msg_id == msg_id:
        # Output of the in-flight execute_request: run it through the
        # logging/summarizing hook.
        return self._output_hook(msg)
    return content
def _replace_msg_id(self, msg_id, wrapped_msg_id, content):
    """Recursively replace the wrapped kernel's msg_id with the wrapper's
    msg_id anywhere it appears as a value inside `content`."""
    for k, v in content.items():
        if isinstance(v, dict):
            self._replace_msg_id(msg_id, wrapped_msg_id, v)
        elif v == wrapped_msg_id:
            content[k] = msg_id
            self.log.debug('replace msg_id in content: %s => %s',
                           wrapped_msg_id, msg_id)
def _write_log(self, msg):
    """Append `msg` to the open log file and record the new file size."""
    if msg is None:
        return
    self.log_file_object.write(msg)
    self.exec_info.file_size = self.log_file_object.tell()
def open_log_file(self, path):
    """Open a new timestamped log file under `path`/<YYYYMMDD>/ for appending."""
    self.log.debug('>>>>> open_log_file')
    now = datetime.now(dateutil.tz.tzlocal())
    path = os.path.join(path, now.strftime("%Y%m%d"))
    if not os.path.exists(path):
        os.makedirs(path)
    # Millisecond suffix keeps file names unique within the same second.
    file_name = now.strftime("%Y%m%d-%H%M%S") + "-%04d" % (now.microsecond // 1000)
    self.file_full_path = os.path.join(path, file_name + u'.log')
    self.exec_info.log_path = self.file_full_path
    self.log_file_object = io.open(self.file_full_path, "a", encoding='utf-8')
    self.log.debug(self.file_full_path)
    self.log.debug(self.log_file_object)
def close_log_file(self):
    """Close the current log file (if open) and forward it to fluentd."""
    self.log.debug('>>>>> close_log_file')
    if self.log_file_object is None:
        self.log.debug('>>>>> close_log_file: not executed because self.log_file_object is None')
        return
    if not self.log_file_object.closed:
        self.log.debug('>>>>> log file closed')
        self.log_file_object.close()
        # Only ship the log once, when we actually closed it here.
        self.send_fluent_log()
    else:
        self.log.debug('>>>>> close_log_file: not executed because self.log_file_object is already closed')
    self.log.debug('close_log_file: self.log_file_object = None')
    self.log_file_object = None
def send_fluent_log(self):
    """Send the finished log file's contents to fluentd, if configured."""
    if self.sender is None:
        return
    self.log.debug('>>>>> send_fluent_log')
    record = {}
    with io.open(self.exec_info.log_path, 'r') as f:
        record['log'] = f.read()
    self.sender.emit(None, record)
    # Fix: the original format string had three %s placeholders but four
    # arguments, which makes the logging call fail to format.
    self.log.info('lc_wrapper: send_fluent_log: cell_meme=%s, uid=%s, gid=%s, server_signature=%s',
                  self.log_history_id, os.getuid(), os.getgid(), self.get_server_signature())
def get_server_signature(self):
    """Return the stored server signature, or None when the file is absent."""
    if not os.path.exists(self.server_signature_file):
        return None
    with io.open(self.server_signature_file, 'r') as f:
        return f.read()
def _wait_for_idle(self, msg_id):
    """Block until the wrapped kernel reports 'idle' for `msg_id`.

    NOTE(review): when an idle for a different msg_id is observed the event
    is left set, so this loop busy-spins until the matching idle arrives —
    confirm whether the event should be cleared before continuing.
    """
    self.log.debug('waiting for idle: msg_id=%s', msg_id)
    while True:
        self.idle_event.wait()
        if self.idle_parent_header['msg_id'] != msg_id:
            self.log.warn('unexpected idle message received: expected msg_id=%s, received msg_id=%s',
                          msg_id, self.idle_parent_header['msg_id'])
            continue
        self.log.debug('idle: msg_id=%s', msg_id)
        return
def get_notebook_path(self):
    """Return the kernel's working directory (used as the notebook directory)."""
    return getcwd()
def _find_config_file(self):
    """Return the first existing .lc_wrapper config path, or None."""
    existing = (p for p in self.configfile_paths if os.path.exists(p))
    return next(existing, None)
def _get_config(self):
    """Build the effective config mapping.

    Reads KEY=VALUE lines from the .lc_wrapper file (ignoring blanks and
    '#' comments), then overlays os.environ on top, so environment
    variables always win.  Returns os.environ itself when no config file
    exists.
    """
    env = os.environ
    config_path = self._find_config_file()
    if config_path is None:
        return env
    line_pattern = re.compile(r'(\S+)=(".*?"|\S+)')
    config = {}
    with io.open(config_path, 'r', encoding='utf-8') as f:
        for raw in f.readlines():
            line = raw.strip()
            if not line or line.startswith('#'):
                continue
            m = line_pattern.match(line)
            if m is None:
                self.log.warning('Unexpected line: {} at {}'.format(line, config_path))
            else:
                config[m.group(1)] = m.group(2)
    config.update(env)
    return config
def send_clear_content_msg(self):
    """Ask the frontend to clear the cell output (deferred until next output)."""
    content = {'wait': True}
    self.session.send(self.iopub_socket, 'clear_output', content,
                      self._parent_header, ident=None, buffers=None,
                      track=False, header=None, metadata=None)
def _load_env(self, env):
    """Load summarize line counts and keyword patterns from `env`.

    `env` is the merged config/environ mapping from _get_config().  The
    lc_wrapper value has the form "start:header:exec:footer" (empty fields
    keep their defaults); lc_wrapper_regex is either a regex, or
    "file:<name>" naming a pattern file ("file:default" selects the
    standard pattern file).
    """
    summarize = env.get(SUMMARIZE_KEY, '')
    self.log.debug("lc_wrapper = " + summarize)
    summarize_pattern = re.compile(r'^([0-9]*):([0-9]*):([0-9]*):([0-9]*)$')
    summarize_params = summarize_pattern.match(summarize)
    if summarize_params is not None and len(summarize_params.group(1)) != 0:
        self.summarize_start_lines = int(summarize_params.group(1))
    if summarize_params is not None and len(summarize_params.group(2)) != 0:
        self.summarize_header_lines = int(summarize_params.group(2))
    if summarize_params is not None and len(summarize_params.group(3)) != 0:
        self.summarize_exec_lines = int(summarize_params.group(3))
    if summarize_params is not None and len(summarize_params.group(4)) != 0:
        self.summarize_footer_lines = int(summarize_params.group(4))
    # Header + footer must always fit inside the start threshold.
    self.summarize_start_lines = max(self.summarize_start_lines,
                                     self.summarize_header_lines +
                                     self.summarize_footer_lines + 1)
    self.log_history_data = self._read_log_history_file()
    self.repatter = []
    text = env.get(IGNORE_SUMMARIZE_KEY, 'file:default')
    if text is None or len(text) == 0:
        pass
    elif 'file:' in text:
        # Fix: the original sliced with text.rfind('find:')+6, a typo that
        # only produced the right substring by accident when the value
        # started with 'file:'.
        file_name = text[text.rfind('file:') + len('file:'):].strip()
        if file_name == 'default':
            file_path = self._find_default_keyword_pattern_file()
        else:
            file_path = os.path.join(self.notebook_path, file_name)
        if file_path is None:
            self.keyword_buff_append(u'error : {} Not found'.format(IPYTHON_DEFAULT_PATTERN_FILE))
            self.log.warning('lc_wrapper_regex: %s Not found', IPYTHON_DEFAULT_PATTERN_FILE)
        elif os.path.exists(file_path):
            try:
                patterns = self._read_keyword_pattern_file(file_path)
                for ptxt in patterns:
                    self.repatter.append(re.compile(ptxt))
            except Exception as e:
                self.keyword_buff_append(u'error : ' + str(e))
                self.log.exception("lc_wrapper_regex: %s", e)
        else:
            self.keyword_buff_append(u'error : {} Not found'.format(file_path))
            self.log.warning('lc_wrapper_regex: %s Not found', file_path)
    else:
        try:
            self.repatter.append(re.compile(text))
        except Exception as e:
            self.keyword_buff_append(u'error : ' + str(e))
            self.log.exception("lc_wrapper_regex: %s", e)
def _find_default_keyword_pattern_file(self):
    """Return the first existing keyword pattern file path, or None."""
    candidates = (p for p in self.keyword_pattern_file_paths if os.path.exists(p))
    return next(candidates, None)
def _read_keyword_pattern_file(self, filename):
    """Read keyword patterns from `filename`, one regex per non-empty line."""
    with open(filename, 'r') as pattern_file:
        raw_lines = pattern_file.readlines()
    patterns = [line.strip() for line in raw_lines if line.strip()]
    self.log.debug('patterns :')
    for pattern in patterns:
        self.log.debug(pattern)
    return patterns
def _generate_default_keyword_pattern_file(self):
    """Write IPYTHON_DEFAULT_PATTERN to the first writable candidate path.

    Stops at the first successful write.  If every candidate path failed
    with an error, the last error is re-raised; if every candidate already
    exists, nothing is written and nothing is raised.
    """
    error = None
    self.log.info('generate default keyword pattern file')
    for path in self.keyword_pattern_file_paths:
        if not os.path.exists(path):
            try:
                with open(path, 'w') as f:
                    f.write(IPYTHON_DEFAULT_PATTERN)
                self.log.info('generated default keyword pattern file: %s', path)
                return
            except Exception as e:
                # Remember the failure but keep trying the next candidate.
                self.log.debug('_generate_default_keyword_pattern_file: %s', str(e))
                error = e
    if error is not None:
        raise error
def is_summarize_on(self, code, env):
    """Decide whether summarizing is enabled for this cell.

    A leading '!!' on any line start enables summarizing and is stripped
    from the code; the lc_wrapper_force env value ('on'/'off') overrides
    the detection.  Returns (enabled, possibly-rewritten code).
    """
    force = None
    if FORCE_SUMMARIZE_KEY in env:
        force_text = env[FORCE_SUMMARIZE_KEY].strip().lower()
        force = {'on': True, 'off': False}.get(force_text)
    marker = re.match(r'^\s*!!', code, re.M)
    if marker:
        enabled = True if force is None else force
        return (enabled, code[marker.end():])
    enabled = False if force is None else force
    return (enabled, code)
def _log_buff_flush(self, force=False):
    """Write the buffered log text to disk once it exceeds 100 entries,
    or unconditionally when `force` is True."""
    if not force and len(self.log_buff) <= 100:
        return
    self._write_log(u''.join(self.log_buff))
    del self.log_buff[:]
def log_buff_append(self, text=None):
    """Append `text` (a string or list of strings) to the log buffer,
    unless message buffering has been blocked."""
    if self.block_messages or text is None:
        return
    if isinstance(text, list):
        self.log_buff.extend(text)
    else:
        self.log_buff.append(text)
def keyword_buff_append(self, text, highlight=True):
    """Append matched line(s) to the keyword buffer, optionally wrapping
    each in red ANSI escape codes."""
    def decorate(entry):
        return u'\033[0;31m{}\033[0m'.format(entry) if highlight else entry
    if isinstance(text, list):
        self.keyword_buff.extend([decorate(entry) for entry in text])
    else:
        self.keyword_buff.append(decorate(text))
def display_keyword_buff(self):
    """Render the collected keyword-matched lines for the summary view.

    Shows at most 2 * summarize_header_lines matched lines, followed by a
    red notice when more were collected.  Returns '' when nothing matched.
    """
    if len(self.keyword_buff) == 0:
        return ''
    stream_text = u'...\n'
    stream_text += u'\n'.join(self.keyword_buff[:self.summarize_header_lines * 2]) + '\n'
    if len(self.keyword_buff) <= self.summarize_header_lines * 2:
        return stream_text
    msg = u'Matched lines exceed maximum number of view ({})' \
        .format(self.summarize_header_lines * 2)
    stream_text += u'\033[0;31m{}\033[0m\n'.format(msg)
    return stream_text
def highlight_keywords(self, text):
    """Return `text` with every keyword match wrapped in red ANSI codes,
    or None when no pattern in self.repatter matches at all."""
    matched = [p.search(text) for p in self.repatter]
    matched = [m for m in matched if m is not None]
    if len(matched) == 0:
        return None
    remain = text
    result = None
    while len(matched) > 0:
        # Left-most match position across all patterns.
        left = min([m.start() for m in matched])
        if result is None:
            result = remain[:left]
        else:
            result += remain[:left]
        # Among matches starting at the same position, highlight the longest.
        keywords = [m.group() for m in matched if m.start() == left]
        keyword = sorted(keywords, key=lambda s: len(s))[-1]
        result += u'\033[0;31m{}\033[0m'.format(keyword)
        remain = remain[left + len(keyword):]
        # Re-scan the remainder for further matches.
        matched = [p.search(remain) for p in self.repatter]
        matched = [m for m in matched if m is not None]
    return result + remain
def _read_log_history_file(self):
    """Load the per-cell log history JSON, or [] when unavailable."""
    path = self.log_history_file_path
    if path is None or not os.path.exists(path):
        return []
    with open(path, 'r') as f:
        return json.load(f)
def _write_log_history_file(self, data):
    """Append this execution's record to the per-cell history JSON and
    symlink the log file next to it."""
    if self.log_history_file_path is None:
        self.log.debug('Skipped to save log history')
        return
    data.append(self.exec_info.to_log())
    pathdir = os.path.dirname(self.log_history_file_path)
    if not os.path.exists(pathdir):
        os.makedirs(pathdir)
    # Relative symlink from the cell's history dir to the dated log file.
    # NOTE(review): os.symlink raises if the link already exists — confirm
    # the timestamped log file names are unique enough to avoid that.
    log_full_dir, log_filename = os.path.split(self.file_full_path)
    log_full_dir, log_dirname = os.path.split(log_full_dir)
    os.symlink(os.path.join('..', log_dirname, log_filename),
               os.path.join(pathdir, os.path.basename(self.file_full_path)))
    with open(self.log_history_file_path, 'w') as f:
        json.dump(data, f)
    self.log.debug('Log history saved: {}'.format(self.log_history_file_path))
    # Mark the history as written for this execution.
    self.log_history_file_path = None
def close_files(self):
    """Finish the execution log: append the footer, flush, close the file
    and record it in the per-cell log history."""
    self.log.debug('>>>>> close_files')
    if self.log_file_object is not None:
        self.exec_info.finished(len(self.keyword_buff))
        self.log_buff_append(u'\n----\n{}----\n'.format(self.exec_info.to_logfile_footer()))
        for result in self.result_files:
            self.log_buff_append(u'result: {}\n'.format(result))
        self.log_buff_append(u'execute_reply_status: {}\n'.format(self.exec_info.execute_reply_status))
        # Block any further buffering, then force the final flush to disk.
        self.block_messages = True
        self._log_buff_flush(force=True)
        self.close_log_file()
        #save log file path
        self._write_log_history_file(self.log_history_data)
def _init_default_config(self):
    """Reset summarize line counts to their built-in defaults
    (may be overridden later by _load_env)."""
    self.summarize_start_lines = 50    # lines of output before summarizing kicks in
    self.summarize_header_lines = 20   # lines kept from the start of the output
    self.summarize_exec_lines = 1      # in-progress lines shown while running
    self.summarize_footer_lines = 20   # lines kept from the end of the output
def _start_summarize(self):
    """Reset the per-execution summarize buffers."""
    self.count = 0                    # stream lines seen so far
    self.summarize_header_buff = []   # first lines of the output
    self.summarize_last_buff = []     # sliding window of the newest lines
def _start_log(self):
    """Reset per-execution log state and open a fresh log file."""
    self.block_messages = False
    self.log_buff = []
    self.keyword_buff = []
    self.result_files = []
    self.file_full_path = None
    self.log_file_object = None
    self.open_log_file(self.log_path)
def _store_result(self, result):
    """Pickle a display/execute result beside the log file so it can be
    re-sent after the final clear_output."""
    if self.file_full_path is None:
        self.log.error('Log file already closed. Skip to store results')
        return
    log_dir, log_name = os.path.split(self.file_full_path)
    stem, _ = os.path.splitext(log_name)
    # Number the pickles by how many results are stored so far.
    result_file = os.path.join(
        log_dir, u'{}-{}.pkl'.format(stem, len(self.result_files)))
    with open(result_file, 'wb') as f:
        pickle.dump(result, f)
    self.result_files.append(result_file)
def _store_last_lines(self, content_text_list):
    """Maintain summarize_last_buff as a sliding window of the newest lines."""
    # save the last few lines
    lines = max(self.summarize_footer_lines, self.summarize_start_lines)
    if len(content_text_list) < lines:
        # Drop just enough old lines to make room for the new chunk.
        if len(content_text_list) + len(self.summarize_last_buff) > lines:
            del self.summarize_last_buff[:len(content_text_list)]
        self.summarize_last_buff.extend(content_text_list)
    else:
        # The new chunk alone fills the window; keep only its tail.
        del self.summarize_last_buff[:]
        self.summarize_last_buff.extend(content_text_list[-lines:])
def _output_hook(self, msg=None):
    """Log and optionally summarize one iopub message of the current cell.

    Returns the (possibly rewritten) content to forward to the frontend.
    """
    msg_type = msg['header']['msg_type']
    content = msg['content']
    if msg_type == 'stream':
        if 'ExecutionResult' in content['text']:
            # Internal marker text: pass through untouched.
            return content
        else:
            self.log_buff_append(content['text'])
            self._log_buff_flush()
            content_text_list = content['text'].splitlines(False)  # with LF
            # save the stderr messages
            if content['name'] == 'stderr':
                self.keyword_buff_append(content_text_list)
            # save the sentences the keyword matched
            elif not self.repatter is None and len(self.repatter) > 0:
                for text in content_text_list:
                    matched = self.highlight_keywords(text)
                    if matched is not None:
                        self.keyword_buff_append(matched, highlight=False)
            if self.summarize_on:
                return self._summarize_stream_output(msg, content, content_text_list)
            return content
    elif msg_type in ('display_data', 'execute_result'):
        # Re-count and persist so the result can be re-sent after clear_output.
        execute_result = content.copy()
        execute_result['execution_count'] = self.execution_count
        self._store_result({'msg_type': msg_type, 'content': execute_result})
        return execute_result
    elif msg_type == 'error':
        error_result = content.copy()
        error_result['execution_count'] = self.execution_count
        self._store_result({'msg_type': msg_type, 'content': error_result})
        return error_result
    return content
def _summarize_stream_output(self, msg, content, lines):
# save the first few lines
if len(self.summarize_header_buff) < self.summarize_header_lines:
self.summarize_header_buff.extend(lines)
self._store_last_lines(lines)
if self.count < self.summarize_start_lines:
self.count += len(lines)
stream_content = {'name': content['name'], 'text': content['text']}
else:
self._log_buff_flush()
self.send_clear_content_msg()
stream_text = u''
stream_text += self.exec_info.to_stream() + u'----\n'
stream_text += u'{}\n'.format('\n'.join(self.summarize_header_buff[:self.summarize_header_lines]))
stream_text += self.display_keyword_buff()
stream_text += u'...\n'
stream_text += u'{}'.format('\n'.join(lines[:self.summarize_exec_lines]))
stream_content = {'name': 'stdout', 'text': stream_text}
return stream_content
def _send_last_stdout_stream_text(self):
self.log.debug('_flush_stdout_stream')
self.close_files()
if self.summarize_on:
self._send_last_summarized_stdout_stream_text()
self.result_files = []
def _send_last_summarized_stdout_stream_text(self):
self.send_clear_content_msg()
stream_text = u''
stream_text += self.exec_info.to_stream(len(self.log_history_data)) + u'----\n'
if self.count < self.summarize_start_lines:
stream_text += u'\n'.join(self.summarize_last_buff)
else:
stream_text += u'{}\n'.format('\n'.join(self.summarize_header_buff[:self.summarize_header_lines]))
stream_text += self.display_keyword_buff()
stream_text += u'...\n'
stream_text += u'{}'.format('\n'.join(self.summarize_last_buff[-self.summarize_footer_lines:]))
stream_content = {'name': 'stdout', 'text': stream_text}
self.send_response(self.iopub_socket, 'stream', stream_content)
# Send exeuction result again because last result can be cleared
for resultf in self.result_files:
with open(resultf, 'rb') as f:
result = pickle.load(f)
self.session.send(self.iopub_socket,
result['msg_type'],
result['content'],
self._parent_header,
ident=None,
buffers=None,
track=False,
header=None,
metadata=None)
def _get_cell_id(self, parent):
if 'content' not in parent:
return None
content = parent['content']
if 'lc_cell_data' not in content:
return None
lc_cell_data = content['lc_cell_data']
if 'lc_cell_meme' not in lc_cell_data:
return None
lc_cell_meme = lc_cell_data['lc_cell_meme']
if 'current' not in lc_cell_meme:
return None
return lc_cell_meme['current']
def _get_notebook_data(self, parent):
if 'content' not in parent:
return None
content = parent['content']
if 'lc_notebook_data' not in content:
return None
return content['lc_notebook_data']
def do_shutdown(self, restart):
    """Shut down the wrapper and everything it owns.

    Closes the current log file, the optional fluentd sender, the wrapped
    kernel, and the per-channel reader threads, then returns the standard
    shutdown_reply content.

    restart -- True when the frontend intends to restart the kernel.
    """
    self.log.debug('>>>>> do_shutdown')
    # Flush buffers / close the log file (no-op when already closed).
    self.close_files()
    if self.sender is not None:
        self.log.debug('close fluent logger sender')
        self.sender.close()
    self.log.info('stopping wrapped kernel')
    # 'km' may be missing if start_ipython_kernel() failed early.
    if hasattr(self, "km"):
        self.km.shutdown_kernel(restart=restart)
    for channel, thread in self.threads.items():
        self.log.info('stopping %s ChannelReaderThread', channel)
        thread.stop()
    return {'status': 'ok', 'restart': restart}
class LCWrapperKernelManager(IOLoopKernelManager):
    """Kernel manager for LC_wrapper kernel"""

    def shutdown_kernel(self, now=False, restart=False):
        """Shut down the wrapper kernel, interrupting it first.

        The extra interrupt before the shutdown request gives the wrapper a
        chance to break out of a blocking ``input_request``.

        now     -- kill immediately instead of requesting a clean shutdown.
        restart -- keep the connection file for a subsequent restart.
        """
        # Stop monitoring for restarting while we shutdown.
        self.stop_restarter()
        self.log.debug("Interrupting the wrapper kernel and its subprocesses")
        self.interrupt_kernel()
        # NOTE(review): unconditional 5 s sleep delays every shutdown;
        # presumably needed for the interrupt to propagate -- confirm.
        time.sleep(5.0)
        if now:
            self._kill_kernel()
        else:
            self.request_shutdown(restart=restart)
            # Don't send any additional kernel kill messages immediately, to give
            # the kernel a chance to properly execute shutdown actions. Wait for at
            # most 1s, checking every 0.1s.
            self.finish_shutdown()
        self.cleanup(connection_file=not restart)
# Fix the issue where input_request could not be interrupted
from __future__ import print_function
try:
from queue import Empty # Python 3
except ImportError:
from Queue import Empty # Python 2
import time
import io
from ipykernel.kernelbase import Kernel
from datetime import datetime
import os
import os.path
import tempfile
from jupyter_client.manager import KernelManager
from jupyter_client.ioloop import IOLoopKernelManager
from jupyter_core.application import JupyterApp
import re
import json
from threading import (Thread, Event, Timer)
try:
from os import getcwdu as getcwd # Python 2
except ImportError:
from os import getcwd # Python 3
import pickle
import dateutil
from .log import ExecutionInfo
from traitlets.config.configurable import LoggingConfigurable, MultipleInstanceError
from traitlets import (
Unicode, List, default
)
from ipython_genutils import py3compat
from ipython_genutils.py3compat import PY3
from types import MethodType
from fluent import sender
SUMMARIZE_KEY = 'lc_wrapper'
IGNORE_SUMMARIZE_KEY = 'lc_wrapper_regex'
FORCE_SUMMARIZE_KEY = 'lc_wrapper_force'
IPYTHON_DEFAULT_PATTERN_FILE = '.lc_wrapper_regex.txt'
IPYTHON_DEFAULT_PATTERN = '''ERROR|error|Error|Panic|panic|Invalid|invalid|Warning|warning|Bad|bad
FAIL|Fail|fail
(Not|not) (Found|found)
(Device)? not ready
out of (Memory|memory)
interrupt(ed)?|abort(ed)?|stop(ped)?
insecure|inaccessible|Forbidden|forbidden|Denied|denied
Unauthorised|unauthorised|Unauthorized|unauthorized
(No|no|Low|low) (.+ )?(Capacity|capacity|Space|space)
has (encountered|stopped)
is not
initialize(d)?|initialise(d)?|start(ed)?|restart(ed)?|spawn(ed)?|complete(d)?
finish(ed)?|resume(d)?|begin|attach(ed)?|detach(ed)?|reboot(ed)?|suspend(ed)?
done|terminate(d)?|open(ed)?|close(d)?|(dis)?connect(ed)?|establish(ed)?
allocate(d)?|assign(ed)?|load(ed)?|(in|re)?activate(d)?|block(ed)?|kill(ed)?
refuse(d)?|insufficient|lack
link(ed)? (up|down)'''
class ChannelReaderThread(Thread, LoggingConfigurable):
    """Daemon thread that proxies one wrapped-kernel channel to the frontend.

    One instance is started per proxied channel ('iopub' and 'stdin').  It
    polls the wrapped kernel's channel, rewrites parent headers so messages
    appear to originate from the wrapper kernel, forwards them on the
    wrapper's corresponding ZMQ stream, and services stdin
    ``input_request`` round-trips (including interruption via Ctrl-C).
    """

    # Set by stop(); checked after every poll so the loop exits promptly.
    _exiting = False

    def __init__(self, kernel, client, stream, session, channel, **kwargs):
        """Bind the thread to *channel* of the wrapped kernel *client*.

        kernel  -- the wrapper kernel instance
        client  -- blocking client connected to the wrapped kernel
        stream  -- the wrapper's frontend-facing socket for this channel
        session -- wrapper session used to send messages to the frontend
        channel -- channel name: 'iopub' or 'stdin'
        """
        Thread.__init__(self, **kwargs)
        LoggingConfigurable.__init__(self, **kwargs)
        self.daemon = True
        self.channel_name = channel
        # e.g. client.iopub_channel / client.stdin_channel
        self.channel = getattr(client, channel + "_channel")
        self.kernel = kernel
        self.client = client
        self.stream = stream
        self.session = session
        self.log.debug("init ChannelReaderThread: channel_name=%s",
                       self.channel_name)

    def run(self):
        """Poll the wrapped kernel's channel and relay messages until stopped."""
        self.log.debug("start ChannelReaderThread: channel_name=%s",
                       self.channel_name)
        while True:
            try:
                msg = self.channel.get_msg(block=True, timeout=0.2)
                self.log.debug("Received %s message: %s",
                               self.channel_name, str(msg))
                msg_type = msg['msg_type']
                idle = False
                status_msg = False
                if self.channel_name == 'iopub':
                    content = msg['content']
                    if msg_type == 'status':
                        # Status messages are consumed here, never forwarded.
                        status_msg = True
                        if content['execution_state'] == 'idle':
                            self.kernel.idle_parent_header = msg['parent_header']
                            idle = True
                if msg['parent_header']['msg_type'] == 'shutdown_request':
                    # Drop traffic belonging to the shutdown handshake.
                    continue
                msg_id = msg['parent_header']['msg_id']
                # Map the wrapped kernel's request id back to the original
                # frontend request header.
                parent_header = self.kernel.parent_headers.get(msg_id)
                self.log.debug("parent_header: %s", str(parent_header))
                if self.channel_name == 'iopub':
                    ident = self.kernel._topic(msg_type)
                    # Gives the wrapper a chance to log/summarize output.
                    msg_content = self.kernel._hook_iopub_msg(parent_header, msg)
                else:
                    ident = self.kernel._parent_ident
                    msg_content = msg['content']
                if not status_msg:
                    self.session.send(self.stream,
                                      msg_type,
                                      msg_content,
                                      parent=parent_header,
                                      ident=ident,
                                      header=msg['header'],
                                      metadata=msg['metadata'],
                                      buffers=msg['buffers'])
                if self.channel_name == 'stdin' and msg_type == 'input_request':
                    self.log.debug("do input_request")
                    self.input_request()
                if idle:
                    self.kernel.idle_event.set()
                    parent_msg_id = msg['parent_header'].get('msg_id')
                    if parent_msg_id is not None:
                        self.kernel._remove_parent_header(parent_msg_id)
            except Empty:
                # Poll timeout; loop again (and honor _exiting below).
                pass
            except Exception as e:
                self.log.error(e, exc_info=True)
            finally:
                if self._exiting:
                    break
        self.log.debug("exit ChannelReaderThread: %s", self.channel_name)

    def input_request(self):
        """Relay one input_reply from the frontend to the wrapped kernel.

        Blocks (in 200 ms polls) until the frontend answers; gives up when
        the wrapper has seen a KeyboardInterrupt so Ctrl-C can cancel a
        pending ``input()`` in the wrapped kernel.
        """
        self.log.debug("wait input_reply")
        while True:
            try:
                reply = self._get_msg_from_frontend()
            except Empty:
                if self.kernel.keyboard_interrupt:
                    self.log.debug("input_request: interrupted")
                    return
            except Exception:
                self.log.warn("Invalid Message:", exc_info=True)
            else:
                break
        self.log.debug("input_reply: %s", str(reply))
        msg = self.client.session.msg(reply['msg_type'],
                                      content=reply['content'],
                                      parent=reply['parent_header'],
                                      header=reply['header'],
                                      metadata=reply['metadata'])
        self.client.stdin_channel.send(msg)

    def _get_msg_from_frontend(self, timeout=200):
        """Wait up to *timeout* ms for a frontend message; raise Empty on timeout."""
        ready = self.stream.poll(timeout)
        if ready:
            return self._recv_from_frontend()
        else:
            raise Empty

    def _recv_from_frontend(self, **kwargs):
        """Receive and deserialize one multipart message from the frontend."""
        msg = self.stream.recv_multipart(**kwargs)
        ident, smsg = self.session.feed_identities(msg)
        return self.session.deserialize(smsg)

    def stop(self):
        """Signal the reader loop to exit and join the thread."""
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive()
        # exists on both Python 2.6+ and Python 3.
        if self.is_alive():
            self._exiting = True
            self.join()
class BufferedKernelBase(Kernel):
blocking_msg_types = [
'execute_request',
'history_request',
'complete_request',
'inspect_request',
'kernel_info_request',
'comm_info_request',
'shutdown_request'
]
proxy_channles = ['iopub', 'stdin']
threads = {}
parent_headers = {}
idle_event = Event()
idle_parent_header = None
keyboard_interrupt = False
execute_request_msg_id = None
log_file_object = None
data_dir = Unicode()
@default('data_dir')
def _data_dir_default(self):
app = None
try:
if JupyterApp.initialized():
app = JupyterApp.instance()
except MultipleInstanceError:
pass
if app is None:
# create an app, without the global instance
app = JupyterApp()
app.initialize(argv=[])
return app.data_dir
server_signature_file = Unicode(
help="""The file where the server signature is stored."""
).tag(config=True)
@default('server_signature_file')
def _server_signature_file_default(self):
if 'lc_nblineage_server_signature_path' in os.environ:
return os.environ['lc_nblineage_server_signature_path']
if not self.data_dir:
return ''
return os.path.join(self.data_dir, 'server_signature')
keyword_pattern_file_paths = List()
@default('keyword_pattern_file_paths')
def _keyword_pattern_file_paths_default(self):
return [
os.path.join(self.get_notebook_path(), IPYTHON_DEFAULT_PATTERN_FILE),
os.path.join(os.path.expanduser('~/'), IPYTHON_DEFAULT_PATTERN_FILE)
]
log_dirs = List()
@default('log_dirs')
def _log_dirs_default(self):
return [
os.path.join(self.get_notebook_path(), '.log'),
os.path.expanduser('~/.log')
]
configfile_paths = List()
@default('configfile_paths')
def _configfile_paths_default(self):
return [
os.path.join(self.get_notebook_path(), '.lc_wrapper'),
os.path.join(os.path.expanduser('~/'), '.lc_wrapper')
]
def __init__(self, **kwargs):
Kernel.__init__(self, **kwargs)
if 'lc_wrapper_fluentd_host' in os.environ:
fluentd_host = os.environ['lc_wrapper_fluentd_host']
fluentd_port = int(os.environ.get('lc_wrapper_fluentd_port', '24224'))
fluentd_tag = os.environ.get('lc_wrapper_fluentd_tag', 'lc_wrapper')
self.sender = sender.FluentSender(fluentd_tag,
host=fluentd_host,
port=fluentd_port)
self.log.info('lc_wrapper: Enabled fluent logger: host=%s, port=%s, tag=%s',
fluentd_host, fluentd_port, fluentd_tag)
else:
self.sender = None
self._init_message_handler()
self.start_ipython_kernel()
def _init_message_handler(self):
def handler(self, stream, ident, parent):
self.log.debug("Received shell message: %s", str(parent))
msg_type = parent['msg_type']
content = parent['content']
self._hook_request_msg(parent)
self.idle_event.clear()
self.keyboard_interrupt = False
msg = self.kc.session.msg(msg_type, content)
msgid = msg['header']['msg_id']
self.log.debug("save parent_header: %s => %s", msgid, str(parent['header']))
self.parent_headers[msgid] = parent['header']
self.kc.shell_channel.send(msg)
reply_msg = None
if msg_type in self.blocking_msg_types:
while True:
try:
reply_msg = self.kc._recv_reply(msgid, timeout=None)
break
except KeyboardInterrupt:
self.log.debug("KeyboardInterrupt", exc_info=True)
# propagate SIGINT to wrapped kernel
self.km.interrupt_kernel()
self.keyboard_interrupt = True
# this timer fire when the ipython kernel didnot interrupt within 5.0 sec.
self.timer = Timer(5.0, self.close_files)
self.log.debug('>>>>> close files: timer fired')
self.timer.start()
reply_msg_content = self._hook_reply_msg(reply_msg)
self.log.debug('reply: %s', reply_msg)
reply_msg = self.session.send(stream,
reply_msg['msg_type'],
reply_msg_content,
parent, ident,
header=reply_msg['header'],
metadata=reply_msg['metadata'],
buffers=reply_msg['buffers'])
self._post_send_reply_msg(parent, reply_msg)
self._wait_for_idle(msgid)
self._post_wait_for_idle(parent, reply_msg)
for msg_type in self.msg_types:
if msg_type == 'kernel_info_request':
continue
if msg_type == 'shutdown_request':
continue
self.log.debug('override shell message handler: msg_type=%s', msg_type)
if PY3:
setattr(self, msg_type, MethodType(handler, self))
else:
setattr(self, msg_type, MethodType(handler, self, type(self)))
self.shell_handlers[msg_type] = getattr(self, msg_type)
comm_msg_types = ['comm_open', 'comm_msg', 'comm_close']
for msg_type in comm_msg_types:
self.log.debug('init shell comm message handler: msg_type=%s', msg_type)
if PY3:
setattr(self, msg_type, MethodType(handler, self))
else:
setattr(self, msg_type, MethodType(handler, self, type(self)))
self.shell_handlers[msg_type] = getattr(self, msg_type)
def start_ipython_kernel(self):
kernel_name = self._get_wrapped_kernel_name()
self.km = KernelManager(kernel_name=kernel_name,
client_class='jupyter_client.blocking.BlockingKernelClient')
self.log.debug('kernel_manager: %s', str(self.km))
self.log.info('start wrapped kernel: %s', kernel_name)
self.km.start_kernel()
self.kc = self.km.client()
self.log.debug('kernel_client: %s', str(self.kc))
self.log.debug('start_channels')
self.kc.start_channels()
try:
self.log.debug('wait for ready of wrapped kernel')
self.kc.wait_for_ready(timeout=None)
except RuntimeError:
self.kc.stop_channels()
self.km.shutdown_kernel()
raise
for channel in self.proxy_channles:
stream = getattr(self, channel + '_socket')
thread = ChannelReaderThread(self, self.kc, stream, self.session, channel)
thread.start()
self.threads[channel] = thread
for log_dir in self.log_dirs:
if self._is_writable_dir(log_dir):
self.log_path = log_dir
break
self.log.debug('log output directory: %s', self.log_path)
if self._find_default_keyword_pattern_file() is None:
self.log.info('default keyword pattern file "%s" not found', IPYTHON_DEFAULT_PATTERN_FILE)
try:
self._generate_default_keyword_pattern_file()
except Exception as e:
self.log.exception("failed to generate default keyword pattern file: %s", e)
self.exec_info = None
self.notebook_path = self.get_notebook_path()
self.log.debug('notebook_path: %s', self.notebook_path)
def _is_writable_dir(self, path):
temp_dir = None
try:
if not os.path.exists(path):
os.makedirs(path)
temp_dir = tempfile.mkdtemp(dir=path)
return True
except (OSError, IOError) as e:
self.log.debug("_is_writable_dir: %s", e)
return False
finally:
if temp_dir is not None:
os.rmdir(temp_dir)
def _get_wrapped_kernel_name(self, km):
raise NotImplementedError()
def _remove_parent_header(self, msg_id):
if msg_id in self.parent_headers:
parent_header = self.parent_headers[msg_id]
self.log.debug("remove parent_header: %s => %s", msg_id, str(parent_header))
del self.parent_headers[msg_id]
def _hook_request_msg(self, parent):
msg_type = parent['msg_type']
if msg_type == 'execute_request':
self._hook_execute_request_msg(parent)
def _hook_execute_request_msg(self, parent):
try:
content = parent[u'content']
code = py3compat.cast_unicode_py2(content[u'code'])
silent = content[u'silent']
allow_stdin = content.get('allow_stdin', False)
except:
self.log.error("Got bad msg: ")
self.log.error("%s", parent)
return
self.execute_request_msg_id = parent['header']['msg_id']
if not silent:
self.execution_count += 1
cell_log_id = self._get_cell_id(parent)
if cell_log_id is not None:
self.log_history_file_path = os.path.join(self.log_path,
cell_log_id,
cell_log_id + u'.json')
else:
self.log_history_file_path = None
self.log_history_id = cell_log_id
self.log_history_data = self._read_log_history_file()
notebook_data = self._get_notebook_data(parent)
self.exec_info = ExecutionInfo(code, self.get_server_signature(), notebook_data)
if not silent:
env = self._get_config()
self.summarize_on, new_code = self.is_summarize_on(code, env)
self._init_default_config()
self._start_log()
if self.summarize_on:
self._start_summarize()
self._load_env(env)
if not self.log_history_id is None:
meme = {'lc_cell_meme': {'current': self.log_history_id}}
self.log_buff_append(u'{}\n----\n'.format(json.dumps(meme)))
self.log_buff_append(u'{}\n----\n'.format(code)) # code
self._log_buff_flush()
self.log_buff_append(self.exec_info.to_logfile_header() + u'----\n')
content[u'code'] = new_code
self._allow_stdin = allow_stdin
def _hook_reply_msg(self, reply_msg):
if reply_msg['msg_type'] == 'execute_reply':
return self._hook_execute_reply_msg(reply_msg)
return reply_msg['content']
def _hook_execute_reply_msg(self, reply):
if hasattr(self, "timer"):
self.timer.cancel()
self.log.debug('>>>>> close files: timer cancelled')
content = reply['content']
content['execution_count'] = self.execution_count
content['lc_wrapper'] = {
'log_path': self.file_full_path
}
self.exec_info.execute_reply_status = content['status']
return content
def _post_send_reply_msg(self, parent, reply_msg):
msg_type = parent['msg_type']
if msg_type == 'execute_request':
content = parent['content']
silent = content['silent']
stop_on_error = content.get('stop_on_error', True)
if not silent and reply_msg['content']['status'] == u'error' and stop_on_error:
self._abort_queues()
def _post_wait_for_idle(self, parent, reply_msg):
if reply_msg is None:
return
if reply_msg['msg_type'] == 'execute_reply':
self.log.debug('flushing stdout stream')
self._send_last_stdout_stream_text()
self.log.debug('flushed stdout stream')
self.execute_request_msg_id = None
def _hook_iopub_msg(self, parent_header, msg):
msg_id = parent_header['msg_id']
content = msg['content']
# replace msg_id in the content
self._replace_msg_id(msg_id, msg['parent_header']['msg_id'], content)
if self.execute_request_msg_id == msg_id:
return self._output_hook(msg)
return content
def _replace_msg_id(self, msg_id, wrapped_msg_id, content):
for k, v in content.items():
if isinstance(v, dict):
self._replace_msg_id(msg_id, wrapped_msg_id, v)
elif v == wrapped_msg_id:
content[k] = msg_id
self.log.debug('replace msg_id in content: %s => %s',
wrapped_msg_id, msg_id)
def _write_log(self, msg):
if not msg is None:
self.log_file_object.write(msg)
self.exec_info.file_size = self.log_file_object.tell()
def open_log_file(self, path):
self.log.debug('>>>>> open_log_file')
now = datetime.now(dateutil.tz.tzlocal())
path = os.path.join(path, now.strftime("%Y%m%d"))
if not os.path.exists(path):
os.makedirs(path)
file_name = now.strftime("%Y%m%d-%H%M%S") + "-%04d" % (now.microsecond // 1000)
self.file_full_path = os.path.join(path, file_name + u'.log')
self.exec_info.log_path = self.file_full_path
self.log_file_object = io.open(self.file_full_path, "a", encoding='utf-8')
self.log.debug(self.file_full_path)
self.log.debug(self.log_file_object)
def close_log_file(self):
self.log.debug('>>>>> close_log_file')
if self.log_file_object is None:
self.log.debug('>>>>> close_log_file: not executed because self.log_file_object is None')
return
if not self.log_file_object.closed:
self.log.debug('>>>>> log file closed')
self.log_file_object.close()
self.send_fluent_log()
else:
self.log.debug('>>>>> close_log_file: not executed because self.log_file_object is already closed')
self.log.debug('close_log_file: self.log_file_object = None')
self.log_file_object = None
def send_fluent_log(self):
if self.sender is None:
return
self.log.debug('>>>>> send_fluent_log')
record = {}
with io.open(self.exec_info.log_path, 'r') as f:
record['log'] = f.read()
self.sender.emit(None, record)
self.log.info('lc_wrapper: send_fluent_log: cell_meme=%s, uid=%s, gid=%s',
self.log_history_id, os.getuid(), os.getgid(), self.get_server_signature())
def get_server_signature(self):
if os.path.exists(self.server_signature_file):
with io.open(self.server_signature_file, 'r') as f:
return f.read()
else:
return None
def _wait_for_idle(self, msg_id):
self.log.debug('waiting for idle: msg_id=%s', msg_id)
while True:
self.idle_event.wait()
if self.idle_parent_header['msg_id'] != msg_id:
self.log.warn('unexpected idle message received: expected msg_id=%s, received msg_id=%s',
msg_id, self.idle_parent_header['msg_id'])
continue
self.log.debug('idle: msg_id=%s', msg_id)
return
def get_notebook_path(self):
return getcwd()
def _find_config_file(self):
for path in self.configfile_paths:
if os.path.exists(path):
return path
return None
def _get_config(self):
env = os.environ
config_path = self._find_config_file()
if config_path is None:
return env
line_pattern = re.compile(r'(\S+)=(".*?"|\S+)')
config = {}
with io.open(config_path, 'r', encoding='utf-8') as f:
for l in f.readlines():
l = l.strip()
if len(l) == 0 or l.startswith('#'):
continue
m = line_pattern.match(l)
if m:
config[m.group(1)] = m.group(2)
else:
self.log.warning('Unexpected line: {} at {}'.format(l, config_path))
for k, v in env.items():
config[k] = v
return config
def send_clear_content_msg(self):
clear_content = {'wait': True}
self.session.send(self.iopub_socket, 'clear_output', clear_content, self._parent_header,
ident=None, buffers=None, track=False, header=None, metadata=None)
def _load_env(self, env):
summarize = env.get(SUMMARIZE_KEY, '')
self.log.debug("lc_wrapper = " + summarize)
summarize_pattern = re.compile(r'^([0-9]*):([0-9]*):([0-9]*):([0-9]*)$')
summarize_params = summarize_pattern.match(summarize)
if summarize_params is not None and len(summarize_params.group(1)) != 0:
self.summarize_start_lines = int(summarize_params.group(1))
if summarize_params is not None and len(summarize_params.group(2)) != 0:
self.summarize_header_lines = int(summarize_params.group(2))
if summarize_params is not None and len(summarize_params.group(3)) != 0:
self.summarize_exec_lines = int(summarize_params.group(3))
if summarize_params is not None and len(summarize_params.group(4)) != 0:
self.summarize_footer_lines = int(summarize_params.group(4))
self.summarize_start_lines = max(self.summarize_start_lines,
self.summarize_header_lines + \
self.summarize_footer_lines + 1)
self.log_history_data = self._read_log_history_file()
self.repatter = []
text = env.get(IGNORE_SUMMARIZE_KEY, 'file:default')
if text is None or len(text) == 0:
pass
elif 'file:' in text:
file_name = text[text.rfind('find:')+6:].strip()
if file_name == 'default':
file_path = self._find_default_keyword_pattern_file()
else:
file_path = os.path.join(self.notebook_path, file_name)
if file_path is None:
self.keyword_buff_append(u'error : {} Not found'.format(IPYTHON_DEFAULT_PATTERN_FILE))
self.log.warning('lc_wrapper_regex: %s Not found', IPYTHON_DEFAULT_PATTERN_FILE)
elif os.path.exists(file_path):
try:
patterns = self._read_keyword_pattern_file(file_path)
for ptxt in patterns:
self.repatter.append(re.compile(ptxt))
except Exception as e:
self.keyword_buff_append(u'error : ' + str(e))
self.log.exception("lc_wrapper_regex: %s", e)
else:
self.keyword_buff_append(u'error : {} Not found'.format(file_path))
self.log.warning('lc_wrapper_regex: %s Not found', file_path)
else:
try:
self.repatter.append(re.compile(text))
except Exception as e:
self.keyword_buff_append(u'error : ' + str(e))
self.log.exception("lc_wrapper_regex: %s", e)
def _find_default_keyword_pattern_file(self):
for path in self.keyword_pattern_file_paths:
if os.path.exists(path):
return path
return None
def _read_keyword_pattern_file(self, filename):
with open(filename, 'r') as file:
patterns = file.readlines()
patterns = [x.strip() for x in patterns if len(x.strip()) > 0]
self.log.debug('patterns :')
for patt in patterns:
self.log.debug(patt)
return patterns
def _generate_default_keyword_pattern_file(self):
error = None
self.log.info('generate default keyword pattern file')
for path in self.keyword_pattern_file_paths:
if not os.path.exists(path):
try:
with open(path, 'w') as f:
f.write(IPYTHON_DEFAULT_PATTERN)
self.log.info('generated default keyword pattern file: %s', path)
return
except Exception as e:
self.log.debug('_generate_default_keyword_pattern_file: %s', str(e))
error = e
if error is not None:
raise error
def is_summarize_on(self, code, env):
    """Decide whether output summarizing is enabled for this cell.

    A leading '!!' in *code* enables it and is stripped from the code;
    the FORCE_SUMMARIZE_KEY env value ('on'/'off') overrides either way.
    Returns (enabled, code_without_marker).
    """
    force = None
    if FORCE_SUMMARIZE_KEY in env:
        switch = env[FORCE_SUMMARIZE_KEY].strip().lower()
        if switch == 'on':
            force = True
        elif switch == 'off':
            force = False
    marker = re.match(r'^\s*!!', code, re.M)
    if marker:
        enabled = True if force is None else force
        return (enabled, code[marker.end():])
    enabled = False if force is None else force
    return (enabled, code)
def _log_buff_flush(self, force=False):
if force or len(self.log_buff) > 100:
self._write_log(u''.join(self.log_buff))
del self.log_buff[:]
def log_buff_append(self, text=None):
if self.block_messages:
return
if not text is None:
if isinstance(text, list):
self.log_buff.extend(text)
else:
self.log_buff.append(text)
def keyword_buff_append(self, text, highlight=True):
    """Append *text* (a line or a list of lines) to the keyword buffer.

    When *highlight* is true each line is wrapped in ANSI red escapes.
    """
    def decorate(line):
        # \033[0;31m ... \033[0m == red foreground
        if highlight:
            return u'\033[0;31m{}\033[0m'.format(line)
        return line

    if isinstance(text, list):
        self.keyword_buff.extend(decorate(t) for t in text)
    else:
        self.keyword_buff.append(decorate(text))
def display_keyword_buff(self):
    """Render the matched-keyword lines for the summary view.

    Shows at most ``summarize_header_lines * 2`` lines, followed by an
    overflow notice when more were matched.  Empty buffer -> ''.
    """
    if not self.keyword_buff:
        return ''
    limit = self.summarize_header_lines * 2
    parts = [u'...\n', u'\n'.join(self.keyword_buff[:limit]) + '\n']
    if len(self.keyword_buff) > limit:
        notice = u'Matched lines exceed maximum number of view ({})' \
            .format(limit)
        parts.append(u'\033[0;31m{}\033[0m\n'.format(notice))
    return u''.join(parts)
def highlight_keywords(self, text):
    """Wrap every keyword occurrence in *text* with ANSI red escapes.

    Returns the highlighted string, or None when no pattern in
    ``self.repatter`` matches.  At each position the longest keyword
    starting there wins, and scanning resumes after it.
    """
    def pattern_hits(s):
        hits = (p.search(s) for p in self.repatter)
        return [m for m in hits if m is not None]

    matches = pattern_hits(text)
    if not matches:
        return None
    pieces = []
    remain = text
    while matches:
        left = min(m.start() for m in matches)
        pieces.append(remain[:left])
        candidates = [m.group() for m in matches if m.start() == left]
        # Same tie-break as before: last of the stable length-sort,
        # i.e. the longest candidate.
        keyword = sorted(candidates, key=lambda s: len(s))[-1]
        pieces.append(u'\033[0;31m{}\033[0m'.format(keyword))
        remain = remain[left + len(keyword):]
        matches = pattern_hits(remain)
    pieces.append(remain)
    return u''.join(pieces)
def _read_log_history_file(self):
if self.log_history_file_path is not None and \
os.path.exists(self.log_history_file_path):
with open(self.log_history_file_path, 'r') as f:
data = json.load(f)
return data
else:
return []
def _write_log_history_file(self, data):
if self.log_history_file_path is None:
self.log.debug('Skipped to save log history')
return
data.append(self.exec_info.to_log())
pathdir = os.path.dirname(self.log_history_file_path)
if not os.path.exists(pathdir):
os.makedirs(pathdir)
log_full_dir, log_filename = os.path.split(self.file_full_path)
log_full_dir, log_dirname = os.path.split(log_full_dir)
os.symlink(os.path.join('..', log_dirname, log_filename),
os.path.join(pathdir, os.path.basename(self.file_full_path)))
with open(self.log_history_file_path, 'w') as f:
json.dump(data, f)
self.log.debug('Log history saved: {}'.format(self.log_history_file_path))
self.log_history_file_path = None
def close_files(self):
self.log.debug('>>>>> close_files')
if self.log_file_object is not None:
self.exec_info.finished(len(self.keyword_buff))
self.log_buff_append(u'\n----\n{}----\n'.format(self.exec_info.to_logfile_footer()))
for result in self.result_files:
self.log_buff_append(u'result: {}\n'.format(result))
self.log_buff_append(u'execute_reply_status: {}\n'.format(self.exec_info.execute_reply_status))
self.block_messages = True
self._log_buff_flush(force=True)
self.close_log_file()
#save log file path
self._write_log_history_file(self.log_history_data)
def _init_default_config(self):
self.summarize_start_lines = 50
self.summarize_header_lines = 20
self.summarize_exec_lines = 1
self.summarize_footer_lines = 20
def _start_summarize(self):
self.count = 0
self.summarize_header_buff = []
self.summarize_last_buff = []
def _start_log(self):
self.block_messages = False
self.log_buff = []
self.keyword_buff = []
self.result_files = []
self.file_full_path = None
self.log_file_object = None
self.open_log_file(self.log_path)
def _store_result(self, result):
if self.file_full_path is None:
self.log.error('Log file already closed. Skip to store results')
return
log_dir, log_name = os.path.split(self.file_full_path)
log_name_body, _ = os.path.splitext(log_name)
result_file = os.path.join(log_dir,
u'{}-{}.pkl'.format(log_name_body,
len(self.result_files)))
with open(result_file, 'wb') as f:
pickle.dump(result, f)
self.result_files.append(result_file)
def _store_last_lines(self, content_text_list):
# save the last few lines
lines = max(self.summarize_footer_lines, self.summarize_start_lines)
if len(content_text_list) < lines:
if len(content_text_list) + len(self.summarize_last_buff) > lines:
del self.summarize_last_buff[:len(content_text_list)]
self.summarize_last_buff.extend(content_text_list)
else:
del self.summarize_last_buff[:]
self.summarize_last_buff.extend(content_text_list[-lines:])
def _output_hook(self, msg=None):
msg_type = msg['header']['msg_type']
content = msg['content']
if msg_type == 'stream':
if 'ExecutionResult' in content['text']:
return content
else:
self.log_buff_append(content['text'])
self._log_buff_flush()
content_text_list = content['text'].splitlines(False) # with LF
# save the stderr messages
if content['name'] == 'stderr':
self.keyword_buff_append(content_text_list)
# save the sentences the keyword matched
elif not self.repatter is None and len(self.repatter) > 0:
for text in content_text_list:
matched = self.highlight_keywords(text)
if matched is not None:
self.keyword_buff_append(matched, highlight=False)
if self.summarize_on:
return self._summarize_stream_output(msg, content, content_text_list)
return content
elif msg_type in ('display_data', 'execute_result'):
execute_result = content.copy()
execute_result['execution_count'] = self.execution_count
self._store_result({'msg_type': msg_type, 'content': execute_result})
return execute_result
elif msg_type == 'error':
error_result = content.copy()
error_result['execution_count'] = self.execution_count
self._store_result({'msg_type': msg_type, 'content': error_result})
return error_result
return content
def _summarize_stream_output(self, msg, content, lines):
# save the first few lines
if len(self.summarize_header_buff) < self.summarize_header_lines:
self.summarize_header_buff.extend(lines)
self._store_last_lines(lines)
if self.count < self.summarize_start_lines:
self.count += len(lines)
stream_content = {'name': content['name'], 'text': content['text']}
else:
self._log_buff_flush()
self.send_clear_content_msg()
stream_text = u''
stream_text += self.exec_info.to_stream() + u'----\n'
stream_text += u'{}\n'.format('\n'.join(self.summarize_header_buff[:self.summarize_header_lines]))
stream_text += self.display_keyword_buff()
stream_text += u'...\n'
stream_text += u'{}'.format('\n'.join(lines[:self.summarize_exec_lines]))
stream_content = {'name': 'stdout', 'text': stream_text}
return stream_content
def _send_last_stdout_stream_text(self):
self.log.debug('_flush_stdout_stream')
self.close_files()
if self.summarize_on:
self._send_last_summarized_stdout_stream_text()
self.result_files = []
def _send_last_summarized_stdout_stream_text(self):
self.send_clear_content_msg()
stream_text = u''
stream_text += self.exec_info.to_stream(len(self.log_history_data)) + u'----\n'
if self.count < self.summarize_start_lines:
stream_text += u'\n'.join(self.summarize_last_buff)
else:
stream_text += u'{}\n'.format('\n'.join(self.summarize_header_buff[:self.summarize_header_lines]))
stream_text += self.display_keyword_buff()
stream_text += u'...\n'
stream_text += u'{}'.format('\n'.join(self.summarize_last_buff[-self.summarize_footer_lines:]))
stream_content = {'name': 'stdout', 'text': stream_text}
self.send_response(self.iopub_socket, 'stream', stream_content)
# Send exeuction result again because last result can be cleared
for resultf in self.result_files:
with open(resultf, 'rb') as f:
result = pickle.load(f)
self.session.send(self.iopub_socket,
result['msg_type'],
result['content'],
self._parent_header,
ident=None,
buffers=None,
track=False,
header=None,
metadata=None)
def _get_cell_id(self, parent):
if 'content' not in parent:
return None
content = parent['content']
if 'lc_cell_data' not in content:
return None
lc_cell_data = content['lc_cell_data']
if 'lc_cell_meme' not in lc_cell_data:
return None
lc_cell_meme = lc_cell_data['lc_cell_meme']
if 'current' not in lc_cell_meme:
return None
return lc_cell_meme['current']
def _get_notebook_data(self, parent):
if 'content' not in parent:
return None
content = parent['content']
if 'lc_notebook_data' not in content:
return None
return content['lc_notebook_data']
def do_shutdown(self, restart):
    """Shut down the wrapped kernel, channel reader threads, and log sender.

    :param restart: whether the kernel is being restarted (forwarded to the
        wrapped kernel manager).
    :returns: the standard shutdown reply content dict.
    """
    self.log.debug('>>>>> do_shutdown')
    self.close_files()
    if self.sender is not None:
        self.log.debug('close fluent logger sender')
        self.sender.close()
    self.log.info('stopping wrapped kernel')
    # The wrapped kernel manager may not exist if startup failed early.
    if hasattr(self, "km"):
        self.km.shutdown_kernel(restart=restart)
    for channel, thread in self.threads.items():
        self.log.info('stopping %s ChannelReaderThread', channel)
        thread.stop()
    return {'status': 'ok', 'restart': restart}
class LCWrapperKernelManager(IOLoopKernelManager):
    """Kernel manager for LC_wrapper kernel."""

    def shutdown_kernel(self, now=False, restart=False):
        """Interrupt the wrapped kernel and then shut it down.

        :param now: kill the kernel immediately instead of requesting a
            graceful shutdown.
        :param restart: whether this shutdown is part of a restart; the
            connection file is kept in that case.
        """
        # Stop monitoring for restarting while we shutdown.
        self.stop_restarter()
        self.log.debug("Interrupting the wrapper kernel and its subprocesses")
        self.interrupt_kernel()
        # Give the interrupted subprocesses a moment to wind down before
        # the actual shutdown request is sent.
        time.sleep(5.0)
        if now:
            self._kill_kernel()
        else:
            self.request_shutdown(restart=restart)
            # Don't send any additional kernel kill messages immediately, to give
            # the kernel a chance to properly execute shutdown actions. Wait for at
            # most 1s, checking every 0.1s.
            self.finish_shutdown()
        self.cleanup(connection_file=not restart)
|
import re
from bson.objectid import ObjectId, InvalidId
import datetime
from girder.models.model_base import AccessControlledModel
from girder.constants import AccessType
from girder.models.model_base import ValidationException
from girder.models.group import Group
from girder import events
from girder.plugins.materialsdatabank.models.slug import Slug, SlugUpdateException
from girder.plugins.materialsdatabank.models.reconstruction import Reconstruction as ReconstructionModel
from girder.plugins.materialsdatabank.models.structure import Structure as StructureModel
from girder.plugins.materialsdatabank.models.projection import Projection as ProjectionModel
from ..constants import ELEMENT_SYMBOLS_LOWER, ELEMENT_SYMBOLS
class Dataset(AccessControlledModel):
def initialize(self):
self.name = 'mdb.datasets'
self.ensureIndices(('authors', 'title', 'atomicSpecies', 'mdbId'))
self.ensureTextIndex({
'authors': 1,
'title': 1
})
self.exposeFields(level=AccessType.READ, fields=(
'_id', 'authors', 'title', 'atomicSpecies', 'doi', 'mdbId'))
def validate(self, dataset):
    """Reject the dataset if its mdbId is already taken; otherwise return it."""
    mdb_id = dataset.get('mdbId')
    if mdb_id is not None:
        # Any existing dataset with the same id makes this one a duplicate.
        if list(self.find(mdb_id=mdb_id, force=True)):
            raise ValidationException('"%s" has already been taken.' % mdb_id, field='mdbId')
    return dataset
def _generate_mdb_id_prefix(self, species):
    """Build a 4-character mdbId prefix from element symbols, padded with 'X'."""
    parts = []
    remaining = 4
    for symbol in species:
        # Only take symbols that still fit into the 4-character budget.
        if len(symbol) <= remaining:
            parts.append(symbol)
            remaining -= len(symbol)
    return ''.join(parts) + 'X' * remaining
def _generate_mdb_id_postfix(self, prefix):
    """Return the next free 5-digit, zero-padded postfix for *prefix*.

    Scans existing datasets whose mdbId starts with *prefix* and returns
    one more than the highest postfix found.
    """
    # Search for existing datasets with this prefix.
    # Raw string avoids the invalid '\d' escape (DeprecationWarning and,
    # eventually, a SyntaxError in newer Python versions).
    regex = re.compile(r'^%s(\d{5})' % prefix)
    query = {
        'mdbId': {
            '$regex': regex
        }
    }
    cursor = super(Dataset, self).find(query, fields=['mdbId'])
    # Track the highest postfix seen so far.
    postfix = 0
    for doc in cursor:
        match = regex.match(doc['mdbId'])
        postfix = max(postfix, int(match.group(1)))
    postfix += 1
    return str(postfix).zfill(5)
def ensure_mdb_id(self, dataset, species, updates):
    """Allocate a unique mdbId for *dataset* if it does not already have one.

    :param dataset: the dataset document being updated.
    :param species: iterable of atomic numbers used to derive the id prefix.
    :param updates: the Mongo update spec; the allocated id is recorded
        under ``$set``.
    :raises Exception: if a unique id cannot be allocated after 5 retries.
    """
    species = [ELEMENT_SYMBOLS[n] for n in species]
    # Bug fix: the key was previously misspelled 'mbdId', so the check never
    # matched and a fresh id was allocated on every call.
    if 'mdbId' not in dataset:
        prefix = self._generate_mdb_id_prefix(species)
        # Try up to 5 times, in case we have overlapping updates
        retry_count = 5
        while True:
            postfix = self._generate_mdb_id_postfix(prefix)
            mdb_id = '%s%s' % (prefix, postfix)
            # Now update atomically the slugs document
            try:
                Slug().add(mdb_id)
                break
            except SlugUpdateException:
                if retry_count == 0:
                    raise Exception('Unable to create new mdb id after 5 retries.')
                retry_count -= 1
        # Now we have allocated the mdbId add it to the dataset model
        updates.setdefault('$set', {})['mdbId'] = mdb_id
def create(self, authors, title=None, doi=None, microscope=None, image_file_id=None,
           user=None, public=False):
    """Create and save a new dataset document and fire the created event.

    The owning user (if any) gets admin access, as does the 'curator'
    group when it exists.

    NOTE(review): ``microscope`` is accepted but never stored here --
    confirm whether that is intentional.
    """
    dataset = {
        'authors': authors,
        'title': title,
        'doi': doi,
        'editable': False,
        'deposited': datetime.datetime.utcnow()
    }
    if image_file_id is not None:
        dataset['imageFileId'] = ObjectId(image_file_id)
    self.setPublic(dataset, public=public)
    if user:
        dataset['userId'] = user['_id']
        self.setUserAccess(dataset, user=user, level=AccessType.ADMIN)
        # Grant the curator group admin rights so curators can approve it.
        curator = list(Group().find({
            'name': 'curator',
        }))
        if len(curator) > 0:
            self.setGroupAccess(dataset, group=curator[0], level=AccessType.ADMIN)
    else:
        dataset['userId'] = None
    dataset = self.save(dataset)
    events.trigger('mdb.dataset.created', {
        'dataset': dataset,
        'user': user
    })
    return dataset
def update(self, dataset, dataset_updates=None, user=None, atomic_species=None, validation=None,
           public=None):
    """Apply partial updates to *dataset* and return the refreshed document.

    :param dataset_updates: dict of property updates; only whitelisted
        properties are applied.
    :param atomic_species: species to merge into the existing set; also
        triggers mdbId allocation.
    :param public: new visibility; approving (making public) fires the
        approval event and records the release timestamp.
    :param validation: validation payload to store as-is.
    """
    query = {
        '_id': dataset['_id']
    }
    if dataset_updates is None:
        dataset_updates = {}
    updates = {}
    # Only whitelisted properties may be changed through this path.
    mutable_props = ['authors', 'title', 'doi', 'editable']
    for prop in dataset_updates:
        if prop in mutable_props:
            updates.setdefault('$set', {})[prop] = dataset_updates[prop]
    if atomic_species is not None:
        # Merge the new species into the existing set (duplicates removed).
        # (The previous nested duplicate `if atomic_species is not None`
        # check was redundant and has been folded into this one.)
        new_atomic_species = set(dataset.get('atomicSpecies', {}))
        new_atomic_species.update(atomic_species)
        updates.setdefault('$set', {})['atomicSpecies'] = list(new_atomic_species)
        self.ensure_mdb_id(dataset, new_atomic_species, updates)
    if public is not None:
        updates.setdefault('$set', {})['public'] = public
        # Trigger event if this dataset is being approved ( being made public )
        if public and not dataset.get('public', False):
            events.trigger('mdb.dataset.approved', {
                'dataset': dataset,
                'approver': user
            })
            updates.setdefault('$set', {})['released'] = datetime.datetime.utcnow()
    if validation is not None:
        updates.setdefault('$set', {})['validation'] = validation
    if updates:
        super(Dataset, self).update(query, update=updates, multi=False)
        return self.load(dataset['_id'], user=user, level=AccessType.READ)
    return dataset
def _normalize_element(self, element):
    """Map an element symbol (case-insensitive) or numeric string to its
    atomic number.

    Raises ValueError if *element* is neither a known symbol nor parsable
    as an integer.
    """
    symbol = element.lower()
    if symbol in ELEMENT_SYMBOLS_LOWER:
        return ELEMENT_SYMBOLS_LOWER.index(symbol)
    # Not a known symbol -- fall back to parsing it as an atomic number.
    return int(element)
def search(self, search_terms=None, atomic_species=None, offset=0, limit=None,
           sort=None, user=None):
    """Yield datasets readable by *user* that match any of *search_terms*.

    Each term is matched against the text index (authors/title), against
    the exact mdbId, and -- when it parses as an element symbol or atomic
    number -- against the atomic species list.

    NOTE(review): the ``atomic_species`` parameter is accepted but never
    used in this method -- confirm whether callers rely on it.
    """
    query = {}
    if search_terms is not None:
        filters = []
        for search in search_terms:
            # Full-text match on the indexed fields (authors, title).
            filters.append({
                '$text': {
                    '$search': search
                }
            })
            # Exact match on the public mdb id.
            filters.append({
                'mdbId': search
            })
            try:
                atomic_number = self._normalize_element(search)
                filters.append({
                    'atomicSpecies': {
                        '$in': [atomic_number]
                    }
                })
            except ValueError:
                # The search term can't be an atomic number
                pass
        query['$or'] = filters
    cursor = super(Dataset, self).find(query=query, sort=sort, user=user)
    for r in self.filterResultsByPermission(cursor=cursor, user=user,
                                            level=AccessType.READ,
                                            limit=limit, offset=offset):
        yield r
def find(self, authors=None, title=None, atomic_species=None, mdb_id=None,
         owner=None, offset=0, limit=None, sort=None, user=None, force=False):
    """Yield datasets matching the given filters.

    ``authors`` and ``title`` are matched as case-insensitive substrings;
    ``atomic_species`` entries that cannot be normalized are silently
    dropped; ``mdb_id`` and ``owner`` are exact matches. With
    ``force=True`` permission filtering is skipped.

    NOTE(review): author/title values are interpolated into regexes
    unescaped -- consider ``re.escape`` if these come from user input.
    """
    query = {}
    if authors is not None:
        if not isinstance(authors, (list, tuple)):
            authors = [authors]
        # Case-insensitive substring match for each author.
        author_regexs = []
        for author in authors:
            author_regexs.append(re.compile('.*%s.*' % author, re.IGNORECASE))
        query['authors'] = {
            '$in': author_regexs
        }
    if title is not None:
        regex = re.compile('.*%s.*' % title, re.IGNORECASE)
        query['title'] = {
            '$regex': regex
        }
    if atomic_species:
        species = []
        for s in atomic_species:
            try:
                atomic_number = self._normalize_element(s)
                species.append(atomic_number)
            except ValueError:
                # The search term can't be an atomic number
                pass
        query['atomicSpecies'] = {
            '$in': species
        }
    if mdb_id is not None:
        query['mdbId'] = mdb_id
    if owner is not None:
        if not isinstance(owner, ObjectId):
            try:
                owner = ObjectId(owner)
            except InvalidId:
                raise ValidationException('Invalid ObjectId: %s' % owner,
                                          field='owner')
        query['userId'] = owner
    cursor = super(Dataset, self).find(query=query, sort=sort, user=user)
    if not force:
        for r in self.filterResultsByPermission(cursor=cursor, user=user,
                                                level=AccessType.READ,
                                                limit=limit, offset=offset):
            yield r
    else:
        for r in cursor:
            yield r
def load(self, id, user=None, level=AccessType.READ, force=False):
    """Load a dataset by ObjectId or, failing that, by its mdbId slug.

    Returns None when the id is not a valid ObjectId and no dataset with
    that mdbId is visible to *user*.
    """
    try:
        ObjectId(id)
        return super(Dataset, self).load(id, user=user, level=level, force=force)
    except InvalidId:
        # Not a valid ObjectId -- try it as a mdb id.
        # (Removed a stray debug print of the result list.)
        datasets = list(self.find(mdb_id=id, limit=1, user=user))
        if datasets:
            return datasets[0]
        return None
def delete(self, dataset, user):
    """Delete *dataset* together with its child documents and slug.

    An incomplete dataset may lack a reconstruction, structure, or
    projection; each lookup is therefore guarded against StopIteration so
    a missing child does not abort the whole deletion.
    """
    dataset_id = dataset['_id']
    # Delete reconstruction
    try:
        reconstruction = ReconstructionModel().find(dataset_id).next()
        ReconstructionModel().delete(reconstruction, user)
    except StopIteration:
        pass
    # Delete structure
    try:
        structure = StructureModel().find(dataset_id).next()
        StructureModel().delete(structure, user)
    except StopIteration:
        pass
    # Delete projection
    try:
        projection = ProjectionModel().find(dataset_id).next()
        ProjectionModel().delete(projection, user)
    except StopIteration:
        pass
    # Remove the slug
    Slug().remove(dataset['mdbId'])
    # Now delete the dataset
    super(Dataset, self).remove(dataset)
Fix deletion of incomplete datasets

Wrap each child-model lookup in try/except StopIteration so that a dataset missing a reconstruction, structure, or projection can still be deleted.
import re
from bson.objectid import ObjectId, InvalidId
import datetime
from girder.models.model_base import AccessControlledModel
from girder.constants import AccessType
from girder.models.model_base import ValidationException
from girder.models.group import Group
from girder import events
from girder.plugins.materialsdatabank.models.slug import Slug, SlugUpdateException
from girder.plugins.materialsdatabank.models.reconstruction import Reconstruction as ReconstructionModel
from girder.plugins.materialsdatabank.models.structure import Structure as StructureModel
from girder.plugins.materialsdatabank.models.projection import Projection as ProjectionModel
from ..constants import ELEMENT_SYMBOLS_LOWER, ELEMENT_SYMBOLS
class Dataset(AccessControlledModel):
def initialize(self):
self.name = 'mdb.datasets'
self.ensureIndices(('authors', 'title', 'atomicSpecies', 'mdbId'))
self.ensureTextIndex({
'authors': 1,
'title': 1
})
self.exposeFields(level=AccessType.READ, fields=(
'_id', 'authors', 'title', 'atomicSpecies', 'doi', 'mdbId'))
def validate(self, dataset):
    """Reject the dataset if its mdbId is already taken; otherwise return it."""
    mdb_id = dataset.get('mdbId')
    if mdb_id is not None:
        # Any existing dataset with the same id makes this one a duplicate.
        if list(self.find(mdb_id=mdb_id, force=True)):
            raise ValidationException('"%s" has already been taken.' % mdb_id, field='mdbId')
    return dataset
def _generate_mdb_id_prefix(self, species):
    """Build a 4-character mdbId prefix from element symbols, padded with 'X'."""
    parts = []
    remaining = 4
    for symbol in species:
        # Only take symbols that still fit into the 4-character budget.
        if len(symbol) <= remaining:
            parts.append(symbol)
            remaining -= len(symbol)
    return ''.join(parts) + 'X' * remaining
def _generate_mdb_id_postfix(self, prefix):
    """Return the next free 5-digit, zero-padded postfix for *prefix*.

    Scans existing datasets whose mdbId starts with *prefix* and returns
    one more than the highest postfix found.
    """
    # Search for existing datasets with this prefix.
    # Raw string avoids the invalid '\d' escape (DeprecationWarning and,
    # eventually, a SyntaxError in newer Python versions).
    regex = re.compile(r'^%s(\d{5})' % prefix)
    query = {
        'mdbId': {
            '$regex': regex
        }
    }
    cursor = super(Dataset, self).find(query, fields=['mdbId'])
    # Track the highest postfix seen so far.
    postfix = 0
    for doc in cursor:
        match = regex.match(doc['mdbId'])
        postfix = max(postfix, int(match.group(1)))
    postfix += 1
    return str(postfix).zfill(5)
def ensure_mdb_id(self, dataset, species, updates):
    """Allocate a unique mdbId for *dataset* if it does not already have one.

    :param dataset: the dataset document being updated.
    :param species: iterable of atomic numbers used to derive the id prefix.
    :param updates: the Mongo update spec; the allocated id is recorded
        under ``$set``.
    :raises Exception: if a unique id cannot be allocated after 5 retries.
    """
    species = [ELEMENT_SYMBOLS[n] for n in species]
    # Bug fix: the key was previously misspelled 'mbdId', so the check never
    # matched and a fresh id was allocated on every call.
    if 'mdbId' not in dataset:
        prefix = self._generate_mdb_id_prefix(species)
        # Try up to 5 times, in case we have overlapping updates
        retry_count = 5
        while True:
            postfix = self._generate_mdb_id_postfix(prefix)
            mdb_id = '%s%s' % (prefix, postfix)
            # Now update atomically the slugs document
            try:
                Slug().add(mdb_id)
                break
            except SlugUpdateException:
                if retry_count == 0:
                    raise Exception('Unable to create new mdb id after 5 retries.')
                retry_count -= 1
        # Now we have allocated the mdbId add it to the dataset model
        updates.setdefault('$set', {})['mdbId'] = mdb_id
def create(self, authors, title=None, doi=None, microscope=None, image_file_id=None,
           user=None, public=False):
    """Create and save a new dataset document and fire the created event.

    The owning user (if any) gets admin access, as does the 'curator'
    group when it exists.

    NOTE(review): ``microscope`` is accepted but never stored here --
    confirm whether that is intentional.
    """
    dataset = {
        'authors': authors,
        'title': title,
        'doi': doi,
        'editable': False,
        'deposited': datetime.datetime.utcnow()
    }
    if image_file_id is not None:
        dataset['imageFileId'] = ObjectId(image_file_id)
    self.setPublic(dataset, public=public)
    if user:
        dataset['userId'] = user['_id']
        self.setUserAccess(dataset, user=user, level=AccessType.ADMIN)
        # Grant the curator group admin rights so curators can approve it.
        curator = list(Group().find({
            'name': 'curator',
        }))
        if len(curator) > 0:
            self.setGroupAccess(dataset, group=curator[0], level=AccessType.ADMIN)
    else:
        dataset['userId'] = None
    dataset = self.save(dataset)
    events.trigger('mdb.dataset.created', {
        'dataset': dataset,
        'user': user
    })
    return dataset
def update(self, dataset, dataset_updates=None, user=None, atomic_species=None, validation=None,
           public=None):
    """Apply partial updates to *dataset* and return the refreshed document.

    :param dataset_updates: dict of property updates; only whitelisted
        properties are applied.
    :param atomic_species: species to merge into the existing set; also
        triggers mdbId allocation.
    :param public: new visibility; approving (making public) fires the
        approval event and records the release timestamp.
    :param validation: validation payload to store as-is.
    """
    query = {
        '_id': dataset['_id']
    }
    if dataset_updates is None:
        dataset_updates = {}
    updates = {}
    # Only whitelisted properties may be changed through this path.
    mutable_props = ['authors', 'title', 'doi', 'editable']
    for prop in dataset_updates:
        if prop in mutable_props:
            updates.setdefault('$set', {})[prop] = dataset_updates[prop]
    if atomic_species is not None:
        # Merge the new species into the existing set (duplicates removed).
        # (The previous nested duplicate `if atomic_species is not None`
        # check was redundant and has been folded into this one.)
        new_atomic_species = set(dataset.get('atomicSpecies', {}))
        new_atomic_species.update(atomic_species)
        updates.setdefault('$set', {})['atomicSpecies'] = list(new_atomic_species)
        self.ensure_mdb_id(dataset, new_atomic_species, updates)
    if public is not None:
        updates.setdefault('$set', {})['public'] = public
        # Trigger event if this dataset is being approved ( being made public )
        if public and not dataset.get('public', False):
            events.trigger('mdb.dataset.approved', {
                'dataset': dataset,
                'approver': user
            })
            updates.setdefault('$set', {})['released'] = datetime.datetime.utcnow()
    if validation is not None:
        updates.setdefault('$set', {})['validation'] = validation
    if updates:
        super(Dataset, self).update(query, update=updates, multi=False)
        return self.load(dataset['_id'], user=user, level=AccessType.READ)
    return dataset
def _normalize_element(self, element):
    """Map an element symbol (case-insensitive) or numeric string to its
    atomic number.

    Raises ValueError if *element* is neither a known symbol nor parsable
    as an integer.
    """
    symbol = element.lower()
    if symbol in ELEMENT_SYMBOLS_LOWER:
        return ELEMENT_SYMBOLS_LOWER.index(symbol)
    # Not a known symbol -- fall back to parsing it as an atomic number.
    return int(element)
def search(self, search_terms=None, atomic_species=None, offset=0, limit=None,
           sort=None, user=None):
    """Yield datasets readable by *user* that match any of *search_terms*.

    Each term is matched against the text index (authors/title), against
    the exact mdbId, and -- when it parses as an element symbol or atomic
    number -- against the atomic species list.

    NOTE(review): the ``atomic_species`` parameter is accepted but never
    used in this method -- confirm whether callers rely on it.
    """
    query = {}
    if search_terms is not None:
        filters = []
        for search in search_terms:
            # Full-text match on the indexed fields (authors, title).
            filters.append({
                '$text': {
                    '$search': search
                }
            })
            # Exact match on the public mdb id.
            filters.append({
                'mdbId': search
            })
            try:
                atomic_number = self._normalize_element(search)
                filters.append({
                    'atomicSpecies': {
                        '$in': [atomic_number]
                    }
                })
            except ValueError:
                # The search term can't be an atomic number
                pass
        query['$or'] = filters
    cursor = super(Dataset, self).find(query=query, sort=sort, user=user)
    for r in self.filterResultsByPermission(cursor=cursor, user=user,
                                            level=AccessType.READ,
                                            limit=limit, offset=offset):
        yield r
def find(self, authors=None, title=None, atomic_species=None, mdb_id=None,
         owner=None, offset=0, limit=None, sort=None, user=None, force=False):
    """Yield datasets matching the given filters.

    ``authors`` and ``title`` are matched as case-insensitive substrings;
    ``atomic_species`` entries that cannot be normalized are silently
    dropped; ``mdb_id`` and ``owner`` are exact matches. With
    ``force=True`` permission filtering is skipped.

    NOTE(review): author/title values are interpolated into regexes
    unescaped -- consider ``re.escape`` if these come from user input.
    """
    query = {}
    if authors is not None:
        if not isinstance(authors, (list, tuple)):
            authors = [authors]
        # Case-insensitive substring match for each author.
        author_regexs = []
        for author in authors:
            author_regexs.append(re.compile('.*%s.*' % author, re.IGNORECASE))
        query['authors'] = {
            '$in': author_regexs
        }
    if title is not None:
        regex = re.compile('.*%s.*' % title, re.IGNORECASE)
        query['title'] = {
            '$regex': regex
        }
    if atomic_species:
        species = []
        for s in atomic_species:
            try:
                atomic_number = self._normalize_element(s)
                species.append(atomic_number)
            except ValueError:
                # The search term can't be an atomic number
                pass
        query['atomicSpecies'] = {
            '$in': species
        }
    if mdb_id is not None:
        query['mdbId'] = mdb_id
    if owner is not None:
        if not isinstance(owner, ObjectId):
            try:
                owner = ObjectId(owner)
            except InvalidId:
                raise ValidationException('Invalid ObjectId: %s' % owner,
                                          field='owner')
        query['userId'] = owner
    cursor = super(Dataset, self).find(query=query, sort=sort, user=user)
    if not force:
        for r in self.filterResultsByPermission(cursor=cursor, user=user,
                                                level=AccessType.READ,
                                                limit=limit, offset=offset):
            yield r
    else:
        for r in cursor:
            yield r
def load(self, id, user=None, level=AccessType.READ, force=False):
    """Load a dataset by ObjectId or, failing that, by its mdbId slug.

    Returns None when the id is not a valid ObjectId and no dataset with
    that mdbId is visible to *user*.
    """
    try:
        ObjectId(id)
        return super(Dataset, self).load(id, user=user, level=level, force=force)
    except InvalidId:
        # Not a valid ObjectId -- try it as a mdb id.
        # (Removed a stray debug print of the result list.)
        datasets = list(self.find(mdb_id=id, limit=1, user=user))
        if datasets:
            return datasets[0]
        return None
def delete(self, dataset, user):
    """Delete *dataset* together with its child documents and slug.

    An incomplete dataset may lack a reconstruction, structure, or
    projection; each lookup is guarded against StopIteration so a missing
    child does not abort the whole deletion.
    """
    dataset_id = dataset['_id']
    # Delete reconstruction
    try:
        reconstruction = ReconstructionModel().find(dataset_id).next()
        ReconstructionModel().delete(reconstruction, user)
    except StopIteration:
        pass
    # Delete structure
    try:
        structure = StructureModel().find(dataset_id).next()
        StructureModel().delete(structure, user)
    except StopIteration:
        pass
    # Delete projection
    try:
        projection = ProjectionModel().find(dataset_id).next()
        ProjectionModel().delete(projection, user)
    except StopIteration:
        pass
    # Remove the slug
    Slug().remove(dataset['mdbId'])
    # Now delete the dataset
    super(Dataset, self).remove(dataset)
|
# -*- coding: utf-8 -*-
"""
vcs
~~~
Various version Control System (vcs) management abstraction layer for
Python.
:created_on: Apr 8, 2010
:copyright: (c) 2010-2011 by Marcin Kuzminski, Lukasz Balcerzak.
"""
VERSION = (0, 4, 0)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
__all__ = [
'get_version', 'get_repo', 'get_backend',
'VCSError', 'RepositoryError', 'ChangesetError'
]
import sys
from vcs.backends import get_repo, get_backend
from vcs.exceptions import VCSError, RepositoryError, ChangesetError
def get_version():
    """Return the three-part numeric version as a dotted string."""
    return '.'.join(str(part) for part in VERSION[:3])
def main(argv=None):
    """Command-line entry point: dispatch *argv* to the vcs ExecutionManager.

    :param argv: argument list; defaults to ``sys.argv``.
    :returns: process exit code (always 0 unless an exception escapes).
    """
    if argv is None:
        argv = sys.argv
    # Imported lazily so plain library use doesn't pull in the CLI machinery.
    from vcs.cli import ExecutionManager
    manager = ExecutionManager(argv)
    manager.execute()
    return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
Bumped version to 0.5.0.dev for the next development cycle
# -*- coding: utf-8 -*-
"""
vcs
~~~
Various version Control System (vcs) management abstraction layer for
Python.
:created_on: Apr 8, 2010
:copyright: (c) 2010-2011 by Marcin Kuzminski, Lukasz Balcerzak.
"""
VERSION = (0, 5, 0, 'dev')
__version__ = '.'.join((str(each) for each in VERSION[:4]))
__all__ = [
'get_version', 'get_repo', 'get_backend',
'VCSError', 'RepositoryError', 'ChangesetError'
]
import sys
from vcs.backends import get_repo, get_backend
from vcs.exceptions import VCSError, RepositoryError, ChangesetError
def get_version():
    """Return the three-part numeric version as a dotted string."""
    return '.'.join(str(part) for part in VERSION[:3])
def main(argv=None):
    """Command-line entry point: dispatch *argv* to the vcs ExecutionManager.

    :param argv: argument list; defaults to ``sys.argv``.
    :returns: process exit code (always 0 unless an exception escapes).
    """
    if argv is None:
        argv = sys.argv
    # Imported lazily so plain library use doesn't pull in the CLI machinery.
    from vcs.cli import ExecutionManager
    manager = ExecutionManager(argv)
    manager.execute()
    return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
"""
byceps.blueprints.authentication.session
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from typing import Optional, Union
from flask import session
from ...services.authentication.exceptions import AuthenticationFailed
from ...services.authentication.session import service as session_service
from ...services.user.models.user import AnonymousUser, User
from ...services.user import service as user_service
from ...typing import UserID
KEY_USER_ID = 'user_id'
KEY_USER_AUTH_TOKEN = 'user_auth_token'
def start(user_id: UserID, auth_token: str, *, permanent: bool=False) -> None:
    """Initialize the user's session by putting the relevant data
    into the session cookie.
    """
    session.update({
        KEY_USER_ID: str(user_id),
        KEY_USER_AUTH_TOKEN: str(auth_token),
    })
    session.permanent = permanent
def end() -> None:
    """End the user's session by deleting the session cookie."""
    for key in (KEY_USER_ID, KEY_USER_AUTH_TOKEN):
        session.pop(key, None)
    session.permanent = False
def get_user() -> Union[AnonymousUser, User]:
    """Return the current user, falling back to the anonymous user."""
    user_id = _get_user_id()
    auth_token = _get_auth_token()
    return _load_user(user_id, auth_token)
def _get_user_id() -> Optional[str]:
    """Return the current user's ID, or `None` if not available."""
    return session.get(KEY_USER_ID, None)
def _get_auth_token() -> Optional[str]:
    """Return the current user's auth token, or `None` if not available."""
    return session.get(KEY_USER_AUTH_TOKEN, None)
def _load_user(user_id: str, auth_token: str) -> Union[AnonymousUser, User]:
    """Load the user with that ID.

    Fall back to the anonymous user if the ID is unknown, the account is
    not enabled, or the auth token is invalid.
    """
    if user_id is None:
        return user_service.get_anonymous_user()
    user = user_service.find_active_db_user(user_id)
    # `find_active_db_user` only returns enabled users, so the previous
    # explicit `user.enabled` check here was redundant and has been removed.
    if user is None:
        return user_service.get_anonymous_user()
    # Validate auth token.
    if not _is_auth_token_valid(user.id, auth_token):
        # Bad auth token, not logging in.
        return user_service.get_anonymous_user()
    return user
def _is_auth_token_valid(user_id: UserID, auth_token) -> bool:
    """Return True if the session service accepts the token for this user."""
    try:
        session_service.authenticate_session(user_id, auth_token)
    except AuthenticationFailed:
        return False
    return True
Do not explicitly check the `enabled` flag at the call site

The user service function (`find_active_db_user`) already does that, and only returns a user if it is enabled.
"""
byceps.blueprints.authentication.session
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from typing import Optional, Union
from flask import session
from ...services.authentication.exceptions import AuthenticationFailed
from ...services.authentication.session import service as session_service
from ...services.user.models.user import AnonymousUser, User
from ...services.user import service as user_service
from ...typing import UserID
KEY_USER_ID = 'user_id'
KEY_USER_AUTH_TOKEN = 'user_auth_token'
def start(user_id: UserID, auth_token: str, *, permanent: bool=False) -> None:
    """Initialize the user's session by putting the relevant data
    into the session cookie.

    Both values are stringified before storage so the cookie stays
    JSON-serializable.
    """
    session[KEY_USER_ID] = str(user_id)
    session[KEY_USER_AUTH_TOKEN] = str(auth_token)
    session.permanent = permanent
def end() -> None:
    """End the user's session by deleting the session cookie."""
    # `pop` with a default tolerates an already-missing key.
    session.pop(KEY_USER_ID, None)
    session.pop(KEY_USER_AUTH_TOKEN, None)
    session.permanent = False
def get_user() -> Union[AnonymousUser, User]:
    """Return the current user, falling back to the anonymous user."""
    return _load_user(_get_user_id(), _get_auth_token())
def _get_user_id() -> Optional[str]:
    """Return the current user's ID, or `None` if not available."""
    return session.get(KEY_USER_ID)
def _get_auth_token() -> Optional[str]:
    """Return the current user's auth token, or `None` if not available."""
    return session.get(KEY_USER_AUTH_TOKEN)
def _load_user(user_id: str, auth_token: str) -> Union[AnonymousUser, User]:
    """Load the user with that ID.

    Fall back to the anonymous user if the ID is unknown, the account is
    not enabled, or the auth token is invalid.
    """
    if user_id is None:
        return user_service.get_anonymous_user()
    # `find_active_db_user` only returns enabled users, so no separate
    # `enabled` check is needed here.
    user = user_service.find_active_db_user(user_id)
    if user is None:
        return user_service.get_anonymous_user()
    # Validate auth token.
    if not _is_auth_token_valid(user.id, auth_token):
        # Bad auth token, not logging in.
        return user_service.get_anonymous_user()
    return user
def _is_auth_token_valid(user_id: UserID, auth_token) -> bool:
    """Return True if the session service accepts the token for this user."""
    try:
        session_service.authenticate_session(user_id, auth_token)
    except AuthenticationFailed:
        return False
    else:
        return True
|
import json
import tempfile
import logging
from binascii import unhexlify
from lbrynet.extras.wallet.transaction import Transaction
from lbrynet.error import InsufficientFundsError
from lbrynet.schema.claim import ClaimDict
from torba.testcase import IntegrationTestCase
import lbrynet.schema
lbrynet.schema.BLOCKCHAIN_NAME = 'lbrycrd_regtest'
from lbrynet.conf import Config
from lbrynet.extras.daemon.Daemon import Daemon, jsonrpc_dumps_pretty
from lbrynet.extras.wallet import LbryWalletManager
from lbrynet.extras.daemon.Components import WalletComponent
from lbrynet.extras.daemon.Components import (
DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT,
UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
)
from lbrynet.extras.daemon.ComponentManager import ComponentManager
class CommandTestCase(IntegrationTestCase):
    """Integration-test base that boots a full lbrynet Daemon against a
    regtest blockchain, with networking-heavy components stubbed out.
    """

    timeout = 180
    MANAGER = LbryWalletManager
    VERBOSITY = logging.WARN

    async def asyncSetUp(self):
        """Configure a regtest daemon, fund the wallet with 10 LBC, and start it."""
        await super().asyncSetUp()
        logging.getLogger('lbrynet.blob_exchange').setLevel(self.VERBOSITY)
        logging.getLogger('lbrynet.daemon').setLevel(self.VERBOSITY)
        conf = Config()
        # Keep all daemon state inside the test wallet node's scratch dir.
        conf.data_dir = self.wallet_node.data_path
        conf.wallet_dir = self.wallet_node.data_path
        conf.download_dir = self.wallet_node.data_path
        conf.share_usage_data = False
        conf.use_upnp = False
        conf.reflect_streams = False
        conf.blockchain_name = 'lbrycrd_regtest'
        conf.lbryum_servers = [('localhost', 50001)]
        conf.reflector_servers = []
        conf.known_dht_nodes = []
        await self.account.ensure_address_gap()
        address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
        # Fund the test account with 10 LBC and wait until it is confirmed.
        sendtxid = await self.blockchain.send_to_address(address, 10)
        await self.confirm_tx(sendtxid)
        await self.generate(5)

        def wallet_maker(component_manager):
            # Inject this test's wallet manager instead of letting the
            # component build (and start) its own.
            self.wallet_component = WalletComponent(component_manager)
            self.wallet_component.wallet_manager = self.manager
            self.wallet_component._running = True
            return self.wallet_component

        conf.components_to_skip = [
            DHT_COMPONENT, UPNP_COMPONENT, HASH_ANNOUNCER_COMPONENT,
            PEER_PROTOCOL_SERVER_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
        ]
        self.daemon = Daemon(conf, ComponentManager(
            conf, skip_components=conf.components_to_skip, wallet=wallet_maker
        ))
        await self.daemon.initialize()
        self.manager.old_db = self.daemon.storage

    async def asyncTearDown(self):
        """Stop the daemon and mark the injected wallet component as stopped."""
        await super().asyncTearDown()
        self.wallet_component._running = False
        await self.daemon.stop()

    async def confirm_tx(self, txid):
        """ Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. """
        await self.on_transaction_id(txid)
        await self.generate(1)
        await self.on_transaction_id(txid)

    async def on_transaction_dict(self, tx):
        """Wait until the ledger has processed the raw transaction in *tx*."""
        await self.ledger.wait(
            self.ledger.transaction_class(unhexlify(tx['hex']))
        )

    @staticmethod
    def get_all_addresses(tx):
        """Return the distinct input and output addresses of *tx*."""
        addresses = set()
        for txi in tx['inputs']:
            addresses.add(txi['address'])
        for txo in tx['outputs']:
            addresses.add(txo['address'])
        return list(addresses)

    async def generate(self, blocks):
        """ Ask lbrycrd to generate some blocks and wait until ledger has them. """
        await self.blockchain.generate(blocks)
        await self.ledger.on_header.where(self.blockchain.is_expected_block)

    async def out(self, awaitable):
        """ Converts Daemon API call results (dictionary)
        to JSON and then back to a dictionary. """
        return json.loads(jsonrpc_dumps_pretty(await awaitable, ledger=self.ledger))['result']
class EpicAdventuresOfChris45(CommandTestCase):
VERBOSITY = logging.WARN
async def test_no_this_is_not_a_test_its_an_adventure(self):
# Chris45 is an avid user of LBRY and this is his story. It's fact and fiction
# and everything in between; it's also the setting of some record setting
# integration tests.
# Chris45 starts everyday by checking his balance.
result = await self.daemon.jsonrpc_account_balance()
self.assertEqual(result, '10.0')
# "10 LBC, yippy! I can do a lot with that.", he thinks to himself,
# enthusiastically. But he is hungry so he goes into the kitchen
# to make himself a spamdwich.
# While making the spamdwich he wonders... has anyone on LBRY
# registered the @spam channel yet? "I should do that!" he
# exclaims and goes back to his computer to do just that!
channel = await self.out(self.daemon.jsonrpc_channel_new('@spam', "1.0"))
self.assertTrue(channel['success'])
await self.confirm_tx(channel['tx']['txid'])
# Do we have it locally?
channels = await self.out(self.daemon.jsonrpc_channel_list())
self.assertEqual(len(channels), 1)
self.assertEqual(channels[0]['name'], '@spam')
# As the new channel claim travels through the intertubes and makes its
# way into the mempool and then a block and then into the claimtrie,
# Chris doesn't sit idly by: he checks his balance!
result = await self.daemon.jsonrpc_account_balance()
self.assertEqual(result, '8.989893')
# He waits for 6 more blocks (confirmations) to make sure the balance has been settled.
await self.generate(6)
result = await self.daemon.jsonrpc_account_balance(confirmations=6)
self.assertEqual(result, '8.989893')
# And is the channel resolvable and empty?
response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@spam'))
self.assertIn('lbry://@spam', response)
self.assertIn('certificate', response['lbry://@spam'])
# "What goes well with spam?" ponders Chris...
# "A hovercraft with eels!" he exclaims.
# "That's what goes great with spam!" he further confirms.
# And so, many hours later, Chris is finished writing his epic story
# about eels driving a hovercraft across the wetlands while eating spam
# and decides it's time to publish it to the @spam channel.
with tempfile.NamedTemporaryFile() as file:
file.write(b'blah blah blah...')
file.write(b'[insert long story about eels driving hovercraft]')
file.write(b'yada yada yada!')
file.write(b'the end')
file.flush()
claim1 = await self.out(self.daemon.jsonrpc_publish(
'hovercraft', '1.0', file_path=file.name, channel_id=channel['claim_id']
))
self.assertTrue(claim1['success'])
await self.confirm_tx(claim1['tx']['txid'])
# He quickly checks the unconfirmed balance to make sure everything looks
# correct.
result = await self.daemon.jsonrpc_account_balance()
self.assertEqual(result, '7.969786')
# Also checks that his new story can be found on the blockchain before
# giving the link to all his friends.
response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@spam/hovercraft'))
self.assertIn('lbry://@spam/hovercraft', response)
self.assertIn('claim', response['lbry://@spam/hovercraft'])
# He goes to tell everyone about it and in the meantime 5 blocks are confirmed.
await self.generate(5)
# When he comes back he verifies the confirmed balance.
result = await self.daemon.jsonrpc_account_balance()
self.assertEqual(result, '7.969786')
# As people start reading his story they discover some typos and notify
# Chris who explains in despair "Oh! Noooooos!" but then remembers
# "No big deal! I can update my claim." And so he updates his claim.
with tempfile.NamedTemporaryFile() as file:
file.write(b'blah blah blah...')
file.write(b'[typo fixing sounds being made]')
file.write(b'yada yada yada!')
file.flush()
claim2 = await self.out(self.daemon.jsonrpc_publish(
'hovercraft', '1.0', file_path=file.name, channel_name='@spam'
))
self.assertTrue(claim2['success'])
self.assertEqual(claim2['claim_id'], claim1['claim_id'])
await self.confirm_tx(claim2['tx']['txid'])
# After some soul searching Chris decides that his story needs more
# heart and a better ending. He takes down the story and begins the rewrite.
abandon = await self.out(self.daemon.jsonrpc_claim_abandon(claim1['claim_id'], blocking=False))
self.assertTrue(abandon['success'])
await self.confirm_tx(abandon['tx']['txid'])
# And now checks that the claim doesn't resolve anymore.
response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@spam/hovercraft'))
self.assertNotIn('claim', response['lbry://@spam/hovercraft'])
# After abandoning he just waits for his LBCs to be returned to his account
await self.generate(5)
result = await self.daemon.jsonrpc_account_balance()
self.assertEqual(result, '8.9693585')
# Amidst all this Chris receives a call from his friend Ramsey
# who says that it is of utmost urgency that Chris transfer him
# 1 LBC to which Chris readily obliges
ramsey_account_id = (await self.daemon.jsonrpc_account_create("Ramsey"))['id']
ramsey_account = self.daemon.get_account_or_error(ramsey_account_id)
ramsey_address = await self.daemon.jsonrpc_address_unused(ramsey_account_id)
result = await self.out(self.daemon.jsonrpc_wallet_send('1.0', ramsey_address))
self.assertIn("txid", result)
await self.confirm_tx(result['txid'])
# Chris then eagerly waits for 6 confirmations to check his balance and then calls Ramsey to verify whether
# he received it or not
await self.generate(5)
result = await self.daemon.jsonrpc_account_balance()
# Chris' balance was correct
self.assertEqual(result, '7.9692345')
# Ramsey too assured him that he had received the 1 LBC and thanks him
result = await self.daemon.jsonrpc_account_balance(ramsey_account_id)
self.assertEqual(result, '1.0')
# After Chris is done with all the "helping other people" stuff he decides that it's time to
# write a new story and publish it to lbry. All he needed was a fresh start and he came up with:
with tempfile.NamedTemporaryFile() as file:
file.write(b'Amazingly Original First Line')
file.write(b'Super plot for the grand novel')
file.write(b'Totally un-cliched ending')
file.write(b'**Audience Gasps**')
file.flush()
claim3 = await self.out(self.daemon.jsonrpc_publish(
'fresh-start', '1.0', file_path=file.name, channel_name='@spam'
))
self.assertTrue(claim3['success'])
await self.confirm_tx(claim3['tx']['txid'])
await self.generate(5)
# He gives the link of his story to all his friends and hopes that this is the much needed break for him
uri = 'lbry://@spam/fresh-start'
# And voila, and bravo and encore! His Best Friend Ramsey read the story and immediately knew this was a hit
# Now to keep this claim winning on the lbry blockchain he immediately supports the claim
tx = await self.out(self.daemon.jsonrpc_claim_new_support(
'fresh-start', claim3['claim_id'], '0.2', account_id=ramsey_account_id
))
await self.confirm_tx(tx['txid'])
# And check if his support showed up
resolve_result = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
# It obviously did! Because, blockchain baby \O/
self.assertEqual(resolve_result[uri]['claim']['amount'], '1.0')
self.assertEqual(resolve_result[uri]['claim']['effective_amount'], '1.2')
self.assertEqual(resolve_result[uri]['claim']['supports'][0]['amount'], '0.2')
self.assertEqual(resolve_result[uri]['claim']['supports'][0]['txid'], tx['txid'])
await self.generate(5)
# Now he also wanted to support the original creator of the Award Winning Novel
# So he quickly decides to send a tip to him
tx = await self.out(
self.daemon.jsonrpc_claim_tip(claim3['claim_id'], '0.3', account_id=ramsey_account_id))
await self.confirm_tx(tx['txid'])
# And again checks if it went to the just right place
resolve_result = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
# Which it obviously did. Because....?????
self.assertEqual(resolve_result[uri]['claim']['supports'][1]['amount'], '0.3')
self.assertEqual(resolve_result[uri]['claim']['supports'][1]['txid'], tx['txid'])
await self.generate(5)
# Seeing the ravishing success of his novel Chris adds support to his claim too
tx = await self.out(self.daemon.jsonrpc_claim_new_support('fresh-start', claim3['claim_id'], '0.4'))
await self.confirm_tx(tx['txid'])
# And check if his support showed up
resolve_result = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
# It did!
self.assertEqual(resolve_result[uri]['claim']['supports'][2]['amount'], '0.4')
self.assertEqual(resolve_result[uri]['claim']['supports'][2]['txid'], tx['txid'])
await self.generate(5)
# Now Ramsey who is a singer by profession, is preparing for his new "gig". He has everything in place for that
# the instruments, the theatre, the ads, everything, EXCEPT lyrics!! He panicked.. But then he remembered
# something, so he un-panicked. He quickly calls up his best bud Chris and requests him to write hit lyrics for
# his song, seeing as his novel had smashed all the records, he was the perfect candidate!
# .......
# Chris agrees.. 17 hours 43 minutes and 14 seconds later, he makes his publish
with tempfile.NamedTemporaryFile() as file:
file.write(b'The Whale amd The Bookmark')
file.write(b'I know right? Totally a hit song')
file.write(b'That\'s what goes around for songs these days anyways')
file.flush()
claim4 = await self.out(self.daemon.jsonrpc_publish(
'hit-song', '1.0', file_path=file.name, channel_id=channel['claim_id']
))
self.assertTrue(claim4['success'])
await self.confirm_tx(claim4['tx']['txid'])
await self.generate(5)
# He sends the link to Ramsey, all happy and proud
uri = 'lbry://@spam/hit-song'
# But sadly Ramsey wasn't so pleased. It was hard for him to tell Chris...
# Chris, though a bit heartbroken, abandoned the claim for now, but instantly started working on new hit lyrics
abandon = await self.out(self.daemon.jsonrpc_claim_abandon(txid=claim4['tx']['txid'], nout=0, blocking=False))
self.assertTrue(abandon['success'])
await self.confirm_tx(abandon['tx']['txid'])
# He then checks that the claim doesn't resolve anymore.
response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
self.assertNotIn('claim', response[uri])
class AccountManagement(CommandTestCase):
    """End-to-end exercise of the account management RPCs:
    list, set (rename/configure/default), create, remove and add-from-seed."""

    VERBOSE = False

    async def test_performing_account_management_commands(self):
        # A fresh wallet starts out with a single regtest account.
        accounts = await self.daemon.jsonrpc_account_list()
        self.assertEqual(len(accounts['lbc_regtest']), 1)

        # Rename it and reconfigure both address generators.
        first_id = accounts['lbc_regtest'][0]['id']
        self.daemon.jsonrpc_account_set(
            account_id=first_id, new_name='test account',
            receiving_gap=95, receiving_max_uses=96,
            change_gap=97, change_max_uses=98
        )
        details = (await self.daemon.jsonrpc_account_list())['lbc_regtest'][0]
        self.assertEqual(details['name'], 'test account')
        self.assertEqual(
            details['address_generator']['receiving'],
            {'gap': 95, 'maximum_uses_per_address': 96}
        )
        self.assertEqual(
            details['address_generator']['change'],
            {'gap': 97, 'maximum_uses_per_address': 98}
        )

        # A second account can be created next to the first one.
        await self.daemon.jsonrpc_account_create('second account')
        accounts = await self.daemon.jsonrpc_account_list()
        self.assertEqual(len(accounts['lbc_regtest']), 2)
        self.assertEqual(accounts['lbc_regtest'][1]['name'], 'second account')
        second_id = accounts['lbc_regtest'][1]['id']

        # Making the new account the default moves it to the head of the list.
        self.daemon.jsonrpc_account_set(account_id=second_id, default=True)
        accounts = await self.daemon.jsonrpc_account_list(show_seed=True)
        self.assertEqual(accounts['lbc_regtest'][0]['name'], 'second account')
        original_seed = accounts['lbc_regtest'][1]['seed']

        # Remove the original account, keeping its seed for later.
        self.daemon.jsonrpc_account_remove(accounts['lbc_regtest'][1]['id'])
        accounts = await self.daemon.jsonrpc_account_list()
        self.assertEqual(len(accounts['lbc_regtest']), 1)

        # Re-add it from the saved seed, under a different name.
        await self.daemon.jsonrpc_account_add('recreated account', seed=original_seed)
        accounts = await self.daemon.jsonrpc_account_list()
        self.assertEqual(len(accounts['lbc_regtest']), 2)
        self.assertEqual(accounts['lbc_regtest'][1]['name'], 'recreated account')

        # The account id appears to be derived from the seed, so listing by the
        # original account's id finds the recreated account — confirm against
        # account id derivation if this ever changes.
        details = await self.daemon.jsonrpc_account_list(first_id, include_claims=True)
        self.assertEqual(details['name'], 'recreated account')
class ClaimManagement(CommandTestCase):
    """Integration tests for claim-related RPCs: publishing, updating,
    abandoning, supports/tips, listing and resolution — including claims
    signed by abandoned and re-created channels."""

    VERBOSITY = logging.WARN

    async def make_claim(self, name='hovercraft', amount='1.0', data=b'hi!', channel_name=None, confirm=True):
        """Publish `data` as a stream claim called `name`, optionally signed
        by `channel_name`; with `confirm`, wait until the publish tx has been
        seen, mined into one new block, and seen again. Returns the publish
        result dict (contains 'success', 'claim_id', 'tx', 'output')."""
        with tempfile.NamedTemporaryFile() as file:
            file.write(data)
            file.flush()
            claim = await self.out(self.daemon.jsonrpc_publish(
                name, amount, file_path=file.name, channel_name=channel_name
            ))
            self.assertTrue(claim['success'])
            if confirm:
                await self.on_transaction_dict(claim['tx'])
                await self.generate(1)
                await self.on_transaction_dict(claim['tx'])
            return claim

    async def craft_claim(self, name, amount_dewies, claim_dict, address):
        """Build, sign and broadcast a raw claim transaction directly,
        bypassing the publish RPC and its defensive checks; waits for the tx
        before and after mining one block and returns the tx."""
        # FIXME: this is here mostly because publish has defensive code for situations that happen accidentally
        # However, it still happens... So, let's reproduce them.
        claim = ClaimDict.load_dict(claim_dict)
        # fall back to the account's first usable receiving address
        address = address or (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
        tx = await Transaction.claim(name, claim, amount_dewies, address, [self.account], self.account)
        await self.broadcast(tx)
        await self.ledger.wait(tx)
        await self.generate(1)
        await self.ledger.wait(tx)
        return tx

    async def test_create_update_and_abandon_claim(self):
        """transaction_list reflects claim/update/abandon entries and the
        account balance tracks each operation's deposit and fee exactly."""
        self.assertEqual('10.0', await self.daemon.jsonrpc_account_balance())
        claim = await self.make_claim(amount='2.5')  # creates new claim
        txs = await self.out(self.daemon.jsonrpc_transaction_list())
        self.assertEqual(len(txs[0]['claim_info']), 1)
        self.assertEqual(txs[0]['confirmations'], 1)
        self.assertEqual(txs[0]['claim_info'][0]['balance_delta'], '-2.5')
        self.assertEqual(txs[0]['claim_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs[0]['value'], '0.0')
        self.assertEqual(txs[0]['fee'], '-0.020107')
        self.assertEqual('7.479893', await self.daemon.jsonrpc_account_balance())
        await self.make_claim(amount='1.0')  # updates previous claim
        txs = await self.out(self.daemon.jsonrpc_transaction_list())
        self.assertEqual(len(txs[0]['update_info']), 1)
        # lowering the bid from 2.5 to 1.0 releases 1.5 back to the account
        self.assertEqual(txs[0]['update_info'][0]['balance_delta'], '1.5')
        self.assertEqual(txs[0]['update_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs[0]['value'], '0.0')
        self.assertEqual(txs[0]['fee'], '-0.0001985')
        self.assertEqual('8.9796945', await self.daemon.jsonrpc_account_balance())
        await self.out(self.daemon.jsonrpc_claim_abandon(claim['claim_id']))
        txs = await self.out(self.daemon.jsonrpc_transaction_list())
        self.assertEqual(len(txs[0]['abandon_info']), 1)
        self.assertEqual(txs[0]['abandon_info'][0]['balance_delta'], '1.0')
        self.assertEqual(txs[0]['abandon_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs[0]['value'], '0.0')
        self.assertEqual(txs[0]['fee'], '-0.000107')
        self.assertEqual('9.9795875', await self.daemon.jsonrpc_account_balance())

    async def test_update_claim_holding_address(self):
        """claim_send_to_address moves ownership of a claim from the default
        account to an address belonging to another account."""
        other_account_id = (await self.daemon.jsonrpc_account_create('second account'))['id']
        other_account = self.daemon.get_account_or_error(other_account_id)
        other_address = await other_account.receiving.get_or_create_usable_address()
        self.assertEqual('10.0', await self.daemon.jsonrpc_account_balance())
        # create the initial name claim
        claim = await self.make_claim()
        self.assertEqual(len(await self.daemon.jsonrpc_claim_list_mine()), 1)
        self.assertEqual(len(await self.daemon.jsonrpc_claim_list_mine(account_id=other_account_id)), 0)
        tx = await self.daemon.jsonrpc_claim_send_to_address(
            claim['claim_id'], other_address
        )
        await self.ledger.wait(tx)
        # the claim now shows up under the other account only
        self.assertEqual(len(await self.daemon.jsonrpc_claim_list_mine()), 0)
        self.assertEqual(len(await self.daemon.jsonrpc_claim_list_mine(account_id=other_account_id)), 1)

    async def test_publishing_checks_all_accounts_for_certificate(self):
        """publish locates a channel certificate in any account by default,
        but honors channel_account_id when one is supplied and fails when the
        named account does not hold the channel."""
        account1_id, account1 = self.account.id, self.account
        new_account = await self.daemon.jsonrpc_account_create('second account')
        account2_id, account2 = new_account['id'], self.daemon.get_account_or_error(new_account['id'])
        spam_channel = await self.out(self.daemon.jsonrpc_channel_new('@spam', '1.0'))
        self.assertTrue(spam_channel['success'])
        await self.confirm_tx(spam_channel['tx']['txid'])
        self.assertEqual('8.989893', await self.daemon.jsonrpc_account_balance())
        # fund the second account so it can create its own channel
        result = await self.out(self.daemon.jsonrpc_wallet_send(
            '5.0', await self.daemon.jsonrpc_address_unused(account2_id)
        ))
        await self.confirm_tx(result['txid'])
        self.assertEqual('3.989769', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('5.0', await self.daemon.jsonrpc_account_balance(account2_id))
        baz_channel = await self.out(self.daemon.jsonrpc_channel_new('@baz', '1.0', account2_id))
        self.assertTrue(baz_channel['success'])
        await self.confirm_tx(baz_channel['tx']['txid'])
        # each account sees only its own channel; the no-argument form
        # defaults to the default account's channels
        channels = await self.out(self.daemon.jsonrpc_channel_list(account1_id))
        self.assertEqual(len(channels), 1)
        self.assertEqual(channels[0]['name'], '@spam')
        self.assertEqual(channels, await self.out(self.daemon.jsonrpc_channel_list()))
        channels = await self.out(self.daemon.jsonrpc_channel_list(account2_id))
        self.assertEqual(len(channels), 1)
        self.assertEqual(channels[0]['name'], '@baz')
        # defaults to using all accounts to lookup channel
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'hi!')
            file.flush()
            claim1 = await self.out(self.daemon.jsonrpc_publish(
                'hovercraft', '1.0', file_path=file.name, channel_name='@baz'
            ))
            self.assertTrue(claim1['success'])
            await self.confirm_tx(claim1['tx']['txid'])
        # uses only the specific accounts which contain the channel
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'hi!')
            file.flush()
            claim1 = await self.out(self.daemon.jsonrpc_publish(
                'hovercraft', '1.0', file_path=file.name,
                channel_name='@baz', channel_account_id=[account2_id]
            ))
            self.assertTrue(claim1['success'])
            await self.confirm_tx(claim1['tx']['txid'])
        # fails when specifying account which does not contain channel
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'hi!')
            file.flush()
            with self.assertRaisesRegex(ValueError, "Couldn't find channel with name '@baz'."):
                await self.out(self.daemon.jsonrpc_publish(
                    'hovercraft', '1.0', file_path=file.name,
                    channel_name='@baz', channel_account_id=[account1_id]
                ))

    async def test_updating_claim_includes_claim_value_in_balance_check(self):
        """When updating an existing claim, the funds already locked in that
        claim count toward the balance available for the new bid."""
        self.assertEqual('10.0', await self.daemon.jsonrpc_account_balance())
        await self.make_claim(amount='9.0')
        self.assertEqual('0.979893', await self.daemon.jsonrpc_account_balance())
        # update the same claim
        await self.make_claim(amount='9.0')
        self.assertEqual('0.9796205', await self.daemon.jsonrpc_account_balance())
        # update the claim a second time but use even more funds
        await self.make_claim(amount='9.97')
        self.assertEqual('0.009348', await self.daemon.jsonrpc_account_balance())
        # fails when specifying more than available
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'hi!')
            file.flush()
            with self.assertRaisesRegex(
                InsufficientFundsError,
                "Please lower the bid value, the maximum amount"
                " you can specify for this claim is 9.979274."
            ):
                await self.out(self.daemon.jsonrpc_publish(
                    'hovercraft', '9.98', file_path=file.name
                ))

    async def test_abandoning_claim_at_loss(self):
        """Abandoning a claim whose deposit is smaller than the abandon fee
        still succeeds; the balance ends up slightly lower than before."""
        self.assertEqual('10.0', await self.daemon.jsonrpc_account_balance())
        claim = await self.make_claim(amount='0.0001')
        self.assertEqual('9.979793', await self.daemon.jsonrpc_account_balance())
        await self.out(self.daemon.jsonrpc_claim_abandon(claim['claim_id']))
        self.assertEqual('9.97968399', await self.daemon.jsonrpc_account_balance())

    async def test_claim_show(self):
        """claim_show retrieves a claim by txid+nout or by claim_id, and
        returns 'claim not found' for an output that is not a claim."""
        channel = await self.out(self.daemon.jsonrpc_channel_new('@abc', "1.0"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])
        # lookup by txid + nout
        channel_from_claim_show = await self.out(
            self.daemon.jsonrpc_claim_show(txid=channel['tx']['txid'], nout=channel['output']['nout'])
        )
        self.assertEqual(channel_from_claim_show['value'], channel['output']['value'])
        # lookup by claim_id
        channel_from_claim_show = await self.out(
            self.daemon.jsonrpc_claim_show(claim_id=channel['claim_id'])
        )
        self.assertEqual(channel_from_claim_show['value'], channel['output']['value'])
        abandon = await self.out(self.daemon.jsonrpc_claim_abandon(txid=channel['tx']['txid'], nout=0, blocking=False))
        self.assertTrue(abandon['success'])
        await self.confirm_tx(abandon['tx']['txid'])
        # the abandon transaction's output is not a claim
        not_a_claim = await self.out(
            self.daemon.jsonrpc_claim_show(txid=abandon['tx']['txid'], nout=0)
        )
        self.assertEqual(not_a_claim, 'claim not found')

    async def test_claim_list(self):
        """claim_list finds channel claims, channel-signed claims and
        unsigned claims by name, and returns nothing once abandoned."""
        channel = await self.out(self.daemon.jsonrpc_channel_new('@abc', "1.0"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])
        claim = await self.make_claim(amount='0.0001', name='on-channel-claim', channel_name='@abc')
        self.assertTrue(claim['success'])
        unsigned_claim = await self.make_claim(amount='0.0001', name='unsigned')
        # NOTE(review): this re-checks claim['success'] — probably meant
        # unsigned_claim['success']; verify intent before changing.
        self.assertTrue(claim['success'])
        channel_from_claim_list = await self.out(self.daemon.jsonrpc_claim_list('@abc'))
        self.assertEqual(channel_from_claim_list['claims'][0]['value'], channel['output']['value'])
        signed_claim_from_claim_list = await self.out(self.daemon.jsonrpc_claim_list('on-channel-claim'))
        self.assertEqual(signed_claim_from_claim_list['claims'][0]['value'], claim['output']['value'])
        unsigned_claim_from_claim_list = await self.out(self.daemon.jsonrpc_claim_list('unsigned'))
        self.assertEqual(unsigned_claim_from_claim_list['claims'][0]['value'], unsigned_claim['output']['value'])
        abandon = await self.out(self.daemon.jsonrpc_claim_abandon(txid=channel['tx']['txid'], nout=0, blocking=False))
        self.assertTrue(abandon['success'])
        await self.confirm_tx(abandon['tx']['txid'])
        empty = await self.out(self.daemon.jsonrpc_claim_list('@abc'))
        self.assertEqual(len(empty['claims']), 0)

    async def test_abandoned_channel_with_signed_claims(self):
        """Claims signed by an abandoned (then re-created) channel have
        invalid signatures: they resolve outside the channel but not inside
        it, and in-channel resolution prefers the validly signed claim even
        when a tampered or conflicting raw claim exists."""
        channel = await self.out(self.daemon.jsonrpc_channel_new('@abc', "1.0"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])
        claim = await self.make_claim(amount='0.0001', name='on-channel-claim', channel_name='@abc')
        self.assertTrue(claim['success'])
        abandon = await self.out(self.daemon.jsonrpc_claim_abandon(txid=channel['tx']['txid'], nout=0, blocking=False))
        self.assertTrue(abandon['success'])
        channel = await self.out(self.daemon.jsonrpc_channel_new('@abc', "1.0"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])
        # Original channel doesn't exist anymore, so the signature is invalid. For invalid signatures, resolution is
        # only possible outside a channel
        response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@abc/on-channel-claim'))
        self.assertNotIn('claim', response['lbry://@abc/on-channel-claim'])
        response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://on-channel-claim'))
        self.assertIn('claim', response['lbry://on-channel-claim'])
        self.assertFalse(response['lbry://on-channel-claim']['claim']['signature_is_valid'])
        direct_uri = 'lbry://on-channel-claim#' + claim['claim_id']
        response = await self.out(self.daemon.jsonrpc_resolve(uri=direct_uri))
        self.assertIn('claim', response[direct_uri])
        self.assertFalse(response[direct_uri]['claim']['signature_is_valid'])
        uri = 'lbry://@abc/on-channel-claim'
        # now, claim something on this channel (it will update the invalid claim, but we save and forcefully restore)
        original_claim = await self.make_claim(amount='0.00000001', name='on-channel-claim', channel_name='@abc')
        self.assertTrue(original_claim['success'])
        # resolves normally
        response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        self.assertIn('claim', response[uri])
        self.assertTrue(response[uri]['claim']['signature_is_valid'])
        # tamper it, invalidating the signature
        value = response[uri]['claim']['value'].copy()
        value['stream']['metadata']['author'] = 'some troll'
        address = response[uri]['claim']['address']
        await self.craft_claim('on-channel-claim', 1, value, address)
        # it resolves to the now only valid claim under the channel, ignoring the fake one
        response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        self.assertIn('claim', response[uri])
        self.assertTrue(response[uri]['claim']['signature_is_valid'])
        # ooops! claimed a valid conflict! (this happens in the wild, mostly by accident or race condition)
        await self.craft_claim('on-channel-claim', 1, response[uri]['claim']['value'], address)
        # it still resolves! but to the older claim
        response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        self.assertIn('claim', response[uri])
        self.assertTrue(response[uri]['claim']['signature_is_valid'])
        self.assertEqual(response[uri]['claim']['txid'], original_claim['tx']['txid'])

    async def test_claim_list_by_channel(self):
        """claim_list_by_channel pages through a channel's claims sorted by
        height (descending) then claim_id (ascending), and reports an error
        for an out-of-range page."""
        self.maxDiff = None
        # create many small outputs so 12 publishes can be funded in parallel
        tx = await self.daemon.jsonrpc_account_fund(None, None, '0.001', outputs=100, broadcast=True)
        await self.ledger.wait(tx)
        await self.generate(1)
        await self.ledger.wait(tx)
        channel = await self.out(self.daemon.jsonrpc_channel_new('@abc', "0.0001"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])
        # 4 claims per block, 3 blocks. Sorted by height (descending) then claim_id (ascending).
        claims = []
        for j in range(3):
            same_height_claims = []
            for k in range(3):
                claim = await self.make_claim(amount='0.000001', name=f'c{j}-{k}', channel_name='@abc', confirm=False)
                self.assertTrue(claim['success'])
                same_height_claims.append(claim['claim_id'])
                await self.on_transaction_dict(claim['tx'])
            # the fourth claim confirms, mining all four into one block
            claim = await self.make_claim(amount='0.000001', name=f'c{j}-4', channel_name='@abc', confirm=True)
            self.assertTrue(claim['success'])
            same_height_claims.append(claim['claim_id'])
            # claim_ids are hex; sort numerically within the block
            same_height_claims.sort(key=lambda x: int(x, 16))
            claims = same_height_claims + claims
        page = await self.out(self.daemon.jsonrpc_claim_list_by_channel(1, page_size=20, uri='@abc'))
        page_claim_ids = [item['claim_id'] for item in page['@abc']['claims_in_channel']]
        self.assertEqual(page_claim_ids, claims)
        page = await self.out(self.daemon.jsonrpc_claim_list_by_channel(1, page_size=6, uri='@abc'))
        page_claim_ids = [item['claim_id'] for item in page['@abc']['claims_in_channel']]
        self.assertEqual(page_claim_ids, claims[:6])
        out_of_bounds = await self.out(self.daemon.jsonrpc_claim_list_by_channel(2, page_size=20, uri='@abc'))
        self.assertEqual(out_of_bounds['error'], 'claim 20 greater than max 12')

    async def test_regular_supports_and_tip_supports(self):
        """Tips transfer funds to the claim holder (is_tip=True on both
        sides); plain supports lock the supporter's own funds
        (is_tip=False)."""
        # account2 will be used to send tips and supports to account1
        account2_id = (await self.daemon.jsonrpc_account_create('second account'))['id']
        # send account2 5 LBC out of the 10 LBC in account1
        result = await self.out(self.daemon.jsonrpc_wallet_send(
            '5.0', await self.daemon.jsonrpc_address_unused(account2_id)
        ))
        await self.confirm_tx(result['txid'])
        # account1 and account2 balances:
        self.assertEqual('4.999876', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('5.0', await self.daemon.jsonrpc_account_balance(account2_id))
        # create the claim we'll be tipping and supporting
        claim = await self.make_claim()
        # account1 and account2 balances:
        self.assertEqual('3.979769', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('5.0', await self.daemon.jsonrpc_account_balance(account2_id))
        # send a tip to the claim using account2
        tip = await self.out(
            self.daemon.jsonrpc_claim_tip(claim['claim_id'], '1.0', account2_id)
        )
        await self.on_transaction_dict(tip)
        await self.generate(1)
        await self.on_transaction_dict(tip)
        # tips don't affect balance so account1 balance is same but account2 balance went down
        self.assertEqual('3.979769', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('3.9998585', await self.daemon.jsonrpc_account_balance(account2_id))
        # verify that the incoming tip is marked correctly as is_tip=True in account1
        txs = await self.out(self.daemon.jsonrpc_transaction_list())
        self.assertEqual(len(txs[0]['support_info']), 1)
        self.assertEqual(txs[0]['support_info'][0]['balance_delta'], '1.0')
        self.assertEqual(txs[0]['support_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs[0]['support_info'][0]['is_tip'], True)
        self.assertEqual(txs[0]['value'], '1.0')
        self.assertEqual(txs[0]['fee'], '0.0')
        # verify that the outgoing tip is marked correctly as is_tip=True in account2
        txs2 = await self.out(
            self.daemon.jsonrpc_transaction_list(account2_id)
        )
        self.assertEqual(len(txs2[0]['support_info']), 1)
        self.assertEqual(txs2[0]['support_info'][0]['balance_delta'], '-1.0')
        self.assertEqual(txs2[0]['support_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs2[0]['support_info'][0]['is_tip'], True)
        self.assertEqual(txs2[0]['value'], '-1.0')
        self.assertEqual(txs2[0]['fee'], '-0.0001415')
        # send a support to the claim using account2
        support = await self.out(
            self.daemon.jsonrpc_claim_new_support('hovercraft', claim['claim_id'], '2.0', account2_id)
        )
        await self.on_transaction_dict(support)
        await self.generate(1)
        await self.on_transaction_dict(support)
        # account2 balance went down ~2
        self.assertEqual('3.979769', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('1.999717', await self.daemon.jsonrpc_account_balance(account2_id))
        # verify that the outgoing support is marked correctly as is_tip=False in account2
        txs2 = await self.out(self.daemon.jsonrpc_transaction_list(account2_id))
        self.assertEqual(len(txs2[0]['support_info']), 1)
        self.assertEqual(txs2[0]['support_info'][0]['balance_delta'], '-2.0')
        self.assertEqual(txs2[0]['support_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs2[0]['support_info'][0]['is_tip'], False)
        self.assertEqual(txs2[0]['value'], '0.0')
        self.assertEqual(txs2[0]['fee'], '-0.0001415')
class TransactionCommandsTestCase(CommandTestCase):
    """Covers transaction_show for local, foreign and unknown txids, and
    utxo_release unlocking previously reserved outputs."""

    async def test_transaction_show(self):
        # A transaction created by our own wallet is shown from local history.
        send_result = await self.out(self.daemon.jsonrpc_wallet_send(
            '5.0', await self.daemon.jsonrpc_address_unused(self.account.id)
        ))
        await self.confirm_tx(send_result['txid'])
        shown = await self.daemon.jsonrpc_transaction_show(send_result['txid'])
        self.assertEqual(shown.id, send_result['txid'])

        # A transaction made by someone else is fetched from the chain; while
        # still unconfirmed its height is reported as -2.
        foreign_address = await self.blockchain.get_raw_change_address()
        foreign_txid = await self.blockchain.send_to_address(foreign_address, 10)
        shown = await self.daemon.jsonrpc_transaction_show(foreign_txid)
        self.assertEqual(shown.id, foreign_txid)
        self.assertEqual(shown.height, -2)
        await self.generate(1)
        shown = await self.daemon.jsonrpc_transaction_show(foreign_txid)
        self.assertEqual(shown.height, self.ledger.headers.height)

        # Asking for a txid that does not exist reports failure.
        missing = await self.daemon.jsonrpc_transaction_show('0' * 64)
        self.assertFalse(missing['success'])

    async def test_utxo_release(self):
        # Fund the account with one extra coin on top of the initial 10.
        fund_txid = await self.blockchain.send_to_address(
            await self.account.receiving.get_or_create_usable_address(), 1
        )
        await self.confirm_tx(fund_txid)
        await self.assertBalance(self.account, '11.0')
        # Reserving every UTXO drops the spendable balance to zero...
        await self.ledger.reserve_outputs(await self.account.get_utxos())
        await self.assertBalance(self.account, '0.0')
        # ...and utxo_release makes them all spendable again.
        await self.daemon.jsonrpc_utxo_release()
        await self.assertBalance(self.account, '11.0')
# moved all other tests out of test_chris45.py and into dedicated files
import tempfile
from .testcase import CommandTestCase
class EpicAdventuresOfChris45(CommandTestCase):
async def test_no_this_is_not_a_test_its_an_adventure(self):
# Chris45 is an avid user of LBRY and this is his story. It's fact and fiction
# and everything in between; it's also the setting of some record setting
# integration tests.
# Chris45 starts everyday by checking his balance.
result = await self.daemon.jsonrpc_account_balance()
self.assertEqual(result, '10.0')
# "10 LBC, yippy! I can do a lot with that.", he thinks to himself,
# enthusiastically. But he is hungry so he goes into the kitchen
# to make himself a spamdwich.
# While making the spamdwich he wonders... has anyone on LBRY
# registered the @spam channel yet? "I should do that!" he
# exclaims and goes back to his computer to do just that!
channel = await self.out(self.daemon.jsonrpc_channel_new('@spam', "1.0"))
self.assertTrue(channel['success'])
await self.confirm_tx(channel['tx']['txid'])
# Do we have it locally?
channels = await self.out(self.daemon.jsonrpc_channel_list())
self.assertEqual(len(channels), 1)
self.assertEqual(channels[0]['name'], '@spam')
# As the new channel claim travels through the intertubes and makes its
# way into the mempool and then a block and then into the claimtrie,
# Chris doesn't sit idly by: he checks his balance!
result = await self.daemon.jsonrpc_account_balance()
self.assertEqual(result, '8.989893')
# He waits for 6 more blocks (confirmations) to make sure the balance has been settled.
await self.generate(6)
result = await self.daemon.jsonrpc_account_balance(confirmations=6)
self.assertEqual(result, '8.989893')
# And is the channel resolvable and empty?
response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@spam'))
self.assertIn('lbry://@spam', response)
self.assertIn('certificate', response['lbry://@spam'])
# "What goes well with spam?" ponders Chris...
# "A hovercraft with eels!" he exclaims.
# "That's what goes great with spam!" he further confirms.
# And so, many hours later, Chris is finished writing his epic story
# about eels driving a hovercraft across the wetlands while eating spam
# and decides it's time to publish it to the @spam channel.
with tempfile.NamedTemporaryFile() as file:
file.write(b'blah blah blah...')
file.write(b'[insert long story about eels driving hovercraft]')
file.write(b'yada yada yada!')
file.write(b'the end')
file.flush()
claim1 = await self.out(self.daemon.jsonrpc_publish(
'hovercraft', '1.0', file_path=file.name, channel_id=channel['claim_id']
))
self.assertTrue(claim1['success'])
await self.confirm_tx(claim1['tx']['txid'])
# He quickly checks the unconfirmed balance to make sure everything looks
# correct.
result = await self.daemon.jsonrpc_account_balance()
self.assertEqual(result, '7.969786')
# Also checks that his new story can be found on the blockchain before
# giving the link to all his friends.
response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@spam/hovercraft'))
self.assertIn('lbry://@spam/hovercraft', response)
self.assertIn('claim', response['lbry://@spam/hovercraft'])
# He goes to tell everyone about it and in the meantime 5 blocks are confirmed.
await self.generate(5)
# When he comes back he verifies the confirmed balance.
result = await self.daemon.jsonrpc_account_balance()
self.assertEqual(result, '7.969786')
# As people start reading his story they discover some typos and notify
# Chris who explains in despair "Oh! Noooooos!" but then remembers
# "No big deal! I can update my claim." And so he updates his claim.
with tempfile.NamedTemporaryFile() as file:
file.write(b'blah blah blah...')
file.write(b'[typo fixing sounds being made]')
file.write(b'yada yada yada!')
file.flush()
claim2 = await self.out(self.daemon.jsonrpc_publish(
'hovercraft', '1.0', file_path=file.name, channel_name='@spam'
))
self.assertTrue(claim2['success'])
self.assertEqual(claim2['claim_id'], claim1['claim_id'])
await self.confirm_tx(claim2['tx']['txid'])
# After some soul searching Chris decides that his story needs more
# heart and a better ending. He takes down the story and begins the rewrite.
abandon = await self.out(self.daemon.jsonrpc_claim_abandon(claim1['claim_id'], blocking=False))
self.assertTrue(abandon['success'])
await self.confirm_tx(abandon['tx']['txid'])
# And now checks that the claim doesn't resolve anymore.
response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@spam/hovercraft'))
self.assertNotIn('claim', response['lbry://@spam/hovercraft'])
# After abandoning he just waits for his LBCs to be returned to his account
await self.generate(5)
result = await self.daemon.jsonrpc_account_balance()
self.assertEqual(result, '8.9693585')
# Amidst all this Chris receives a call from his friend Ramsey
# who says that it is of utmost urgency that Chris transfer him
# 1 LBC to which Chris readily obliges
ramsey_account_id = (await self.daemon.jsonrpc_account_create("Ramsey"))['id']
ramsey_account = self.daemon.get_account_or_error(ramsey_account_id)
ramsey_address = await self.daemon.jsonrpc_address_unused(ramsey_account_id)
result = await self.out(self.daemon.jsonrpc_wallet_send('1.0', ramsey_address))
self.assertIn("txid", result)
await self.confirm_tx(result['txid'])
# Chris then eagerly waits for 6 confirmations to check his balance and then calls Ramsey to verify whether
# he received it or not
await self.generate(5)
result = await self.daemon.jsonrpc_account_balance()
# Chris' balance was correct
self.assertEqual(result, '7.9692345')
# Ramsey too assured him that he had received the 1 LBC and thanks him
result = await self.daemon.jsonrpc_account_balance(ramsey_account_id)
self.assertEqual(result, '1.0')
# After Chris is done with all the "helping other people" stuff he decides that it's time to
# write a new story and publish it to lbry. All he needed was a fresh start and he came up with:
with tempfile.NamedTemporaryFile() as file:
file.write(b'Amazingly Original First Line')
file.write(b'Super plot for the grand novel')
file.write(b'Totally un-cliched ending')
file.write(b'**Audience Gasps**')
file.flush()
claim3 = await self.out(self.daemon.jsonrpc_publish(
'fresh-start', '1.0', file_path=file.name, channel_name='@spam'
))
self.assertTrue(claim3['success'])
await self.confirm_tx(claim3['tx']['txid'])
await self.generate(5)
# He gives the link of his story to all his friends and hopes that this is the much needed break for him
uri = 'lbry://@spam/fresh-start'
# And voila, and bravo and encore! His Best Friend Ramsey read the story and immediately knew this was a hit
# Now to keep this claim winning on the lbry blockchain he immediately supports the claim
tx = await self.out(self.daemon.jsonrpc_claim_new_support(
'fresh-start', claim3['claim_id'], '0.2', account_id=ramsey_account_id
))
await self.confirm_tx(tx['txid'])
# And check if his support showed up
resolve_result = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
# It obviously did! Because, blockchain baby \O/
self.assertEqual(resolve_result[uri]['claim']['amount'], '1.0')
self.assertEqual(resolve_result[uri]['claim']['effective_amount'], '1.2')
self.assertEqual(resolve_result[uri]['claim']['supports'][0]['amount'], '0.2')
self.assertEqual(resolve_result[uri]['claim']['supports'][0]['txid'], tx['txid'])
await self.generate(5)
# Now he also wanted to support the original creator of the Award Winning Novel
# So he quickly decides to send a tip to him
tx = await self.out(
self.daemon.jsonrpc_claim_tip(claim3['claim_id'], '0.3', account_id=ramsey_account_id))
await self.confirm_tx(tx['txid'])
# And again checks if it went to the just right place
resolve_result = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
# Which it obviously did. Because....?????
self.assertEqual(resolve_result[uri]['claim']['supports'][1]['amount'], '0.3')
self.assertEqual(resolve_result[uri]['claim']['supports'][1]['txid'], tx['txid'])
await self.generate(5)
# Seeing the ravishing success of his novel Chris adds support to his claim too
tx = await self.out(self.daemon.jsonrpc_claim_new_support('fresh-start', claim3['claim_id'], '0.4'))
await self.confirm_tx(tx['txid'])
# And check if his support showed up
resolve_result = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
# It did!
self.assertEqual(resolve_result[uri]['claim']['supports'][2]['amount'], '0.4')
self.assertEqual(resolve_result[uri]['claim']['supports'][2]['txid'], tx['txid'])
await self.generate(5)
# Now Ramsey who is a singer by profession, is preparing for his new "gig". He has everything in place for that
# the instruments, the theatre, the ads, everything, EXCEPT lyrics!! He panicked.. But then he remembered
# something, so he un-panicked. He quickly calls up his best bud Chris and requests him to write hit lyrics for
# his song, seeing as his novel had smashed all the records, he was the perfect candidate!
# .......
# Chris agrees.. 17 hours 43 minutes and 14 seconds later, he makes his publish
with tempfile.NamedTemporaryFile() as file:
file.write(b'The Whale amd The Bookmark')
file.write(b'I know right? Totally a hit song')
file.write(b'That\'s what goes around for songs these days anyways')
file.flush()
claim4 = await self.out(self.daemon.jsonrpc_publish(
'hit-song', '1.0', file_path=file.name, channel_id=channel['claim_id']
))
self.assertTrue(claim4['success'])
await self.confirm_tx(claim4['tx']['txid'])
await self.generate(5)
# He sends the link to Ramsey, all happy and proud
uri = 'lbry://@spam/hit-song'
# But sadly Ramsey wasn't so pleased. It was hard for him to tell Chris...
# Chris, though a bit heartbroken, abandoned the claim for now, but instantly started working on new hit lyrics
abandon = await self.out(self.daemon.jsonrpc_claim_abandon(txid=claim4['tx']['txid'], nout=0, blocking=False))
self.assertTrue(abandon['success'])
await self.confirm_tx(abandon['tx']['txid'])
        # He then checks that the claim doesn't resolve anymore.
response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
self.assertNotIn('claim', response[uri])
|
#!/usr/bin/env python
"""
sage_server.py -- unencrypted forking TCP server.
Note: I wrote functionality so this can run as root, create accounts on the fly,
and serve sage as those accounts. Doing this is horrendous from a security point of
view, and I'm definitely not doing this.
None of that functionality is actually used in https://cocalc.com!
For debugging, this may help:
killemall sage_server.py && sage --python sage_server.py -p 6000
"""
# NOTE: This file is GPL'd
# because it imports the Sage library. This file is not directly
# imported by anything else in CoCalc; the Python process it runs is
# used over a TCP connection.
#########################################################################################
# Copyright (C) 2016, Sagemath Inc.
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
# Add the path that contains this file to the Python load path, so we
# can import other files from there.
from __future__ import print_function, absolute_import
import six
import os, sys, time, operator
import __future__ as future
from functools import reduce
def unicode8(s):
    """Best-effort conversion of *s* to a text (unicode) string.

    Tries to decode bytes as utf8, falls back to str(s), and as a last
    resort returns the object unchanged (logging must never raise).
    """
    try:
        return str(s, 'utf8')
    except (TypeError, UnicodeDecodeError):
        # TypeError: s was already text (or not bytes-like) -- stringify it.
        try:
            return str(s)
        except Exception:
            # s.__str__ itself blew up; give the caller the original object.
            return s
LOGFILE = os.path.realpath(__file__)[:-3] + ".log"
PID = os.getpid()
from datetime import datetime
def log(*args):
    """Append a timestamped message to LOGFILE.

    Never raises: any failure is printed and ignored, since logging must not
    be able to kill the server.
    """
    try:
        mesg = "%s (%s): %s\n" % (PID, datetime.utcnow().strftime(
            '%Y-%m-%d %H:%M:%S.%f')[:-3], ' '.join([unicode8(x)
                                                    for x in args]))
        # Open/close per call so the handle is not leaked (the original opened
        # the file on every call and never closed it).
        with open(LOGFILE, 'a') as debug_log:
            debug_log.write(mesg)
            debug_log.flush()
    except Exception as err:
        print(("an error writing a log message (ignoring) -- %s" % err, args))
# used for clearing pylab figure
pylab = None
# Maximum number of distinct (non-once) output messages per cell; when this number is
# exceeded, an exception is raised; this reduces the chances of the user creating
# a huge unusable worksheet.
MAX_OUTPUT_MESSAGES = 256
# stdout, stderr, html, etc. that exceeds this many characters will be truncated to avoid
# killing the client.
MAX_STDOUT_SIZE = MAX_STDERR_SIZE = MAX_CODE_SIZE = MAX_HTML_SIZE = MAX_MD_SIZE = MAX_TEX_SIZE = 40000
MAX_OUTPUT = 150000
# Standard imports.
import json, resource, shutil, signal, socket, struct, \
tempfile, time, traceback, pwd, re
# for "3x^2 + 4xy - 5(1+x) - 3 abc4ok", this pattern matches "3x", "5(" and "4xy" but not "abc4ok"
# to understand it, see https://regex101.com/ or https://www.debuggex.com/
RE_POSSIBLE_IMPLICIT_MUL = re.compile(r'(?:(?<=[^a-zA-Z])|^)(\d+[a-zA-Z\(]+)')
from . import sage_parsing, sage_salvus
uuid = sage_salvus.uuid
reload_attached_files_if_mod_smc_available = True
def reload_attached_files_if_mod_smc():
    """Re-load any files attached via Sage's ``%attach`` that changed on disk.

    A no-op unless ``sage.repl`` has already been imported (i.e. attach has
    actually been used) and ``sage.repl.attach`` is importable; after a failed
    import the check is disabled for the rest of the process.
    """
    # CRITICAL: do NOT import sage.repl.attach!! That will import IPython, wasting several seconds and
    # killing the user experience for no reason.
    try:
        import sage.repl
        sage.repl.attach
    except:
        # nothing to do -- attach has not been used and is not yet available.
        return
    global reload_attached_files_if_mod_smc_available
    if not reload_attached_files_if_mod_smc_available:
        return
    try:
        from sage.repl.attach import load_attach_path, modified_file_iterator
    except:
        print("sage_server: attach not available")
        # Disable permanently so we don't retry (and re-print) on every cell.
        reload_attached_files_if_mod_smc_available = False
        return
    # see sage/src/sage/repl/attach.py reload_attached_files_if_modified()
    for filename, mtime in modified_file_iterator():
        basename = os.path.basename(filename)
        timestr = time.strftime('%T', mtime)
        log('reloading attached file {0} modified at {1}'.format(
            basename, timestr))
        from .sage_salvus import load
        load(filename)
# Determine the info object, if available. There's no good reason
# it wouldn't be available, unless a user explicitly deleted it, but
# we may as well try to be robust to this, especially if somebody
# were to try to use this server outside of cloud.sagemath.com.
# NOTE(review): this assumes the SMC environment variable is always set; a
# missing variable raises KeyError here and aborts startup -- confirm intended.
_info_path = os.path.join(os.environ['SMC'], 'info.json')
if os.path.exists(_info_path):
    INFO = json.loads(open(_info_path).read())
else:
    INFO = {}
# Downstream code (e.g. salvus.file) always reads INFO['base_url'].
if 'base_url' not in INFO:
    INFO['base_url'] = ''
# Configure logging
#logging.basicConfig()
#log = logging.getLogger('sage_server')
#log.setLevel(logging.INFO)
# A CoffeeScript version of this function is in misc_node.coffee.
import hashlib
def uuidsha1(data):
    """Return a UUID4-formatted string derived deterministically from the
    SHA-1 digest of *data* (bytes): hex digits fill the 'x' slots, and the
    'y' slot gets the RFC-4122 variant nibble (8|low-2-bits)."""
    digest = hashlib.sha1(data).hexdigest()
    template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'
    out = []
    pos = 0
    for ch in template:
        if ch == 'x':
            out.append(digest[pos])
            pos += 1
        elif ch == 'y':
            # take 8 + low order 3 bits of hex number.
            out.append(hex((int(digest[pos], 16) & 0x3) | 0x8)[-1])
            pos += 1
        else:
            # literal template characters: '-' separators and the version '4'
            out.append(ch)
    return ''.join(out)
# A tcp connection with support for sending various types of messages, especially JSON.
class ConnectionJSON(object):
def __init__(self, conn):
assert not isinstance(
conn, ConnectionJSON
) # avoid common mistake -- conn is supposed to be from socket.socket...
self._conn = conn
def close(self):
self._conn.close()
def _send(self, s):
length_header = struct.pack(">L", len(s))
self._conn.send(length_header + s)
def send_json(self, m):
m = json.dumps(m)
if '\\u0000' in m:
raise RuntimeError("NULL bytes not allowed")
log("sending message '", truncate_text(m, 256), "'")
self._send('j' + m)
return len(m)
def send_blob(self, blob):
s = uuidsha1(blob)
self._send('b' + s + blob)
return s
def send_file(self, filename):
log("sending file '%s'" % filename)
f = open(filename, 'rb')
data = f.read()
f.close()
return self.send_blob(data)
def _recv(self, n):
#print("_recv(%s)"%n)
# see http://stackoverflow.com/questions/3016369/catching-blocking-sigint-during-system-call
for i in range(20):
try:
#print "blocking recv (i = %s), pid=%s"%(i, os.getpid())
r = self._conn.recv(n)
#log("n=%s; received: '%s' of len %s"%(n,r, len(r)))
return r
except OSError as e:
#print("socket.error, msg=%s"%msg)
if e.errno != 4:
raise
raise EOFError
def recv(self):
n = self._recv(4)
if len(n) < 4:
raise EOFError
n = struct.unpack('>L', n)[0] # big endian 32 bits
s = self._recv(n)
while len(s) < n:
t = self._recv(n - len(s))
if len(t) == 0:
raise EOFError
s += t
if six.PY3:
# bystream to string, in particular s[0] will be e.g. 'j' and not 106
s = s.deocde()
if s[0] == 'j':
try:
return 'json', json.loads(s[1:])
except Exception as msg:
log("Unable to parse JSON '%s'" % s[1:])
raise
elif s[0] == 'b':
return 'blob', s[1:]
raise ValueError("unknown message type '%s'" % s[0])
def truncate_text(s, max_size):
    """Return (text, truncated): *s* clipped to *max_size* characters, with a
    "[...]" marker appended when clipping happened."""
    if len(s) <= max_size:
        return s, False
    return s[:max_size] + "[...]", True
def truncate_text_warn(s, max_size, name):
    r"""
    Truncate text if too long and format a warning message.

    INPUT:

    - ``s`` -- string to be truncated
    - ``max_size`` -- integer truncation limit
    - ``name`` -- string, name of limiting parameter

    OUTPUT:

    a triple (possibly-truncated string, was-truncated bool,
    warning message or '' when nothing was truncated)
    """
    template = "WARNING: Output: %s truncated by %s to %s. Type 'smc?' to learn how to raise the output limit."
    size = len(s)
    if size <= max_size:
        return s, False, ''
    return s[:max_size] + "[...]", True, template % (size, name, max_size)
class Message(object):
    """Factory for the JSON message dicts of the sage_server wire protocol."""

    def _new(self, event, props=None):
        # Build {'event': event, **props}, dropping the 'self' key that
        # callers passing locals() inevitably include.
        # (Uses props=None instead of the original mutable default props={}.)
        m = {'event': event}
        if props:
            for key, val in props.items():
                if key != 'self':
                    m[key] = val
        return m

    def start_session(self):
        return self._new('start_session')

    def session_description(self, pid):
        return self._new('session_description', {'pid': pid})

    def send_signal(self, pid, signal=signal.SIGINT):
        return self._new('send_signal', locals())

    def terminate_session(self, done=True):
        return self._new('terminate_session', locals())

    def execute_code(self, id, code, preparse=True):
        return self._new('execute_code', locals())

    def execute_javascript(self, code, obj=None, coffeescript=False):
        return self._new('execute_javascript', locals())

    def output(
            self,
            id,
            stdout=None,
            stderr=None,
            code=None,
            html=None,
            javascript=None,
            coffeescript=None,
            interact=None,
            md=None,
            tex=None,
            d3=None,
            file=None,
            raw_input=None,
            obj=None,
            once=None,
            hide=None,
            show=None,
            events=None,
            clear=None,
            delete_last=None,
            done=False  # CRITICAL: done must be specified for multi-response; this is assumed by sage_session.coffee; otherwise response assumed single.
    ):
        """Build the 'output' message for cell *id*.

        Oversized text fields are clipped to the sage_server.MAX_* limits and
        a warning for *each* truncated field is appended to m['stderr'].
        (BUGFIX: the original overwrote its truncation flag on every field,
        so a truncated field followed by an untruncated one lost its warning.)
        """
        m = self._new('output')
        m['id'] = id
        t = truncate_text_warn
        warnings = []  # one warning string per truncated field
        from . import sage_server  # we do this so that the user can customize the MAX's below.
        if code is not None:
            code['source'], truncated, wmsg = t(code['source'],
                                                sage_server.MAX_CODE_SIZE,
                                                'MAX_CODE_SIZE')
            if truncated:
                warnings.append(wmsg)
            m['code'] = code
        if stderr is not None and len(stderr) > 0:
            m['stderr'], truncated, wmsg = t(stderr,
                                             sage_server.MAX_STDERR_SIZE,
                                             'MAX_STDERR_SIZE')
            if truncated:
                warnings.append(wmsg)
        if stdout is not None and len(stdout) > 0:
            m['stdout'], truncated, wmsg = t(stdout,
                                             sage_server.MAX_STDOUT_SIZE,
                                             'MAX_STDOUT_SIZE')
            if truncated:
                warnings.append(wmsg)
        if html is not None and len(html) > 0:
            m['html'], truncated, wmsg = t(html, sage_server.MAX_HTML_SIZE,
                                           'MAX_HTML_SIZE')
            if truncated:
                warnings.append(wmsg)
        if md is not None and len(md) > 0:
            m['md'], truncated, wmsg = t(md, sage_server.MAX_MD_SIZE,
                                         'MAX_MD_SIZE')
            if truncated:
                warnings.append(wmsg)
        if tex is not None and len(tex) > 0:
            tex['tex'], truncated, wmsg = t(tex['tex'],
                                            sage_server.MAX_TEX_SIZE,
                                            'MAX_TEX_SIZE')
            if truncated:
                warnings.append(wmsg)
            m['tex'] = tex
        if javascript is not None: m['javascript'] = javascript
        if coffeescript is not None: m['coffeescript'] = coffeescript
        if interact is not None: m['interact'] = interact
        if d3 is not None: m['d3'] = d3
        if obj is not None: m['obj'] = json.dumps(obj)
        if file is not None: m['file'] = file  # = {'filename':..., 'uuid':...}
        if raw_input is not None: m['raw_input'] = raw_input
        if done is not None: m['done'] = done
        if once is not None: m['once'] = once
        if hide is not None: m['hide'] = hide
        if show is not None: m['show'] = show
        if events is not None: m['events'] = events
        if clear is not None: m['clear'] = clear
        if delete_last is not None: m['delete_last'] = delete_last
        if warnings:
            m['stderr'] = m.get('stderr', '') + '\n' + '\n'.join(warnings)
        return m

    def introspect_completions(self, id, completions, target):
        m = self._new('introspect_completions', locals())
        m['id'] = id
        return m

    def introspect_docstring(self, id, docstring, target):
        m = self._new('introspect_docstring', locals())
        m['id'] = id
        return m

    def introspect_source_code(self, id, source_code, target):
        m = self._new('introspect_source_code', locals())
        m['id'] = id
        return m
# Singleton message factory and the Unix account this server runs as.
message = Message()
whoami = os.environ['USER']
def client1(port, hostname):
    """Simple interactive command-line client for a running sage_server.

    Connects to (hostname, port), then read-eval-prints until EOF (Ctrl-D).
    Ctrl-C opens a *second* connection solely to deliver an interrupt signal
    to the remote session, then resumes the prompt loop.
    """
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((hostname, int(port)))
    conn = ConnectionJSON(conn)
    conn.send_json(message.start_session())
    typ, mesg = conn.recv()
    pid = mesg['pid']
    print(("PID = %s" % pid))
    id = 0
    while True:
        try:
            code = sage_parsing.get_input('sage [%s]: ' % id)
            if code is None:  # EOF
                break
            conn.send_json(message.execute_code(code=code, id=id))
            # Drain output messages for this cell until its 'done' marker.
            while True:
                typ, mesg = conn.recv()
                if mesg['event'] == 'terminate_session':
                    return
                elif mesg['event'] == 'output':
                    if 'stdout' in mesg:
                        sys.stdout.write(mesg['stdout'])
                        sys.stdout.flush()
                    if 'stderr' in mesg:
                        print(('! ' +
                               '\n! '.join(mesg['stderr'].splitlines())))
                    # Only stop on the done marker for *this* cell (or later).
                    if 'done' in mesg and mesg['id'] >= id:
                        break
            id += 1
        except KeyboardInterrupt:
            print("Sending interrupt signal")
            conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn2.connect((hostname, int(port)))
            conn2 = ConnectionJSON(conn2)
            conn2.send_json(message.send_signal(pid))
            del conn2
            id += 1
    conn.send_json(message.terminate_session())
    print("\nExiting Sage client.")
class BufferedOutputStream(object):
    """File-like object that batches writes and forwards them to a callback.

    The callback *f* is invoked as f(text, done=bool) whenever the buffer
    reaches flush_size characters or flush_interval seconds have elapsed
    since the last flush.
    """

    def __init__(self, f, flush_size=4096, flush_interval=.1):
        self._f = f
        self._buf = ''
        self._flush_size = flush_size
        self._flush_interval = flush_interval
        self.reset()

    def reset(self):
        # Restart the flush-interval clock.
        self._last_flush_time = time.time()

    def fileno(self):
        return 0

    def write(self, output):
        # CRITICAL: output must be valid PostgreSQL TEXT, so strip NUL bytes.
        # This only affects output *rendered* in the browser, so dropping the
        # bytes is safe.
        try:
            self._buf += output.replace('\x00', '')
        except UnicodeDecodeError:
            self._buf += output.decode('utf-8').replace('\x00', '')
        now = time.time()
        buffer_full = len(self._buf) >= self._flush_size
        interval_elapsed = (now - self._last_flush_time) >= self._flush_interval
        if buffer_full or interval_elapsed:
            self.flush()
            self._last_flush_time = now

    def flush(self, done=False):
        # An empty buffer is only worth sending when it carries done=True.
        if not self._buf and not done:
            return
        try:
            self._f(self._buf, done=done)
        except UnicodeDecodeError:
            self._f(str(self._buf, errors='replace'), done=done)
        self._buf = ''

    def isatty(self):
        return False
# This will *have* to be re-done using Cython for speed.
# This will *have* to be re-done using Cython for speed.
class Namespace(dict):
    """dict subclass with change/delete event hooks.

    Handlers registered via on('change', key, f) fire as f(new_value) when
    that key is assigned; on('change', None, f) fires as f(key, new_value)
    for every assignment.  'del' handlers are analogous: f() for a specific
    key, f(key) for the None wildcard.
    """

    def __init__(self, x):
        self._on_change = {}
        self._on_del = {}
        dict.__init__(self, x)

    def on(self, event, x, f):
        """Register handler f for 'change' or 'del' events on key x (x=None means all keys)."""
        if event == 'change':
            if x not in self._on_change:
                self._on_change[x] = []
            self._on_change[x].append(f)
        elif event == 'del':
            if x not in self._on_del:
                self._on_del[x] = []
            self._on_del[x].append(f)

    def remove(self, event, x, f):
        """Unregister handler f; silently a no-op if it was never registered.

        BUGFIX: the original called list.find(), which does not exist, so
        every call to remove() raised AttributeError.
        """
        if event == 'change' and x in self._on_change:
            handlers = self._on_change[x]
            if f in handlers:
                handlers.remove(f)
            if len(handlers) == 0:
                del self._on_change[x]
        elif event == 'del' and x in self._on_del:
            handlers = self._on_del[x]
            if f in handlers:
                handlers.remove(f)
            if len(handlers) == 0:
                del self._on_del[x]

    def __setitem__(self, x, y):
        dict.__setitem__(self, x, y)
        # Handler errors are printed rather than raised so a broken handler
        # cannot undo the assignment itself.
        try:
            if x in self._on_change:
                for f in self._on_change[x]:
                    f(y)
            if None in self._on_change:
                for f in self._on_change[None]:
                    f(x, y)
        except Exception as mesg:
            print(mesg)

    def __delitem__(self, x):
        try:
            if x in self._on_del:
                for f in self._on_del[x]:
                    f()
            if None in self._on_del:
                for f in self._on_del[None]:
                    f(x)
        except Exception as mesg:
            print(mesg)
        dict.__delitem__(self, x)

    def set(self, x, y, do_not_trigger=None):
        """Set x=y, firing change handlers except those listed in do_not_trigger."""
        dict.__setitem__(self, x, y)
        if x in self._on_change:
            if do_not_trigger is None:
                do_not_trigger = []
            for f in self._on_change[x]:
                if f not in do_not_trigger:
                    f(y)
        if None in self._on_change:
            for f in self._on_change[None]:
                f(x, y)
class TemporaryURL:
    """Value object pairing a blob URL with its time-to-live in seconds
    (ttl == 0 means the blob is permanently available)."""

    def __init__(self, url, ttl):
        self.url = url
        self.ttl = ttl

    def __str__(self):
        return self.url

    def __repr__(self):
        return repr(self.url)
# The single global namespace shared by all cell executions in this session.
namespace = Namespace({})
class Salvus(object):
"""
Cell execution state object and wrapper for access to special CoCalc Server functionality.
An instance of this object is created each time you execute a cell. It has various methods
for sending different types of output messages, links to files, etc. Type 'help(smc)' for
more details.
OUTPUT LIMITATIONS -- There is an absolute limit on the number of messages output for a given
cell, and also the size of the output message for each cell. You can access or change
those limits dynamically in a worksheet as follows by viewing or changing any of the
following variables::
sage_server.MAX_STDOUT_SIZE # max length of each stdout output message
sage_server.MAX_STDERR_SIZE # max length of each stderr output message
sage_server.MAX_MD_SIZE # max length of each md (markdown) output message
sage_server.MAX_HTML_SIZE # max length of each html output message
sage_server.MAX_TEX_SIZE # max length of tex output message
sage_server.MAX_OUTPUT_MESSAGES # max number of messages output for a cell.
And::
sage_server.MAX_OUTPUT # max total character output for a single cell; computation
# terminated/truncated if sum of above exceeds this.
"""
Namespace = Namespace
_prefix = ''
_postfix = ''
_default_mode = 'sage'
_py_features = {}
def _flush_stdio(self):
"""
Flush the standard output streams. This should be called before sending any message
that produces output.
"""
sys.stdout.flush()
sys.stderr.flush()
    def __repr__(self):
        # Render as the empty string so a bare `salvus` in a cell shows nothing.
        return ''
    def __init__(self, conn, id, data=None, cell_id=None, message_queue=None):
        """Bind this execution-state object to a connection and a cell.

        - ``conn`` -- ConnectionJSON used to send output messages
        - ``id`` -- id of the execute_code message this cell answers
        - ``data``, ``cell_id``, ``message_queue`` -- optional per-cell context
        """
        self._conn = conn
        self._num_output_messages = 0
        self._total_output_length = 0
        self._output_warning_sent = False
        self._id = id
        self._done = True  # done=self._done when last execute message is sent; e.g., set self._done = False to not close cell on code term.
        self.data = data
        self.cell_id = cell_id
        self.namespace = namespace
        self.message_queue = message_queue
        self.code_decorators = []  # gets reset if there are code decorators
        # Alias: someday remove all references to "salvus" and instead use smc.
        # For now this alias is easier to think of and use.
        namespace['smc'] = namespace[
            'salvus'] = self  # beware of circular ref?
        # Monkey patch in our "require" command.
        namespace['require'] = self.require
        # Make the salvus object itself available when doing "from sage.all import *".
        import sage.all
        sage.all.salvus = self
    def _send_output(self, *args, **kwds):
        """Send one output message for this cell, enforcing the per-cell
        message-count and total-size limits.

        Raises KeyboardInterrupt (after sending an explanatory stderr message)
        to abort the running computation once either limit is exceeded.
        """
        if self._output_warning_sent:
            raise KeyboardInterrupt
        mesg = message.output(*args, **kwds)
        # 'once' messages (e.g. interact updates) don't count toward the limit.
        if not mesg.get('once', False):
            self._num_output_messages += 1
        from . import sage_server
        if self._num_output_messages > sage_server.MAX_OUTPUT_MESSAGES:
            self._output_warning_sent = True
            err = "\nToo many output messages: %s (at most %s per cell -- type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
                self._num_output_messages, sage_server.MAX_OUTPUT_MESSAGES)
            self._conn.send_json(
                message.output(stderr=err, id=self._id, once=False, done=True))
            raise KeyboardInterrupt
        n = self._conn.send_json(mesg)
        self._total_output_length += n
        if self._total_output_length > sage_server.MAX_OUTPUT:
            self._output_warning_sent = True
            err = "\nOutput too long: %s -- MAX_OUTPUT (=%s) exceeded (type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
                self._total_output_length, sage_server.MAX_OUTPUT)
            self._conn.send_json(
                message.output(stderr=err, id=self._id, once=False, done=True))
            raise KeyboardInterrupt
    def obj(self, obj, done=False):
        # Send a Python object (JSON-serialized by message.output) as cell output.
        self._send_output(obj=obj, id=self._id, done=done)
        return self
    def link(self, filename, label=None, foreground=True, cls=''):
        """
        Output a clickable link to a file somewhere in this project.  The filename
        path must be relative to the current working directory of the Python process.

        The simplest way to use this is

             salvus.link("../name/of/file")  # any relative path to any file

        This creates a link, which when clicked on, opens that file in the foreground.

        If the filename is the name of a directory, clicking will instead
        open the file browser on that directory:

             salvus.link("../name/of/directory")  # clicking on the resulting link opens a directory

        If you would like a button instead of a link, pass cls='btn'.  You can use any of
        the standard Bootstrap button classes, e.g., btn-small, btn-large, btn-success, etc.

        If you would like to change the text in the link (or button) to something
        besides the default (filename), just pass arbitrary HTML to the label= option.

        INPUT:

        - filename -- a relative path to a file or directory
        - label -- (default: the filename) html label for the link
        - foreground -- (default: True); if True, opens link in the foreground
        - cls -- (default: '') optional CSS classes, such as 'btn'.

        EXAMPLES:

        Use as a line decorator::

            %salvus.link name/of/file.foo

        Make a button::

            salvus.link("foo/bar/", label="The Bar Directory", cls='btn')

        Make two big blue buttons with plots in them::

            plot(sin, 0, 20).save('sin.png')
            plot(cos, 0, 20).save('cos.png')
            for img in ['sin.png', 'cos.png']:
                salvus.link(img, label="<img width='150px' src='%s'>"%salvus.file(img, show=False), cls='btn btn-large btn-primary')
        """
        # Client-side actions expect the path relative to $HOME.
        path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
        if label is None:
            label = filename
        id = uuid()
        # Emit an empty anchor, then fill its label and click handler via JS so
        # arbitrary HTML labels are inserted safely as an obj payload.
        self.html("<a class='%s' style='cursor:pointer'; id='%s'></a>" %
                  (cls, id))
        s = "$('#%s').html(obj.label).click(function() {%s; return false;});" % (
            id, self._action(path, foreground))
        self.javascript(s,
                        obj={
                            'label': label,
                            'path': path,
                            'foreground': foreground
                        },
                        once=False)
def _action(self, path, foreground):
if os.path.isdir(path):
if foreground:
action = "worksheet.project_page.open_directory(obj.path);"
else:
action = "worksheet.project_page.set_current_path(obj.path);"
else:
action = "worksheet.project_page.open_file({'path':obj.path, 'foreground': obj.foreground});"
return action
    def open_tab(self, filename, foreground=True):
        """
        Open a new file (or directory) document in another tab.
        See the documentation for salvus.link.
        """
        # Client-side actions expect the path relative to $HOME.
        path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
        self.javascript(self._action(path, foreground),
                        obj={
                            'path': path,
                            'foreground': foreground
                        },
                        once=True)
    def close_tab(self, filename):
        """
        Close an open file tab.  The filename is relative to the current working directory.
        """
        # once=True: this action should run exactly one time, not on replay.
        self.javascript("worksheet.project_page.close_file(obj)",
                        obj=filename,
                        once=True)
    def threed(
            self,
            g,  # sage Graphic3d object.
            width=None,
            height=None,
            frame=True,  # True/False or {'color':'black', 'thickness':.4, 'labels':True, 'fontsize':14, 'draw':True,
            # 'xmin':?, 'xmax':?, 'ymin':?, 'ymax':?, 'zmin':?, 'zmax':?}
            background=None,
            foreground=None,
            spin=False,
            aspect_ratio=None,
            frame_aspect_ratio=None,  # synonym for aspect_ratio
            done=False,
            renderer=None,  # None, 'webgl', or 'canvas'
    ):
        """Render the Sage 3d graphics object *g* as output of this cell.

        The scene (options + JSON-able geometry) is stored as a blob in the
        database and referenced from the output message as a ``*.sage3d``
        file, rather than embedded directly in the message.
        """
        from .graphics import graphics3d_to_jsonable, json_float as f

        # process options, combining ones set explicitly above with ones inherited from 3d scene
        opts = {
            'width': width,
            'height': height,
            'background': background,
            'foreground': foreground,
            'spin': spin,
            'aspect_ratio': aspect_ratio,
            'renderer': renderer
        }

        extra_kwds = {} if g._extra_kwds is None else g._extra_kwds

        # clean up and normalize aspect_ratio option
        if aspect_ratio is None:
            if frame_aspect_ratio is not None:
                aspect_ratio = frame_aspect_ratio
            elif 'frame_aspect_ratio' in extra_kwds:
                aspect_ratio = extra_kwds['frame_aspect_ratio']
            elif 'aspect_ratio' in extra_kwds:
                aspect_ratio = extra_kwds['aspect_ratio']
        if aspect_ratio is not None:
            if aspect_ratio == 1 or aspect_ratio == "automatic":
                aspect_ratio = None
            elif not (isinstance(aspect_ratio,
                                 (list, tuple)) and len(aspect_ratio) == 3):
                raise TypeError(
                    "aspect_ratio must be None, 1 or a 3-tuple, but it is '%s'"
                    % (aspect_ratio, ))
            else:
                aspect_ratio = [f(x) for x in aspect_ratio]

        opts['aspect_ratio'] = aspect_ratio

        # scene-level keywords fill in any option not given explicitly above
        for k in [
                'spin', 'height', 'width', 'background', 'foreground',
                'renderer'
        ]:
            if k in extra_kwds and not opts.get(k, None):
                opts[k] = extra_kwds[k]

        if not isinstance(opts['spin'], bool):
            opts['spin'] = f(opts['spin'])
        opts['width'] = f(opts['width'])
        opts['height'] = f(opts['height'])

        # determine the frame
        b = g.bounding_box()
        xmin, xmax, ymin, ymax, zmin, zmax = b[0][0], b[1][0], b[0][1], b[1][
            1], b[0][2], b[1][2]
        fr = opts['frame'] = {
            'xmin': f(xmin),
            'xmax': f(xmax),
            'ymin': f(ymin),
            'ymax': f(ymax),
            'zmin': f(zmin),
            'zmax': f(zmax)
        }

        if isinstance(frame, dict):
            for k in list(fr.keys()):
                if k in frame:
                    fr[k] = f(frame[k])
            fr['draw'] = frame.get('draw', True)
            fr['color'] = frame.get('color', None)
            fr['thickness'] = f(frame.get('thickness', None))
            fr['labels'] = frame.get('labels', None)
            if 'fontsize' in frame:
                fr['fontsize'] = int(frame['fontsize'])
        elif isinstance(frame, bool):
            fr['draw'] = frame

        # convert the Sage graphics object to a JSON object that can be rendered
        scene = {'opts': opts, 'obj': graphics3d_to_jsonable(g)}

        # Store that object in the database, rather than sending it directly as an output message.
        # We do this since obj can easily be quite large/complicated, and managing it as part of the
        # document is too slow and doesn't scale.
        blob = json.dumps(scene, separators=(',', ':'))
        uuid = self._conn.send_blob(blob)

        # flush output (so any text appears before 3d graphics, in case they are interleaved)
        self._flush_stdio()

        # send message pointing to the 3d 'file', which will get downloaded from database
        self._send_output(id=self._id,
                          file={
                              'filename': unicode8("%s.sage3d" % uuid),
                              'uuid': uuid
                          },
                          done=done)
    def d3_graph(self, g, **kwds):
        """Render the Sage graph *g* interactively with d3.js; extra keyword
        arguments pass through to the graph-to-JSON converter."""
        from .graphics import graph_to_d3_jsonable
        self._send_output(id=self._id,
                          d3={
                              "viewer": "graph",
                              "data": graph_to_d3_jsonable(g, **kwds)
                          })
def file(self,
filename,
show=True,
done=False,
download=False,
once=False,
events=None,
raw=False,
text=None):
"""
Display or provide a link to the given file. Raises a RuntimeError if this
is not possible, e.g, if the file is too large.
If show=True (the default), the browser will show the file,
or provide a clickable link to it if there is no way to show it.
If text is also given that will be used instead of the path to the file.
If show=False, this function returns an object T such that
T.url (or str(t)) is a string of the form "/blobs/filename?uuid=the_uuid"
that can be used to access the file even if the file is immediately
deleted after calling this function (the file is stored in a database).
Also, T.ttl is the time to live (in seconds) of the object. A ttl of
0 means the object is permanently available.
raw=False (the default):
If you use the URL
/blobs/filename?uuid=the_uuid&download
then the server will include a header that tells the browser to
download the file to disk instead of displaying it. Only relatively
small files can be made available this way. However, they remain
available (for a day) even *after* the file is deleted.
NOTE: It is safe to delete the file immediately after this
function (salvus.file) returns.
raw=True:
Instead, the URL is to the raw file, which is served directly
from the project:
/project-id/raw/path/to/filename
This will only work if the file is not deleted; however, arbitrarily
large files can be streamed this way.
This function creates an output message {file:...}; if the user saves
a worksheet containing this message, then any referenced blobs are made
permanent in the database.
The uuid is based on the Sha-1 hash of the file content (it is computed using the
function sage_server.uuidsha1). Any two files with the same content have the
same Sha1 hash.
"""
filename = unicode8(filename)
if raw:
info = self.project_info()
path = os.path.abspath(filename)
home = os.environ['HOME'] + '/'
if path.startswith(home):
path = path[len(home):]
else:
raise ValueError(
"can only send raw files in your home directory")
url = os.path.join('/', info['base_url'].strip('/'),
info['project_id'], 'raw', path.lstrip('/'))
if show:
self._flush_stdio()
self._send_output(id=self._id,
once=once,
file={
'filename': filename,
'url': url,
'show': show,
'text': text
},
events=events,
done=done)
return
else:
return TemporaryURL(url=url, ttl=0)
file_uuid = self._conn.send_file(filename)
mesg = None
while mesg is None:
self.message_queue.recv()
for i, (typ, m) in enumerate(self.message_queue.queue):
if typ == 'json' and m.get('event') == 'save_blob' and m.get(
'sha1') == file_uuid:
mesg = m
del self.message_queue[i]
break
if 'error' in mesg:
raise RuntimeError("error saving blob -- %s" % mesg['error'])
self._flush_stdio()
self._send_output(id=self._id,
once=once,
file={
'filename': filename,
'uuid': file_uuid,
'show': show,
'text': text
},
events=events,
done=done)
if not show:
info = self.project_info()
url = "%s/blobs/%s?uuid=%s" % (info['base_url'], filename,
file_uuid)
if download:
url += '?download'
return TemporaryURL(url=url, ttl=mesg.get('ttl', 0))
def python_future_feature(self, feature=None, enable=None):
"""
Allow users to enable, disable, and query the features in the python __future__ module.
"""
if feature is None:
if enable is not None:
raise ValueError(
"enable may not be specified when feature is None")
return sorted(Salvus._py_features.keys())
attr = getattr(future, feature, None)
if (feature not in future.all_feature_names) or (
attr is None) or not isinstance(attr, future._Feature):
raise RuntimeError("future feature %.50r is not defined" %
(feature, ))
if enable is None:
return feature in Salvus._py_features
if enable:
Salvus._py_features[feature] = attr
else:
try:
del Salvus._py_features[feature]
except KeyError:
pass
def default_mode(self, mode=None):
"""
Set the default mode for cell evaluation. This is equivalent
to putting %mode at the top of any cell that does not start
with %. Use salvus.default_mode() to return the current mode.
Use salvus.default_mode("") to have no default mode.
This is implemented using salvus.cell_prefix.
"""
if mode is None:
return Salvus._default_mode
Salvus._default_mode = mode
if mode == "sage":
self.cell_prefix("")
else:
self.cell_prefix("%" + mode)
def cell_prefix(self, prefix=None):
    """
    Get or set code that is textually prepended to the input before
    evaluating any cell, unless the first character of the cell is a %.

    To append code at the end instead, use cell_postfix.

    INPUT:

    - ``prefix`` -- None (to return the current prefix) or a string
      ("" to disable)

    EXAMPLES:

    Make it so every cell is timed:

        salvus.cell_prefix('%time')

    Make it so cells are typeset using latex, and latex comments are
    allowed even as the first line:

        salvus.cell_prefix('%latex')

        %sage salvus.cell_prefix('')

    Evaluate each cell using GP (Pari) and display the time it took:

        salvus.cell_prefix('%time\\n%gp')

        %sage salvus.cell_prefix('')  # back to normal
    """
    if prefix is not None:
        Salvus._prefix = prefix
    else:
        return Salvus._prefix
def cell_postfix(self, postfix=None):
    """
    Get or set code that is textually appended to the input before
    evaluating a cell.

    To prepend code at the beginning instead, use cell_prefix.

    INPUT:

    - ``postfix`` -- None (to return the current postfix) or a string
      ("" to disable)

    EXAMPLES:

    Print memory usage after evaluating each cell:

        salvus.cell_postfix('print("%s MB used"%int(get_memory_usage()))')

    Return to normal:

        salvus.set_cell_postfix('')
    """
    if postfix is not None:
        Salvus._postfix = postfix
    else:
        return Salvus._postfix
def execute(self, code, namespace=None, preparse=True, locals=None):
    """
    Evaluate the string ``code`` in ``namespace`` (default: the session
    namespace), one top-level block at a time.

    - each block is (optionally) run through the Sage preparser
    - blocks ending in '?' / '??' trigger introspection (docstring /
      source code via self.code) instead of execution
    - __future__ features enabled via python_future_feature are applied
      as compile flags
    - the first two blocks ever executed trigger one-time session setup
      (show_identifiers state, SAGE_STARTUP_FILE)
    - on error, warnings plus a traceback and the 1-based line range are
      written to stderr and the remaining blocks are skipped
    """
    ascii_warn = False
    code_error = False  # NOTE(review): assigned but never used below
    # Pre-scan for non-ascii input so a later failure can hint at it.
    if sys.getdefaultencoding() == 'ascii':
        for c in code:
            if ord(c) >= 128:
                ascii_warn = True
                break
    if namespace is None:
        namespace = self.namespace
    # clear pylab figure (takes a few microseconds)
    if pylab is not None:
        pylab.clf()
    # OR together the compile flags of every __future__ feature the user
    # has enabled so far in this session.
    compile_flags = reduce(operator.or_,
                           (feature.compiler_flag
                            for feature in Salvus._py_features.values()),
                           0)

    #code = sage_parsing.strip_leading_prompts(code) # broken -- wrong on "def foo(x):\n print(x)"
    blocks = sage_parsing.divide_into_blocks(code)

    try:
        import sage.repl
        # CRITICAL -- we do NOT import sage.repl.interpreter!!!!!!!
        # That would waste several seconds importing ipython and much more, which is just dumb.
        # The only reason this is needed below is if the user has run preparser(False), which
        # would cause sage.repl.interpreter to be imported at that point (as preparser is
        # lazy imported.)
        sage_repl_interpreter = sage.repl.interpreter
    except:
        pass  # expected behavior usually, since sage.repl.interpreter usually not imported (only used by command line...)

    import sage.misc.session

    for start, stop, block in blocks:
        # if import sage.repl.interpreter fails, sag_repl_interpreter is unreferenced
        try:
            do_pp = getattr(sage_repl_interpreter, '_do_preparse', True)
        except:
            do_pp = True
        if preparse and do_pp:
            block = sage_parsing.preparse_code(block)
        sys.stdout.reset()
        sys.stderr.reset()
        try:
            b = block.rstrip()
            # get rid of comments at the end of the line -- issue #1835
            #from ushlex import shlex
            #s = shlex(b)
            #s.commenters = '#'
            #s.quotes = '"\''
            #b = ''.join(s)
            # e.g. now a line like 'x = test? # bar' becomes 'x=test?'
            if b.endswith('??'):
                # '??' -> show source code of the object
                p = sage_parsing.introspect(b,
                                            namespace=namespace,
                                            preparse=False)
                self.code(source=p['result'], mode="python")
            elif b.endswith('?'):
                # '?' -> show the docstring (rendered as reST)
                p = sage_parsing.introspect(b,
                                            namespace=namespace,
                                            preparse=False)
                self.code(source=p['result'], mode="text/x-rst")
            else:
                reload_attached_files_if_mod_smc()
                if execute.count < 2:
                    execute.count += 1
                    if execute.count == 2:
                        # this fixup has to happen after first block has executed (os.chdir etc)
                        # but before user assigns any variable in worksheet
                        # sage.misc.session.init() is not called until first call of show_identifiers
                        # BUGFIX: be careful to *NOT* assign to _!! see https://github.com/sagemathinc/cocalc/issues/1107
                        block2 = "sage.misc.session.state_at_init = dict(globals());sage.misc.session._dummy=sage.misc.session.show_identifiers();\n"
                        exec(compile(block2, '', 'single'), namespace,
                             locals)
                        b2a = """
if 'SAGE_STARTUP_FILE' in os.environ and os.path.isfile(os.environ['SAGE_STARTUP_FILE']):
    try:
        load(os.environ['SAGE_STARTUP_FILE'])
    except:
        sys.stdout.flush()
        sys.stderr.write('\\nException loading startup file: {}\\n'.format(os.environ['SAGE_STARTUP_FILE']))
        sys.stderr.flush()
        raise
"""
                        exec(compile(b2a, '', 'exec'), namespace, locals)
                # A block may itself contain "from __future__ import ...";
                # fold those flags in for this and later compiles.
                features = sage_parsing.get_future_features(
                    block, 'single')
                if features:
                    compile_flags = reduce(
                        operator.or_, (feature.compiler_flag
                                       for feature in features.values()),
                        compile_flags)
                exec(
                    compile(block + '\n',
                            '',
                            'single',
                            flags=compile_flags), namespace, locals)
                if features:
                    Salvus._py_features.update(features)
            sys.stdout.flush()
            sys.stderr.flush()
        except:
            if ascii_warn:
                sys.stderr.write(
                    '\n\n*** WARNING: Code contains non-ascii characters ***\n'
                )
                # Smart quotes are the most common culprit -- call them out.
                for c in '\u201c\u201d':
                    if c in code:
                        sys.stderr.write(
                            '*** Maybe the character < %s > should be replaced by < " > ? ***\n'
                            % c)
                        break
                sys.stderr.write('\n\n')

            # NOTE(review): 'from exceptions import ...' is Python-2-only;
            # on Python 3 this line itself raises inside the handler --
            # confirm which interpreter this file targets.
            from exceptions import SyntaxError, TypeError
            exc_type, _, _ = sys.exc_info()
            if exc_type in [SyntaxError, TypeError]:
                from .sage_parsing import strip_string_literals
                code0, _, _ = strip_string_literals(code)
                implicit_mul = RE_POSSIBLE_IMPLICIT_MUL.findall(code0)
                if len(implicit_mul) > 0:
                    implicit_mul_list = ', '.join(
                        str(_) for _ in implicit_mul)
                    # we know there is a SyntaxError and there could be an implicit multiplication
                    sys.stderr.write(
                        '\n\n*** WARNING: Code contains possible implicit multiplication ***\n'
                    )
                    sys.stderr.write(
                        '*** Check if any of [ %s ] need a "*" sign for multiplication, e.g. 5x should be 5*x ! ***\n\n'
                        % implicit_mul_list)

            sys.stdout.flush()
            sys.stderr.write('Error in lines %s-%s\n' %
                             (start + 1, stop + 1))
            traceback.print_exc()
            sys.stderr.flush()
            break
def execute_with_code_decorators(self,
                                 code_decorators,
                                 code,
                                 preparse=True,
                                 namespace=None,
                                 locals=None):
    """
    salvus.execute_with_code_decorators is used when evaluating
    code blocks that are set to any non-default code_decorator.

    Each decorator may implement (in decreasing priority) an ``eval``
    method (consumes the code directly), ``before``/``after`` hooks, or
    be a plain callable that transforms the code string; whatever code
    string survives is finally run through self.execute.
    """
    import sage  # used below as a code decorator
    if isinstance(code_decorators, str):
        code_decorators = [code_decorators]

    if preparse:
        code_decorators = list(
            map(sage_parsing.preparse_code, code_decorators))

    # Each decorator name is evaluated in the user namespace to get the
    # actual decorator object (eval is for backward compatibility).
    code_decorators = [
        eval(code_decorator, self.namespace)
        for code_decorator in code_decorators
    ]

    # The code itself may want to know exactly what code decorators are in effect.
    # For example, r.eval can do extra things when being used as a decorator.
    self.code_decorators = code_decorators

    for i, code_decorator in enumerate(code_decorators):
        # eval is for backward compatibility
        if not hasattr(code_decorator, 'eval') and hasattr(
                code_decorator, 'before'):
            code_decorators[i] = code_decorator.before(code)

    # Apply decorators innermost-first (hence reversed).
    for code_decorator in reversed(code_decorators):
        # eval is for backward compatibility
        if hasattr(code_decorator, 'eval'):
            print((code_decorator.eval(code, locals=self.namespace)),
                  end=' ')
            code = ''
        elif code_decorator is sage:
            # special case -- the sage module (i.e., %sage) should do nothing.
            pass
        else:
            code = code_decorator(code)
        if code is None:
            code = ''

    if code != '' and isinstance(code, str):
        self.execute(code,
                     preparse=preparse,
                     namespace=namespace,
                     locals=locals)

    for code_decorator in code_decorators:
        if not hasattr(code_decorator, 'eval') and hasattr(
                code_decorator, 'after'):
            code_decorator.after(code)
def html(self, html, done=False, once=None):
    """
    Append an html fragment to the output stream of the cell.

    EXAMPLE:

        salvus.html("<b>Hi</b>")
    """
    self._flush_stdio()
    self._send_output(id=self._id,
                      once=once,
                      done=done,
                      html=unicode8(html))
def md(self, md, done=False, once=None):
    """
    Append markdown to the output stream of the cell.

    EXAMPLE:

        salvus.md("**Hi**")
    """
    self._flush_stdio()
    self._send_output(id=self._id, once=once, done=done, md=unicode8(md))
def pdf(self, filename, **kwds):
    # Render a PDF file in the worksheet; delegates to sage_salvus.show_pdf
    # (the docstring is copied from show_pdf at module setup further below).
    sage_salvus.show_pdf(filename, **kwds)
def tex(self, obj, display=False, done=False, once=None, **kwds):
    """
    Display obj nicely using TeX rendering.

    INPUT:

    - obj -- a latex string, or an object that is converted to TeX via
      the namespace's latex function (extra kwds are passed through)
    - display -- (default: False); if True, typeset as display math
      (so centered, etc.)
    """
    self._flush_stdio()
    if isinstance(obj, str):
        rendered = obj
    else:
        rendered = self.namespace['latex'](obj, **kwds)
    self._send_output(id=self._id,
                      once=once,
                      done=done,
                      tex={
                          'tex': rendered,
                          'display': display
                      })
    return self
def start_executing(self):
    # Tell the client this cell has started running (not done yet).
    self._send_output(done=False, id=self._id)

def clear(self, done=False):
    # Ask the client to clear all output of this cell.
    self._send_output(clear=True, id=self._id, done=done)

def delete_last_output(self, done=False):
    # Ask the client to delete this cell's most recent output message.
    self._send_output(delete_last=True, id=self._id, done=done)
def stdout(self, output, done=False, once=None):
    """
    Send the string output (or unicode8(output) if output is not a
    string) to the standard output stream of the compute cell.

    INPUT:

    - output -- string or object
    """
    if not isinstance(output, str):
        output = unicode8(output)
    self._send_output(id=self._id, once=once, done=done, stdout=output)
    return self
def stderr(self, output, done=False, once=None):
    """
    Send the string output (or unicode8(output) if output is not a
    string) to the standard error stream of the compute cell.

    INPUT:

    - output -- string or object
    """
    if not isinstance(output, str):
        output = unicode8(output)
    self._send_output(id=self._id, once=once, done=done, stderr=output)
    return self
def code(
        self,
        source,  # actual source code
        mode=None,  # the syntax highlight codemirror mode
        filename=None,  # path of file it is contained in (if applicable)
        lineno=-1,  # line number where source starts (0-based)
        done=False,
        once=None):
    """
    Send a code message; the client renders it with appropriate syntax
    highlighting, and possibly a link to open the source file.
    """
    if not isinstance(source, str):
        source = unicode8(source)
    self._send_output(id=self._id,
                      once=once,
                      done=done,
                      code={
                          'source': source,
                          'filename': filename,
                          'lineno': int(lineno),
                          'mode': mode
                      })
    return self
def _execute_interact(self, id, vals):
    # Dispatch updated control values to a registered interact; interacts
    # vanish when the worksheet process restarts, hence the hint below.
    if id in sage_salvus.interacts:
        sage_salvus.interacts[id](vals)
    else:
        print("(Evaluate this cell to use this interact.)")
        #raise RuntimeError("Error: No interact with id %s"%id)
def interact(self, f, done=False, once=None, **kwds):
    # Build the interact cell, send its JSON description to the client,
    # and hand back a callable proxy that can update it later.
    cell = sage_salvus.InteractCell(f, **kwds)
    self._flush_stdio()
    self._send_output(id=self._id,
                      once=once,
                      done=done,
                      interact=cell.jsonable())
    return sage_salvus.InteractFunction(cell)
def javascript(self,
               code,
               once=False,
               coffeescript=False,
               done=False,
               obj=None):
    """
    Execute the given Javascript code as part of the output stream.  The
    same code runs again (at exactly this point in the output stream)
    every time the worksheet is rendered.

    See the docs for the top-level javascript function for more details.

    INPUT:

    - code -- a string
    - once -- boolean (default: False); if True the Javascript is only
      executed once, not every time the cell is loaded.  This is what
      you would use if you call salvus.stdout, etc.  Use once=False,
      e.g., if you are using javascript to make a DOM element draggable
      (say).  WARNING: If once=True, then the javascript is likely to
      get executed before other output to a given cell is even rendered.
    - coffeescript -- boolean (default: False); if True, the input code
      is first converted from CoffeeScript to Javascript.
    - obj -- optional object that is passed via JSON serialization

    At least the following Javascript objects are defined in the scope
    in which the code is evaluated::

    - cell -- jQuery wrapper around the current compute cell
    - salvus.stdout, salvus.stderr, salvus.html, salvus.tex -- all
      allow you to write additional output to the cell
    - worksheet - jQuery wrapper around the current worksheet DOM object
    """
    payload = {'code': code, 'coffeescript': coffeescript}
    self._send_output(javascript=payload,
                      obj={} if obj is None else obj,
                      id=self._id,
                      done=done,
                      once=once)
def coffeescript(self, *args, **kwds):
    """
    This is the same as salvus.javascript, but with coffeescript=True.

    See the docs for the top-level javascript function for more details.
    """
    self.javascript(*args, **dict(kwds, coffeescript=True))
def raw_input(self,
              prompt='',
              default='',
              placeholder='',
              input_width=None,
              label_width=None,
              done=False,
              type=None):  # done is ignored here
    """
    Show an input box in the cell output and block until the user
    submits a value; return that value, optionally coerced via ``type``
    (type='sage' evaluates the input as Sage code).

    Raises KeyboardInterrupt if any other message arrives on the queue
    before the raw-input reply.
    """
    self._flush_stdio()
    # Build the raw_input message; only non-empty/explicit options are sent.
    m = {'prompt': unicode8(prompt)}
    if input_width is not None:
        m['input_width'] = unicode8(input_width)
    if label_width is not None:
        m['label_width'] = unicode8(label_width)
    if default:
        m['value'] = unicode8(default)
    if placeholder:
        m['placeholder'] = unicode8(placeholder)
    self._send_output(raw_input=m, id=self._id)
    # Block until the client responds (or something else interrupts us).
    typ, mesg = self.message_queue.next_mesg()
    log("handling raw input message ", truncate_text(unicode8(mesg), 400))
    if typ == 'json' and mesg['event'] == 'sage_raw_input':
        # everything worked out perfectly
        self.delete_last_output()
        m['value'] = mesg['value']  # as unicode!
        m['submitted'] = True
        # Re-send the widget in its "submitted" state so it renders inert.
        self._send_output(raw_input=m, id=self._id)
        value = mesg['value']
        if type is not None:
            if type == 'sage':
                value = sage_salvus.sage_eval(value)
            else:
                try:
                    value = type(value)
                except TypeError:
                    # Some things in Sage are clueless about unicode for some reason...
                    # Let's at least try, in case the unicode can convert to a string.
                    value = type(str(value))
        return value
    else:
        raise KeyboardInterrupt(
            "raw_input interrupted by another action: event='%s' (expected 'sage_raw_input')"
            % mesg['event'])
def _check_component(self, component):
if component not in ['input', 'output']:
raise ValueError("component must be 'input' or 'output'")
def hide(self, component):
    """
    Hide the given component ('input' or 'output') of the cell.
    """
    self._check_component(component)
    # NOTE(review): self._id is passed positionally here, while every
    # other call site uses id=self._id -- confirm _send_output accepts
    # the positional form.
    self._send_output(self._id, hide=component)

def show(self, component):
    """
    Show the given component ('input' or 'output') of the cell.
    """
    self._check_component(component)
    self._send_output(self._id, show=component)
def notify(self, **kwds):
    """
    Display a graphical notification using the alert_message Javascript function.

    INPUTS:

    - `type: "default"` - Type of the notice. "default", "warning", "info", "success", or "error".
    - `title: ""` - The notice's title.
    - `message: ""` - The notice's text.
    - `timeout: ?` - Delay in seconds before the notice is automatically removed.

    EXAMPLE:

        salvus.notify(type="warning", title="This warning", message="This is a quick message.", timeout=3)
    """
    obj = {}
    for k, v in kwds.items():
        # Backward-compat renames: 'text' -> 'message', type='notice' -> 'default'.
        if k == 'text':  # backward compat
            k = 'message'
        elif k == 'type' and v == 'notice':  # backward compat
            v = 'default'
        obj[k] = sage_salvus.jsonable(v)
        # NOTE(review): a 'delay' kwarg is both kept in obj under its old
        # name and converted to 'timeout' -- presumably intentional for
        # backward compat; confirm before simplifying.
        if k == 'delay':  # backward compat
            obj['timeout'] = v / 1000.0  # units are in seconds now.
    self.javascript("alert_message(obj)", once=True, obj=obj)
def execute_javascript(self, code, coffeescript=False, obj=None):
    """
    Tell the browser to execute javascript. Basically the same as
    salvus.javascript with once=True (the default), except this
    isn't tied to a particular cell. There is a worksheet object
    defined in the scope of the evaluation.

    See the docs for the top-level javascript function for more details.
    """
    # obj is serialized here with compact separators (json "null" when
    # not given) before being shipped over the connection.
    self._conn.send_json(
        message.execute_javascript(code,
                                   coffeescript=coffeescript,
                                   obj=json.dumps(obj,
                                                  separators=(',', ':'))))
def execute_coffeescript(self, *args, **kwds):
    """
    This is the same as salvus.execute_javascript, but with coffeescript=True.

    See the docs for the top-level javascript function for more details.
    """
    self.execute_javascript(*args, **dict(kwds, coffeescript=True))
def _cython(self, filename, **opts):
    """
    Return the module obtained by compiling the Cython code in the
    given file.

    INPUT:

    - filename -- name of a Cython file
    - all other options are passed to sage.misc.cython.cython unchanged,
      except for use_cache which defaults to True (instead of False)

    OUTPUT:

    - a module
    """
    opts.setdefault('use_cache', True)
    import sage.misc.cython
    modname, path = sage.misc.cython.cython(filename, **opts)
    # Temporarily put the build directory on sys.path for the import.
    sys.path.insert(0, path)
    try:
        return __import__(modname)
    finally:
        del sys.path[0]
def _import_code(self, content, **opts):
    """
    Write ``content`` (Python source) to a uniquely named .py file in the
    current directory, import it as a module, clean up the temporary
    files, and return the module.
    """
    # Pick a module name that does not collide with an existing file.
    while True:
        py_file_base = uuid().replace('-', '_')
        if not os.path.exists(py_file_base + '.py'):
            break
    try:
        # BUGFIX: write via a context manager so the file is closed (and
        # flushed) before we try to import it; the original leaked the
        # handle via open(...).write(...).
        with open(py_file_base + '.py', 'w') as f:
            f.write(content)
        try:
            sys.path.insert(0, os.path.abspath('.'))
            mod = __import__(py_file_base)
        finally:
            del sys.path[0]
    finally:
        # BUGFIX: the original unconditionally unlinked the .pyc file; on
        # Python 3 bytecode lives under __pycache__, so that raised
        # FileNotFoundError inside finally and masked any import error.
        # Clean up best-effort instead.
        for junk in (py_file_base + '.py', py_file_base + '.pyc'):
            try:
                os.unlink(junk)
            except OSError:
                pass
    return mod
def _sage(self, filename, **opts):
    """
    Load a .sage file: run it through the Sage preparser (prefixed with
    the usual ``from sage.all import *``) and import it as a module.
    """
    import sage.misc.preparser
    # BUGFIX: close the file handle; the original leaked it via a bare
    # open(filename).read().
    with open(filename) as f:
        raw = f.read()
    content = "from sage.all import *\n" + sage.misc.preparser.preparse_file(
        raw)
    return self._import_code(content, **opts)
def _spy(self, filename, **opts):
    """
    Load a .spy file: preparse it with only the minimal Sage imports
    (Integer, RealNumber, PolynomialRing) and import it as a module.
    """
    import sage.misc.preparser
    # BUGFIX: close the file handle; the original leaked it via a bare
    # open(filename).read().
    with open(filename) as f:
        raw = f.read()
    content = "from sage.all import Integer, RealNumber, PolynomialRing\n" + sage.misc.preparser.preparse_file(
        raw)
    return self._import_code(content, **opts)
def _py(self, filename, **opts):
    # Import a plain Python module; filename is treated as the module
    # name (extra opts are accepted but unused).
    return __import__(filename)
def require(self, filename, **opts):
    """
    Load the given file as a module, choosing the loader by extension:
    .pyx/.spyx (Cython), .sage, .spy, or .py.  Extra options are passed
    through to the loader.
    """
    if not os.path.exists(filename):
        raise ValueError("file '%s' must exist" % filename)
    _, ext = os.path.splitext(filename)
    loaders = {
        '.pyx': self._cython,
        '.spyx': self._cython,
        '.sage': self._sage,
        '.spy': self._spy,
        '.py': self._py,
    }
    if ext in loaders:
        return loaders[ext](filename, **opts)
    raise NotImplementedError("require file of type %s not implemented" %
                              ext)
def typeset_mode(self, on=True):
    # Toggle automatic LaTeX typesetting of output; delegates to sage_salvus.
    sage_salvus.typeset_mode(on)
def project_info(self):
    """
    Return a dictionary with information about the project in which this code is running.

    EXAMPLES::

        sage: salvus.project_info()
        {"stdout":"{u'project_id': u'...', u'location': {u'username': u'teaAuZ9M', u'path': u'.', u'host': u'localhost', u'port': 22}, u'base_url': u'/...'}\n"}
    """
    # INFO is a module-level dict -- presumably populated once at startup
    # from the project environment; confirm where it is initialized.
    return INFO
# Copy the user-facing docstrings from the sage_salvus implementations onto
# the thin Salvus wrapper methods, so help() shows the real documentation.
# Python 2 requires going through __func__ to reach the writable function.
if six.PY2:
    Salvus.pdf.__func__.__doc__ = sage_salvus.show_pdf.__doc__
    Salvus.raw_input.__func__.__doc__ = sage_salvus.raw_input.__doc__
    Salvus.clear.__func__.__doc__ = sage_salvus.clear.__doc__
    Salvus.delete_last_output.__func__.__doc__ = sage_salvus.delete_last_output.__doc__
else:
    Salvus.pdf.__doc__ = sage_salvus.show_pdf.__doc__
    Salvus.raw_input.__doc__ = sage_salvus.raw_input.__doc__
    Salvus.clear.__doc__ = sage_salvus.clear.__doc__
    Salvus.delete_last_output.__doc__ = sage_salvus.delete_last_output.__doc__
def execute(conn, id, code, data, cell_id, preparse, message_queue):
    """
    Top-level entry point for evaluating one cell: build a Salvus object,
    redirect stdout/stderr into the output protocol, apply the session's
    cell prefix/postfix, run the code, and guarantee exactly one 'done'
    message (unless salvus._done is False).
    """
    salvus = Salvus(conn=conn,
                    id=id,
                    data=data,
                    message_queue=message_queue,
                    cell_id=cell_id)

    #salvus.start_executing() # with our new mainly client-side execution this isn't needed; not doing this makes evaluation roundtrip around 100ms instead of 200ms too, which is a major win.

    try:
        # initialize the salvus output streams
        streams = (sys.stdout, sys.stderr)
        sys.stdout = BufferedOutputStream(salvus.stdout)
        sys.stderr = BufferedOutputStream(salvus.stderr)
        try:
            # initialize more salvus functionality
            sage_salvus.set_salvus(salvus)
            namespace['sage_salvus'] = sage_salvus
        except:
            traceback.print_exc()
        # Cells starting with % bypass the configured prefix.
        if salvus._prefix:
            if not code.startswith("%"):
                code = salvus._prefix + '\n' + code
        if salvus._postfix:
            code += '\n' + salvus._postfix
        salvus.execute(code, namespace=namespace, preparse=preparse)
    finally:
        # there must be exactly one done message, unless salvus._done is False.
        # Flush whichever stream still holds buffered data *last*, carrying
        # the done flag, so the client knows the cell finished.
        if sys.stderr._buf:
            if sys.stdout._buf:
                sys.stdout.flush()
            sys.stderr.flush(done=salvus._done)
        else:
            sys.stdout.flush(done=salvus._done)
        (sys.stdout, sys.stderr) = streams


# execute.count goes from 0 to 2
# used for show_identifiers()
execute.count = 0
def drop_privileges(id, home, transient, username):
    """
    Switch this process to run as the given user: set uid/gid, point the
    relevant environment variables (HOME, DOT_SAGE, matplotlib/IPython
    config dirs) at the user's home, and chdir there.
    """
    uid = gid = id
    if transient:
        os.chown(home, uid, gid)
    # Drop the group first -- after setuid we would no longer be allowed to.
    os.setgid(gid)
    os.setuid(uid)
    os.environ['DOT_SAGE'] = home
    # Rebase MPLCONFIGDIR under the new home by stripping its first five
    # characters -- presumably a '/home'-style prefix; TODO confirm.
    os.environ['MPLCONFIGDIR'] = home + os.environ['MPLCONFIGDIR'][5:]
    os.environ['HOME'] = home
    os.environ['IPYTHON_DIR'] = home
    for key in ('USERNAME', 'USER'):
        os.environ[key] = username
    os.chdir(home)

    # Monkey patch the Sage library and anything else that does not
    # deal well with changing user. This sucks, but it is work that
    # simply must be done because we're not importing the library from
    # scratch (which would take a long time).
    import sage.misc.misc
    sage.misc.misc.DOT_SAGE = home + '/.sage/'
class MessageQueue(list):
    """
    FIFO of messages received from the hub connection.

    New messages are inserted at the front and consumed from the back, so
    the oldest message always comes out first.  The backing list is kept
    in ``self.queue`` (other code scans it and deletes entries by index).
    """

    def __init__(self, conn):
        self.queue = []
        self.conn = conn

    def __repr__(self):
        return "Sage Server Message Queue"

    def __getitem__(self, i):
        return self.queue[i]

    def __delitem__(self, i):
        del self.queue[i]

    def next_mesg(self):
        """
        Remove and return the oldest message in the queue.  When the
        queue is empty, block reading the connection and return that
        message directly (it is never enqueued).
        """
        if not self.queue:
            return self.conn.recv()
        return self.queue.pop()

    def recv(self):
        """
        Block until one message arrives on the connection, enqueue it,
        and return it.
        """
        incoming = self.conn.recv()
        self.queue.insert(0, incoming)
        return incoming
def session(conn):
    """
    This is run by the child process that is forked off on each new
    connection. It drops privileges, then handles the complete
    compute session.

    INPUT:

    - ``conn`` -- the TCP connection

    Runs an event loop: 'execute_code' messages are evaluated,
    'introspect' messages answered (trying a Jupyter kernel first),
    'terminate_session' ends the loop.
    """
    mq = MessageQueue(conn)

    pid = os.getpid()

    # seed the random number generator(s)
    import sage.all
    sage.all.set_random_seed()
    import random
    random.seed(sage.all.initial_seed())

    # get_memory_usage is not aware of being forked...
    import sage.misc.getusage
    sage.misc.getusage._proc_status = "/proc/%s/status" % os.getpid()

    cnt = 0

    while True:
        try:
            typ, mesg = mq.next_mesg()

            #print('INFO:child%s: received message "%s"'%(pid, mesg))
            log("handling message ", truncate_text(unicode8(mesg), 400))
            event = mesg['event']
            if event == 'terminate_session':
                return
            elif event == 'execute_code':
                try:
                    execute(conn=conn,
                            id=mesg['id'],
                            code=mesg['code'],
                            data=mesg.get('data', None),
                            cell_id=mesg.get('cell_id', None),
                            preparse=mesg.get('preparse', True),
                            message_queue=mq)
                except Exception as err:
                    log("ERROR -- exception raised '%s' when executing '%s'" %
                        (err, mesg['code']))
            elif event == 'introspect':
                try:
                    # check for introspect from jupyter cell
                    prefix = Salvus._default_mode
                    if 'top' in mesg:
                        top = mesg['top']
                        log('introspect cell top line %s' % top)
                        if top.startswith("%"):
                            prefix = top[1:]
                    try:
                        # see if prefix is the name of a jupyter kernel function
                        kc = eval(prefix + "(get_kernel_client=True)",
                                  namespace, locals())
                        kn = eval(prefix + "(get_kernel_name=True)", namespace,
                                  locals())
                        log("jupyter introspect prefix %s kernel %s" %
                            (prefix, kn))  # e.g. "p2", "python2"
                        jupyter_introspect(conn=conn,
                                           id=mesg['id'],
                                           line=mesg['line'],
                                           preparse=mesg.get('preparse', True),
                                           kc=kc)
                    except:
                        # Not a jupyter cell (or the kernel lookup failed):
                        # fall back to ordinary Sage introspection.
                        import traceback
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        lines = traceback.format_exception(
                            exc_type, exc_value, exc_traceback)
                        log(lines)
                        introspect(conn=conn,
                                   id=mesg['id'],
                                   line=mesg['line'],
                                   preparse=mesg.get('preparse', True))
                except:
                    pass
            else:
                raise RuntimeError("invalid message '%s'" % mesg)
        except:
            # When hub connection dies, loop goes crazy.
            # Unfortunately, just catching SIGINT doesn't seem to
            # work, and leads to random exits during a
            # session. Howeer, when connection dies, 10000 iterations
            # happen almost instantly. Ugly, but it works.
            cnt += 1
            if cnt > 10000:
                sys.exit(0)
            else:
                pass
def jupyter_introspect(conn, id, line, preparse, kc):
    """
    Tab-completion for a cell backed by a Jupyter kernel: send a
    complete request through the kernel client ``kc``, drain the iopub
    channel until the kernel goes idle, then translate the shell
    channel's complete_reply into an introspect_completions message on
    ``conn``.  All failures are logged and swallowed.
    """
    import jupyter_client
    from queue import Empty

    try:
        salvus = Salvus(conn=conn, id=id)
        msg_id = kc.complete(line)
        shell = kc.shell_channel
        iopub = kc.iopub_channel

        # handle iopub responses
        while True:
            try:
                msg = iopub.get_msg(timeout=1)
                msg_type = msg['msg_type']
                content = msg['content']
            except Empty:
                # shouldn't happen
                log("jupyter iopub channel empty")
                break

            # Skip replies belonging to some other request.
            if msg['parent_header'].get('msg_id') != msg_id:
                continue

            log("jupyter iopub recv %s %s" % (msg_type, str(content)))

            if msg_type == 'status' and content['execution_state'] == 'idle':
                break

        # handle shell responses
        while True:
            try:
                msg = shell.get_msg(timeout=10)
                msg_type = msg['msg_type']
                content = msg['content']
            except:
                # shouldn't happen
                log("jupyter shell channel empty")
                break

            if msg['parent_header'].get('msg_id') != msg_id:
                continue

            log("jupyter shell recv %s %s" % (msg_type, str(content)))

            if msg_type == 'complete_reply' and content['status'] == 'ok':
                # jupyter kernel returns matches like "xyz.append" and smc wants just "append"
                matches = content['matches']
                offset = content['cursor_end'] - content['cursor_start']
                completions = [s[offset:] for s in matches]
                mesg = message.introspect_completions(id=id,
                                                      completions=completions,
                                                      target=line[-offset:])
                conn.send_json(mesg)
                break
    except:
        log("jupyter completion exception: %s" % sys.exc_info()[0])
def introspect(conn, id, line, preparse):
    """
    Introspect ``line`` in the global namespace -- completions,
    docstring, or source lookup -- and send the result back on ``conn``.
    """
    salvus = Salvus(
        conn=conn, id=id
    )  # so salvus.[tab] works -- note that Salvus(...) modifies namespace.
    z = sage_parsing.introspect(line, namespace=namespace, preparse=preparse)
    if z['get_completions']:
        mesg = message.introspect_completions(id=id,
                                              completions=z['result'],
                                              target=z['target'])
    elif z['get_help']:
        mesg = message.introspect_docstring(id=id,
                                            docstring=z['result'],
                                            target=z['expr'])
    elif z['get_source']:
        mesg = message.introspect_source_code(id=id,
                                              source_code=z['result'],
                                              target=z['expr'])
    # NOTE(review): if none of the three flags is set, ``mesg`` is unbound
    # here and this raises NameError -- presumably sage_parsing.introspect
    # always sets exactly one; confirm.
    conn.send_json(mesg)
def handle_session_term(signum, frame):
    """
    SIGCHLD-style handler: reap every child process that has already
    exited (non-blocking), so no zombies accumulate.
    """
    while True:
        try:
            pid, exit_status = os.waitpid(-1, os.WNOHANG)
        except OSError:
            # No children left to wait for (ECHILD).
            # BUGFIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit raised during the handler.
            return
        if not pid:
            # Children exist but none have exited yet.
            return
# Shared-secret authentication state: the token itself is read lazily (in
# unlock_conn) from the file named by $COCALC_SECRET_TOKEN, falling back to
# $SMC/secret_token.
secret_token = None
if 'COCALC_SECRET_TOKEN' in os.environ:
    secret_token_path = os.environ['COCALC_SECRET_TOKEN']
else:
    secret_token_path = os.path.join(os.environ['SMC'], 'secret_token')
def unlock_conn(conn):
    """
    Handshake: the client must send the shared secret token before the
    connection is served.  Returns True when the token matches; returns
    False (after refusing and closing the connection) otherwise.
    """
    global secret_token
    if secret_token is None:
        try:
            secret_token = open(secret_token_path).read().strip()
        except:
            conn.send(six.b('n'))
            conn.send(
                six.
                b("Unable to accept connection, since Sage server doesn't yet know the secret token; unable to read from '%s'"
                  % secret_token_path))
            conn.close()
            # BUGFIX: without this return we fell through with
            # secret_token still None and len(None) below raised
            # TypeError instead of cleanly refusing the connection.
            return False

    n = len(secret_token)
    token = six.b('')
    while len(token) < n:
        token += conn.recv(n)
        # NOTE(review): on Python 3 ``token`` is bytes while
        # ``secret_token`` is str, so this prefix comparison can never
        # match -- confirm the intended six/py2 semantics.
        if token != secret_token[:len(token)]:
            break  # definitely not right -- don't try anymore
    if token != six.b(secret_token):
        log("token='%s'; secret_token='%s'" % (token, secret_token))
        conn.send(six.b('n'))  # no -- invalid login
        conn.send(six.b("Invalid secret token."))
        conn.close()
        return False
    else:
        conn.send(six.b('y'))  # yes -- valid login
        return True
def serve_connection(conn):
    """
    Handle one freshly-accepted TCP connection (runs in the forked
    child): require the secret-token handshake, then either deliver a
    signal to a process ('send_signal') or start a full compute session
    ('start_session').
    """
    global PID
    PID = os.getpid()
    # First the client *must* send the secret shared token. If they
    # don't, we return (and the connection will have been destroyed by
    # unlock_conn).
    log("Serving a connection")
    log("Waiting for client to unlock the connection...")
    # TODO -- put in a timeout (?)
    if not unlock_conn(conn):
        log("Client failed to unlock connection. Dumping them.")
        return
    log("Connection unlocked.")

    try:
        # Switch to the JSON message protocol for everything that follows.
        conn = ConnectionJSON(conn)
        typ, mesg = conn.recv()
        log("Received message %s" % mesg)
    except Exception as err:
        log("Error receiving message: %s (connection terminated)" % str(err))
        raise

    if mesg['event'] == 'send_signal':
        if mesg['pid'] == 0:
            log("invalid signal mesg (pid=0)")
        else:
            log("Sending a signal")
            os.kill(mesg['pid'], mesg['signal'])
        return
    if mesg['event'] != 'start_session':
        log("Received an unknown message event = %s; terminating session." %
            mesg['event'])
        return

    log("Starting a session")
    desc = message.session_description(os.getpid())
    log("child sending session description back: %s" % desc)
    conn.send_json(desc)
    session(conn=conn)
def serve(port, host, extra_imports=False):
    """
    Main server loop: bind the listening socket, pre-import the Sage
    library into the shared namespace (init_library), then accept
    connections forever, forking one child per connection and reaping
    finished children between accepts.
    """
    #log.info('opening connection on port %s', port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # check for children that have finished every few seconds, so
    # we don't end up with zombies.
    s.settimeout(5)
    s.bind((host, port))
    log('Sage server %s:%s' % (host, port))

    # Enabling the following signal completely breaks subprocess pexpect in many cases, which is
    # obviously totally unacceptable.
    #signal.signal(signal.SIGCHLD, handle_session_term)

    def init_library():
        # One-time, slow initialization of the shared namespace: import
        # Sage, monkey-patch display/interact hooks, and expose the
        # sage_salvus helpers to worksheets.
        tm = time.time()
        log("pre-importing the sage library...")

        # FOR testing purposes.
        ##log("fake 40 second pause to slow things down for testing....")
        ##time.sleep(40)
        ##log("done with pause")

        # Monkey patching interact using the new and improved Salvus
        # implementation of interact.
        import sagenb.notebook.interact
        sagenb.notebook.interact.interact = sage_salvus.interact

        # Actually import sage now. This must happen after the interact
        # import because of library interacts.
        log("import sage...")
        import sage.all
        log("imported sage.")

        # Monkey patch the html command.
        try:
            # need the following for sage_server to start with sage-8.0
            # or `import sage.interacts.library` will fail (not really important below, as we don't do that).
            import sage.repl.user_globals
            sage.repl.user_globals.set_globals(globals())
            log("initialized user_globals")
        except RuntimeError:
            # may happen with sage version < 8.0
            log("user_globals.set_globals failed, continuing", sys.exc_info())

        sage.all.html = sage.misc.html.html = sage_salvus.html

        # CRITICAL: look, we are just going to not do this, and have sage.interacts.library
        # be broken. It's **really slow** to do this, and I don't think sage.interacts.library
        # ever ended up going anywhere! People use wiki.sagemath.org/interact instead...
        #import sage.interacts.library
        #sage.interacts.library.html = sage_salvus.html

        # Set a useful figsize default; the matplotlib one is not notebook friendly.
        import sage.plot.graphics
        sage.plot.graphics.Graphics.SHOW_OPTIONS['figsize'] = [8, 4]

        # Monkey patch latex.eval, so that %latex works in worksheets
        sage.misc.latex.latex.eval = sage_salvus.latex0

        # Plot, integrate, etc., -- so startup time of worksheets is minimal.
        cmds = [
            'from sage.all import *', 'from sage.calculus.predefined import x',
            'import pylab'
        ]
        if extra_imports:
            cmds.extend([
                'import scipy', 'import sympy',
                "plot(sin).save('%s/a.png'%os.environ['SMC'], figsize=2)",
                'integrate(sin(x**2),x)'
            ])
        tm0 = time.time()  # NOTE(review): tm0 is never used afterwards
        for cmd in cmds:
            log(cmd)
            exec(cmd, namespace)

        global pylab
        pylab = namespace['pylab']  # used for clearing

        log('imported sage library and other components in %s seconds' %
            (time.time() - tm))

        for k, v in sage_salvus.interact_functions.items():
            namespace[k] = v
            # See above -- not doing this, since it is REALLY SLOW to import.
            # This does mean that some old code that tries to use interact might break (?).
            #namespace[k] = sagenb.notebook.interact.__dict__[k] = v

        namespace['_salvus_parsing'] = sage_parsing

        # Expose all the worksheet magics / helper functions by name.
        for name in [
                'anaconda', 'asy', 'attach', 'auto', 'capture', 'cell',
                'clear', 'coffeescript', 'cython', 'default_mode',
                'delete_last_output', 'dynamic', 'exercise', 'fork', 'fortran',
                'go', 'help', 'hide', 'hideall', 'input', 'java', 'javascript',
                'julia', 'jupyter', 'license', 'load', 'md', 'mediawiki',
                'modes', 'octave', 'pandoc', 'perl', 'plot3d_using_matplotlib',
                'prun', 'python_future_feature', 'py3print_mode', 'python',
                'python3', 'r', 'raw_input', 'reset', 'restore', 'ruby',
                'runfile', 'sage_chat', 'sage_eval', 'scala', 'scala211',
                'script', 'search_doc', 'search_src', 'sh', 'show',
                'show_identifiers', 'singular_kernel', 'time', 'timeit',
                'typeset_mode', 'var', 'wiki'
        ]:
            namespace[name] = getattr(sage_salvus, name)

        namespace['sage_server'] = sys.modules[
            __name__]  # http://stackoverflow.com/questions/1676835/python-how-do-i-get-a-reference-to-a-module-inside-the-module-itself

        # alias pretty_print_default to typeset_mode, since sagenb has/uses that.
        namespace['pretty_print_default'] = namespace['typeset_mode']
        # and monkey patch it
        sage.misc.latex.pretty_print_default = namespace[
            'pretty_print_default']

        sage_salvus.default_namespace = dict(namespace)
        log("setup namespace with extra functions")

        # Sage's pretty_print and view are both ancient and a mess
        sage.all.pretty_print = sage.misc.latex.pretty_print = namespace[
            'pretty_print'] = namespace['view'] = namespace['show']

        # this way client code can tell it is running as a Sage Worksheet.
        namespace['__SAGEWS__'] = True

    log("Initialize sage library.")
    init_library()

    t = time.time()
    s.listen(128)
    i = 0
    children = {}
    log("Starting server listening for connections")
    try:
        while True:
            i += 1
            #print i, time.time()-t, 'cps: ', int(i/(time.time()-t))
            # do not use log.info(...) in the server loop; threads = race conditions that hang server every so often!!
            try:
                if children:
                    for pid in list(children.keys()):
                        if os.waitpid(pid, os.WNOHANG) != (0, 0):
                            log("subprocess %s terminated, closing connection"
                                % pid)
                            # NOTE(review): this closes ``conn`` from the
                            # most recent accept, not children[pid] -- it
                            # looks like it should be children[pid].close();
                            # confirm before changing.
                            conn.close()
                            del children[pid]
                try:
                    conn, addr = s.accept()
                    log("Accepted a connection from", addr)
                except:
                    # this will happen periodically since we did s.settimeout above, so
                    # that we wait for children above periodically.
                    continue
            except socket.error:
                continue
            child_pid = os.fork()
            if child_pid:  # parent
                log("forked off child with pid %s to handle this connection" %
                    child_pid)
                children[child_pid] = conn
            else:
                # child
                global PID
                PID = os.getpid()
                log("child process, will now serve this new connection")
                serve_connection(conn)
        # end while
    except Exception as err:
        log("Error taking connection: ", err)
        traceback.print_exc(file=open(LOGFILE, 'a'))
        #log.error("error: %s %s", type(err), str(err))
    finally:
        log("closing socket")
        #s.shutdown(0)
        s.close()
def run_server(port, host, pidfile, logfile=None):
    """Run the sage server, blocking until the listener exits.

    INPUT:

    - ``port`` -- TCP port for ``serve`` to listen on
    - ``host`` -- interface/hostname to bind to
    - ``pidfile`` -- path to write our pid to ('' disables); removed
      again when the server stops
    - ``logfile`` -- optional path; if given, replaces the global LOGFILE
    """
    global LOGFILE
    if logfile:
        LOGFILE = logfile
    if pidfile:
        pid = str(os.getpid())
        print("os.getpid() = %s" % pid)
        # use a context manager so the pidfile handle is closed promptly
        # (the old code left the handle to be garbage collected)
        with open(pidfile, 'w') as f:
            f.write(pid)
    log("run_server: port=%s, host=%s, pidfile='%s', logfile='%s'" %
        (port, host, pidfile, LOGFILE))
    try:
        serve(port, host)
    finally:
        # always remove the pidfile, even if serve() raised
        if pidfile:
            os.unlink(pidfile)
if __name__ == "__main__":
    # Command-line entry point: parse options, then run as a one-off test
    # client (-c), a daemon (-d), or a foreground server.
    import argparse
    parser = argparse.ArgumentParser(description="Run Sage server")
    parser.add_argument(
        "-p",
        dest="port",
        type=int,
        default=0,
        help=
        "port to listen on (default: 0); 0 = automatically allocated; saved to $SMC/data/sage_server.port"
    )
    parser.add_argument(
        "-l",
        dest='log_level',
        type=str,
        default='INFO',
        help=
        "log level (default: INFO) useful options include WARNING and DEBUG")
    parser.add_argument("-d",
                        dest="daemon",
                        default=False,
                        action="store_const",
                        const=True,
                        help="daemon mode (default: False)")
    parser.add_argument(
        "--host",
        dest="host",
        type=str,
        default='127.0.0.1',
        help="host interface to bind to -- default is 127.0.0.1")
    parser.add_argument("--pidfile",
                        dest="pidfile",
                        type=str,
                        default='',
                        help="store pid in this file")
    parser.add_argument(
        "--logfile",
        dest="logfile",
        type=str,
        default='',
        help="store log in this file (default: '' = don't log to a file)")
    parser.add_argument("-c",
                        dest="client",
                        default=False,
                        action="store_const",
                        const=True,
                        help="run in test client mode number 1 (command line)")
    parser.add_argument("--hostname",
                        dest="hostname",
                        type=str,
                        default='',
                        help="hostname to connect to in client mode")
    parser.add_argument("--portfile",
                        dest="portfile",
                        type=str,
                        default='',
                        help="write port to this file")
    args = parser.parse_args()
    if args.daemon and not args.pidfile:
        print(("%s: must specify pidfile in daemon mode" % sys.argv[0]))
        sys.exit(1)
    if args.log_level:
        # NOTE: log-level handling is currently disabled; the flag is
        # accepted for compatibility but ignored.
        pass
        #level = getattr(logging, args.log_level.upper())
        #log.setLevel(level)
    if args.client:
        # Test-client mode: connect to an already-running server and exit.
        # If no -p was given, read the port from the portfile.
        client1(
            port=args.port if args.port else int(open(args.portfile).read()),
            hostname=args.hostname)
        sys.exit(0)
    if not args.port:
        # Port 0 requested: ask the OS for a free port, release it, and
        # reuse the number (small race window; acceptable here).
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))  # pick a free port
        args.port = s.getsockname()[1]
        del s
    if args.portfile:
        open(args.portfile, 'w').write(str(args.port))
    # absolutize paths before any daemonization changes the cwd
    pidfile = os.path.abspath(args.pidfile) if args.pidfile else ''
    logfile = os.path.abspath(args.logfile) if args.logfile else ''
    if logfile:
        LOGFILE = logfile
        open(LOGFILE, 'w')  # for now we clear it on restart...
        log("setting logfile to %s" % LOGFILE)
    main = lambda: run_server(port=args.port, host=args.host, pidfile=pidfile)
    if args.daemon and args.pidfile:
        # daemonize (detach, write args.pidfile) before serving
        from . import daemon
        daemon.daemonize(args.pidfile)
        main()
    else:
        main()
smc_sagews: Python 3 compatibility is good enough to evaluate code, but not more.
#!/usr/bin/env python
"""
sage_server.py -- unencrypted forking TCP server.
Note: I wrote functionality so this can run as root, create accounts on the fly,
and serve sage as those accounts. Doing this is horrendous from a security point of
view, and I'm definitely not doing this.
None of that functionality is actually used in https://cocalc.com!
For debugging, this may help:
killemall sage_server.py && sage --python sage_server.py -p 6000
"""
# NOTE: This file is GPL'd
# because it imports the Sage library. This file is not directly
# imported by anything else in CoCalc; the Python process it runs is
# used over a TCP connection.
#########################################################################################
# Copyright (C) 2016, Sagemath Inc.
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
# Add the path that contains this file to the Python load path, so we
# can import other files from there.
from __future__ import print_function, absolute_import
import six
import os, sys, time, operator
import __future__ as future
from functools import reduce
def unicode8(s):
    """Best-effort conversion of ``s`` to a text (unicode) string.

    Bytes-like objects are decoded as UTF-8; anything else goes through
    ``str``.  If both conversions fail the object is returned unchanged:
    this helper only formats log/output text and must never raise.
    """
    try:
        # decodes bytes -> str; raises TypeError when s is not bytes-like
        return str(s, 'utf8')
    except (TypeError, UnicodeDecodeError):
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed here
        try:
            return str(s)
        except Exception:
            # truly unconvertible object -- return as-is rather than fail
            return s
# Log file lives next to this source file: "<this file without .py>.log".
LOGFILE = os.path.realpath(__file__)[:-3] + ".log"
# Pid used to prefix every log line (see log() below).
PID = os.getpid()
from datetime import datetime
def log(*args):
    """Append a timestamped message to LOGFILE.

    Each entry is prefixed with the current PID and a UTC timestamp with
    millisecond precision.  Logging must never crash the server, so any
    failure is reported to stdout and otherwise ignored.
    """
    try:
        mesg = "%s (%s): %s\n" % (PID, datetime.utcnow().strftime(
            '%Y-%m-%d %H:%M:%S.%f')[:-3], ' '.join(
                [unicode8(x) for x in args]))
        # open/close per call so changes to LOGFILE take effect immediately;
        # 'with' guarantees the handle is released (the old code leaked it)
        with open(LOGFILE, 'a') as debug_log:
            debug_log.write(mesg)
            debug_log.flush()
    except Exception as err:
        print(("an error writing a log message (ignoring) -- %s" % err, args))
# used for clearing pylab figure
pylab = None
# Maximum number of distinct (non-once) output messages per cell; when this number is
# exceeded, an exception is raised; this reduces the chances of the user creating
# a huge unusable worksheet.
MAX_OUTPUT_MESSAGES = 256
# stdout, stderr, html, etc. that exceeds this many characters will be truncated to avoid
# killing the client.
MAX_STDOUT_SIZE = MAX_STDERR_SIZE = MAX_CODE_SIZE = MAX_HTML_SIZE = MAX_MD_SIZE = MAX_TEX_SIZE = 40000
# Hard cap on the *total* characters of output a single cell may produce
# (summed over all messages); enforced in Salvus._send_output.
MAX_OUTPUT = 150000
# Standard imports.
import json, resource, shutil, signal, socket, struct, \
tempfile, time, traceback, pwd, re
# for "3x^2 + 4xy - 5(1+x) - 3 abc4ok", this pattern matches "3x", "5(" and "4xy" but not "abc4ok"
# to understand it, see https://regex101.com/ or https://www.debuggex.com/
RE_POSSIBLE_IMPLICIT_MUL = re.compile(r'(?:(?<=[^a-zA-Z])|^)(\d+[a-zA-Z\(]+)')
from . import sage_parsing, sage_salvus
uuid = sage_salvus.uuid
# Set to False the first time sage.repl.attach turns out to be unavailable,
# so we do not retry (and re-log the failure) on every call.
reload_attached_files_if_mod_smc_available = True
def reload_attached_files_if_mod_smc():
    """Re-load any %attach'ed files that were modified on disk.

    Mirrors sage/src/sage/repl/attach.py reload_attached_files_if_modified(),
    but loads through our own sage_salvus.load so output goes to the
    worksheet.  No-op if attach has never been used in this session.
    """
    # CRITICAL: do NOT import sage.repl.attach!! That will import IPython, wasting several seconds and
    # killing the user experience for no reason.
    try:
        import sage.repl
        sage.repl.attach
    except:
        # nothing to do -- attach has not been used and is not yet available.
        return
    global reload_attached_files_if_mod_smc_available
    if not reload_attached_files_if_mod_smc_available:
        return
    try:
        from sage.repl.attach import load_attach_path, modified_file_iterator
    except:
        print("sage_server: attach not available")
        reload_attached_files_if_mod_smc_available = False
        return
    # see sage/src/sage/repl/attach.py reload_attached_files_if_modified()
    for filename, mtime in modified_file_iterator():
        basename = os.path.basename(filename)
        timestr = time.strftime('%T', mtime)
        log('reloading attached file {0} modified at {1}'.format(
            basename, timestr))
        from .sage_salvus import load
        load(filename)
# Determine the info object, if available. There's no good reason
# it wouldn't be available, unless a user explicitly deleted it, but
# we may as well try to be robust to this, especially if somebody
# were to try to use this server outside of cloud.sagemath.com.
# INFO holds project metadata (e.g. base_url, project_id) read from
# $SMC/info.json; falls back to an empty dict.
_info_path = os.path.join(os.environ['SMC'], 'info.json')
if os.path.exists(_info_path):
    INFO = json.loads(open(_info_path).read())
else:
    INFO = {}
if 'base_url' not in INFO:
    # default: serve from the domain root
    INFO['base_url'] = ''
# Configure logging
#logging.basicConfig()
#log = logging.getLogger('sage_server')
#log.setLevel(logging.INFO)
# A CoffeeScript version of this function is in misc_node.coffee.
import hashlib
def uuidsha1(data):
    """Return a deterministic, v4-looking UUID string derived from SHA1(data).

    Equal inputs always map to the same UUID, since the hex digits are
    drawn in order from the SHA1 hex digest of ``data``.
    """
    hexdigits = iter(hashlib.sha1(data).hexdigest())
    out = []
    for ch in 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx':
        if ch == 'x':
            out.append(next(hexdigits))
        elif ch == 'y':
            # 8 + low order 3 bits of the next hex digit, as in UUID v4
            out.append(hex((int(next(hexdigits), 16) & 0x3) | 0x8)[-1])
        else:
            out.append(ch)
    return ''.join(out)
# A tcp connection with support for sending various types of messages, especially JSON.
class ConnectionJSON(object):
    """Length-prefixed message protocol over a TCP socket.

    Wire format: a 4-byte big-endian length, then a payload whose first
    character is a type tag -- 'j' for JSON text, 'b' for a blob (the tag
    is followed by the blob's sha1-based uuid, then the raw data).
    """

    def __init__(self, conn):
        # avoid common mistake -- conn is supposed to be from socket.socket...
        assert not isinstance(conn, ConnectionJSON)
        self._conn = conn

    def close(self):
        self._conn.close()

    def _send(self, s):
        if six.PY3:
            # the length prefix counts bytes, not characters
            s = s.encode('utf8')
        length_header = struct.pack(">L", len(s))
        # py3: TypeError: can't concat str to bytes
        self._conn.send(length_header + s)

    def send_json(self, m):
        """Serialize m and send it as a 'j' message; returns the JSON length."""
        m = json.dumps(m)
        if '\\u0000' in m:
            # NULL bytes would corrupt storage downstream
            raise RuntimeError("NULL bytes not allowed")
        log("sending message '", truncate_text(m, 256), "'")
        self._send('j' + m)
        return len(m)

    def send_blob(self, blob):
        """Send blob as a 'b' message; returns its sha1-based uuid."""
        s = uuidsha1(blob)
        self._send('b' + s + blob)
        return s

    def send_file(self, filename):
        """Send the contents of filename as a blob; returns the blob uuid."""
        log("sending file '%s'" % filename)
        # BUGFIX: use a context manager so the file handle is closed even
        # if reading or sending fails (the old code leaked the handle).
        with open(filename, 'rb') as f:
            data = f.read()
        return self.send_blob(data)

    def _recv(self, n):
        # Retry a bounded number of times when recv is interrupted by a
        # signal (EINTR == errno 4); see
        # http://stackoverflow.com/questions/3016369/catching-blocking-sigint-during-system-call
        for i in range(20):
            try:
                r = self._conn.recv(n)
                return r
            except OSError as e:
                if e.errno != 4:
                    raise
        raise EOFError

    def recv(self):
        """Receive one message; returns ('json', obj) or ('blob', data).

        Raises EOFError on a closed/short stream and ValueError on an
        unknown type tag.
        """
        n = self._recv(4)
        if len(n) < 4:
            raise EOFError
        n = struct.unpack('>L', n)[0]  # big endian 32 bits
        s = self._recv(n)
        # keep reading until the whole payload has arrived
        while len(s) < n:
            t = self._recv(n - len(s))
            if len(t) == 0:
                raise EOFError
            s += t
        if six.PY3:
            # bytestream to string, so that s[0] is e.g. 'j' and not 106
            if type(s) == bytes:
                s = s.decode('utf8')
        if s[0] == 'j':
            try:
                return 'json', json.loads(s[1:])
            except Exception as msg:
                log("Unable to parse JSON '%s'" % s[1:])
                raise
        elif s[0] == 'b':
            return 'blob', s[1:]
        raise ValueError("unknown message type '%s'" % s[0])
def truncate_text(s, max_size):
    """Return (text, truncated): text is s cut to max_size chars plus a marker."""
    if len(s) <= max_size:
        return s, False
    return s[:max_size] + "[...]", True
def truncate_text_warn(s, max_size, name):
    r"""
    Truncate text if too long and format a warning message.

    INPUT:

    - ``s`` -- string to be truncated
    - ``max_size`` -- integer truncation limit
    - ``name`` -- string, name of limiting parameter

    OUTPUT:

    a triple: (possibly truncated string, whether truncation happened,
    warning message or '' when nothing was truncated)
    """
    template = "WARNING: Output: %s truncated by %s to %s. Type 'smc?' to learn how to raise the output limit."
    original_length = len(s)
    if original_length <= max_size:
        return s, False, ''
    warning = template % (original_length, name, max_size)
    return s[:max_size] + "[...]", True, warning
class Message(object):
    """Factory for the JSON messages this server exchanges with its client.

    Each method returns a plain dict with an 'event' key plus the method's
    arguments (``self`` is filtered out).
    """

    def _new(self, event, props=None):
        # NOTE: props used to default to a shared mutable {}; use None to
        # avoid the mutable-default-argument pitfall.
        m = {'event': event}
        if props is not None:
            for key, val in props.items():
                if key != 'self':
                    m[key] = val
        return m

    def start_session(self):
        return self._new('start_session')

    def session_description(self, pid):
        return self._new('session_description', {'pid': pid})

    def send_signal(self, pid, signal=signal.SIGINT):
        return self._new('send_signal', locals())

    def terminate_session(self, done=True):
        return self._new('terminate_session', locals())

    def execute_code(self, id, code, preparse=True):
        return self._new('execute_code', locals())

    def execute_javascript(self, code, obj=None, coffeescript=False):
        return self._new('execute_javascript', locals())

    def output(
            self,
            id,
            stdout=None,
            stderr=None,
            code=None,
            html=None,
            javascript=None,
            coffeescript=None,
            interact=None,
            md=None,
            tex=None,
            d3=None,
            file=None,
            raw_input=None,
            obj=None,
            once=None,
            hide=None,
            show=None,
            events=None,
            clear=None,
            delete_last=None,
            done=False  # CRITICAL: done must be specified for multi-response; this is assumed by sage_session.coffee; otherwise response assumed single.
    ):
        """Build an 'output' message for cell ``id``.

        Oversized text fields are truncated to the sage_server.MAX_* limits;
        one warning per truncated field is appended to the stderr field.
        """
        m = self._new('output')
        m['id'] = id
        t = truncate_text_warn
        # BUGFIX: the old code used a single did_truncate/tmsg pair that every
        # truncation check overwrote, so earlier warnings were silently lost
        # (e.g. truncated stderr followed by short stdout produced no warning).
        # Collect one warning per truncated field instead.
        warnings = []
        from . import sage_server  # we do this so that the user can customize the MAX's below.
        if code is not None:
            code['source'], truncated, tmsg = t(code['source'],
                                                sage_server.MAX_CODE_SIZE,
                                                'MAX_CODE_SIZE')
            if truncated:
                warnings.append(tmsg)
            m['code'] = code
        if stderr is not None and len(stderr) > 0:
            m['stderr'], truncated, tmsg = t(stderr,
                                             sage_server.MAX_STDERR_SIZE,
                                             'MAX_STDERR_SIZE')
            if truncated:
                warnings.append(tmsg)
        if stdout is not None and len(stdout) > 0:
            m['stdout'], truncated, tmsg = t(stdout,
                                             sage_server.MAX_STDOUT_SIZE,
                                             'MAX_STDOUT_SIZE')
            if truncated:
                warnings.append(tmsg)
        if html is not None and len(html) > 0:
            m['html'], truncated, tmsg = t(html, sage_server.MAX_HTML_SIZE,
                                           'MAX_HTML_SIZE')
            if truncated:
                warnings.append(tmsg)
        if md is not None and len(md) > 0:
            m['md'], truncated, tmsg = t(md, sage_server.MAX_MD_SIZE,
                                         'MAX_MD_SIZE')
            if truncated:
                warnings.append(tmsg)
        if tex is not None and len(tex) > 0:
            tex['tex'], truncated, tmsg = t(tex['tex'],
                                            sage_server.MAX_TEX_SIZE,
                                            'MAX_TEX_SIZE')
            if truncated:
                warnings.append(tmsg)
            m['tex'] = tex
        if javascript is not None: m['javascript'] = javascript
        if coffeescript is not None: m['coffeescript'] = coffeescript
        if interact is not None: m['interact'] = interact
        if d3 is not None: m['d3'] = d3
        if obj is not None: m['obj'] = json.dumps(obj)
        if file is not None: m['file'] = file  # = {'filename':..., 'uuid':...}
        if raw_input is not None: m['raw_input'] = raw_input
        if done is not None: m['done'] = done
        if once is not None: m['once'] = once
        if hide is not None: m['hide'] = hide
        if show is not None: m['show'] = show
        if events is not None: m['events'] = events
        if clear is not None: m['clear'] = clear
        if delete_last is not None: m['delete_last'] = delete_last
        if warnings:
            note = '\n'.join(warnings)
            if 'stderr' in m:
                m['stderr'] += '\n' + note
            else:
                m['stderr'] = '\n' + note
        return m

    def introspect_completions(self, id, completions, target):
        m = self._new('introspect_completions', locals())
        m['id'] = id
        return m

    def introspect_docstring(self, id, docstring, target):
        m = self._new('introspect_docstring', locals())
        m['id'] = id
        return m

    def introspect_source_code(self, id, source_code, target):
        m = self._new('introspect_source_code', locals())
        m['id'] = id
        return m
# Singleton used throughout this file to construct protocol messages.
message = Message()
# Login name of the account this process runs as (from the environment).
whoami = os.environ['USER']
def client1(port, hostname):
    """Minimal command-line test client (the -c mode of this script).

    Connects to hostname:port, starts a session, then repeatedly reads
    input via sage_parsing.get_input, sends it for execution, and prints
    stdout/stderr output messages until EOF.  Ctrl-C opens a *second*
    connection just to deliver an interrupt signal to the remote pid.
    """
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((hostname, int(port)))
    conn = ConnectionJSON(conn)
    conn.send_json(message.start_session())
    typ, mesg = conn.recv()
    pid = mesg['pid']
    print(("PID = %s" % pid))
    id = 0
    while True:
        try:
            code = sage_parsing.get_input('sage [%s]: ' % id)
            if code is None:  # EOF
                break
            conn.send_json(message.execute_code(code=code, id=id))
            # drain output messages for this cell until its 'done' arrives
            while True:
                typ, mesg = conn.recv()
                if mesg['event'] == 'terminate_session':
                    return
                elif mesg['event'] == 'output':
                    if 'stdout' in mesg:
                        sys.stdout.write(mesg['stdout'])
                        sys.stdout.flush()
                    if 'stderr' in mesg:
                        print(('! ' +
                               '\n! '.join(mesg['stderr'].splitlines())))
                    if 'done' in mesg and mesg['id'] >= id:
                        break
            id += 1
        except KeyboardInterrupt:
            print("Sending interrupt signal")
            # separate connection: the main one is busy with this cell
            conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn2.connect((hostname, int(port)))
            conn2 = ConnectionJSON(conn2)
            conn2.send_json(message.send_signal(pid))
            del conn2
            id += 1
    conn.send_json(message.terminate_session())
    print("\nExiting Sage client.")
class BufferedOutputStream(object):
    """File-like object that batches writes and hands them to a callback.

    Text accumulates in an internal buffer and is passed to ``f(data,
    done=...)`` whenever the buffer reaches ``flush_size`` characters or
    ``flush_interval`` seconds have passed since the last flush.  NUL
    bytes are stripped because the output must be valid PostgreSQL TEXT.
    """

    def __init__(self, f, flush_size=4096, flush_interval=.1):
        self._f = f
        self._buf = ''
        self._flush_size = flush_size
        self._flush_interval = flush_interval
        self.reset()

    def reset(self):
        """Restart the flush-interval clock."""
        self._last_flush_time = time.time()

    def fileno(self):
        return 0

    def write(self, output):
        # CRITICAL: output must be valid PostgreSQL TEXT, so strip NUL
        # bytes.  This only affects text *rendered* in the browser -- a
        # partial but safe solution to a more general problem.
        try:
            self._buf += output.replace('\x00', '')
        except UnicodeDecodeError:
            self._buf += output.decode('utf-8').replace('\x00', '')
        now = time.time()
        due = (now - self._last_flush_time) >= self._flush_interval
        if due or len(self._buf) >= self._flush_size:
            self.flush()
            self._last_flush_time = now

    def flush(self, done=False):
        if not self._buf and not done:
            # nothing buffered and no 'done' marker to deliver
            return
        try:
            self._f(self._buf, done=done)
        except UnicodeDecodeError:
            self._f(str(self._buf, errors='replace'), done=done)
        self._buf = ''

    def isatty(self):
        return False
# This will *have* to be re-done using Cython for speed.
class Namespace(dict):
    """Dict with change/delete event hooks; used as the worksheet namespace.

    Handlers registered via ``on('change', key, f)`` are called with the
    new value when ``key`` is assigned; handlers on the key ``None`` get
    ``(key, value)`` for every assignment.  'del' events are analogous.
    Handler exceptions are printed, never propagated.
    """

    def __init__(self, x):
        self._on_change = {}
        self._on_del = {}
        dict.__init__(self, x)

    def on(self, event, x, f):
        """Register handler f for 'change' or 'del' events on key x
        (x=None means every key)."""
        if event == 'change':
            self._on_change.setdefault(x, []).append(f)
        elif event == 'del':
            self._on_del.setdefault(x, []).append(f)

    def remove(self, event, x, f):
        """Unregister handler f; silently does nothing if not registered.

        BUGFIX: the old code called ``v.find(f)`` -- lists have no
        ``find`` method, so remove() always raised AttributeError.
        """
        if event == 'change' and x in self._on_change:
            v = self._on_change[x]
            if f in v:
                v.remove(f)
                if len(v) == 0:
                    del self._on_change[x]
        elif event == 'del' and x in self._on_del:
            v = self._on_del[x]
            if f in v:
                v.remove(f)
                if len(v) == 0:
                    del self._on_del[x]

    def __setitem__(self, x, y):
        dict.__setitem__(self, x, y)
        try:
            if x in self._on_change:
                for f in self._on_change[x]:
                    f(y)
            if None in self._on_change:
                for f in self._on_change[None]:
                    f(x, y)
        except Exception as mesg:
            # a broken handler must not break assignment itself
            print(mesg)

    def __delitem__(self, x):
        try:
            if x in self._on_del:
                for f in self._on_del[x]:
                    f()
            if None in self._on_del:
                for f in self._on_del[None]:
                    f(x)
        except Exception as mesg:
            print(mesg)
        dict.__delitem__(self, x)

    def set(self, x, y, do_not_trigger=None):
        """Assign x=y, triggering change handlers except those listed in
        do_not_trigger (used to avoid feedback loops)."""
        dict.__setitem__(self, x, y)
        if x in self._on_change:
            if do_not_trigger is None:
                do_not_trigger = []
            for f in self._on_change[x]:
                if f not in do_not_trigger:
                    f(y)
        if None in self._on_change:
            for f in self._on_change[None]:
                f(x, y)
class TemporaryURL:
    """Value object pairing a blob URL with its time-to-live in seconds.

    A ttl of 0 means the object is permanently available.  ``str()``
    yields the URL itself so the object can be used directly in output.
    """

    def __init__(self, url, ttl):
        self.url, self.ttl = url, ttl

    def __repr__(self):
        return repr(self.url)

    def __str__(self):
        return self.url
# The global namespace shared by all cell executions (see Salvus.__init__,
# which stores this as self.namespace).
namespace = Namespace({})
class Salvus(object):
"""
Cell execution state object and wrapper for access to special CoCalc Server functionality.
An instance of this object is created each time you execute a cell. It has various methods
for sending different types of output messages, links to files, etc. Type 'help(smc)' for
more details.
OUTPUT LIMITATIONS -- There is an absolute limit on the number of messages output for a given
cell, and also the size of the output message for each cell. You can access or change
those limits dynamically in a worksheet as follows by viewing or changing any of the
following variables::
sage_server.MAX_STDOUT_SIZE # max length of each stdout output message
sage_server.MAX_STDERR_SIZE # max length of each stderr output message
sage_server.MAX_MD_SIZE # max length of each md (markdown) output message
sage_server.MAX_HTML_SIZE # max length of each html output message
sage_server.MAX_TEX_SIZE # max length of tex output message
sage_server.MAX_OUTPUT_MESSAGES # max number of messages output for a cell.
And::
sage_server.MAX_OUTPUT # max total character output for a single cell; computation
# terminated/truncated if sum of above exceeds this.
"""
Namespace = Namespace
_prefix = ''
_postfix = ''
_default_mode = 'sage'
_py_features = {}
def _flush_stdio(self):
"""
Flush the standard output streams. This should be called before sending any message
that produces output.
"""
sys.stdout.flush()
sys.stderr.flush()
def __repr__(self):
return ''
def __init__(self, conn, id, data=None, cell_id=None, message_queue=None):
    """One Salvus instance is created per cell execution.

    - ``conn`` -- connection used to send output (has send_json/send_blob)
    - ``id`` -- id of the execute_code message this cell answers
    - ``data`` / ``cell_id`` -- extra metadata for this cell
    - ``message_queue`` -- incoming-message queue (used e.g. by file()
      while waiting for save_blob acknowledgements)

    Side effects: registers itself as namespace['smc'] / ['salvus'] and
    as sage.all.salvus.
    """
    self._conn = conn
    # counters for the output limits enforced in _send_output
    self._num_output_messages = 0
    self._total_output_length = 0
    self._output_warning_sent = False
    self._id = id
    self._done = True  # done=self._done when last execute message is sent; e.g., set self._done = False to not close cell on code term.
    self.data = data
    self.cell_id = cell_id
    self.namespace = namespace
    self.message_queue = message_queue
    self.code_decorators = []  # gets reset if there are code decorators
    # Alias: someday remove all references to "salvus" and instead use smc.
    # For now this alias is easier to think of and use.
    namespace['smc'] = namespace[
        'salvus'] = self  # beware of circular ref?
    # Monkey patch in our "require" command.
    namespace['require'] = self.require
    # Make the salvus object itself available when doing "from sage.all import *".
    import sage.all
    sage.all.salvus = self
def _send_output(self, *args, **kwds):
    """Send one output message for this cell, enforcing output limits.

    Raises KeyboardInterrupt (to abort the running cell) once either
    sage_server.MAX_OUTPUT_MESSAGES (message count) or sage_server.MAX_OUTPUT
    (total character length) is exceeded; a warning message is sent to the
    client the first time a limit is hit.
    """
    if self._output_warning_sent:
        # a limit was already hit earlier in this cell -- keep aborting
        raise KeyboardInterrupt
    mesg = message.output(*args, **kwds)
    if not mesg.get('once', False):
        # only non-once messages count toward MAX_OUTPUT_MESSAGES
        self._num_output_messages += 1
    from . import sage_server
    if self._num_output_messages > sage_server.MAX_OUTPUT_MESSAGES:
        self._output_warning_sent = True
        err = "\nToo many output messages: %s (at most %s per cell -- type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
            self._num_output_messages, sage_server.MAX_OUTPUT_MESSAGES)
        self._conn.send_json(
            message.output(stderr=err, id=self._id, once=False, done=True))
        raise KeyboardInterrupt
    n = self._conn.send_json(mesg)
    self._total_output_length += n
    if self._total_output_length > sage_server.MAX_OUTPUT:
        self._output_warning_sent = True
        err = "\nOutput too long: %s -- MAX_OUTPUT (=%s) exceeded (type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
            self._total_output_length, sage_server.MAX_OUTPUT)
        self._conn.send_json(
            message.output(stderr=err, id=self._id, once=False, done=True))
        raise KeyboardInterrupt
def obj(self, obj, done=False):
self._send_output(obj=obj, id=self._id, done=done)
return self
def link(self, filename, label=None, foreground=True, cls=''):
    """
    Output a clickable link to a file somewhere in this project.  The filename
    path must be relative to the current working directory of the Python process.

    The simplest way to use this is

        salvus.link("../name/of/file")  # any relative path to any file

    This creates a link, which when clicked on, opens that file in the foreground.

    If the filename is the name of a directory, clicking will instead
    open the file browser on that directory:

        salvus.link("../name/of/directory")  # clicking on the resulting link opens a directory

    If you would like a button instead of a link, pass cls='btn'.  You can use any of
    the standard Bootstrap button classes, e.g., btn-small, btn-large, btn-success, etc.

    If you would like to change the text in the link (or button) to something
    besides the default (filename), just pass arbitrary HTML to the label= option.

    INPUT:

    - filename -- a relative path to a file or directory
    - label -- (default: the filename) html label for the link
    - foreground -- (default: True); if True, opens link in the foreground
    - cls -- (default: '') optional CSS classes, such as 'btn'.

    EXAMPLES:

    Use as a line decorator::

        %salvus.link name/of/file.foo

    Make a button::

        salvus.link("foo/bar/", label="The Bar Directory", cls='btn')

    Make two big blue buttons with plots in them::

        plot(sin, 0, 20).save('sin.png')
        plot(cos, 0, 20).save('cos.png')
        for img in ['sin.png', 'cos.png']:
            salvus.link(img, label="<img width='150px' src='%s'>"%salvus.file(img, show=False), cls='btn btn-large btn-primary')
    """
    # make the path relative to $HOME -- assumes filename is under the
    # home directory (TODO confirm for paths outside $HOME)
    path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
    if label is None:
        label = filename
    id = uuid()
    # emit an empty anchor first, then fill in the label and click handler
    # via javascript, since label may be arbitrary HTML
    self.html("<a class='%s' style='cursor:pointer'; id='%s'></a>" %
              (cls, id))
    s = "$('#%s').html(obj.label).click(function() {%s; return false;});" % (
        id, self._action(path, foreground))
    self.javascript(s,
                    obj={
                        'label': label,
                        'path': path,
                        'foreground': foreground
                    },
                    once=False)
def _action(self, path, foreground):
if os.path.isdir(path):
if foreground:
action = "worksheet.project_page.open_directory(obj.path);"
else:
action = "worksheet.project_page.set_current_path(obj.path);"
else:
action = "worksheet.project_page.open_file({'path':obj.path, 'foreground': obj.foreground});"
return action
def open_tab(self, filename, foreground=True):
    """
    Open a new file (or directory) document in another tab.
    See the documentation for salvus.link.
    """
    # make the path relative to $HOME -- assumes filename is under the
    # home directory (TODO confirm for paths outside $HOME)
    path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
    self.javascript(self._action(path, foreground),
                    obj={
                        'path': path,
                        'foreground': foreground
                    },
                    once=True)
def close_tab(self, filename):
"""
Close an open file tab. The filename is relative to the current working directory.
"""
self.javascript("worksheet.project_page.close_file(obj)",
obj=filename,
once=True)
def threed(
        self,
        g,  # sage Graphic3d object.
        width=None,
        height=None,
        frame=True,  # True/False or {'color':'black', 'thickness':.4, 'labels':True, 'fontsize':14, 'draw':True,
        # 'xmin':?, 'xmax':?, 'ymin':?, 'ymax':?, 'zmin':?, 'zmax':?}
        background=None,
        foreground=None,
        spin=False,
        aspect_ratio=None,
        frame_aspect_ratio=None,  # synonym for aspect_ratio
        done=False,
        renderer=None,  # None, 'webgl', or 'canvas'
):
    """Render the Sage 3d graphics object ``g`` in the worksheet.

    The scene is converted to JSON, stored as a blob via the connection,
    and referenced from a file-output message, since the scene can be far
    too large to embed directly in the document.  Explicit keyword options
    take precedence over options inherited from ``g._extra_kwds``.
    """
    from .graphics import graphics3d_to_jsonable, json_float as f
    # process options, combining ones set explicitly above with ones inherited from 3d scene
    opts = {
        'width': width,
        'height': height,
        'background': background,
        'foreground': foreground,
        'spin': spin,
        'aspect_ratio': aspect_ratio,
        'renderer': renderer
    }
    extra_kwds = {} if g._extra_kwds is None else g._extra_kwds
    # clean up and normalize aspect_ratio option
    if aspect_ratio is None:
        if frame_aspect_ratio is not None:
            aspect_ratio = frame_aspect_ratio
        elif 'frame_aspect_ratio' in extra_kwds:
            aspect_ratio = extra_kwds['frame_aspect_ratio']
        elif 'aspect_ratio' in extra_kwds:
            aspect_ratio = extra_kwds['aspect_ratio']
    if aspect_ratio is not None:
        # 1 / "automatic" both mean "no explicit ratio"
        if aspect_ratio == 1 or aspect_ratio == "automatic":
            aspect_ratio = None
        elif not (isinstance(aspect_ratio,
                             (list, tuple)) and len(aspect_ratio) == 3):
            raise TypeError(
                "aspect_ratio must be None, 1 or a 3-tuple, but it is '%s'"
                % (aspect_ratio, ))
        else:
            aspect_ratio = [f(x) for x in aspect_ratio]
    opts['aspect_ratio'] = aspect_ratio
    # fall back to per-graphic extra kwds for any option not set explicitly
    for k in [
            'spin', 'height', 'width', 'background', 'foreground',
            'renderer'
    ]:
        if k in extra_kwds and not opts.get(k, None):
            opts[k] = extra_kwds[k]
    if not isinstance(opts['spin'], bool):
        opts['spin'] = f(opts['spin'])
    opts['width'] = f(opts['width'])
    opts['height'] = f(opts['height'])
    # determine the frame from the graphic's bounding box
    b = g.bounding_box()
    xmin, xmax, ymin, ymax, zmin, zmax = b[0][0], b[1][0], b[0][1], b[1][
        1], b[0][2], b[1][2]
    fr = opts['frame'] = {
        'xmin': f(xmin),
        'xmax': f(xmax),
        'ymin': f(ymin),
        'ymax': f(ymax),
        'zmin': f(zmin),
        'zmax': f(zmax)
    }
    # frame may be a dict of overrides or a plain bool (draw / don't draw)
    if isinstance(frame, dict):
        for k in list(fr.keys()):
            if k in frame:
                fr[k] = f(frame[k])
        fr['draw'] = frame.get('draw', True)
        fr['color'] = frame.get('color', None)
        fr['thickness'] = f(frame.get('thickness', None))
        fr['labels'] = frame.get('labels', None)
        if 'fontsize' in frame:
            fr['fontsize'] = int(frame['fontsize'])
    elif isinstance(frame, bool):
        fr['draw'] = frame
    # convert the Sage graphics object to a JSON object that can be rendered
    scene = {'opts': opts, 'obj': graphics3d_to_jsonable(g)}
    # Store that object in the database, rather than sending it directly as an output message.
    # We do this since obj can easily be quite large/complicated, and managing it as part of the
    # document is too slow and doesn't scale.
    blob = json.dumps(scene, separators=(',', ':'))
    uuid = self._conn.send_blob(blob)
    # flush output (so any text appears before 3d graphics, in case they are interleaved)
    self._flush_stdio()
    # send message pointing to the 3d 'file', which will get downloaded from database
    self._send_output(id=self._id,
                      file={
                          'filename': unicode8("%s.sage3d" % uuid),
                          'uuid': uuid
                      },
                      done=done)
def d3_graph(self, g, **kwds):
    """Display the graph ``g`` using the d3-based 'graph' viewer;
    extra keyword options are forwarded to the JSON converter."""
    from .graphics import graph_to_d3_jsonable
    payload = {
        "viewer": "graph",
        "data": graph_to_d3_jsonable(g, **kwds)
    }
    self._send_output(id=self._id, d3=payload)
def file(self,
         filename,
         show=True,
         done=False,
         download=False,
         once=False,
         events=None,
         raw=False,
         text=None):
    """
    Display or provide a link to the given file.  Raises a RuntimeError if this
    is not possible, e.g, if the file is too large.

    If show=True (the default), the browser will show the file,
    or provide a clickable link to it if there is no way to show it.
    If text is also given that will be used instead of the path to the file.

    If show=False, this function returns an object T such that
    T.url (or str(t)) is a string of the form "/blobs/filename?uuid=the_uuid"
    that can be used to access the file even if the file is immediately
    deleted after calling this function (the file is stored in a database).
    Also, T.ttl is the time to live (in seconds) of the object.  A ttl of
    0 means the object is permanently available.

    raw=False (the default):
        If you use the URL
            /blobs/filename?uuid=the_uuid&download
        then the server will include a header that tells the browser to
        download the file to disk instead of displaying it.  Only relatively
        small files can be made available this way.  However, they remain
        available (for a day) even *after* the file is deleted.
        NOTE: It is safe to delete the file immediately after this
        function (salvus.file) returns.

    raw=True:
        Instead, the URL is to the raw file, which is served directly
        from the project:
            /project-id/raw/path/to/filename
        This will only work if the file is not deleted; however, arbitrarily
        large files can be streamed this way.

    This function creates an output message {file:...}; if the user saves
    a worksheet containing this message, then any referenced blobs are made
    permanent in the database.

    The uuid is based on the Sha-1 hash of the file content (it is computed using the
    function sage_server.uuidsha1).  Any two files with the same content have the
    same Sha1 hash.
    """
    filename = unicode8(filename)
    if raw:
        # raw mode: no blob upload; build a URL served straight from the
        # project, which only works for files under $HOME
        info = self.project_info()
        path = os.path.abspath(filename)
        home = os.environ['HOME'] + '/'
        if path.startswith(home):
            path = path[len(home):]
        else:
            raise ValueError(
                "can only send raw files in your home directory")
        url = os.path.join('/', info['base_url'].strip('/'),
                           info['project_id'], 'raw', path.lstrip('/'))
        if show:
            self._flush_stdio()
            self._send_output(id=self._id,
                              once=once,
                              file={
                                  'filename': filename,
                                  'url': url,
                                  'show': show,
                                  'text': text
                              },
                              events=events,
                              done=done)
            return
        else:
            return TemporaryURL(url=url, ttl=0)
    # blob mode: upload the file content, then block until the hub
    # acknowledges the blob was saved (a save_blob message whose sha1
    # matches ours appears in the message queue)
    file_uuid = self._conn.send_file(filename)
    mesg = None
    while mesg is None:
        self.message_queue.recv()
        for i, (typ, m) in enumerate(self.message_queue.queue):
            if typ == 'json' and m.get('event') == 'save_blob' and m.get(
                    'sha1') == file_uuid:
                mesg = m
                # consume the ack so it is not processed again
                del self.message_queue[i]
                break
    if 'error' in mesg:
        raise RuntimeError("error saving blob -- %s" % mesg['error'])
    self._flush_stdio()
    self._send_output(id=self._id,
                      once=once,
                      file={
                          'filename': filename,
                          'uuid': file_uuid,
                          'show': show,
                          'text': text
                      },
                      events=events,
                      done=done)
    if not show:
        info = self.project_info()
        url = "%s/blobs/%s?uuid=%s" % (info['base_url'], filename,
                                       file_uuid)
        if download:
            url += '?download'
        return TemporaryURL(url=url, ttl=mesg.get('ttl', 0))
def python_future_feature(self, feature=None, enable=None):
"""
Allow users to enable, disable, and query the features in the python __future__ module.
"""
if feature is None:
if enable is not None:
raise ValueError(
"enable may not be specified when feature is None")
return sorted(Salvus._py_features.keys())
attr = getattr(future, feature, None)
if (feature not in future.all_feature_names) or (
attr is None) or not isinstance(attr, future._Feature):
raise RuntimeError("future feature %.50r is not defined" %
(feature, ))
if enable is None:
return feature in Salvus._py_features
if enable:
Salvus._py_features[feature] = attr
else:
try:
del Salvus._py_features[feature]
except KeyError:
pass
def default_mode(self, mode=None):
"""
Set the default mode for cell evaluation. This is equivalent
to putting %mode at the top of any cell that does not start
with %. Use salvus.default_mode() to return the current mode.
Use salvus.default_mode("") to have no default mode.
This is implemented using salvus.cell_prefix.
"""
if mode is None:
return Salvus._default_mode
Salvus._default_mode = mode
if mode == "sage":
self.cell_prefix("")
else:
self.cell_prefix("%" + mode)
def cell_prefix(self, prefix=None):
"""
Make it so that the given prefix code is textually
prepending to the input before evaluating any cell, unless
the first character of the cell is a %.
To append code at the end, use cell_postfix.
INPUT:
- ``prefix`` -- None (to return prefix) or a string ("" to disable)
EXAMPLES:
Make it so every cell is timed:
salvus.cell_prefix('%time')
Make it so cells are typeset using latex, and latex comments are allowed even
as the first line.
salvus.cell_prefix('%latex')
%sage salvus.cell_prefix('')
Evaluate each cell using GP (Pari) and display the time it took:
salvus.cell_prefix('%time\n%gp')
%sage salvus.cell_prefix('') # back to normal
"""
if prefix is None:
return Salvus._prefix
else:
Salvus._prefix = prefix
def cell_postfix(self, postfix=None):
"""
Make it so that the given code is textually
appended to the input before evaluating a cell.
To prepend code at the beginning, use cell_prefix.
INPUT:
- ``postfix`` -- None (to return postfix) or a string ("" to disable)
EXAMPLES:
Print memory usage after evaluating each cell:
salvus.cell_postfix('print("%s MB used"%int(get_memory_usage()))')
Return to normal
salvus.set_cell_postfix('')
"""
if postfix is None:
return Salvus._postfix
else:
Salvus._postfix = postfix
    def execute(self, code, namespace=None, preparse=True, locals=None):
        """
        Evaluate ``code`` block by block in ``namespace``, streaming all
        output to the client.

        INPUT:

        - ``code`` -- string of source code (possibly many statements)
        - ``namespace`` -- dict to execute in (default: self.namespace)
        - ``preparse`` -- if True, run each block through the Sage
          preparser (unless the user disabled preparsing)
        - ``locals`` -- optional locals dict passed through to ``exec``
        """
        ascii_warn = False
        code_error = False  # NOTE(review): assigned but never read below
        # Remember whether the code has non-ascii characters so we can
        # print a helpful warning if evaluation fails.
        if sys.getdefaultencoding() == 'ascii':
            for c in code:
                if ord(c) >= 128:
                    ascii_warn = True
                    break
        if namespace is None:
            namespace = self.namespace
        # clear pylab figure (takes a few microseconds)
        if pylab is not None:
            pylab.clf()
        # Combine compiler flags for all __future__ features the user has
        # enabled via python_future_feature().
        compile_flags = reduce(operator.or_,
                               (feature.compiler_flag
                                for feature in Salvus._py_features.values()),
                               0)
        #code = sage_parsing.strip_leading_prompts(code) # broken -- wrong on "def foo(x):\n print(x)"
        blocks = sage_parsing.divide_into_blocks(code)
        try:
            import sage.repl
            # CRITICAL -- we do NOT import sage.repl.interpreter!!!!!!!
            # That would waste several seconds importing ipython and much more, which is just dumb.
            # The only reason this is needed below is if the user has run preparser(False), which
            # would cause sage.repl.interpreter to be imported at that point (as preparser is
            # lazy imported.)
            sage_repl_interpreter = sage.repl.interpreter
        except:
            pass  # expected behavior usually, since sage.repl.interpreter usually not imported (only used by command line...)

        import sage.misc.session

        for start, stop, block in blocks:
            # if import sage.repl.interpreter fails, sag_repl_interpreter is unreferenced
            try:
                do_pp = getattr(sage_repl_interpreter, '_do_preparse', True)
            except:
                do_pp = True
            if preparse and do_pp:
                block = sage_parsing.preparse_code(block)
            sys.stdout.reset()
            sys.stderr.reset()
            try:
                b = block.rstrip()
                # get rid of comments at the end of the line -- issue #1835
                #from ushlex import shlex
                #s = shlex(b)
                #s.commenters = '#'
                #s.quotes = '"\''
                #b = ''.join(s)
                # e.g. now a line like 'x = test? # bar' becomes 'x=test?'
                if b.endswith('??'):
                    # trailing '??' -> show the object's source code
                    p = sage_parsing.introspect(b,
                                                namespace=namespace,
                                                preparse=False)
                    self.code(source=p['result'], mode="python")
                elif b.endswith('?'):
                    # trailing '?' -> show the object's docstring
                    p = sage_parsing.introspect(b,
                                                namespace=namespace,
                                                preparse=False)
                    self.code(source=p['result'], mode="text/x-rst")
                else:
                    reload_attached_files_if_mod_smc()
                    if execute.count < 2:
                        execute.count += 1
                        if execute.count == 2:
                            # this fixup has to happen after first block has executed (os.chdir etc)
                            # but before user assigns any variable in worksheet
                            # sage.misc.session.init() is not called until first call of show_identifiers
                            # BUGFIX: be careful to *NOT* assign to _!! see https://github.com/sagemathinc/cocalc/issues/1107
                            block2 = "sage.misc.session.state_at_init = dict(globals());sage.misc.session._dummy=sage.misc.session.show_identifiers();\n"
                            exec(compile(block2, '', 'single'), namespace,
                                 locals)
                            # Load the user's optional startup file exactly
                            # once, surfacing any exception it raises.
                            b2a = """
if 'SAGE_STARTUP_FILE' in os.environ and os.path.isfile(os.environ['SAGE_STARTUP_FILE']):
    try:
        load(os.environ['SAGE_STARTUP_FILE'])
    except:
        sys.stdout.flush()
        sys.stderr.write('\\nException loading startup file: {}\\n'.format(os.environ['SAGE_STARTUP_FILE']))
        sys.stderr.flush()
        raise
"""
                            exec(compile(b2a, '', 'exec'), namespace, locals)
                    # A block may itself contain `from __future__ import ...`;
                    # honor those flags for this and later blocks.
                    features = sage_parsing.get_future_features(
                        block, 'single')
                    if features:
                        compile_flags = reduce(
                            operator.or_, (feature.compiler_flag
                                           for feature in features.values()),
                            compile_flags)
                    exec(
                        compile(block + '\n',
                                '',
                                'single',
                                flags=compile_flags), namespace, locals)
                    if features:
                        Salvus._py_features.update(features)
                sys.stdout.flush()
                sys.stderr.flush()
            except:
                if ascii_warn:
                    sys.stderr.write(
                        '\n\n*** WARNING: Code contains non-ascii characters ***\n'
                    )
                    # Curly quotes are a common paste artifact; point them out.
                    for c in '\u201c\u201d':
                        if c in code:
                            sys.stderr.write(
                                '*** Maybe the character < %s > should be replaced by < " > ? ***\n'
                                % c)
                            break
                    sys.stderr.write('\n\n')

                # NOTE(review): the `exceptions` module is Python-2 only;
                # under Python 3 this import itself raises -- confirm which
                # interpreter this file targets.
                from exceptions import SyntaxError, TypeError
                exc_type, _, _ = sys.exc_info()
                if exc_type in [SyntaxError, TypeError]:
                    from .sage_parsing import strip_string_literals
                    # Strip string literals first so e.g. "5x" inside a
                    # string doesn't trigger the hint below.
                    code0, _, _ = strip_string_literals(code)
                    implicit_mul = RE_POSSIBLE_IMPLICIT_MUL.findall(code0)
                    if len(implicit_mul) > 0:
                        implicit_mul_list = ', '.join(
                            str(_) for _ in implicit_mul)
                        # we know there is a SyntaxError and there could be an implicit multiplication
                        sys.stderr.write(
                            '\n\n*** WARNING: Code contains possible implicit multiplication ***\n'
                        )
                        sys.stderr.write(
                            '*** Check if any of [ %s ] need a "*" sign for multiplication, e.g. 5x should be 5*x ! ***\n\n'
                            % implicit_mul_list)

                sys.stdout.flush()
                sys.stderr.write('Error in lines %s-%s\n' %
                                 (start + 1, stop + 1))
                traceback.print_exc()
                sys.stderr.flush()
                # Stop evaluating the remaining blocks of this cell.
                break
    def execute_with_code_decorators(self,
                                     code_decorators,
                                     code,
                                     preparse=True,
                                     namespace=None,
                                     locals=None):
        """
        salvus.execute_with_code_decorators is used when evaluating
        code blocks that are set to any non-default code_decorator.

        Each decorator (given as source text) is evaluated to an object,
        offered a ``before`` hook, then applied to the code in reverse
        order; whatever code string remains is run via ``self.execute``,
        and finally each decorator's ``after`` hook is called.
        """
        import sage  # used below as a code decorator
        if isinstance(code_decorators, str):
            code_decorators = [code_decorators]

        # Decorator names arrive as source text: optionally preparse,
        # then evaluate them in the user namespace to real objects.
        if preparse:
            code_decorators = list(
                map(sage_parsing.preparse_code, code_decorators))

        code_decorators = [
            eval(code_decorator, self.namespace)
            for code_decorator in code_decorators
        ]

        # The code itself may want to know exactly what code decorators are in effect.
        # For example, r.eval can do extra things when being used as a decorator.
        self.code_decorators = code_decorators

        for i, code_decorator in enumerate(code_decorators):
            # eval is for backward compatibility
            if not hasattr(code_decorator, 'eval') and hasattr(
                    code_decorator, 'before'):
                code_decorators[i] = code_decorator.before(code)

        for code_decorator in reversed(code_decorators):
            # eval is for backward compatibility
            if hasattr(code_decorator, 'eval'):
                # the decorator evaluates the code itself; nothing remains
                # for self.execute below
                print((code_decorator.eval(code, locals=self.namespace)),
                      end=' ')
                code = ''
            elif code_decorator is sage:
                # special case -- the sage module (i.e., %sage) should do nothing.
                pass
            else:
                code = code_decorator(code)
            if code is None:
                code = ''

        if code != '' and isinstance(code, str):
            self.execute(code,
                         preparse=preparse,
                         namespace=namespace,
                         locals=locals)

        for code_decorator in code_decorators:
            if not hasattr(code_decorator, 'eval') and hasattr(
                    code_decorator, 'after'):
                code_decorator.after(code)
def html(self, html, done=False, once=None):
"""
Display html in the output stream.
EXAMPLE:
salvus.html("<b>Hi</b>")
"""
self._flush_stdio()
self._send_output(html=unicode8(html),
id=self._id,
done=done,
once=once)
def md(self, md, done=False, once=None):
"""
Display markdown in the output stream.
EXAMPLE:
salvus.md("**Hi**")
"""
self._flush_stdio()
self._send_output(md=unicode8(md), id=self._id, done=done, once=once)
    def pdf(self, filename, **kwds):
        # Render a PDF file inline in the worksheet; see
        # sage_salvus.show_pdf for the supported keyword options.
        sage_salvus.show_pdf(filename, **kwds)
def tex(self, obj, display=False, done=False, once=None, **kwds):
"""
Display obj nicely using TeX rendering.
INPUT:
- obj -- latex string or object that is automatically be converted to TeX
- display -- (default: False); if True, typeset as display math (so centered, etc.)
"""
self._flush_stdio()
tex = obj if isinstance(obj, str) else self.namespace['latex'](obj, **
kwds)
self._send_output(tex={
'tex': tex,
'display': display
},
id=self._id,
done=done,
once=once)
return self
    def start_executing(self):
        # Announce to the client that a computation has begun for this
        # cell (done=False marks the output stream as still open).
        self._send_output(done=False, id=self._id)
    def clear(self, done=False):
        # Ask the client to clear all output of the current cell.
        self._send_output(clear=True, id=self._id, done=done)
    def delete_last_output(self, done=False):
        # Ask the client to delete the most recent output message of the
        # current cell (used e.g. after raw_input is answered).
        self._send_output(delete_last=True, id=self._id, done=done)
def stdout(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard output stream of the compute cell.
INPUT:
- output -- string or object
"""
stdout = output if isinstance(output, str) else unicode8(output)
self._send_output(stdout=stdout, done=done, id=self._id, once=once)
return self
def stderr(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard error stream of the compute cell.
INPUT:
- output -- string or object
"""
stderr = output if isinstance(output, str) else unicode8(output)
self._send_output(stderr=stderr, done=done, id=self._id, once=once)
return self
def code(
self,
source, # actual source code
mode=None, # the syntax highlight codemirror mode
filename=None, # path of file it is contained in (if applicable)
lineno=-1, # line number where source starts (0-based)
done=False,
once=None):
"""
Send a code message, which is to be rendered as code by the client, with
appropriate syntax highlighting, maybe a link to open the source file, etc.
"""
source = source if isinstance(source, str) else unicode8(source)
code = {
'source': source,
'filename': filename,
'lineno': int(lineno),
'mode': mode
}
self._send_output(code=code, done=done, id=self._id, once=once)
return self
def _execute_interact(self, id, vals):
if id not in sage_salvus.interacts:
print("(Evaluate this cell to use this interact.)")
#raise RuntimeError("Error: No interact with id %s"%id)
else:
sage_salvus.interacts[id](vals)
def interact(self, f, done=False, once=None, **kwds):
I = sage_salvus.InteractCell(f, **kwds)
self._flush_stdio()
self._send_output(interact=I.jsonable(),
id=self._id,
done=done,
once=once)
return sage_salvus.InteractFunction(I)
def javascript(self,
code,
once=False,
coffeescript=False,
done=False,
obj=None):
"""
Execute the given Javascript code as part of the output
stream. This same code will be executed (at exactly this
point in the output stream) every time the worksheet is
rendered.
See the docs for the top-level javascript function for more details.
INPUT:
- code -- a string
- once -- boolean (default: FAlse); if True the Javascript is
only executed once, not every time the cell is loaded. This
is what you would use if you call salvus.stdout, etc. Use
once=False, e.g., if you are using javascript to make a DOM
element draggable (say). WARNING: If once=True, then the
javascript is likely to get executed before other output to
a given cell is even rendered.
- coffeescript -- boolean (default: False); if True, the input
code is first converted from CoffeeScript to Javascript.
At least the following Javascript objects are defined in the
scope in which the code is evaluated::
- cell -- jQuery wrapper around the current compute cell
- salvus.stdout, salvus.stderr, salvus.html, salvus.tex -- all
allow you to write additional output to the cell
- worksheet - jQuery wrapper around the current worksheet DOM object
- obj -- the optional obj argument, which is passed via JSON serialization
"""
if obj is None:
obj = {}
self._send_output(javascript={
'code': code,
'coffeescript': coffeescript
},
id=self._id,
done=done,
obj=obj,
once=once)
    def coffeescript(self, *args, **kwds):
        """
        This is the same as salvus.javascript, but with coffeescript=True.

        See the docs for the top-level javascript function for more details.
        """
        kwds['coffeescript'] = True
        self.javascript(*args, **kwds)
    def raw_input(self,
                  prompt='',
                  default='',
                  placeholder='',
                  input_width=None,
                  label_width=None,
                  done=False,
                  type=None):  # done is ignored here
        """
        Show an input box in the cell's output and block until the user
        submits a value.

        The submitted value is returned as unicode; if ``type`` is given
        it is applied to the value first ('sage' means evaluate with
        sage_salvus.sage_eval; any other callable is applied directly).
        Raises KeyboardInterrupt if a different message arrives instead
        of the expected reply.
        """
        self._flush_stdio()
        m = {'prompt': unicode8(prompt)}
        if input_width is not None:
            m['input_width'] = unicode8(input_width)
        if label_width is not None:
            m['label_width'] = unicode8(label_width)
        if default:
            m['value'] = unicode8(default)
        if placeholder:
            m['placeholder'] = unicode8(placeholder)
        self._send_output(raw_input=m, id=self._id)
        # Block until the next message arrives; it should be the reply.
        typ, mesg = self.message_queue.next_mesg()
        log("handling raw input message ", truncate_text(unicode8(mesg), 400))
        if typ == 'json' and mesg['event'] == 'sage_raw_input':
            # everything worked out perfectly
            # Replace the pending prompt with a copy marked submitted, so
            # the worksheet records what was entered.
            self.delete_last_output()
            m['value'] = mesg['value']  # as unicode!
            m['submitted'] = True
            self._send_output(raw_input=m, id=self._id)
            value = mesg['value']
            if type is not None:
                if type == 'sage':
                    value = sage_salvus.sage_eval(value)
                else:
                    try:
                        value = type(value)
                    except TypeError:
                        # Some things in Sage are clueless about unicode for some reason...
                        # Let's at least try, in case the unicode can convert to a string.
                        value = type(str(value))
            return value
        else:
            raise KeyboardInterrupt(
                "raw_input interrupted by another action: event='%s' (expected 'sage_raw_input')"
                % mesg['event'])
def _check_component(self, component):
if component not in ['input', 'output']:
raise ValueError("component must be 'input' or 'output'")
    def hide(self, component):
        """
        Hide the given component ('input' or 'output') of the cell.
        """
        self._check_component(component)
        self._send_output(self._id, hide=component)
    def show(self, component):
        """
        Show the given component ('input' or 'output') of the cell.
        """
        self._check_component(component)
        self._send_output(self._id, show=component)
def notify(self, **kwds):
"""
Display a graphical notification using the alert_message Javascript function.
INPUTS:
- `type: "default"` - Type of the notice. "default", "warning", "info", "success", or "error".
- `title: ""` - The notice's title.
- `message: ""` - The notice's text.
- `timeout: ?` - Delay in seconds before the notice is automatically removed.
EXAMPLE:
salvus.notify(type="warning", title="This warning", message="This is a quick message.", timeout=3)
"""
obj = {}
for k, v in kwds.items():
if k == 'text': # backward compat
k = 'message'
elif k == 'type' and v == 'notice': # backward compat
v = 'default'
obj[k] = sage_salvus.jsonable(v)
if k == 'delay': # backward compat
obj['timeout'] = v / 1000.0 # units are in seconds now.
self.javascript("alert_message(obj)", once=True, obj=obj)
def execute_javascript(self, code, coffeescript=False, obj=None):
"""
Tell the browser to execute javascript. Basically the same as
salvus.javascript with once=True (the default), except this
isn't tied to a particular cell. There is a worksheet object
defined in the scope of the evaluation.
See the docs for the top-level javascript function for more details.
"""
self._conn.send_json(
message.execute_javascript(code,
coffeescript=coffeescript,
obj=json.dumps(obj,
separators=(',', ':'))))
    def execute_coffeescript(self, *args, **kwds):
        """
        This is the same as salvus.execute_javascript, but with coffeescript=True.

        See the docs for the top-level javascript function for more details.
        """
        kwds['coffeescript'] = True
        self.execute_javascript(*args, **kwds)
def _cython(self, filename, **opts):
"""
Return module obtained by compiling the Cython code in the
given file.
INPUT:
- filename -- name of a Cython file
- all other options are passed to sage.misc.cython.cython unchanged,
except for use_cache which defaults to True (instead of False)
OUTPUT:
- a module
"""
if 'use_cache' not in opts:
opts['use_cache'] = True
import sage.misc.cython
modname, path = sage.misc.cython.cython(filename, **opts)
try:
sys.path.insert(0, path)
module = __import__(modname)
finally:
del sys.path[0]
return module
def _import_code(self, content, **opts):
while True:
py_file_base = uuid().replace('-', '_')
if not os.path.exists(py_file_base + '.py'):
break
try:
open(py_file_base + '.py', 'w').write(content)
try:
sys.path.insert(0, os.path.abspath('.'))
mod = __import__(py_file_base)
finally:
del sys.path[0]
finally:
os.unlink(py_file_base + '.py')
os.unlink(py_file_base + '.pyc')
return mod
    def _sage(self, filename, **opts):
        # Preparse a .sage file into Python source (prefixed with a full
        # Sage import) and import the result as a module.
        import sage.misc.preparser
        content = "from sage.all import *\n" + sage.misc.preparser.preparse_file(
            open(filename).read())
        return self._import_code(content, **opts)
    def _spy(self, filename, **opts):
        # Like _sage, but with a minimal import prefix (only Integer,
        # RealNumber, PolynomialRing) instead of all of sage.all.
        import sage.misc.preparser
        content = "from sage.all import Integer, RealNumber, PolynomialRing\n" + sage.misc.preparser.preparse_file(
            open(filename).read())
        return self._import_code(content, **opts)
    def _py(self, filename, **opts):
        # Import a plain Python module by name; no preparsing needed.
        # NOTE(review): ``opts`` is accepted but ignored here.
        return __import__(filename)
def require(self, filename, **opts):
if not os.path.exists(filename):
raise ValueError("file '%s' must exist" % filename)
base, ext = os.path.splitext(filename)
if ext == '.pyx' or ext == '.spyx':
return self._cython(filename, **opts)
if ext == ".sage":
return self._sage(filename, **opts)
if ext == ".spy":
return self._spy(filename, **opts)
if ext == ".py":
return self._py(filename, **opts)
raise NotImplementedError("require file of type %s not implemented" %
ext)
    def typeset_mode(self, on=True):
        # Toggle automatic LaTeX typesetting of output; delegates to
        # sage_salvus.typeset_mode.
        sage_salvus.typeset_mode(on)
    def project_info(self):
        """
        Return a dictionary with information about the project in which this code is running.

        EXAMPLES::

            sage: salvus.project_info()
            {"stdout":"{u'project_id': u'...', u'location': {u'username': u'teaAuZ9M', u'path': u'.', u'host': u'localhost', u'port': 22}, u'base_url': u'/...'}\n"}
        """
        # INFO is a module-level dict populated elsewhere in this file.
        return INFO
# Mirror the docstrings of the underlying sage_salvus implementations
# onto the thin Salvus wrapper methods, so interactive help shows the
# real documentation.  Under Python 2 an unbound method's __doc__ is
# read-only and must be written through __func__; under Python 3
# methods are plain functions and __doc__ is assigned directly.
if six.PY2:
    Salvus.pdf.__func__.__doc__ = sage_salvus.show_pdf.__doc__
    Salvus.raw_input.__func__.__doc__ = sage_salvus.raw_input.__doc__
    Salvus.clear.__func__.__doc__ = sage_salvus.clear.__doc__
    Salvus.delete_last_output.__func__.__doc__ = sage_salvus.delete_last_output.__doc__
else:
    Salvus.pdf.__doc__ = sage_salvus.show_pdf.__doc__
    Salvus.raw_input.__doc__ = sage_salvus.raw_input.__doc__
    Salvus.clear.__doc__ = sage_salvus.clear.__doc__
    Salvus.delete_last_output.__doc__ = sage_salvus.delete_last_output.__doc__
def execute(conn, id, code, data, cell_id, preparse, message_queue):
    """
    Handle one 'execute_code' request: build a Salvus object for the
    cell, temporarily swap sys.stdout/stderr for buffered streams that
    forward output to the client, run the code, and guarantee exactly
    one 'done' message at the end (unless salvus._done is False).
    """
    salvus = Salvus(conn=conn,
                    id=id,
                    data=data,
                    message_queue=message_queue,
                    cell_id=cell_id)

    #salvus.start_executing() # with our new mainly client-side execution this isn't needed; not doing this makes evaluation roundtrip around 100ms instead of 200ms too, which is a major win.

    try:
        # initialize the salvus output streams
        streams = (sys.stdout, sys.stderr)
        sys.stdout = BufferedOutputStream(salvus.stdout)
        sys.stderr = BufferedOutputStream(salvus.stderr)
        try:
            # initialize more salvus functionality
            sage_salvus.set_salvus(salvus)
            namespace['sage_salvus'] = sage_salvus
        except:
            traceback.print_exc()
        # Apply the per-session prefix/postfix (see Salvus.cell_prefix /
        # cell_postfix), unless the cell opts out by starting with '%'.
        if salvus._prefix:
            if not code.startswith("%"):
                code = salvus._prefix + '\n' + code
        if salvus._postfix:
            code += '\n' + salvus._postfix
        salvus.execute(code, namespace=namespace, preparse=preparse)
    finally:
        # there must be exactly one done message, unless salvus._done is False.
        # Flush whichever stream is written last with done=..., so the
        # done flag rides on the final output message.
        if sys.stderr._buf:
            if sys.stdout._buf:
                sys.stdout.flush()
            sys.stderr.flush(done=salvus._done)
        else:
            sys.stdout.flush(done=salvus._done)
        (sys.stdout, sys.stderr) = streams


# execute.count goes from 0 to 2
# used for show_identifiers()
execute.count = 0
def drop_privileges(id, home, transient, username):
    """
    Switch this (forked) process over to the target user account.

    Sets gid/uid to ``id``, points HOME, DOT_SAGE, IPYTHON_DIR and the
    matplotlib config dir at ``home``, sets the username environment
    variables, and chdirs into ``home``.  Must run before user code.
    """
    gid = id
    uid = id
    if transient:
        # For transient accounts, make sure the home directory is owned
        # by the target user before switching into it.
        os.chown(home, uid, gid)
    # Drop group before user -- once uid is dropped we could no longer
    # change the gid.
    os.setgid(gid)
    os.setuid(uid)
    os.environ['DOT_SAGE'] = home
    # Rebase MPLCONFIGDIR onto the new home by dropping its first five
    # characters (presumably a '/home'-like prefix -- TODO confirm).
    mpl = os.environ['MPLCONFIGDIR']
    os.environ['MPLCONFIGDIR'] = home + mpl[5:]
    os.environ['HOME'] = home
    os.environ['IPYTHON_DIR'] = home
    os.environ['USERNAME'] = username
    os.environ['USER'] = username
    os.chdir(home)

    # Monkey patch the Sage library and anything else that does not
    # deal well with changing user. This sucks, but it is work that
    # simply must be done because we're not importing the library from
    # scratch (which would take a long time).
    import sage.misc.misc
    sage.misc.misc.DOT_SAGE = home + '/.sage/'
class MessageQueue(list):
    """
    FIFO of messages received from the hub connection.

    Incoming messages are inserted at the front of ``self.queue``, so
    the oldest message sits at the end and is the next one dequeued.
    Callers elsewhere also inspect and delete entries directly through
    indexing, so ``queue`` and ``conn`` are part of the interface.
    """

    def __init__(self, conn):
        self.queue = []
        self.conn = conn

    def __repr__(self):
        return "Sage Server Message Queue"

    def __getitem__(self, i):
        return self.queue[i]

    def __delitem__(self, i):
        del self.queue[i]

    def next_mesg(self):
        """
        Remove the oldest queued message and return it.  If the queue is
        empty, block on the connection and return the message that
        arrives (without enqueueing it).
        """
        if not self.queue:
            return self.conn.recv()
        return self.queue.pop()

    def recv(self):
        """
        Block until one message is received, enqueue it (newest at the
        front), and return it.
        """
        mesg = self.conn.recv()
        self.queue.insert(0, mesg)
        return mesg
def session(conn):
    """
    This is run by the child process that is forked off on each new
    connection. It drops privileges, then handles the complete
    compute session.

    INPUT:

    - ``conn`` -- the TCP connection
    """
    mq = MessageQueue(conn)

    pid = os.getpid()

    # seed the random number generator(s)
    import sage.all
    sage.all.set_random_seed()
    import random
    random.seed(sage.all.initial_seed())

    # get_memory_usage is not aware of being forked...
    import sage.misc.getusage
    sage.misc.getusage._proc_status = "/proc/%s/status" % os.getpid()

    cnt = 0

    while True:
        try:
            typ, mesg = mq.next_mesg()

            #print('INFO:child%s: received message "%s"'%(pid, mesg))
            log("handling message ", truncate_text(unicode8(mesg), 400))
            event = mesg['event']
            if event == 'terminate_session':
                return
            elif event == 'execute_code':
                try:
                    execute(conn=conn,
                            id=mesg['id'],
                            code=mesg['code'],
                            data=mesg.get('data', None),
                            cell_id=mesg.get('cell_id', None),
                            preparse=mesg.get('preparse', True),
                            message_queue=mq)
                except Exception as err:
                    log("ERROR -- exception raised '%s' when executing '%s'" %
                        (err, mesg['code']))
            elif event == 'introspect':
                try:
                    # check for introspect from jupyter cell
                    prefix = Salvus._default_mode
                    if 'top' in mesg:
                        top = mesg['top']
                        log('introspect cell top line %s' % top)
                        if top.startswith("%"):
                            prefix = top[1:]
                    try:
                        # see if prefix is the name of a jupyter kernel function
                        kc = eval(prefix + "(get_kernel_client=True)",
                                  namespace, locals())
                        kn = eval(prefix + "(get_kernel_name=True)", namespace,
                                  locals())
                        log("jupyter introspect prefix %s kernel %s" %
                            (prefix, kn))  # e.g. "p2", "python2"
                        jupyter_introspect(conn=conn,
                                           id=mesg['id'],
                                           line=mesg['line'],
                                           preparse=mesg.get('preparse', True),
                                           kc=kc)
                    except:
                        # Not a jupyter-backed cell (or the kernel lookup
                        # failed): log the traceback and fall back to the
                        # regular Sage introspection path.
                        import traceback
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        lines = traceback.format_exception(
                            exc_type, exc_value, exc_traceback)
                        log(lines)
                        introspect(conn=conn,
                                   id=mesg['id'],
                                   line=mesg['line'],
                                   preparse=mesg.get('preparse', True))
                except:
                    pass
            else:
                raise RuntimeError("invalid message '%s'" % mesg)
        except:
            # When hub connection dies, loop goes crazy.
            # Unfortunately, just catching SIGINT doesn't seem to
            # work, and leads to random exits during a
            # session. Howeer, when connection dies, 10000 iterations
            # happen almost instantly. Ugly, but it works.
            cnt += 1
            if cnt > 10000:
                sys.exit(0)
            else:
                pass
def jupyter_introspect(conn, id, line, preparse, kc):
    """
    Answer a completion request using a running Jupyter kernel: ask the
    kernel (client ``kc``) to complete ``line``, drain the kernel's
    iopub channel, then read the 'complete_reply' from the shell channel
    and forward the trimmed matches to the hub.
    """
    import jupyter_client
    from queue import Empty

    try:
        salvus = Salvus(conn=conn, id=id)
        msg_id = kc.complete(line)
        shell = kc.shell_channel
        iopub = kc.iopub_channel

        # handle iopub responses
        while True:
            try:
                msg = iopub.get_msg(timeout=1)
                msg_type = msg['msg_type']
                content = msg['content']
            except Empty:
                # shouldn't happen
                log("jupyter iopub channel empty")
                break

            # skip messages belonging to some other request
            if msg['parent_header'].get('msg_id') != msg_id:
                continue

            log("jupyter iopub recv %s %s" % (msg_type, str(content)))

            # kernel going idle means all iopub output for our request is in
            if msg_type == 'status' and content['execution_state'] == 'idle':
                break

        # handle shell responses
        while True:
            try:
                msg = shell.get_msg(timeout=10)
                msg_type = msg['msg_type']
                content = msg['content']
            except:
                # shouldn't happen
                log("jupyter shell channel empty")
                break

            if msg['parent_header'].get('msg_id') != msg_id:
                continue

            log("jupyter shell recv %s %s" % (msg_type, str(content)))

            if msg_type == 'complete_reply' and content['status'] == 'ok':
                # jupyter kernel returns matches like "xyz.append" and smc wants just "append"
                matches = content['matches']
                offset = content['cursor_end'] - content['cursor_start']
                completions = [s[offset:] for s in matches]
                mesg = message.introspect_completions(id=id,
                                                      completions=completions,
                                                      target=line[-offset:])
                conn.send_json(mesg)
                break
    except:
        log("jupyter completion exception: %s" % sys.exc_info()[0])
def introspect(conn, id, line, preparse):
    """
    Answer an introspection request (tab completions, docstring, or
    source code) for ``line`` using the Sage namespace, and send the
    resulting message back to the hub.
    """
    salvus = Salvus(
        conn=conn, id=id
    )  # so salvus.[tab] works -- note that Salvus(...) modifies namespace.
    z = sage_parsing.introspect(line, namespace=namespace, preparse=preparse)
    if z['get_completions']:
        mesg = message.introspect_completions(id=id,
                                              completions=z['result'],
                                              target=z['target'])
    elif z['get_help']:
        mesg = message.introspect_docstring(id=id,
                                            docstring=z['result'],
                                            target=z['expr'])
    elif z['get_source']:
        mesg = message.introspect_source_code(id=id,
                                              source_code=z['result'],
                                              target=z['expr'])
    # NOTE(review): if none of the three flags is set, ``mesg`` is unbound
    # here and this raises UnboundLocalError -- presumably
    # sage_parsing.introspect always sets exactly one; confirm.
    conn.send_json(mesg)
def handle_session_term(signum, frame):
    """
    Signal handler: reap every finished child process (non-blocking)
    so no zombies accumulate; returns once nothing is left to reap.
    """
    while True:
        try:
            child_pid, _status = os.waitpid(-1, os.WNOHANG)
        except:
            # no children (or waitpid otherwise failed) -- nothing to reap
            return
        if not child_pid:
            # children exist but none have exited yet
            return
# Shared secret used to authenticate incoming connections.  The token
# itself is read lazily by unlock_conn; here we only record where to
# find it: $COCALC_SECRET_TOKEN if set, else $SMC/secret_token.
secret_token = None
if 'COCALC_SECRET_TOKEN' in os.environ:
    secret_token_path = os.environ['COCALC_SECRET_TOKEN']
else:
    secret_token_path = os.path.join(os.environ['SMC'], 'secret_token')
def unlock_conn(conn):
    """
    Authenticate a freshly accepted connection.

    Lazily reads the shared secret from ``secret_token_path``, then
    compares it with what the client sends.  Replies 'y' and returns
    True on success; replies 'n' (with an explanation), closes the
    connection, and returns False on failure.
    """
    global secret_token
    if secret_token is None:
        try:
            secret_token = open(secret_token_path).read().strip()
        except:
            conn.send(six.b('n'))
            conn.send(
                six.
                b("Unable to accept connection, since Sage server doesn't yet know the secret token; unable to read from '%s'"
                  % secret_token_path))
            conn.close()
            # BUGFIX: without this return we would fall through to
            # len(secret_token) below with secret_token still None and
            # crash with a TypeError instead of reporting failure.
            return False

    n = len(secret_token)
    token = six.b('')
    while len(token) < n:
        token += conn.recv(n)
        # NOTE(review): secret_token is str while token is bytes; under
        # Python 3 this prefix comparison can never match byte-for-byte.
        # Confirm the intended six/py2 semantics here.
        if token != secret_token[:len(token)]:
            break  # definitely not right -- don't try anymore
    if token != six.b(secret_token):
        log("token='%s'; secret_token='%s'" % (token, secret_token))
        conn.send(six.b('n'))  # no -- invalid login
        conn.send(six.b("Invalid secret token."))
        conn.close()
        return False
    else:
        conn.send(six.b('y'))  # yes -- valid login
        return True
def serve_connection(conn):
    """
    Handle a single freshly accepted socket: authenticate it via the
    shared secret token, then either deliver a signal ('send_signal')
    or start a full compute session ('start_session').
    """
    global PID
    PID = os.getpid()
    # First the client *must* send the secret shared token. If they
    # don't, we return (and the connection will have been destroyed by
    # unlock_conn).
    log("Serving a connection")
    log("Waiting for client to unlock the connection...")
    # TODO -- put in a timeout (?)
    if not unlock_conn(conn):
        log("Client failed to unlock connection. Dumping them.")
        return
    log("Connection unlocked.")

    try:
        # From here on the protocol is JSON messages.
        conn = ConnectionJSON(conn)
        typ, mesg = conn.recv()
        log("Received message %s" % mesg)
    except Exception as err:
        log("Error receiving message: %s (connection terminated)" % str(err))
        raise

    if mesg['event'] == 'send_signal':
        if mesg['pid'] == 0:
            log("invalid signal mesg (pid=0)")
        else:
            log("Sending a signal")
            os.kill(mesg['pid'], mesg['signal'])
        return
    if mesg['event'] != 'start_session':
        log("Received an unknown message event = %s; terminating session." %
            mesg['event'])
        return

    log("Starting a session")
    desc = message.session_description(os.getpid())
    log("child sending session description back: %s" % desc)
    conn.send_json(desc)
    session(conn=conn)
def serve(port, host, extra_imports=False):
    """
    Main server loop: bind a listening socket, pre-import the Sage
    library and build the user namespace (init_library), then accept
    connections forever, forking one child per connection and reaping
    finished children in between accepts.
    """
    #log.info('opening connection on port %s', port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # check for children that have finished every few seconds, so
    # we don't end up with zombies.
    s.settimeout(5)
    s.bind((host, port))
    log('Sage server %s:%s' % (host, port))

    # Enabling the following signal completely breaks subprocess pexpect in many cases, which is
    # obviously totally unacceptable.
    #signal.signal(signal.SIGCHLD, handle_session_term)

    def init_library():
        # One-time, slow setup: import sage, monkey patch it for the
        # worksheet environment, and populate the shared user namespace.
        tm = time.time()
        log("pre-importing the sage library...")

        # FOR testing purposes.
        ##log("fake 40 second pause to slow things down for testing....")
        ##time.sleep(40)
        ##log("done with pause")

        # Monkey patching interact using the new and improved Salvus
        # implementation of interact.
        import sagenb.notebook.interact
        sagenb.notebook.interact.interact = sage_salvus.interact

        # Actually import sage now.  This must happen after the interact
        # import because of library interacts.
        log("import sage...")
        import sage.all
        log("imported sage.")

        # Monkey patch the html command.
        try:
            # need the following for sage_server to start with sage-8.0
            # or `import sage.interacts.library` will fail (not really important below, as we don't do that).
            import sage.repl.user_globals
            sage.repl.user_globals.set_globals(globals())
            log("initialized user_globals")
        except RuntimeError:
            # may happen with sage version < 8.0
            log("user_globals.set_globals failed, continuing", sys.exc_info())

        sage.all.html = sage.misc.html.html = sage_salvus.html

        # CRITICAL: look, we are just going to not do this, and have sage.interacts.library
        # be broken.  It's **really slow** to do this, and I don't think sage.interacts.library
        # ever ended up going anywhere!  People use wiki.sagemath.org/interact instead...
        #import sage.interacts.library
        #sage.interacts.library.html = sage_salvus.html

        # Set a useful figsize default; the matplotlib one is not notebook friendly.
        import sage.plot.graphics
        sage.plot.graphics.Graphics.SHOW_OPTIONS['figsize'] = [8, 4]

        # Monkey patch latex.eval, so that %latex works in worksheets
        sage.misc.latex.latex.eval = sage_salvus.latex0

        # Plot, integrate, etc., -- so startup time of worksheets is minimal.
        cmds = [
            'from sage.all import *', 'from sage.calculus.predefined import x',
            'import pylab'
        ]
        if extra_imports:
            cmds.extend([
                'import scipy', 'import sympy',
                "plot(sin).save('%s/a.png'%os.environ['SMC'], figsize=2)",
                'integrate(sin(x**2),x)'
            ])
        tm0 = time.time()
        for cmd in cmds:
            log(cmd)
            exec(cmd, namespace)

        global pylab
        pylab = namespace['pylab']  # used for clearing

        log('imported sage library and other components in %s seconds' %
            (time.time() - tm))

        for k, v in sage_salvus.interact_functions.items():
            namespace[k] = v
            # See above -- not doing this, since it is REALLY SLOW to import.
            # This does mean that some old code that tries to use interact might break (?).
            #namespace[k] = sagenb.notebook.interact.__dict__[k] = v

        namespace['_salvus_parsing'] = sage_parsing

        # Expose the worksheet magics (%md, %r, %sh, ...) by name.
        for name in [
                'anaconda', 'asy', 'attach', 'auto', 'capture', 'cell',
                'clear', 'coffeescript', 'cython', 'default_mode',
                'delete_last_output', 'dynamic', 'exercise', 'fork', 'fortran',
                'go', 'help', 'hide', 'hideall', 'input', 'java', 'javascript',
                'julia', 'jupyter', 'license', 'load', 'md', 'mediawiki',
                'modes', 'octave', 'pandoc', 'perl', 'plot3d_using_matplotlib',
                'prun', 'python_future_feature', 'py3print_mode', 'python',
                'python3', 'r', 'raw_input', 'reset', 'restore', 'ruby',
                'runfile', 'sage_chat', 'sage_eval', 'scala', 'scala211',
                'script', 'search_doc', 'search_src', 'sh', 'show',
                'show_identifiers', 'singular_kernel', 'time', 'timeit',
                'typeset_mode', 'var', 'wiki'
        ]:
            namespace[name] = getattr(sage_salvus, name)

        namespace['sage_server'] = sys.modules[
            __name__]  # http://stackoverflow.com/questions/1676835/python-how-do-i-get-a-reference-to-a-module-inside-the-module-itself

        # alias pretty_print_default to typeset_mode, since sagenb has/uses that.
        namespace['pretty_print_default'] = namespace['typeset_mode']
        # and monkey patch it
        sage.misc.latex.pretty_print_default = namespace[
            'pretty_print_default']

        sage_salvus.default_namespace = dict(namespace)
        log("setup namespace with extra functions")

        # Sage's pretty_print and view are both ancient and a mess
        sage.all.pretty_print = sage.misc.latex.pretty_print = namespace[
            'pretty_print'] = namespace['view'] = namespace['show']

        # this way client code can tell it is running as a Sage Worksheet.
        namespace['__SAGEWS__'] = True

    log("Initialize sage library.")
    init_library()

    t = time.time()
    s.listen(128)
    i = 0
    children = {}
    log("Starting server listening for connections")
    try:
        while True:
            i += 1
            #print i, time.time()-t, 'cps: ', int(i/(time.time()-t))
            # do not use log.info(...) in the server loop; threads = race conditions that hang server every so often!!
            try:
                if children:
                    # Reap any children that have exited and close the
                    # connections we were holding for them.
                    for pid in list(children.keys()):
                        if os.waitpid(pid, os.WNOHANG) != (0, 0):
                            log("subprocess %s terminated, closing connection"
                                % pid)
                            conn.close()
                            del children[pid]
                try:
                    conn, addr = s.accept()
                    log("Accepted a connection from", addr)
                except:
                    # this will happen periodically since we did s.settimeout above, so
                    # that we wait for children above periodically.
                    continue
            except socket.error:
                continue
            child_pid = os.fork()
            if child_pid:  # parent
                log("forked off child with pid %s to handle this connection" %
                    child_pid)
                children[child_pid] = conn
            else:
                # child
                global PID
                PID = os.getpid()
                log("child process, will now serve this new connection")
                # NOTE(review): after serve_connection returns, the child
                # continues this accept loop rather than exiting --
                # confirm whether that is intentional.
                serve_connection(conn)
        # end while
    except Exception as err:
        log("Error taking connection: ", err)
        traceback.print_exc(file=open(LOGFILE, 'a'))
        #log.error("error: %s %s", type(err), str(err))

    finally:
        log("closing socket")
        #s.shutdown(0)
        s.close()
def run_server(port, host, pidfile, logfile=None):
    """Run the Sage server, blocking until serve() returns.

    :param port: TCP port to listen on
    :param host: host interface to bind to
    :param pidfile: path to write this process' pid to ('' disables);
        the file is removed again when the server exits
    :param logfile: optional log file path; if given, the module-level
        LOGFILE is redirected there
    """
    global LOGFILE
    if logfile:
        LOGFILE = logfile
    if pidfile:
        pid = str(os.getpid())
        print("os.getpid() = %s" % pid)
        # Use a context manager so the pidfile handle is closed promptly
        # instead of leaking until garbage collection.
        with open(pidfile, 'w') as pidfile_fp:
            pidfile_fp.write(pid)
    log("run_server: port=%s, host=%s, pidfile='%s', logfile='%s'" %
        (port, host, pidfile, LOGFILE))
    try:
        serve(port, host)
    finally:
        # Always remove the pidfile, even if serve() raises.
        if pidfile:
            os.unlink(pidfile)
if __name__ == "__main__":
    # Command-line entry point: parse options, then either run the test
    # client (-c) or start the server (optionally daemonized with -d).
    import argparse
    parser = argparse.ArgumentParser(description="Run Sage server")
    parser.add_argument(
        "-p",
        dest="port",
        type=int,
        default=0,
        help=
        "port to listen on (default: 0); 0 = automatically allocated; saved to $SMC/data/sage_server.port"
    )
    parser.add_argument(
        "-l",
        dest='log_level',
        type=str,
        default='INFO',
        help=
        "log level (default: INFO) useful options include WARNING and DEBUG")
    parser.add_argument("-d",
                        dest="daemon",
                        default=False,
                        action="store_const",
                        const=True,
                        help="daemon mode (default: False)")
    parser.add_argument(
        "--host",
        dest="host",
        type=str,
        default='127.0.0.1',
        help="host interface to bind to -- default is 127.0.0.1")
    parser.add_argument("--pidfile",
                        dest="pidfile",
                        type=str,
                        default='',
                        help="store pid in this file")
    parser.add_argument(
        "--logfile",
        dest="logfile",
        type=str,
        default='',
        help="store log in this file (default: '' = don't log to a file)")
    parser.add_argument("-c",
                        dest="client",
                        default=False,
                        action="store_const",
                        const=True,
                        help="run in test client mode number 1 (command line)")
    parser.add_argument("--hostname",
                        dest="hostname",
                        type=str,
                        default='',
                        help="hostname to connect to in client mode")
    parser.add_argument("--portfile",
                        dest="portfile",
                        type=str,
                        default='',
                        help="write port to this file")
    args = parser.parse_args()
    # Daemon mode needs a pidfile so the process can be managed later.
    if args.daemon and not args.pidfile:
        print(("%s: must specify pidfile in daemon mode" % sys.argv[0]))
        sys.exit(1)
    if args.log_level:
        # Log-level handling is currently disabled; kept for reference.
        pass
        #level = getattr(logging, args.log_level.upper())
        #log.setLevel(level)
    if args.client:
        # Test-client mode: connect to an existing server and exit.
        client1(
            port=args.port if args.port else int(open(args.portfile).read()),
            hostname=args.hostname)
        sys.exit(0)
    if not args.port:
        # Port 0 means "pick one for me": bind an ephemeral socket just to
        # learn a free port number, then release it.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))  # pick a free port
        args.port = s.getsockname()[1]
        del s
    if args.portfile:
        open(args.portfile, 'w').write(str(args.port))
    pidfile = os.path.abspath(args.pidfile) if args.pidfile else ''
    logfile = os.path.abspath(args.logfile) if args.logfile else ''
    if logfile:
        LOGFILE = logfile
        open(LOGFILE, 'w')  # for now we clear it on restart...
        log("setting logfile to %s" % LOGFILE)
    # NOTE(review): run_server is not passed logfile here; LOGFILE was
    # already set globally above, so this appears intentional -- confirm.
    main = lambda: run_server(port=args.port, host=args.host, pidfile=pidfile)
    if args.daemon and args.pidfile:
        from . import daemon
        daemon.daemonize(args.pidfile)
        main()
    else:
        main()
# ---- file boundary (concatenation artifact) ----
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import time
import shutil
import ntpath
import string
import tempfile
import xmlrpclib
from datetime import datetime
from lib.cuckoo.common.exceptions import CuckooOperationalError
from lib.cuckoo.common.config import Config
from lib.cuckoo.common.constants import CUCKOO_ROOT
try:
import chardet
HAVE_CHARDET = True
except ImportError:
HAVE_CHARDET = False
def create_folders(root=".", folders=None):
    """Create each of the given directories under *root*.

    @param root: root path.
    @param folders: iterable of folder names to be created (default: none).
    @raise CuckooOperationalError: if a folder cannot be created.
    """
    # A mutable default argument ([]) would be shared across calls; use
    # None as the sentinel instead.
    for folder in folders or []:
        create_folder(root, folder)
def create_folder(root=".", folder=None):
    """Create a single directory under *root*.

    @param root: root path.
    @param folder: folder name to be created.
    @raise CuckooOperationalError: if the folder cannot be created.
    """
    folder_path = os.path.join(root, folder)
    # Nothing to do when no name was given or the directory already exists.
    if not folder or os.path.isdir(folder_path):
        return
    try:
        os.makedirs(folder_path)
    except OSError:
        raise CuckooOperationalError("Unable to create folder: %s" %
                                     folder_path)
def delete_folder(folder):
    """Recursively delete *folder* if it exists.

    @param folder: path to delete.
    @raise CuckooOperationalError: if the folder cannot be deleted.
    """
    # Silently succeed when there is nothing to remove.
    if not os.path.exists(folder):
        return
    try:
        shutil.rmtree(folder)
    except OSError:
        raise CuckooOperationalError("Unable to delete folder: "
                                     "{0}".format(folder))
# Don't allow all characters in "string.printable", as the extra whitespace
# characters \x0b and \x0c may mess up reports.
# string.ascii_letters is used instead of string.letters, which is
# locale-dependent and removed in Python 3.
PRINTABLE_CHARACTERS = \
    string.ascii_letters + string.digits + string.punctuation + " \t\r\n"
def convert_char(c):
    """Return *c* unchanged if printable, else a hex escape.

    @param c: dirty char.
    @return: sanitized char.
    """
    # Non-printable characters are rendered as "\xNN" escapes.
    return c if c in PRINTABLE_CHARACTERS else "\\x%02x" % ord(c)
def is_printable(s):
    """Return True if every character of *s* is printable."""
    return all(c in PRINTABLE_CHARACTERS for c in s)
def convert_to_printable(s):
    """Return *s* with all non-printable characters escaped.

    @param s: string.
    @return: sanitized string.
    """
    # Fast path: leave already-clean strings untouched.
    if is_printable(s):
        return s
    return "".join(map(convert_char, s))
def datetime_to_iso(timestamp):
    """Convert a "%Y-%m-%d %H:%M:%S" timestamp string to ISO 8601.

    @param timestamp: timestamp string
    @return: ISO datetime string
    """
    parsed = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
    return parsed.isoformat()
def get_filename_from_path(path):
    """Cross-platform filename extraction from a path.

    @param path: file path (Windows or POSIX style).
    @return: filename.
    """
    head, tail = ntpath.split(path)
    # A trailing separator leaves tail empty; fall back to the last
    # directory component in that case.
    return tail or ntpath.basename(head)
def store_temp_file(filedata, filename, path=None):
    """Store a temporary file.

    @param filedata: content of the original file (bytes or file object).
    @param filename: name of the original file.
    @param path: optional path for temp directory.
    @return: path to the temporary file.
    """
    filename = get_filename_from_path(filename)
    # Reduce length (100 is arbitrary).
    filename = filename[:100]
    # Create temporary directory path.
    if path:
        target_path = path
    else:
        # Only consult the configuration when no explicit path is given,
        # so callers passing a path do not require a readable config.
        options = Config()
        tmp_path = options.cuckoo.get("tmppath", "/tmp")
        target_path = os.path.join(tmp_path, "cuckoo-tmp")
    if not os.path.isdir(target_path):
        try:
            os.mkdir(target_path)
        except OSError:
            # Another process may have created the directory between the
            # check and mkdir (TOCTOU); only propagate real failures.
            if not os.path.isdir(target_path):
                raise
    tmp_dir = tempfile.mkdtemp(prefix="upload_", dir=target_path)
    tmp_file_path = os.path.join(tmp_dir, filename)
    with open(tmp_file_path, "wb") as tmp_file:
        # If filedata is a file object, do chunked copy.
        if hasattr(filedata, "read"):
            chunk = filedata.read(1024)
            while chunk:
                tmp_file.write(chunk)
                chunk = filedata.read(1024)
        else:
            tmp_file.write(filedata)
    return tmp_file_path
class TimeoutServer(xmlrpclib.ServerProxy):
    """Timeout server for XMLRPC.

    XMLRPC + timeout - still a bit ugly - but at least gets rid of setdefaulttimeout
    inspired by http://stackoverflow.com/questions/372365/set-timeout-for-xmlrpclib-serverproxy
    (although their stuff was messy, this is cleaner)
    @see: http://stackoverflow.com/questions/372365/set-timeout-for-xmlrpclib-serverproxy
    """
    def __init__(self, *args, **kwargs):
        # Pop "timeout" before delegating: ServerProxy does not accept it.
        timeout = kwargs.pop("timeout", None)
        kwargs["transport"] = TimeoutTransport(timeout=timeout)
        xmlrpclib.ServerProxy.__init__(self, *args, **kwargs)

    def _set_timeout(self, timeout):
        # Name-mangled access to ServerProxy's private __transport attribute.
        t = self._ServerProxy__transport
        t.timeout = timeout
        # If we still have a socket we need to update that as well.
        if hasattr(t, "_connection") and t._connection[1] and t._connection[1].sock:
            t._connection[1].sock.settimeout(timeout)
class TimeoutTransport(xmlrpclib.Transport):
    """xmlrpclib Transport that applies a configurable timeout to each
    connection it creates."""
    def __init__(self, *args, **kwargs):
        # Pop "timeout" before delegating: Transport does not accept it.
        self.timeout = kwargs.pop("timeout", None)
        xmlrpclib.Transport.__init__(self, *args, **kwargs)

    def make_connection(self, *args, **kwargs):
        conn = xmlrpclib.Transport.make_connection(self, *args, **kwargs)
        # Only override the timeout when one was explicitly configured.
        if self.timeout is not None:
            conn.timeout = self.timeout
        return conn
class Singleton(type):
    """Metaclass that turns each class using it into a singleton.

    @see: http://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Instantiate at most once per class; later calls return the cached
        # instance and ignore their arguments.
        instance = cls._instances.get(cls)
        if instance is None:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
        return instance
def logtime(dt):
    """Format *dt* the way the logging module does, for the csv output
    (e.g. "2013-01-25 13:21:44,590").

    @param dt: datetime object
    @return: time string
    """
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", dt.timetuple())
    millis = dt.microsecond // 1000
    return "%s,%03d" % (stamp, millis)
def time_from_cuckoomon(s):
    """Parse a time string received from cuckoomon via netlog.

    @param s: time string such as "2013-01-25 13:21:44,590"
    @return: datetime object
    """
    netlog_format = "%Y-%m-%d %H:%M:%S,%f"
    return datetime.strptime(s, netlog_format)
def to_unicode(s):
    """Attempt to fix a non utf-8 string into utf-8. It tries to guess input encoding,
    if that fails retry with a replace strategy (so undetectable chars will be escaped).

    @see: fuller list of encodings at http://docs.python.org/library/codecs.html#standard-encodings
    """
    def brute_enc(s2):
        """Trying to decode via simple brute forcing."""
        # Most common encodings first; the first successful decode wins.
        encodings = ("ascii", "utf8", "latin1")
        for enc in encodings:
            try:
                return unicode(s2, enc)
            except UnicodeDecodeError:
                pass
        return None

    def chardet_enc(s2):
        """Guess encoding via chardet."""
        enc = chardet.detect(s2)["encoding"]
        try:
            return unicode(s2, enc)
        except UnicodeDecodeError:
            pass
        return None

    # If already in unicode, skip. (Python 2 only: "unicode" builtin.)
    if isinstance(s, unicode):
        return s

    # First try to decode against a little set of common encodings.
    result = brute_enc(s)

    # Try via chardet.
    if not result and HAVE_CHARDET:
        result = chardet_enc(s)

    # If not possible to convert the input string, try again with
    # a replace strategy: undecodable bytes become U+FFFD.
    if not result:
        result = unicode(s, errors="replace")

    return result
def cleanup_value(v):
    """Strip the NT object-manager prefix ("\\??\\") from a value.

    @param v: value to clean; coerced to str first.
    @return: cleaned string.
    """
    text = str(v)
    if text.startswith("\\??\\"):
        return text[4:]
    return text
def sanitize_filename(x):
    """Kind of awful but necessary sanitizing of filenames to
    get rid of unicode problems.

    Characters outside ASCII letters, digits, and " _-." are replaced
    with underscores.
    """
    # string.ascii_letters instead of string.letters (locale-dependent,
    # removed in Python 3); build the allowed set once instead of
    # re-concatenating the strings for every character, and join instead
    # of quadratic "+=" accumulation.
    allowed = set(string.ascii_letters + string.digits + " _-.")
    return "".join(c if c in allowed else "_" for c in x)
def classlock(f):
    """Classlock decorator (created for database.Database).

    Serializes calls to *f* under self._lock to avoid sqlite errors
    from concurrent access.
    """
    def locked_call(self, *args, **kwargs):
        # Hold the instance lock for the full duration of the call.
        with self._lock:
            result = f(self, *args, **kwargs)
        return result
    return locked_call
# Changelog note: removed unused import (CUCKOO_ROOT from
# lib.cuckoo.common.constants); the revised copy of the module follows.
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import time
import shutil
import ntpath
import string
import tempfile
import xmlrpclib
from datetime import datetime
from lib.cuckoo.common.exceptions import CuckooOperationalError
from lib.cuckoo.common.config import Config
try:
import chardet
HAVE_CHARDET = True
except ImportError:
HAVE_CHARDET = False
def create_folders(root=".", folders=None):
    """Create each of the given directories under *root*.

    @param root: root path.
    @param folders: iterable of folder names to be created (default: none).
    @raise CuckooOperationalError: if a folder cannot be created.
    """
    # A mutable default argument ([]) would be shared across calls; use
    # None as the sentinel instead.
    for folder in folders or []:
        create_folder(root, folder)
def create_folder(root=".", folder=None):
    """Create a single directory under *root*.

    @param root: root path.
    @param folder: folder name to be created.
    @raise CuckooOperationalError: if the folder cannot be created.
    """
    folder_path = os.path.join(root, folder)
    # Nothing to do when no name was given or the directory already exists.
    if not folder or os.path.isdir(folder_path):
        return
    try:
        os.makedirs(folder_path)
    except OSError:
        raise CuckooOperationalError("Unable to create folder: %s" %
                                     folder_path)
def delete_folder(folder):
    """Recursively delete *folder* if it exists.

    @param folder: path to delete.
    @raise CuckooOperationalError: if the folder cannot be deleted.
    """
    # Silently succeed when there is nothing to remove.
    if not os.path.exists(folder):
        return
    try:
        shutil.rmtree(folder)
    except OSError:
        raise CuckooOperationalError("Unable to delete folder: "
                                     "{0}".format(folder))
# Don't allow all characters in "string.printable", as the extra whitespace
# characters \x0b and \x0c may mess up reports.
# string.ascii_letters is used instead of string.letters, which is
# locale-dependent and removed in Python 3.
PRINTABLE_CHARACTERS = \
    string.ascii_letters + string.digits + string.punctuation + " \t\r\n"
def convert_char(c):
    """Return *c* unchanged if printable, else a hex escape.

    @param c: dirty char.
    @return: sanitized char.
    """
    # Non-printable characters are rendered as "\xNN" escapes.
    return c if c in PRINTABLE_CHARACTERS else "\\x%02x" % ord(c)
def is_printable(s):
    """Return True if every character of *s* is printable."""
    return all(c in PRINTABLE_CHARACTERS for c in s)
def convert_to_printable(s):
    """Return *s* with all non-printable characters escaped.

    @param s: string.
    @return: sanitized string.
    """
    # Fast path: leave already-clean strings untouched.
    if is_printable(s):
        return s
    return "".join(map(convert_char, s))
def datetime_to_iso(timestamp):
    """Convert a "%Y-%m-%d %H:%M:%S" timestamp string to ISO 8601.

    @param timestamp: timestamp string
    @return: ISO datetime string
    """
    parsed = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
    return parsed.isoformat()
def get_filename_from_path(path):
    """Cross-platform filename extraction from a path.

    @param path: file path (Windows or POSIX style).
    @return: filename.
    """
    head, tail = ntpath.split(path)
    # A trailing separator leaves tail empty; fall back to the last
    # directory component in that case.
    return tail or ntpath.basename(head)
def store_temp_file(filedata, filename, path=None):
    """Store a temporary file.

    @param filedata: content of the original file (bytes or file object).
    @param filename: name of the original file.
    @param path: optional path for temp directory.
    @return: path to the temporary file.
    """
    filename = get_filename_from_path(filename)
    # Reduce length (100 is arbitrary).
    filename = filename[:100]
    # Create temporary directory path.
    if path:
        target_path = path
    else:
        # Only consult the configuration when no explicit path is given,
        # so callers passing a path do not require a readable config.
        options = Config()
        tmp_path = options.cuckoo.get("tmppath", "/tmp")
        target_path = os.path.join(tmp_path, "cuckoo-tmp")
    if not os.path.isdir(target_path):
        try:
            os.mkdir(target_path)
        except OSError:
            # Another process may have created the directory between the
            # check and mkdir (TOCTOU); only propagate real failures.
            if not os.path.isdir(target_path):
                raise
    tmp_dir = tempfile.mkdtemp(prefix="upload_", dir=target_path)
    tmp_file_path = os.path.join(tmp_dir, filename)
    with open(tmp_file_path, "wb") as tmp_file:
        # If filedata is a file object, do chunked copy.
        if hasattr(filedata, "read"):
            chunk = filedata.read(1024)
            while chunk:
                tmp_file.write(chunk)
                chunk = filedata.read(1024)
        else:
            tmp_file.write(filedata)
    return tmp_file_path
class TimeoutServer(xmlrpclib.ServerProxy):
    """Timeout server for XMLRPC.

    XMLRPC + timeout - still a bit ugly - but at least gets rid of setdefaulttimeout
    inspired by http://stackoverflow.com/questions/372365/set-timeout-for-xmlrpclib-serverproxy
    (although their stuff was messy, this is cleaner)
    @see: http://stackoverflow.com/questions/372365/set-timeout-for-xmlrpclib-serverproxy
    """
    def __init__(self, *args, **kwargs):
        # Pop "timeout" before delegating: ServerProxy does not accept it.
        timeout = kwargs.pop("timeout", None)
        kwargs["transport"] = TimeoutTransport(timeout=timeout)
        xmlrpclib.ServerProxy.__init__(self, *args, **kwargs)

    def _set_timeout(self, timeout):
        # Name-mangled access to ServerProxy's private __transport attribute.
        t = self._ServerProxy__transport
        t.timeout = timeout
        # If we still have a socket we need to update that as well.
        if hasattr(t, "_connection") and t._connection[1] and t._connection[1].sock:
            t._connection[1].sock.settimeout(timeout)
class TimeoutTransport(xmlrpclib.Transport):
    """xmlrpclib Transport that applies a configurable timeout to each
    connection it creates."""
    def __init__(self, *args, **kwargs):
        # Pop "timeout" before delegating: Transport does not accept it.
        self.timeout = kwargs.pop("timeout", None)
        xmlrpclib.Transport.__init__(self, *args, **kwargs)

    def make_connection(self, *args, **kwargs):
        conn = xmlrpclib.Transport.make_connection(self, *args, **kwargs)
        # Only override the timeout when one was explicitly configured.
        if self.timeout is not None:
            conn.timeout = self.timeout
        return conn
class Singleton(type):
    """Metaclass that turns each class using it into a singleton.

    @see: http://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Instantiate at most once per class; later calls return the cached
        # instance and ignore their arguments.
        instance = cls._instances.get(cls)
        if instance is None:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
        return instance
def logtime(dt):
    """Format *dt* the way the logging module does, for the csv output
    (e.g. "2013-01-25 13:21:44,590").

    @param dt: datetime object
    @return: time string
    """
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", dt.timetuple())
    millis = dt.microsecond // 1000
    return "%s,%03d" % (stamp, millis)
def time_from_cuckoomon(s):
    """Parse a time string received from cuckoomon via netlog.

    @param s: time string such as "2013-01-25 13:21:44,590"
    @return: datetime object
    """
    netlog_format = "%Y-%m-%d %H:%M:%S,%f"
    return datetime.strptime(s, netlog_format)
def to_unicode(s):
    """Attempt to fix a non utf-8 string into utf-8. It tries to guess input encoding,
    if that fails retry with a replace strategy (so undetectable chars will be escaped).

    @see: fuller list of encodings at http://docs.python.org/library/codecs.html#standard-encodings
    """
    def brute_enc(s2):
        """Trying to decode via simple brute forcing."""
        # Most common encodings first; the first successful decode wins.
        encodings = ("ascii", "utf8", "latin1")
        for enc in encodings:
            try:
                return unicode(s2, enc)
            except UnicodeDecodeError:
                pass
        return None

    def chardet_enc(s2):
        """Guess encoding via chardet."""
        enc = chardet.detect(s2)["encoding"]
        try:
            return unicode(s2, enc)
        except UnicodeDecodeError:
            pass
        return None

    # If already in unicode, skip. (Python 2 only: "unicode" builtin.)
    if isinstance(s, unicode):
        return s

    # First try to decode against a little set of common encodings.
    result = brute_enc(s)

    # Try via chardet.
    if not result and HAVE_CHARDET:
        result = chardet_enc(s)

    # If not possible to convert the input string, try again with
    # a replace strategy: undecodable bytes become U+FFFD.
    if not result:
        result = unicode(s, errors="replace")

    return result
def cleanup_value(v):
    """Strip the NT object-manager prefix ("\\??\\") from a value.

    @param v: value to clean; coerced to str first.
    @return: cleaned string.
    """
    text = str(v)
    if text.startswith("\\??\\"):
        return text[4:]
    return text
def sanitize_filename(x):
    """Kind of awful but necessary sanitizing of filenames to
    get rid of unicode problems.

    Characters outside ASCII letters, digits, and " _-." are replaced
    with underscores.
    """
    # string.ascii_letters instead of string.letters (locale-dependent,
    # removed in Python 3); build the allowed set once instead of
    # re-concatenating the strings for every character, and join instead
    # of quadratic "+=" accumulation.
    allowed = set(string.ascii_letters + string.digits + " _-.")
    return "".join(c if c in allowed else "_" for c in x)
def classlock(f):
    """Classlock decorator (created for database.Database).

    Serializes calls to *f* under self._lock to avoid sqlite errors
    from concurrent access.
    """
    def locked_call(self, *args, **kwargs):
        # Hold the instance lock for the full duration of the call.
        with self._lock:
            result = f(self, *args, **kwargs)
        return result
    return locked_call
# ---- file boundary (concatenation artifact) ----
#!/usr/bin/python
# -*- coding: utf-8 -*-
from sys import exc_info
from dns.resolver import query, NXDOMAIN
from requests import get, post, HTTPError
class SpamBLError(Exception):
    ''' Base exception class for spambl module; all errors raised by the
    module derive from it '''
class UnknownCodeError(SpamBLError):
    ''' Raised when trying to use an unexpected value of dnsbl return code '''
class DNSBLClientError(SpamBLError):
    ''' A base for some exceptions raised by BaseDNSBLClient.

    Subclasses must set msg_tpl, a format template taking the client class
    name and the dnsbl service class name.
    '''
    # Format template filled with (client class name, service class name);
    # overridden by subclasses.
    msg_tpl = None

    def __init__(self, client, dnsbl_service, *args):
        # Build the message from the two class names, then defer to the
        # standard exception constructor.
        msg = self.msg_tpl.format(client.__class__.__name__, dnsbl_service.__class__.__name__)
        super(DNSBLClientError, self).__init__(msg, *args)
class DNSBLContentError(DNSBLClientError, ValueError):
    ''' Raised when trying to use an instance of DNSBL service that doesn't
    support expected type of items
    '''
    # Filled with (client class name, service class name) by DNSBLClientError.
    msg_tpl = 'This instance of {} does not list items required by {}'
class DNSBLTypeError(DNSBLClientError, TypeError):
    ''' Raised when trying to use an object that is expected to represent dnsbl service
    but doesn't have required attributes
    '''
    # Filled with (client class name, service class name) by DNSBLClientError.
    msg_tpl = 'This instance of {} does not have an attribute required by {}'
class UnathorizedAPIKeyError(SpamBLError):
    ''' Raised when trying to use an unauthorized api key (the class name
    keeps the original "Unathorized" spelling for compatibility) '''
class DNSBLItem(object):
    ''' Represents a host listed on a DNS blacklist '''

    # Cache for the lazily computed classification property.
    _classification = None

    def __init__(self, host, source, return_code):
        ''' Create a new instance of DNSBLItem

        :param host: the host value listed on a DNS blacklist, either host name or ip address
        :param source: dnsbl service object
        :param return_code: last octet of ip address returned after querying the source for the host
        '''
        self.host = host
        self.source = source
        self._return_code = return_code

    @property
    def classification(self):
        ''' Classification of this host according to provider of the list
        from which it has been extracted; resolved lazily and cached '''
        cached = self._classification
        if not cached:
            cached = self.source.get_classification(self._return_code)
            self._classification = cached
        return cached
class DNSBLService(object):
    ''' Represents a DNSBL service '''

    def __init__(self, identifier, query_suffix, code_item_class, lists_ips, lists_uris):
        ''' Create new DNSBLService object

        :param identifier: a value designating DNSBL service provider: its name or url address.
        :param query_suffix: a suffix added to DNSBL query address
        :param code_item_class: item classes associated with DNSBL query return codes
        :param lists_ips: information if this object represents an ip blocklist
        :param lists_uris: information if this object represents a domain name blocklist
        '''
        self.identifier = identifier
        self._query_suffix = query_suffix
        self._code_item_class = code_item_class
        self.lists_ips = lists_ips
        self.lists_uris = lists_uris

    def get_classification(self, code):
        ''' Return classification for given code

        :param code: a valid return code extracted from response to DNSBL query
        :raises UnknownCodeError: raised when given code is not specified in self._code_item_class
        :returns: a value associated with a valid return code
        '''
        try:
            return self._code_item_class[code]
        except KeyError:
            # Python 2 three-expression raise: re-raise as UnknownCodeError
            # while preserving the original traceback.
            msg_template = 'Unexpected code value for dnsbl service {}: {}'
            raise UnknownCodeError(msg_template.format(self.identifier, code)), None, exc_info()[2]

    def query(self, value):
        ''' Query DNSBL service for given value

        :param value: a valid hostname or a valid inverted ip address
        :returns: an integer representing classification code for given value, if it is listed. Otherwise,
        it returns None
        '''
        try:
            # "query" here resolves to dns.resolver.query imported at module
            # level, not to this method (method names are not in scope in
            # the body).
            response = query(value+'.'+self._query_suffix)
        except NXDOMAIN:
            # NXDOMAIN from the blocklist means the value is not listed.
            return None
        else:
            # The classification code is the last octet of the returned
            # A record.
            last_octet = response[0].to_text().split('.')[-1]
            return int(last_octet)
class BaseDNSBLClient(object):
    ''' Implements basic feaures of DNSBL client classes '''

    def __init__(self):
        # Registered dnsbl service objects, filled by add_dnsbl().
        self.dnsbl_services = []

    def _get_relative_domain(self, host):
        ''' Get relative domain name for given host (abstract)

        :param host: a valid host
        :returns: a dns name object
        '''
        raise NotImplementedError('The method is not implemented')

    def _required_content_in(self, dnsbl):
        ''' Check if dnsbl has content required by this client (abstract)

        :param dnsbl: an object representing dnsbl service
        '''
        raise NotImplementedError('The method is not implemented')

    def add_dnsbl(self, dnsbl):
        ''' Register a dnsbl service with this client

        :param dnsbl: an object representing dnsbl service
        :raises DNSBLTypeError: if dnsbl lacks an attribute this client requires
        :raises DNSBLContentError: if dnsbl does not list the required content
        '''
        try:
            required_content_in = self._required_content_in(dnsbl)
        except AttributeError:
            # Python 2 three-expression raise: preserve original traceback.
            raise DNSBLTypeError(self, dnsbl), None, exc_info()[2]
        if not required_content_in:
            raise DNSBLContentError(self, dnsbl)
        self.dnsbl_services.append(dnsbl)

    def _get_item_data(self, host):
        ''' Query registered dnsbl services for data on given host

        :param host: a valid host
        :returns: a generator yielding a (host, source, return_code) tuple
        for each service that lists the host and an empty tuple for each
        service that does not (callers filter out the empty, falsy tuples)
        '''
        for source in self.dnsbl_services:
            return_code = source.query(host)
            yield (host, source, return_code) if return_code else ()

    def __contains__(self, host):
        # True when at least one service yields a non-empty tuple.
        return any(self._get_item_data(host))

    def lookup(self, host):
        ''' Get all items listed in registered dnsbl services for given host

        :params host: a valid host
        :returns: a tuple of objects representing host on different dns blocklists on which
        it is listed
        '''
        # "if data" drops the empty tuples yielded for non-listing services.
        return tuple(DNSBLItem(*data) for data in self._get_item_data(host) if data)
class DNSBLClient(object):
    ''' Responsible for querying DNSBL services that list ip addresses.

    NOTE(review): no behaviour implemented yet; presumably intended to
    derive from BaseDNSBLClient -- confirm.
    '''
class URIDNSBLClient(object):
    ''' Responsible for querying DNSBL services that list hostnames.

    NOTE(review): no behaviour implemented yet; presumably intended to
    derive from BaseDNSBLClient -- confirm.
    '''
class HpHostsItem(object):
    ''' Represents a host listed in hpHosts '''

    def __init__(self, host, source, classification):
        # Plain value object: record the three attributes verbatim.
        self.host, self.source, self.classification = host, source, classification
class HpHosts(object):
    ''' hpHosts client '''

    # NOTE(review): the identifier starts with a space in the original
    # source -- kept as-is; confirm before changing.
    identifier = ' http://www.hosts-file.net/'
    # Marker substring present in responses for listed hosts.
    _LISTED = 'Listed'

    def __init__(self, client_name):
        '''
        Constructor

        :param client_name: name of client using the service
        '''
        self.app_id = client_name

    def _query(self, host, classification = False):
        ''' Query the client for data of given host

        :param host: a valid host string
        :param classification: if True: hpHosts is queried also for classification for given host, if listed
        :returns: content of response to GET request to hpHosts for data on the given host
        '''
        url = 'http://verify.hosts-file.net/?v={0}&s={1}'.format(self.app_id, host)
        url = url + '&class=true' if classification else url
        return get(url).content

    def __contains__(self, host):
        ''' Check if given host is present in hpHosts blacklist

        :param host: a valid host string
        :returns: a boolean value True if given host is listed on hpHosts, False otherwise
        '''
        return self._LISTED in self._query(host)

    def lookup(self, host):
        ''' Get an object representing a value for a given host, if listed in hpHosts

        :param host: a valid host string
        :returns: a HpHostsItem object, or None if host is not listed
        '''
        data = self._query(host, True)
        if self._LISTED in data:
            # Response format: "Listed,<classification>"; the classification
            # element may be absent.
            elements = data.split(',')
            classification = elements[1] if len(elements) > 1 else None
            return HpHostsItem(host, self.identifier, classification)
        return None
class GoogleSafeBrowsing(object):
''' Google Safe Browsing lookup API client '''
protocol_version = '3.1'
max_urls_per_request = 500
def __init__(self, client_name, app_version, api_key):
''' Create new instance
:param client_name: name of application using the API
:param app_version: version of the application
:param api_key: API key given by Google:
https://developers.google.com/safe-browsing/key_signup
'''
self.api_key = api_key
self.client_name = client_name
self.app_version = app_version
self._request_address_val = ''
@property
def _request_address(self):
''' Get address of POST request to the service '''
if not self._request_address_val:
tpl = 'https://sb-ssl.google.com/safebrowsing/api/lookup?client={0}&key={1}&appver={2}&pver={3}'
self._request_address_val = tpl.format(self.client_name, self.api_key, self.app_version, self.protocol_version)
return self._request_address_val
def _query_once(self, urls):
''' Perform a single POST request using lookup API
:param urls: a sequence of urls to put in request body
:returns: a response object
:raises UnathorizedAPIKeyError: when the API key for this instance
is not valid
'''
request_body = '{}\n{}'.format(len(urls), '\n'.join(urls))
response = post(self._request_address, request_body)
try:
response.raise_for_status()
except HTTPError as e:
if e.code == 401:
raise UnathorizedAPIKeyError('The API key is not authorized'), None, exc_info()[2]
return response
def _query(self, urls):
''' Test urls for being listed by the service
:param urls: a sequence of urls to be tested
:returns: a tuple containing chunk of urls and a response pertaining to them
if the code of response was 200, which means at least one of the queried URLs
is matched in either the phishing, malware, or unwanted software lists.
'''
for i in range(0, len(urls), self.max_urls_per_request):
chunk = urls[i:i+self.max_urls_per_request]
response = self._query_once(chunk)
if response.status_code == 200:
yield chunk, response
# Module is import-only; no command-line behaviour is defined yet.
if __name__ == '__main__':
    pass
# Changelog note: fix checking for error code in GoogleSafeBrowsing._query_once.
# The HTTPError class doesn't have a "code" attribute, so the "status_code"
# attribute of the response is checked instead. The revised copy follows.
#!/usr/bin/python
# -*- coding: utf-8 -*-
from sys import exc_info
from dns.resolver import query, NXDOMAIN
from requests import get, post, HTTPError
class SpamBLError(Exception):
    ''' Base exception class for spambl module; all errors raised by the
    module derive from it '''
class UnknownCodeError(SpamBLError):
    ''' Raised when trying to use an unexpected value of dnsbl return code '''
class DNSBLClientError(SpamBLError):
    ''' A base for some exceptions raised by BaseDNSBLClient.

    Subclasses must set msg_tpl, a format template taking the client class
    name and the dnsbl service class name.
    '''
    # Format template filled with (client class name, service class name);
    # overridden by subclasses.
    msg_tpl = None

    def __init__(self, client, dnsbl_service, *args):
        # Build the message from the two class names, then defer to the
        # standard exception constructor.
        msg = self.msg_tpl.format(client.__class__.__name__, dnsbl_service.__class__.__name__)
        super(DNSBLClientError, self).__init__(msg, *args)
class DNSBLContentError(DNSBLClientError, ValueError):
    ''' Raised when trying to use an instance of DNSBL service that doesn't
    support expected type of items
    '''
    # Filled with (client class name, service class name) by DNSBLClientError.
    msg_tpl = 'This instance of {} does not list items required by {}'
class DNSBLTypeError(DNSBLClientError, TypeError):
    ''' Raised when trying to use an object that is expected to represent dnsbl service
    but doesn't have required attributes
    '''
    # Filled with (client class name, service class name) by DNSBLClientError.
    msg_tpl = 'This instance of {} does not have an attribute required by {}'
class UnathorizedAPIKeyError(SpamBLError):
    ''' Raised when trying to use an unauthorized api key (the class name
    keeps the original "Unathorized" spelling for compatibility) '''
class DNSBLItem(object):
    ''' Represents a host listed on a DNS blacklist '''

    # Cache for the lazily computed classification property.
    _classification = None

    def __init__(self, host, source, return_code):
        ''' Create a new instance of DNSBLItem

        :param host: the host value listed on a DNS blacklist, either host name or ip address
        :param source: dnsbl service object
        :param return_code: last octet of ip address returned after querying the source for the host
        '''
        self.host = host
        self.source = source
        self._return_code = return_code

    @property
    def classification(self):
        ''' Classification of this host according to provider of the list
        from which it has been extracted; resolved lazily and cached '''
        cached = self._classification
        if not cached:
            cached = self.source.get_classification(self._return_code)
            self._classification = cached
        return cached
class DNSBLService(object):
    ''' Represents a DNSBL service '''

    def __init__(self, identifier, query_suffix, code_item_class, lists_ips, lists_uris):
        ''' Create new DNSBLService object

        :param identifier: a value designating DNSBL service provider: its name or url address.
        :param query_suffix: a suffix added to DNSBL query address
        :param code_item_class: item classes associated with DNSBL query return codes
        :param lists_ips: information if this object represents an ip blocklist
        :param lists_uris: information if this object represents a domain name blocklist
        '''
        self.identifier = identifier
        self._query_suffix = query_suffix
        self._code_item_class = code_item_class
        self.lists_ips = lists_ips
        self.lists_uris = lists_uris

    def get_classification(self, code):
        ''' Return classification for given code

        :param code: a valid return code extracted from response to DNSBL query
        :raises UnknownCodeError: raised when given code is not specified in self._code_item_class
        :returns: a value associated with a valid return code
        '''
        try:
            return self._code_item_class[code]
        except KeyError:
            # Python 2 three-expression raise: re-raise as UnknownCodeError
            # while preserving the original traceback.
            msg_template = 'Unexpected code value for dnsbl service {}: {}'
            raise UnknownCodeError(msg_template.format(self.identifier, code)), None, exc_info()[2]

    def query(self, value):
        ''' Query DNSBL service for given value

        :param value: a valid hostname or a valid inverted ip address
        :returns: an integer representing classification code for given value, if it is listed. Otherwise,
        it returns None
        '''
        try:
            # "query" here resolves to dns.resolver.query imported at module
            # level, not to this method (method names are not in scope in
            # the body).
            response = query(value+'.'+self._query_suffix)
        except NXDOMAIN:
            # NXDOMAIN from the blocklist means the value is not listed.
            return None
        else:
            # The classification code is the last octet of the returned
            # A record.
            last_octet = response[0].to_text().split('.')[-1]
            return int(last_octet)
class BaseDNSBLClient(object):
    ''' Implements basic feaures of DNSBL client classes '''

    def __init__(self):
        # Registered dnsbl service objects, filled by add_dnsbl().
        self.dnsbl_services = []

    def _get_relative_domain(self, host):
        ''' Get relative domain name for given host (abstract)

        :param host: a valid host
        :returns: a dns name object
        '''
        raise NotImplementedError('The method is not implemented')

    def _required_content_in(self, dnsbl):
        ''' Check if dnsbl has content required by this client (abstract)

        :param dnsbl: an object representing dnsbl service
        '''
        raise NotImplementedError('The method is not implemented')

    def add_dnsbl(self, dnsbl):
        ''' Register a dnsbl service with this client

        :param dnsbl: an object representing dnsbl service
        :raises DNSBLTypeError: if dnsbl lacks an attribute this client requires
        :raises DNSBLContentError: if dnsbl does not list the required content
        '''
        try:
            required_content_in = self._required_content_in(dnsbl)
        except AttributeError:
            # Python 2 three-expression raise: preserve original traceback.
            raise DNSBLTypeError(self, dnsbl), None, exc_info()[2]
        if not required_content_in:
            raise DNSBLContentError(self, dnsbl)
        self.dnsbl_services.append(dnsbl)

    def _get_item_data(self, host):
        ''' Query registered dnsbl services for data on given host

        :param host: a valid host
        :returns: a generator yielding a (host, source, return_code) tuple
        for each service that lists the host and an empty tuple for each
        service that does not (callers filter out the empty, falsy tuples)
        '''
        for source in self.dnsbl_services:
            return_code = source.query(host)
            yield (host, source, return_code) if return_code else ()

    def __contains__(self, host):
        # True when at least one service yields a non-empty tuple.
        return any(self._get_item_data(host))

    def lookup(self, host):
        ''' Get all items listed in registered dnsbl services for given host

        :params host: a valid host
        :returns: a tuple of objects representing host on different dns blocklists on which
        it is listed
        '''
        # "if data" drops the empty tuples yielded for non-listing services.
        return tuple(DNSBLItem(*data) for data in self._get_item_data(host) if data)
class DNSBLClient(object):
    ''' Responsible for querying DNSBL services that list ip addresses.

    NOTE(review): this class currently has no body and does not inherit
    from BaseDNSBLClient -- presumably a stub; confirm.
    '''
class URIDNSBLClient(object):
    ''' Responsible for querying DNSBL services that list hostnames.

    NOTE(review): this class currently has no body and does not inherit
    from BaseDNSBLClient -- presumably a stub; confirm.
    '''
class HpHostsItem(object):
    ''' A single host listing returned by the hpHosts service. '''

    def __init__(self, host, source, classification):
        ''' Store the listing data for a host.

        :param host: the listed host
        :param source: identifier of the service that listed the host
        :param classification: classification assigned by the service
        '''
        self.host, self.source, self.classification = (
            host, source, classification)
class HpHosts(object):
    ''' hpHosts client.

    Queries the hpHosts (hosts-file.net) HTTP verification API to test
    whether a host is blacklisted and, optionally, why.
    '''
    # Identifier reported as the source of returned listings.
    # Fix: the original value had an accidental leading space
    # (' http://www.hosts-file.net/') which leaked into
    # HpHostsItem.source.
    identifier = 'http://www.hosts-file.net/'
    # Marker string present in responses for listed hosts.
    _LISTED = 'Listed'

    def __init__(self, client_name):
        '''
        Constructor

        :param client_name: name of client using the service
        '''
        self.app_id = client_name

    def _query(self, host, classification=False):
        ''' Query the client for data of given host

        :param host: a valid host string
        :param classification: if True: hpHosts is queried also for
            classification for given host, if listed
        :returns: content of response to GET request to hpHosts for data
            on the given host
        '''
        url = 'http://verify.hosts-file.net/?v={0}&s={1}'.format(self.app_id, host)
        url = url + '&class=true' if classification else url
        return get(url).content

    def __contains__(self, host):
        ''' Check if given host is present in hpHosts blacklist

        :param host: a valid host string
        :returns: True if given host is listed on hpHosts, False otherwise
        '''
        return self._LISTED in self._query(host)

    def lookup(self, host):
        ''' Get an object representing a value for a given host, if listed in hpHosts

        :param host: a valid host string
        :returns: a HpHostsItem object, or None if host is not listed
        '''
        data = self._query(host, True)
        if self._LISTED in data:
            # Response format: 'Listed,<classification>'; guard against a
            # response that omits the classification field.
            elements = data.split(',')
            classification = elements[1] if len(elements) > 1 else None
            return HpHostsItem(host, self.identifier, classification)
        return None
class GoogleSafeBrowsing(object):
    ''' Google Safe Browsing lookup API client '''
    protocol_version = '3.1'
    # Lookup API limit on the number of URLs in a single POST request.
    max_urls_per_request = 500

    def __init__(self, client_name, app_version, api_key):
        ''' Create new instance

        :param client_name: name of application using the API
        :param app_version: version of the application
        :param api_key: API key given by Google:
            https://developers.google.com/safe-browsing/key_signup
        '''
        self.api_key = api_key
        self.client_name = client_name
        self.app_version = app_version
        # Cache for the lazily built request URL; see _request_address.
        self._request_address_val = ''

    @property
    def _request_address(self):
        ''' Get address of POST request to the service (built once, cached) '''
        if not self._request_address_val:
            tpl = 'https://sb-ssl.google.com/safebrowsing/api/lookup?client={0}&key={1}&appver={2}&pver={3}'
            self._request_address_val = tpl.format(self.client_name, self.api_key, self.app_version, self.protocol_version)
        return self._request_address_val

    def _query_once(self, urls):
        ''' Perform a single POST request using lookup API

        :param urls: a sequence of urls to put in request body
        :returns: a response object
        :raises UnathorizedAPIKeyError: when the API key for this instance
            is not valid
        '''
        # Body format required by the lookup API: URL count on the first
        # line, then one URL per line.
        request_body = '{}\n{}'.format(len(urls), '\n'.join(urls))
        response = post(self._request_address, request_body)
        try:
            response.raise_for_status()
        except HTTPError:
            if response.status_code == 401:
                raise UnathorizedAPIKeyError('The API key is not authorized'), None, exc_info()[2]
            # NOTE(review): HTTP errors other than 401 are swallowed here and
            # the error response is returned to the caller -- confirm intended.
        return response

    def _query(self, urls):
        ''' Test urls for being listed by the service

        Generator: splits urls into API-sized chunks and yields only the
        chunks for which the service reported a match.

        :param urls: a sequence of urls to be tested
        :returns: a tuple containing chunk of urls and a response pertaining to them
            if the code of response was 200, which means at least one of the queried URLs
            is matched in either the phishing, malware, or unwanted software lists.
        '''
        for i in range(0, len(urls), self.max_urls_per_request):
            chunk = urls[i:i+self.max_urls_per_request]
            response = self._query_once(chunk)
            if response.status_code == 200:
                yield chunk, response
# Module is a library; no standalone behaviour yet.
# Fix: removed stray '|' extraction residue that trailed the `pass`
# statement and made the file unparsable.
if __name__ == '__main__':
    pass
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import mock
from nose.tools import * # PEP8 asserts
#from tests.base import DbTestCase
from webtest_plus import TestApp
import website.app
from tests.base import DbTestCase
from tests.factories import ProjectFactory, AuthUserFactory
#from website.addons.s3.tests.utils import create_mock_s3
from website.addons.s3 import views
from website.addons.s3.model import AddonS3NodeSettings, AddonS3UserSettings
# Initialize the OSF web application once at import time so every test
# case can wrap it in a webtest TestApp.
app = website.app.init_app(routes=True, set_backends=False,
                           settings_module="website.settings")
class TestS3Views(DbTestCase):
    """Webapp-level tests for the S3 addon views.

    NOTE(review): several tests below depend on an authenticated session;
    per the TODO comments, login does not currently work in this harness.
    """

    def setUp(self):
        # Wrap the module-level OSF app in a webtest client and create a
        # user/project pair with the s3 addon enabled on both.
        self.app = TestApp(app)
        self.user = AuthUserFactory()
        self.auth = ('test', self.user.api_keys[0]._primary_key)
        self.project = ProjectFactory(creator=self.user)
        self.project.add_addon('s3')
        self.project.creator.add_addon('s3')
        #self.s3 = s3_mock
        self.node_settings = self.project.get_addon('s3')
        # Set the node addon settings to correspond to the values of the mock repo
        # NOTE(review): this rebinds node_settings to a fresh, unattached
        # AddonS3NodeSettings, discarding the addon fetched just above --
        # confirm this is intended.
        self.node_settings = AddonS3NodeSettings()
        #self.node_settings.user = self.s3.repo.return_value['owner']['login']
        #self.node_settings.repo = self.s3.repo.return_value['name']
        self.node_settings.save()

    def test_s3_page_no_user(self):
        # Page content should be empty when the addon has no linked user.
        s3 = AddonS3NodeSettings(user=None, bucket='lul')
        res = views.utils._page_content('873p', s3)
        assert_equals(res, {})

    def test_s3_page_no_pid(self):
        # Page content should be empty when no project id is given.
        s3 = AddonS3NodeSettings(user='jimbob', bucket='lul')
        res = views.utils._page_content(None, s3)
        assert_equals(res, {})

    def test_s3_page_empty_pid(self):
        # An empty-string project id behaves like a missing one.
        s3 = AddonS3NodeSettings(user='jimbob', bucket='lul')
        res = views.utils._page_content('', s3)
        assert_equals(res, {})

    def test_s3_page_no_auth(self):
        # An empty node access key should also yield empty page content.
        s3 = AddonS3NodeSettings(user='jimbob', bucket='lul')
        s3.node_access_key = ""
        res = views.utils._page_content('', s3)
        assert_equals(res, {})

    @mock.patch('website.addons.s3.views.config.does_bucket_exist')
    @mock.patch('website.addons.s3.views.config._s3_create_access_key')
    @mock.patch('website.addons.s3.views.config.adjust_cors')
    def test_s3_settings_no_bucket(self, mock_cors, mock_create_key, mock_does_bucket_exist):
        # Posting settings for a nonexistent bucket must not set one.
        mock_does_bucket_exist.return_value = False
        mock_create_key.return_value = True
        mock_cors.return_value = True
        url = "/api/v1/project/{0}/s3/settings/".format(self.project._id)
        res = self.app.post_json(url, {})
        self.project.reload()
        assert_equals(self.node_settings.bucket, None)

    @mock.patch('website.addons.s3.views.utils.create_limited_user')
    def test_s3_create_access_key_attrs(self, mock_create_limited_user):
        # The generated access key should be stored on the node settings.
        mock_create_limited_user.return_value = {
            'access_key_id': 'Boo', 'secret_access_key': 'Riley'}
        user_settings = AddonS3UserSettings(user='Aticus-killing-mocking')
        views.utils._s3_create_access_key(user_settings, self.node_settings)
        assert_equals(self.node_settings.node_access_key, 'Boo')

    @mock.patch('website.addons.s3.views.utils.create_limited_user')
    def test_s3_create_access_key(self, mock_create_limited_user):
        # Key creation should report success when the user is created.
        mock_create_limited_user.return_value = {
            'access_key_id': 'Boo', 'secret_access_key': 'Riley'}
        user_settings = AddonS3UserSettings(user='Aticus-killing-mocking')
        assert_true(views.utils._s3_create_access_key(
            user_settings, self.node_settings))

    @mock.patch('framework.addons.AddonModelMixin.get_addon')
    @mock.patch('website.addons.s3.views.config.has_access')
    @mock.patch('framework.auth.get_current_user')
    def test_s3_remove_user_settings(self, mock_user, mock_access, mock_addon):
        # Deleting user settings should clear the stored access key.
        mock_access.return_value = True
        mock_user.return_value = self.user
        mock_addon.return_value = self.user.get_addon('s3')
        user_settings = self.user.get_addon('s3')
        user_settings.access_key = 'to-kill-a-mocking-bucket'
        user_settings.secret_key = 'itsasecret'
        url = '/user/s3/settings/delete/'
        self.app.post_json(url, {})
        self.project.reload()
        assert_equals(user_settings.access_key, None)

    # TODO finish me
    def test_download_no_file(self):
        # Currently only checks the endpoint errors without crashing.
        url = "/api/v1/project/{0}/s3/fetchurl/".format(self.project._id)
        self.app.post_json(url, {}, expect_errors=True)

    # TODO fix me cant seem to be logged in.....
    @mock.patch('website.addons.s3.views.config.has_access')
    @mock.patch('framework.auth.get_current_user')
    def test_user_settings_no_auth(self, mock_user, mock_access):
        mock_access.return_value = False
        mock_user.return_value = self.user
        url = '/user/s3/settings/'
        rv = self.app.post_json(url, {}, expect_errors=True)
        #assert_equals('Looks like your creditials are incorrect Could you have mistyped them?', rv['message'])

    @mock.patch('framework.addons.AddonModelMixin.get_addon')
    @mock.patch('website.addons.s3.views.config.has_access')
    @mock.patch('framework.auth.get_current_user')
    def test_user_settings(self, mock_user, mock_access, mock_addon):
        # Posted credentials should be persisted to the user's addon.
        mock_access.return_value = True
        mock_user.return_value = self.user
        mock_addon.return_value = self.user.get_addon('s3')
        url = '/user/s3/settings/'
        rv = self.app.post_json(
            url, {'access_key': 'scout', 'secret_key': 'Aticus'})
        user_settings = self.user.get_addon('s3')
        assert_equals(user_settings.access_key, 'scout')

    @mock.patch('framework.addons.AddonModelMixin.get_addon')
    @mock.patch('website.addons.s3.views.config.has_access')
    @mock.patch('framework.auth.get_current_user')
    def test_s3_remove_node_settings(self, mock_user, mock_access, mock_addon):
        # Deleting node settings should clear the stored access key.
        mock_access.return_value = True
        mock_user.return_value = self.user
        mock_addon.return_value = self.user.get_addon('s3')
        self.node_settings.access_key = 'to-kill-a-mocking-bucket'
        self.node_settings.secret_key = 'itsasecret'
        url = "/api/v1/project/{0}/s3/settings/delete/".format(self.project._id)
        self.app.post_json(url, {})
        self.project.reload()
        assert_equals(self.node_settings.access_key, None)
# Test fixes. Still running into login problems.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import mock
from nose.tools import * # PEP8 asserts
#from tests.base import DbTestCase
from webtest_plus import TestApp
import website.app
from tests.base import DbTestCase
from tests.factories import ProjectFactory, AuthUserFactory
#from website.addons.s3.tests.utils import create_mock_s3
from website.addons.s3 import views
from website.addons.s3.model import AddonS3NodeSettings, AddonS3UserSettings
# Initialize the OSF web application once at import time so every test
# case can wrap it in a webtest TestApp.
app = website.app.init_app(routes=True, set_backends=False,
                           settings_module="website.settings")
class TestS3Views(DbTestCase):
    """Webapp-level tests for the S3 addon views (updated endpoints).

    NOTE(review): some tests still depend on an authenticated session;
    per the TODO comments, login does not currently work in this harness.
    """

    def setUp(self):
        # Wrap the module-level OSF app in a webtest client and create a
        # user/project pair with the s3 addon enabled on both.
        self.app = TestApp(app)
        self.user = AuthUserFactory()
        self.auth = ('test', self.user.api_keys[0]._primary_key)
        self.project = ProjectFactory(creator=self.user)
        self.project.add_addon('s3')
        self.project.creator.add_addon('s3')
        #self.s3 = s3_mock
        self.user_settings = self.user.get_addon('s3')
        self.node_settings = self.project.get_addon('s3')
        # Set the node addon settings to correspond to the values of the mock
        # repo
        # NOTE(review): this rebinds node_settings to a fresh, unattached
        # AddonS3NodeSettings, discarding the addon fetched just above --
        # confirm this is intended.
        self.node_settings = AddonS3NodeSettings()
        #self.node_settings.user = self.s3.repo.return_value['owner']['login']
        #self.node_settings.repo = self.s3.repo.return_value['name']
        self.node_settings.save()

    def test_s3_page_no_user(self):
        # Page content should be empty when the addon has no linked user.
        s3 = AddonS3NodeSettings(user=None, bucket='lul')
        res = views.utils._page_content('873p', s3, None)
        assert_equals(res, {})

    def test_s3_page_no_pid(self):
        # Page content should be empty when no project id is given.
        s3 = AddonS3NodeSettings(user='jimbob', bucket='lul')
        res = views.utils._page_content(None, s3, self.user_settings)
        assert_equals(res, {})

    def test_s3_page_empty_pid(self):
        # An empty-string project id behaves like a missing one.
        s3 = AddonS3NodeSettings(user='jimbob', bucket='lul')
        res = views.utils._page_content('', s3, self.user_settings)
        assert_equals(res, {})

    def test_s3_page_no_auth(self):
        # An empty node access key should also yield empty page content.
        s3 = AddonS3NodeSettings(user='jimbob', bucket='lul')
        s3.node_access_key = ""
        res = views.utils._page_content('', s3, self.user_settings)
        assert_equals(res, {})

    @mock.patch('website.addons.s3.views.config.does_bucket_exist')
    @mock.patch('website.addons.s3.views.config._s3_create_access_key')
    @mock.patch('website.addons.s3.views.config.adjust_cors')
    def test_s3_settings_no_bucket(self, mock_cors, mock_create_key, mock_does_bucket_exist):
        # Posting settings for a nonexistent bucket must not set one.
        mock_does_bucket_exist.return_value = False
        mock_create_key.return_value = True
        mock_cors.return_value = True
        url = "/api/v1/project/{0}/s3/settings/".format(self.project._id)
        res = self.app.post_json(url, {})
        self.project.reload()
        assert_equals(self.node_settings.bucket, None)

    @mock.patch('website.addons.s3.views.utils.create_limited_user')
    def test_s3_create_access_key_attrs(self, mock_create_limited_user):
        # The generated access key should be stored on the node settings.
        mock_create_limited_user.return_value = {
            'access_key_id': 'Boo', 'secret_access_key': 'Riley'}
        user_settings = AddonS3UserSettings(user='Aticus-killing-mocking')
        views.utils._s3_create_access_key(user_settings, self.node_settings, self.project._id)
        assert_equals(self.node_settings.node_access_key, 'Boo')

    @mock.patch('website.addons.s3.views.utils.create_limited_user')
    def test_s3_create_access_key(self, mock_create_limited_user):
        # Key creation should report success when the user is created.
        mock_create_limited_user.return_value = {
            'access_key_id': 'Boo', 'secret_access_key': 'Riley'}
        user_settings = AddonS3UserSettings(user='Aticus-killing-mocking')
        assert_true(views.utils._s3_create_access_key(
            user_settings, self.node_settings, self.project._id))

    @mock.patch('framework.addons.AddonModelMixin.get_addon')
    @mock.patch('website.addons.s3.views.config.has_access')
    @mock.patch('framework.auth.get_current_user')
    def test_s3_remove_user_settings(self, mock_user, mock_access, mock_addon):
        # Deleting user settings should clear the stored access key.
        mock_access.return_value = True
        mock_user.return_value = self.user
        mock_addon.return_value = self.user.get_addon('s3')
        user_settings = self.user.get_addon('s3')
        user_settings.access_key = 'to-kill-a-mocking-bucket'
        user_settings.secret_key = 'itsasecret'
        url = '/api/v1/settings/s3/delete/'
        self.app.post_json(url, {})
        self.project.reload()
        assert_equals(user_settings.access_key, None)

    # TODO finish me
    def test_download_no_file(self):
        # Currently only checks the endpoint errors without crashing.
        url = "/api/v1/project/{0}/s3/fetchurl/".format(self.project._id)
        self.app.post_json(url, {}, expect_errors=True)

    # TODO fix me cant seem to be logged in.....
    @mock.patch('website.addons.s3.views.config.has_access')
    @mock.patch('framework.auth.get_current_user')
    def test_user_settings_no_auth(self, mock_user, mock_access):
        mock_access.return_value = False
        mock_user.return_value = self.user
        url = '/api/v1/settings/s3/'
        rv = self.app.post_json(url, {}, expect_errors=True)
        #assert_equals('Looks like your creditials are incorrect Could you have mistyped them?', rv['message'])

    @mock.patch('framework.addons.AddonModelMixin.get_addon')
    @mock.patch('website.addons.s3.views.config.has_access')
    @mock.patch('framework.auth.get_current_user')
    def test_user_settings(self, mock_user, mock_access, mock_addon):
        # Posted credentials should be persisted to the user's addon.
        mock_access.return_value = True
        mock_user.return_value = self.user
        mock_addon.return_value = self.user.get_addon('s3')
        url = '/api/v1/settings/s3/'
        rv = self.app.post_json(
            url, {'access_key': 'scout', 'secret_key': 'Aticus'})
        user_settings = self.user.get_addon('s3')
        assert_equals(user_settings.access_key, 'scout')

    @mock.patch('framework.addons.AddonModelMixin.get_addon')
    @mock.patch('website.addons.s3.views.config.has_access')
    @mock.patch('framework.auth.get_current_user')
    def test_s3_remove_node_settings(self, mock_user, mock_access, mock_addon):
        # Deleting node settings should clear the stored access key.
        mock_access.return_value = True
        mock_user.return_value = self.user
        mock_addon.return_value = self.user.get_addon('s3')
        self.node_settings.access_key = 'to-kill-a-mocking-bucket'
        self.node_settings.secret_key = 'itsasecret'
        url = "/api/v1/project/{0}/s3/settings/delete/".format(self.project._id)
        self.app.post_json(url, {})
        self.project.reload()
        assert_equals(self.node_settings.access_key, None)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the normalization layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import moving_averages
class BatchNormalization(base.Layer):
"""Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Arguments:
axis: An `int` or list of `int`, the axis or axes that should be
normalized, typically the features axis/axes. For instance, after a
`Conv2D` layer with `data_format="channels_first"`, set `axis=1`. If a
list of axes is provided, each axis in `axis` will be normalized
simultaneously. Default is `-1` which takes uses last axis. Note: when
using multi-axis batch norm, the `beta`, `gamma`, `moving_mean`, and
`moving_variance` variables are the same rank as the input Tensor, with
dimension size 1 in all reduced (non-axis) dimensions).
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: An optional projection function to be applied to the `beta`
weight after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
gamma_constraint: An optional projection function to be applied to the
`gamma` weight after being updated by an `Optimizer`.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
fused: if `True`, use a faster, fused implementation if possible.
If `None`, use the system recommended implementation.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random_uniform(shape[-1:], 0.93, 1.07),
tf.random_uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
name: A string, the name of the layer.
"""
def __init__(self,
             axis=-1,
             momentum=0.99,
             epsilon=1e-3,
             center=True,
             scale=True,
             beta_initializer=init_ops.zeros_initializer(),
             gamma_initializer=init_ops.ones_initializer(),
             moving_mean_initializer=init_ops.zeros_initializer(),
             moving_variance_initializer=init_ops.ones_initializer(),
             beta_regularizer=None,
             gamma_regularizer=None,
             beta_constraint=None,
             gamma_constraint=None,
             renorm=False,
             renorm_clipping=None,
             renorm_momentum=0.99,
             fused=None,
             trainable=True,
             virtual_batch_size=None,
             adjustment=None,
             name=None,
             **kwargs):
  """Stores the layer hyperparameters; see the class docstring for details."""
  super(BatchNormalization, self).__init__(
      name=name, trainable=trainable, **kwargs)
  # Normalization axis/axes and moment-tracking hyperparameters.
  self.axis = axis
  self.momentum = momentum
  self.epsilon = epsilon
  # Affine transform (beta offset / gamma scale) configuration.
  self.center = center
  self.scale = scale
  self.beta_initializer = beta_initializer
  self.beta_regularizer = beta_regularizer
  self.beta_constraint = beta_constraint
  self.gamma_initializer = gamma_initializer
  self.gamma_regularizer = gamma_regularizer
  self.gamma_constraint = gamma_constraint
  self.moving_mean_initializer = moving_mean_initializer
  self.moving_variance_initializer = moving_variance_initializer
  self.renorm = renorm
  self.virtual_batch_size = virtual_batch_size
  self.adjustment = adjustment
  # Default to the fused kernel; build() may still fall back to the
  # non-fused implementation when the input is incompatible with it.
  self.fused = True if fused is None else fused
  self._bessels_correction_test_only = True
  if renorm:
    valid_keys = ['rmax', 'rmin', 'dmax']
    renorm_clipping = renorm_clipping or {}
    if set(renorm_clipping) - set(valid_keys):
      raise ValueError('renorm_clipping %s contains keys not in %s' %
                       (renorm_clipping, valid_keys))
    self.renorm_clipping = renorm_clipping
    self.renorm_momentum = renorm_momentum
def build(self, input_shape):
  """Creates beta/gamma and moving-statistic variables for `input_shape`.

  Also normalizes/validates `self.axis`, decides whether the fused kernel
  can actually be used, and sets `self.built`.
  """
  input_shape = tensor_shape.TensorShape(input_shape)
  if not input_shape.ndims:
    raise ValueError('Input has undefined rank:', input_shape)
  ndims = len(input_shape)

  # Convert axis to list and resolve negatives
  if isinstance(self.axis, int):
    self.axis = [self.axis]
  if not isinstance(self.axis, list):
    raise TypeError('axis must be int or list, type given: %s'
                    % type(self.axis))
  for idx, x in enumerate(self.axis):
    if x < 0:
      self.axis[idx] = ndims + x

  # Validate axes
  for x in self.axis:
    if x < 0 or x >= ndims:
      raise ValueError('Invalid axis: %d' % x)
  if len(self.axis) != len(set(self.axis)):
    raise ValueError('Duplicate axis: %s' % self.axis)

  if self.virtual_batch_size is not None:
    if self.virtual_batch_size <= 0:
      raise ValueError('virtual_batch_size must be a positive integer that '
                       'divides the true batch size of the input Tensor')
    # If using virtual batches, the first dimension must be the batch
    # dimension and cannot be the batch norm axis
    if 0 in self.axis:
      raise ValueError('When using virtual_batch_size, the batch dimension '
                       'must be 0 and thus axis cannot include 0')
    if self.adjustment is not None:
      raise ValueError('When using virtual_batch_size, adjustment cannot '
                       'be specified')

  if self.fused:
    # Currently fused batch norm doesn't support renorm. It also only supports
    # an input tensor of rank 4 and a channel dimension on axis 1 or 3.
    # TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape the
    # output back to its original shape accordingly.
    self.fused = (not self.renorm and
                  ndims == 4 and
                  self.axis in [[1], [3]] and
                  self.virtual_batch_size is None and
                  self.adjustment is None)
    # TODO(chrisying): fused batch norm is currently not supported for
    # multi-axis batch norm and by extension virtual batches. In some cases,
    # it might be possible to use fused batch norm but would require reshaping
    # the Tensor to 4D with the axis in 1 or 3 (preferred 1) which is
    # particularly tricky. A compromise might be to just support the most
    # common use case (turning 5D w/ virtual batch to NCHW)

  if self.fused:
    if self.axis == [1]:
      self._data_format = 'NCHW'
    elif self.axis == [3]:
      self._data_format = 'NHWC'
    else:
      raise ValueError('Unsupported axis, fused batch norm only supports '
                       'axis == [1] or axis == [3]')

  # Every normalized axis must have a statically known dimension.
  axis_to_dim = {x: input_shape[x].value for x in self.axis}
  for x in axis_to_dim:
    if axis_to_dim[x] is None:
      raise ValueError('Input has undefined `axis` dimension. Input shape: ',
                       input_shape)
  self.input_spec = base.InputSpec(ndim=ndims, axes=axis_to_dim)

  if len(axis_to_dim) == 1 and self.virtual_batch_size is None:
    # Single axis batch norm (most common/default use-case)
    param_shape = (list(axis_to_dim.values())[0],)
  else:
    # Parameter shape is the original shape but with 1 in all non-axis dims
    param_shape = [axis_to_dim[i] if i in axis_to_dim
                   else 1 for i in range(ndims)]
    if self.virtual_batch_size is not None:
      # When using virtual batches, add an extra dim at index 1
      param_shape.insert(1, 1)
      for idx, x in enumerate(self.axis):
        self.axis[idx] = x + 1  # Account for added dimension

  if self.scale:
    self.gamma = self.add_variable(name='gamma',
                                   shape=param_shape,
                                   initializer=self.gamma_initializer,
                                   regularizer=self.gamma_regularizer,
                                   constraint=self.gamma_constraint,
                                   trainable=True)
  else:
    self.gamma = None
    if self.fused:
      # Fused kernel still needs a scale tensor; use a constant 1.
      self._gamma_const = array_ops.constant(1.0, shape=param_shape)

  if self.center:
    self.beta = self.add_variable(name='beta',
                                  shape=param_shape,
                                  initializer=self.beta_initializer,
                                  regularizer=self.beta_regularizer,
                                  constraint=self.beta_constraint,
                                  trainable=True)
  else:
    self.beta = None
    if self.fused:
      # Fused kernel still needs an offset tensor; use a constant 0.
      self._beta_const = array_ops.constant(0.0, shape=param_shape)

  # Disable variable partitioning when creating the moving mean and variance
  try:
    if self._scope:
      partitioner = self._scope.partitioner
      self._scope.set_partitioner(None)
    else:
      partitioner = None
    self.moving_mean = self.add_variable(
        name='moving_mean',
        shape=param_shape,
        initializer=self.moving_mean_initializer,
        trainable=False)
    self.moving_variance = self.add_variable(
        name='moving_variance',
        shape=param_shape,
        initializer=self.moving_variance_initializer,
        trainable=False)
    self._one_minus_decay = 1.0 - self.momentum
    if self.renorm:
      # Create variables to maintain the moving mean and standard deviation.
      # These are used in training and thus are different from the moving
      # averages above. The renorm variables are colocated with moving_mean
      # and moving_variance.
      # NOTE: below, the outer `with device` block causes the current device
      # stack to be cleared. The nested ones use a `lambda` to set the desired
      # device and ignore any devices that may be set by the custom getter.
      def _renorm_variable(name, shape):
        # Non-trainable, zero-initialized accumulator variable.
        var = self.add_variable(name=name,
                                shape=shape,
                                initializer=init_ops.zeros_initializer(),
                                trainable=False)
        return var
      with ops.device(None):
        device = ((lambda _: self.moving_mean.device)
                  if context.in_graph_mode() else self.moving_mean.device)
        with ops.device(device):
          self.renorm_mean = _renorm_variable('renorm_mean', param_shape)
          self.renorm_mean_weight = _renorm_variable('renorm_mean_weight', ())
        # We initialize renorm_stddev to 0, and maintain the (0-initialized)
        # renorm_stddev_weight. This allows us to (1) mix the average
        # stddev with the minibatch stddev early in training, and (2) compute
        # the unbiased average stddev by dividing renorm_stddev by the weight.
        device = ((lambda _: self.moving_variance.device)
                  if context.in_graph_mode() else self.moving_variance.device)
        with ops.device(device):
          self.renorm_stddev = _renorm_variable('renorm_stddev', param_shape)
          self.renorm_stddev_weight = _renorm_variable(
              'renorm_stddev_weight', ())
  finally:
    # NOTE(review): if reading self._scope raised above, `partitioner` is
    # unbound here and this raises NameError -- confirm acceptable.
    if partitioner:
      self._scope.set_partitioner(partitioner)
  self.built = True
def _assign_moving_average(self, variable, value, one_minus_decay):
  """Builds an op updating `variable` toward `value`.

  Applies `variable -= (variable - value) * one_minus_decay`, i.e. an
  exponential moving average step, colocated with `variable`.
  """
  with ops.name_scope(None, 'AssignMovingAvg',
                      [variable, value, one_minus_decay]) as scope:
    with ops.colocate_with(variable):
      delta = math_ops.multiply(
          math_ops.subtract(variable.read_value(), value),
          one_minus_decay)
      if not isinstance(variable, resource_variable_ops.ResourceVariable):
        return state_ops.assign_sub(variable, delta, name=scope)
      # state_ops.assign_sub does an extra read_variable_op after the
      # assign; the raw resource op avoids that.
      return gen_resource_variable_ops.assign_sub_variable_op(
          variable.handle, delta, name=scope)
def _fused_batch_norm(self, inputs, training):
  """Returns the output of fused batch norm.

  Also builds the moving mean/variance update ops when training (or when
  `training` is only known at graph execution time).
  """
  # TODO(reedwm): Add support for fp16 inputs.
  # The fused kernel always needs offset/scale tensors; substitute the
  # constants created in build() when center/scale are disabled.
  beta = self.beta if self.center else self._beta_const
  gamma = self.gamma if self.scale else self._gamma_const

  def _fused_batch_norm_training():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        epsilon=self.epsilon,
        data_format=self._data_format)

  def _fused_batch_norm_inference():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        mean=self.moving_mean,
        variance=self.moving_variance,
        epsilon=self.epsilon,
        is_training=False,
        data_format=self._data_format)

  # `training` may be a Python bool or a tensor, hence smart_cond.
  output, mean, variance = utils.smart_cond(
      training, _fused_batch_norm_training, _fused_batch_norm_inference)
  if not self._bessels_correction_test_only:
    # Remove Bessel's correction to be consistent with non-fused batch norm.
    # Note that the variance computed by fused batch norm is
    # with Bessel's correction.
    sample_size = math_ops.cast(
        array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
    factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
    variance *= factor

  training_value = utils.constant_value(training)
  if training_value is None:
    # Training mode is only known at runtime: select the decay in-graph
    # (0. disables the moving-average update in inference mode).
    one_minus_decay = utils.smart_cond(training,
                                       lambda: self._one_minus_decay,
                                       lambda: 0.)
  else:
    one_minus_decay = ops.convert_to_tensor(self._one_minus_decay)
  if training_value or training_value is None:
    mean_update = self._assign_moving_average(self.moving_mean, mean,
                                              one_minus_decay)
    variance_update = self._assign_moving_average(self.moving_variance,
                                                 variance, one_minus_decay)
    if context.in_graph_mode():
      # Note that in Eager mode, the updates are already executed when running
      # assign_moving_averages. So we do not need to put them into
      # collections.
      self.add_update(mean_update, inputs=inputs)
      self.add_update(variance_update, inputs=inputs)
  return output
def _renorm_correction_and_moments(self, mean, variance, training):
  """Returns the correction and update values for renorm.

  Returns a tuple `(r, d, new_mean, new_variance)` where `r`/`d` are the
  batch-renorm correction factors and `new_mean`/`new_variance` are the
  (unbiased) renorm moving statistics.
  """
  stddev = math_ops.sqrt(variance + self.epsilon)
  # Compute the average mean and standard deviation, as if they were
  # initialized with this batch's moments.
  mixed_renorm_mean = (self.renorm_mean +
                       (1. - self.renorm_mean_weight) * mean)
  mixed_renorm_stddev = (self.renorm_stddev +
                         (1. - self.renorm_stddev_weight) * stddev)
  # Compute the corrections for batch renorm.
  r = stddev / mixed_renorm_stddev
  d = (mean - mixed_renorm_mean) / mixed_renorm_stddev
  # Ensure the corrections use pre-update moving averages.
  with ops.control_dependencies([r, d]):
    mean = array_ops.identity(mean)
    stddev = array_ops.identity(stddev)
  # Missing clipping bounds are treated as unbounded (no clipping applied).
  rmin, rmax, dmax = [self.renorm_clipping.get(key)
                      for key in ['rmin', 'rmax', 'dmax']]
  if rmin is not None:
    r = math_ops.maximum(r, rmin)
  if rmax is not None:
    r = math_ops.minimum(r, rmax)
  if dmax is not None:
    d = math_ops.maximum(d, -dmax)
    d = math_ops.minimum(d, dmax)
  # When not training, use r=1, d=0.
  r = utils.smart_cond(training, lambda: r, lambda: array_ops.ones_like(r))
  d = utils.smart_cond(training, lambda: d, lambda: array_ops.zeros_like(d))

  def _update_renorm_variable(var, weight, value):
    """Updates a moving average and weight, returns the unbiased value."""
    value = array_ops.identity(value)
    def _do_update():
      # Update the variables without zero debiasing. The debiasing will be
      # accomplished by dividing the exponential moving average by the weight.
      # For example, after a single update, the moving average would be
      # (1-decay) * value. and the weight will be 1-decay, with their ratio
      # giving the value.
      # Make sure the weight is not updated until before r and d computation.
      with ops.control_dependencies([value]):
        weight_value = array_ops.constant(1., dtype=weight.dtype)
      new_var = moving_averages.assign_moving_average(
          var, value, self.renorm_momentum, zero_debias=False)
      new_weight = moving_averages.assign_moving_average(
          weight, weight_value, self.renorm_momentum, zero_debias=False)
      return new_var / new_weight
    def _fake_update():
      return array_ops.identity(var)
    return utils.smart_cond(training, _do_update, _fake_update)

  # Keep the renorm statistics colocated with the corresponding moving
  # statistics (see build()).
  with ops.colocate_with(self.moving_mean):
    new_mean = _update_renorm_variable(self.renorm_mean,
                                       self.renorm_mean_weight,
                                       mean)
  with ops.colocate_with(self.moving_variance):
    new_stddev = _update_renorm_variable(self.renorm_stddev,
                                         self.renorm_stddev_weight,
                                         stddev)
  # Make sqrt(moving_variance + epsilon) = new_stddev.
  new_variance = math_ops.square(new_stddev) - self.epsilon
  return (r, d, new_mean, new_variance)
def call(self, inputs, training=False):
  """Applies batch normalization to `inputs`.

  Args:
    inputs: Input `Tensor`.
    training: Python boolean or scalar boolean `Tensor`; whether to normalize
      with the current batch statistics (True) or the moving statistics
      (False).

  Returns:
    The normalized `Tensor`, with the same shape as `inputs`.
  """
  if self.virtual_batch_size is not None:
    # Virtual batches (aka ghost batches) can be simulated by reshaping the
    # Tensor and reusing the existing batch norm implementation
    original_shape = [-1] + inputs.shape.as_list()[1:]
    expanded_shape = [self.virtual_batch_size, -1] + original_shape[1:]

    # Will cause errors if virtual_batch_size does not divide the batch size
    inputs = array_ops.reshape(inputs, expanded_shape)

    def undo_virtual_batching(outputs):
      # Collapse the virtual-batch dimension back into the true batch dim.
      outputs = array_ops.reshape(outputs, original_shape)
      return outputs

  if self.fused:
    outputs = self._fused_batch_norm(inputs, training=training)
    if self.virtual_batch_size is not None:
      # Currently never reaches here since fused_batch_norm does not support
      # virtual batching
      return undo_virtual_batching(outputs)
    return outputs

  # Compute the axes along which to reduce the mean / variance
  input_shape = inputs.get_shape()
  ndims = len(input_shape)
  reduction_axes = [i for i in range(ndims) if i not in self.axis]
  if self.virtual_batch_size is not None:
    del reduction_axes[1]  # Do not reduce along virtual batch dim

  # Broadcasting only necessary for single-axis batch norm where the axis is
  # not the last dimension
  broadcast_shape = [1] * ndims
  broadcast_shape[self.axis[0]] = input_shape[self.axis[0]].value

  def _broadcast(v):
    # Reshape the parameter so it broadcasts against `inputs` when needed.
    if (v is not None and
        len(v.get_shape()) != ndims and
        reduction_axes != list(range(ndims - 1))):
      return array_ops.reshape(v, broadcast_shape)
    return v

  scale, offset = _broadcast(self.gamma), _broadcast(self.beta)

  def _compose_transforms(scale, offset, then_scale, then_offset):
    # Fold an extra affine transform into the existing (scale, offset) pair.
    if then_scale is not None:
      scale *= then_scale
      offset *= then_scale
    if then_offset is not None:
      offset += then_offset
    return (scale, offset)

  # Determine a boolean value for `training`: could be True, False, or None.
  training_value = utils.constant_value(training)
  if training_value is not False:
    if self.adjustment:
      adj_scale, adj_bias = self.adjustment(array_ops.shape(inputs))
      # Adjust only during training.
      adj_scale = utils.smart_cond(training,
                                   lambda: adj_scale,
                                   lambda: array_ops.ones_like(adj_scale))
      adj_bias = utils.smart_cond(training,
                                  lambda: adj_bias,
                                  lambda: array_ops.zeros_like(adj_bias))
      scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset)

    # Some of the computations here are not necessary when training==False
    # but not a constant. However, this makes the code simpler.
    keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1
    mean, variance = nn.moments(inputs, reduction_axes, keep_dims=keep_dims)

    moving_mean = self.moving_mean
    moving_variance = self.moving_variance

    mean = utils.smart_cond(training,
                            lambda: mean,
                            lambda: moving_mean)
    variance = utils.smart_cond(training,
                                lambda: variance,
                                lambda: moving_variance)

    if self.renorm:
      r, d, new_mean, new_variance = self._renorm_correction_and_moments(
          mean, variance, training)
      # When training, the normalized values (say, x) will be transformed as
      # x * gamma + beta without renorm, and (x * r + d) * gamma + beta
      # = x * (r * gamma) + (d * gamma + beta) with renorm.
      r = _broadcast(array_ops.stop_gradient(r, name='renorm_r'))
      d = _broadcast(array_ops.stop_gradient(d, name='renorm_d'))
      scale, offset = _compose_transforms(r, d, scale, offset)
    else:
      new_mean, new_variance = mean, variance

    if self.virtual_batch_size is not None:
      # This isn't strictly correct since in ghost batch norm, you are
      # supposed to sequentially update the moving_mean and moving_variance
      # with each sub-batch. However, since the moving statistics are only
      # used during evaluation, it is more efficient to just update in one
      # step and should not make a significant difference in the result.
      new_mean = math_ops.reduce_mean(new_mean,
                                      axis=1, keep_dims=True)
      new_variance = math_ops.reduce_mean(new_variance,
                                          axis=1, keep_dims=True)

    def _do_update(var, value):
      return moving_averages.assign_moving_average(
          var, value, self.momentum, zero_debias=False)

    mean_update = utils.smart_cond(
        training,
        lambda: _do_update(self.moving_mean, new_mean),
        lambda: self.moving_mean)
    variance_update = utils.smart_cond(
        training,
        lambda: _do_update(self.moving_variance, new_variance),
        lambda: self.moving_variance)
    if context.in_graph_mode():
      # Register the updates so a graph-mode train op can depend on them; in
      # eager mode the assignments above have already executed.
      self.add_update(mean_update, inputs=inputs)
      self.add_update(variance_update, inputs=inputs)
  else:
    # `training` is statically False: normalize with the moving statistics.
    mean, variance = self.moving_mean, self.moving_variance

  outputs = nn.batch_normalization(inputs,
                                   _broadcast(mean),
                                   _broadcast(variance),
                                   offset,
                                   scale,
                                   self.epsilon)
  # If some components of the shape got lost due to adjustments, fix that.
  outputs.set_shape(input_shape)

  if self.virtual_batch_size is not None:
    return undo_virtual_batching(outputs)
  return outputs
def batch_normalization(inputs,
                        axis=-1,
                        momentum=0.99,
                        epsilon=1e-3,
                        center=True,
                        scale=True,
                        beta_initializer=init_ops.zeros_initializer(),
                        gamma_initializer=init_ops.ones_initializer(),
                        moving_mean_initializer=init_ops.zeros_initializer(),
                        moving_variance_initializer=init_ops.ones_initializer(),
                        beta_regularizer=None,
                        gamma_regularizer=None,
                        beta_constraint=None,
                        gamma_constraint=None,
                        training=False,
                        trainable=True,
                        name=None,
                        reuse=None,
                        renorm=False,
                        renorm_clipping=None,
                        renorm_momentum=0.99,
                        fused=None,
                        virtual_batch_size=None,
                        adjustment=None):
  """Functional interface for the batch normalization layer.

  Reference: http://arxiv.org/abs/1502.03167
  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift"

  Sergey Ioffe, Christian Szegedy

  Note: when training, the moving_mean and moving_variance need to be updated.
  By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
  need to be added as a dependency to the `train_op`. For example:

  ```python
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
  with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)
  ```

  Arguments:
    inputs: Tensor input.
    axis: An `int`, the axis that should be normalized (typically the features
      axis). For instance, after a `Convolution2D` layer with
      `data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
    momentum: Momentum for the moving average.
    epsilon: Small float added to variance to avoid dividing by zero.
    center: If True, add offset of `beta` to normalized tensor. If False, `beta`
      is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
    beta_initializer: Initializer for the beta weight.
    gamma_initializer: Initializer for the gamma weight.
    moving_mean_initializer: Initializer for the moving mean.
    moving_variance_initializer: Initializer for the moving variance.
    beta_regularizer: Optional regularizer for the beta weight.
    gamma_regularizer: Optional regularizer for the gamma weight.
    beta_constraint: An optional projection function to be applied to the `beta`
      weight after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    gamma_constraint: An optional projection function to be applied to the
      `gamma` weight after being updated by an `Optimizer`.
    training: Either a Python boolean, or a TensorFlow boolean scalar tensor
      (e.g. a placeholder). Whether to return the output in training mode
      (normalized with statistics of the current batch) or in inference mode
      (normalized with moving statistics). **NOTE**: make sure to set this
      parameter correctly, or else your training/inference will not work
      properly.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.
    renorm: Whether to use Batch Renormalization
      (https://arxiv.org/abs/1702.03275). This adds extra variables during
      training. The inference is the same for either value of this parameter.
    renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
      scalar `Tensors` used to clip the renorm correction. The correction
      `(r, d)` is used as `corrected_value = normalized_value * r + d`, with
      `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
      dmax are set to inf, 0, inf, respectively.
    renorm_momentum: Momentum used to update the moving means and standard
      deviations with renorm. Unlike `momentum`, this affects training
      and should be neither too small (which would add noise) nor too large
      (which would give stale estimates). Note that `momentum` is still applied
      to get the means and variances for inference.
    fused: if `True`, use a faster, fused implementation if possible.
      If `None`, use the system recommended implementation.
    virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
      which means batch normalization is performed across the whole batch. When
      `virtual_batch_size` is not `None`, instead perform "Ghost Batch
      Normalization", which creates virtual sub-batches which are each
      normalized separately (with shared gamma, beta, and moving statistics).
      Must divide the actual batch size during execution.
    adjustment: A function taking the `Tensor` containing the (dynamic) shape of
      the input tensor and returning a pair (scale, bias) to apply to the
      normalized values (before gamma and beta), only during training. For
      example, if axis==-1,
        `adjustment = lambda shape: (
          tf.random_uniform(shape[-1:], 0.93, 1.07),
          tf.random_uniform(shape[-1:], -0.1, 0.1))`
      will scale the normalized value by up to 7% up or down, then shift the
      result by up to 0.1 (with independent scaling and bias for each feature
      but shared across all examples), and finally apply gamma and/or beta. If
      `None`, no adjustment is applied. Cannot be specified if
      virtual_batch_size is specified.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Thin wrapper: build a BatchNormalization layer with matching arguments
  # and apply it to `inputs`.
  layer = BatchNormalization(
      axis=axis,
      momentum=momentum,
      epsilon=epsilon,
      center=center,
      scale=scale,
      beta_initializer=beta_initializer,
      gamma_initializer=gamma_initializer,
      moving_mean_initializer=moving_mean_initializer,
      moving_variance_initializer=moving_variance_initializer,
      beta_regularizer=beta_regularizer,
      gamma_regularizer=gamma_regularizer,
      beta_constraint=beta_constraint,
      gamma_constraint=gamma_constraint,
      renorm=renorm,
      renorm_clipping=renorm_clipping,
      renorm_momentum=renorm_momentum,
      fused=fused,
      trainable=trainable,
      virtual_batch_size=virtual_batch_size,
      adjustment=adjustment,
      name=name,
      _reuse=reuse,
      _scope=name)
  return layer.apply(inputs, training=training)
# Aliases

BatchNorm = BatchNormalization  # Class alias kept for backwards compatibility.
batch_norm = batch_normalization  # Functional alias kept for backwards compatibility.
Make a deep copy of the `axis` parameter for batch norm, so that in-place normalization of the axes cannot mutate the caller's list.

PiperOrigin-RevId: 176187824
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the normalization layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import moving_averages
class BatchNormalization(base.Layer):
"""Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Arguments:
axis: An `int` or list of `int`, the axis or axes that should be
normalized, typically the features axis/axes. For instance, after a
`Conv2D` layer with `data_format="channels_first"`, set `axis=1`. If a
list of axes is provided, each axis in `axis` will be normalized
simultaneously. Default is `-1`, which uses the last axis. Note: when
using multi-axis batch norm, the `beta`, `gamma`, `moving_mean`, and
`moving_variance` variables are the same rank as the input Tensor, with
dimension size 1 in all reduced (non-axis) dimensions).
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: An optional projection function to be applied to the `beta`
weight after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
gamma_constraint: An optional projection function to be applied to the
`gamma` weight after being updated by an `Optimizer`.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
fused: if `True`, use a faster, fused implementation if possible.
If `None`, use the system recommended implementation.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random_uniform(shape[-1:], 0.93, 1.07),
tf.random_uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
name: A string, the name of the layer.
"""
def __init__(self,
             axis=-1,
             momentum=0.99,
             epsilon=1e-3,
             center=True,
             scale=True,
             beta_initializer=init_ops.zeros_initializer(),
             gamma_initializer=init_ops.ones_initializer(),
             moving_mean_initializer=init_ops.zeros_initializer(),
             moving_variance_initializer=init_ops.ones_initializer(),
             beta_regularizer=None,
             gamma_regularizer=None,
             beta_constraint=None,
             gamma_constraint=None,
             renorm=False,
             renorm_clipping=None,
             renorm_momentum=0.99,
             fused=None,
             trainable=True,
             virtual_batch_size=None,
             adjustment=None,
             name=None,
             **kwargs):
  """Creates the layer; see the class docstring for argument semantics."""
  super(BatchNormalization, self).__init__(
      name=name, trainable=trainable, **kwargs)
  # Copy a list-valued axis so that later in-place normalization of the axes
  # (in build()) cannot mutate the caller's list.
  self.axis = list(axis) if isinstance(axis, list) else axis
  self.momentum = momentum
  self.epsilon = epsilon
  self.center = center
  self.scale = scale
  self.beta_initializer = beta_initializer
  self.gamma_initializer = gamma_initializer
  self.moving_mean_initializer = moving_mean_initializer
  self.moving_variance_initializer = moving_variance_initializer
  self.beta_regularizer = beta_regularizer
  self.gamma_regularizer = gamma_regularizer
  self.beta_constraint = beta_constraint
  self.gamma_constraint = gamma_constraint
  self.renorm = renorm
  self.virtual_batch_size = virtual_batch_size
  self.adjustment = adjustment
  # `fused=None` means "let the implementation decide"; it currently defaults
  # to the fused kernel (build() may still fall back to the unfused path).
  self.fused = True if fused is None else fused
  self._bessels_correction_test_only = True

  if renorm:
    clipping = renorm_clipping or {}
    valid_keys = ['rmax', 'rmin', 'dmax']
    if set(clipping) - set(valid_keys):
      raise ValueError('renorm_clipping %s contains keys not in %s' %
                       (clipping, valid_keys))
    self.renorm_clipping = clipping
    self.renorm_momentum = renorm_momentum
def build(self, input_shape):
  """Creates the layer variables for the given input shape.

  Args:
    input_shape: `TensorShape` (or value convertible to one) of the input.

  Raises:
    ValueError: if the input rank is undefined, an axis is out of range or
      duplicated, virtual batching is misconfigured, or a normalized axis has
      an undefined dimension.
    TypeError: if `self.axis` is neither an int nor a list.
  """
  input_shape = tensor_shape.TensorShape(input_shape)
  if not input_shape.ndims:
    raise ValueError('Input has undefined rank:', input_shape)
  ndims = len(input_shape)

  # Convert axis to list and resolve negatives
  if isinstance(self.axis, int):
    self.axis = [self.axis]
  if not isinstance(self.axis, list):
    raise TypeError('axis must be int or list, type given: %s'
                    % type(self.axis))

  for idx, x in enumerate(self.axis):
    if x < 0:
      self.axis[idx] = ndims + x

  # Validate axes
  for x in self.axis:
    if x < 0 or x >= ndims:
      raise ValueError('Invalid axis: %d' % x)
  if len(self.axis) != len(set(self.axis)):
    raise ValueError('Duplicate axis: %s' % self.axis)

  if self.virtual_batch_size is not None:
    if self.virtual_batch_size <= 0:
      raise ValueError('virtual_batch_size must be a positive integer that '
                       'divides the true batch size of the input Tensor')
    # If using virtual batches, the first dimension must be the batch
    # dimension and cannot be the batch norm axis
    if 0 in self.axis:
      raise ValueError('When using virtual_batch_size, the batch dimension '
                       'must be 0 and thus axis cannot include 0')
    if self.adjustment is not None:
      raise ValueError('When using virtual_batch_size, adjustment cannot '
                       'be specified')

  if self.fused:
    # Currently fused batch norm doesn't support renorm. It also only supports
    # an input tensor of rank 4 and a channel dimension on axis 1 or 3.
    # TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape the
    # output back to its original shape accordingly.
    self.fused = (not self.renorm and
                  ndims == 4 and
                  self.axis in [[1], [3]] and
                  self.virtual_batch_size is None and
                  self.adjustment is None)
    # TODO(chrisying): fused batch norm is currently not supported for
    # multi-axis batch norm and by extension virtual batches. In some cases,
    # it might be possible to use fused batch norm but would require reshaping
    # the Tensor to 4D with the axis in 1 or 3 (preferred 1) which is
    # particularly tricky. A compromise might be to just support the most
    # common use case (turning 5D w/ virtual batch to NCHW)

  if self.fused:
    if self.axis == [1]:
      self._data_format = 'NCHW'
    elif self.axis == [3]:
      self._data_format = 'NHWC'
    else:
      raise ValueError('Unsupported axis, fused batch norm only supports '
                       'axis == [1] or axis == [3]')

  # Each normalized axis must have a statically known dimension.
  axis_to_dim = {x: input_shape[x].value for x in self.axis}
  for x in axis_to_dim:
    if axis_to_dim[x] is None:
      raise ValueError('Input has undefined `axis` dimension. Input shape: ',
                       input_shape)
  self.input_spec = base.InputSpec(ndim=ndims, axes=axis_to_dim)

  if len(axis_to_dim) == 1 and self.virtual_batch_size is None:
    # Single axis batch norm (most common/default use-case)
    param_shape = (list(axis_to_dim.values())[0],)
  else:
    # Parameter shape is the original shape but with 1 in all non-axis dims
    param_shape = [axis_to_dim[i] if i in axis_to_dim
                   else 1 for i in range(ndims)]
    if self.virtual_batch_size is not None:
      # When using virtual batches, add an extra dim at index 1
      param_shape.insert(1, 1)
      for idx, x in enumerate(self.axis):
        self.axis[idx] = x + 1  # Account for added dimension

  if self.scale:
    self.gamma = self.add_variable(name='gamma',
                                   shape=param_shape,
                                   initializer=self.gamma_initializer,
                                   regularizer=self.gamma_regularizer,
                                   constraint=self.gamma_constraint,
                                   trainable=True)
  else:
    self.gamma = None
    if self.fused:
      # Fused kernel requires a gamma tensor even when scaling is disabled.
      self._gamma_const = array_ops.constant(1.0, shape=param_shape)

  if self.center:
    self.beta = self.add_variable(name='beta',
                                  shape=param_shape,
                                  initializer=self.beta_initializer,
                                  regularizer=self.beta_regularizer,
                                  constraint=self.beta_constraint,
                                  trainable=True)
  else:
    self.beta = None
    if self.fused:
      # Fused kernel requires a beta tensor even when centering is disabled.
      self._beta_const = array_ops.constant(0.0, shape=param_shape)

  # Disable variable partitioning when creating the moving mean and variance
  try:
    if self._scope:
      partitioner = self._scope.partitioner
      self._scope.set_partitioner(None)
    else:
      partitioner = None
    self.moving_mean = self.add_variable(
        name='moving_mean',
        shape=param_shape,
        initializer=self.moving_mean_initializer,
        trainable=False)
    self.moving_variance = self.add_variable(
        name='moving_variance',
        shape=param_shape,
        initializer=self.moving_variance_initializer,
        trainable=False)
    # Cached (1 - momentum), used by _assign_moving_average in the fused path.
    self._one_minus_decay = 1.0 - self.momentum
    if self.renorm:
      # Create variables to maintain the moving mean and standard deviation.
      # These are used in training and thus are different from the moving
      # averages above. The renorm variables are colocated with moving_mean
      # and moving_variance.
      # NOTE: below, the outer `with device` block causes the current device
      # stack to be cleared. The nested ones use a `lambda` to set the desired
      # device and ignore any devices that may be set by the custom getter.
      def _renorm_variable(name, shape):
        # Zero-initialized, non-trainable statistic variable.
        var = self.add_variable(name=name,
                                shape=shape,
                                initializer=init_ops.zeros_initializer(),
                                trainable=False)
        return var
      with ops.device(None):
        device = ((lambda _: self.moving_mean.device)
                  if context.in_graph_mode() else self.moving_mean.device)
        with ops.device(device):
          self.renorm_mean = _renorm_variable('renorm_mean', param_shape)
          self.renorm_mean_weight = _renorm_variable('renorm_mean_weight', ())
        # We initialize renorm_stddev to 0, and maintain the (0-initialized)
        # renorm_stddev_weight. This allows us to (1) mix the average
        # stddev with the minibatch stddev early in training, and (2) compute
        # the unbiased average stddev by dividing renorm_stddev by the weight.
        device = ((lambda _: self.moving_variance.device)
                  if context.in_graph_mode() else self.moving_variance.device)
        with ops.device(device):
          self.renorm_stddev = _renorm_variable('renorm_stddev', param_shape)
          self.renorm_stddev_weight = _renorm_variable(
              'renorm_stddev_weight', ())
  finally:
    # Restore the scope's partitioner even if variable creation failed.
    if partitioner:
      self._scope.set_partitioner(partitioner)
  self.built = True
def _assign_moving_average(self, variable, value, one_minus_decay):
  """Decays `variable` toward `value`: var -= (var - value) * one_minus_decay.

  Returns the op (or, for resource variables, the raw assign-sub op) that
  performs the update.
  """
  with ops.name_scope(None, 'AssignMovingAvg',
                      [variable, value, one_minus_decay]) as scope:
    with ops.colocate_with(variable):
      diff = math_ops.subtract(variable.read_value(), value)
      delta = math_ops.multiply(diff, one_minus_decay)
      if isinstance(variable, resource_variable_ops.ResourceVariable):
        # state_ops.assign_sub does an extra read_variable_op after the
        # assign; going through the raw resource op avoids that extra read.
        return gen_resource_variable_ops.assign_sub_variable_op(
            variable.handle, delta, name=scope)
      return state_ops.assign_sub(variable, delta, name=scope)
def _fused_batch_norm(self, inputs, training):
  """Returns the output of fused batch norm.

  Args:
    inputs: rank-4 input `Tensor` (NCHW or NHWC per `self._data_format`).
    training: Python boolean or scalar boolean `Tensor`.

  Returns:
    The normalized output `Tensor`. As a side effect, registers (graph mode)
    or executes (eager mode) the moving-average updates.
  """
  # TODO(reedwm): Add support for fp16 inputs.
  # Fused kernels require beta/gamma tensors; fall back to the constants
  # created in build() when centering/scaling is disabled.
  beta = self.beta if self.center else self._beta_const
  gamma = self.gamma if self.scale else self._gamma_const

  def _fused_batch_norm_training():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        epsilon=self.epsilon,
        data_format=self._data_format)

  def _fused_batch_norm_inference():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        mean=self.moving_mean,
        variance=self.moving_variance,
        epsilon=self.epsilon,
        is_training=False,
        data_format=self._data_format)

  output, mean, variance = utils.smart_cond(
      training, _fused_batch_norm_training, _fused_batch_norm_inference)
  if not self._bessels_correction_test_only:
    # Remove Bessel's correction to be consistent with non-fused batch norm.
    # Note that the variance computed by fused batch norm is
    # with Bessel's correction.
    sample_size = math_ops.cast(
        array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
    factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
    variance *= factor

  training_value = utils.constant_value(training)
  if training_value is None:
    # `training` is only known at run time: use a decay of 0 (no update)
    # when the graph executes in inference mode.
    one_minus_decay = utils.smart_cond(training,
                                       lambda: self._one_minus_decay,
                                       lambda: 0.)
  else:
    one_minus_decay = ops.convert_to_tensor(self._one_minus_decay)
  if training_value or training_value is None:
    mean_update = self._assign_moving_average(self.moving_mean, mean,
                                              one_minus_decay)
    variance_update = self._assign_moving_average(self.moving_variance,
                                                 variance, one_minus_decay)
    if context.in_graph_mode():
      # Note that in Eager mode, the updates are already executed when running
      # assign_moving_averages. So we do not need to put them into
      # collections.
      self.add_update(mean_update, inputs=inputs)
      self.add_update(variance_update, inputs=inputs)

  return output
def _renorm_correction_and_moments(self, mean, variance, training):
  """Returns the correction and update values for renorm.

  Args:
    mean: batch mean `Tensor`.
    variance: batch variance `Tensor`.
    training: Python boolean or scalar boolean `Tensor`.

  Returns:
    A tuple `(r, d, new_mean, new_variance)` where `(r, d)` is the renorm
    correction (applied as `corrected = normalized * r + d`) and
    `new_mean` / `new_variance` are the unbiased moving moments to use for
    the moving-average updates.
  """
  stddev = math_ops.sqrt(variance + self.epsilon)
  # Compute the average mean and standard deviation, as if they were
  # initialized with this batch's moments.
  mixed_renorm_mean = (self.renorm_mean +
                       (1. - self.renorm_mean_weight) * mean)
  mixed_renorm_stddev = (self.renorm_stddev +
                         (1. - self.renorm_stddev_weight) * stddev)
  # Compute the corrections for batch renorm.
  r = stddev / mixed_renorm_stddev
  d = (mean - mixed_renorm_mean) / mixed_renorm_stddev
  # Ensure the corrections use pre-update moving averages.
  with ops.control_dependencies([r, d]):
    mean = array_ops.identity(mean)
    stddev = array_ops.identity(stddev)
  # Clip r to [rmin, rmax] and d to [-dmax, dmax]; missing keys mean no clip.
  rmin, rmax, dmax = [self.renorm_clipping.get(key)
                      for key in ['rmin', 'rmax', 'dmax']]
  if rmin is not None:
    r = math_ops.maximum(r, rmin)
  if rmax is not None:
    r = math_ops.minimum(r, rmax)
  if dmax is not None:
    d = math_ops.maximum(d, -dmax)
    d = math_ops.minimum(d, dmax)
  # When not training, use r=1, d=0.
  r = utils.smart_cond(training, lambda: r, lambda: array_ops.ones_like(r))
  d = utils.smart_cond(training, lambda: d, lambda: array_ops.zeros_like(d))

  def _update_renorm_variable(var, weight, value):
    """Updates a moving average and weight, returns the unbiased value."""
    value = array_ops.identity(value)

    def _do_update():
      # Update the variables without zero debiasing. The debiasing will be
      # accomplished by dividing the exponential moving average by the weight.
      # For example, after a single update, the moving average would be
      # (1-decay) * value. and the weight will be 1-decay, with their ratio
      # giving the value.
      # Make sure the weight is not updated until before r and d computation.
      with ops.control_dependencies([value]):
        weight_value = array_ops.constant(1., dtype=weight.dtype)
      new_var = moving_averages.assign_moving_average(
          var, value, self.renorm_momentum, zero_debias=False)
      new_weight = moving_averages.assign_moving_average(
          weight, weight_value, self.renorm_momentum, zero_debias=False)
      return new_var / new_weight

    def _fake_update():
      # Not training: read the variable without updating it.
      return array_ops.identity(var)
    return utils.smart_cond(training, _do_update, _fake_update)

  # Keep the renorm statistics on the same devices as the corresponding
  # moving moments.
  with ops.colocate_with(self.moving_mean):
    new_mean = _update_renorm_variable(self.renorm_mean,
                                       self.renorm_mean_weight,
                                       mean)
  with ops.colocate_with(self.moving_variance):
    new_stddev = _update_renorm_variable(self.renorm_stddev,
                                         self.renorm_stddev_weight,
                                         stddev)
    # Make sqrt(moving_variance + epsilon) = new_stddev.
    new_variance = math_ops.square(new_stddev) - self.epsilon
  return (r, d, new_mean, new_variance)
def call(self, inputs, training=False):
  """Apply batch normalization to `inputs`.

  Dispatches to the fused implementation when `self.fused` is set;
  otherwise computes the statistics, (optionally) renorm corrections and
  moving-average updates explicitly, then normalizes.

  Args:
    inputs: Input `Tensor`.
    training: Python bool or scalar bool `Tensor`; selects batch
      statistics (training) versus the stored moving statistics
      (inference).

  Returns:
    The normalized `Tensor` (reshaped back if virtual batching was used).
  """
  if self.virtual_batch_size is not None:
    # Virtual batches (aka ghost batches) can be simulated by reshaping the
    # Tensor and reusing the existing batch norm implementation
    original_shape = [-1] + inputs.shape.as_list()[1:]
    expanded_shape = [self.virtual_batch_size, -1] + original_shape[1:]

    # Will cause errors if virtual_batch_size does not divide the batch size
    inputs = array_ops.reshape(inputs, expanded_shape)

    def undo_virtual_batching(outputs):
      # Collapse the (virtual_batch, sub_batch) leading dims back into one.
      outputs = array_ops.reshape(outputs, original_shape)
      return outputs

  if self.fused:
    outputs = self._fused_batch_norm(inputs, training=training)
    if self.virtual_batch_size is not None:
      # Currently never reaches here since fused_batch_norm does not support
      # virtual batching
      return undo_virtual_batching(outputs)
    return outputs

  # Compute the axes along which to reduce the mean / variance
  input_shape = inputs.get_shape()
  ndims = len(input_shape)
  reduction_axes = [i for i in range(ndims) if i not in self.axis]
  if self.virtual_batch_size is not None:
    del reduction_axes[1]     # Do not reduce along virtual batch dim

  # Broadcasting only necessary for single-axis batch norm where the axis is
  # not the last dimension
  broadcast_shape = [1] * ndims
  broadcast_shape[self.axis[0]] = input_shape[self.axis[0]].value

  def _broadcast(v):
    # Reshape gamma/beta/statistics to a broadcastable shape when needed.
    if (v is not None and
        len(v.get_shape()) != ndims and
        reduction_axes != list(range(ndims - 1))):
      return array_ops.reshape(v, broadcast_shape)
    return v

  scale, offset = _broadcast(self.gamma), _broadcast(self.beta)

  def _compose_transforms(scale, offset, then_scale, then_offset):
    # Fold an additional affine (then_scale, then_offset) transform into
    # the running (scale, offset) pair.
    if then_scale is not None:
      scale *= then_scale
      offset *= then_scale
    if then_offset is not None:
      offset += then_offset
    return (scale, offset)

  # Determine a boolean value for `training`: could be True, False, or None.
  training_value = utils.constant_value(training)
  if training_value is not False:
    if self.adjustment:
      adj_scale, adj_bias = self.adjustment(array_ops.shape(inputs))
      # Adjust only during training.
      adj_scale = utils.smart_cond(training,
                                   lambda: adj_scale,
                                   lambda: array_ops.ones_like(adj_scale))
      adj_bias = utils.smart_cond(training,
                                  lambda: adj_bias,
                                  lambda: array_ops.zeros_like(adj_bias))
      scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset)

    # Some of the computations here are not necessary when training==False
    # but not a constant. However, this makes the code simpler.
    keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1
    mean, variance = nn.moments(inputs, reduction_axes, keep_dims=keep_dims)

    moving_mean = self.moving_mean
    moving_variance = self.moving_variance

    mean = utils.smart_cond(training,
                            lambda: mean,
                            lambda: moving_mean)
    variance = utils.smart_cond(training,
                                lambda: variance,
                                lambda: moving_variance)

    if self.renorm:
      r, d, new_mean, new_variance = self._renorm_correction_and_moments(
          mean, variance, training)
      # When training, the normalized values (say, x) will be transformed as
      # x * gamma + beta without renorm, and (x * r + d) * gamma + beta
      # = x * (r * gamma) + (d * gamma + beta) with renorm.
      r = _broadcast(array_ops.stop_gradient(r, name='renorm_r'))
      d = _broadcast(array_ops.stop_gradient(d, name='renorm_d'))
      scale, offset = _compose_transforms(r, d, scale, offset)
    else:
      new_mean, new_variance = mean, variance

    if self.virtual_batch_size is not None:
      # This isn't strictly correct since in ghost batch norm, you are
      # supposed to sequentially update the moving_mean and moving_variance
      # with each sub-batch. However, since the moving statistics are only
      # used during evaluation, it is more efficient to just update in one
      # step and should not make a significant difference in the result.
      new_mean = math_ops.reduce_mean(new_mean,
                                      axis=1, keep_dims=True)
      new_variance = math_ops.reduce_mean(new_variance,
                                          axis=1, keep_dims=True)

    def _do_update(var, value):
      # Exponential moving-average update of a moving statistic.
      return moving_averages.assign_moving_average(
          var, value, self.momentum, zero_debias=False)

    mean_update = utils.smart_cond(
        training,
        lambda: _do_update(self.moving_mean, new_mean),
        lambda: self.moving_mean)
    variance_update = utils.smart_cond(
        training,
        lambda: _do_update(self.moving_variance, new_variance),
        lambda: self.moving_variance)
    if context.in_graph_mode():
      # Register the updates so they land in GraphKeys.UPDATE_OPS.
      self.add_update(mean_update, inputs=inputs)
      self.add_update(variance_update, inputs=inputs)

  else:
    mean, variance = self.moving_mean, self.moving_variance

  outputs = nn.batch_normalization(inputs,
                                   _broadcast(mean),
                                   _broadcast(variance),
                                   offset,
                                   scale,
                                   self.epsilon)
  # If some components of the shape got lost due to adjustments, fix that.
  outputs.set_shape(input_shape)

  if self.virtual_batch_size is not None:
    return undo_virtual_batching(outputs)
  return outputs
def batch_normalization(inputs,
                        axis=-1,
                        momentum=0.99,
                        epsilon=1e-3,
                        center=True,
                        scale=True,
                        beta_initializer=init_ops.zeros_initializer(),
                        gamma_initializer=init_ops.ones_initializer(),
                        moving_mean_initializer=init_ops.zeros_initializer(),
                        moving_variance_initializer=init_ops.ones_initializer(),
                        beta_regularizer=None,
                        gamma_regularizer=None,
                        beta_constraint=None,
                        gamma_constraint=None,
                        training=False,
                        trainable=True,
                        name=None,
                        reuse=None,
                        renorm=False,
                        renorm_clipping=None,
                        renorm_momentum=0.99,
                        fused=None,
                        virtual_batch_size=None,
                        adjustment=None):
  """Functional interface for the batch normalization layer.

  Reference: http://arxiv.org/abs/1502.03167
  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift", Sergey Ioffe, Christian Szegedy.

  Note: when training, the moving_mean and moving_variance need to be
  updated. By default the update ops are placed in
  `tf.GraphKeys.UPDATE_OPS`, so they need to be added as a dependency to
  the `train_op`. For example:

  ```python
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
  with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)
  ```

  Arguments:
    inputs: Tensor input.
    axis: An `int`, the axis that should be normalized (typically the
      features axis, e.g. `axis=1` after a `channels_first` convolution).
    momentum: Momentum for the moving average.
    epsilon: Small float added to variance to avoid dividing by zero.
    center: If True, add the `beta` offset to the normalized tensor.
    scale: If True, multiply by `gamma`; may be disabled when the next
      layer is linear since it can perform the scaling itself.
    beta_initializer: Initializer for the beta weight.
    gamma_initializer: Initializer for the gamma weight.
    moving_mean_initializer: Initializer for the moving mean.
    moving_variance_initializer: Initializer for the moving variance.
    beta_regularizer: Optional regularizer for the beta weight.
    gamma_regularizer: Optional regularizer for the gamma weight.
    beta_constraint: Optional projection function applied to `beta` after
      each `Optimizer` update (not safe with asynchronous distributed
      training).
    gamma_constraint: Optional projection function applied to `gamma`
      after each `Optimizer` update.
    training: Python boolean or boolean scalar `Tensor` choosing between
      batch statistics (training) and moving statistics (inference);
      setting this incorrectly breaks training/inference behavior.
    trainable: If `True`, also add variables to the collection
      `GraphKeys.TRAINABLE_VARIABLES`.
    name: String, the name of the layer.
    reuse: Whether to reuse the weights of a previous layer by the same
      name.
    renorm: Whether to use Batch Renormalization
      (https://arxiv.org/abs/1702.03275); inference is unaffected.
    renorm_clipping: Optional dict mapping 'rmax', 'rmin', 'dmax' to
      scalar `Tensors` clipping the renorm correction `(r, d)`; missing
      keys default to inf, 0, inf respectively.
    renorm_momentum: Momentum for the renorm moving statistics; `momentum`
      is still used to obtain inference-time means and variances.
    fused: If `True`, use the faster fused implementation when possible;
      `None` selects the system-recommended implementation.
    virtual_batch_size: Optional `int` enabling "Ghost Batch
      Normalization": virtual sub-batches of this size are normalized
      separately (with shared gamma, beta and moving statistics). Must
      divide the actual batch size during execution.
    adjustment: Optional function from the (dynamic) input shape to a
      `(scale, bias)` pair applied to the normalized values before gamma
      and beta, during training only. Cannot be combined with
      `virtual_batch_size`.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Collect the constructor arguments once, then build and apply the layer.
  layer_kwargs = dict(
      axis=axis,
      momentum=momentum,
      epsilon=epsilon,
      center=center,
      scale=scale,
      beta_initializer=beta_initializer,
      gamma_initializer=gamma_initializer,
      moving_mean_initializer=moving_mean_initializer,
      moving_variance_initializer=moving_variance_initializer,
      beta_regularizer=beta_regularizer,
      gamma_regularizer=gamma_regularizer,
      beta_constraint=beta_constraint,
      gamma_constraint=gamma_constraint,
      renorm=renorm,
      renorm_clipping=renorm_clipping,
      renorm_momentum=renorm_momentum,
      fused=fused,
      trainable=trainable,
      virtual_batch_size=virtual_batch_size,
      adjustment=adjustment,
      name=name,
      _reuse=reuse,
      _scope=name)
  return BatchNormalization(**layer_kwargs).apply(inputs, training=training)
# Aliases
# Backwards-compatible shorthand names for the class and functional forms.
BatchNorm = BatchNormalization
batch_norm = batch_normalization
|
# coding: utf-8
# ===== DEFINITIONS =====
from ipykernel.kernelbase import Kernel
from pexpect import replwrap, EOF
from subprocess import check_output
import re
import signal
import uuid
__version__ = '0.0.1'

# Extracts a dotted version number (e.g. "0.9.5") from `gosh -V` output.
version_pat = re.compile(r'(\d+(\.\d+)+)')
# Collapses runs of CR/LF so a multi-line cell becomes a single REPL command.
crlf_pat = re.compile(r'[\r\n]+')
class EgisonKernel(Kernel):
    """Jupyter kernel that proxies cell input to a Gauche (gosh) REPL."""

    implementation = 'gauche_kernel'
    implementation_version = __version__

    # Lazily cached result of probing `gosh -V`.
    _language_version = None

    @property
    def language_version(self):
        if self._language_version is None:
            probe = check_output(['gosh', '-V']).decode('utf-8')
            self._language_version = version_pat.search(probe).group(1)
        return self._language_version

    @property
    def banner(self):
        return u'Gauche Kernel (Gauche v%s)' % self.language_version

    language_info = {'name': 'gauche',
                     'codemirror_mode': 'scheme',
                     'mimetype': 'text/plain',
                     'file_extension': '.scm'}

    def __init__(self, **kwargs):
        Kernel.__init__(self, **kwargs)
        self._start_egison()

    def _start_egison(self):
        # Forked children inherit signal handlers and kernelapp ignores
        # SIGINT outside message handlers, so temporarily restore the
        # default handler while spawning to keep the REPL interruptible.
        previous = signal.signal(signal.SIGINT, signal.SIG_DFL)
        try:
            self.gauchewrapper = replwrap.REPLWrapper("gosh", "gosh>", None)
        finally:
            signal.signal(signal.SIGINT, previous)

    def do_execute(self, code, silent, store_history=True,
                   user_expressions=None, allow_stdin=False):
        """Run one cell through the REPL and stream its output back."""
        code = crlf_pat.sub(' ', code.strip())
        if not code:
            return {'status': 'ok', 'execution_count': self.execution_count,
                    'payload': [], 'user_expressions': {}}
        interrupted = False
        try:
            output = self.gauchewrapper.run_command(code, timeout=None)
        except KeyboardInterrupt:
            interrupted = True
            self.gauchewrapper.child.sendintr()
            self.gauchewrapper._expect_prompt()
            output = self.gauchewrapper.child.before
        except EOF:
            # The REPL subprocess died: report its last output and respawn.
            output = self.gauchewrapper.child.before + 'Restarting Gauche'
            self._start_egison()
        if not silent:
            self.send_response(self.iopub_socket, 'stream',
                               {'name': 'stdout', 'text': output})
        if interrupted:
            return {'status': 'abort', 'execution_count': self.execution_count}
        return {'status': 'ok', 'execution_count': self.execution_count,
                'payload': [], 'user_expressions': {}}
# ===== MAIN =====
if __name__ == '__main__':
    # Hand control to IPython's kernel application machinery.
    from ipykernel.kernelapp import IPKernelApp
    IPKernelApp.launch_instance(kernel_class=EgisonKernel)
Remove unused imports, attributes, and methods
# coding: utf-8
# ===== DEFINITIONS =====
from ipykernel.kernelbase import Kernel
from pexpect import replwrap, EOF
from subprocess import check_output
import re
import signal
__version__ = '0.0.1'

# Collapses runs of CR/LF so a multi-line cell becomes a single REPL command.
crlf_pat = re.compile(r'[\r\n]+')
class GaucheKernel(Kernel):
    """Jupyter kernel wrapping a Gauche (gosh) REPL subprocess."""

    implementation = 'gauche_kernel'
    implementation_version = __version__

    @property
    def banner(self):
        return u'Gauche Kernel'

    language_info = {'name': 'gauche',
                     'codemirror_mode': 'scheme',
                     'mimetype': 'text/plain',
                     'file_extension': '.scm'}

    def __init__(self, **kwargs):
        Kernel.__init__(self, **kwargs)
        self._start_gauche()

    def _start_gauche(self):
        # Restore the default SIGINT handler while spawning the REPL so the
        # child stays interruptible (kernelapp normally ignores SIGINT).
        previous = signal.signal(signal.SIGINT, signal.SIG_DFL)
        try:
            self.gauchewrapper = replwrap.REPLWrapper("gosh", "gosh>", None)
        finally:
            signal.signal(signal.SIGINT, previous)

    def do_execute(self, code, silent, store_history=True,
                   user_expressions=None, allow_stdin=False):
        """Run one cell through the REPL and stream its output back."""
        code = crlf_pat.sub(' ', code.strip())
        if not code:
            return {'status': 'ok', 'execution_count': self.execution_count,
                    'payload': [], 'user_expressions': {}}
        interrupted = False
        try:
            output = self.gauchewrapper.run_command(code, timeout=None)
        except KeyboardInterrupt:
            interrupted = True
            self.gauchewrapper.child.sendintr()
            self.gauchewrapper._expect_prompt()
            output = self.gauchewrapper.child.before
        except EOF:
            # The REPL subprocess died: report its last output and respawn.
            output = self.gauchewrapper.child.before + 'Restarting Gauche'
            self._start_gauche()
        if not silent:
            self.send_response(self.iopub_socket, 'stream',
                               {'name': 'stdout', 'text': output})
        if interrupted:
            return {'status': 'abort', 'execution_count': self.execution_count}
        return {'status': 'ok', 'execution_count': self.execution_count,
                'payload': [], 'user_expressions': {}}
if __name__ == '__main__':
    # Hand control to IPython's kernel application machinery.
    from ipykernel.kernelapp import IPKernelApp
    IPKernelApp.launch_instance(kernel_class=GaucheKernel)
|
# Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempfile import NamedTemporaryFile
from .base import LibModelTest, ModelObjectTest, StateMixinTest
from lib import model
from lib.model import gamestate
class TestLibModelCoord(LibModelTest):
    """Check the public exports of lib.model.gamestate."""

    def setUp(self):
        # LibModelTest compares this list against the module's exports.
        self.expected_exports = [gamestate.GameState]
class TestGameState(ModelObjectTest, StateMixinTest):
    """Exercise gamestate.GameState construction and serialization."""

    def setUp(self):
        # Build a minimal user/galaxy fixture; the user is crowned emperor
        # of their starting planet so the state resembles a real game.
        self.user = model.User('nim', model.Coord())
        self.galaxy = model.Galaxy()
        system = self.galaxy.system(self.user.planets[0])
        planet = system.planets[int(self.user.planets[0].planet)]
        planet.emperor = self.user.name
        super().setUp()
        # Expected __getstate__ shape: (save_file, user state, galaxy state).
        self.expected_state = (str, (tuple, type(None)), (tuple, type(None)))
        self.classname_in_repr = True
        self.expected_attrs = {'save_file': str, 'user': (model.User,
                                                          type(None)),
                               'galaxy': (model.Galaxy, type(None))}

    def get_new_instance(self):
        # Keep the tempfile on self so it is not deleted while the
        # GameState under test still points at it.
        self.tmp_file = NamedTemporaryFile()
        state = gamestate.GameState(save_file=self.tmp_file.name)
        state.user = self.user
        state.galaxy = self.galaxy
        return state

    def get_tst_state(self):
        return ('save_file', self.user.__getstate__(),
                self.galaxy.__getstate__())

    def test_state_contents(self):
        """
        Recursively iterate through the state and verify only iterables or
        primitives are used.
        """
        self.skipTest('NI')  # NI: not implemented yet
tests/model: check that __getstate__ returns only primitive values.
Acceptable primitives are int, float, str, None, tuple, list and dict,
because they can be directly converted to JSON or YAML.
# Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempfile import NamedTemporaryFile
from collections import Iterable
from lib import model
from lib.model import gamestate
from .base import LibModelTest, ModelObjectTest, StateMixinTest
class TestLibModelCoord(LibModelTest):
    """Check the public exports of lib.model.gamestate."""

    def setUp(self):
        # LibModelTest compares this list against the module's exports.
        self.expected_exports = [gamestate.GameState]
class TestGameState(ModelObjectTest, StateMixinTest):
    """Exercise gamestate.GameState construction and __getstate__ contents."""

    def setUp(self):
        # Build a minimal user/galaxy fixture; the user is crowned emperor
        # of their starting planet so the state resembles a real game.
        self.user = model.User('nim', model.Coord())
        self.galaxy = model.Galaxy()
        system = self.galaxy.system(self.user.planets[0])
        planet = system.planets[int(self.user.planets[0].planet)]
        planet.emperor = self.user.name
        super().setUp()
        # Expected __getstate__ shape: (save_file, user state, galaxy state).
        self.expected_state = (str, (tuple, type(None)), (tuple, type(None)))
        self.classname_in_repr = True
        self.expected_attrs = {'save_file': str, 'user': (model.User,
                                                          type(None)),
                               'galaxy': (model.Galaxy, type(None))}

    def get_new_instance(self):
        # Keep the tempfile on self so it is not deleted while the
        # GameState under test still points at it.
        self.tmp_file = NamedTemporaryFile()
        state = gamestate.GameState(save_file=self.tmp_file.name)
        state.user = self.user
        state.galaxy = self.galaxy
        return state

    def get_tst_state(self):
        return ('save_file', self.user.__getstate__(),
                self.galaxy.__getstate__())

    def recurse_objects(self, state, frame):
        """Recursively yield every leaf item in state."""
        print('STARTING FRAME {}'.format(frame))
        frame += 1
        try:
            for obj in state:
                print("obj {} is type {}".format(obj, type(obj)))
                if isinstance(obj, str):  # don't iterate over strings
                    print(' yielding a string')
                    yield obj
                elif isinstance(obj, dict):
                    print(' recursing over dict...')
                    for key, value in obj.items():
                        yield key
                        print(' descending into dict value')
                        for sub_obj in self.recurse_objects(value, frame):
                            yield sub_obj
                elif isinstance(obj, Iterable):
                    print(' recursing...')
                    for sub_obj in self.recurse_objects(obj, frame):
                        yield sub_obj
                else:
                    print(' yielding the obj')
                    yield obj
        except TypeError:
            # Narrowed from a bare `except:`: only "state is not iterable"
            # should turn state into a leaf; other errors must propagate.
            yield state

    def test_state_contents(self):
        """
        Recursively iterate through the state and verify only iterables or
        primitives are used.
        """
        valid_types = (int, float, str, type(None), tuple)
        for obj in self.recurse_objects(self.object.__getstate__(), 0):
            self.assertIsInstance(obj, valid_types)
            if isinstance(obj, tuple):  # dict keys
                for sub_obj in obj:
                    # Bug fix: the original asserted `obj` here, so the
                    # tuple elements were never actually checked.
                    self.assertIsInstance(sub_obj, valid_types)
|
import json
import click
import subprocess
import utils
import logging
@click.command()
@click.argument('sources', type=click.Path(exists=True), required=True)
@click.argument('output', required=True)
@click.argument('min_zoom', default=5)
@click.argument('max_zoom', default=14)
def vectorTiling(sources, output, min_zoom, max_zoom):
    """ Function that creates vector tiles
    PARAMS:
    - sources : directory where the geojson file(s) are
    - output : file.mbtiles for the generated data
    """
    # Collect every *.geojson file under the sources directory.
    geojson_files = [path for path in utils.get_files(sources)
                     if utils.get_path_parts(path)[-1].split('.')[1] == 'geojson']
    logging.info("{} geojson found".format(len(geojson_files)))
    # Each feature carries the path of a source file in its properties;
    # accumulate them all into one space-separated argument string.
    feature_paths = []
    for geojson_file in geojson_files:
        with open(geojson_file, 'rb') as handle:
            collection = json.load(handle)
        for feature in collection['features']:
            feature_paths.append(feature['properties']['path'])
    paths_string = ''.join(p + ' ' for p in feature_paths)
    command = 'tippecanoe -f -o ' + output + ' ' + paths_string + ' -z {} -Z {}'.format(max_zoom, min_zoom)
    subprocess.call(command, shell=True)
if __name__ == '__main__':
    # Click parses sys.argv and invokes the command.
    vectorTiling()
Command line option changes
#!/usr/bin/env python
import click
import json
import logging
import os
import subprocess
import sys
import utils
@click.command()
@click.argument('output', required=True, type=click.Path(exists=False))
@click.argument('sources', type=click.Path(exists=True), nargs=-1)
@click.option('--catalog', type=click.Path(exists=True),
              help="read a catalog file instead of a list of geojson files")
@click.option('--min_zoom', default=5,
              help="min zoom level to generate")
@click.option('--max_zoom', default=14,
              help="max zoom level to generate")
@click.option('--layer', default="lands",
              help="layer name")
def vectorTiling(output, sources, catalog, min_zoom, max_zoom, layer):
    """ Generate an MBTiles file of vector tiles from the output of an OpenBounds project.
    \b
    PARAMS:
    - sources : A directory containing geojson files, or a list of geojson files"
    - output : file.mbtiles for the generated data
    """
    # Refuse to clobber an existing tileset.
    if os.path.exists(output):
        utils.error("Error, output path already exists")
        sys.exit(-1)
    if catalog:
        # The catalog is a GeoJSON FeatureCollection whose features carry
        # their source file path in properties['path'].
        with open(catalog, 'rb') as f:
            geojson = json.load(f)
        source_paths = [item['properties']['path'] for item in geojson['features']]
    else:
        source_paths = []
        for arg in sources:
            for item in utils.get_files(arg):
                if os.path.splitext(item)[1] == '.geojson':
                    source_paths.append(item)
    utils.info("{} geojson files found".format(len(source_paths)))
    # Build the argument vector directly and skip the shell: paths that
    # contain spaces or shell metacharacters no longer break (or inject
    # into) the command line.
    command = (['tippecanoe', '-o', output] +
               source_paths +
               ['-l', layer,  # force to use a single layer
                '-z', str(max_zoom), '-Z', str(min_zoom)])
    utils.info(' '.join(command))
    subprocess.call(command)
if __name__ == '__main__':
    # Click parses sys.argv and invokes the command.
    vectorTiling()
|
# Copyright (c) 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from PIL import Image
from PIL import ImageChops
from huxley.errors import TestError
def rmsdiff_2011(im1, im2):
    "Calculate the root-mean-square difference between two images"
    # Histogram of the per-pixel difference image: index = intensity level,
    # value = pixel count at that level.
    histogram = ImageChops.difference(im1, im2).histogram()
    sum_of_squares = sum(count * (level ** 2)
                         for level, count in enumerate(histogram))
    pixel_count = float(im1.size[0] * im1.size[1])
    return math.sqrt(sum_of_squares / pixel_count)
def images_identical(path1, path2):
    """Return True when the two image files contain exactly the same pixels."""
    first = Image.open(path1)
    second = Image.open(path2)
    # getbbox() is None only when the difference image is entirely black.
    difference = ImageChops.difference(first, second)
    return difference.getbbox() is None
def image_diff(path1, path2, outpath, diffcolor):
    """Write a visual diff of two screenshots and return a difference score.

    Every pixel that differs between the two images is painted with
    `diffcolor` onto the second image, which is then saved to `outpath`.

    Returns:
        (rmsdiff, width, height): the root-mean-square pixel difference and
        the image dimensions.

    Raises:
        TestError: if the images differ in pixel mode or dimensions.
        NotImplementedError: for palette ('P') or unrecognized modes.
    """
    im1 = Image.open(path1)
    im2 = Image.open(path2)

    rmsdiff = rmsdiff_2011(im1, im2)

    pix1 = im1.load()
    pix2 = im2.load()

    if im1.mode != im2.mode:
        raise TestError('Different pixel modes between %r and %r' % (path1, path2))
    if im1.size != im2.size:
        raise TestError('Different dimensions between %r (%r) and %r (%r)' % (path1, im1.size, path2, im2.size))

    mode = im1.mode

    # Pick the pixel value that marks a difference for this color mode.
    if mode == '1':
        value = 255
    elif mode == 'L':
        value = 255
    elif mode == 'RGB':
        value = diffcolor
    elif mode == 'RGBA':
        value = diffcolor + (255,)
    elif mode == 'P':
        raise NotImplementedError('TODO: look up nearest palette color')
    else:
        raise NotImplementedError('Unexpected PNG mode')

    width, height = im1.size
    # Paint every differing pixel onto im2 (Python 2: xrange).
    for y in xrange(height):
        for x in xrange(width):
            if pix1[x, y] != pix2[x, y]:
                pix2[x, y] = value
    im2.save(outpath)

    return (rmsdiff, width, height)
Better tolerance of missing screenshots
# Copyright (c) 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
from PIL import Image
from PIL import ImageChops
from huxley.errors import TestError
def rmsdiff_2011(im1, im2):
    "Calculate the root-mean-square difference between two images"
    hist = ImageChops.difference(im1, im2).histogram()
    # hist[level] counts pixels at each difference intensity level.
    weighted = [count * (level ** 2) for level, count in enumerate(hist)]
    n_pixels = float(im1.size[0] * im1.size[1])
    return math.sqrt(sum(weighted) / n_pixels)
def images_identical(path1, path2):
    """Return True when both screenshots exist, match in size, and have
    identical pixels; a missing or differently-sized path1 is a mismatch."""
    if not os.path.exists(path1):
        # No stored baseline yet: treat it as different.
        return False
    first = Image.open(path1)
    second = Image.open(path2)
    if first.size != second.size:
        return False
    return ImageChops.difference(first, second).getbbox() is None
def image_diff(path1, path2, outpath, diffcolor):
    """Write a visual diff of two screenshots and return a difference score.

    Every pixel that differs between the two images is painted with
    `diffcolor` onto the second image, which is then saved to `outpath`.
    A missing baseline (path1) or a size mismatch short-circuits with a
    large constant RMS score instead of raising.

    Returns:
        (rmsdiff, width, height): root-mean-square pixel difference (or the
        sentinel 1000 for missing/mismatched baselines) and the dimensions.

    Raises:
        TestError: if the images differ in pixel mode or dimensions.
        NotImplementedError: for palette ('P') or unrecognized modes.
    """
    if not os.path.exists(path1):
        # No baseline screenshot: report a fixed, very large difference.
        im2 = Image.open(path2)
        return (1000, im2.size[0], im2.size[1])
    im1 = Image.open(path1)
    im2 = Image.open(path2)

    if im1.size != im2.size:
        # Dimension mismatch: same sentinel score as a missing baseline.
        return (1000, im2.size[0], im2.size[1])

    rmsdiff = rmsdiff_2011(im1, im2)

    pix1 = im1.load()
    pix2 = im2.load()

    if im1.mode != im2.mode:
        raise TestError('Different pixel modes between %r and %r' % (path1, path2))
    if im1.size != im2.size:
        raise TestError('Different dimensions between %r (%r) and %r (%r)' % (path1, im1.size, path2, im2.size))

    mode = im1.mode

    # Pick the pixel value that marks a difference for this color mode.
    if mode == '1':
        value = 255
    elif mode == 'L':
        value = 255
    elif mode == 'RGB':
        value = diffcolor
    elif mode == 'RGBA':
        value = diffcolor + (255,)
    elif mode == 'P':
        raise NotImplementedError('TODO: look up nearest palette color')
    else:
        raise NotImplementedError('Unexpected PNG mode')

    width, height = im1.size
    # Paint every differing pixel onto im2 (Python 2: xrange).
    for y in xrange(height):
        for x in xrange(width):
            if pix1[x, y] != pix2[x, y]:
                pix2[x, y] = value
    im2.save(outpath)

    return (rmsdiff, width, height)
|
#!/usr/bin/env python
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Steve Reinhardt
# Nathan Binkert
import os
import re
import shutil
import sys
import time
from glob import glob
from os import system
from os.path import basename, dirname, exists, isdir, isfile, join as joinpath
def mkdir(*args):
    """Create a single directory from joined path components."""
    os.mkdir(joinpath(*args))
def touch(*args, **kwargs):
    """Update timestamps on a path; `when` kwarg is an (atime, mtime) pair
    or None for "now"."""
    stamp = kwargs.get('when', None)
    os.utime(joinpath(*args), stamp)
def rmtree(*args):
    """Delete every filesystem entry matching the glob of the joined path.

    Directories are removed recursively; anything else is unlinked.
    """
    pattern = joinpath(*args)
    for entry in glob(pattern):
        if isdir(entry):
            shutil.rmtree(entry)
        else:
            os.unlink(entry)
def remove(*args):
    """Unlink every non-directory entry matching the glob of the joined path."""
    for entry in glob(joinpath(*args)):
        if not isdir(entry):
            os.unlink(entry)
def movedir(srcdir, destdir, dir):
    """Move subdirectory `dir` from srcdir into destdir, creating parents.

    Raises AttributeError when the source is not a directory. Note that
    os.makedirs fails if the destination parent already exists.
    """
    source = joinpath(srcdir, dir)
    if not isdir(source):
        raise AttributeError
    target = joinpath(destdir, dir)
    os.makedirs(dirname(target))
    shutil.move(source, target)
# --- Top-level release script (Python 2) ---
# Must be run from the root of an m5 Mercurial checkout.
if not isdir('.hg'):
    sys.exit('Not in the top level of an m5 tree!')

usage = '%s <destdir> <release name>' % sys.argv[0]

if len(sys.argv) != 3:
    sys.exit(usage)

destdir = sys.argv[1]
releasename = sys.argv[2]

release_dest = joinpath(destdir, 'release')
#encumbered_dest = joinpath(destdir, 'encumbered')
release_dir = joinpath(release_dest, releasename)
#encumbered_dir = joinpath(encumbered_dest, releasename)

# Create (or validate) the destination directory.
if exists(destdir):
    if not isdir(destdir):
        raise AttributeError, '%s exists, but is not a directory' % destdir
else:
    mkdir(destdir)

# Start from a clean release staging area.
if exists(release_dest):
    if not isdir(release_dest):
        raise AttributeError, \
            '%s exists, but is not a directory' % release_dest
    rmtree(release_dest)

#if exists(encumbered_dest):
#    if not isdir(encumbered_dest):
#        raise AttributeError, \
#              '%s exists, but is not a directory' % encumbered_dest
#    rmtree(encumbered_dest)

mkdir(release_dest)
#mkdir(encumbered_dest)
mkdir(release_dir)
#mkdir(encumbered_dir)

# Copy the working tree into the staging area, minus VCS and build output.
system('hg update')
system('rsync -av --exclude ".hg*" --exclude build . %s' % release_dir)

# move the time forward on some files by a couple of minutes so we can
# avoid building things unnecessarily
when = int(time.time()) + 120

# make sure scons doesn't try to run flex unnecessarily
#touch(release_dir, 'src/encumbered/eio/exolex.cc', when=(when, when))

# get rid of non-shipping code
#rmtree(release_dir, 'src/encumbered/dev')
rmtree(release_dir, 'src/cpu/ozone')
rmtree(release_dir, 'src/arch/x86')
#rmtree(release_dir, 'src/mem/cache/tags/split*.cc')
#rmtree(release_dir, 'src/mem/cache/tags/split*.hh')
#rmtree(release_dir, 'src/mem/cache/prefetch/ghb_*.cc')
#rmtree(release_dir, 'src/mem/cache/prefetch/ghb_*.hh')
#rmtree(release_dir, 'src/mem/cache/prefetch/stride_*.cc')
#rmtree(release_dir, 'src/mem/cache/prefetch/stride_*.hh')
rmtree(release_dir, 'configs/fullsys')
rmtree(release_dir, 'configs/test')
rmtree(release_dir, 'configs/splash2')
rmtree(release_dir, 'tests/long/*/ref')
rmtree(release_dir, 'tests/old')
rmtree(release_dir, 'tests/quick/00.hello/ref/x86')
rmtree(release_dir, 'tests/test-progs/hello/bin/x86')
rmtree(release_dir, 'src/dev/x86')
remove(release_dir, 'src/cpu/nativetrace.hh')
remove(release_dir, 'src/cpu/nativetrace.cc')
remove(release_dir, 'build_opts/X86_SE')
remove(release_dir, 'build_opts/X86_FS')

# get rid of some of private scripts
remove(release_dir, 'util/chgcopyright')
remove(release_dir, 'util/make_release.py')
def remove_sources(regex, subdir):
    """Drop source lines matching `regex` from the SConscript in `subdir`.

    Rewrites <release_dir>/<subdir>/SConscript in place, omitting every
    line the (compiled) regex matches at its start.
    """
    script = joinpath(release_dir, subdir, 'SConscript')
    if isinstance(regex, str):
        regex = re.compile(regex)
    # Deprecated py2 `file()` replaced by `open()`; context managers close
    # the handles deterministically instead of relying on refcounting.
    with open(script, 'r') as inscript:
        lines = inscript.readlines()
    with open(script, 'w') as outscript:
        for line in lines:
            if regex.match(line):
                continue
            outscript.write(line)
def remove_lines(s_regex, e_regex, f):
    """Delete a region of lines from <release_dir>/<f>, in place.

    Removal starts at any line matching `s_regex` and continues until a
    line matches `e_regex` (that terminating line is kept).  With
    `e_regex` of None, only individually matching lines are dropped.
    """
    f = joinpath(release_dir, f)
    if isinstance(s_regex, str):
        s_regex = re.compile(s_regex)
    if isinstance(e_regex, str):
        e_regex = re.compile(e_regex)
    # Deprecated py2 `file()` replaced by `open()` with context managers so
    # the handles are closed deterministically.
    with open(f, 'r') as infile:
        lines = infile.readlines()
    skipping = False
    with open(f, 'w') as outfile:
        for line in lines:
            if (not skipping and s_regex.match(line)) or \
               (e_regex and skipping and not e_regex.match(line)):
                skipping = True
                continue
            skipping = False
            outfile.write(line)
def replace_line(s_regex, f, rl):
    """Replace every line of <release_dir>/<f> matching `s_regex` with `rl`.

    `rl` is written verbatim, so it must carry its own trailing newline.
    """
    f = joinpath(release_dir, f)
    if isinstance(s_regex, str):
        s_regex = re.compile(s_regex)
    # Deprecated py2 `file()` replaced by `open()` with context managers so
    # the handles are closed deterministically.
    with open(f, 'r') as infile:
        lines = infile.readlines()
    with open(f, 'w') as outfile:
        for line in lines:
            if s_regex.match(line):
                outfile.write(rl)
                continue
            outfile.write(line)
# fix up the SConscript to deal with files we've removed
#remove_sources(r'.*split.*\.cc', 'src/mem/cache/tags')
#remove_sources(r'.*(ghb|stride)_prefetcher\.cc', 'src/mem/cache/prefetch')
remove_sources(r'.*nativetrace.*', 'src/cpu')

# Strip x86-specific regions/lines out of sources that ship in the release.
remove_lines(r'.*X86.*', None, 'src/arch/isa_specific.hh')
remove_lines(r'.*X86.*', None, 'src/base/traceflags.py')
remove_lines(r'.*X86.*', None, 'src/base/loader/object_file.hh')
remove_lines(r'.*_X86_.*', '.*else.*', 'src/base/loader/elf_object.cc')
remove_lines(r'.*X86_ISA.*', r'^.el.*','src/sim/process.cc')
remove_lines(r'.*x86.*', r'.*mips.*','src/cpu/BaseCPU.py')
remove_lines(r'.*X86_ISA.*', r'^.*else.*','src/cpu/o3/dyn_inst.hh')
remove_lines(r'.*X86_ISA.*', r'.*stay.*','src/cpu/simple/base.cc')
remove_lines(r'.*x86.*', r'^if.*','src/cpu/SConscript')
remove_lines(r'.*makeX86System.*', r'.*makeDualRoot.*','configs/common/FSConfig.py')
remove_lines(r'.*X86.*', None, 'configs/example/fs.py')
remove_lines(r'.*x86.*', None, 'configs/example/fs.py')
replace_line(r'.*X86_SE.*', 'util/regress', " 'SPARC_SE,SPARC_FS',")

# Drop benchmark test programs that are not redistributable.
benches = [ 'bzip2', 'eon', 'gzip', 'mcf', 'parser', 'perlbmk',
            'twolf', 'vortex' ]
for bench in benches:
    rmtree(release_dir, 'tests', 'test-progs', bench)

#movedir(release_dir, encumbered_dir, 'src/encumbered')
rmtree(release_dir, 'tests/test-progs/anagram')
rmtree(release_dir, 'tests/quick/20.eio-short')
def taritup(directory, destdir, filename):
    """Bundle *directory* into destdir/filename as a bzip2 tarball,
    archiving it relative to its parent so paths inside the tarball
    start at the directory's own name."""
    parent = dirname(directory)
    leaf = basename(directory)
    archive = joinpath(destdir, filename)
    system('cd %s; tar cfj %s %s' % (parent, archive, leaf))
# Package the pristine tree and remind the operator of the manual step.
taritup(release_dir, destdir, '%s.tar.bz2' % releasename)
#taritup(encumbered_dir, destdir, '%s-encumbered.tar.bz2' % releasename)

print "release created in %s" % destdir
print "don't forget to tag the repository!"
Release: fix make_release bug
--HG--
extra : convert_revision : 4317e5909f23e2b8bf6e8407f0df10fad34e9e35
#!/usr/bin/env python
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Steve Reinhardt
# Nathan Binkert
import os
import re
import shutil
import sys
import time
from glob import glob
from os import system
from os.path import basename, dirname, exists, isdir, isfile, join as joinpath
def mkdir(*args):
    """Create a single directory at joinpath(*args)."""
    os.mkdir(joinpath(*args))
def touch(*args, **kwargs):
    """Update the access/modification times of an existing file.

    Path components are passed positionally; when=(atime, mtime) sets
    explicit times, otherwise the current time is used.  Unlike shell
    touch, the file must already exist.
    """
    os.utime(joinpath(*args), kwargs.get('when', None))
def rmtree(*args):
    """Delete every file or directory tree matching the glob pattern
    built from the given path components; patterns with no matches are
    silently ignored."""
    pattern = joinpath(*args)
    for hit in glob(pattern):
        if not isdir(hit):
            os.unlink(hit)
        else:
            shutil.rmtree(hit)
def remove(*args):
    """Unlink every plain file matching the glob pattern built from the
    given path components; directories that match are left alone."""
    for hit in glob(joinpath(*args)):
        if isdir(hit):
            continue
        os.unlink(hit)
def movedir(srcdir, destdir, dir):
    """Move the subdirectory *dir* from srcdir into destdir, creating
    missing destination parents.

    Raises AttributeError if srcdir/dir is not a directory (kept for
    backward compatibility with existing callers).
    """
    src = joinpath(srcdir, dir)
    dest = joinpath(destdir, dir)
    if not isdir(src):
        raise AttributeError
    # os.makedirs() raises when the parent already exists (no exist_ok
    # in this codebase's Python version), which made the function fail
    # whenever the destination parent was pre-created; only build the
    # parent chain when it is actually missing.
    if not isdir(dirname(dest)):
        os.makedirs(dirname(dest))
    shutil.move(src, dest)
# A release can only be cut from the top of an m5 working copy.
if not isdir('.hg'):
    sys.exit('Not in the top level of an m5 tree!')

usage = '%s <destdir> <release name>' % sys.argv[0]

if len(sys.argv) != 3:
    sys.exit(usage)

destdir = sys.argv[1]
releasename = sys.argv[2]

release_dest = joinpath(destdir, 'release')
#encumbered_dest = joinpath(destdir, 'encumbered')
release_dir = joinpath(release_dest, releasename)
#encumbered_dir = joinpath(encumbered_dest, releasename)

# Create (or validate) the destination layout; any stale release
# directory is wiped so the export starts clean.
if exists(destdir):
    if not isdir(destdir):
        raise AttributeError, '%s exists, but is not a directory' % destdir
else:
    mkdir(destdir)

if exists(release_dest):
    if not isdir(release_dest):
        raise AttributeError, \
              '%s exists, but is not a directory' % release_dest
    rmtree(release_dest)

#if exists(encumbered_dest):
#    if not isdir(encumbered_dest):
#        raise AttributeError, \
#              '%s exists, but is not a directory' % encumbered_dest
#    rmtree(encumbered_dest)

mkdir(release_dest)
#mkdir(encumbered_dest)
mkdir(release_dir)
#mkdir(encumbered_dir)

# Export a pristine working copy (no .hg metadata, no build products).
system('hg update')
system('rsync -av --exclude ".hg*" --exclude build . %s' % release_dir)

# move the time forward on some files by a couple of minutes so we can
# avoid building things unnecessarily
when = int(time.time()) + 120

# make sure scons doesn't try to run flex unnecessarily
#touch(release_dir, 'src/encumbered/eio/exolex.cc', when=(when, when))

# get rid of non-shipping code
#rmtree(release_dir, 'src/encumbered/dev')
rmtree(release_dir, 'src/cpu/ozone')
rmtree(release_dir, 'src/arch/x86')
#rmtree(release_dir, 'src/mem/cache/tags/split*.cc')
#rmtree(release_dir, 'src/mem/cache/tags/split*.hh')
#rmtree(release_dir, 'src/mem/cache/prefetch/ghb_*.cc')
#rmtree(release_dir, 'src/mem/cache/prefetch/ghb_*.hh')
#rmtree(release_dir, 'src/mem/cache/prefetch/stride_*.cc')
#rmtree(release_dir, 'src/mem/cache/prefetch/stride_*.hh')
rmtree(release_dir, 'configs/fullsys')
rmtree(release_dir, 'configs/test')
rmtree(release_dir, 'configs/splash2')
rmtree(release_dir, 'tests/long/*/ref')
rmtree(release_dir, 'tests/old')
rmtree(release_dir, 'tests/quick/00.hello/ref/x86')
rmtree(release_dir, 'tests/test-progs/hello/bin/x86')
rmtree(release_dir, 'src/dev/x86')
remove(release_dir, 'src/cpu/nativetrace.hh')
remove(release_dir, 'src/cpu/nativetrace.cc')
remove(release_dir, 'build_opts/X86_SE')
remove(release_dir, 'build_opts/X86_FS')

# get rid of some of private scripts
remove(release_dir, 'util/chgcopyright')
remove(release_dir, 'util/make_release.py')
def remove_sources(regex, subdir):
    """Drop every line matching *regex* from the SConscript found in
    *subdir* (relative to release_dir)."""
    script = joinpath(release_dir, subdir, 'SConscript')
    if isinstance(regex, str):
        regex = re.compile(regex)
    # Slurp before truncating: the same path is reopened for writing.
    original = file(script, 'r').readlines()
    rewritten = file(script, 'w')
    for line in original:
        if not regex.match(line):
            rewritten.write(line)
    rewritten.close()
def remove_lines(s_regex, e_regex, f):
    """Remove line ranges from file *f* (relative to release_dir).

    A line matching s_regex starts a removed region.  If e_regex is
    given, removal continues until a line matches e_regex (that end
    line is kept); if e_regex is None only the individual matching
    lines are removed.
    """
    f = joinpath(release_dir, f)
    if isinstance(s_regex, str):
        s_regex = re.compile(s_regex)
    if isinstance(e_regex, str):
        e_regex = re.compile(e_regex)
    inscript = file(f, 'r').readlines()
    outscript = file(f, 'w')
    skipping = False
    for line in inscript:
        if (not skipping and s_regex.match(line)) or \
               (e_regex and skipping and not e_regex.match(line)):
            # Only enter "skipping" mode when an end pattern exists;
            # otherwise each matching line is removed independently.
            if e_regex:
                skipping = True
            continue
        skipping = False
        outscript.write(line)
    outscript.close()
def replace_line(s_regex, f, rl):
    """Rewrite *f* (relative to release_dir), substituting the text *rl*
    for every line that matches s_regex.  rl is written verbatim, so it
    must include its own trailing newline."""
    f = joinpath(release_dir, f)
    if isinstance(s_regex, str):
        s_regex = re.compile(s_regex)
    original = file(f, 'r').readlines()
    rewritten = file(f, 'w')
    for line in original:
        if s_regex.match(line):
            rewritten.write(rl)
        else:
            rewritten.write(line)
    rewritten.close()
# fix up the SConscript to deal with files we've removed
#remove_sources(r'.*split.*\.cc', 'src/mem/cache/tags')
#remove_sources(r'.*(ghb|stride)_prefetcher\.cc', 'src/mem/cache/prefetch')
remove_sources(r'.*nativetrace.*', 'src/cpu')

# Strip X86 references from the shipped sources.  A None end pattern
# removes only the matching lines; otherwise whole regions up to (and
# keeping) the end-pattern line are dropped.
remove_lines(r'.*X86.*', None, 'src/arch/isa_specific.hh')
remove_lines(r'.*X86.*', None, 'src/base/traceflags.py')
remove_lines(r'.*X86.*', None, 'src/base/loader/object_file.hh')
remove_lines(r'.*_X86_.*', '.*else.*', 'src/base/loader/elf_object.cc')
remove_lines(r'.*X86_ISA.*', r'^.el.*','src/sim/process.cc')
remove_lines(r'.*x86.*', r'.*mips.*','src/cpu/BaseCPU.py')
remove_lines(r'.*X86_ISA.*', r'^.*else.*','src/cpu/o3/dyn_inst.hh')
remove_lines(r'.*X86_ISA.*', r'.*stay.*','src/cpu/simple/base.cc')
remove_lines(r'.*x86.*', r'^if.*','src/cpu/SConscript')
remove_lines(r'.*makeX86System.*', r'.*makeDualRoot.*','configs/common/FSConfig.py')
remove_lines(r'.*X86.*', None, 'configs/example/fs.py')
remove_lines(r'.*x86.*', None, 'configs/example/fs.py')
replace_line(r'.*X86_SE.*', 'util/regress', "             'SPARC_SE,SPARC_FS',")

# Drop the non-redistributable SPEC benchmark test inputs/references.
benches = [ 'bzip2', 'eon', 'gzip', 'mcf', 'parser', 'perlbmk',
            'twolf', 'vortex' ]
for bench in benches:
    rmtree(release_dir, 'tests', 'test-progs', bench)

#movedir(release_dir, encumbered_dir, 'src/encumbered')
rmtree(release_dir, 'tests/test-progs/anagram')
rmtree(release_dir, 'tests/quick/20.eio-short')
def taritup(directory, destdir, filename):
    """Create destdir/filename as a bzip2 tarball of *directory*,
    archived relative to its parent so tarball paths start at the
    directory's own name."""
    basedir = dirname(directory)
    tarball = joinpath(destdir, filename)
    tardir = basename(directory)
    system('cd %s; tar cfj %s %s' % (basedir, tarball, tardir))
# Package the pristine tree and remind the operator of the manual step.
taritup(release_dir, destdir, '%s.tar.bz2' % releasename)
#taritup(encumbered_dir, destdir, '%s-encumbered.tar.bz2' % releasename)

print "release created in %s" % destdir
print "don't forget to tag the repository!"
|
from argparse import Namespace
import pytest
import torch
from pytorch_lightning.loggers import TensorBoardLogger
def test_tensorboard_automatic_versioning(tmpdir):
    """Verify that automatic versioning works"""
    # Two pre-existing version_N directories -> next auto version is 2.
    root_dir = tmpdir.mkdir("tb_versioning")
    root_dir.mkdir("version_0")
    root_dir.mkdir("version_1")
    logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning")
    assert logger.version == 2
def test_tensorboard_manual_versioning(tmpdir):
    """Verify that manual versioning works"""
    # An explicit version wins even when higher version dirs exist.
    root_dir = tmpdir.mkdir("tb_versioning")
    root_dir.mkdir("version_0")
    root_dir.mkdir("version_1")
    root_dir.mkdir("version_2")
    logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=1)
    assert logger.version == 1
def test_tensorboard_named_version(tmpdir):
    """Verify that manual versioning works for string versions, e.g. '2020-02-05-162402' """
    tmpdir.mkdir("tb_versioning")
    expected_version = "2020-02-05-162402"
    logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=expected_version)
    logger.log_hyperparams({"a": 1, "b": 2})  # Force data to be written
    assert logger.version == expected_version
    # Could also test existence of the directory but this fails
    # in the "minimum requirements" test setup
@pytest.mark.parametrize("name", ['', None])
def test_tensorboard_no_name(tmpdir, name):
    """Verify that None or empty name works"""
    # With no experiment name the root dir collapses to save_dir itself.
    logger = TensorBoardLogger(save_dir=tmpdir, name=name)
    assert logger.root_dir == tmpdir
@pytest.mark.parametrize("step_idx", [10, None])
def test_tensorboard_log_metrics(tmpdir, step_idx):
    """Scalar metrics of several numeric types are accepted, with or
    without an explicit step index."""
    logger = TensorBoardLogger(tmpdir)
    metrics = {
        "float": 0.3,
        "int": 1,
        "FloatTensor": torch.tensor(0.1),
        "IntTensor": torch.tensor(1)
    }
    logger.log_metrics(metrics, step_idx)
def test_tensorboard_log_hyperparams(tmpdir):
    """Hyperparameters of heterogeneous types (including nested dicts,
    Namespaces and classes) can be logged without error."""
    logger = TensorBoardLogger(tmpdir)
    hparams = {
        "float": 0.3,
        "int": 1,
        "string": "abc",
        "bool": True,
        "dict": {'a': {'b': 'c'}},
        "list": [1, 2, 3],
        "namespace": Namespace(foo=Namespace(bar='buzz')),
        "layer": torch.nn.BatchNorm1d
    }
    logger.log_hyperparams(hparams)
def test_tensorboard_log_hparams_and_metrics(tmpdir):
    """Hyperparameters can be logged together with initial metrics.

    Fix: the original definition line lacked the parameter list and
    colon (`def test_tensorboard_log_hparams_and_metrics`), which is a
    SyntaxError, and the body already used `tmpdir`.
    """
    logger = TensorBoardLogger(tmpdir)
    hparams = {
        "float": 0.3,
        "int": 1,
        "string": "abc",
        "bool": True,
        "dict": {'a': {'b': 'c'}},
        "list": [1, 2, 3],
        "namespace": Namespace(foo=Namespace(bar='buzz')),
        "layer": torch.nn.BatchNorm1d
    }
    metrics = {'abc': torch.tensor([0.54])}
    logger.log_hyperparams(hparams, metrics)
tests pep8
from argparse import Namespace
import pytest
import torch
from pytorch_lightning.loggers import TensorBoardLogger
def test_tensorboard_automatic_versioning(tmpdir):
    """Verify that automatic versioning works"""
    # Two pre-existing version_N directories -> next auto version is 2.
    root_dir = tmpdir.mkdir("tb_versioning")
    root_dir.mkdir("version_0")
    root_dir.mkdir("version_1")
    logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning")
    assert logger.version == 2
def test_tensorboard_manual_versioning(tmpdir):
    """Verify that manual versioning works"""
    # An explicit version wins even when higher version dirs exist.
    root_dir = tmpdir.mkdir("tb_versioning")
    root_dir.mkdir("version_0")
    root_dir.mkdir("version_1")
    root_dir.mkdir("version_2")
    logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=1)
    assert logger.version == 1
def test_tensorboard_named_version(tmpdir):
    """Verify that manual versioning works for string versions, e.g. '2020-02-05-162402' """
    tmpdir.mkdir("tb_versioning")
    expected_version = "2020-02-05-162402"
    logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=expected_version)
    logger.log_hyperparams({"a": 1, "b": 2})  # Force data to be written
    assert logger.version == expected_version
    # Could also test existence of the directory but this fails
    # in the "minimum requirements" test setup
@pytest.mark.parametrize("name", ['', None])
def test_tensorboard_no_name(tmpdir, name):
    """Verify that None or empty name works"""
    # Without an experiment name the root dir is save_dir itself.
    tb_logger = TensorBoardLogger(save_dir=tmpdir, name=name)
    assert tb_logger.root_dir == tmpdir
@pytest.mark.parametrize("step_idx", [10, None])
def test_tensorboard_log_metrics(tmpdir, step_idx):
    """Scalar metrics of several numeric types are accepted, with or
    without an explicit step index."""
    scalars = {
        "float": 0.3,
        "int": 1,
        "FloatTensor": torch.tensor(0.1),
        "IntTensor": torch.tensor(1)
    }
    tb_logger = TensorBoardLogger(tmpdir)
    tb_logger.log_metrics(scalars, step_idx)
def test_tensorboard_log_hyperparams(tmpdir):
    """Hyperparameters of heterogeneous types (including nested dicts,
    Namespaces and classes) can be logged without error."""
    logger = TensorBoardLogger(tmpdir)
    hparams = {
        "float": 0.3,
        "int": 1,
        "string": "abc",
        "bool": True,
        "dict": {'a': {'b': 'c'}},
        "list": [1, 2, 3],
        "namespace": Namespace(foo=Namespace(bar='buzz')),
        "layer": torch.nn.BatchNorm1d
    }
    logger.log_hyperparams(hparams)
def test_tensorboard_log_hparams_and_metrics(tmpdir):
    """Hyperparameters can be logged together with initial metrics."""
    logger = TensorBoardLogger(tmpdir)
    hparams = {
        "float": 0.3,
        "int": 1,
        "string": "abc",
        "bool": True,
        "dict": {'a': {'b': 'c'}},
        "list": [1, 2, 3],
        "namespace": Namespace(foo=Namespace(bar='buzz')),
        "layer": torch.nn.BatchNorm1d
    }
    metrics = {'abc': torch.tensor([0.54])}
    logger.log_hyperparams(hparams, metrics)
|
# Copyright (C) 2010-2012 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import stat
import logging
import subprocess
from lib.cuckoo.common.constants import CUCKOO_GUEST_PORT
log = logging.getLogger(__name__)
class Sniffer:
    """Sniffer manager: drives a tcpdump subprocess that captures the
    guest's network traffic to a pcap file."""

    def __init__(self, tcpdump):
        """@param tcpdump: tcpdump path."""
        self.tcpdump = tcpdump
        # Popen handle of the running tcpdump, None until start().
        self.proc = None

    def start(self, interface="eth0", host="", file_path=""):
        """Start sniffing.
        @param interface: network interface name.
        @param host: guest host IP address.
        @param file_path: tcpdump path.
        @return: operation status.
        """
        if not os.path.exists(self.tcpdump):
            log.error("Tcpdump does not exist at path \"%s\", network capture aborted" % self.tcpdump)
            return False
        mode = os.stat(self.tcpdump)[stat.ST_MODE]
        # NOTE(review): `stat.S_ISUID != 2048` compares two constants and is
        # always False, so this setuid check never fires; kept unchanged to
        # preserve behavior -- a real check would test `mode & stat.S_ISUID`.
        if mode and stat.S_ISUID != 2048:
            log.error("Tcpdump is not accessible from this user, network capture aborted")
            return False
        if not interface:
            log.error("Network interface not defined, network capture aborted")
            return False
        pargs = [self.tcpdump, '-U', '-q', '-i', interface, '-n', '-s', '1515']
        pargs.extend(['-w', file_path])
        # Exclude the Cuckoo guest-agent control traffic from the capture.
        pargs.extend(['not', 'port', str(CUCKOO_GUEST_PORT)])
        if host:
            pargs.extend(['and', 'host', host])
        try:
            self.proc = subprocess.Popen(pargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except (OSError, ValueError) as e:
            log.exception("Failed to start sniffer (interface=%s, host=%s, dump path=%s)" % (interface, host, file_path))
            return False
        log.info("Started sniffer (interface=%s, host=%s, dump path=%s)" % (interface, host, file_path))
        return True

    def stop(self):
        """Stop sniffing.
        @return: operation status.
        """
        if self.proc and not self.proc.poll():
            try:
                self.proc.terminate()
            except:
                try:
                    if not self.proc.poll():
                        log.debug("Killing sniffer")
                        self.proc.kill()
                # Fix: the process may die between poll() and kill(), in
                # which case kill() raises OSError; treat that as a clean
                # stop instead of failing.
                except OSError as e:
                    log.debug("Error killing sniffer: %s. Continue" % e)
                    pass
                except Exception as e:
                    log.exception("Unable to stop the sniffer with pid %d" % self.proc.pid)
                    return False
        return True
Another fix for killing an already dead process
# Copyright (C) 2010-2012 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.

import os
import stat
import logging
import subprocess

from lib.cuckoo.common.constants import CUCKOO_GUEST_PORT

log = logging.getLogger(__name__)

class Sniffer:
    """Sniffer manager."""

    def __init__(self, tcpdump):
        """@param tcpdump: tcpdump path."""
        self.tcpdump = tcpdump
        # Popen handle of the running tcpdump, None until start().
        self.proc = None

    def start(self, interface="eth0", host="", file_path=""):
        """Start sniffing.
        @param interface: network interface name.
        @param host: guest host IP address.
        @param file_path: tcpdump path.
        @return: operation status.
        """
        if not os.path.exists(self.tcpdump):
            log.error("Tcpdump does not exist at path \"%s\", network capture aborted" % self.tcpdump)
            return False
        mode = os.stat(self.tcpdump)[stat.ST_MODE]
        # NOTE(review): `stat.S_ISUID != 2048` compares two constants and is
        # always False, so this setuid check never fires; a working check
        # would test `mode & stat.S_ISUID` -- confirm intent before changing.
        if mode and stat.S_ISUID != 2048:
            log.error("Tcpdump is not accessible from this user, network capture aborted")
            return False
        if not interface:
            log.error("Network interface not defined, network capture aborted")
            return False
        pargs = [self.tcpdump, '-U', '-q', '-i', interface, '-n', '-s', '1515']
        pargs.extend(['-w', file_path])
        # Exclude the Cuckoo guest-agent control traffic from the capture.
        pargs.extend(['not', 'port', str(CUCKOO_GUEST_PORT)])
        if host:
            pargs.extend(['and', 'host', host])
        try:
            self.proc = subprocess.Popen(pargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except (OSError, ValueError) as e:
            log.exception("Failed to start sniffer (interface=%s, host=%s, dump path=%s)" % (interface, host, file_path))
            return False
        log.info("Started sniffer (interface=%s, host=%s, dump path=%s)" % (interface, host, file_path))
        return True

    def stop(self):
        """Stop sniffing.
        @return: operation status.
        """
        if self.proc and not self.proc.poll():
            try:
                self.proc.terminate()
            except:
                try:
                    if not self.proc.poll():
                        log.debug("Killing sniffer")
                        self.proc.kill()
                except OSError as e:
                    # Avoid "tying to kill a died process" error.
                    log.debug("Error killing sniffer: %s. Continue" % e)
                    pass
                except Exception as e:
                    log.exception("Unable to stop the sniffer with pid %d" % self.proc.pid)
                    return False
        return True
|
import re
from collections import defaultdict
from typing import Mapping, Iterable
from sqlalchemy.orm.exc import NoResultFound
from models import InheritedMutation, Disease
from models import ClinicalData, or_
from helpers.parsers import tsv_file_iterator
from helpers.parsers import gzip_open_text
from database.bulk import get_highest_id, bulk_orm_insert, restart_autoincrement
from database import db
from .mutation_importer import MutationImporter
from .mutation_importer.helpers import make_metadata_ordered_dict
class MalformedRawError(Exception):
    """Raised when a raw ClinVar row cannot be parsed (e.g. a wrong
    count of sub-entries)."""
    pass
class ClinVarImporter(MutationImporter):
    # Importer identifier used to select this importer.
    name = 'clinvar'
    model = InheritedMutation
    # ANNOVAR-annotated ClinVar variants (tab-separated, gzipped).
    default_path = 'data/mutations/clinvar_muts_annotated.txt.gz'
    # Full ClinVar XML release used to enrich disease associations.
    default_xml_path = 'data/mutations/ClinVarFullRelease_2019-05.xml.gz'
    # Expected column layout of the annotated input file.
    header = [
        'Chr', 'Start', 'End', 'Ref', 'Alt', 'Func.refGene', 'Gene.refGene',
        'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene', 'Otherinfo',
    ]
    # Column order used when bulk-inserting InheritedMutation rows.
    insert_keys = (
        'mutation_id',
        'db_snp_ids',
        'combined_significances',
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Path of the ClinVar XML release; set in load().
        self.xml_path = None
        # Counts of significance strings that could not be mapped.
        self.skipped_significances = defaultdict(int)
    def load(self, path=None, update=False, clinvar_xml_path=None, **ignored_kwargs):
        """Load ClinVar mutations from the annotated file, remembering
        which XML release to use for the disease-association pass."""
        print(
            'Please note that the annovar and XML database needs to be based on the same ClinVar release'
            ' to avoid incorrect removal of variants which are missing metadata (i.e. not found in the XML file)'
        )
        self.xml_path = clinvar_xml_path or self.default_xml_path
        super().load(path, update, **ignored_kwargs)
@staticmethod
def _beautify_disease_name(name):
if '___' in name:
# for edge cases use a robust regexpr
name = re.sub('([^_])_([^_])', r'\1 \2', name).replace('___', ' _ ')
else:
# but for 99% of data, simple .replace() is much faster:
name = name.replace('_', ' ')
return name.replace('%3B', ';')
    def iterate_lines(self, path):
        """Yield tab-separated rows of the gzipped annotated file."""
        return tsv_file_iterator(path, self.header, file_opener=gzip_open_text)
    def test_line(self, line):
        """True if the line parses and carries at least one meaningful
        (non-placeholder) disease sub-entry."""
        try:
            at_least_one_significant_sub_entry, *args = self.parse_metadata(line)
            return at_least_one_significant_sub_entry
        except MalformedRawError:
            return False
clinvar_keys = (
'RS',
'CLNDISDB',
'CLNDN',
'CLNSIG',
'CLNSIGCONF'
)
disease_id_clinvar_to_db = {
'MedGen': 'medgen_id',
'OMIM': 'omim_id',
'SNOMED_CT': 'snomed_ct_id',
'Orphanet': 'orhpanet_id',
'Human_Phenotype_Ontology': 'hpo_id'
}
inverse_significance_map: Mapping[str, int] = {
name.lower(): code
for code, name in ClinicalData.significance_codes.items()
}
significance_map = {
'pathologic': 'pathogenic',
'probable-pathogenic': 'likely pathogenic',
'cancer': 'pathogenic',
'untested': 'not provided',
'variant of unknown significance': 'uncertain significance',
'uncertain': 'uncertain significance',
'drug-response': 'drug response',
'probable-non-pathogenic': 'likely benign',
'probably not pathogenic': 'likely benign',
'non-pathogenic': 'benign',
}
    def parse_significance(self, significance):
        """Map a raw significance string to its numeric code.

        Returns (code, additional) where additional is any extra
        comma-separated significance terms beyond the first; unmapped
        strings fall back to the first mapped term or 'other' and are
        counted in self.skipped_significances.
        """
        significance = significance.lower()
        # normalise legacy/synonymous wordings first
        if significance in self.significance_map:
            significance = self.significance_map[significance]
        additional_significances = []
        if significance not in self.inverse_significance_map:
            assign_to = 'other'
            first_significance, *additional_significances = significance.split(',')
            if first_significance in self.inverse_significance_map:
                assign_to = first_significance
            # warn only the first time a given string is seen
            if significance not in self.skipped_significances:
                print(f'Unmapped significance status: "{significance}", assigning "{assign_to}"')
            self.skipped_significances[significance] += 1
            significance = assign_to
        sig_code = self.inverse_significance_map[significance]
        return sig_code, [sig.strip() for sig in additional_significances]
    def import_disease_associations(self):
        """Add disease association details to the already imported mutation-disease associations"""
        from xml.etree import ElementTree
        from tqdm import tqdm
        import gzip

        # Placeholder traits that carry no disease information.
        ignored_traits = {
            'not specified',
            'not provided'
        }
        accepted_assertions = {
            'variation to disease',
            'variation in modifier gene to disease',
            'variation to included disease',
            'confers sensitivity'
        }
        print('Only including assertions: ', accepted_assertions)
        print('Ignoring traits: ', ignored_traits)
        self.skipped_significances = defaultdict(int)
        accepted_species = {'Human', 'human'}
        skipped_species = set()
        skipped_variation_types = set()
        conflicting_types = set()
        skipped_diseases = set()
        # Only variations already present in the database are of interest.
        variants_of_interest = {
            variation_id
            for variation_id, in db.session.query(ClinicalData.variation_id)
        }
        # otherwise there is no point...
        assert variants_of_interest
        opener = gzip.open if self.xml_path.endswith('.gz') else open
        # First pass: total byte size, used only for the progress bar.
        total_size = 0
        with opener(self.xml_path) as clinvar_full_release:
            for line in clinvar_full_release:
                total_size += len(line)
        step = 0
        with opener(self.xml_path) as clinvar_full_release:
            # Incremental parse keeps memory bounded; elements are
            # cleared once processed.
            tree = iter(ElementTree.iterparse(clinvar_full_release, events=('start', 'end')))
            event, root = next(tree)
            progress_bar = tqdm(total=1000)
            last_progress = 0
            for event, element in tree:
                if event != 'end' or element.tag != 'ClinVarSet':
                    continue
                reference = element.find('ReferenceClinVarAssertion')
                assertion = reference.find('Assertion').attrib['Type']
                # This skips over "confers resistance" and "variant to named protein"
                if assertion not in accepted_assertions:
                    assert assertion in {'confers resistance', 'variant to named protein'}
                    continue
                # variant-disease accession
                rcv_accession = reference.find('ClinVarAccession')
                assert rcv_accession.attrib['Type'] == 'RCV'
                # rcv_accession = rcv_accession.attrib['Acc']
                # variation or variation set, corresponds to InheritedMutation in our database
                variation_set = reference.find('MeasureSet')
                # Note: this skips over minority of records with "GenotypeSet"s
                if not variation_set:
                    assert reference.find('GenotypeSet')
                    continue
                # skip over haplotypes, etc
                variation_set_type = variation_set.attrib['Type']
                if variation_set_type != 'Variant':
                    assert variation_set_type in {'Haplotype', 'Distinct chromosomes', 'Phase unknown'}
                    continue
                # corresponds to InheritedMutation.variation_id
                variation_id = int(variation_set.attrib['ID'])
                if variation_id not in variants_of_interest:
                    # as we effectively have only a fraction of all variations
                    # (non-synonymous SNVs only), this will speed things up
                    continue
                sample = reference.find('ObservedIn/Sample')
                species = sample.find('Species').text
                if species not in accepted_species:
                    if species not in skipped_species:
                        print(f'Skipping non-human species: "{species}"')
                        skipped_species.add(species)
                    continue
                origin = sample.find('Origin').text
                assert reference.find('RecordStatus').text == 'current'
                variations = variation_set.findall('Measure')
                assert len(variations) == 1
                variation = variations[0]
                variation_type = variation.attrib['Type']
                # NOTE(review): this branch only reports non-SNV types; it
                # does NOT `continue`, so such records are still processed
                # below -- confirm whether a skip was intended.
                if variation_type != 'single nucleotide variant':
                    if variation_type not in skipped_variation_types:
                        print(f'Skipping variation type: {variation_type}')
                        skipped_variation_types.add(variation_type)
                # Disease or observation, corresponds to Disease
                trait = reference.find('TraitSet')
                trait_type = trait.attrib['Type']
                trait_name = trait.find('Trait/Name/ElementValue').text
                if trait_name in ignored_traits:
                    continue
                try:
                    disease = Disease.query.filter_by(name=trait_name).one()
                except NoResultFound:
                    resolved = False
                    if 'Mucolipidosis, Type' in trait_name:
                        print(f'Working around changed name for {trait_name}')
                        trait_name = trait_name.replace('Mucolipidosis, Type', 'Mucolipidosis')
                        try:
                            disease = Disease.query.filter_by(name=trait_name).one()
                            resolved = True
                        except NoResultFound:
                            pass
                    if not resolved:
                        skipped_diseases.add(trait_name)
                        print(f'Disease "{trait_name}" entry not found, skipping')
                        continue
                # Reconcile the trait type, preferring 'Disease' on conflict.
                if disease.clinvar_type:
                    if disease.clinvar_type != trait_type:
                        if disease.name not in conflicting_types:
                            conflicting_types.add(disease.name)
                            action = ''
                            if trait_type == 'Disease':
                                disease.clinvar_type = trait_type
                                action = ': overwriting the old type with "Disease"'
                            print(f'Conflicting trait types for "{disease.name}": "{disease.clinvar_type}" != "{trait_type}"{action}')
                else:
                    disease.clinvar_type = trait_type
                significance_annotations = reference.findall('ClinicalSignificance')
                assert len(significance_annotations) == 1
                significance_annotation = significance_annotations[0]
                significance = significance_annotation.find('Description').text
                review_status = significance_annotation.find('ReviewStatus').text
                sig_code, additional_significances = self.parse_significance(significance)
                disease_associations: Iterable[ClinicalData] = (
                    ClinicalData.query
                    .filter(ClinicalData.disease == disease)
                    .filter(ClinicalData.variation_id == variation_id)
                )
                association_values = {
                    'sig_code': sig_code,
                    'rev_status': review_status,
                    'origin': origin
                }
                for disease_association in disease_associations:
                    # warn if we are about to overwrite differing values
                    for key, value in association_values.items():
                        old_value = getattr(disease_association, key)
                        if old_value and old_value != value:
                            print(
                                f'Warning: {key} was already set to {old_value} for {variation_id}/{disease},'
                                f'while the new value is {value} (accession: {rcv_accession})'
                            )
                    disease_association.sig_code = sig_code
                    disease_association.rev_status = review_status
                    # IMPORTANT: every "continue" up to this point means that the mutation
                    # will be removed in remove_muts_without_origin(), because origin will not be set
                    disease_association.origin = origin
                    if additional_significances:
                        disease_association.additional_significances = set(additional_significances)
                # free memory used by the processed XML element
                element.clear()
                step += 1
                if step % 550 == 0:
                    progress = int(clinvar_full_release.tell() / total_size * 1000)
                    if progress != last_progress:
                        progress_bar.update(progress - last_progress)
                        last_progress = progress
                root.clear()
        print(skipped_diseases)
        print(self.skipped_significances)
        db.session.commit()
    def remove_muts_without_origin(self):
        """Delete associations with a blacklisted/missing origin, then
        mutations and diseases left without any associations."""
        origin_blacklist = {
            'not applicable',
            'not provided',
            'not-reported',
            'somatic',
            'tested-inconclusive',
            'unknown'
        }
        print('ClinVar mutations origin blacklist: ', origin_blacklist)
        print('Removing ClinVar associations with blacklisted or missing origin; NOTE:')
        print('\torigin is not set also when the mutation was skipped due to other reasons, such as non-human species')
        removed_cnt = ClinicalData.query.filter(
            or_(ClinicalData.origin == None, ClinicalData.origin.in_(origin_blacklist))
        ).delete(synchronize_session='fetch')
        db.session.commit()
        print(f'Removed {removed_cnt} associations')
        print('Removing orphaned ClinVar mutations (with no associations)')
        empty_mutations_cnt = InheritedMutation.query.filter(~InheritedMutation.clin_data.any()).delete(
            synchronize_session='fetch'
        )
        db.session.commit()
        print(f'Removed {empty_mutations_cnt} ClinVar mutations without associations')
        print('Removing diseases without associations...')
        removed_diseases = Disease.query.filter(~Disease.associations.any()).delete(synchronize_session='fetch')
        print(f'removing {removed_diseases} diseases')
        db.session.commit()
    def _load(self, path, update, **kwargs):
        """Run the base import, then the XML disease-association pass and
        (unless skip_removal=True) the origin-based clean-up."""
        skip_removal = kwargs.pop('skip_removal', False)
        super()._load(path, update, **kwargs)
        self.import_disease_associations()
        if not skip_removal:
            self.remove_muts_without_origin()
    def parse_metadata(self, line):
        """Parse the ClinVar INFO column of a tab-separated row.

        Returns a tuple: (at_least_one_meaningful_sub_entry,
        clinvar_entry, sub_entries_cnt, disease_names, diseases_ids,
        combined_significances, variation_id).

        Raises MalformedRawError when the per-disease sub-entry lists
        have inconsistent lengths.
        """
        # column 20 holds the semicolon-separated ClinVar INFO fields
        metadata = line[20].split(';')
        clinvar_entry = make_metadata_ordered_dict(self.clinvar_keys, metadata)
        # each of these fields is a '|'-separated list, one item per disease
        disease_names, diseases_ids, combined_significances, significances_set = (
            (entry.split('|') if entry else [])
            for entry in
            (
                clinvar_entry[key]
                for key in ('CLNDN', 'CLNDISDB', 'CLNSIG', 'CLNSIGCONF')
            )
        )
        diseases_ids_map = [
            {
                key: ':'.join(values)
                # needed as some ids have colons inside, e.g.:
                # CLNDISDB=Human_Phenotype_Ontology:HP:0002145
                for disease_id in disease_ids.split(',')
                for key, *values in [disease_id.split(':')]
            }
            for disease_ids in diseases_ids
        ]
        # project onto the known databases, keeping a fixed column order
        diseases_ids = [
            [
                disease_ids_map.get(disease_id_clinvar, None)
                for disease_id_clinvar in self.disease_id_clinvar_to_db
            ]
            for disease_ids_map in diseases_ids_map
        ]
        combined_significances = [
            significance.replace('_', ' ')
            for significance in combined_significances
        ]
        assert len(combined_significances) <= 1
        assert not significances_set or len(significances_set) == 1
        # those lengths should be always equal
        assert len(diseases_ids) == len(disease_names)
        sub_entries_cnt = len(disease_names)
        at_least_one_meaningful_sub_entry = False
        for i in range(sub_entries_cnt):
            try:
                if disease_names:
                    if disease_names[i] not in ('not_specified', 'not_provided'):
                        disease_names[i] = self._beautify_disease_name(disease_names[i])
                        at_least_one_meaningful_sub_entry = True
            except IndexError:
                raise MalformedRawError(f'Malformed row (wrong count of sub-entries) on {i}-th entry:')
        # column 15 carries the ClinVar variation identifier
        variation_id = int(line[15])
        return (
            at_least_one_meaningful_sub_entry, clinvar_entry, sub_entries_cnt,
            disease_names, diseases_ids, combined_significances, variation_id
        )
    def parse(self, path):
        """Parse the annotated file into bulk-insert payloads.

        Returns (clinvar_mutations, clinvar_data, new_diseases) where
        clinvar_mutations rows match insert_keys, clinvar_data holds
        (mutation index, disease id, variation id) triples, and
        new_diseases are diseases absent from the database.
        """
        clinvar_mutations = []
        clinvar_data = []
        duplicates = 0
        new_diseases = {}
        # ids for new diseases are assigned past the current maximum
        highest_disease_id = get_highest_id(Disease)

        def clinvar_parser(line):
            # Process one raw row, appending to the accumulators above.
            nonlocal highest_disease_id, duplicates
            try:
                (
                    at_least_one_significant_sub_entry, clinvar_entry, sub_entries_cnt,
                    disease_names, diseases_ids, combined_significances, variation_id
                ) = self.parse_metadata(line)
            except MalformedRawError as e:
                print(str(e) + '\n', line)
                return False
            # following 2 lines are result of issue #47 - we don't import those
            # clinvar mutations that do not have any diseases specified:
            if not at_least_one_significant_sub_entry:
                return
            values = list(clinvar_entry.values())
            # should correspond to insert keys!
            clinvar_mutation_values = [
                {int(rs) for rs in (clinvar_entry['RS'] or '').split('|') if rs},
                set(combined_significances)
            ]
            for mutation_id in self.get_or_make_mutations(line):
                # take care of duplicates
                duplicated = self.look_after_duplicates(mutation_id, clinvar_mutations, values[:1])
                if duplicated:
                    duplicates += 1
                    continue
                # take care of nearly-duplicates
                same_mutation_pointers = self.mutations_details_pointers_grouped_by_unique_mutations[mutation_id]
                assert len(same_mutation_pointers) <= 1
                if same_mutation_pointers:
                    # merge dbSNP ids and significances into the retained row
                    pointer = same_mutation_pointers[0]
                    retained_values = clinvar_mutations[pointer]
                    old = self.data_as_dict(retained_values)
                    new = self.data_as_dict(clinvar_mutation_values, mutation_id=mutation_id)
                    for key in ['db_snp_ids', 'combined_significances']:
                        index = self.insert_keys.index(key)
                        retained_values[index].update(new[key])
                    print(f'Merged SNVs of the same protein mutation ({mutation_id}):\n\t{new}\nand\n\t{old}\n')
                else:
                    # only add the protein-level mutation once
                    self.protect_from_duplicates(mutation_id, clinvar_mutations)
                    clinvar_mutations.append([mutation_id, *clinvar_mutation_values])
                # then add the disease-mutation relations;
                # if these are caused by multiple SNVs (and thus have different variant_id),
                # add them for each of SNVs separately as each can have different sig_code:
                for i in range(sub_entries_cnt):
                    # disease names matching is case insensitive;
                    # NB: MySQL uses case-insensitive unique constraint by default
                    name = disease_names[i]
                    disease_ids = diseases_ids[i]
                    key = name.lower()
                    merged = False
                    # we don't want _uninteresting_ data
                    if name in ('not_specified', 'not_provided'):
                        continue
                    if key in new_diseases:
                        disease_id, (recorded_name, *recorded_ids) = new_diseases[key]
                        merged = True
                    else:
                        try:
                            disease = Disease.query.filter(Disease.name.ilike(name)).one()
                            disease_id = disease.id
                            recorded_name = disease.name
                            recorded_ids = [
                                getattr(disease, id_name, None)
                                for id_name in self.disease_id_clinvar_to_db.values()
                            ]
                            merged = True
                        except NoResultFound:
                            highest_disease_id += 1
                            new_diseases[key] = highest_disease_id, (name, *disease_ids)
                            disease_id = highest_disease_id
                    if merged:
                        if recorded_name != name:
                            print(
                                f'Note: {name} and {recorded_name} diseases were merged'
                                f'(identical in case-insensitive comparison)'
                            )
                        assert recorded_ids == disease_ids
                    # 1-based index of the owning clinvar_mutations row
                    clinvar_data.append(
                        (
                            len(clinvar_mutations),
                            disease_id,
                            variation_id
                        )
                    )

        for line in self.iterate_lines(path):
            clinvar_parser(line)
        print(f'{duplicates} duplicates found')
        return clinvar_mutations, clinvar_data, new_diseases.values()
def export_details_headers(self):
    """Column names for the per-mutation clinical details export."""
    return [
        'disease',
        'significance',
        'has_significance_conflict',
    ]
def export_details(self, mutation):
    """Serialise every clinical record of *mutation* into a row of strings."""
    rows = []
    for record in mutation.clin_data:
        # empty string when no significance was recorded
        significance = record.significance if record.significance else ''
        rows.append([
            record.disease_name,
            significance,
            str(record.has_significance_conflict),
        ])
    return rows
def insert_details(self, details):
    """Persist parsed ClinVar details: new diseases, mutations, then relations."""
    clinvar_mutations, clinvar_data, new_diseases = details
    # disease rows were built in the same column order during parsing
    disease_columns = ('name', *self.disease_id_clinvar_to_db.values())
    bulk_orm_insert(
        Disease,
        disease_columns,
        # the first tuple element (assigned primary key) was only bookkeeping
        [disease_data for pk, disease_data in new_diseases]
    )
    self.insert_list(clinvar_mutations)
    bulk_orm_insert(
        ClinicalData,
        ('inherited_id', 'disease_id', 'variation_id'),
        clinvar_data
    )
def restart_autoincrement(self, model):
    """Reset the auto-increment counters of all ClinVar-related tables."""
    assert self.model == model
    # avoid shadowing the `model` argument with the loop variable
    for table_model in (self.model, ClinicalData, Disease):
        restart_autoincrement(table_model)
    db.session.commit()
def raw_delete_all(self, model):
    """Delete clinical data, diseases and mutations; return removed mutation count."""
    assert self.model == model
    # remove clinical data
    data_cnt = ClinicalData.query.delete()
    # remove diseases
    disease_cnt = Disease.query.delete()
    print(f'{disease_cnt} diseases and {data_cnt} clinical data entries removed')
    # then mutations
    count = self.model.query.delete()
    # count of removed mutations is more informative
    return count
Improve updates handling for newly added IDs for ClinVar
import re
from collections import defaultdict
from typing import Mapping, Iterable
from sqlalchemy.orm.exc import NoResultFound
from models import InheritedMutation, Disease
from models import ClinicalData, or_
from helpers.parsers import tsv_file_iterator
from helpers.parsers import gzip_open_text
from database.bulk import get_highest_id, bulk_orm_insert, restart_autoincrement
from database import db
from .mutation_importer import MutationImporter
from .mutation_importer.helpers import make_metadata_ordered_dict
class MalformedRawError(Exception):
    """Raised when a raw ClinVar annotation row cannot be parsed."""
class ClinVarImporter(MutationImporter):
    """Imports ClinVar mutations from an annovar-annotated TSV plus the XML release."""
    # importer identifier used by the importer framework / CLI
    name = 'clinvar'
    # ORM model populated by this importer
    model = InheritedMutation
    default_path = 'data/mutations/clinvar_muts_annotated.txt.gz'
    default_xml_path = 'data/mutations/ClinVarFullRelease_2019-05.xml.gz'
    # expected column header of the annovar-annotated TSV input
    header = [
        'Chr', 'Start', 'End', 'Ref', 'Alt', 'Func.refGene', 'Gene.refGene',
        'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene', 'Otherinfo',
    ]
    # order must match the value lists built in parse() and merged in clinvar_parser()
    insert_keys = (
        'mutation_id',
        'db_snp_ids',
        'combined_significances',
    )
def __init__(self, *args, **kwargs):
    """Forward arguments to the base importer and set up ClinVar-specific state."""
    super().__init__(*args, **kwargs)
    # path to the ClinVar XML release; assigned in load()
    self.xml_path = None
    # counters of significance labels that could not be mapped to known codes
    self.skipped_significances = defaultdict(int)
def load(self, path=None, update=False, clinvar_xml_path=None, **ignored_kwargs):
    """Run the TSV import; the XML path is stored for the later annotation step."""
    print(
        'Please note that the annovar and XML database needs to be based on the same ClinVar release'
        ' to avoid incorrect removal of variants which are missing metadata (i.e. not found in the XML file)'
    )
    self.xml_path = clinvar_xml_path or self.default_xml_path
    super().load(path, update, **ignored_kwargs)
@staticmethod
def _beautify_disease_name(name):
    """Convert an underscore-encoded ClinVar disease name to readable text."""
    if '___' in name:
        # edge cases with triple underscores need the robust regexp treatment
        readable = re.sub('([^_])_([^_])', r'\1 \2', name)
        readable = readable.replace('___', ' _ ')
    else:
        # for 99% of data a plain substitution is much faster
        readable = name.replace('_', ' ')
    # un-escape URL-encoded semicolons
    return readable.replace('%3B', ';')
def iterate_lines(self, path):
    """Yield parsed rows of the gzipped annovar TSV file at *path*."""
    return tsv_file_iterator(path, self.header, file_opener=gzip_open_text)
def test_line(self, line):
    """Return truthy if *line* contains at least one significant sub-entry."""
    try:
        significant, *_rest = self.parse_metadata(line)
    except MalformedRawError:
        # malformed rows are not importable at all
        return False
    return significant
# INFO field keys extracted from the annovar "Otherinfo" metadata column
clinvar_keys = (
    'RS',
    'CLNDISDB',
    'CLNDN',
    'CLNSIG',
    'CLNSIGCONF'
)
# maps ClinVar disease-database names to Disease model column names
# NOTE(review): 'orhpanet_id' looks misspelled ('orphanet') — presumably it
# matches the existing Disease column name; confirm before renaming
disease_id_clinvar_to_db = {
    'MedGen': 'medgen_id',
    'OMIM': 'omim_id',
    'SNOMED_CT': 'snomed_ct_id',
    'Orphanet': 'orhpanet_id',
    'Human_Phenotype_Ontology': 'hpo_id'
}
# lower-cased significance name -> numeric code, derived from the model
inverse_significance_map: Mapping[str, int] = {
    name.lower(): code
    for code, name in ClinicalData.significance_codes.items()
}
# legacy / synonymous significance labels normalised to canonical ones
significance_map = {
    'pathologic': 'pathogenic',
    'probable-pathogenic': 'likely pathogenic',
    'cancer': 'pathogenic',
    'untested': 'not provided',
    'variant of unknown significance': 'uncertain significance',
    'uncertain': 'uncertain significance',
    'drug-response': 'drug response',
    'probable-non-pathogenic': 'likely benign',
    'probably not pathogenic': 'likely benign',
    'non-pathogenic': 'benign',
}
def parse_significance(self, significance):
    """Map a free-text significance label to (code, additional labels)."""
    label = significance.lower()
    # normalise legacy / synonymous labels first
    label = self.significance_map.get(label, label)
    extra = []
    if label not in self.inverse_significance_map:
        # unknown label: try the first comma-separated part, else 'other'
        head, *extra = label.split(',')
        assign_to = head if head in self.inverse_significance_map else 'other'
        if label not in self.skipped_significances:
            # report each unmapped label only once
            print(f'Unmapped significance status: "{label}", assigning "{assign_to}"')
        self.skipped_significances[label] += 1
        label = assign_to
    sig_code = self.inverse_significance_map[label]
    return sig_code, [part.strip() for part in extra]
def import_disease_associations(self):
    """Add disease association details to the already imported mutation-disease associations"""
    from xml.etree import ElementTree
    from tqdm import tqdm
    import gzip
    # traits carrying no disease information
    ignored_traits = {
        'not specified',
        'not provided'
    }
    # assertion types we consider to describe a disease relation
    accepted_assertions = {
        'variation to disease',
        'variation in modifier gene to disease',
        'variation to included disease',
        'confers sensitivity'
    }
    print('Only including assertions: ', accepted_assertions)
    print('Ignoring traits: ', ignored_traits)
    self.skipped_significances = defaultdict(int)
    accepted_species = {'Human', 'human'}
    # sets used to print each skip reason only once
    skipped_species = set()
    skipped_variation_types = set()
    conflicting_types = set()
    skipped_diseases = set()
    # only variants already imported from the TSV step are of interest
    variants_of_interest = {
        variation_id
        for variation_id, in db.session.query(ClinicalData.variation_id)
    }
    # otherwise there is no point...
    assert variants_of_interest
    opener = gzip.open if self.xml_path.endswith('.gz') else open
    # first pass: measure file size so we can show meaningful progress
    total_size = 0
    with opener(self.xml_path) as clinvar_full_release:
        for line in clinvar_full_release:
            total_size += len(line)
    step = 0
    with opener(self.xml_path) as clinvar_full_release:
        # stream-parse the XML; elements are cleared as we go to bound memory
        tree = iter(ElementTree.iterparse(clinvar_full_release, events=('start', 'end')))
        event, root = next(tree)
        progress_bar = tqdm(total=1000)
        last_progress = 0
        for event, element in tree:
            if event != 'end' or element.tag != 'ClinVarSet':
                continue
            reference = element.find('ReferenceClinVarAssertion')
            assertion = reference.find('Assertion').attrib['Type']
            # This skips over "confers resistance" and "variant to named protein"
            if assertion not in accepted_assertions:
                assert assertion in {'confers resistance', 'variant to named protein'}
                continue
            # variant-disease accession
            rcv_accession = reference.find('ClinVarAccession')
            assert rcv_accession.attrib['Type'] == 'RCV'
            # rcv_accession = rcv_accession.attrib['Acc']
            # variation or variation set, corresponds to InheritedMutation in our database
            variation_set = reference.find('MeasureSet')
            # Note: this skips over minority of records with "GenotypeSet"s
            if not variation_set:
                assert reference.find('GenotypeSet')
                continue
            # skip over haplotypes, etc
            variation_set_type = variation_set.attrib['Type']
            if variation_set_type != 'Variant':
                assert variation_set_type in {'Haplotype', 'Distinct chromosomes', 'Phase unknown'}
                continue
            # corresponds to InheritedMutation.variation_id
            variation_id = int(variation_set.attrib['ID'])
            if variation_id not in variants_of_interest:
                # as we effectively have only a fraction of all variations
                # (non-synonymous SNVs only), this will speed things up
                continue
            sample = reference.find('ObservedIn/Sample')
            species = sample.find('Species').text
            if species not in accepted_species:
                if species not in skipped_species:
                    print(f'Skipping non-human species: "{species}"')
                    skipped_species.add(species)
                continue
            origin = sample.find('Origin').text
            assert reference.find('RecordStatus').text == 'current'
            variations = variation_set.findall('Measure')
            assert len(variations) == 1
            variation = variations[0]
            variation_type = variation.attrib['Type']
            if variation_type != 'single nucleotide variant':
                if variation_type not in skipped_variation_types:
                    print(f'Skipping variation type: {variation_type}')
                    skipped_variation_types.add(variation_type)
                # NOTE(review): despite the "Skipping" message there is no
                # `continue` here, so non-SNV variants are still processed —
                # confirm this is intended
            # Disease or observation, corresponds to Disease
            trait = reference.find('TraitSet')
            trait_type = trait.attrib['Type']
            trait_name = trait.find('Trait/Name/ElementValue').text
            if trait_name in ignored_traits:
                continue
            try:
                disease = Disease.query.filter_by(name=trait_name).one()
            except NoResultFound:
                resolved = False
                if 'Mucolipidosis, Type' in trait_name:
                    # the disease was renamed between releases
                    print(f'Working around changed name for {trait_name}')
                    trait_name = trait_name.replace('Mucolipidosis, Type', 'Mucolipidosis')
                    try:
                        disease = Disease.query.filter_by(name=trait_name).one()
                        resolved = True
                    except NoResultFound:
                        pass
                if not resolved:
                    skipped_diseases.add(trait_name)
                    print(f'Disease "{trait_name}" entry not found, skipping')
                    continue
            if disease.clinvar_type:
                if disease.clinvar_type != trait_type:
                    if disease.name not in conflicting_types:
                        conflicting_types.add(disease.name)
                        action = ''
                        if trait_type == 'Disease':
                            # prefer the "Disease" trait type over observations
                            disease.clinvar_type = trait_type
                            action = ': overwriting the old type with "Disease"'
                        # NOTE(review): when the type was just overwritten above,
                        # disease.clinvar_type already equals trait_type here, so
                        # the message shows the new value twice — confirm intended
                        print(f'Conflicting trait types for "{disease.name}": "{disease.clinvar_type}" != "{trait_type}"{action}')
            else:
                disease.clinvar_type = trait_type
            significance_annotations = reference.findall('ClinicalSignificance')
            assert len(significance_annotations) == 1
            significance_annotation = significance_annotations[0]
            significance = significance_annotation.find('Description').text
            review_status = significance_annotation.find('ReviewStatus').text
            sig_code, additional_significances = self.parse_significance(significance)
            disease_associations: Iterable[ClinicalData] = (
                ClinicalData.query
                .filter(ClinicalData.disease == disease)
                .filter(ClinicalData.variation_id == variation_id)
            )
            association_values = {
                'sig_code': sig_code,
                'rev_status': review_status,
                'origin': origin
            }
            for disease_association in disease_associations:
                # warn when we are about to overwrite a differing value
                for key, value in association_values.items():
                    old_value = getattr(disease_association, key)
                    if old_value and old_value != value:
                        print(
                            f'Warning: {key} was already set to {old_value} for {variation_id}/{disease},'
                            f'while the new value is {value} (accession: {rcv_accession})'
                        )
                disease_association.sig_code = sig_code
                disease_association.rev_status = review_status
                # IMPORTANT: every "continue" up to this point means that the mutation
                # will be removed in remove_muts_without_origin(), because origin will not be set
                disease_association.origin = origin
                if additional_significances:
                    disease_association.additional_significances = set(additional_significances)
            element.clear()
            step += 1
            if step % 550 == 0:
                # scale progress to the 0-1000 range used by the bar
                progress = int(clinvar_full_release.tell() / total_size * 1000)
                if progress != last_progress:
                    progress_bar.update(progress - last_progress)
                    last_progress = progress
                # free memory held by already-processed elements
                root.clear()
    print(skipped_diseases)
    print(self.skipped_significances)
    db.session.commit()
def remove_muts_without_origin(self):
    """Drop associations with blacklisted/missing origin and orphaned records."""
    # origins that do not represent inherited (germline) mutations
    origin_blacklist = {
        'not applicable',
        'not provided',
        'not-reported',
        'somatic',
        'tested-inconclusive',
        'unknown'
    }
    print('ClinVar mutations origin blacklist: ', origin_blacklist)
    print('Removing ClinVar associations with blacklisted or missing origin; NOTE:')
    print('\torigin is not set also when the mutation was skipped due to other reasons, such as non-human species')
    removed_cnt = ClinicalData.query.filter(
        or_(ClinicalData.origin == None, ClinicalData.origin.in_(origin_blacklist))
    ).delete(synchronize_session='fetch')
    db.session.commit()
    print(f'Removed {removed_cnt} associations')
    print('Removing orphaned ClinVar mutations (with no associations)')
    empty_mutations_cnt = InheritedMutation.query.filter(~InheritedMutation.clin_data.any()).delete(
        synchronize_session='fetch'
    )
    db.session.commit()
    print(f'Removed {empty_mutations_cnt} ClinVar mutations without associations')
    print('Removing diseases without associations...')
    removed_diseases = Disease.query.filter(~Disease.associations.any()).delete(synchronize_session='fetch')
    print(f'removing {removed_diseases} diseases')
    db.session.commit()
def _load(self, path, update, **kwargs):
    """Run the base TSV import, then XML annotation and (optionally) cleanup."""
    skip_removal = kwargs.pop('skip_removal', False)
    super()._load(path, update, **kwargs)
    self.import_disease_associations()
    if not skip_removal:
        self.remove_muts_without_origin()
def parse_metadata(self, line):
    """Parse the ClinVar metadata column of a TSV row.

    Returns a tuple of (has_meaningful_sub_entry, clinvar_entry,
    sub_entries_cnt, disease_names, diseases_ids, combined_significances,
    variation_id). Raises MalformedRawError for rows with inconsistent
    sub-entry counts.
    """
    # the 21st TSV column holds the ';'-separated ClinVar INFO fields
    metadata = line[20].split(';')
    clinvar_entry = make_metadata_ordered_dict(self.clinvar_keys, metadata)
    # each field may hold multiple '|'-separated sub-entries
    disease_names, diseases_ids, combined_significances, significances_set = (
        (entry.split('|') if entry else [])
        for entry in
        (
            clinvar_entry[key]
            for key in ('CLNDN', 'CLNDISDB', 'CLNSIG', 'CLNSIGCONF')
        )
    )
    diseases_ids_map = [
        {
            key: ':'.join(values)
            # needed as some ids have colons inside, e.g.:
            # CLNDISDB=Human_Phenotype_Ontology:HP:0002145
            for disease_id in disease_ids.split(',')
            for key, *values in [disease_id.split(':')]
        }
        for disease_ids in diseases_ids
    ]
    # re-shape into lists ordered like disease_id_clinvar_to_db columns
    diseases_ids = [
        [
            disease_ids_map.get(disease_id_clinvar, None)
            for disease_id_clinvar in self.disease_id_clinvar_to_db
        ]
        for disease_ids_map in diseases_ids_map
    ]
    combined_significances = [
        significance.replace('_', ' ')
        for significance in combined_significances
    ]
    assert len(combined_significances) <= 1
    assert not significances_set or len(significances_set) == 1
    # those lengths should be always equal
    assert len(diseases_ids) == len(disease_names)
    sub_entries_cnt = len(disease_names)
    at_least_one_meaningful_sub_entry = False
    for i in range(sub_entries_cnt):
        try:
            if disease_names:
                # skip placeholder disease names
                if disease_names[i] not in ('not_specified', 'not_provided'):
                    disease_names[i] = self._beautify_disease_name(disease_names[i])
                    at_least_one_meaningful_sub_entry = True
        except IndexError:
            raise MalformedRawError(f'Malformed row (wrong count of sub-entries) on {i}-th entry:')
    # the 16th TSV column holds the ClinVar variation identifier
    variation_id = int(line[15])
    return (
        at_least_one_meaningful_sub_entry, clinvar_entry, sub_entries_cnt,
        disease_names, diseases_ids, combined_significances, variation_id
    )
def parse(self, path):
    """Parse the annovar TSV at *path* into mutations, relations and diseases.

    Returns (clinvar_mutations, clinvar_data, new_diseases) suitable for
    insert_details().
    """
    clinvar_mutations = []
    clinvar_data = []
    duplicates = 0
    new_diseases = {}
    highest_disease_id = get_highest_id(Disease)

    def clinvar_parser(line):
        """Parse a single TSV row, accumulating into the enclosing lists."""
        nonlocal highest_disease_id, duplicates
        try:
            (
                at_least_one_significant_sub_entry, clinvar_entry, sub_entries_cnt,
                disease_names, diseases_ids, combined_significances, variation_id
            ) = self.parse_metadata(line)
        except MalformedRawError as e:
            print(str(e) + '\n', line)
            return False
        # following 2 lines are result of issue #47 - we don't import those
        # clinvar mutations that do not have any diseases specified:
        if not at_least_one_significant_sub_entry:
            return
        values = list(clinvar_entry.values())
        # should correspond to insert keys!
        clinvar_mutation_values = [
            {int(rs) for rs in (clinvar_entry['RS'] or '').split('|') if rs},
            set(combined_significances)
        ]
        for mutation_id in self.get_or_make_mutations(line):
            # take care of duplicates
            duplicated = self.look_after_duplicates(mutation_id, clinvar_mutations, values[:1])
            if duplicated:
                duplicates += 1
                continue
            # take care of nearly-duplicates
            same_mutation_pointers = self.mutations_details_pointers_grouped_by_unique_mutations[mutation_id]
            assert len(same_mutation_pointers) <= 1
            if same_mutation_pointers:
                pointer = same_mutation_pointers[0]
                retained_values = clinvar_mutations[pointer]
                old = self.data_as_dict(retained_values)
                new = self.data_as_dict(clinvar_mutation_values, mutation_id=mutation_id)
                for key in ['db_snp_ids', 'combined_significances']:
                    index = self.insert_keys.index(key)
                    retained_values[index].update(new[key])
                print(f'Merged SNVs of the same protein mutation ({mutation_id}):\n\t{new}\nand\n\t{old}\n')
            else:
                # only add the protein-level mutation once
                self.protect_from_duplicates(mutation_id, clinvar_mutations)
                clinvar_mutations.append([mutation_id, *clinvar_mutation_values])
            # then add the disease-mutation relations;
            # if these are caused by multiple SNVs (and thus have different variant_id),
            # add them for each of SNVs separately as each can have different sig_code:
            for i in range(sub_entries_cnt):
                # disease names matching is case insensitive;
                # NB: MySQL uses case-insensitive unique constraint by default
                name = disease_names[i]
                disease_ids = diseases_ids[i]
                key = name.lower()
                merged = False
                # we don't want _uninteresting_ data
                if name in ('not_specified', 'not_provided'):
                    continue
                if key in new_diseases:
                    disease_id, (recorded_name, *recorded_ids) = new_diseases[key]
                    merged = True
                else:
                    try:
                        disease = Disease.query.filter(Disease.name.ilike(name)).one()
                        disease_id = disease.id
                        recorded_name = disease.name
                        recorded_ids = [
                            getattr(disease, id_name, None)
                            for id_name in self.disease_id_clinvar_to_db.values()
                        ]
                        merged = True
                    except NoResultFound:
                        highest_disease_id += 1
                        new_diseases[key] = highest_disease_id, (name, *disease_ids)
                        disease_id = highest_disease_id
                if merged:
                    if recorded_name != name:
                        print(
                            f'Note: {name} and {recorded_name} diseases were merged'
                            f' (identical in case-insensitive comparison)'
                        )
                    # BUG FIX: the original asserted the lengths to be UNEQUAL
                    # (`!=`), but both lists are always built over the same
                    # disease_id_clinvar_to_db keys — the assert would fire on
                    # every merged disease; a sanity check for equality is meant
                    assert len(disease_ids) == len(recorded_ids)
                    different_ids = [
                        id_label
                        for i, id_label in enumerate(self.disease_id_clinvar_to_db)
                        if recorded_ids[i] != disease_ids[i]
                    ]
                    if any(different_ids):
                        # NOTE(review): nothing writes disease_ids back for
                        # diseases merged from the database — confirm the
                        # "was kept" claim below
                        print(
                            f'Note: {name} identifiers differ from {recorded_name} identifiers'
                            f' {disease_ids} vs {recorded_ids} ({different_ids} differ).'
                            f' The newer set of ids ({disease_ids}) was kept.'
                        )
                clinvar_data.append(
                    (
                        len(clinvar_mutations),
                        disease_id,
                        variation_id
                    )
                )

    for line in self.iterate_lines(path):
        clinvar_parser(line)
    print(f'{duplicates} duplicates found')
    return clinvar_mutations, clinvar_data, new_diseases.values()
def export_details_headers(self):
    """Column names for the per-mutation clinical details export."""
    return ['disease', 'significance', 'has_significance_conflict']
def export_details(self, mutation):
    """Serialise every clinical record of *mutation* into a row of strings."""
    return [
        [d.disease_name, d.significance or '', str(d.has_significance_conflict)]
        for d in mutation.clin_data
    ]
def insert_details(self, details):
    """Persist parsed ClinVar details: diseases, then mutations, then relations."""
    clinvar_mutations, clinvar_data, new_diseases = details
    # disease rows were built in the same column order during parsing
    disease_columns = ('name', *self.disease_id_clinvar_to_db.values())
    # the first tuple element (assigned primary key) was only bookkeeping
    disease_rows = [disease_data for _pk, disease_data in new_diseases]
    bulk_orm_insert(Disease, disease_columns, disease_rows)
    self.insert_list(clinvar_mutations)
    relation_columns = ('inherited_id', 'disease_id', 'variation_id')
    bulk_orm_insert(ClinicalData, relation_columns, clinvar_data)
def restart_autoincrement(self, model):
    """Reset auto-increment counters of the mutation, data and disease tables."""
    assert self.model == model
    # note: the loop variable shadows the `model` argument (already checked above)
    for model in [self.model, ClinicalData, Disease]:
        restart_autoincrement(model)
    db.session.commit()
def raw_delete_all(self, model):
    """Delete clinical data, diseases and mutations; return removed mutation count."""
    assert self.model == model
    # associations first, then diseases, then the mutations themselves
    data_cnt = ClinicalData.query.delete()
    disease_cnt = Disease.query.delete()
    print(f'{disease_cnt} diseases and {data_cnt} clinical data entries removed')
    # the number of removed mutations is the most informative figure
    return self.model.query.delete()
|
from datetime import date
from django.test import TestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from frontpage.management.magic import compile_markdown
from frontpage.management import media_actions
from frontpage.models import Profile
from .init_database import init_db
import os
class TestManagementGenericFunctions(TestCase):
    """Tests for generic management helpers (markdown compilation, media uploads)."""
    test_image_path = ""

    def setUp(self):
        # Setup articles for get_article_pcs_free here
        this_dir, this_filename = os.path.split(__file__)
        self.test_image_path = os.path.join(this_dir, "testdata", "cc-test-image.jpg")
        # write uploads relative to the working directory during tests
        media_actions.PATH_TO_UPLOAD_FOLDER_ON_DISK = ""
        init_db()

    def test_markdown_generation(self):
        # Only test this thing not throwing exceptions
        # (assertEqual — assertEquals is a deprecated alias)
        self.assertEqual(
            compile_markdown("# Test MD"),
            '<h1 id="test-md">Test MD</h1>'
        )

    def test_image_upload(self):
        # since pillow runs unit tests as well we'll simply check for the existance of the images
        p: Profile = Profile.objects.all()[0]
        # BUG FIX: close the file handle deterministically (it was left open)
        with open(self.test_image_path, 'rb') as f:
            img = SimpleUploadedFile(f.name, f.read(), content_type="image/jpg")
        media_actions.handle_file(p, "A Test image headline", "Silly Test Images",
                                  "This is a more detailed description of a CC test image", img)
        assumed_path_hr = "uploads/" + str(date.today().year) + "/HIGHRES_cc-test-image.jpg"
        assumed_path_lr = "uploads/" + str(date.today().year) + "/LOWRES_cc-test-image.jpg"
        self.assertTrue(os.path.isfile(assumed_path_hr))
        self.assertTrue(os.path.isfile(assumed_path_lr))
        # Clean up FS
        os.remove(assumed_path_lr)
        os.remove(assumed_path_hr)
        if not os.listdir("uploads/" + str(date.today().year)):
            os.rmdir("uploads/" + str(date.today().year))
        if not os.listdir("uploads"):
            os.rmdir("uploads")
chg: Also require the cropped file to be smaller than the original file
from datetime import date
from django.test import TestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from frontpage.management.magic import compile_markdown
from frontpage.management import media_actions
from frontpage.models import Profile
from .init_database import init_db
import os
class TestManagementGenericFunctions(TestCase):
    """Tests for generic management helpers (markdown compilation, media uploads)."""
    test_image_path = ""

    def setUp(self):
        # Setup articles for get_article_pcs_free here
        this_dir, this_filename = os.path.split(__file__)
        self.test_image_path = os.path.join(this_dir, "testdata", "cc-test-image.jpg")
        # write uploads relative to the working directory during tests
        media_actions.PATH_TO_UPLOAD_FOLDER_ON_DISK = ""
        init_db()

    def test_markdown_generation(self):
        # Only test this thing not throwing exceptions
        # (assertEqual — assertEquals is a deprecated alias)
        self.assertEqual(
            compile_markdown("# Test MD"),
            '<h1 id="test-md">Test MD</h1>'
        )

    def test_image_upload(self):
        # since pillow runs unit tests as well we'll simply check for the existance of the images
        p: Profile = Profile.objects.all()[0]
        # BUG FIX: close the file handle deterministically (it was left open)
        with open(self.test_image_path, 'rb') as f:
            img = SimpleUploadedFile(f.name, f.read(), content_type="image/jpg")
        media_actions.handle_file(p, "A Test image headline", "Silly Test Images",
                                  "This is a more detailed description of a CC test image", img)
        assumed_path_hr = "uploads/" + str(date.today().year) + "/HIGHRES_cc-test-image.jpg"
        assumed_path_lr = "uploads/" + str(date.today().year) + "/LOWRES_cc-test-image.jpg"
        self.assertTrue(os.path.isfile(assumed_path_hr))
        self.assertTrue(os.path.isfile(assumed_path_lr))
        # the cropped (low-res) file must be smaller than the original
        self.assertGreater(os.path.getsize(assumed_path_hr), os.path.getsize(assumed_path_lr))
        # Clean up FS
        os.remove(assumed_path_lr)
        os.remove(assumed_path_hr)
        if not os.listdir("uploads/" + str(date.today().year)):
            os.rmdir("uploads/" + str(date.today().year))
        if not os.listdir("uploads"):
            os.rmdir("uploads")
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import requests
import time
from pydruid.db import connect
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.dbapi_hook import DbApiHook
class DruidHook(BaseHook):
    """
    Connection to Druid overlord for ingestion

    :param druid_ingest_conn_id: The connection id to the Druid overlord machine
                                 which accepts index jobs
    :type druid_ingest_conn_id: string
    :param timeout: The interval between polling
                    the Druid job for the status of the ingestion job
    :type timeout: int
    :param max_ingestion_time: The maximum ingestion time before assuming the job failed
    :type max_ingestion_time: int
    """
    def __init__(
            self,
            druid_ingest_conn_id='druid_ingest_default',
            timeout=1,
            max_ingestion_time=None):
        self.druid_ingest_conn_id = druid_ingest_conn_id
        self.timeout = timeout
        self.max_ingestion_time = max_ingestion_time
        self.header = {'content-type': 'application/json'}

    def get_conn_url(self):
        """Build the overlord indexing endpoint URL from the configured connection."""
        conn = self.get_connection(self.druid_ingest_conn_id)
        host = conn.host
        port = conn.port
        conn_type = 'http' if not conn.conn_type else conn.conn_type
        endpoint = conn.extra_dejson.get('endpoint', '')
        return "{conn_type}://{host}:{port}/{endpoint}".format(**locals())

    def submit_indexing_job(self, json_index_spec):
        """Submit *json_index_spec* to Druid and block until the job finishes.

        Raises AirflowException on submission failure, job failure, unknown
        status, or when max_ingestion_time is exceeded (killing the job first).
        """
        url = self.get_conn_url()
        req_index = requests.post(url, json=json_index_spec, headers=self.header)
        if req_index.status_code != 200:
            raise AirflowException('Did not get 200 when '
                                   'submitting the Druid job to {}'.format(url))
        req_json = req_index.json()
        # Wait until the job is completed
        druid_task_id = req_json['task']
        running = True
        sec = 0
        while running:
            req_status = requests.get("{0}/{1}/status".format(url, druid_task_id))
            self.log.info("Job still running for %s seconds...", sec)
            if self.max_ingestion_time and sec > self.max_ingestion_time:
                # ensure that the job gets killed if the max ingestion time is exceeded
                requests.post("{0}/{1}/shutdown".format(url, druid_task_id))
                # BUG FIX: exception messages are not lazily %-formatted like
                # log calls; interpolate explicitly
                raise AirflowException('Druid ingestion took more than '
                                       '{} seconds'.format(self.max_ingestion_time))
            time.sleep(self.timeout)
            # track elapsed time in actual seconds (polling interval)
            sec = sec + self.timeout
            status = req_status.json()['status']['status']
            if status == 'RUNNING':
                running = True
            elif status == 'SUCCESS':
                running = False  # Great success!
            elif status == 'FAILED':
                raise AirflowException('Druid indexing job failed, '
                                       'check console for more info')
            else:
                # BUG FIX: interpolate the status into the message
                raise AirflowException('Could not get status of the job, got {}'.format(status))
        self.log.info('Successful index')
class DruidDbApiHook(DbApiHook):
    """
    Interact with Druid broker

    This hook is purely for users to query druid broker.
    For ingestion, please use druidHook.
    """
    conn_name_attr = 'druid_broker_conn_id'
    default_conn_name = 'druid_broker_default'
    supports_autocommit = False

    def __init__(self, *args, **kwargs):
        super(DruidDbApiHook, self).__init__(*args, **kwargs)

    def get_conn(self):
        """
        Establish a connection to druid broker.
        """
        conn = self.get_connection(self.druid_broker_conn_id)
        # NOTE(review): the URL scheme is read from the 'schema' extra key —
        # confirm the key name is intended (vs 'scheme')
        druid_broker_conn = connect(
            host=conn.host,
            port=conn.port,
            path=conn.extra_dejson.get('endpoint', '/druid/v2/sql'),
            scheme=conn.extra_dejson.get('schema', 'http')
        )
        self.log.info('Get the connection to druid '
                      'broker on {host}'.format(host=conn.host))
        return druid_broker_conn

    def get_uri(self):
        """
        Get the connection uri for druid broker.

        e.g: druid://localhost:8082/druid/v2/sql/
        """
        conn = self.get_connection(getattr(self, self.conn_name_attr))
        host = conn.host
        if conn.port is not None:
            host += ':{port}'.format(port=conn.port)
        conn_type = 'druid' if not conn.conn_type else conn.conn_type
        endpoint = conn.extra_dejson.get('endpoint', 'druid/v2/sql')
        return '{conn_type}://{host}/{endpoint}'.format(
            conn_type=conn_type, host=host, endpoint=endpoint)

    # the broker is queried read-only; DML helpers are intentionally unsupported
    def set_autocommit(self, conn, autocommit):
        raise NotImplementedError()

    def get_pandas_df(self, sql, parameters=None):
        raise NotImplementedError()

    def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
        raise NotImplementedError()
Revert "[AIRFLOW-2860] DruidHook: time variable is not updated correctly when checking for timeout (#3707)"
This reverts commit d12aacd552878308f9b1c3663414bb7c00c0632b.
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import requests
import time
from pydruid.db import connect
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.dbapi_hook import DbApiHook
class DruidHook(BaseHook):
    """
    Connection to Druid overlord for ingestion

    :param druid_ingest_conn_id: The connection id to the Druid overlord machine
                                 which accepts index jobs
    :type druid_ingest_conn_id: string
    :param timeout: The interval between polling
                    the Druid job for the status of the ingestion job
    :type timeout: int
    :param max_ingestion_time: The maximum ingestion time before assuming the job failed
    :type max_ingestion_time: int
    """
    def __init__(
            self,
            druid_ingest_conn_id='druid_ingest_default',
            timeout=1,
            max_ingestion_time=None):
        self.druid_ingest_conn_id = druid_ingest_conn_id
        self.timeout = timeout
        self.max_ingestion_time = max_ingestion_time
        self.header = {'content-type': 'application/json'}

    def get_conn_url(self):
        """Build the overlord indexing endpoint URL from the configured connection."""
        conn = self.get_connection(self.druid_ingest_conn_id)
        host = conn.host
        port = conn.port
        conn_type = 'http' if not conn.conn_type else conn.conn_type
        endpoint = conn.extra_dejson.get('endpoint', '')
        return "{conn_type}://{host}:{port}/{endpoint}".format(**locals())

    def submit_indexing_job(self, json_index_spec):
        """Submit *json_index_spec* to Druid and block until the job finishes.

        Raises AirflowException on submission failure, job failure, unknown
        status, or when max_ingestion_time is exceeded (killing the job first).
        """
        url = self.get_conn_url()
        req_index = requests.post(url, json=json_index_spec, headers=self.header)
        if req_index.status_code != 200:
            raise AirflowException('Did not get 200 when '
                                   'submitting the Druid job to {}'.format(url))
        req_json = req_index.json()
        # Wait until the job is completed
        druid_task_id = req_json['task']
        running = True
        sec = 0
        while running:
            req_status = requests.get("{0}/{1}/status".format(url, druid_task_id))
            self.log.info("Job still running for %s seconds...", sec)
            # counts polling iterations, not wall-clock seconds (see revert of
            # AIRFLOW-2860); compared against max_ingestion_time below
            sec = sec + 1
            if self.max_ingestion_time and sec > self.max_ingestion_time:
                # ensure that the job gets killed if the max ingestion time is exceeded
                requests.post("{0}/{1}/shutdown".format(url, druid_task_id))
                # BUG FIX: exception messages are not lazily %-formatted like
                # log calls; interpolate explicitly
                raise AirflowException('Druid ingestion took more than '
                                       '{} seconds'.format(self.max_ingestion_time))
            time.sleep(self.timeout)
            status = req_status.json()['status']['status']
            if status == 'RUNNING':
                running = True
            elif status == 'SUCCESS':
                running = False  # Great success!
            elif status == 'FAILED':
                raise AirflowException('Druid indexing job failed, '
                                       'check console for more info')
            else:
                # BUG FIX: interpolate the status into the message
                raise AirflowException('Could not get status of the job, got {}'.format(status))
        self.log.info('Successful index')
class DruidDbApiHook(DbApiHook):
    """
    Interact with Druid broker

    This hook is purely for users to query druid broker.
    For ingestion, please use druidHook.
    """
    conn_name_attr = 'druid_broker_conn_id'
    default_conn_name = 'druid_broker_default'
    supports_autocommit = False

    def __init__(self, *args, **kwargs):
        super(DruidDbApiHook, self).__init__(*args, **kwargs)

    def get_conn(self):
        """
        Establish a connection to druid broker.
        """
        conn = self.get_connection(self.druid_broker_conn_id)
        # NOTE(review): the URL scheme is read from the 'schema' extra key —
        # confirm the key name is intended (vs 'scheme')
        druid_broker_conn = connect(
            host=conn.host,
            port=conn.port,
            path=conn.extra_dejson.get('endpoint', '/druid/v2/sql'),
            scheme=conn.extra_dejson.get('schema', 'http')
        )
        self.log.info('Get the connection to druid '
                      'broker on {host}'.format(host=conn.host))
        return druid_broker_conn

    def get_uri(self):
        """
        Get the connection uri for druid broker.

        e.g: druid://localhost:8082/druid/v2/sql/
        """
        conn = self.get_connection(getattr(self, self.conn_name_attr))
        host = conn.host
        if conn.port is not None:
            host += ':{port}'.format(port=conn.port)
        conn_type = 'druid' if not conn.conn_type else conn.conn_type
        endpoint = conn.extra_dejson.get('endpoint', 'druid/v2/sql')
        return '{conn_type}://{host}/{endpoint}'.format(
            conn_type=conn_type, host=host, endpoint=endpoint)

    # the broker is queried read-only; DML helpers are intentionally unsupported
    def set_autocommit(self, conn, autocommit):
        raise NotImplementedError()

    def get_pandas_df(self, sql, parameters=None):
        raise NotImplementedError()

    def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
        raise NotImplementedError()
|
import sputnik
from django.db import transaction
from booki.utils.log import logBookHistory, logChapterHistory
from booki.editor import models
from booki.editor.views import getVersion
from booki.utils import security
try:
from booki.settings import OBJAVI_URL
except ImportError:
OBJAVI_URL = "http://objavi.flossmanuals.net/objavi.cgi"
try:
from booki.settings import THIS_BOOKI_SERVER
except:
import os
THIS_BOOKI_SERVER = os.environ.get('HTTP_HOST', 'booki.flossmanuals.net')
# this couple of functions should go to models.BookVersion
def getTOCForBook(version):
    """Flatten a BookVersion's table of contents into plain tuples.

    Chapters become (id, title, url_title, typeof, status_id); sections
    (ToC entries without a chapter) become ('s<id>', name, name, typeof).
    """
    toc = []
    for item in version.getTOC():
        chapter = item.chapter
        # is it a section or chapter?
        if chapter:
            toc.append((chapter.id,
                        chapter.title,
                        chapter.url_title,
                        item.typeof,
                        chapter.status.id))
        else:
            toc.append(('s%s' % item.id, item.name, item.name, item.typeof))
    return toc
def getHoldChapters(book_version):
    """List the version's hold chapters as (id, title, url_title, 1, status_id) tuples."""
    held = []
    for ch in book_version.getHoldChapters():
        held.append((ch.id, ch.title, ch.url_title, 1, ch.status.id))
    return held
def getAttachments(book_version):
    """Describe this version's attachments for the client.

    Returns a list of dicts with id, image dimension (jpg only),
    status id, base file name, creation timestamp and file size.
    """
    import os.path
    import Image  # PIL (old-style import) — only used for jpg dimensions

    def _getDimension(att):
        # Only JPEGs get a real dimension; unreadable files report (0, 0),
        # everything else None.
        if att.attachment.name.endswith(".jpg"):
            try:
                im = Image.open(att.attachment.name)
                return im.size
            except:
                return (0, 0)
        return None

    # Entries with a missing file on disk are filtered out by the
    # trailing "if att.attachment".
    attachments = [{"id": att.id,
                    "dimension": _getDimension(att),
                    "status": att.status.id,
                    "name": os.path.split(att.attachment.name)[1],
                    "created": str(att.created.strftime("%d.%m.%Y %H:%M:%S")),
                    "size": att.attachment.size}
                   for att in book_version.getAttachments().order_by("attachment") if att.attachment]

    return attachments
def remote_init_editor(request, message, bookid, version):
    """Bootstrap an editor session for one book version.

    Collects everything the client needs on startup (ToC, hold chapters,
    channel users, workflow statuses, attachments, metadata, licenses and
    live per-chapter edit locks) and announces this user to the other
    clients over the sputnik channels.

    :param request: sputnik-enhanced request (carries user and sputnikID)
    :param message: incoming sputnik message; only "channel" is read here
    :param bookid: primary key of the models.Book being edited
    :param version: version identifier, resolved via getVersion()
    :returns: dict with the full initial editor state
    """
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)

    ## get chapters
    chapters = getTOCForBook(book_version)
    holdChapters = getHoldChapters(book_version)

    ## get users
    def _getUserName(a):
        # Bold the requesting client's own entry in the user list.
        if a == request.sputnikID:
            return "<b>%s</b>" % a
        return a

    try:
        users = [_getUserName(m) for m in list(sputnik.smembers("sputnik:channel:%s:channel" % message["channel"]))]
    except:
        # Best effort: if sputnik is unreachable, fall back to an empty list.
        users = []

    ## get workflow statuses
    statuses = [(status.id, status.name) for status in models.BookStatus.objects.filter(book=book).order_by("-weight")]

    ## get attachments
    try:
        attachments = getAttachments(book_version)
    except:
        attachments = []

    ## get metadata
    metadata = [{'name': v.name, 'value': v.getValue()} for v in models.Info.objects.filter(book=book)]

    ## notify others
    sputnik.addMessageToChannel(request, "/chat/%s/" % bookid,
                                {"command": "user_joined",
                                 "user_joined": request.user.username},
                                myself = False)

    ## get licenses
    licenses = [(elem.abbrevation, elem.name) for elem in models.License.objects.all().order_by("name")]

    ## get online users
    try:
        _onlineUsers = sputnik.smembers("sputnik:channel:%s:users" % message["channel"])
    except:
        _onlineUsers = []

    # Register this user as online if not yet present (best effort).
    if request.user.username not in _onlineUsers:
        try:
            sputnik.sadd("sputnik:channel:%s:users" % message["channel"], request.user.username)
            _onlineUsers.append(request.user.username)
        except:
            pass

    ## get mood message for current user
    ## send mood as separate message
    ## set notifications to other clients
    profile = request.user.get_profile()
    if profile:
        moodMessage = profile.mood;
    else:
        moodMessage = ''

    sputnik.addMessageToChannel(request,
                                "/booki/book/%s/%s/" % (bookid, version),
                                {"command": "user_add",
                                 "username": request.user.username,
                                 "mood": moodMessage}
                                )

    ## get online users and their mood messages
    from django.contrib.auth.models import User

    def _getUser(_user):
        # Resolve a username into (username, mood); None when the lookup fails,
        # so failed entries are dropped by the filter below.
        try:
            _u = User.objects.get(username=_user)
            return (_user, _u.get_profile().mood)
        except:
            return None

    onlineUsers = [x for x in [_getUser(x) for x in _onlineUsers] if x]

    # for now, this is one big temp here
    import time, decimal, re
    _now = time.time()
    locks = {}

    try:
        for key in sputnik.rkeys("booki:*:locks:*"):
            lastAccess = sputnik.get(key)

            # Stored timestamps may come back as (unicode) strings.
            if type(lastAccess) in [type(' '), type(u' ')]:
                try:
                    lastAccess = decimal.Decimal(lastAccess)
                except:
                    continue

            # Locks touched within the last 30 seconds are considered live.
            if lastAccess and decimal.Decimal("%f" % _now) - lastAccess <= 30:
                m = re.match("booki:(\d+):locks:(\d+):(\w+)", key)
                if m:
                    if m.group(1) == bookid:
                        # chapter id -> username holding the lock
                        locks[m.group(2)] = m.group(3)
    except:
        pass

    return {"licenses": licenses,
            "chapters": chapters,
            "metadata": metadata,
            "hold": holdChapters,
            "users": users,
            "locks": locks,
            "statuses": statuses,
            "attachments": attachments,
            "onlineUsers": list(onlineUsers)}
def remote_attachments_list(request, message, bookid, version):
    """Return the attachment list for the requested book version."""
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)
    try:
        result = getAttachments(book_version)
    except:
        # Attachment scanning is best effort; fall back to an empty list.
        result = []
    return {"attachments": result}
def remote_attachments_delete(request, message, bookid, version):
    """Delete the attachments listed in the message (book admins only)."""
    # TODO: must check security
    book = models.Book.objects.get(id=bookid)
    bookSecurity = security.getUserSecurityForBook(request.user, book)
    if not bookSecurity.isAdmin():
        return {"result": False}
    for attachment_id in message['attachments']:
        models.Attachment.objects.get(pk=attachment_id).delete()
    transaction.commit()
    return {"result": True}
def remote_chapter_status(request, message, bookid, version):
    """Broadcast a chapter status change; drop the edit lock on 'normal'."""
    new_status = message["status"]
    if new_status == "normal":
        sputnik.rdelete("booki:%s:locks:%s:%s" % (bookid, message["chapterID"], request.user.username))
    payload = {"command": "chapter_status",
               "chapterID": message["chapterID"],
               "status": new_status,
               "username": request.user.username}
    sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version), payload)
    return {}
def remote_change_status(request, message, bookid, version):
    """Set a chapter's workflow status and notify the editor and chat channels."""
    # message carries chapterID and statusID
    chapter = models.Chapter.objects.get(id=int(message["chapterID"]))
    new_status = models.BookStatus.objects.get(id=int(message["statusID"]))
    chapter.status = new_status
    try:
        chapter.save()
        sputnik.addMessageToChannel(
            request, "/booki/book/%s/%s/" % (bookid, version),
            {"command": "change_status",
             "chapterID": message["chapterID"],
             "statusID": int(message["statusID"]),
             "username": request.user.username})
        sputnik.addMessageToChannel(
            request, "/chat/%s/" % bookid,
            {"command": "message_info",
             "from": request.user.username,
             "message": 'User %s has changed status of chapter "%s" to "%s".' % (request.user.username, chapter.title, new_status.name)},
            myself=True)
    except:
        transaction.rollback()
    else:
        transaction.commit()
    return {}
def remote_chapter_save(request, message, bookid, version):
    """Save chapter content sent from the editor.

    Unless the save is flagged "minor", a ChapterHistory revision and a
    linked BookHistory entry are recorded and the revision counter is
    bumped. The chapter itself is saved inside a manually managed
    transaction. When the client is done editing (message['continue'] is
    false) the edit lock is released and 'normal' status is broadcast.
    """
    # TODO
    # put this outside in common module
    # or maybe even better, put it in the Model
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)
    chapter = models.Chapter.objects.get(id=int(message["chapterID"]))

    if message.get("minor", False) != True:
        # Non-minor saves are versioned: record the chapter revision and a
        # book-level history entry pointing at it.
        history = logChapterHistory(chapter = chapter,
                                    content = message["content"],
                                    user = request.user,
                                    comment = message.get("comment", ""),
                                    revision = chapter.revision+1)
        logBookHistory(book = chapter.book,
                       version = book_version,
                       chapter = chapter,
                       chapter_history = history,
                       user = request.user,
                       args = {"comment": message.get("comment", ""),
                               "author": message.get("author", ""),
                               "authorcomment": message.get("authorcomment", "")},
                       kind = 'chapter_save')
        chapter.revision += 1

    chapter.content = message["content"];

    try:
        chapter.save()
        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid, {"command": "message_info",
                                                                    "from": request.user.username,
                                                                    "message": 'User %s has saved chapter "%s".' % (request.user.username, chapter.title)}, myself=True)
    except:
        transaction.rollback()
    else:
        transaction.commit()

    if not message['continue']:
        # Editing session is over: broadcast 'normal' status and drop the lock.
        sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                    {"command": "chapter_status",
                                     "chapterID": message["chapterID"],
                                     "status": "normal",
                                     "username": request.user.username})
        sputnik.rdelete("booki:%s:locks:%s:%s" % (bookid, message["chapterID"], request.user.username))

    # fire the signal
    import booki.editor.signals
    booki.editor.signals.chapter_modified.send(sender = book_version, chapter = chapter, user = request.user)

    return {}
def remote_chapter_rename(request, message, bookid, version):
    """Rename a chapter and broadcast the change.

    On success the rename is logged to the book history, announced in
    chat, the chapter status is reset to 'normal' and the new title is
    pushed to all editors; on failure the transaction is rolled back.
    """
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)
    chapter = models.Chapter.objects.get(id=int(message["chapterID"]))

    oldTitle = chapter.title
    chapter.title = message["chapter"];

    try:
        chapter.save()
    except:
        transaction.rollback()
    else:
        logBookHistory(book = chapter.book,
                       version = book_version,
                       chapter = chapter,
                       user = request.user,
                       args = {"old": oldTitle, "new": message["chapter"]},
                       kind = "chapter_rename")

        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid,
                                    {"command": "message_info",
                                     "from": request.user.username,
                                     "message": 'User %s has renamed chapter "%s" to "%s".' % (request.user.username, oldTitle, message["chapter"])},
                                    myself=True)

        sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                    {"command": "chapter_status",
                                     "chapterID": message["chapterID"],
                                     "status": "normal",
                                     "username": request.user.username})

        sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                    {"command": "chapter_rename",
                                     "chapterID": message["chapterID"],
                                     "chapter": message["chapter"]})

        transaction.commit()

    return {}
def remote_chapters_changed(request, message, bookid, version):
    """Persist a ToC reorder (and optional removal) sent by the client.

    Weights are assigned in descending list order so the first entry is
    the heaviest. Chapters dragged in from the hold list get a fresh
    BookToc row created on the fly.
    """
    # NOTE(review): the [5:] slice presumably strips a fixed-length UI
    # element-id prefix; section ids additionally start with 's' — confirm
    # against the client code.
    lst = [chap[5:] for chap in message["chapters"]]
    lstHold = [chap[5:] for chap in message["hold"]]

    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)

    weight = len(lst)

    logBookHistory(book = book,
                   version = book_version,
                   user = request.user,
                   kind = "chapter_reorder")

    for chap in lst:
        # Entries starting with 's' are sections (BookToc rows without a chapter).
        if chap[0] == 's':
            m = models.BookToc.objects.get(id__exact=int(chap[1:]))
            m.weight = weight
            m.save()
        else:
            try:
                m = models.BookToc.objects.get(chapter__id__exact=int(chap))
                m.weight = weight
                m.save()
            except:
                # Chapter had no ToC entry yet (e.g. moved in from hold):
                # create one at this position.
                chptr = models.Chapter.objects.get(id__exact=int(chap))
                m = models.BookToc(book = book,
                                   version = book_version,
                                   name = "SOMETHING",
                                   chapter = chptr,
                                   weight = weight,
                                   typeof=1)
                m.save()
        weight -= 1

    if message["kind"] == "remove":
        # A string chapter_id starting with 's' denotes a section row id.
        if type(message["chapter_id"]) == type(u' ') and message["chapter_id"][0] == 's':
            m = models.BookToc.objects.get(id__exact=message["chapter_id"][1:])
            m.delete()
        else:
            m = models.BookToc.objects.get(chapter__id__exact=int(message["chapter_id"]))
            m.delete()

    # addMessageToChannel(request, "/chat/%s/%s/" % (projectid, bookid), {"command": "message_info", "from": request.user.username, "message": 'User %s has rearranged chapters.' % request.user.username})

    sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                {"command": "chapters_changed",
                                 "ids": lst,
                                 "hold_ids": lstHold,
                                 "kind": message["kind"],
                                 "chapter_id": message["chapter_id"]})

    # TODO
    # this should be changed, to check for errors
    transaction.commit()

    return {}
def remote_get_users(request, message, bookid, version):
    """List the users on this sputnik channel, marking the requesting client."""
    def _mark(name):
        # The requesting client's own name is wrapped in exclamation marks.
        return "!%s!" % name if name == request.sputnikID else name

    members = sputnik.smembers("sputnik:channel:%s:channel" % message["channel"])
    return {"users": [_mark(member) for member in list(members)]}
def remote_get_chapter(request, message, bookid, version):
    """Fetch a chapter's title and content; optionally take the edit lock."""
    chapter = models.Chapter.objects.get(id=int(message["chapterID"]))
    res = {"title": chapter.title, "content": chapter.content}

    if not message.get("lock", True):
        return res

    import time
    # Start the lock timer for this editor session and tell everyone the
    # chapter is now being edited by this user.
    sputnik.set("booki:%s:locks:%s:%s" % (bookid, message["chapterID"], request.user.username), time.time())
    sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                {"command": "chapter_status",
                                 "chapterID": message["chapterID"],
                                 "status": "edit",
                                 "username": request.user.username})
    return res
def remote_book_notification(request, message, bookid, version):
    """Keep-alive ping from an editor: refresh the lock, report kill requests."""
    res = {}
    import time
    username = request.user.username
    if username and username != '':
        # Refresh the editing-lock timestamp for this user/chapter.
        sputnik.set("booki:%s:locks:%s:%s" % (bookid, message["chapterID"], username), time.time())
        # An admin may have flagged this lock to be killed; acknowledge and clear.
        if '%s' % sputnik.get("booki:%s:killlocks:%s:%s" % (bookid, message["chapterID"], username)) == '1':
            sputnik.rdelete("booki:%s:killlocks:%s:%s" % (bookid, message["chapterID"], username))
            res = {"kill": "please"}
    return res
def remote_chapter_split(request, message, bookid, version):
    """Replace one chapter with several new chapters split out of it.

    Creates a Chapter (and, when the original sat in the ToC, a BookToc
    entry near the original's position) for every (title, content) pair
    in message["chapters"], deletes the original chapter, rebalances all
    ToC weights and broadcasts the new structure.
    """
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)

    logBookHistory(book = book,
                   version = book_version,
                   user = request.user,
                   kind = 'chapter_split')

    allChapters = []

    try:
        originalChapter = models.Chapter.objects.get(id=int(message["chapterID"]))
    except:
        originalChapter = None

    try:
        tocChapter = models.BookToc.objects.get(book=book, chapter__id__exact=message["chapterID"])
    except:
        tocChapter = None

    import datetime
    from django.template.defaultfilters import slugify

    if tocChapter:
        allChapters = [chap for chap in models.BookToc.objects.filter(book=book).order_by("-weight")]
        # Position of the original chapter within the weight-ordered ToC.
        initialPosition = len(allChapters)-tocChapter.weight
    else:
        initialPosition = 0

    # Default status: the first status by ascending weight for this book.
    s = models.BookStatus.objects.filter(book=book).order_by("weight")[0]

    n = 0
    for chap in message["chapters"]:
        # chap is a (title, content) pair; the title is re-inserted as an <h1>.
        chapter = models.Chapter(book = book,
                                 url_title = slugify(chap[0]),
                                 title = chap[0],
                                 status = s,
                                 content = '<h1>%s</h1>%s' % (chap[0], chap[1]),
                                 created = datetime.datetime.now(),
                                 modified = datetime.datetime.now())
        chapter.save()

        if tocChapter:
            m = models.BookToc(book = book,
                               chapter = chapter,
                               name = chap[0],
                               weight = 0,
                               typeof = 1)
            m.save()
            # Keep the new entries in order right after the original's slot.
            allChapters.insert(1+initialPosition+n, m)
            n += 1

    if originalChapter:
        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid, {"command": "message_info", "from": request.user.username, "message": 'User %s has split chapter "%s".' % (request.user.username, originalChapter.title)}, myself=True)
        originalChapter.delete()

    if tocChapter:
        tocChapter.delete()

    # Reassign descending weights over the re-ordered ToC; failures on
    # individual rows are ignored.
    n = len(allChapters)
    for chap in allChapters:
        try:
            chap.weight = n
            chap.save()
            n -= 1
        except:
            pass

    ## get chapters
    chapters = getTOCForBook(book_version)
    holdChapters = getHoldChapters(book_version)

    sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                {"command": "chapter_split",
                                 "chapterID": message["chapterID"],
                                 "chapters": chapters,
                                 "hold": holdChapters,
                                 "username": request.user.username},
                                myself = True)

    transaction.commit()

    return {}
def remote_create_chapter(request, message, bookid, version):
    """Create a new chapter in this book version.

    Returns {"created": False} when a chapter with the same slug already
    exists or the save fails; on success the creation is logged, both
    channels are notified and {"created": True} is returned.
    """
    import datetime

    # should use the BookVersion here (translated from Croatian: "BookVersion treba uzeti")
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)

    from django.template.defaultfilters import slugify
    url_title = slugify(message["chapter"])

    # here i should probably set it to default project status
    s = models.BookStatus.objects.filter(book=book).order_by("weight")[0]

    ch = models.Chapter.objects.filter(book=book, version=book_version, url_title=url_title)

    if len(list(ch)) > 0:
        # Duplicate slug within this version: refuse to create.
        return {"created": False}

    content = u'<h1>%s</h1>' % message["chapter"]

    chapter = models.Chapter(book = book,
                             version = book_version,
                             url_title = url_title,
                             title = message["chapter"],
                             status = s,
                             content = content,
                             created = datetime.datetime.now(),
                             modified = datetime.datetime.now())

    try:
        chapter.save()
    except:
        transaction.rollback()
        return {"created": False}
    else:
        # Record both the chapter-level and book-level history entries.
        history = logChapterHistory(chapter = chapter,
                                    content = content,
                                    user = request.user,
                                    comment = message.get("comment", ""),
                                    revision = chapter.revision)
        logBookHistory(book = book,
                       version = book_version,
                       chapter = chapter,
                       chapter_history = history,
                       user = request.user,
                       kind = 'chapter_create')
        transaction.commit()

    # Tuple format mirrors getTOCForBook's chapter entries.
    result = (chapter.id, chapter.title, chapter.url_title, 1, s.id)

    sputnik.addMessageToChannel(request, "/chat/%s/" % bookid, {"command": "message_info",
                                                                "from": request.user.username,
                                                                "message": 'User %s has created new chapter "%s".' % (request.user.username, message["chapter"])},
                                myself=True)

    sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version), {"command": "chapter_create", "chapter": result}, myself = True)

    return {"created": True}
def remote_publish_book(request, message, bookid, version):
    """Publish the book through the external Objavi service.

    Announces the publish in chat, assembles the Objavi request from the
    message options and POSTs it to OBJAVI_URL. Objavi replies with one
    URL per line; the first two lines are returned as "dta"/"dtas3".
    """
    book = models.Book.objects.get(id=bookid)

    sputnik.addMessageToChannel(request, "/chat/%s/" % bookid, {"command": "message_info",
                                                                "from": request.user.username,
                                                                "message": '"%s" is being published.' % (book.title, )},
                                myself=True)

    import urllib2
    import urllib

    publishMode = message.get("publish_mode", "epub")
    destination = "nowhere"

    if message.get("is_archive", False):
        destination = "archive.org"

    args = {'book': book.url_title,
            'project': 'export',
            'mode': publishMode,
            'server': THIS_BOOKI_SERVER,
            'destination': destination,
            'max-age': 0,
            }

    def _isSet(name):
        # Forward an option only when the client actually sent a truthy
        # value; grey_scale is normalised to the string 'yes'.
        if message.get(name, None):
            if name == 'grey_scale':
                args['grey_scale'] = 'yes'
            else:
                args[name] = message.get(name)

    _isSet('title')
    _isSet('license')
    _isSet('isbn')
    _isSet('toc_header')
    _isSet('booksize')
    _isSet('page_width')
    _isSet('page_height')
    _isSet('top_margin')
    _isSet('side_margin')
    _isSet('gutter')
    _isSet('columns')
    _isSet('column_margin')
    _isSet('grey_scale')
    _isSet('css')

    data = urllib.urlencode(args)
    req = urllib2.Request(OBJAVI_URL, data)
    f = urllib2.urlopen(req)
    # f = urllib2.urlopen("%s?book=%s&project=export&mode=%s&server=booki.flossmanuals.net&destination=%s" % (urlPublish, book.url_title, publishMode, destination))

    # Objavi's plain-text response: one URL per line.
    ta = f.read()
    lst = ta.split("\n")
    dta, dtas3 = "", ""

    if len(lst) > 0:
        dta = lst[0]
    if len(lst) > 1:
        dtas3 = lst[1]

    return {"dtaall": ta, "dta": dta, "dtas3": dtas3}
def remote_create_section(request, message, bookid, version):
    """Create a new (empty) section in the book's ToC.

    Returns {"created": False} when a same-named section already exists
    or the save fails; on success "created" carries the section tuple in
    getTOCForBook's section format.
    """
    import datetime

    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)

    ch = models.BookToc.objects.filter(book=book,
                                       version=book_version,
                                       name=message['chapter'],
                                       typeof=0)

    if len(list(ch)) > 0:
        # Duplicate section name in this version: refuse to create.
        return {"created": False}

    c = models.BookToc(book = book,
                       version = book_version,
                       name = message["chapter"],
                       chapter = None,
                       weight = 0,
                       typeof=0)

    result = True

    try:
        c.save()
    except:
        result = False
        transaction.rollback()
    else:
        logBookHistory(book = book,
                       version = book_version,
                       user = request.user,
                       args = {"title": message["chapter"]},
                       kind = 'section_create')
        transaction.commit()

        # NOTE(review): on success ``result`` is replaced by the section
        # tuple, so "created" is a (truthy) tuple rather than True — the
        # client appears to consume the tuple payload; confirm before changing.
        result = ("s%s" % c.id, c.name, None, c.typeof)

        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid, {"command": "message_info",
                                                                    "from": request.user.username,
                                                                    "message": 'User %s has created new section "%s".' % (request.user.username, message["chapter"])},
                                    myself=True)

        sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                    {"command": "chapter_create",
                                     "chapter": result,
                                     "typeof": c.typeof},
                                    myself = True)

    return {"created": result}
def remote_get_history(request, message, bookid, version):
    """Return one page (50 entries, newest first) of the book's history.

    Bug fix: the first branch originally matched kinds [1, 2, 3], which
    made the dedicated ``kind == 2`` branch — the one that also reports
    the chapter_history id for "save" entries — unreachable dead code.
    Kind 2 is now excluded from the first branch so save entries include
    their chapter_history reference. (Also dropped an unused
    ``import datetime``.)
    """
    from booki.editor.common import parseJSON

    book = models.Book.objects.get(id=bookid)

    page = int(message.get("page", 1))

    book_history = models.BookHistory.objects.filter(book=book).order_by("-modified")[(page-1)*50:(page-1)*50+50]

    # Numeric history kinds -> human-readable labels.
    temp = {0: 'unknown',
            1: 'create',
            2: 'save',
            3: 'rename',
            4: 'reorder',
            5: 'split',
            6: 'section create',
            10: 'book create',
            11: 'minor',
            12: 'major',
            13: 'attachment'}

    history = []
    for entry in book_history:
        if entry.kind in [1, 3] and entry.chapter:
            history.append({"chapter": entry.chapter.title,
                            "chapter_url": entry.chapter.url_title,
                            "modified": entry.modified.strftime("%d.%m.%Y %H:%M:%S"),
                            "description": entry.args,
                            "user": entry.user.username,
                            "kind": temp.get(entry.kind,'')})
        elif entry.kind == 2 and entry.chapter:
            # Saves additionally expose the ChapterHistory id so the client
            # can link to the exact revision.
            history.append({"chapter": entry.chapter.title,
                            "chapter_url": entry.chapter.url_title,
                            "chapter_history": entry.chapter_history.id,
                            "modified": entry.modified.strftime("%d.%m.%Y %H:%M:%S"),
                            "description": entry.args,
                            "user": entry.user.username,
                            "kind": temp.get(entry.kind,'')})
        elif entry.kind in [11, 12]:
            # Version bumps store the version descriptor as JSON in args.
            history.append({"modified": entry.modified.strftime("%d.%m.%Y %H:%M:%S"),
                            "version": parseJSON(entry.args),
                            "user": entry.user.username,
                            "kind": temp.get(entry.kind,'')})
        elif entry.kind in [13]:
            history.append({"modified": entry.modified.strftime("%d.%m.%Y %H:%M:%S"),
                            "args": parseJSON(entry.args),
                            "user": entry.user.username,
                            "kind": temp.get(entry.kind,'')})
        else:
            history.append({"modified": entry.modified.strftime("%d.%m.%Y %H:%M:%S"),
                            "description": entry.args,
                            "user": entry.user.username,
                            "kind": temp.get(entry.kind,'')})

    return {"history": history}
def remote_get_chapter_history(request, message, bookid, version):
    """Return the revision history for one chapter, newest first.

    Improvement: dropped an unused ``import datetime`` and built the
    result with a comprehension. ``getVersion`` is still called for its
    validation side effect even though the value is unused.
    """
    from booki.editor.views import getVersion

    book = models.Book.objects.get(id=bookid)
    book_ver = getVersion(book, version)

    chapter_history = models.ChapterHistory.objects.filter(
        chapter__book=book,
        chapter__url_title=message["chapter"]).order_by("-modified")

    history = [{"chapter": entry.chapter.title,
                "chapter_url": entry.chapter.url_title,
                "modified": entry.modified.strftime("%d.%m.%Y %H:%M:%S"),
                "user": entry.user.username,
                "revision": entry.revision,
                "comment": entry.comment}
               for entry in chapter_history]

    return {"history": history}
def remote_revert_revision(request, message, bookid, version):
    """Revert a chapter to an earlier revision.

    The revert itself is recorded as a new ChapterHistory revision plus a
    linked BookHistory entry; then the chapter content is overwritten
    with the old revision's content and saved under a manual transaction.
    """
    from booki.editor.views import getVersion

    book = models.Book.objects.get(id=bookid)
    book_ver = getVersion(book, version)

    chapter = models.Chapter.objects.get(version=book_ver, url_title=message["chapter"])

    revision = models.ChapterHistory.objects.get(revision=message["revision"], chapter__url_title=message["chapter"], chapter__version=book_ver.id)

    # TODO
    # does chapter history really needs to keep content or it can only keep reference to chapter
    history = logChapterHistory(chapter = chapter,
                                content = revision.content,
                                user = request.user,
                                comment = "Reverted to revision %s." % message["revision"],
                                revision = chapter.revision+1)

    logBookHistory(book = book,
                   version = book_ver,
                   chapter = chapter,
                   chapter_history = history,
                   user = request.user,
                   args = {},
                   kind = 'chapter_save')

    chapter.revision += 1
    chapter.content = revision.content;

    try:
        chapter.save()
    except:
        transaction.rollback()
    else:
        transaction.commit()

        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid,
                                    {"command": "message_info",
                                     "from": request.user.username,
                                     "message": 'User %s has reverted chapter "%s" to revision %s.' % (request.user.username, chapter.title, message["revision"])}, myself=True)

    return {}
def remote_get_chapter_revision(request, message, bookid, version):
    """Return a single chapter revision as a dict, or {} when not found.

    Improvement: dropped the unused ``import datetime`` and unused
    ``getVersion`` import that the original carried.
    """
    book = models.Book.objects.get(id=bookid)

    try:
        revision = models.ChapterHistory.objects.get(chapter__book=book, chapter__url_title=message["chapter"], revision=message["revision"])

        return {"chapter": revision.chapter.title,
                "chapter_url": revision.chapter.url_title,
                "modified": revision.modified.strftime("%d.%m.%Y %H:%M:%S"),
                "user": revision.user.username,
                "revision": revision.revision,
                "version": '%d.%d' % (revision.chapter.version.major, revision.chapter.version.minor),
                "content": revision.content,
                "comment": revision.comment}
    except:
        # Best effort: any lookup/formatting problem yields an empty result.
        return {}
def remote_get_notes(request, message, bookid, version):
    """Return all notes attached to this book.

    Improvement: dropped the unused ``import datetime`` and replaced the
    manual append loop with a comprehension.
    """
    book = models.Book.objects.get(id=bookid)
    notes = [{"notes": entry.notes}
             for entry in models.BookNotes.objects.filter(book=book)]
    return {"notes": notes}
def remote_notes_save(request, message, bookid, version):
    """Create or update the single BookNotes record for this book."""
    book = models.Book.objects.get(id=bookid)
    book_notes = models.BookNotes.objects.filter(book=book)

    notes = message.get("notes")

    book_notes_obj = None
    if len(book_notes) == 0:
        # First save for this book: create the notes row.
        book_notes_obj = models.BookNotes( book = book , notes = notes)
    else:
        book_notes_obj = book_notes[0]
        book_notes_obj.notes = notes

    try:
        book_notes_obj.save()
    except:
        transaction.rollback()
    else:
        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid, {"command": "message_info",
                                                                    "from": request.user.username,
                                                                    "message": 'User %s has saved notes for book "%s".' % (request.user.username, book.title)}, myself=True)
        transaction.commit()

    return {}
def remote_unlock_chapter(request, message, bookid, version):
    """Force-release all editing locks on a chapter (admin user 'booki' only).

    Sets a kill flag for every user currently holding a lock on the
    chapter; their editors pick it up via remote_book_notification.

    Fix: the regex is now a raw string — ``"\\d"``/``"\\w"`` in a plain
    string are invalid string escapes (DeprecationWarning, future error).
    """
    import re

    if request.user.username == 'booki':
        for key in sputnik.rkeys("booki:%s:locks:%s:*" % (bookid, message["chapterID"])):
            m = re.match(r"booki:(\d+):locks:(\d+):(\w+)", key)
            if m:
                # group(3) is the username holding the lock.
                sputnik.set("booki:%s:killlocks:%s:%s" % (bookid, message["chapterID"], m.group(3)), 1)

    return {}
def remote_get_versions(request, message, bookid, version):
    """Return all saved versions of this book, newest first."""
    book = models.Book.objects.get(id=bookid)
    versions = []
    for v in models.BookVersion.objects.filter(book=book).order_by("-created"):
        versions.append({"major": v.major,
                         "minor": v.minor,
                         "name": v.name,
                         "description": v.description,
                         "created": str(v.created.strftime('%a, %d %b %Y %H:%M:%S GMT'))})
    return {"versions": versions}
# put this outside of this module
# put this outside of this module
def create_new_version(book, book_ver, message, major, minor):
    """Create and return a new BookVersion copied from ``book_ver``.

    Deep-copies the ToC (sections and chapters), hold chapters and
    attachments into the new version, then makes it the book's current
    version. Not a remote_* entry point; ``message`` only supplies the
    optional "name"/"description".
    """
    new_version = models.BookVersion(book=book,
                                     major=major,
                                     minor=minor,
                                     name=message.get("name", ""),
                                     description=message.get("description", ""))
    new_version.save()

    for toc in book_ver.getTOC():
        nchap = None
        if toc.chapter:
            # ToC entry wraps a chapter: clone the chapter into the new version.
            chap = toc.chapter
            nchap = models.Chapter(version=new_version,
                                   book=book, # this should be removed
                                   url_title=chap.url_title,
                                   title=chap.title,
                                   status=chap.status,
                                   revision=chap.revision,
                                   content=chap.content)
            nchap.save()

        # Sections keep chapter=None (nchap stays None for them).
        ntoc = models.BookToc(version=new_version,
                              book=book, # this should be removed
                              name=toc.name,
                              chapter=nchap,
                              weight=toc.weight,
                              typeof=toc.typeof)
        ntoc.save()

    # hold chapters
    for chap in book_ver.getHoldChapters():
        c = models.Chapter(version=new_version,
                           book=book, # this should be removed
                           url_title=chap.url_title,
                           title=chap.title,
                           status=chap.status,
                           revision=chap.revision,
                           content=chap.content)
        c.save()

    for att in book_ver.getAttachments():
        a = models.Attachment(version = new_version,
                              book = book,
                              status = att.status)
        # save=False defers the DB write until a.save() below.
        a.attachment.save(att.getName(), att.attachment, save = False)
        a.save()

    book.version = new_version
    book.save()

    # probably it would be smart to throw exception from here
    return new_version
def remote_create_major_version(request, message, bookid, version):
    """Create a new major version (major+1, minor reset to 0) of the book.

    Bug fix: on failure the original rolled back and then fell through to
    ``return {"version": new_version.getVersion()}``, raising NameError
    because ``new_version`` was never bound. It now returns
    {"result": False}, matching remote_create_minor_version.
    """
    from booki.editor.views import getVersion

    book = models.Book.objects.get(id=bookid)
    book_ver = getVersion(book, version)

    try:
        new_version = create_new_version(book, book_ver, message, book_ver.major+1, 0)
    except:
        transaction.rollback()
        return {"result": False}
    else:
        logBookHistory(book = book,
                       version = new_version,
                       chapter = None,
                       chapter_history = None,
                       user = request.user,
                       args = {"version": new_version.getVersion()},
                       kind = 'major_version')
        transaction.commit()

    return {"version": new_version.getVersion()}
def remote_create_minor_version(request, message, bookid, version):
    """Create a new minor version (same major, minor+1) of the book."""
    from booki.editor.views import getVersion

    book = models.Book.objects.get(id=bookid)
    current = getVersion(book, version)

    try:
        created = create_new_version(book, current, message, current.major, current.minor + 1)
    except:
        # Creation failed: undo any partial copy and report failure.
        transaction.rollback()
        return {"result": False}
    else:
        logBookHistory(book=book,
                       version=created,
                       chapter=None,
                       chapter_history=None,
                       user=request.user,
                       args={"version": created.getVersion()},
                       kind='minor_version')
        transaction.commit()

    return {"version": created.getVersion()}
def remote_chapter_diff(request, message, bookid, version):
    """Return an ndiff between two revisions of a chapter.

    Improvements: ``splitlines(True)`` replaces the opaque
    ``splitlines(1)`` (the argument is ``keepends``); unused
    ``import datetime`` and ``getVersion`` import dropped.
    """
    book = models.Book.objects.get(id=bookid)

    revision1 = models.ChapterHistory.objects.get(chapter__book=book, chapter__url_title=message["chapter"], revision=message["revision1"])
    revision2 = models.ChapterHistory.objects.get(chapter__book=book, chapter__url_title=message["chapter"], revision=message["revision2"])

    import difflib

    # for line in difflib.unified_diff(revision1.content.splitlines(True), revision2.content.splitlines(True)):
    output = list(difflib.ndiff(revision1.content.splitlines(True),
                                revision2.content.splitlines(True)))

    return {"output": '\n'.join(output)}
# Issue #304 fixed: newly created chapters were not appearing in the ToC.
import sputnik
from django.db import transaction
from booki.utils.log import logBookHistory, logChapterHistory
from booki.editor import models
from booki.editor.views import getVersion
from booki.utils import security
try:
from booki.settings import OBJAVI_URL
except ImportError:
OBJAVI_URL = "http://objavi.flossmanuals.net/objavi.cgi"
try:
from booki.settings import THIS_BOOKI_SERVER
except:
import os
THIS_BOOKI_SERVER = os.environ.get('HTTP_HOST', 'booki.flossmanuals.net')
# this couple of functions should go to models.BookVersion
def getTOCForBook(version):
    """Serialise the version's ToC into tuples consumable by the editor UI.

    Chapter entries: (id, title, url_title, typeof, status_id).
    Section entries: ('s<id>', name, name, typeof).
    """
    def _entry(item):
        # is it a section or chapter?
        if item.chapter:
            return (item.chapter.id,
                    item.chapter.title,
                    item.chapter.url_title,
                    item.typeof,
                    item.chapter.status.id)
        return ('s%s' % item.id, item.name, item.name, item.typeof)

    return [_entry(item) for item in version.getTOC()]
def getHoldChapters(book_version):
    """Return (id, title, url_title, 1, status_id) for each hold chapter."""
    result = []
    for chapter in book_version.getHoldChapters():
        result.append((chapter.id, chapter.title, chapter.url_title, 1, chapter.status.id))
    return result
def getAttachments(book_version):
    """Describe this version's attachments for the client.

    Returns a list of dicts with id, image dimension (jpg only),
    status id, base file name, creation timestamp and file size.
    """
    import os.path
    import Image  # PIL (old-style import) — only used for jpg dimensions

    def _getDimension(att):
        # Only JPEGs get a real dimension; unreadable files report (0, 0),
        # everything else None.
        if att.attachment.name.endswith(".jpg"):
            try:
                im = Image.open(att.attachment.name)
                return im.size
            except:
                return (0, 0)
        return None

    # Entries with a missing file on disk are filtered out by the
    # trailing "if att.attachment".
    attachments = [{"id": att.id,
                    "dimension": _getDimension(att),
                    "status": att.status.id,
                    "name": os.path.split(att.attachment.name)[1],
                    "created": str(att.created.strftime("%d.%m.%Y %H:%M:%S")),
                    "size": att.attachment.size}
                   for att in book_version.getAttachments().order_by("attachment") if att.attachment]

    return attachments
def remote_init_editor(request, message, bookid, version):
    """Bootstrap an editor session for one book version.

    Gathers the ToC, hold chapters, channel users, workflow statuses,
    attachments, metadata, licenses and live per-chapter edit locks, and
    announces this user to the other clients over the sputnik channels.

    :param request: sputnik-enhanced request (carries user and sputnikID)
    :param message: incoming sputnik message; only "channel" is read here
    :param bookid: primary key of the models.Book being edited
    :param version: version identifier, resolved via getVersion()
    :returns: dict with the full initial editor state
    """
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)

    ## get chapters
    chapters = getTOCForBook(book_version)
    holdChapters = getHoldChapters(book_version)

    ## get users
    def _getUserName(a):
        # Bold the requesting client's own entry in the user list.
        if a == request.sputnikID:
            return "<b>%s</b>" % a
        return a

    try:
        users = [_getUserName(m) for m in list(sputnik.smembers("sputnik:channel:%s:channel" % message["channel"]))]
    except:
        # Best effort: if sputnik is unreachable, fall back to an empty list.
        users = []

    ## get workflow statuses
    statuses = [(status.id, status.name) for status in models.BookStatus.objects.filter(book=book).order_by("-weight")]

    ## get attachments
    try:
        attachments = getAttachments(book_version)
    except:
        attachments = []

    ## get metadata
    metadata = [{'name': v.name, 'value': v.getValue()} for v in models.Info.objects.filter(book=book)]

    ## notify others
    sputnik.addMessageToChannel(request, "/chat/%s/" % bookid,
                                {"command": "user_joined",
                                 "user_joined": request.user.username},
                                myself = False)

    ## get licenses
    licenses = [(elem.abbrevation, elem.name) for elem in models.License.objects.all().order_by("name")]

    ## get online users
    try:
        _onlineUsers = sputnik.smembers("sputnik:channel:%s:users" % message["channel"])
    except:
        _onlineUsers = []

    # Register this user as online if not yet present (best effort).
    if request.user.username not in _onlineUsers:
        try:
            sputnik.sadd("sputnik:channel:%s:users" % message["channel"], request.user.username)
            _onlineUsers.append(request.user.username)
        except:
            pass

    ## get mood message for current user
    ## send mood as separate message
    ## set notifications to other clients
    profile = request.user.get_profile()
    if profile:
        moodMessage = profile.mood;
    else:
        moodMessage = ''

    sputnik.addMessageToChannel(request,
                                "/booki/book/%s/%s/" % (bookid, version),
                                {"command": "user_add",
                                 "username": request.user.username,
                                 "mood": moodMessage}
                                )

    ## get online users and their mood messages
    from django.contrib.auth.models import User

    def _getUser(_user):
        # Resolve a username into (username, mood); None when the lookup fails,
        # so failed entries are dropped by the filter below.
        try:
            _u = User.objects.get(username=_user)
            return (_user, _u.get_profile().mood)
        except:
            return None

    onlineUsers = [x for x in [_getUser(x) for x in _onlineUsers] if x]

    # for now, this is one big temp here
    import time, decimal, re
    _now = time.time()
    locks = {}

    try:
        for key in sputnik.rkeys("booki:*:locks:*"):
            lastAccess = sputnik.get(key)

            # Stored timestamps may come back as (unicode) strings.
            if type(lastAccess) in [type(' '), type(u' ')]:
                try:
                    lastAccess = decimal.Decimal(lastAccess)
                except:
                    continue

            # Locks touched within the last 30 seconds are considered live.
            if lastAccess and decimal.Decimal("%f" % _now) - lastAccess <= 30:
                m = re.match("booki:(\d+):locks:(\d+):(\w+)", key)
                if m:
                    if m.group(1) == bookid:
                        # chapter id -> username holding the lock
                        locks[m.group(2)] = m.group(3)
    except:
        pass

    return {"licenses": licenses,
            "chapters": chapters,
            "metadata": metadata,
            "hold": holdChapters,
            "users": users,
            "locks": locks,
            "statuses": statuses,
            "attachments": attachments,
            "onlineUsers": list(onlineUsers)}
def remote_attachments_list(request, message, bookid, version):
    """Return the attachment list for the requested book version."""
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)
    try:
        result = getAttachments(book_version)
    except:
        # Attachment scanning is best effort; fall back to an empty list.
        result = []
    return {"attachments": result}
def remote_attachments_delete(request, message, bookid, version):
    """Delete the attachments listed in message['attachments'].

    Only book admins may delete; returns {"result": True} on success,
    {"result": False} when the user lacks permission.
    """
    # TODO: must check security
    book = models.Book.objects.get(id=bookid)
    user_security = security.getUserSecurityForBook(request.user, book)
    if not user_security.isAdmin():
        return {"result": False}
    for attachment_id in message['attachments']:
        models.Attachment.objects.get(pk=attachment_id).delete()
    transaction.commit()
    return {"result": True}
def remote_chapter_status(request, message, bookid, version):
    """Broadcast a chapter status change; release the edit lock on "normal"."""
    chapter_id = message["chapterID"]
    new_status = message["status"]
    if new_status == "normal":
        # Returning to "normal" means editing finished - drop the lock key.
        sputnik.rdelete("booki:%s:locks:%s:%s" % (bookid, chapter_id, request.user.username))
    sputnik.addMessageToChannel(request,
                                "/booki/book/%s/%s/" % (bookid, version),
                                {"command": "chapter_status",
                                 "chapterID": chapter_id,
                                 "status": new_status,
                                 "username": request.user.username})
    return {}
def remote_change_status(request, message, bookid, version):
    """Assign a new workflow status to a chapter and notify clients.

    Expects message keys:
      chapterID -- primary key of the chapter to update
      statusID  -- primary key of the BookStatus to assign
    """
    # chapterID
    # statusID
    chapter = models.Chapter.objects.get(id=int(message["chapterID"]))
    status = models.BookStatus.objects.get(id=int(message["statusID"]))
    chapter.status = status
    try:
        chapter.save()
        # Tell open editors about the change and post an info line to chat.
        sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                    {"command": "change_status",
                                     "chapterID": message["chapterID"],
                                     "statusID": int(message["statusID"]),
                                     "username": request.user.username})
        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid,
                                    {"command": "message_info",
                                     "from": request.user.username,
                                     "message": 'User %s has changed status of chapter "%s" to "%s".' % (request.user.username, chapter.title, status.name)}, myself=True)
    except:
        # Manual transaction management: undo on any failure.
        transaction.rollback()
    else:
        transaction.commit()
    return {}
def remote_chapter_save(request, message, bookid, version):
    """Persist edited chapter content and broadcast the save.

    Unless the save is flagged "minor", the chapter revision is bumped and
    chapter- and book-level history entries are written first.  When the
    editor is not continuing (message["continue"] is falsy) the chapter is
    returned to "normal" status and the editing lock is released.
    """
    # TODO
    # put this outside in common module
    # or maybe even betterm put it in the Model
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)
    chapter = models.Chapter.objects.get(id=int(message["chapterID"]))
    if message.get("minor", False) != True:
        # Full (non-minor) save: record history and advance the revision.
        history = logChapterHistory(chapter = chapter,
                                    content = message["content"],
                                    user = request.user,
                                    comment = message.get("comment", ""),
                                    revision = chapter.revision+1)
        logBookHistory(book = chapter.book,
                       version = book_version,
                       chapter = chapter,
                       chapter_history = history,
                       user = request.user,
                       args = {"comment": message.get("comment", ""),
                               "author": message.get("author", ""),
                               "authorcomment": message.get("authorcomment", "")},
                       kind = 'chapter_save')
        chapter.revision += 1
    chapter.content = message["content"];
    try:
        chapter.save()
        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid, {"command": "message_info",
                                                                    "from": request.user.username,
                                                                    "message": 'User %s has saved chapter "%s".' % (request.user.username, chapter.title)}, myself=True)
    except:
        # Manual transaction management: undo everything on save failure.
        transaction.rollback()
    else:
        transaction.commit()
    if not message['continue']:
        # Editor closed the chapter: broadcast "normal" and drop the lock.
        sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                    {"command": "chapter_status",
                                     "chapterID": message["chapterID"],
                                     "status": "normal",
                                     "username": request.user.username})
        sputnik.rdelete("booki:%s:locks:%s:%s" % (bookid, message["chapterID"], request.user.username))
    # fire the signal
    import booki.editor.signals
    booki.editor.signals.chapter_modified.send(sender = book_version, chapter = chapter, user = request.user)
    return {}
def remote_chapter_rename(request, message, bookid, version):
    """Rename a chapter and notify chat and book channels.

    On success the rename is logged to book history, the chapter is set back
    to "normal" status, and the change is committed; on save failure the
    transaction is rolled back.
    """
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)
    chapter = models.Chapter.objects.get(id=int(message["chapterID"]))
    oldTitle = chapter.title
    chapter.title = message["chapter"];
    try:
        chapter.save()
    except:
        transaction.rollback()
    else:
        logBookHistory(book = chapter.book,
                       version = book_version,
                       chapter = chapter,
                       user = request.user,
                       args = {"old": oldTitle, "new": message["chapter"]},
                       kind = "chapter_rename")
        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid,
                                    {"command": "message_info",
                                     "from": request.user.username,
                                     "message": 'User %s has renamed chapter "%s" to "%s".' % (request.user.username, oldTitle, message["chapter"])},
                                    myself=True)
        # Rename implicitly ends the edit session, so broadcast "normal".
        sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                    {"command": "chapter_status",
                                     "chapterID": message["chapterID"],
                                     "status": "normal",
                                     "username": request.user.username})
        sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                    {"command": "chapter_rename",
                                     "chapterID": message["chapterID"],
                                     "chapter": message["chapter"]})
        transaction.commit()
    return {}
def remote_chapters_changed(request, message, bookid, version):
    """Reorder the table of contents after a drag-and-drop in the editor.

    message["chapters"] / message["hold"] carry item ids whose first five
    characters are a client-side prefix stripped here; a remaining id that
    starts with "s" denotes a section entry rather than a chapter.  Weights
    are assigned in descending order so the first list item ends up on top.
    """
    lst = [chap[5:] for chap in message["chapters"]]
    lstHold = [chap[5:] for chap in message["hold"]]
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)
    weight = len(lst)
    logBookHistory(book = book,
                   version = book_version,
                   user = request.user,
                   kind = "chapter_reorder")
    for chap in lst:
        # Section entries are prefixed with "s" and addressed by TOC id.
        if chap[0] == 's':
            m = models.BookToc.objects.get(id__exact=int(chap[1:]))
            m.weight = weight
            m.save()
        else:
            try:
                m = models.BookToc.objects.get(chapter__id__exact=int(chap))
                m.weight = weight
                m.save()
            except:
                # Chapter had no TOC entry yet (e.g. dragged in from hold):
                # create a fresh one at this weight.
                chptr = models.Chapter.objects.get(id__exact=int(chap))
                m = models.BookToc(book = book,
                                   version = book_version,
                                   name = "SOMETHING",
                                   chapter = chptr,
                                   weight = weight,
                                   typeof=1)
                m.save()
        weight -= 1
    if message["kind"] == "remove":
        # An item was dragged out of the TOC entirely - delete its entry.
        if type(message["chapter_id"]) == type(u' ') and message["chapter_id"][0] == 's':
            m = models.BookToc.objects.get(id__exact=message["chapter_id"][1:])
            m.delete()
        else:
            m = models.BookToc.objects.get(chapter__id__exact=int(message["chapter_id"]))
            m.delete()
    # addMessageToChannel(request, "/chat/%s/%s/" % (projectid, bookid), {"command": "message_info", "from": request.user.username, "message": 'User %s has rearranged chapters.' % request.user.username})
    sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                {"command": "chapters_changed",
                                 "ids": lst,
                                 "hold_ids": lstHold,
                                 "kind": message["kind"],
                                 "chapter_id": message["chapter_id"]})
    # TODO
    # this should be changed, to check for errors
    transaction.commit()
    return {}
def remote_get_users(request, message, bookid, version):
    """List users present on the message's channel.

    The requesting client's own sputnik ID is marked by wrapping it in
    exclamation marks ("!id!").
    """
    def _mark_self(member):
        # Highlight the requesting client's own entry.
        return "!%s!" % member if member == request.sputnikID else member

    members = sputnik.smembers("sputnik:channel:%s:channel" % message["channel"])
    return {"users": [_mark_self(member) for member in list(members)]}
def remote_get_chapter(request, message, bookid, version):
    """Fetch a chapter's title and content, optionally taking the edit lock.

    Unless message["lock"] is explicitly false, the requesting user's lock
    timer is (re)started and an "edit" status is broadcast to the book
    channel.
    """
    chapter = models.Chapter.objects.get(id=int(message["chapterID"]))
    result = {"title": chapter.title, "content": chapter.content}
    if not message.get("lock", True):
        return result
    import time
    # Start the lock timer for this editor.
    sputnik.set("booki:%s:locks:%s:%s" % (bookid, message["chapterID"], request.user.username), time.time())
    sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                {"command": "chapter_status",
                                 "chapterID": message["chapterID"],
                                 "status": "edit",
                                 "username": request.user.username})
    return result
def remote_book_notification(request, message, bookid, version):
    """Keep-alive ping from an open chapter editor.

    Refreshes the user's lock timestamp for the chapter and, if an admin has
    flagged this lock to be killed (see remote_unlock_chapter), consumes the
    flag and asks the client to release the chapter via {"kill": "please"}.
    """
    res = {}
    import time
    # rcon.delete(key)
    # set the initial timer for editor
    if request.user.username and request.user.username != '':
        sputnik.set("booki:%s:locks:%s:%s" % (bookid, message["chapterID"], request.user.username), time.time())
        # A killlock value of '1' means a forced unlock was requested.
        if '%s' % sputnik.get("booki:%s:killlocks:%s:%s" % (bookid, message["chapterID"], request.user.username)) == '1':
            sputnik.rdelete("booki:%s:killlocks:%s:%s" % (bookid, message["chapterID"], request.user.username))
            res = {"kill": "please"}
    return res
def remote_chapter_split(request, message, bookid, version):
    """Split one chapter into several new chapters.

    message["chapters"] is a list of (title, content) pairs.  The new
    chapters are created at the original chapter's TOC position (when it had
    one), the original chapter is deleted, and all TOC weights are
    renumbered before broadcasting the new TOC to clients.
    """
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)
    logBookHistory(book = book,
                   version = book_version,
                   user = request.user,
                   kind = 'chapter_split')
    allChapters = []
    try:
        originalChapter = models.Chapter.objects.get(id=int(message["chapterID"]))
    except:
        originalChapter = None
    try:
        tocChapter = models.BookToc.objects.get(book=book, chapter__id__exact=message["chapterID"])
    except:
        tocChapter = None
    import datetime
    from django.template.defaultfilters import slugify
    if tocChapter:
        # Work on the TOC top-to-bottom; remember where the original chapter
        # sits so the new pieces are inserted in its place.
        allChapters = [chap for chap in models.BookToc.objects.filter(book=book).order_by("-weight")]
        initialPosition = len(allChapters)-tocChapter.weight
    else:
        initialPosition = 0
    # Default status for the newly created chapters.
    s = models.BookStatus.objects.filter(book=book).order_by("weight")[0]
    n = 0
    for chap in message["chapters"]:
        chapter = models.Chapter(book = book,
                                 url_title = slugify(chap[0]),
                                 title = chap[0],
                                 status = s,
                                 content = '<h1>%s</h1>%s' % (chap[0], chap[1]),
                                 created = datetime.datetime.now(),
                                 modified = datetime.datetime.now())
        chapter.save()
        if tocChapter:
            m = models.BookToc(book = book,
                               chapter = chapter,
                               name = chap[0],
                               weight = 0,
                               typeof = 1)
            m.save()
            allChapters.insert(1+initialPosition+n, m)
            n += 1
    if originalChapter:
        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid, {"command": "message_info", "from": request.user.username, "message": 'User %s has split chapter "%s".' % (request.user.username, originalChapter.title)}, myself=True)
        originalChapter.delete()
    if tocChapter:
        tocChapter.delete()
    # Renumber weights so the TOC order matches the list built above.
    n = len(allChapters)
    for chap in allChapters:
        try:
            chap.weight = n
            chap.save()
            n -= 1
        except:
            pass
    ## get chapters
    chapters = getTOCForBook(book_version)
    holdChapters = getHoldChapters(book_version)
    sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                {"command": "chapter_split",
                                 "chapterID": message["chapterID"],
                                 "chapters": chapters,
                                 "hold": holdChapters,
                                 "username": request.user.username},
                                myself = True)
    transaction.commit()
    return {}
def remote_create_chapter(request, message, bookid, version):
    """Create a new chapter and put it at the top of the TOC.

    Returns {"created": False} when a chapter with the same slug already
    exists in this book version, or when saving fails; {"created": True}
    otherwise.
    """
    import datetime
    # BookVersion treba uzeti
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)
    from django.template.defaultfilters import slugify
    url_title = slugify(message["chapter"])
    # here i should probably set it to default project status
    s = models.BookStatus.objects.filter(book=book).order_by("weight")[0]
    ch = models.Chapter.objects.filter(book=book, version=book_version, url_title=url_title)
    if len(list(ch)) > 0:
        return {"created": False}
    content = u'<h1>%s</h1>' % message["chapter"]
    chapter = models.Chapter(book = book,
                             version = book_version,
                             url_title = url_title,
                             title = message["chapter"],
                             status = s,
                             content = content,
                             created = datetime.datetime.now(),
                             modified = datetime.datetime.now())
    try:
        chapter.save()
    except:
        transaction.rollback()
        return {"created": False}
    else:
        # this should be solved in better way
        # should have createChapter in booki.utils.book module
        # Shift every existing TOC entry down one slot so the new chapter
        # can take weight 1 (the top position).
        toc_items = len(book_version.getTOC())+1
        for itm in models.BookToc.objects.filter(version = book_version, book = book):
            itm.weight = toc_items
            itm.save()
            toc_items -= 1
        tc = models.BookToc(version = book_version,
                            book = book,
                            name = message["chapter"],
                            chapter = chapter,
                            weight = 1,
                            typeof = 1)
        try:
            tc.save()
        except:
            transaction.rollback()
            return {"created": False}
        history = logChapterHistory(chapter = chapter,
                                    content = content,
                                    user = request.user,
                                    comment = message.get("comment", ""),
                                    revision = chapter.revision)
        logBookHistory(book = book,
                       version = book_version,
                       chapter = chapter,
                       chapter_history = history,
                       user = request.user,
                       kind = 'chapter_create')
        transaction.commit()
    # Descriptor understood by the editor UI.
    result = (chapter.id, chapter.title, chapter.url_title, 1, s.id)
    sputnik.addMessageToChannel(request, "/chat/%s/" % bookid, {"command": "message_info",
                                                                "from": request.user.username,
                                                                "message": 'User %s has created new chapter "%s".' % (request.user.username, message["chapter"])},
                                myself=True)
    sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version), {"command": "chapter_create", "chapter": result}, myself = True)
    return {"created": True}
def remote_publish_book(request, message, bookid, version):
    """Ask the Objavi service to export/publish this book.

    Builds the export arguments from optional message settings, POSTs them
    to OBJAVI_URL and returns the raw response plus the first two response
    lines (presumably the download URL and an S3 URL - confirm against the
    Objavi service contract).
    """
    book = models.Book.objects.get(id=bookid)
    sputnik.addMessageToChannel(request, "/chat/%s/" % bookid, {"command": "message_info",
                                                                "from": request.user.username,
                                                                "message": '"%s" is being published.' % (book.title, )},
                                myself=True)
    import urllib2
    import urllib
    publishMode = message.get("publish_mode", "epub")
    destination = "nowhere"
    if message.get("is_archive", False):
        destination = "archive.org"
    args = {'book': book.url_title,
            'project': 'export',
            'mode': publishMode,
            'server': THIS_BOOKI_SERVER,
            'destination': destination,
            'max-age': 0,
            }
    def _isSet(name):
        # Copy an optional client setting into the Objavi arguments.
        if message.get(name, None):
            if name == 'grey_scale':
                # Objavi expects the literal string 'yes' for this flag.
                args['grey_scale'] = 'yes'
            else:
                args[name] = message.get(name)
    _isSet('title')
    _isSet('license')
    _isSet('isbn')
    _isSet('toc_header')
    _isSet('booksize')
    _isSet('page_width')
    _isSet('page_height')
    _isSet('top_margin')
    _isSet('side_margin')
    _isSet('gutter')
    _isSet('columns')
    _isSet('column_margin')
    _isSet('grey_scale')
    _isSet('css')
    data = urllib.urlencode(args)
    req = urllib2.Request(OBJAVI_URL, data)
    f = urllib2.urlopen(req)
    # f = urllib2.urlopen("%s?book=%s&project=export&mode=%s&server=booki.flossmanuals.net&destination=%s" % (urlPublish, book.url_title, publishMode, destination))
    ta = f.read()
    lst = ta.split("\n")
    dta, dtas3 = "", ""
    if len(lst) > 0:
        dta = lst[0]
    if len(lst) > 1:
        dtas3 = lst[1]
    return {"dtaall": ta, "dta": dta, "dtas3": dtas3}
def remote_create_section(request, message, bookid, version):
    """Create a new TOC section (a heading entry without a chapter).

    Returns {"created": False} if a section with the same name already
    exists or saving fails; otherwise {"created": (descriptor tuple)}.
    """
    import datetime
    book = models.Book.objects.get(id=bookid)
    book_version = getVersion(book, version)
    ch = models.BookToc.objects.filter(book=book,
                                       version=book_version,
                                       name=message['chapter'],
                                       typeof=0)
    if len(list(ch)) > 0:
        return {"created": False}
    c = models.BookToc(book = book,
                       version = book_version,
                       name = message["chapter"],
                       chapter = None,
                       weight = 0,
                       typeof=0)
    result = True
    try:
        c.save()
    except:
        result = False
        transaction.rollback()
    else:
        logBookHistory(book = book,
                       version = book_version,
                       user = request.user,
                       args = {"title": message["chapter"]},
                       kind = 'section_create')
        transaction.commit()
        # Descriptor understood by the editor: ("s<id>", name, None, typeof).
        result = ("s%s" % c.id, c.name, None, c.typeof)
        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid, {"command": "message_info",
                                                                    "from": request.user.username,
                                                                    "message": 'User %s has created new section "%s".' % (request.user.username, message["chapter"])},
                                    myself=True)
        sputnik.addMessageToChannel(request, "/booki/book/%s/%s/" % (bookid, version),
                                    {"command": "chapter_create",
                                     "chapter": result,
                                     "typeof": c.typeof},
                                    myself = True)
    return {"created": result}
def remote_get_history(request, message, bookid, version):
    """Return one 50-entry page of the book's history, newest first.

    message["page"] selects the page (1-based, default 1).

    BUGFIX: the first branch previously matched kinds [1, 2, 3], which made
    the dedicated ``kind == 2`` ("save") branch - the only one that emits
    "chapter_history" - unreachable dead code.  It now matches [1, 3] only,
    and the save branch additionally guards against a missing
    chapter_history record.
    """
    import datetime
    from booki.editor.common import parseJSON

    book = models.Book.objects.get(id=bookid)
    page = int(message.get("page", 1))
    book_history = models.BookHistory.objects.filter(book=book).order_by("-modified")[(page-1)*50:(page-1)*50+50]
    # Mapping of numeric history kinds to display labels.
    temp = {0: 'unknown',
            1: 'create',
            2: 'save',
            3: 'rename',
            4: 'reorder',
            5: 'split',
            6: 'section create',
            10: 'book create',
            11: 'minor',
            12: 'major',
            13: 'attachment'}
    history = []
    for entry in book_history:
        if entry.kind in [1, 3] and entry.chapter:
            history.append({"chapter": entry.chapter.title,
                            "chapter_url": entry.chapter.url_title,
                            "modified": entry.modified.strftime("%d.%m.%Y %H:%M:%S"),
                            "description": entry.args,
                            "user": entry.user.username,
                            "kind": temp.get(entry.kind, '')})
        elif entry.kind == 2 and entry.chapter and entry.chapter_history:
            # Saves also carry a link to the chapter history record.
            history.append({"chapter": entry.chapter.title,
                            "chapter_url": entry.chapter.url_title,
                            "chapter_history": entry.chapter_history.id,
                            "modified": entry.modified.strftime("%d.%m.%Y %H:%M:%S"),
                            "description": entry.args,
                            "user": entry.user.username,
                            "kind": temp.get(entry.kind, '')})
        elif entry.kind in [11, 12]:
            # Version bumps store the version descriptor as JSON in args.
            history.append({"modified": entry.modified.strftime("%d.%m.%Y %H:%M:%S"),
                            "version": parseJSON(entry.args),
                            "user": entry.user.username,
                            "kind": temp.get(entry.kind, '')})
        elif entry.kind in [13]:
            history.append({"modified": entry.modified.strftime("%d.%m.%Y %H:%M:%S"),
                            "args": parseJSON(entry.args),
                            "user": entry.user.username,
                            "kind": temp.get(entry.kind, '')})
        else:
            history.append({"modified": entry.modified.strftime("%d.%m.%Y %H:%M:%S"),
                            "description": entry.args,
                            "user": entry.user.username,
                            "kind": temp.get(entry.kind, '')})
    return {"history": history}
def remote_get_chapter_history(request, message, bookid, version):
    """Return all saved revisions of one chapter, newest first."""
    import datetime
    from booki.editor.views import getVersion
    book = models.Book.objects.get(id=bookid)
    book_ver = getVersion(book, version)
    entries = models.ChapterHistory.objects.filter(
        chapter__book=book,
        chapter__url_title=message["chapter"]).order_by("-modified")
    revisions = [{"chapter": entry.chapter.title,
                  "chapter_url": entry.chapter.url_title,
                  "modified": entry.modified.strftime("%d.%m.%Y %H:%M:%S"),
                  "user": entry.user.username,
                  "revision": entry.revision,
                  "comment": entry.comment} for entry in entries]
    return {"history": revisions}
def remote_revert_revision(request, message, bookid, version):
    """Revert a chapter to an earlier saved revision.

    The revert is recorded as a brand new revision (with chapter- and
    book-level history entries), so nothing is lost; on save failure the
    transaction is rolled back.
    """
    from booki.editor.views import getVersion
    book = models.Book.objects.get(id=bookid)
    book_ver = getVersion(book, version)
    chapter = models.Chapter.objects.get(version=book_ver, url_title=message["chapter"])
    revision = models.ChapterHistory.objects.get(revision=message["revision"], chapter__url_title=message["chapter"], chapter__version=book_ver.id)
    # TODO
    # does chapter history really needs to keep content or it can only keep reference to chapter
    history = logChapterHistory(chapter = chapter,
                                content = revision.content,
                                user = request.user,
                                comment = "Reverted to revision %s." % message["revision"],
                                revision = chapter.revision+1)
    logBookHistory(book = book,
                   version = book_ver,
                   chapter = chapter,
                   chapter_history = history,
                   user = request.user,
                   args = {},
                   kind = 'chapter_save')
    chapter.revision += 1
    chapter.content = revision.content;
    try:
        chapter.save()
    except:
        transaction.rollback()
    else:
        transaction.commit()
        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid,
                                    {"command": "message_info",
                                     "from": request.user.username,
                                     "message": 'User %s has reverted chapter "%s" to revision %s.' % (request.user.username, chapter.title, message["revision"])}, myself=True)
    return {}
def remote_get_chapter_revision(request, message, bookid, version):
    """Return one specific revision of a chapter, or {} when not found."""
    import datetime
    from booki.editor.views import getVersion
    book = models.Book.objects.get(id=bookid)
    try:
        rev = models.ChapterHistory.objects.get(chapter__book=book, chapter__url_title=message["chapter"], revision=message["revision"])
        return {"chapter": rev.chapter.title,
                "chapter_url": rev.chapter.url_title,
                "modified": rev.modified.strftime("%d.%m.%Y %H:%M:%S"),
                "user": rev.user.username,
                "revision": rev.revision,
                "version": '%d.%d' % (rev.chapter.version.major, rev.chapter.version.minor),
                "content": rev.content,
                "comment": rev.comment}
    except:
        # Missing revision (or any lookup failure) yields an empty result.
        return {}
def remote_get_notes(request, message, bookid, version):
    """Return all saved notes for this book."""
    import datetime
    book = models.Book.objects.get(id=bookid)
    note_rows = models.BookNotes.objects.filter(book=book)
    return {"notes": [{"notes": row.notes} for row in note_rows]}
def remote_notes_save(request, message, bookid, version):
    """Save (create or update) the shared notes for a book.

    A book has at most one BookNotes row; it is created on first save.
    """
    book = models.Book.objects.get(id=bookid)
    book_notes = models.BookNotes.objects.filter(book=book)
    notes = message.get("notes")
    book_notes_obj = None
    if len(book_notes) == 0:
        # First save for this book - create the row.
        book_notes_obj = models.BookNotes( book = book , notes = notes)
    else:
        book_notes_obj = book_notes[0]
        book_notes_obj.notes = notes
    try:
        book_notes_obj.save()
    except:
        transaction.rollback()
    else:
        sputnik.addMessageToChannel(request, "/chat/%s/" % bookid, {"command": "message_info",
                                                                    "from": request.user.username,
                                                                    "message": 'User %s has saved notes for book "%s".' % (request.user.username, book.title)}, myself=True)
        transaction.commit()
    return {}
def remote_unlock_chapter(request, message, bookid, version):
    """Force-unlock a chapter: flag every lock holder to be kicked out.

    Only the special "booki" service account may do this; the holders'
    clients poll the killlock key and release the chapter themselves.
    """
    import re
    if request.user.username != 'booki':
        return {}
    lock_pattern = re.compile(r"booki:(\d+):locks:(\d+):(\w+)")
    for key in sputnik.rkeys("booki:%s:locks:%s:*" % (bookid, message["chapterID"])):
        match = lock_pattern.match(key)
        if match:
            sputnik.set("booki:%s:killlocks:%s:%s" % (bookid, message["chapterID"], match.group(3)), 1)
    return {}
def remote_get_versions(request, message, bookid, version):
    """Return every version of this book, newest first."""
    book = models.Book.objects.get(id=bookid)
    versions = []
    for v in models.BookVersion.objects.filter(book=book).order_by("-created"):
        versions.append({"major": v.major,
                         "minor": v.minor,
                         "name": v.name,
                         "description": v.description,
                         "created": str(v.created.strftime('%a, %d %b %Y %H:%M:%S GMT'))})
    return {"versions": versions}
# put this outside of this module
def create_new_version(book, book_ver, message, major, minor):#request, message, bookid, version):
    """Create a new BookVersion by deep-copying the current one.

    Copies every TOC entry (with its chapter), every hold chapter and every
    attachment of book_ver into the new version, then points the book at
    the new version.  Returns the new BookVersion instance.
    """
    new_version = models.BookVersion(book=book,
                                     major=major,
                                     minor=minor,
                                     name=message.get("name", ""),
                                     description=message.get("description", ""))
    new_version.save()
    for toc in book_ver.getTOC():
        nchap = None
        if toc.chapter:
            # Copy the chapter itself before copying its TOC entry.
            chap = toc.chapter
            nchap = models.Chapter(version=new_version,
                                   book=book, # this should be removed
                                   url_title=chap.url_title,
                                   title=chap.title,
                                   status=chap.status,
                                   revision=chap.revision,
                                   content=chap.content)
            nchap.save()
        ntoc = models.BookToc(version=new_version,
                              book=book, # this should be removed
                              name=toc.name,
                              chapter=nchap,
                              weight=toc.weight,
                              typeof=toc.typeof)
        ntoc.save()
    # hold chapters
    for chap in book_ver.getHoldChapters():
        c = models.Chapter(version=new_version,
                           book=book, # this should be removed
                           url_title=chap.url_title,
                           title=chap.title,
                           status=chap.status,
                           revision=chap.revision,
                           content=chap.content)
        c.save()
    for att in book_ver.getAttachments():
        a = models.Attachment(version = new_version,
                              book = book,
                              status = att.status)
        a.attachment.save(att.getName(), att.attachment, save = False)
        a.save()
    # Make the new version the book's current one.
    book.version = new_version
    book.save()
    # probably it would be smart to throw exception from here
    return new_version
def remote_create_major_version(request, message, bookid, version):
    """Create the next major version (X+1 . 0) of a book.

    Returns {"version": "<major>.<minor>"} on success or {"result": False}
    on failure.

    BUGFIX: on failure this used to roll back and then fall through to
    ``return {"version": new_version.getVersion()}`` with ``new_version``
    unbound, raising NameError.  It now returns {"result": False}, matching
    remote_create_minor_version.
    """
    from booki.editor.views import getVersion

    book = models.Book.objects.get(id=bookid)
    book_ver = getVersion(book, version)
    try:
        new_version = create_new_version(book, book_ver, message, book_ver.major+1, 0)
    except:
        transaction.rollback()
        return {"result": False}
    else:
        logBookHistory(book = book,
                       version = new_version,
                       chapter = None,
                       chapter_history = None,
                       user = request.user,
                       args = {"version": new_version.getVersion()},
                       kind = 'major_version')
        transaction.commit()
    return {"version": new_version.getVersion()}
def remote_create_minor_version(request, message, bookid, version):
    """Create the next minor version (X . Y+1) of a book.

    Returns {"version": "<major>.<minor>"} on success or {"result": False}
    on failure.
    """
    from booki.editor.views import getVersion
    book = models.Book.objects.get(id=bookid)
    book_ver = getVersion(book, version)
    try:
        new_version = create_new_version(book, book_ver, message, book_ver.major, book_ver.minor+1)
    except:
        transaction.rollback()
        return {"result": False}
    else:
        logBookHistory(book = book,
                       version = new_version,
                       chapter = None,
                       chapter_history = None,
                       user = request.user,
                       args = {"version": new_version.getVersion()},
                       kind = 'minor_version')
        transaction.commit()
    return {"version": new_version.getVersion()}
def remote_chapter_diff(request, message, bookid, version):
    """Return an ndiff between two revisions of a chapter as one string."""
    import datetime
    from booki.editor.views import getVersion
    import difflib

    book = models.Book.objects.get(id=bookid)
    first = models.ChapterHistory.objects.get(chapter__book=book, chapter__url_title=message["chapter"], revision=message["revision1"])
    second = models.ChapterHistory.objects.get(chapter__book=book, chapter__url_title=message["chapter"], revision=message["revision2"])
    # for line in difflib.unified_diff(revision1.content.splitlines(1), revision2.content.splitlines(1)):
    diff_lines = list(difflib.ndiff(first.content.splitlines(1),
                                    second.content.splitlines(1)))
    return {"output": '\n'.join(diff_lines)}
|
"""
Django settings for sendcertified project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get("DEBUG", False)
ALLOWED_HOSTS = [".herokuapp.com"]
# Application definition
INSTALLED_APPS = [
'main.apps.MainConfig',
'widget_tweaks',
'tinymce',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sendcertified.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sendcertified.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'sendcertified',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
else:
DATABASES = {}
DATABASES['default'] = dj_database_url.config(conn_max_age=600)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, '../main/static')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
#STATICFILES_DIRS = (
# os.path.join(PROJECT_ROOT, '../main/static'),
#)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
Fix database config
"""
Django settings for sendcertified project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get("DEBUG", False)
ALLOWED_HOSTS = [".herokuapp.com", "localhost"]
# Application definition
INSTALLED_APPS = [
'main.apps.MainConfig',
'widget_tweaks',
'tinymce',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sendcertified.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sendcertified.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'sendcertified',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, '../main/static')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
#STATICFILES_DIRS = (
# os.path.join(PROJECT_ROOT, '../main/static'),
#)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
#!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import ctypes
import errno
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import time
verbose = False

# Platform-detection flags, computed once at import time.
isARMv7l = (platform.uname()[4] == 'armv7l')
isLinux = (platform.system() == 'Linux')
isMac = (platform.system() == 'Darwin')
isWin = (platform.system() == 'Windows')
isWin64 = ('PROGRAMFILES(X86)' in os.environ)
isWinVistaOrHigher = isWin and (sys.getwindowsversion()[0] >= 6)

# This refers to the Win-specific "MozillaBuild" environment in which Python is running, which is
# spawned from the MozillaBuild script for 64-bit compilers, e.g. start-msvc10-x64.bat
# BUGFIX: use os.environ.get() so a Windows shell without MOZ_TOOLS set does
# not raise KeyError at import time (the 'nt' check alone did not guard it).
isMozBuild64 = (os.name == 'nt') and ('x64' in os.environ.get('MOZ_TOOLS', '').split(os.sep)[-1])
# isMozBuild64 = isWin and '64' in os.environ['MOZ_MSVCBITS'] # For MozillaBuild 2.0.0

noMinidumpMsg = r'''
WARNING: Minidumps are not being generated, so all crashes will be uninteresting.
WARNING: Make sure the following key value exists in this key:
WARNING: HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\Windows Error Reporting\LocalDumps
WARNING: Name: DumpType Type: REG_DWORD
WARNING: http://msdn.microsoft.com/en-us/library/windows/desktop/bb787181%28v=vs.85%29.aspx
'''
########################
# Platform Detection #
########################
def macVer():
    '''
    If system is a Mac, return the mac type.
    '''
    # Returns the OS X version as a list of ints, e.g. "10.9.5" -> [10, 9, 5].
    # Asserting (not raising) means this is a programmer-error guard only.
    assert platform.system() == 'Darwin'
    return [int(x) for x in platform.mac_ver()[0].split('.')]
def getFreeSpace(folder, mulVar):
    '''
    Return the free space of folder's drive, divided by 1024**mulVar
    (so bytes when mulVar is 0, KiB when 1, MiB when 2, ...).
    Adapted from http://stackoverflow.com/a/2372171
    '''
    assert mulVar >= 0
    if platform.system() == 'Windows':
        # kernel32 reports free bytes available to the caller.
        free_bytes = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
        total_free = float(free_bytes.value)
    else:
        # POSIX: free blocks times fragment size.
        fs_stats = os.statvfs(folder)
        total_free = float(fs_stats.f_bfree * fs_stats.f_frsize)
    return total_free / (1024 ** mulVar)
#####################
# Shell Functions #
#####################
def captureStdout(inputCmd, ignoreStderr=False, combineStderr=False, ignoreExitCode=False,
                  currWorkingDir=os.getcwdu(), env='NOTSET', verbosity=False):
    '''
    Captures standard output, returns the output as a string, along with the return value.
    '''
    # NOTE(review): the currWorkingDir default is evaluated once at import time,
    # so it is the importer's cwd, not the caller's cwd at call time - confirm
    # this is intended before relying on the default.
    if env == 'NOTSET':
        vdump(shellify(inputCmd))
        env = os.environ
    else:
        # There is no way yet to only print the environment variables that were added by the harness
        # We could dump all of os.environ but it is too much verbose output.
        vdump('ENV_VARIABLES_WERE_ADDED_HERE ' + shellify(inputCmd))
    # Strip one level of surrounding double quotes and coerce every element to str.
    cmd = []
    for el in inputCmd:
        if el.startswith('"') and el.endswith('"'):
            cmd.append(str(el[1:-1]))
        else:
            cmd.append(str(el))
    assert cmd != []
    try:
        p = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT if combineStderr else subprocess.PIPE,
            cwd=currWorkingDir,
            env=env)
        (stdout, stderr) = p.communicate()
    except OSError, e:
        raise Exception(repr(e.strerror) + ' error calling: ' + shellify(cmd))
    if p.returncode != 0:
        # Out-of-memory compiler kills show up in stderr (or stdout when combined).
        oomErrorOutput = stdout if combineStderr else stderr
        if (isLinux or isMac) and oomErrorOutput:
            if 'internal compiler error: Killed (program cc1plus)' in oomErrorOutput:
                raise Exception('GCC running out of memory')
            elif 'error: unable to execute command: Killed' in oomErrorOutput:
                raise Exception('Clang running out of memory')
        if not ignoreExitCode:
            # Potential problem area: Note that having a non-zero exit code does not mean that the
            # operation did not succeed, for example when compiling a shell. A non-zero exit code
            # can appear even though a shell compiled successfully.
            # Pymake in builds earlier than revision 232553f741a0 did not support the '-s' option.
            if 'no such option: -s' not in stdout:
                print 'Nonzero exit code from: '
                print ' ' + shellify(cmd)
                print 'stdout is:'
                print stdout
                if stderr is not None:
                    print 'stderr is:'
                    print stderr
            # Pymake in builds earlier than revision 232553f741a0 did not support the '-s' option.
            if 'hg pull: option --rebase not recognized' not in stdout and 'no such option: -s' not in stdout:
                if isWin and stderr and 'Permission denied' in stderr and \
                        'configure: error: installation or configuration problem: ' + \
                        'C++ compiler cannot create executables.' in stderr:
                    raise Exception('Windows conftest.exe configuration permission problem')
                else:
                    raise Exception('Nonzero exit code')
    if not combineStderr and not ignoreStderr and len(stderr) > 0:
        # Ignore hg color mode throwing an error in console on Windows platforms.
        if not (isWin and 'warning: failed to set color mode to win32' in stderr):
            print 'Unexpected output on stderr from: '
            print ' ' + shellify(cmd)
            print stdout, stderr
            raise Exception('Unexpected output on stderr')
    if stderr and ignoreStderr and len(stderr) > 0 and p.returncode != 0:
        # During configure, there will always be stderr. Sometimes this stderr causes configure to
        # stop the entire script, especially on Windows.
        print 'Return code not zero, and unexpected output on stderr from: '
        print ' ' + shellify(cmd)
        print stdout, stderr
        raise Exception('Return code not zero, and unexpected output on stderr')
    if verbose or verbosity:
        print stdout
        if stderr is not None:
            print stderr
    return stdout.rstrip(), p.returncode
def createWtmpDir(tmpDirBase):
    '''Create a wtmp<number> directory under tmpDirBase and return its name.

    The number is incremented until a directory name that does not yet exist
    is found.
    '''
    num = 1
    while True:
        dirName = 'wtmp' + str(num)
        fullPath = os.path.join(tmpDirBase, dirName)
        try:
            # Attempt creation directly; catching the failure avoids the race
            # inherent in an exists-then-create sequence.
            os.mkdir(fullPath)
        except OSError:
            num += 1
        else:
            break
    vdump(dirName + os.sep)  # Even if not verbose, wtmp<num> is also dumped: wtmp1/w1: NORMAL
    return dirName
def dateStr():
    '''Return the current local date/time, like `date` in bash minus the timezone.'''
    # The timezone is deliberately left out: Python is not reliable about DST
    # switchovers, and Windows pads the day of the month with a leading zero.
    return time.asctime()
def grabMacCrashLog(progname, crashedPID, logPrefix, useLogFiles):
    '''Finds the required crash log in the given crash reporter directory.'''
    # Returns the matching log's path (or the "<logPrefix>-crash.txt" copy when
    # useLogFiles is true), or None if no log for crashedPID is found.
    assert platform.system() == 'Darwin' and macVer() >= [10, 6]
    reportDirList = [os.path.expanduser('~'), '/']
    for baseDir in reportDirList:
        # Sometimes the crash reports end up in the root directory.
        # This possibly happens when the value of <value>:
        #   defaults write com.apple.CrashReporter DialogType <value>
        # is none, instead of server, or some other option.
        # It also happens when ssh'd into a computer.
        # And maybe when the computer is under heavy load.
        # See http://en.wikipedia.org/wiki/Crash_Reporter_%28Mac_OS_X%29
        reportDir = os.path.join(baseDir, 'Library/Logs/DiagnosticReports/')
        # Find a crash log for the right process name and pid, preferring
        # newer crash logs (which sort last).
        if os.path.exists(reportDir):
            crashLogs = os.listdir(reportDir)
        else:
            crashLogs = []
        # Firefox sometimes still runs as firefox-bin, at least on Mac (likely bug 658850)
        crashLogs = [x for x in crashLogs
                     if x.startswith(progname + '_') or x.startswith(progname + '-bin_')]
        crashLogs.sort(reverse=True)
        for fn in crashLogs:
            fullfn = os.path.join(reportDir, fn)
            try:
                with open(fullfn) as c:
                    firstLine = c.readline()
                # The first line of a crash report ends with "progname [pid]".
                if firstLine.rstrip().endswith("[" + str(crashedPID) + "]"):
                    if useLogFiles:
                        # Copy, don't rename, because we might not have permissions
                        # (especially for the system rather than user crash log directory)
                        # Use copyfile, as we do not want to copy the permissions metadata over
                        shutil.copyfile(fullfn, logPrefix + "-crash.txt")
                        captureStdout(["chmod", "og+r", logPrefix + "-crash.txt"])
                        return logPrefix + "-crash.txt"
                    else:
                        return fullfn
                        #return open(fullfn).read()
            except (OSError, IOError):
                # Maybe the log was rotated out between when we got the list
                # of files and when we tried to open this file. If so, it's
                # clearly not The One.
                pass
    return None
def grabCrashLog(progfullname, crashedPID, logPrefix, wantStack):
    '''Returns the crash log if found.'''
    # logPrefix may be None; when it is a str, artifacts are written to
    # "<logPrefix>-crash.txt" and "<logPrefix>-core.gz".
    progname = os.path.basename(progfullname)
    useLogFiles = isinstance(logPrefix, str)
    if useLogFiles:
        # Remove stale artifacts from a previous run with the same prefix.
        if os.path.exists(logPrefix + "-crash.txt"):
            os.remove(logPrefix + "-crash.txt")
        if os.path.exists(logPrefix + "-core"):
            os.remove(logPrefix + "-core")
    if not wantStack or progname == "valgrind":
        return
    # This has only been tested on 64-bit Windows 7 and higher, but should work on 64-bit Vista.
    if isWinVistaOrHigher and isWin64:
        debuggerCmd = constructCdbCommand(progfullname, crashedPID)
    elif os.name == 'posix':
        debuggerCmd = constructGdbCommand(progfullname, crashedPID)
    else:
        debuggerCmd = None
    if debuggerCmd:
        vdump(' '.join(debuggerCmd))
        debuggerExitCode = subprocess.call(
            debuggerCmd,
            stdin=None,
            stderr=subprocess.STDOUT,
            stdout=open(logPrefix + "-crash.txt", 'w') if useLogFiles else None,
            # It would be nice to use this everywhere, but it seems to be broken on Windows
            # (http://docs.python.org/library/subprocess.html)
            close_fds=(os.name == "posix")
        )
        if debuggerExitCode != 0:
            print 'Debugger exited with code %d : %s' % (debuggerExitCode, shellify(debuggerCmd))
        if useLogFiles:
            if os.path.isfile(normExpUserPath(debuggerCmd[-1])):
                # Path to memory dump is the last element of debuggerCmd.
                shutil.move(debuggerCmd[-1], logPrefix + "-core")
                subprocess.call(["gzip", '-f', logPrefix + "-core"])
                # chmod here, else the uploaded -core.gz files do not have sufficient permissions.
                subprocess.check_call(['chmod', 'og+r', logPrefix + "-core.gz"])
                return logPrefix + "-crash.txt"
            else:
                print 'This file does not exist: ' + debuggerCmd[-1]
        else:
            print "I don't know what to do with a core file when logPrefix is null"
    # On Mac, look for a crash log generated by Mac OS X Crash Reporter
    if isMac:
        loops = 0
        # The crash reporter can lag; wait much longer for full Firefox crashes.
        maxLoops = 500 if progname.startswith("firefox") else 30
        while True:
            cLogFound = grabMacCrashLog(progname, crashedPID, logPrefix, useLogFiles)
            if cLogFound is not None:
                return cLogFound
            # print "[grabCrashLog] Waiting for the crash log to appear..."
            time.sleep(0.200)
            loops += 1
            if loops > maxLoops:
                # I suppose this might happen if the process corrupts itself so much that
                # the crash reporter gets confused about the process name, for example.
                print "grabCrashLog waited a long time, but a crash log for " + progname + \
                    " [" + str(crashedPID) + "] never appeared!"
                break
def constructCdbCommand(progfullname, crashedPID):
    '''
    Constructs a command that uses the Windows debugger (cdb.exe) to turn a minidump file into a
    stack trace.
    '''
    # Returns the cdb command list, or None when cdb/minidumps are unavailable.
    # On Windows Vista and above, look for a minidump.
    dumpFilename = normExpUserPath(os.path.join(
        '~', 'AppData', 'Local', 'CrashDumps', os.path.basename(progfullname) + '.' + str(crashedPID) + '.dmp'))
    win64bitDebuggerFolder = os.path.join(os.getenv('PROGRAMW6432'), 'Debugging Tools for Windows (x64)')
    # 64-bit cdb.exe seems to also be able to analyse 32-bit binary dumps.
    cdbPath = os.path.join(win64bitDebuggerFolder, 'cdb.exe')
    if not os.path.exists(cdbPath):
        print '\nWARNING: cdb.exe is not found - all crashes will be interesting.\n'
        return None
    if isWinDumpingToDefaultLocation():
        loops = 0
        maxLoops = 300
        while True:
            if os.path.exists(dumpFilename):
                debuggerCmdPath = getAbsPathForAdjacentFile('cdbCmds.txt')
                assert os.path.exists(debuggerCmdPath)
                cdbCmdList = []
                # msec.dll provides the !exploitable analysis extension, if present.
                bExploitableDLL = os.path.join(win64bitDebuggerFolder, 'winext', 'msec.dll')
                if os.path.exists(bExploitableDLL):
                    cdbCmdList.append('.load ' + bExploitableDLL)
                cdbCmdList.append('$<' + debuggerCmdPath)
                # See bug 902706 about -g.
                return [cdbPath, '-g', '-c', ';'.join(cdbCmdList), '-z', dumpFilename]
            time.sleep(0.200)
            loops += 1
            if loops > maxLoops:
                # Windows may take some time to generate the dump.
                print "constructCdbCommand waited a long time, but " + dumpFilename + " never appeared!"
                return None
    else:
        return None
def isWinDumpingToDefaultLocation():
    '''Checks whether Windows minidumps are enabled and set to go to Windows' default location.'''
    # Returns True when dumps go to the default %LOCALAPPDATA%\CrashDumps,
    # False when minidumps are disabled or redirected elsewhere, and None when
    # the LocalDumps registry key itself is missing.
    import _winreg
    # For now, this code does not edit the Windows Registry because we tend to be in a 32-bit
    # version of Python and if one types in regedit in the Run dialog, opens up the 64-bit registry.
    # If writing a key, we most likely need to flush. For the moment, no keys are written.
    try:
        with _winreg.OpenKey(_winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE),
                             r'Software\Microsoft\Windows\Windows Error Reporting\LocalDumps',
                             # Read key from 64-bit registry, which also works for 32-bit
                             0, (_winreg.KEY_WOW64_64KEY + _winreg.KEY_READ)) as key:
            try:
                dumpTypeRegValue = _winreg.QueryValueEx(key, 'DumpType')
                if not (dumpTypeRegValue[0] == 1 and dumpTypeRegValue[1] == _winreg.REG_DWORD):
                    print noMinidumpMsg
                    return False
            except WindowsError as e:
                # errno 2: the DumpType value does not exist, so no minidumps.
                if e.errno == 2:
                    print noMinidumpMsg
                    return False
                else:
                    raise
            try:
                dumpFolderRegValue = _winreg.QueryValueEx(key, 'DumpFolder')
                # %LOCALAPPDATA%\CrashDumps is the default location.
                if not (dumpFolderRegValue[0] == '%LOCALAPPDATA%\CrashDumps' and
                        dumpFolderRegValue[1] == _winreg.REG_EXPAND_SZ):
                    print '\nWARNING: Dumps are instead appearing at: ' + dumpFolderRegValue[0] + \
                        ' - all crashes will be uninteresting.\n'
                    return False
            except WindowsError as e:
                # If the key value cannot be found, the dumps will be put in the default location
                if e.errno == 2 and e.strerror == 'The system cannot find the file specified':
                    return True
                else:
                    raise
        return True
    except WindowsError as e:
        # If the LocalDumps registry key cannot be found, dumps will be put in the default location.
        if e.errno == 2 and e.strerror == 'The system cannot find the file specified':
            print '\nWARNING: The registry key HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\' + \
                'Windows\\Windows Error Reporting\\LocalDumps cannot be found.\n'
            return None
        else:
            raise
def constructGdbCommand(progfullname, crashedPID):
    '''
    Constructs a command that uses the POSIX debugger (gdb) to turn a minidump file into a
    stack trace.
    '''
    # On Mac and Linux, look for a core file; bail out with None when absent.
    corePath = None
    if isMac:
        # Core files will be generated if you do:
        #   mkdir -p /cores/
        #   ulimit -c 2147483648 (or call resource.setrlimit from a preexec_fn hook)
        corePath = "/cores/core." + str(crashedPID)
    elif isLinux:
        pidInName = False
        if os.path.exists('/proc/sys/kernel/core_uses_pid'):
            with open('/proc/sys/kernel/core_uses_pid') as f:
                pidInName = bool(int(f.read()[0]))  # Indexing [0] keeps it a single char.
        if pidInName:
            corePath = 'core.' + str(crashedPID)  # relative path
        else:
            corePath = 'core'  # relative path
        if not os.path.isfile(corePath):
            corePath = normExpUserPath(os.path.join('~', corePath))  # try the home dir
    if corePath and os.path.exists(corePath):
        gdbCmdFile = getAbsPathForAdjacentFile('gdb-quick.txt')
        assert os.path.exists(gdbCmdFile)
        # Run gdb and move the core file. Tip: gdb gives more info for:
        # (debug with intact build dir > debug > opt with frame pointers > opt)
        return ["gdb", "-n", "-batch", "-x", gdbCmdFile, progfullname, corePath]
    return None
def getAbsPathForAdjacentFile(filename):
    '''Return the absolute path of `filename` located next to this script.'''
    scriptDir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(scriptDir, filename)
def isProgramInstalled(program):
    '''Return True iff `program` is found on the PATH (probed via `which`).'''
    _, exitCode = captureStdout(['which', program], ignoreStderr=True, combineStderr=True,
                                ignoreExitCode=True)
    return exitCode == 0
def rmDirIfEmpty(eDir):
    '''Remove the directory `eDir` if, and only if, it contains no entries.'''
    assert os.path.isdir(eDir)
    entries = os.listdir(eDir)
    if not entries:
        os.rmdir(eDir)
def rmTreeIfExists(dirTree):
    '''Delete `dirTree` recursively (read-only entries included) when it exists.'''
    if os.path.isdir(dirTree):
        rmTreeIncludingReadOnly(dirTree)
    # Whether or not anything was removed, the tree must be gone now.
    assert not os.path.isdir(dirTree)
def rmTreeIncludingReadOnly(dirTree):
    '''Recursively delete `dirTree`, coping with read-only files and directories.'''
    # shutil.rmtree alone fails on read-only entries (notably on Windows);
    # handleRemoveReadOnly chmods them writable and retries the failed call.
    shutil.rmtree(dirTree, onerror=handleRemoveReadOnly)
def test_rmTreeIncludingReadOnly():
    '''Run this function in the same directory as subprocesses.py to test.'''
    testDir = 'test_rmTreeIncludingReadOnly'
    os.mkdir(testDir)
    readOnlyDir = os.path.join(testDir, 'nestedReadOnlyDir')
    os.mkdir(readOnlyDir)
    filename = os.path.join(readOnlyDir, 'test.txt')
    with open(filename, 'wb') as f:
        f.write('testing\n')
    # Make both the file and its directory read-only so a plain shutil.rmtree
    # would fail, exercising the handleRemoveReadOnly error handler.
    os.chmod(filename, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
    os.chmod(readOnlyDir, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
    rmTreeIncludingReadOnly(testDir)  # Should pass here
def handleRemoveReadOnly(func, path, exc):
    '''Handle read-only files. Adapted from http://stackoverflow.com/q/1213706'''
    # Only retry removal failures caused by missing write permission; anything
    # else is re-raised to the caller (shutil.rmtree's onerror contract).
    isPermissionFailure = func in (os.rmdir, os.remove) and exc[1].errno == errno.EACCES
    if not isPermissionFailure:
        raise
    fullAccess = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO  # 0777
    if os.name == 'posix':
        # Ensure parent directory is also writeable.
        parentDir = os.path.abspath(os.path.join(path, os.path.pardir))
        if not os.access(parentDir, os.W_OK):
            os.chmod(parentDir, fullAccess)
    elif os.name == 'nt':
        os.chmod(path, fullAccess)
    func(path)
def normExpUserPath(p):
    '''Return `p` with a leading ~ expanded and the result normalized.'''
    expanded = os.path.expanduser(p)
    return os.path.normpath(expanded)
def shellify(cmd):
    """Try to convert an arguments array to an equivalent string that can be pasted into a shell."""
    # Characters safe without quoting, and characters safe inside double quotes.
    okUnquotedRE = re.compile(r"""^[a-zA-Z0-9\-\_\.\,\/\=\~@\+]*$""")
    okQuotedRE = re.compile(r"""^[a-zA-Z0-9\-\_\.\,\/\=\~@\{\}\|\(\)\+ ]*$""")
    ssc = []
    # Iterate items directly instead of indexing via xrange(len(cmd)); this is
    # the idiomatic form and also forward-compatible with Python 3.
    for item in cmd:
        if okUnquotedRE.match(item):
            ssc.append(item)
        elif okQuotedRE.match(item):
            ssc.append('"' + item + '"')
        else:
            # Give up on proper quoting; emit a best-effort joined string.
            vdump('Regex not matched, but trying to shellify anyway:')
            return ' '.join(cmd).replace('\\', '//') if isWin else ' '.join(cmd)
    return ' '.join(ssc)
def timeSubprocess(command, ignoreStderr=False, combineStderr=False, ignoreExitCode=False,
                   cwd=os.getcwdu(), env=os.environ, vb=False):
    '''
    Calculates how long a captureStdout command takes and prints it. Returns the stdout and return
    value that captureStdout passes on.
    '''
    # NOTE(review): the cwd default is evaluated once at import time (importer's
    # cwd, not the caller's cwd at call time) - confirm this is intended.
    print 'Running `%s` now..' % shellify(command)
    startTime = time.time()
    stdOutput, retVal = captureStdout(command, ignoreStderr=ignoreStderr,
                                      combineStderr=combineStderr, ignoreExitCode=ignoreExitCode,
                                      currWorkingDir=cwd, env=env, verbosity=vb)
    endTime = time.time()
    print '`' + shellify(command) + '` took %.3f seconds.\n' % (endTime - startTime)
    return stdOutput, retVal
class Unbuffered:
    '''From http://stackoverflow.com/a/107717 - Unbuffered stdout by default, similar to -u.'''
    def __init__(self, stream):
        # The wrapped stream; every write is flushed through immediately.
        self.stream = stream
    def write(self, data):
        self.stream.write(data)
        # Flush after each write so output is never held in the buffer.
        self.stream.flush()
    def __getattr__(self, attr):
        # Delegate every other attribute (fileno, isatty, ...) to the wrapped stream.
        return getattr(self.stream, attr)
def vdump(inp):
    '''
    This function appends the word 'DEBUG' to any verbose output.
    '''
    # Prints only when the module-level `verbose` flag is set.
    if verbose:
        print 'DEBUG -', inp
###########
#  Tests  #
###########
if __name__ == '__main__':
    # Minimal smoke test for isProgramInstalled: `date` should exist on every
    # supported platform, and the nonsense name should not.
    vdump('Running tests...')
    assert isProgramInstalled('date')
    assert not isProgramInstalled('FOOBARFOOBAR')
    vdump('Done')
Revert the backout of (i.e. reland) "Do not generate a corefile if gdb crashes." - the backout's cause was unrelated.
This reverts commit 540f6aafcaa6596b84ff0d04019718e9af3835ee.
#!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import ctypes
import errno
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import time
# Global verbosity flag; harnesses that import this module may toggle it.
verbose = False

# Platform-detection flags, computed once at import time.
isARMv7l = (platform.uname()[4] == 'armv7l')
isLinux = (platform.system() == 'Linux')
isMac = (platform.system() == 'Darwin')
isWin = (platform.system() == 'Windows')
# On 64-bit Windows, 32-bit programs see "Program Files (x86)" via this env var.
isWin64 = ('PROGRAMFILES(X86)' in os.environ)
isWinVistaOrHigher = isWin and (sys.getwindowsversion()[0] >= 6)
# This refers to the Win-specific "MozillaBuild" environment in which Python is running, which is
# spawned from the MozillaBuild script for 64-bit compilers, e.g. start-msvc10-x64.bat
# Use .get() so a Windows shell without MOZ_TOOLS set does not raise KeyError at import time.
isMozBuild64 = (os.name == 'nt') and ('x64' in os.environ.get('MOZ_TOOLS', '').split(os.sep)[-1])
# isMozBuild64 = isWin and '64' in os.environ['MOZ_MSVCBITS'] # For MozillaBuild 2.0.0

# Warning shown when Windows minidump generation is not configured.
noMinidumpMsg = r'''
WARNING: Minidumps are not being generated, so all crashes will be uninteresting.
WARNING: Make sure the following key value exists in this key:
WARNING: HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\Windows Error Reporting\LocalDumps
WARNING: Name: DumpType Type: REG_DWORD
WARNING: http://msdn.microsoft.com/en-us/library/windows/desktop/bb787181%28v=vs.85%29.aspx
'''
########################
# Platform Detection #
########################
def macVer():
    '''Return the running Mac OS version as a list of ints, e.g. [10, 9, 2].

    Must only be called on a Mac.
    '''
    assert platform.system() == 'Darwin'
    versionString = platform.mac_ver()[0]
    return [int(component) for component in versionString.split('.')]
def getFreeSpace(folder, mulVar):
    '''
    Return the free space of the drive containing `folder`: in bytes when
    mulVar is 0, otherwise divided by 1024**mulVar (1 = KB, 2 = MB, 3 = GB).
    Adapted from http://stackoverflow.com/a/2372171
    '''
    assert mulVar >= 0
    if platform.system() == 'Windows':
        free_bytes = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
        retVal = float(free_bytes.value)
    else:
        # Query the filesystem once instead of issuing two statvfs syscalls.
        stats = os.statvfs(folder)
        retVal = float(stats.f_bfree * stats.f_frsize)
    return retVal / (1024 ** mulVar)
#####################
# Shell Functions #
#####################
def captureStdout(inputCmd, ignoreStderr=False, combineStderr=False, ignoreExitCode=False,
                  currWorkingDir=os.getcwdu(), env='NOTSET', verbosity=False):
    '''
    Captures standard output, returns the output as a string, along with the return value.
    '''
    # NOTE(review): the currWorkingDir default is evaluated once at import time,
    # so it is the importer's cwd, not the caller's cwd at call time - confirm
    # this is intended before relying on the default.
    if env == 'NOTSET':
        vdump(shellify(inputCmd))
        env = os.environ
    else:
        # There is no way yet to only print the environment variables that were added by the harness
        # We could dump all of os.environ but it is too much verbose output.
        vdump('ENV_VARIABLES_WERE_ADDED_HERE ' + shellify(inputCmd))
    # Strip one level of surrounding double quotes and coerce every element to str.
    cmd = []
    for el in inputCmd:
        if el.startswith('"') and el.endswith('"'):
            cmd.append(str(el[1:-1]))
        else:
            cmd.append(str(el))
    assert cmd != []
    try:
        p = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT if combineStderr else subprocess.PIPE,
            cwd=currWorkingDir,
            env=env)
        (stdout, stderr) = p.communicate()
    except OSError, e:
        raise Exception(repr(e.strerror) + ' error calling: ' + shellify(cmd))
    if p.returncode != 0:
        # Out-of-memory compiler kills show up in stderr (or stdout when combined).
        oomErrorOutput = stdout if combineStderr else stderr
        if (isLinux or isMac) and oomErrorOutput:
            if 'internal compiler error: Killed (program cc1plus)' in oomErrorOutput:
                raise Exception('GCC running out of memory')
            elif 'error: unable to execute command: Killed' in oomErrorOutput:
                raise Exception('Clang running out of memory')
        if not ignoreExitCode:
            # Potential problem area: Note that having a non-zero exit code does not mean that the
            # operation did not succeed, for example when compiling a shell. A non-zero exit code
            # can appear even though a shell compiled successfully.
            # Pymake in builds earlier than revision 232553f741a0 did not support the '-s' option.
            if 'no such option: -s' not in stdout:
                print 'Nonzero exit code from: '
                print ' ' + shellify(cmd)
                print 'stdout is:'
                print stdout
                if stderr is not None:
                    print 'stderr is:'
                    print stderr
            # Pymake in builds earlier than revision 232553f741a0 did not support the '-s' option.
            if 'hg pull: option --rebase not recognized' not in stdout and 'no such option: -s' not in stdout:
                if isWin and stderr and 'Permission denied' in stderr and \
                        'configure: error: installation or configuration problem: ' + \
                        'C++ compiler cannot create executables.' in stderr:
                    raise Exception('Windows conftest.exe configuration permission problem')
                else:
                    raise Exception('Nonzero exit code')
    if not combineStderr and not ignoreStderr and len(stderr) > 0:
        # Ignore hg color mode throwing an error in console on Windows platforms.
        if not (isWin and 'warning: failed to set color mode to win32' in stderr):
            print 'Unexpected output on stderr from: '
            print ' ' + shellify(cmd)
            print stdout, stderr
            raise Exception('Unexpected output on stderr')
    if stderr and ignoreStderr and len(stderr) > 0 and p.returncode != 0:
        # During configure, there will always be stderr. Sometimes this stderr causes configure to
        # stop the entire script, especially on Windows.
        print 'Return code not zero, and unexpected output on stderr from: '
        print ' ' + shellify(cmd)
        print stdout, stderr
        raise Exception('Return code not zero, and unexpected output on stderr')
    if verbose or verbosity:
        print stdout
        if stderr is not None:
            print stderr
    return stdout.rstrip(), p.returncode
def createWtmpDir(tmpDirBase):
    '''Create a wtmp<number> directory under tmpDirBase and return its name.

    The number is incremented until a directory name that does not yet exist
    is found.
    '''
    num = 1
    while True:
        dirName = 'wtmp' + str(num)
        fullPath = os.path.join(tmpDirBase, dirName)
        try:
            # Attempt creation directly; catching the failure avoids the race
            # inherent in an exists-then-create sequence.
            os.mkdir(fullPath)
        except OSError:
            num += 1
        else:
            break
    vdump(dirName + os.sep)  # Even if not verbose, wtmp<num> is also dumped: wtmp1/w1: NORMAL
    return dirName
def dateStr():
    '''Return the current local date/time, like `date` in bash minus the timezone.'''
    # The timezone is deliberately left out: Python is not reliable about DST
    # switchovers, and Windows pads the day of the month with a leading zero.
    return time.asctime()
def disableCorefile():
    '''When called as a preexec_fn, sets appropriate resource limits for the JS shell. Must only be called on POSIX.'''
    import resource  # module only available on POSIX
    # A zero RLIMIT_CORE stops the kernel from writing a core file if the
    # child process (e.g. gdb) itself crashes.
    resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
def grabMacCrashLog(progname, crashedPID, logPrefix, useLogFiles):
    '''Finds the required crash log in the given crash reporter directory.'''
    # Returns the matching log's path (or the "<logPrefix>-crash.txt" copy when
    # useLogFiles is true), or None if no log for crashedPID is found.
    assert platform.system() == 'Darwin' and macVer() >= [10, 6]
    reportDirList = [os.path.expanduser('~'), '/']
    for baseDir in reportDirList:
        # Sometimes the crash reports end up in the root directory.
        # This possibly happens when the value of <value>:
        #   defaults write com.apple.CrashReporter DialogType <value>
        # is none, instead of server, or some other option.
        # It also happens when ssh'd into a computer.
        # And maybe when the computer is under heavy load.
        # See http://en.wikipedia.org/wiki/Crash_Reporter_%28Mac_OS_X%29
        reportDir = os.path.join(baseDir, 'Library/Logs/DiagnosticReports/')
        # Find a crash log for the right process name and pid, preferring
        # newer crash logs (which sort last).
        if os.path.exists(reportDir):
            crashLogs = os.listdir(reportDir)
        else:
            crashLogs = []
        # Firefox sometimes still runs as firefox-bin, at least on Mac (likely bug 658850)
        crashLogs = [x for x in crashLogs
                     if x.startswith(progname + '_') or x.startswith(progname + '-bin_')]
        crashLogs.sort(reverse=True)
        for fn in crashLogs:
            fullfn = os.path.join(reportDir, fn)
            try:
                with open(fullfn) as c:
                    firstLine = c.readline()
                # The first line of a crash report ends with "progname [pid]".
                if firstLine.rstrip().endswith("[" + str(crashedPID) + "]"):
                    if useLogFiles:
                        # Copy, don't rename, because we might not have permissions
                        # (especially for the system rather than user crash log directory)
                        # Use copyfile, as we do not want to copy the permissions metadata over
                        shutil.copyfile(fullfn, logPrefix + "-crash.txt")
                        captureStdout(["chmod", "og+r", logPrefix + "-crash.txt"])
                        return logPrefix + "-crash.txt"
                    else:
                        return fullfn
                        #return open(fullfn).read()
            except (OSError, IOError):
                # Maybe the log was rotated out between when we got the list
                # of files and when we tried to open this file. If so, it's
                # clearly not The One.
                pass
    return None
def grabCrashLog(progfullname, crashedPID, logPrefix, wantStack):
    '''Returns the crash log if found.'''
    # logPrefix may be None; when it is a str, artifacts are written to
    # "<logPrefix>-crash.txt" and "<logPrefix>-core.gz".
    progname = os.path.basename(progfullname)
    useLogFiles = isinstance(logPrefix, str)
    if useLogFiles:
        # Remove stale artifacts from a previous run with the same prefix.
        if os.path.exists(logPrefix + "-crash.txt"):
            os.remove(logPrefix + "-crash.txt")
        if os.path.exists(logPrefix + "-core"):
            os.remove(logPrefix + "-core")
    if not wantStack or progname == "valgrind":
        return
    # This has only been tested on 64-bit Windows 7 and higher, but should work on 64-bit Vista.
    if isWinVistaOrHigher and isWin64:
        debuggerCmd = constructCdbCommand(progfullname, crashedPID)
    elif os.name == 'posix':
        debuggerCmd = constructGdbCommand(progfullname, crashedPID)
    else:
        debuggerCmd = None
    if debuggerCmd:
        vdump(' '.join(debuggerCmd))
        debuggerExitCode = subprocess.call(
            debuggerCmd,
            stdin=None,
            stderr=subprocess.STDOUT,
            stdout=open(logPrefix + "-crash.txt", 'w') if useLogFiles else None,
            # It would be nice to use this everywhere, but it seems to be broken on Windows
            # (http://docs.python.org/library/subprocess.html)
            close_fds=(os.name == "posix"),
            preexec_fn=(disableCorefile if os.name == 'posix' else None)  # Do not generate a corefile if gdb crashes
        )
        if debuggerExitCode != 0:
            print 'Debugger exited with code %d : %s' % (debuggerExitCode, shellify(debuggerCmd))
        if useLogFiles:
            if os.path.isfile(normExpUserPath(debuggerCmd[-1])):
                # Path to memory dump is the last element of debuggerCmd.
                shutil.move(debuggerCmd[-1], logPrefix + "-core")
                subprocess.call(["gzip", '-f', logPrefix + "-core"])
                # chmod here, else the uploaded -core.gz files do not have sufficient permissions.
                subprocess.check_call(['chmod', 'og+r', logPrefix + "-core.gz"])
                return logPrefix + "-crash.txt"
            else:
                print 'This file does not exist: ' + debuggerCmd[-1]
        else:
            print "I don't know what to do with a core file when logPrefix is null"
    # On Mac, look for a crash log generated by Mac OS X Crash Reporter
    if isMac:
        loops = 0
        # The crash reporter can lag; wait much longer for full Firefox crashes.
        maxLoops = 500 if progname.startswith("firefox") else 30
        while True:
            cLogFound = grabMacCrashLog(progname, crashedPID, logPrefix, useLogFiles)
            if cLogFound is not None:
                return cLogFound
            # print "[grabCrashLog] Waiting for the crash log to appear..."
            time.sleep(0.200)
            loops += 1
            if loops > maxLoops:
                # I suppose this might happen if the process corrupts itself so much that
                # the crash reporter gets confused about the process name, for example.
                print "grabCrashLog waited a long time, but a crash log for " + progname + \
                    " [" + str(crashedPID) + "] never appeared!"
                break
def constructCdbCommand(progfullname, crashedPID):
    '''
    Constructs a command that uses the Windows debugger (cdb.exe) to turn a minidump file into a
    stack trace.
    '''
    # Returns the cdb command list, or None when cdb/minidumps are unavailable.
    # On Windows Vista and above, look for a minidump.
    dumpFilename = normExpUserPath(os.path.join(
        '~', 'AppData', 'Local', 'CrashDumps', os.path.basename(progfullname) + '.' + str(crashedPID) + '.dmp'))
    win64bitDebuggerFolder = os.path.join(os.getenv('PROGRAMW6432'), 'Debugging Tools for Windows (x64)')
    # 64-bit cdb.exe seems to also be able to analyse 32-bit binary dumps.
    cdbPath = os.path.join(win64bitDebuggerFolder, 'cdb.exe')
    if not os.path.exists(cdbPath):
        print '\nWARNING: cdb.exe is not found - all crashes will be interesting.\n'
        return None
    if isWinDumpingToDefaultLocation():
        loops = 0
        maxLoops = 300
        while True:
            if os.path.exists(dumpFilename):
                debuggerCmdPath = getAbsPathForAdjacentFile('cdbCmds.txt')
                assert os.path.exists(debuggerCmdPath)
                cdbCmdList = []
                # msec.dll provides the !exploitable analysis extension, if present.
                bExploitableDLL = os.path.join(win64bitDebuggerFolder, 'winext', 'msec.dll')
                if os.path.exists(bExploitableDLL):
                    cdbCmdList.append('.load ' + bExploitableDLL)
                cdbCmdList.append('$<' + debuggerCmdPath)
                # See bug 902706 about -g.
                return [cdbPath, '-g', '-c', ';'.join(cdbCmdList), '-z', dumpFilename]
            time.sleep(0.200)
            loops += 1
            if loops > maxLoops:
                # Windows may take some time to generate the dump.
                print "constructCdbCommand waited a long time, but " + dumpFilename + " never appeared!"
                return None
    else:
        return None
def isWinDumpingToDefaultLocation():
    '''Checks whether Windows minidumps are enabled and set to go to Windows' default location.'''
    # Returns True when dumps go to the default %LOCALAPPDATA%\CrashDumps,
    # False when minidumps are disabled or redirected elsewhere, and None when
    # the LocalDumps registry key itself is missing.
    import _winreg
    # For now, this code does not edit the Windows Registry because we tend to be in a 32-bit
    # version of Python and if one types in regedit in the Run dialog, opens up the 64-bit registry.
    # If writing a key, we most likely need to flush. For the moment, no keys are written.
    try:
        with _winreg.OpenKey(_winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE),
                             r'Software\Microsoft\Windows\Windows Error Reporting\LocalDumps',
                             # Read key from 64-bit registry, which also works for 32-bit
                             0, (_winreg.KEY_WOW64_64KEY + _winreg.KEY_READ)) as key:
            try:
                dumpTypeRegValue = _winreg.QueryValueEx(key, 'DumpType')
                if not (dumpTypeRegValue[0] == 1 and dumpTypeRegValue[1] == _winreg.REG_DWORD):
                    print noMinidumpMsg
                    return False
            except WindowsError as e:
                # errno 2: the DumpType value does not exist, so no minidumps.
                if e.errno == 2:
                    print noMinidumpMsg
                    return False
                else:
                    raise
            try:
                dumpFolderRegValue = _winreg.QueryValueEx(key, 'DumpFolder')
                # %LOCALAPPDATA%\CrashDumps is the default location.
                if not (dumpFolderRegValue[0] == '%LOCALAPPDATA%\CrashDumps' and
                        dumpFolderRegValue[1] == _winreg.REG_EXPAND_SZ):
                    print '\nWARNING: Dumps are instead appearing at: ' + dumpFolderRegValue[0] + \
                        ' - all crashes will be uninteresting.\n'
                    return False
            except WindowsError as e:
                # If the key value cannot be found, the dumps will be put in the default location
                if e.errno == 2 and e.strerror == 'The system cannot find the file specified':
                    return True
                else:
                    raise
        return True
    except WindowsError as e:
        # If the LocalDumps registry key cannot be found, dumps will be put in the default location.
        if e.errno == 2 and e.strerror == 'The system cannot find the file specified':
            print '\nWARNING: The registry key HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\' + \
                'Windows\\Windows Error Reporting\\LocalDumps cannot be found.\n'
            return None
        else:
            raise
def constructGdbCommand(progfullname, crashedPID):
    '''
    Constructs a command that uses the POSIX debugger (gdb) to turn a minidump file into a
    stack trace.
    '''
    # On Mac and Linux, look for a core file; bail out with None when absent.
    corePath = None
    if isMac:
        # Core files will be generated if you do:
        #   mkdir -p /cores/
        #   ulimit -c 2147483648 (or call resource.setrlimit from a preexec_fn hook)
        corePath = "/cores/core." + str(crashedPID)
    elif isLinux:
        pidInName = False
        if os.path.exists('/proc/sys/kernel/core_uses_pid'):
            with open('/proc/sys/kernel/core_uses_pid') as f:
                pidInName = bool(int(f.read()[0]))  # Indexing [0] keeps it a single char.
        if pidInName:
            corePath = 'core.' + str(crashedPID)  # relative path
        else:
            corePath = 'core'  # relative path
        if not os.path.isfile(corePath):
            corePath = normExpUserPath(os.path.join('~', corePath))  # try the home dir
    if corePath and os.path.exists(corePath):
        gdbCmdFile = getAbsPathForAdjacentFile('gdb-quick.txt')
        assert os.path.exists(gdbCmdFile)
        # Run gdb and move the core file. Tip: gdb gives more info for:
        # (debug with intact build dir > debug > opt with frame pointers > opt)
        return ["gdb", "-n", "-batch", "-x", gdbCmdFile, progfullname, corePath]
    return None
def getAbsPathForAdjacentFile(filename):
    '''Gets the absolute path of a particular file, given its base directory and filename.'''
    scriptDir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(scriptDir, filename)
def isProgramInstalled(program):
    '''Checks if the specified program is installed.'''
    # `which` exits 0 only when the program is found on the PATH.
    exitCode = captureStdout(['which', program], ignoreStderr=True,
                             combineStderr=True, ignoreExitCode=True)[1]
    return exitCode == 0
def rmDirIfEmpty(eDir):
    '''Remove directory if empty.'''
    assert os.path.isdir(eDir)
    if len(os.listdir(eDir)) == 0:
        os.rmdir(eDir)
def rmTreeIfExists(dirTree):
    '''Remove a directory with all sub-directories and files if the directory exists.'''
    if os.path.isdir(dirTree):
        rmTreeIncludingReadOnly(dirTree)
    # The tree must be gone whether or not it existed to begin with.
    assert not os.path.isdir(dirTree)
def rmTreeIncludingReadOnly(dirTree):
    '''Recursively delete dirTree, clearing read-only bits that would otherwise abort shutil.rmtree.'''
    shutil.rmtree(dirTree, onerror=handleRemoveReadOnly)
def test_rmTreeIncludingReadOnly():
    '''Run this function in the same directory as subprocesses.py to test.'''
    testDir = 'test_rmTreeIncludingReadOnly'
    readOnlyDir = os.path.join(testDir, 'nestedReadOnlyDir')
    filename = os.path.join(readOnlyDir, 'test.txt')
    os.mkdir(testDir)
    os.mkdir(readOnlyDir)
    with open(filename, 'wb') as f:
        f.write('testing\n')
    # Make both the file and its directory read-only so the onerror handler
    # in rmTreeIncludingReadOnly is actually exercised.
    readOnlyBits = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
    os.chmod(filename, readOnlyBits)
    os.chmod(readOnlyDir, readOnlyBits)
    rmTreeIncludingReadOnly(testDir)  # Should pass here
def handleRemoveReadOnly(func, path, exc):
    '''Handle read-only files. Adapted from http://stackoverflow.com/q/1213706'''
    # Only retry permission-denied failures from the two deletion primitives;
    # anything else is re-raised to the caller (shutil.rmtree's except block).
    if func not in (os.rmdir, os.remove) or exc[1].errno != errno.EACCES:
        raise
    if os.name == 'posix':
        # On POSIX, deletion needs a writeable *parent* directory.
        pardir = os.path.abspath(os.path.join(path, os.path.pardir))
        if not os.access(pardir, os.W_OK):
            os.chmod(pardir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
    elif os.name == 'nt':
        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
    func(path)
def normExpUserPath(p):
    '''Expand a leading "~" in p and normalise the resulting path.'''
    expanded = os.path.expanduser(p)
    return os.path.normpath(expanded)
def shellify(cmd):
    """Try to convert an arguments array to an equivalent string that can be pasted into a shell."""
    okUnquotedRE = re.compile(r"""^[a-zA-Z0-9\-\_\.\,\/\=\~@\+]*$""")
    okQuotedRE = re.compile(r"""^[a-zA-Z0-9\-\_\.\,\/\=\~@\{\}\|\(\)\+ ]*$""")
    pieces = []
    for item in cmd:
        if okUnquotedRE.match(item):
            # Safe to paste verbatim.
            pieces.append(item)
        elif okQuotedRE.match(item):
            # Safe once wrapped in double quotes.
            pieces.append('"' + item + '"')
        else:
            # Give up on proper quoting and produce a best-effort string.
            vdump('Regex not matched, but trying to shellify anyway:')
            joined = ' '.join(cmd)
            return joined.replace('\\', '//') if isWin else joined
    return ' '.join(pieces)
def timeSubprocess(command, ignoreStderr=False, combineStderr=False, ignoreExitCode=False,
cwd=os.getcwdu(), env=os.environ, vb=False):
'''
Calculates how long a captureStdout command takes and prints it. Returns the stdout and return
value that captureStdout passes on.
'''
print 'Running `%s` now..' % shellify(command)
startTime = time.time()
stdOutput, retVal = captureStdout(command, ignoreStderr=ignoreStderr,
combineStderr=combineStderr, ignoreExitCode=ignoreExitCode,
currWorkingDir=cwd, env=env, verbosity=vb)
endTime = time.time()
print '`' + shellify(command) + '` took %.3f seconds.\n' % (endTime - startTime)
return stdOutput, retVal
class Unbuffered:
    '''From http://stackoverflow.com/a/107717 - Unbuffered stdout by default, similar to -u.'''

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        '''Forward the write, then flush immediately so nothing sits in a buffer.'''
        out = self.stream
        out.write(data)
        out.flush()

    def __getattr__(self, attr):
        '''Delegate every other attribute to the wrapped stream.'''
        return getattr(self.stream, attr)
def vdump(inp):
    '''
    Print inp prefixed with 'DEBUG -', but only when verbose mode is enabled.
    '''
    if verbose:
        print 'DEBUG -', inp
###########
# Tests #
###########
if __name__ == '__main__':
    # Lightweight self-test, run only when this module is executed directly.
    vdump('Running tests...')
    assert isProgramInstalled('date')
    assert not isProgramInstalled('FOOBARFOOBAR')
    vdump('Done')
|
# -*- coding: utf-8 -*-
import httplib as http
import time
from flask import request
from modularodm.exceptions import ValidationError, ValidationValueError
from framework import forms
from framework import status
from framework.auth import cas
from framework.auth import User, get_user
from framework.auth.core import generate_confirm_token
from framework.auth.decorators import collect_auth, must_be_logged_in
from framework.auth.forms import PasswordForm, SetEmailAndPasswordForm
from framework.auth.signals import user_registered
from framework.auth.utils import validate_email
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.sessions import session
from framework.transactions.handlers import no_auto_transaction
from website import mails
from website import language
from website import security
from website import settings
from website.models import Node
from website.profile import utils as profile_utils
from website.project.decorators import (must_have_permission, must_be_valid_project,
must_not_be_registration, must_be_contributor_or_public, must_be_contributor)
from website.project.model import has_anonymous_link
from website.project.signals import unreg_contributor_added, contributor_added
from website.util import web_url_for, is_json_request
from website.util.permissions import expand_permissions, ADMIN
from website.util import sanitize
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_node_contributors_abbrev(auth, node, **kwargs):
    """Return an abbreviated, surname-formatted contributor listing for *node*.

    :param Auth auth: Consolidated authorization
    :param Node node: project whose visible contributors are summarised
    :param kwargs: may contain 'user_ids' (restrict to those visible users) and
        'max_count' (names shown before the "& N others" suffix; default 3)
    :raises: HTTPError(403) when the link is anonymous or the node not viewable
    """
    anonymous = has_anonymous_link(node, auth)
    formatter = 'surname'
    max_count = kwargs.get('max_count', 3)
    if 'user_ids' in kwargs:
        users = [
            User.load(user_id) for user_id in kwargs['user_ids']
            if user_id in node.visible_contributor_ids
        ]
    else:
        users = node.visible_contributors
    if anonymous or not node.can_view(auth):
        raise HTTPError(http.FORBIDDEN)
    contributors = []
    n_contributors = len(users)
    others_count = ''
    for index, user in enumerate(users[:max_count]):
        if index == max_count - 1 and len(users) > max_count:
            separator = ' &'
            # Bug fix: the overflow count was hard-coded as `n_contributors - 3`,
            # ignoring a caller-supplied max_count; honour the parameter.
            others_count = str(n_contributors - max_count)
        elif index == len(users) - 1:
            separator = ''
        elif index == len(users) - 2:
            separator = ' &'
        else:
            separator = ','
        contributor = user.get_summary(formatter)
        contributor['user_id'] = user._primary_key
        contributor['separator'] = separator
        contributors.append(contributor)
    return {
        'contributors': contributors,
        'others_count': others_count,
    }
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_contributors(auth, node, **kwargs):
    """Return serialized visible contributors, optionally capped by ?limit=N."""
    # An optional 'limit' query parameter caps how many contributors we return.
    limit = None
    if request.args.get('limit'):
        try:
            limit = int(request.args['limit'])
        except ValueError:
            raise HTTPError(http.BAD_REQUEST, data=dict(
                message_long='Invalid value for "limit": {}'.format(request.args['limit'])
            ))
    if has_anonymous_link(node, auth) or not node.can_view(auth):
        raise HTTPError(http.FORBIDDEN)
    # limit is either an int (slice the list) or None (serialize everyone).
    contribs = profile_utils.serialize_contributors(
        node.visible_contributors[0:limit],
        node=node,
    )
    if not limit:
        return {'contributors': contribs}
    # Include how many contributors were left out by the cap.
    return {
        'contributors': contribs,
        'more': max(0, len(node.visible_contributors) - limit),
    }
@must_be_logged_in
@must_be_valid_project
def get_contributors_from_parent(auth, node, **kwargs):
    """Serialize the visible contributors of *node*'s parent project."""
    parent = node.parent_node
    if not parent:
        raise HTTPError(http.BAD_REQUEST)
    if not node.can_view(auth):
        raise HTTPError(http.FORBIDDEN)
    serialized = []
    for contrib in parent.visible_contributors:
        serialized.append(profile_utils.add_contributor_json(contrib))
    return {'contributors': serialized}
@must_be_valid_project  # returns project
@must_be_contributor
@must_not_be_registration
def project_before_remove_contributor(auth, node, **kwargs):
    """Collect the confirmation prompts shown before a contributor is removed."""
    contributor = User.load(request.json.get('id'))
    removing_self = auth.user == contributor
    # Only admins may remove other people; anyone may remove themselves.
    if not node.has_permission(auth.user, 'admin') and not removing_self:
        raise HTTPError(http.FORBIDDEN)
    visible_ids = node.visible_contributor_ids
    if len(visible_ids) == 1 and visible_ids[0] == contributor._id:
        raise HTTPError(http.FORBIDDEN, data={
            'message_long': 'Must have at least one bibliographic contributor'
        })
    prompts = node.callback(
        'before_remove_contributor', removed=contributor,
    )
    if removing_self:
        prompts.insert(
            0,
            'Are you sure you want to remove yourself from this project?'
        )
    return {'prompts': prompts}
@must_be_valid_project  # returns project
@must_be_contributor
@must_not_be_registration
def project_removecontributor(auth, node, **kwargs):
    """Remove a single contributor from *node*, with redirect on self-removal."""
    contributor = User.load(request.json['id'])
    if contributor is None:
        raise HTTPError(http.BAD_REQUEST)
    removing_self = auth.user == contributor
    # Only admins may remove other people; anyone may remove themselves.
    if not node.has_permission(auth.user, 'admin') and not removing_self:
        raise HTTPError(http.FORBIDDEN)
    visible_ids = node.visible_contributor_ids
    if len(visible_ids) == 1 and visible_ids[0] == contributor._id:
        raise HTTPError(http.FORBIDDEN, data={
            'message_long': 'Must have at least one bibliographic contributor'
        })
    outcome = node.remove_contributor(
        contributor=contributor, auth=auth,
    )
    if not outcome:
        # Removal refused: it would leave the node without an admin.
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_long': (
                    '{0} must have at least one contributor with admin '
                    'rights'.format(
                        node.project_or_component.capitalize()
                    )
                )
            }
        )
    if removing_self:
        status.push_status_message('Removed self from project', kind='success', trust=False)
        return {'redirectUrl': web_url_for('dashboard')}
    status.push_status_message('Contributor removed', kind='success', trust=False)
    return {}
def deserialize_contributors(node, user_dicts, auth, validate=False):
    """View helper that returns a list of User objects from a list of
    serialized users (dicts). The users in the list may be registered or
    unregistered users.

    e.g. ``[{'id': 'abc123', 'registered': True, 'fullname': ..},
            {'id': None, 'registered': False, 'fullname'...},
            {'id': '123ab', 'registered': False, 'fullname': ...}]

    If a dict represents an unregistered user without an ID, creates a new
    unregistered User record.

    :param Node node: The node to add contributors to
    :param list(dict) user_dicts: List of serialized users in the format above.
    :param Auth auth:
    :param bool validate: Whether to validate and sanitize fields (if necessary)
    :returns: list of dicts with 'user', 'visible' and 'permissions' keys
    :raises ValidationValueError: when validate=True and a stripped name is empty
    :raises ValidationError: when validate=True and an email is invalid
    """
    # Add the registered contributors
    contribs = []
    for contrib_dict in user_dicts:
        fullname = contrib_dict['fullname']
        visible = contrib_dict['visible']
        email = contrib_dict.get('email')

        if validate is True:
            # Validate and sanitize inputs as needed. Email will raise error if invalid.
            # TODO Edge case bug: validation and saving are performed in same loop, so all in list
            # up to the invalid entry will be saved. (communicate to the user what needs to be retried)
            fullname = sanitize.strip_html(fullname)
            if not fullname:
                raise ValidationValueError('Full name field cannot be empty')
            if email:
                validate_email(email)  # Will raise a ValidationError if email invalid

        if contrib_dict['id']:
            # Known user: load by primary key.
            contributor = User.load(contrib_dict['id'])
        else:
            try:
                contributor = User.create_unregistered(
                    fullname=fullname,
                    email=email)
                contributor.save()
            except ValidationValueError:
                ## FIXME: This suppresses an exception if ID not found & new validation fails; get_user will return None
                contributor = get_user(email=email)

        # Add unclaimed record if necessary
        if (not contributor.is_registered
                and node._primary_key not in contributor.unclaimed_records):
            contributor.add_unclaimed_record(node=node, referrer=auth.user,
                                             given_name=fullname,
                                             email=email)
            contributor.save()
            # Fire the signal so listeners (e.g. finalize_invitation) can email
            # a claim link for the new unclaimed record.
            unreg_contributor_added.send(node, contributor=contributor,
                                         auth=auth)
        contribs.append({
            'user': contributor,
            'visible': visible,
            'permissions': expand_permissions(contrib_dict.get('permission'))
        })
    return contribs
@unreg_contributor_added.connect
def finalize_invitation(node, contributor, auth):
    """Signal listener: email a claim link when an unregistered contributor is added."""
    record = contributor.get_unclaimed_record(node._primary_key)
    invite_email = record['email']
    if invite_email:
        send_claim_email(invite_email, contributor, node, notify=True)
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_contributors_post(auth, node, **kwargs):
    """ Add contributors to a node.

    Expects JSON with 'users' (serialized contributor dicts) and 'node_ids'
    (child nodes that receive the same contributors).

    :raises: HTTPError(400) when either JSON key is missing
    :returns: (payload, 201) on success, or (error payload, 400) on validation failure
    """
    user_dicts = request.json.get('users')
    node_ids = request.json.get('node_ids')
    if user_dicts is None or node_ids is None:
        raise HTTPError(http.BAD_REQUEST)
    # Prepare input data for `Node::add_contributors`
    try:
        contribs = deserialize_contributors(node, user_dicts, auth=auth, validate=True)
    except ValidationError as e:
        return {'status': 400, 'message': e.message}, 400
    node.add_contributors(contributors=contribs, auth=auth)
    node.save()
    # Disconnect listener to avoid multiple invite emails. Bug fix: reconnect
    # in a finally block — previously the early `return` on a child's
    # ValidationError skipped the reconnect, leaving invite emails disabled
    # process-wide for all subsequent requests.
    unreg_contributor_added.disconnect(finalize_invitation)
    try:
        for child_id in node_ids:
            child = Node.load(child_id)
            # Only email unreg users once
            try:
                child_contribs = deserialize_contributors(
                    child, user_dicts, auth=auth, validate=True
                )
            except ValidationError as e:
                return {'status': 400, 'message': e.message}, 400
            child.add_contributors(contributors=child_contribs, auth=auth)
            child.save()
    finally:
        # Reconnect listeners
        unreg_contributor_added.connect(finalize_invitation)
    return {
        'status': 'success',
        'contributors': profile_utils.serialize_contributors(
            node.visible_contributors,
            node=node,
        )
    }, 201
@no_auto_transaction
@must_be_valid_project  # injects project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_manage_contributors(auth, node, **kwargs):
    """Reorder and remove contributors.

    :param Auth auth: Consolidated authorization
    :param-json list contributors: Ordered list of contributors represented as
        dictionaries of the form:
        {'id': <id>, 'permission': <One of 'read', 'write', 'admin'>}
    :raises: HTTPError(400) if contributors to be removed are not in list
        or if no admin users would remain after changes were applied
    """
    contributors = request.json.get('contributors')
    # Update permissions and order
    try:
        node.manage_contributors(contributors, auth=auth, save=True)
    except ValueError as error:
        raise HTTPError(http.BAD_REQUEST, data={'message_long': error.message})
    if not node.is_contributor(auth.user):
        # The caller removed themselves: alert, then send them to the node
        # page when it is public, otherwise to their dashboard.
        status.push_status_message(
            'You have removed yourself as a contributor from this project',
            kind='success',
            trust=False
        )
        return {'redirectUrl': node.url if node.is_public else web_url_for('dashboard')}
    if not node.has_permission(auth.user, ADMIN):
        # The caller gave up admin rights: alert but stay on the current page.
        status.push_status_message(
            'You have removed your administrative privileges for this project',
            kind='success',
            trust=False
        )
    # Else stay on current page
    return {}
@no_auto_transaction
@must_be_valid_project  # injects project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_remove_contributor(auth, **kwargs):
    """Remove a contributor from a list of nodes.

    :param Auth auth: Consolidated authorization
    :param-json str contributorID: id of the user to remove
    :param-json list nodeIDs: ids of the nodes to remove the user from
    :raises: HTTPError(400) if removal would leave a node without an admin
        (docstring previously copy-pasted from project_manage_contributors and
        described a 'contributors' list that this view does not accept)
    """
    contributor_id = request.json.get('contributorID')  # stray trailing ';' removed
    node_ids = request.json.get('nodeIDs')
    contributor = User.load(contributor_id)
    for node_id in node_ids:
        # Update permissions and order
        node = Node.load(node_id)
        try:
            node.remove_contributor(contributor, auth=auth)
        except ValueError as error:
            raise HTTPError(http.BAD_REQUEST, data={'message_long': error.message})
    # If user has removed herself from project, alert; redirect to user
    # dashboard if node is private, else node dashboard
    if not node.is_contributor(auth.user):
        status.push_status_message(
            'You have removed yourself as a contributor from this project',
            kind='success',
            trust=False
        )
        if node.is_public:
            return {'redirectUrl': node.url}
        return {'redirectUrl': web_url_for('dashboard')}
    # Else if user has revoked her admin permissions, alert and stay on
    # current page
    if not node.has_permission(auth.user, ADMIN):
        status.push_status_message(
            'You have removed your administrative privileges for this project',
            kind='success',
            trust=False
        )
    # Else stay on current page
    return {}
def get_timestamp():
    """Return the current Unix time truncated to whole seconds."""
    now = time.time()
    return int(now)
def throttle_period_expired(timestamp, throttle):
    """True when *timestamp* is unset or more than *throttle* seconds old."""
    if timestamp is None:
        return True
    return (get_timestamp() - timestamp) > throttle
def send_claim_registered_email(claimer, unreg_user, node, throttle=24 * 3600):
    """Ask the referrer to forward a claim link to *claimer*, an existing
    registered user who wants to take over *unreg_user*'s contributor record.

    :param User claimer: registered account making the claim
    :param User unreg_user: unregistered contributor being claimed
    :param Node node: project holding the unclaimed record
    :param int throttle: minimum seconds between successive claim emails
    :raises: HTTPError(400) when the throttle window has not expired
    """
    unclaimed_record = unreg_user.get_unclaimed_record(node._primary_key)
    # roll the valid token for each email, thus user cannot change email and approve a different email address
    timestamp = unclaimed_record.get('last_sent')
    if not throttle_period_expired(timestamp, throttle):
        raise HTTPError(400, data=dict(
            message_long='User account can only be claimed with an existing user once every 24 hours'
        ))

    unclaimed_record['token'] = generate_confirm_token()
    unclaimed_record['claimer_email'] = claimer.username
    # Persist the new token before building the claim URL from it.
    unreg_user.save()

    referrer = User.load(unclaimed_record['referrer_id'])
    claim_url = web_url_for(
        'claim_user_registered',
        uid=unreg_user._primary_key,
        pid=node._primary_key,
        token=unclaimed_record['token'],
        _external=True,
    )
    # Send mail to referrer, telling them to forward verification link to claimer
    mails.send_mail(
        referrer.username,
        mails.FORWARD_INVITE_REGISTERED,
        user=unreg_user,
        referrer=referrer,
        node=node,
        claim_url=claim_url,
        fullname=unclaimed_record['name'],
    )
    # Record the send time (and save) so the throttle above applies next call.
    unclaimed_record['last_sent'] = get_timestamp()
    unreg_user.save()

    # Send mail to claimer, telling them to wait for referrer
    mails.send_mail(
        claimer.username,
        mails.PENDING_VERIFICATION_REGISTERED,
        fullname=claimer.fullname,
        referrer=referrer,
        node=node,
    )
def send_claim_email(email, user, node, notify=True, throttle=24 * 3600):
    """Send an email for claiming a user account. Either sends to the given email
    or the referrer's email, depending on the email address provided.

    :param str email: The address given in the claim user form
    :param User user: The User record to claim.
    :param Node node: The node where the user claimed their account.
    :param bool notify: If True and an email is sent to the referrer, an email
        will also be sent to the invited user about their pending verification.
    :param int throttle: Time period (in seconds) after the referrer is
        emailed during which the referrer will not be emailed again.
    :returns: the address the claim/forward email was actually sent to
    :raises: HTTPError(400) when the referrer was emailed within the throttle window
    """
    claimer_email = email.lower().strip()

    unclaimed_record = user.get_unclaimed_record(node._primary_key)
    referrer = User.load(unclaimed_record['referrer_id'])
    claim_url = user.get_claim_url(node._primary_key, external=True)
    # If given email is the same provided by user, just send to that email
    if unclaimed_record.get('email') == claimer_email:
        mail_tpl = mails.INVITE
        to_addr = claimer_email
        unclaimed_record['claimer_email'] = claimer_email
        user.save()
    else:  # Otherwise have the referrer forward the email to the user
        # roll the valid token for each email, thus user cannot change email and approve a different email address
        timestamp = unclaimed_record.get('last_sent')
        if not throttle_period_expired(timestamp, throttle):
            raise HTTPError(400, data=dict(
                message_long='User account can only be claimed with an existing user once every 24 hours'
            ))
        unclaimed_record['last_sent'] = get_timestamp()
        unclaimed_record['token'] = generate_confirm_token()
        unclaimed_record['claimer_email'] = claimer_email
        user.save()
        # The token was re-rolled above, so rebuild the claim URL with the new token.
        claim_url = user.get_claim_url(node._primary_key, external=True)
        if notify:
            # Tell the invited user their claim is awaiting the referrer.
            pending_mail = mails.PENDING_VERIFICATION
            mails.send_mail(
                claimer_email,
                pending_mail,
                user=user,
                referrer=referrer,
                fullname=unclaimed_record['name'],
                node=node
            )
        mail_tpl = mails.FORWARD_INVITE
        to_addr = referrer.username
    # Send the invite (or the forwardable invite) with the claim link.
    mails.send_mail(
        to_addr,
        mail_tpl,
        user=user,
        referrer=referrer,
        node=node,
        claim_url=claim_url,
        email=claimer_email,
        fullname=unclaimed_record['name']
    )
    return to_addr
@contributor_added.connect
def notify_added_contributor(node, contributor, auth=None, throttle=None):
    """Signal listener: email a registered contributor when they are added to a node.

    Sends are throttled per (contributor, node) via
    contributor_added_email_records; *throttle* defaults to
    settings.CONTRIBUTOR_ADDED_EMAIL_THROTTLE.
    """
    throttle = throttle or settings.CONTRIBUTOR_ADDED_EMAIL_THROTTLE

    # Exclude forks and templates because the user forking/templating the project gets added
    # via 'add_contributor' but does not need to get notified.
    # Only email users for projects, or for components where they are not contributors on the parent node.
    if (contributor.is_registered and not node.template_node and not node.is_fork and
            (not node.parent_node or
                (node.parent_node and not node.parent_node.is_contributor(contributor)))):
        contributor_record = contributor.contributor_added_email_records.get(node._id, {})
        if contributor_record:
            timestamp = contributor_record.get('last_sent', None)
            if timestamp:
                # Still inside the throttle window: send nothing.
                if not throttle_period_expired(timestamp, throttle):
                    return
        else:
            # First notification for this node: create the tracking record.
            contributor.contributor_added_email_records[node._id] = {}

        mails.send_mail(
            contributor.username,
            mails.CONTRIBUTOR_ADDED,
            user=contributor,
            node=node,
            referrer_name=auth.user.fullname if auth else ''
        )

        # Remember when we sent it so the throttle above applies next time.
        contributor.contributor_added_email_records[node._id]['last_sent'] = get_timestamp()
        contributor.save()
def verify_claim_token(user, token, pid):
    """View helper that checks that a claim token for a given user and node ID
    is valid. If not valid, throws an error with custom error messages.
    """
    if user.verify_claim_token(token=token, project_id=pid):
        return True
    # Invalid token: a registered user means the record was already claimed.
    if user.is_registered:
        error_data = {
            'message_short': 'User has already been claimed.',
            'message_long': 'Please <a href="/login/">log in</a> to continue.'}
        raise HTTPError(400, data=error_data)
    return False
@collect_auth
@must_be_valid_project
def claim_user_registered(auth, node, **kwargs):
    """View that prompts user to enter their password in order to claim
    contributorship on a project.

    A user must be logged in.

    :param kwargs: route supplies 'uid' (unregistered user id), 'pid'
        (project id) and 'token' (claim token)
    :raises: HTTPError(400) when the logged-in user is already a contributor
        or the claim token is invalid
    """
    current_user = auth.user

    sign_out_url = web_url_for('auth_login', logout=True, next=request.url)
    if not current_user:
        return redirect(sign_out_url)
    # Logged in user should not be a contributor the project
    if node.is_contributor(current_user):
        logout_url = web_url_for('auth_logout', redirect_url=request.url)
        data = {
            'message_short': 'Already a contributor',
            'message_long': ('The logged-in user is already a contributor to this '
                'project. Would you like to <a href="{}">log out</a>?').format(logout_url)
        }
        raise HTTPError(http.BAD_REQUEST, data=data)
    uid, pid, token = kwargs['uid'], kwargs['pid'], kwargs['token']
    unreg_user = User.load(uid)
    if not verify_claim_token(unreg_user, token, pid=node._primary_key):
        raise HTTPError(http.BAD_REQUEST)

    # Store the unreg_user data on the session in case the user registers
    # a new account
    session.data['unreg_user'] = {
        'uid': uid, 'pid': pid, 'token': token
    }

    form = PasswordForm(request.form)
    if request.method == 'POST':
        if form.validate():
            if current_user.check_password(form.password.data):
                # Password confirmed: swap the unregistered contributor for the
                # logged-in account and send the user to the project page.
                node.replace_contributor(old=unreg_user, new=current_user)
                node.save()
                status.push_status_message(
                    'You are now a contributor to this project.',
                    kind='success')
                return redirect(node.url)
            else:
                status.push_status_message(language.LOGIN_FAILED, kind='warning', trust=True)
        else:
            forms.push_errors_to_status(form.errors)
    if is_json_request():
        form_ret = forms.utils.jsonify(form)
        user_ret = profile_utils.serialize_user(current_user, full=False)
    else:
        form_ret = form
        user_ret = current_user
    return {
        'form': form_ret,
        'user': user_ret,
        'signOutUrl': sign_out_url
    }
@user_registered.connect
def replace_unclaimed_user_with_registered(user):
    """Listens for the user_registered signal. If unreg_user is stored in the
    session, then the current user is trying to claim themselves as a contributor.
    Replaces the old, unregistered contributor with the newly registered
    account.
    """
    unreg_user_info = session.data.get('unreg_user')
    if not unreg_user_info:
        return
    unreg_user = User.load(unreg_user_info['uid'])
    node = Node.load(unreg_user_info['pid'])
    node.replace_contributor(old=unreg_user, new=user)
    node.save()
    status.push_status_message(
        'Successfully claimed contributor.', kind='success', trust=False)
@collect_auth
def claim_user_form(auth, **kwargs):
    """View for rendering the set password page for a claimed user.

    Must have ``token`` as a querystring argument.

    Renders the set password form, validates it, and sets the user's password.

    :param kwargs: route supplies 'uid' (unregistered user id) and 'pid' (project id)
    :raises: HTTPError(400) when no user exists for 'uid'
    """
    uid, pid = kwargs['uid'], kwargs['pid']
    token = request.form.get('token') or request.args.get('token')

    # If user is logged in, redirect to 're-enter password' page
    if auth.logged_in:
        return redirect(web_url_for('claim_user_registered',
            uid=uid, pid=pid, token=token))

    user = User.load(uid)  # The unregistered user
    # user ID is invalid. Unregistered user is not in database
    if not user:
        raise HTTPError(http.BAD_REQUEST)
    # If claim token not valid, redirect to registration page
    if not verify_claim_token(user, token, pid):
        return redirect(web_url_for('auth_login'))
    unclaimed_record = user.unclaimed_records[pid]
    # Prefill the form with the name stored on the unclaimed record.
    user.fullname = unclaimed_record['name']
    user.update_guessed_names()
    # The email can be the original referrer email if no claimer email has been specified.
    claimer_email = unclaimed_record.get('claimer_email') or unclaimed_record.get('email')
    form = SetEmailAndPasswordForm(request.form, token=token)
    if request.method == 'POST':
        if form.validate():
            username, password = claimer_email, form.password.data
            user.register(username=username, password=password)
            # Clear unclaimed records
            user.unclaimed_records = {}
            # One-time verification key used for the CAS hand-off below.
            user.verification_key = security.random_string(20)
            user.save()
            # Authenticate user and redirect to project page
            node = Node.load(pid)
            status.push_status_message(language.CLAIMED_CONTRIBUTOR.format(node=node),
                                       kind='success',
                                       trust=True)
            # Redirect to CAS and authenticate the user with a verification key.
            return redirect(cas.get_login_url(
                web_url_for('user_profile', _absolute=True),
                auto=True,
                username=user.username,
                verification_key=user.verification_key
            ))
        else:
            forms.push_errors_to_status(form.errors)
    return {
        'firstname': user.given_name,
        'email': claimer_email if claimer_email else '',
        'fullname': user.fullname,
        'form': forms.utils.jsonify(form) if is_json_request() else form,
    }
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def invite_contributor_post(node, **kwargs):
    """API view for inviting an unregistered user. Performs validation, but does not actually invite the user.

    Expects JSON arguments with 'fullname' (required) and email (not required).

    :returns: (error payload, 400) on validation failure, otherwise a dict with
        the serialized (possibly placeholder) contributor
    """
    fullname = request.json.get('fullname')
    email = request.json.get('email')
    # Bug fix: a missing 'fullname' key used to raise AttributeError on
    # None.strip() (an HTTP 500); fail cleanly with the existing 400 message.
    if fullname is None:
        return {'status': 400, 'message': 'Full name field cannot be empty'}, 400
    # Validate and sanitize inputs as needed. Email will raise error if invalid.
    fullname = sanitize.strip_html(fullname.strip())
    if email:
        email = email.lower().strip()
        try:
            validate_email(email)
        except ValidationError as e:
            return {'status': 400, 'message': e.message}, 400
    if not fullname:
        return {'status': 400, 'message': 'Full name field cannot be empty'}, 400

    # Check if email is in the database
    user = get_user(email=email)
    if user:
        if user.is_registered:
            msg = 'User is already in database. Please go back and try your search again.'
            return {'status': 400, 'message': msg}, 400
        elif node.is_contributor(user):
            msg = 'User with this email address is already a contributor to this project.'
            return {'status': 400, 'message': msg}, 400
        else:
            serialized = profile_utils.add_contributor_json(user)
            # use correct display name
            serialized['fullname'] = fullname
            serialized['email'] = email
    else:
        # Create a placeholder
        serialized = profile_utils.serialize_unregistered(fullname, email)
    return {'status': 'success', 'contributor': serialized}
@must_be_contributor_or_public
def claim_user_post(node, **kwargs):
    """View for claiming a user from the X-editable form on a project page.

    :param-json str pk: id of the unregistered contributor being claimed
    :param-json str value: (optional) email address typed into the form
    :param-json str claimerId: (optional) id of a logged-in, confirmed claimer
    :raises: HTTPError(400) when neither 'value' nor 'claimerId' is supplied
    :returns: dict with 'status', the 'email' used, and the unclaimed 'fullname'
    """
    reqdata = request.json
    # Unreg user
    user = User.load(reqdata['pk'])
    unclaimed_data = user.get_unclaimed_record(node._primary_key)

    # Submitted through X-editable
    if 'value' in reqdata:  # Submitted email address
        email = reqdata['value'].lower().strip()
        claimer = get_user(email=email)
        if claimer and claimer.is_registered:
            # Existing account: the referrer must forward the claim link.
            send_claim_registered_email(claimer=claimer, unreg_user=user,
                node=node)
        else:
            send_claim_email(email, user, node, notify=True)
    # TODO(sloria): Too many assumptions about the request data. Just use
    elif 'claimerId' in reqdata:  # User is logged in and confirmed identity
        claimer_id = reqdata['claimerId']
        claimer = User.load(claimer_id)
        send_claim_registered_email(claimer=claimer, unreg_user=user, node=node)
        email = claimer.username
    else:
        raise HTTPError(http.BAD_REQUEST)

    return {
        'status': 'success',
        'email': email,
        'fullname': unclaimed_data['name']
    }
remove stupid semicolon
# -*- coding: utf-8 -*-
import httplib as http
import time
from flask import request
from modularodm.exceptions import ValidationError, ValidationValueError
from framework import forms
from framework import status
from framework.auth import cas
from framework.auth import User, get_user
from framework.auth.core import generate_confirm_token
from framework.auth.decorators import collect_auth, must_be_logged_in
from framework.auth.forms import PasswordForm, SetEmailAndPasswordForm
from framework.auth.signals import user_registered
from framework.auth.utils import validate_email
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.sessions import session
from framework.transactions.handlers import no_auto_transaction
from website import mails
from website import language
from website import security
from website import settings
from website.models import Node
from website.profile import utils as profile_utils
from website.project.decorators import (must_have_permission, must_be_valid_project,
must_not_be_registration, must_be_contributor_or_public, must_be_contributor)
from website.project.model import has_anonymous_link
from website.project.signals import unreg_contributor_added, contributor_added
from website.util import web_url_for, is_json_request
from website.util.permissions import expand_permissions, ADMIN
from website.util import sanitize
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_node_contributors_abbrev(auth, node, **kwargs):
    """Return an abbreviated, surname-formatted contributor listing for *node*.

    :param Auth auth: Consolidated authorization
    :param Node node: project whose visible contributors are summarised
    :param kwargs: may contain 'user_ids' (restrict to those visible users) and
        'max_count' (names shown before the "& N others" suffix; default 3)
    :raises: HTTPError(403) when the link is anonymous or the node not viewable
    """
    anonymous = has_anonymous_link(node, auth)
    formatter = 'surname'
    max_count = kwargs.get('max_count', 3)
    if 'user_ids' in kwargs:
        users = [
            User.load(user_id) for user_id in kwargs['user_ids']
            if user_id in node.visible_contributor_ids
        ]
    else:
        users = node.visible_contributors
    if anonymous or not node.can_view(auth):
        raise HTTPError(http.FORBIDDEN)
    contributors = []
    n_contributors = len(users)
    others_count = ''
    for index, user in enumerate(users[:max_count]):
        if index == max_count - 1 and len(users) > max_count:
            separator = ' &'
            # Bug fix: the overflow count was hard-coded as `n_contributors - 3`,
            # ignoring a caller-supplied max_count; honour the parameter.
            others_count = str(n_contributors - max_count)
        elif index == len(users) - 1:
            separator = ''
        elif index == len(users) - 2:
            separator = ' &'
        else:
            separator = ','
        contributor = user.get_summary(formatter)
        contributor['user_id'] = user._primary_key
        contributor['separator'] = separator
        contributors.append(contributor)
    return {
        'contributors': contributors,
        'others_count': others_count,
    }
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_contributors(auth, node, **kwargs):
    """Return serialized visible contributors, optionally capped by ?limit=N."""
    # An optional 'limit' query parameter caps how many contributors we return.
    limit = None
    if request.args.get('limit'):
        try:
            limit = int(request.args['limit'])
        except ValueError:
            raise HTTPError(http.BAD_REQUEST, data=dict(
                message_long='Invalid value for "limit": {}'.format(request.args['limit'])
            ))
    if has_anonymous_link(node, auth) or not node.can_view(auth):
        raise HTTPError(http.FORBIDDEN)
    # limit is either an int (slice the list) or None (serialize everyone).
    contribs = profile_utils.serialize_contributors(
        node.visible_contributors[0:limit],
        node=node,
    )
    if not limit:
        return {'contributors': contribs}
    # Include how many contributors were left out by the cap.
    return {
        'contributors': contribs,
        'more': max(0, len(node.visible_contributors) - limit),
    }
@must_be_logged_in
@must_be_valid_project
def get_contributors_from_parent(auth, node, **kwargs):
    """Serialize the visible contributors of *node*'s parent project."""
    parent = node.parent_node
    if not parent:
        raise HTTPError(http.BAD_REQUEST)
    if not node.can_view(auth):
        raise HTTPError(http.FORBIDDEN)
    serialized = []
    for contrib in parent.visible_contributors:
        serialized.append(profile_utils.add_contributor_json(contrib))
    return {'contributors': serialized}
@must_be_valid_project  # returns project
@must_be_contributor
@must_not_be_registration
def project_before_remove_contributor(auth, node, **kwargs):
    """Return confirmation prompts to show before removing a contributor."""
    contributor = User.load(request.json.get('id'))
    removing_self = auth.user == contributor

    # Non-admins may only remove themselves.
    if not node.has_permission(auth.user, 'admin') and not removing_self:
        raise HTTPError(http.FORBIDDEN)

    # Refuse to remove the last bibliographic (visible) contributor.
    visible_ids = node.visible_contributor_ids
    if len(visible_ids) == 1 and visible_ids[0] == contributor._id:
        raise HTTPError(http.FORBIDDEN, data={
            'message_long': 'Must have at least one bibliographic contributor'
        })

    prompts = node.callback(
        'before_remove_contributor', removed=contributor,
    )
    if removing_self:
        prompts.insert(
            0,
            'Are you sure you want to remove yourself from this project?'
        )
    return {'prompts': prompts}
@must_be_valid_project  # returns project
@must_be_contributor
@must_not_be_registration
def project_removecontributor(auth, node, **kwargs):
    """Remove a single contributor from the node."""
    contributor = User.load(request.json['id'])
    if contributor is None:
        raise HTTPError(http.BAD_REQUEST)

    removing_self = auth.user == contributor
    # Non-admins may only remove themselves.
    if not node.has_permission(auth.user, 'admin') and not removing_self:
        raise HTTPError(http.FORBIDDEN)

    # Refuse to remove the last bibliographic (visible) contributor.
    visible_ids = node.visible_contributor_ids
    if len(visible_ids) == 1 and visible_ids[0] == contributor._id:
        raise HTTPError(http.FORBIDDEN, data={
            'message_long': 'Must have at least one bibliographic contributor'
        })

    removed = node.remove_contributor(
        contributor=contributor, auth=auth,
    )
    if not removed:
        # Removal was rejected, e.g. it would leave the node without admins.
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_long': (
                    '{0} must have at least one contributor with admin '
                    'rights'.format(
                        node.project_or_component.capitalize()
                    )
                )
            }
        )

    if removing_self:
        status.push_status_message('Removed self from project', kind='success', trust=False)
        return {'redirectUrl': web_url_for('dashboard')}
    status.push_status_message('Contributor removed', kind='success', trust=False)
    return {}
def deserialize_contributors(node, user_dicts, auth, validate=False):
    """View helper that returns a list of User objects from a list of
    serialized users (dicts). The users in the list may be registered or
    unregistered users.

    e.g. ``[{'id': 'abc123', 'registered': True, 'fullname': ..},
            {'id': None, 'registered': False, 'fullname'...},
            {'id': '123ab', 'registered': False, 'fullname': ...}]

    If a dict represents an unregistered user without an ID, creates a new
    unregistered User record.

    :param Node node: The node to add contributors to
    :param list(dict) user_dicts: List of serialized users in the format above.
    :param Auth auth:
    :param bool validate: Whether to validate and sanitize fields (if necessary)
    """
    # Add the registered contributors
    contribs = []
    for contrib_dict in user_dicts:
        fullname = contrib_dict['fullname']
        visible = contrib_dict['visible']
        email = contrib_dict.get('email')

        if validate is True:
            # Validate and sanitize inputs as needed. Email will raise error if invalid.
            # TODO Edge case bug: validation and saving are performed in same loop, so all in list
            # up to the invalid entry will be saved. (communicate to the user what needs to be retried)
            fullname = sanitize.strip_html(fullname)
            if not fullname:
                raise ValidationValueError('Full name field cannot be empty')
            if email:
                validate_email(email)  # Will raise a ValidationError if email invalid

        if contrib_dict['id']:
            # An existing user was selected; load it by primary key.
            contributor = User.load(contrib_dict['id'])
        else:
            # No ID means this is a new, unregistered invitee.
            try:
                contributor = User.create_unregistered(
                    fullname=fullname,
                    email=email)
                contributor.save()
            except ValidationValueError:
                ## FIXME: This suppresses an exception if ID not found & new validation fails; get_user will return None
                # NOTE(review): if get_user also misses, `contributor` is None
                # and the attribute access below raises AttributeError.
                contributor = get_user(email=email)

        # Add unclaimed record if necessary
        if (not contributor.is_registered
                and node._primary_key not in contributor.unclaimed_records):
            contributor.add_unclaimed_record(node=node, referrer=auth.user,
                                             given_name=fullname,
                                             email=email)
            contributor.save()
            # Fires listeners such as finalize_invitation (claim email).
            unreg_contributor_added.send(node, contributor=contributor,
                                         auth=auth)

        contribs.append({
            'user': contributor,
            'visible': visible,
            'permissions': expand_permissions(contrib_dict.get('permission'))
        })
    return contribs
@unreg_contributor_added.connect
def finalize_invitation(node, contributor, auth):
    """Signal handler: email a claim link to a newly added unregistered
    contributor, when an email address is on record."""
    record = contributor.get_unclaimed_record(node._primary_key)
    claim_address = record['email']
    if claim_address:
        send_claim_email(claim_address, contributor, node, notify=True)
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_contributors_post(auth, node, **kwargs):
    """Add contributors to a node and, optionally, to selected child nodes.

    :param-json list users: serialized contributor dicts (see
        ``deserialize_contributors``)
    :param-json list node_ids: IDs of child nodes to also add the
        contributors to
    :raises: HTTPError(400) if either JSON key is missing
    """
    user_dicts = request.json.get('users')
    node_ids = request.json.get('node_ids')
    if user_dicts is None or node_ids is None:
        raise HTTPError(http.BAD_REQUEST)

    # Prepare input data for `Node::add_contributors`
    try:
        contribs = deserialize_contributors(node, user_dicts, auth=auth, validate=True)
    except ValidationError as e:
        return {'status': 400, 'message': e.message}, 400
    node.add_contributors(contributors=contribs, auth=auth)
    node.save()

    # Disconnect listener so unregistered users are only emailed once.
    # Bug fix: the early ``return`` on a child's validation failure used to
    # skip the reconnect, leaving the signal disconnected for the rest of
    # the process; try/finally guarantees it is restored.
    unreg_contributor_added.disconnect(finalize_invitation)
    try:
        for child_id in node_ids:
            child = Node.load(child_id)
            try:
                child_contribs = deserialize_contributors(
                    child, user_dicts, auth=auth, validate=True
                )
            except ValidationError as e:
                return {'status': 400, 'message': e.message}, 400
            child.add_contributors(contributors=child_contribs, auth=auth)
            child.save()
    finally:
        # Reconnect listener
        unreg_contributor_added.connect(finalize_invitation)

    return {
        'status': 'success',
        'contributors': profile_utils.serialize_contributors(
            node.visible_contributors,
            node=node,
        )
    }, 201
@no_auto_transaction
@must_be_valid_project  # injects project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_manage_contributors(auth, node, **kwargs):
    """Reorder and remove contributors.

    :param Auth auth: Consolidated authorization
    :param-json list contributors: Ordered list of contributors represented as
        dictionaries of the form:
        {'id': <id>, 'permission': <One of 'read', 'write', 'admin'>}
    :raises: HTTPError(400) if contributors to be removed are not in list
        or if no admin users would remain after changes were applied
    """
    try:
        node.manage_contributors(
            request.json.get('contributors'), auth=auth, save=True)
    except ValueError as error:
        raise HTTPError(http.BAD_REQUEST, data={'message_long': error.message})

    if not node.is_contributor(auth.user):
        # The user removed herself: alert, then redirect to the node page if
        # it stays visible to her (public) or to her dashboard otherwise.
        status.push_status_message(
            'You have removed yourself as a contributor from this project',
            kind='success',
            trust=False
        )
        destination = node.url if node.is_public else web_url_for('dashboard')
        return {'redirectUrl': destination}

    if not node.has_permission(auth.user, ADMIN):
        # Admin rights were revoked but contributorship remains: alert and
        # stay on the current page.
        status.push_status_message(
            'You have removed your administrative privileges for this project',
            kind='success',
            trust=False
        )
    # Else stay on current page
    return {}
@no_auto_transaction
@must_be_valid_project  # injects project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_remove_contributor(auth, **kwargs):
    """Remove a contributor from a list of nodes.

    :param Auth auth: Consolidated authorization
    :param-json str contributorID: ID of the contributor to remove
    :param-json list nodeIDs: IDs of the nodes to remove the contributor from
    :raises: HTTPError(400) if the contributor cannot be loaded, no node IDs
        were supplied, or removal would leave a node without an admin
    """
    contributor_id = request.json.get('contributorID')
    node_ids = request.json.get('nodeIDs')
    contributor = User.load(contributor_id)
    # Robustness fix: fail fast with a 400 instead of a 500 when the payload
    # is unusable — an unknown contributor would crash remove_contributor,
    # and an empty/missing node list would leave `node` unbound below.
    if contributor is None or not node_ids:
        raise HTTPError(http.BAD_REQUEST)
    for node_id in node_ids:
        # Update permissions and order
        node = Node.load(node_id)
        try:
            node.remove_contributor(contributor, auth=auth)
        except ValueError as error:
            raise HTTPError(http.BAD_REQUEST, data={'message_long': error.message})
    # If user has removed herself from project, alert; redirect to user
    # dashboard if node is private, else node dashboard. (`node` is the last
    # node processed above.)
    if not node.is_contributor(auth.user):
        status.push_status_message(
            'You have removed yourself as a contributor from this project',
            kind='success',
            trust=False
        )
        if node.is_public:
            return {'redirectUrl': node.url}
        return {'redirectUrl': web_url_for('dashboard')}
    # Else if user has revoked her admin permissions, alert and stay on
    # current page
    if not node.has_permission(auth.user, ADMIN):
        status.push_status_message(
            'You have removed your administrative privileges for this project',
            kind='success',
            trust=False
        )
    # Else stay on current page
    return {}
def get_timestamp():
    """Return the current Unix time, truncated to whole seconds."""
    now = time.time()
    return int(now)
def throttle_period_expired(timestamp, throttle):
    """Return True when nothing was ever sent (``timestamp`` is None) or the
    throttle window of ``throttle`` seconds has fully elapsed."""
    if timestamp is None:
        return True
    return (get_timestamp() - timestamp) > throttle
def send_claim_registered_email(claimer, unreg_user, node, throttle=24 * 3600):
    """Start the claim flow for an already-registered claimer.

    Emails the referrer a verification link to forward to the claimer, and
    emails the claimer telling them to wait for the referrer.

    :param User claimer: registered user claiming the unregistered account
    :param User unreg_user: the unregistered contributor being claimed
    :param Node node: node on which the claim was made
    :param int throttle: minimum seconds between claim emails for this record
    :raises: HTTPError(400) when inside the throttle window
    """
    unclaimed_record = unreg_user.get_unclaimed_record(node._primary_key)

    # roll the valid token for each email, thus user cannot change email and approve a different email address
    if not throttle_period_expired(unclaimed_record.get('last_sent'), throttle):
        raise HTTPError(400, data=dict(
            message_long='User account can only be claimed with an existing user once every 24 hours'
        ))

    unclaimed_record['token'] = generate_confirm_token()
    unclaimed_record['claimer_email'] = claimer.username
    unreg_user.save()

    referrer = User.load(unclaimed_record['referrer_id'])
    claim_url = web_url_for(
        'claim_user_registered',
        uid=unreg_user._primary_key,
        pid=node._primary_key,
        token=unclaimed_record['token'],
        _external=True,
    )

    # Send mail to referrer, telling them to forward verification link to claimer
    mails.send_mail(
        referrer.username,
        mails.FORWARD_INVITE_REGISTERED,
        user=unreg_user,
        referrer=referrer,
        node=node,
        claim_url=claim_url,
        fullname=unclaimed_record['name'],
    )
    unclaimed_record['last_sent'] = get_timestamp()
    unreg_user.save()

    # Send mail to claimer, telling them to wait for referrer
    mails.send_mail(
        claimer.username,
        mails.PENDING_VERIFICATION_REGISTERED,
        fullname=claimer.fullname,
        referrer=referrer,
        node=node,
    )
def send_claim_email(email, user, node, notify=True, throttle=24 * 3600):
    """Send an email for claiming a user account. Either sends to the given email
    or the referrer's email, depending on the email address provided.

    :param str email: The address given in the claim user form
    :param User user: The User record to claim.
    :param Node node: The node where the user claimed their account.
    :param bool notify: If True and an email is sent to the referrer, an email
        will also be sent to the invited user about their pending verification.
    :param int throttle: Time period (in seconds) after the referrer is
        emailed during which the referrer will not be emailed again.
    :return str: the address the claim email was actually sent to.
    """
    claimer_email = email.lower().strip()

    unclaimed_record = user.get_unclaimed_record(node._primary_key)
    referrer = User.load(unclaimed_record['referrer_id'])
    claim_url = user.get_claim_url(node._primary_key, external=True)

    # If given email is the same provided by user, just send to that email
    if unclaimed_record.get('email') == claimer_email:
        mail_tpl = mails.INVITE
        to_addr = claimer_email
        unclaimed_record['claimer_email'] = claimer_email
        user.save()
    else:  # Otherwise have the referrer forward the email to the user
        # roll the valid token for each email, thus user cannot change email and approve a different email address
        timestamp = unclaimed_record.get('last_sent')
        if not throttle_period_expired(timestamp, throttle):
            raise HTTPError(400, data=dict(
                message_long='User account can only be claimed with an existing user once every 24 hours'
            ))
        unclaimed_record['last_sent'] = get_timestamp()
        unclaimed_record['token'] = generate_confirm_token()
        unclaimed_record['claimer_email'] = claimer_email
        user.save()
        # NOTE: the claim URL must be regenerated here — the token was just
        # rolled above, so the URL computed earlier embeds the stale token.
        claim_url = user.get_claim_url(node._primary_key, external=True)
        if notify:
            pending_mail = mails.PENDING_VERIFICATION
            mails.send_mail(
                claimer_email,
                pending_mail,
                user=user,
                referrer=referrer,
                fullname=unclaimed_record['name'],
                node=node
            )
        mail_tpl = mails.FORWARD_INVITE
        to_addr = referrer.username
    mails.send_mail(
        to_addr,
        mail_tpl,
        user=user,
        referrer=referrer,
        node=node,
        claim_url=claim_url,
        email=claimer_email,
        fullname=unclaimed_record['name']
    )
    return to_addr
@contributor_added.connect
def notify_added_contributor(node, contributor, auth=None, throttle=None):
    """Signal handler: email a registered user when they are added as a
    contributor, at most once per throttle period per node.

    :param Node node: node the contributor was added to
    :param User contributor: the user who was added
    :param Auth auth: authorization of the user who added them (may be None)
    :param int throttle: override of the per-node email throttle, in seconds
    """
    throttle = throttle or settings.CONTRIBUTOR_ADDED_EMAIL_THROTTLE
    # Exclude forks and templates because the user forking/templating the project gets added
    # via 'add_contributor' but does not need to get notified.
    # Only email users for projects, or for components where they are not contributors on the parent node.
    if (contributor.is_registered and not node.template_node and not node.is_fork and
            (not node.parent_node or
             (node.parent_node and not node.parent_node.is_contributor(contributor)))):
        contributor_record = contributor.contributor_added_email_records.get(node._id, {})
        if contributor_record:
            timestamp = contributor_record.get('last_sent', None)
            if timestamp:
                # Still inside the throttle window: skip the email entirely.
                if not throttle_period_expired(timestamp, throttle):
                    return
        else:
            # First notification for this node: create the tracking record.
            contributor.contributor_added_email_records[node._id] = {}

        mails.send_mail(
            contributor.username,
            mails.CONTRIBUTOR_ADDED,
            user=contributor,
            node=node,
            referrer_name=auth.user.fullname if auth else ''
        )
        # Record the send time so subsequent adds within the throttle window
        # do not re-email the contributor.
        contributor.contributor_added_email_records[node._id]['last_sent'] = get_timestamp()
        contributor.save()
def verify_claim_token(user, token, pid):
    """View helper that checks that a claim token for a given user and node ID
    is valid. If not valid, throws an error with custom error messages.
    """
    # Valid token: nothing more to check.
    if user.verify_claim_token(token=token, project_id=pid):
        return True
    # Invalid token on a still-unregistered user: let the caller decide.
    if not user.is_registered:
        return False
    # Invalid token because the account was already claimed: hard 400.
    raise HTTPError(400, data={
        'message_short': 'User has already been claimed.',
        'message_long': 'Please <a href="/login/">log in</a> to continue.'})
@collect_auth
@must_be_valid_project
def claim_user_registered(auth, node, **kwargs):
    """View that prompts user to enter their password in order to claim
    contributorship on a project.

    A user must be logged in.
    """
    current_user = auth.user

    sign_out_url = web_url_for('auth_login', logout=True, next=request.url)
    if not current_user:
        return redirect(sign_out_url)

    # Logged in user should not be a contributor the project
    if node.is_contributor(current_user):
        logout_url = web_url_for('auth_logout', redirect_url=request.url)
        data = {
            'message_short': 'Already a contributor',
            'message_long': ('The logged-in user is already a contributor to this '
                'project. Would you like to <a href="{}">log out</a>?').format(logout_url)
        }
        raise HTTPError(http.BAD_REQUEST, data=data)

    uid, pid, token = kwargs['uid'], kwargs['pid'], kwargs['token']
    unreg_user = User.load(uid)
    # verify_claim_token raises a 400 itself when the account was already
    # claimed; a plain False here means the token simply does not match.
    if not verify_claim_token(unreg_user, token, pid=node._primary_key):
        raise HTTPError(http.BAD_REQUEST)

    # Store the unreg_user data on the session in case the user registers
    # a new account
    session.data['unreg_user'] = {
        'uid': uid, 'pid': pid, 'token': token
    }

    form = PasswordForm(request.form)
    if request.method == 'POST':
        if form.validate():
            # Re-authenticate before swapping contributor records.
            if current_user.check_password(form.password.data):
                node.replace_contributor(old=unreg_user, new=current_user)
                node.save()
                status.push_status_message(
                    'You are now a contributor to this project.',
                    kind='success')
                return redirect(node.url)
            else:
                status.push_status_message(language.LOGIN_FAILED, kind='warning', trust=True)
        else:
            forms.push_errors_to_status(form.errors)

    # GET, or POST that fell through: render the password form.
    if is_json_request():
        form_ret = forms.utils.jsonify(form)
        user_ret = profile_utils.serialize_user(current_user, full=False)
    else:
        form_ret = form
        user_ret = current_user
    return {
        'form': form_ret,
        'user': user_ret,
        'signOutUrl': sign_out_url
    }
@user_registered.connect
def replace_unclaimed_user_with_registered(user):
    """Listens for the user_registered signal. If unreg_user is stored in the
    session, then the current user is trying to claim themselves as a contributor.
    Replaces the old, unregistered contributor with the newly registered
    account.
    """
    info = session.data.get('unreg_user')
    if not info:
        return
    old_contributor = User.load(info['uid'])
    node = Node.load(info['pid'])
    node.replace_contributor(old=old_contributor, new=user)
    node.save()
    status.push_status_message(
        'Successfully claimed contributor.', kind='success', trust=False)
@collect_auth
def claim_user_form(auth, **kwargs):
    """View for rendering the set password page for a claimed user.

    Must have ``token`` as a querystring argument.

    Renders the set password form, validates it, and sets the user's password.
    """
    uid, pid = kwargs['uid'], kwargs['pid']
    token = request.form.get('token') or request.args.get('token')

    # If user is logged in, redirect to 're-enter password' page
    if auth.logged_in:
        return redirect(web_url_for('claim_user_registered',
            uid=uid, pid=pid, token=token))

    user = User.load(uid)  # The unregistered user
    # user ID is invalid. Unregistered user is not in database
    if not user:
        raise HTTPError(http.BAD_REQUEST)
    # If claim token not valid, redirect to registration page
    if not verify_claim_token(user, token, pid):
        return redirect(web_url_for('auth_login'))

    unclaimed_record = user.unclaimed_records[pid]
    user.fullname = unclaimed_record['name']
    user.update_guessed_names()
    # The email can be the original referrer email if no claimer email has been specified.
    claimer_email = unclaimed_record.get('claimer_email') or unclaimed_record.get('email')

    form = SetEmailAndPasswordForm(request.form, token=token)
    if request.method == 'POST':
        if form.validate():
            username, password = claimer_email, form.password.data
            user.register(username=username, password=password)
            # Clear unclaimed records
            user.unclaimed_records = {}
            # Fresh verification key lets CAS authenticate this user on the
            # redirect below without a password round-trip.
            user.verification_key = security.random_string(20)
            user.save()
            # Authenticate user and redirect to project page
            node = Node.load(pid)
            status.push_status_message(language.CLAIMED_CONTRIBUTOR.format(node=node),
                kind='success',
                trust=True)
            # Redirect to CAS and authenticate the user with a verification key.
            return redirect(cas.get_login_url(
                web_url_for('user_profile', _absolute=True),
                auto=True,
                username=user.username,
                verification_key=user.verification_key
            ))
        else:
            forms.push_errors_to_status(form.errors)

    return {
        'firstname': user.given_name,
        'email': claimer_email if claimer_email else '',
        'fullname': user.fullname,
        'form': forms.utils.jsonify(form) if is_json_request() else form,
    }
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def invite_contributor_post(node, **kwargs):
    """API view for inviting an unregistered user. Performs validation, but does not actually invite the user.

    Expects JSON arguments with 'fullname' (required) and email (not required).

    :return: serialized existing-unregistered-user or placeholder dict on
        success; otherwise a ``(payload, 400)`` tuple describing the failure
    """
    # Robustness fix: 'fullname' may be absent from the payload, in which
    # case .get() returns None and the original .strip() call crashed with a
    # 500; an empty name now falls through to the clean 400 below.
    fullname = (request.json.get('fullname') or '').strip()
    email = request.json.get('email')

    # Validate and sanitize inputs as needed. Email will raise error if invalid.
    fullname = sanitize.strip_html(fullname)
    if email:
        email = email.lower().strip()
        try:
            validate_email(email)
        except ValidationError as e:
            return {'status': 400, 'message': e.message}, 400
    if not fullname:
        return {'status': 400, 'message': 'Full name field cannot be empty'}, 400

    # Check if email is in the database
    user = get_user(email=email)
    if user:
        if user.is_registered:
            msg = 'User is already in database. Please go back and try your search again.'
            return {'status': 400, 'message': msg}, 400
        elif node.is_contributor(user):
            msg = 'User with this email address is already a contributor to this project.'
            return {'status': 400, 'message': msg}, 400
        else:
            serialized = profile_utils.add_contributor_json(user)
            # use correct display name
            serialized['fullname'] = fullname
            serialized['email'] = email
    else:
        # Create a placeholder
        serialized = profile_utils.serialize_unregistered(fullname, email)
    return {'status': 'success', 'contributor': serialized}
@must_be_contributor_or_public
def claim_user_post(node, **kwargs):
    """View for claiming a user from the X-editable form on a project page.
    """
    reqdata = request.json
    # The unregistered contributor being claimed.
    unreg_user = User.load(reqdata['pk'])
    unclaimed_data = unreg_user.get_unclaimed_record(node._primary_key)

    if 'value' in reqdata:
        # An email address was submitted through X-editable.
        email = reqdata['value'].lower().strip()
        claimer = get_user(email=email)
        if claimer and claimer.is_registered:
            send_claim_registered_email(claimer=claimer, unreg_user=unreg_user,
                                        node=node)
        else:
            send_claim_email(email, unreg_user, node, notify=True)
    # TODO(sloria): Too many assumptions about the request data. Just use
    elif 'claimerId' in reqdata:  # User is logged in and confirmed identity
        claimer = User.load(reqdata['claimerId'])
        send_claim_registered_email(claimer=claimer, unreg_user=unreg_user, node=node)
        email = claimer.username
    else:
        raise HTTPError(http.BAD_REQUEST)

    return {
        'status': 'success',
        'email': email,
        'fullname': unclaimed_data['name']
    }
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""assin2 dataset."""
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.text.assin2.assin2_utils import parse_xml_string
# Landing page for the ASSIN 2 shared task; also interpolated into
# _DESCRIPTION below.
_HOMEPAGE = 'https://sites.google.com/view/assin2/english'

# pylint: disable=line-too-long
# Markdown description shown in the TFDS catalog. NOTE: this is an f-string,
# so {_HOMEPAGE} is substituted at import time.
_DESCRIPTION = f"""\
## Contextualization
ASSIN 2 is the second edition of the Avaliação de Similaridade Semântica e
Inferência Textual (Evaluating Semantic Similarity and Textual Entailment),
and was a workshop collocated with [STIL 2019](http://www.google.com/url?q=http%3A%2F%2Fcomissoes.sbc.org.br%2Fce-pln%2Fstil2019%2F&sa=D&sntz=1&usg=AFQjCNHN8DosAsJ-gd48TfkXFX5YD6xM7g). It follows the [first edition of ASSIN](http://www.google.com/url?q=http%3A%2F%2Fpropor2016.di.fc.ul.pt%2F%3Fpage_id%3D381&sa=D&sntz=1&usg=AFQjCNHV7ySeNzH4k6MWKBLqO9yUkqiUqw),
proposing a new shared task with new data.
The workshop evaluated systems that assess two types of relations between
two sentences: Semantic Textual Similarity and Textual Entailment.
Semantic Textual Similarity consists of quantifying the level of semantic
equivalence between sentences, while Textual Entailment Recognition consists of
classifying whether the first sentence entails the second.
## Data
The corpus used in ASSIN 2 is composed of rather simple sentences. Following
the procedures of SemEval 2014 Task 1, we tried to remove from the corpus named
entities and indirect speech, and tried to have all verbs in the present tense.
The [annotation instructions](https://drive.google.com/open?id=1aUPhywEHD0r_pxPiTqZwS0fRj-1Xda2w)
given to annotators are available (in Portuguese).
The training and validation data are composed, respectively, of 6,500 and 500
sentence pairs in Brazilian Portuguese, annotated for entailment and
semantic similarity. Semantic similarity values range from 1 to 5, and text
entailment classes are either entailment or none. The test data are composed of
approximately 3,000 sentence pairs with the same annotation. All data were
manually annotated.
## Evaluation
Evaluation
The evaluation of submissions to ASSIN 2 was with the same metrics as the first
ASSIN, with the F1 of precision and recall as the main metric for text
entailment and Pearson correlation for semantic similarity.
The [evaluation scripts](https://github.com/erickrf/assin) are the same as in
the last edition.
PS.: Description is extracted from [official homepage]({_HOMEPAGE}).
"""
# pylint: disable=line-too-longm anomalous-backslash-in-string
_CITATION = """
@inproceedings{DBLP:conf/propor/RealFO20,
author = {Livy Real and
Erick Fonseca and
Hugo Gon{\c{c}}alo Oliveira},
editor = {Paulo Quaresma and
Renata Vieira and
Sandra M. Alu{\'{\i}}sio and
Helena Moniz and
Fernando Batista and
Teresa Gon{\c{c}}alves},
title = {The {ASSIN} 2 Shared Task: {A} Quick Overview},
booktitle = {Computational Processing of the Portuguese Language - 14th International
Conference, {PROPOR} 2020, Evora, Portugal, March 2-4, 2020, Proceedings},
series = {Lecture Notes in Computer Science},
volume = {12037},
pages = {406--412},
publisher = {Springer},
year = {2020},
url = {https://doi.org/10.1007/978-3-030-41505-1\_39},
doi = {10.1007/978-3-030-41505-1\_39},
timestamp = {Tue, 03 Mar 2020 09:40:18 +0100},
biburl = {https://dblp.org/rec/conf/propor/RealFO20.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
# Google Drive direct-download links for each split, as distributed by the
# task organizers. Keys match the split names returned by _split_generators.
_DOWNLOAD_URLS = {
    'train':
        'https://drive.google.com/u/0/uc?id=1Q9j1a83CuKzsHCGaNulSkNxBm7Dkn7Ln&export=download',
    'validation':
        'https://drive.google.com/u/0/uc?id=1kb7xq6Mb3eaqe9cOAo70BaG9ypwkIqEU&export=download',
    'test':
        'https://drive.google.com/u/0/uc?id=1J3FpQaHxpM-FDfBUyooh-sZF-B-bM_lU&export=download',
}
class Assin2(tfds.core.GeneratorBasedBuilder):
  """DatasetBuilder for assin2 dataset."""

  VERSION = tfds.core.Version('1.0.0')
  RELEASE_NOTES = {
      '1.0.0': 'Initial release.',
  }

  def _info(self) -> tfds.core.DatasetInfo:
    """Returns the dataset metadata."""
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            # Sentence pair: per _DESCRIPTION, the task asks whether the
            # first sentence ('text') entails the second ('hypothesis').
            'text':
                tfds.features.Text(),
            'hypothesis':
                tfds.features.Text(),
            # Pair identifier from the source XML (also used as example key).
            'id':
                tf.int32,
            'entailment':
                tfds.features.ClassLabel(names=['None', 'Entailment']),
            # Semantic similarity score; _DESCRIPTION states values range
            # from 1 to 5.
            'similarity':
                tf.float32
        }),
        # No single (input, label) pair: the task has two targets
        # (entailment class and similarity score).
        supervised_keys=None,
        homepage=_HOMEPAGE,
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager: tfds.download.DownloadManager):
    """Returns SplitGenerators."""
    # One XML file per split, fetched from _DOWNLOAD_URLS.
    path = dl_manager.download_and_extract(_DOWNLOAD_URLS)
    return {
        'train': self._generate_examples(path['train']),
        'validation': self._generate_examples(path['validation']),
        'test': self._generate_examples(path['test'])
    }

  def _generate_examples(self, path):
    """Yields examples."""
    with tf.io.gfile.GFile(path) as f:
      pairs = parse_xml_string(f.read())
    # Keyed by the pair id, which is unique within a split.
    for pair in pairs:
      yield pair.id, {
          'text': pair.text,
          'hypothesis': pair.hypothesis,
          'id': pair.id,
          'entailment': pair.entailment,
          'similarity': pair.similarity
      }
Make the citation a raw string and correct the URLs.
PiperOrigin-RevId: 420228486
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""assin2 dataset."""
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.text.assin2.assin2_utils import parse_xml_string
# Landing page for the ASSIN 2 shared task; also interpolated into
# _DESCRIPTION below.
_HOMEPAGE = 'https://sites.google.com/view/assin2/english'

# pylint: disable=line-too-long
# Markdown description shown in the TFDS catalog. NOTE: this is an f-string,
# so {_HOMEPAGE} is substituted at import time.
_DESCRIPTION = f"""\
## Contextualization
ASSIN 2 is the second edition of the Avaliação de Similaridade Semântica e
Inferência Textual (Evaluating Semantic Similarity and Textual Entailment),
and was a workshop collocated with [STIL 2019](http://www.google.com/url?q=http%3A%2F%2Fcomissoes.sbc.org.br%2Fce-pln%2Fstil2019%2F&sa=D&sntz=1&usg=AFQjCNHN8DosAsJ-gd48TfkXFX5YD6xM7g). It follows the [first edition of ASSIN](http://www.google.com/url?q=http%3A%2F%2Fpropor2016.di.fc.ul.pt%2F%3Fpage_id%3D381&sa=D&sntz=1&usg=AFQjCNHV7ySeNzH4k6MWKBLqO9yUkqiUqw),
proposing a new shared task with new data.
The workshop evaluated systems that assess two types of relations between
two sentences: Semantic Textual Similarity and Textual Entailment.
Semantic Textual Similarity consists of quantifying the level of semantic
equivalence between sentences, while Textual Entailment Recognition consists of
classifying whether the first sentence entails the second.
## Data
The corpus used in ASSIN 2 is composed of rather simple sentences. Following
the procedures of SemEval 2014 Task 1, we tried to remove from the corpus named
entities and indirect speech, and tried to have all verbs in the present tense.
The [annotation instructions](https://drive.google.com/open?id=1aUPhywEHD0r_pxPiTqZwS0fRj-1Xda2w)
given to annotators are available (in Portuguese).
The training and validation data are composed, respectively, of 6,500 and 500
sentence pairs in Brazilian Portuguese, annotated for entailment and
semantic similarity. Semantic similarity values range from 1 to 5, and text
entailment classes are either entailment or none. The test data are composed of
approximately 3,000 sentence pairs with the same annotation. All data were
manually annotated.
## Evaluation
Evaluation
The evaluation of submissions to ASSIN 2 was with the same metrics as the first
ASSIN, with the F1 of precision and recall as the main metric for text
entailment and Pearson correlation for semantic similarity.
The [evaluation scripts](https://github.com/erickrf/assin) are the same as in
the last edition.
PS.: Description is extracted from [official homepage]({_HOMEPAGE}).
"""
# pylint: disable=line-too-long
# Raw string: the BibTeX entry contains backslash sequences ({\c{c}}, {\'{\i}})
# that must not be treated as Python escapes.
_CITATION = r"""
@inproceedings{DBLP:conf/propor/RealFO20,
author = {Livy Real and
Erick Fonseca and
Hugo Gon{\c{c}}alo Oliveira},
editor = {Paulo Quaresma and
Renata Vieira and
Sandra M. Alu{\'{\i}}sio and
Helena Moniz and
Fernando Batista and
Teresa Gon{\c{c}}alves},
title = {The {ASSIN} 2 Shared Task: {A} Quick Overview},
booktitle = {Computational Processing of the Portuguese Language - 14th International
Conference, {PROPOR} 2020, Evora, Portugal, March 2-4, 2020, Proceedings},
series = {Lecture Notes in Computer Science},
volume = {12037},
pages = {406--412},
publisher = {Springer},
year = {2020},
url = {https://doi.org/10.1007/978-3-030-41505-1_39},
doi = {10.1007/978-3-030-41505-1_39},
timestamp = {Tue, 03 Mar 2020 09:40:18 +0100},
biburl = {https://dblp.org/rec/conf/propor/RealFO20.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
# Google Drive direct-download links for each split, as distributed by the
# task organizers. Keys match the split names returned by _split_generators.
_DOWNLOAD_URLS = {
    'train':
        'https://drive.google.com/u/0/uc?id=1Q9j1a83CuKzsHCGaNulSkNxBm7Dkn7Ln&export=download',
    'validation':
        'https://drive.google.com/u/0/uc?id=1kb7xq6Mb3eaqe9cOAo70BaG9ypwkIqEU&export=download',
    'test':
        'https://drive.google.com/u/0/uc?id=1J3FpQaHxpM-FDfBUyooh-sZF-B-bM_lU&export=download',
}
class Assin2(tfds.core.GeneratorBasedBuilder):
  """DatasetBuilder for assin2 dataset."""

  VERSION = tfds.core.Version('1.0.0')
  RELEASE_NOTES = {
      '1.0.0': 'Initial release.',
  }

  def _info(self) -> tfds.core.DatasetInfo:
    """Returns the dataset metadata."""
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            # Sentence pair: per _DESCRIPTION, the task asks whether the
            # first sentence ('text') entails the second ('hypothesis').
            'text':
                tfds.features.Text(),
            'hypothesis':
                tfds.features.Text(),
            # Pair identifier from the source XML (also used as example key).
            'id':
                tf.int32,
            'entailment':
                tfds.features.ClassLabel(names=['None', 'Entailment']),
            # Semantic similarity score; _DESCRIPTION states values range
            # from 1 to 5.
            'similarity':
                tf.float32
        }),
        # No single (input, label) pair: the task has two targets
        # (entailment class and similarity score).
        supervised_keys=None,
        homepage=_HOMEPAGE,
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager: tfds.download.DownloadManager):
    """Returns SplitGenerators."""
    # One XML file per split, fetched from _DOWNLOAD_URLS.
    path = dl_manager.download_and_extract(_DOWNLOAD_URLS)
    return {
        'train': self._generate_examples(path['train']),
        'validation': self._generate_examples(path['validation']),
        'test': self._generate_examples(path['test'])
    }

  def _generate_examples(self, path):
    """Yields examples."""
    with tf.io.gfile.GFile(path) as f:
      pairs = parse_xml_string(f.read())
    # Keyed by the pair id, which is unique within a split.
    for pair in pairs:
      yield pair.id, {
          'text': pair.text,
          'hypothesis': pair.hypothesis,
          'id': pair.id,
          'entailment': pair.entailment,
          'similarity': pair.similarity
      }
|
from django.test import TestCase
from components.base.merchandise.music.factories import (AlbumFactory,
BaseFactory, EditionFactory, SingleFactory)
class TestAlbums(TestCase):
    """Factory-based sanity tests for album objects."""

    def test_album_factory(self):
        # NOTE(review): `Album` is never imported in this module, so this
        # assertion will raise NameError — a models import is presumably
        # missing; confirm the correct module path.
        album = AlbumFactory()
        assert isinstance(album, Album)

    def test_album_creation(self):
        # The factory seeds romanized_name with the word 'album'.
        album = AlbumFactory()
        assert 'album' in album.romanized_name
class TestSingles(TestCase):
    """Factory-based sanity tests for single objects."""

    def test_single_factory(self):
        # NOTE(review): `Single` is never imported in this module, so this
        # assertion will raise NameError — confirm the missing models import.
        single = SingleFactory()
        assert isinstance(single, Single)

    def test_single_creation(self):
        # The factory seeds romanized_name with the word 'single'.
        single = SingleFactory()
        assert 'single' in single.romanized_name
Add identifier tests for albums and singles, and correct the factories import path.
from django.test import TestCase
from components.merchandise.music.factories import (AlbumFactory,
BaseFactory, SingleFactory)
class TestAlbums(TestCase):
    """Factory-based sanity tests for album objects."""

    def test_album_factory(self):
        # NOTE(review): `Album` is never imported in this module, so this
        # assertion will raise NameError — confirm the missing models import.
        album = AlbumFactory()
        assert isinstance(album, Album)

    def test_album_creation(self):
        # The factory seeds romanized_name with the word 'album'.
        album = AlbumFactory()
        assert 'album' in album.romanized_name

    def test_album_identifier(self):
        # Albums self-describe their merchandise kind via `identifier`.
        album = AlbumFactory()
        assert album.identifier == 'album'
class TestSingles(TestCase):
    """Factory-based sanity tests for single objects."""

    def test_single_factory(self):
        # NOTE(review): `Single` is never imported in this module, so this
        # assertion will raise NameError — confirm the missing models import.
        single = SingleFactory()
        assert isinstance(single, Single)

    def test_single_creation(self):
        # The factory seeds romanized_name with the word 'single'.
        single = SingleFactory()
        assert 'single' in single.romanized_name

    def test_single_identifier(self):
        # Singles self-describe their merchandise kind via `identifier`.
        single = SingleFactory()
        assert single.identifier == 'single'
|
"""Provides interface for activation-functions. Arranged in alphanumeric order."""
import tensorflow as tf
def crelu(input_):
    """
    Element-wise concatenated rectified-linear-unit (CReLU) activation.
    # Parameters
    input_ (tensor): The tensor to be CReLU'd.
    # Returns
    The resulting tensor.
    """
    activated = tf.nn.crelu(input_)
    return activated
def elu(input_):
    """
    Element-wise exponential-linear-unit (ELU) activation.
    # Parameters
    input_ (tensor): The tensor to be ELU'd.
    # Returns
    The resulting tensor.
    """
    activated = tf.nn.elu(input_)
    return activated
def prelu(input_, looks_linear=False):
    """
    Applies the parametric linear-unit (PReLU) on the input, element-wise:
        PReLU(x) = max(0, x) + alpha * min(0, x)
    with a trainable per-channel (last-axis) `alpha`.
    # Parameters
    input_ (tensor): The tensor to be PReLU'd.
    looks_linear (bool): Whether `alpha` is initialized to 1 or not.
    See https://arxiv.org/pdf/1702.08591.pdf for details.
    # Returns
    The resulting tensor.
    """
    # BUG FIX: tf.constant's `shape` must be a sequence of dimensions; the
    # original passed the bare int `as_list()[-1]`. Slice with [-1:] so alpha
    # is a per-channel vector that broadcasts over the leading axes.
    alpha_shape = input_.shape.as_list()[-1:]
    init = 1.0 if looks_linear else 0.0
    alpha = tf.Variable(tf.constant(init, dtype=tf.float32, shape=alpha_shape))
    return tf.maximum(0.0, input_) + alpha * tf.minimum(0.0, input_)
def relu(input_):
    """
    Element-wise rectified-linear-unit (ReLU) activation.
    # Parameters
    input_ (tensor): The tensor to be ReLU'd.
    # Returns
    The resulting tensor.
    """
    activated = tf.nn.relu(input_)
    return activated
def sigmoid(input_):
    """
    Element-wise sigmoid activation.
    # Parameters
    input_ (tensor): The tensor to be sigmoid'd.
    # Returns
    The resulting tensor.
    """
    activated = tf.nn.sigmoid(input_)
    return activated
def softmax(input_):
    """
    Softmax activation over the input.
    # Parameters
    input_ (tensor): The tensor to be softmax'd.
    # Returns
    The resulting tensor.
    """
    activated = tf.nn.softmax(input_)
    return activated
def tanh(input_):
    """
    Hyperbolic-tangent activation over the input.
    # Parameters
    input_ (tensor): The tensor to be tanh'd.
    # Returns
    The resulting tensor.
    """
    activated = tf.tanh(input_)
    return activated
Edit docstrings to document each activation function's mathematical definition; initialize PReLU's alpha to 0.25 (as in the original paper) with a per-feature shape.
"""Provides interface for activation-functions. Arranged in alphanumeric order."""
import tensorflow as tf
def crelu(input_):
    """
    Element-wise concatenated rectified-linear-unit (CReLU) activation:
    return [max(0, x), max(0, -x)]
    # Parameters
    input_ (tensor): The tensor to be CReLU'd.
    # Returns
    The resulting tensor.
    """
    activated = tf.nn.crelu(input_)
    return activated
def elu(input_):
    """
    Element-wise exponential-linear-unit (ELU) activation:
        ELU(x) = exp(x) - 1  if x < 0
               = x           otherwise
    # Parameters
    input_ (tensor): The tensor to be ELU'd.
    # Returns
    The resulting tensor.
    """
    activated = tf.nn.elu(input_)
    return activated
def prelu(input_, looks_linear=False):
    """
    Element-wise parametric linear-unit (PReLU) activation:
        PReLU(x) = alpha * x  if x < 0
                 = x          otherwise
    # Parameters
    input_ (tensor): The tensor to be PReLU'd.
    looks_linear (bool): If True, `alpha` starts at 1 (see
        https://arxiv.org/pdf/1702.08591.pdf); otherwise at 0.25, as in the
        original PReLU paper.
    # Returns
    The resulting tensor.
    """
    # One trainable alpha per non-batch element.
    alpha_shape = input_.shape.as_list()[1:]
    initial = 1 if looks_linear else 0.25
    alpha = tf.Variable(
        tf.constant(initial, dtype=tf.float32, shape=alpha_shape))
    return tf.maximum(0.0, input_) + alpha * tf.minimum(0.0, input_)
def relu(input_):
    """
    Element-wise rectified-linear-unit (ReLU) activation:
    return max(0, x)
    # Parameters
    input_ (tensor): The tensor to be ReLU'd.
    # Returns
    The resulting tensor.
    """
    activated = tf.nn.relu(input_)
    return activated
def sigmoid(input_):
    """
    Element-wise sigmoid activation:
    return 1 / (1 + exp(-x))
    # Parameters
    input_ (tensor): The tensor to be sigmoid'd.
    # Returns
    The resulting tensor.
    """
    activated = tf.nn.sigmoid(input_)
    return activated
def softmax(input_):
    """
    Softmax activation over the input:
    return e^x / sum(e^x)
    # Parameters
    input_ (tensor): The tensor to be softmax'd.
    # Returns
    The resulting tensor.
    """
    activated = tf.nn.softmax(input_)
    return activated
def tanh(input_):
    """
    Hyperbolic-tangent activation over the input:
    return tanh(x)
    # Parameters
    input_ (tensor): The tensor to be tanh'd.
    # Returns
    The resulting tensor.
    """
    activated = tf.tanh(input_)
    return activated
|
from __future__ import print_function
from collections import OrderedDict, MutableMapping
import os
import logging
import decorator
import stat
import pandas as pd
from errno import ENOENT, EIO
from fuse import Operations, FuseOSError
from gcsfs import GCSFileSystem, core
from pwd import getpwnam
from grp import getgrnam
import time
from threading import Lock
import cProfile
import atexit
# Module-wide profiler: every @_tracemethod call is recorded into `prof`,
# and the accumulated stats are dumped when the interpreter exits.
prof = cProfile.Profile()
logger = logging.getLogger(__name__)


def dump():
    """Persist accumulated profiler stats at interpreter exit."""
    # NOTE(review): hard-coded, user-specific output path — confirm before
    # deploying anywhere other than this particular machine.
    prof.dump_stats('/home/ubuntu/out.prof')


atexit.register(dump)
@decorator.decorator
def _tracemethod(f, self, *args, **kwargs):
    # Log the call with its arguments, profile it, and delegate to the
    # wrapped method. (`decorator.decorator` preserves the original
    # function's signature for FUSE's introspection.)
    logger.debug("%s(args=%s, kwargs=%s)", f.__name__, args, kwargs)
    prof.enable()
    out = f(self, *args, **kwargs)
    prof.disable()
    return out
def str_to_time(s):
    """Convert a timestamp string to float seconds since the Unix epoch."""
    stamp = pd.to_datetime(s)
    nanoseconds = stamp.to_datetime64().view('int64')
    return nanoseconds / 1e9
class LRUDict(MutableMapping):
    """A bounded mapping that evicts its least-recently-used entries."""

    def __init__(self, *args, size=128, **kwargs):
        """Same arguments as OrderedDict with one additions:
        size: maximum number of entries
        """
        self.data = OrderedDict(*args, **kwargs)
        self.size = size
        self.purge()

    def purge(self):
        """Evict least-recently-used entries until within capacity."""
        overflow = len(self.data) - self.size
        while overflow > 0:
            # Oldest entries sit at the front of the OrderedDict.
            self.data.popitem(last=False)
            overflow -= 1

    def __getitem__(self, key):
        # Missing keys raise KeyError(key) naturally.
        value = self.data[key]
        # A successful read counts as a "use": bump to most-recent.
        self.data.move_to_end(key)
        return value

    def __setitem__(self, key, value):
        self.data[key] = value
        self.data.move_to_end(key)
        self.purge()

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        # Snapshot the keys so callers may mutate while iterating.
        return iter(tuple(self.data))

    def __len__(self):
        return len(self.data)
class SmallChunkCacher:
    """
    Cache open GCSFiles, and data chunks from small reads
    Parameters
    ----------
    gcs : instance of GCSFileSystem
    cutoff : int
        Will store/fetch data from cache for calls to read() with values smaller
        than this.
    nfile : int
        Number of files to store in LRU cache.
    """

    def __init__(self, gcs, cutoff=10000, nfiles=3):
        self.gcs = gcs
        # LRU of open files: fn -> (GCSFile, [chunk, ...]).  Evicting a
        # file also drops its cached chunks.
        self.cache = LRUDict(size=nfiles)
        self.cutoff = cutoff
        self.nfiles = nfiles

    def read(self, fn, offset, size):
        """Read a block from a file

        If size is less than cutoff, see if the relevant data is in the cache;
        either return data from there, or call read() on underlying file object
        and store the resultant block in the cache.
        """
        if fn not in self.cache:
            self.open(fn)
        f, chunks = self.cache[fn]
        # Serve from a cached chunk if one strictly covers the request.
        # Each chunk is {'start': int, 'end': int, 'data': bytes}.
        for chunk in chunks:
            if chunk['start'] < offset and chunk['end'] > offset + size:
                logger.info('cache hit')
                start = offset - chunk['start']
                return chunk['data'][start:start + size]
        if size > self.cutoff:
            # big reads are likely sequential
            with f.lock:
                f.seek(offset)
                return f.read(size)
        logger.info('cache miss')
        with f.lock:
            f.seek(offset)
            out = f.read(size)
            # NOTE(review): the merge below relies on the GCSFile exposing
            # .start/.end/.cache for the block it just fetched — confirm
            # against gcsfs internals.  Coalesce the fetched block with any
            # adjacent cached chunk; otherwise store it as a new chunk.
            new = True
            for chunk in chunks:
                if chunk['end'] == f.start - 1:
                    chunk['end'] = f.end
                    chunk['data'] += f.cache
                    new = False
                elif chunk['start'] == f.end + 1:
                    chunk['start'] = f.start
                    chunk['data'] = f.cache + chunk['data']
                    new = False
            if new:
                chunks.append({'start': f.start, 'end': f.end,
                               'data': f.cache})
        return out

    def open(self, fn):
        """Create cache entry, or return existing open file

        May result in the eviction of LRU file object and its data blocks.
        """
        if fn not in self.cache:
            f = self.gcs.open(fn, 'rb')
            # Pair the file with an (initially empty) chunk list; the lock
            # serializes seek/read pairs on the shared file object.
            self.cache[fn] = f, []
            f.lock = Lock()
            logger.info('{} inserted into cache'.format(fn))
        else:
            logger.info('{} found in cache'.format(fn))
        return self.cache[fn][0]
class GCSFS(Operations):
    """FUSE operations backed by a :class:`GCSFileSystem`.

    Reads go through a :class:`SmallChunkCacher`; writes stream through
    write-mode GCSFile objects stored in ``write_cache``, keyed by the
    integer file handle this class hands out.
    """

    def __init__(self, path='.', gcs=None, nfiles=10, **fsargs):
        if gcs is None:
            # minimum block size: still read on 5MB boundaries.
            self.gcs = GCSFileSystem(block_size=2 * 2 ** 20, **fsargs)
        else:
            self.gcs = gcs
        self.cache = SmallChunkCacher(self.gcs, nfiles=nfiles)
        self.write_cache = {}  # fh (int) -> open write-mode file object
        self.counter = 0       # next file handle to allocate
        self.root = path       # prefix prepended to every FUSE path

    @_tracemethod
    def getattr(self, path, fh=None):
        """Return a stat-like dict for *path*; ENOENT if it does not exist."""
        try:
            info = self.gcs.info(''.join([self.root, path]))
        except FileNotFoundError:
            raise FuseOSError(ENOENT)
        data = {'st_uid': 1000, 'st_gid': 1000}
        perm = 0o777
        if info['storageClass'] == 'DIRECTORY' or 'bucket' in info['kind']:
            data['st_atime'] = 0
            data['st_ctime'] = 0
            data['st_mtime'] = 0
            data['st_mode'] = (stat.S_IFDIR | perm)
            data['st_size'] = 0
            data['st_blksize'] = 0
        else:
            data['st_atime'] = str_to_time(info['timeStorageClassUpdated'])
            data['st_ctime'] = str_to_time(info['timeCreated'])
            data['st_mtime'] = str_to_time(info['updated'])
            data['st_mode'] = (stat.S_IFREG | perm)
            data['st_size'] = info['size']
            data['st_blksize'] = 5 * 2**20
        data['st_nlink'] = 1
        return data

    @_tracemethod
    def readdir(self, path, fh):
        """List entry names under *path*, including '.' and '..'."""
        path = ''.join([self.root, path])
        logger.info("List {}, {}".format(path, fh))
        files = self.gcs.ls(path)
        files = [os.path.basename(f.rstrip('/')) for f in files]
        return ['.', '..'] + files

    @_tracemethod
    def mkdir(self, path, mode):
        """Create a pseudo-directory entry (GCS has no real directories)."""
        bucket, key = core.split_path(path)
        if not self.gcs.info(path):
            self.gcs.dirs['bucket'].append({
                'bucket': bucket, 'kind': 'storage#object',
                'size': 0, 'storageClass': 'DIRECTORY',
                'name': path.rstrip('/') + '/'})

    @_tracemethod
    def rmdir(self, path):
        """Remove *path* if it is a pseudo-directory."""
        info = self.gcs.info(path)
        # BUG FIX: original indexed the dict with a slice
        # (info['storageClass': 'DIRECTORY']), which raises TypeError.
        if info['storageClass'] == 'DIRECTORY':
            self.gcs.rm(path, False)

    @_tracemethod
    def read(self, path, size, offset, fh):
        """Read *size* bytes at *offset*, served via the chunk cache."""
        fn = ''.join([self.root, path])
        logger.info('read #{} ({}) offset: {}, size: {}'.format(
            fh, fn, offset, size))
        out = self.cache.read(fn, offset, size)
        return out

    @_tracemethod
    def write(self, path, data, offset, fh):
        """Append *data* to the write-mode file registered under *fh*.

        GCS uploads are sequential, so *offset* is not honoured.
        """
        fn = ''.join([self.root, path])
        # BUG FIX: the format string had no placeholder for the offset
        # argument, so the offset was silently dropped from the log line.
        logger.info('write #{} ({}) offset: {}'.format(fh, fn, offset))
        f = self.write_cache[fh]
        f.write(data)
        return len(data)

    @_tracemethod
    def create(self, path, flags):
        """Create *path* for writing and return a new file handle."""
        fn = ''.join([self.root, path])
        logger.info('create {} {}'.format(fn, oct(flags)))
        self.gcs.touch(fn)  # this makes sure directory entry exists - wasteful!
        # write (but ignore creation flags)
        f = self.gcs.open(fn, 'wb')
        self.write_cache[self.counter] = f
        logger.info('-> fh #{}'.format(self.counter))
        self.counter += 1
        return self.counter - 1

    @_tracemethod
    def open(self, path, flags):
        """Open *path* and return a new file handle.

        Read handles are backed by the chunk cache; write handles get a
        fresh write-mode file in ``write_cache``.
        """
        fn = ''.join([self.root, path])
        logger.info('open {} {}'.format(fn, oct(flags)))
        if flags % 2 == 0:
            # read
            self.cache.open(fn)
        else:
            # write (but ignore creation flags)
            # BUG FIX: the opened file was never bound to `f`, so the next
            # line raised NameError; bind it before registering the handle.
            f = self.gcs.open(fn, 'wb')
            self.write_cache[self.counter] = f
        logger.info('-> fh #{}'.format(self.counter))
        self.counter += 1
        return self.counter - 1

    @_tracemethod
    def truncate(self, path, length, fh=None):
        """Truncate to zero length (the only supported size)."""
        fn = ''.join([self.root, path])
        logger.info('truncate #{} ({}) to {}'.format(fh, fn, length))
        if length != 0:
            raise NotImplementedError
        # maybe should be no-op since open with write sets size to zero anyway
        self.gcs.touch(fn)

    @_tracemethod
    def unlink(self, path):
        """Delete *path*; raise EIO on any failure."""
        fn = ''.join([self.root, path])
        # BUG FIX: logger.info('delete', fn) passed `fn` as a %-format
        # argument with no placeholder, producing a logging error instead
        # of a log line.
        logger.info('delete %s', fn)
        try:
            self.gcs.rm(fn, False)
        except (IOError, FileNotFoundError):
            raise FuseOSError(EIO)

    @_tracemethod
    def release(self, path, fh):
        """Close the write handle *fh* (no-op for read handles)."""
        fn = ''.join([self.root, path])
        logger.info('close #{} ({})'.format(fh, fn))
        try:
            if fh in self.write_cache:
                # write mode
                f = self.write_cache[fh]
                f.close()
                self.write_cache.pop(fh, None)
        except Exception as e:
            logger.exception("exception on release:" + str(e))
        return 0

    @_tracemethod
    def chmod(self, path, mode):
        """Permissions are not supported on GCS."""
        raise NotImplementedError
Profile the whole process: enable the profiler at import time and dump stats only at exit, instead of toggling it around each traced method.
from __future__ import print_function
from collections import OrderedDict, MutableMapping
import os
import logging
import decorator
import stat
import pandas as pd
from errno import ENOENT, EIO
from fuse import Operations, FuseOSError
from gcsfs import GCSFileSystem, core
from pwd import getpwnam
from grp import getgrnam
import time
from threading import Lock
import cProfile
import atexit
# Profile the whole process: the profiler runs from import time and the
# accumulated stats are written out when the interpreter exits.
prof = cProfile.Profile()
prof.enable()
logger = logging.getLogger(__name__)


def dump():
    """Stop profiling and persist the stats at interpreter exit."""
    prof.disable()
    # NOTE(review): hard-coded, user-specific output path — confirm before
    # deploying anywhere other than this particular machine.
    prof.dump_stats('/home/ubuntu/out.prof')


atexit.register(dump)
@decorator.decorator
def _tracemethod(f, self, *args, **kwargs):
    # Log the call with its arguments, then delegate to the wrapped method.
    # (`decorator.decorator` preserves the original function's signature
    # for FUSE's introspection; profiling now happens process-wide.)
    logger.debug("%s(args=%s, kwargs=%s)", f.__name__, args, kwargs)
    out = f(self, *args, **kwargs)
    return out
def str_to_time(s):
    """Convert a timestamp string to float seconds since the Unix epoch."""
    stamp = pd.to_datetime(s)
    nanoseconds = stamp.to_datetime64().view('int64')
    return nanoseconds / 1e9
class LRUDict(MutableMapping):
    """A bounded mapping that evicts its least-recently-used entries."""

    def __init__(self, *args, size=128, **kwargs):
        """Same arguments as OrderedDict with one additions:
        size: maximum number of entries
        """
        self.data = OrderedDict(*args, **kwargs)
        self.size = size
        self.purge()

    def purge(self):
        """Evict least-recently-used entries until within capacity."""
        overflow = len(self.data) - self.size
        while overflow > 0:
            # Oldest entries sit at the front of the OrderedDict.
            self.data.popitem(last=False)
            overflow -= 1

    def __getitem__(self, key):
        # Missing keys raise KeyError(key) naturally.
        value = self.data[key]
        # A successful read counts as a "use": bump to most-recent.
        self.data.move_to_end(key)
        return value

    def __setitem__(self, key, value):
        self.data[key] = value
        self.data.move_to_end(key)
        self.purge()

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        # Snapshot the keys so callers may mutate while iterating.
        return iter(tuple(self.data))

    def __len__(self):
        return len(self.data)
class SmallChunkCacher:
    """
    Cache open GCSFiles, and data chunks from small reads
    Parameters
    ----------
    gcs : instance of GCSFileSystem
    cutoff : int
        Will store/fetch data from cache for calls to read() with values smaller
        than this.
    nfile : int
        Number of files to store in LRU cache.
    """

    def __init__(self, gcs, cutoff=10000, nfiles=3):
        self.gcs = gcs
        # LRU of open files: fn -> (GCSFile, [chunk, ...]).  Evicting a
        # file also drops its cached chunks.
        self.cache = LRUDict(size=nfiles)
        self.cutoff = cutoff
        self.nfiles = nfiles

    def read(self, fn, offset, size):
        """Read a block from a file

        If size is less than cutoff, see if the relevant data is in the cache;
        either return data from there, or call read() on underlying file object
        and store the resultant block in the cache.
        """
        if fn not in self.cache:
            self.open(fn)
        f, chunks = self.cache[fn]
        # Serve from a cached chunk if one strictly covers the request.
        # Each chunk is {'start': int, 'end': int, 'data': bytes}.
        for chunk in chunks:
            if chunk['start'] < offset and chunk['end'] > offset + size:
                logger.info('cache hit')
                start = offset - chunk['start']
                return chunk['data'][start:start + size]
        if size > self.cutoff:
            # big reads are likely sequential
            with f.lock:
                f.seek(offset)
                return f.read(size)
        logger.info('cache miss')
        with f.lock:
            f.seek(offset)
            out = f.read(size)
            # NOTE(review): the merge below relies on the GCSFile exposing
            # .start/.end/.cache for the block it just fetched — confirm
            # against gcsfs internals.  Coalesce the fetched block with any
            # adjacent cached chunk; otherwise store it as a new chunk.
            new = True
            for chunk in chunks:
                if chunk['end'] == f.start - 1:
                    chunk['end'] = f.end
                    chunk['data'] += f.cache
                    new = False
                elif chunk['start'] == f.end + 1:
                    chunk['start'] = f.start
                    chunk['data'] = f.cache + chunk['data']
                    new = False
            if new:
                chunks.append({'start': f.start, 'end': f.end,
                               'data': f.cache})
        return out

    def open(self, fn):
        """Create cache entry, or return existing open file

        May result in the eviction of LRU file object and its data blocks.
        """
        if fn not in self.cache:
            f = self.gcs.open(fn, 'rb')
            # Pair the file with an (initially empty) chunk list; the lock
            # serializes seek/read pairs on the shared file object.
            self.cache[fn] = f, []
            f.lock = Lock()
            logger.info('{} inserted into cache'.format(fn))
        else:
            logger.info('{} found in cache'.format(fn))
        return self.cache[fn][0]
class GCSFS(Operations):
    """FUSE operations backed by a :class:`GCSFileSystem`.

    Reads go through a :class:`SmallChunkCacher`; writes stream through
    write-mode GCSFile objects stored in ``write_cache``, keyed by the
    integer file handle this class hands out.
    """

    def __init__(self, path='.', gcs=None, nfiles=10, **fsargs):
        if gcs is None:
            # minimum block size: still read on 5MB boundaries.
            self.gcs = GCSFileSystem(block_size=2 * 2 ** 20, **fsargs)
        else:
            self.gcs = gcs
        self.cache = SmallChunkCacher(self.gcs, nfiles=nfiles)
        self.write_cache = {}  # fh (int) -> open write-mode file object
        self.counter = 0       # next file handle to allocate
        self.root = path       # prefix prepended to every FUSE path

    @_tracemethod
    def getattr(self, path, fh=None):
        """Return a stat-like dict for *path*; ENOENT if it does not exist."""
        try:
            info = self.gcs.info(''.join([self.root, path]))
        except FileNotFoundError:
            raise FuseOSError(ENOENT)
        data = {'st_uid': 1000, 'st_gid': 1000}
        perm = 0o777
        if info['storageClass'] == 'DIRECTORY' or 'bucket' in info['kind']:
            data['st_atime'] = 0
            data['st_ctime'] = 0
            data['st_mtime'] = 0
            data['st_mode'] = (stat.S_IFDIR | perm)
            data['st_size'] = 0
            data['st_blksize'] = 0
        else:
            data['st_atime'] = str_to_time(info['timeStorageClassUpdated'])
            data['st_ctime'] = str_to_time(info['timeCreated'])
            data['st_mtime'] = str_to_time(info['updated'])
            data['st_mode'] = (stat.S_IFREG | perm)
            data['st_size'] = info['size']
            data['st_blksize'] = 5 * 2**20
        data['st_nlink'] = 1
        return data

    @_tracemethod
    def readdir(self, path, fh):
        """List entry names under *path*, including '.' and '..'."""
        path = ''.join([self.root, path])
        logger.info("List {}, {}".format(path, fh))
        files = self.gcs.ls(path)
        files = [os.path.basename(f.rstrip('/')) for f in files]
        return ['.', '..'] + files

    @_tracemethod
    def mkdir(self, path, mode):
        """Create a pseudo-directory entry (GCS has no real directories)."""
        bucket, key = core.split_path(path)
        if not self.gcs.info(path):
            self.gcs.dirs['bucket'].append({
                'bucket': bucket, 'kind': 'storage#object',
                'size': 0, 'storageClass': 'DIRECTORY',
                'name': path.rstrip('/') + '/'})

    @_tracemethod
    def rmdir(self, path):
        """Remove *path* if it is a pseudo-directory."""
        info = self.gcs.info(path)
        # BUG FIX: original indexed the dict with a slice
        # (info['storageClass': 'DIRECTORY']), which raises TypeError.
        if info['storageClass'] == 'DIRECTORY':
            self.gcs.rm(path, False)

    @_tracemethod
    def read(self, path, size, offset, fh):
        """Read *size* bytes at *offset*, served via the chunk cache."""
        fn = ''.join([self.root, path])
        logger.info('read #{} ({}) offset: {}, size: {}'.format(
            fh, fn, offset, size))
        out = self.cache.read(fn, offset, size)
        return out

    @_tracemethod
    def write(self, path, data, offset, fh):
        """Append *data* to the write-mode file registered under *fh*.

        GCS uploads are sequential, so *offset* is not honoured.
        """
        fn = ''.join([self.root, path])
        # BUG FIX: the format string had no placeholder for the offset
        # argument, so the offset was silently dropped from the log line.
        logger.info('write #{} ({}) offset: {}'.format(fh, fn, offset))
        f = self.write_cache[fh]
        f.write(data)
        return len(data)

    @_tracemethod
    def create(self, path, flags):
        """Create *path* for writing and return a new file handle."""
        fn = ''.join([self.root, path])
        logger.info('create {} {}'.format(fn, oct(flags)))
        self.gcs.touch(fn)  # this makes sure directory entry exists - wasteful!
        # write (but ignore creation flags)
        f = self.gcs.open(fn, 'wb')
        self.write_cache[self.counter] = f
        logger.info('-> fh #{}'.format(self.counter))
        self.counter += 1
        return self.counter - 1

    @_tracemethod
    def open(self, path, flags):
        """Open *path* and return a new file handle.

        Read handles are backed by the chunk cache; write handles get a
        fresh write-mode file in ``write_cache``.
        """
        fn = ''.join([self.root, path])
        logger.info('open {} {}'.format(fn, oct(flags)))
        if flags % 2 == 0:
            # read
            self.cache.open(fn)
        else:
            # write (but ignore creation flags)
            # BUG FIX: the opened file was never bound to `f`, so the next
            # line raised NameError; bind it before registering the handle.
            f = self.gcs.open(fn, 'wb')
            self.write_cache[self.counter] = f
        logger.info('-> fh #{}'.format(self.counter))
        self.counter += 1
        return self.counter - 1

    @_tracemethod
    def truncate(self, path, length, fh=None):
        """Truncate to zero length (the only supported size)."""
        fn = ''.join([self.root, path])
        logger.info('truncate #{} ({}) to {}'.format(fh, fn, length))
        if length != 0:
            raise NotImplementedError
        # maybe should be no-op since open with write sets size to zero anyway
        self.gcs.touch(fn)

    @_tracemethod
    def unlink(self, path):
        """Delete *path*; raise EIO on any failure."""
        fn = ''.join([self.root, path])
        # BUG FIX: logger.info('delete', fn) passed `fn` as a %-format
        # argument with no placeholder, producing a logging error instead
        # of a log line.
        logger.info('delete %s', fn)
        try:
            self.gcs.rm(fn, False)
        except (IOError, FileNotFoundError):
            raise FuseOSError(EIO)

    @_tracemethod
    def release(self, path, fh):
        """Close the write handle *fh* (no-op for read handles)."""
        fn = ''.join([self.root, path])
        logger.info('close #{} ({})'.format(fh, fn))
        try:
            if fh in self.write_cache:
                # write mode
                f = self.write_cache[fh]
                f.close()
                self.write_cache.pop(fh, None)
        except Exception as e:
            logger.exception("exception on release:" + str(e))
        return 0

    @_tracemethod
    def chmod(self, path, mode):
        """Permissions are not supported on GCS."""
        raise NotImplementedError
|
# -*- coding: utf-8 ; test-case-name: bridgedb.test.test_email_server -*-
#_____________________________________________________________________________
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Nick Mathewson <nickm@torproject.org>
# Isis Lovecruft <isis@torproject.org> 0xA3ADB67A2CDB8B35
# Matthew Finkel <sysrqb@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2007-2014, The Tor Project, Inc.
# (c) 2013-2014, Isis Lovecruft
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
"""Servers which interface with clients and distribute bridges over SMTP."""
from __future__ import unicode_literals
import logging
import io
import socket
import time
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.mail import smtp
from zope.interface import implements
from bridgedb import safelog
from bridgedb import translations
from bridgedb.crypto import getGPGContext
from bridgedb.crypto import gpgSignMessage
from bridgedb.crypto import NEW_BUFFER_INTERFACE
from bridgedb.Dist import EmailRequestedHelp
from bridgedb.Dist import EmailRequestedKey
from bridgedb.Dist import TooSoonEmail
from bridgedb.Dist import IgnoreEmail
from bridgedb.email import templates
from bridgedb.email import request
from bridgedb.parse import addr
from bridgedb.parse.addr import BadEmail
from bridgedb.parse.addr import UnsupportedDomain
from bridgedb.parse.addr import canonicalizeEmailDomain
def checkDKIM(message, rules):
    """Check the DKIM verification results header of an incoming email.

    The check only applies when the client's canonical email domain is
    configured (in the ``EMAIL_DOMAIN_RULES`` dictionary in the config file)
    with a ``'dkim'`` rule; otherwise the message is accepted unexamined.

    :type message: :api:`twisted.mail.smtp.rfc822.Message`
    :param message: The incoming client request email, including headers.
    :param dict rules: The list of configured ``EMAIL_DOMAIN_RULES`` for the
        canonical domain which the client's email request originated from.
    :rtype: bool
    :returns: ``False`` when DKIM checking applies and the header did not
        report a pass; ``True`` otherwise.
    """
    logging.info("Checking DKIM verification results...")
    logging.debug("Domain has rules: %s" % ', '.join(rules))
    if 'dkim' not in rules:
        return True
    # getheaders() (rather than getheader()) yields headers in order, so
    # the first X-DKIM-Authentication-Results header is the one checked.
    headers = message.getheaders("X-DKIM-Authentication-Results")
    result = headers[0] if headers else "<no header>"
    if result.startswith("pass"):
        return True
    logging.info("Rejecting bad DKIM header on incoming email: %r " % result)
    return False
def createResponseBody(lines, context, client, lang='en'):
    """Parse the **lines** from an incoming email request and determine how to
    respond.

    :param list lines: The list of lines from the original request sent by the
        client.
    :type context: class:`MailContext`
    :param context: The context which contains settings for the email server.
    :type client: :api:`twisted.mail.smtp.Address`
    :param client: The client's email address which should be in the
        :header:`To:` header of the response email.
    :param str lang: The 2-5 character locale code to use for translating the
        email. This is obtained from a client sending a email to a valid plus
        address which includes the translation desired, i.e. by sending an
        email to ``bridges+fa@torproject.org``, the client should receive a
        response in Farsi.
    :rtype: None or str
    :returns: None if we shouldn't respond to the client (i.e., if they have
        already received a rate-limiting warning email). Otherwise, returns a
        string containing the (optionally translated) body for the email
        response which we should send out.
    """
    t = translations.installTranslations(lang)
    bridges = None
    try:
        bridgeRequest = request.determineBridgeRequestOptions(lines)
        # The request was invalid, respond with a help email which explains
        # valid email commands:
        if not bridgeRequest.isValid():
            raise EmailRequestedHelp("Email request from '%s' was invalid."
                                     % str(client))
        # Otherwise they must have requested bridges:
        interval = context.schedule.getInterval(time.time())
        bridges = context.distributor.getBridgesForEmail(
            str(client),
            interval,
            context.nBridges,
            countryCode=None,
            bridgeFilterRules=bridgeRequest.filters)
    except EmailRequestedHelp as error:
        logging.info(error)
        return templates.buildWelcomeText(t, client)
    except EmailRequestedKey as error:
        logging.info(error)
        return templates.buildKeyMessage(t, client)
    except TooSoonEmail as error:
        # First rate-limit violation: warn the client once.
        logging.info("Got a mail too frequently: %s." % error)
        return templates.buildSpamWarning(t, client)
    except (IgnoreEmail, BadEmail) as error:
        logging.info(error)
        # Don't generate a response if their email address is unparsable or
        # invalid, or if we've already warned them about rate-limiting:
        return None
    else:
        # Success path: format one config line per distributed bridge.
        answer = "(no bridges currently available)\r\n"
        if bridges:
            transport = bridgeRequest.justOnePTType()
            answer = "".join("  %s\r\n" % b.getConfigLine(
                includeFingerprint=context.includeFingerprints,
                addressClass=bridgeRequest.addressClass,
                transport=transport,
                request=str(client)) for b in bridges)
        return templates.buildAnswerMessage(t, client, answer)
def generateResponse(fromAddress, clientAddress, body, subject=None,
                     messageID=None, gpgContext=None):
    """Create a :class:`MailResponse`, which acts like an in-memory
    ``io.StringIO`` file, by creating and writing all headers and the email
    body into the file-like ``MailResponse.mailfile``.

    :param str fromAddress: The rfc:`2821` email address which should be in
        the :header:`From:` header.
    :param str clientAddress: The rfc:`2821` email address which should be in
        the :header:`To:` header.
    :param str subject: The string to write to the :header:`subject` header.
    :param str body: The body of the email. If a **gpgContext** is also given,
        and that ``Context`` has email signing configured, then
        :meth:`MailResponse.writeBody` will generate and include any
        ascii-armored OpenPGP signatures in the **body**.
    :type messageID: None or str
    :param messageID: The :rfc:`2822` specifier for the :header:`Message-ID:`
        header, if including one is desirable.
    :type gpgContext: None or ``gpgme.Context``.
    :param gpgContext: A pre-configured GPGME context. See
        :meth:`~crypto.getGPGContext`.
    :rtype: :class:`MailResponse`
    :returns: A ``MailResponse`` which contains the entire email. To obtain
        the contents of the email, including all headers, simply use
        :meth:`MailResponse.readContents`.
    """
    response = MailResponse(gpgContext)
    response.writeHeaders(fromAddress, clientAddress, subject,
                          inReplyTo=messageID)
    response.writeBody(body)
    # Only log the email text (including all headers) if SAFE_LOGGING is
    # disabled:
    if not safelog.safe_logging:
        contents = response.readContents()
        logging.debug("Email contents:\n%s" % contents)
    else:
        logging.debug("Email text for %r created." % clientAddress)
    # Leave the file position at the start so callers can stream it out.
    response.rewind()
    return response
class MailContext(object):
    """Helper object that holds information used by email subsystem.

    :ivar str username: Reject any RCPT TO lines that aren't to this
        user. See the ``EMAIL_USERNAME`` option in the config file.
        (default: ``'bridges'``)
    :ivar int maximumSize: Reject any incoming emails longer than
        this size (in bytes). (default: 32768 bytes).
    :ivar int smtpPort: The port to use for outgoing SMTP.
    :ivar str smtpServer: The IP address to use for outgoing SMTP.
    :ivar str smtpFromAddr: Use this address in the raw SMTP ``MAIL FROM``
        line for outgoing mail. (default: ``bridges@torproject.org``)
    :ivar str fromAddr: Use this address in the email :header:`From:`
        line for outgoing mail. (default: ``bridges@torproject.org``)
    :ivar int nBridges: The number of bridges to send for each email.
    :ivar gpgContext: A ``gpgme.GpgmeContext`` (as created by
        :func:`bridgedb.crypto.getGPGContext`), or None if we couldn't create
        a proper GPGME context for some reason.
    """

    def __init__(self, config, distributor, schedule):
        """Create a context for storing configs for email bridge distribution.

        :type config: :class:`bridgedb.persistent.Conf`
        :type distributor: :class:`bridgedb.Dist.EmailBasedDistributor`.
        :param distributor: The distributor will handle getting the correct
            bridges (or none) for a client for us.
        :type schedule: :class:`bridgedb.schedule.ScheduledInterval`.
        :param schedule: An interval-based scheduler, used to help the
            :ivar:`distributor` know if we should give bridges to a client.
        """
        self.config = config
        self.distributor = distributor
        self.schedule = schedule
        # 32KB cap on incoming mail size.
        self.maximumSize = 32*1024
        self.includeFingerprints = config.EMAIL_INCLUDE_FINGERPRINTS
        self.nBridges = config.EMAIL_N_BRIDGES_PER_ANSWER
        # Fall back to sane defaults when the config leaves these unset.
        self.username = (config.EMAIL_USERNAME or "bridges")
        self.hostname = socket.gethostname()
        self.hostaddr = socket.gethostbyname(self.hostname)
        self.fromAddr = (config.EMAIL_FROM_ADDR or "bridges@torproject.org")
        self.smtpFromAddr = (config.EMAIL_SMTP_FROM_ADDR or self.fromAddr)
        self.smtpServerPort = (config.EMAIL_SMTP_PORT or 25)
        self.smtpServerIP = (config.EMAIL_SMTP_HOST or "127.0.0.1")
        self.domainRules = config.EMAIL_DOMAIN_RULES or {}
        self.domainMap = config.EMAIL_DOMAIN_MAP or {}
        self.canon = self.buildCanonicalDomainMap()
        self.gpgContext = getGPGContext(config)

    def buildCanonicalDomainMap(self):
        """Build a map for all email provider domains from which we will accept
        emails to their canonical domain name.

        .. note:: Be sure that ``MailContext.domainRules`` and
            ``MailContext.domainMap`` are set appropriately before calling
            this method.

        This method is automatically called during initialisation, and the
        resulting domain map is stored as ``MailContext.canon``.

        :rtype: dict
        :returns: A dictionary which maps all domains and subdomains which we
            accept emails from to their second-level, canonical domain names.
        """
        # Start from the alias map, then ensure every ruled and configured
        # domain maps at least to itself.
        canon = self.domainMap
        for domain, rule in self.domainRules.items():
            if domain not in canon.keys():
                canon[domain] = domain
        for domain in self.config.EMAIL_DOMAINS:
            canon[domain] = domain
        return canon
class MailResponse(object):
    """Holds information for generating a response email for a request.

    .. todo:: At some point, we may want to change this class to optionally
        handle creating Multipart MIME encoding messages, so that we can
        include attachments. (This would be useful for attaching our GnuPG
        keyfile, for example, rather than simply pasting it into the body of
        the email.)

    :type _buff: unicode or buffer
    :cvar _buff: Used internally to write lines for the response email into
        the ``_mailfile``. The reason why both of these attributes have two
        possible types is for the same Python-buggy reasons which require
        :data:`~bridgedb.crypto.NEW_BUFFER_INTERFACE`.
    :type mailfile: :class:`io.StringIO` or :class:`io.BytesIO`.
    :cvar mailfile: An in-memory file for storing the formatted headers and
        body of the response email.
    """
    # Decided once at import time: the (buffer, BytesIO) pair or the
    # (unicode, StringIO) pair, depending on NEW_BUFFER_INTERFACE
    # (see bridgedb.crypto). The two must stay in sync with each other.
    _buff = buffer if NEW_BUFFER_INTERFACE else unicode
    mailfile = io.BytesIO if NEW_BUFFER_INTERFACE else io.StringIO

    def __init__(self, gpgContext=None):
        """Create a response to an email we have received.

        This class deals with correctly formatting text for the response email
        headers and the response body into an instance of :cvar:`mailfile`.

        :type gpgContext: None or ``gpgme.Context``
        :param gpgContext: A pre-configured GPGME context. See
            :meth:`bridgedb.crypto.getGPGContext` for obtaining a
            pre-configured **gpgContext**. If given, and the ``Context`` has
            been configured to sign emails, then a response email body string
            given to :meth:`writeBody` will be signed before being written
            into the ``mailfile``.
        """
        self.gpgContext = gpgContext
        # NOTE: this shadows the *class* attribute ``mailfile`` (a class, i.e.
        # BytesIO or StringIO) with an *instance* of that class; from here on
        # ``self.mailfile`` is an in-memory file object.
        self.mailfile = self.mailfile()
        self.closed = False

    # These are methods and attributes for controlling I/O operations on our
    # underlying ``mailfile``.

    def close(self):
        self.mailfile.close()
        self.closed = True
    # Evaluated at class-body time, so ``mailfile`` here is still the class
    # attribute (the BytesIO/StringIO class), not an instance.
    close.__doc__ = mailfile.close.__doc__

    # The following are custom methods to control reading and writing to the
    # underlying ``mailfile``.

    def readContents(self):
        """Read the all the contents written thus far to the :cvar:`mailfile`,
        and then :meth:`seek` to return to the original pointer position we
        were at before this method was called.

        :rtype: str
        :returns: The entire contents of the :cvar:`mailfile`.
        """
        pointer = self.mailfile.tell()
        self.mailfile.seek(0)
        contents = self.mailfile.read()
        # Restore the file position so interleaved writes are unaffected:
        self.mailfile.seek(pointer)
        return contents

    def rewind(self):
        """Rewind to the very beginning of the :cvar:`mailfile`."""
        self.mailfile.seek(0)

    def write(self, line):
        """Any **line** written to me will have CRLF (``'\\r\\n'``) appended.

        :param str line: One line of the response email. If it contains any
            embedded ``'\\n'`` characters, it is split up via
            :meth:`writelines` so that every resulting line is
            CRLF-terminated.
        """
        if line.find('\n') != -1:
            # If **line** contains newlines, send it to :meth:`writelines` to
            # break it up so that we can replace them with '\r\n':
            self.writelines(line)
        else:
            # ``_buff`` converts to the type the underlying file expects
            # (buffer for BytesIO, unicode for StringIO):
            self.mailfile.write(self._buff(line + '\r\n'))
            self.mailfile.flush()

    def writelines(self, lines):
        """Calls :meth:`write` for each line in **lines**.

        :type lines: str or list or tuple
        :param lines: Either a single string (split on ``'\\n'``) or a
            list/tuple of strings. Any other type is silently ignored.
        """
        if isinstance(lines, basestring):
            for ln in lines.split('\n'):
                self.write(ln)
        elif isinstance(lines, (list, tuple,)):
            for ln in lines:
                self.write(ln)

    def writeHeaders(self, fromAddress, toAddress, subject=None,
                     inReplyTo=None, includeMessageID=True,
                     contentType='text/plain; charset="utf-8"', **kwargs):
        """Write all headers into the response email.

        :param str fromAddress: The email address for the ``From:`` header.
        :param str toAddress: The email address for the ``To:`` header.
        :type subject: None or str
        :param subject: The ``Subject:`` header.
        :type inReplyTo: None or str
        :param inReplyTo: If set, an ``In-Reply-To:`` header will be
            generated. This should be set to the ``Message-ID:`` header from
            the client's original request email.
        :param bool includeMessageID: If ``True``, generate and include a
            ``Message-ID:`` header for the response.
        :param str contentType: The ``Content-Type:`` header.
        :kwargs: If given, the key will become the name of the header, and the
            value will become the Contents of that header.
        """
        self.write("From: %s" % fromAddress)
        self.write("To: %s" % toAddress)
        if includeMessageID:
            self.write("Message-ID: %s" % smtp.messageid())
        if inReplyTo:
            self.write("In-Reply-To: %s" % inReplyTo)
        self.write("Content-Type: %s" % contentType)
        self.write("Date: %s" % smtp.rfc822date())
        if not subject:
            subject = '[no subject]'
        # Prefix "Re: " unless the subject already starts with "re"
        # (case-insensitive):
        if not subject.lower().startswith('re'):
            subject = "Re: " + subject
        self.write("Subject: %s" % subject)
        if kwargs:
            # Extra headers: "some_header" / "some header" -> "Some-Header".
            for headerName, headerValue in kwargs.items():
                headerName = headerName.capitalize()
                headerName = headerName.replace(' ', '-')
                headerName = headerName.replace('_', '-')
                self.write("%s: %s" % (headerName, headerValue))
        # The first blank line designates that the headers have ended.
        # NOTE: because "\r\n" contains '\n', write() routes this through
        # writelines(), which emits two CRLF-terminated lines ('\r' and '').
        self.write("\r\n")

    def writeBody(self, body):
        """Write the response body into the :cvar:`mailfile`.

        If ``MailResponse.gpgContext`` is set, and signing is configured, the
        **body** will be automatically signed before writing its contents into
        the ``mailfile``.

        :param str body: The body of the response email.
        """
        if self.gpgContext:
            body, _ = gpgSignMessage(self.gpgContext, body)
        self.writelines(body)
class MailMessage(object):
    """Plugs into the Twisted Mail and receives an incoming message.

    :ivar list lines: A list of lines from an incoming email message.
    :ivar int nBytes: The number of bytes received thus far.
    :ivar bool ignoring: If ``True``, we're ignoring the rest of this message
        because it exceeded :ivar:`MailContext.maximumSize`.
    """
    implements(smtp.IMessage)

    def __init__(self, context, fromCanonical=None):
        """Create a new MailMessage from a MailContext.

        :type context: :class:`MailContext`
        :param context: The configured context for the email server.
        :type fromCanonical: str or None
        :param fromCanonical: The canonical domain which this message was
            received from. For example, if ``'gmail.com'`` is the configured
            canonical domain for ``'googlemail.com'`` and a message is
            received from the latter domain, then this would be set to the
            former.
        """
        self.context = context
        self.fromCanonical = fromCanonical
        self.lines = []
        self.nBytes = 0
        self.ignoring = False

    def lineReceived(self, line):
        """Called when we get another line of an incoming message."""
        self.nBytes += len(line)
        if self.nBytes > self.context.maximumSize:
            # Message too large: stop buffering lines (the rest of the
            # message is discarded, and eomReceived() will not reply).
            self.ignoring = True
        else:
            self.lines.append(line)
            # Only log raw message lines when safe logging is disabled.
            if not safelog.safe_logging:
                logging.debug("> %s", line.rstrip("\r\n"))

    def eomReceived(self):
        """Called when we receive the end of a message."""
        if not self.ignoring:
            self.reply()
        return defer.succeed(None)

    def connectionLost(self):
        """Called if we die partway through reading a message."""
        pass

    def getIncomingMessage(self):
        """Create and parse an :rfc:`2822` message object for all ``lines``
        received thus far.

        :rtype: :api:`twisted.mail.smtp.rfc822.Message`
        :returns: A ``Message`` comprised of all lines received thus far.
        """
        rawMessage = io.StringIO()
        for ln in self.lines:
            # writelines() on a str iterates its characters; net effect is
            # writing the whole line followed by a newline.
            rawMessage.writelines(unicode(ln) + unicode('\n'))
        rawMessage.seek(0)
        return smtp.rfc822.Message(rawMessage)

    def getClientAddress(self, incoming):
        """Attempt to get the client's email address from an incoming email.

        :type incoming: :api:`twisted.mail.smtp.rfc822.Message`
        :param incoming: An incoming ``Message``, i.e. as returned from
            :meth:`getIncomingMessage`.
        :rtype: ``None`` or :api:`twisted.mail.smtp.Address`
        :returns: The client's email ``Address``, if it originated from a
            domain that we accept and the address was well-formed. Otherwise,
            returns ``None`` (implicitly).
        """
        addrHeader = None
        # Prefer the "From:" header; fall back to "Sender:".
        try: fromAddr = incoming.getaddr("From")[1]
        except (IndexError, TypeError, AttributeError): pass
        else: addrHeader = fromAddr

        if not addrHeader:
            logging.warn("No From header on incoming mail.")
            try: senderHeader = incoming.getaddr("Sender")[1]
            except (IndexError, TypeError, AttributeError): pass
            else: addrHeader = senderHeader

        if not addrHeader:
            logging.warn("No Sender header on incoming mail.")
        else:
            try:
                # Normalise (e.g. map googlemail.com -> gmail.com) before
                # constructing the Address; rejects unknown domains.
                client = smtp.Address(addr.normalizeEmail(
                    addrHeader,
                    self.context.domainMap,
                    self.context.domainRules))
            except (UnsupportedDomain, BadEmail, smtp.AddressError) as error:
                logging.warn(error)
            else:
                return client

    def getMailFrom(self, incoming):
        """Find our address in the recipients list of the **incoming** message.

        :type incoming: :api:`twisted.mail.smtp.rfc822.Message`
        :param incoming: An incoming ``Message``, i.e. as returned from
            :meth:`getIncomingMessage`.
        :rtype: str
        :return: Our address from the recipients list. If we can't find it
            return our default ``SMTP_FROM_ADDRESS`` from the config file.
        """
        logging.debug("Searching for our email address in 'To:' header...")
        ours = None
        try:
            ourAddress = smtp.Address(self.context.fromAddr)
            allRecipients = incoming.getaddrlist("To")
            for _, addr in allRecipients:
                recipient = smtp.Address(addr)
                if not (ourAddress.domain in recipient.domain):
                    logging.debug(("Not our domain (%s) or subdomain, skipping"
                                   " email address: %s")
                                  % (ourAddress.domain, str(recipient)))
                    continue
                # The recipient's username should at least start with ours,
                # but it still might be a '+' address.
                if not recipient.local.startswith(ourAddress.local):
                    logging.debug(("Username doesn't begin with ours, skipping"
                                   " email address: %s") % str(recipient))
                    continue
                # Ignore everything after the first '+', if there is one.
                beforePlus = recipient.local.split('+', 1)[0]
                if beforePlus == ourAddress.local:
                    ours = str(recipient)
            if not ours:
                raise BadEmail(allRecipients)
        except Exception as error:
            # Deliberately broad: this also catches the BadEmail raised just
            # above, so any failure falls back to the configured address.
            logging.error(("Couldn't find our email address in incoming email "
                           "headers: %r" % error))
            # Just return the email address that we're configured to use:
            ours = self.context.fromAddr
        logging.debug("Found our email address: %s." % ours)
        return ours

    def getCanonicalDomain(self, domain):
        """Map **domain** to its canonical domain, or ``None`` on failure.

        :param str domain: A sender's email provider domain.
        :rtype: str or None
        """
        try:
            canonical = canonicalizeEmailDomain(domain, self.context.canon)
        except (UnsupportedDomain, BadEmail) as error:
            logging.warn(error)
        else:
            return canonical

    def reply(self):
        """Reply to an incoming email. Maybe.

        If nothing is returned from either :func:`createResponseBody` or
        :func:`generateResponse`, then the incoming email will not be
        responded to at all. This can happen for several reasons, for example:
        if the DKIM signature was invalid or missing, or if the incoming email
        came from an unacceptable domain, or if there have been too many
        emails from this client in the allotted time period.

        :rtype: :api:`twisted.internet.defer.Deferred`
        :returns: A ``Deferred`` which will callback when the response has
            been successfully sent, or errback if an error occurred while
            sending the email.
        """
        logging.info("Got an email; deciding whether to reply.")

        def _replyEB(fail):  # pragma: no cover
            """Errback for a :api:`twisted.mail.smtp.SMTPSenderFactory`.

            :param fail: A :api:`twisted.python.failure.Failure` which occurred during
                the transaction.
            """
            logging.debug("_replyToMailEB() called with %r" % fail)
            error = fail.getTraceback() or "Unknown"
            logging.error(error)

        d = defer.Deferred()
        d.addErrback(_replyEB)

        incoming = self.getIncomingMessage()
        recipient = self.getMailFrom(incoming)
        client = self.getClientAddress(incoming)

        # Bail out (returning the unfired Deferred) at each gate below: no
        # parseable client, failed DKIM check, no body, or no response.
        if not client:
            return d
        if not self.fromCanonical:
            self.fromCanonical = self.getCanonicalDomain(client.domain)
        rules = self.context.domainRules.get(self.fromCanonical, [])
        if not checkDKIM(incoming, rules):
            return d

        clientAddr = '@'.join([client.local, client.domain])
        messageID = incoming.getheader("Message-ID", None)
        subject = incoming.getheader("Subject", None)

        # Look up the locale part in the 'To:' address, if there is one and
        # get the appropriate Translation object:
        lang = translations.getLocaleFromPlusAddr(recipient)
        logging.info("Client requested email translation: %s" % lang)

        body = createResponseBody(self.lines, self.context, client, lang)
        if not body: return d  # The client was already warned.

        response = generateResponse(self.context.fromAddr, clientAddr, body,
                                    subject, messageID, self.context.gpgContext)
        if not response: return d

        logging.info("Sending reply to %s" % client)
        factory = smtp.SMTPSenderFactory(self.context.smtpFromAddr, clientAddr,
                                         response, d, retries=0, timeout=30)
        reactor.connectTCP(self.context.smtpServerIP,
                           self.context.smtpServerPort,
                           factory)
        return d
class MailDelivery(object):
    """Plugs into Twisted Mail and handles SMTP commands.

    Implements :api:`twisted.mail.smtp.IMessageDelivery`.

    .. note:: A single ``MailDelivery`` instance is shared by every SMTP
       connection made to a :class:`MailFactory` (see
       :meth:`MailFactory.buildProtocol`), so the :ivar:`fromCanonical`
       attribute set in :meth:`validateFrom` persists across connections
       until the next ``MAIL FROM:`` overwrites it.
    """
    implements(smtp.IMessageDelivery)

    def setBridgeDBContext(self, context):
        """Store the server **context** and reset the canonical sender domain.

        :type context: :class:`MailContext`
        :param context: The configured context for the email server.
        """
        self.context = context
        self.fromCanonical = None

    def receivedHeader(self, helo, origin, recipients):
        """Create the ``Received:`` header for an incoming email.

        :type helo: tuple
        :param helo: The lines received during SMTP client HELO.
        :type origin: :api:`twisted.mail.smtp.Address`
        :param origin: The email address of the sender.
        :type recipients: list
        :param recipients: A list of :api:`twisted.mail.smtp.User` instances.
        :rtype: str
        :returns: The formatted ``Received:`` header line.
        """
        cameFrom = "%s (%s [%s])" % (helo[0] or origin, helo[0], helo[1])
        cameFor = ', '.join(["<{0}>".format(recp.dest) for recp in recipients])
        hdr = str("Received: from %s for %s; %s"
                  % (cameFrom, cameFor, smtp.rfc822date()))
        return hdr

    def validateFrom(self, helo, origin):
        """Validate the ``MAIL FROM:`` address on the incoming SMTP connection.

        This is done at the SMTP layer. Meaning that if a Postfix or other
        email server is proxying emails from the outside world to BridgeDB,
        the :api:`origin.domain <twisted.email.smtp.Address.domain` will be
        set to the local hostname. Therefore, if the SMTP ``MAIL FROM:``
        domain name is our own hostname (as returned from
        :func:`socket.gethostname`) or our own FQDN, allow the connection.

        Otherwise, if the ``MAIL FROM:`` domain has a canonical domain in our
        mapping (taken from :ivar:`context.canon <MailContext.canon>`, which
        is taken in turn from the ``EMAIL_DOMAIN_MAP``), then our
        :ivar:`fromCanonical` is set to that domain.

        :type helo: tuple
        :param helo: The lines received during SMTP client HELO.
        :type origin: :api:`twisted.mail.smtp.Address`
        :param origin: The email address we received this message from.
        :raises: :api:`twisted.mail.smtp.SMTPBadSender` if the
            ``origin.domain`` was neither our local hostname, nor one of the
            canonical domains listed in :ivar:`context.canon`.
        :rtype: :api:`twisted.mail.smtp.Address`
        :returns: The ``origin``. We *must* return some non-``None`` data from
            this method, or else Twisted will reply to the sender with a 503
            error.
        """
        try:
            if ((origin.domain == self.context.hostname) or
                    (origin.domain == self.context.hostaddr)):
                return origin
            else:
                logging.debug("ORIGIN DOMAIN: %r" % origin.domain)
                canonical = canonicalizeEmailDomain(origin.domain,
                                                    self.context.canon)
                logging.debug("Got canonical domain: %r" % canonical)
                self.fromCanonical = canonical
        except UnsupportedDomain as error:
            # Unknown sender domain: refuse at the SMTP layer.
            logging.info(error)
            raise smtp.SMTPBadSender(origin.domain)
        except Exception as error:
            # Anything else is logged but the sender is still accepted.
            logging.exception(error)
        return origin  # This method *cannot* return None, or it'll cause a 503.

    def validateTo(self, user):
        """Validate the SMTP ``RCPT TO:`` address for the incoming connection.

        The local username and domain name to which this SMTP message is
        addressed, after being stripped of any ``'+'`` aliases, **must** be
        identical to those in the email address set our
        ``EMAIL_SMTP_FROM_ADDR`` configuration file option.

        :type user: :api:`twisted.mail.smtp.User`
        :param user: Information about the user this SMTP message was
            addressed to.
        :raises: A :api:`twisted.mail.smtp.SMTPBadRcpt` if any of the above
            conditions weren't met.
        :rtype: callable
        :returns: A parameterless function which returns an instance of
            :class:`MailMessage`.
        """
        logging.debug("Validating SMTP 'RCPT TO:' email address...")

        recipient = user.dest
        ourAddress = smtp.Address(self.context.smtpFromAddr)

        if not (ourAddress.domain in recipient.domain):
            logging.debug(("Not our domain (%s) or subdomain, skipping"
                           " SMTP 'RCPT TO' address: %s")
                          % (ourAddress.domain, str(recipient)))
            raise smtp.SMTPBadRcpt(str(recipient))
        # The recipient's username should at least start with ours,
        # but it still might be a '+' address.
        if not recipient.local.startswith(ourAddress.local):
            logging.debug(("Username doesn't begin with ours, skipping"
                           " SMTP 'RCPT TO' address: %s") % str(recipient))
            raise smtp.SMTPBadRcpt(str(recipient))
        # Ignore everything after the first '+', if there is one.
        beforePlus = recipient.local.split('+', 1)[0]
        if beforePlus != ourAddress.local:
            raise smtp.SMTPBadRcpt(str(recipient))
        return lambda: MailMessage(self.context, self.fromCanonical)
class MailFactory(smtp.SMTPFactory):
    """Plugs into Twisted Mail; creates a new MailDelivery whenever we get
    a connection on the SMTP port."""

    def __init__(self, context=None, **kw):
        """Create the factory and its single, shared :class:`MailDelivery`.

        :type context: :class:`MailContext` or None
        :param context: If given, it is passed to :meth:`setBridgeDBContext`.
        """
        smtp.SMTPFactory.__init__(self, **kw)
        self.delivery = MailDelivery()
        if context:
            self.setBridgeDBContext(context)

    def setBridgeDBContext(self, context):
        """Store **context** and hand it on to our delivery handler."""
        self.context = context
        self.delivery.setBridgeDBContext(context)

    def buildProtocol(self, addr):
        """Build an SMTP protocol wired to the shared delivery handler."""
        proto = smtp.SMTPFactory.buildProtocol(self, addr)
        proto.delivery = self.delivery
        return proto
def addServer(config, distributor, schedule):
    """Set up a SMTP server for responding to requests for bridges.

    :type config: :class:`bridgedb.persistent.Conf`
    :param config: A configuration object.
    :type distributor: :class:`bridgedb.Dist.EmailBasedDistributor`
    :param distributor: A distributor which will handle database interactions,
        and will decide which bridges to give to who and when.
    :type schedule: :class:`bridgedb.schedule.ScheduledInterval`
    :param schedule: The schedule. XXX: Is this even used?
    :rtype: :class:`MailFactory`
    :returns: The listening SMTP server factory.
    """
    context = MailContext(config, distributor, schedule)
    factory = MailFactory(context)
    # NB: a local named ``addr`` would shadow the imported bridgedb.parse.addr
    # module, so use a different name for the bind address.
    bindAddress = config.EMAIL_BIND_IP or ""
    reactor.listenTCP(config.EMAIL_PORT, factory, interface=bindAddress)
    # Set up a LoopingCall to run every 30 minutes and forget old email times.
    cleaner = LoopingCall(distributor.cleanDatabase)
    cleaner.start(1800, now=False)
    return factory
Change b.e.server.MailContext to use twisted's SMTP max message length.
# -*- coding: utf-8 ; test-case-name: bridgedb.test.test_email_server -*-
#_____________________________________________________________________________
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Nick Mathewson <nickm@torproject.org>
# Isis Lovecruft <isis@torproject.org> 0xA3ADB67A2CDB8B35
# Matthew Finkel <sysrqb@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2007-2014, The Tor Project, Inc.
# (c) 2013-2014, Isis Lovecruft
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
"""Servers which interface with clients and distribute bridges over SMTP."""
from __future__ import unicode_literals
import logging
import io
import socket
import time
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.mail import smtp
from zope.interface import implements
from bridgedb import safelog
from bridgedb import translations
from bridgedb.crypto import getGPGContext
from bridgedb.crypto import gpgSignMessage
from bridgedb.crypto import NEW_BUFFER_INTERFACE
from bridgedb.Dist import EmailRequestedHelp
from bridgedb.Dist import EmailRequestedKey
from bridgedb.Dist import TooSoonEmail
from bridgedb.Dist import IgnoreEmail
from bridgedb.email import templates
from bridgedb.email import request
from bridgedb.parse import addr
from bridgedb.parse.addr import BadEmail
from bridgedb.parse.addr import UnsupportedDomain
from bridgedb.parse.addr import canonicalizeEmailDomain
def checkDKIM(message, rules):
    """Check the DKIM verification results header.

    This check is only run if the incoming email, **message**, originated from
    a domain for which we're configured (in the ``EMAIL_DOMAIN_RULES``
    dictionary in the config file) to check DKIM verification results for.

    :type message: :api:`twisted.mail.smtp.rfc822.Message`
    :param message: The incoming client request email, including headers.
    :param dict rules: The list of configured ``EMAIL_DOMAIN_RULES`` for the
        canonical domain which the client's email request originated from.
    :rtype: bool
    :returns: ``False`` only when we are configured to check DKIM headers for
        the client's provider domain *and* the first such header did not pass
        verification. ``True`` in every other case.
    """
    logging.info("Checking DKIM verification results...")
    logging.debug("Domain has rules: %s" % ', '.join(rules))

    if 'dkim' not in rules:
        return True

    # getheader() returns the last of a given kind of header; we want
    # to get the first, so we use getheaders() instead.
    allResults = message.getheaders("X-DKIM-Authentication-Results")
    firstResult = allResults[0] if allResults else "<no header>"
    if firstResult.startswith("pass"):
        return True

    logging.info("Rejecting bad DKIM header on incoming email: %r "
                 % firstResult)
    return False
def createResponseBody(lines, context, client, lang='en'):
    """Parse the **lines** from an incoming email request and determine how to
    respond.

    :param list lines: The list of lines from the original request sent by the
        client.
    :type context: class:`MailContext`
    :param context: The context which contains settings for the email server.
    :type client: :api:`twisted.mail.smtp.Address`
    :param client: The client's email address which should be in the
        :header:`To:` header of the response email.
    :param str lang: The 2-5 character locale code to use for translating the
        email. This is obtained from a client sending a email to a valid plus
        address which includes the translation desired, i.e. by sending an
        email to ``bridges+fa@torproject.org``, the client should receive a
        response in Farsi.
    :rtype: None or str
    :returns: None if we shouldn't respond to the client (i.e., if they have
        already received a rate-limiting warning email). Otherwise, returns a
        string containing the (optionally translated) body for the email
        response which we should send out.
    """
    translator = translations.installTranslations(lang)
    try:
        bridgeRequest = request.determineBridgeRequestOptions(lines)
        # An invalid request is answered with a help email which explains
        # the valid email commands:
        if not bridgeRequest.isValid():
            raise EmailRequestedHelp("Email request from '%s' was invalid."
                                     % str(client))
        # Otherwise they must have requested bridges:
        interval = context.schedule.getInterval(time.time())
        bridges = context.distributor.getBridgesForEmail(
            str(client),
            interval,
            context.nBridges,
            countryCode=None,
            bridgeFilterRules=bridgeRequest.filters)
    except EmailRequestedHelp as error:
        logging.info(error)
        return templates.buildWelcomeText(translator, client)
    except EmailRequestedKey as error:
        logging.info(error)
        return templates.buildKeyMessage(translator, client)
    except TooSoonEmail as error:
        logging.info("Got a mail too frequently: %s." % error)
        return templates.buildSpamWarning(translator, client)
    except (IgnoreEmail, BadEmail) as error:
        logging.info(error)
        # No response at all when the client's address was unparseable or
        # invalid, or when we've already warned them about rate-limiting:
        return None
    else:
        answer = "(no bridges currently available)\r\n"
        if bridges:
            transport = bridgeRequest.justOnePTType()
            answer = "".join(" %s\r\n" % b.getConfigLine(
                includeFingerprint=context.includeFingerprints,
                addressClass=bridgeRequest.addressClass,
                transport=transport,
                request=str(client)) for b in bridges)
        return templates.buildAnswerMessage(translator, client, answer)
def generateResponse(fromAddress, clientAddress, body, subject=None,
                     messageID=None, gpgContext=None):
    """Create a :class:`MailResponse`, which acts like an in-memory
    ``io.StringIO`` file, by creating and writing all headers and the email
    body into the file-like ``MailResponse.mailfile``.

    :param str fromAddress: The rfc:`2821` email address which should be in
        the :header:`From:` header.
    :param str clientAddress: The rfc:`2821` email address which should be in
        the :header:`To:` header.
    :param str body: The body of the email. If a **gpgContext** is also given,
        and that ``Context`` has email signing configured, then
        :meth:`MailResponse.writeBody` will generate and include any
        ascii-armored OpenPGP signatures in the **body**.
    :type subject: None or str
    :param subject: The string to write to the :header:`subject` header.
    :type messageID: None or str
    :param messageID: The :rfc:`2822` specifier for the :header:`Message-ID:`
        header, if including one is desirable.
    :type gpgContext: None or ``gpgme.Context``.
    :param gpgContext: A pre-configured GPGME context. See
        :meth:`~crypto.getGPGContext`.
    :rtype: :class:`MailResponse`
    :returns: A ``MailResponse`` which contains the entire email. To obtain
        the contents of the email, including all headers, simply use
        :meth:`MailResponse.readContents`.
    """
    mail = MailResponse(gpgContext)
    mail.writeHeaders(fromAddress, clientAddress, subject,
                      inReplyTo=messageID)
    mail.writeBody(body)
    # Only log the email text (including all headers) if SAFE_LOGGING is
    # disabled:
    if safelog.safe_logging:
        logging.debug("Email text for %r created." % clientAddress)
    else:
        logging.debug("Email contents:\n%s" % mail.readContents())
    mail.rewind()
    return mail
class MailContext(object):
    """Helper object that holds information used by email subsystem.

    :ivar str username: Reject any RCPT TO lines that aren't to this
        user. See the ``EMAIL_USERNAME`` option in the config file.
        (default: ``'bridges'``)
    :ivar int maximumSize: Reject any incoming emails longer than
        this size (in bytes). (default: twisted's ``smtp.SMTP.MAX_LENGTH``)
    :ivar int smtpPort: The port to use for outgoing SMTP.
    :ivar str smtpServer: The IP address to use for outgoing SMTP.
    :ivar str smtpFromAddr: Use this address in the raw SMTP ``MAIL FROM``
        line for outgoing mail. (default: ``bridges@torproject.org``)
    :ivar str fromAddr: Use this address in the email :header:`From:`
        line for outgoing mail. (default: ``bridges@torproject.org``)
    :ivar int nBridges: The number of bridges to send for each email.
    :ivar gpgContext: A ``gpgme.GpgmeContext`` (as created by
        :func:`bridgedb.crypto.getGPGContext`), or None if we couldn't create
        a proper GPGME context for some reason.
    """

    def __init__(self, config, distributor, schedule):
        """Create a context for storing configs for email bridge distribution.

        :type config: :class:`bridgedb.persistent.Conf`
        :type distributor: :class:`bridgedb.Dist.EmailBasedDistributor`.
        :param distributor: The distributor will handle getting the correct
            bridges (or none) for a client for us.
        :type schedule: :class:`bridgedb.schedule.ScheduledInterval`.
        :param schedule: An interval-based scheduler, used to help the
            :ivar:`distributor` know if we should give bridges to a client.
        """
        self.config = config
        self.distributor = distributor
        self.schedule = schedule

        # NOTE(review): smtp.SMTP.MAX_LENGTH is twisted's SMTP length limit
        # constant, used here as the cap on total incoming message size (see
        # MailMessage.lineReceived) — confirm this limit is the one intended.
        self.maximumSize = smtp.SMTP.MAX_LENGTH
        self.includeFingerprints = config.EMAIL_INCLUDE_FINGERPRINTS
        self.nBridges = config.EMAIL_N_BRIDGES_PER_ANSWER

        self.username = (config.EMAIL_USERNAME or "bridges")
        self.hostname = socket.gethostname()
        self.hostaddr = socket.gethostbyname(self.hostname)

        self.fromAddr = (config.EMAIL_FROM_ADDR or "bridges@torproject.org")
        self.smtpFromAddr = (config.EMAIL_SMTP_FROM_ADDR or self.fromAddr)
        self.smtpServerPort = (config.EMAIL_SMTP_PORT or 25)
        self.smtpServerIP = (config.EMAIL_SMTP_HOST or "127.0.0.1")

        self.domainRules = config.EMAIL_DOMAIN_RULES or {}
        self.domainMap = config.EMAIL_DOMAIN_MAP or {}
        self.canon = self.buildCanonicalDomainMap()

        self.gpgContext = getGPGContext(config)

    def buildCanonicalDomainMap(self):
        """Build a map for all email provider domains from which we will accept
        emails to their canonical domain name.

        .. note:: Be sure that ``MailContext.domainRules`` and
            ``MailContext.domainMap`` are set appropriately before calling
            this method.

        This method is automatically called during initialisation, and the
        resulting domain map is stored as ``MailContext.canon``.

        :rtype: dict
        :returns: A dictionary which maps all domains and subdomains which we
            accept emails from to their second-level, canonical domain names.
        """
        # NOTE(review): ``canon`` aliases (does not copy) ``self.domainMap``,
        # so the insertions below also mutate ``self.domainMap`` (and hence
        # the config's EMAIL_DOMAIN_MAP dict). Other code passes
        # ``context.domainMap`` to addr.normalizeEmail(), which may rely on
        # these added identity entries — verify before "fixing" with .copy().
        canon = self.domainMap
        for domain, rule in self.domainRules.items():
            if domain not in canon.keys():
                canon[domain] = domain
        for domain in self.config.EMAIL_DOMAINS:
            canon[domain] = domain
        return canon
class MailResponse(object):
    """Holds information for generating a response email for a request.

    .. todo:: At some point, we may want to change this class to optionally
        handle creating Multipart MIME encoding messages, so that we can
        include attachments. (This would be useful for attaching our GnuPG
        keyfile, for example, rather than simply pasting it into the body of
        the email.)

    :type _buff: unicode or buffer
    :cvar _buff: Used internally to write lines for the response email into
        the ``_mailfile``. The reason why both of these attributes have two
        possible types is for the same Python-buggy reasons which require
        :data:`~bridgedb.crypto.NEW_BUFFER_INTERFACE`.
    :type mailfile: :class:`io.StringIO` or :class:`io.BytesIO`.
    :cvar mailfile: An in-memory file for storing the formatted headers and
        body of the response email.
    """
    # Decided once at import time: the (buffer, BytesIO) pair or the
    # (unicode, StringIO) pair, depending on NEW_BUFFER_INTERFACE
    # (see bridgedb.crypto). The two must stay in sync with each other.
    _buff = buffer if NEW_BUFFER_INTERFACE else unicode
    mailfile = io.BytesIO if NEW_BUFFER_INTERFACE else io.StringIO

    def __init__(self, gpgContext=None):
        """Create a response to an email we have received.

        This class deals with correctly formatting text for the response email
        headers and the response body into an instance of :cvar:`mailfile`.

        :type gpgContext: None or ``gpgme.Context``
        :param gpgContext: A pre-configured GPGME context. See
            :meth:`bridgedb.crypto.getGPGContext` for obtaining a
            pre-configured **gpgContext**. If given, and the ``Context`` has
            been configured to sign emails, then a response email body string
            given to :meth:`writeBody` will be signed before being written
            into the ``mailfile``.
        """
        self.gpgContext = gpgContext
        # NOTE: this shadows the *class* attribute ``mailfile`` (a class, i.e.
        # BytesIO or StringIO) with an *instance* of that class; from here on
        # ``self.mailfile`` is an in-memory file object.
        self.mailfile = self.mailfile()
        self.closed = False

    # These are methods and attributes for controlling I/O operations on our
    # underlying ``mailfile``.

    def close(self):
        self.mailfile.close()
        self.closed = True
    # Evaluated at class-body time, so ``mailfile`` here is still the class
    # attribute (the BytesIO/StringIO class), not an instance.
    close.__doc__ = mailfile.close.__doc__

    # The following are custom methods to control reading and writing to the
    # underlying ``mailfile``.

    def readContents(self):
        """Read the all the contents written thus far to the :cvar:`mailfile`,
        and then :meth:`seek` to return to the original pointer position we
        were at before this method was called.

        :rtype: str
        :returns: The entire contents of the :cvar:`mailfile`.
        """
        pointer = self.mailfile.tell()
        self.mailfile.seek(0)
        contents = self.mailfile.read()
        # Restore the file position so interleaved writes are unaffected:
        self.mailfile.seek(pointer)
        return contents

    def rewind(self):
        """Rewind to the very beginning of the :cvar:`mailfile`."""
        self.mailfile.seek(0)

    def write(self, line):
        """Any **line** written to me will have CRLF (``'\\r\\n'``) appended.

        :param str line: One line of the response email. If it contains any
            embedded ``'\\n'`` characters, it is split up via
            :meth:`writelines` so that every resulting line is
            CRLF-terminated.
        """
        if line.find('\n') != -1:
            # If **line** contains newlines, send it to :meth:`writelines` to
            # break it up so that we can replace them with '\r\n':
            self.writelines(line)
        else:
            # ``_buff`` converts to the type the underlying file expects
            # (buffer for BytesIO, unicode for StringIO):
            self.mailfile.write(self._buff(line + '\r\n'))
            self.mailfile.flush()

    def writelines(self, lines):
        """Calls :meth:`write` for each line in **lines**.

        :type lines: str or list or tuple
        :param lines: Either a single string (split on ``'\\n'``) or a
            list/tuple of strings. Any other type is silently ignored.
        """
        if isinstance(lines, basestring):
            for ln in lines.split('\n'):
                self.write(ln)
        elif isinstance(lines, (list, tuple,)):
            for ln in lines:
                self.write(ln)

    def writeHeaders(self, fromAddress, toAddress, subject=None,
                     inReplyTo=None, includeMessageID=True,
                     contentType='text/plain; charset="utf-8"', **kwargs):
        """Write all headers into the response email.

        :param str fromAddress: The email address for the ``From:`` header.
        :param str toAddress: The email address for the ``To:`` header.
        :type subject: None or str
        :param subject: The ``Subject:`` header.
        :type inReplyTo: None or str
        :param inReplyTo: If set, an ``In-Reply-To:`` header will be
            generated. This should be set to the ``Message-ID:`` header from
            the client's original request email.
        :param bool includeMessageID: If ``True``, generate and include a
            ``Message-ID:`` header for the response.
        :param str contentType: The ``Content-Type:`` header.
        :kwargs: If given, the key will become the name of the header, and the
            value will become the Contents of that header.
        """
        self.write("From: %s" % fromAddress)
        self.write("To: %s" % toAddress)
        if includeMessageID:
            self.write("Message-ID: %s" % smtp.messageid())
        if inReplyTo:
            self.write("In-Reply-To: %s" % inReplyTo)
        self.write("Content-Type: %s" % contentType)
        self.write("Date: %s" % smtp.rfc822date())
        if not subject:
            subject = '[no subject]'
        # Prefix "Re: " unless the subject already starts with "re"
        # (case-insensitive):
        if not subject.lower().startswith('re'):
            subject = "Re: " + subject
        self.write("Subject: %s" % subject)
        if kwargs:
            # Extra headers: "some_header" / "some header" -> "Some-Header".
            for headerName, headerValue in kwargs.items():
                headerName = headerName.capitalize()
                headerName = headerName.replace(' ', '-')
                headerName = headerName.replace('_', '-')
                self.write("%s: %s" % (headerName, headerValue))
        # The first blank line designates that the headers have ended.
        # NOTE: because "\r\n" contains '\n', write() routes this through
        # writelines(), which emits two CRLF-terminated lines ('\r' and '').
        self.write("\r\n")

    def writeBody(self, body):
        """Write the response body into the :cvar:`mailfile`.

        If ``MailResponse.gpgContext`` is set, and signing is configured, the
        **body** will be automatically signed before writing its contents into
        the ``mailfile``.

        :param str body: The body of the response email.
        """
        if self.gpgContext:
            body, _ = gpgSignMessage(self.gpgContext, body)
        self.writelines(body)
class MailMessage(object):
    """Plugs into the Twisted Mail and receives an incoming message.

    :ivar list lines: A list of lines from an incoming email message.
    :ivar int nBytes: The number of bytes received thus far.
    :ivar bool ignoring: If ``True``, we're ignoring the rest of this message
        because it exceeded :ivar:`MailContext.maximumSize`.
    """
    # Zope-style declaration that this class satisfies Twisted's
    # smtp.IMessage interface.
    implements(smtp.IMessage)

    def __init__(self, context, fromCanonical=None):
        """Create a new MailMessage from a MailContext.

        :type context: :class:`MailContext`
        :param context: The configured context for the email server.
        :type fromCanonical: str or None
        :param fromCanonical: The canonical domain which this message was
            received from. For example, if ``'gmail.com'`` is the configured
            canonical domain for ``'googlemail.com'`` and a message is
            received from the latter domain, then this would be set to the
            former.
        """
        self.context = context
        self.fromCanonical = fromCanonical
        self.lines = []
        self.nBytes = 0
        self.ignoring = False

    def lineReceived(self, line):
        """Called when we get another line of an incoming message."""
        self.nBytes += len(line)
        # Over the size cap: stop buffering but keep draining the connection;
        # eomReceived() will then skip replying entirely.
        if self.nBytes > self.context.maximumSize:
            self.ignoring = True
        else:
            self.lines.append(line)
        # Only log raw message lines when safe logging is disabled.
        if not safelog.safe_logging:
            logging.debug("> %s", line.rstrip("\r\n"))

    def eomReceived(self):
        """Called when we receive the end of a message."""
        if not self.ignoring:
            self.reply()
        return defer.succeed(None)

    def connectionLost(self):
        """Called if we die partway through reading a message."""
        pass

    def getIncomingMessage(self):
        """Create and parse an :rfc:`2822` message object for all ``lines``
        received thus far.

        :rtype: :api:`twisted.mail.smtp.rfc822.Message`
        :returns: A ``Message`` comprised of all lines received thus far.
        """
        rawMessage = io.StringIO()
        for ln in self.lines:
            # Python 2 unicode coercion: StringIO requires unicode text.
            rawMessage.writelines(unicode(ln) + unicode('\n'))
        rawMessage.seek(0)
        return smtp.rfc822.Message(rawMessage)

    def getClientAddress(self, incoming):
        """Attempt to get the client's email address from an incoming email.

        :type incoming: :api:`twisted.mail.smtp.rfc822.Message`
        :param incoming: An incoming ``Message``, i.e. as returned from
            :meth:`getIncomingMessage`.
        :rtype: ``None`` or :api:`twisted.mail.smtp.Address`
        :returns: The client's email ``Address``, if it originated from a
            domain that we accept and the address was well-formed. Otherwise,
            returns ``None``.
        """
        addrHeader = None
        # Prefer the "From:" header; fall back to "Sender:" if absent/broken.
        try: fromAddr = incoming.getaddr("From")[1]
        except (IndexError, TypeError, AttributeError): pass
        else: addrHeader = fromAddr

        if not addrHeader:
            logging.warn("No From header on incoming mail.")
            try: senderHeader = incoming.getaddr("Sender")[1]
            except (IndexError, TypeError, AttributeError): pass
            else: addrHeader = senderHeader
        if not addrHeader:
            logging.warn("No Sender header on incoming mail.")
        else:
            try:
                # Normalize via the configured domain map/rules; rejects
                # unsupported domains and malformed addresses.
                client = smtp.Address(addr.normalizeEmail(
                    addrHeader,
                    self.context.domainMap,
                    self.context.domainRules))
            except (UnsupportedDomain, BadEmail, smtp.AddressError) as error:
                logging.warn(error)
            else:
                return client
        # Implicit return None when no acceptable address was found.

    def getMailFrom(self, incoming):
        """Find our address in the recipients list of the **incoming** message.

        :type incoming: :api:`twisted.mail.smtp.rfc822.Message`
        :param incoming: An incoming ``Message``, i.e. as returned from
            :meth:`getIncomingMessage`.
        :rtype: str
        :return: Our address from the recipients list. If we can't find it
            return our default ``SMTP_FROM_ADDRESS`` from the config file.
        """
        logging.debug("Searching for our email address in 'To:' header...")
        ours = None
        try:
            ourAddress = smtp.Address(self.context.fromAddr)
            allRecipients = incoming.getaddrlist("To")
            for _, addr in allRecipients:
                recipient = smtp.Address(addr)
                # Accept only recipients within our domain (or a subdomain).
                if not (ourAddress.domain in recipient.domain):
                    logging.debug(("Not our domain (%s) or subdomain, skipping"
                                   " email address: %s")
                                  % (ourAddress.domain, str(recipient)))
                    continue
                # The recipient's username should at least start with ours,
                # but it still might be a '+' address.
                if not recipient.local.startswith(ourAddress.local):
                    logging.debug(("Username doesn't begin with ours, skipping"
                                   " email address: %s") % str(recipient))
                    continue
                # Ignore everything after the first '+', if there is one.
                beforePlus = recipient.local.split('+', 1)[0]
                if beforePlus == ourAddress.local:
                    ours = str(recipient)
            if not ours:
                raise BadEmail(allRecipients)
        except Exception as error:
            logging.error(("Couldn't find our email address in incoming email "
                           "headers: %r" % error))
            # Just return the email address that we're configured to use:
            ours = self.context.fromAddr
        logging.debug("Found our email address: %s." % ours)
        return ours

    def getCanonicalDomain(self, domain):
        # Map a sender domain onto its configured canonical form; returns
        # None (implicitly) when the domain is unsupported or malformed.
        try:
            canonical = canonicalizeEmailDomain(domain, self.context.canon)
        except (UnsupportedDomain, BadEmail) as error:
            logging.warn(error)
        else:
            return canonical

    def reply(self):
        """Reply to an incoming email. Maybe.

        If nothing is returned from either :func:`createResponseBody` or
        :func:`generateResponse`, then the incoming email will not be
        responded to at all. This can happen for several reasons, for example:
        if the DKIM signature was invalid or missing, or if the incoming email
        came from an unacceptable domain, or if there have been too many
        emails from this client in the allotted time period.

        :rtype: :api:`twisted.internet.defer.Deferred`
        :returns: A ``Deferred`` which will callback when the response has
            been successfully sent, or errback if an error occurred while
            sending the email.
        """
        logging.info("Got an email; deciding whether to reply.")

        def _replyEB(fail): # pragma: no cover
            """Errback for a :api:`twisted.mail.smtp.SMTPSenderFactory`.

            :param fail: A :api:`twisted.python.failure.Failure` which occurred during
                the transaction.
            """
            logging.debug("_replyToMailEB() called with %r" % fail)
            error = fail.getTraceback() or "Unknown"
            logging.error(error)

        d = defer.Deferred()
        d.addErrback(_replyEB)

        incoming = self.getIncomingMessage()
        recipient = self.getMailFrom(incoming)
        client = self.getClientAddress(incoming)

        # Bail out (returning the unfired Deferred) on any rejection below.
        if not client:
            return d
        if not self.fromCanonical:
            self.fromCanonical = self.getCanonicalDomain(client.domain)
        rules = self.context.domainRules.get(self.fromCanonical, [])
        if not checkDKIM(incoming, rules):
            return d

        clientAddr = '@'.join([client.local, client.domain])
        messageID = incoming.getheader("Message-ID", None)
        subject = incoming.getheader("Subject", None)

        # Look up the locale part in the 'To:' address, if there is one and
        # get the appropriate Translation object:
        lang = translations.getLocaleFromPlusAddr(recipient)
        logging.info("Client requested email translation: %s" % lang)

        body = createResponseBody(self.lines, self.context, client, lang)
        if not body: return d  # The client was already warned.

        response = generateResponse(self.context.fromAddr, clientAddr, body,
                                    subject, messageID, self.context.gpgContext)
        if not response: return d

        logging.info("Sending reply to %s" % client)
        factory = smtp.SMTPSenderFactory(self.context.smtpFromAddr, clientAddr,
                                         response, d, retries=0, timeout=30)
        reactor.connectTCP(self.context.smtpServerIP,
                           self.context.smtpServerPort,
                           factory)
        return d
class MailDelivery(object):
    """Plugs into Twisted Mail and handles SMTP commands."""
    # Zope-style declaration of the Twisted smtp.IMessageDelivery interface.
    implements(smtp.IMessageDelivery)

    def setBridgeDBContext(self, context):
        # Store the server configuration and reset the canonical domain of
        # the current sender (filled in later by validateFrom()).
        self.context = context
        self.fromCanonical = None

    def receivedHeader(self, helo, origin, recipients):
        """Create the ``Received:`` header for an incoming email.

        :type helo: tuple
        :param helo: The lines received during SMTP client HELO.
        :type origin: :api:`twisted.mail.smtp.Address`
        :param origin: The email address of the sender.
        :type recipients: list
        :param recipients: A list of :api:`twisted.mail.smtp.User` instances.
        """
        cameFrom = "%s (%s [%s])" % (helo[0] or origin, helo[0], helo[1])
        cameFor = ', '.join(["<{0}>".format(recp.dest) for recp in recipients])
        hdr = str("Received: from %s for %s; %s"
                  % (cameFrom, cameFor, smtp.rfc822date()))
        return hdr

    def validateFrom(self, helo, origin):
        """Validate the ``MAIL FROM:`` address on the incoming SMTP connection.

        This is done at the SMTP layer. Meaning that if a Postfix or other
        email server is proxying emails from the outside world to BridgeDB,
        the :api:`origin.domain <twisted.email.smtp.Address.domain` will be
        set to the local hostname. Therefore, if the SMTP ``MAIL FROM:``
        domain name is our own hostname (as returned from
        :func:`socket.gethostname`) or our own FQDN, allow the connection.

        Otherwise, if the ``MAIL FROM:`` domain has a canonical domain in our
        mapping (taken from :ivar:`context.canon <MailContext.canon>`, which
        is taken in turn from the ``EMAIL_DOMAIN_MAP``), then our
        :ivar:`fromCanonicalSMTP` is set to that domain.

        :type helo: tuple
        :param helo: The lines received during SMTP client HELO.
        :type origin: :api:`twisted.mail.smtp.Address`
        :param origin: The email address we received this message from.
        :raises: :api:`twisted.mail.smtp.SMTPBadSender` if the
            ``origin.domain`` was neither our local hostname, nor one of the
            canonical domains listed in :ivar:`context.canon`.
        :rtype: :api:`twisted.mail.smtp.Address`
        :returns: The ``origin``. We *must* return some non-``None`` data from
            this method, or else Twisted will reply to the sender with a 503
            error.
        """
        try:
            # Mail relayed by our own host is accepted without canonicalization.
            if ((origin.domain == self.context.hostname) or
                (origin.domain == self.context.hostaddr)):
                return origin
            else:
                logging.debug("ORIGIN DOMAIN: %r" % origin.domain)
                canonical = canonicalizeEmailDomain(origin.domain,
                                                    self.context.canon)
                logging.debug("Got canonical domain: %r" % canonical)
                self.fromCanonical = canonical
        except UnsupportedDomain as error:
            # Not one of our accepted domains: reject the sender outright.
            logging.info(error)
            raise smtp.SMTPBadSender(origin.domain)
        except Exception as error:
            # Any other failure is logged but does not reject the sender.
            logging.exception(error)
        return origin # This method *cannot* return None, or it'll cause a 503.

    def validateTo(self, user):
        """Validate the SMTP ``RCPT TO:`` address for the incoming connection.

        The local username and domain name to which this SMTP message is
        addressed, after being stripped of any ``'+'`` aliases, **must** be
        identical to those in the email address set our
        ``EMAIL_SMTP_FROM_ADDR`` configuration file option.

        :type user: :api:`twisted.mail.smtp.User`
        :param user: Information about the user this SMTP message was
            addressed to.
        :raises: A :api:`twisted.mail.smtp.SMTPBadRcpt` if any of the above
            conditions weren't met.
        :rtype: callable
        :returns: A parameterless function which returns an instance of
            :class:`SMTPMessage`.
        """
        logging.debug("Validating SMTP 'RCPT TO:' email address...")
        recipient = user.dest
        ourAddress = smtp.Address(self.context.smtpFromAddr)
        # Recipient domain must contain our domain (i.e. be it, or a subdomain).
        if not (ourAddress.domain in recipient.domain):
            logging.debug(("Not our domain (%s) or subdomain, skipping"
                           " SMTP 'RCPT TO' address: %s")
                          % (ourAddress.domain, str(recipient)))
            raise smtp.SMTPBadRcpt(str(recipient))
        # The recipient's username should at least start with ours,
        # but it still might be a '+' address.
        if not recipient.local.startswith(ourAddress.local):
            logging.debug(("Username doesn't begin with ours, skipping"
                           " SMTP 'RCPT TO' address: %s") % str(recipient))
            raise smtp.SMTPBadRcpt(str(recipient))
        # Ignore everything after the first '+', if there is one.
        beforePlus = recipient.local.split('+', 1)[0]
        if beforePlus != ourAddress.local:
            raise smtp.SMTPBadRcpt(str(recipient))
        # Factory for the message handler; captures the canonical sender
        # domain determined during validateFrom().
        return lambda: MailMessage(self.context, self.fromCanonical)
class MailFactory(smtp.SMTPFactory):
    """Plugs into Twisted Mail; creates a new MailDelivery whenever we get
    a connection on the SMTP port."""

    def __init__(self, context=None, **kw):
        smtp.SMTPFactory.__init__(self, **kw)
        # One shared delivery handler for every connection this factory makes.
        self.delivery = MailDelivery()
        if context:
            self.setBridgeDBContext(context)

    def setBridgeDBContext(self, context):
        # Keep the factory and its delivery handler configured consistently.
        self.context = context
        self.delivery.setBridgeDBContext(context)

    def buildProtocol(self, addr):
        # Let the base factory build the SMTP protocol, then attach our
        # shared delivery handler to it.
        protocol = smtp.SMTPFactory.buildProtocol(self, addr)
        protocol.delivery = self.delivery
        return protocol
def addServer(config, distributor, schedule):
    """Set up a SMTP server for responding to requests for bridges.

    :type config: :class:`bridgedb.persistent.Conf`
    :param config: A configuration object.
    :type distributor: :class:`bridgedb.Dist.EmailBasedDistributor`
    :param distributor: A distributor which will handle database
        interactions, and will decide which bridges to give to who and when.
    :type schedule: :class:`bridgedb.schedule.ScheduledInterval`
    :param schedule: The schedule. XXX: Is this even used?
    :returns: The listening :class:`MailFactory`.
    """
    context = MailContext(config, distributor, schedule)
    factory = MailFactory(context)

    # An empty interface string means "bind to all interfaces".
    bindTo = config.EMAIL_BIND_IP or ""
    reactor.listenTCP(config.EMAIL_PORT, factory, interface=bindTo)

    # Set up a LoopingCall to run every 30 minutes and forget old email times.
    cleaner = LoopingCall(distributor.cleanDatabase)
    cleaner.start(1800, now=False)
    return factory
|
#!/usr/bin/env python
'''
Provides classes for loading chunk files from local storage and
putting them out into local storage.
This software is released under an MIT/X11 open source license.
Copyright 2012-2013 Diffeo, Inc.
'''
import os
import sys
import time
import logging
import hashlib
import requests
import traceback
import _extract_spinn3r
import streamcorpus
from _get_name_info import get_name_info
from streamcorpus import decrypt_and_uncompress, compress_and_encrypt_path, Chunk
from cStringIO import StringIO
from _exceptions import FailedExtraction
from _tarball_export import tarball_export
# Module-level logger shared by all loader/dumper stages defined here.
logger = logging.getLogger(__name__)

# Map streamcorpus wire-format version strings (as they appear in the
# pipeline config under 'streamcorpus_version') to the matching message class.
_message_versions = {
    'v0_1_0': streamcorpus.StreamItem_v0_1_0,
    'v0_2_0': streamcorpus.StreamItem_v0_2_0,
    'v0_3_0': streamcorpus.StreamItem_v0_3_0,
    }

import boto
from boto.s3.key import Key
from boto.s3.connection import S3Connection

## stop Boto's built-in retries, so we can do our own
if not boto.config.has_section('Boto'):
    boto.config.add_section('Boto')
boto.config.set('Boto', 'num_retries', '0')
def _retry(func):
'''
Decorator for methods that need many retries, because of
intermittent failures, such as AWS calls via boto, which has a
non-back-off retry.
'''
def retry_func(self, *args, **kwargs):
tries = 0
while 1:
try:
return func(self, *args, **kwargs)
break
except OSError, exc:
## OSError: [Errno 24] Too many open files
logger.critical(traceback.format_exc(exc))
raise exc
except FailedExtraction, exc:
## pass through exc to caller
logger.critical(traceback.format_exc(exc))
raise exc
except Exception, exc:
logger.critical(traceback.format_exc(exc))
logger.critical('will retry')
time.sleep(3 * tries)
msg = 'FAIL(%d): having I/O trouble with S3: %s' % \
(tries, traceback.format_exc(exc))
logger.info(msg)
tries += 1
if tries > self.config['tries']:
## indicate complete failure to pipeline so it
## gets recorded in task_queue
raise FailedExtraction(msg)
return retry_func
def get_bucket(config):
    '''
    Open an S3 connection and return the configured bucket.

    ## use special keys for accessing AWS public data sets bucket

    :param config: mapping providing 'aws_access_key_id_path',
        'aws_secret_access_key_path', and 'bucket'.
    :returns: a boto S3 ``Bucket`` instance.
    '''
    # Read credentials from files inside `with` blocks so the handles are
    # closed (the original leaked them), and strip surrounding whitespace:
    # key files almost always end with a newline, which would otherwise be
    # sent to AWS as part of the credential and cause auth failures.
    with open(config['aws_access_key_id_path']) as key_fh:
        aws_access_key_id = key_fh.read().strip()
    with open(config['aws_secret_access_key_path']) as secret_fh:
        aws_secret_access_key = secret_fh.read().strip()
    conn = S3Connection(aws_access_key_id,
                        aws_secret_access_key)
    bucket = conn.get_bucket(config['bucket'])
    return bucket
class from_s3_chunks(object):
    """Reader stage: fetch a chunk file from S3, decrypt and uncompress it,
    verify its md5 against the one embedded in the key name, and return the
    parsed stream items."""

    def __init__(self, config):
        # config: pipeline stage configuration (credential paths, bucket,
        # 'tries', 'input_format', 'streamcorpus_version', key paths).
        self.config = config
        self.bucket = get_bucket(config)

    def __call__(self, i_str):
        '''
        Takes a date_hour string over stdin and generates chunks from
        s3://<bucket><s3_prefix_path>/data_hour/
        '''
        logger.info('from_s3_chunks: %r' % i_str)
        key = Key(self.bucket, i_str.strip())
        return self.get_chunk(key)

    @_retry
    def get_keys(self, date_hour):
        '''
        Given a date_hour dir, generate all the Key instances for
        chunks in this dir, requires fetch, decrypt, uncompress,
        deserialize:
        '''
        prefix = os.path.join(self.config['s3_path_prefix'], date_hour)
        return self.bucket.list(prefix=prefix)

    @_retry
    def get_chunk(self, key):
        # Download/verify loop: on md5 mismatch we re-download up to
        # config['tries'] times before raising FailedExtraction.
        tries = 0
        while 1:
            fh = StringIO()
            key.get_contents_to_file(fh)
            data = fh.getvalue()
            _errors, data = decrypt_and_uncompress(
                data,
                self.config['gpg_decryption_key_path'])
            logger.info( '\n'.join(_errors) )
            # The expected md5 is encoded in the key name; its position
            # depends on the corpus version's naming convention.
            if self.config['input_format'] == 'streamitem' and \
                    self.config['streamcorpus_version'] == 'v0_1_0':
                i_content_md5 = key.key.split('.')[-3]
            else:
                ## go past {sc,protostream}.xz.gpg
                # last 32 chars of the fourth-from-last dotted component
                i_content_md5 = key.key.split('.')[-4][-32:]

            ## verify the data matches expected md5
            f_content_md5 = hashlib.md5(data).hexdigest() # pylint: disable=E1101
            if i_content_md5 != f_content_md5:
                msg = 'FAIL(%d): %s --> %s != %s' % (tries, key.key, i_content_md5, f_content_md5)
                logger.critical(msg)
                tries += 1
                if tries > self.config['tries']:
                    ## indicate complete failure to pipeline so it
                    ## gets recorded in task_queue
                    raise FailedExtraction(msg)
                else:
                    continue

            # md5 verified: parse according to the configured input format.
            if self.config['input_format'] == 'spinn3r':
                ## convert the data from spinn3r's protostream format
                return _extract_spinn3r._generate_stream_items( data )
            elif self.config['input_format'] == 'streamitem':
                message = _message_versions[ self.config['streamcorpus_version'] ]
                return streamcorpus.Chunk(data=data, message=message)
            else:
                sys.exit('Invalid config: input_format = %r' % self.config['input_format'])
class to_s3_chunks(object):
    """Writer stage: compress and GPG-encrypt a local chunk file, upload it
    to S3 with a public-read ACL, and optionally verify the upload by
    fetching it back over plain HTTP and comparing md5s."""

    def __init__(self, config):
        # config: pipeline stage configuration (credential paths, bucket,
        # naming template, GPG key paths, verification/cleanup flags).
        self.config = config
        self.bucket = get_bucket(config)

    def __call__(self, t_path, name_info, i_str):
        '''
        Load chunk from t_path and put it into the right place in s3
        using the output_name template from the config
        '''
        name_info.update( get_name_info(t_path, i_str=i_str) )
        # An empty chunk produces no S3 output at all.
        if name_info['num'] == 0:
            o_path = None
            return o_path
        o_fname = self.config['output_name'] % name_info
        o_path = os.path.join(self.config['s3_path_prefix'], o_fname + '.sc.xz.gpg')
        name_info['s3_output_path'] = o_path
        logger.info('to_s3_chunks: \n\t%r\n\tfrom: %r\n\tby way of %r ' % (o_path, i_str, t_path))
        ## forcibly collect dereferenced objects
        #gc.collect()
        ## compress and encrypt
        logger.critical( 'key path: %r' % self.config['gpg_encryption_key_path'] )
        _errors, t_path2 = compress_and_encrypt_path(
            t_path,
            self.config['gpg_encryption_key_path'],
            gpg_recipient=self.config['gpg_recipient'])
        logger.info( '\n'.join(_errors) )
        data = open(t_path2).read()
        logger.debug('compressed size: %d' % len(data))
        # put/verify loop: keep re-uploading until the HTTP read-back md5
        # matches, or stop after one put when verification is disabled.
        while 1:
            start_time = time.time()
            self.put(o_path, data)
            elapsed = time.time() - start_time
            if elapsed > 0:
                logger.debug('put %.1f bytes/second' % (len(data) / elapsed))
            if self.config['verify_via_http']:
                try:
                    start_time = time.time()
                    self.verify(o_path, name_info['md5'])
                    elapsed = time.time() - start_time
                    if elapsed > 0:
                        logger.debug('verify %.1f bytes/second' % (len(data) / elapsed))
                    break
                except Exception, exc:
                    logger.critical( 'verify_via_http failed so retrying: %r' % exc )
                    ## keep looping if verify raises anything
                    continue
            else:
                ## not verifying, so don't attempt multiple puts
                break

        logger.info('to_s3_chunks finished: %r' % i_str)
        # NOTE(review): only t_path is removed here; the encrypted temp file
        # t_path2 appears to be left behind -- confirm whether it should
        # also be cleaned up.
        if self.config['cleanup_tmp_files']:
            try:
                os.remove( t_path )
            except Exception, exc:
                logger.info('%s --> failed to remove %s' % (exc, t_path))

        ## return the final output path
        logger.info('to_s3_chunks finished:\n\t input: %s\n\toutput: %s' % (i_str, o_path))
        return o_path

    @_retry
    def put(self, o_path, data):
        # Upload and make world-readable (public data set bucket).
        key = Key(self.bucket, o_path)
        key.set_contents_from_file(StringIO(data))
        key.set_acl('public-read')

    @_retry
    def verify(self, o_path, md5):
        # Fetch the object anonymously over HTTP, decrypt/uncompress, and
        # compare its md5 against what we uploaded.
        url = 'http://s3.amazonaws.com/%(bucket)s/%(o_path)s' % dict(
            bucket = self.config['bucket'],
            o_path = o_path)
        logger.info('fetching %r' % url)
        req = requests.get(url)
        errors, data = decrypt_and_uncompress(
            req.content, # pylint: disable=E1103
            self.config['gpg_decryption_key_path'])
        logger.info( 'got back SIs: %d' % len( list( Chunk(data=data) ) ))
        rec_md5 = hashlib.md5(data).hexdigest() # pylint: disable=E1101
        if md5 == rec_md5:
            return
        else:
            logger.critical('\n'.join(errors))
            raise Exception('original md5 = %r != %r = received md5' % (md5, rec_md5))
class to_s3_tarballs(object):
    """Writer stage: export a local chunk file as a .tar.gz, upload it to S3
    with a public-read ACL, optionally verify over HTTP, and clean up the
    temporary files."""

    def __init__(self, config):
        # config: pipeline stage configuration (credential paths, bucket,
        # naming template, verification/cleanup flags).
        self.config = config
        self.bucket = get_bucket(config)

    def __call__(self, t_path, name_info, i_str):
        '''
        Load chunk from t_path and put it into the right place in s3
        using the output_name template from the config
        '''
        name_info.update( get_name_info(t_path, i_str=i_str) )
        # An empty chunk produces no S3 output at all.
        if name_info['num'] == 0:
            o_path = None
            return o_path
        o_fname = self.config['output_name'] % name_info
        o_path = os.path.join(self.config['s3_path_prefix'], o_fname + '.tar.gz')
        logger.info('to_s3_tarballs: \n\t%r\n\tfrom: %r\n\tby way of %r ' % (o_path, i_str, t_path))
        ## forcibly collect dereferenced objects
        #gc.collect()
        t_path2 = tarball_export(t_path, name_info)
        data = open(t_path2).read()
        # md5 recorded so verify() can check the HTTP read-back.
        name_info['md5'] = hashlib.md5(data).hexdigest() # pylint: disable=E1101
        self.upload(o_path, data, name_info)
        # Remove both the original chunk and the exported tarball.
        self.cleanup(t_path)
        self.cleanup(t_path2)
        logger.info('to_s3_tarballs finished:\n\t input: %s\n\toutput: %s' % (i_str, o_path))
        ## return the final output path
        return o_path

    def upload(self, o_path, data, name_info):
        # Bounded put/verify loop (unlike to_s3_chunks, which loops forever):
        # up to max_retries attempts when HTTP verification is enabled.
        logger.debug('to_s3_tarballs: compressed size: %d' % len(data))
        max_retries = 20
        tries = 0
        while tries < max_retries:
            tries += 1
            start_time = time.time()
            ## this automatically retries
            self.put(o_path, data)
            elapsed = time.time() - start_time
            if elapsed > 0:
                logger.debug('to_s3_tarballs: put %.1f bytes/second' % (len(data) / elapsed))
            if self.config['verify_via_http']:
                try:
                    start_time = time.time()
                    self.verify(o_path, name_info['md5'])
                    elapsed = time.time() - start_time
                    if elapsed > 0:
                        logger.debug('to_s3_tarballs: verify %.1f bytes/second' % (len(data) / elapsed))
                    break
                except Exception, exc:
                    logger.critical( 'to_s3_tarballs: verify_via_http failed so retrying: %r' % exc )
                    ## keep looping if verify raises anything
                    continue
            else:
                ## not verifying, so don't attempt multiple puts
                break

    @_retry
    def cleanup(self, t_path):
        # Best-effort removal of a temp file, gated by config.
        if self.config['cleanup_tmp_files']:
            try:
                os.remove( t_path )
            except Exception, exc:
                logger.info('%s --> failed to remove %s' % (exc, t_path))

    @_retry
    def put(self, o_path, data):
        # Upload and make world-readable (public data set bucket).
        key = Key(self.bucket, o_path)
        key.set_contents_from_file(StringIO(data))
        key.set_acl('public-read')

    @_retry
    def verify(self, o_path, md5):
        # Fetch the object anonymously over HTTP and compare md5s; tarballs
        # are not encrypted, so no decrypt step here.
        url = 'http://s3.amazonaws.com/%(bucket)s/%(o_path)s' % dict(
            bucket = self.config['bucket'],
            o_path = o_path)
        logger.info('fetching %r' % url)
        req = requests.get(url)
        data = req.content # pylint: disable=E1103
        rec_md5 = hashlib.md5(data).hexdigest() # pylint: disable=E1101
        if md5 == rec_md5:
            return
        else:
            raise Exception('original md5 = %r != %r = received md5' % (md5, rec_md5))
This change causes the to_s3_chunks loader to also properly clean up its tmp_dir, just like to_local_chunks does.
#!/usr/bin/env python
'''
Provides classes for loading chunk files from local storage and
putting them out into local storage.
This software is released under an MIT/X11 open source license.
Copyright 2012-2013 Diffeo, Inc.
'''
import os
import sys
import time
import logging
import hashlib
import requests
import traceback
import _extract_spinn3r
import streamcorpus
from _get_name_info import get_name_info
from streamcorpus import decrypt_and_uncompress, compress_and_encrypt_path, Chunk
from cStringIO import StringIO
from _exceptions import FailedExtraction
from _tarball_export import tarball_export
# Module-level logger shared by all loader/dumper stages defined here.
logger = logging.getLogger(__name__)

# Map streamcorpus wire-format version strings (as they appear in the
# pipeline config under 'streamcorpus_version') to the matching message class.
_message_versions = {
    'v0_1_0': streamcorpus.StreamItem_v0_1_0,
    'v0_2_0': streamcorpus.StreamItem_v0_2_0,
    'v0_3_0': streamcorpus.StreamItem_v0_3_0,
    }

import boto
from boto.s3.key import Key
from boto.s3.connection import S3Connection

## stop Boto's built-in retries, so we can do our own
if not boto.config.has_section('Boto'):
    boto.config.add_section('Boto')
boto.config.set('Boto', 'num_retries', '0')
def _retry(func):
'''
Decorator for methods that need many retries, because of
intermittent failures, such as AWS calls via boto, which has a
non-back-off retry.
'''
def retry_func(self, *args, **kwargs):
tries = 0
while 1:
try:
return func(self, *args, **kwargs)
break
except OSError, exc:
## OSError: [Errno 24] Too many open files
logger.critical(traceback.format_exc(exc))
raise exc
except FailedExtraction, exc:
## pass through exc to caller
logger.critical(traceback.format_exc(exc))
raise exc
except Exception, exc:
logger.critical(traceback.format_exc(exc))
logger.critical('will retry')
time.sleep(3 * tries)
msg = 'FAIL(%d): having I/O trouble with S3: %s' % \
(tries, traceback.format_exc(exc))
logger.info(msg)
tries += 1
if tries > self.config['tries']:
## indicate complete failure to pipeline so it
## gets recorded in task_queue
raise FailedExtraction(msg)
return retry_func
def get_bucket(config):
    '''
    Open an S3 connection and return the configured bucket.

    ## use special keys for accessing AWS public data sets bucket

    :param config: mapping providing 'aws_access_key_id_path',
        'aws_secret_access_key_path', and 'bucket'.
    :returns: a boto S3 ``Bucket`` instance.
    '''
    # Read credentials from files inside `with` blocks so the handles are
    # closed (the original leaked them), and strip surrounding whitespace:
    # key files almost always end with a newline, which would otherwise be
    # sent to AWS as part of the credential and cause auth failures.
    with open(config['aws_access_key_id_path']) as key_fh:
        aws_access_key_id = key_fh.read().strip()
    with open(config['aws_secret_access_key_path']) as secret_fh:
        aws_secret_access_key = secret_fh.read().strip()
    conn = S3Connection(aws_access_key_id,
                        aws_secret_access_key)
    bucket = conn.get_bucket(config['bucket'])
    return bucket
class from_s3_chunks(object):
    """Reader stage: fetch a chunk file from S3, decrypt and uncompress it,
    verify its md5 against the one embedded in the key name, and return the
    parsed stream items."""

    def __init__(self, config):
        # config: pipeline stage configuration (credential paths, bucket,
        # 'tries', 'input_format', 'streamcorpus_version', key paths).
        self.config = config
        self.bucket = get_bucket(config)

    def __call__(self, i_str):
        '''
        Takes a date_hour string over stdin and generates chunks from
        s3://<bucket><s3_prefix_path>/data_hour/
        '''
        logger.info('from_s3_chunks: %r' % i_str)
        key = Key(self.bucket, i_str.strip())
        return self.get_chunk(key)

    @_retry
    def get_keys(self, date_hour):
        '''
        Given a date_hour dir, generate all the Key instances for
        chunks in this dir, requires fetch, decrypt, uncompress,
        deserialize:
        '''
        prefix = os.path.join(self.config['s3_path_prefix'], date_hour)
        return self.bucket.list(prefix=prefix)

    @_retry
    def get_chunk(self, key):
        # Download/verify loop: on md5 mismatch we re-download up to
        # config['tries'] times before raising FailedExtraction.
        tries = 0
        while 1:
            fh = StringIO()
            key.get_contents_to_file(fh)
            data = fh.getvalue()
            _errors, data = decrypt_and_uncompress(
                data,
                self.config['gpg_decryption_key_path'])
            logger.info( '\n'.join(_errors) )
            # The expected md5 is encoded in the key name; its position
            # depends on the corpus version's naming convention.
            if self.config['input_format'] == 'streamitem' and \
                    self.config['streamcorpus_version'] == 'v0_1_0':
                i_content_md5 = key.key.split('.')[-3]
            else:
                ## go past {sc,protostream}.xz.gpg
                # last 32 chars of the fourth-from-last dotted component
                i_content_md5 = key.key.split('.')[-4][-32:]

            ## verify the data matches expected md5
            f_content_md5 = hashlib.md5(data).hexdigest() # pylint: disable=E1101
            if i_content_md5 != f_content_md5:
                msg = 'FAIL(%d): %s --> %s != %s' % (tries, key.key, i_content_md5, f_content_md5)
                logger.critical(msg)
                tries += 1
                if tries > self.config['tries']:
                    ## indicate complete failure to pipeline so it
                    ## gets recorded in task_queue
                    raise FailedExtraction(msg)
                else:
                    continue

            # md5 verified: parse according to the configured input format.
            if self.config['input_format'] == 'spinn3r':
                ## convert the data from spinn3r's protostream format
                return _extract_spinn3r._generate_stream_items( data )
            elif self.config['input_format'] == 'streamitem':
                message = _message_versions[ self.config['streamcorpus_version'] ]
                return streamcorpus.Chunk(data=data, message=message)
            else:
                sys.exit('Invalid config: input_format = %r' % self.config['input_format'])
class to_s3_chunks(object):
    """Writer stage: compress and GPG-encrypt a local chunk file, upload it
    to S3 with a public-read ACL, and optionally verify the upload by
    fetching it back over plain HTTP and comparing md5s."""

    def __init__(self, config):
        # config: pipeline stage configuration (credential paths, bucket,
        # naming template, GPG key paths, tmp dir, verification/cleanup flags).
        self.config = config
        self.bucket = get_bucket(config)

    def __call__(self, t_path, name_info, i_str):
        '''
        Load chunk from t_path and put it into the right place in s3
        using the output_name template from the config
        '''
        name_info.update( get_name_info(t_path, i_str=i_str) )
        # An empty chunk produces no S3 output at all.
        if name_info['num'] == 0:
            o_path = None
            return o_path
        o_fname = self.config['output_name'] % name_info
        o_path = os.path.join(self.config['s3_path_prefix'], o_fname + '.sc.xz.gpg')
        name_info['s3_output_path'] = o_path
        logger.info('to_s3_chunks: \n\t%r\n\tfrom: %r\n\tby way of %r ' % (o_path, i_str, t_path))
        ## forcibly collect dereferenced objects
        #gc.collect()
        ## compress and encrypt
        logger.critical( 'key path: %r' % self.config['gpg_encryption_key_path'] )
        # tmp_dir keeps the intermediate encrypted file inside the
        # pipeline's configured temp directory so it can be cleaned up.
        _errors, t_path2 = compress_and_encrypt_path(
            t_path,
            self.config['gpg_encryption_key_path'],
            gpg_recipient=self.config['gpg_recipient'],
            tmp_dir=self.config['tmp_dir_path'],
            )
        logger.info( '\n'.join(_errors) )
        data = open(t_path2).read()
        logger.debug('compressed size: %d' % len(data))
        # put/verify loop: keep re-uploading until the HTTP read-back md5
        # matches, or stop after one put when verification is disabled.
        while 1:
            start_time = time.time()
            self.put(o_path, data)
            elapsed = time.time() - start_time
            if elapsed > 0:
                logger.debug('put %.1f bytes/second' % (len(data) / elapsed))
            if self.config['verify_via_http']:
                try:
                    start_time = time.time()
                    self.verify(o_path, name_info['md5'])
                    elapsed = time.time() - start_time
                    if elapsed > 0:
                        logger.debug('verify %.1f bytes/second' % (len(data) / elapsed))
                    break
                except Exception, exc:
                    logger.critical( 'verify_via_http failed so retrying: %r' % exc )
                    ## keep looping if verify raises anything
                    continue
            else:
                ## not verifying, so don't attempt multiple puts
                break

        logger.info('to_s3_chunks finished: %r' % i_str)
        if self.config['cleanup_tmp_files']:
            try:
                os.remove( t_path )
            except Exception, exc:
                logger.info('%s --> failed to remove %s' % (exc, t_path))

        ## return the final output path
        logger.info('to_s3_chunks finished:\n\t input: %s\n\toutput: %s' % (i_str, o_path))
        return o_path

    @_retry
    def put(self, o_path, data):
        # Upload and make world-readable (public data set bucket).
        key = Key(self.bucket, o_path)
        key.set_contents_from_file(StringIO(data))
        key.set_acl('public-read')

    @_retry
    def verify(self, o_path, md5):
        # Fetch the object anonymously over HTTP, decrypt/uncompress, and
        # compare its md5 against what we uploaded.
        url = 'http://s3.amazonaws.com/%(bucket)s/%(o_path)s' % dict(
            bucket = self.config['bucket'],
            o_path = o_path)
        logger.info('fetching %r' % url)
        req = requests.get(url)
        errors, data = decrypt_and_uncompress(
            req.content, # pylint: disable=E1103
            self.config['gpg_decryption_key_path'])
        logger.info( 'got back SIs: %d' % len( list( Chunk(data=data) ) ))
        rec_md5 = hashlib.md5(data).hexdigest() # pylint: disable=E1101
        if md5 == rec_md5:
            return
        else:
            logger.critical('\n'.join(errors))
            raise Exception('original md5 = %r != %r = received md5' % (md5, rec_md5))
class to_s3_tarballs(object):
    """Writer stage: export a local chunk file as a .tar.gz, upload it to S3
    with a public-read ACL, optionally verify over HTTP, and clean up the
    temporary files."""

    def __init__(self, config):
        # config: pipeline stage configuration (credential paths, bucket,
        # naming template, verification/cleanup flags).
        self.config = config
        self.bucket = get_bucket(config)

    def __call__(self, t_path, name_info, i_str):
        '''
        Load chunk from t_path and put it into the right place in s3
        using the output_name template from the config
        '''
        name_info.update( get_name_info(t_path, i_str=i_str) )
        # An empty chunk produces no S3 output at all.
        if name_info['num'] == 0:
            o_path = None
            return o_path
        o_fname = self.config['output_name'] % name_info
        o_path = os.path.join(self.config['s3_path_prefix'], o_fname + '.tar.gz')
        logger.info('to_s3_tarballs: \n\t%r\n\tfrom: %r\n\tby way of %r ' % (o_path, i_str, t_path))
        ## forcibly collect dereferenced objects
        #gc.collect()
        t_path2 = tarball_export(t_path, name_info)
        data = open(t_path2).read()
        # md5 recorded so verify() can check the HTTP read-back.
        name_info['md5'] = hashlib.md5(data).hexdigest() # pylint: disable=E1101
        self.upload(o_path, data, name_info)
        # Remove both the original chunk and the exported tarball.
        self.cleanup(t_path)
        self.cleanup(t_path2)
        logger.info('to_s3_tarballs finished:\n\t input: %s\n\toutput: %s' % (i_str, o_path))
        ## return the final output path
        return o_path

    def upload(self, o_path, data, name_info):
        # Bounded put/verify loop (unlike to_s3_chunks, which loops forever):
        # up to max_retries attempts when HTTP verification is enabled.
        logger.debug('to_s3_tarballs: compressed size: %d' % len(data))
        max_retries = 20
        tries = 0
        while tries < max_retries:
            tries += 1
            start_time = time.time()
            ## this automatically retries
            self.put(o_path, data)
            elapsed = time.time() - start_time
            if elapsed > 0:
                logger.debug('to_s3_tarballs: put %.1f bytes/second' % (len(data) / elapsed))
            if self.config['verify_via_http']:
                try:
                    start_time = time.time()
                    self.verify(o_path, name_info['md5'])
                    elapsed = time.time() - start_time
                    if elapsed > 0:
                        logger.debug('to_s3_tarballs: verify %.1f bytes/second' % (len(data) / elapsed))
                    break
                except Exception, exc:
                    logger.critical( 'to_s3_tarballs: verify_via_http failed so retrying: %r' % exc )
                    ## keep looping if verify raises anything
                    continue
            else:
                ## not verifying, so don't attempt multiple puts
                break

    @_retry
    def cleanup(self, t_path):
        # Best-effort removal of a temp file, gated by config.
        if self.config['cleanup_tmp_files']:
            try:
                os.remove( t_path )
            except Exception, exc:
                logger.info('%s --> failed to remove %s' % (exc, t_path))

    @_retry
    def put(self, o_path, data):
        # Upload and make world-readable (public data set bucket).
        key = Key(self.bucket, o_path)
        key.set_contents_from_file(StringIO(data))
        key.set_acl('public-read')

    @_retry
    def verify(self, o_path, md5):
        # Fetch the object anonymously over HTTP and compare md5s; tarballs
        # are not encrypted, so no decrypt step here.
        url = 'http://s3.amazonaws.com/%(bucket)s/%(o_path)s' % dict(
            bucket = self.config['bucket'],
            o_path = o_path)
        logger.info('fetching %r' % url)
        req = requests.get(url)
        data = req.content # pylint: disable=E1103
        rec_md5 = hashlib.md5(data).hexdigest() # pylint: disable=E1101
        if md5 == rec_md5:
            return
        else:
            raise Exception('original md5 = %r != %r = received md5' % (md5, rec_md5))
|
# -*- coding: utf-8 -*-
import os,yaml,re,json
from subprocess import check_output, STDOUT,PIPE,CalledProcessError
from collections import Counter
from importlib import import_module
# Directory (relative) that holds the individual check-script modules.
MODULE_PATH = 'modules'
# Filename patterns to skip when scanning for check scripts:
# underscore-prefixed helpers, hidden dotfiles, and compiled .pyc artifacts.
IGNORE_FILES = ['^_', '^\.', '\.pyc$']
# Compiled once at import time; consumed by ignore_files().
_filefilters = [re.compile(pat) for pat in IGNORE_FILES]
###############################################################
# Common Functions
###############################################################
def dict2csv(dictdata, columnnames):
    """Render one row of *dictdata* as a comma-separated string.

    Columns are emitted in *columnnames* order. List values become a
    single pipe-delimited cell; everything else is str()-converted.
    No CSV quoting is performed.
    """
    def render(value):
        # Lists collapse into one '|'-joined cell.
        return '|'.join(value) if isinstance(value, list) else str(value)
    return ','.join(render(dictdata[name]) for name in columnnames)
def ignore_files(filename):
    """filter the non-script filenames

    Return False when *filename* matches any of the module-level
    ``_filefilters`` patterns (compiled from ``IGNORE_FILES``), True
    otherwise -- i.e. True means "keep this file".

    if '.ignore_file' is found in 'modules' directory, then those filter in file
    will be added to the patterns
    """
    return not any(pat.search(filename) for pat in _filefilters)
###############################################################
# Class CheckStatus
###############################################################
class CheckStatus(object):
    """Enumerates the possible outcomes of a single check."""
    PASSED = 'PASSED'        # check result is passed
    FAILED = 'FAILED'        # check result is failed
    UNKNOWN = 'UNKNOWN'      # unknown situation
    RUNERR = 'RUNERR'        # error in runtime
    UNCHECKED = 'UNCHECKED'  # check has not been run yet

    @staticmethod
    def validateResult(result):
        """Normalize *result* to one of the status constants.

        :param str result: A (case-insensitive) status string.
        :returns: The upper-cased status if it is a recognized constant,
            otherwise ``CheckStatus.UNKNOWN``.

        Fixes over the original: it was an instance method missing ``self``;
        it passed the bound method ``result.upper`` (not ``result.upper()``)
        to ``hasattr``, so the lookup could never match; and the fallback
        returned a bare ``UNKNOWN`` name, which raised NameError.
        """
        status = result.upper()
        if hasattr(CheckStatus, status):
            return status
        return CheckStatus.UNKNOWN
###############################################################
# Class ShellModule
###############################################################
class ShellModule(object):
    """This class wrap the non-python script.

    Each instance represents one external (shell) check script; run()
    executes it and parses its last stdout line as JSON.
    """
    def __init__(self,scriptname, path, *args, **kwargs):
        # Script file name; also used as the command to execute.
        self.name = scriptname
        self.desc = 'shell script' # read from the script
        self.args = args
        self.criteria = "criteria to determinate the check"
        self.kwargs = kwargs
        # Directory containing the script.
        self.path = path
    def run(self,parameters):
        # NOTE(review): ResultInfo is not defined or imported anywhere in
        # this module -- confirm where it comes from.
        result = ResultInfo(self.name)
        cmd = "%(cmd)s %(parameter)s" % dict(cmd=self.name,parameter=parameters)
        # Backslash join of path + command.
        cmdline = "\\".join([self.path,cmd]) ### only for WINDOWS
        try:
            # The script's final stdout line is expected to be a JSON object
            # that is merged into the result data.
            out = check_output(cmdline,shell=True,stderr=PIPE)
            result.data.update(json.loads(out.strip().split('\n')[-1]))
            #print 'result status:',result.data['status']
        ## if return code is non-zero. error happens.
        except CalledProcessError,err:
            error = " errmsg: <<%s>> " % err
            result.load(status=CheckStatus.RUNERR,error=error)
        #result = validateResult(result)
        return result
class CheckList(object):
    """Store and expose the data of a check-list YAML file.

        checklist = CheckList(checklist_file)
        checklist.modules
    """

    def __init__(self, filename=''):
        self.info = {}
        self.modules = []
        self.filename = os.path.split(filename)[1]
        if filename:
            self.load(filename)

    @property
    def templates(self):
        # report templates declared in the check list
        return self.info.get('templates', {})

    @property
    def paths(self):
        # named paths, e.g. the check-modules directory
        return self.info.get('paths', {})

    @property
    def modules_name(self):
        # module file names declared in the check list
        return self.info.get('modules_name', [])

    def load(self, filename):
        """Merge the YAML document in *filename* into self.info.

        Bug fix: the Python-2-only ``file()`` builtin was used and the
        handle was never closed; use open() in a context manager.
        NOTE(review): yaml.load can execute arbitrary code on untrusted
        input -- prefer yaml.safe_load if check lists are not trusted.
        """
        with open(filename) as yaml_fp:
            self.info.update(yaml.load(yaml_fp))

    def getinfo(self, key):
        return self.info.get(key)

    def import_modules(self, modules_filename=None):
        """Import every check module named by the check list."""
        if modules_filename:
            self._modules_name = modules_filename
        module_path = self.info['paths']['modules']
        module_names = filter(ignore_files, self.info['modules_name'])
        self.modules = ImportCheckModules(module_names, module_path)
def ImportCheckModules(modulefilenames, modulepath=MODULE_PATH):
    """Import the named check modules and return them as a list.

    *.py files are imported as python modules located under
    *modulepath*; sub-directories are skipped; anything else is wrapped
    in a ShellModule.

    Bug fix: the directory test and the ShellModule branch referenced
    an undefined name ``path`` (NameError at runtime) -- both now use
    *modulepath*.
    """
    modules = []
    for name in modulefilenames:
        if name.endswith('.py'):
            # "modules/sub" + "check.py" -> dotted module "modules.sub.check"
            mpath = '.'.join(os.path.split(modulepath))
            mname = '.'.join([mpath, name[:-3]])
            modules.append(import_module(mname))
        elif os.path.isdir(os.path.join(modulepath, name)):
            # sub-directories are not check modules; skip them
            continue
        else:
            # non-python script module
            modules.append(ShellModule(name, modulepath))
    return modules
class ResultInfo(object):
    """Store the information of a single check result.

    data carries three fields:
      status -- PASSED/FAILED/UNKNOWN/RUNERR/UNCHECKED (see CheckStatus)
      info   -- information text (string, or list of lines)
      error  -- error information text
    """
    strformat = " Status: %(status)s\n Info:\n%(info)s\n error:%(error)s"
    keys = ['status', 'info', 'error']

    def __init__(self, name):
        self.name = name
        self.criteria = ''
        self.data = {'status': CheckStatus.UNKNOWN,
                     'info': '',
                     'error': '',
                     }

    def setvalue(self, key, value):
        """Set one known result field; reject unknown keys.

        Bug fix: the assignment was written as ``==`` (a no-op
        comparison against self.__dict__), silently dropping the value;
        the field is now stored in self.data, where the status/info/
        error properties read it.
        """
        if key not in ResultInfo.keys:
            return False
        self.data[key] = value
        return True

    @property
    def status(self):
        return self.data['status']

    @status.setter
    def status(self, value):
        self.data['status'] = value

    @property
    def info(self):
        return self.data['info']

    @info.setter
    def info(self, value):
        # py2 convention: coerce byte strings to unicode on the way in
        self.data['info'] = value if isinstance(value, unicode) else unicode(value, "utf-8")

    @property
    def error(self):
        return self.data['error']

    @error.setter
    def error(self, value):
        self.data['error'] = value if isinstance(value, unicode) else unicode(value, "utf-8")

    def _encode_to_unicode(self):
        # NOTE(review): the loop assumes data['info'] is a list of
        # lines -- a plain string would fail on item assignment; confirm
        for index in xrange(len(self.data['info'])):
            self.data['info'][index] = self.data['info'][index] \
                if isinstance(self.data['info'][index], unicode) \
                else unicode(self.data['info'][index], "utf-8")
        self.data['error'] = self.data['error'] if isinstance(self.data['error'], unicode) else unicode(self.data['error'], "utf-8")

    def update(self, **kwargs):
        """Merge keyword fields into data and normalise the encoding."""
        self.data.update(kwargs)
        self._encode_to_unicode()

    def load(self, **kwargs):
        "obsolete function, please use update() instead."
        self.update(**kwargs)
        self._encode_to_unicode()

    def dump(self, oformat='reading'):
        """Render the result as 'reading' text, 'json' or 'csv'."""
        if oformat == 'reading':
            data = self.data.copy()
            data['info'] = "\n".join(data['info'])
            return ResultInfo.strformat % data
        elif oformat == 'json':
            return json.dumps(self.data)
        elif oformat == 'csv':
            return dict2csv(self.data, ResultInfo.keys)
        else:
            return ResultInfo.strformat % self.data

    def dumpstr(self, template=None):
        """Render via a template -- implementation incomplete (TODO)."""
        strbuf = []
class ResultList(object):
    """Container collecting every check result produced in a run."""

    def __init__(self):
        self._results = []

    def append(self, obj):
        """Add one result object to the collection."""
        self._results.append(obj)

    def __len__(self):
        return len(self._results)

    def stats(self):
        """Return a Counter mapping each status to its occurrence count."""
        statuses = [result.data['status'] for result in self._results]
        return Counter(statuses)

    def __iter__(self):
        return iter(self._results)
add version info to checker.py
# -*- coding: utf-8 -*-
import os,yaml,re,json
from subprocess import check_output, STDOUT,PIPE,CalledProcessError
from collections import Counter
from importlib import import_module
from libs.tools import to_unicode
__version__ = "v1.0"
MODULE_PATH = 'modules'
IGNORE_FILES = ['^_', '^\.', '\.pyc$']
_filefilters = [re.compile(pat) for pat in IGNORE_FILES]
###############################################################
# Common Functions
###############################################################
def dict2csv(dictdata, columnnames):
    """Serialize selected fields of *dictdata* into one CSV row.

    List values are flattened with '|' so they occupy a single CSV
    column; every other value is stringified with str().
    """
    cells = []
    for column in columnnames:
        value = dictdata[column]
        if isinstance(value, list):
            cells.append('|'.join(value))
        else:
            cells.append(str(value))
    return ','.join(cells)
def ignore_files(filename):
    """Return False when *filename* matches any ignore pattern.

    Used as a filter predicate: True means "keep this file".  The
    module-level IGNORE_FILES patterns skip hidden files, underscore
    prefixed files and compiled .pyc artifacts.
    """
    return not any(pat.search(filename) for pat in _filefilters)
###############################################################
# Class CheckStatus
###############################################################
class CheckStatus(object):
    """Enumeration of the possible outcomes of a single check."""
    PASSED = 'PASSED'        # check result is passed
    FAILED = 'FAILED'        # check result is failed
    UNKNOWN = 'UNKNOWN'      # unknown situation
    RUNERR = 'RUNERR'        # error at runtime
    UNCHECKED = 'UNCHECKED'  # check has not been executed yet

    @staticmethod
    def validateResult(result):
        """Normalize *result* (any case) to one of the status constants.

        Bug fixes vs. the previous version: ``result.upper`` was never
        called, so hasattr() tested a bound-method object; ``UNKNOWN``
        was referenced as an undefined global; and the method had no
        usable calling convention (no self, not static).
        """
        status = result.upper()
        if hasattr(CheckStatus, status):
            return status
        return CheckStatus.UNKNOWN
###############################################################
# Class ShellModule
###############################################################
class ShellModule(object):
    """Wrap a non-python check script so it can be run like a module.

    The wrapped script is expected to print a JSON document on the last
    line of its output; that document is merged into the check's
    ResultInfo data.
    """

    def __init__(self, scriptname, path, *args, **kwargs):
        self.name = scriptname
        self.desc = 'shell script'  # TODO: read the description from the script
        self.args = args
        self.criteria = "criteria to determinate the check"
        self.kwargs = kwargs
        self.path = path

    def run(self, parameters):
        """Execute the script with *parameters*; return a ResultInfo.

        A non-zero exit status raises CalledProcessError, which is
        converted into a RUNERR result instead of propagating.

        Bug fix: ``except CalledProcessError, err`` is Python-2-only
        syntax; ``as err`` works on Python 2.6+ and 3.
        """
        result = ResultInfo(self.name)
        cmd = "%(cmd)s %(parameter)s" % dict(cmd=self.name, parameter=parameters)
        cmdline = "\\".join([self.path, cmd])  ### backslash join: only for WINDOWS
        try:
            out = check_output(cmdline, shell=True, stderr=PIPE)
            # the script reports its outcome as JSON on its last line
            result.data.update(json.loads(out.strip().split('\n')[-1]))
        except CalledProcessError as err:
            error = " errmsg: <<%s>> " % err
            result.load(status=CheckStatus.RUNERR, error=error)
        return result
class CheckList(object):
    """Store and expose the data of a check-list YAML file.

        checklist = CheckList(checklist_file)
        checklist.modules
    """

    def __init__(self, filename=''):
        self.info = {}
        self.modules = []
        self.filename = os.path.split(filename)[1]
        if filename:
            self.load(filename)

    @property
    def templates(self):
        # report templates declared in the check list
        return self.info.get('templates', {})

    @property
    def paths(self):
        # named paths, e.g. the check-modules directory
        return self.info.get('paths', {})

    @property
    def modules_name(self):
        # module file names declared in the check list
        return self.info.get('modules_name', [])

    def load(self, filename):
        """Merge the YAML document in *filename* into self.info.

        Bug fix: the Python-2-only ``file()`` builtin was used and the
        handle was never closed; use open() in a context manager.
        NOTE(review): yaml.load can execute arbitrary code on untrusted
        input -- prefer yaml.safe_load if check lists are not trusted.
        """
        with open(filename) as yaml_fp:
            self.info.update(yaml.load(yaml_fp))

    def getinfo(self, key):
        return self.info.get(key)

    def import_modules(self, modules_filename=None):
        """Import every check module named by the check list."""
        if modules_filename:
            self._modules_name = modules_filename
        module_path = self.info['paths']['modules']
        module_names = filter(ignore_files, self.info['modules_name'])
        self.modules = ImportCheckModules(module_names, module_path)
def ImportCheckModules(modulefilenames, modulepath=MODULE_PATH):
    """Import the named check modules and return them as a list.

    *.py files are imported as python modules located under
    *modulepath*; sub-directories are skipped; anything else is wrapped
    in a ShellModule.

    Bug fix: the directory test and the ShellModule branch referenced
    an undefined name ``path`` (NameError at runtime) -- both now use
    *modulepath*.
    """
    modules = []
    for name in modulefilenames:
        if name.endswith('.py'):
            # "modules/sub" + "check.py" -> dotted module "modules.sub.check"
            mpath = '.'.join(os.path.split(modulepath))
            mname = '.'.join([mpath, name[:-3]])
            modules.append(import_module(mname))
        elif os.path.isdir(os.path.join(modulepath, name)):
            # sub-directories are not check modules; skip them
            continue
        else:
            # non-python script module
            modules.append(ShellModule(name, modulepath))
    return modules
class ResultInfo(object):
    """Store the information of a single check result.

    data carries three fields:
      status -- PASSED/FAILED/UNKNOWN/RUNERR/UNCHECKED (see CheckStatus)
      info   -- information text (string, or list of lines)
      error  -- error information text
    """
    strformat = " Status: %(status)s\n Info:\n%(info)s\n error:%(error)s"
    keys = ['status', 'info', 'error']

    def __init__(self, name):
        self.name = name
        self.criteria = ''
        self.data = {'status': CheckStatus.UNKNOWN,
                     'info': '',
                     'error': '',
                     }

    def setvalue(self, key, value):
        """Set one known result field; reject unknown keys.

        Bug fix: the assignment was written as ``==`` (a no-op
        comparison against self.__dict__), silently dropping the value;
        the field is now stored in self.data, where the status/info/
        error properties read it.
        """
        if key not in ResultInfo.keys:
            return False
        self.data[key] = value
        return True

    @property
    def status(self):
        return self.data['status']

    @status.setter
    def status(self, value):
        self.data['status'] = value

    @property
    def info(self):
        return self.data['info']

    @info.setter
    def info(self, value):
        # py2 convention: coerce byte strings to unicode on the way in
        self.data['info'] = value if isinstance(value, unicode) else unicode(value, "utf-8")

    @property
    def error(self):
        return self.data['error']

    @error.setter
    def error(self, value):
        self.data['error'] = value if isinstance(value, unicode) else unicode(value, "utf-8")

    def _encode_to_unicode(self):
        # NOTE(review): the loop assumes data['info'] is a list of
        # lines -- a plain string would fail on item assignment; confirm
        for index in xrange(len(self.data['info'])):
            self.data['info'][index] = self.data['info'][index] \
                if isinstance(self.data['info'][index], unicode) \
                else unicode(self.data['info'][index], "utf-8")
        self.data['error'] = self.data['error'] if isinstance(self.data['error'], unicode) else unicode(self.data['error'], "utf-8")

    def update(self, **kwargs):
        """Merge keyword fields into data and normalise the encoding."""
        self.data.update(kwargs)
        self._encode_to_unicode()

    def load(self, **kwargs):
        "obsolete function, please use update() instead."
        self.update(**kwargs)
        self._encode_to_unicode()

    def dump(self, oformat='reading'):
        """Render the result as 'reading' text, 'json' or 'csv'."""
        if oformat == 'reading':
            data = self.data.copy()
            data['info'] = "\n".join(data['info'])
            return ResultInfo.strformat % data
        elif oformat == 'json':
            return json.dumps(self.data)
        elif oformat == 'csv':
            return dict2csv(self.data, ResultInfo.keys)
        else:
            return ResultInfo.strformat % self.data

    def dumpstr(self, template=None):
        """Render via a template -- implementation incomplete (TODO)."""
        strbuf = []
class ResultList(object):
    """Container collecting every check result produced in a run."""

    def __init__(self):
        self._results = []

    def append(self, obj):
        """Add one result object to the collection."""
        self._results.append(obj)

    def __len__(self):
        return len(self._results)

    def stats(self):
        """Return a Counter mapping each status to its occurrence count."""
        statuses = [result.data['status'] for result in self._results]
        return Counter(statuses)

    def __iter__(self):
        return iter(self._results)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import warnings
import nibabel as nib
import numpy as np
import scipy.ndimage
warnings.simplefilter("ignore", UserWarning)
FILE_EXTENSIONS = [".nii.gz", ".tar.gz"]
#### utilities for file headers
def create_affine_pixdim(affine, pixdim):
    """Return *affine* rescaled so its spatial column norms equal *pixdim*.

    Each of the first three columns is divided by its current norm and
    multiplied by the matching pixel dimension; the fourth column keeps
    a unit scale factor.
    """
    col_norms = np.sqrt(np.sum(np.square(affine[:, 0:3]), 0))
    scale_down = np.tile(
        np.expand_dims(np.append(col_norms, 1), axis=1), [1, 4]).T
    scale_up = np.tile(
        np.expand_dims(np.append(np.asarray(pixdim), 1), axis=1), [1, 4]).T
    return affine / scale_down * scale_up
def rectify_header_sform_qform(img_nii):
    """Ensure the sform/qform of *img_nii* agree with its pixel dims.

    A form is considered broken when the column norms of its 3x3 part
    differ from header.get_zooms().  A broken form is repaired from the
    healthy one; when both are broken, a fresh affine is rebuilt from
    the voxel dimensions.

    Bug fix: the branch handling a broken sform with a valid qform fell
    through without a return statement, so the function returned None
    in that case.  (Rewritten to match the cleaner four-case structure
    used by the refactored version of this module.)
    """
    # TODO: check img_nii is a nibabel object
    pixdim = np.asarray(img_nii.header.get_zooms())
    norm_sform = np.sqrt(np.sum(np.square(img_nii.get_sform()[0:3, 0:3]), 0))
    norm_qform = np.sqrt(np.sum(np.square(img_nii.get_qform()[0:3, 0:3]), 0))
    sform_ok = np.array_equal(norm_sform, pixdim)
    qform_ok = np.array_equal(norm_qform, pixdim)
    if sform_ok and qform_ok:
        return img_nii
    if sform_ok:
        # only the qform disagrees: copy the healthy sform over it
        img_nii.set_qform(np.copy(img_nii.get_sform()))
        return img_nii
    if qform_ok:
        # only the sform disagrees: copy the healthy qform over it
        img_nii.set_sform(np.copy(img_nii.get_qform()))
        return img_nii
    # both forms disagree with pixdim: rebuild the affine from scratch
    new_affine = create_affine_pixdim(img_nii.affine,
                                      img_nii.header.get_zooms())
    img_nii.set_sform(new_affine)
    img_nii.set_qform(new_affine)
    return img_nii
#### end of utilities for file headers
### resample/reorientation original volumes
# Perform the reorientation to ornt_fin of the data array given ornt_init
def do_reorientation(data_array, ornt_init, ornt_fin):
    """Reorient *data_array* from orientation ornt_init to ornt_fin.

    When the two orientations already match, the input array is
    returned untouched; otherwise nibabel computes and applies the
    required axis transform.
    """
    if not np.array_equal(ornt_init, ornt_fin):
        transform = nib.orientations.ornt_transform(ornt_init, ornt_fin)
        return nib.orientations.apply_orientation(data_array, transform)
    return data_array
# Perform the resampling of the data array given the initial and final pixel
# dimensions and the interpolation order
# this function assumes the same interp_order for multi-modal images
# do we need separate interp_order for each modality?
def do_resampling(data_array, pixdim_init, pixdim_fin, interp_order):
    """Resample every 3d volume of a 5d array to new pixel dimensions.

    data_array is laid out as [H, W, D, modality, time]; each (t, m)
    volume is zoomed independently, all with the same interpolation
    order.  Returns None for a None input and the array unchanged when
    the pixel dimensions already match.
    """
    if data_array is None:
        return
    if np.array_equal(pixdim_fin, pixdim_init):
        return data_array
    zoom_factors = np.divide(pixdim_init[0:], pixdim_fin[0:len(pixdim_init)])
    missing = data_array.ndim - len(zoom_factors)
    if missing > 0:
        # pad with unit factors so non-spatial axes keep their size
        zoom_factors = np.pad(zoom_factors, (0, missing),
                              mode='constant', constant_values=1)
    # resample each 3d volume of the 5d data independently
    resampled_time = []
    for t in range(data_array.shape[4]):
        per_modality = [
            scipy.ndimage.zoom(data_array[..., m, t],
                               zoom_factors[0:3],
                               order=interp_order)[..., np.newaxis]
            for m in range(data_array.shape[3])]
        stacked = np.concatenate(per_modality, axis=-1)
        resampled_time.append(stacked[..., np.newaxis])
    return np.concatenate(resampled_time, axis=-1)
### end of resample/reorientation original volumes
def split_filename(file_name, special_extensions=None):
    """Split *file_name* into (directory, basename, extension).

    Multi-part extensions (e.g. ".nii.gz") are kept as a single
    extension; anything else falls back to os.path.splitext.  Matching
    is case-insensitive but the returned extension keeps its original
    case.

    :param special_extensions: optional list of multi-part extensions;
        defaults to the module-level FILE_EXTENSIONS (backward
        compatible generalization of the former hard-coded constant).
    """
    if special_extensions is None:
        special_extensions = FILE_EXTENSIONS
    pth = os.path.dirname(file_name)
    fname = os.path.basename(file_name)
    for special_ext in special_extensions:
        ext_len = len(special_ext)
        if fname[-ext_len:].lower() == special_ext:
            ext = fname[-ext_len:]
            fname = fname[:-ext_len] if len(fname) > ext_len else ''
            return pth, fname, ext
    fname, ext = os.path.splitext(fname)
    return pth, fname, ext
def csv_cell_to_volume_5d(csv_cell):
    """
    This function create a 5D image matrix from a csv_cell
    :param csv_cell: an array of file names, e.g. ['path_to_T1', 'path_to_T2']
        (assumes csv_cell() yields a [time][modality] grid of file paths
        and exposes num_time_point / num_modality -- TODO confirm against
        the csv_cell provider)
    :return: 5D image consisting of images from 'path_to_T1', 'path_to_T2'
    The five dimensions are H x W x D x Modality x Time point
    """
    if csv_cell is None:
        return None
    numb_tp = csv_cell.num_time_point
    numb_mod = csv_cell.num_modality
    # padding targets; may grow below when stacked files carry extra axes
    max_time = numb_tp
    max_mod = numb_mod
    # a single declared modality/time point may be expanded from a file
    # that already stacks several of them
    expand_modality_dim = True if numb_mod == 1 else False
    expand_time_point_dim = True if numb_tp == 1 else False
    flag_dimensions_set = False
    dimensions = []
    data_array = []
    for t in range(0, numb_tp):
        data_array.append([])
        for m in range(0, numb_mod):
            data_array[t].append([])
            if not os.path.exists(csv_cell()[t][m]):
                # missing file: substitute an all-zero volume of the
                # reference dimensions seen so far
                data_array[t][m] = expand_to_5d(np.zeros(dimensions))
                continue
            # load a 3d volume
            img_nii = nib.load(csv_cell()[t][m])
            img_data_shape = img_nii.header.get_data_shape()
            assert np.prod(img_data_shape) > 1
            if not flag_dimensions_set:
                # the first readable file fixes the reference spatial shape
                dimensions = img_data_shape[0:min(3, len(img_data_shape))]
                flag_dimensions_set = True
            else:
                if not np.all(img_data_shape[:3] == dimensions[:3]):
                    # NOTE(review): the [m][t] indices below look swapped
                    # vs. the [t][m] access above -- confirm
                    raise ValueError("The 3d dimensionality of image %s "
                                     "%s is not consistent with %s "
                                     % (csv_cell()[m][t],
                                        ' '.join(map(str, img_data_shape[0:3])),
                                        ' '.join(map(str, dimensions))))
            # per file layout: 4th axis = time points, 5th = modalities
            if len(img_data_shape) >= 4 and img_data_shape[3] > 1 \
                    and not expand_time_point_dim:
                raise ValueError("You cannot provide already stacked time "
                                 "points if you stack additional time points ")
            elif expand_time_point_dim and len(img_data_shape) >= 4:
                max_time = max(img_data_shape[3], max_time)
            if len(img_data_shape) >= 5 and img_data_shape[4] > 1 \
                    and not expand_modality_dim:
                raise ValueError("You cannot provide already stacked "
                                 "modalities "
                                 " if you stack additional modalities ")
            elif expand_modality_dim and len(img_data_shape) == 5:
                max_mod = max(max_mod, img_data_shape[4])
            img_data = img_nii.get_data().astype(np.float32)
            img_data = expand_to_5d(img_data)
            # swap so modalities come before time: [H, W, D, mod, time]
            img_data = np.swapaxes(img_data, 4, 3)
            data_array[t][m] = img_data
    if expand_modality_dim or expand_time_point_dim:
        # grow every cell to the common extents with zero padding
        data_array = pad_zeros_to_5d(data_array, max_mod, max_time)
    data_to_save = create_5d_from_array(data_array)
    return data_to_save
def expand_to_5d(img_data):
    """Append trailing singleton axes until *img_data* is 5-dimensional.

    Arrays that already have 5 or more dimensions are returned as-is.
    """
    for _ in range(5 - img_data.ndim):
        img_data = np.expand_dims(img_data, axis=-1)
    return img_data
def pad_zeros_to_5d(data_array, max_mod, max_time):
    """Zero-pad the nested [time][modality] grid of 5d arrays in place.

    With a single time point, each modality entry is padded along the
    time axis (4) up to *max_time*; otherwise the first modality entry
    of each time point is padded along the modality axis (3) up to
    *max_mod*.  Returns the (mutated) grid.
    """
    if len(data_array) == max_time and len(data_array[0]) == max_mod:
        return data_array
    if len(data_array) == 1:
        # single time point: grow the time axis of every modality
        for m, data in enumerate(data_array[0]):
            short_by = max_time - data.shape[4]
            if short_by > 0:
                pad_block = np.zeros(data.shape[:4] + (short_by,))
                data_array[0][m] = np.concatenate(
                    [data, pad_block], axis=4)
    else:
        # several time points: grow the modality axis of each entry
        for t in range(len(data_array)):
            data = data_array[t][0]
            short_by = max_mod - data.shape[3]
            if short_by > 0:
                pad_block = np.zeros(
                    data.shape[:3] + (short_by, data.shape[4]))
                data_array[t][0] = np.concatenate(
                    [data, pad_block], axis=3)
    return data_array
def create_5d_from_array(data_array):
    """Concatenate a [time][modality] nested list of 5d blocks into one
    5d volume (modalities stacked on axis 3, time points on axis 4)."""
    per_time = []
    for row in data_array:
        modalities = [row[m] for m in range(len(data_array[0]))]
        per_time.append(np.concatenate(modalities, axis=3))
    return np.concatenate(per_time, axis=4)
def save_volume_5d(img_data, filename, save_path, img_ref=None):
    """Write *img_data* to save_path/filename as a Nifti file.

    The affine is borrowed from *img_ref* after rectifying its
    sform/qform; data is stored as float32.  No-op when img_data is
    None; the target directory is created on demand.

    NOTE(review): img_ref defaults to None but is dereferenced
    unconditionally below -- calling without a reference image will
    raise; confirm callers always pass img_ref.
    """
    if img_data is None:
        return
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # repair the reference header before borrowing its affine
    img_ref = rectify_header_sform_qform(img_ref)
    affine = img_ref.affine
    img_nii = nib.Nifti1Image(img_data, affine)
    img_nii.set_data_dtype(np.dtype(np.float32))
    output_name = os.path.join(save_path, filename)
    nib.save(img_nii, output_name)
    print('Saved {}'.format(output_name))
def match_volume_shape_to_patch_definition(image_data, patch):
    """Coerce *image_data* to a 4d volume [H, W, D, Modality].

    Extra trailing dimensions are collapsed by keeping index 0;
    missing dimensions are appended as trailing singleton axes.
    Returns None when either argument is None.
    """
    if image_data is None or patch is None:
        return None
    while image_data.ndim != 4:
        if image_data.ndim > 4:
            image_data = image_data[..., 0]
        else:
            image_data = np.expand_dims(image_data, axis=-1)
    return image_data
def spatial_padding_to_indexes(spatial_padding):
    """Flatten a per-axis padding spec into a 1d integer array.

    Each element of *spatial_padding* is a 1-tuple (symmetric padding)
    or a 2-tuple (before, after); the (before, after) pairs are
    flattened into a single array of length 2 * len(spatial_padding).

    Raises ValueError for any other tuple length.

    Bug fix: ``np.int`` was deprecated in NumPy 1.20 and removed in
    1.24; the builtin ``int`` dtype is identical in behavior.
    """
    indexes = np.zeros((len(spatial_padding), 2), dtype=int)
    for (i, s) in enumerate(spatial_padding):
        if len(s) == 1:
            indexes[i] = [s[0], s[0]]
        elif len(s) == 2:
            indexes[i] = [s[0], s[1]]
        else:
            raise ValueError("unknown spatial_padding format")
    return indexes.flatten()
# def adapt_to_shape(img_to_change, shape, mod='tile'):
# if img_to_change is None or img_to_change.size == 0:
# return np.zeros(shape)
# shape_to_change = img_to_change.shape
# if len(shape) < len(shape_to_change):
# raise ValueError('shape inconsistency')
# if np.all(shape_to_change == shape):
# return img_to_change
# new_img = np.resize(img_to_change, shape)
# return new_img
# # Check compatibility in dimensions for the first 3 dimensions of two images
# def check_shape_compatibility_3d(img1, img2):
# # consider by default that there are a min of 3 dimensions (2d images are
# # always expanded beforehand
# if img1.ndim < 3 or img2.ndim < 3:
# raise ValueError
# return np.all(img1.shape[:3] == img2.shape[:3])
# def create_new_filename(filename_init, new_path='', new_prefix='',
# new_suffix=''):
# path, name, ext = split_filename(filename_init)
# if new_path is None or len(new_path) == 0:
# new_path = path
# new_name = "%s_%s_%s" % (new_prefix, name, new_suffix)
# new_filename = os.path.join(new_path, new_name + ext)
# new_filename = clean_name(new_filename)
# return new_filename
# def clean_name(filename):
# filename = filename.replace("__", "_")
# filename = filename.replace("..", ".")
# filename = filename.replace("_.", ".")
# return filename
# def load_volume(filename,
# allow_multimod_single_file=False,
# allow_timeseries=False):
# if not os.path.exists(filename):
# warnings.warn("This file %s does not exist" % filename)
# return None
#
# print(filename)
# img_nii = nib.load(filename)
# img_shape = img_nii.header.get_data_shape()
# img_data = img_nii.get_data().astype(np.float32)
# if len(img_shape) == 2: # If the image is 2D it is expanded as a 3D
# return np.expand_dims(img_data, axis=2)
#
# if len(img_shape) == 3: # do nothing if image is 3D
# return img_data
#
# if len(img_shape) == 4: # 4D depends on use of multi time series
# warnings.warn("A 4D image has been detected. As per Nifti "
# "standards, it will be considered as a time series "
# "image")
# if not allow_timeseries: # if no time series allowed, take only
# # the first volume
# warnings.warn("Time series not allowed in this setting, "
# "only the first volume will be returned")
# return img_data[..., 0:1]
# else:
# # time series are moved to the 5th dimension
# return np.swapaxes(np.expand_dims(img_data, axis=4), 4, 3)
#
# if len(img_shape) == 5:
# warnings.warn("A 5D image has been detected. As per Nifti "
# "conventions, it will be considered as a multimodal image")
# if not allow_multimod_single_file:
# warnings.warn("Multiple modalities in a single file not "
# "allowed in this setting. Only the first "
# "modality will be considered")
# if img_shape[3] == 1: # only one time point in the 4th dimension
# return img_data[..., 0, 0]
# else:
# if not allow_timeseries:
# warnings.warn("Time series not allowed in this "
# "setting, only the first volume of the "
# "time series will be returned")
# return img_data[..., 0, 0]
# else:
# return np.swapaxes(img_data[..., 0], 4, 3)
# else:
# if img_shape[3] == 1: # only one time point in the image series
# return np.swapaxes(img_data[..., 0, :], 4, 3)
# elif not allow_timeseries:
# warnings.warn("Time series not allowed in this setting, "
# "only the first volume multimodal will be "
# "returned")
# return np.swapaxes(img_data[..., 0, :], 4, 3)
# else:
# return np.swapaxes(img_data, 4, 3)
refactoring misc_io
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import warnings
import nibabel as nib
import numpy as np
import scipy.ndimage
warnings.simplefilter("ignore", UserWarning)
FILE_EXTENSIONS = [".nii.gz", ".tar.gz"]
#### utilities for file headers
def create_affine_pixdim(affine, pixdim):
    """Return *affine* rescaled so its spatial column norms equal *pixdim*.

    Each of the first three columns is divided by its current norm and
    multiplied by the matching pixel dimension; the fourth column keeps
    a unit scale factor.
    """
    col_norms = np.sqrt(np.sum(np.square(affine[:, 0:3]), 0))
    scale_down = np.tile(
        np.expand_dims(np.append(col_norms, 1), axis=1), [1, 4]).T
    scale_up = np.tile(
        np.expand_dims(np.append(np.asarray(pixdim), 1), axis=1), [1, 4]).T
    return affine / scale_down * scale_up
def rectify_header_sform_qform(img_nii):
    """Make the sform/qform of *img_nii* consistent with its zooms.

    A form is treated as broken when the column norms of its 3x3 part
    differ from header.get_zooms().  A broken form is repaired from the
    healthy one; when both are broken, a new affine is rebuilt from the
    voxel dimensions.
    """
    # TODO: check img_nii is a nibabel object
    pixdim = np.asarray(img_nii.header.get_zooms())
    sform_norm = np.sqrt(np.sum(np.square(img_nii.get_sform()[0:3, 0:3]), 0))
    qform_norm = np.sqrt(np.sum(np.square(img_nii.get_qform()[0:3, 0:3]), 0))
    sform_ok = np.array_equal(sform_norm, pixdim)
    qform_ok = np.array_equal(qform_norm, pixdim)
    if sform_ok and not qform_ok:
        # only the qform disagrees: copy the healthy sform over it
        img_nii.set_qform(np.copy(img_nii.get_sform()))
    elif qform_ok and not sform_ok:
        # only the sform disagrees: copy the healthy qform over it
        img_nii.set_sform(np.copy(img_nii.get_qform()))
    elif not sform_ok and not qform_ok:
        # both broken: rebuild a fresh affine from the voxel dimensions
        new_affine = create_affine_pixdim(img_nii.affine,
                                          img_nii.header.get_zooms())
        img_nii.set_sform(new_affine)
        img_nii.set_qform(new_affine)
    return img_nii
#### end of utilities for file headers
### resample/reorientation original volumes
# Perform the reorientation to ornt_fin of the data array given ornt_init
def do_reorientation(data_array, ornt_init, ornt_fin):
    """Reorient *data_array* from orientation ornt_init to ornt_fin.

    When the two orientations already match, the input array is
    returned untouched; otherwise nibabel computes and applies the
    required axis transform.
    """
    if not np.array_equal(ornt_init, ornt_fin):
        transform = nib.orientations.ornt_transform(ornt_init, ornt_fin)
        return nib.orientations.apply_orientation(data_array, transform)
    return data_array
# Perform the resampling of the data array given the initial and final pixel
# dimensions and the interpolation order
# this function assumes the same interp_order for multi-modal images
# do we need separate interp_order for each modality?
def do_resampling(data_array, pixdim_init, pixdim_fin, interp_order):
    """Resample every 3d volume of a 5d array to new pixel dimensions.

    data_array is laid out as [H, W, D, modality, time]; each (t, m)
    volume is zoomed independently, all with the same interpolation
    order.  Returns None for a None input and the array unchanged when
    the pixel dimensions already match.
    """
    if data_array is None:
        return
    if np.array_equal(pixdim_fin, pixdim_init):
        return data_array
    zoom_factors = np.divide(pixdim_init[0:], pixdim_fin[0:len(pixdim_init)])
    missing = data_array.ndim - len(zoom_factors)
    if missing > 0:
        # pad with unit factors so non-spatial axes keep their size
        zoom_factors = np.pad(zoom_factors, (0, missing),
                              mode='constant', constant_values=1)
    # resample each 3d volume of the 5d data independently
    resampled_time = []
    for t in range(data_array.shape[4]):
        per_modality = [
            scipy.ndimage.zoom(data_array[..., m, t],
                               zoom_factors[0:3],
                               order=interp_order)[..., np.newaxis]
            for m in range(data_array.shape[3])]
        stacked = np.concatenate(per_modality, axis=-1)
        resampled_time.append(stacked[..., np.newaxis])
    return np.concatenate(resampled_time, axis=-1)
### end of resample/reorientation original volumes
def split_filename(file_name, special_extensions=None):
    """Split *file_name* into (directory, basename, extension).

    Multi-part extensions (e.g. ".nii.gz") are kept as a single
    extension; anything else falls back to os.path.splitext.  Matching
    is case-insensitive but the returned extension keeps its original
    case.

    :param special_extensions: optional list of multi-part extensions;
        defaults to the module-level FILE_EXTENSIONS (backward
        compatible generalization of the former hard-coded constant).
    """
    if special_extensions is None:
        special_extensions = FILE_EXTENSIONS
    pth = os.path.dirname(file_name)
    fname = os.path.basename(file_name)
    for special_ext in special_extensions:
        ext_len = len(special_ext)
        if fname[-ext_len:].lower() == special_ext:
            ext = fname[-ext_len:]
            fname = fname[:-ext_len] if len(fname) > ext_len else ''
            return pth, fname, ext
    fname, ext = os.path.splitext(fname)
    return pth, fname, ext
def csv_cell_to_volume_5d(csv_cell):
    """
    This function create a 5D image matrix from a csv_cell
    :param csv_cell: an array of file names, e.g. ['path_to_T1', 'path_to_T2']
        (assumes csv_cell() yields a [time][modality] grid of file paths
        and exposes num_time_point / num_modality -- TODO confirm against
        the csv_cell provider)
    :return: 5D image consisting of images from 'path_to_T1', 'path_to_T2'
    The five dimensions are H x W x D x Modality x Time point
    """
    if csv_cell is None:
        return None
    numb_tp = csv_cell.num_time_point
    numb_mod = csv_cell.num_modality
    # padding targets; may grow below when stacked files carry extra axes
    max_time = numb_tp
    max_mod = numb_mod
    # a single declared modality/time point may be expanded from a file
    # that already stacks several of them
    expand_modality_dim = True if numb_mod == 1 else False
    expand_time_point_dim = True if numb_tp == 1 else False
    flag_dimensions_set = False
    dimensions = []
    data_array = []
    for t in range(0, numb_tp):
        data_array.append([])
        for m in range(0, numb_mod):
            data_array[t].append([])
            if not os.path.exists(csv_cell()[t][m]):
                # missing file: substitute an all-zero volume of the
                # reference dimensions seen so far
                data_array[t][m] = expand_to_5d(np.zeros(dimensions))
                continue
            # load a 3d volume
            img_nii = nib.load(csv_cell()[t][m])
            img_data_shape = img_nii.header.get_data_shape()
            assert np.prod(img_data_shape) > 1
            if not flag_dimensions_set:
                # the first readable file fixes the reference spatial shape
                dimensions = img_data_shape[0:min(3, len(img_data_shape))]
                flag_dimensions_set = True
            else:
                if not np.all(img_data_shape[:3] == dimensions[:3]):
                    # NOTE(review): the [m][t] indices below look swapped
                    # vs. the [t][m] access above -- confirm
                    raise ValueError("The 3d dimensionality of image %s "
                                     "%s is not consistent with %s "
                                     % (csv_cell()[m][t],
                                        ' '.join(map(str, img_data_shape[0:3])),
                                        ' '.join(map(str, dimensions))))
            # per file layout: 4th axis = time points, 5th = modalities
            if len(img_data_shape) >= 4 and img_data_shape[3] > 1 \
                    and not expand_time_point_dim:
                raise ValueError("You cannot provide already stacked time "
                                 "points if you stack additional time points ")
            elif expand_time_point_dim and len(img_data_shape) >= 4:
                max_time = max(img_data_shape[3], max_time)
            if len(img_data_shape) >= 5 and img_data_shape[4] > 1 \
                    and not expand_modality_dim:
                raise ValueError("You cannot provide already stacked "
                                 "modalities "
                                 " if you stack additional modalities ")
            elif expand_modality_dim and len(img_data_shape) == 5:
                max_mod = max(max_mod, img_data_shape[4])
            img_data = img_nii.get_data().astype(np.float32)
            img_data = expand_to_5d(img_data)
            # swap so modalities come before time: [H, W, D, mod, time]
            img_data = np.swapaxes(img_data, 4, 3)
            data_array[t][m] = img_data
    if expand_modality_dim or expand_time_point_dim:
        # grow every cell to the common extents with zero padding
        data_array = pad_zeros_to_5d(data_array, max_mod, max_time)
    data_to_save = create_5d_from_array(data_array)
    return data_to_save
def expand_to_5d(img_data):
    """Append trailing singleton axes until *img_data* is 5-dimensional.

    Arrays that already have 5 or more dimensions are returned as-is.
    """
    for _ in range(5 - img_data.ndim):
        img_data = np.expand_dims(img_data, axis=-1)
    return img_data
def pad_zeros_to_5d(data_array, max_mod, max_time):
    """Zero-pad the nested [time][modality] grid of 5d arrays in place.

    With a single time point, each modality entry is padded along the
    time axis (4) up to *max_time*; otherwise the first modality entry
    of each time point is padded along the modality axis (3) up to
    *max_mod*.  Returns the (mutated) grid.
    """
    if len(data_array) == max_time and len(data_array[0]) == max_mod:
        return data_array
    if len(data_array) == 1:
        # single time point: grow the time axis of every modality
        for m, data in enumerate(data_array[0]):
            short_by = max_time - data.shape[4]
            if short_by > 0:
                pad_block = np.zeros(data.shape[:4] + (short_by,))
                data_array[0][m] = np.concatenate(
                    [data, pad_block], axis=4)
    else:
        # several time points: grow the modality axis of each entry
        for t in range(len(data_array)):
            data = data_array[t][0]
            short_by = max_mod - data.shape[3]
            if short_by > 0:
                pad_block = np.zeros(
                    data.shape[:3] + (short_by, data.shape[4]))
                data_array[t][0] = np.concatenate(
                    [data, pad_block], axis=3)
    return data_array
def create_5d_from_array(data_array):
    """Concatenate a [time][modality] nested list of 5d blocks into one
    5d volume (modalities stacked on axis 3, time points on axis 4)."""
    per_time = []
    for row in data_array:
        modalities = [row[m] for m in range(len(data_array[0]))]
        per_time.append(np.concatenate(modalities, axis=3))
    return np.concatenate(per_time, axis=4)
def save_volume_5d(img_data, filename, save_path, img_ref=None):
    """Write *img_data* to save_path/filename as a Nifti file.

    The affine is borrowed from *img_ref* after rectifying its
    sform/qform; data is stored as float32.  No-op when img_data is
    None; the target directory is created on demand.

    NOTE(review): img_ref defaults to None but is dereferenced
    unconditionally below -- calling without a reference image will
    raise; confirm callers always pass img_ref.
    """
    if img_data is None:
        return
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # repair the reference header before borrowing its affine
    img_ref = rectify_header_sform_qform(img_ref)
    affine = img_ref.affine
    img_nii = nib.Nifti1Image(img_data, affine)
    img_nii.set_data_dtype(np.dtype(np.float32))
    output_name = os.path.join(save_path, filename)
    nib.save(img_nii, output_name)
    print('Saved {}'.format(output_name))
def match_volume_shape_to_patch_definition(image_data, patch):
    """Coerce *image_data* to a 4d volume [H, W, D, Modality].

    Extra trailing dimensions are collapsed by keeping index 0;
    missing dimensions are appended as trailing singleton axes.
    Returns None when either argument is None.
    """
    if image_data is None or patch is None:
        return None
    while image_data.ndim != 4:
        if image_data.ndim > 4:
            image_data = image_data[..., 0]
        else:
            image_data = np.expand_dims(image_data, axis=-1)
    return image_data
def spatial_padding_to_indexes(spatial_padding):
    """Flatten a per-axis padding spec into a 1-D integer array.

    Parameters
    ----------
    spatial_padding : sequence of tuples
        Each entry is either ``(pad,)`` meaning symmetric padding or
        ``(before, after)``.

    Returns
    -------
    numpy.ndarray
        Flat array ``[before_0, after_0, before_1, after_1, ...]``.

    Raises
    ------
    ValueError
        If an entry has more than two values.
    """
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `int` is the documented replacement (np.int was an alias).
    indexes = np.zeros((len(spatial_padding), 2), dtype=int)
    for (i, s) in enumerate(spatial_padding):
        if len(s) == 1:
            indexes[i] = [s[0], s[0]]
        elif len(s) == 2:
            indexes[i] = [s[0], s[1]]
        else:
            raise ValueError("unknown spatial_padding format")
    return indexes.flatten()
# def adapt_to_shape(img_to_change, shape, mod='tile'):
# if img_to_change is None or img_to_change.size == 0:
# return np.zeros(shape)
# shape_to_change = img_to_change.shape
# if len(shape) < len(shape_to_change):
# raise ValueError('shape inconsistency')
# if np.all(shape_to_change == shape):
# return img_to_change
# new_img = np.resize(img_to_change, shape)
# return new_img
# # Check compatibility in dimensions for the first 3 dimensions of two images
# def check_shape_compatibility_3d(img1, img2):
# # consider by default that there are a min of 3 dimensions (2d images are
# # always expanded beforehand
# if img1.ndim < 3 or img2.ndim < 3:
# raise ValueError
# return np.all(img1.shape[:3] == img2.shape[:3])
# def create_new_filename(filename_init, new_path='', new_prefix='',
# new_suffix=''):
# path, name, ext = split_filename(filename_init)
# if new_path is None or len(new_path) == 0:
# new_path = path
# new_name = "%s_%s_%s" % (new_prefix, name, new_suffix)
# new_filename = os.path.join(new_path, new_name + ext)
# new_filename = clean_name(new_filename)
# return new_filename
# def clean_name(filename):
# filename = filename.replace("__", "_")
# filename = filename.replace("..", ".")
# filename = filename.replace("_.", ".")
# return filename
# def load_volume(filename,
# allow_multimod_single_file=False,
# allow_timeseries=False):
# if not os.path.exists(filename):
# warnings.warn("This file %s does not exist" % filename)
# return None
#
# print(filename)
# img_nii = nib.load(filename)
# img_shape = img_nii.header.get_data_shape()
# img_data = img_nii.get_data().astype(np.float32)
# if len(img_shape) == 2: # If the image is 2D it is expanded as a 3D
# return np.expand_dims(img_data, axis=2)
#
# if len(img_shape) == 3: # do nothing if image is 3D
# return img_data
#
# if len(img_shape) == 4: # 4D depends on use of multi time series
# warnings.warn("A 4D image has been detected. As per Nifti "
# "standards, it will be considered as a time series "
# "image")
# if not allow_timeseries: # if no time series allowed, take only
# # the first volume
# warnings.warn("Time series not allowed in this setting, "
# "only the first volume will be returned")
# return img_data[..., 0:1]
# else:
# # time series are moved to the 5th dimension
# return np.swapaxes(np.expand_dims(img_data, axis=4), 4, 3)
#
# if len(img_shape) == 5:
# warnings.warn("A 5D image has been detected. As per Nifti "
# "conventions, it will be considered as a multimodal image")
# if not allow_multimod_single_file:
# warnings.warn("Multiple modalities in a single file not "
# "allowed in this setting. Only the first "
# "modality will be considered")
# if img_shape[3] == 1: # only one time point in the 4th dimension
# return img_data[..., 0, 0]
# else:
# if not allow_timeseries:
# warnings.warn("Time series not allowed in this "
# "setting, only the first volume of the "
# "time series will be returned")
# return img_data[..., 0, 0]
# else:
# return np.swapaxes(img_data[..., 0], 4, 3)
# else:
# if img_shape[3] == 1: # only one time point in the image series
# return np.swapaxes(img_data[..., 0, :], 4, 3)
# elif not allow_timeseries:
# warnings.warn("Time series not allowed in this setting, "
# "only the first volume multimodal will be "
# "returned")
# return np.swapaxes(img_data[..., 0, :], 4, 3)
# else:
# return np.swapaxes(img_data, 4, 3)
|
import numpy as np
from hyperopt import fmin
from .ensemble import VotingModel
import os
import inspect
import re
import sys
sys.path.append(".")
def minimize(model, data, algo, max_evals, trials, rseed=1337):
    """Minimize a keras model for given data and implicit hyperparameters.

    Parameters
    ----------
    model: A function defining a keras model with hyperas templates, which returns a
        valid hyperopt results dictionary, e.g.
        return {'loss': -acc, 'status': STATUS_OK}
    data: A parameterless function that defines and return all data needed in the above
        model definition.
    algo: A hyperopt algorithm, like tpe.suggest or rand.suggest
    max_evals: Maximum number of optimization runs
    trials: A hyperopt trials object, used to store intermediate results for all
        optimization runs
    rseed: Integer random seed for experiments

    Returns
    -------
    A pair consisting of the results dictionary of the best run and the corresponding
    keras model.
    """
    best_run = base_minimizer(model, data, algo, max_evals, trials, rseed)
    best_model = None
    for trial in trials:
        # hyperopt stores each sampled value as a one-element list; unwrap
        # them in place so the dict can be compared against best_run below.
        vals = trial.get('misc').get('vals')
        for key in vals.keys():
            vals[key] = vals[key][0]
        # The trial whose unwrapped parameter values equal the best run's
        # carries the fitted model in its result dict (if one was stored).
        if trial.get('misc').get('vals') == best_run and 'model' in trial.get('result').keys():
            best_model = trial.get('result').get('model')
    return best_run, best_model
def best_ensemble(nb_ensemble_models, model, data, algo, max_evals, trials, voting='hard', weights=None):
    """Optimise *model* and wrap the *nb_ensemble_models* selected runs in a
    VotingModel ensemble using the given voting scheme and weights."""
    model_list = best_models(nb_models=nb_ensemble_models, model=model,
                             data=data, algo=algo, max_evals=max_evals, trials=trials)
    return VotingModel(model_list, voting, weights)
def best_models(nb_models, model, data, algo, max_evals, trials):
    """Run the optimisation and return up to *nb_models* stored models.

    NOTE(review): the cut-off sorts losses in DESCENDING order and keeps
    trials with loss >= cut_off, i.e. the highest losses.  Under the usual
    hyperopt convention (lower loss is better) this selects the worst
    models -- confirm the intended direction.
    """
    base_minimizer(model, data, algo, max_evals, trials)
    if len(trials) < nb_models:
        nb_models = len(trials)
    scores = [trial.get('result').get('loss') for trial in trials]
    cut_off = sorted(scores, reverse=True)[nb_models-1]
    model_list = [trial.get('result').get('model') for trial in trials if trial.get('result').get('loss') >= cut_off]
    return model_list
# match a string that starts with the keyword `import`, with any indentation
_starts_with_import = re.compile(r"^\s*\bimport\b")
# match a string that uses the `from .* import .*` syntax, with any indentation
_has_from_import = re.compile(r"^\s*\bfrom\b.*\bimport\b")
def has_raw_import(line):
    """Return True when *line* is a plain or from-style import statement."""
    return any(pattern.match(line) is not None
               for pattern in (_starts_with_import, _has_from_import))
def get_hyperopt_model_string(model, data):
    """Generate the source of the temp module handed to hyperopt.

    Collects the calling script's imports, strips comments and import
    lines from the model function's source, then assembles imports, data
    section, rewritten model function and search space into one string.
    """
    model_string = inspect.getsource(model)
    lines = model_string.split("\n")
    lines = [line for line in lines if not line.strip().startswith('#')]
    calling_script_file = os.path.abspath(inspect.stack()[-1][1])
    with open(calling_script_file, 'r') as f:
        calling_lines = f.read().split('\n')
    raw_imports = []
    for line in calling_lines:
        if not has_raw_import(line):
            continue
        if "print_function" in line:
            # `from __future__ import print_function` must be the first
            # statement of the generated file; it cannot sit in try/except.
            raw_imports.append(line.strip() + "\n")
        else:
            # Guard other imports: the caller's modules may be absent in
            # the temp module's environment.
            raw_imports.append("try:\n %s\nexcept:\n pass\n" % line.strip())
    imports = ''.join(raw_imports)
    # BUG FIX: use a real import check.  The previous substring test
    # (`"import" not in line`) also deleted model lines that merely
    # contained the word "import" anywhere (strings, identifiers).
    model_string = [line + "\n" for line in lines if not has_raw_import(line)]
    model_string = ''.join(model_string)
    parts = hyperparameter_names(model_string)
    aug_parts = augmented_names(parts)
    hyperopt_params = get_hyperparameters(model_string)
    space = get_hyperopt_space(parts, hyperopt_params)
    data_string = retrieve_data_string(data)
    model = hyperopt_keras_model(model_string, parts, aug_parts)
    temp_str = temp_string(imports, model, data_string, space)
    return temp_str
def base_minimizer(model, data, algo, max_evals, trials, rseed=1337, full_model_string=None):
    """Generate the temp model module, import it and run hyperopt's fmin.

    Side effects: writes ./temp_model.py, imports keras_fmin_fnct and
    get_space from it, then deletes the temp files before minimising.
    """
    if full_model_string is not None:
        model_str = full_model_string
    else:
        model_str = get_hyperopt_model_string(model, data)
    write_temp_files(model_str)
    try:
        from temp_model import keras_fmin_fnct, get_space
    except:
        print("Unexpected error: {}".format(sys.exc_info()[0]))
        raise
    try:
        os.remove('./temp_model.py')
        os.remove('./temp_model.pyc')
    except OSError:
        pass
    try:  # for backward compatibility.
        # Older hyperopt releases accept the seed via `rseed`.
        best_run = fmin(keras_fmin_fnct,
                        space=get_space(),
                        algo=algo,
                        max_evals=max_evals,
                        trials=trials,
                        rseed=rseed)
    except TypeError:
        # Newer hyperopt renamed the parameter and expects a RandomState
        # instance via `rstate` instead.
        best_run = fmin(keras_fmin_fnct,
                        space=get_space(),
                        algo=algo,
                        max_evals=max_evals,
                        trials=trials,
                        rstate=np.random.RandomState(rseed))
    return best_run
def get_hyperopt_space(parts, hyperopt_params):
    """Build the source of get_space(): a dict literal mapping each
    hyperparameter name to its hp.* distribution expression."""
    space = "def get_space():\n    return {\n"
    for name, param in zip(parts, hyperopt_params):
        # Inject the label hyperopt requires as the distribution's first
        # argument, e.g. uniform(0, 1) -> uniform('lr', 0, 1).
        param = re.sub(r"\(", "('" + name + "', ", param, 1)
        space += "        '" + name + "': hp." + param + ",\n"
    # Drop the final newline (keeping the trailing comma) and close the dict.
    space = space[:-1]
    space += "\n    }\n"
    print('>>> Hyperas search space:\n')
    print(space)
    return space
def retrieve_data_string(data):
    '''
    This assumes 4 spaces for indentation and won't work otherwise
    '''
    # Take the data() function's source, drop its `def` line and any
    # `return ...` text, then dedent every line by one level (4 spaces)
    # so the body can run at module scope in the generated temp file.
    data_string = inspect.getsource(data)
    first_line = data_string.split("\n")[0]
    data_string = data_string.replace(first_line, "")
    data_string = re.sub(r"return.*", "", data_string)
    split_data = data_string.split("\n")
    for i, line in enumerate(split_data):
        split_data[i] = line[4:] + "\n"
    data_string = ''.join(split_data)
    print(">>> Data")
    print(data_string)
    return data_string
def hyperparameter_names(model_string):
    """Extract one identifier per {{...}} template, in order of appearance.

    The name is the variable assigned from (or function called with) the
    template; repeated names get a numeric suffix (x, x_1, x_2, ...).
    """
    parts = []
    params = re.findall(r"(\{\{[^}]+}\})", model_string)
    for param in params:
        # Look for `name = {{...}}` or `name({{...}}` around this template;
        # fall back to the previously found name when nothing matches.
        name = re.findall(r"(\w+(?=\s*[\=\(]\s*" + re.escape(param) + r"))", model_string)
        parts.append(name[0] if name else parts[-1])
    print("PARTS:")
    for part in parts:
        print(part)
    # Disambiguate duplicates with a running counter per base name.
    seen = {}
    for index, part in enumerate(parts):
        if part in seen:
            seen[part] += 1
            parts[index] = part + "_" + str(seen[part])
        else:
            seen[part] = 0
    return parts
def get_hyperparameters(model_string):
    """Return each {{...}} template in *model_string* with its braces stripped."""
    templates = re.findall(r"(\{\{[^}]+}\})", model_string)
    return [re.sub(r"[\{\}]", '', template) for template in templates]
def augmented_names(parts):
    """Map each hyperparameter name to the expression that reads it from
    the hyperopt `space` dict, e.g. "lr" -> "space['lr']"."""
    return ["space['" + part + "']" for part in parts]
def hyperopt_keras_model(model_string, parts, aug_parts):
    """Rewrite the model source into `def keras_fmin_fnct(space):` and
    replace each {{...}} template with its space lookup expression.

    Note: consumes *aug_parts* (pops entries in order).
    """
    header = model_string.split("\n")[0]
    body = model_string.replace(header, "def keras_fmin_fnct(space):\n")
    result = re.sub(r"(\{\{[^}]+}\})", lambda match: aug_parts.pop(0), body, count=len(parts))
    print('>>> Resulting replaced keras model:\n')
    print(result)
    return result
def temp_string(imports, model, data, space):
    """Assemble the full temp-module source: caller imports, hyperopt and
    hyperas boilerplate, data section, model function and search space."""
    boilerplate = ("from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n"
                   "from hyperas.distributions import conditional\n")
    return imports + boilerplate + data + model + "\n" + space
def write_temp_files(tmp_str, path='./temp_model.py'):
    """Write the generated module source *tmp_str* to *path*.

    The `with` statement already closes the file on exit; the explicit
    f.close() the original carried inside the block was redundant.
    """
    with open(path, 'w') as f:
        f.write(tmp_str)
Ensure `from __future__ import print_function` stays the first import in the generated file, since `__future__` imports must precede all other statements.
import numpy as np
from hyperopt import fmin
from .ensemble import VotingModel
import os
import inspect
import re
import sys
sys.path.append(".")
def minimize(model, data, algo, max_evals, trials, rseed=1337):
    """Minimize a keras model for given data and implicit hyperparameters.

    Parameters
    ----------
    model: A function defining a keras model with hyperas templates, which returns a
        valid hyperopt results dictionary, e.g.
        return {'loss': -acc, 'status': STATUS_OK}
    data: A parameterless function that defines and return all data needed in the above
        model definition.
    algo: A hyperopt algorithm, like tpe.suggest or rand.suggest
    max_evals: Maximum number of optimization runs
    trials: A hyperopt trials object, used to store intermediate results for all
        optimization runs
    rseed: Integer random seed for experiments

    Returns
    -------
    A pair consisting of the results dictionary of the best run and the corresponding
    keras model.
    """
    best_run = base_minimizer(model, data, algo, max_evals, trials, rseed)
    best_model = None
    for trial in trials:
        # hyperopt stores each sampled value as a one-element list; unwrap
        # them in place so the dict can be compared against best_run below.
        vals = trial.get('misc').get('vals')
        for key in vals.keys():
            vals[key] = vals[key][0]
        # The trial whose unwrapped parameter values equal the best run's
        # carries the fitted model in its result dict (if one was stored).
        if trial.get('misc').get('vals') == best_run and 'model' in trial.get('result').keys():
            best_model = trial.get('result').get('model')
    return best_run, best_model
def best_ensemble(nb_ensemble_models, model, data, algo, max_evals, trials, voting='hard', weights=None):
    """Optimise *model* and wrap the *nb_ensemble_models* selected runs in a
    VotingModel ensemble using the given voting scheme and weights."""
    model_list = best_models(nb_models=nb_ensemble_models, model=model,
                             data=data, algo=algo, max_evals=max_evals, trials=trials)
    return VotingModel(model_list, voting, weights)
def best_models(nb_models, model, data, algo, max_evals, trials):
    """Run the optimisation and return up to *nb_models* stored models.

    NOTE(review): the cut-off sorts losses in DESCENDING order and keeps
    trials with loss >= cut_off, i.e. the highest losses.  Under the usual
    hyperopt convention (lower loss is better) this selects the worst
    models -- confirm the intended direction.
    """
    base_minimizer(model, data, algo, max_evals, trials)
    if len(trials) < nb_models:
        nb_models = len(trials)
    scores = [trial.get('result').get('loss') for trial in trials]
    cut_off = sorted(scores, reverse=True)[nb_models-1]
    model_list = [trial.get('result').get('model') for trial in trials if trial.get('result').get('loss') >= cut_off]
    return model_list
# match a string that starts with the keyword `import`, with any indentation
_starts_with_import = re.compile(r"^\s*\bimport\b")
# match a string that uses the `from .* import .*` syntax, with any indentation
_has_from_import = re.compile(r"^\s*\bfrom\b.*\bimport\b")
def has_raw_import(line):
    """Return True when *line* is a plain or from-style import statement."""
    return (_starts_with_import.match(line) is not None
            or _has_from_import.match(line) is not None)
def extract_and_format_imports(lines):
    """Collect the import statements from *lines* as one source string.

    `print_function` imports are emitted bare (a `from __future__` import
    must be the first statement of the generated file, so it cannot be
    wrapped); every other import is guarded with try/except so missing
    modules do not break the generated temp module.
    """
    collected = []
    for line in lines:
        if not has_raw_import(line):
            continue
        if "print_function" in line:
            collected.append(line.strip() + "\n")
        else:
            collected.append("try:\n %s\nexcept:\n pass\n" % line.strip())
    return "".join(collected)
def get_hyperopt_model_string(model, data):
    """Generate the source of the temp module handed to hyperopt.

    Reads the model function's source, strips comments and imports,
    substitutes the {{...}} hyperparameter templates, and prepends the
    calling script's imports plus the data-loading section.
    """
    model_string = inspect.getsource(model)
    lines = model_string.split("\n")
    lines = [line for line in lines if not line.strip().startswith('#')]
    calling_script_file = os.path.abspath(inspect.stack()[-1][1])
    with open(calling_script_file, 'r') as f:
        calling_lines = f.read().split('\n')
    # BUG FIX: the imports must come from the calling script.  Previously
    # `lines` (the model function's own source, whose import lines are
    # filtered out again just below) was passed, leaving `calling_lines`
    # read from disk but never used and dropping the caller's imports.
    imports = extract_and_format_imports(calling_lines)
    model_string = [line + "\n" for line in lines if not has_raw_import(line)]
    model_string = ''.join(model_string)
    parts = hyperparameter_names(model_string)
    aug_parts = augmented_names(parts)
    hyperopt_params = get_hyperparameters(model_string)
    space = get_hyperopt_space(parts, hyperopt_params)
    data_string = retrieve_data_string(data)
    model = hyperopt_keras_model(model_string, parts, aug_parts)
    temp_str = temp_string(imports, model, data_string, space)
    return temp_str
def base_minimizer(model, data, algo, max_evals, trials, rseed=1337, full_model_string=None):
    """Generate the temp model module, import it and run hyperopt's fmin.

    Side effects: writes ./temp_model.py, imports keras_fmin_fnct and
    get_space from it, then deletes the temp files before minimising.
    """
    if full_model_string is not None:
        model_str = full_model_string
    else:
        model_str = get_hyperopt_model_string(model, data)
    write_temp_files(model_str)
    try:
        from temp_model import keras_fmin_fnct, get_space
    except:
        print("Unexpected error: {}".format(sys.exc_info()[0]))
        raise
    try:
        os.remove('./temp_model.py')
        os.remove('./temp_model.pyc')
    except OSError:
        pass
    try:  # for backward compatibility.
        # Older hyperopt releases accept the seed via `rseed`.
        best_run = fmin(keras_fmin_fnct,
                        space=get_space(),
                        algo=algo,
                        max_evals=max_evals,
                        trials=trials,
                        rseed=rseed)
    except TypeError:
        # Newer hyperopt renamed the parameter and expects a RandomState
        # instance via `rstate` instead.
        best_run = fmin(keras_fmin_fnct,
                        space=get_space(),
                        algo=algo,
                        max_evals=max_evals,
                        trials=trials,
                        rstate=np.random.RandomState(rseed))
    return best_run
def get_hyperopt_space(parts, hyperopt_params):
    """Build the source of get_space(): a dict literal mapping each
    hyperparameter name to its hp.* distribution expression."""
    space = "def get_space():\n    return {\n"
    for name, param in zip(parts, hyperopt_params):
        # Inject the label hyperopt requires as the distribution's first
        # argument, e.g. uniform(0, 1) -> uniform('lr', 0, 1).
        param = re.sub(r"\(", "('" + name + "', ", param, 1)
        space += "        '" + name + "': hp." + param + ",\n"
    # Drop the final newline (keeping the trailing comma) and close the dict.
    space = space[:-1]
    space += "\n    }\n"
    print('>>> Hyperas search space:\n')
    print(space)
    return space
def retrieve_data_string(data):
    '''
    This assumes 4 spaces for indentation and won't work otherwise
    '''
    # Take the data() function's source, drop its `def` line and any
    # `return ...` text, then dedent every line by one level (4 spaces)
    # so the body can run at module scope in the generated temp file.
    data_string = inspect.getsource(data)
    first_line = data_string.split("\n")[0]
    data_string = data_string.replace(first_line, "")
    data_string = re.sub(r"return.*", "", data_string)
    split_data = data_string.split("\n")
    for i, line in enumerate(split_data):
        split_data[i] = line[4:] + "\n"
    data_string = ''.join(split_data)
    print(">>> Data")
    print(data_string)
    return data_string
def hyperparameter_names(model_string):
    """Return one identifier per {{...}} template, in order of appearance;
    repeated names get a numeric suffix (x, x_1, x_2, ...)."""
    parts = []
    params = re.findall(r"(\{\{[^}]+}\})", model_string)
    for param in params:
        # The name is the identifier assigned from (x = {{...}}) or called
        # with (f({{...}})) this template; fall back to the previous name.
        name = re.findall(r"(\w+(?=\s*[\=\(]\s*" + re.escape(param) + r"))", model_string)
        if len(name) > 0:
            parts.append(name[0])
        else:
            parts.append(parts[-1])
    # parts = re.findall(r"(\w+(?=\s*[\=\(]\s*\{\{[^}]+}\}))", model_string)
    print("PARTS:")
    for part in parts:
        print(part)
    # Disambiguate duplicate names with a running counter per base name.
    part_dict = {}
    for i, part in enumerate(parts):
        if part in part_dict.keys():
            part_dict[part] += 1
            parts[i] = part + "_" + str(part_dict[part])
        else:
            part_dict[part] = 0
    return parts
def get_hyperparameters(model_string):
    """Return every {{...}} template in *model_string* with braces removed."""
    hyperopt_params = re.findall(r"(\{\{[^}]+}\})", model_string)
    for i, param in enumerate(hyperopt_params):
        hyperopt_params[i] = re.sub(r"[\{\}]", '', param)
    return hyperopt_params
def augmented_names(parts):
    """Map each hyperparameter name to the expression that reads it from
    the hyperopt `space` dict, e.g. "lr" -> "space['lr']"."""
    # The previous loop enumerated an index it never used; a comprehension
    # expresses the mapping directly.
    return ["space['" + part + "']" for part in parts]
def hyperopt_keras_model(model_string, parts, aug_parts):
    """Rewrite the model source into `def keras_fmin_fnct(space):`,
    replacing each {{...}} template with its space lookup expression.
    Note: consumes *aug_parts* (pops entries in order)."""
    first_line = model_string.split("\n")[0]
    model_string = model_string.replace(first_line, "def keras_fmin_fnct(space):\n")
    result = re.sub(r"(\{\{[^}]+}\})", lambda match: aug_parts.pop(0), model_string, count=len(parts))
    print('>>> Resulting replaced keras model:\n')
    print(result)
    return result
def temp_string(imports, model, data, space):
    """Concatenate the generated temp module: caller imports, hyperopt and
    hyperas boilerplate, data section, model function and search space."""
    temp = (imports + "from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n" +
            "from hyperas.distributions import conditional\n" +
            data + model + "\n" + space)
    return temp
def write_temp_files(tmp_str, path='./temp_model.py'):
    """Write the generated module source *tmp_str* to *path*.

    The `with` statement already closes the file on exit; the explicit
    f.close() the original carried inside the block was redundant.
    """
    with open(path, 'w') as f:
        f.write(tmp_str)
|
#!/usr/bin/env python
"""
usage: linux_binary (-h | -r <VERSION> | -i <VERSION> | -v)
optional arguments:
-h, --help
Show this help message and exit
-i <VERSION>, --install <VERSION>
Create installation script
-r <VERSION>, --remove <VERSION>
Create removal script
-v, --versions
List versions
"""
from __future__ import print_function
from collections import OrderedDict
from docopt import docopt
def dict_variables(rd, sv, name):
    """Build the shell-variable mapping for one kernel release.

    rd   -- RELEASE_DATE timestamp string;
    sv   -- zero-padded (major, minor, patch) string triple, e.g. ("03", "18", "05");
    name -- Ubuntu series name used in the download URL.
    """
    short_version = ".".join(str(int(part)) for part in sv if int(part))
    return {
        "RELEASE_DATE": rd,
        "SHORT_STRING": "".join(sv),
        "SHORT_VERSION": short_version,
        "NAME": name,
    }
# Kernel version -> shell variables needed to build its Ubuntu mainline-PPA
# download URLs, kept in release order (OrderedDict preserves listing order).
# NOTE(review): 3.15.8 reuses the RELEASE_DATE of 3.15.5 ("201407091543");
# looks like a copy/paste slip -- verify against the mainline PPA.
VERSIONS = OrderedDict([
    ('3.14.5', dict_variables("201405311735", ("03", "14", "05"), "utopic")),
    ('3.14.6', dict_variables("201406071635", ("03", "14", "06"), "utopic")),
    ('3.14.7', dict_variables("201406111644", ("03", "14", "07"), "utopic")),
    ('3.14.8', dict_variables("201406161755", ("03", "14", "08"), "utopic")),
    ('3.14.9', dict_variables("201406261553", ("03", "14", "09"), "utopic")),
    ('3.14.10', dict_variables("201406302353", ("03", "14", "10"), "utopic")),
    ('3.14.11', dict_variables("201407062254", ("03", "14", "11"), "utopic")),
    ('3.14.12', dict_variables("201407091455", ("03", "14", "12"), "utopic")),
    ('3.14.13', dict_variables("201407171953", ("03", "14", "13"), "utopic")),
    ('3.14.14', dict_variables("201407281153", ("03", "14", "14"), "utopic")),
    ('3.14.15', dict_variables("201407311853", ("03", "14", "15"), "utopic")),
    ('3.14.16', dict_variables("201408072035", ("03", "14", "16"), "utopic")),
    ('3.14.17', dict_variables("201408132253", ("03", "14", "17"), "utopic")),
    ('3.15.1', dict_variables("201406161841", ("03", "15", "01"), "utopic")),
    ('3.15.2', dict_variables("201406261639", ("03", "15", "02"), "utopic")),
    ('3.15.3', dict_variables("201407010040", ("03", "15", "03"), "utopic")),
    ('3.15.4', dict_variables("201407062345", ("03", "15", "04"), "utopic")),
    ('3.15.5', dict_variables("201407091543", ("03", "15", "05"), "utopic")),
    ('3.15.6', dict_variables("201407172034", ("03", "15", "06"), "utopic")),
    ('3.15.7', dict_variables("201407281235", ("03", "15", "07"), "utopic")),
    ('3.15.8', dict_variables("201407091543", ("03", "15", "08"), "utopic")),
    ('3.15.9', dict_variables("201408072114", ("03", "15", "09"), "utopic")),
    ('3.15.10', dict_variables("201408132333", ("03", "15", "10"), "utopic")),
    ('3.16.1', dict_variables("201408140014", ("03", "16", "01"), "utopic")),
    ('3.16.2', dict_variables("201409052035", ("03", "16", "02"), "utopic")),
    ('3.16.3', dict_variables("201409171435", ("03", "16", "03"), "utopic")),
    ('3.17.0', dict_variables("201410060605", ("03", "17", "00"), "utopic")),
    ('3.17.1', dict_variables("201410150735", ("03", "17", "01"), "utopic")),
    ('3.17.2', dict_variables("201410301416", ("03", "17", "02"), "vivid")),
    ('3.17.3', dict_variables("201411141335", ("03", "17", "03"), "vivid")),
    ('3.17.4', dict_variables("201411211317", ("03", "17", "04"), "vivid")),
    ('3.17.5', dict_variables("201412070036", ("03", "17", "05"), "vivid")),
    ('3.17.6', dict_variables("201412071535", ("03", "17", "06"), "vivid")),
    ('3.18.0', dict_variables("201412071935", ("03", "18", "00"), "vivid")),
    ('3.18.1', dict_variables("201412170637", ("03", "18", "01"), "vivid")),
    ('3.18.2', dict_variables("201501082011", ("03", "18", "02"), "vivid")),
    ('3.18.3', dict_variables("201501161810", ("03", "18", "03"), "vivid")),
    ('3.18.4', dict_variables("201501271243", ("03", "18", "04"), "vivid")),
    ('3.18.5', dict_variables("201501292218", ("03", "18", "05"), "vivid")),
])
def print_exec_header():
    # Shebang for the generated shell script; -e aborts on the first error.
    print("#!/bin/sh -e\n")
def wget_file(filename):
    # Print a wget command for a mainline-PPA file.  {{ }} escapes the
    # literal ${...} shell variables from str.format; only {0} is filled in
    # here -- SHORT_VERSION/NAME are expanded by the shell at run time.
    http_address = "http://kernel.ubuntu.com/~kernel-ppa/mainline/v${{SHORT_VERSION}}-${{NAME}}/{0}".format(filename)
    print("wget {0}".format(http_address))
def install_script(version):
    """Print a shell script that downloads and installs the given mainline
    kernel *version* (must be a key of VERSIONS)."""
    x = VERSIONS[version]
    print_exec_header()
    # Export the per-release variables the subsequent commands rely on.
    for k, v in x.items():
        print("export {0}={1}".format(k, v))
    print("export VERSION=${SHORT_VERSION}-${SHORT_STRING}")
    print("export FULL_VERSION=${VERSION}.${RELEASE_DATE}")
    print()
    wget_file("linux-headers-${VERSION}_${FULL_VERSION}_all.deb")
    wget_file("linux-headers-${VERSION}-generic_${FULL_VERSION}_amd64.deb")
    wget_file("linux-image-${VERSION}-generic_${FULL_VERSION}_amd64.deb")
    print()
    print("sudo dpkg -i linux-headers-${SHORT_VERSION}-*.deb linux-image-${SHORT_VERSION}-*.deb")
    print("sudo update-grub")
    print("sudo reboot")
def remove_script(version):
    """Print a shell script that removes the given kernel's packages."""
    x = VERSIONS[version]
    print_exec_header()
    for k, v in x.items():
        print("export {0}={1}".format(k, v))
    print("sudo apt-get remove linux-headers-${SHORT_VERSION}-* linux-image-${SHORT_VERSION}-*")
def print_versions():
    # List the supported kernel versions, one per line, in release order.
    for key in VERSIONS:
        print(key)
def main():
    """Dispatch on the docopt-parsed command-line options (see module
    docstring): -v lists versions, -r/-i emit removal/install scripts."""
    options = docopt(__doc__)
    if options['--versions']:
        print_versions()
    else:
        options_remove, options_install = [options.get(key) for key in ("--remove", "--install")]
        if options_remove:
            remove_script(options_remove)
        elif options_install:
            install_script(options_install)
if __name__ == '__main__':
    main()
Add kernel versions 3.18.6 and 3.18.7.
#!/usr/bin/env python
"""
usage: linux_binary (-h | -r <VERSION> | -i <VERSION> | -v)
optional arguments:
-h, --help
Show this help message and exit
-i <VERSION>, --install <VERSION>
Create installation script
-r <VERSION>, --remove <VERSION>
Create removal script
-v, --versions
List versions
"""
from __future__ import print_function
from collections import OrderedDict
from docopt import docopt
def dict_variables(rd, sv, name):
    """Shell-variable mapping for one kernel release.

    rd -- RELEASE_DATE timestamp; sv -- zero-padded (major, minor, patch)
    strings; name -- Ubuntu series the mainline build targets.
    """
    return {
        "RELEASE_DATE": rd,
        "SHORT_STRING": "".join(sv),
        # Drop leading zeros and all-zero components: ("03","17","00") -> "3.17".
        "SHORT_VERSION": ".".join(str(int(component)) for component in sv if int(component)),
        "NAME": name,
    }
# Kernel version -> shell variables needed to build its Ubuntu mainline-PPA
# download URLs, kept in release order (OrderedDict preserves listing order).
# NOTE(review): 3.15.8 reuses the RELEASE_DATE of 3.15.5 ("201407091543");
# looks like a copy/paste slip -- verify against the mainline PPA.
VERSIONS = OrderedDict([
    ('3.14.5', dict_variables("201405311735", ("03", "14", "05"), "utopic")),
    ('3.14.6', dict_variables("201406071635", ("03", "14", "06"), "utopic")),
    ('3.14.7', dict_variables("201406111644", ("03", "14", "07"), "utopic")),
    ('3.14.8', dict_variables("201406161755", ("03", "14", "08"), "utopic")),
    ('3.14.9', dict_variables("201406261553", ("03", "14", "09"), "utopic")),
    ('3.14.10', dict_variables("201406302353", ("03", "14", "10"), "utopic")),
    ('3.14.11', dict_variables("201407062254", ("03", "14", "11"), "utopic")),
    ('3.14.12', dict_variables("201407091455", ("03", "14", "12"), "utopic")),
    ('3.14.13', dict_variables("201407171953", ("03", "14", "13"), "utopic")),
    ('3.14.14', dict_variables("201407281153", ("03", "14", "14"), "utopic")),
    ('3.14.15', dict_variables("201407311853", ("03", "14", "15"), "utopic")),
    ('3.14.16', dict_variables("201408072035", ("03", "14", "16"), "utopic")),
    ('3.14.17', dict_variables("201408132253", ("03", "14", "17"), "utopic")),
    ('3.15.1', dict_variables("201406161841", ("03", "15", "01"), "utopic")),
    ('3.15.2', dict_variables("201406261639", ("03", "15", "02"), "utopic")),
    ('3.15.3', dict_variables("201407010040", ("03", "15", "03"), "utopic")),
    ('3.15.4', dict_variables("201407062345", ("03", "15", "04"), "utopic")),
    ('3.15.5', dict_variables("201407091543", ("03", "15", "05"), "utopic")),
    ('3.15.6', dict_variables("201407172034", ("03", "15", "06"), "utopic")),
    ('3.15.7', dict_variables("201407281235", ("03", "15", "07"), "utopic")),
    ('3.15.8', dict_variables("201407091543", ("03", "15", "08"), "utopic")),
    ('3.15.9', dict_variables("201408072114", ("03", "15", "09"), "utopic")),
    ('3.15.10', dict_variables("201408132333", ("03", "15", "10"), "utopic")),
    ('3.16.1', dict_variables("201408140014", ("03", "16", "01"), "utopic")),
    ('3.16.2', dict_variables("201409052035", ("03", "16", "02"), "utopic")),
    ('3.16.3', dict_variables("201409171435", ("03", "16", "03"), "utopic")),
    ('3.17.0', dict_variables("201410060605", ("03", "17", "00"), "utopic")),
    ('3.17.1', dict_variables("201410150735", ("03", "17", "01"), "utopic")),
    ('3.17.2', dict_variables("201410301416", ("03", "17", "02"), "vivid")),
    ('3.17.3', dict_variables("201411141335", ("03", "17", "03"), "vivid")),
    ('3.17.4', dict_variables("201411211317", ("03", "17", "04"), "vivid")),
    ('3.17.5', dict_variables("201412070036", ("03", "17", "05"), "vivid")),
    ('3.17.6', dict_variables("201412071535", ("03", "17", "06"), "vivid")),
    ('3.18.0', dict_variables("201412071935", ("03", "18", "00"), "vivid")),
    ('3.18.1', dict_variables("201412170637", ("03", "18", "01"), "vivid")),
    ('3.18.2', dict_variables("201501082011", ("03", "18", "02"), "vivid")),
    ('3.18.3', dict_variables("201501161810", ("03", "18", "03"), "vivid")),
    ('3.18.4', dict_variables("201501271243", ("03", "18", "04"), "vivid")),
    ('3.18.5', dict_variables("201501292218", ("03", "18", "05"), "vivid")),
    ('3.18.6', dict_variables("201502061036", ("03", "18", "06"), "vivid")),
    ('3.18.7', dict_variables("201502110759", ("03", "18", "07"), "vivid")),
])
def print_exec_header():
    # Shebang for the generated shell script; -e aborts on the first error.
    print("#!/bin/sh -e\n")
def wget_file(filename):
    # Print a wget command for a mainline-PPA file.  {{ }} escapes the
    # literal ${...} shell variables from str.format; only {0} is filled in
    # here -- SHORT_VERSION/NAME are expanded by the shell at run time.
    http_address = "http://kernel.ubuntu.com/~kernel-ppa/mainline/v${{SHORT_VERSION}}-${{NAME}}/{0}".format(filename)
    print("wget {0}".format(http_address))
def install_script(version):
    """Print a shell script that downloads and installs the given mainline
    kernel *version* (must be a key of VERSIONS)."""
    x = VERSIONS[version]
    print_exec_header()
    # Export the per-release variables the subsequent commands rely on.
    for k, v in x.items():
        print("export {0}={1}".format(k, v))
    print("export VERSION=${SHORT_VERSION}-${SHORT_STRING}")
    print("export FULL_VERSION=${VERSION}.${RELEASE_DATE}")
    print()
    wget_file("linux-headers-${VERSION}_${FULL_VERSION}_all.deb")
    wget_file("linux-headers-${VERSION}-generic_${FULL_VERSION}_amd64.deb")
    wget_file("linux-image-${VERSION}-generic_${FULL_VERSION}_amd64.deb")
    print()
    print("sudo dpkg -i linux-headers-${SHORT_VERSION}-*.deb linux-image-${SHORT_VERSION}-*.deb")
    print("sudo update-grub")
    print("sudo reboot")
def remove_script(version):
    """Print a shell script that removes the given kernel's packages."""
    x = VERSIONS[version]
    print_exec_header()
    for k, v in x.items():
        print("export {0}={1}".format(k, v))
    print("sudo apt-get remove linux-headers-${SHORT_VERSION}-* linux-image-${SHORT_VERSION}-*")
def print_versions():
    # List the supported kernel versions, one per line, in release order.
    for key in VERSIONS:
        print(key)
def main():
    """Dispatch on the docopt-parsed command-line options (see module
    docstring): -v lists versions, -r/-i emit removal/install scripts."""
    options = docopt(__doc__)
    if options['--versions']:
        print_versions()
    else:
        options_remove, options_install = [options.get(key) for key in ("--remove", "--install")]
        if options_remove:
            remove_script(options_remove)
        elif options_install:
            install_script(options_install)
if __name__ == '__main__':
    main()
|
"""
Overridden syncdb command
"""
import sys
from optparse import make_option
from django.core.management.base import NoArgsCommand, BaseCommand
from django.core.management.color import no_style
from django.utils.datastructures import SortedDict
from django.core.management.commands import syncdb
from django.conf import settings
from django.db import models
from django.db.models.loading import cache
from django.core import management
from south.db import dbs
from south import migration
from south.exceptions import NoMigrations
def get_app_label(app):
    """Return the app label for a models module: its dotted module path
    with the trailing component (usually "models") removed."""
    components = app.__name__.split('.')
    return '.'.join(components[:-1])
class Command(NoArgsCommand):
option_list = syncdb.Command.option_list + (
make_option('--migrate', action='store_true', dest='migrate', default=False,
help='Tells South to also perform migrations after the sync. Default for during testing, and other internal calls.'),
make_option('--all', action='store_true', dest='migrate_all', default=False,
help='Makes syncdb work on all apps, even migrated ones. Be careful!'),
)
if '--verbosity' not in [opt.get_opt_string() for opt in BaseCommand.option_list]:
option_list += (
make_option('--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
)
help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created, except those which use migrations."
def handle_noargs(self, migrate_all=False, **options):
# Work out what uses migrations and so doesn't need syncing
apps_needing_sync = []
apps_migrated = []
for app in models.get_apps():
app_label = get_app_label(app)
if migrate_all:
apps_needing_sync.append(app_label)
else:
try:
migrations = migration.Migrations(app_label)
except NoMigrations:
# It needs syncing
apps_needing_sync.append(app_label)
else:
# This is a migrated app, leave it
apps_migrated.append(app_label)
verbosity = int(options.get('verbosity', 0))
# Run syncdb on only the ones needed
if verbosity:
print "Syncing..."
old_installed, settings.INSTALLED_APPS = settings.INSTALLED_APPS, apps_needing_sync
old_app_store, cache.app_store = cache.app_store, SortedDict([
(k, v) for (k, v) in cache.app_store.items()
if get_app_label(k) in apps_needing_sync
])
# This will allow the setting of the MySQL storage engine, for example.
for db in dbs.values():
db.connection_init()
# OK, run the actual syncdb
syncdb.Command().execute(**options)
settings.INSTALLED_APPS = old_installed
cache.app_store = old_app_store
# Migrate if needed
if options.get('migrate', True):
if verbosity:
print "Migrating..."
management.call_command('migrate', **options)
# Be obvious about what we did
if verbosity:
print "\nSynced:\n > %s" % "\n > ".join(apps_needing_sync)
if options.get('migrate', True):
if verbosity:
print "\nMigrated:\n - %s" % "\n - ".join(apps_migrated)
else:
if verbosity:
print "\nNot synced (use migrations):\n - %s" % "\n - ".join(apps_migrated)
print "(use ./manage.py migrate to migrate these)"
Fix from mailing list for Django 1.0.
"""
Overridden syncdb command
"""
import sys
from optparse import make_option
from django.core.management.base import NoArgsCommand, BaseCommand
from django.core.management.color import no_style
from django.utils.datastructures import SortedDict
from django.core.management.commands import syncdb
from django.conf import settings
from django.db import models
from django.db.models.loading import cache
from django.core import management
from south.db import dbs
from south import migration
from south.exceptions import NoMigrations
def get_app_label(app):
    """Return the app label (dotted module path minus its final component,
    normally "models") for the given models module."""
    return app.__name__.rpartition('.')[0]
class Command(NoArgsCommand):
    """syncdb replacement that skips apps managed by South migrations."""
    option_list = syncdb.Command.option_list + (
        make_option('--migrate', action='store_true', dest='migrate', default=False,
            help='Tells South to also perform migrations after the sync. Default for during testing, and other internal calls.'),
        make_option('--all', action='store_true', dest='migrate_all', default=False,
            help='Makes syncdb work on all apps, even migrated ones. Be careful!'),
    )
    # Only add --verbosity when the syncdb command we extend does not
    # already define it (avoids a duplicate-option error on Django 1.0).
    if '--verbosity' not in [opt.get_opt_string() for opt in syncdb.Command.option_list]:
        option_list += (
            make_option('--verbosity', action='store', dest='verbosity', default='1',
                type='choice', choices=['0', '1', '2'],
                help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
        )
    help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created, except those which use migrations."
    def handle_noargs(self, migrate_all=False, **options):
        """Sync only the apps without migrations, then optionally migrate."""
        # Work out what uses migrations and so doesn't need syncing
        apps_needing_sync = []
        apps_migrated = []
        for app in models.get_apps():
            app_label = get_app_label(app)
            if migrate_all:
                apps_needing_sync.append(app_label)
            else:
                try:
                    migrations = migration.Migrations(app_label)
                except NoMigrations:
                    # It needs syncing
                    apps_needing_sync.append(app_label)
                else:
                    # This is a migrated app, leave it
                    apps_migrated.append(app_label)
        verbosity = int(options.get('verbosity', 0))
        # Run syncdb on only the ones needed
        if verbosity:
            print "Syncing..."
        # Temporarily hide migrated apps from settings and the app cache so
        # the stock syncdb only sees apps that still need table creation.
        old_installed, settings.INSTALLED_APPS = settings.INSTALLED_APPS, apps_needing_sync
        old_app_store, cache.app_store = cache.app_store, SortedDict([
            (k, v) for (k, v) in cache.app_store.items()
            if get_app_label(k) in apps_needing_sync
        ])
        # This will allow the setting of the MySQL storage engine, for example.
        for db in dbs.values():
            db.connection_init()
        # OK, run the actual syncdb
        syncdb.Command().execute(**options)
        settings.INSTALLED_APPS = old_installed
        cache.app_store = old_app_store
        # Migrate if needed
        if options.get('migrate', True):
            if verbosity:
                print "Migrating..."
            management.call_command('migrate', **options)
        # Be obvious about what we did
        if verbosity:
            print "\nSynced:\n > %s" % "\n > ".join(apps_needing_sync)
        if options.get('migrate', True):
            if verbosity:
                print "\nMigrated:\n - %s" % "\n - ".join(apps_migrated)
        else:
            if verbosity:
                print "\nNot synced (use migrations):\n - %s" % "\n - ".join(apps_migrated)
                print "(use ./manage.py migrate to migrate these)"
|
import datetime
from django.contrib.auth import get_user_model
from django.utils.text import slugify
import factory
from factory.django import DjangoModelFactory
from . import models
from common.mixins import TimestampMixin
class TimestampFactory(DjangoModelFactory):
    """TimestampMixin model factory.

    Abstract base providing fixed creation/modification timestamps for
    factories of models that inherit TimestampMixin.
    """

    class Meta:
        model = TimestampMixin
        abstract = True

    date_created = datetime.datetime(2008, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
    date_modified = datetime.datetime(2009, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
class WeekdayFactory(DjangoModelFactory):
    """Weekday model factory."""

    class Meta:
        model = models.Weekday

    name = 'Monday'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
class MealFactory(DjangoModelFactory):
    """Meal model factory."""

    class Meta:
        model = models.Meal

    name = 'Breakfast'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
    start_time = datetime.time(5, 7, 9)
    end_time = datetime.time(6, 7, 9)
class MealOptionFactory(DjangoModelFactory):
    """MealOption model factory."""

    class Meta:
        model = models.MealOption

    name = 'Option A'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
class CourseFactory(DjangoModelFactory):
    """Course model factory."""

    class Meta:
        model = models.Course

    name = 'Appetizer'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
class UserFactory(DjangoModelFactory):
    """Django admin user model factory."""

    class Meta:
        model = get_user_model()
        django_get_or_create = ('username', 'email', 'password')

    username = 'admin'
    email = 'admin@admin.com'
    # NOTE(review): assigned straight to the password field, so it is stored
    # unhashed — fine for fixtures that never authenticate; confirm otherwise.
    password = 'adminpassword'
    is_superuser = True
    is_active = True
class TimetableFactory(TimestampFactory):
    """Timetable model factory."""

    class Meta:
        model = models.Timetable

    name = 'Fellows Timetable'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
    code = 'FT7876'
    api_key = 'TF78993jTY'
    cycle_length = 14
    current_cycle_day = 2
    description = 'Some random description'
class AdminFactory(DjangoModelFactory):
    """Admin model factory (through model linking a user to a timetable)."""

    class Meta:
        model = models.Admin

    user = factory.SubFactory(UserFactory)
    timetable = factory.SubFactory(TimetableFactory)
    is_super = True
class UserWithTimetableFactory(DjangoModelFactory):
    """
    Factory specifying many-to-many through relationship.

    The 'admin' field in Timetable model is a many-to-many relationship
    to User model through Admin model.
    """

    # NOTE(review): no Meta.model is declared on this factory — presumably the
    # user model is intended; confirm before instantiating it directly.
    admins = factory.RelatedFactory(AdminFactory, 'user')
class DishFactory(TimestampFactory):
    """Dish model factory."""

    class Meta:
        model = models.Dish

    name = 'Coconut rice'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
    description = 'Some random description'
class MenuItem(TimestampFactory):
    """MenuItem model factory."""
    # NOTE(review): class name lacks the usual "Factory" suffix; kept as-is
    # because callers reference it under this name.

    class Meta:
        model = models.MenuItem

    timetable = factory.SubFactory(TimetableFactory)
    cycle_day = 2
    meal = factory.SubFactory(MealFactory)
    meal_option = factory.SubFactory(MealOptionFactory)
class EventFactory(TimestampFactory):
    """Event model factory."""

    class Meta:
        model = models.Event

    name = 'Christmas'
    timetable = factory.SubFactory(TimetableFactory)
    start_date = datetime.datetime(2008, 12, 23, 0, 0, 0, tzinfo=datetime.timezone.utc)
    end_date = datetime.datetime(2008, 12, 28, 0, 0, 0, tzinfo=datetime.timezone.utc)
class VendorFactory(DjangoModelFactory):
    """Vendor model Factory."""

    class Meta:
        model = models.Vendor

    name = 'Mama Taverna'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
    info = 'Some random info'
    start_date = datetime.datetime(2008, 1, 23, 0, 0, 0, tzinfo=datetime.timezone.utc)
    end_date = datetime.datetime(2008, 12, 28, 0, 0, 0, tzinfo=datetime.timezone.utc)
Update admin references to the TimetableManagement model
import datetime
from django.contrib.auth import get_user_model
from django.utils.text import slugify
import factory
from factory.django import DjangoModelFactory
from . import models
from common.mixins import TimestampMixin
class TimestampFactory(DjangoModelFactory):
    """TimestampMixin model factory.

    Abstract base providing fixed creation/modification timestamps for
    factories of models that inherit TimestampMixin.
    """

    class Meta:
        model = TimestampMixin
        abstract = True

    date_created = datetime.datetime(2008, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
    date_modified = datetime.datetime(2009, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
class WeekdayFactory(DjangoModelFactory):
    """Weekday model factory."""

    class Meta:
        model = models.Weekday

    name = 'Monday'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
class MealFactory(DjangoModelFactory):
    """Meal model factory."""

    class Meta:
        model = models.Meal

    name = 'Breakfast'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
    start_time = datetime.time(5, 7, 9)
    end_time = datetime.time(6, 7, 9)
class MealOptionFactory(DjangoModelFactory):
    """MealOption model factory."""

    class Meta:
        model = models.MealOption

    name = 'Option A'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
class CourseFactory(DjangoModelFactory):
    """Course model factory."""

    class Meta:
        model = models.Course

    name = 'Appetizer'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
class UserFactory(DjangoModelFactory):
    """Django admin user model factory."""

    class Meta:
        model = get_user_model()
        django_get_or_create = ('username', 'email', 'password')

    username = 'admin'
    email = 'admin@admin.com'
    # NOTE(review): assigned straight to the password field, so it is stored
    # unhashed — fine for fixtures that never authenticate; confirm otherwise.
    password = 'adminpassword'
    is_superuser = True
    is_active = True
class TimetableFactory(TimestampFactory):
    """Timetable model factory."""

    class Meta:
        model = models.Timetable

    name = 'Fellows Timetable'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
    code = 'FT7876'
    api_key = 'TF78993jTY'
    cycle_length = 14
    current_cycle_day = 2
    description = 'Some random description'
class AdminFactory(DjangoModelFactory):
    """TimetableManagement model factory (through model linking a user to a
    timetable); keeps its historical AdminFactory name for callers."""

    class Meta:
        model = models.TimetableManagement

    user = factory.SubFactory(UserFactory)
    timetable = factory.SubFactory(TimetableFactory)
    is_super = True
class UserWithTimetableFactory(DjangoModelFactory):
    """
    Factory specifying many-to-many through relationship.

    The 'admin' field in Timetable model is a many-to-many relationship
    to User model through Admin model.
    """

    # NOTE(review): no Meta.model is declared on this factory — presumably the
    # user model is intended; confirm before instantiating it directly.
    admins = factory.RelatedFactory(AdminFactory, 'user')
class DishFactory(TimestampFactory):
    """Dish model factory."""

    class Meta:
        model = models.Dish

    name = 'Coconut rice'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
    description = 'Some random description'
class MenuItem(TimestampFactory):
    """MenuItem model factory."""
    # NOTE(review): class name lacks the usual "Factory" suffix; kept as-is
    # because callers reference it under this name.

    class Meta:
        model = models.MenuItem

    timetable = factory.SubFactory(TimetableFactory)
    cycle_day = 2
    meal = factory.SubFactory(MealFactory)
    meal_option = factory.SubFactory(MealOptionFactory)
class EventFactory(TimestampFactory):
    """Event model factory."""

    class Meta:
        model = models.Event

    name = 'Christmas'
    timetable = factory.SubFactory(TimetableFactory)
    start_date = datetime.datetime(2008, 12, 23, 0, 0, 0, tzinfo=datetime.timezone.utc)
    end_date = datetime.datetime(2008, 12, 28, 0, 0, 0, tzinfo=datetime.timezone.utc)
class VendorFactory(DjangoModelFactory):
    """Vendor model Factory."""

    class Meta:
        model = models.Vendor

    name = 'Mama Taverna'
    # Slug is derived from the name when the instance is built.
    slug = factory.LazyAttribute(lambda obj: '%s' % slugify(obj.name))
    info = 'Some random info'
    start_date = datetime.datetime(2008, 1, 23, 0, 0, 0, tzinfo=datetime.timezone.utc)
    end_date = datetime.datetime(2008, 12, 28, 0, 0, 0, tzinfo=datetime.timezone.utc)
|
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_france_data.input_data_builders import get_input_data_frame
from openfisca_france_data.surveys import SurveyScenario
from openfisca_plugin_aggregates.aggregates import Aggregates
def create_survey_scenario(year = None, reform = None):
    """Build a SurveyScenario from the input data for the given year.

    :param year: year of the input data (required)
    :param reform: optional openfisca_france reform object; when given, a
        warning is logged because aggregates will differ from true values
    :returns: an initialized SurveyScenario
    """
    assert year is not None
    if reform is not None:
        # Bug fix: the module-level `log` is only bound inside the
        # `__main__` guard, so referencing it here raised a NameError when
        # this function was called after a plain import. Look the logger up
        # locally instead.
        import logging
        logging.getLogger(__name__).warning(
            "="*10 + "Is working in reform mode, take care when compare to true aggregates" + "="*10)
    input_data_frame = get_input_data_frame(year)
    survey_scenario = SurveyScenario().init_from_data_frame(
        input_data_frame = input_data_frame,
        reform = reform,
        used_as_input_variables = ['salaire_imposable', 'cho', 'rst', 'age_en_mois'],
        year = year,
        )
    return survey_scenario
def test_aggregates(year = None, reform = None):
    '''
    Test aggregates values against the survey data.

    :param year: year of data and simulation used to compute the aggregates
        (required)
    :param reform: optional argument, an openfisca_france.reforms object,
        default None
    :returns: the computed Aggregates object
    '''
    assert year is not None
    survey_scenario = create_survey_scenario(year, reform)
    aggregates = Aggregates(survey_scenario = survey_scenario)
    aggregates.compute()
    # Bug fix: use the print() function form — identical output for a single
    # argument under Python 2 and valid syntax under Python 3.
    print(aggregates.aggr_frame)
    return aggregates
if __name__ == '__main__':
    # Smoke-test entry point: run the aggregates test for 2009 under the
    # "allocations familiales imposables" reform, logging to stdout.
    from openfisca_france.reforms import allocations_familiales_imposables as reform
    import logging
    log = logging.getLogger(__name__)
    import sys
    logging.basicConfig(level = logging.INFO, stream = sys.stdout)
    aggregates = test_aggregates(year = 2009, reform = reform)
    # df = aggregates.aggr_frame
Clarify log message in test_aggregates_reform
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_france_data.input_data_builders import get_input_data_frame
from openfisca_france_data.surveys import SurveyScenario
from openfisca_plugin_aggregates.aggregates import Aggregates
def create_survey_scenario(year = None, reform = None):
    """Build a SurveyScenario from the input data for the given year.

    :param year: year of the input data (required)
    :param reform: optional openfisca_france reform object; when given, a
        warning naming the reform is logged
    :returns: an initialized SurveyScenario
    """
    assert year is not None
    if reform is not None:
        # Bug fix: the module-level `log` is only bound inside the
        # `__main__` guard, so referencing it here raised a NameError when
        # this function was called after a plain import. Look the logger up
        # locally instead.
        import logging
        logging.getLogger(__name__).warning("Using reform {}".format(reform.name))
    input_data_frame = get_input_data_frame(year)
    survey_scenario = SurveyScenario().init_from_data_frame(
        input_data_frame = input_data_frame,
        reform = reform,
        used_as_input_variables = ['salaire_imposable', 'cho', 'rst', 'age_en_mois'],
        year = year,
        )
    return survey_scenario
def test_aggregates(year = None, reform = None):
    '''
    Test aggregates values against the survey data.

    :param year: year of data and simulation used to compute the aggregates
        (required)
    :param reform: optional argument, an openfisca_france.reforms object,
        default None
    :returns: the computed Aggregates object
    '''
    assert year is not None
    survey_scenario = create_survey_scenario(year, reform)
    aggregates = Aggregates(survey_scenario = survey_scenario)
    aggregates.compute()
    # Bug fix: use the print() function form — identical output for a single
    # argument under Python 2 and valid syntax under Python 3.
    print(aggregates.aggr_frame)
    return aggregates
if __name__ == '__main__':
    # Smoke-test entry point: run the aggregates test for 2009 under the
    # "allocations familiales imposables" reform, logging to stdout.
    from openfisca_france.reforms import allocations_familiales_imposables as reform
    import logging
    log = logging.getLogger(__name__)
    import sys
    logging.basicConfig(level = logging.INFO, stream = sys.stdout)
    aggregates = test_aggregates(year = 2009, reform = reform)
    # df = aggregates.aggr_frame
|
# This software and any associated files are copyright 2010 Brian Carver and
# Michael Lissner.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from djangosphinx.models import SphinxSearch
from django.db import models
import alert
# a tuple, which we'll pass to the choices argument in various places
PACER_CODES = (
    ('ca1', 'Court of Appeals for the First Circuit'),
    ('ca2', 'Court of Appeals for the Second Circuit'),
    ('ca3', 'Court of Appeals for the Third Circuit'),
    ('ca4', 'Court of Appeals for the Fourth Circuit'),
    ('ca5', 'Court of Appeals for the Fifth Circuit'),
    ('ca6', 'Court of Appeals for the Sixth Circuit'),
    ('ca7', 'Court of Appeals for the Seventh Circuit'),
    ('ca8', 'Court of Appeals for the Eighth Circuit'),
    ('ca9', 'Court of Appeals for the Ninth Circuit'),
    ('ca10', 'Court of Appeals for the Tenth Circuit'),
    ('ca11', 'Court of Appeals for the Eleventh Circuit'),
    ('cadc', 'Court of Appeals for the D.C. Circuit'),
    ('cafc', 'Court of Appeals for the Federal Circuit'),
    ('scotus', 'Supreme Court of the United States'),
)

# changes here need to be mirrored in the coverage page view and the exceptions
# list for sphinx
DOCUMENT_STATUSES = (
    ('P', 'Published/Precedential'),
    ('U', 'Unpublished/Non-Precedential'),
    ('E', 'Errata'),
    ('I', 'In-chambers'),
    ('R', 'Relating-to orders'),
)

# Provenance codes for Document.source.
DOCUMENT_SOURCES = (
    ('C', 'court website'),
    ('R', 'bulk.resource.org'),
    ('M', 'manual input'),
)
# A class to represent some information about each court, can be extended as needed.
class Court(models.Model):
    # Primary key is the PACER-style court code (e.g. "ca9", "scotus").
    courtUUID = models.CharField("a unique ID for each court",
        max_length=100,
        primary_key=True,
        choices=PACER_CODES)
    courtURL = models.URLField("the homepage for each court")
    courtShortName = models.CharField("the shortname for the court",
        max_length=100,
        blank=True)

    # uses the choices argument in courtUUID to create a good display of the object.
    def __unicode__(self):
        return self.get_courtUUID_display()

    class Meta:
        db_table = "Court"
        ordering = ["courtUUID"]  # this reinforces the default
# A class to represent each party that is extracted from a document
class Party(models.Model):
    partyUUID = models.AutoField("a unique ID for each party", primary_key=True)
    partyExtracted = models.CharField("a party name", max_length=100)

    def __unicode__(self):
        return self.partyExtracted

    class Meta:
        verbose_name_plural = "parties"
        db_table = "Party"
        ordering = ["partyExtracted"]
# A class to represent each judge that is extracted from a document
class Judge(models.Model):
    judgeUUID = models.AutoField("a unique ID for each judge", primary_key=True)
    court = models.ForeignKey(Court, verbose_name="the court where the judge served during this time period")
    canonicalName = models.CharField("the official name of the judge: fname, mname, lname",
        max_length=150)
    judgeAvatar = models.ImageField("the judge's face",
        upload_to="avatars/judges/%Y/%m/%d",
        blank=True)
    # Tenure bounds for this judge at this court.
    startDate = models.DateField("the start date that the judge is on the bench")
    endDate = models.DateField("the end date that the judge is on the bench")

    def __unicode__(self):
        return self.canonicalName

    class Meta:
        db_table = "Judge"
        ordering = ["court", "canonicalName"]
# A class to hold the various aliases that a judge may have, such as M. Lissner,
# Michael Jay Lissner, Michael Lissner, etc.
class JudgeAlias (models.Model):
    aliasUUID = models.AutoField("a unique ID for each alias", primary_key=True)
    judgeUUID = models.ForeignKey(Judge, verbose_name="the judge for whom we are assigning an alias")
    alias = models.CharField("a name under which the judge appears in a document", max_length=100)

    # should return something like 'Mike is mapped to Michael Lissner'
    def __unicode__(self):
        return u'%s is mapped to %s' % (self.alias, self.judgeUUID.canonicalName)

    class Meta:
        verbose_name = "judge alias"
        verbose_name_plural = "judge aliases"
        db_table = "JudgeAlias"
        ordering = ["alias"]
class Citation(models.Model):
    """Citation information for a document: case names, number, reporter cites."""
    search = SphinxSearch()
    citationUUID = models.AutoField("a unique ID for each citation",
        primary_key=True)
    caseNameShort = models.CharField("short name, as it is usually found on the court website",
        max_length=100,
        blank=True,
        db_index=True)
    caseNameFull = models.TextField("full name of the case, as found on the first page of the PDF",
        blank=True)
    caseNumber = models.CharField("the case number",
        blank=True,
        max_length=50,
        db_index=True)
    officialCitationWest = models.CharField("the citation number, as described by WestLaw",
        max_length=50,
        blank=True)
    officialCitationLexis = models.CharField("the citation number, as described by LexisNexis",
        max_length=50,
        blank=True)

    def __unicode__(self):
        if self.caseNameShort:
            return self.caseNameShort
        else:
            # Bug fix: __unicode__ must return a text object; citationUUID is
            # an integer AutoField, so coerce it rather than returning it raw.
            return u'%s' % self.citationUUID

    class Meta:
        db_table = "Citation"
        ordering = ["caseNameFull"]
class ExcerptSummary(models.Model):
    """Auto-generated excerpt and court-provided summary for a document."""
    excerptUUID = models.AutoField("a unique ID for each excerpt",
        primary_key=True)
    autoExcerpt = models.TextField("the first 100 words of the PDF file",
        blank=True)
    courtSummary = models.TextField("a summary of the document, as provided by the court itself",
        blank=True)

    def __unicode__(self):
        # Bug fix: __unicode__ must return a text object; excerptUUID is an
        # integer AutoField, so coerce it rather than returning it raw.
        return u'%s' % self.excerptUUID

    class Meta:
        verbose_name = "excerpt summary"
        verbose_name_plural = "excerpt summaries"
        db_table = "ExcerptSummary"
# A class which holds the bulk of the information regarding documents. This must
# go last, since it references the above classes
class Document(models.Model):
    search = SphinxSearch()
    documentUUID = models.AutoField("a unique ID for each document",
        primary_key=True)
    source = models.CharField("the source of the document",
        max_length=3,
        choices=DOCUMENT_SOURCES,
        blank=True)
    documentSHA1 = models.CharField("unique ID for the document, as generated via sha1 on the PDF",
        max_length=40,
        db_index=True)
    dateFiled = models.DateField("the date filed by the court",
        blank=True,
        null=True,
        db_index=True)
    court = models.ForeignKey(Court,
        verbose_name="the court where the document was filed",
        db_index=True)
    # NOTE(review): null=True has no effect on ManyToManyField and could be
    # dropped in a later schema cleanup (applies to judge and party).
    judge = models.ManyToManyField(Judge,
        verbose_name="the judges that heard the case",
        blank=True,
        null=True)
    party = models.ManyToManyField(Party,
        verbose_name="the parties that were in the case",
        blank=True,
        null=True)
    citation = models.ForeignKey(Citation,
        verbose_name="the citation information for the document",
        blank=True,
        null=True)
    excerptSummary = models.ForeignKey(ExcerptSummary,
        verbose_name="the excerpt information for the document",
        blank=True,
        null=True)
    download_URL = models.URLField("the URL on the court website where the document was originally scraped",
        verify_exists=False)
    time_retrieved = models.DateTimeField("the exact date and time stamp that the document was placed into our database",
        auto_now_add=True,
        editable=False)
    local_path = models.FileField("the location, relative to MEDIA_ROOT, where the files are stored",
        upload_to='pdf/%Y/%m/%d',
        blank=True)
    documentPlainText = models.TextField("plain text of the document after extraction from the PDF",
        blank=True)
    documentHTML = models.TextField("HTML of the document",
        blank=True)
    documentType = models.CharField("the type of document, as described by document_types.txt",
        max_length=50,
        blank=True,
        choices=DOCUMENT_STATUSES)

    def __unicode__(self):
        if self.citation:
            return self.citation.caseNameShort
        else:
            return self.documentSHA1

    @models.permalink
    def get_absolute_url(self):
        # Bug fix: the bare `except:` also swallowed unrelated errors; only the
        # missing-citation case (self.citation is None -> AttributeError on
        # .caseNameShort) should fall back to the SHA1-based URL.
        try:
            return ('viewCases', [str(self.court.courtUUID),
                str(self.citation.caseNameShort).replace('-', '_').replace(' ', '-')])
        except AttributeError:
            return ('viewCases', [str(self.court.courtUUID),
                str(self.documentSHA1)])

    class Meta:
        db_table = "Document"
        ordering = ["-time_retrieved"]
Tweak the Document model: __unicode__ now falls back to documentUUID instead of documentSHA1.
# This software and any associated files are copyright 2010 Brian Carver and
# Michael Lissner.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from djangosphinx.models import SphinxSearch
from django.db import models
import alert
# a tuple, which we'll pass to the choices argument in various places
PACER_CODES = (
    ('ca1', 'Court of Appeals for the First Circuit'),
    ('ca2', 'Court of Appeals for the Second Circuit'),
    ('ca3', 'Court of Appeals for the Third Circuit'),
    ('ca4', 'Court of Appeals for the Fourth Circuit'),
    ('ca5', 'Court of Appeals for the Fifth Circuit'),
    ('ca6', 'Court of Appeals for the Sixth Circuit'),
    ('ca7', 'Court of Appeals for the Seventh Circuit'),
    ('ca8', 'Court of Appeals for the Eighth Circuit'),
    ('ca9', 'Court of Appeals for the Ninth Circuit'),
    ('ca10', 'Court of Appeals for the Tenth Circuit'),
    ('ca11', 'Court of Appeals for the Eleventh Circuit'),
    ('cadc', 'Court of Appeals for the D.C. Circuit'),
    ('cafc', 'Court of Appeals for the Federal Circuit'),
    ('scotus', 'Supreme Court of the United States'),
)

# changes here need to be mirrored in the coverage page view and the exceptions
# list for sphinx
DOCUMENT_STATUSES = (
    ('P', 'Published/Precedential'),
    ('U', 'Unpublished/Non-Precedential'),
    ('E', 'Errata'),
    ('I', 'In-chambers'),
    ('R', 'Relating-to orders'),
)

# Provenance codes for Document.source.
DOCUMENT_SOURCES = (
    ('C', 'court website'),
    ('R', 'bulk.resource.org'),
    ('M', 'manual input'),
)
# A class to represent some information about each court, can be extended as needed.
class Court(models.Model):
    # Primary key is the PACER-style court code (e.g. "ca9", "scotus").
    courtUUID = models.CharField("a unique ID for each court",
        max_length=100,
        primary_key=True,
        choices=PACER_CODES)
    courtURL = models.URLField("the homepage for each court")
    courtShortName = models.CharField("the shortname for the court",
        max_length=100,
        blank=True)

    # uses the choices argument in courtUUID to create a good display of the object.
    def __unicode__(self):
        return self.get_courtUUID_display()

    class Meta:
        db_table = "Court"
        ordering = ["courtUUID"]  # this reinforces the default
# A class to represent each party that is extracted from a document
class Party(models.Model):
    partyUUID = models.AutoField("a unique ID for each party", primary_key=True)
    partyExtracted = models.CharField("a party name", max_length=100)

    def __unicode__(self):
        return self.partyExtracted

    class Meta:
        verbose_name_plural = "parties"
        db_table = "Party"
        ordering = ["partyExtracted"]
# A class to represent each judge that is extracted from a document
class Judge(models.Model):
    judgeUUID = models.AutoField("a unique ID for each judge", primary_key=True)
    court = models.ForeignKey(Court, verbose_name="the court where the judge served during this time period")
    canonicalName = models.CharField("the official name of the judge: fname, mname, lname",
        max_length=150)
    judgeAvatar = models.ImageField("the judge's face",
        upload_to="avatars/judges/%Y/%m/%d",
        blank=True)
    # Tenure bounds for this judge at this court.
    startDate = models.DateField("the start date that the judge is on the bench")
    endDate = models.DateField("the end date that the judge is on the bench")

    def __unicode__(self):
        return self.canonicalName

    class Meta:
        db_table = "Judge"
        ordering = ["court", "canonicalName"]
# A class to hold the various aliases that a judge may have, such as M. Lissner,
# Michael Jay Lissner, Michael Lissner, etc.
class JudgeAlias (models.Model):
    aliasUUID = models.AutoField("a unique ID for each alias", primary_key=True)
    judgeUUID = models.ForeignKey(Judge, verbose_name="the judge for whom we are assigning an alias")
    alias = models.CharField("a name under which the judge appears in a document", max_length=100)

    # should return something like 'Mike is mapped to Michael Lissner'
    def __unicode__(self):
        return u'%s is mapped to %s' % (self.alias, self.judgeUUID.canonicalName)

    class Meta:
        verbose_name = "judge alias"
        verbose_name_plural = "judge aliases"
        db_table = "JudgeAlias"
        ordering = ["alias"]
class Citation(models.Model):
    """Citation information for a document: case names, number, reporter cites."""
    search = SphinxSearch()
    citationUUID = models.AutoField("a unique ID for each citation",
        primary_key=True)
    caseNameShort = models.CharField("short name, as it is usually found on the court website",
        max_length=100,
        blank=True,
        db_index=True)
    caseNameFull = models.TextField("full name of the case, as found on the first page of the PDF",
        blank=True)
    caseNumber = models.CharField("the case number",
        blank=True,
        max_length=50,
        db_index=True)
    officialCitationWest = models.CharField("the citation number, as described by WestLaw",
        max_length=50,
        blank=True)
    officialCitationLexis = models.CharField("the citation number, as described by LexisNexis",
        max_length=50,
        blank=True)

    def __unicode__(self):
        if self.caseNameShort:
            return self.caseNameShort
        else:
            # Bug fix: __unicode__ must return a text object; citationUUID is
            # an integer AutoField, so coerce it rather than returning it raw.
            return u'%s' % self.citationUUID

    class Meta:
        db_table = "Citation"
        ordering = ["caseNameFull"]
class ExcerptSummary(models.Model):
    """Auto-generated excerpt and court-provided summary for a document."""
    excerptUUID = models.AutoField("a unique ID for each excerpt",
        primary_key=True)
    autoExcerpt = models.TextField("the first 100 words of the PDF file",
        blank=True)
    courtSummary = models.TextField("a summary of the document, as provided by the court itself",
        blank=True)

    def __unicode__(self):
        # Bug fix: __unicode__ must return a text object; excerptUUID is an
        # integer AutoField, so coerce it rather than returning it raw.
        return u'%s' % self.excerptUUID

    class Meta:
        verbose_name = "excerpt summary"
        verbose_name_plural = "excerpt summaries"
        db_table = "ExcerptSummary"
# A class which holds the bulk of the information regarding documents. This must
# go last, since it references the above classes
class Document(models.Model):
    search = SphinxSearch()
    documentUUID = models.AutoField("a unique ID for each document",
        primary_key=True)
    source = models.CharField("the source of the document",
        max_length=3,
        choices=DOCUMENT_SOURCES,
        blank=True)
    documentSHA1 = models.CharField("unique ID for the document, as generated via sha1 on the PDF",
        max_length=40,
        db_index=True)
    dateFiled = models.DateField("the date filed by the court",
        blank=True,
        null=True,
        db_index=True)
    court = models.ForeignKey(Court,
        verbose_name="the court where the document was filed",
        db_index=True)
    # NOTE(review): null=True has no effect on ManyToManyField and could be
    # dropped in a later schema cleanup (applies to judge and party).
    judge = models.ManyToManyField(Judge,
        verbose_name="the judges that heard the case",
        blank=True,
        null=True)
    party = models.ManyToManyField(Party,
        verbose_name="the parties that were in the case",
        blank=True,
        null=True)
    citation = models.ForeignKey(Citation,
        verbose_name="the citation information for the document",
        blank=True,
        null=True)
    excerptSummary = models.ForeignKey(ExcerptSummary,
        verbose_name="the excerpt information for the document",
        blank=True,
        null=True)
    download_URL = models.URLField("the URL on the court website where the document was originally scraped",
        verify_exists=False)
    time_retrieved = models.DateTimeField("the exact date and time stamp that the document was placed into our database",
        auto_now_add=True,
        editable=False)
    local_path = models.FileField("the location, relative to MEDIA_ROOT, where the files are stored",
        upload_to='pdf/%Y/%m/%d',
        blank=True)
    documentPlainText = models.TextField("plain text of the document after extraction from the PDF",
        blank=True)
    documentHTML = models.TextField("HTML of the document",
        blank=True)
    documentType = models.CharField("the type of document, as described by document_types.txt",
        max_length=50,
        blank=True,
        choices=DOCUMENT_STATUSES)

    def __unicode__(self):
        if self.citation:
            return self.citation.caseNameShort
        else:
            # Bug fix: __unicode__ must return a text object; documentUUID is
            # an integer AutoField, so coerce it rather than returning it raw.
            return u'%s' % self.documentUUID

    @models.permalink
    def get_absolute_url(self):
        # Bug fix: the bare `except:` also swallowed unrelated errors; only the
        # missing-citation case (self.citation is None -> AttributeError on
        # .caseNameShort) should fall back to the SHA1-based URL.
        try:
            return ('viewCases', [str(self.court.courtUUID),
                str(self.citation.caseNameShort).replace('-', '_').replace(' ', '-')])
        except AttributeError:
            return ('viewCases', [str(self.court.courtUUID),
                str(self.documentSHA1)])

    class Meta:
        db_table = "Document"
        ordering = ["-time_retrieved"]
|
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core wrapper.
This file contains the Keras model wrapper around an Yggdrasil model/learner.
While it can be used directly, the helper functions in keras.py /
wrapper_pre_generated.py should be preferred, as they expose the
learner-specific hyper-parameters more explicitly.
Usage example:
```python
# Indirect usage
import tensorflow_decision_forests as tfdf
model = tfdf.keras.RandomForestModel()
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(...)
model.fit(train_ds)
# Direct usage
import tensorflow_decision_forests as tfdf
model = tfdf.keras.CoreModel(learner="RANDOM_FOREST")
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(...)
model.fit(train_ds)
```
See "CoreModel" for more details
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from functools import partial # pylint: disable=g-importing-member
import inspect
import os
import tempfile
from typing import Optional, List, Dict, Any, Union, Text, Tuple, NamedTuple, Set
import uuid
from absl import logging
import tensorflow as tf
from tensorflow.python.training.tracking import base as base_tracking # pylint: disable=g-direct-tensorflow-import
from tensorflow_decision_forests.component.inspector import inspector as inspector_lib
from tensorflow_decision_forests.tensorflow import core as tf_core
from tensorflow_decision_forests.tensorflow.ops.inference import api as tf_op
from tensorflow_decision_forests.tensorflow.ops.training import op as training_op
from yggdrasil_decision_forests.dataset import data_spec_pb2
from yggdrasil_decision_forests.learner import abstract_learner_pb2
from yggdrasil_decision_forests.model import abstract_model_pb2 # pylint: disable=unused-import
from yggdrasil_decision_forests.utils.distribute.implementations.grpc import grpc_pb2 # pylint: disable=unused-import
layers = tf.keras.layers
models = tf.keras.models
optimizers = tf.keras.optimizers
losses = tf.keras.losses
backend = tf.keras.backend
# Task solved by a model (e.g. classification, regression, ranking).
Task = tf_core.Task
TaskType = "abstract_model_pb2.Task" # pylint: disable=invalid-name
# Hyper-parameters of the model represented as a dictionary of <parameter names,
# parameter values>.
HyperParameters = tf_core.HyperParameters
# A tensorflow feature column.
FeatureColumn = Any
# Semantic of a feature.
#
# The semantic of a feature defines its meaning and constrains how the feature
# is consumed by the model. For example, a feature can have a numerical or
# categorical semantic. The semantic is often related, but not equivalent, to
# the representation (e.g. float, integer, string).
#
# Each semantic supports a different set of representations and tensor
# formats, and has a specific way to represent and handle missing (and
# possibly out-of-vocabulary) values.
#
# See "smltf.Semantic" for a detailed explanation.
FeatureSemantic = tf_core.Semantic
# Feature name placeholder.
_LABEL = "__LABEL"
_RANK_GROUP = "__RANK_GROUP"
_WEIGHTS = "__WEIGHTS"
# This is the list of characters that should not be used as feature names, as
# they are not supported by SavedModel serving signatures.
_FORBIDDEN_FEATURE_CHARACTERS = " \t?%,"
# Advanced configuration for the underlying learning library.
YggdrasilDeploymentConfig = abstract_learner_pb2.DeploymentConfig
YggdrasilTrainingConfig = abstract_learner_pb2.TrainingConfig
# Get the current worker index and total number of workers.
get_worker_idx_and_num_workers = tf_core.get_worker_idx_and_num_workers
class FeatureUsage(object):
  """Semantic and hyper-parameters of a single input feature.

  Allows to (1) restrict the model to a subset of the input features, (2) set
  the semantic of a feature manually, and (3) override hyper-parameters for a
  single feature.

  The model's "features" argument is optional: if it is not set, every
  available feature is used. See the "CoreModel" class documentation for more
  details.

  Usage example:

  ```python
  # Feature "A": semantic detected automatically, global hyper-parameters.
  feature_a = FeatureUsage(name="A")

  # Feature "B" forced to be CATEGORICAL (e.g. an integer feature that would
  # otherwise be detected as NUMERICAL).
  feature_b = FeatureUsage(name="B", semantic=Semantic.CATEGORICAL)

  # Feature "C" with a specific maximum dictionary size.
  feature_c = FeatureUsage(name="C",
                           semantic=Semantic.CATEGORICAL,
                           max_vocab_count=32)

  model = CoreModel(features=[feature_a, feature_b, feature_c])
  ```

  Attributes:
    name: Name of the feature. Used as an identifier when the dataset is a
      dictionary of tensors.
    semantic: Semantic of the feature, or None to detect it automatically at
      training time. The semantic controls how the feature is interpreted by
      the model; using the wrong one (e.g. numerical instead of categorical)
      will hurt your model. See "FeatureSemantic" and "Semantic" for the
      available semantics.
    discretized: For NUMERICAL features only. If set, the numerical values
      are discretized into a small set of unique values, which makes training
      faster but often leads to worse models. A reasonable value is 255.
    max_vocab_count: For CATEGORICAL and CATEGORICAL_SET features only.
      Number of unique categorical values stored as string; less frequent
      values are grouped into an out-of-vocabulary item. Changing the value
      can improve or hurt the model.
  """

  def __init__(self,
               name: Text,
               semantic: Optional[FeatureSemantic] = None,
               discretized: Optional[int] = None,
               max_vocab_count: Optional[int] = None):
    self._name = name
    self._semantic = semantic
    self._guide = data_spec_pb2.ColumnGuide()

    categorical_semantics = (FeatureSemantic.CATEGORICAL,
                             FeatureSemantic.CATEGORICAL_SET)

    # Reject hyper-parameters that do not apply to the requested semantic.
    if discretized is not None and semantic != FeatureSemantic.NUMERICAL:
      raise ValueError("\"discretized\" only works for NUMERICAL semantic.")
    if max_vocab_count is not None and semantic not in categorical_semantics:
      raise ValueError("\"max_vocab_count\" only works for CATEGORICAL "
                       "and CATEGORICAL_SET semantic.")

    if semantic is None:
      # Leave the guide empty: the semantic is determined at training time.
      pass
    elif semantic == FeatureSemantic.NUMERICAL:
      if discretized:
        self._guide.type = data_spec_pb2.DISCRETIZED_NUMERICAL
      else:
        self._guide.type = data_spec_pb2.NUMERICAL
    elif semantic in categorical_semantics:
      self._guide.type = (
          data_spec_pb2.CATEGORICAL if semantic == FeatureSemantic.CATEGORICAL
          else data_spec_pb2.CATEGORICAL_SET)
      if max_vocab_count:
        # Note: "categorial" is the field name in the Yggdrasil proto.
        self._guide.categorial.max_vocab_count = max_vocab_count
    else:
      raise ValueError(f"Non supported semantic {semantic}")

  @property
  def guide(self) -> data_spec_pb2.ColumnGuide:  # pylint: disable=g-missing-from-attributes
    return self._guide

  @property
  def semantic(self) -> FeatureSemantic:
    return self._semantic

  @property
  def name(self) -> Text:
    return self._name
class HyperParameterTemplate(NamedTuple):
  """Named and versioned set of hyper-parameters.

  Describes a hyper-parameter set that outperforms the default
  hyper-parameters (either generally or in specific scenarios).

  Attributes:
    name: Identifier of the template.
    version: Version of the template (a template name can have several
      versions).
    parameters: Mapping from hyper-parameter name to hyper-parameter value.
    description: Human-readable description of the template.
  """

  name: str
  version: int
  parameters: Dict[str, Any]
  description: str
class AdvancedArguments(NamedTuple):
  """Advanced control of the model that most users won't need to use.

  Attributes:
    infer_prediction_signature: Instantiate the model graph after training.
      This allows the model to be saved without specifying an input signature
      and without calling "predict", "evaluate". Disabling this logic can be
      useful in two situations: (1) When the exported signature is different
      from the one used during training, (2) When using a fixed-shape
      pre-processing that consumes 1-dimensional tensors (as Keras will
      automatically expand its shape to rank 2). For example, when using
      tf.Transform.
    yggdrasil_training_config: Yggdrasil Decision Forests training
      configuration. Exposes a few extra hyper-parameters.
    yggdrasil_deployment_config: Configuration of the computing resources used
      to train the model e.g. number of threads. Does not impact the model
      quality.
    fail_on_non_keras_compatible_feature_name: If true (default), training
      will fail if one of the feature names is not compatible with part of
      the Keras API. If false, a warning will be generated instead.
    predict_single_probability_for_binary_classification: Only used for binary
      classification. If true (default), the prediction of a binary class
      model is a tensor of shape [None, 1] containing the probability of the
      positive class (value=1). If false, the prediction of a binary class
      model is a tensor of shape [None, num_classes=2] containing the
      probability of the complementary classes.
  """

  infer_prediction_signature: Optional[bool] = True
  yggdrasil_training_config: Optional[
      YggdrasilTrainingConfig] = abstract_learner_pb2.TrainingConfig()
  yggdrasil_deployment_config: Optional[
      YggdrasilDeploymentConfig] = abstract_learner_pb2.DeploymentConfig()
  fail_on_non_keras_compatible_feature_name: Optional[bool] = True
  predict_single_probability_for_binary_classification: Optional[bool] = True
class CoreModel(models.Model):
"""Keras Model V2 wrapper around an Yggdrasil Learner and Model.
Basic usage example:
```python
import tensorflow_decision_forests as tfdf
# Train a classification model with automatic feature discovery.
model = tfdf.keras.CoreModel(learner="RANDOM_FOREST")
train_ds = tf.data.Dataset.from_tensor_slices((train_x, train_y))
model.fit(train_ds)
# Evaluate the model on another dataset.
model.evaluate(test_ds)
# Show information about the model
model.summary()
# Export the model with the TF.SavedModel format.
model.save("/path/to/my/model")
```
The training logs (e.g. feature statistics, validation loss, remaining
training time) are exported to LOG(INFO). If you use a colab, make sure to
display these logs:
from colabtools import googlelog
with googlelog.CaptureLog():
model.fit(...)
Using this model has some caveats:
* Decision Forest models are not Neural Networks. Feature preprocessing that
are beneficial to neural network (normalization, one-hot encoding) can be
detrimental to decision forests. In most cases, it is best to feed the raw
features (e.g. both numerical and categorical) without preprocessing to
the model.
* During training, the entire dataset is loaded in memory (in an efficient
representation). In case of large datasets (>100M examples), it is
recommended to randomly downsample the examples.
* The model trains for exactly one epoch. The core of the training
computation is done at the end of the first epoch. The console will show
training logs (including validations losses and feature statistics).
* The model cannot make predictions before the training is done. Applying
the model before training will raise an error. During training Keras
evaluation will be invalid (the model always returns zero).
* Yggdrasil is itself a C++ model wrapper. Learners and models need to be
added as dependency to the calling code. To make things practical, the
Random Forest (without Borg distribution) and Gradient Boosted Decision
Forest learners and models are linked by default. Other model/learners
(including yours :)), needs to be added as a dependency manually.
Attributes:
task: Task to solve (e.g. CLASSIFICATION, REGRESSION, RANKING).
learner: The learning algorithm used to train the model. Possible values
include (but are not limited to) "LEARNER_*".
learner_params: Hyper-parameters for the learner. The list of available
hyper-parameters is available at: go/simple_ml/hyper_parameters.
features: Specify the list and semantic of the input features of the model.
If not specified, all the available features will be used. If specified
and if "exclude_non_specified_features=True", only the features in
"features" will be used by the model. If "preprocessing" is used,
"features" corresponds to the output of the preprocessing. In this case,
it is recommended for the preprocessing to return a dictionary of tensors.
exclude_non_specified_features: If true, only use the features specified in
"features".
preprocessing: Functional keras model or @tf.function to apply on the input
feature before the model to train. This preprocessing model can consume
and return tensors, list of tensors or dictionary of tensors. If
specified, the model only "sees" the output of the preprocessing (and not
the raw input). Can be used to prepare the features or to stack multiple
models on top of each other. Unlike preprocessing done in the tf.dataset,
the operation in "preprocessing" are serialized with the model.
postprocessing: Like "preprocessing" but applied on the model output.
ranking_group: Only for task=Task.RANKING. Name of a tf.string feature that
identifies queries in a query/document ranking task. The ranking group is
not added automatically for the set of features if
exclude_non_specified_features=false.
temp_directory: Temporary directory used to store the model Assets after the
training, and possibly as a work directory during the training. This
temporary directory is necessary for the model to be exported after
training e.g. `model.save(path)`. If not specified, `temp_directory` is
set to a temporary directory using `tempfile.TemporaryDirectory`. This
directory is deleted when the model python object is garbage-collected.
verbose: If true, displays information about the training.
advanced_arguments: Advanced control of the model that most users won't need
to use. See `AdvancedArguments` for details.
num_threads: Number of threads used to train the model. Different learning
algorithms use multi-threading differently and with different degree of
efficiency. If specified, `num_threads` field of the
`advanced_arguments.yggdrasil_deployment_config` has priority.
name: The name of the model.
max_vocab_count: Default maximum size of the vocabulary for CATEGORICAL and
CATEGORICAL_SET features stored as strings. If more unique values exist,
only the most frequent values are kept, and the remaining values are
considered as out-of-vocabulary. The value `max_vocab_count` defined in a
`FeatureUsage` (if any) takes precedence.
"""
def __init__(self,
task: Optional[TaskType] = Task.CLASSIFICATION,
learner: Optional[str] = "RANDOM_FOREST",
learner_params: Optional[HyperParameters] = None,
features: Optional[List[FeatureUsage]] = None,
exclude_non_specified_features: Optional[bool] = False,
preprocessing: Optional["models.Functional"] = None,
postprocessing: Optional["models.Functional"] = None,
ranking_group: Optional[str] = None,
temp_directory: Optional[str] = None,
verbose: Optional[bool] = True,
advanced_arguments: Optional[AdvancedArguments] = None,
num_threads: Optional[int] = 6,
name: Optional[str] = None,
max_vocab_count: Optional[int] = 2000) -> None:
super(CoreModel, self).__init__(name=name)
self._task = task
self._learner = learner
self._learner_params = learner_params
self._features = features or []
self._exclude_non_specified = exclude_non_specified_features
self._preprocessing = preprocessing
self._postprocessing = postprocessing
self._ranking_group = ranking_group
self._temp_directory = temp_directory
self._verbose = verbose
self._num_threads = num_threads
self._max_vocab_count = max_vocab_count
# Internal, indicates whether the first evaluation during training,
# triggered by providing validation data, should trigger the training
# itself.
self._train_on_evaluate: bool = False
if advanced_arguments is None:
advanced_arguments = AdvancedArguments()
self._advanced_arguments = advanced_arguments
if not self._features and exclude_non_specified_features:
raise ValueError(
"The model does not have any input features: "
"exclude_non_specified_features is True and not features are "
"provided as input.")
if self._temp_directory is None:
self._temp_directory_handle = tempfile.TemporaryDirectory()
self._temp_directory = self._temp_directory_handle.name
logging.info("Using %s as temporary training directory",
self._temp_directory)
if (self._task == Task.RANKING) != (ranking_group is not None):
raise ValueError(
"ranking_key is used iif. the task is RANKING or the loss is a "
"ranking loss")
# True iif. the model is trained.
self._is_trained = tf.Variable(False, trainable=False, name="is_trained")
# Unique ID to identify the model during training.
self._training_model_id = str(uuid.uuid4())
# The following fields contain the trained model. They are set during the
# graph construction and training process.
# The compiled Yggdrasil model.
self._model: Optional[tf_op.ModelV2] = None
# Semantic of the input features.
# Also defines what are the input features of the model.
self._semantics: Optional[Dict[Text, FeatureSemantic]] = None
# List of Yggdrasil feature identifiers i.e. feature seen by the Yggdrasil
# learner. Those are computed after the preprocessing, unfolding and
# casting.
self._normalized_input_keys: Optional[List[Text]] = None
# Textual description of the model.
self._description: Optional[Text] = None
# If the model is trained with weights.
self._weighted_training = False
def make_inspector(self) -> inspector_lib.AbstractInspector:
"""Creates an inspector to access the internal model structure.
Usage example:
```python
inspector = model.make_inspector()
print(inspector.num_trees())
print(inspector.variable_importances())
```
Returns:
A model inspector.
"""
path = self.yggdrasil_model_path_tensor().numpy().decode("utf-8")
return inspector_lib.make_inspector(path)
  @tf.function(input_signature=[])
  def yggdrasil_model_path_tensor(self) -> Optional[tf.Tensor]:
    """Gets the path to the Yggdrasil model, if available.

    The effective path can be obtained with:

    ```python
    yggdrasil_model_path_tensor().numpy().decode("utf-8")
    ```

    Returns:
      Path to the Yggdrasil model, as a string tensor.
    """
    # NOTE(review): reaches into protected members of the compiled model;
    # assumes "self._model" is already set (i.e. the model is trained or
    # loaded) — confirm callers guard this.
    return self._model._compiled_model._model_loader.get_model_path()  # pylint: disable=protected-access
def make_predict_function(self):
"""Prediction of the model (!= evaluation)."""
@tf.function(experimental_relax_shapes=True)
def predict_function_not_trained(iterator):
"""Prediction of a non-trained model. Returns "zeros"."""
data = next(iterator)
x, _, _ = tf.keras.utils.unpack_x_y_sample_weight(data)
batch_size = _batch_size(x)
return tf.zeros([batch_size, 1])
@tf.function(experimental_relax_shapes=True)
def predict_function_trained(iterator, model):
"""Prediction of a trained model.
The only difference with "super.make_predict_function()" is that
"self.predict_function" is not set and that the "distribute_strategy"
is not used.
Args:
iterator: Iterator over the dataset.
model: Model object.
Returns:
Model predictions.
"""
def run_step(data):
outputs = model.predict_step(data)
with tf.control_dependencies(_minimum_control_deps(outputs)):
model._predict_counter.assign_add(1) # pylint:disable=protected-access
return outputs
data = next(iterator)
return run_step(data)
if self._is_trained:
return partial(predict_function_trained, model=self)
else:
return predict_function_not_trained
  def make_test_function(self):
    """Returns the function computing predictions for evaluation."""

    @tf.function(experimental_relax_shapes=True)
    def test_function_not_trained(iterator):
      """Evaluation of a non-trained model: consumes a batch, reports nothing."""
      next(iterator)
      return {}

    @tf.function(experimental_relax_shapes=True)
    def step_function_trained(model, iterator):
      """Evaluation of a trained model.

      The only difference with "super.make_test_function()" is that
      "self.test_function" is not set.

      Args:
        model: Model object.
        iterator: Iterator over dataset.

      Returns:
        Evaluation metrics.
      """

      def run_step(data):
        outputs = model.test_step(data)
        # Ensure counters are updated only after the step's outputs exist.
        with tf.control_dependencies(_minimum_control_deps(outputs)):
          model._test_counter.assign_add(1)  # pylint:disable=protected-access
        return outputs

      data = next(iterator)
      outputs = model.distribute_strategy.run(run_step, args=(data,))
      outputs = _reduce_per_replica(
          outputs, self.distribute_strategy, reduction="first")
      return outputs

    if self._is_trained:
      # Special case if steps_per_execution is one.
      if (self._steps_per_execution is None or
          self._steps_per_execution.numpy().item() == 1):

        def test_function(iterator):
          """Runs a test execution with a single step."""
          return step_function_trained(self, iterator)

        if not self.run_eagerly:
          test_function = tf.function(
              test_function, experimental_relax_shapes=True)

        if self._cluster_coordinator:
          return lambda it: self._cluster_coordinator.schedule(  # pylint: disable=g-long-lambda
              test_function, args=(it,))
        else:
          return test_function

      # If we're using a coordinator, use the value of self._steps_per_execution
      # at the time the function is called/scheduled, and not when it is
      # actually executed.
      elif self._cluster_coordinator:

        def test_function(iterator, steps_per_execution):
          """Runs a test execution with multiple steps."""
          for _ in tf.range(steps_per_execution):
            outputs = step_function_trained(self, iterator)
          return outputs

        if not self.run_eagerly:
          test_function = tf.function(
              test_function, experimental_relax_shapes=True)

        return lambda it: self._cluster_coordinator.schedule(  # pylint: disable=g-long-lambda
            test_function,
            args=(it, self._steps_per_execution.value()))
      else:

        def test_function(iterator):
          """Runs a test execution with multiple steps."""
          # NOTE(review): only the last step's outputs are returned, mirroring
          # the Keras implementation this is adapted from.
          for _ in tf.range(self._steps_per_execution):
            outputs = step_function_trained(self, iterator)
          return outputs

        if not self.run_eagerly:
          test_function = tf.function(
              test_function, experimental_relax_shapes=True)
        return test_function
    else:
      return test_function_not_trained
@tf.function(experimental_relax_shapes=True)
def call(self, inputs, training=False):
"""Inference of the model.
This method is used for prediction and evaluation of a trained model.
Args:
inputs: Input tensors.
training: Is the model being trained. Always False.
Returns:
Model predictions.
"""
del training
if self._semantics is None:
logging.warning(
"The model was called directly (i.e. using `model(data)` instead of "
"using `model.predict(data)`) before being trained. The model will "
"only return zeros until trained. The output shape might change "
"after training %s", inputs)
return tf.zeros([_batch_size(inputs), 1])
assert self._semantics is not None
assert self._model is not None
if self._preprocessing is not None:
inputs = self._preprocessing(inputs)
if isinstance(inputs, dict):
# Native format
pass
elif isinstance(inputs, tf.Tensor):
assert len(self._semantics) == 1
inputs = {next(iter(self._semantics.keys())): inputs}
elif isinstance(inputs, list) or isinstance(inputs, tuple):
# Note: The name of a tensor (value.name) can change between the training
# and the inference.
inputs = {str(idx): value for idx, value in enumerate(inputs)}
else:
raise ValueError(
f"The inference input tensor is expected to be a tensor, list of "
f"tensors or a dictionary of tensors. Got {inputs} instead")
# Normalize the input tensor to match Yggdrasil requirements.
semantic_inputs = tf_core.combine_tensors_and_semantics(
inputs, self._semantics)
normalized_semantic_inputs = tf_core.normalize_inputs(semantic_inputs)
normalized_inputs, _ = tf_core.decombine_tensors_and_semantics(
normalized_semantic_inputs)
# Apply the model.
predictions = self._model.apply(normalized_inputs)
if (self._advanced_arguments
.predict_single_probability_for_binary_classification and
self._task == Task.CLASSIFICATION and
predictions.dense_predictions.shape[1] == 2):
# Yggdrasil returns the probably of both classes in binary classification.
# Keras expects only the value (logit or probability) of the "positive"
# class (value=1).
predictions = predictions.dense_predictions[:, 1:2]
else:
predictions = predictions.dense_predictions
if self._postprocessing is not None:
predictions = self._postprocessing(predictions)
return predictions
# This function should not be serialized in the SavedModel.
@base_tracking.no_automatic_dependency_tracking
@tf.function(experimental_relax_shapes=True)
def train_step(self, data):
"""Collects training examples."""
if isinstance(data, dict):
raise ValueError("No label received for training. If you used "
"`pd_dataframe_to_tf_dataset`, make sure to "
f"specify the `label` argument. data={data}")
if len(data) == 2:
train_x, train_y = data
train_weights = None
elif len(data) == 3:
train_x, train_y, train_weights = data
else:
raise ValueError(f"Unexpected data shape {data}")
if self._verbose:
logging.info("Collect training examples.\nFeatures: %s\nLabel: %s",
train_x, train_y)
if isinstance(train_x, dict):
_check_feature_names(
train_x.keys(),
self._advanced_arguments.fail_on_non_keras_compatible_feature_name)
if self._preprocessing is not None:
train_x = self._preprocessing(train_x)
if self._verbose:
logging.info("Applying preprocessing on inputs. Result: %s", train_x)
if isinstance(train_x, list) and self._features:
logging.warn(
"Using \"features\" with a pre-processing stage returning a list "
"is not recommended. Use a pre-processing stage that returns a "
"dictionary instead.")
if isinstance(train_x, dict):
# Native format
pass
elif isinstance(train_x, tf.Tensor):
train_x = {train_x.name: train_x}
elif isinstance(train_x, list) or isinstance(train_x, tuple):
# Note: The name of a tensor (value.name) can change between the training
# and the inference.
train_x = {str(idx): value for idx, value in enumerate(train_x)}
else:
raise ValueError(
f"The training input tensor is expected to be a tensor, list of "
f"tensors or a dictionary of tensors. Got {train_x} instead")
# Check the labels
if not isinstance(train_y, tf.Tensor):
raise ValueError(
f"The training label tensor is expected to be a tensor. Got {train_y}"
" instead.")
if len(train_y.shape) != 1:
if self._verbose:
logging.info("Squeezing labels to [batch_size] from [batch_size, 1].")
train_y = tf.squeeze(train_y, axis=1)
if len(train_y.shape) != 1:
raise ValueError(
"Labels can either be passed in as [batch_size, 1] or [batch_size]. "
"Invalid shape %s." % train_y.shape)
# Check the training
self._weighted_training = train_weights is not None
if self._weighted_training:
if not isinstance(train_weights, tf.Tensor):
raise ValueError(
f"The training weights tensor is expected to be a tensor. Got {train_weights}"
" instead.")
if len(train_weights.shape) != 1:
if self._verbose:
logging.info("Squeezing labels to [batch_size] from [batch_size, 1].")
train_weights = tf.squeeze(train_weights, axis=1)
if len(train_weights.shape) != 1:
raise ValueError(
"Weights can either be passed in as [batch_size, 1] or [batch_size]. "
"Invalid shape %s." % train_weights.shape)
# List the input features and their semantics.
assert self._semantics is None, "The model is already trained"
self._semantics = tf_core.infer_semantic(
train_x, {feature.name: feature.semantic for feature in self._features},
self._exclude_non_specified)
# The ranking group is not part of the features, unless specified
# explicitly.
if (self._ranking_group is not None and
self._ranking_group not in self._features and
self._ranking_group in self._semantics):
del self._semantics[self._ranking_group]
semantic_inputs = tf_core.combine_tensors_and_semantics(
train_x, self._semantics)
normalized_semantic_inputs = tf_core.normalize_inputs(semantic_inputs)
if self._verbose:
logging.info("Normalized features: %s", normalized_semantic_inputs)
self._normalized_input_keys = sorted(
list(normalized_semantic_inputs.keys()))
# Add the weights
if self._weighted_training:
normalized_semantic_inputs[_WEIGHTS] = tf_core.SemanticTensor(
tensor=tf.cast(train_weights, tf_core.NormalizedNumericalType),
semantic=tf_core.Semantic.NUMERICAL)
# Add the semantic of the label.
if self._task == Task.CLASSIFICATION:
normalized_semantic_inputs[_LABEL] = tf_core.SemanticTensor(
tensor=tf.cast(train_y, tf_core.NormalizedCategoricalIntType) +
tf_core.CATEGORICAL_INTEGER_OFFSET,
semantic=tf_core.Semantic.CATEGORICAL)
elif self._task == Task.REGRESSION:
normalized_semantic_inputs[_LABEL] = tf_core.SemanticTensor(
tensor=tf.cast(train_y, tf_core.NormalizedNumericalType),
semantic=tf_core.Semantic.NUMERICAL)
elif self._task == Task.RANKING:
normalized_semantic_inputs[_LABEL] = tf_core.SemanticTensor(
tensor=tf.cast(train_y, tf_core.NormalizedNumericalType),
semantic=tf_core.Semantic.NUMERICAL)
assert self._ranking_group is not None
if self._ranking_group not in train_x:
raise Exception(
"The ranking key feature \"{}\" is not available as an input "
"feature.".format(self._ranking_group))
normalized_semantic_inputs[_RANK_GROUP] = tf_core.SemanticTensor(
tensor=tf.cast(train_x[self._ranking_group],
tf_core.NormalizedHashType),
semantic=tf_core.Semantic.HASH)
else:
raise Exception("Non supported task {}".format(self._task))
if not self._is_trained:
# Collects the training examples.
distribution_config = tf_core.get_distribution_configuration(
self.distribute_strategy)
if distribution_config is None:
# No distribution strategy. Collecting examples in memory.
tf_core.collect_training_examples(normalized_semantic_inputs,
self._training_model_id)
else:
# Each worker collects a part of the dataset.
if not self.capabilities().support_partial_cache_dataset_format:
raise ValueError(
f"The model {type(self)} does not support training with a TF "
"Distribution strategy (i.e. model.capabilities()."
"support_partial_cache_dataset_format == False). If the dataset "
"is small, simply remove "
"the distribution strategy scope (i.e. `with strategy.scope():` "
"around the model construction). If the dataset is large, use a "
"distributed version of the model. For Example, use "
"DistributedGradientBoostedTreesModel instead of "
"GradientBoostedTreesModel.")
tf_core.collect_distributed_training_examples(
inputs=normalized_semantic_inputs,
model_id=self._training_model_id,
dataset_path=self._distributed_partial_dataset_cache_path())
# Not metrics are returned during the collection of training examples.
return {}
  def _distributed_partial_dataset_cache_path(self):
    """Directory accessible from all workers containing the partial cache."""
    # Rooted under the model's temporary directory; with distributed training
    # this location is assumed to be reachable by every worker.
    return os.path.join(self._temp_directory, "partial_dataset_cache")
  def compile(self, metrics=None):
    """Configures the model for training.

    Unlike for most Keras models, calling "compile" is optional before calling
    "fit". Only `metrics` is accepted here; decision forests take no optimizer
    or loss at this stage.

    Args:
      metrics: Metrics to report during training.

    Raises:
      ValueError: Invalid arguments.
    """
    super(CoreModel, self).compile(metrics=metrics)
def fit(self,
x=None,
y=None,
callbacks=None,
**kwargs) -> tf.keras.callbacks.History:
"""Trains the model.
The following dataset formats are supported:
1. "x" is a tf.data.Dataset containing a tuple "(features, labels)".
"features" can be a dictionary a tensor, a list of tensors or a
dictionary of tensors (recommended). "labels" is a tensor.
2. "x" is a tensor, list of tensors or dictionary of tensors containing
the input features. "y" is a tensor.
3. "x" is a numpy-array, list of numpy-arrays or dictionary of
numpy-arrays containing the input features. "y" is a numpy-array.
Unlike classical neural networks, the learning algorithm requires to scan
the training dataset exactly once. Therefore, the dataset should not be
repeated. The algorithm also does not benefit from shuffling the dataset.
Input features generally do not need to be normalized (numerical) or indexed
(categorical features stored as string). Also, missing values are well
supported (i.e. not need to replace missing values).
Pandas Dataframe can be prepared with "dataframe_to_tf_dataset":
dataset = pandas.Dataframe(...)
model.fit(pd_dataframe_to_tf_dataset(dataset, label="my_label"))
Some of the learning algorithm will support distributed training with the
ParameterServerStrategy e.g.:
with tf.distribute.experimental.ParameterServerStrategy(...).scope():
model = DistributedGradientBoostedTreesModel()
model.fit(...)
Args:
x: Training dataset (See details above for the supported formats).
y: Label of the training dataset. Only used if "x" does not contains the
labels.
callbacks: Callbacks triggered during the training.
**kwargs: Arguments passed to the core keras model's fit.
Returns:
A `History` object. Its `History.history` attribute is not yet
implemented for decision forests algorithms, and will return empty.
All other fields are filled as usual for `Keras.Mode.fit()`.
"""
self._clear_function_cache()
# Check for a Pandas Dataframe without injecting a dependency.
if str(type(x)) == "<class 'pandas.core.frame.DataFrame'>":
raise ValueError(
"`fit` cannot consume Pandas' dataframes directly. Instead, use the "
"`pd_dataframe_to_tf_dataset` utility function. For example: "
"`model.fit(tfdf.keras.pd_dataframe_to_tf_dataset(train_dataframe, "
"label=\"label_column\"))")
# If the dataset was created with "pd_dataframe_to_tf_dataset", ensure that
# the task is correctly set.
if hasattr(x, "_tfdf_task"):
dataset_task = getattr(x, "_tfdf_task")
if dataset_task != self._task:
raise ValueError(
f"The model's `task` attribute ({Task.Name(self._task)}) does "
"not match the `task` attribute passed to "
f"`pd_dataframe_to_tf_dataset` ({Task.Name(dataset_task)}).")
# Call "compile" if the user forgot to do so.
if not self._is_compiled:
self.compile()
if "epochs" in kwargs:
if kwargs["epochs"] != 1:
raise ValueError("all decision forests algorithms train with only 1 " +
"epoch, epochs={} given".format(kwargs["epochs"]))
del kwargs["epochs"] # Not needed since we force it to 1 below.
# This callback will trigger the training at the end of the first epoch.
callbacks = [_TrainerCallBack(self)] + (callbacks if callbacks else [])
# We want the model trained before any evaluation is done at the
# end of the epoch. This may fail in case any of the `on_train_batch_*`
# callbacks calls `evaluate()` before the end of the 1st epoch.
self._train_on_evaluate = True
try:
history = super(CoreModel, self).fit(
x=x, y=y, epochs=1, callbacks=callbacks, **kwargs)
finally:
self._train_on_evaluate = False
self._build(x)
return history
  def fit_on_dataset_path(
      self,
      train_path: str,
      label_key: str,
      weight_key: Optional[str] = None,
      ranking_key: Optional[str] = None,
      valid_path: Optional[str] = None,
      dataset_format: Optional[str] = "csv",
      max_num_scanned_rows_to_accumulate_statistics: Optional[int] = 100_000):
    """Trains the model on a dataset stored on disk.

    This solution is generally more efficient and easier than loading the
    dataset with a tf.Dataset, both for local and distributed training.

    Usage example:

      # Local training
      model = keras.GradientBoostedTreesModel()
      model.fit_on_dataset_path(
          train_path="/path/to/dataset.csv",
          label_key="label",
          dataset_format="csv")
      model.save("/model/path")

      # Distributed training
      with tf.distribute.experimental.ParameterServerStrategy(...).scope():
        model = keras.DistributedGradientBoostedTreesModel()
      model.fit_on_dataset_path(
          train_path="/path/to/dataset@10",
          label_key="label",
          dataset_format="tfrecord+tfe")
      model.save("/model/path")

    Args:
      train_path: Path to the training dataset. Supports comma separated files,
        shard and glob notation.
      label_key: Name of the label column.
      weight_key: Name of the weighing column.
      ranking_key: Name of the ranking column.
      valid_path: Path to the validation dataset. If not provided, or if the
        learning algorithm does not support/need a validation dataset,
        `valid_path` is ignored.
      dataset_format: Format of the dataset. Should be one of the registered
        dataset formats (see
        https://github.com/google/yggdrasil-decision-forests/blob/main/documentation/user_manual.md#dataset-path-and-format
        for more details). The format "csv" is always available but it is
        generally only suited for small datasets.
      max_num_scanned_rows_to_accumulate_statistics: Maximum number of examples
        to scan to determine the statistics of the features (i.e. the dataspec,
        e.g. mean value, dictionaries). (Currently) the "first" examples of the
        dataset are scanned (e.g. the first examples of the dataset if it is a
        single file). Therefore, it is important that the sampled dataset is
        relatively uniformly sampled; notably, the scanned examples should
        contain all the possible categorical values (otherwise an unseen value
        will be treated as out-of-vocabulary). If set to None, the entire
        dataset is scanned. This parameter has no effect if the dataset is
        stored in a format that already contains those values.

    Returns:
      A `History` object. Its `History.history` attribute is not yet
      implemented for decision forests algorithms, and will return empty.
      All other fields are filled as usual for `Keras.Mode.fit()`.
    """

    if self._verbose:
      logging.info("Training on dataset %s", train_path)

    self._clear_function_cache()

    # Call "compile" if the user forgot to do so.
    if not self._is_compiled:
      self.compile()

    train_model_path = self._temp_directory
    model_path = os.path.join(train_model_path, "model")

    # Create the dataspec guide controlling how raw columns are interpreted
    # (semantics, vocabulary sizes, ...).
    guide = data_spec_pb2.DataSpecificationGuide(
        ignore_columns_without_guides=self._exclude_non_specified,
        max_num_scanned_rows_to_accumulate_statistics=max_num_scanned_rows_to_accumulate_statistics
    )
    guide.default_column_guide.categorial.max_vocab_count = self._max_vocab_count

    # One column guide per user-specified input feature.
    self._normalized_input_keys = []
    for feature in self._features:
      col_guide = copy.deepcopy(feature.guide)
      col_guide.column_name_pattern = tf_core.normalize_inputs_regexp(
          feature.name)
      guide.column_guides.append(col_guide)
      self._normalized_input_keys.append(feature.name)

    # The label column's semantic depends on the task.
    label_guide = data_spec_pb2.ColumnGuide(
        column_name_pattern=tf_core.normalize_inputs_regexp(label_key))
    if self._task == Task.CLASSIFICATION:
      label_guide.type = data_spec_pb2.CATEGORICAL
      # Keep every label value, however rare.
      label_guide.categorial.min_vocab_frequency = 0
      label_guide.categorial.max_vocab_count = -1
    elif self._task == Task.REGRESSION:
      label_guide.type = data_spec_pb2.NUMERICAL
    elif self._task == Task.RANKING:
      # Ranking relevance labels are numerical.
      label_guide.type = data_spec_pb2.NUMERICAL
    else:
      raise ValueError(
          "Non implemented task with \"fit_on_dataset_path\"."
          f" Use a different task or train with \"fit\".", self._task)
    guide.column_guides.append(label_guide)

    if ranking_key:
      ranking_guide = data_spec_pb2.ColumnGuide(
          column_name_pattern=tf_core.normalize_inputs_regexp(ranking_key),
          type=data_spec_pb2.HASH)
      guide.column_guides.append(ranking_guide)

    if weight_key:
      weight_guide = data_spec_pb2.ColumnGuide(
          column_name_pattern=tf_core.normalize_inputs_regexp(weight_key),
          type=data_spec_pb2.NUMERICAL)
      guide.column_guides.append(weight_guide)

    # Deployment configuration
    deployment_config = copy.deepcopy(
        self._advanced_arguments.yggdrasil_deployment_config)
    if not deployment_config.HasField("num_threads"):
      deployment_config.num_threads = self._num_threads

    distribution_config = tf_core.get_distribution_configuration(
        self.distribute_strategy)
    logging.info("distribution_config: %s", distribution_config)

    # Distributed training requires the learner to support the partial cache
    # dataset format.
    if distribution_config is not None and not self.capabilities(
    ).support_partial_cache_dataset_format:
      raise ValueError(
          f"The model {type(self)} does not support training with a TF "
          "Distribution strategy (i.e. model.capabilities()."
          "support_partial_cache_dataset_format == False). If the dataset "
          "is small, simply remove the distribution strategy scope (i.e. `with "
          "strategy.scope():` around the model construction). If the dataset "
          "is large, use a distributed version of the model. For Example, use "
          "DistributedGradientBoostedTreesModel instead of "
          "GradientBoostedTreesModel.")

    # Train the model.
    tf_core.train_on_file_dataset(
        train_dataset_path=dataset_format + ":" + train_path,
        valid_dataset_path=(dataset_format + ":" +
                            valid_path) if valid_path else None,
        feature_ids=self._normalized_input_keys,
        label_id=label_key,
        weight_id=weight_key,
        model_id=self._training_model_id,
        model_dir=train_model_path,
        learner=self._learner,
        task=self._task,
        generic_hparms=tf_core.hparams_dict_to_generic_proto(
            self._learner_params),
        ranking_group=ranking_key,
        keep_model_in_resource=True,
        guide=guide,
        training_config=self._advanced_arguments.yggdrasil_training_config,
        deployment_config=deployment_config,
        working_cache_path=os.path.join(self._temp_directory, "working_cache"),
        distribution_config=distribution_config)

    if self._verbose:
      logging.info("Training done. Finalizing the model.")

    # Request and store a description of the model.
    self._description = training_op.SimpleMLShowModel(
        model_identifier=self._training_model_id).numpy().decode("utf-8")
    training_op.SimpleMLUnloadModel(model_identifier=self._training_model_id)

    # Build the model's graph from the trained Yggdrasil model on disk.
    inspector = inspector_lib.make_inspector(model_path)
    self._set_from_yggdrasil_model(inspector, model_path)

    # Build the model history from the Yggdrasil training logs.
    history = tf.keras.callbacks.History()
    history.model = self
    history.on_train_begin()
    training_logs = inspector.training_logs()
    if training_logs is not None:
      for src_logs in training_logs:
        if src_logs.evaluation is not None:
          history.on_epoch_end(src_logs.num_trees,
                               src_logs.evaluation.to_dict())
    self.history = history
    return self.history
def save(self, filepath: str, overwrite: Optional[bool] = True, **kwargs):
"""Saves the model as a TensorFlow SavedModel.
The exported SavedModel contains a standalone Yggdrasil Decision Forests
model in the "assets" sub-directory. The Yggdrasil model can be used
directly using the Yggdrasil API. However, this model does not contain the
"preprocessing" layer (if any).
Args:
filepath: Path to the output model.
overwrite: If true, override an already existing model. If false, raise an
error if a model already exist.
**kwargs: Arguments passed to the core keras model's save.
"""
# TF does not override assets when exporting a model in a directory already
# containing a model. In such case, we need to remove the initial assets
# directory manually.
# Only the assets directory is removed (instead of the whole "filepath") in
# case this directory contains important files.
assets_dir = os.path.join(filepath, "assets")
saved_model_file = os.path.join(filepath, "saved_model.pb")
if tf.io.gfile.exists(saved_model_file) and tf.io.gfile.exists(assets_dir):
if overwrite:
tf.io.gfile.rmtree(assets_dir)
else:
raise ValueError(
f"A model already exist as {filepath}. Use an empty directory "
"or set overwrite=True")
super(CoreModel, self).save(
filepath=filepath, overwrite=overwrite, **kwargs)
def evaluate(self, *args, **kwargs):
"""Returns the loss value & metrics values for the model.
See details on `keras.Model.evaluate`.
Args:
*args: Passed to `keras.Model.evaluate`.
**kwargs: Passed to `keras.Model.evaluate`. Scalar test loss (if the
model has a single output and no metrics) or list of scalars (if the
model has multiple outputs and/or metrics). See details in
`keras.Model.evaluate`.
"""
if self._train_on_evaluate:
if not self._is_trained.numpy():
self._train_model()
else:
raise ValueError(
"evaluate() requested training of an already trained model -- "
"did you call `Model.evaluate` from a `on_train_batch*` callback ?"
"this is not yet supported in Decision Forests models, where one "
"can only evaluate after the first epoch is finished and the "
"model trained")
return super(CoreModel, self).evaluate(*args, **kwargs)
def summary(self, line_length=None, positions=None, print_fn=None):
"""Shows information about the model."""
super(CoreModel, self).summary(
line_length=line_length, positions=positions, print_fn=print_fn)
if print_fn is None:
print_fn = print
if self._model is not None:
print_fn(self._description)
@staticmethod
def predefined_hyperparameters() -> List[HyperParameterTemplate]:
"""Returns a better than default set of hyper-parameters.
They can be used directly with the `hyperparameter_template` argument of the
model constructor.
These hyper-parameters outperforms the default hyper-parameters (either
generally or in specific scenarios). Like default hyper-parameters, existing
pre-defined hyper-parameters cannot change.
"""
return []
# TODO(b/205971333): Use Trace Protocol For TF DF custom types to avoid
# clearing the cache.
def _clear_function_cache(self):
"""Clear the @tf.function cache and force re-tracing.
"""
# pylint: disable=protected-access
if self.call._stateful_fn:
self.call._stateful_fn._function_cache.clear()
# pylint: enable=protected-access
def _extract_sample(self, x):
"""Extracts a sample (e.g.
batch, row) from the training dataset.
Returns None is the sample cannot be extracted.
Args:
x: Training dataset in the same format as "fit".
Returns:
A sample.
"""
if isinstance(x, tf.data.Dataset):
return x.take(1)
try:
# Work for numpy array and TensorFlow Tensors.
return tf.nest.map_structure(lambda v: v[0:1], x)
except Exception: # pylint: disable=broad-except
pass
try:
# Works for list of primitives.
if isinstance(x, list) and isinstance(x[0],
(int, float, str, bytes, bool)):
return x[0:1]
except Exception: # pylint: disable=broad-except
pass
logging.warning("Dataset sampling not implemented for %s", x)
return None
def _build(self, x):
"""Build the internal graph similarly as "build" for classical Keras models.
Compared to the classical build, supports features with dtypes != float32.
Args:
x: Training dataset in the same format as "fit".
"""
# Note: Build does not support dtypes other than float32.
super(CoreModel, self).build([])
# Force the creation of the graph.
# If a sample cannot be extracted, the graph will be built at the first call
# to "predict" or "evaluate".
if self._advanced_arguments.infer_prediction_signature:
sample = self._extract_sample(x)
if sample is not None:
self.predict(sample)
  def _train_model(self):
    """Effectively trains the model on the already collected training data.

    Dispatches to in-process training or, when a TF distribution strategy is
    configured, to distributed training from the partial dataset cache.
    """

    if self._normalized_input_keys is None:
      raise Exception("The training graph was not built.")

    train_model_path = self._temp_directory
    model_path = os.path.join(train_model_path, "model")

    # Create the dataspec guide.
    guide = data_spec_pb2.DataSpecificationGuide()
    guide.default_column_guide.categorial.max_vocab_count = self._max_vocab_count
    # One column guide per user-specified input feature.
    for feature in self._features:
      col_guide = copy.deepcopy(feature.guide)
      col_guide.column_name_pattern = tf_core.normalize_inputs_regexp(
          feature.name)
      guide.column_guides.append(col_guide)

    # Deployment configuration
    deployment_config = copy.deepcopy(
        self._advanced_arguments.yggdrasil_deployment_config)
    if not deployment_config.HasField("num_threads"):
      deployment_config.num_threads = self._num_threads

    distribution_config = tf_core.get_distribution_configuration(
        self.distribute_strategy)

    if distribution_config is None:
      # Train the model.
      # The model will be exported to "train_model_path".
      #
      # Note: It would be possible to train and load the model without saving
      # the model to file.
      tf_core.train(
          input_ids=self._normalized_input_keys,
          label_id=_LABEL,
          weight_id=_WEIGHTS if self._weighted_training else None,
          model_id=self._training_model_id,
          model_dir=train_model_path,
          learner=self._learner,
          task=self._task,
          generic_hparms=tf_core.hparams_dict_to_generic_proto(
              self._learner_params),
          ranking_group=_RANK_GROUP if self._task == Task.RANKING else None,
          keep_model_in_resource=True,
          guide=guide,
          training_config=self._advanced_arguments.yggdrasil_training_config,
          deployment_config=deployment_config,
      )
    else:
      # Distributed training: flush the dataset cache collected by the workers,
      # then train from the cached dataset on disk.
      tf_core.finalize_distributed_dataset_collection(
          cluster_coordinator=self._cluster_coordinator,
          input_ids=self._normalized_input_keys + [_LABEL] +
          ([_WEIGHTS] if self._weighted_training else []),
          model_id=self._training_model_id,
          dataset_path=self._distributed_partial_dataset_cache_path())

      tf_core.train_on_file_dataset(
          train_dataset_path="partial_dataset_cache:" +
          self._distributed_partial_dataset_cache_path(),
          valid_dataset_path=None,
          feature_ids=self._normalized_input_keys,
          label_id=_LABEL,
          weight_id=_WEIGHTS if self._weighted_training else None,
          model_id=self._training_model_id,
          model_dir=train_model_path,
          learner=self._learner,
          task=self._task,
          generic_hparms=tf_core.hparams_dict_to_generic_proto(
              self._learner_params),
          ranking_group=_RANK_GROUP if self._task == Task.RANKING else None,
          keep_model_in_resource=True,
          guide=guide,
          training_config=self._advanced_arguments.yggdrasil_training_config,
          deployment_config=deployment_config,
          working_cache_path=os.path.join(self._temp_directory,
                                          "working_cache"),
          distribution_config=distribution_config,
      )

    # Request and store a description of the model.
    self._description = training_op.SimpleMLShowModel(
        model_identifier=self._training_model_id).numpy().decode("utf-8")
    training_op.SimpleMLUnloadModel(model_identifier=self._training_model_id)

    self._is_trained.assign(True)

    # Load and optimize the model in memory.
    # Register the model as a SavedModel asset.
    self._model = tf_op.ModelV2(model_path=model_path, verbose=False)
  def _set_from_yggdrasil_model(self,
                                inspector: inspector_lib.AbstractInspector,
                                path: str):
    """Initializes this Keras model from a trained Yggdrasil model on disk.

    Args:
      inspector: Inspector opened on the Yggdrasil model.
      path: Directory containing the Yggdrasil model.
    """

    if not self._is_compiled:
      self.compile()

    # Recover the input feature semantics from the model's dataspec.
    features = inspector.features()
    semantics = {
        feature.name: tf_core.column_type_to_semantic(feature.type)
        for feature in features
    }
    self._semantics = semantics
    self._normalized_input_keys = sorted(list(semantics.keys()))
    self._is_trained.assign(True)
    self._model = tf_op.ModelV2(model_path=path, verbose=False)

    # Creates a toy batch to initialize the Keras model. The values are not
    # used.
    examples = {}
    for feature in features:
      if feature.type == data_spec_pb2.ColumnType.NUMERICAL:
        examples[feature.name] = tf.constant([1.0, 2.0])
      elif feature.type == data_spec_pb2.ColumnType.CATEGORICAL:
        # Already-integerized categorical features are fed as integers,
        # the others as strings.
        if inspector.dataspec.columns[
            feature.col_idx].categorical.is_already_integerized:
          examples[feature.name] = tf.constant([1, 2])
        else:
          examples[feature.name] = tf.constant(["a", "b"])
      elif feature.type == data_spec_pb2.ColumnType.CATEGORICAL_SET:
        # Categorical sets are ragged (variable number of items per example).
        if inspector.dataspec.columns[
            feature.col_idx].categorical.is_already_integerized:
          examples[feature.name] = tf.ragged.constant([[1, 2], [3]],
                                                      dtype=tf.int32)
        else:
          examples[feature.name] = tf.ragged.constant([["a", "b"], ["c"]],
                                                      dtype=tf.string)
      elif feature.type == data_spec_pb2.ColumnType.BOOLEAN:
        examples[feature.name] = tf.constant([0.0, 1.0])
      else:
        raise ValueError("Non supported feature type")

    # Tracing "predict" on the toy batch builds the inference graph.
    self.predict(tf.data.Dataset.from_tensor_slices(examples).batch(2))
@staticmethod
def capabilities() -> abstract_learner_pb2.LearnerCapabilities:
"""Lists the capabilities of the learning algorithm."""
return abstract_learner_pb2.LearnerCapabilities()
class _TrainerCallBack(tf.keras.callbacks.Callback):
  """Callback that trains the model at the end of the first epoch."""

  def __init__(self, model: CoreModel):
    self._model = model

  def on_epoch_end(self, epoch, logs=None):
    del logs
    # Only the end of the first epoch triggers training, and only if the
    # model has not been trained already (e.g. by an earlier evaluation).
    if epoch != 0 or self._model._is_trained.numpy():  # pylint:disable=protected-access
      return
    self._model._train_model()  # pylint:disable=protected-access

    # The model is now trained; subsequent evaluations must not attempt to
    # retrain it.
    self._model._train_on_evaluate = False  # pylint:disable=protected-access
def _batch_size(inputs: Union[tf.Tensor, Dict[str, tf.Tensor]]) -> tf.Tensor:
  """Gets the batch size of a tensor or dictionary of tensors.

  Assumes that all the tensors have the same batchsize.

  Args:
    inputs: Dict of tensors.

  Returns:
    The batch size.

  Raises:
    ValueError: Invalid arguments.
  """

  if not isinstance(inputs, dict):
    return tf.shape(inputs)[0]
  # Any entry works since all tensors share the same leading dimension.
  for tensor in inputs.values():
    return tf.shape(tensor)[0]
  raise ValueError("Empty input")
def pd_dataframe_to_tf_dataset(dataframe,
                               label: Optional[str] = None,
                               task: Optional[TaskType] = Task.CLASSIFICATION,
                               max_num_classes: Optional[int] = 100,
                               in_place: Optional[bool] = False,
                               fix_feature_names: Optional[bool] = True,
                               weight: Optional[str] = None) -> tf.data.Dataset:
  """Converts a Panda Dataframe into a TF Dataset compatible with Keras.

  Details:
    - Ensures columns have uniform types.
    - If "label" is provided, separate it as a second channel in the tf.Dataset
      (as expected by Keras).
    - If "weight" is provided, separate it as a third channel in the tf.Dataset
      (as expected by Keras).
    - If "task" is provided, ensure the correct dtype of the label. If the task
      is a classification and the label a string, integerize the labels. In this
      case, the label values are extracted from the dataset and ordered
      lexicographically. Warning: This logic won't work as expected if the
      training and testing datasets contain different label values. In such
      case, it is preferable to convert the label to integers beforehand while
      making sure the same encoding is used for all the datasets.
    - Returns "tf.data.from_tensor_slices"

  Args:
    dataframe: Pandas dataframe containing a training or evaluation dataset.
    label: Name of the label column.
    task: Target task of the dataset.
    max_num_classes: Maximum number of classes for a classification task. A high
      number of unique value / classes might indicate that the problem is a
      regression or a ranking instead of a classification. Set to None to
      disable checking the number of classes.
    in_place: If false (default), the input `dataframe` will not be modified by
      `pd_dataframe_to_tf_dataset`. However, a copy of the dataset memory will
      be made. If true, the dataframe will be modified in place.
    fix_feature_names: Some feature names are not supported by the SavedModel
      signature. If `fix_feature_names=True` (default) the feature will be
      renamed and made compatible. If `fix_feature_names=False`, the feature
      name will not be changed, but exporting the model might fail (i.e.
      `model.save(...)`).
    weight: Optional name of a column in `dataframe` to use to weight the
      training.

  Returns:
    A TensorFlow Dataset.
  """

  if not in_place:
    dataframe = dataframe.copy(deep=True)

  if label is not None:
    if label not in dataframe.columns:
      raise ValueError(
          f"The label \"{label}\" is not a column of the dataframe.")

    if task == Task.CLASSIFICATION:
      classification_classes = dataframe[label].unique().tolist()
      if len(classification_classes) > max_num_classes:
        raise ValueError(
            f"The number of unique classes ({len(classification_classes)}) "
            f"exceeds max_num_classes ({max_num_classes}). A high number of "
            "unique value / classes might indicate that the problem is a "
            "regression or a ranking instead of a classification. If this "
            "problem is effectively a classification problem, increase "
            "`max_num_classes`.")

      if dataframe[label].dtypes in [str, object]:
        # Integerize string labels by their lexicographic rank.
        classification_classes.sort()
        dataframe[label] = dataframe[label].map(classification_classes.index)
      elif dataframe[label].dtypes in [int, float]:
        if (dataframe[label] < 0).any():
          raise ValueError(
              "Negative integer classification label found. Make sure "
              "you label values are positive or stored as string.")

  if weight is not None:
    if weight not in dataframe.columns:
      raise ValueError(
          f"The weight \"{weight}\" is not a column of the dataframe.")

  if fix_feature_names:
    # Rename the features so they are compatible with SaveModel serving
    # signatures.
    rename_mapping = {}
    new_names = set()
    change_any_feature_name = False
    for column in dataframe:
      new_name = column
      for forbidden_character in _FORBIDDEN_FEATURE_CHARACTERS:
        if forbidden_character in new_name:
          change_any_feature_name = True
          new_name = new_name.replace(forbidden_character, "_")
      # Add a tailing "_" until there are not feature name collisions.
      while new_name in new_names:
        new_name += "_"
        change_any_feature_name = True

      rename_mapping[column] = new_name
      new_names.add(new_name)

    dataframe = dataframe.rename(columns=rename_mapping)
    if change_any_feature_name:
      logging.warning(
          "Some of the feature names have been changed automatically to be "
          "compatible with SavedModels because fix_feature_names=True.")

  # Make sure that missing values for string columns are not represented as
  # float(NaN).
  for col in dataframe.columns:
    if dataframe[col].dtype in [str, object]:
      dataframe[col] = dataframe[col].fillna("")

  if label is not None:
    # Bug fix: pass `axis` by keyword. The positional form `drop(label, 1)`
    # was deprecated in pandas 1.0 and removed in pandas 2.0.
    features_dataframe = dataframe.drop(label, axis=1)
    if weight is not None:
      features_dataframe = features_dataframe.drop(weight, axis=1)
      output = (dict(features_dataframe), dataframe[label].values,
                dataframe[weight].values)
    else:
      output = (dict(features_dataframe), dataframe[label].values)
    tf_dataset = tf.data.Dataset.from_tensor_slices(output)
  else:
    if weight is not None:
      raise ValueError(
          "\"weight\" is only supported if the \"label\" is also provided")
    tf_dataset = tf.data.Dataset.from_tensor_slices(dict(dataframe))

  # The batch size does not impact the training of TF-DF.
  tf_dataset = tf_dataset.batch(64)

  # Tag the dataset with the task so "fit" can check consistency with the
  # model's task.
  setattr(tf_dataset, "_tfdf_task", task)
  return tf_dataset
def yggdrasil_model_to_keras_model(src_path: str, dst_path: str):
  """Converts an Yggdrasil model into a Keras model."""

  inspector = inspector_lib.make_inspector(src_path)
  objective = inspector.objective()

  # Ranking models additionally need the ranking group column.
  is_ranking = objective.task == inspector_lib.Task.RANKING
  model = CoreModel(
      task=objective.task,
      learner="MANUAL",
      ranking_group=objective.group if is_ranking else None)

  model._set_from_yggdrasil_model(inspector, src_path)  # pylint: disable=protected-access

  model.save(dst_path)
def _list_explicit_arguments(func):
"""Function decorator that adds an "explicit_args" with the explicit args."""
arguments = inspect.getfullargspec(func)[0]
def wrapper(*args, **kargs):
kargs["explicit_args"] = set(
list(arguments[:len(args)]) + list(kargs.keys()))
return func(*args, **kargs)
return wrapper
def _parse_hp_template(template_name) -> Tuple[str, Optional[int]]:
"""Parses a template name as specified by the user.
Template can versionned:
"my_template@v5" -> Returns (my_template, 5)
or non versionned:
"my_template" -> Returns (my_template, None)
Args:
template_name: User specified template.
Returns:
Base template name and version.
"""
malformed_msg = (f"The template \"{template_name}\" is malformed. Expecting "
"\"{template}@v{version}\" or \"{template}")
if "@" in template_name:
# Template with version.
parts = template_name.split("@v")
if len(parts) != 2:
raise ValueError(malformed_msg)
base_name = parts[0]
try:
version = int(parts[1])
except:
raise ValueError(malformed_msg)
return base_name, version
else:
# Template without version?
return template_name, None
def _get_matching_template(
    template_name: str,
    all_templates: List[HyperParameterTemplate]) -> HyperParameterTemplate:
  """Returns the template that matches a template name.

  Args:
    template_name: User specified template.
    all_templates: Candidate templates.

  Returns:
    The matching template.
  """

  # Split the user request into a base name and an optional version.
  template_base, template_version = _parse_hp_template(template_name)

  if template_version is None:
    # No version requested: among the templates with the right name, pick the
    # one with the highest version.
    candidates = [
        template for template in all_templates
        if template.name == template_base
    ]
    if not candidates:
      available = list(set([template.name for template in all_templates]))
      raise ValueError(f"No template is matching {template_name}. "
                       f"Available template names are: {available}")
    return max(candidates, key=lambda template: template.version)

  # A specific version was requested: require an exact (name, version) match.
  candidates = [
      template for template in all_templates if
      template.name == template_base and template.version == template_version
  ]
  if not candidates:
    available = [
        f"{template.name}@v{template.version}" for template in all_templates
    ]
    raise ValueError(f"No template is matching {template_name}. "
                     f"The available templates are: {available}")
  if len(candidates) > 1:
    raise ValueError("Internal error. Multiple matching templates")
  return candidates[0]
def _apply_hp_template(parameters: Dict[str, Any], template_name: str,
                       all_templates: List[HyperParameterTemplate],
                       explicit_parameters: Set[str]) -> Dict[str, Any]:
  """Applies the hyper-parameter template to the user+default parameters.

  Looks for a template called "template_name" (if "template_name" is a
  versioned template e.g. "name@v5") or for the latest (highest version)
  template (if "template_name" is a non versioned template e.g. "name").

  Once the template is found, merges "parameters" and the template according
  to the user parameters i.e. the final value is (in order of importance):
  user parameters > template parameters > default parameters.

  Args:
    parameters: User and default hyper-parameters.
    template_name: Name of the template as specified by the user.
    all_templates: All the available templates.
    explicit_parameters: Set of parameters (in parameters) defined by the user.

  Returns:
    The merged hyper-parameters.
  """

  template = _get_matching_template(template_name, all_templates)
  logging.info("Resolve hyper-parameter template \"%s\" to \"%s@v%d\" -> %s.",
               template_name, template.name, template.version,
               template.parameters)

  # Override, in place, every known parameter that the user did not set
  # explicitly with the template's value.
  for key, template_value in template.parameters.items():
    if key in parameters and key not in explicit_parameters:
      parameters[key] = template_value

  return parameters
def _check_feature_names(feature_names: List[str], raise_error: bool):
"""Checks if the features names are compatible with all of the Keras API."""
def problem(reason):
full_reason = (
"One or more feature names are not compatible with the Keras API: "
f"{reason} This problem can be solved in one of two ways: (1; "
"Recommended) Rename the features to be compatible. You can use "
"the argument `fix_feature_names=True` if you are using "
"`pd_dataframe_to_tf_dataset`. (2) Disable this error message "
"(`fail_on_non_keras_compatible_feature_name=False`) and only use part"
" of the compatible Keras API.")
if raise_error:
raise ValueError(full_reason)
else:
logging.warning(full_reason)
# List of character forbidden in a serving signature name.
for feature_name in feature_names:
if not feature_name:
problem("One of the feature names is empty.")
for character in _FORBIDDEN_FEATURE_CHARACTERS:
if character in feature_name:
problem(f"The feature name \"{feature_name}\" contains a forbidden "
"character ({_FORBIDDEN_FEATURE_CHARACTERS}).")
# The following section is a copy of internal Keras functions that are not
# available in the public api.
#
# Keras does not allow projects to depend on the internal api.
# pylint: disable=g-doc-args
# pylint: disable=g-doc-return-or-yield
def _minimum_control_deps(outputs):
  """Returns the minimum control dependencies to ensure step succeeded.

  This function is a strict copy of the function of the same name in the keras
  private API:
  third_party/tensorflow/python/keras/engine/training.py
  """

  if tf.executing_eagerly():
    return []  # Control dependencies not needed.
  outputs = tf.nest.flatten(outputs, expand_composites=True)
  for out in outputs:
    # Variables can't be control dependencies.
    if not isinstance(out, tf.Variable):
      return [out]  # Return first Tensor or Op from outputs.
  return []  # No viable Tensor or Op to use for control deps.
def _expand_1d(data):
  """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s.

  This function is a strict copy of the function of the same name in the keras
  private API:
  third_party/tensorflow/python/keras/engine/data_adapter.py
  """

  def _expand_single_1d_tensor(t):
    # Leaves `CompositeTensor`s (e.g. ragged/sparse tensors) as-is; only
    # dense rank-1 tensors gain a trailing axis.
    if (isinstance(t, tf.Tensor) and isinstance(t.shape, tf.TensorShape) and
        t.shape.rank == 1):
      return tf.expand_dims(t, axis=-1)
    return t

  return tf.nest.map_structure(_expand_single_1d_tensor, data)
def _write_scalar_summaries(logs, step):
  """Writes the scalar entries of `logs` as TF summaries at `step`.

  Args:
    logs: Mapping of metric name to value; non-scalar values are skipped.
    step: Step at which to record the summaries.
  """
  for name, value in logs.items():
    if _is_scalar(value):
      # Bug fix: `tf.scalar` does not exist in the TF public API; the summary
      # writing op is `tf.summary.scalar`.
      tf.summary.scalar("batch_" + name, value, step=step)
def _is_scalar(x):
  """True if `x` is a rank-0 TF tensor or variable."""
  if not isinstance(x, (tf.Tensor, tf.Variable)):
    return False
  return x.shape.rank == 0
def _is_per_replica_instance(obj):
  """True if `obj` is a per-replica distributed value (composite tensor)."""
  is_distributed = isinstance(obj, tf.distribute.DistributedValues)
  return is_distributed and isinstance(obj, tf.__internal__.CompositeTensor)
def _reduce_per_replica(values, strategy, reduction="first"):
  """Reduce PerReplica objects.

  Args:
    values: Structure of `PerReplica` objects or `Tensor`s. `Tensor`s are
      returned as-is.
    strategy: `tf.distribute.Strategy` object.
    reduction: Only "first" is implemented here; any other value (including
      the "concat" mentioned by the error message) raises ValueError.

  Returns:
    Structure of `Tensor`s.
  """

  def _reduce(v):
    """Reduce a single `PerReplica` object."""
    if not _is_per_replica_instance(v):
      return v
    elif reduction == "first":
      # Keep only the first replica's value.
      return strategy.unwrap(v)[0]
    else:
      raise ValueError('`reduction` must be "first" or "concat". Received: '
                       f"reduction={reduction}.")

  return tf.nest.map_structure(_reduce, values)
# pylint: enable=g-doc-args
# pylint: enable=g-doc-return-or-yield
Internal change
PiperOrigin-RevId: 409990582
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core wrapper.
This file contains the Keras model wrapper around a Yggdrasil model/learner.
While it can be used directly, the helper functions in keras.py /
wrapper_pre_generated.py should be preferred as they expose more directly the
learner-specific hyper-parameters.
Usage example:
```python
# Indirect usage
import tensorflow_decision_forests as tfdf
model = tfdf.keras.RandomForestModel()
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(...)
model.fit(train_ds)
# Direct usage
import tensorflow_decision_forests as tfdf
model = tfdf.keras.CoreModel(learner="RANDOM_FOREST")
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(...)
model.fit(train_ds)
```
See "CoreModel" for more details
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import copy
import functools
from functools import partial  # pylint: disable=g-importing-member
import inspect
import os
import tempfile
from typing import Optional, List, Dict, Any, Union, Text, Tuple, NamedTuple, Set
import uuid

from absl import logging
import tensorflow as tf

from tensorflow.python.training.tracking import base as base_tracking  # pylint: disable=g-direct-tensorflow-import
from tensorflow_decision_forests.component.inspector import inspector as inspector_lib
from tensorflow_decision_forests.tensorflow import core as tf_core
from tensorflow_decision_forests.tensorflow.ops.inference import api as tf_op
from tensorflow_decision_forests.tensorflow.ops.training import op as training_op
from yggdrasil_decision_forests.dataset import data_spec_pb2
from yggdrasil_decision_forests.learner import abstract_learner_pb2
from yggdrasil_decision_forests.model import abstract_model_pb2  # pylint: disable=unused-import
from yggdrasil_decision_forests.utils.distribute.implementations.grpc import grpc_pb2  # pylint: disable=unused-import
# Aliases to the Keras sub-modules used throughout this file.
layers = tf.keras.layers
models = tf.keras.models
optimizers = tf.keras.optimizers
losses = tf.keras.losses
backend = tf.keras.backend

# Task solved by a model (e.g. classification, regression, ranking).
Task = tf_core.Task
# Forward reference (as a string) to the proto enum backing "Task".
TaskType = "abstract_model_pb2.Task"  # pylint: disable=invalid-name

# Hyper-parameters of the model represented as a dictionary of <parameter names,
# parameter values>.
HyperParameters = tf_core.HyperParameters

# A tensorflow feature column.
FeatureColumn = Any

# Semantic of a feature.
#
# The semantic of a feature defines its meaning and constrains how the feature
# is consumed by the model. For example, a feature can have a numerical or
# categorical semantic. The semantic is often related, but not equivalent, to
# the representation (e.g. float, integer, string).
#
# Each semantic supports a different set of representations and tensor formats,
# and has a specific way to represent and handle missing (and possibly
# out-of-vocabulary) values.
#
# See "tf_core.Semantic" for a detailed explanation.
FeatureSemantic = tf_core.Semantic

# Reserved column names injected next to the input features when assembling
# the training dataset.
_LABEL = "__LABEL"
_RANK_GROUP = "__RANK_GROUP"
_WEIGHTS = "__WEIGHTS"

# This is the list of characters that should not be used as feature name as
# they are not supported by SavedModel serving signatures.
_FORBIDDEN_FEATURE_CHARACTERS = " \t?%,"

# Advanced configuration for the underlying learning library.
YggdrasilDeploymentConfig = abstract_learner_pb2.DeploymentConfig
YggdrasilTrainingConfig = abstract_learner_pb2.TrainingConfig

# Get the current worker index and total number of workers.
get_worker_idx_and_num_workers = tf_core.get_worker_idx_and_num_workers
class FeatureUsage(object):
  """Semantic and hyper-parameters of a single input feature.

  This class makes it possible to:

  1. Restrict the set of input features used by the model.
  2. Force the semantic of a feature instead of relying on auto-detection.
  3. Attach feature-specific hyper-parameters.

  Note that the model's "features" argument is optional; when omitted, every
  available feature is used. See the "CoreModel" class documentation for
  details.

  Usage example:

  ```python
  # A feature named "A". The semantic will be detected automatically. The
  # global hyper-parameters of the model will be used.
  feature_a = FeatureUsage(name="A")

  # A feature named "B" forced to be CATEGORICAL (it might otherwise be
  # detected as NUMERICAL if stored as integers).
  feature_b = FeatureUsage(name="B", semantic=Semantic.CATEGORICAL)

  # A categorical feature with a bounded dictionary size.
  feature_c = FeatureUsage(name="C",
                           semantic=Semantic.CATEGORICAL,
                           max_vocab_count=32)

  model = CoreModel(features=[feature_a, feature_b, feature_c])
  ```

  Attributes:
    name: Identifier of the feature (the key when the dataset is a dictionary
      of tensors).
    semantic: Semantic of the feature, or None to auto-detect it at training
      time. Using the wrong semantic (e.g. numerical instead of categorical)
      will hurt the model.
    discretized: NUMERICAL features only. If set, values are bucketed into a
      small set of unique values, trading model quality for training speed.
      A reasonable value is 255.
    max_vocab_count: CATEGORICAL and CATEGORICAL_SET features only. Maximum
      number of unique string values kept; rarer values are folded into an
      out-of-vocabulary item.
  """

  def __init__(self,
               name: Text,
               semantic: Optional[FeatureSemantic] = None,
               discretized: Optional[int] = None,
               max_vocab_count: Optional[int] = None):

    self._name = name
    self._semantic = semantic
    self._guide = data_spec_pb2.ColumnGuide()

    # Reject hyper-parameters incompatible with the requested semantic.
    if discretized is not None and semantic != FeatureSemantic.NUMERICAL:
      raise ValueError("\"discretized\" only works for NUMERICAL semantic.")

    categorical_semantics = (FeatureSemantic.CATEGORICAL,
                             FeatureSemantic.CATEGORICAL_SET)
    if max_vocab_count is not None and semantic not in categorical_semantics:
      raise ValueError("\"max_vocab_count\" only works for CATEGORICAL "
                       "and CATEGORICAL_SET semantic.")

    if semantic is None:
      # Semantic inferred at training time; nothing to record in the guide.
      return

    if semantic == FeatureSemantic.NUMERICAL:
      if discretized:
        self._guide.type = data_spec_pb2.DISCRETIZED_NUMERICAL
      else:
        self._guide.type = data_spec_pb2.NUMERICAL
    elif semantic in categorical_semantics:
      if semantic == FeatureSemantic.CATEGORICAL:
        self._guide.type = data_spec_pb2.CATEGORICAL
      else:
        self._guide.type = data_spec_pb2.CATEGORICAL_SET
      if max_vocab_count:
        # Note: "categorial" is the (historically misspelled) proto field name.
        self._guide.categorial.max_vocab_count = max_vocab_count
    else:
      raise ValueError("Non supported semantic {}".format(semantic))

  @property
  def guide(self) -> data_spec_pb2.ColumnGuide:  # pylint: disable=g-missing-from-attributes
    return self._guide

  @property
  def semantic(self) -> FeatureSemantic:
    return self._semantic

  @property
  def name(self) -> Text:
    return self._name
class HyperParameterTemplate(NamedTuple):
  """Named and versioned set of hyper-parameters.

  Templates are entries in a list of hyper-parameter sets that outperform the
  default hyper-parameters (either generally or in specific scenarios).

  Attributes:
    name: Identifier of the template.
    version: Version of the template; larger versions are more recent.
    parameters: Hyper-parameter <name, value> pairs applied by the template.
    description: Human readable description of the template.
  """

  name: str
  version: int
  parameters: Dict[str, Any]
  description: str
class AdvancedArguments(NamedTuple):
  """Advanced control of the model that most users won't need to use.

  Attributes:
    infer_prediction_signature: Instantiate the model graph after training.
      This allows the model to be saved without specifying an input signature
      and without calling "predict", "evaluate". Disabling this logic can be
      useful in two situations: (1) When the exported signature is different
      from the one used during training, (2) When using a fixed-shape
      pre-processing that consumes 1 dimensional tensors (as keras will
      automatically expand its shape to rank 2). For example, when using
      tf.Transform.
    yggdrasil_training_config: Yggdrasil Decision Forests training
      configuration. Exposes a few extra hyper-parameters.
    yggdrasil_deployment_config: Configuration of the computing resources used
      to train the model e.g. number of threads. Does not impact the model
      quality.
    fail_on_non_keras_compatible_feature_name: If true (default), training
      will fail if one of the feature names is not compatible with part of the
      Keras API. If false, a warning will be generated instead.
    predict_single_probability_for_binary_classification: Only used for binary
      classification. If true (default), the prediction of a binary class
      model is a tensor of shape [None, 1] containing the probability of the
      positive class (value=1). If false, the prediction of a binary class
      model is a tensor of shape [None, num_classes=2] containing the
      probability of the complementary classes.
  """

  infer_prediction_signature: Optional[bool] = True
  # NOTE(review): the proto defaults below are created once, at
  # class-definition time, and are therefore shared by every
  # AdvancedArguments instance that relies on them. Mutating one of these
  # defaults in place affects all such instances — confirm callers never do.
  yggdrasil_training_config: Optional[
      YggdrasilTrainingConfig] = abstract_learner_pb2.TrainingConfig()
  yggdrasil_deployment_config: Optional[
      YggdrasilDeploymentConfig] = abstract_learner_pb2.DeploymentConfig()
  fail_on_non_keras_compatible_feature_name: Optional[bool] = True
  predict_single_probability_for_binary_classification: Optional[bool] = True
class CoreModel(models.Model):
"""Keras Model V2 wrapper around an Yggdrasil Learner and Model.
Basic usage example:
```python
import tensorflow_decision_forests as tfdf
# Train a classification model with automatic feature discovery.
model = tfdf.keras.CoreModel(learner="RANDOM_FOREST")
train_ds = tf.data.Dataset.from_tensor_slices((train_x, train_y))
model.fit(train_ds)
# Evaluate the model on another dataset.
model.evaluate(test_ds)
# Show information about the model
model.summary()
# Export the model with the TF.SavedModel format.
model.save("/path/to/my/model")
```
The training logs (e.g. feature statistics, validation loss, remaining
training time) are exported to LOG(INFO). If you use a colab, make sure to
display these logs:
from colabtools import googlelog
with googlelog.CaptureLog():
model.fit(...)
Using this model has some caveats:
* Decision Forest models are not Neural Networks. Feature preprocessing that
are beneficial to neural network (normalization, one-hot encoding) can be
detrimental to decision forests. In most cases, it is best to feed the raw
features (e.g. both numerical and categorical) without preprocessing to
the model.
* During training, the entire dataset is loaded in memory (in an efficient
representation). In case of large datasets (>100M examples), it is
recommended to randomly downsample the examples.
* The model trains for exactly one epoch. The core of the training
computation is done at the end of the first epoch. The console will show
training logs (including validations losses and feature statistics).
* The model cannot make predictions before the training is done. Applying
the model before training will raise an error. During training Keras
evaluation will be invalid (the model always returns zero).
* Yggdrasil is itself a C++ model wrapper. Learners and models need to be
added as dependency to the calling code. To make things practical, the
Random Forest (without Borg distribution) and Gradient Boosted Decision
Forest learners and models are linked by default. Other model/learners
(including yours :)), needs to be added as a dependency manually.
Attributes:
task: Task to solve (e.g. CLASSIFICATION, REGRESSION, RANKING).
learner: The learning algorithm used to train the model. Possible values
include (but are not limited to) "LEARNER_*".
learner_params: Hyper-parameters for the learner. The list of available
hyper-parameters is available at: go/simple_ml/hyper_parameters.
features: Specify the list and semantic of the input features of the model.
If not specified, all the available features will be used. If specified
and if "exclude_non_specified_features=True", only the features in
"features" will be used by the model. If "preprocessing" is used,
"features" corresponds to the output of the preprocessing. In this case,
it is recommended for the preprocessing to return a dictionary of tensors.
exclude_non_specified_features: If true, only use the features specified in
"features".
preprocessing: Functional keras model or @tf.function to apply on the input
feature before the model to train. This preprocessing model can consume
and return tensors, list of tensors or dictionary of tensors. If
specified, the model only "sees" the output of the preprocessing (and not
the raw input). Can be used to prepare the features or to stack multiple
models on top of each other. Unlike preprocessing done in the tf.dataset,
the operation in "preprocessing" are serialized with the model.
postprocessing: Like "preprocessing" but applied on the model output.
ranking_group: Only for task=Task.RANKING. Name of a tf.string feature that
identifies queries in a query/document ranking task. The ranking group is
not added automatically for the set of features if
exclude_non_specified_features=false.
temp_directory: Temporary directory used to store the model Assets after the
training, and possibly as a work directory during the training. This
temporary directory is necessary for the model to be exported after
training e.g. `model.save(path)`. If not specified, `temp_directory` is
set to a temporary directory using `tempfile.TemporaryDirectory`. This
directory is deleted when the model python object is garbage-collected.
verbose: If true, displays information about the training.
advanced_arguments: Advanced control of the model that most users won't need
to use. See `AdvancedArguments` for details.
num_threads: Number of threads used to train the model. Different learning
algorithms use multi-threading differently and with different degree of
efficiency. If specified, `num_threads` field of the
`advanced_arguments.yggdrasil_deployment_config` has priority.
name: The name of the model.
max_vocab_count: Default maximum size of the vocabulary for CATEGORICAL and
CATEGORICAL_SET features stored as strings. If more unique values exist,
only the most frequent values are kept, and the remaining values are
considered as out-of-vocabulary. The value `max_vocab_count` defined in a
`FeatureUsage` (if any) takes precedence.
"""
def __init__(self,
             task: Optional[TaskType] = Task.CLASSIFICATION,
             learner: Optional[str] = "RANDOM_FOREST",
             learner_params: Optional[HyperParameters] = None,
             features: Optional[List[FeatureUsage]] = None,
             exclude_non_specified_features: Optional[bool] = False,
             preprocessing: Optional["models.Functional"] = None,
             postprocessing: Optional["models.Functional"] = None,
             ranking_group: Optional[str] = None,
             temp_directory: Optional[str] = None,
             verbose: Optional[bool] = True,
             advanced_arguments: Optional[AdvancedArguments] = None,
             num_threads: Optional[int] = 6,
             name: Optional[str] = None,
             max_vocab_count: Optional[int] = 2000) -> None:
  """Initializes the model wrapper. See the class docstring for arguments."""
  super(CoreModel, self).__init__(name=name)

  self._task = task
  self._learner = learner
  self._learner_params = learner_params
  self._features = features or []
  self._exclude_non_specified = exclude_non_specified_features
  self._preprocessing = preprocessing
  self._postprocessing = postprocessing
  self._ranking_group = ranking_group
  self._temp_directory = temp_directory
  self._verbose = verbose
  self._num_threads = num_threads
  self._max_vocab_count = max_vocab_count

  # Internal, indicates whether the first evaluation during training,
  # triggered by providing validation data, should trigger the training
  # itself.
  self._train_on_evaluate: bool = False

  if advanced_arguments is None:
    advanced_arguments = AdvancedArguments()
  self._advanced_arguments = advanced_arguments

  if not self._features and exclude_non_specified_features:
    # Fix: error message previously read "and not features are provided".
    raise ValueError(
        "The model does not have any input features: "
        "exclude_non_specified_features is True and no features are "
        "provided as input.")

  if self._temp_directory is None:
    # Keep a handle so the directory lives as long as the model object; it is
    # deleted when the handle is garbage-collected.
    self._temp_directory_handle = tempfile.TemporaryDirectory()
    self._temp_directory = self._temp_directory_handle.name
    logging.info("Using %s as temporary training directory",
                 self._temp_directory)

  if (self._task == Task.RANKING) != (ranking_group is not None):
    # Fix: the message previously referenced a non-existent "ranking_key"
    # argument ("ranking_key is used iif. ..."); the argument is
    # "ranking_group".
    raise ValueError(
        "\"ranking_group\" must be provided if and only if the task is "
        "RANKING.")

  # True iff. the model is trained.
  self._is_trained = tf.Variable(False, trainable=False, name="is_trained")

  # Unique ID to identify the model during training.
  self._training_model_id = str(uuid.uuid4())

  # The following fields contain the trained model. They are set during the
  # graph construction and training process.

  # The compiled Yggdrasil model.
  self._model: Optional[tf_op.ModelV2] = None

  # Semantic of the input features.
  # Also defines what are the input features of the model.
  self._semantics: Optional[Dict[Text, FeatureSemantic]] = None

  # List of Yggdrasil feature identifiers i.e. feature seen by the Yggdrasil
  # learner. Those are computed after the preprocessing, unfolding and
  # casting.
  self._normalized_input_keys: Optional[List[Text]] = None

  # Textual description of the model.
  self._description: Optional[Text] = None

  # If the model is trained with weights.
  self._weighted_training = False
def make_inspector(self) -> inspector_lib.AbstractInspector:
  """Builds an inspector exposing the trained model's internal structure.

  Usage example:

  ```python
  inspector = model.make_inspector()
  print(inspector.num_trees())
  print(inspector.variable_importances())
  ```

  Returns:
    A model inspector.
  """
  raw_path = self.yggdrasil_model_path_tensor()
  model_directory = raw_path.numpy().decode("utf-8")
  return inspector_lib.make_inspector(model_directory)
@tf.function(input_signature=[])
def yggdrasil_model_path_tensor(self) -> Optional[tf.Tensor]:
  """Gets the path to the Yggdrasil model, if available.

  The effective path can be obtained with:

  ```python
  yggdrasil_model_path_tensor().numpy().decode("utf-8")
  ```

  Returns:
    Path to the Yggdrasil model, as a string tensor.
  """
  # Reaches through the compiled model's private loader; there is no public
  # accessor for the on-disk model location.
  return self._model._compiled_model._model_loader.get_model_path()  # pylint: disable=protected-access
def make_predict_function(self):
  """Returns the function used by `predict` (!= evaluation).

  Before the model is trained the returned function outputs zeros; after
  training it runs "predict_step" directly, without going through the
  distribution strategy (unlike the base Keras implementation).
  """

  @tf.function(experimental_relax_shapes=True)
  def predict_function_not_trained(iterator):
    """Prediction of a non-trained model. Returns "zeros"."""

    data = next(iterator)
    x, _, _ = tf.keras.utils.unpack_x_y_sample_weight(data)
    batch_size = _batch_size(x)
    # Placeholder output: one prediction column per example.
    return tf.zeros([batch_size, 1])

  @tf.function(experimental_relax_shapes=True)
  def predict_function_trained(iterator, model):
    """Prediction of a trained model.

    The only difference with "super.make_predict_function()" is that
    "self.predict_function" is not set and that the "distribute_strategy"
    is not used.

    Args:
      iterator: Iterator over the dataset.
      model: Model object.

    Returns:
      Model predictions.
    """

    def run_step(data):
      outputs = model.predict_step(data)
      # Order the counter increment after the prediction outputs.
      with tf.control_dependencies(_minimum_control_deps(outputs)):
        model._predict_counter.assign_add(1)  # pylint:disable=protected-access
      return outputs

    data = next(iterator)
    return run_step(data)

  # NOTE(review): "_is_trained" is a tf.Variable; its truthiness here reads
  # the variable's current value eagerly — assumes this method is only called
  # outside a tf.function trace. TODO confirm.
  if self._is_trained:
    return partial(predict_function_trained, model=self)
  else:
    return predict_function_not_trained
def make_test_function(self):
  """Returns the function used for evaluation (predictions for `evaluate`).

  Before training, the returned function only consumes the iterator and
  reports no metrics. After training, it mirrors Keras'
  `Model.make_test_function`, except that the result is not cached in
  "self.test_function".
  """

  @tf.function(experimental_relax_shapes=True)
  def test_function_not_trained(iterator):
    """Evaluation of a non-trained model."""

    next(iterator)
    return {}

  @tf.function(experimental_relax_shapes=True)
  def step_function_trained(model, iterator):
    """Evaluation of a trained model.

    The only difference with "super.make_test_function()" is that
    "self.test_function" is not set.

    Args:
      model: Model object.
      iterator: Iterator over dataset.

    Returns:
      Evaluation metrics.
    """

    def run_step(data):
      outputs = model.test_step(data)
      # Order the counter increment after the evaluation outputs.
      with tf.control_dependencies(_minimum_control_deps(outputs)):
        model._test_counter.assign_add(1)  # pylint:disable=protected-access
      return outputs

    data = next(iterator)
    outputs = model.distribute_strategy.run(run_step, args=(data,))
    # NOTE(review): uses "model.distribute_strategy" above but
    # "self.distribute_strategy" below — identical when model is self, which
    # is how this function is invoked everywhere in this method.
    outputs = _reduce_per_replica(
        outputs, self.distribute_strategy, reduction="first")
    return outputs

  if self._is_trained:
    # Special case if steps_per_execution is one.
    if (self._steps_per_execution is None or
        self._steps_per_execution.numpy().item() == 1):

      def test_function(iterator):
        """Runs a test execution with a single step."""
        return step_function_trained(self, iterator)

      if not self.run_eagerly:
        test_function = tf.function(
            test_function, experimental_relax_shapes=True)

      if self._cluster_coordinator:
        return lambda it: self._cluster_coordinator.schedule(  # pylint: disable=g-long-lambda
            test_function, args=(it,))
      else:
        return test_function

    # If we're using a coordinator, use the value of self._steps_per_execution
    # at the time the function is called/scheduled, and not when it is
    # actually executed.
    elif self._cluster_coordinator:

      def test_function(iterator, steps_per_execution):
        """Runs a test execution with multiple steps."""
        for _ in tf.range(steps_per_execution):
          outputs = step_function_trained(self, iterator)
        return outputs

      if not self.run_eagerly:
        test_function = tf.function(
            test_function, experimental_relax_shapes=True)

      return lambda it: self._cluster_coordinator.schedule(  # pylint: disable=g-long-lambda
          test_function,
          args=(it, self._steps_per_execution.value()))
    else:

      def test_function(iterator):
        """Runs a test execution with multiple steps."""
        for _ in tf.range(self._steps_per_execution):
          outputs = step_function_trained(self, iterator)
        return outputs

      if not self.run_eagerly:
        test_function = tf.function(
            test_function, experimental_relax_shapes=True)
      return test_function
  else:
    return test_function_not_trained
@tf.function(experimental_relax_shapes=True)
def call(self, inputs, training=False):
  """Inference of the model.

  This method is used for prediction and evaluation of a trained model.

  Args:
    inputs: Input tensors.
    training: Is the model being trained. Always False.

  Returns:
    Model predictions.
  """
  del training  # The model is only called for inference.

  # Before training, the input semantics are unknown; return placeholder
  # zeros so Keras graph construction does not fail.
  if self._semantics is None:
    logging.warning(
        "The model was called directly (i.e. using `model(data)` instead of "
        "using `model.predict(data)`) before being trained. The model will "
        "only return zeros until trained. The output shape might change "
        "after training %s", inputs)
    return tf.zeros([_batch_size(inputs), 1])

  assert self._semantics is not None
  assert self._model is not None

  # Apply the user preprocessing (serialized with the model), if any.
  if self._preprocessing is not None:
    inputs = self._preprocessing(inputs)

  # Normalize "inputs" into a dictionary of tensors.
  if isinstance(inputs, dict):
    # Native format
    pass
  elif isinstance(inputs, tf.Tensor):
    # A single-tensor model has exactly one semantic; reuse its key.
    assert len(self._semantics) == 1
    inputs = {next(iter(self._semantics.keys())): inputs}
  elif isinstance(inputs, list) or isinstance(inputs, tuple):
    # Note: The name of a tensor (value.name) can change between the training
    # and the inference.
    inputs = {str(idx): value for idx, value in enumerate(inputs)}
  else:
    raise ValueError(
        f"The inference input tensor is expected to be a tensor, list of "
        f"tensors or a dictionary of tensors. Got {inputs} instead")

  # Normalize the input tensor to match Yggdrasil requirements.
  semantic_inputs = tf_core.combine_tensors_and_semantics(
      inputs, self._semantics)

  normalized_semantic_inputs = tf_core.normalize_inputs(semantic_inputs)

  normalized_inputs, _ = tf_core.decombine_tensors_and_semantics(
      normalized_semantic_inputs)

  # Apply the model.
  predictions = self._model.apply(normalized_inputs)

  if (self._advanced_arguments
      .predict_single_probability_for_binary_classification and
      self._task == Task.CLASSIFICATION and
      predictions.dense_predictions.shape[1] == 2):
    # Yggdrasil returns the probability of both classes in binary
    # classification. Keras expects only the value (logit or probability) of
    # the "positive" class (value=1).
    predictions = predictions.dense_predictions[:, 1:2]
  else:
    predictions = predictions.dense_predictions

  # Apply the user postprocessing, if any.
  if self._postprocessing is not None:
    predictions = self._postprocessing(predictions)

  return predictions
# This function should not be serialized in the SavedModel.
@base_tracking.no_automatic_dependency_tracking
@tf.function(experimental_relax_shapes=True)
def train_step(self, data):
  """Collects training examples.

  Despite its name, this step does not train the model: the actual training
  happens once, at the end of the first epoch (see `fit`). Each call
  accumulates the incoming batch (and the inferred feature semantics) for the
  Yggdrasil learner.

  Args:
    data: A batch as "(features, labels)" or "(features, labels, weights)".

  Returns:
    An empty dictionary: no metrics are available until the model is trained.

  Raises:
    ValueError: If the batch is malformed (missing label, unsupported tensor
      structure, or invalid label/weight shapes).
  """
  if isinstance(data, dict):
    raise ValueError("No label received for training. If you used "
                     "`pd_dataframe_to_tf_dataset`, make sure to "
                     f"specify the `label` argument. data={data}")
  if len(data) == 2:
    train_x, train_y = data
    train_weights = None
  elif len(data) == 3:
    train_x, train_y, train_weights = data
  else:
    raise ValueError(f"Unexpected data shape {data}")

  if self._verbose:
    logging.info("Collect training examples.\nFeatures: %s\nLabel: %s",
                 train_x, train_y)

  if isinstance(train_x, dict):
    _check_feature_names(
        train_x.keys(),
        self._advanced_arguments.fail_on_non_keras_compatible_feature_name)

  if self._preprocessing is not None:
    train_x = self._preprocessing(train_x)
    if self._verbose:
      logging.info("Applying preprocessing on inputs. Result: %s", train_x)
    if isinstance(train_x, list) and self._features:
      # Fix: "logging.warn" is a deprecated alias of "logging.warning".
      logging.warning(
          "Using \"features\" with a pre-processing stage returning a list "
          "is not recommended. Use a pre-processing stage that returns a "
          "dictionary instead.")

  # Normalize "train_x" into a dictionary of tensors.
  if isinstance(train_x, dict):
    # Native format
    pass
  elif isinstance(train_x, tf.Tensor):
    train_x = {train_x.name: train_x}
  elif isinstance(train_x, list) or isinstance(train_x, tuple):
    # Note: The name of a tensor (value.name) can change between the training
    # and the inference.
    train_x = {str(idx): value for idx, value in enumerate(train_x)}
  else:
    raise ValueError(
        f"The training input tensor is expected to be a tensor, list of "
        f"tensors or a dictionary of tensors. Got {train_x} instead")

  # Check the labels.
  if not isinstance(train_y, tf.Tensor):
    raise ValueError(
        f"The training label tensor is expected to be a tensor. Got {train_y}"
        " instead.")

  if len(train_y.shape) != 1:
    if self._verbose:
      logging.info("Squeezing labels to [batch_size] from [batch_size, 1].")
    train_y = tf.squeeze(train_y, axis=1)

  if len(train_y.shape) != 1:
    raise ValueError(
        "Labels can either be passed in as [batch_size, 1] or [batch_size]. "
        "Invalid shape %s." % train_y.shape)

  # Check the training weights.
  self._weighted_training = train_weights is not None
  if self._weighted_training:
    if not isinstance(train_weights, tf.Tensor):
      raise ValueError(
          f"The training weights tensor is expected to be a tensor. Got {train_weights}"
          " instead.")
    if len(train_weights.shape) != 1:
      if self._verbose:
        # Fix: this message previously said "labels" while squeezing weights.
        logging.info(
            "Squeezing weights to [batch_size] from [batch_size, 1].")
      train_weights = tf.squeeze(train_weights, axis=1)
    if len(train_weights.shape) != 1:
      raise ValueError(
          "Weights can either be passed in as [batch_size, 1] or [batch_size]. "
          "Invalid shape %s." % train_weights.shape)

  # List the input features and their semantics.
  assert self._semantics is None, "The model is already trained"
  self._semantics = tf_core.infer_semantic(
      train_x, {feature.name: feature.semantic for feature in self._features},
      self._exclude_non_specified)

  # The ranking group is not part of the features, unless specified
  # explicitly.
  # Fix: compare against feature *names*; the previous
  # "self._ranking_group not in self._features" compared a string against
  # FeatureUsage objects and was therefore always True, so an
  # explicitly-specified ranking feature was still removed.
  if (self._ranking_group is not None and
      self._ranking_group not in [f.name for f in self._features] and
      self._ranking_group in self._semantics):
    del self._semantics[self._ranking_group]

  semantic_inputs = tf_core.combine_tensors_and_semantics(
      train_x, self._semantics)

  normalized_semantic_inputs = tf_core.normalize_inputs(semantic_inputs)
  if self._verbose:
    logging.info("Normalized features: %s", normalized_semantic_inputs)

  self._normalized_input_keys = sorted(
      list(normalized_semantic_inputs.keys()))

  # Add the weights.
  if self._weighted_training:
    normalized_semantic_inputs[_WEIGHTS] = tf_core.SemanticTensor(
        tensor=tf.cast(train_weights, tf_core.NormalizedNumericalType),
        semantic=tf_core.Semantic.NUMERICAL)

  # Add the semantic of the label.
  if self._task == Task.CLASSIFICATION:
    # Classification labels are stored as categorical integers, shifted by
    # the categorical integer offset.
    normalized_semantic_inputs[_LABEL] = tf_core.SemanticTensor(
        tensor=tf.cast(train_y, tf_core.NormalizedCategoricalIntType) +
        tf_core.CATEGORICAL_INTEGER_OFFSET,
        semantic=tf_core.Semantic.CATEGORICAL)
  elif self._task == Task.REGRESSION:
    normalized_semantic_inputs[_LABEL] = tf_core.SemanticTensor(
        tensor=tf.cast(train_y, tf_core.NormalizedNumericalType),
        semantic=tf_core.Semantic.NUMERICAL)
  elif self._task == Task.RANKING:
    normalized_semantic_inputs[_LABEL] = tf_core.SemanticTensor(
        tensor=tf.cast(train_y, tf_core.NormalizedNumericalType),
        semantic=tf_core.Semantic.NUMERICAL)
    assert self._ranking_group is not None
    if self._ranking_group not in train_x:
      raise Exception(
          "The ranking key feature \"{}\" is not available as an input "
          "feature.".format(self._ranking_group))
    normalized_semantic_inputs[_RANK_GROUP] = tf_core.SemanticTensor(
        tensor=tf.cast(train_x[self._ranking_group],
                       tf_core.NormalizedHashType),
        semantic=tf_core.Semantic.HASH)
  else:
    raise Exception("Non supported task {}".format(self._task))

  if not self._is_trained:
    # Collects the training examples.
    distribution_config = tf_core.get_distribution_configuration(
        self.distribute_strategy)
    if distribution_config is None:
      # No distribution strategy. Collecting examples in memory.
      tf_core.collect_training_examples(normalized_semantic_inputs,
                                        self._training_model_id)
    else:
      # Each worker collects a part of the dataset.
      if not self.capabilities().support_partial_cache_dataset_format:
        raise ValueError(
            f"The model {type(self)} does not support training with a TF "
            "Distribution strategy (i.e. model.capabilities()."
            "support_partial_cache_dataset_format == False). If the dataset "
            "is small, simply remove "
            "the distribution strategy scope (i.e. `with strategy.scope():` "
            "around the model construction). If the dataset is large, use a "
            "distributed version of the model. For Example, use "
            "DistributedGradientBoostedTreesModel instead of "
            "GradientBoostedTreesModel.")
      tf_core.collect_distributed_training_examples(
          inputs=normalized_semantic_inputs,
          model_id=self._training_model_id,
          dataset_path=self._distributed_partial_dataset_cache_path())

  # No metrics are returned during the collection of training examples.
  return {}
def _distributed_partial_dataset_cache_path(self):
  """Path of the partial dataset cache, accessible from all workers."""
  cache_dir = os.path.join(self._temp_directory, "partial_dataset_cache")
  return cache_dir
def compile(self, metrics=None):
  """Configures the model for training.

  Unlike most Keras models, calling "compile" before "fit" is optional.

  Args:
    metrics: Metrics reported during training.

  Raises:
    ValueError: Invalid arguments.
  """
  super(CoreModel, self).compile(metrics=metrics)
def fit(self,
        x=None,
        y=None,
        callbacks=None,
        **kwargs) -> tf.keras.callbacks.History:
  """Trains the model.

  The following dataset formats are supported:

  1. "x" is a tf.data.Dataset containing a tuple "(features, labels)".
     "features" can be a dictionary a tensor, a list of tensors or a
     dictionary of tensors (recommended). "labels" is a tensor.

  2. "x" is a tensor, list of tensors or dictionary of tensors containing
     the input features. "y" is a tensor.

  3. "x" is a numpy-array, list of numpy-arrays or dictionary of
     numpy-arrays containing the input features. "y" is a numpy-array.

  Unlike classical neural networks, the learning algorithm requires to scan
  the training dataset exactly once. Therefore, the dataset should not be
  repeated. The algorithm also does not benefit from shuffling the dataset.

  Input features generally do not need to be normalized (numerical) or
  indexed (categorical features stored as string). Also, missing values are
  well supported (i.e. no need to replace missing values).

  Pandas Dataframes can be prepared with "pd_dataframe_to_tf_dataset":
    dataset = pandas.Dataframe(...)
    model.fit(pd_dataframe_to_tf_dataset(dataset, label="my_label"))

  Some of the learning algorithms will support distributed training with the
  ParameterServerStrategy e.g.:

    with tf.distribute.experimental.ParameterServerStrategy(...).scope():
      model = DistributedGradientBoostedTreesModel()
    model.fit(...)

  Args:
    x: Training dataset (See details above for the supported formats).
    y: Label of the training dataset. Only used if "x" does not contains the
      labels.
    callbacks: Callbacks triggered during the training.
    **kwargs: Arguments passed to the core keras model's fit.

  Returns:
    A `History` object. Its `History.history` attribute is not yet
    implemented for decision forests algorithms, and will return empty.
    All other fields are filled as usual for `Keras.Mode.fit()`.
  """
  # Discard any function traces captured before training (e.g. the
  # zero-returning predict function of an untrained model).
  self._clear_function_cache()

  # Check for a Pandas Dataframe without injecting a dependency.
  if str(type(x)) == "<class 'pandas.core.frame.DataFrame'>":
    raise ValueError(
        "`fit` cannot consume Pandas' dataframes directly. Instead, use the "
        "`pd_dataframe_to_tf_dataset` utility function. For example: "
        "`model.fit(tfdf.keras.pd_dataframe_to_tf_dataset(train_dataframe, "
        "label=\"label_column\"))")

  # If the dataset was created with "pd_dataframe_to_tf_dataset", ensure that
  # the task is correctly set.
  if hasattr(x, "_tfdf_task"):
    dataset_task = getattr(x, "_tfdf_task")
    if dataset_task != self._task:
      raise ValueError(
          f"The model's `task` attribute ({Task.Name(self._task)}) does "
          "not match the `task` attribute passed to "
          f"`pd_dataframe_to_tf_dataset` ({Task.Name(dataset_task)}).")

  # Call "compile" if the user forgot to do so.
  if not self._is_compiled:
    self.compile()

  # Decision forests train in a single pass; reject any other epoch count.
  if "epochs" in kwargs:
    if kwargs["epochs"] != 1:
      raise ValueError("all decision forests algorithms train with only 1 " +
                       "epoch, epochs={} given".format(kwargs["epochs"]))
    del kwargs["epochs"]  # Not needed since we force it to 1 below.

  # This callback will trigger the training at the end of the first epoch.
  callbacks = [_TrainerCallBack(self)] + (callbacks if callbacks else [])

  # We want the model trained before any evaluation is done at the
  # end of the epoch. This may fail in case any of the `on_train_batch_*`
  # callbacks calls `evaluate()` before the end of the 1st epoch.
  self._train_on_evaluate = True

  try:
    history = super(CoreModel, self).fit(
        x=x, y=y, epochs=1, callbacks=callbacks, **kwargs)
  finally:
    # Always restore the flag, even if training raised.
    self._train_on_evaluate = False

  # Build the inference graph so the model can be saved/applied immediately.
  self._build(x)

  return history
  def fit_on_dataset_path(
      self,
      train_path: str,
      label_key: str,
      weight_key: Optional[str] = None,
      ranking_key: Optional[str] = None,
      valid_path: Optional[str] = None,
      dataset_format: Optional[str] = "csv",
      max_num_scanned_rows_to_accumulate_statistics: Optional[int] = 100_000):
    """Trains the model on a dataset stored on disk.

    This solution is generally more efficient and easier than loading the
    dataset with a tf.Dataset both for local and distributed training.

    Usage example:

      # Local training
      model = keras.GradientBoostedTreesModel()
      model.fit_on_dataset_path(
          train_path="/path/to/dataset.csv",
          label_key="label",
          dataset_format="csv")
      model.save("/model/path")

      # Distributed training
      with tf.distribute.experimental.ParameterServerStrategy(...).scope():
        model = keras.DistributedGradientBoostedTreesModel()
      model.fit_on_dataset_path(
          train_path="/path/to/dataset@10",
          label_key="label",
          dataset_format="tfrecord+tfe")
      model.save("/model/path")

    Args:
      train_path: Path to the training dataset. Supports comma separated files,
        shard and glob notation.
      label_key: Name of the label column.
      weight_key: Name of the weighing column.
      ranking_key: Name of the ranking column.
      valid_path: Path to the validation dataset. If not provided, or if the
        learning algorithm does not support/need a validation dataset,
        `valid_path` is ignored.
      dataset_format: Format of the dataset. Should be one of the registered
        dataset formats (see
        https://github.com/google/yggdrasil-decision-forests/blob/main/documentation/user_manual.md#dataset-path-and-format
        for more details). The format "csv" is always available but it is
        generally only suited for small datasets.
      max_num_scanned_rows_to_accumulate_statistics: Maximum number of examples
        to scan to determine the statistics of the features (i.e. the dataspec,
        e.g. mean value, dictionaries). (Currently) the "first" examples of the
        dataset are scanned (e.g. the first examples of the dataset is a single
        file). Therefore, it is important that the sampled dataset is relatively
        uniformly sampled, notably the scanned examples should contains all the
        possible categorical values (otherwise the not seen value will be
        treated as out-of-vocabulary). If set to None, the entire dataset is
        scanned. This parameter has no effect if the dataset is stored in a
        format that already contains those values.

    Returns:
      A `History` object. Its `History.history` attribute is not yet
      implemented for decision forests algorithms, and will return empty.
      All other fields are filled as usual for `Keras.Mode.fit()`.
    """

    if self._verbose:
      logging.info("Training on dataset %s", train_path)

    # Drop any stale @tf.function traces from a previous run.
    self._clear_function_cache()

    # Call "compile" if the user forgot to do so.
    if not self._is_compiled:
      self.compile()

    train_model_path = self._temp_directory
    model_path = os.path.join(train_model_path, "model")

    # Create the dataspec guide: tells the Yggdrasil dataset reader how to
    # interpret each column (semantic, vocabulary limits, etc.).
    guide = data_spec_pb2.DataSpecificationGuide(
        ignore_columns_without_guides=self._exclude_non_specified,
        max_num_scanned_rows_to_accumulate_statistics=max_num_scanned_rows_to_accumulate_statistics
    )
    # Note: "categorial" (sic) is the actual field name in the Yggdrasil proto.
    guide.default_column_guide.categorial.max_vocab_count = self._max_vocab_count

    # One column guide per user-declared feature.
    self._normalized_input_keys = []
    for feature in self._features:
      col_guide = copy.deepcopy(feature.guide)
      col_guide.column_name_pattern = tf_core.normalize_inputs_regexp(
          feature.name)
      guide.column_guides.append(col_guide)
      self._normalized_input_keys.append(feature.name)

    # The label column semantic depends on the task.
    label_guide = data_spec_pb2.ColumnGuide(
        column_name_pattern=tf_core.normalize_inputs_regexp(label_key))
    if self._task == Task.CLASSIFICATION:
      # Keep every label value: no minimum frequency, unbounded vocabulary.
      label_guide.type = data_spec_pb2.CATEGORICAL
      label_guide.categorial.min_vocab_frequency = 0
      label_guide.categorial.max_vocab_count = -1
    elif self._task == Task.REGRESSION:
      label_guide.type = data_spec_pb2.NUMERICAL
    elif self._task == Task.RANKING:
      label_guide.type = data_spec_pb2.NUMERICAL
    else:
      raise ValueError(
          f"Non implemented task {self._task} with \"fit_on_dataset_path\"."
          " Use a different task or train with \"fit\".")
    guide.column_guides.append(label_guide)

    if ranking_key:
      ranking_guide = data_spec_pb2.ColumnGuide(
          column_name_pattern=tf_core.normalize_inputs_regexp(ranking_key),
          type=data_spec_pb2.HASH)
      guide.column_guides.append(ranking_guide)

    if weight_key:
      weight_guide = data_spec_pb2.ColumnGuide(
          column_name_pattern=tf_core.normalize_inputs_regexp(weight_key),
          type=data_spec_pb2.NUMERICAL)
      guide.column_guides.append(weight_guide)

    # Deployment configuration (e.g. number of training threads).
    deployment_config = copy.deepcopy(
        self._advanced_arguments.yggdrasil_deployment_config)
    if not deployment_config.HasField("num_threads"):
      deployment_config.num_threads = self._num_threads

    distribution_config = tf_core.get_distribution_configuration(
        self.distribute_strategy)
    logging.info("distribution_config: %s", distribution_config)

    # Distributed training requires a learner that can consume the partial
    # dataset cache format.
    if distribution_config is not None and not self.capabilities(
    ).support_partial_cache_dataset_format:
      raise ValueError(
          f"The model {type(self)} does not support training with a TF "
          "Distribution strategy (i.e. model.capabilities()."
          "support_partial_cache_dataset_format == False). If the dataset "
          "is small, simply remove the distribution strategy scope (i.e. `with "
          "strategy.scope():` around the model construction). If the dataset "
          "is large, use a distributed version of the model. For Example, use "
          "DistributedGradientBoostedTreesModel instead of "
          "GradientBoostedTreesModel.")

    # Train the model. The dataset path is typed by prefixing the format,
    # e.g. "csv:/path/to/dataset.csv".
    tf_core.train_on_file_dataset(
        train_dataset_path=dataset_format + ":" + train_path,
        valid_dataset_path=(dataset_format + ":" +
                            valid_path) if valid_path else None,
        feature_ids=self._normalized_input_keys,
        label_id=label_key,
        weight_id=weight_key,
        model_id=self._training_model_id,
        model_dir=train_model_path,
        learner=self._learner,
        task=self._task,
        generic_hparms=tf_core.hparams_dict_to_generic_proto(
            self._learner_params),
        ranking_group=ranking_key,
        keep_model_in_resource=True,
        guide=guide,
        training_config=self._advanced_arguments.yggdrasil_training_config,
        deployment_config=deployment_config,
        working_cache_path=os.path.join(self._temp_directory, "working_cache"),
        distribution_config=distribution_config)

    if self._verbose:
      logging.info("Training done. Finalizing the model.")

    # Request and store a description of the model.
    self._description = training_op.SimpleMLShowModel(
        model_identifier=self._training_model_id).numpy().decode("utf-8")
    training_op.SimpleMLUnloadModel(model_identifier=self._training_model_id)

    # Build the model's graph from the Yggdrasil model exported on disk.
    inspector = inspector_lib.make_inspector(model_path)
    self._set_from_yggdrasil_model(inspector, model_path)

    # Build the model history from the training logs (one entry per
    # intermediate evaluation, keyed by number of trees).
    history = tf.keras.callbacks.History()
    history.model = self
    history.on_train_begin()
    training_logs = inspector.training_logs()
    if training_logs is not None:
      for src_logs in training_logs:
        if src_logs.evaluation is not None:
          history.on_epoch_end(src_logs.num_trees,
                               src_logs.evaluation.to_dict())
    self.history = history
    return self.history
def save(self, filepath: str, overwrite: Optional[bool] = True, **kwargs):
"""Saves the model as a TensorFlow SavedModel.
The exported SavedModel contains a standalone Yggdrasil Decision Forests
model in the "assets" sub-directory. The Yggdrasil model can be used
directly using the Yggdrasil API. However, this model does not contain the
"preprocessing" layer (if any).
Args:
filepath: Path to the output model.
overwrite: If true, override an already existing model. If false, raise an
error if a model already exist.
**kwargs: Arguments passed to the core keras model's save.
"""
# TF does not override assets when exporting a model in a directory already
# containing a model. In such case, we need to remove the initial assets
# directory manually.
# Only the assets directory is removed (instead of the whole "filepath") in
# case this directory contains important files.
assets_dir = os.path.join(filepath, "assets")
saved_model_file = os.path.join(filepath, "saved_model.pb")
if tf.io.gfile.exists(saved_model_file) and tf.io.gfile.exists(assets_dir):
if overwrite:
tf.io.gfile.rmtree(assets_dir)
else:
raise ValueError(
f"A model already exist as {filepath}. Use an empty directory "
"or set overwrite=True")
super(CoreModel, self).save(
filepath=filepath, overwrite=overwrite, **kwargs)
def evaluate(self, *args, **kwargs):
"""Returns the loss value & metrics values for the model.
See details on `keras.Model.evaluate`.
Args:
*args: Passed to `keras.Model.evaluate`.
**kwargs: Passed to `keras.Model.evaluate`. Scalar test loss (if the
model has a single output and no metrics) or list of scalars (if the
model has multiple outputs and/or metrics). See details in
`keras.Model.evaluate`.
"""
if self._train_on_evaluate:
if not self._is_trained.numpy():
self._train_model()
else:
raise ValueError(
"evaluate() requested training of an already trained model -- "
"did you call `Model.evaluate` from a `on_train_batch*` callback ?"
"this is not yet supported in Decision Forests models, where one "
"can only evaluate after the first epoch is finished and the "
"model trained")
return super(CoreModel, self).evaluate(*args, **kwargs)
def summary(self, line_length=None, positions=None, print_fn=None):
"""Shows information about the model."""
super(CoreModel, self).summary(
line_length=line_length, positions=positions, print_fn=print_fn)
if print_fn is None:
print_fn = print
if self._model is not None:
print_fn(self._description)
@staticmethod
def predefined_hyperparameters() -> List[HyperParameterTemplate]:
"""Returns a better than default set of hyper-parameters.
They can be used directly with the `hyperparameter_template` argument of the
model constructor.
These hyper-parameters outperforms the default hyper-parameters (either
generally or in specific scenarios). Like default hyper-parameters, existing
pre-defined hyper-parameters cannot change.
"""
return []
  # TODO(b/205971333): Use Trace Protocol For TF DF custom types to avoid
  # clearing the cache.
  def _clear_function_cache(self):
    """Clear the @tf.function cache and force re-tracing.

    Called before (re-)training so "call" is traced against the new data.
    Reaches into tf.function private internals ("_stateful_fn" /
    "_function_cache"), which may change between TF versions.
    """
    # pylint: disable=protected-access
    if self.call._stateful_fn:
      self.call._stateful_fn._function_cache.clear()
    # pylint: enable=protected-access
def _extract_sample(self, x):
"""Extracts a sample (e.g.
batch, row) from the training dataset.
Returns None is the sample cannot be extracted.
Args:
x: Training dataset in the same format as "fit".
Returns:
A sample.
"""
if isinstance(x, tf.data.Dataset):
return x.take(1)
try:
# Work for numpy array and TensorFlow Tensors.
return tf.nest.map_structure(lambda v: v[0:1], x)
except Exception: # pylint: disable=broad-except
pass
try:
# Works for list of primitives.
if isinstance(x, list) and isinstance(x[0],
(int, float, str, bytes, bool)):
return x[0:1]
except Exception: # pylint: disable=broad-except
pass
logging.warning("Dataset sampling not implemented for %s", x)
return None
def _build(self, x):
"""Build the internal graph similarly as "build" for classical Keras models.
Compared to the classical build, supports features with dtypes != float32.
Args:
x: Training dataset in the same format as "fit".
"""
# Note: Build does not support dtypes other than float32.
super(CoreModel, self).build([])
# Force the creation of the graph.
# If a sample cannot be extracted, the graph will be built at the first call
# to "predict" or "evaluate".
if self._advanced_arguments.infer_prediction_signature:
sample = self._extract_sample(x)
if sample is not None:
self.predict(sample)
  def _train_model(self):
    """Effectively train the model.

    Runs the Yggdrasil learner either locally (no distribution strategy) or
    from the partial dataset cache (distributed training), exports the trained
    model to "self._temp_directory", and re-loads it as the inference engine.

    Raises:
      Exception: If the training graph was not built beforehand (i.e.
        "_normalized_input_keys" is unset).
    """

    if self._normalized_input_keys is None:
      raise Exception("The training graph was not built.")

    train_model_path = self._temp_directory
    model_path = os.path.join(train_model_path, "model")

    # Create the dataspec guide.
    # Note: "categorial" (sic) is the actual field name in the Yggdrasil proto.
    guide = data_spec_pb2.DataSpecificationGuide()
    guide.default_column_guide.categorial.max_vocab_count = self._max_vocab_count
    for feature in self._features:
      col_guide = copy.deepcopy(feature.guide)
      col_guide.column_name_pattern = tf_core.normalize_inputs_regexp(
          feature.name)
      guide.column_guides.append(col_guide)

    # Deployment configuration (e.g. number of training threads).
    deployment_config = copy.deepcopy(
        self._advanced_arguments.yggdrasil_deployment_config)
    if not deployment_config.HasField("num_threads"):
      deployment_config.num_threads = self._num_threads

    distribution_config = tf_core.get_distribution_configuration(
        self.distribute_strategy)

    if distribution_config is None:
      # Train the model.
      # The model will be exported to "train_model_path".
      #
      # Note: It would be possible to train and load the model without saving
      # the model to file.
      tf_core.train(
          input_ids=self._normalized_input_keys,
          label_id=_LABEL,
          weight_id=_WEIGHTS if self._weighted_training else None,
          model_id=self._training_model_id,
          model_dir=train_model_path,
          learner=self._learner,
          task=self._task,
          generic_hparms=tf_core.hparams_dict_to_generic_proto(
              self._learner_params),
          ranking_group=_RANK_GROUP if self._task == Task.RANKING else None,
          keep_model_in_resource=True,
          guide=guide,
          training_config=self._advanced_arguments.yggdrasil_training_config,
          deployment_config=deployment_config,
      )
    else:
      # Distributed training: flush the partial dataset cache collected by the
      # workers, then train from it.
      tf_core.finalize_distributed_dataset_collection(
          cluster_coordinator=self._cluster_coordinator,
          input_ids=self._normalized_input_keys + [_LABEL] +
          ([_WEIGHTS] if self._weighted_training else []),
          model_id=self._training_model_id,
          dataset_path=self._distributed_partial_dataset_cache_path())

      tf_core.train_on_file_dataset(
          train_dataset_path="partial_dataset_cache:" +
          self._distributed_partial_dataset_cache_path(),
          valid_dataset_path=None,
          feature_ids=self._normalized_input_keys,
          label_id=_LABEL,
          weight_id=_WEIGHTS if self._weighted_training else None,
          model_id=self._training_model_id,
          model_dir=train_model_path,
          learner=self._learner,
          task=self._task,
          generic_hparms=tf_core.hparams_dict_to_generic_proto(
              self._learner_params),
          ranking_group=_RANK_GROUP if self._task == Task.RANKING else None,
          keep_model_in_resource=True,
          guide=guide,
          training_config=self._advanced_arguments.yggdrasil_training_config,
          deployment_config=deployment_config,
          working_cache_path=os.path.join(self._temp_directory,
                                          "working_cache"),
          distribution_config=distribution_config,
      )

    # Request and store a description of the model.
    self._description = training_op.SimpleMLShowModel(
        model_identifier=self._training_model_id).numpy().decode("utf-8")
    training_op.SimpleMLUnloadModel(model_identifier=self._training_model_id)

    self._is_trained.assign(True)

    # Load and optimize the model in memory.
    # Register the model as a SavedModel asset.
    self._model = tf_op.ModelV2(model_path=model_path, verbose=False)
  def _set_from_yggdrasil_model(self,
                                inspector: inspector_lib.AbstractInspector,
                                path: str):
    """Initializes this Keras model from a trained Yggdrasil model on disk.

    Records the feature semantics, marks the model as trained, loads the
    Yggdrasil model as the inference engine, and runs one toy prediction so
    Keras builds its inference graph.

    Args:
      inspector: Inspector of the Yggdrasil model stored at "path".
      path: Directory containing the Yggdrasil model.

    Raises:
      ValueError: If a feature has an unsupported column type.
    """

    # Ensure the model is compiled before "predict" is called below.
    if not self._is_compiled:
      self.compile()

    features = inspector.features()
    semantics = {
        feature.name: tf_core.column_type_to_semantic(feature.type)
        for feature in features
    }

    self._semantics = semantics
    self._normalized_input_keys = sorted(list(semantics.keys()))
    self._is_trained.assign(True)
    self._model = tf_op.ModelV2(model_path=path, verbose=False)

    # Creates a toy batch to initialize the Keras model. The values are not
    # used.
    examples = {}
    for feature in features:
      if feature.type == data_spec_pb2.ColumnType.NUMERICAL:
        examples[feature.name] = tf.constant([1.0, 2.0])
      elif feature.type == data_spec_pb2.ColumnType.CATEGORICAL:
        # Integerized categorical features are fed as integers; otherwise
        # the feature is fed as strings.
        if inspector.dataspec.columns[
            feature.col_idx].categorical.is_already_integerized:
          examples[feature.name] = tf.constant([1, 2])
        else:
          examples[feature.name] = tf.constant(["a", "b"])
      elif feature.type == data_spec_pb2.ColumnType.CATEGORICAL_SET:
        # Categorical-set features are ragged: a variable number of items per
        # example.
        if inspector.dataspec.columns[
            feature.col_idx].categorical.is_already_integerized:
          examples[feature.name] = tf.ragged.constant([[1, 2], [3]],
                                                      dtype=tf.int32)
        else:
          examples[feature.name] = tf.ragged.constant([["a", "b"], ["c"]],
                                                      dtype=tf.string)
      elif feature.type == data_spec_pb2.ColumnType.BOOLEAN:
        # Boolean features are fed as floats.
        examples[feature.name] = tf.constant([0.0, 1.0])
      else:
        raise ValueError("Non supported feature type")

    # Running "predict" forces the creation of the Keras graph.
    self.predict(tf.data.Dataset.from_tensor_slices(examples).batch(2))
@staticmethod
def capabilities() -> abstract_learner_pb2.LearnerCapabilities:
"""Lists the capabilities of the learning algorithm."""
return abstract_learner_pb2.LearnerCapabilities()
class _TrainerCallBack(tf.keras.callbacks.Callback):
  """Callback that trains the model at the end of the first epoch.

  `CoreModel.fit` defers the actual decision-forest training to the end of
  the first (and only) Keras epoch; this callback triggers it.
  """

  def __init__(self, model: CoreModel):
    # Bug fix: initialize the base Callback state (the original skipped
    # super().__init__(), leaving base-class attributes unset).
    super(_TrainerCallBack, self).__init__()
    self._model = model

  def on_epoch_end(self, epoch, logs=None):
    """Trains the wrapped model once, at the end of epoch 0."""
    del logs
    if epoch == 0 and not self._model._is_trained.numpy():  # pylint:disable=protected-access
      self._model._train_model()  # pylint:disable=protected-access

      # After this the model is trained, and evaluations shouldn't attempt
      # to retrain.
      self._model._train_on_evaluate = False  # pylint:disable=protected-access
def _batch_size(inputs: Union[tf.Tensor, Dict[str, tf.Tensor]]) -> tf.Tensor:
  """Gets the batch size of a tensor or dictionary of tensors.

  Assumes that all the tensors have the same batchsize.

  Args:
    inputs: Dict of tensors.

  Returns:
    The batch size.

  Raises:
    ValueError: Invalid arguments.
  """

  if not isinstance(inputs, dict):
    return tf.shape(inputs)[0]

  # Any entry works: all tensors share the same batch dimension.
  for tensor in inputs.values():
    return tf.shape(tensor)[0]
  raise ValueError("Empty input")
def pd_dataframe_to_tf_dataset(dataframe,
                               label: Optional[str] = None,
                               task: Optional[TaskType] = Task.CLASSIFICATION,
                               max_num_classes: Optional[int] = 100,
                               in_place: Optional[bool] = False,
                               fix_feature_names: Optional[bool] = True,
                               weight: Optional[str] = None) -> tf.data.Dataset:
  """Converts a Pandas Dataframe into a TF Dataset compatible with Keras.

  Details:
    - Ensures columns have uniform types.
    - If "label" is provided, separate it as a second channel in the tf.Dataset
      (as expected by Keras).
    - If "weight" is provided, separate it as a third channel in the tf.Dataset
      (as expected by Keras).
    - If "task" is provided, ensure the correct dtype of the label. If the task
      a classification and the label a string, integerize the labels. In this
      case, the label values are extracted from the dataset and ordered
      lexicographically. Warning: This logic won't work as expected if the
      training and testing dataset contains different label values. In such
      case, it is preferable to convert the label to integers beforehand while
      making sure the same encoding is used for all the datasets.
    - Returns "tf.data.from_tensor_slices"

  Args:
    dataframe: Pandas dataframe containing a training or evaluation dataset.
    label: Name of the label column.
    task: Target task of the dataset.
    max_num_classes: Maximum number of classes for a classification task. A
      high number of unique value / classes might indicate that the problem is
      a regression or a ranking instead of a classification. Set to None to
      disable checking the number of classes.
    in_place: If false (default), the input `dataframe` will not be modified by
      `pd_dataframe_to_tf_dataset`. However, a copy of the dataset memory will
      be made. If true, the dataframe will be modified in place.
    fix_feature_names: Some feature names are not supported by the SavedModel
      signature. If `fix_feature_names=True` (default) the feature will be
      renamed and made compatible. If `fix_feature_names=False`, the feature
      name will not be changed, but exporting the model might fail (i.e.
      `model.save(...)`).
    weight: Optional name of a column in `dataframe` to use to weight the
      training.

  Returns:
    A TensorFlow Dataset.

  Raises:
    ValueError: If "label"/"weight" is not a column of the dataframe, if too
      many classes are found, or if negative integer labels are found.
  """

  if not in_place:
    # The label encoding, renaming and NaN filling below mutate the dataframe.
    dataframe = dataframe.copy(deep=True)

  if label is not None:
    if label not in dataframe.columns:
      raise ValueError(
          f"The label \"{label}\" is not a column of the dataframe.")

    if task == Task.CLASSIFICATION:
      classification_classes = dataframe[label].unique().tolist()
      # Bug fix: "max_num_classes=None" is documented to disable the check but
      # the original compared "len(...) > None", which raises a TypeError.
      if (max_num_classes is not None and
          len(classification_classes) > max_num_classes):
        raise ValueError(
            f"The number of unique classes ({len(classification_classes)}) "
            f"exceeds max_num_classes ({max_num_classes}). A high number of "
            "unique value / classes might indicate that the problem is a "
            "regression or a ranking instead of a classification. If this "
            "problem is effectively a classification problem, increase "
            "`max_num_classes`.")

      if dataframe[label].dtypes in [str, object]:
        # Integerize string labels by their lexicographic order.
        classification_classes.sort()
        dataframe[label] = dataframe[label].map(classification_classes.index)
      elif dataframe[label].dtypes in [int, float]:
        if (dataframe[label] < 0).any():
          raise ValueError(
              "Negative integer classification label found. Make sure "
              "you label values are positive or stored as string.")

  if weight is not None:
    if weight not in dataframe.columns:
      raise ValueError(
          f"The weight \"{weight}\" is not a column of the dataframe.")

  if fix_feature_names:
    # Rename the features so they are compatible with SaveModel serving
    # signatures.
    rename_mapping = {}
    new_names = set()
    change_any_feature_name = False
    for column in dataframe:
      new_name = column
      for forbidden_character in _FORBIDDEN_FEATURE_CHARACTERS:
        if forbidden_character in new_name:
          change_any_feature_name = True
          new_name = new_name.replace(forbidden_character, "_")
      # Add a tailing "_" until there are not feature name collisions.
      while new_name in new_names:
        new_name += "_"
        change_any_feature_name = True

      rename_mapping[column] = new_name
      new_names.add(new_name)

    dataframe = dataframe.rename(columns=rename_mapping)
    if change_any_feature_name:
      logging.warning(
          "Some of the feature names have been changed automatically to be "
          "compatible with SavedModels because fix_feature_names=True.")

  # Make sure that missing values for string columns are not represented as
  # float(NaN).
  for col in dataframe.columns:
    if dataframe[col].dtype in [str, object]:
      dataframe[col] = dataframe[col].fillna("")

  if label is not None:
    # Bug fix: pass "axis" by keyword. The positional form
    # "DataFrame.drop(label, 1)" is deprecated since pandas 1.0 and removed in
    # pandas 2.0.
    features_dataframe = dataframe.drop(label, axis=1)
    if weight is not None:
      features_dataframe = features_dataframe.drop(weight, axis=1)
      output = (dict(features_dataframe), dataframe[label].values,
                dataframe[weight].values)
    else:
      output = (dict(features_dataframe), dataframe[label].values)
    tf_dataset = tf.data.Dataset.from_tensor_slices(output)
  else:
    if weight is not None:
      raise ValueError(
          "\"weight\" is only supported if the \"label\" is also provided")
    tf_dataset = tf.data.Dataset.from_tensor_slices(dict(dataframe))

  # The batch size does not impact the training of TF-DF.
  tf_dataset = tf_dataset.batch(64)

  # Seem "fit" for a definition of "_tfdf_task".
  setattr(tf_dataset, "_tfdf_task", task)
  return tf_dataset
def yggdrasil_model_to_keras_model(src_path: str, dst_path: str):
  """Converts an Yggdrasil model into a Keras model.

  Args:
    src_path: Path to the source Yggdrasil model.
    dst_path: Destination path of the converted Keras (SavedModel) model.
  """

  inspector = inspector_lib.make_inspector(src_path)
  objective = inspector.objective()

  is_ranking = objective.task == inspector_lib.Task.RANKING
  model = CoreModel(
      task=objective.task,
      learner="MANUAL",
      ranking_group=objective.group if is_ranking else None)

  model._set_from_yggdrasil_model(inspector, src_path)  # pylint: disable=protected-access
  model.save(dst_path)
def _list_explicit_arguments(func):
"""Function decorator that adds an "explicit_args" with the explicit args."""
arguments = inspect.getfullargspec(func)[0]
def wrapper(*args, **kargs):
kargs["explicit_args"] = set(
list(arguments[:len(args)]) + list(kargs.keys()))
return func(*args, **kargs)
return wrapper
def _parse_hp_template(template_name) -> Tuple[str, Optional[int]]:
"""Parses a template name as specified by the user.
Template can versionned:
"my_template@v5" -> Returns (my_template, 5)
or non versionned:
"my_template" -> Returns (my_template, None)
Args:
template_name: User specified template.
Returns:
Base template name and version.
"""
malformed_msg = (f"The template \"{template_name}\" is malformed. Expecting "
"\"{template}@v{version}\" or \"{template}")
if "@" in template_name:
# Template with version.
parts = template_name.split("@v")
if len(parts) != 2:
raise ValueError(malformed_msg)
base_name = parts[0]
try:
version = int(parts[1])
except:
raise ValueError(malformed_msg)
return base_name, version
else:
# Template without version?
return template_name, None
def _get_matching_template(
    template_name: str,
    all_templates: List[HyperParameterTemplate]) -> HyperParameterTemplate:
  """Returns the template that matches a template name.

  Args:
    template_name: User specified template.
    all_templates: Candidate templates.

  Returns:
    The matching template.

  Raises:
    ValueError: If no candidate matches "template_name".
  """

  # Extract the base name and version of the template.
  template_base, template_version = _parse_hp_template(template_name)

  if template_version is None:
    # No version requested: among templates with the right base name, pick the
    # one with the highest version.
    candidates = [t for t in all_templates if t.name == template_base]
    candidates.sort(key=lambda t: t.version, reverse=True)
    if not candidates:
      available = list(set([template.name for template in all_templates]))
      raise ValueError(f"No template is matching {template_name}. "
                       f"Available template names are: {available}")
    return candidates[0]

  # Exact name + version match.
  candidates = [
      t for t in all_templates
      if t.name == template_base and t.version == template_version
  ]
  if not candidates:
    available = [f"{t.name}@v{t.version}" for t in all_templates]
    raise ValueError(f"No template is matching {template_name}. "
                     f"The available templates are: {available}")
  if len(candidates) > 1:
    raise ValueError("Internal error. Multiple matching templates")
  return candidates[0]
def _apply_hp_template(parameters: Dict[str, Any], template_name: str,
                       all_templates: List[HyperParameterTemplate],
                       explicit_parameters: Set[str]) -> Dict[str, Any]:
  """Applies the hyper-parameter template to the user+default parameters.

  Resolves "template_name" (a versioned name such as "name@v5", or a bare
  name, in which case the highest version wins) and merges it into
  "parameters". Precedence, from strongest to weakest:
  user parameters > template parameters > default parameters.

  Note: "parameters" is updated in place (and also returned).

  Args:
    parameters: User and default hyper-parameters.
    template_name: Name of the template as specified by the user.
    all_templates: All the available templates.
    explicit_parameters: Set of parameters (in parameters) defined by the user.

  Returns:
    The merged hyper-parameters.
  """

  template = _get_matching_template(template_name, all_templates)
  logging.info("Resolve hyper-parameter template \"%s\" to \"%s@v%d\" -> %s.",
               template_name, template.name, template.version,
               template.parameters)

  # A template value only applies to parameters the user did not set
  # explicitly.
  overridable = [
      key for key in parameters
      if key in template.parameters and key not in explicit_parameters
  ]
  for key in overridable:
    parameters[key] = template.parameters[key]

  return parameters
def _check_feature_names(feature_names: List[str], raise_error: bool):
  """Checks if the features names are compatible with all of the Keras API.

  Args:
    feature_names: Names of the input features.
    raise_error: If true, raise a ValueError on the first incompatible name.
      Otherwise, only log a warning per incompatible name.

  Raises:
    ValueError: If "raise_error" is true and a feature name is empty or
      contains a forbidden character.
  """

  def problem(reason):
    """Reports one incompatibility, as an error or a warning."""
    full_reason = (
        "One or more feature names are not compatible with the Keras API: "
        f"{reason} This problem can be solved in one of two ways: (1; "
        "Recommended) Rename the features to be compatible. You can use "
        "the argument `fix_feature_names=True` if you are using "
        "`pd_dataframe_to_tf_dataset`. (2) Disable this error message "
        "(`fail_on_non_keras_compatible_feature_name=False`) and only use part"
        " of the compatible Keras API.")
    if raise_error:
      raise ValueError(full_reason)
    else:
      logging.warning(full_reason)

  for feature_name in feature_names:
    if not feature_name:
      problem("One of the feature names is empty.")

    # Characters forbidden in a serving signature name.
    for character in _FORBIDDEN_FEATURE_CHARACTERS:
      if character in feature_name:
        # Bug fix: the second literal was missing its "f" prefix, so the
        # message printed "{_FORBIDDEN_FEATURE_CHARACTERS}" verbatim instead
        # of the actual forbidden character set.
        problem(f"The feature name \"{feature_name}\" contains a forbidden "
                f"character ({_FORBIDDEN_FEATURE_CHARACTERS}).")
# The following section is a copy of internal Keras functions that are not
# available in the public api.
#
# Keras does not allow projects to depend on the internal api.
# pylint: disable=g-doc-args
# pylint: disable=g-doc-return-or-yield
def _minimum_control_deps(outputs):
  """Returns the minimum control dependencies to ensure step succeeded.

  This function is a strict copy of the function of the same name in the keras
  private API:
  third_party/tensorflow/python/keras/engine/training.py
  """
  if tf.executing_eagerly():
    return []  # Control dependencies not needed.
  outputs = tf.nest.flatten(outputs, expand_composites=True)
  for out in outputs:
    # Variables can't be control dependencies.
    if not isinstance(out, tf.Variable):
      return [out]  # Return first Tensor or Op from outputs.
  return []  # No viable Tensor or Op to use for control deps.
def _expand_1d(data):
  """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s.

  This function is a strict copy of the function of the same name in the keras
  private API:
  third_party/tensorflow/python/keras/engine/data_adapter.py
  """

  def _expand_single_1d_tensor(t):
    # Leaves `CompositeTensor`s (e.g. ragged/sparse tensors) as-is; only
    # dense rank-1 tensors gain a trailing axis.
    if (isinstance(t, tf.Tensor) and isinstance(t.shape, tf.TensorShape) and
        t.shape.rank == 1):
      return tf.expand_dims(t, axis=-1)
    return t

  return tf.nest.map_structure(_expand_single_1d_tensor, data)
def _write_scalar_summaries(logs, step):
  """Writes every scalar entry of "logs" to the default TF summary writer.

  Args:
    logs: Mapping of metric name to value; non-scalar values are skipped.
    step: Step used for the summaries.
  """
  for name, value in logs.items():
    if _is_scalar(value):
      # Bug fix: "tf.scalar" does not exist in the TensorFlow public API;
      # scalar summaries are written with "tf.summary.scalar".
      tf.summary.scalar("batch_" + name, value, step=step)
def _is_scalar(x):
  """Tests whether "x" is a rank-0 TensorFlow tensor or variable."""
  is_tf_value = isinstance(x, (tf.Tensor, tf.Variable))
  return is_tf_value and x.shape.rank == 0
def _is_per_replica_instance(obj):
  """Tests whether "obj" is a per-replica distributed value."""
  if not isinstance(obj, tf.distribute.DistributedValues):
    return False
  return isinstance(obj, tf.__internal__.CompositeTensor)
def _reduce_per_replica(values, strategy, reduction="first"):
  """Reduce PerReplica objects.

  Copy of the Keras-private helper of the same name. NOTE(review): only the
  "first" reduction is implemented in this copy; requesting "concat" raises
  ValueError despite being listed below.

  Args:
    values: Structure of `PerReplica` objects or `Tensor`s. `Tensor`s are
      returned as-is.
    strategy: `tf.distribute.Strategy` object.
    reduction: One of 'first', 'concat'.

  Returns:
    Structure of `Tensor`s.
  """

  def _reduce(v):
    """Reduce a single `PerReplica` object."""
    # Plain (non-distributed) values pass through unchanged.
    if not _is_per_replica_instance(v):
      return v
    elif reduction == "first":
      # Keep the value from the first replica only.
      return strategy.unwrap(v)[0]
    else:
      raise ValueError('`reduction` must be "first" or "concat". Received: '
                       f"reduction={reduction}.")

  return tf.nest.map_structure(_reduce, values)
# pylint: enable=g-doc-args
# pylint: enable=g-doc-return-or-yield
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
import json
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../mq"))
from mq.esworker_sns_sqs import taskConsumer
from mozdef_util.utilities.dot_dict import DotDict
from mozdef_util.query_models import SearchQuery, ExistsMatch
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from unit_test_suite import UnitTestSuite
class TestEsworkerSNSSQS(UnitTestSuite):
    """Integration tests for the SNS/SQS esworker task consumer.

    Each test feeds a raw SNS notification to ``taskConsumer.on_message``
    and then checks that exactly one normalized event was written to
    Elasticsearch with the expected fields.
    """

    def setup(self):
        """Build a taskConsumer pointed at the unit-test ES cluster."""
        super(TestEsworkerSNSSQS, self).setup()
        mq_conn = 'abc'
        task_queue = 'example-logs-mozdef'
        es_connection = self.es_client
        options = DotDict(
            {
                "esbulksize": 0,
                "mozdefhostname": "unittest.hostname",
                "taskexchange": task_queue,
                'plugincheckfrequency': 120,
            }
        )
        self.consumer = taskConsumer(mq_conn, task_queue, es_connection, options)

    def search_and_verify_event(self, expected_event):
        """Assert ES holds exactly one tagged event matching expected_event."""
        self.refresh('events')
        search_query = SearchQuery(minutes=5)
        search_query.add_must(ExistsMatch('tags'))
        results = search_query.execute(self.es_client)
        assert len(results['hits']) == 1
        saved_event = results['hits'][0]['_source']
        self.verify_event(saved_event, expected_event)

    def test_syslog_event(self):
        """A fluentd syslog notification becomes a normalized syslog event."""
        event = {
            "Type": "Notification",
            "MessageId": "abcdefg",
            "TopicArn": "arn:aws:sns:us-west-2:123456789:example-logs-mozdef",
            "Subject": "Fluentd-Notification",
            "Message": "{\"time\":\"2017-05-25 07:14:15 +0000\",\"timestamp\":\"2017-05-25T07:14:15+00:00\",\"hostname\":\"abcdefghostname\",\"pname\":\"dhclient\",\"processid\":\"[123]\",\"type\":\"syslog\",\"logger\":\"systemslogs\",\"payload\":\"DHCPREQUEST of 1.2.3.4 on eth0 to 5.6.7.8 port 67 (xid=0x123456)\"}",
            "Timestamp": "2017-05-25T07:14:16.103Z",
            "SignatureVersion": "1",
            "Signature": "examplesignatureabcd",
            "SigningCertURL": "https://sns.us-west-2.amazonaws.com/SimpleNotificationService-12345.pem",
            "UnsubscribeURL": "https://sns.us-west-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-west-2:123456789:example-logs-mozdef:adsf0laser013"
        }
        self.consumer.on_message(event)
        expected_event = {
            u'category': u'syslog',
            u'details': {u'logger': u'systemslogs'},
            u'hostname': u'abcdefghostname',
            u'mozdefhostname': u'unittest.hostname',
            u'processid': u'123',
            u'processname': u'dhclient',
            u'receivedtimestamp': u'2017-05-26T17:47:17.813876+00:00',
            u'severity': u'INFO',
            u'source': u'UNKNOWN',
            u'summary': u'DHCPREQUEST of 1.2.3.4 on eth0 to 5.6.7.8 port 67 (xid=0x123456)',
            u'tags': [u'example-logs-mozdef'],
            u'timestamp': u'2017-05-25T07:14:15+00:00',
            u'utctimestamp': u'2017-05-25T07:14:15+00:00',
            u'plugins': [],
            # Fix: also verify the normalized event type written by the worker.
            u'type': 'event'
        }
        self.search_and_verify_event(expected_event)

    def test_sso_event(self):
        """An SSO-dashboard feedback notification keeps its details intact."""
        message_dict = {
            u'category': u'user_feedback',
            u'details': {
                u'action': u'escalate',
                u'alert_information': {
                    u'alert_code': u'12345',
                    u'alert_id': u'abcdefg',
                    u'alert_str_json': u'{"url": "https://www.mozilla.org/alert", "severity": "NOTICE", "tags": ["geomodel"], "utctimestamp": "1976-09-13T07:43:49+00:00", "category": "geomodel", "summary": "christianherring@gmail.com NEWCOUNTRY New York, Mauritania access from 25.141.235.246", "details": {"locality_details": {"city": "New York", "country": "Mauritania"}, "category": "NEWCOUNTRY", "principal": "christianherring@gmail.com", "source_ip": "25.141.235.246"}}',
                    u'date': u'1998-06-24',
                    u'description': u'This alert is created based on geo ip information about the last login of a user.',
                    u'duplicate': False,
                    u'last_update': 1524700512,
                    u'risk': u'high',
                    u'state': u'escalate',
                    u'summary': u'Did you recently login from New York, Mauritania (25.141.235.246)?',
                    u'url': u'https://www.mozilla.org',
                    u'url_title': u'Get Help',
                    u'user_id': u'ad|Mozilla-LDAP-Dev|ttesterson'
                }
            }
        }
        event = {
            u'Message': json.dumps(message_dict),
            u'MessageId': u'123456-248e-5b78-84c5-46ac332ea6cd',
            u'Signature': u'abcdefgh',
            u'SignatureVersion': u'1',
            u'SigningCertURL': u'https://sns.us-west-2.amazonaws.com/SimpleNotificationService-1098765.pem',
            u'Subject': u'sso-dashboard-user-feedback',
            u'Timestamp': u'2018-04-25T23:55:12.854Z',
            u'TopicArn': u'arn:aws:sns:us-west-2:7777777777:SSODashboardAlertFeedback',
            u'Type': u'Notification',
            u'UnsubscribeURL': u'https://sns.us-west-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-west-2:7777777777:SSODashboardAlertFeedback:123456-248e-5b78-84c5-46ac332ea6cd'
        }
        self.consumer.on_message(event)
        expected_event = {
            u'category': u'user_feedback',
            u'details': {
                u'action': u'escalate',
                u'alert_information': {
                    u'alert_code': u'12345',
                    u'alert_id': u'abcdefg',
                    u'alert_str_json': message_dict['details']['alert_information']['alert_str_json'],
                    u'date': u'1998-06-24',
                    u'description': u'This alert is created based on geo ip information about the last login of a user.',
                    u'duplicate': False,
                    u'last_update': 1524700512,
                    u'risk': u'high',
                    u'state': u'escalate',
                    u'summary': u'Did you recently login from New York, Mauritania (25.141.235.246)?',
                    u'url': u'https://www.mozilla.org',
                    u'url_title': u'Get Help',
                    u'user_id': u'ad|Mozilla-LDAP-Dev|ttesterson'
                }
            },
            u'hostname': u'UNKNOWN',
            u'mozdefhostname': u'unittest.hostname',
            u'processid': u'UNKNOWN',
            u'processname': u'UNKNOWN',
            u'receivedtimestamp': u'2018-04-26T00:11:23.479565+00:00',
            u'severity': u'INFO',
            u'source': u'UNKNOWN',
            u'summary': u'UNKNOWN',
            u'tags': [u'example-logs-mozdef'],
            u'timestamp': u'2018-04-26T00:11:23.479771+00:00',
            u'utctimestamp': u'2018-04-26T00:11:23.479771+00:00',
            u'plugins': [],
            # Fix: also verify the normalized event type written by the worker.
            u'type': 'event'
        }
        self.search_and_verify_event(expected_event)
Fix the esworker SNS/SQS test to also verify the normalized event's `type` field.
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
import json
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../mq"))
from mq.esworker_sns_sqs import taskConsumer
from mozdef_util.utilities.dot_dict import DotDict
from mozdef_util.query_models import SearchQuery, ExistsMatch
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from unit_test_suite import UnitTestSuite
class TestEsworkerSNSSQS(UnitTestSuite):
    """Exercise ``mq.esworker_sns_sqs.taskConsumer`` end to end.

    Raw SNS notifications go in through ``on_message()``; the tests then
    read the single resulting document back out of Elasticsearch and
    compare it field by field against the expected normalized event.
    """

    def setup(self):
        """Wire a taskConsumer to the unit-test Elasticsearch client."""
        super(TestEsworkerSNSSQS, self).setup()
        queue_name = 'example-logs-mozdef'
        consumer_options = DotDict(
            {
                "esbulksize": 0,
                "mozdefhostname": "unittest.hostname",
                "taskexchange": queue_name,
                'plugincheckfrequency': 120,
            }
        )
        self.consumer = taskConsumer('abc', queue_name, self.es_client, consumer_options)

    def search_and_verify_event(self, expected_event):
        """Assert ES holds exactly one tagged event matching expected_event."""
        self.refresh('events')
        query = SearchQuery(minutes=5)
        query.add_must(ExistsMatch('tags'))
        hits = query.execute(self.es_client)['hits']
        assert len(hits) == 1
        self.verify_event(hits[0]['_source'], expected_event)

    def test_syslog_event(self):
        """A fluentd syslog notification becomes a normalized syslog event."""
        notification = {
            "Type": "Notification",
            "MessageId": "abcdefg",
            "TopicArn": "arn:aws:sns:us-west-2:123456789:example-logs-mozdef",
            "Subject": "Fluentd-Notification",
            "Message": "{\"time\":\"2017-05-25 07:14:15 +0000\",\"timestamp\":\"2017-05-25T07:14:15+00:00\",\"hostname\":\"abcdefghostname\",\"pname\":\"dhclient\",\"processid\":\"[123]\",\"type\":\"syslog\",\"logger\":\"systemslogs\",\"payload\":\"DHCPREQUEST of 1.2.3.4 on eth0 to 5.6.7.8 port 67 (xid=0x123456)\"}",
            "Timestamp": "2017-05-25T07:14:16.103Z",
            "SignatureVersion": "1",
            "Signature": "examplesignatureabcd",
            "SigningCertURL": "https://sns.us-west-2.amazonaws.com/SimpleNotificationService-12345.pem",
            "UnsubscribeURL": "https://sns.us-west-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-west-2:123456789:example-logs-mozdef:adsf0laser013"
        }
        self.consumer.on_message(notification)
        expected_event = {
            u'category': u'syslog',
            u'details': {u'logger': u'systemslogs'},
            u'hostname': u'abcdefghostname',
            u'mozdefhostname': u'unittest.hostname',
            u'processid': u'123',
            u'processname': u'dhclient',
            u'receivedtimestamp': u'2017-05-26T17:47:17.813876+00:00',
            u'severity': u'INFO',
            u'source': u'UNKNOWN',
            u'summary': u'DHCPREQUEST of 1.2.3.4 on eth0 to 5.6.7.8 port 67 (xid=0x123456)',
            u'tags': [u'example-logs-mozdef'],
            u'timestamp': u'2017-05-25T07:14:15+00:00',
            u'utctimestamp': u'2017-05-25T07:14:15+00:00',
            u'plugins': [],
            u'type': 'event'
        }
        self.search_and_verify_event(expected_event)

    def test_sso_event(self):
        """An SSO-dashboard feedback notification keeps its details intact."""
        message_dict = {
            u'category': u'user_feedback',
            u'details': {
                u'action': u'escalate',
                u'alert_information': {
                    u'alert_code': u'12345',
                    u'alert_id': u'abcdefg',
                    u'alert_str_json': u'{"url": "https://www.mozilla.org/alert", "severity": "NOTICE", "tags": ["geomodel"], "utctimestamp": "1976-09-13T07:43:49+00:00", "category": "geomodel", "summary": "christianherring@gmail.com NEWCOUNTRY New York, Mauritania access from 25.141.235.246", "details": {"locality_details": {"city": "New York", "country": "Mauritania"}, "category": "NEWCOUNTRY", "principal": "christianherring@gmail.com", "source_ip": "25.141.235.246"}}',
                    u'date': u'1998-06-24',
                    u'description': u'This alert is created based on geo ip information about the last login of a user.',
                    u'duplicate': False,
                    u'last_update': 1524700512,
                    u'risk': u'high',
                    u'state': u'escalate',
                    u'summary': u'Did you recently login from New York, Mauritania (25.141.235.246)?',
                    u'url': u'https://www.mozilla.org',
                    u'url_title': u'Get Help',
                    u'user_id': u'ad|Mozilla-LDAP-Dev|ttesterson'
                }
            }
        }
        notification = {
            u'Message': json.dumps(message_dict),
            u'MessageId': u'123456-248e-5b78-84c5-46ac332ea6cd',
            u'Signature': u'abcdefgh',
            u'SignatureVersion': u'1',
            u'SigningCertURL': u'https://sns.us-west-2.amazonaws.com/SimpleNotificationService-1098765.pem',
            u'Subject': u'sso-dashboard-user-feedback',
            u'Timestamp': u'2018-04-25T23:55:12.854Z',
            u'TopicArn': u'arn:aws:sns:us-west-2:7777777777:SSODashboardAlertFeedback',
            u'Type': u'Notification',
            u'UnsubscribeURL': u'https://sns.us-west-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-west-2:7777777777:SSODashboardAlertFeedback:123456-248e-5b78-84c5-46ac332ea6cd'
        }
        self.consumer.on_message(notification)
        expected_event = {
            u'category': u'user_feedback',
            u'details': {
                u'action': u'escalate',
                u'alert_information': {
                    u'alert_code': u'12345',
                    u'alert_id': u'abcdefg',
                    u'alert_str_json': message_dict['details']['alert_information']['alert_str_json'],
                    u'date': u'1998-06-24',
                    u'description': u'This alert is created based on geo ip information about the last login of a user.',
                    u'duplicate': False,
                    u'last_update': 1524700512,
                    u'risk': u'high',
                    u'state': u'escalate',
                    u'summary': u'Did you recently login from New York, Mauritania (25.141.235.246)?',
                    u'url': u'https://www.mozilla.org',
                    u'url_title': u'Get Help',
                    u'user_id': u'ad|Mozilla-LDAP-Dev|ttesterson'
                }
            },
            u'hostname': u'UNKNOWN',
            u'mozdefhostname': u'unittest.hostname',
            u'processid': u'UNKNOWN',
            u'processname': u'UNKNOWN',
            u'receivedtimestamp': u'2018-04-26T00:11:23.479565+00:00',
            u'severity': u'INFO',
            u'source': u'UNKNOWN',
            u'summary': u'UNKNOWN',
            u'tags': [u'example-logs-mozdef'],
            u'timestamp': u'2018-04-26T00:11:23.479771+00:00',
            u'utctimestamp': u'2018-04-26T00:11:23.479771+00:00',
            u'plugins': [],
            u'type': 'event'
        }
        self.search_and_verify_event(expected_event)
|
# -*- coding: utf-8 -*-
"""Support for the EXASOL database.
Auto Increment Behavior
-----------------------
``IDENTITY`` columns are supported by using SQLAlchemy
``schema.Sequence()`` objects. Example:
from sqlalchemy import Table, Integer, String, Sequence, Column
Table('test', metadata,
Column('id', Integer,
Sequence('blah',1000), primary_key=True),
Column('name', String(20))
).create(some_engine)
will yield::
CREATE TABLE test (
id INTEGER IDENTITY 1000,
name VARCHAR(20) NULL,
PRIMARY KEY(id)
)
Note that the ``start`` value for sequences is optional and will default to 1.
The start value of a sequence cannot be retrieved when reflecting a Table
object.
The autoincrement flag for Column objects is not supported by exadialect.
Identifier Casing
-----------------
EXASol mimics the behavior of Oracle. Thus, for this dialect implementation
the Oracle dialect was taken as a reference.
In EXASol, the data dictionary represents all case insensitive identifier names
using UPPERCASE text. SQLAlchemy, on the other hand, considers all-lowercase
identifiers to be case insensitive. The Oracle dialect converts identifiers to
and from those two formats during schema level communication, such as reflection
of tables and indexes.
It is recommended to work with all lowercase identifiers on the SQLAlchemy side.
These are treated as case insensitive identifiers by SQLAlchemy. The EXASol
dialect takes care of converting them to the internal case insensitive
representation (all uppercase).
"""
from decimal import Decimal
from sqlalchemy import sql, schema, types as sqltypes, util, event
from sqlalchemy.schema import AddConstraint
from sqlalchemy.engine import default, reflection
from sqlalchemy.sql import compiler
from datetime import date, datetime
import re
# Keywords reserved by the EXASol server; any identifier matching one of
# these must be quoted by the identifier preparer (see EXAIdentifierPreparer).
RESERVED_WORDS = set([
    'absolute', 'action', 'add', 'after', 'all', 'allocate', 'alter', 'and', 'any', 'append',
    'are', 'array', 'as', 'asc', 'asensitive', 'assertion', 'at', 'attribute', 'authid', 'authorization',
    'before', 'begin', 'between', 'bigint', 'binary', 'bit', 'blob', 'blocked', 'bool', 'boolean',
    'both', 'by', 'byte', 'call', 'called', 'cardinality', 'cascade', 'cascaded', 'case', 'casespecific',
    'cast', 'catalog', 'chain', 'char', 'character', 'characteristics', 'character_set_catalog',
    'character_set_name', 'character_set_schema', 'check', 'checked', 'clob', 'close', 'coalesce',
    'collate', 'collation', 'collation_catalog', 'collation_name', 'collation_schema', 'column',
    'commit', 'condition', 'connection', 'constant', 'constraint', 'constraints',
    'constraint_state_default', 'constructor', 'contains', 'continue', 'control', 'convert',
    'corresponding', 'create', 'cs', 'csv', 'cube', 'current', 'current_date', 'current_path',
    'current_role', 'current_schema', 'current_session', 'current_statement', 'current_time',
    'current_timestamp', 'current_user', 'cursor', 'cycle', 'data', 'datalink', 'date',
    'datetime_interval_code', 'datetime_interval_precision', 'day', 'deallocate', 'dec', 'decimal',
    'declare', 'default', 'deferrable', 'deferred', 'defined', 'definer', 'delete', 'deref', 'derived',
    'desc', 'describe', 'descriptor', 'deterministic', 'disable', 'disabled', 'disconnect', 'dispatch',
    'distinct', 'dlurlcomplete', 'dlurlpath', 'dlurlpathonly', 'dlurlscheme', 'dlurlserver', 'dlvalue',
    'do', 'domain', 'double', 'drop', 'dynamic', 'dynamic_function', 'dynamic_function_code', 'each',
    'else', 'elseif', 'elsif', 'enable', 'enabled', 'end', 'end-exec', 'enforce', 'equals', 'errors',
    'escape', 'except', 'exception', 'exec', 'execute', 'exists', 'exit', 'export', 'external', 'extract',
    'false', 'fbv', 'fetch', 'file', 'final', 'first', 'float', 'following', 'for', 'forall',
    'force', 'format', 'found', 'free', 'from', 'fs', 'full', 'function', 'general', 'generated',
    'geometry', 'get', 'global', 'go', 'goto', 'grant', 'granted', 'group', 'grouping', 'group_concat',
    'having', 'hold', 'hour', 'identity', 'if', 'ifnull', 'immediate', 'implementation', 'import', 'in',
    'index', 'indicator', 'inner', 'inout', 'input', 'insensitive', 'insert', 'instance', 'instantiable',
    'int', 'integer', 'integrity', 'intersect', 'interval', 'into', 'invoker', 'is', 'iterate', 'join',
    'key_member', 'key_type', 'large', 'last', 'lateral', 'ldap', 'leading', 'leave', 'left', 'like',
    'limit', 'local', 'localtime', 'localtimestamp', 'locator', 'log', 'longvarchar', 'loop', 'map',
    'match', 'matched', 'merge', 'method', 'minus', 'minute', 'mod', 'modifies', 'modify', 'module',
    'month', 'names', 'national', 'natural', 'nchar', 'nclob', 'new', 'next', 'nls_date_format',
    'nls_date_language', 'nls_numeric_characters', 'nls_timestamp_format', 'no', 'nologging', 'none',
    'not', 'null', 'nullif', 'number', 'numeric', 'object', 'of', 'off', 'old', 'on', 'only', 'open',
    'option', 'options', 'or', 'order', 'ordering', 'ordinality', 'others', 'out', 'outer', 'output',
    'over', 'overlaps', 'overlay', 'overriding', 'pad', 'parallel_enable', 'parameter',
    'parameter_specific_catalog', 'parameter_specific_name', 'parameter_specific_schema', 'partial',
    'path', 'permission', 'placing', 'position', 'preceding', 'prepare', 'preserve', 'prior',
    'privileges', 'procedure', 'profile', 'random', 'range', 'read', 'reads', 'real', 'recovery',
    'recursive', 'ref', 'references', 'referencing', 'regexp_like', 'relative', 'release', 'rename',
    'repeat', 'replace', 'restore', 'restrict', 'result', 'return', 'returned_length',
    'returned_octet_length', 'returns', 'revoke', 'right', 'rollback', 'rollup', 'routine', 'row',
    'rows', 'rowtype', 'savepoint', 'schema', 'scope', 'script', 'scroll', 'search', 'second',
    'section', 'security', 'select', 'selective', 'self', 'sensitive', 'separator', 'sequence',
    'session', 'session_user', 'set', 'sets', 'shortint', 'similar', 'smallint', 'some', 'source',
    'space', 'specific', 'specifictype', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning', 'sql_bigint',
    'sql_bit', 'sql_char', 'sql_date', 'sql_decimal', 'sql_double', 'sql_float', 'sql_integer',
    'sql_longvarchar', 'sql_numeric', 'sql_real', 'sql_smallint', 'sql_timestamp', 'sql_tinyint',
    'sql_type_date', 'sql_type_timestamp', 'sql_varchar', 'start', 'state', 'statement', 'static',
    'structure', 'style', 'substring', 'subtype', 'sysdate', 'system', 'system_user', 'systimestamp',
    'table', 'temporary', 'text', 'then', 'time', 'timestamp', 'timezone_hour', 'timezone_minute',
    'tinyint', 'to', 'trailing', 'transaction', 'transform', 'transforms', 'translation', 'treat',
    'trigger', 'trim', 'true', 'truncate', 'under', 'union', 'unique', 'unknown', 'unlink', 'unnest',
    'until', 'update', 'usage', 'user', 'using', 'value', 'values', 'varchar', 'varchar2', 'varray',
    'verify', 'view', 'when', 'whenever', 'where', 'while', 'window', 'with', 'within', 'without',
    'work', 'year', 'yes', 'zone',
])

# Per-type compilation overrides (SQLAlchemy type class -> dialect type);
# EXASol needs none beyond the generic compiler, so this stays empty.
colspecs = {
}
class EXABOOLEAN(sqltypes.BOOLEAN):
    """BOOLEAN variant that never emits a CHECK constraint.

    EXASol does not support CHECK constraints, so ``create_constraint``
    defaults to False here instead of the SQLAlchemy default of True.
    """

    def __init__(self, create_constraint=False, name=None):
        super(EXABOOLEAN, self).__init__(create_constraint=create_constraint, name=name)
# Mapping from EXASol catalog type names to SQLAlchemy column types, used
# during table reflection.
ischema_names = {
    'BOOLEAN': EXABOOLEAN,
    'CHAR': sqltypes.CHAR,
    'CLOB': sqltypes.TEXT,
    'DATE': sqltypes.DATE,
    'DECIMAL': sqltypes.DECIMAL,
    'DOUBLE': sqltypes.FLOAT,  # EXASOL maps DOUBLE, DOUBLE PRECISION, FLOAT to DOUBLE PRECISION
                               # internally but returns 'DOUBLE' as type when asking the DB catalog
    # INTERVAL DAY [(p)] TO SECOND [(fp)] TODO: missing support for EXA Datatype, check Oracle Engine
    # INTERVAL YEAR[(p)] TO MONTH TODO: missing support for EXA Datatype, check Oracle Engine
    'TIMESTAMP': sqltypes.TIMESTAMP,
    'VARCHAR': sqltypes.VARCHAR,
}
class EXACompiler(compiler.SQLCompiler):
    """SQL expression compiler with EXASol-specific rendering rules."""

    # EXTRACT field names mapped onto EXASol format codes.
    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'month': '%m',
            'day': '%d',
            'year': '%Y',
            'second': '%S',
            'hour': '%H',
            'doy': '%j',
            'minute': '%M',
            'epoch': '%s',
            'dow': '%w',
            'week': '%W'
        })

    def visit_now_func(self, fn, **kw):
        # EXASol spells NOW() as CURRENT_TIMESTAMP.
        return "CURRENT_TIMESTAMP"

    def visit_true(self, expr, **kw):
        # Boolean literals are rendered as integers.
        return '1'

    def visit_false(self, expr, **kw):
        return '0'

    def visit_char_length_func(self, fn, **kw):
        # char_length() maps to EXASol's length() function.
        return "length%s" % self.function_argspec(fn)

    def limit_clause(self, select):
        """Render LIMIT; OFFSET is unsupported and only triggers a warning."""
        clause = ""
        if select._limit is not None:
            clause += "\n LIMIT %d" % int(select._limit)
        if select._offset is not None:
            util.warn("EXASolution does not support OFFSET")
        return clause

    def for_update_clause(self, select):
        """EXASol has no SELECT ... FOR UPDATE; warn and render nothing."""
        util.warn("EXASolution does not support SELECT ... FOR UPDATE")
        return ''

    def default_from(self):
        """Called when a ``SELECT`` statement has no froms,
        and no ``FROM`` clause is to be appended.
        """
        return " FROM DUAL"
class EXADDLCompiler(compiler.DDLCompiler):
    """DDL compiler emitting EXASol-specific CREATE TABLE constructs."""

    def get_column_specification(self, column, **kwargs):
        """Render '<name> <type> [IDENTITY [start]] [DEFAULT ...] [NOT NULL]'.

        A column that is the table's autoincrement column and whose default
        is either absent or a Sequence is rendered as an IDENTITY column
        (with the sequence start value, if positive); all other columns get
        a regular DEFAULT clause when one exists.
        """
        colspec = self.preparer.format_column(column)
        colspec += " " + self.dialect.type_compiler.process(column.type)
        # Fix: the condition carried a redundant, always-true ``True and``
        # term (leftover from an edited condition); removed without changing
        # the logic.
        if column is column.table._autoincrement_column and (
                column.default is None or
                isinstance(column.default, schema.Sequence)):
            colspec += " IDENTITY"
            if isinstance(column.default, schema.Sequence) and \
                    column.default.start > 0:
                colspec += " " + str(column.default.start)
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default
        if not column.nullable:
            colspec += " NOT NULL"
        return colspec

    def create_table_constraints(self, table):
        """Inline only the primary key; attach all other constraints later.

        Non-PK constraints are registered as ``after_create`` events that
        issue ALTER TABLE ... ADD CONSTRAINT statements, since they cannot
        all be declared inside CREATE TABLE.
        """
        table_constraint_str = ", \n\t".join(
            p for p in
            (self.process(constraint)
             for constraint in [table.primary_key]
             if (
                 constraint._create_rule is None or
                 constraint._create_rule(self))
             and (
                 not self.dialect.supports_alter or
                 not getattr(constraint, 'use_alter', False)
             )) if p is not None
        )
        for c in [c for c in table._sorted_constraints if c is not table.primary_key]:
            event.listen(
                table,
                "after_create",
                AddConstraint(c)
            )
        return table_constraint_str

    def define_constraint_remote_table(self, constraint, table, preparer):
        """Format the remote table clause of a CREATE CONSTRAINT clause."""
        return preparer.format_table(table, use_schema=True)

    def visit_create_index(self, create):
        """EXASol manages indexes internally; explicit CREATE INDEX is rejected."""
        raise NotImplementedError()

    def visit_drop_index(self, drop):
        """EXASol manages indexes internally; explicit DROP INDEX is rejected."""
        raise NotImplementedError()
class EXATypeCompiler(compiler.GenericTypeCompiler):
    """Type compiler translating generic SQLAlchemy types to EXASol DDL."""

    def visit_datetime(self, type_):
        # EXASol has no separate DATETIME type; TIMESTAMP covers it.
        return self.visit_TIMESTAMP(type_)

    def visit_large_binary(self, type_):
        # No native LARGE BINARY type in EXASol; render as BLOB.
        return self.visit_BLOB(type_)
class EXAIdentifierPreparer(compiler.IdentifierPreparer):
    # Use the EXASol reserved-word list so colliding identifiers get quoted.
    reserved_words = RESERVED_WORDS
class EXAExecutionContext(default.DefaultExecutionContext):
    """Execution context: IDENTITY handling and client-side parameter inlining."""

    executemany = True

    def fire_sequence(self, default, type_):
        # Standalone sequences do not exist in EXASol.
        # Fix: the original raised the ``NotImplemented`` constant, which is
        # not an exception; raise NotImplementedError instead.
        raise NotImplementedError

    def get_insert_default(self, column):
        """Return 'DEFAULT' for sequence-backed (IDENTITY) columns."""
        if column.default.is_sequence:
            return 'DEFAULT'
        else:
            # Fix: super() returns a bound method, so ``self`` must not be
            # passed again as the first argument (the original call shifted
            # ``self`` into the ``column`` parameter).
            return super(EXAExecutionContext, self).get_insert_default(column)

    def get_lastrowid(self):
        """Fetch the last IDENTITY value assigned for the statement's table.

        Looks up SYS.EXA_ALL_COLUMNS for the single autoincrement primary
        key column of the target table; warns and raises if the table
        declares more than one.
        """
        columns = self.compiled.sql_compiler.statement.table.columns
        autoinc_pk_columns = \
            [c.name for c in columns if c.autoincrement and c.primary_key]
        if len(autoinc_pk_columns) == 0:
            return None
        elif len(autoinc_pk_columns) > 1:
            util.warn("Table with more than one autoincrement, primary key"\
                      " Column!")
            raise Exception
        else:
            id_col = self.dialect.denormalize_name(autoinc_pk_columns[0])
            id_col = self.compiled.render_literal_value(id_col, None)
            table = self.compiled.sql_compiler.statement.table.name
            table = self.dialect.denormalize_name(table)
            table = self.compiled.render_literal_value(table, None)
            sql_stmnt = "SELECT column_identity from SYS.EXA_ALL_COLUMNS "\
                        "WHERE column_object_type = 'TABLE' and column_table "\
                        "= " + table + " AND column_name = " + id_col
            schema = self.compiled.sql_compiler.statement.table.schema
            if schema is not None:
                schema = self.dialect.denormalize_name(schema)
                schema = self.compiled.render_literal_value(schema, None)
                sql_stmnt += " AND column_schema = " + schema
            cursor = self.create_cursor()
            cursor.execute(sql_stmnt)
            # column_identity appears to hold the next value to be assigned,
            # hence the -1 -- TODO confirm against the EXASol system tables.
            lastrowid = cursor.fetchone()[0] - 1
            cursor.close()
            return lastrowid

    def pre_exec(self):
        """
        This routine inserts the parameters into the compiled query prior to executing it.
        The reason for this workaround is the poor performance for prepared statements.
        Note: Parameter replacement is done for server versions < 4.1.8 or
        in case a delete query is executed.
        (Python 2 only: relies on ``long``/``unicode`` and ``str.decode``.)
        """
        if self.isdelete or self.root_connection.dialect.server_version_info < (4, 1, 8):
            db_query = self.statement
            # executemany: expand the VALUES clause once per extra parameter
            # set before substituting values positionally.
            for i in range(1, len(self.parameters)):
                db_query += ", (" + ", ".join(['?'] * len(self.parameters[i])) + ")"
            for db_para in self.parameters:
                for value in db_para:
                    ident = '?'
                    if value is None:
                        db_query = db_query.replace(ident, 'NULL', 1)
                    elif isinstance(value, bool):
                        # Fix: bool is a subclass of int, so this check must
                        # come before the int branch; previously True/False
                        # were rendered as the invalid literals 'True'/'False'.
                        db_query = db_query.replace(ident, '1' if value else '0', 1)
                    elif isinstance(value, (int, long)):
                        db_query = db_query.replace(ident, str(value), 1)
                    elif isinstance(value, (float, Decimal)):
                        db_query = db_query.replace(ident, str(float(value)), 1)
                    elif isinstance(value, datetime):
                        db_query = db_query.replace(ident, "to_timestamp('%s', 'YYYY-MM-DD HH24:MI:SS.FF6')" % value.strftime('%Y-%m-%d %H:%M:%S.%f'), 1)
                    elif isinstance(value, date):
                        db_query = db_query.replace(ident, "to_date('%s', 'YYYY-MM-DD')" % value.strftime('%Y-%m-%d'), 1)
                    elif isinstance(value, str):
                        # NOTE(review): string values are embedded without
                        # quote escaping; a value containing a single quote
                        # breaks (or injects into) the statement -- flagging,
                        # not changing, the existing behavior.
                        db_query = db_query.replace(ident, "'%s'" % value.decode('UTF-8'), 1)
                    elif isinstance(value, unicode):
                        db_query = db_query.replace(ident, "'%s'" % value, 1)
                    else:
                        raise TypeError('Data type not supported: %s' % type(value))
            self.statement = db_query
            self.parameters = [[]]
class EXADialect(default.DefaultDialect):
    """SQLAlchemy dialect for the EXASolution database (qmark paramstyle)."""

    name = 'exasol'
    supports_alter = True
    supports_unicode_statements = True
    supports_unicode_binds = True
    supports_default_values = True
    supports_empty_insert = False
    # Standalone sequences are not supported; IDENTITY columns replace them.
    supports_sequences = False
    # sequences_optional = True
    # controls in SQLAlchemy base which columns are part of an insert statement
    # postfetch_lastrowid = True
    supports_cast = True
    requires_name_normalize = True
    default_paramstyle = 'qmark'
    execution_ctx_cls = EXAExecutionContext
    statement_compiler = EXACompiler
    ddl_compiler = EXADDLCompiler
    type_compiler = EXATypeCompiler
    preparer = EXAIdentifierPreparer
    ischema_names = ischema_names
    colspecs = colspecs
    isolation_level = None

    def __init__(self, isolation_level=None, native_datetime=False, **kwargs):
        # ``native_datetime`` is accepted for interface compatibility but
        # currently unused.
        default.DefaultDialect.__init__(self, **kwargs)
        self.isolation_level = isolation_level

    # EXASol effectively only offers SERIALIZABLE.
    _isolation_lookup = {
        'SERIALIZABLE': 0
    }

    def normalize_name(self, name):
        """
        Converting EXASol case insensitive identifiers (upper case)
        to SQLAlchemy case insensitive identifiers (lower case)
        """
        if name is None:
            return None
        # Py2K
        if isinstance(name, str):
            name = name.decode(self.encoding)
        # end Py2K
        if name.upper() == name and \
                not self.identifier_preparer._requires_quotes(name.lower()):
            return name.lower()
        else:
            return name

    def denormalize_name(self, name):
        """
        Converting SQLAlchemy case insensitive identifiers (lower case)
        to EXASol case insensitive identifiers (upper case)
        """
        if name is None or len(name) == 0:
            return None
        elif name.lower() == name and \
                not self.identifier_preparer._requires_quotes(name.lower()):
            name = name.upper()
        # Py2K
        if not self.supports_unicode_binds:
            name = name.encode(self.encoding)
        else:
            name = unicode(name)
        # end Py2K
        return name

    def get_isolation_level(self, connection):
        # Only SERIALIZABLE exists; see _isolation_lookup.
        return "SERIALIZABLE"

    def on_connect(self):
        # TODO: set isolation level
        pass

    # never called during reflection
    @reflection.cache
    def get_schema_names(self, connection, **kw):
        """Return all schema names from the EXASol catalog."""
        sql_stmnt = "select SCHEMA_NAME from SYS.EXA_SCHEMAS"
        rs = connection.execute(sql.text(sql_stmnt))
        return [row[0] for row in rs]

    @reflection.cache
    def get_table_names(self, connection, schema, **kw):
        """Return table names in ``schema`` (or the connection's schema)."""
        schema = schema or connection.engine.url.database
        sql_stmnt = "SELECT table_name FROM SYS.EXA_ALL_TABLES WHERE table_schema = "
        if schema is None:
            sql_stmnt += "CURRENT_SCHEMA ORDER BY table_name"
            # Consistency fix: wrap in sql.text like every other statement
            # in this dialect (no bound parameters in this branch).
            rs = connection.execute(sql.text(sql_stmnt))
        else:
            sql_stmnt += ":schema ORDER BY table_name"
            rs = connection.execute(sql.text(sql_stmnt), \
                                    schema=self.denormalize_name(schema))
        return [self.normalize_name(row[0]) for row in rs]

    def has_table(self, connection, table_name, schema=None):
        """Return True if ``table_name`` exists (optionally within ``schema``)."""
        schema = schema or connection.engine.url.database
        sql_stmnt = "SELECT table_name from SYS.EXA_ALL_TABLES "\
                    "WHERE table_name = :table_name "
        if schema is not None:
            sql_stmnt += "AND table_schema = :schema"
        rp = connection.execute(
            sql.text(sql_stmnt),
            table_name=self.denormalize_name(table_name),
            schema=self.denormalize_name(schema))
        row = rp.fetchone()
        return (row is not None)

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        """Return view names in ``schema`` (or the connection's schema)."""
        schema = schema or connection.engine.url.database
        sql_stmnt = "SELECT view_name FROM SYS.EXA_ALL_VIEWS "
        if schema is not None:
            sql_stmnt += "WHERE view_schema = :schema "
        sql_stmnt += " ORDER BY view_name"
        rs = connection.execute(sql.text(sql_stmnt),
                                schema=self.denormalize_name(schema))
        return [self.normalize_name(row[0]) for row in rs]

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return the source text of the named view, or None if not found."""
        schema = schema or connection.engine.url.database
        # Fix: the previous statement bound :view_name without referencing it
        # in the SQL (no WHERE view_name predicate), so it did not filter by
        # the requested view; it also executed a raw string instead of
        # sql.text(). Bind parameters are now only supplied when referenced.
        sql_stmnt = "SELECT view_text FROM sys.exa_all_views "\
                    "WHERE view_name = :view_name "
        params = {'view_name': self.denormalize_name(view_name)}
        if schema is not None:
            sql_stmnt += "AND view_schema = :schema "
            params['schema'] = self.denormalize_name(schema)
        rp = connection.execute(sql.text(sql_stmnt), **params).scalar()
        if rp:
            return rp.decode(self.encoding)
        else:
            return None

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Reflect column metadata for ``table_name`` from the EXASol catalog."""
        schema = schema or connection.engine.url.database
        sql_stmnt = "SELECT column_name, column_type, column_maxsize, column_num_prec, column_num_scale, " \
                    "column_is_nullable, column_default, column_identity FROM sys.exa_all_columns " \
                    "WHERE column_object_type IN ('TABLE', 'VIEW') AND column_table = :table_name AND column_schema = "
        if schema is None:
            sql_stmnt += "CURRENT_SCHEMA "
        else:
            sql_stmnt += ":schema "
        sql_stmnt += "ORDER BY column_ordinal_position"
        c = connection.execute(sql.text(sql_stmnt),
                               table_name=self.denormalize_name(table_name),
                               schema=self.denormalize_name(schema))
        columns = []
        for row in c:
            (colname, coltype, length, precision, scale, nullable, default, identity) = \
                (row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7])

            # FIXME: Missing type support: INTERVAL DAY [(p)] TO SECOND [(fp)], INTERVAL YEAR[(p)] TO MONTH
            # remove ASCII, UTF8 and spaces from char-like types
            coltype = re.sub(r'ASCII|UTF8| ', '', coltype)
            # remove precision and scale addition from numeric types
            coltype = re.sub(r'\(\d+(\,\d+)?\)', '', coltype)
            try:
                if coltype == 'VARCHAR':
                    coltype = sqltypes.VARCHAR(length)
                elif coltype == 'DECIMAL':
                    # this Dialect forces INTTYPESINRESULTSIFPOSSIBLE=y on ODBC level
                    # thus, we need to convert DECIMAL(<=18,0) back to INTEGER type
                    if scale == 0 and precision <= 18:
                        coltype = sqltypes.INTEGER()
                    else:
                        coltype = sqltypes.DECIMAL(precision, scale)
                else:
                    coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, colname))
                coltype = sqltypes.NULLTYPE

            cdict = {
                'name': self.normalize_name(colname),
                'type': coltype,
                'nullable': nullable,
                'default': default
            }
            # if we have a positive identity value add a sequence
            # NOTE(review): relies on Python 2 ordering semantics when
            # ``identity`` is None (None >= 0 is False there).
            if identity is not None and identity >= 0:
                cdict['sequence'] = {'name': ''}
                # TODO: we have to possibility to encode the current identity value count
                # into the column metadata. But the consequence is that it would also be used
                # as start value in CREATE statements. For now the current value is ignored.
                # Add it by changing the dict to: {'name':'', 'start': int(identity)}
            columns.append(cdict)
        return columns

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Reflect the primary key columns and constraint name."""
        schema = schema or connection.engine.url.database
        pkeys = []
        constraint_name = None
        sql_stmnt = "SELECT column_name, constraint_name from SYS.EXA_ALL_CONSTRAINT_COLUMNS " \
                    "WHERE constraint_type = 'PRIMARY KEY' AND constraint_table = :table_name "
        if schema is not None:
            sql_stmnt += "AND constraint_schema = :schema"
        rp = connection.execute(sql.text(sql_stmnt),
                                table_name=self.denormalize_name(table_name),
                                schema=self.denormalize_name(schema))
        constraint_data = rp.fetchall()
        for row in constraint_data:
            pkeys.append(self.normalize_name(row[0]))
            constraint_name = self.normalize_name(row[1])
        return {'constrained_columns': pkeys, 'name': constraint_name}

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Reflect foreign key constraints for ``table_name``.

        When no explicit ``schema`` is passed, the connection's schema is
        used for the lookup and ``referred_schema`` is reported as None for
        references into that same schema (what the SQLA test suite expects).
        """
        schema_int = schema or connection.engine.url.database
        sql_stmnt = "SELECT constraint_name, column_name, referenced_schema, referenced_table, " \
                    "referenced_column FROM SYS.EXA_ALL_CONSTRAINT_COLUMNS " \
                    "WHERE constraint_type = 'FOREIGN KEY' AND constraint_table = :table_name "
        if schema_int is not None:
            sql_stmnt += "AND constraint_schema = :schema "
        sql_stmnt += "ORDER BY ordinal_position"
        rp = connection.execute(sql.text(sql_stmnt),
                                table_name=self.denormalize_name(table_name),
                                schema=self.denormalize_name(schema_int))
        constraint_data = rp.fetchall()

        def fkey_rec():
            # Skeleton record filled in per constraint name.
            return {
                'name': None,
                'constrained_columns': [],
                'referred_schema': None,
                'referred_table': None,
                'referred_columns': []
            }
        fkeys = util.defaultdict(fkey_rec)
        for row in constraint_data:
            (cons_name, local_column, remote_schema, remote_table, remote_column) = \
                (row[0], row[1], row[2], row[3], row[4])
            rec = fkeys[self.normalize_name(cons_name)]
            rec['name'] = self.normalize_name(cons_name)
            local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']
            if not rec['referred_table']:
                rec['referred_table'] = self.normalize_name(remote_table)
                # we need to take care of calls without schema. the sqla test suite
                # expects referred_schema to be None if None is passed in to this function
                if schema is None and schema_int == self.normalize_name(remote_schema):
                    rec['referred_schema'] = None
                else:
                    rec['referred_schema'] = self.normalize_name(remote_schema)
            local_cols.append(self.normalize_name(local_column))
            remote_cols.append(self.normalize_name(remote_column))
        return fkeys.values()

    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        schema = schema or connection.engine.url.database
        # EXASolution has no indexes
        # TODO: check if indexes are used by SQLA for optimizing SQL Statements.
        # If so, we should return all columns as being indexed
        return []
in get_foreign_keys():
- return only keys for tables in the current schema when no explicit schema filter is given.
- does NOT fix test.test_suite.ComponentReflectionTest.test_get_foreign_keys:
  that test expects referenced_schema=None in this case, while EXASolution always fills the column with the actual schema name.
# -*- coding: utf-8 -*-
"""Support for the EXASOL database.
Auto Increment Behavior
-----------------------
``IDENTITY`` columns are supported by using SQLAlchemy
``schema.Sequence()`` objects. Example:
from sqlalchemy import Table, Integer, String, Sequence, Column
Table('test', metadata,
Column('id', Integer,
Sequence('blah',1000), primary_key=True),
Column('name', String(20))
).create(some_engine)
will yield::
CREATE TABLE test (
id INTEGER IDENTITY 1000,
name VARCHAR(20) NULL,
PRIMARY KEY(id)
)
Note that the ``start`` value for sequences is optional and will default to 1.
The start value of a sequence cannot be retrieved when reflecting a Table
object.
The ``autoincrement`` flag on Column objects is not supported by this dialect.
Identifier Casing
-----------------
EXASol mimics the behavior of Oracle. Thus, for this dialect implementation
the Oracle dialect was taken as a reference.
In EXASol, the data dictionary represents all case insensitive identifier names
using UPPERCASE text. SQLAlchemy, on the other hand, considers all-lowercase
identifiers to be case insensitive. The Oracle dialect converts identifiers to
and from those two formats during schema-level communication, such as reflection
of tables and indexes.
It is recommended to work with all lowercase identifiers on the SQLAlchemy side.
These are treated as case insensitive identifiers by SQLAlchemy. The EXASol
dialect takes care of converting them to the internal case insensitive
representation (all uppercase).
"""
from decimal import Decimal
from sqlalchemy import sql, schema, types as sqltypes, util, event
from sqlalchemy.schema import AddConstraint
from sqlalchemy.engine import default, reflection
from sqlalchemy.sql import compiler
from datetime import date, datetime
import re
RESERVED_WORDS = set([
'absolute', 'action', 'add', 'after', 'all', 'allocate', 'alter', 'and', 'any', 'append',
'are', 'array', 'as', 'asc', 'asensitive', 'assertion', 'at', 'attribute', 'authid', 'authorization',
'before', 'begin', 'between', 'bigint', 'binary', 'bit', 'blob', 'blocked', 'bool', 'boolean',
'both', 'by', 'byte', 'call', 'called', 'cardinality', 'cascade', 'cascaded', 'case', 'casespecific',
'cast', 'catalog', 'chain', 'char', 'character', 'characteristics', 'character_set_catalog',
'character_set_name', 'character_set_schema', 'check', 'checked', 'clob', 'close', 'coalesce',
'collate', 'collation', 'collation_catalog', 'collation_name', 'collation_schema', 'column',
'commit', 'condition', 'connection', 'constant', 'constraint', 'constraints',
'constraint_state_default', 'constructor', 'contains', 'continue', 'control', 'convert',
'corresponding', 'create', 'cs', 'csv', 'cube', 'current', 'current_date', 'current_path',
'current_role', 'current_schema', 'current_session', 'current_statement', 'current_time',
'current_timestamp', 'current_user', 'cursor', 'cycle', 'data', 'datalink', 'date',
'datetime_interval_code', 'datetime_interval_precision', 'day', 'deallocate', 'dec', 'decimal',
'declare', 'default', 'deferrable', 'deferred', 'defined', 'definer', 'delete', 'deref', 'derived',
'desc', 'describe', 'descriptor', 'deterministic', 'disable', 'disabled', 'disconnect', 'dispatch',
'distinct', 'dlurlcomplete', 'dlurlpath', 'dlurlpathonly', 'dlurlscheme', 'dlurlserver', 'dlvalue',
'do', 'domain', 'double', 'drop', 'dynamic', 'dynamic_function', 'dynamic_function_code', 'each',
'else', 'elseif', 'elsif', 'enable', 'enabled', 'end', 'end-exec', 'enforce', 'equals', 'errors',
'escape', 'except', 'exception', 'exec', 'execute', 'exists', 'exit', 'export', 'external', 'extract',
'false', 'fbv', 'fetch', 'file', 'final', 'first', 'float', 'following', 'for', 'forall',
'force', 'format', 'found', 'free', 'from', 'fs', 'full', 'function', 'general', 'generated',
'geometry', 'get', 'global', 'go', 'goto', 'grant', 'granted', 'group', 'grouping', 'group_concat',
'having', 'hold', 'hour', 'identity', 'if', 'ifnull', 'immediate', 'implementation', 'import', 'in',
'index', 'indicator', 'inner', 'inout', 'input', 'insensitive', 'insert', 'instance', 'instantiable',
'int', 'integer', 'integrity', 'intersect', 'interval', 'into', 'invoker', 'is', 'iterate', 'join',
'key_member', 'key_type', 'large', 'last', 'lateral', 'ldap', 'leading', 'leave', 'left', 'like',
'limit', 'local', 'localtime', 'localtimestamp', 'locator', 'log', 'longvarchar', 'loop', 'map',
'match', 'matched', 'merge', 'method', 'minus', 'minute', 'mod', 'modifies', 'modify', 'module',
'month', 'names', 'national', 'natural', 'nchar', 'nclob', 'new', 'next', 'nls_date_format',
'nls_date_language', 'nls_numeric_characters', 'nls_timestamp_format', 'no', 'nologging', 'none',
'not', 'null', 'nullif', 'number', 'numeric', 'object', 'of', 'off', 'old', 'on', 'only', 'open',
'option', 'options', 'or', 'order', 'ordering', 'ordinality', 'others', 'out', 'outer', 'output',
'over', 'overlaps', 'overlay', 'overriding', 'pad', 'parallel_enable', 'parameter',
'parameter_specific_catalog', 'parameter_specific_name', 'parameter_specific_schema', 'partial',
'path', 'permission', 'placing', 'position', 'preceding', 'prepare', 'preserve', 'prior',
'privileges', 'procedure', 'profile', 'random', 'range', 'read', 'reads', 'real', 'recovery',
'recursive', 'ref', 'references', 'referencing', 'regexp_like', 'relative', 'release', 'rename',
'repeat', 'replace', 'restore', 'restrict', 'result', 'return', 'returned_length',
'returned_octet_length', 'returns', 'revoke', 'right', 'rollback', 'rollup', 'routine', 'row',
'rows', 'rowtype', 'savepoint', 'schema', 'scope', 'script', 'scroll', 'search', 'second',
'section', 'security', 'select', 'selective', 'self', 'sensitive', 'separator', 'sequence',
'session', 'session_user', 'set', 'sets', 'shortint', 'similar', 'smallint', 'some', 'source',
'space', 'specific', 'specifictype', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning', 'sql_bigint',
'sql_bit', 'sql_char', 'sql_date', 'sql_decimal', 'sql_double', 'sql_float', 'sql_integer',
'sql_longvarchar', 'sql_numeric', 'sql_real', 'sql_smallint', 'sql_timestamp', 'sql_tinyint',
'sql_type_date', 'sql_type_timestamp', 'sql_varchar', 'start', 'state', 'statement', 'static',
'structure', 'style', 'substring', 'subtype', 'sysdate', 'system', 'system_user', 'systimestamp',
'table', 'temporary', 'text', 'then', 'time', 'timestamp', 'timezone_hour', 'timezone_minute',
'tinyint', 'to', 'trailing', 'transaction', 'transform', 'transforms', 'translation', 'treat',
'trigger', 'trim', 'true', 'truncate', 'under', 'union', 'unique', 'unknown', 'unlink', 'unnest',
'until', 'update', 'usage', 'user', 'using', 'value', 'values', 'varchar', 'varchar2', 'varray',
'verify', 'view', 'when', 'whenever', 'where', 'while', 'window', 'with', 'within', 'without',
'work', 'year', 'yes', 'zone',
])
colspecs = {
}
class EXABOOLEAN(sqltypes.BOOLEAN):
    """BOOLEAN variant for EXASol, which does not support CHECK constraints."""

    def __init__(self, create_constraint=False, name=None):
        # Forward explicitly by keyword; create_constraint defaults to
        # False so no CHECK constraint is emitted.
        super(EXABOOLEAN, self).__init__(create_constraint=create_constraint, name=name)
# Mapping from type names reported by the EXASol data dictionary to
# SQLAlchemy generic types; consumed during reflection (see get_columns()).
ischema_names = {
    'BOOLEAN': EXABOOLEAN,
    'CHAR': sqltypes.CHAR,
    'CLOB': sqltypes.TEXT,
    'DATE': sqltypes.DATE,
    'DECIMAL': sqltypes.DECIMAL,
    'DOUBLE': sqltypes.FLOAT,  # EXASOL maps DOUBLE, DOUBLE PRECISION, FLOAT to DOUBLE PRECISION
    # internally but returns 'DOUBLE' as type when asking the DB catalog
    # INTERVAL DAY [(p)] TO SECOND [(fp)] TODO: missing support for EXA Datatype, check Oracle Engine
    # INTERVAL YEAR[(p)] TO MONTH TODO: missing support for EXA Datatype, check Oracle Engine
    'TIMESTAMP': sqltypes.TIMESTAMP,
    'VARCHAR': sqltypes.VARCHAR,
}
class EXACompiler(compiler.SQLCompiler):
    """Statement compiler with EXASol-specific SQL rendering."""

    # Field-name mapping used when rendering EXTRACT expressions.
    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'month': '%m',
            'day': '%d',
            'year': '%Y',
            'second': '%S',
            'hour': '%H',
            'doy': '%j',
            'minute': '%M',
            'epoch': '%s',
            'dow': '%w',
            'week': '%W'
        })

    def visit_now_func(self, fn, **kw):
        # func.now() renders as the standard CURRENT_TIMESTAMP.
        return "CURRENT_TIMESTAMP"

    def visit_true(self, expr, **kw):
        # Boolean true literal is rendered as 1.
        return '1'

    def visit_false(self, expr, **kw):
        # Boolean false literal is rendered as 0.
        return '0'

    def visit_char_length_func(self, fn, **kw):
        # char_length() maps to EXASol's length() function.
        return "length%s" % self.function_argspec(fn)

    def limit_clause(self, select):
        """Render a LIMIT clause; OFFSET is not supported and only warns."""
        text = ""
        if select._limit is not None:
            text += "\n LIMIT %d" % int(select._limit)
        if select._offset is not None:
            util.warn("EXASolution does not support OFFSET")
        return text

    def for_update_clause(self, select):
        # Exasol has no "FOR UPDATE"; warn and render nothing.
        util.warn("EXASolution does not support SELECT ... FOR UPDATE")
        return ''

    def default_from(self):
        """Called when a ``SELECT`` statement has no froms,
        and no ``FROM`` clause is to be appended.
        """
        return " FROM DUAL"
class EXADDLCompiler(compiler.DDLCompiler):
    """DDL compiler producing EXASol-specific CREATE/DROP statements."""

    def get_column_specification(self, column, **kwargs):
        """Render a single column clause of a CREATE TABLE statement.

        Autoincrementing primary key columns are rendered with EXASol's
        IDENTITY clause; a ``schema.Sequence`` default supplies the
        identity start value. All other columns get a DEFAULT clause
        when one is configured.
        """
        colspec = self.preparer.format_column(column)
        colspec += " " + self.dialect.type_compiler.process(column.type)
        # NOTE(review): a redundant "True and" term was removed from this
        # condition; the logic is otherwise unchanged.
        if column is column.table._autoincrement_column and \
                (
                    column.default is None or
                    isinstance(column.default, schema.Sequence)
                ):
            colspec += " IDENTITY"
            if isinstance(column.default, schema.Sequence) and \
                    column.default.start > 0:
                colspec += " " + str(column.default.start)
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default
        if not column.nullable:
            colspec += " NOT NULL"
        return colspec

    def create_table_constraints(self, table):
        """Render only the primary key inline; every other constraint is
        deferred to an ALTER TABLE via an "after_create" event."""
        table_constraint_str = ", \n\t".join(p for p in
            (self.process(constraint)
             for constraint in [table.primary_key]
             if (
                 constraint._create_rule is None or
                 constraint._create_rule(self))
             and (
                 not self.dialect.supports_alter or
                 not getattr(constraint, 'use_alter', False)
             )) if p is not None
        )
        # Attach all non-PK constraints after the table exists.
        for c in [c for c in table._sorted_constraints if c is not table.primary_key]:
            event.listen(
                table,
                "after_create",
                AddConstraint(c)
            )
        return table_constraint_str

    def define_constraint_remote_table(self, constraint, table, preparer):
        """Format the remote table clause of a CREATE CONSTRAINT clause."""
        return preparer.format_table(table, use_schema=True)

    def visit_create_index(self, create):
        """EXASol manages indexes internally"""
        raise NotImplementedError()

    def visit_drop_index(self, drop):
        """EXASol manages indexes internally"""
        raise NotImplementedError()
class EXATypeCompiler(compiler.GenericTypeCompiler):
    """Type compiler mapping generic types to EXASol equivalents."""

    def visit_large_binary(self, type_):
        # LargeBinary renders as BLOB.
        return self.visit_BLOB(type_)

    def visit_datetime(self, type_):
        # DateTime renders as TIMESTAMP.
        return self.visit_TIMESTAMP(type_)
class EXAIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier preparer that quotes EXASol reserved words."""

    reserved_words = RESERVED_WORDS
class EXAExecutionContext(default.DefaultExecutionContext):
    """Execution context handling EXASol identity defaults and the
    parameter-inlining workaround implemented in pre_exec()."""

    executemany = True

    def fire_sequence(self, default, type_):
        # Standalone sequences are not supported by this dialect.
        # BUG FIX: the original raised the builtin ``NotImplemented``
        # constant, which is not an exception class; raise the proper
        # ``NotImplementedError`` instead.
        raise NotImplementedError

    def get_insert_default(self, column):
        """Render the INSERT default for ``column``; Sequence-backed
        defaults map to the DEFAULT keyword (IDENTITY columns)."""
        if column.default.is_sequence:
            return 'DEFAULT'
        else:
            # BUG FIX: the original called
            # super(...).get_insert_default(self, column) — the bound
            # super() method already receives self, so the extra argument
            # shifted everything by one and passed self as the column.
            return super(EXAExecutionContext, self).get_insert_default(column)
def get_lastrowid(self):
    """Return the last generated IDENTITY value for the inserted table.

    EXASol's SYS.EXA_ALL_COLUMNS exposes the NEXT identity value of a
    column, so the id of the row just inserted is that value minus one.
    Returns None when the table has no autoincrementing primary key.
    """
    columns = self.compiled.sql_compiler.statement.table.columns
    autoinc_pk_columns = \
        [c.name for c in columns if c.autoincrement and c.primary_key]
    if len(autoinc_pk_columns) == 0:
        # Nothing to report for tables without an identity PK.
        return None
    elif len(autoinc_pk_columns) > 1:
        util.warn("Table with more than one autoincrement, primary key"\
                  " Column!")
        raise Exception
    else:
        # Render table/column/schema names as SQL string literals for the
        # catalog query below.
        id_col = self.dialect.denormalize_name(autoinc_pk_columns[0])
        id_col = self.compiled.render_literal_value(id_col, None)
        table = self.compiled.sql_compiler.statement.table.name
        table = self.dialect.denormalize_name(table)
        table = self.compiled.render_literal_value(table, None)
        sql_stmnt = "SELECT column_identity from SYS.EXA_ALL_COLUMNS "\
                    "WHERE column_object_type = 'TABLE' and column_table "\
                    "= " + table + " AND column_name = " + id_col
        schema = self.compiled.sql_compiler.statement.table.schema
        if schema is not None:
            schema = self.dialect.denormalize_name(schema)
            schema = self.compiled.render_literal_value(schema, None)
            sql_stmnt += " AND column_schema = " + schema
        cursor = self.create_cursor()
        cursor.execute(sql_stmnt)
        # column_identity holds the next value to be assigned; the row
        # just inserted received the previous one.
        lastrowid = cursor.fetchone()[0] - 1
        cursor.close()
        return lastrowid
def pre_exec(self):
    """
    This routine inserts the parameters into the compiled query prior to executing it.
    The reason for this workaround is the poor performance for prepared statements.
    Note: Parameter replacement is done for server versions < 4.1.8 or
    in case a delete query is executed.
    """
    if self.isdelete or self.root_connection.dialect.server_version_info < (4, 1, 8):
        db_query = self.statement
        # Extend the single-row VALUES clause with one '?' placeholder
        # group per additional parameter set (executemany emulation).
        for i in range(1, len(self.parameters)):
            db_query += ", (" + ", ".join(['?'] * len(self.parameters[i])) + ")"
        # Replace each '?' placeholder, left to right, with a literal
        # rendering of the corresponding parameter value.
        for db_para in self.parameters:
            for value in db_para:
                ident = '?'
                if value is None:
                    db_query = db_query.replace(ident, 'NULL', 1)
                elif isinstance(value, (int, long)):
                    db_query = db_query.replace(ident, str(value), 1)
                elif isinstance(value, (float, Decimal)):
                    db_query = db_query.replace(ident, str(float(value)), 1)
                elif isinstance(value, bool):
                    # NOTE(review): bool is a subclass of int, so this
                    # branch appears unreachable — booleans are rendered
                    # by the int branch above. Confirm intended.
                    db_query = db_query.replace(ident, '1' if value else '0', 1)
                elif isinstance(value, datetime):
                    db_query = db_query.replace(ident, "to_timestamp('%s', 'YYYY-MM-DD HH24:MI:SS.FF6')" % value.strftime('%Y-%m-%d %H:%M:%S.%f'), 1)
                elif isinstance(value, date):
                    db_query = db_query.replace(ident, "to_date('%s', 'YYYY-MM-DD')" % value.strftime('%Y-%m-%d'), 1)
                elif isinstance(value, str):
                    # Py2: byte strings are decoded before being embedded.
                    db_query = db_query.replace(ident, "'%s'" % value.decode('UTF-8'), 1)
                elif isinstance(value, unicode):
                    db_query = db_query.replace(ident, "'%s'" % value, 1)
                else:
                    raise TypeError('Data type not supported: %s' % type(value))
        # All values are now inlined; execute with an empty parameter set.
        self.statement = db_query
        self.parameters = [[]]
class EXADialect(default.DefaultDialect):
    """SQLAlchemy dialect for the EXASOL database."""

    name = 'exasol'
    supports_alter = True
    supports_unicode_statements = True
    supports_unicode_binds = True
    supports_default_values = True
    supports_empty_insert = False
    # No standalone sequences; IDENTITY columns are used instead.
    supports_sequences = False
    # sequences_optional = True
    # controls in SQLAlchemy base which columns are part of an insert statement
    # postfetch_lastrowid = True
    supports_cast = True
    # Identifiers are stored uppercase in the data dictionary; see
    # normalize_name() / denormalize_name().
    requires_name_normalize = True
    default_paramstyle = 'qmark'
    execution_ctx_cls = EXAExecutionContext
    statement_compiler = EXACompiler
    ddl_compiler = EXADDLCompiler
    type_compiler = EXATypeCompiler
    preparer = EXAIdentifierPreparer
    ischema_names = ischema_names
    colspecs = colspecs
    isolation_level = None

    def __init__(self, isolation_level=None, native_datetime=False, **kwargs):
        # ``native_datetime`` is accepted for interface compatibility but
        # is not used here.
        default.DefaultDialect.__init__(self, **kwargs)
        self.isolation_level = isolation_level

    # Only SERIALIZABLE is offered (see get_isolation_level()).
    _isolation_lookup = {
        'SERIALIZABLE': 0
    }
def normalize_name(self, name):
    """Convert EXASol's case insensitive identifiers (upper case) to
    SQLAlchemy's case insensitive form (lower case)."""
    if name is None:
        return None
    # Py2K
    if isinstance(name, str):
        name = name.decode(self.encoding)
    # end Py2K
    is_all_upper = name.upper() == name
    needs_quoting = self.identifier_preparer._requires_quotes(name.lower())
    if is_all_upper and not needs_quoting:
        return name.lower()
    return name
def denormalize_name(self, name):
    """Convert SQLAlchemy's case insensitive identifiers (lower case) to
    EXASol's case insensitive form (upper case)."""
    if not name:
        # Covers both None and the empty string.
        return None
    if name.lower() == name and \
            not self.identifier_preparer._requires_quotes(name.lower()):
        name = name.upper()
    # Py2K
    if not self.supports_unicode_binds:
        name = name.encode(self.encoding)
    else:
        name = unicode(name)
    # end Py2K
    return name
def get_isolation_level(self, connection):
    """Report SERIALIZABLE, the only level in ``_isolation_lookup``."""
    return "SERIALIZABLE"

def on_connect(self):
    # TODO: set isolation level
    pass
# never called during reflection
@reflection.cache
def get_schema_names(self, connection, **kw):
    """Return all schema names from the EXASol data dictionary."""
    result = connection.execute(
        sql.text("select SCHEMA_NAME from SYS.EXA_SCHEMAS"))
    return [row[0] for row in result]
@reflection.cache
def get_table_names(self, connection, schema, **kw):
    """Return normalized table names for ``schema``.

    Falls back to the database named in the connection URL, and to
    CURRENT_SCHEMA when no schema can be derived at all.
    """
    schema = schema or connection.engine.url.database
    sql_stmnt = "SELECT table_name FROM SYS.EXA_ALL_TABLES WHERE table_schema = "
    if schema is None:
        sql_stmnt += "CURRENT_SCHEMA ORDER BY table_name"
        # Consistency fix: execute via sql.text() like every other
        # reflection query instead of passing a raw string (raw-string
        # execution is driver-dependent).
        rs = connection.execute(sql.text(sql_stmnt))
    else:
        sql_stmnt += ":schema ORDER BY table_name"
        rs = connection.execute(sql.text(sql_stmnt), \
                                schema=self.denormalize_name(schema))
    return [self.normalize_name(row[0]) for row in rs]
def has_table(self, connection, table_name, schema=None):
    """Check the EXASol catalog for the existence of ``table_name``."""
    schema = schema or connection.engine.url.database
    query = "SELECT table_name from SYS.EXA_ALL_TABLES "\
            "WHERE table_name = :table_name "
    if schema is not None:
        query += "AND table_schema = :schema"
    result = connection.execute(
        sql.text(query),
        table_name=self.denormalize_name(table_name),
        schema=self.denormalize_name(schema))
    return result.fetchone() is not None
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
    """Return normalized view names, optionally filtered by schema."""
    schema = schema or connection.engine.url.database
    query = "SELECT view_name FROM SYS.EXA_ALL_VIEWS "
    if schema is not None:
        query += "WHERE view_schema = :schema "
    query += " ORDER BY view_name"
    result = connection.execute(sql.text(query),
                                schema=self.denormalize_name(schema))
    return [self.normalize_name(row[0]) for row in result]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
    """Return the source text of ``view_name`` (None if not found).

    The text is decoded using the dialect's configured encoding.
    """
    schema = schema or connection.engine.url.database
    # BUG FIX: the original statement never filtered on view_name (and
    # was executed as a raw string, so the :schema bind parameter was
    # never applied either) — it returned the text of an arbitrary view.
    sql_stmnt = "SELECT view_text FROM sys.exa_all_views " \
                "WHERE view_name = :view_name "
    if schema is not None:
        sql_stmnt += "AND view_schema = :schema "
    rp = connection.execute(sql.text(sql_stmnt),
                            view_name=self.denormalize_name(view_name),
                            schema=self.denormalize_name(schema)).scalar()
    if rp:
        return rp.decode(self.encoding)
    else:
        return None
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
    """Reflect column metadata for ``table_name`` from the EXASol catalog.

    Returns a list of dicts with name/type/nullable/default, plus a
    'sequence' entry for IDENTITY columns.
    """
    schema = schema or connection.engine.url.database
    sql_stmnt = "SELECT column_name, column_type, column_maxsize, column_num_prec, column_num_scale, " \
                "column_is_nullable, column_default, column_identity FROM sys.exa_all_columns " \
                "WHERE column_object_type IN ('TABLE', 'VIEW') AND column_table = :table_name AND column_schema = "
    if schema is None:
        sql_stmnt += "CURRENT_SCHEMA "
    else:
        sql_stmnt += ":schema "
    sql_stmnt += "ORDER BY column_ordinal_position"
    c = connection.execute(sql.text(sql_stmnt),
                           table_name=self.denormalize_name(table_name),
                           schema=self.denormalize_name(schema))
    columns = []
    for row in c:
        (colname, coltype, length, precision, scale, nullable, default, identity) = \
            (row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7])
        # FIXME: Missing type support: INTERVAL DAY [(p)] TO SECOND [(fp)], INTERVAL YEAR[(p)] TO MONTH
        # remove ASCII, UTF8 and spaces from char-like types
        coltype = re.sub(r'ASCII|UTF8| ', '', coltype)
        # remove precision and scale addition from numeric types
        coltype = re.sub(r'\(\d+(\,\d+)?\)', '', coltype)
        try:
            if coltype == 'VARCHAR':
                coltype = sqltypes.VARCHAR(length)
            elif coltype == 'DECIMAL':
                # this Dialect forces INTTYPESINRESULTSIFPOSSIBLE=y on ODBC level
                # thus, we need to convert DECIMAL(<=18,0) back to INTEGER type
                if scale == 0 and precision <= 18:
                    coltype = sqltypes.INTEGER()
                else:
                    coltype = sqltypes.DECIMAL(precision, scale)
            else:
                # Fall back to the static name -> type mapping.
                coltype = self.ischema_names[coltype]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (coltype, colname))
            coltype = sqltypes.NULLTYPE
        cdict = {
            'name': self.normalize_name(colname),
            'type': coltype,
            'nullable': nullable,
            'default': default
        }
        # if we have a positive identity value add a sequence
        # NOTE(review): column_identity may be NULL for plain columns;
        # under Python 2 `None >= 0` is False, so such rows skip this
        # branch — confirm when porting to Python 3.
        if identity >= 0:
            cdict['sequence'] = {'name': ''}
            # TODO: we have to possibility to encode the current identity value count
            # into the column metadata. But the consequence is that it would also be used
            # as start value in CREATE statements. For now the current value is ignored.
            # Add it by changing the dict to: {'name':'', 'start': int(identity)}
        columns.append(cdict)
    return columns
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
    """Return the primary key columns and constraint name for a table."""
    schema = schema or connection.engine.url.database
    query = "SELECT column_name, constraint_name from SYS.EXA_ALL_CONSTRAINT_COLUMNS " \
            "WHERE constraint_type = 'PRIMARY KEY' AND constraint_table = :table_name "
    if schema is not None:
        query += "AND constraint_schema = :schema"
    result = connection.execute(sql.text(query),
                                table_name=self.denormalize_name(table_name),
                                schema=self.denormalize_name(schema))
    pkeys = []
    constraint_name = None
    for row in result.fetchall():
        pkeys.append(self.normalize_name(row[0]))
        constraint_name = self.normalize_name(row[1])
    return {'constrained_columns': pkeys, 'name': constraint_name}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
    """Reflect foreign key constraints for ``table_name``.

    When no schema is given (directly or via the connection URL), the
    lookup is restricted to CURRENT_SCHEMA so that equally named tables
    in other schemas do not contribute keys.
    """
    schema_int = schema or connection.engine.url.database
    sql_stmnt = "SELECT constraint_name, column_name, referenced_schema, referenced_table, " \
                "referenced_column FROM SYS.EXA_ALL_CONSTRAINT_COLUMNS " \
                "WHERE constraint_type = 'FOREIGN KEY' AND constraint_table = :table_name " \
                "AND constraint_schema = "
    if schema_int is None:
        sql_stmnt += "CURRENT_SCHEMA "
    else:
        sql_stmnt += ":schema "
    sql_stmnt += "ORDER BY ordinal_position"
    rp = connection.execute(sql.text(sql_stmnt),
                            table_name=self.denormalize_name(table_name),
                            schema=self.denormalize_name(schema_int))
    constraint_data = rp.fetchall()

    def fkey_rec():
        # One record per constraint name, filled row by row below.
        return {
            'name': None,
            'constrained_columns': [],
            'referred_schema': None,
            'referred_table': None,
            'referred_columns': []
        }

    fkeys = util.defaultdict(fkey_rec)
    for row in constraint_data:
        (cons_name, local_column, remote_schema, remote_table, remote_column) = \
            (row[0], row[1], row[2], row[3], row[4])
        rec = fkeys[self.normalize_name(cons_name)]
        rec['name'] = self.normalize_name(cons_name)
        local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']
        if not rec['referred_table']:
            rec['referred_table'] = self.normalize_name(remote_table)
        # we need to take care of calls without schema. the sqla test suite
        # expects referred_schema to be None if None is passed in to this function
        if schema is None and schema_int == self.normalize_name(remote_schema):
            rec['referred_schema'] = None
        else:
            rec['referred_schema'] = self.normalize_name(remote_schema)
        local_cols.append(self.normalize_name(local_column))
        remote_cols.append(self.normalize_name(remote_column))
    return fkeys.values()
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
    """EXASolution maintains its indexes internally; none are reported."""
    # TODO: check whether SQLAlchemy uses index metadata to optimise
    # generated SQL; if so, all columns should be reported as indexed.
    schema = schema or connection.engine.url.database
    return []
|
# coding: utf-8
import logging
from django import forms
from django.utils.translation import ugettext_lazy as _
from sentry.plugins.bases import notify
from sentry.http import safe_urlopen
from sentry.utils.safe import safe_execute
from . import __version__, __doc__ as package_doc
class TelegramNotificationsOptionsForm(notify.NotificationConfigurationForm):
    """Per-project configuration form for the Telegram notification plugin."""

    # Telegram Bot API token used to authorize sendMessage calls.
    api_token = forms.CharField(
        label=_('BotAPI token'),
        widget=forms.TextInput(attrs={'placeholder': '123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11'}),
        help_text=_('Read more: https://core.telegram.org/bots/api#authorizing-your-bot'),
    )
    # Newline-separated chat IDs (users, group chats or channels).
    receivers = forms.CharField(
        label=_('Receivers'),
        widget=forms.Textarea(attrs={'class': 'span6'}),
        help_text=_('Enter receivers IDs (one per line). Personal messages, group chats and channels also available.'))
    # str.format() template used to render the outgoing message text.
    message_template = forms.CharField(
        label=_('Message template'),
        widget=forms.Textarea(attrs={'class': 'span4'}),
        help_text=_('Set in standard python\'s {}-format convention, available names are: '
                    '{project_name}, {url}, {title}, {message}, {tag[%your_tag%]}'),
        initial='*[Sentry]* {project_name} {tag[level]}: *{title}*\n```{message}```\n{url}'
    )
class TelegramNotificationsPlugin(notify.NotificationPlugin):
    """Sentry notification plugin that posts events to Telegram chats."""

    title = 'Telegram Notifications'
    slug = 'sentry_telegram'
    description = package_doc
    version = __version__
    author = 'Viacheslav Butorov'
    author_url = 'https://github.com/butorov/sentry-telegram'
    resource_links = [
        ('Bug Tracker', 'https://github.com/butorov/sentry-telegram/issues'),
        ('Source', 'https://github.com/butorov/sentry-telegram'),
    ]
    conf_key = 'sentry_telegram'
    conf_title = title
    project_conf_form = TelegramNotificationsOptionsForm
    logger = logging.getLogger('sentry.plugins.sentry_telegram')
def is_configured(self, project, **kwargs):
    """The plugin is usable once both a bot token and receivers are set."""
    return bool(
        self.get_option('api_token', project)
        and self.get_option('receivers', project)
    )
def get_config(self, project, **kwargs):
    """Return the plugin's configuration field descriptors for Sentry's
    settings UI (mirrors TelegramNotificationsOptionsForm)."""
    return [
        {
            'name': 'api_token',
            'label': 'BotAPI token',
            'type': 'text',
            'help': 'Read more: https://core.telegram.org/bots/api#authorizing-your-bot',
            'placeholder': '123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11',
            'validators': [],
            'required': True,
        },
        {
            'name': 'receivers',
            'label': 'Receivers',
            'type': 'textarea',
            'help': 'Enter receivers IDs (one per line). Personal messages, group chats and channels also available.',
            'validators': [],
            'required': True,
        },
        {
            'name': 'message_template',
            'label': 'Message Template',
            'type': 'textarea',
            'help': 'Set in standard python\'s {}-format convention, available names are: '
                    '{project_name}, {url}, {title}, {message}, {tag[%your_tag%]}',
            'validators': [],
            'required': True,
            'default': '*[Sentry]* {project_name} {tag[level]}: *{title}*\n```{message}```\n{url}'
        },
    ]
def build_message(self, group, event):
    """Build the Telegram sendMessage payload for ``event``.

    Tags referenced by the template but absent from this event resolve
    to '[NA]' instead of raising, so a changed tag set never breaks
    notification delivery.
    """
    from collections import defaultdict

    # BUG FIX: a plain dict raised KeyError when the configured template
    # referenced a tag that is not present on this event, which aborted
    # the notification entirely. Default missing tags to '[NA]'.
    tags = defaultdict(lambda: '[NA]')
    tags.update({k: v for k, v in event.tags})
    names = {
        'title': event.title,
        'tag': tags,
        'message': event.message,
        'project_name': group.project.name,
        'url': group.get_absolute_url(),
    }
    template = self.get_message_template(group.project)
    text = template.format(**names)
    return {
        'text': text,
        'parse_mode': 'Markdown',
    }
def build_url(self, project):
    """Return the Bot API sendMessage endpoint for the configured token."""
    return 'https://api.telegram.org/bot%s/sendMessage' % self.get_option('api_token', project)
def get_message_template(self, project):
    """Return the project's configured message template string."""
    return self.get_option('message_template', project)
def get_receivers(self, project):
    """Return the configured receiver chat IDs as a list of lines.

    Empty lines are dropped; returns [] when the option is unset.
    """
    receivers = self.get_option('receivers', project)
    if not receivers:
        return []
    # Return a concrete list rather than filter(): under Python 3,
    # filter() yields a one-shot iterator, and notify_users() both joins
    # and iterates the result, which would silently exhaust it.
    return [line for line in receivers.strip().splitlines() if line]
def send_message(self, url, payload, receiver):
    """POST ``payload`` to the Bot API ``url`` for a single receiver.

    NOTE: mutates the shared ``payload`` dict by setting its chat_id.
    """
    payload['chat_id'] = receiver
    self.logger.debug('Sending message to %s ' % receiver)
    response = safe_urlopen(
        method='POST',
        url=url,
        json=payload,
    )
    self.logger.debug('Response code: %s, content: %s' % (response.status_code, response.content))
def notify_users(self, group, event, fail_silently=False):
    """Entry point called by Sentry: fan the event out to all receivers."""
    self.logger.debug('Received notification for event: %s' % event)
    receivers = self.get_receivers(group.project)
    self.logger.debug('for receivers: %s' % ', '.join(receivers or ()))
    payload = self.build_message(group, event)
    self.logger.debug('Built payload: %s' % payload)
    url = self.build_url(group.project)
    self.logger.debug('Built url: %s' % url)
    for receiver in receivers:
        # NOTE(review): _with_transaction=False — presumably to keep the
        # best-effort send outside any surrounding DB transaction;
        # confirm against safe_execute's documentation.
        safe_execute(self.send_message, url, payload, receiver, _with_transaction=False)
Set a default value for non-existing tags.
If the tag set has changed, the plugin stops sending notifications because of a KeyError. This fixes the problem.
# coding: utf-8
import logging
from django import forms
from django.utils.translation import ugettext_lazy as _
from sentry.plugins.bases import notify
from sentry.http import safe_urlopen
from sentry.utils.safe import safe_execute
from collections import defaultdict
from . import __version__, __doc__ as package_doc
class TelegramNotificationsOptionsForm(notify.NotificationConfigurationForm):
api_token = forms.CharField(
label=_('BotAPI token'),
widget=forms.TextInput(attrs={'placeholder': '123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11'}),
help_text=_('Read more: https://core.telegram.org/bots/api#authorizing-your-bot'),
)
receivers = forms.CharField(
label=_('Receivers'),
widget=forms.Textarea(attrs={'class': 'span6'}),
help_text=_('Enter receivers IDs (one per line). Personal messages, group chats and channels also available.'))
message_template = forms.CharField(
label=_('Message template'),
widget=forms.Textarea(attrs={'class': 'span4'}),
help_text=_('Set in standard python\'s {}-format convention, available names are: '
'{project_name}, {url}, {title}, {message}, {tag[%your_tag%]}'),
initial='*[Sentry]* {project_name} {tag[level]}: *{title}*\n```{message}```\n{url}'
)
class TelegramNotificationsPlugin(notify.NotificationPlugin):
title = 'Telegram Notifications'
slug = 'sentry_telegram'
description = package_doc
version = __version__
author = 'Viacheslav Butorov'
author_url = 'https://github.com/butorov/sentry-telegram'
resource_links = [
('Bug Tracker', 'https://github.com/butorov/sentry-telegram/issues'),
('Source', 'https://github.com/butorov/sentry-telegram'),
]
conf_key = 'sentry_telegram'
conf_title = title
project_conf_form = TelegramNotificationsOptionsForm
logger = logging.getLogger('sentry.plugins.sentry_telegram')
def is_configured(self, project, **kwargs):
return bool(self.get_option('api_token', project) and self.get_option('receivers', project))
def get_config(self, project, **kwargs):
return [
{
'name': 'api_token',
'label': 'BotAPI token',
'type': 'text',
'help': 'Read more: https://core.telegram.org/bots/api#authorizing-your-bot',
'placeholder': '123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11',
'validators': [],
'required': True,
},
{
'name': 'receivers',
'label': 'Receivers',
'type': 'textarea',
'help': 'Enter receivers IDs (one per line). Personal messages, group chats and channels also available.',
'validators': [],
'required': True,
},
{
'name': 'message_template',
'label': 'Message Template',
'type': 'textarea',
'help': 'Set in standard python\'s {}-format convention, available names are: '
'{project_name}, {url}, {title}, {message}, {tag[%your_tag%]}. Undefined tags will be shown as [NA]',
'validators': [],
'required': True,
'default': '*[Sentry]* {project_name} {tag[level]}: *{title}*\n```{message}```\n{url}'
},
]
def build_message(self, group, event):
    """Render the configured template into a Telegram sendMessage payload."""
    # Unknown tags fall back to '[NA]' so template rendering never fails.
    the_tags = defaultdict(lambda: '[NA]')
    the_tags.update(dict(event.tags))
    context = {
        'title': event.title,
        'tag': the_tags,
        'message': event.message,
        'project_name': group.project.name,
        'url': group.get_absolute_url(),
    }
    text = self.get_message_template(group.project).format(**context)
    return {
        'text': text,
        'parse_mode': 'Markdown',
    }
def build_url(self, project):
return 'https://api.telegram.org/bot%s/sendMessage' % self.get_option('api_token', project)
def get_message_template(self, project):
return self.get_option('message_template', project)
def get_receivers(self, project):
    """Return the configured receiver chat IDs as a list of lines.

    Empty lines are dropped; returns [] when the option is unset.
    """
    receivers = self.get_option('receivers', project)
    if not receivers:
        return []
    # Return a concrete list rather than filter(): under Python 3,
    # filter() yields a one-shot iterator, and notify_users() both joins
    # and iterates the result, which would silently exhaust it.
    return [line for line in receivers.strip().splitlines() if line]
def send_message(self, url, payload, receiver):
payload['chat_id'] = receiver
self.logger.debug('Sending message to %s ' % receiver)
response = safe_urlopen(
method='POST',
url=url,
json=payload,
)
self.logger.debug('Response code: %s, content: %s' % (response.status_code, response.content))
def notify_users(self, group, event, fail_silently=False):
self.logger.debug('Received notification for event: %s' % event)
receivers = self.get_receivers(group.project)
self.logger.debug('for receivers: %s' % ', '.join(receivers or ()))
payload = self.build_message(group, event)
self.logger.debug('Built payload: %s' % payload)
url = self.build_url(group.project)
self.logger.debug('Built url: %s' % url)
for receiver in receivers:
safe_execute(self.send_message, url, payload, receiver, _with_transaction=False)
|
from gemdeps import app
from flask import render_template, request, Markup
import json
import os
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: list all gems; on POST, look one gem up per selected app.

    Loads the per-app ``*_debian_status.json`` files shipped in static/.
    """
    completedeplist = {}
    gemnames = "["
    SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
    # BUG FIX: the loop variable was named `app`, shadowing the Flask
    # application object imported from gemdeps.
    for app_name in ['diaspora', 'gitlab', 'asciinema']:
        statusfile = app_name + "_debian_status.json"
        filepath = os.path.join(SITE_ROOT, "static", statusfile)
        # Use a context manager so the file handle is always closed.
        with open(filepath) as inputfile:
            filecontent = inputfile.read()
        deps = json.loads(filecontent)
        completedeplist[app_name] = deps
        gemnames += ", ".join([str('"' + x['name'] + '"') for x in deps])
        gemnames += ", "
    gemnames += "]"
    gemnames = Markup(gemnames)
    print(completedeplist)
    if request.method == 'GET':
        return render_template('index.html', gemnames=gemnames)
    else:
        apps = request.form.getlist('appname')
        gemname = request.form.get('gemname')
        gems = {}
        flag = 0
        for app_name in apps:
            gem = [x for x in completedeplist[app_name] if x['name'] == gemname]
            if gem:
                flag = 1
                gems[app_name] = gem
        return render_template('index.html',
                               gemnames=gemnames,
                               gemname=gemname,
                               gemlist=gems,
                               flag=flag)
Integrate progressbar generation
import json
import os
from flask import Markup, render_template, request
from gemdeps import app
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: list all gems; on POST, look one gem up per selected app.

    Loads the per-app ``*_debian_status.json`` files shipped in static/.
    """
    completedeplist = {}
    gemnames = "["
    SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
    # BUG FIX: the loop variable was named `app`, shadowing the Flask
    # application object imported from gemdeps.
    for app_name in ['diaspora', 'gitlab', 'asciinema']:
        statusfile = app_name + "_debian_status.json"
        filepath = os.path.join(SITE_ROOT, "static", statusfile)
        # Use a context manager so the file handle is always closed.
        with open(filepath) as inputfile:
            filecontent = inputfile.read()
        deps = json.loads(filecontent)
        completedeplist[app_name] = deps
        gemnames += ", ".join([str('"' + x['name'] + '"') for x in deps])
        gemnames += ", "
    gemnames += "]"
    gemnames = Markup(gemnames)
    print(completedeplist)
    if request.method == 'GET':
        return render_template('index.html', gemnames=gemnames)
    else:
        apps = request.form.getlist('appname')
        gemname = request.form.get('gemname')
        gems = {}
        flag = 0
        for app_name in apps:
            gem = [x for x in completedeplist[app_name] if x['name'] == gemname]
            if gem:
                flag = 1
                gems[app_name] = gem
        return render_template('index.html',
                               gemnames=gemnames,
                               gemname=gemname,
                               gemlist=gems,
                               flag=flag)
@app.route('/status/<appname>')
def status(appname):
    """Render the packaging progress page for one app.

    Counts each dependency's Debian status ('Packaged'/'NEW' count as
    packaged, 'ITP' separately, anything else as unpackaged).
    """
    SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
    appfilename = appname + "_debian_status.json"
    filepath = os.path.join(SITE_ROOT, "static", appfilename)
    # Use a context manager so the file handle is always closed.
    with open(filepath) as inputfile:
        filecontent = inputfile.read()
    deps = json.loads(filecontent)
    packaged_count = 0
    unpackaged_count = 0
    itp_count = 0
    for n in deps:
        if n['status'] == 'Packaged' or n['status'] == 'NEW':
            packaged_count += 1
        elif n['status'] == 'ITP':
            itp_count += 1
        else:
            unpackaged_count += 1
    total = len(deps)
    # Guard against an empty dependency list (ZeroDivisionError).
    percent_complete = (packaged_count * 100) / total if total else 0
    return render_template('status.html',
                           appname=appname.title(),
                           deps=deps,
                           packaged_count=packaged_count,
                           unpackaged_count=unpackaged_count,
                           itp_count=itp_count,
                           total=total,
                           percent_complete=percent_complete
                           )
|
import sys
from threading import Thread
class cliInterface:
    """Interactive stdin command loop for pubmusic2.

    Spawns a background thread that reads commands from the terminal and
    drives the logger/player/library objects injected from pubmusic.py.
    """

    # Collaborators injected from pubmusic.py (see that file for their init)
    cliThread = None
    logger = None
    player = None
    library = None

    def __init__(self, logger, player, library):
        # Setting the class variables to our objects
        self.logger = logger
        self.player = player
        self.library = library
        # BUG FIX: Thread(...).start() returns None, so the original code
        # stored None in self.cliThread; keep the Thread object instead.
        self.cliThread = Thread(target=self.cliThreadClass)
        self.cliThread.start()

    def cliThreadClass(self):
        """Prompt loop: read a line, dispatch on its first word, repeat."""
        global input
        self.logger.dispLogEntry("info","preparing cli environment")
        print("Interactive command line for pubmusic2")
        print("Enter \"help\" for a list of available commands")
        # Python 2 compatibility: fall back to raw_input when it exists.
        try: input = raw_input
        except NameError: pass
        while True:
            userInput = input("-> ").strip()
            userCommand = userInput.split(" ")[0].lower()
            # General commands
            if userCommand == "help":
                # TODO: Extend help
                print("Available commands:")
                print("")
                print("help - displays this message")
                print("exit - closes the application")
                print("")
                print("play - starts the playback")
                print("add - adds a song by id")
                print("random - adds a random song")
                print("autofill - adds 10 random songs")
                print("next - skips to the next song")
                print("volume - controls the playback volume")
                print("")
                print("current - displays the current playing song")
                print("library - displays the song library")
                print("playlist - displays the current playlist")
                print("")
            elif userCommand == "exit":
                self.player.shutdown()
                sys.exit()
            # playback control
            elif userCommand == "play":
                self.player.raw("play")
            elif userCommand == "add":
                try:
                    self.player.enqueue(self.library.getSongList()[int(userInput.split(" ")[1])])
                # BUG FIX: also catch ValueError (non-numeric id) so a typo
                # does not kill the CLI thread with an uncaught exception.
                except (IndexError, ValueError):
                    self.logger.dispLogEntry("warning", "Title with given id not found")
            elif userCommand == "random":
                self.player.enqueue(self.library.getRandomSong())
            elif userCommand == "autofill":
                for x in range(0,10):
                    self.player.enqueue(self.library.getRandomSong())
            elif userCommand == "next":
                self.player.next()
            elif userCommand == "volume":
                if len(userInput.split(" ")) >= 2:
                    if userInput.split(" ")[1] == "up":
                        self.player.volup()
                    elif userInput.split(" ")[1] == "down":
                        self.player.voldown()
                    else:
                        # BUG FIX: int() on a non-numeric argument raised an
                        # uncaught ValueError; report it instead of crashing.
                        try:
                            int(userInput.split(" ")[1])
                        except ValueError:
                            print("given argument has to be \"up\", \"down\" or a number")
                        else:
                            self.player.volume(userInput.split(" ")[1])
                else:
                    print("Not enough arguments given")
            # Informative commands
            elif userCommand == "current":
                print(self.player.getCurrentPlaying())
            elif userCommand == "playlist":
                for songs in self.player.getPlaylist():
                    print (self.player.getCleanTitle(songs))
            elif userCommand == "library":
                i = 0
                for song in self.library.getSongList():
                    print(str(i).zfill(4) + " - " + self.player.getCleanTitle(song))
                    i = i + 1
            # No input given
            else:
                if userCommand == "":
                    print("No input was given. Please try again!")
                else:
                    print("Command \"" + userCommand + "\" not found!")
added integer check for volume level
import sys
from threading import Thread
class cliInterface:
    """Interactive stdin command loop for pubmusic2.

    Spawns a background thread that reads commands from the terminal and
    drives the logger/player/library objects injected from pubmusic.py.
    """

    cliThread = None
    # Variables for used objects from pubmusic.py
    # See this file for init
    logger = None
    player = None
    library = None

    def __init__(self, logger, player, library):
        # Setting the class variables to our objects
        self.logger = logger
        self.player = player
        self.library = library
        # NOTE(review): Thread.start() returns None, so cliThread ends up
        # None here, not the Thread object — confirm whether the reference
        # is ever used.
        self.cliThread = Thread(target=self.cliThreadClass).start()

    def cliThreadClass(self):
        """Prompt loop: read a line, dispatch on its first word, repeat."""
        global input
        self.logger.dispLogEntry("info","preparing cli environment")
        print("Interactive command line for pubmusic2")
        print("Enter \"help\" for a list of available commands")
        # Python 2 compatibility: fall back to raw_input when it exists.
        try: input = raw_input
        except NameError: pass
        while True:
            userInput = input("-> ").strip()
            userCommand = userInput.split(" ")[0].lower()
            # General commands
            if userCommand == "help":
                # TODO: Extend help
                print("Available commands:")
                print("")
                print("help - displays this message")
                print("exit - closes the application")
                print("")
                print("play - starts the playback")
                print("add - adds a song by id")
                print("random - adds a random song")
                print("autofill - adds 10 random songs")
                print("next - skips to the next song")
                print("volume - controls the playback volume")
                print("")
                print("current - displays the current playing song")
                print("library - displays the song library")
                print("playlist - displays the current playlist")
                print("")
            elif userCommand == "exit":
                self.player.shutdown()
                sys.exit()
            # playback control
            elif userCommand == "play":
                self.player.raw("play")
            elif userCommand == "add":
                # NOTE(review): a non-numeric id raises ValueError here,
                # which is not caught — only IndexError is.
                try:
                    self.player.enqueue(self.library.getSongList()[int(userInput.split(" ")[1])])
                except IndexError:
                    self.logger.dispLogEntry("warning", "Title with given id not found")
            elif userCommand == "random":
                self.player.enqueue(self.player.enqueue) if False else self.player.enqueue(self.library.getRandomSong())
            elif userCommand == "autofill":
                for x in range(0,10):
                    self.player.enqueue(self.library.getRandomSong())
            elif userCommand == "next":
                self.player.next()
            elif userCommand == "volume":
                if len(userInput.split(" ")) >= 2:
                    if userInput.split(" ")[1] == "up":
                        self.player.volup()
                    elif userInput.split(" ")[1] == "down":
                        self.player.voldown()
                    else:
                        try:
                            # Once int() succeeds the isinstance check is
                            # always True; it only guards the conversion.
                            if isinstance(int(userInput.split(" ")[1]), int):
                                # parameter is a integer
                                self.player.volume(userInput.split(" ")[1])
                        except ValueError:
                            print("given argument has to be \"up\", \"down\" or a number")
                else:
                    print("Not enough arguments given")
            # Informative commands
            elif userCommand == "current":
                print(self.player.getCurrentPlaying())
            elif userCommand == "playlist":
                for songs in self.player.getPlaylist():
                    print (self.player.getCleanTitle(songs))
            elif userCommand == "library":
                i = 0
                for song in self.library.getSongList():
                    print(str(i).zfill(4) + " - " + self.player.getCleanTitle(song))
                    i = i + 1
            # No input given
            else:
                if userCommand == "":
                    print("No input was given. Please try again!")
                else:
                    print("Command \"" + userCommand + "\" not found!")
# -*- coding: utf-8 -*-
"""
Dependencies: flask, tornado
"""
from __future__ import absolute_import, division, print_function
import random
from os.path import join, exists, splitext, basename
import uuid
import zipfile
import time
import six
import math
import tornado.wsgi
import tornado.httpserver
from flask import request, redirect, url_for, make_response, current_app, send_file
import logging
import socket
import simplejson as json
from ibeis.control import controller_inject
from ibeis import constants as const
from ibeis.constants import KEY_DEFAULTS, SPECIES_KEY, PI, TAU
from ibeis.web import appfuncs as ap
from ibeis.web import zmq_task_queue # NOQA
import utool as ut
DEFAULT_WEB_API_PORT = ut.get_argval('--port', type_=int, default=5000)
register_api = controller_inject.get_ibeis_flask_api(__name__)
register_route = controller_inject.get_ibeis_flask_route(__name__)
PAGE_SIZE = 500
################################################################################
def default_species(ibs):
    """Guess a sensible default species from the database name (hack).

    Exact database names are looked up first; 'NNP_'/'GZC' substrings fall
    back to plains zebra; anything else yields None.
    """
    _EXACT_DBNAME_SPECIES = {
        'CHTA_Master': 'cheetah',
        'ELPH_Master': 'elephant_savanna',
        'GIR_Master': 'giraffe_reticulated',
        'GZ_Master': 'zebra_grevys',
        'LION_Master': 'lion',
        'PZ_Master': 'zebra_plains',
        'WD_Master': 'wild_dog',
        'NNP_MasterGIRM': 'giraffe_masai',
    }
    dbname = ibs.get_dbname()
    if dbname in _EXACT_DBNAME_SPECIES:
        species = _EXACT_DBNAME_SPECIES[dbname]
    elif 'NNP_' in dbname or 'GZC' in dbname:
        species = 'zebra_plains'
    else:
        species = None
    print('[web] DEFAULT SPECIES: %r' % (species))
    return species
def imageset_image_processed(ibs, gid_list):
    """Return a bool per image: True iff its reviewed flag equals 1."""
    flags = ibs.get_image_reviewed(gid_list)
    return [flag == 1 for flag in flags]
def imageset_annot_viewpoint_processed(ibs, aid_list):
    """Return a bool per annotation: True iff a yaw has been set."""
    yaws = ibs.get_annot_yaws(aid_list)
    return [yaw is not None for yaw in yaws]
def imageset_annot_quality_processed(ibs, aid_list):
    """Return a bool per annotation: True iff its quality has been reviewed.

    A quality of None (never set) or -1 (explicitly unreviewed) counts as
    not processed.
    """
    # BUG FIX: the original used `reviewed is not -1`, an identity test on
    # an int literal that only happens to work because CPython caches small
    # ints; use a value comparison instead.
    annots_reviewed = [
        reviewed is not None and reviewed != -1
        for reviewed in ibs.get_annot_qualities(aid_list)
    ]
    return annots_reviewed
def imageset_annot_additional_processed(ibs, aid_list, nid_list):
    """Return a bool per annotation: True when sex/age review is complete.

    Unnamed annotations (nid < 0) count as done; named ones (nid > 0) need
    a known sex, no -1 age endpoint, and at most one unset (None) endpoint.
    """
    sex_list = ibs.get_annot_sex(aid_list)
    age_list = ibs.get_annot_age_months_est(aid_list)
    results = []
    for nid, sex, age in zip(nid_list, sex_list, age_list):
        if nid < 0:
            results.append(True)
            continue
        ages = list(age)
        done = nid > 0 and sex >= 0 and -1 not in ages and ages.count(None) < 2
        results.append(done)
    return results
def convert_old_viewpoint_to_yaw(view_angle):
    """Map a legacy viewpoint angle (degrees) to a yaw (radians).

    The original viewpoint coordinates were inverted, so the mapping is
    yaw = (TAU/2 - radians(angle)) mod TAU; e.g. 'left' (0 deg) -> TAU/2,
    'front' (90 deg) -> TAU/4. Returns None for None input.
    """
    if view_angle is None:
        return None
    angle_rad = ut.deg_to_rad(view_angle)
    return ((TAU / 2) - angle_rad) % TAU
def convert_yaw_to_old_viewpoint(yaw):
    """Inverse of convert_old_viewpoint_to_yaw: yaw (radians) -> degrees.

    Applies angle = degrees((TAU/2 - yaw) mod TAU), undoing the inverted
    legacy coordinate convention. Returns None for None input.
    """
    if yaw is None:
        return None
    angle_rad = ((TAU / 2) - yaw) % TAU
    return ut.rad_to_deg(angle_rad)
################################################################################
@register_route('/')
def root():
    # Landing page: render the default template with no specific sub-page.
    return ap.template(None)
@register_route('/view')
def view():
    """Render the overview dashboard.

    Builds: a cumulative unique-sighting curve over the filtered
    annotations, per-day bar-chart series, a Petersen-Lincoln population
    estimate from the last two days, GPS markers/tracks, an age/sex
    histogram per name, and assorted database counts for the template.
    """
    def _date_list(gid_list):
        # 'YYYY/MM/DD' per image; 'UNKNOWN' when the unixtime is unset or
        # the datetime string does not split into exactly two fields.
        unixtime_list = ibs.get_image_unixtime(gid_list)
        datetime_list = [
            ut.unixtime_to_datetimestr(unixtime)
            if unixtime is not None else
            'UNKNOWN'
            for unixtime in unixtime_list
        ]
        datetime_split_list = [ datetime.split(' ') for datetime in datetime_list ]
        date_list = [ datetime_split[0] if len(datetime_split) == 2 else 'UNKNOWN' for datetime_split in datetime_split_list ]
        return date_list
    ibs = current_app.ibs
    # Filtered annotations with their images and name rowids drive the curve.
    aid_list = ibs.filter_aids_count()
    gid_list = ibs.get_annot_gids(aid_list)
    nid_list = ibs.get_annot_name_rowids(aid_list)
    date_list = _date_list(gid_list)
    gid_list_unique = list(set(gid_list))
    date_list_unique = _date_list(gid_list_unique)
    # date -> [images taken that day (all valid), images with filtered annots]
    date_taken_dict = {}
    for gid, date in zip(gid_list_unique, date_list_unique):
        if date not in date_taken_dict:
            date_taken_dict[date] = [0, 0]
        date_taken_dict[date][1] += 1
    gid_list_all = ibs.get_valid_gids()
    date_list_all = _date_list(gid_list_all)
    for gid, date in zip(gid_list_all, date_list_all):
        if date in date_taken_dict:
            date_taken_dict[date][0] += 1
    value = 0
    label_list = []
    value_list = []
    index_list = []
    seen_set = set()
    current_seen_set = set()
    previous_seen_set = set()
    last_date = None
    # date -> [annots, unique names that day, first-ever names, resights from prev day]
    date_seen_dict = {}
    for index, (aid, nid, date) in enumerate(zip(aid_list, nid_list, date_list)):
        index_list.append(index + 1)
        # Add to counters
        if date not in date_seen_dict:
            date_seen_dict[date] = [0, 0, 0, 0]
        date_seen_dict[date][0] += 1
        if nid not in current_seen_set:
            current_seen_set.add(nid)
            date_seen_dict[date][1] += 1
            if nid in previous_seen_set:
                date_seen_dict[date][3] += 1
        if nid not in seen_set:
            seen_set.add(nid)
            value += 1
            date_seen_dict[date][2] += 1
        # Add to register
        value_list.append(value)
        # Reset step (per day)
        if date != last_date and date != 'UNKNOWN':
            last_date = date
            previous_seen_set = set(current_seen_set)
            current_seen_set = set()
            label_list.append(date)
        else:
            label_list.append('')
    # Dead curve-fitting experiments kept below for reference.
    # def optimization1(x, a, b, c):
    #     return a * np.log(b * x) + c
    # def optimization2(x, a, b, c):
    #     return a * np.sqrt(x) ** b + c
    # def optimization3(x, a, b, c):
    #     return 1.0 / (a * np.exp(-b * x) + c)
    # def process(func, opts, domain, zero_index, zero_value):
    #     values = func(domain, *opts)
    #     diff = values[zero_index] - zero_value
    #     values -= diff
    #     values[ values < 0.0 ] = 0.0
    #     values[:zero_index] = 0.0
    #     values = values.astype(int)
    #     return list(values)
    # optimization_funcs = [
    #     optimization1,
    #     optimization2,
    #     optimization3,
    # ]
    # # Get data
    # x = np.array(index_list)
    # y = np.array(value_list)
    # # Fit curves
    # end = int(len(index_list) * 1.25)
    # domain = np.array(range(1, end))
    # zero_index = len(value_list) - 1
    # zero_value = value_list[zero_index]
    # regressed_opts = [ curve_fit(func, x, y)[0] for func in optimization_funcs ]
    # prediction_list = [
    #     process(func, opts, domain, zero_index, zero_value)
    #     for func, opts in zip(optimization_funcs, regressed_opts)
    # ]
    # index_list = list(domain)
    prediction_list = []
    date_seen_dict.pop('UNKNOWN', None)
    bar_label_list = sorted(date_seen_dict.keys())
    bar_value_list1 = [ date_taken_dict[date][0] for date in bar_label_list ]
    bar_value_list2 = [ date_taken_dict[date][1] for date in bar_label_list ]
    bar_value_list3 = [ date_seen_dict[date][0] for date in bar_label_list ]
    bar_value_list4 = [ date_seen_dict[date][1] for date in bar_label_list ]
    bar_value_list5 = [ date_seen_dict[date][2] for date in bar_label_list ]
    bar_value_list6 = [ date_seen_dict[date][3] for date in bar_label_list ]
    # label_list += ['Models'] + [''] * (len(index_list) - len(label_list) - 1)
    # value_list += [0] * (len(index_list) - len(value_list))
    # Counts
    imgsetid_list = ibs.get_valid_imgsetids()
    gid_list = ibs.get_valid_gids()
    aid_list = ibs.get_valid_aids()
    nid_list = ibs.get_valid_nids()
    contrib_list = ibs.get_valid_contrib_rowids()
    # nid_list = ibs.get_valid_nids()
    aid_list_count = ibs.filter_aids_count()
    # gid_list_count = list(set(ibs.get_annot_gids(aid_list_count)))
    nid_list_count_dup = ibs.get_annot_name_rowids(aid_list_count)
    nid_list_count = list(set(nid_list_count_dup))
    # Calculate the Petersen-Lincoln index form the last two days
    # (c1 = names day n-1, c2 = names day n, c3 = names seen on both).
    try:
        c1 = bar_value_list4[-2]
        c2 = bar_value_list4[-1]
        c3 = bar_value_list6[-1]
        pl_index = int(math.ceil( (c1 * c2) / c3 ))
        pl_error_num = float(c1 * c1 * c2 * (c2 - c3))
        pl_error_dom = float(c3 ** 3)
        pl_error = int(math.ceil( 1.96 * math.sqrt(pl_error_num / pl_error_dom) ))
    except IndexError:
        # pl_index = 'Undefined - Zero recaptured (k = 0)'
        pl_index = 0
        pl_error = 0
    except ZeroDivisionError:
        # pl_index = 'Undefined - Zero recaptured (k = 0)'
        pl_index = 0
        pl_error = 0
    # Get the markers
    gid_list_markers = ibs.get_annot_gids(aid_list_count)
    # NOTE(review): map() is a lazy iterator on Python 3; verify the
    # template can consume these (they are iterated only once there).
    gps_list_markers = map(list, ibs.get_image_gps(gid_list_markers))
    gps_list_markers_all = map(list, ibs.get_image_gps(gid_list))
    REMOVE_DUP_CODE = True
    if not REMOVE_DUP_CODE:
        # Get the tracks
        nid_track_dict = ut.ddict(list)
        for nid, gps in zip(nid_list_count_dup, gps_list_markers):
            if gps[0] == -1.0 and gps[1] == -1.0:
                continue
            nid_track_dict[nid].append(gps)
        gps_list_tracks = [ nid_track_dict[nid] for nid in sorted(nid_track_dict.keys()) ]
    else:
        __nid_list, gps_track_list, aid_track_list = ibs.get_name_gps_tracks(aid_list=aid_list_count)
        gps_list_tracks = list(map(lambda x: list(map(list, x)), gps_track_list))
    valid_aids = ibs.get_valid_aids()
    valid_gids = ibs.get_valid_gids()
    valid_aids_ = ibs.filter_aids_custom(valid_aids)
    valid_gids_ = ibs.filter_gids_custom(valid_gids)
    used_gids = list(set( ibs.get_annot_gids(valid_aids) ))
    used_contrib_tags = list(set( ibs.get_image_contributor_tag(used_gids) ))
    # Get Age and sex (By Annot)
    # annot_sex_list = ibs.get_annot_sex(valid_aids_)
    # annot_age_months_est_min = ibs.get_annot_age_months_est_min(valid_aids_)
    # annot_age_months_est_max = ibs.get_annot_age_months_est_max(valid_aids_)
    # age_list = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    # for sex, min_age, max_age in zip(annot_sex_list, annot_age_months_est_min, annot_age_months_est_max):
    #     if sex not in [0, 1]:
    #         sex = 2
    #         # continue
    #     if (min_age is None or min_age < 12) and max_age < 12:
    #         age_list[sex][0] += 1
    #     elif 12 <= min_age and min_age < 36 and 12 <= max_age and max_age < 36:
    #         age_list[sex][1] += 1
    #     elif 36 <= min_age and (36 <= max_age or max_age is None):
    #         age_list[sex][2] += 1
    # Get Age and sex (By Name)
    name_sex_list = ibs.get_name_sex(nid_list_count)
    name_age_months_est_mins_list = ibs.get_name_age_months_est_min(nid_list_count)
    name_age_months_est_maxs_list = ibs.get_name_age_months_est_max(nid_list_count)
    # age_list[sex] = [infant (<12mo), juvenile (12-36mo), adult (36mo+)]
    age_list = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    age_unreviewed = 0
    age_ambiguous = 0
    for nid, sex, min_ages, max_ages in zip(nid_list_count, name_sex_list, name_age_months_est_mins_list, name_age_months_est_maxs_list):
        if len(set(min_ages)) > 1 or len(set(max_ages)) > 1:
            # print('[web] Invalid name %r: Cannot have more than one age' % (nid, ))
            age_ambiguous += 1
            continue
        min_age = None
        max_age = None
        if len(min_ages) > 0:
            min_age = min_ages[0]
        if len(max_ages) > 0:
            max_age = max_ages[0]
        # Histogram
        # NOTE(review): `min_age is -1` is an identity test on an int
        # literal; it relies on CPython small-int caching and should be ==.
        if (min_age is None and max_age is None) or (min_age is -1 and max_age is -1):
            # print('[web] Unreviewded name %r: Specify the age for the name' % (nid, ))
            age_unreviewed += 1
            continue
        if sex not in [0, 1]:
            sex = 2
            # continue
        if (min_age is None or min_age < 12) and max_age < 12:
            age_list[sex][0] += 1
        elif 12 <= min_age and min_age < 36 and 12 <= max_age and max_age < 36:
            age_list[sex][1] += 1
        elif 36 <= min_age and (36 <= max_age or max_age is None):
            age_list[sex][2] += 1
    dbinfo_str = dbinfo()
    return ap.template('view',
                       line_index_list=index_list,
                       line_label_list=label_list,
                       line_value_list=value_list,
                       prediction_list=prediction_list,
                       pl_index=pl_index,
                       pl_error=pl_error,
                       gps_list_markers=gps_list_markers,
                       gps_list_markers_all=gps_list_markers_all,
                       gps_list_tracks=gps_list_tracks,
                       bar_label_list=bar_label_list,
                       bar_value_list1=bar_value_list1,
                       bar_value_list2=bar_value_list2,
                       bar_value_list3=bar_value_list3,
                       bar_value_list4=bar_value_list4,
                       bar_value_list5=bar_value_list5,
                       bar_value_list6=bar_value_list6,
                       age_list=age_list,
                       age_ambiguous=age_ambiguous,
                       age_unreviewed=age_unreviewed,
                       dbinfo_str=dbinfo_str,
                       imgsetid_list=imgsetid_list,
                       imgsetid_list_str=','.join(map(str, imgsetid_list)),
                       num_imgsetids=len(imgsetid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       contrib_list=contrib_list,
                       contrib_list_str=','.join(map(str, contrib_list)),
                       num_contribs=len(contrib_list),
                       gid_list_count=valid_gids_,
                       gid_list_count_str=','.join(map(str, valid_gids_)),
                       num_gids_count=len(valid_gids_),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       aid_list_count=valid_aids_,
                       aid_list_count_str=','.join(map(str, valid_aids_)),
                       num_aids_count=len(valid_aids_),
                       nid_list=nid_list,
                       nid_list_str=','.join(map(str, nid_list)),
                       num_nids=len(nid_list),
                       nid_list_count=nid_list_count,
                       nid_list_count_str=','.join(map(str, nid_list_count)),
                       num_nids_count=len(nid_list_count),
                       used_gids=used_gids,
                       num_used_gids=len(used_gids),
                       used_contribs=used_contrib_tags,
                       num_used_contribs=len(used_contrib_tags))
@register_route('/view/imagesets')
def view_imagesets():
    """List imagesets with image/annotation review progress.

    ``?imgsetid=`` accepts a comma-separated list of rowids; otherwise all
    valid imagesets are shown, sorted chronologically.
    """
    ibs = current_app.ibs
    filtered = True
    imgsetid = request.args.get('imgsetid', '')
    if len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
    else:
        imgsetid_list = ibs.get_valid_imgsetids()
        filtered = False
    start_time_posix_list = ibs.get_imageset_start_time_posix(imgsetid_list)
    datetime_list = [
        ut.unixtime_to_datetimestr(start_time_posix)
        if start_time_posix is not None else
        'Unknown'
        for start_time_posix in start_time_posix_list
    ]
    gids_list = [ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ]
    aids_list = [ ut.flatten(ibs.get_image_aids(gid_list)) for gid_list in gids_list ]
    images_reviewed_list = [ imageset_image_processed(ibs, gid_list) for gid_list in gids_list ]
    annots_reviewed_viewpoint_list = [ imageset_annot_viewpoint_processed(ibs, aid_list) for aid_list in aids_list ]
    annots_reviewed_quality_list = [ imageset_annot_quality_processed(ibs, aid_list) for aid_list in aids_list ]
    image_processed_list = [ images_reviewed.count(True) for images_reviewed in images_reviewed_list ]
    annot_processed_viewpoint_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_viewpoint_list ]
    annot_processed_quality_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_quality_list ]
    # An imageset is 'reviewed' when all three per-item flag lists are all-True.
    reviewed_list = [ all(images_reviewed) and all(annots_reviewed_viewpoint) and all(annots_reviewed_quality) for images_reviewed, annots_reviewed_viewpoint, annots_reviewed_quality in zip(images_reviewed_list, annots_reviewed_viewpoint_list, annots_reviewed_quality_list) ]
    # BUG FIX: zip() is a one-shot iterator on Python 3 (this file already
    # targets 2/3 via six); materialize it so .sort() below works.
    imageset_list = list(zip(
        imgsetid_list,
        ibs.get_imageset_text(imgsetid_list),
        ibs.get_imageset_num_gids(imgsetid_list),
        image_processed_list,
        ibs.get_imageset_num_aids(imgsetid_list),
        annot_processed_viewpoint_list,
        annot_processed_quality_list,
        start_time_posix_list,
        datetime_list,
        reviewed_list,
    ))
    # Sort chronologically by the start_time_posix tuple slot.
    imageset_list.sort(key=lambda t: t[7])
    return ap.template('view', 'imagesets',
                       filtered=filtered,
                       imgsetid_list=imgsetid_list,
                       imgsetid_list_str=','.join(map(str, imgsetid_list)),
                       num_imgsetids=len(imgsetid_list),
                       imageset_list=imageset_list,
                       num_imagesets=len(imageset_list))
@register_route('/view/images')
def view_images():
    """Paginated image listing, filterable by ``?gid=`` or ``?imgsetid=``."""
    ibs = current_app.ibs
    filtered = True
    imgsetid_list = []
    gid = request.args.get('gid', '')
    imgsetid = request.args.get('imgsetid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
    elif len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
        # BUG FIX: the original passed `imgsetid=imgsetid` (the raw request
        # string) instead of the loop variable, querying the wrong value for
        # every element; pass each parsed rowid as the sibling views do.
        gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
    else:
        gid_list = ibs.get_valid_gids()
        filtered = False
    # Page
    page_start = min(len(gid_list), (page - 1) * PAGE_SIZE)
    page_end = min(len(gid_list), page * PAGE_SIZE)
    page_total = int(math.ceil(len(gid_list) / PAGE_SIZE))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(gid_list) else page + 1
    gid_list = gid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(gid_list), page_previous, page_next, ))
    image_unixtime_list = ibs.get_image_unixtime(gid_list)
    datetime_list = [
        ut.unixtime_to_datetimestr(image_unixtime)
        if image_unixtime is not None
        else
        'Unknown'
        for image_unixtime in image_unixtime_list
    ]
    # BUG FIX: materialize zip() so .sort() works on Python 3.
    image_list = list(zip(
        gid_list,
        [ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_image_imgsetids(gid_list) ],
        ibs.get_image_gnames(gid_list),
        image_unixtime_list,
        datetime_list,
        ibs.get_image_gps(gid_list),
        ibs.get_image_party_tag(gid_list),
        ibs.get_image_contributor_tag(gid_list),
        ibs.get_image_notes(gid_list),
        imageset_image_processed(ibs, gid_list),
    ))
    # Sort chronologically by the unixtime tuple slot.
    image_list.sort(key=lambda t: t[3])
    return ap.template('view', 'images',
                       filtered=filtered,
                       imgsetid_list=imgsetid_list,
                       imgsetid_list_str=','.join(map(str, imgsetid_list)),
                       num_imgsetids=len(imgsetid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       image_list=image_list,
                       num_images=len(image_list),
                       page=page,
                       page_start=page_start,
                       page_end=page_end,
                       page_total=page_total,
                       page_previous=page_previous,
                       page_next=page_next)
@register_route('/view/annotations')
def view_annotations():
    """Paginated annotation listing, filterable by aid, gid, or imgsetid."""
    ibs = current_app.ibs
    filtered = True
    imgsetid_list = []
    gid_list = []
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    imgsetid = request.args.get('imgsetid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    elif len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
        gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    else:
        aid_list = ibs.get_valid_aids()
        filtered = False
    # Page
    page_start = min(len(aid_list), (page - 1) * PAGE_SIZE)
    page_end = min(len(aid_list), page * PAGE_SIZE)
    page_total = int(math.ceil(len(aid_list) / PAGE_SIZE))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(aid_list) else page + 1
    aid_list = aid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(aid_list), page_previous, page_next, ))
    # BUG FIX: materialize zip() so .sort() works on Python 3.
    annotation_list = list(zip(
        aid_list,
        ibs.get_annot_gids(aid_list),
        [ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_annot_imgsetids(aid_list) ],
        ibs.get_annot_image_names(aid_list),
        ibs.get_annot_names(aid_list),
        ibs.get_annot_exemplar_flags(aid_list),
        ibs.get_annot_species_texts(aid_list),
        ibs.get_annot_yaw_texts(aid_list),
        ibs.get_annot_quality_texts(aid_list),
        ibs.get_annot_sex_texts(aid_list),
        ibs.get_annot_age_months_est(aid_list),
        # 'reviewed' means both viewpoint and quality have been processed.
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(imageset_annot_viewpoint_processed(ibs, aid_list), imageset_annot_quality_processed(ibs, aid_list)) ],
    ))
    # Sort by annotation rowid.
    annotation_list.sort(key=lambda t: t[0])
    return ap.template('view', 'annotations',
                       filtered=filtered,
                       imgsetid_list=imgsetid_list,
                       imgsetid_list_str=','.join(map(str, imgsetid_list)),
                       num_imgsetids=len(imgsetid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       annotation_list=annotation_list,
                       num_annotations=len(annotation_list),
                       page=page,
                       page_start=page_start,
                       page_end=page_end,
                       page_total=page_total,
                       page_previous=page_previous,
                       page_next=page_next)
@register_route('/view/names')
def view_names():
    """Paginated name listing (each with its annotations), filterable by
    nid, aid, gid, or imgsetid. Uses a smaller page size since every name
    expands into a table of its annotations."""
    ibs = current_app.ibs
    filtered = True
    aid_list = []
    imgsetid_list = []
    gid_list = []
    nid = request.args.get('nid', '')
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    imgsetid = request.args.get('imgsetid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(nid) > 0:
        nid_list = nid.strip().split(',')
        nid_list = [ None if nid_ == 'None' or nid_ == '' else int(nid_) for nid_ in nid_list ]
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
        nid_list = ibs.get_annot_name_rowids(aid_list)
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        nid_list = ibs.get_annot_name_rowids(aid_list)
    elif len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
        gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        nid_list = ibs.get_annot_name_rowids(aid_list)
    else:
        nid_list = ibs.get_valid_nids()
        filtered = False
    # Page (names paginate at 1/5 the normal page size)
    PAGE_SIZE_ = int(PAGE_SIZE / 5)
    page_start = min(len(nid_list), (page - 1) * PAGE_SIZE_)
    page_end = min(len(nid_list), page * PAGE_SIZE_)
    page_total = int(math.ceil(len(nid_list) / PAGE_SIZE_))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(nid_list) else page + 1
    nid_list = nid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(nid_list), page_previous, page_next, ))
    aids_list = ibs.get_name_aids(nid_list)
    # BUG FIX: materialize every zip() — on Python 3 they are one-shot
    # iterators, which breaks both the .sort() below and template reuse.
    annotations_list = [ list(zip(
        aid_list_,
        ibs.get_annot_gids(aid_list_),
        [ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_annot_imgsetids(aid_list_) ],
        ibs.get_annot_image_names(aid_list_),
        ibs.get_annot_names(aid_list_),
        ibs.get_annot_exemplar_flags(aid_list_),
        ibs.get_annot_species_texts(aid_list_),
        ibs.get_annot_yaw_texts(aid_list_),
        ibs.get_annot_quality_texts(aid_list_),
        ibs.get_annot_sex_texts(aid_list_),
        ibs.get_annot_age_months_est(aid_list_),
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(imageset_annot_viewpoint_processed(ibs, aid_list_), imageset_annot_quality_processed(ibs, aid_list_)) ],
    )) for aid_list_ in aids_list ]
    name_list = list(zip(
        nid_list,
        annotations_list
    ))
    # Sort by name rowid.
    name_list.sort(key=lambda t: t[0])
    return ap.template('view', 'names',
                       filtered=filtered,
                       imgsetid_list=imgsetid_list,
                       imgsetid_list_str=','.join(map(str, imgsetid_list)),
                       num_imgsetids=len(imgsetid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       nid_list=nid_list,
                       nid_list_str=','.join(map(str, nid_list)),
                       num_nids=len(nid_list),
                       name_list=name_list,
                       num_names=len(name_list),
                       page=page,
                       page_start=page_start,
                       page_end=page_end,
                       page_total=page_total,
                       page_previous=page_previous,
                       page_next=page_next)
@register_route('/turk')
def turk():
    """Render the top-level turking menu page."""
    raw_imgsetid = request.args.get('imgsetid', '')
    if raw_imgsetid == 'None' or raw_imgsetid == '':
        imgsetid = None
    else:
        imgsetid = int(raw_imgsetid)
    return ap.template('turk', None, imgsetid=imgsetid)
@register_route('/turk/detection')
def turk_detection():
    """Serve the detection (bounding box) turking page.

    Chooses a random unreviewed image from the requested imageset (or honors
    an explicit ``gid`` query arg) and renders it with its existing
    annotations scaled into display (``ap.TARGET_WIDTH``) space.
    """
    ibs = current_app.ibs
    refer_aid = request.args.get('refer_aid', None)
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    reviewed_list = imageset_image_processed(ibs, gid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
    except ZeroDivisionError:
        # Empty imageset: report zero progress instead of crashing
        # (consistent with get_turk_annot_args / turk_additional)
        progress = '0.00'
    imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
    gid = request.args.get('gid', '')
    if len(gid) > 0:
        gid = int(gid)
    else:
        # No explicit gid: pick a random image that has not been reviewed yet
        gid_list_ = ut.filterfalse_items(gid_list, reviewed_list)
        if len(gid_list_) == 0:
            gid = None
        else:
            # gid = gid_list_[0]
            gid = random.choice(gid_list_)
    previous = request.args.get('previous', None)
    finished = gid is None
    review = 'review' in request.args.keys()
    display_instructions = request.cookies.get('detection_instructions_seen', 1) == 0
    display_species_examples = False  # request.cookies.get('detection_example_species_seen', 0) == 0
    if not finished:
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image, filter_width=False)
        # Get annotations
        width, height = ibs.get_image_sizes(gid)
        # Scale image-space bboxes into display space
        scale_factor = float(ap.TARGET_WIDTH) / float(width)
        aid_list = ibs.get_image_aids(gid)
        annot_bbox_list = ibs.get_annot_bboxes(aid_list)
        annot_thetas_list = ibs.get_annot_thetas(aid_list)
        species_list = ibs.get_annot_species_texts(aid_list)
        # Get annotation bounding boxes
        annotation_list = []
        for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
            temp = {}
            temp['left'] = int(scale_factor * annot_bbox[0])
            temp['top'] = int(scale_factor * annot_bbox[1])
            temp['width'] = int(scale_factor * (annot_bbox[2]))
            temp['height'] = int(scale_factor * (annot_bbox[3]))
            temp['label'] = species
            temp['id'] = aid
            temp['angle'] = float(annot_theta)
            annotation_list.append(temp)
        if len(species_list) > 0:
            species = max(set(species_list), key=species_list.count)  # Get most common species
        elif default_species(ibs) is not None:
            species = default_species(ibs)
        else:
            species = KEY_DEFAULTS[SPECIES_KEY]
    else:
        gpath = None
        species = None
        image_src = None
        annotation_list = []
    return ap.template('turk', 'detection',
                       imgsetid=imgsetid,
                       gid=gid,
                       refer_aid=refer_aid,
                       species=species,
                       image_path=gpath,
                       image_src=image_src,
                       previous=previous,
                       imagesettext=imagesettext,
                       progress=progress,
                       finished=finished,
                       annotation_list=annotation_list,
                       display_instructions=display_instructions,
                       display_species_examples=display_species_examples,
                       review=review)
@register_route('/turk/detection/dynamic')
def turk_detection_dynamic():
    """Serve the dynamic detection view for a single image.

    Bounding boxes are expressed as percentages of the image dimensions so
    the client can lay them out at any display size.
    """
    ibs = current_app.ibs
    gid = request.args.get('gid', None)
    gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
    image = ap.open_oriented_image(gpath)
    image_src = ap.embed_image_html(image, filter_width=False)
    # Get annotations
    width, height = ibs.get_image_sizes(gid)
    aid_list = ibs.get_image_aids(gid)
    annot_bbox_list = ibs.get_annot_bboxes(aid_list)
    annot_thetas_list = ibs.get_annot_thetas(aid_list)
    species_list = ibs.get_annot_species_texts(aid_list)
    # Get annotation bounding boxes (as percentages of the image size).
    # NOTE: multiply by 100.0 BEFORE dividing so the arithmetic stays in
    # floating point; `(annot_bbox[0] / width)` truncates to 0 under
    # Python 2 integer division when both operands are ints.
    annotation_list = []
    for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
        temp = {}
        temp['left'] = 100.0 * annot_bbox[0] / width
        temp['top'] = 100.0 * annot_bbox[1] / height
        temp['width'] = 100.0 * annot_bbox[2] / width
        temp['height'] = 100.0 * annot_bbox[3] / height
        temp['label'] = species
        temp['id'] = aid
        temp['angle'] = float(annot_theta)
        annotation_list.append(temp)
    if len(species_list) > 0:
        species = max(set(species_list), key=species_list.count)  # Get most common species
    elif default_species(ibs) is not None:
        species = default_species(ibs)
    else:
        species = KEY_DEFAULTS[SPECIES_KEY]
    return ap.template('turk', 'detection_dynamic',
                       gid=gid,
                       refer_aid=None,
                       species=species,
                       image_path=gpath,
                       image_src=image_src,
                       annotation_list=annotation_list,
                       __wrapper__=False)
def get_turk_annot_args(is_reviewed_func):
    """
    Helper to return aids in an imageset or a group review

    Parses the shared turking query parameters (``imgsetid``, ``src_ag``,
    ``dst_ag``, ``aid``, ``previous``) and selects the next annotation to
    review.  Two modes:

    * imageset mode (no annot groups given): aids come from the imageset's
      images and a random unreviewed aid is chosen next.
    * group-review mode (both ``src_ag`` and ``dst_ag`` given): aids come
      from the source annot group; an aid counts as reviewed once it is a
      member of the destination group, and the next aid is taken in order.

    Args:
        is_reviewed_func (callable): ``(ibs, aid_list) -> list of bool``
            marking which aids are already reviewed (imageset mode only).

    Returns:
        tuple: ``(aid_list, reviewed_list, imgsetid, src_ag, dst_ag,
        progress, aid, previous)`` where ``aid`` is None when everything
        is reviewed and ``progress`` is a percentage string.
    """
    ibs = current_app.ibs
    def _ensureid(_id):
        # Normalize missing/placeholder query values to None, otherwise int
        return None if _id == 'None' or _id == '' else int(_id)
    imgsetid = request.args.get('imgsetid', '')
    src_ag = request.args.get('src_ag', '')
    dst_ag = request.args.get('dst_ag', '')
    imgsetid = _ensureid(imgsetid)
    src_ag = _ensureid(src_ag)
    dst_ag = _ensureid(dst_ag)
    # Group review requires BOTH a source and a destination annot group
    group_review_flag = src_ag is not None and dst_ag is not None
    if not group_review_flag:
        gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        reviewed_list = is_reviewed_func(ibs, aid_list)
    else:
        src_gar_rowid_list = ibs.get_annotgroup_gar_rowids(src_ag)
        dst_gar_rowid_list = ibs.get_annotgroup_gar_rowids(dst_ag)
        src_aid_list = ibs.get_gar_aid(src_gar_rowid_list)
        dst_aid_list = ibs.get_gar_aid(dst_gar_rowid_list)
        aid_list = src_aid_list
        # Membership in the destination group marks an aid as reviewed
        reviewed_list = [ src_aid in dst_aid_list for src_aid in src_aid_list ]
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        # No aids at all -> zero progress
        progress = '0.00'
    aid = request.args.get('aid', '')
    if len(aid) > 0:
        aid = int(aid)
    else:
        aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
        if len(aid_list_) == 0:
            aid = None
        else:
            if group_review_flag:
                # Group review proceeds in deterministic order
                aid = aid_list_[0]
            else:
                aid = random.choice(aid_list_)
    previous = request.args.get('previous', None)
    print('aid = %r' % (aid,))
    #print(ut.dict_str(ibs.get_annot_info(aid)))
    print(ut.obj_str(ibs.get_annot_info(aid, default=True, nl=True)))
    return aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous
@register_route('/turk/viewpoint')
def turk_viewpoint():
    """
    Serve the viewpoint (yaw) turking page for the next unreviewed annotation.

    CommandLine:
        python -m ibeis.web.app --exec-turk_viewpoint --db PZ_Master1

    Example:
        >>> # SCRIPT
        >>> from ibeis.ibsfuncs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
        >>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    tup = get_turk_annot_args(imageset_annot_viewpoint_processed)
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
    value = convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('viewpoint_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image)
        species = ibs.get_annot_species_texts(aid)
    else:
        gid = None
        gpath = None
        image_src = None
        species = None
    imagesettext = ibs.get_imageset_text(imgsetid)
    # Build the species selector options, sorted by their nice names
    species_rowids = ibs._get_all_species_rowids()
    species_nice_list = ibs.get_species_nice(species_rowids)
    combined_list = sorted(zip(species_nice_list, species_rowids))
    species_nice_list = [ combined[0] for combined in combined_list ]
    species_rowids = [ combined[1] for combined in combined_list ]
    species_text_list = ibs.get_species_texts(species_rowids)
    species_selected_list = [ species == species_ for species_ in species_text_list ]
    # list() is required so the 'Unspecified' entry can be prepended below;
    # zip() returns a one-shot iterator (not a list) on Python 3
    species_list = list(zip(species_nice_list, species_text_list, species_selected_list))
    species_list = [ ('Unspecified', const.UNKNOWN, True) ] + species_list
    return ap.template('turk', 'viewpoint',
                       imgsetid=imgsetid,
                       src_ag=src_ag,
                       dst_ag=dst_ag,
                       gid=gid,
                       aid=aid,
                       value=value,
                       image_path=gpath,
                       image_src=image_src,
                       previous=previous,
                       species_list=species_list,
                       imagesettext=imagesettext,
                       progress=progress,
                       finished=finished,
                       display_instructions=display_instructions,
                       review=review)
@register_route('/turk/quality')
def turk_quality():
    """
    Serve the quality turking page for the next unreviewed annotation.

    PZ Needs Tags:
        17242
        14468
        14427
        15946
        14771
        14084
        4102
        6074
        3409

    GZ Needs Tags;
        1302

    CommandLine:
        python -m ibeis.web.app --exec-turk_quality --db PZ_Master1
        python -m ibeis.web.app --exec-turk_quality --db GZ_Master1
        python -m ibeis.web.app --exec-turk_quality --db GIRM_Master1

    Example:
        >>> # SCRIPT
        >>> from ibeis.ibsfuncs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(qual=True)
        >>> valid_views = ['primary', 'primary1', 'primary-1']
        >>> aid_list = ibs.filter_aids_to_viewpoint(aid_list_, valid_views, unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag,
     progress, aid, previous) = get_turk_annot_args(imageset_annot_quality_processed)
    # Normalize sentinel qualities: -1 means unset, 0 is bumped to 1
    value = ibs.get_annot_qualities(aid)
    if value == -1:
        value = None
    if value == 0:
        value = 1
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('quality_instructions_seen', 1) == 0
    gid = None
    gpath = None
    image_src = None
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image_src = ap.embed_image_html(ap.open_oriented_image(gpath))
    imagesettext = ibs.get_imageset_text(imgsetid)
    return ap.template('turk', 'quality',
                       imgsetid=imgsetid,
                       src_ag=src_ag,
                       dst_ag=dst_ag,
                       gid=gid,
                       aid=aid,
                       value=value,
                       image_path=gpath,
                       image_src=image_src,
                       previous=previous,
                       imagesettext=imagesettext,
                       progress=progress,
                       finished=finished,
                       display_instructions=display_instructions,
                       review=review)
##@register_route('/turk/viewpoint')
#def old_turk_viewpoint():
# #ibs = current_app.ibs
# #imgsetid = request.args.get('imgsetid', '')
# #imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
# #imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
# #src_ag = request.args.get('src_ag', '')
# #src_ag = None if src_ag == 'None' or src_ag == '' else int(src_ag)
# #dst_ag = request.args.get('dst_ag', '')
# #dst_ag = None if dst_ag == 'None' or dst_ag == '' else int(dst_ag)
# #group_review_flag = src_ag is not None and dst_ag is not None
# #if not group_review_flag:
# # gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
# # aid_list = ut.flatten(ibs.get_image_aids(gid_list))
# # reviewed_list = imageset_annot_viewpoint_processed(ibs, aid_list)
# #else:
# # src_gar_rowid_list = ibs.get_annotgroup_gar_rowids(src_ag)
# # dst_gar_rowid_list = ibs.get_annotgroup_gar_rowids(dst_ag)
# # src_aid_list = ibs.get_gar_aid(src_gar_rowid_list)
# # dst_aid_list = ibs.get_gar_aid(dst_gar_rowid_list)
# # aid_list = src_aid_list
# # reviewed_list = [ src_aid in dst_aid_list for src_aid in src_aid_list ]
# #previous = request.args.get('previous', None)
# ibs = current_app.ibs
# tup = get_turk_annot_args(imageset_annot_viewpoint_processed)
# (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
# value = convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
# review = 'review' in request.args.keys()
# finished = aid is None
# display_instructions = request.cookies.get('viewpoint_instructions_seen', 1) == 0
# if not finished:
# gid = ibs.get_annot_gids(aid)
# gpath = ibs.get_annot_chip_fpath(aid)
# image = ap.open_oriented_image(gpath)
# image_src = ap.embed_image_html(image)
# else:
# gid = None
# gpath = None
# image_src = None
# imagesettext = ibs.get_imageset_text(imgsetid)
# return ap.template('turk', 'viewpoint',
# imgsetid=imgsetid,
# src_ag=src_ag,
# dst_ag=dst_ag,
# gid=gid,
# aid=aid,
# value=value,
# image_path=gpath,
# image_src=image_src,
# previous=previous,
# imagesettext=imagesettext,
# progress=progress,
# finished=finished,
# display_instructions=display_instructions,
# review=review)
#@register_route('/turk/quality')
#def old_turk_quality():
# #ibs = current_app.ibs
# #imgsetid = request.args.get('imgsetid', '')
# #imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
# #gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
# #aid_list = ut.flatten(ibs.get_image_aids(gid_list))
# #reviewed_list = imageset_annot_quality_processed(ibs, aid_list)
# #try:
# # progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
# #except ZeroDivisionError:
# # progress = '0.00'
# #aid = request.args.get('aid', '')
# #if len(aid) > 0:
# # aid = int(aid)
# #else:
# # aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
# # if len(aid_list_) == 0:
# # aid = None
# # else:
# # # aid = aid_list_[0]
# # aid = random.choice(aid_list_)
# #previous = request.args.get('previous', None)
# ibs = current_app.ibs
# tup = get_turk_annot_args(imageset_annot_quality_processed)
# (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
# value = ibs.get_annot_qualities(aid)
# if value == -1:
# value = None
# if value == 0:
# value = 1
# review = 'review' in request.args.keys()
# finished = aid is None
# display_instructions = request.cookies.get('quality_instructions_seen', 1) == 0
# if not finished:
# gid = ibs.get_annot_gids(aid)
# gpath = ibs.get_annot_chip_fpath(aid)
# image = ap.open_oriented_image(gpath)
# image_src = ap.embed_image_html(image)
# else:
# gid = None
# gpath = None
# image_src = None
# imagesettext = ibs.get_imageset_text(imgsetid)
# return ap.template('turk', 'quality',
# imgsetid=imgsetid,
# src_ag=src_ag,
# dst_ag=dst_ag,
# gid=gid,
# aid=aid,
# value=value,
# image_path=gpath,
# image_src=image_src,
# previous=previous,
# imagesettext=imagesettext,
# progress=progress,
# finished=finished,
# display_instructions=display_instructions,
# review=review)
@register_route('/turk/additional')
def turk_additional():
    """Serve the 'additional metadata' (sex/age) turking page.

    Maps the annotation's stored sex code and (min, max) age-month estimate
    onto the form's select values, then renders the next unreviewed aid.
    """
    ibs = current_app.ibs
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    nid_list = ibs.get_annot_nids(aid_list)
    reviewed_list = imageset_annot_additional_processed(ibs, aid_list, nid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
    aid = request.args.get('aid', '')
    if len(aid) > 0:
        aid = int(aid)
    else:
        aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
        if len(aid_list_) == 0:
            aid = None
        else:
            # aid = aid_list_[0]
            aid = random.choice(aid_list_)
    previous = request.args.get('previous', None)
    # The form offsets sex codes by 2; negative stored codes mean unknown
    value_sex = ibs.get_annot_sex([aid])[0]
    if value_sex >= 0:
        value_sex += 2
    else:
        value_sex = None
    # Map the stored (min, max) month bounds back onto the form's age bucket.
    # NOTE: use == (not 'is') for integer comparisons; 'is' on int literals
    # only works by accident of CPython's small-integer caching.
    value_age_min, value_age_max = ibs.get_annot_age_months_est([aid])[0]
    value_age = None
    if (value_age_min == -1 or value_age_min is None) and (value_age_max == -1 or value_age_max is None):
        value_age = 1
    if (value_age_min == 0 or value_age_min is None) and value_age_max == 2:
        value_age = 2
    elif value_age_min == 3 and value_age_max == 5:
        value_age = 3
    elif value_age_min == 6 and value_age_max == 11:
        value_age = 4
    elif value_age_min == 12 and value_age_max == 23:
        value_age = 5
    elif value_age_min == 24 and value_age_max == 35:
        value_age = 6
    elif value_age_min == 36 and (value_age_max is None or value_age_max > 36):
        # None check first so the > comparison never sees None
        value_age = 7
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('additional_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image)
    else:
        gid = None
        gpath = None
        image_src = None
    name_aid_list = None
    nid = ibs.get_annot_name_rowids(aid)
    if nid is not None:
        # Show the annotation's name-mates, best quality first
        name_aid_list = ibs.get_name_aids(nid)
        quality_list = ibs.get_annot_qualities(name_aid_list)
        quality_text_list = ibs.get_annot_quality_texts(name_aid_list)
        yaw_text_list = ibs.get_annot_yaw_texts(name_aid_list)
        name_aid_combined_list = list(zip(
            name_aid_list,
            quality_list,
            quality_text_list,
            yaw_text_list,
        ))
        name_aid_combined_list.sort(key=lambda t: t[1], reverse=True)
    return ap.template('turk', 'additional',
                       imgsetid=imgsetid,
                       gid=gid,
                       aid=aid,
                       value_sex=value_sex,
                       value_age=value_age,
                       image_path=gpath,
                       name_aid_combined_list=name_aid_combined_list,
                       image_src=image_src,
                       previous=previous,
                       imagesettext=imagesettext,
                       progress=progress,
                       finished=finished,
                       display_instructions=display_instructions,
                       review=review)
@register_route('/submit/detection', methods=['POST'])
def submit_detection():
    """Persist a detection turking submission (bounding boxes) for an image.

    Actions: 'delete' (intentionally disabled), 'clear' (remove all of the
    image's annotations), or a normal save that reconciles the submitted
    boxes against the image's existing annotations.
    """
    ibs = current_app.ibs
    method = request.form.get('detection-submit', '')
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
    gid = int(request.form['detection-gid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        # Image deletion is deliberately disabled
        # ibs.delete_images(gid)
        # print('[web] (DELETED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        pass
    elif method.lower() == 'clear':
        aid_list = ibs.get_image_aids(gid)
        ibs.delete_annots(aid_list)
        # Fixed log typo: CLEAERED -> CLEARED
        print('[web] (CLEARED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        redirection = request.referrer
        if 'gid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&gid=%d' % (redirection, gid, )
            else:
                redirection = '%s?gid=%d' % (redirection, gid, )
        return redirect(redirection)
    else:
        current_aid_list = ibs.get_image_aids(gid)
        # Make new annotations
        width, height = ibs.get_image_sizes(gid)
        # Boxes arrive in display (TARGET_WIDTH) space; scale back to image space
        scale_factor = float(width) / float(ap.TARGET_WIDTH)
        # Get aids
        annotation_list = json.loads(request.form['detection-annotations'])
        bbox_list = [
            (
                int(scale_factor * annot['left']),
                int(scale_factor * annot['top']),
                int(scale_factor * annot['width']),
                int(scale_factor * annot['height']),
            )
            for annot in annotation_list
        ]
        theta_list = [
            float(annot['angle'])
            for annot in annotation_list
        ]
        survived_aid_list = [
            None if annot['id'] is None else int(annot['id'])
            for annot in annotation_list
        ]
        species_list = [
            annot['label']
            for annot in annotation_list
        ]
        # Delete annotations that didn't survive
        kill_aid_list = list(set(current_aid_list) - set(survived_aid_list))
        ibs.delete_annots(kill_aid_list)
        for aid, bbox, theta, species in zip(survived_aid_list, bbox_list, theta_list, species_list):
            if aid is None:
                # Brand-new box drawn by the turker
                ibs.add_annots([gid], [bbox], theta_list=[theta], species_list=[species])
            else:
                ibs.set_annot_bboxes([aid], [bbox])
                ibs.set_annot_thetas([aid], [theta])
                ibs.set_annot_species([aid], [species])
        ibs.set_image_reviewed([gid], [1])
        print('[web] turk_id: %s, gid: %d, bbox_list: %r, species_list: %r' % (turk_id, gid, annotation_list, species_list))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_detection', imgsetid=imgsetid, previous=gid))
def movegroup_aid(ibs, aid, src_ag, dst_ag):
    """Add annotation *aid* (a member of annot-group *src_ag*) to *dst_ag*.

    Locates the aid's group-relationship rowid inside the source group for
    logging, then appends the aid to the destination group.
    """
    gar_rowids = ibs.get_annot_gar_rowids(aid)
    group_rowids = ibs.get_gar_annotgroup_rowid(gar_rowids)
    src_gar_rowid = gar_rowids[group_rowids.index(src_ag)]
    print('Moving aid: %s from src_ag: %s (%s) to dst_ag: %s' %
          (aid, src_ag, src_gar_rowid, dst_ag))
    # The source membership is deliberately left intact:
    # ibs.delete_gar([src_gar_rowid])
    ibs.add_gar([dst_ag], [aid])
@register_route('/submit/viewpoint', methods=['POST'])
def submit_viewpoint():
    """Persist a viewpoint turking submission and redirect to the next task.

    The submit button value selects one of five mutually exclusive actions:
    delete, make junk, rotate left, rotate right, or a normal viewpoint set.
    """
    ibs = current_app.ibs
    method = request.form.get('viewpoint-submit', '')
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
    src_ag = request.args.get('src_ag', '')
    src_ag = None if src_ag == 'None' or src_ag == '' else int(src_ag)
    dst_ag = request.args.get('dst_ag', '')
    dst_ag = None if dst_ag == 'None' or dst_ag == '' else int(dst_ag)
    aid = int(request.form['viewpoint-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    # The actions below are mutually exclusive.  Using one if/elif chain
    # (instead of independent ifs) stops the 'delete' case from falling
    # through into the final else, which would read 'viewpoint-value' and
    # try to set the yaw of the annotation that was just deleted.
    if method.lower() == 'delete':
        ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    elif method.lower() == 'make junk':
        ibs.set_annot_quality_texts([aid], [const.QUAL_JUNK])
        print('[web] (SET AS JUNK) turk_id: %s, aid: %d' % (turk_id, aid, ))
        redirection = request.referrer
        if 'aid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&aid=%d' % (redirection, aid, )
            else:
                redirection = '%s?aid=%d' % (redirection, aid, )
        return redirect(redirection)
    elif method.lower() == 'rotate left':
        # Rotate the annotation 90 degrees CCW and swap its bbox w/h in place
        theta = ibs.get_annot_thetas(aid)
        theta = (theta + PI / 2) % TAU
        ibs.set_annot_thetas(aid, theta)
        (xtl, ytl, w, h) = ibs.get_annot_bboxes(aid)
        diffx = int(round((w / 2.0) - (h / 2.0)))
        diffy = int(round((h / 2.0) - (w / 2.0)))
        xtl, ytl, w, h = xtl + diffx, ytl + diffy, h, w
        ibs.set_annot_bboxes([aid], [(xtl, ytl, w, h)])
        print('[web] (ROTATED LEFT) turk_id: %s, aid: %d' % (turk_id, aid, ))
        redirection = request.referrer
        if 'aid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&aid=%d' % (redirection, aid, )
            else:
                redirection = '%s?aid=%d' % (redirection, aid, )
        return redirect(redirection)
    elif method.lower() == 'rotate right':
        # Rotate the annotation 90 degrees CW and swap its bbox w/h in place
        theta = ibs.get_annot_thetas(aid)
        theta = (theta - PI / 2) % TAU
        ibs.set_annot_thetas(aid, theta)
        (xtl, ytl, w, h) = ibs.get_annot_bboxes(aid)
        diffx = int(round((w / 2.0) - (h / 2.0)))
        diffy = int(round((h / 2.0) - (w / 2.0)))
        xtl, ytl, w, h = xtl + diffx, ytl + diffy, h, w
        ibs.set_annot_bboxes([aid], [(xtl, ytl, w, h)])
        print('[web] (ROTATED RIGHT) turk_id: %s, aid: %d' % (turk_id, aid, ))
        redirection = request.referrer
        if 'aid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&aid=%d' % (redirection, aid, )
            else:
                redirection = '%s?aid=%d' % (redirection, aid, )
        return redirect(redirection)
    else:
        if src_ag is not None and dst_ag is not None:
            movegroup_aid(ibs, aid, src_ag, dst_ag)
        value = int(request.form['viewpoint-value'])
        yaw = convert_old_viewpoint_to_yaw(value)
        species_text = request.form['viewpoint-species']
        ibs.set_annot_yaws([aid], [yaw], input_is_degrees=False)
        ibs.set_annot_species([aid], [species_text])
        print('[web] turk_id: %s, aid: %d, yaw: %d' % (turk_id, aid, yaw))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_viewpoint', imgsetid=imgsetid, src_ag=src_ag,
                                dst_ag=dst_ag, previous=aid))
@register_route('/submit/quality', methods=['POST'])
def submit_quality():
    """Persist a quality turking submission and redirect to the next task."""
    ibs = current_app.ibs
    def _optional_int(key):
        # Query args use '' / 'None' as placeholders for "not given"
        raw = request.args.get(key, '')
        return None if raw == 'None' or raw == '' else int(raw)
    method = request.form.get('quality-submit', '')
    imgsetid = _optional_int('imgsetid')
    aid = int(request.form['quality-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    src_ag = _optional_int('src_ag')
    dst_ag = _optional_int('dst_ag')
    if method.lower() == 'delete':
        ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        if src_ag is not None and dst_ag is not None:
            movegroup_aid(ibs, aid, src_ag, dst_ag)
        quality = int(request.form['quality-value'])
        ibs.set_annot_qualities([aid], [quality])
        print('[web] turk_id: %s, aid: %d, quality: %d' % (turk_id, aid, quality))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    return redirect(url_for('turk_quality', imgsetid=imgsetid, src_ag=src_ag,
                            dst_ag=dst_ag, previous=aid))
@register_route('/submit/additional', methods=['POST'])
def submit_additional():
    """Persist sex/age turking metadata for an annotation, then redirect."""
    ibs = current_app.ibs
    method = request.form.get('additional-submit', '')
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
    aid = int(request.form['additional-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        sex = int(request.form['additional-sex-value'])
        age = int(request.form['additional-age-value'])
        # The web form offsets sex codes by 2; anything below 2 means unknown
        sex = (sex - 2) if sex >= 2 else -1
        # Translate the form's age bucket into (min, max) month bounds;
        # unknown buckets map to fully-open (None, None)
        age_lookup = {
            1: (None, None),
            2: (None, 2),
            3: (3, 5),
            4: (6, 11),
            5: (12, 23),
            6: (24, 35),
            7: (36, None),
        }
        age_min, age_max = age_lookup.get(age, (None, None))
        ibs.set_annot_sex([aid], [sex])
        nid = ibs.get_annot_name_rowids(aid)
        DAN_SPECIAL_WRITE_AGE_TO_ALL_ANOTATIONS = True
        if nid is not None and DAN_SPECIAL_WRITE_AGE_TO_ALL_ANOTATIONS:
            # Propagate the age estimate to every annotation of the same name
            aid_list = ibs.get_name_aids(nid)
            ibs.set_annot_age_months_est_min(aid_list, [age_min] * len(aid_list))
            ibs.set_annot_age_months_est_max(aid_list, [age_max] * len(aid_list))
        else:
            ibs.set_annot_age_months_est_min([aid], [age_min])
            ibs.set_annot_age_months_est_max([aid], [age_max])
        print('[web] turk_id: %s, aid: %d, sex: %r, age: %r' % (turk_id, aid, sex, age))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    return redirect(url_for('turk_additional', imgsetid=imgsetid, previous=aid))
@register_route('/ajax/cookie')
def set_cookie():
    """Store a client-side cookie from the query string and acknowledge."""
    cookie_name = request.args['name']
    cookie_value = request.args['value']
    response = make_response('true')
    response.set_cookie(cookie_name, cookie_value)
    print('[web] Set Cookie: %r -> %r' % (cookie_name, cookie_value, ))
    return response
@register_route('/ajax/image/src/<gid>')
def image_src(gid=None, thumbnail=True, fresh=False, **kwargs):
    """Serve the image data for image <gid> (thumbnail by default).

    Passing ``fresh`` (argument, query arg, or form field) deletes the
    cached thumbnail so it is regenerated.
    """
    ibs = current_app.ibs
    if not thumbnail:
        gpath = ibs.get_image_paths(gid)
    else:
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True)
        fresh = fresh or 'fresh' in request.args or 'fresh' in request.form
        if fresh:
            # Discard the cached thumbnail and regenerate it
            import os
            os.remove(gpath)
            gpath = ibs.get_image_thumbpath(gid, ensure_paths=True)
    return ap.return_src(gpath)
@register_api('/api/image/<gid>/', methods=['GET'])
def image_base64_api(gid=None, thumbnail=True, fresh=False, **kwargs):
    r"""
    Return the base64 encoded image of image <gid>

    RESTful:
        Method: GET
        URL:    /api/image/<gid>/
    """
    encoded = image_src(gid, thumbnail=thumbnail, fresh=fresh, **kwargs)
    return encoded
@register_route('/api/image/src/<gid>/', methods=['GET'])
def image_src_api(gid=None, thumbnail=False, fresh=False, **kwargs):
    r"""
    Return the raw image file of image <gid>

    RESTful:
        Method: GET
        URL: /api/image/src/<gid>/
    """
    thumbnail = thumbnail or 'thumbnail' in request.args or 'thumbnail' in request.form
    ibs = current_app.ibs
    if not thumbnail:
        gpath = ibs.get_image_paths(gid)
    else:
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True)
        if fresh or 'fresh' in request.args or 'fresh' in request.form:
            # Drop the cached thumbnail so it is regenerated
            import os
            os.remove(gpath)
            gpath = ibs.get_image_thumbpath(gid, ensure_paths=True)
    print(gpath)
    return send_file(gpath, mimetype='application/unknown')
@register_api('/api/core/names_with_gids/', methods=['GET'])
def get_names_with_gids(ibs):
    """Map each name text to its ``(nid, gid_list)`` pair."""
    nid_list = sorted(ibs.get_valid_nids())
    name_list = ibs.get_name_texts(nid_list)
    gids_list = ibs.get_name_gids(nid_list)
    combined_dict = {}
    for nid, name, gid_list in zip(nid_list, name_list, gids_list):
        combined_dict[name] = (nid, gid_list)
    return combined_dict
@register_route('/api/csv/names_with_gids/', methods=['GET'])
def get_names_with_gids_csv():
    """Download a CSV of every name with its image rowids.

    Columns: NID,NAME,GID1..GIDk where k is the widest gid list.
    """
    ibs = current_app.ibs
    filename = 'names_with_gids.csv'
    combined_dict = get_names_with_gids(ibs)
    # .items() (not the Python-2-only .iteritems()) keeps this 2/3 compatible;
    # sorted() produces the same ordering either way
    combined_list = [
        ','.join( map(str, [nid] + [name] + gid_list) )
        for name, (nid, gid_list) in sorted(list(combined_dict.items()))
    ]
    combined_str = '\n'.join(combined_list)
    # The widest gid list determines how many GID header columns are needed
    # (values are (nid, gid_list) tuples, hence the [1] index)
    max_length = 0
    for nid_gids in combined_dict.values():
        max_length = max(max_length, len(nid_gids[1]))
    if max_length == 1:
        gid_header_str = 'GID'
    else:
        gid_header_str = ','.join([ 'GID%d' % (i + 1, ) for i in range(max_length) ])
    combined_str = 'NID,NAME,%s\n' % (gid_header_str, ) + combined_str
    return ap.send_csv_file(combined_str, filename)
@register_route('/api/csv/image_info/', methods=['GET'])
def get_image_info():
    """Download a CSV of per-image metadata (time, GPS, party, names).

    The image note is expected to hold 'party,contributor' as its first two
    comma-separated fields; malformed notes fall back to 'UNKNOWN'.
    """
    import datetime
    ibs = current_app.ibs
    filename = 'image_info.csv'
    gid_list = sorted(ibs.get_valid_gids())
    gname_list = ibs.get_image_gnames(gid_list)
    datetime_list = ibs.get_image_unixtime(gid_list)
    datetime_list_ = [
        datetime.datetime.fromtimestamp(datetime_).strftime('%Y-%m-%d %H:%M:%S')
        for datetime_ in datetime_list
    ]
    lat_list = ibs.get_image_lat(gid_list)
    lon_list = ibs.get_image_lon(gid_list)
    note_list = ibs.get_image_notes(gid_list)
    party_list = []
    contributor_list = []
    for note in note_list:
        try:
            note = note.split(',')
            party, contributor = note[:2]
            party_list.append(party)
            contributor_list.append(contributor)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; malformed notes still fall back to UNKNOWN
            party_list.append('UNKNOWN')
            contributor_list.append('UNKNOWN')
    zipped_list = zip(gid_list, gname_list, datetime_list_, lat_list, lon_list,
                      party_list, contributor_list)
    aids_list = ibs.get_image_aids(gid_list)
    names_list = [ ibs.get_annot_name_texts(aid_list) for aid_list in aids_list ]
    combined_list = [
        ','.join( map(str, list(zipped) + name_list) )
        for zipped, name_list in zip(zipped_list, names_list)
    ]
    # The widest name list determines how many NAME header columns are needed
    max_length = 0
    for name_list in names_list:
        max_length = max(max_length, len(name_list))
    if max_length == 1:
        name_header_str = 'NAME'
    else:
        name_header_str = ','.join([ 'NAME%d' % (i + 1, ) for i in range(max_length) ])
    combined_str = '\n'.join(combined_list)
    combined_str = 'GID,FILENAME,TIMESTAMP,GPSLAT,GPSLON,PARTY,CONTRIBUTOR,%s\n' % (name_header_str, ) + combined_str
    return ap.send_csv_file(combined_str, filename)
@register_route('/api/csv/demographics/', methods=['GET'])
def get_demographic_info():
    """Download a CSV of per-name demographics (sex and age-month bounds)."""
    ibs = current_app.ibs
    filename = 'demographics.csv'
    nid_list = sorted(ibs.get_valid_nids())
    rows = zip(
        nid_list,
        ibs.get_name_texts(nid_list),
        ibs.get_name_sex(nid_list),
        ibs.get_name_age_months_est_min(nid_list),
        ibs.get_name_age_months_est_max(nid_list),
    )
    lines = [ ','.join(map(str, list(row))) for row in rows ]
    combined_str = 'NID,NAME,SEX,AGEMIN,AGEMAX\n' + '\n'.join(lines)
    return ap.send_csv_file(combined_str, filename)
@register_api('/api/core/gids_with_aids/', methods=['GET'])
def get_gid_with_aids(ibs):
    """Map every valid image rowid to its list of annotation rowids."""
    gid_list = ibs.get_valid_gids()
    aids_list = ibs.get_image_aids(gid_list)
    return dict(zip(gid_list, aids_list))
@register_route('/api/csv/gids_with_aids/', methods=['GET'])
def get_gid_with_aids_csv():
    """Download a CSV mapping every image rowid to its annotation rowids.

    Columns: GID,AID1..AIDk where k is the largest annotation count.
    """
    ibs = current_app.ibs
    combined_dict = get_gid_with_aids(ibs)
    filename = 'gids_with_aids.csv'
    # .items() (not the Python-2-only .iteritems()) keeps this 2/3 compatible;
    # sorted() produces the same ordering either way
    combined_list = [
        ','.join( map(str, [gid] + aid_list) )
        for gid, aid_list in sorted(list(combined_dict.items()))
    ]
    combined_str = '\n'.join(combined_list)
    # The widest aid list determines how many AID header columns are needed
    max_length = 0
    for aid_list in combined_dict.values():
        max_length = max(max_length, len(aid_list))
    if max_length == 1:
        aid_header_str = 'AID'
    else:
        aid_header_str = ','.join([ 'AID%d' % (i + 1, ) for i in range(max_length) ])
    combined_str = 'GID,%s\n' % (aid_header_str, ) + combined_str
    return ap.send_csv_file(combined_str, filename)
@register_route('/api/csv/image/', methods=['GET'])
def get_gid_list_csv():
    """Download a one-column CSV of all valid image rowids."""
    ibs = current_app.ibs
    gid_list = ibs.get_valid_gids()
    body = '\n'.join(str(gid) for gid in gid_list)
    return ap.send_csv_file('GID\n' + body, 'gids.csv')
@register_route('/api/csv/annot/', methods=['GET'])
def get_aid_list_csv():
    """Download a one-column CSV of all valid annotation rowids."""
    ibs = current_app.ibs
    aid_list = ibs.get_valid_aids()
    body = '\n'.join(str(aid) for aid in aid_list)
    return ap.send_csv_file('AID\n' + body, 'aids.csv')
@register_route('/api/image/view/<gid>/', methods=['GET'])
def image_view_api(gid=None, thumbnail=True, fresh=False, **kwargs):
    r"""
    Render a simple HTML page embedding the base64 image of image <gid>

    RESTful:
        Method: GET
        URL: /api/image/view/<gid>/
    """
    encoded = image_src(gid, thumbnail=thumbnail, fresh=fresh, **kwargs)
    return ap.template(None, 'single', encoded=encoded)
@register_api('/api/image/zip', methods=['POST'])
def image_upload_zip(**kwargs):
    r"""
    Returns the gid_list for image files submitted in a ZIP archive. The image
    archive should be flat (no folders will be scanned for images) and must be
    smaller than 100 MB. The archive can submit multiple images, ideally in
    JPEG format to save space. Duplicate image uploads will result in the
    duplicate images receiving the same gid based on the hashed pixel values.

    Args:
        image_zip_archive (binary): the POST variable containing the binary
            (multi-form) image archive data
        **kwargs: Arbitrary keyword arguments; the kwargs are passed down to
            the add_images function

    Returns:
        gid_list (list of rowids): the list of gids corresponding to the images
            submitted. The gids correspond to the image names sorted in
            lexicographical order.

    RESTful:
        Method: POST
        URL:    /api/image/zip
    """
    ibs = current_app.ibs
    # Get image archive
    image_archive = request.files.get('image_zip_archive', None)
    if image_archive is None:
        raise IOError('Image archive not given')
    uploads_path = ibs.get_uploadsdir()
    ut.ensuredir(uploads_path)
    # Pick a timestamped directory name that does not collide with an
    # existing upload directory.
    current_time = time.strftime('%Y_%m_%d_%H_%M_%S')
    modifier = 1
    upload_path = '%s' % (current_time)
    # BUGFIX: the existence test must look inside uploads_path; previously the
    # bare name was checked relative to the process CWD, so collisions inside
    # the uploads directory were never detected.
    while exists(join(uploads_path, upload_path)):
        upload_path = '%s_%04d' % (current_time, modifier)
        modifier += 1
    upload_path = join(uploads_path, upload_path)
    ut.ensuredir(upload_path)
    # Extract the content; clean up the directory on any failure.
    try:
        with zipfile.ZipFile(image_archive, 'r') as zfile:
            zfile.extractall(upload_path)
    except Exception:
        ut.remove_dirs(upload_path)
        raise IOError('Image archive extraction failed')
    # Only the flat directory is scanned (recursive=False by design).
    gpath_list = sorted(ut.list_images(upload_path, recursive=False, full=True))
    gid_list = ibs.add_images(gpath_list, **kwargs)
    return gid_list
@register_api('/api/image/json/', methods=['POST'])
def add_images_json(ibs, image_uri_list, image_uuid_list, image_width_list,
                    image_height_list, image_orig_name_list=None, image_ext_list=None,
                    image_time_posix_list=None, image_gps_lat_list=None,
                    image_gps_lon_list=None, image_notes_list=None, **kwargs):
    """
    REST:
        Method: POST
        URL: /api/image/json/

    Ignore:
        sudo pip install boto

    Args:
        image_uri_list (list) : list of string image uris, most likely HTTP(S) or S3
            encoded URLs. Alternatively, this can be a list of dictionaries (JSON
            objects) that specify AWS S3 stored assets. An example below:

                image_uri_list = [
                    'http://domain.com/example/asset1.png',
                    '/home/example/Desktop/example/asset2.jpg',
                    's3://s3.amazon.com/example-bucket-2/asset1-in-bucket-2.tif',
                    {
                        'bucket'          : 'example-bucket-1',
                        'key'             : 'example/asset1.png',
                        'auth_domain'     : None,  # Uses localhost
                        'auth_access_id'  : None,  # Uses system default
                        'auth_secret_key' : None,  # Uses system default
                    },
                    {
                        'bucket' : 'example-bucket-1',
                        'key'    : 'example/asset2.jpg',
                        # if unspecified, auth uses localhost and system defaults
                    },
                    {
                        'bucket'          : 'example-bucket-2',
                        'key'             : 'example/asset1-in-bucket-2.tif',
                        'auth_domain'     : 's3.amazon.com',
                        'auth_access_id'  : '____________________',
                        'auth_secret_key' : '________________________________________',
                    },
                ]

            Note that you cannot specify AWS authentication access ids or secret
            keys using string uri's. For specific authentication methods, please
            use the latter list of dictionaries.
        image_uuid_list (list of str) : list of image UUIDs to be used in IBEIS IA
        image_width_list (list of int) : list of image widths
        image_height_list (list of int) : list of image heights
        image_orig_name_list (list of str): list of original image names
        image_ext_list (list of str): list of original image extensions
        image_time_posix_list (list of int): list of image's POSIX timestamps
        image_gps_lat_list (list of float): list of image's GPS latitude values
        image_gps_lon_list (list of float): list of image's GPS longitude values
        image_notes_list (list of str) : optional list of any related notes with
            the images
        **kwargs : key-value pairs passed to the ibs.add_images() function.

    Returns:
        list: UUIDs of the added images (NOT gids -- see the note at the end).

    CommandLine:
        python -m ibeis.web.app --test-add_images_json

    Example:
        >>> # WEB_DOCTEST
        >>> from ibeis.control.IBEISControl import *  # NOQA
        >>> import ibeis
        >>> web_instance = ibeis.opendb(db='testdb1')
        >>> _payload = {
        >>>     'image_uri_list': [
        >>>         'https://upload.wikimedia.org/wikipedia/commons/4/49/Zebra_running_Ngorongoro.jpg',
        >>>         {
        >>>             'bucket'          : 'test-asset-store',
        >>>             'key'             : 'caribwhale/20130903-JAC-0002.JPG',
        >>>         },
        >>>     ],
        >>>     'image_uuid_list': [
        >>>         uuid.UUID('7fea8101-7dec-44e3-bf5d-b8287fd231e2'),
        >>>         uuid.UUID('c081119a-e08e-4863-a710-3210171d27d6'),
        >>>     ],
        >>>     'image_width_list': [
        >>>         1992,
        >>>         1194,
        >>>     ],
        >>>     'image_height_list': [
        >>>         1328,
        >>>         401,
        >>>     ],
        >>> }
        >>> gid_list = ibeis.web.app.add_images_json(web_instance, **_payload)
        >>> print(gid_list)
        >>> print(web_instance.get_image_uuids(gid_list))
        >>> print(web_instance.get_image_uris(gid_list))
        >>> print(web_instance.get_image_paths(gid_list))
        >>> print(web_instance.get_image_uris_original(gid_list))
    """
    def _get_standard_ext(gpath):
        # Normalize the file extension; '.jpeg' is canonicalized to '.jpg'.
        ext = splitext(gpath)[1].lower()
        return '.jpg' if ext == '.jpeg' else ext

    def _parse_imageinfo(index):
        # Build the parameter tuple for the image at ``index`` out of the
        # parallel input lists.
        def _resolve_uri():
            list_ = image_uri_list
            if list_ is None or index >= len(list_) or list_[index] is None:
                raise ValueError('Must specify all required fields')
            value = list_[index]
            if isinstance(value, dict):
                # S3 asset specification dict -> single encoded URI string.
                value = ut.s3_dict_encode_to_str(value)
            return value

        def _resolve(list_, default='', assert_=False):
            # Fetch list_[index]; fall back to ``default`` unless the field is
            # required (assert_=True), in which case a missing value raises.
            if list_ is None or index >= len(list_) or list_[index] is None:
                if assert_:
                    raise ValueError('Must specify all required fields')
                return default
            return list_[index]

        uri = _resolve_uri()
        orig_gname = basename(uri)
        ext = _get_standard_ext(uri)
        uuid_ = _resolve(image_uuid_list, assert_=True)
        if isinstance(uuid_, six.string_types):
            uuid_ = uuid.UUID(uuid_)
        # NOTE(review): tuple order presumably matches ibs.add_images'
        # params_list contract (uuid, uri, uri_original, ...) -- confirm
        # against the controller before reordering.
        param_tup = (
            uuid_,
            uri,
            uri,
            _resolve(image_orig_name_list, default=orig_gname),
            _resolve(image_ext_list, default=ext),
            int(_resolve(image_width_list, assert_=True)),
            int(_resolve(image_height_list, assert_=True)),
            int(_resolve(image_time_posix_list, default=-1)),
            float(_resolve(image_gps_lat_list, default=-1.0)),
            float(_resolve(image_gps_lon_list, default=-1.0)),
            _resolve(image_notes_list),
        )
        return param_tup

    # TODO: FIX ME SO THAT WE DON'T HAVE TO LOCALIZE EVERYTHING
    kwargs['auto_localize'] = kwargs.get('auto_localize', True)
    kwargs['sanitize'] = kwargs.get('sanitize', False)
    index_list = range(len(image_uri_list))
    params_gen = ut.generate(_parse_imageinfo, index_list, adjust=True,
                             force_serial=True, **kwargs)
    params_gen = list(params_gen)
    # NOTE(review): element 0 of each param tuple is the UUID, not a path, so
    # ``gpath_list`` looks misnamed -- verify what ibs.add_images expects when
    # params_list is supplied.
    gpath_list = [ _[0] for _ in params_gen ]
    gid_list = ibs.add_images(gpath_list, params_list=params_gen, **kwargs)  # NOQA
    # return gid_list
    # Deliberately returns UUIDs (stable across databases) instead of gids.
    image_uuid_list = ibs.get_image_uuids(gid_list)
    return image_uuid_list
@register_api('/api/annot/json/', methods=['POST'])
def add_annots_json(ibs, image_uuid_list, annot_uuid_list, annot_bbox_list,
                    annot_theta_list=None, annot_species_list=None,
                    annot_name_list=None, annot_notes_list=None, **kwargs):
    """
    Add annotations (given in JSON form) onto existing images.

    REST:
        Method: POST
        URL: /api/annot/json/

    Args:
        image_uuid_list (list of str) : image UUIDs (strings or uuid.UUID)
        annot_uuid_list (list of str) : annotation UUIDs (strings or uuid.UUID)
        annot_bbox_list (list of 4-tuple) : bounding boxes encoded as
            (xtl, ytl, width, height), where xtl/ytl are the top-left corner.
        annot_theta_list (list of float) : radian rotation around center;
            defaults to 0.0 (no rotation).
        annot_species_list (list of str) : species per annotation, if known;
            use None (null in JSON) for unknown entries.
        annot_name_list (list of str) : names per annotation, if known;
            use None (null in JSON) for unknown entries.
        annot_notes_list (list of str) : notes to attach to each annotation.
        **kwargs : key-value pairs passed to the ibs.add_annots() function.

    Returns:
        list of uuid.UUID: UUIDs of the added annotations.

    CommandLine:
        python -m ibeis.web.app --test-add_annots_json
    """
    def _coerce_uuids(values):
        # Accept either uuid.UUID objects or their string form.
        return [
            uuid.UUID(value) if isinstance(value, six.string_types) else value
            for value in values
        ]

    image_uuid_list = _coerce_uuids(image_uuid_list)
    annot_uuid_list = _coerce_uuids(annot_uuid_list)
    gid_list = ibs.get_image_gids_from_uuid(image_uuid_list)
    aid_list = ibs.add_annots(gid_list, annot_uuid_list=annot_uuid_list,  # NOQA
                              bbox_list=annot_bbox_list, theta_list=annot_theta_list,
                              species_list=annot_species_list, name_list=annot_name_list,
                              notes_list=annot_notes_list, **kwargs)
    # return aid_list
    # UUIDs are returned instead of aids so the caller gets database-stable ids.
    annot_uuid_list = ibs.get_annot_uuids(aid_list)
    return annot_uuid_list
@register_api('/api/image/json/', methods=['DELETE'])
def delete_images_json(ibs, image_uuid_list):
    """
    Delete the images identified by *image_uuid_list*; always returns True.

    REST:
        Method: DELETE
        URL: /api/image/json/

    Args:
        image_uuid_list (list of str) : image UUIDs to delete from IBEIS
    """
    gid_list = ibs.get_image_gids_from_uuid(image_uuid_list)
    ibs.delete_images(gid_list)
    return True
@register_api('/api/annot/json/', methods=['DELETE'])
def delete_annots_json(ibs, annot_uuid_list):
    """
    Delete the annotations identified by *annot_uuid_list*; always returns True.

    REST:
        Method: DELETE
        URL: /api/annot/json/

    Args:
        annot_uuid_list (list of str) : annotation UUIDs to delete from IBEIS
    """
    aid_list = ibs.get_annot_aids_from_uuid(annot_uuid_list)
    ibs.delete_annots(aid_list)
    return True
@register_api('/api/image/', methods=['POST'])
def image_upload(cleanup=True, **kwargs):
    r"""
    Returns the gid for an uploaded image.

    Args:
        image (image binary): the POST variable containing the binary
            (multi-form) image data
        **kwargs: Arbitrary keyword arguments; the kwargs are passed down to
            the add_images function

    Returns:
        gid (rowid): gid corresponding to the image submitted.

    RESTful:
        Method: POST
        URL:    /api/image/
    """
    ibs = current_app.ibs
    print('request.files = %s' % (request.files,))
    filestore = request.files.get('image', None)
    if filestore is None:
        raise IOError('Image not given')
    uploads_path = ibs.get_uploadsdir()
    ut.ensuredir(uploads_path)
    # Pick a timestamped filename that does not collide with an existing upload.
    current_time = time.strftime('%Y_%m_%d_%H_%M_%S')
    modifier = 1
    upload_filename = 'upload_%s.png' % (current_time)
    # BUGFIX: the existence test must look inside uploads_path; previously the
    # bare filename was checked relative to the process CWD, so collisions in
    # the uploads directory were never detected.
    while exists(join(uploads_path, upload_filename)):
        upload_filename = 'upload_%s_%04d.png' % (current_time, modifier)
        modifier += 1
    upload_filepath = join(uploads_path, upload_filename)
    filestore.save(upload_filepath)
    gid_list = ibs.add_images([upload_filepath], **kwargs)
    gid = gid_list[0]
    if cleanup:
        # NOTE(review): remove_dirs is called on a file path here -- confirm
        # ut.remove_dirs also deletes plain files.
        ut.remove_dirs(upload_filepath)
    return gid
@register_api('/api/core/helloworld/', methods=['GET', 'POST', 'DELETE', 'PUT'])
def hello_world(*args, **kwargs):
    """
    Debug endpoint that dumps the incoming request to stdout.

    CommandLine:
        python -m ibeis.web.app --exec-hello_world

    Example:
        >>> # SCRIPT
        >>> from ibeis.web.app import *  # NOQA
        >>> import ibeis
        >>> web_ibs = ibeis.opendb_bg_web(browser=True, start_job_queue=False, url_suffix='/api/core/helloworld/')
    """
    print('------------------ HELLO WORLD ------------------')
    print('Args: %r' % (args,))
    print('Kwargs: %r' % (kwargs,))
    print('request.args: %r' % (request.args,))
    print('request.form: %r' % (request.form,))
    # BUGFIX: label separator was ';' while every other debug line uses ':'.
    print('request.url: %r' % (request.url,))
    print('request.environ: %s' % (ut.repr3(request.environ),))
    print('request: %s' % (ut.repr3(request.__dict__),))
    # NOTE(review): nothing is returned -- confirm register_api tolerates a
    # None response body.
# (endpoint name, display label) pairs for the supported turking interfaces.
# The first element doubles as the Flask endpoint name that
# group_review_submit passes to url_for().
VALID_TURK_MODES = [
    ('turk_viewpoint', 'Viewpoint'),
    ('turk_quality', 'Quality'),
]
@register_route('/group_review/')
def group_review():
    """Render the group-review page.

    The candidate annotation list comes from one of three places:
    ``?prefill=...`` runs the species/viewpoint CNN validator over all
    annotations, ``?aid_list=[...]`` parses an explicit id list from the
    query string, and otherwise the page starts with no candidates.
    """
    prefill = request.args.get('prefill', '')
    if len(prefill) > 0:
        ibs = current_app.ibs
        aid_list = ibs.get_valid_aids()
        bad_species_list, bad_viewpoint_list = ibs.validate_annot_species_viewpoint_cnn(aid_list)

        GROUP_BY_PREDICTION = True
        if GROUP_BY_PREDICTION:
            # Group the flagged annotations by predicted label (column 3) and
            # order the groups smallest-first before flattening.
            grouped_dict = ut.group_items(bad_viewpoint_list, ut.get_list_column(bad_viewpoint_list, 3))
            grouped_list = grouped_dict.values()
            regrouped_items = ut.flatten(ut.sortedby(grouped_list, map(len, grouped_list)))
            candidate_aid_list = ut.get_list_column(regrouped_items, 0)
        else:
            candidate_aid_list = [ bad_viewpoint[0] for bad_viewpoint in bad_viewpoint_list]
    elif request.args.get('aid_list', None) is not None:
        # Parse a literal list like "[1, 2, 3]" from the query string.
        aid_list = request.args.get('aid_list', '')
        if len(aid_list) > 0:
            aid_list = aid_list.replace('[', '')
            aid_list = aid_list.replace(']', '')
            aid_list = aid_list.strip().split(',')
            candidate_aid_list = [ int(aid_.strip()) for aid_ in aid_list ]
        else:
            candidate_aid_list = ''
    else:
        candidate_aid_list = ''

    return ap.template(None, 'group_review', candidate_aid_list=candidate_aid_list, mode_list=VALID_TURK_MODES)
@register_route('/group_review/submit/', methods=['POST'])
def group_review_submit():
    """Handle the group-review form POST.

    A 'populate' submission bounces the user back to the referrer with
    ``?prefill=true`` appended; any other submission parses the selected
    aid list, prepares an annotation-group review, and redirects to the
    chosen turk mode endpoint.

    CommandLine:
        python -m ibeis.web.app --exec-group_review_submit

    Example:
        >>> # UNSTABLE_DOCTEST
        >>> from ibeis.web.app import *  # NOQA
        >>> import ibeis
        >>> import ibeis.web
        >>> ibs = ibeis.opendb('testdb1')
        >>> aid_list = ibs.get_valid_aids()[::2]
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    method = request.form.get('group-review-submit', '')
    if method.lower() == 'populate':
        redirection = request.referrer
        if 'prefill' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&prefill=true' % (redirection, )
            else:
                redirection = '%s?prefill=true' % (redirection, )
        return redirect(redirection)
    # Parse the submitted "[1, 2, 3]"-style annotation id list.
    aid_list = request.form.get('aid_list', '')
    if len(aid_list) > 0:
        aid_list = aid_list.replace('[', '')
        aid_list = aid_list.replace(']', '')
        aid_list = aid_list.strip().split(',')
        aid_list = [ int(aid_.strip()) for aid_ in aid_list ]
    else:
        aid_list = []
    src_ag, dst_ag = ibs.prepare_annotgroup_review(aid_list)
    valid_modes = ut.get_list_column(VALID_TURK_MODES, 0)
    mode = request.form.get('group-review-mode', None)
    # The mode doubles as the Flask endpoint name to redirect to.
    assert mode in valid_modes
    return redirect(url_for(mode, src_ag=src_ag, dst_ag=dst_ag))
@register_route('/ajax/annotation/src/<aid>')
def annotation_src(aid=None):
    """Serve the chip image file of annotation *aid*."""
    chip_path = current_app.ibs.get_annot_chip_fpath(aid)
    return ap.return_src(chip_path)
@register_api('/api/annot/<aid>/', methods=['GET'])
def annotation_src_api(aid=None):
    r"""
    Return the base64 encoded image of annotation <aid>; thin wrapper
    around :func:`annotation_src`.

    RESTful:
        Method: GET
        URL:    /api/annot/<aid>/
    """
    return annotation_src(aid)
@register_route('/display/sightings')
def display_sightings(html_encode=True):
    """Render the sightings report; ``?complete`` requests the full report."""
    ibs = current_app.ibs
    want_complete = request.args.get('complete', None) is not None
    report = ibs.report_sightings_str(complete=want_complete, include_images=True)
    return report.replace('\n', '<br/>') if html_encode else report
@register_route('/download/sightings')
def download_sightings():
    """Serve the (non-HTML-encoded) sightings report as sightings.csv."""
    report = display_sightings(html_encode=False)
    return ap.send_csv_file(report, 'sightings.csv')
@register_route('/graph/sightings')
def graph_sightings():
    """Sightings graphs live on the main view page; redirect there."""
    return redirect(url_for('view'))
@register_route('/dbinfo')
def dbinfo():
    """Render the database info string inside a <pre> block.

    Falls back to an empty string when no controller is attached or the
    info call fails.
    """
    try:
        ibs = current_app.ibs
        dbinfo_str = ibs.get_dbinfo_str()
    # BUGFIX: a bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception instead.
    except Exception:
        dbinfo_str = ''
    dbinfo_str_formatted = '<pre>%s</pre>' % (dbinfo_str, )
    return dbinfo_str_formatted
@register_route('/api')
def api():
    """Render the API help page listing all /api/ routes grouped by HTTP method."""
    rules = current_app.url_map.iter_rules()
    rule_dict = {}
    for rule in rules:
        url = str(rule)
        if '/api/' not in url:
            continue
        # BUGFIX: the original used ``methods -= set(...)`` on rule.methods,
        # which mutates werkzeug's rule in place; build a new set instead.
        methods = rule.methods - set(['HEAD', 'OPTIONS'])
        if len(methods) == 0:
            continue
        if len(methods) > 1:
            print('methods = %r' % (methods,))
        # NOTE(review): when a rule accepts several methods an arbitrary one is
        # shown (set ordering) -- confirm whether all should be listed.
        method = list(methods)[0]
        rule_dict.setdefault(method, []).append((method, url, ))
    for method_rules in rule_dict.values():
        method_rules.sort()
    url = '%s/api/core/dbname/' % (current_app.server_url, )
    app_auth = controller_inject.get_url_authorization(url)
    return ap.template(None, 'api',
                       app_url=url,
                       app_name=controller_inject.GLOBAL_APP_NAME,
                       app_secret=controller_inject.GLOBAL_APP_SECRET,
                       app_auth=app_auth,
                       rule_list=rule_dict)
@register_route('/upload')
def upload():
    """Serve the manual image-upload page."""
    return ap.template(None, 'upload')
@register_route('/404')
def error404(exception=None):
    """Render the 404 page, echoing the exception and the current traceback."""
    import traceback
    details = {
        'exception_str': str(exception),
        'traceback_str': str(traceback.format_exc()),
    }
    print('[web] %r' % (details['exception_str'], ))
    print('[web] %r' % (details['traceback_str'], ))
    return ap.template(None, '404', **details)
################################################################################
def test_html_error():
    r"""
    This test will show what our current errors look like.

    It is intentionally a no-op; the interesting behavior is triggered by the
    bad-format request in the CommandLine example below.

    CommandLine:
        python -m ibeis.web.app --exec-test_html_error

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.web.app import *  # NOQA
        >>> import ibeis
        >>> web_ibs = ibeis.opendb_bg_web(browser=True, start_job_queue=False, url_suffix='/api/image/imagesettext/?__format__=True')
    """
    pass
def start_tornado(ibs, port=None, browser=None, url_suffix=None):
    """
    Initialize the web server.

    Args:
        ibs: controller, attached to the Flask app as ``app.ibs``
        port (int): port to listen on; defaults to DEFAULT_WEB_API_PORT
        browser (bool): open a web browser on the served URL; defaults to
            the ``--browser`` command line flag
        url_suffix (str): appended to the URL opened in the browser
    """
    if browser is None:
        browser = ut.get_argflag('--browser')
    if url_suffix is None:
        url_suffix = ''

    def _start_tornado(ibs_, port_):
        # Get Flask app
        app = controller_inject.get_flask_app()
        app.ibs = ibs_
        # Try to ascertain the socket's domain name
        try:
            app.server_domain = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            # Hostname does not resolve; fall back to loopback.
            app.server_domain = '127.0.0.1'
        app.server_port = port_
        # URL for the web instance
        app.server_url = 'http://%s:%s' % (app.server_domain, app.server_port)
        print('[web] Tornado server starting at %s' % (app.server_url,))
        # Launch the web browser to view the web interface and API
        if browser:
            url = app.server_url + url_suffix
            import webbrowser
            print('[web] opening browser with url = %r' % (url,))
            webbrowser.open(url)
        # Start the tornado web handler
        # WSGI = Web Server Gateway Interface
        # WSGI is Python standard described in detail in PEP 3333
        http_server = tornado.httpserver.HTTPServer(
            tornado.wsgi.WSGIContainer(app))
        http_server.listen(app.server_port)
        # Blocks until the IOLoop is stopped.
        tornado.ioloop.IOLoop.instance().start()

    # Set logging level
    logging.getLogger().setLevel(logging.INFO)
    # Get the port if unspecified
    if port is None:
        port = DEFAULT_WEB_API_PORT
    # Launch the web handler
    _start_tornado(ibs, port)
def start_from_ibeis(ibs, port=None, browser=None, precache=None,
                     url_suffix=None, start_job_queue=True):
    """
    Parse command line options and start the server.

    Optionally pre-computes thumbnails/chips (``--precache``) and starts the
    ZMQ job queue before handing control to the (blocking) tornado server.

    CommandLine:
        python -m ibeis --db PZ_MTEST --web
        python -m ibeis --db PZ_MTEST --web --browser
    """
    print('[web] start_from_ibeis()')
    if precache is None:
        precache = ut.get_argflag('--precache')

    if precache:
        # Warm every thumbnail/chip cache before serving requests.
        print('[web] Pre-computing all image thumbnails (with annots)...')
        ibs.preprocess_image_thumbs()
        print('[web] Pre-computing all image thumbnails (without annots)...')
        ibs.preprocess_image_thumbs(draw_annots=False)
        print('[web] Pre-computing all annotation chips...')
        ibs.check_chip_existence()
        ibs.compute_all_chips()

    if start_job_queue:
        print('[web] opening job manager')
        ibs.load_plugin_module(zmq_task_queue)
        ibs.initialize_job_manager()

    print('[web] starting tornado')
    # Blocks until the tornado IOLoop stops.
    start_tornado(ibs, port, browser, url_suffix)
    print('[web] closing job manager')
    ibs.close_job_manager()
if __name__ == '__main__':
    """
    Run this module's doctests.

    CommandLine:
        python -m ibeis.web.app
        python -m ibeis.web.app --allexamples
        python -m ibeis.web.app --allexamples --noface --nosrc
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    ut.doctest_funcs()
# Added functions for Tanya
# -*- coding: utf-8 -*-
"""
Dependencies: flask, tornado
"""
from __future__ import absolute_import, division, print_function
import random
from os.path import join, exists, splitext, basename
import uuid
import zipfile
import time
import six
import math
import tornado.wsgi
import tornado.httpserver
from flask import request, redirect, url_for, make_response, current_app, send_file
import logging
import socket
import simplejson as json
from ibeis.control import controller_inject
from ibeis import constants as const
from ibeis.constants import KEY_DEFAULTS, SPECIES_KEY, PI, TAU
from ibeis.web import appfuncs as ap
from ibeis.web import zmq_task_queue # NOQA
import utool as ut
DEFAULT_WEB_API_PORT = ut.get_argval('--port', type_=int, default=5000)
register_api = controller_inject.get_ibeis_flask_api(__name__)
register_route = controller_inject.get_ibeis_flask_route(__name__)
PAGE_SIZE = 500
################################################################################
def default_species(ibs):
    # hack function
    """Guess a default species from the database name; None when unknown."""
    dbname = ibs.get_dbname()
    exact_lookup = {
        'CHTA_Master': 'cheetah',
        'ELPH_Master': 'elephant_savanna',
        'GIR_Master': 'giraffe_reticulated',
        'GZ_Master': 'zebra_grevys',
        'LION_Master': 'lion',
        'PZ_Master': 'zebra_plains',
        'WD_Master': 'wild_dog',
        'NNP_MasterGIRM': 'giraffe_masai',
    }
    if dbname in exact_lookup:
        default_species = exact_lookup[dbname]
    elif 'NNP_' in dbname or 'GZC' in dbname:
        # Substring fallbacks only apply after all exact names have missed.
        default_species = 'zebra_plains'
    else:
        default_species = None
    print('[web] DEFAULT SPECIES: %r' % (default_species))
    return default_species
def imageset_image_processed(ibs, gid_list):
    """Return one bool per image: True when its reviewed flag equals 1."""
    reviewed_flags = ibs.get_image_reviewed(gid_list)
    return [flag == 1 for flag in reviewed_flags]
def imageset_annot_viewpoint_processed(ibs, aid_list):
    """Return one bool per annotation: True when its yaw has been set."""
    return [yaw is not None for yaw in ibs.get_annot_yaws(aid_list)]
def imageset_annot_quality_processed(ibs, aid_list):
    """Return one bool per annotation: True when its quality was reviewed.

    A quality counts as reviewed when it is neither None nor the -1 sentinel.
    """
    # BUGFIX: the original tested ``reviewed is not -1`` -- an identity
    # comparison with an int that only happens to work because CPython caches
    # small integers; use a value comparison instead.
    annots_reviewed = [ reviewed is not None and reviewed != -1 for reviewed in ibs.get_annot_qualities(aid_list) ]
    return annots_reviewed
def imageset_annot_additional_processed(ibs, aid_list, nid_list):
    """Return one bool per annotation: True when its name metadata is complete.

    Unnamed placeholders (nid < 0) count as processed. Named annotations
    (nid > 0) additionally need a known sex (>= 0), no -1 sentinel among the
    age estimates, and fewer than two unset (None) age bounds. nid == 0 is
    never processed.
    """
    sex_list = ibs.get_annot_sex(aid_list)
    age_list = ibs.get_annot_age_months_est(aid_list)

    def _is_done(nid, sex, age):
        if nid < 0:
            return True
        ages = list(age)
        return nid > 0 and sex >= 0 and -1 not in ages and ages.count(None) < 2

    return [_is_done(nid, sex, age)
            for nid, sex, age in zip(nid_list, sex_list, age_list)]
def convert_old_viewpoint_to_yaw(view_angle):
    """Map a legacy viewpoint angle (degrees) to a yaw (radians).

    We initially had viewpoint coordinates inverted, so the angle is negated,
    shifted by half a turn, and wrapped into [0, TAU). Returns None for None.

    Example:
        >>> import math
        >>> TAU = 2 * math.pi
        >>> old_viewpoint_labels = [
        >>>     ('left'       ,   0, 0.000 * TAU,),
        >>>     ('frontleft'  ,  45, 0.125 * TAU,),
        >>>     ('front'      ,  90, 0.250 * TAU,),
        >>>     ('frontright' , 135, 0.375 * TAU,),
        >>>     ('right'      , 180, 0.500 * TAU,),
        >>>     ('backright'  , 225, 0.625 * TAU,),
        >>>     ('back'       , 270, 0.750 * TAU,),
        >>>     ('backleft'   , 315, 0.875 * TAU,),
        >>> ]
        >>> fmtstr = 'old %15r %.2f -> new %15r %.2f'
        >>> for lbl, angle, radians in old_viewpoint_labels:
        >>>     print(fmtstr % (lbl, angle, lbl, convert_old_viewpoint_to_yaw(angle)))
    """
    if view_angle is None:
        return None
    radians = ut.deg_to_rad(view_angle)
    return ((TAU / 2) - radians) % TAU
def convert_yaw_to_old_viewpoint(yaw):
    """Map a yaw (radians) back to a legacy viewpoint angle (degrees).

    Inverse of :func:`convert_old_viewpoint_to_yaw` (the original viewpoint
    coordinates were inverted). Returns None for None.

    Example:
        >>> import math
        >>> TAU = 2 * math.pi
        >>> old_viewpoint_labels = [
        >>>     ('left'       ,   0, 0.000 * TAU,),
        >>>     ('frontleft'  ,  45, 0.125 * TAU,),
        >>>     ('front'      ,  90, 0.250 * TAU,),
        >>>     ('frontright' , 135, 0.375 * TAU,),
        >>>     ('right'      , 180, 0.500 * TAU,),
        >>>     ('backright'  , 225, 0.625 * TAU,),
        >>>     ('back'       , 270, 0.750 * TAU,),
        >>>     ('backleft'   , 315, 0.875 * TAU,),
        >>> ]
        >>> fmtstr = 'original_angle %15r %.2f -> yaw %15r %.2f -> reconstructed_angle %15r %.2f'
        >>> for lbl, angle, radians in old_viewpoint_labels:
        >>>     yaw = convert_old_viewpoint_to_yaw(angle)
        >>>     reconstructed_angle = convert_yaw_to_old_viewpoint(yaw)
        >>>     print(fmtstr % (lbl, angle, lbl, yaw, lbl, reconstructed_angle))
    """
    if yaw is None:
        return None
    wrapped = (-yaw + (TAU / 2)) % TAU
    return ut.rad_to_deg(wrapped)
################################################################################
@register_route('/')
def root():
    """Serve the landing page."""
    return ap.template(None)
@register_route('/view')
def view():
    """Render the main statistics dashboard.

    Aggregates per-day image/annotation/name counts, estimates population
    size with a Petersen-Lincoln index over the last two days, collects GPS
    markers and per-name tracks, and histograms name age/sex before handing
    everything to the 'view' template.
    """
    def _date_list(gid_list):
        # Map image gids to date strings ('UNKNOWN' when the timestamp is unset).
        unixtime_list = ibs.get_image_unixtime(gid_list)
        datetime_list = [
            ut.unixtime_to_datetimestr(unixtime)
            if unixtime is not None else
            'UNKNOWN'
            for unixtime in unixtime_list
        ]
        datetime_split_list = [ datetime.split(' ') for datetime in datetime_list ]
        date_list = [ datetime_split[0] if len(datetime_split) == 2 else 'UNKNOWN' for datetime_split in datetime_split_list ]
        return date_list

    ibs = current_app.ibs
    aid_list = ibs.filter_aids_count()
    gid_list = ibs.get_annot_gids(aid_list)
    nid_list = ibs.get_annot_name_rowids(aid_list)
    date_list = _date_list(gid_list)

    # date_taken_dict[date] = [images with annots, images taken] per day.
    gid_list_unique = list(set(gid_list))
    date_list_unique = _date_list(gid_list_unique)
    date_taken_dict = {}
    for gid, date in zip(gid_list_unique, date_list_unique):
        if date not in date_taken_dict:
            date_taken_dict[date] = [0, 0]
        date_taken_dict[date][1] += 1

    gid_list_all = ibs.get_valid_gids()
    date_list_all = _date_list(gid_list_all)
    for gid, date in zip(gid_list_all, date_list_all):
        if date in date_taken_dict:
            date_taken_dict[date][0] += 1

    # Cumulative discovery curve plus per-day seen/new/recaptured counters:
    # date_seen_dict[date] = [annots, names seen, names new, names recaptured].
    value = 0
    label_list = []
    value_list = []
    index_list = []
    seen_set = set()
    current_seen_set = set()
    previous_seen_set = set()
    last_date = None
    date_seen_dict = {}
    for index, (aid, nid, date) in enumerate(zip(aid_list, nid_list, date_list)):
        index_list.append(index + 1)
        # Add to counters
        if date not in date_seen_dict:
            date_seen_dict[date] = [0, 0, 0, 0]
        date_seen_dict[date][0] += 1
        if nid not in current_seen_set:
            current_seen_set.add(nid)
            date_seen_dict[date][1] += 1
            if nid in previous_seen_set:
                date_seen_dict[date][3] += 1
        if nid not in seen_set:
            seen_set.add(nid)
            value += 1
            date_seen_dict[date][2] += 1
        # Add to register
        value_list.append(value)
        # Reset step (per day)
        if date != last_date and date != 'UNKNOWN':
            last_date = date
            previous_seen_set = set(current_seen_set)
            current_seen_set = set()
            label_list.append(date)
        else:
            label_list.append('')

    # Curve-fitting predictions are disabled; the template gets an empty list.
    prediction_list = []

    date_seen_dict.pop('UNKNOWN', None)
    bar_label_list = sorted(date_seen_dict.keys())
    bar_value_list1 = [ date_taken_dict[date][0] for date in bar_label_list ]
    bar_value_list2 = [ date_taken_dict[date][1] for date in bar_label_list ]
    bar_value_list3 = [ date_seen_dict[date][0] for date in bar_label_list ]
    bar_value_list4 = [ date_seen_dict[date][1] for date in bar_label_list ]
    bar_value_list5 = [ date_seen_dict[date][2] for date in bar_label_list ]
    bar_value_list6 = [ date_seen_dict[date][3] for date in bar_label_list ]

    # Counts
    imgsetid_list = ibs.get_valid_imgsetids()
    gid_list = ibs.get_valid_gids()
    aid_list = ibs.get_valid_aids()
    nid_list = ibs.get_valid_nids()
    contrib_list = ibs.get_valid_contrib_rowids()
    aid_list_count = ibs.filter_aids_count()
    nid_list_count_dup = ibs.get_annot_name_rowids(aid_list_count)
    nid_list_count = list(set(nid_list_count_dup))

    # Calculate the Petersen-Lincoln index from the last two days
    try:
        c1 = bar_value_list4[-2]
        c2 = bar_value_list4[-1]
        c3 = bar_value_list6[-1]
        pl_index = int(math.ceil( (c1 * c2) / c3 ))
        pl_error_num = float(c1 * c1 * c2 * (c2 - c3))
        pl_error_dom = float(c3 ** 3)
        pl_error = int(math.ceil( 1.96 * math.sqrt(pl_error_num / pl_error_dom) ))
    except IndexError:
        # pl_index = 'Undefined - Zero recaptured (k = 0)'
        pl_index = 0
        pl_error = 0
    except ZeroDivisionError:
        # pl_index = 'Undefined - Zero recaptured (k = 0)'
        pl_index = 0
        pl_error = 0

    # Get the markers
    gid_list_markers = ibs.get_annot_gids(aid_list_count)
    gps_list_markers = map(list, ibs.get_image_gps(gid_list_markers))
    gps_list_markers_all = map(list, ibs.get_image_gps(gid_list))

    REMOVE_DUP_CODE = True
    if not REMOVE_DUP_CODE:
        # Get the tracks
        nid_track_dict = ut.ddict(list)
        for nid, gps in zip(nid_list_count_dup, gps_list_markers):
            if gps[0] == -1.0 and gps[1] == -1.0:
                # (-1, -1) is the missing-GPS sentinel.
                continue
            nid_track_dict[nid].append(gps)
        gps_list_tracks = [ nid_track_dict[nid] for nid in sorted(nid_track_dict.keys()) ]
    else:
        __nid_list, gps_track_list, aid_track_list = ibs.get_name_gps_tracks(aid_list=aid_list_count)
        gps_list_tracks = list(map(lambda x: list(map(list, x)), gps_track_list))

    valid_aids = ibs.get_valid_aids()
    valid_gids = ibs.get_valid_gids()
    valid_aids_ = ibs.filter_aids_custom(valid_aids)
    valid_gids_ = ibs.filter_gids_custom(valid_gids)
    used_gids = list(set( ibs.get_annot_gids(valid_aids) ))
    used_contrib_tags = list(set( ibs.get_image_contributor_tag(used_gids) ))

    # Get Age and sex (By Name)
    name_sex_list = ibs.get_name_sex(nid_list_count)
    name_age_months_est_mins_list = ibs.get_name_age_months_est_min(nid_list_count)
    name_age_months_est_maxs_list = ibs.get_name_age_months_est_max(nid_list_count)
    # age_list[sex][bucket]: sex 0/1 plus 2 for unknown; buckets <12, 12-36, 36+.
    age_list = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    age_unreviewed = 0
    age_ambiguous = 0
    for nid, sex, min_ages, max_ages in zip(nid_list_count, name_sex_list, name_age_months_est_mins_list, name_age_months_est_maxs_list):
        if len(set(min_ages)) > 1 or len(set(max_ages)) > 1:
            # A name with conflicting ages across its annotations is ambiguous.
            age_ambiguous += 1
            continue
        min_age = None
        max_age = None
        if len(min_ages) > 0:
            min_age = min_ages[0]
        if len(max_ages) > 0:
            max_age = max_ages[0]
        # Histogram
        # BUGFIX: the original used ``min_age is -1`` / ``max_age is -1`` --
        # identity comparisons with ints that only work because CPython caches
        # small integers; use value comparisons instead.
        if (min_age is None and max_age is None) or (min_age == -1 and max_age == -1):
            age_unreviewed += 1
            continue
        if sex not in [0, 1]:
            sex = 2
            # continue
        # NOTE(review): ``max_age < 12`` with max_age None relies on Python 2
        # None-ordering -- this raises TypeError under Python 3; confirm
        # intended semantics before porting.
        if (min_age is None or min_age < 12) and max_age < 12:
            age_list[sex][0] += 1
        elif 12 <= min_age and min_age < 36 and 12 <= max_age and max_age < 36:
            age_list[sex][1] += 1
        elif 36 <= min_age and (36 <= max_age or max_age is None):
            age_list[sex][2] += 1

    dbinfo_str = dbinfo()
    return ap.template('view',
                       line_index_list=index_list,
                       line_label_list=label_list,
                       line_value_list=value_list,
                       prediction_list=prediction_list,
                       pl_index=pl_index,
                       pl_error=pl_error,
                       gps_list_markers=gps_list_markers,
                       gps_list_markers_all=gps_list_markers_all,
                       gps_list_tracks=gps_list_tracks,
                       bar_label_list=bar_label_list,
                       bar_value_list1=bar_value_list1,
                       bar_value_list2=bar_value_list2,
                       bar_value_list3=bar_value_list3,
                       bar_value_list4=bar_value_list4,
                       bar_value_list5=bar_value_list5,
                       bar_value_list6=bar_value_list6,
                       age_list=age_list,
                       age_ambiguous=age_ambiguous,
                       age_unreviewed=age_unreviewed,
                       dbinfo_str=dbinfo_str,
                       imgsetid_list=imgsetid_list,
                       imgsetid_list_str=','.join(map(str, imgsetid_list)),
                       num_imgsetids=len(imgsetid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       contrib_list=contrib_list,
                       contrib_list_str=','.join(map(str, contrib_list)),
                       num_contribs=len(contrib_list),
                       gid_list_count=valid_gids_,
                       gid_list_count_str=','.join(map(str, valid_gids_)),
                       num_gids_count=len(valid_gids_),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       aid_list_count=valid_aids_,
                       aid_list_count_str=','.join(map(str, valid_aids_)),
                       num_aids_count=len(valid_aids_),
                       nid_list=nid_list,
                       nid_list_str=','.join(map(str, nid_list)),
                       num_nids=len(nid_list),
                       nid_list_count=nid_list_count,
                       nid_list_count_str=','.join(map(str, nid_list_count)),
                       num_nids_count=len(nid_list_count),
                       used_gids=used_gids,
                       num_used_gids=len(used_gids),
                       used_contribs=used_contrib_tags,
                       num_used_contribs=len(used_contrib_tags))
@register_route('/view/imagesets')
def view_imagesets():
    """Render the imageset overview table.

    Filters by the comma-separated ``imgsetid`` request arg when given;
    otherwise shows every valid imageset. Rows are sorted by start time.
    """
    ibs = current_app.ibs
    filtered = True
    imgsetid = request.args.get('imgsetid', '')
    if len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
    else:
        imgsetid_list = ibs.get_valid_imgsetids()
        filtered = False
    start_time_posix_list = ibs.get_imageset_start_time_posix(imgsetid_list)
    datetime_list = [
        ut.unixtime_to_datetimestr(start_time_posix)
        if start_time_posix is not None else
        'Unknown'
        for start_time_posix in start_time_posix_list
    ]
    gids_list = [ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ]
    aids_list = [ ut.flatten(ibs.get_image_aids(gid_list)) for gid_list in gids_list ]
    images_reviewed_list = [ imageset_image_processed(ibs, gid_list) for gid_list in gids_list ]
    annots_reviewed_viewpoint_list = [ imageset_annot_viewpoint_processed(ibs, aid_list) for aid_list in aids_list ]
    annots_reviewed_quality_list = [ imageset_annot_quality_processed(ibs, aid_list) for aid_list in aids_list ]
    image_processed_list = [ images_reviewed.count(True) for images_reviewed in images_reviewed_list ]
    annot_processed_viewpoint_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_viewpoint_list ]
    annot_processed_quality_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_quality_list ]
    reviewed_list = [ all(images_reviewed) and all(annots_reviewed_viewpoint) and all(annot_processed_quality) for images_reviewed, annots_reviewed_viewpoint, annot_processed_quality in zip(images_reviewed_list, annots_reviewed_viewpoint_list, annots_reviewed_quality_list) ]
    # BUG FIX: materialize the zip so .sort() works on Python 3 (zip objects
    # have no sort method); matches the list(zip(...)) pattern used elsewhere
    # in this module (e.g. turk_additional).
    imageset_list = list(zip(
        imgsetid_list,
        ibs.get_imageset_text(imgsetid_list),
        ibs.get_imageset_num_gids(imgsetid_list),
        image_processed_list,
        ibs.get_imageset_num_aids(imgsetid_list),
        annot_processed_viewpoint_list,
        annot_processed_quality_list,
        start_time_posix_list,
        datetime_list,
        reviewed_list,
    ))
    imageset_list.sort(key=lambda t: t[7])  # sort by start_time_posix
    return ap.template('view', 'imagesets',
                       filtered=filtered,
                       imgsetid_list=imgsetid_list,
                       imgsetid_list_str=','.join(map(str, imgsetid_list)),
                       num_imgsetids=len(imgsetid_list),
                       imageset_list=imageset_list,
                       num_imagesets=len(imageset_list))
@register_route('/view/images')
def view_images():
    """Render a paginated table of images.

    Filters by comma-separated ``gid`` or ``imgsetid`` request args; with
    neither, all valid gids are shown. Rows are sorted by image unixtime.
    """
    ibs = current_app.ibs
    filtered = True
    imgsetid_list = []
    gid = request.args.get('gid', '')
    imgsetid = request.args.get('imgsetid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
    elif len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
        # BUG FIX: iterate with the parsed int imgsetid_ rather than the raw
        # request string imgsetid (matches view_annotations / view_names).
        gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
    else:
        gid_list = ibs.get_valid_gids()
        filtered = False
    # Pagination window over the full gid list
    page_start = min(len(gid_list), (page - 1) * PAGE_SIZE)
    page_end = min(len(gid_list), page * PAGE_SIZE)
    page_total = int(math.ceil(len(gid_list) / PAGE_SIZE))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(gid_list) else page + 1
    gid_list = gid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(gid_list), page_previous, page_next, ))
    image_unixtime_list = ibs.get_image_unixtime(gid_list)
    datetime_list = [
        ut.unixtime_to_datetimestr(image_unixtime)
        if image_unixtime is not None
        else
        'Unknown'
        for image_unixtime in image_unixtime_list
    ]
    # BUG FIX: materialize the zip so .sort() works on Python 3.
    image_list = list(zip(
        gid_list,
        [ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_image_imgsetids(gid_list) ],
        ibs.get_image_gnames(gid_list),
        image_unixtime_list,
        datetime_list,
        ibs.get_image_gps(gid_list),
        ibs.get_image_party_tag(gid_list),
        ibs.get_image_contributor_tag(gid_list),
        ibs.get_image_notes(gid_list),
        imageset_image_processed(ibs, gid_list),
    ))
    image_list.sort(key=lambda t: t[3])  # sort by unixtime
    return ap.template('view', 'images',
                       filtered=filtered,
                       imgsetid_list=imgsetid_list,
                       imgsetid_list_str=','.join(map(str, imgsetid_list)),
                       num_imgsetids=len(imgsetid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       image_list=image_list,
                       num_images=len(image_list),
                       page=page,
                       page_start=page_start,
                       page_end=page_end,
                       page_total=page_total,
                       page_previous=page_previous,
                       page_next=page_next)
@register_route('/view/annotations')
def view_annotations():
    """Render a paginated table of annotations.

    Filters by comma-separated ``aid``, ``gid`` or ``imgsetid`` request args
    (first match wins); with none, all valid aids are shown. Sorted by aid.
    """
    ibs = current_app.ibs
    filtered = True
    imgsetid_list = []
    gid_list = []
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    imgsetid = request.args.get('imgsetid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    elif len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
        gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    else:
        aid_list = ibs.get_valid_aids()
        filtered = False
    # Pagination window over the full aid list
    page_start = min(len(aid_list), (page - 1) * PAGE_SIZE)
    page_end = min(len(aid_list), page * PAGE_SIZE)
    page_total = int(math.ceil(len(aid_list) / PAGE_SIZE))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(aid_list) else page + 1
    aid_list = aid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(aid_list), page_previous, page_next, ))
    # BUG FIX: materialize the zip so .sort() works on Python 3.
    annotation_list = list(zip(
        aid_list,
        ibs.get_annot_gids(aid_list),
        [ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_annot_imgsetids(aid_list) ],
        ibs.get_annot_image_names(aid_list),
        ibs.get_annot_names(aid_list),
        ibs.get_annot_exemplar_flags(aid_list),
        ibs.get_annot_species_texts(aid_list),
        ibs.get_annot_yaw_texts(aid_list),
        ibs.get_annot_quality_texts(aid_list),
        ibs.get_annot_sex_texts(aid_list),
        ibs.get_annot_age_months_est(aid_list),
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(imageset_annot_viewpoint_processed(ibs, aid_list), imageset_annot_quality_processed(ibs, aid_list)) ],
    ))
    annotation_list.sort(key=lambda t: t[0])  # sort by aid
    return ap.template('view', 'annotations',
                       filtered=filtered,
                       imgsetid_list=imgsetid_list,
                       imgsetid_list_str=','.join(map(str, imgsetid_list)),
                       num_imgsetids=len(imgsetid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       annotation_list=annotation_list,
                       num_annotations=len(annotation_list),
                       page=page,
                       page_start=page_start,
                       page_end=page_end,
                       page_total=page_total,
                       page_previous=page_previous,
                       page_next=page_next)
@register_route('/view/names')
def view_names():
    """Render a paginated table of names with their annotations.

    Filters by comma-separated ``nid``, ``aid``, ``gid`` or ``imgsetid``
    request args (first match wins). Uses a smaller page size because every
    name row expands into its full annotation list. Sorted by nid.
    """
    ibs = current_app.ibs
    filtered = True
    aid_list = []
    imgsetid_list = []
    gid_list = []
    nid = request.args.get('nid', '')
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    imgsetid = request.args.get('imgsetid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(nid) > 0:
        nid_list = nid.strip().split(',')
        nid_list = [ None if nid_ == 'None' or nid_ == '' else int(nid_) for nid_ in nid_list ]
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
        nid_list = ibs.get_annot_name_rowids(aid_list)
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        nid_list = ibs.get_annot_name_rowids(aid_list)
    elif len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
        gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        nid_list = ibs.get_annot_name_rowids(aid_list)
    else:
        nid_list = ibs.get_valid_nids()
        filtered = False
    # Pagination window (1/5th of the normal page size; rows are heavy)
    PAGE_SIZE_ = int(PAGE_SIZE / 5)
    page_start = min(len(nid_list), (page - 1) * PAGE_SIZE_)
    page_end = min(len(nid_list), page * PAGE_SIZE_)
    page_total = int(math.ceil(len(nid_list) / PAGE_SIZE_))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(nid_list) else page + 1
    nid_list = nid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(nid_list), page_previous, page_next, ))
    aids_list = ibs.get_name_aids(nid_list)
    # BUG FIX: materialize the zips (inner and outer) so the template can
    # iterate them repeatedly and .sort() works on Python 3.
    annotations_list = [ list(zip(
        aid_list_,
        ibs.get_annot_gids(aid_list_),
        [ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_annot_imgsetids(aid_list_) ],
        ibs.get_annot_image_names(aid_list_),
        ibs.get_annot_names(aid_list_),
        ibs.get_annot_exemplar_flags(aid_list_),
        ibs.get_annot_species_texts(aid_list_),
        ibs.get_annot_yaw_texts(aid_list_),
        ibs.get_annot_quality_texts(aid_list_),
        ibs.get_annot_sex_texts(aid_list_),
        ibs.get_annot_age_months_est(aid_list_),
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(imageset_annot_viewpoint_processed(ibs, aid_list_), imageset_annot_quality_processed(ibs, aid_list_)) ],
    )) for aid_list_ in aids_list ]
    name_list = list(zip(
        nid_list,
        annotations_list
    ))
    name_list.sort(key=lambda t: t[0])  # sort by nid
    return ap.template('view', 'names',
                       filtered=filtered,
                       imgsetid_list=imgsetid_list,
                       imgsetid_list_str=','.join(map(str, imgsetid_list)),
                       num_imgsetids=len(imgsetid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       nid_list=nid_list,
                       nid_list_str=','.join(map(str, nid_list)),
                       num_nids=len(nid_list),
                       name_list=name_list,
                       num_names=len(name_list),
                       page=page,
                       page_start=page_start,
                       page_end=page_end,
                       page_total=page_total,
                       page_previous=page_previous,
                       page_next=page_next)
@register_route('/turk')
def turk():
    """Render the top-level turk landing page for an optional imageset."""
    raw_imgsetid = request.args.get('imgsetid', '')
    if raw_imgsetid == 'None' or raw_imgsetid == '':
        imgsetid = None
    else:
        imgsetid = int(raw_imgsetid)
    return ap.template('turk', None, imgsetid=imgsetid)
@register_route('/turk/detection')
def turk_detection():
    """Render the detection-turking page for one image.

    Picks an explicit ``gid`` from the request args, or a random
    not-yet-reviewed image from the (optionally imageset-filtered) pool.
    When every image is reviewed, renders the 'finished' state.
    """
    ibs = current_app.ibs
    refer_aid = request.args.get('refer_aid', None)
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    reviewed_list = imageset_image_processed(ibs, gid_list)
    # BUG FIX: guard the empty-imageset case like the other turk handlers
    # (get_turk_annot_args, turk_additional) instead of raising.
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
    gid = request.args.get('gid', '')
    if len(gid) > 0:
        gid = int(gid)
    else:
        gid_list_ = ut.filterfalse_items(gid_list, reviewed_list)
        if len(gid_list_) == 0:
            gid = None
        else:
            # Random pick spreads concurrent turkers over the remaining pool
            gid = random.choice(gid_list_)
    previous = request.args.get('previous', None)
    finished = gid is None
    review = 'review' in request.args.keys()
    display_instructions = request.cookies.get('detection_instructions_seen', 1) == 0
    display_species_examples = False  # request.cookies.get('detection_example_species_seen', 0) == 0
    if not finished:
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image, filter_width=False)
        # Scale existing annotation bboxes from image pixels to display pixels
        width, height = ibs.get_image_sizes(gid)
        scale_factor = float(ap.TARGET_WIDTH) / float(width)
        aid_list = ibs.get_image_aids(gid)
        annot_bbox_list = ibs.get_annot_bboxes(aid_list)
        annot_thetas_list = ibs.get_annot_thetas(aid_list)
        species_list = ibs.get_annot_species_texts(aid_list)
        annotation_list = []
        for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
            temp = {}
            temp['left'] = int(scale_factor * annot_bbox[0])
            temp['top'] = int(scale_factor * annot_bbox[1])
            temp['width'] = int(scale_factor * (annot_bbox[2]))
            temp['height'] = int(scale_factor * (annot_bbox[3]))
            temp['label'] = species
            temp['id'] = aid
            temp['angle'] = float(annot_theta)
            annotation_list.append(temp)
        if len(species_list) > 0:
            species = max(set(species_list), key=species_list.count)  # Get most common species
        elif default_species(ibs) is not None:
            species = default_species(ibs)
        else:
            species = KEY_DEFAULTS[SPECIES_KEY]
    else:
        gpath = None
        species = None
        image_src = None
        annotation_list = []
    return ap.template('turk', 'detection',
                       imgsetid=imgsetid,
                       gid=gid,
                       refer_aid=refer_aid,
                       species=species,
                       image_path=gpath,
                       image_src=image_src,
                       previous=previous,
                       imagesettext=imagesettext,
                       progress=progress,
                       finished=finished,
                       annotation_list=annotation_list,
                       display_instructions=display_instructions,
                       display_species_examples=display_species_examples,
                       review=review)
@register_route('/turk/detection/dynamic')
def turk_detection_dynamic():
    """Render the dynamic (percent-positioned) detection view for one image.

    Unlike turk_detection, bboxes are expressed as percentages of the image
    dimensions so the client can scale them freely.
    """
    ibs = current_app.ibs
    gid = request.args.get('gid', None)
    # Request args arrive as strings; convert to an int rowid as the other
    # turk handlers do before passing to the ibs accessors.
    gid = None if gid is None else int(gid)
    gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
    image = ap.open_oriented_image(gpath)
    image_src = ap.embed_image_html(image, filter_width=False)
    # Get annotations
    width, height = ibs.get_image_sizes(gid)
    aid_list = ibs.get_image_aids(gid)
    annot_bbox_list = ibs.get_annot_bboxes(aid_list)
    annot_thetas_list = ibs.get_annot_thetas(aid_list)
    species_list = ibs.get_annot_species_texts(aid_list)
    # Convert bounding boxes to percentages of the image size.
    # BUG FIX: multiply by 100.0 before dividing so the division is float
    # division even if bbox/width/height are ints (avoids int//int -> 0
    # under Python 2 semantics).
    annotation_list = []
    for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
        temp = {}
        temp['left'] = 100.0 * annot_bbox[0] / width
        temp['top'] = 100.0 * annot_bbox[1] / height
        temp['width'] = 100.0 * annot_bbox[2] / width
        temp['height'] = 100.0 * annot_bbox[3] / height
        temp['label'] = species
        temp['id'] = aid
        temp['angle'] = float(annot_theta)
        annotation_list.append(temp)
    if len(species_list) > 0:
        species = max(set(species_list), key=species_list.count)  # Get most common species
    elif default_species(ibs) is not None:
        species = default_species(ibs)
    else:
        species = KEY_DEFAULTS[SPECIES_KEY]
    return ap.template('turk', 'detection_dynamic',
                       gid=gid,
                       refer_aid=None,
                       species=species,
                       image_path=gpath,
                       image_src=image_src,
                       annotation_list=annotation_list,
                       __wrapper__=False)
def get_turk_annot_args(is_reviewed_func):
    """
    Helper to return aids in an imageset or a group review

    Two modes, selected by the request args:
      * normal: aids come from the (optionally imageset-filtered) images,
        and ``is_reviewed_func(ibs, aid_list)`` flags which are done;
      * group review (both ``src_ag`` and ``dst_ag`` given): aids come from
        the source annot group, and an aid counts as reviewed once it is
        also present in the destination group.

    Returns the tuple
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous).
    """
    ibs = current_app.ibs
    def _ensureid(_id):
        # Request args arrive as strings; '' / 'None' mean "not given".
        return None if _id == 'None' or _id == '' else int(_id)
    imgsetid = request.args.get('imgsetid', '')
    src_ag = request.args.get('src_ag', '')
    dst_ag = request.args.get('dst_ag', '')
    imgsetid = _ensureid(imgsetid)
    src_ag = _ensureid(src_ag)
    dst_ag = _ensureid(dst_ag)
    group_review_flag = src_ag is not None and dst_ag is not None
    if not group_review_flag:
        gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        reviewed_list = is_reviewed_func(ibs, aid_list)
    else:
        src_gar_rowid_list = ibs.get_annotgroup_gar_rowids(src_ag)
        dst_gar_rowid_list = ibs.get_annotgroup_gar_rowids(dst_ag)
        src_aid_list = ibs.get_gar_aid(src_gar_rowid_list)
        dst_aid_list = ibs.get_gar_aid(dst_gar_rowid_list)
        aid_list = src_aid_list
        # An aid is "reviewed" once it has been moved into the dst group
        reviewed_list = [ src_aid in dst_aid_list for src_aid in src_aid_list ]
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        # No aids at all -> report zero progress instead of crashing
        progress = '0.00'
    aid = request.args.get('aid', '')
    if len(aid) > 0:
        aid = int(aid)
    else:
        aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
        if len(aid_list_) == 0:
            aid = None
        else:
            if group_review_flag:
                # Group review is ordered; serve the aids in sequence
                aid = aid_list_[0]
            else:
                aid = random.choice(aid_list_)
    previous = request.args.get('previous', None)
    print('aid = %r' % (aid,))
    #print(ut.dict_str(ibs.get_annot_info(aid)))
    print(ut.obj_str(ibs.get_annot_info(aid, default=True, nl=True)))
    return aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous
@register_route('/turk/viewpoint')
def turk_viewpoint():
    """
    Render the viewpoint-turking page for one annotation.

    CommandLine:
        python -m ibeis.web.app --exec-turk_viewpoint --db PZ_Master1

    Example:
        >>> # SCRIPT
        >>> from ibeis.ibsfuncs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
        >>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    tup = get_turk_annot_args(imageset_annot_viewpoint_processed)
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
    value = convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('viewpoint_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image)
        species = ibs.get_annot_species_texts(aid)
    else:
        gid = None
        gpath = None
        image_src = None
        species = None
    imagesettext = ibs.get_imageset_text(imgsetid)
    # Build the species dropdown, sorted by nice name, with the current
    # species preselected and 'Unspecified' prepended.
    species_rowids = ibs._get_all_species_rowids()
    species_nice_list = ibs.get_species_nice(species_rowids)
    combined_list = sorted(zip(species_nice_list, species_rowids))
    species_nice_list = [ combined[0] for combined in combined_list ]
    species_rowids = [ combined[1] for combined in combined_list ]
    species_text_list = ibs.get_species_texts(species_rowids)
    species_selected_list = [ species == species_ for species_ in species_text_list ]
    # BUG FIX: materialize the zip -- list + zip raises TypeError on Python 3
    species_list = list(zip(species_nice_list, species_text_list, species_selected_list))
    species_list = [ ('Unspecified', const.UNKNOWN, True) ] + species_list
    return ap.template('turk', 'viewpoint',
                       imgsetid=imgsetid,
                       src_ag=src_ag,
                       dst_ag=dst_ag,
                       gid=gid,
                       aid=aid,
                       value=value,
                       image_path=gpath,
                       image_src=image_src,
                       previous=previous,
                       species_list=species_list,
                       imagesettext=imagesettext,
                       progress=progress,
                       finished=finished,
                       display_instructions=display_instructions,
                       review=review)
@register_route('/turk/quality')
def turk_quality():
    """
    Render the quality-turking page for one annotation.

    PZ Needs Tags:
        17242
        14468
        14427
        15946
        14771
        14084
        4102
        6074
        3409

    GZ Needs Tags;
        1302

    CommandLine:
        python -m ibeis.web.app --exec-turk_quality --db PZ_Master1
        python -m ibeis.web.app --exec-turk_quality --db GZ_Master1
        python -m ibeis.web.app --exec-turk_quality --db GIRM_Master1

    Example:
        >>> # SCRIPT
        >>> from ibeis.ibsfuncs import * # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(qual=True)
        >>> valid_views = ['primary', 'primary1', 'primary-1']
        >>> aid_list = ibs.filter_aids_to_viewpoint(aid_list_, valid_views, unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    # Shared helper picks the next unreviewed aid (or honors ?aid=...)
    tup = get_turk_annot_args(imageset_annot_quality_processed)
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
    value = ibs.get_annot_qualities(aid)
    # Normalize sentinel values for the form: -1 appears to mean "unset"
    # (shown as no selection), 0 is bumped to 1 -- presumably the lowest
    # selectable quality in the UI; TODO confirm against the template.
    if value == -1:
        value = None
    if value == 0:
        value = 1
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('quality_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image)
    else:
        # Nothing left to review: render the 'finished' state
        gid = None
        gpath = None
        image_src = None
    imagesettext = ibs.get_imageset_text(imgsetid)
    return ap.template('turk', 'quality',
                       imgsetid=imgsetid,
                       src_ag=src_ag,
                       dst_ag=dst_ag,
                       gid=gid,
                       aid=aid,
                       value=value,
                       image_path=gpath,
                       image_src=image_src,
                       previous=previous,
                       imagesettext=imagesettext,
                       progress=progress,
                       finished=finished,
                       display_instructions=display_instructions,
                       review=review)
##@register_route('/turk/viewpoint')
#def old_turk_viewpoint():
# #ibs = current_app.ibs
# #imgsetid = request.args.get('imgsetid', '')
# #imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
# #imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
# #src_ag = request.args.get('src_ag', '')
# #src_ag = None if src_ag == 'None' or src_ag == '' else int(src_ag)
# #dst_ag = request.args.get('dst_ag', '')
# #dst_ag = None if dst_ag == 'None' or dst_ag == '' else int(dst_ag)
# #group_review_flag = src_ag is not None and dst_ag is not None
# #if not group_review_flag:
# # gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
# # aid_list = ut.flatten(ibs.get_image_aids(gid_list))
# # reviewed_list = imageset_annot_viewpoint_processed(ibs, aid_list)
# #else:
# # src_gar_rowid_list = ibs.get_annotgroup_gar_rowids(src_ag)
# # dst_gar_rowid_list = ibs.get_annotgroup_gar_rowids(dst_ag)
# # src_aid_list = ibs.get_gar_aid(src_gar_rowid_list)
# # dst_aid_list = ibs.get_gar_aid(dst_gar_rowid_list)
# # aid_list = src_aid_list
# # reviewed_list = [ src_aid in dst_aid_list for src_aid in src_aid_list ]
# #previous = request.args.get('previous', None)
# ibs = current_app.ibs
# tup = get_turk_annot_args(imageset_annot_viewpoint_processed)
# (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
# value = convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
# review = 'review' in request.args.keys()
# finished = aid is None
# display_instructions = request.cookies.get('viewpoint_instructions_seen', 1) == 0
# if not finished:
# gid = ibs.get_annot_gids(aid)
# gpath = ibs.get_annot_chip_fpath(aid)
# image = ap.open_oriented_image(gpath)
# image_src = ap.embed_image_html(image)
# else:
# gid = None
# gpath = None
# image_src = None
# imagesettext = ibs.get_imageset_text(imgsetid)
# return ap.template('turk', 'viewpoint',
# imgsetid=imgsetid,
# src_ag=src_ag,
# dst_ag=dst_ag,
# gid=gid,
# aid=aid,
# value=value,
# image_path=gpath,
# image_src=image_src,
# previous=previous,
# imagesettext=imagesettext,
# progress=progress,
# finished=finished,
# display_instructions=display_instructions,
# review=review)
#@register_route('/turk/quality')
#def old_turk_quality():
# #ibs = current_app.ibs
# #imgsetid = request.args.get('imgsetid', '')
# #imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
# #gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
# #aid_list = ut.flatten(ibs.get_image_aids(gid_list))
# #reviewed_list = imageset_annot_quality_processed(ibs, aid_list)
# #try:
# # progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
# #except ZeroDivisionError:
# # progress = '0.00'
# #aid = request.args.get('aid', '')
# #if len(aid) > 0:
# # aid = int(aid)
# #else:
# # aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
# # if len(aid_list_) == 0:
# # aid = None
# # else:
# # # aid = aid_list_[0]
# # aid = random.choice(aid_list_)
# #previous = request.args.get('previous', None)
# ibs = current_app.ibs
# tup = get_turk_annot_args(imageset_annot_quality_processed)
# (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
# value = ibs.get_annot_qualities(aid)
# if value == -1:
# value = None
# if value == 0:
# value = 1
# review = 'review' in request.args.keys()
# finished = aid is None
# display_instructions = request.cookies.get('quality_instructions_seen', 1) == 0
# if not finished:
# gid = ibs.get_annot_gids(aid)
# gpath = ibs.get_annot_chip_fpath(aid)
# image = ap.open_oriented_image(gpath)
# image_src = ap.embed_image_html(image)
# else:
# gid = None
# gpath = None
# image_src = None
# imagesettext = ibs.get_imageset_text(imgsetid)
# return ap.template('turk', 'quality',
# imgsetid=imgsetid,
# src_ag=src_ag,
# dst_ag=dst_ag,
# gid=gid,
# aid=aid,
# value=value,
# image_path=gpath,
# image_src=image_src,
# previous=previous,
# imagesettext=imagesettext,
# progress=progress,
# finished=finished,
# display_instructions=display_instructions,
# review=review)
@register_route('/turk/additional')
def turk_additional():
    """Render the 'additional info' turking page (sex / age) for one annotation.

    Maps the stored (sex, age-month-range) values onto the form's discrete
    option indices, and lists the other annotations of the same name so the
    turker has context.
    """
    ibs = current_app.ibs
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    nid_list = ibs.get_annot_nids(aid_list)
    reviewed_list = imageset_annot_additional_processed(ibs, aid_list, nid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
    aid = request.args.get('aid', '')
    if len(aid) > 0:
        aid = int(aid)
    else:
        aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
        if len(aid_list_) == 0:
            aid = None
        else:
            # aid = aid_list_[0]
            aid = random.choice(aid_list_)
    previous = request.args.get('previous', None)
    # Sex: stored as -1 (unknown) / 0 / 1; form uses 2-based option values
    value_sex = ibs.get_annot_sex([aid])[0]
    if value_sex >= 0:
        value_sex += 2
    else:
        value_sex = None
    # Age: map the stored (min, max) month range onto form options 1-7.
    # BUG FIX: compare small ints with == instead of `is` (identity only
    # worked by accident of CPython's small-int cache), chain the mutually
    # exclusive ranges with elif, and test `is None` before `> 36` so a
    # None max cannot raise TypeError on Python 3.
    value_age_min, value_age_max = ibs.get_annot_age_months_est([aid])[0]
    value_age = None
    if (value_age_min == -1 or value_age_min is None) and (value_age_max == -1 or value_age_max is None):
        value_age = 1
    elif (value_age_min == 0 or value_age_min is None) and value_age_max == 2:
        value_age = 2
    elif value_age_min == 3 and value_age_max == 5:
        value_age = 3
    elif value_age_min == 6 and value_age_max == 11:
        value_age = 4
    elif value_age_min == 12 and value_age_max == 23:
        value_age = 5
    elif value_age_min == 24 and value_age_max == 35:
        value_age = 6
    elif value_age_min == 36 and (value_age_max is None or value_age_max > 36):
        value_age = 7
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('additional_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image)
    else:
        gid = None
        gpath = None
        image_src = None
    # Gather the sibling annotations of the same name for context
    name_aid_list = None
    nid = ibs.get_annot_name_rowids(aid)
    if nid is not None:
        name_aid_list = ibs.get_name_aids(nid)
        quality_list = ibs.get_annot_qualities(name_aid_list)
        quality_text_list = ibs.get_annot_quality_texts(name_aid_list)
        yaw_text_list = ibs.get_annot_yaw_texts(name_aid_list)
        name_aid_combined_list = list(zip(
            name_aid_list,
            quality_list,
            quality_text_list,
            yaw_text_list,
        ))
        # NOTE(review): qualities may contain None, which would make this
        # sort raise TypeError on Python 3 -- confirm against real data.
        name_aid_combined_list.sort(key=lambda t: t[1], reverse=True)
    return ap.template('turk', 'additional',
                       imgsetid=imgsetid,
                       gid=gid,
                       aid=aid,
                       value_sex=value_sex,
                       value_age=value_age,
                       image_path=gpath,
                       name_aid_combined_list=name_aid_combined_list,
                       image_src=image_src,
                       previous=previous,
                       imagesettext=imagesettext,
                       progress=progress,
                       finished=finished,
                       display_instructions=display_instructions,
                       review=review)
@register_route('/submit/detection', methods=['POST'])
def submit_detection():
    """Persist a detection-turk submission for one image.

    Handles three methods from the form: 'delete' (currently disabled),
    'clear' (remove all annotations of the image), and the default save
    path, which reconciles the submitted annotation set with the existing
    one (deleting, updating, and creating annotations as needed) and marks
    the image reviewed.
    """
    ibs = current_app.ibs
    method = request.form.get('detection-submit', '')
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
    gid = int(request.form['detection-gid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        # NOTE: image deletion is intentionally disabled:
        # ibs.delete_images(gid)
        # print('[web] (DELETED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        pass
    elif method.lower() == 'clear':
        aid_list = ibs.get_image_aids(gid)
        ibs.delete_annots(aid_list)
        # BUG FIX: corrected 'CLEAERED' typo in the log message
        print('[web] (CLEARED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        redirection = request.referrer
        if 'gid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&gid=%d' % (redirection, gid, )
            else:
                redirection = '%s?gid=%d' % (redirection, gid, )
        return redirect(redirection)
    else:
        current_aid_list = ibs.get_image_aids(gid)
        # Make new annotations: submitted coords are in display pixels,
        # scale back to image pixels
        width, height = ibs.get_image_sizes(gid)
        scale_factor = float(width) / float(ap.TARGET_WIDTH)
        annotation_list = json.loads(request.form['detection-annotations'])
        bbox_list = [
            (
                int(scale_factor * annot['left']),
                int(scale_factor * annot['top']),
                int(scale_factor * annot['width']),
                int(scale_factor * annot['height']),
            )
            for annot in annotation_list
        ]
        theta_list = [
            float(annot['angle'])
            for annot in annotation_list
        ]
        # An annot with id=None is new; an existing id means it survived
        survived_aid_list = [
            None if annot['id'] is None else int(annot['id'])
            for annot in annotation_list
        ]
        species_list = [
            annot['label']
            for annot in annotation_list
        ]
        # Delete annotations that didn't survive
        kill_aid_list = list(set(current_aid_list) - set(survived_aid_list))
        ibs.delete_annots(kill_aid_list)
        for aid, bbox, theta, species in zip(survived_aid_list, bbox_list, theta_list, species_list):
            if aid is None:
                ibs.add_annots([gid], [bbox], theta_list=[theta], species_list=[species])
            else:
                ibs.set_annot_bboxes([aid], [bbox])
                ibs.set_annot_thetas([aid], [theta])
                ibs.set_annot_species([aid], [species])
        ibs.set_image_reviewed([gid], [1])
        # BUG FIX: log the scaled bbox_list, as the format string says,
        # instead of the raw annotation_list
        print('[web] turk_id: %s, gid: %d, bbox_list: %r, species_list: %r' % (turk_id, gid, bbox_list, species_list))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_detection', imgsetid=imgsetid, previous=gid))
def movegroup_aid(ibs, aid, src_ag, dst_ag):
    """Move *aid* from annot group ``src_ag`` into ``dst_ag``.

    The aid is added to the destination group; removal from the source
    group is currently disabled (commented out below).
    """
    gar_rowids = ibs.get_annot_gar_rowids(aid)
    group_rowids = ibs.get_gar_annotgroup_rowid(gar_rowids)
    src_gar_rowid = gar_rowids[group_rowids.index(src_ag)]
    print('Moving aid: %s from src_ag: %s (%s) to dst_ag: %s' % (aid, src_ag, src_gar_rowid, dst_ag))
    # NOTE: deletion from the source group is intentionally disabled:
    # ibs.delete_gar([src_gar_rowid])
    ibs.add_gar([dst_ag], [aid])
@register_route('/submit/viewpoint', methods=['POST'])
def submit_viewpoint():
    """Persist a viewpoint-turk submission for one annotation.

    Form methods: 'delete', 'make junk', 'rotate left', 'rotate right',
    or the default save path which records the yaw and species.
    """
    ibs = current_app.ibs
    method = request.form.get('viewpoint-submit', '')
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
    src_ag = request.args.get('src_ag', '')
    src_ag = None if src_ag == 'None' or src_ag == '' else int(src_ag)
    dst_ag = request.args.get('dst_ag', '')
    dst_ag = None if dst_ag == 'None' or dst_ag == '' else int(dst_ag)
    aid = int(request.form['viewpoint-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    # BUG FIX: the methods are chained with elif so 'delete' (which does not
    # return) no longer falls through into the save branch with aid=None;
    # this mirrors the if/else structure of submit_quality.
    if method.lower() == 'delete':
        ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    elif method.lower() == 'make junk':
        ibs.set_annot_quality_texts([aid], [const.QUAL_JUNK])
        print('[web] (SET AS JUNK) turk_id: %s, aid: %d' % (turk_id, aid, ))
        redirection = request.referrer
        if 'aid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&aid=%d' % (redirection, aid, )
            else:
                redirection = '%s?aid=%d' % (redirection, aid, )
        return redirect(redirection)
    elif method.lower() == 'rotate left':
        # Rotate theta a quarter turn and swap the bbox about its center
        theta = ibs.get_annot_thetas(aid)
        theta = (theta + PI / 2) % TAU
        ibs.set_annot_thetas(aid, theta)
        (xtl, ytl, w, h) = ibs.get_annot_bboxes(aid)
        diffx = int(round((w / 2.0) - (h / 2.0)))
        diffy = int(round((h / 2.0) - (w / 2.0)))
        xtl, ytl, w, h = xtl + diffx, ytl + diffy, h, w
        ibs.set_annot_bboxes([aid], [(xtl, ytl, w, h)])
        print('[web] (ROTATED LEFT) turk_id: %s, aid: %d' % (turk_id, aid, ))
        redirection = request.referrer
        if 'aid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&aid=%d' % (redirection, aid, )
            else:
                redirection = '%s?aid=%d' % (redirection, aid, )
        return redirect(redirection)
    elif method.lower() == 'rotate right':
        theta = ibs.get_annot_thetas(aid)
        theta = (theta - PI / 2) % TAU
        ibs.set_annot_thetas(aid, theta)
        (xtl, ytl, w, h) = ibs.get_annot_bboxes(aid)
        diffx = int(round((w / 2.0) - (h / 2.0)))
        diffy = int(round((h / 2.0) - (w / 2.0)))
        xtl, ytl, w, h = xtl + diffx, ytl + diffy, h, w
        ibs.set_annot_bboxes([aid], [(xtl, ytl, w, h)])
        print('[web] (ROTATED RIGHT) turk_id: %s, aid: %d' % (turk_id, aid, ))
        redirection = request.referrer
        if 'aid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&aid=%d' % (redirection, aid, )
            else:
                redirection = '%s?aid=%d' % (redirection, aid, )
        return redirect(redirection)
    else:
        if src_ag is not None and dst_ag is not None:
            movegroup_aid(ibs, aid, src_ag, dst_ag)
        value = int(request.form['viewpoint-value'])
        yaw = convert_old_viewpoint_to_yaw(value)
        species_text = request.form['viewpoint-species']
        ibs.set_annot_yaws([aid], [yaw], input_is_degrees=False)
        ibs.set_annot_species([aid], [species_text])
        print('[web] turk_id: %s, aid: %d, yaw: %d' % (turk_id, aid, yaw))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_viewpoint', imgsetid=imgsetid, src_ag=src_ag,
                                dst_ag=dst_ag, previous=aid))
@register_route('/submit/quality', methods=['POST'])
def submit_quality():
    """Process the quality-turking form submission.

    Either deletes the annotation or records its quality value, then
    redirects back to the referer (if given) or the quality turking page.
    """
    ibs = current_app.ibs
    method = request.form.get('quality-submit', '')
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid in ('None', '') else int(imgsetid)
    aid = int(request.form['quality-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    src_ag = request.args.get('src_ag', '')
    src_ag = None if src_ag in ('None', '') else int(src_ag)
    dst_ag = request.args.get('dst_ag', '')
    dst_ag = None if dst_ag in ('None', '') else int(dst_ag)
    if method.lower() == 'delete':
        ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        # Optionally move the annotation between review groups first
        if src_ag is not None and dst_ag is not None:
            movegroup_aid(ibs, aid, src_ag, dst_ag)
        quality = int(request.form['quality-value'])
        ibs.set_annot_qualities([aid], [quality])
        print('[web] turk_id: %s, aid: %d, quality: %d' % (turk_id, aid, quality))
    # Return HTML
    refer = request.args.get('refer', '')
    if refer:
        return redirect(ap.decode_refer_url(refer))
    return redirect(url_for('turk_quality', imgsetid=imgsetid, src_ag=src_ag,
                            dst_ag=dst_ag, previous=aid))
@register_route('/submit/additional', methods=['POST'])
def submit_additional():
    """Process the additional-metadata turking form (sex / age) for an annot.

    Deletes the annotation when requested; otherwise records sex and an
    estimated age range (in months), then redirects back to the turking page.
    """
    ibs = current_app.ibs
    method = request.form.get('additional-submit', '')
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid in ('None', '') else int(imgsetid)
    aid = int(request.form['additional-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        sex = int(request.form['additional-sex-value'])
        age = int(request.form['additional-age-value'])
        # Form sex codes >= 2 are offset by 2; anything below is unknown (-1)
        sex = sex - 2 if sex >= 2 else -1
        # Form age codes -> (min, max) estimated age range in months;
        # None means an open-ended / unspecified bound
        age_ranges = {
            1: (None, None),
            2: (None, 2),
            3: (3, 5),
            4: (6, 11),
            5: (12, 23),
            6: (24, 35),
            7: (36, None),
        }
        age_min, age_max = age_ranges.get(age, (None, None))
        ibs.set_annot_sex([aid], [sex])
        nid = ibs.get_annot_name_rowids(aid)
        DAN_SPECIAL_WRITE_AGE_TO_ALL_ANOTATIONS = True
        if nid is not None and DAN_SPECIAL_WRITE_AGE_TO_ALL_ANOTATIONS:
            # Propagate the age estimate to every annotation of this name
            aid_list = ibs.get_name_aids(nid)
            ibs.set_annot_age_months_est_min(aid_list, [age_min] * len(aid_list))
            ibs.set_annot_age_months_est_max(aid_list, [age_max] * len(aid_list))
        else:
            ibs.set_annot_age_months_est_min([aid], [age_min])
            ibs.set_annot_age_months_est_max([aid], [age_max])
        print('[web] turk_id: %s, aid: %d, sex: %r, age: %r' % (turk_id, aid, sex, age))
    # Return HTML
    refer = request.args.get('refer', '')
    if refer:
        return redirect(ap.decode_refer_url(refer))
    return redirect(url_for('turk_additional', imgsetid=imgsetid, previous=aid))
@register_route('/ajax/cookie')
def set_cookie():
    """Set a browser cookie from the request's `name`/`value` query args."""
    name = request.args['name']
    value = request.args['value']
    response = make_response('true')
    response.set_cookie(name, value)
    print('[web] Set Cookie: %r -> %r' % (name, value, ))
    return response
@register_route('/ajax/image/src/<gid>')
def image_src(gid=None, thumbnail=True, fresh=False, **kwargs):
    """Serve the image source for image <gid>.

    By default serves the cached thumbnail; with `fresh` (parameter, query
    arg, or form field) the cached thumbnail is deleted and regenerated
    first.  With thumbnail=False the full-resolution image path is served.
    """
    ibs = current_app.ibs
    if thumbnail:
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True)
        fresh = fresh or 'fresh' in request.args or 'fresh' in request.form
        if fresh:
            import os
            # BUGFIX: only delete the cached thumbnail if it actually exists,
            # instead of letting os.remove raise OSError on a missing file
            if os.path.exists(gpath):
                os.remove(gpath)
            # Regenerate the thumbnail at the (possibly new) path
            gpath = ibs.get_image_thumbpath(gid, ensure_paths=True)
    else:
        gpath = ibs.get_image_paths(gid)
    return ap.return_src(gpath)
@register_api('/api/image/<gid>/', methods=['GET'])
def image_base64_api(gid=None, thumbnail=True, fresh=False, **kwargs):
    r"""
    Return the base64 encoded image of image <gid>.

    RESTful:
        Method: GET
        URL: /api/image/<gid>/
    """
    # Thin wrapper: all the work happens in the shared image_src helper
    return image_src(gid, thumbnail=thumbnail, fresh=fresh, **kwargs)
@register_route('/api/image/src/<gid>/', methods=['GET'])
def image_src_api(gid=None, thumbnail=False, fresh=False, **kwargs):
    r"""
    Return the image file of image <gid>.

    `thumbnail` and `fresh` may also be enabled via query args or form
    fields of the same name.

    RESTful:
        Method: GET
        URL: /api/image/src/<gid>/
    """
    thumbnail = thumbnail or 'thumbnail' in request.args or 'thumbnail' in request.form
    ibs = current_app.ibs
    if thumbnail:
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True)
        fresh = fresh or 'fresh' in request.args or 'fresh' in request.form
        if fresh:
            import os
            # BUGFIX: only delete the cached thumbnail if it actually exists,
            # instead of letting os.remove raise OSError on a missing file
            if os.path.exists(gpath):
                os.remove(gpath)
            gpath = ibs.get_image_thumbpath(gid, ensure_paths=True)
    else:
        gpath = ibs.get_image_paths(gid)
    print(gpath)
    return send_file(gpath, mimetype='application/unknown')
@register_api('/api/core/names_with_gids/', methods=['GET'])
def get_names_with_gids(ibs):
    """Map each valid name text to a (nid, gid_list) tuple."""
    nid_list = sorted(ibs.get_valid_nids())
    name_list = ibs.get_name_texts(nid_list)
    gids_list = ibs.get_name_gids(nid_list)
    combined_dict = {}
    for nid, name, gid_list in zip(nid_list, name_list, gids_list):
        combined_dict[name] = (nid, gid_list)
    return combined_dict
@register_route('/api/csv/names_with_gids/', methods=['GET'])
def get_names_with_gids_csv():
    """Send a CSV of NID,NAME,GID... rows for all valid names."""
    ibs = current_app.ibs
    filename = 'names_with_gids.csv'
    combined_dict = get_names_with_gids(ibs)
    combined_list = [
        ','.join(map(str, [nid] + [name] + gid_list))
        # BUGFIX: dict.iteritems() is Python-2-only; items() works on 2 and 3
        for name, (nid, gid_list) in sorted(combined_dict.items())
    ]
    combined_str = '\n'.join(combined_list)
    # The widest gid list determines how many GID columns the header needs
    max_length = 0
    for nid_gids in combined_dict.values():
        max_length = max(max_length, len(nid_gids[1]))
    if max_length == 1:
        gid_header_str = 'GID'
    else:
        gid_header_str = ','.join(['GID%d' % (i + 1, ) for i in range(max_length)])
    combined_str = 'NID,NAME,%s\n' % (gid_header_str, ) + combined_str
    return ap.send_csv_file(combined_str, filename)
@register_route('/api/csv/image_info/', methods=['GET'])
def get_image_info():
    """Send a CSV with one row per image: metadata plus its annotation names.

    Columns: GID,FILENAME,TIMESTAMP,GPSLAT,GPSLON,PARTY,CONTRIBUTOR,NAME...
    """
    import datetime
    ibs = current_app.ibs
    filename = 'image_info.csv'
    gid_list = sorted(ibs.get_valid_gids())
    gname_list = ibs.get_image_gnames(gid_list)
    datetime_list = ibs.get_image_unixtime(gid_list)
    datetime_list_ = [
        datetime.datetime.fromtimestamp(datetime_).strftime('%Y-%m-%d %H:%M:%S')
        for datetime_ in datetime_list
    ]
    lat_list = ibs.get_image_lat(gid_list)
    lon_list = ibs.get_image_lon(gid_list)
    note_list = ibs.get_image_notes(gid_list)
    party_list = []
    contributor_list = []
    for note in note_list:
        # Notes are expected to look like 'party,contributor[,...]'
        try:
            note = note.split(',')
            party, contributor = note[:2]
            party_list.append(party)
            contributor_list.append(contributor)
        except Exception:
            # BUGFIX: narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; malformed notes become UNKNOWN
            party_list.append('UNKNOWN')
            contributor_list.append('UNKNOWN')
    zipped_list = zip(gid_list, gname_list, datetime_list_, lat_list, lon_list,
                      party_list, contributor_list)
    aids_list = ibs.get_image_aids(gid_list)
    names_list = [ibs.get_annot_name_texts(aid_list) for aid_list in aids_list]
    combined_list = [
        ','.join(map(str, list(zipped) + name_list))
        for zipped, name_list in zip(zipped_list, names_list)
    ]
    # The widest name list determines how many NAME columns the header needs
    max_length = 0
    for name_list in names_list:
        max_length = max(max_length, len(name_list))
    if max_length == 1:
        name_header_str = 'NAME'
    else:
        name_header_str = ','.join(['NAME%d' % (i + 1, ) for i in range(max_length)])
    combined_str = '\n'.join(combined_list)
    combined_str = 'GID,FILENAME,TIMESTAMP,GPSLAT,GPSLON,PARTY,CONTRIBUTOR,%s\n' % (name_header_str, ) + combined_str
    return ap.send_csv_file(combined_str, filename)
@register_route('/api/csv/demographics/', methods=['GET'])
def get_demographic_info():
    """Send a CSV of NID,NAME,SEX,AGE rows for all valid names.

    AGE is a coarse bin derived from the per-annotation age estimates
    (in months): YEARLING, '1 YEARS', '2 YEARS', '3+ YEARS', AMBIGUOUS
    (conflicting estimates), UNREVIEWED (no / sentinel estimates), or
    UNKNOWN (estimates that fit no bin).
    """
    ibs = current_app.ibs
    filename = 'demographics.csv'
    nid_list = sorted(ibs.get_valid_nids())
    name_list = ibs.get_name_texts(nid_list)
    sex_list = ibs.get_name_sex_text(nid_list)
    min_ages_list = ibs.get_name_age_months_est_min(nid_list)
    max_ages_list = ibs.get_name_age_months_est_max(nid_list)
    age_list = []
    for min_ages, max_ages in zip(min_ages_list, max_ages_list):
        # Conflicting per-annotation estimates cannot be binned
        if len(set(min_ages)) > 1 or len(set(max_ages)) > 1:
            age_list.append('AMBIGUOUS')
            continue
        min_age = min_ages[0] if len(min_ages) > 0 else None
        max_age = max_ages[0] if len(max_ages) > 0 else None
        # Histogram
        # BUGFIX: compare the -1 sentinel with ==, not `is` — identity on
        # small ints is a CPython caching artifact, not a guarantee
        if (min_age is None and max_age is None) or (min_age == -1 and max_age == -1):
            age_list.append('UNREVIEWED')
            continue
        # Bins.  None bounds are treated as open-ended; the explicit None
        # checks replace implicit Python-2-only `None < int` comparisons.
        if (min_age is None or min_age < 12) and (max_age is None or max_age < 12):
            age_list.append('YEARLING')
        elif (min_age is not None and 12 <= min_age < 24 and
              max_age is not None and 12 <= max_age < 24):
            age_list.append('1 YEARS')
        elif (min_age is not None and 24 <= min_age < 36 and
              max_age is not None and 24 <= max_age < 36):
            age_list.append('2 YEARS')
        elif (min_age is not None and 36 <= min_age and
              (max_age is None or 36 <= max_age)):
            age_list.append('3+ YEARS')
        else:
            age_list.append('UNKNOWN')
    zipped_list = zip(nid_list, name_list, sex_list, age_list)
    combined_list = [
        ','.join(map(str, list(zipped)))
        for zipped in zipped_list
    ]
    combined_str = '\n'.join(combined_list)
    combined_str = 'NID,NAME,SEX,AGE\n' + combined_str
    return ap.send_csv_file(combined_str, filename)
@register_api('/api/core/gids_with_aids/', methods=['GET'])
def get_gid_with_aids(ibs):
    """Map every valid image id to the list of its annotation ids."""
    gid_list = ibs.get_valid_gids()
    aids_list = ibs.get_image_aids(gid_list)
    return dict(zip(gid_list, aids_list))
@register_route('/api/csv/gids_with_aids/', methods=['GET'])
def get_gid_with_aids_csv():
    """Send a CSV of GID,AID... rows for all valid images."""
    ibs = current_app.ibs
    combined_dict = get_gid_with_aids(ibs)
    filename = 'gids_with_aids.csv'
    combined_list = [
        ','.join(map(str, [gid] + aid_list))
        # BUGFIX: dict.iteritems() is Python-2-only; items() works on 2 and 3
        for gid, aid_list in sorted(combined_dict.items())
    ]
    combined_str = '\n'.join(combined_list)
    # The widest aid list determines how many AID columns the header needs
    max_length = 0
    for aid_list in combined_dict.values():
        max_length = max(max_length, len(aid_list))
    if max_length == 1:
        aid_header_str = 'AID'
    else:
        aid_header_str = ','.join(['AID%d' % (i + 1, ) for i in range(max_length)])
    combined_str = 'GID,%s\n' % (aid_header_str, ) + combined_str
    return ap.send_csv_file(combined_str, filename)
@register_route('/api/csv/image/', methods=['GET'])
def get_gid_list_csv():
    """Send a one-column CSV listing all valid image ids."""
    filename = 'gids.csv'
    ibs = current_app.ibs
    rows = [str(gid) for gid in ibs.get_valid_gids()]
    return_str = 'GID\n' + '\n'.join(rows)
    return ap.send_csv_file(return_str, filename)
@register_route('/api/csv/annot/', methods=['GET'])
def get_aid_list_csv():
    """Send a one-column CSV listing all valid annotation ids."""
    filename = 'aids.csv'
    ibs = current_app.ibs
    rows = [str(aid) for aid in ibs.get_valid_aids()]
    return_str = 'AID\n' + '\n'.join(rows)
    return ap.send_csv_file(return_str, filename)
@register_route('/api/image/view/<gid>/', methods=['GET'])
def image_view_api(gid=None, thumbnail=True, fresh=False, **kwargs):
    r"""
    Return an HTML page displaying the base64 encoded image of image <gid>.

    RESTful:
        Method: GET
        URL: /api/image/view/<gid>/
    """
    # Fetch the encoded image and hand it to the 'single' template
    encoded = image_src(gid, thumbnail=thumbnail, fresh=fresh, **kwargs)
    return ap.template(None, 'single', encoded=encoded)
@register_api('/api/image/zip', methods=['POST'])
def image_upload_zip(**kwargs):
    r"""
    Returns the gid_list for image files submitted in a ZIP archive. The image
    archive should be flat (no folders will be scanned for images) and must be smaller
    than 100 MB. The archive can submit multiple images, ideally in JPEG format to save
    space. Duplicate image uploads will result in the duplicate images receiving
    the same gid based on the hashed pixel values.

    Args:
        image_zip_archive (binary): the POST variable containing the binary
            (multi-form) image archive data
        **kwargs: Arbitrary keyword arguments; the kwargs are passed down to
            the add_images function

    Returns:
        gid_list (list if rowids): the list of gids corresponding to the images
            submitted. The gids correspond to the image names sorted in
            lexigraphical order.

    RESTful:
        Method: POST
        URL: /api/image/zip
    """
    ibs = current_app.ibs
    # Get image archive
    image_archive = request.files.get('image_zip_archive', None)
    if image_archive is None:
        raise IOError('Image archive not given')
    # Pick a unique, timestamped extraction directory inside the uploads dir
    uploads_path = ibs.get_uploadsdir()
    ut.ensuredir(uploads_path)
    current_time = time.strftime('%Y_%m_%d_%H_%M_%S')
    modifier = 1
    # BUGFIX: the uniqueness probe used to call exists() on the bare directory
    # name (resolved relative to the CWD) instead of the path inside
    # uploads_path, so collisions were never actually detected
    upload_path = join(uploads_path, current_time)
    while exists(upload_path):
        upload_path = join(uploads_path, '%s_%04d' % (current_time, modifier))
        modifier += 1
    ut.ensuredir(upload_path)
    # Extract the content
    try:
        with zipfile.ZipFile(image_archive, 'r') as zfile:
            zfile.extractall(upload_path)
    except Exception:
        # Clean up the partial extraction before failing
        ut.remove_dirs(upload_path)
        raise IOError('Image archive extraction failed')
    # Only images directly inside the extraction dir are imported (flat archive)
    gpath_list = sorted(ut.list_images(upload_path, recursive=False, full=True))
    gid_list = ibs.add_images(gpath_list, **kwargs)
    return gid_list
@register_api('/api/image/json/', methods=['POST'])
def add_images_json(ibs, image_uri_list, image_uuid_list, image_width_list,
                    image_height_list, image_orig_name_list=None, image_ext_list=None,
                    image_time_posix_list=None, image_gps_lat_list=None,
                    image_gps_lon_list=None, image_notes_list=None, **kwargs):
    """
    Add images to IBEIS from URIs, returning their UUIDs.

    REST:
        Method: POST
        URL: /api/image/json/

    Ignore:
        sudo pip install boto

    Args:
        image_uri_list (list) : list of string image uris, most likely HTTP(S) or S3
            encoded URLs. Alternatively, this can be a list of dictionaries (JSON
            objects) that specify AWS S3 stored assets. An example below:

                image_uri_list = [
                    'http://domain.com/example/asset1.png',
                    '/home/example/Desktop/example/asset2.jpg',
                    's3://s3.amazon.com/example-bucket-2/asset1-in-bucket-2.tif',
                    {
                        'bucket'          : 'example-bucket-1',
                        'key'             : 'example/asset1.png',
                        'auth_domain'     : None,  # Uses localhost
                        'auth_access_id'  : None,  # Uses system default
                        'auth_secret_key' : None,  # Uses system default
                    },
                    {
                        'bucket' : 'example-bucket-1',
                        'key'    : 'example/asset2.jpg',
                        # if unspecified, auth uses localhost and system defaults
                    },
                    {
                        'bucket'          : 'example-bucket-2',
                        'key'             : 'example/asset1-in-bucket-2.tif',
                        'auth_domain'     : 's3.amazon.com',
                        'auth_access_id'  : '____________________',
                        'auth_secret_key' : '________________________________________',
                    },
                ]

            Note that you cannot specify AWS authentication access ids or secret keys
            using string uri's. For specific authentication methods, please use the
            latter list of dictionaries.
        image_uuid_list (list of str) : list of image UUIDs to be used in IBEIS IA
        image_width_list (list of int) : list of image widths
        image_height_list (list of int) : list of image heights
        image_orig_name_list (list of str): list of original image names
        image_ext_list (list of str): list of original image names
        image_time_posix_list (list of int): list of image's POSIX timestamps
        image_gps_lat_list (list of float): list of image's GPS latitude values
        image_gps_lon_list (list of float): list of image's GPS longitude values
        image_notes_list (list of str) : optional list of any related notes with
            the images
        **kwargs : key-value pairs passed to the ibs.add_images() function.

    Returns:
        list: UUIDs of the added images (resolved back from the new gids).

    CommandLine:
        python -m ibeis.web.app --test-add_images_json

    Example:
        >>> # WEB_DOCTEST
        >>> from ibeis.control.IBEISControl import *  # NOQA
        >>> import ibeis
        >>> web_instance = ibeis.opendb(db='testdb1')
        >>> _payload = {
        >>>     'image_uri_list': [
        >>>         'https://upload.wikimedia.org/wikipedia/commons/4/49/Zebra_running_Ngorongoro.jpg',
        >>>         {
        >>>             'bucket'          : 'test-asset-store',
        >>>             'key'             : 'caribwhale/20130903-JAC-0002.JPG',
        >>>         },
        >>>     ],
        >>>     'image_uuid_list': [
        >>>         uuid.UUID('7fea8101-7dec-44e3-bf5d-b8287fd231e2'),
        >>>         uuid.UUID('c081119a-e08e-4863-a710-3210171d27d6'),
        >>>     ],
        >>>     'image_width_list': [
        >>>         1992,
        >>>         1194,
        >>>     ],
        >>>     'image_height_list': [
        >>>         1328,
        >>>         401,
        >>>     ],
        >>> }
        >>> gid_list = ibeis.web.app.add_images_json(web_instance, **_payload)
        >>> print(gid_list)
        >>> print(web_instance.get_image_uuids(gid_list))
        >>> print(web_instance.get_image_uris(gid_list))
        >>> print(web_instance.get_image_paths(gid_list))
        >>> print(web_instance.get_image_uris_original(gid_list))
    """
    def _get_standard_ext(gpath):
        # Normalize the file extension: lower-case and '.jpeg' -> '.jpg'
        ext = splitext(gpath)[1].lower()
        return '.jpg' if ext == '.jpeg' else ext

    def _parse_imageinfo(index):
        # Build the add_images param tuple for image `index` out of the
        # parallel input lists
        def _resolve_uri():
            list_ = image_uri_list
            if list_ is None or index >= len(list_) or list_[index] is None:
                raise ValueError('Must specify all required fields')
            value = list_[index]
            if isinstance(value, dict):
                # AWS S3 dict specs are serialized into a single URI string
                value = ut.s3_dict_encode_to_str(value)
            return value

        def _resolve(list_, default='', assert_=False):
            # Fetch list_[index], falling back to `default` (or raising when
            # the field is required)
            if list_ is None or index >= len(list_) or list_[index] is None:
                if assert_:
                    raise ValueError('Must specify all required fields')
                return default
            return list_[index]

        uri = _resolve_uri()
        orig_gname = basename(uri)
        ext = _get_standard_ext(uri)
        uuid_ = _resolve(image_uuid_list, assert_=True)
        if isinstance(uuid_, six.string_types):
            uuid_ = uuid.UUID(uuid_)
        param_tup = (
            uuid_,
            uri,
            uri,
            _resolve(image_orig_name_list, default=orig_gname),
            _resolve(image_ext_list, default=ext),
            int(_resolve(image_width_list, assert_=True)),
            int(_resolve(image_height_list, assert_=True)),
            int(_resolve(image_time_posix_list, default=-1)),
            float(_resolve(image_gps_lat_list, default=-1.0)),
            float(_resolve(image_gps_lon_list, default=-1.0)),
            _resolve(image_notes_list),
        )
        return param_tup

    # TODO: FIX ME SO THAT WE DON'T HAVE TO LOCALIZE EVERYTHING
    kwargs['auto_localize'] = kwargs.get('auto_localize', True)
    kwargs['sanitize'] = kwargs.get('sanitize', False)
    index_list = range(len(image_uri_list))
    params_gen = ut.generate(_parse_imageinfo, index_list, adjust=True,
                             force_serial=True, **kwargs)
    params_gen = list(params_gen)
    # NOTE(review): param_tup[0] is the UUID, not a path — presumably
    # add_images() keys off params_list here rather than gpath_list;
    # confirm against ibs.add_images
    gpath_list = [ _[0] for _ in params_gen ]
    gid_list = ibs.add_images(gpath_list, params_list=params_gen, **kwargs)  # NOQA
    # return gid_list
    # Respond with UUIDs (stable across databases) rather than raw rowids
    image_uuid_list = ibs.get_image_uuids(gid_list)
    return image_uuid_list
@register_api('/api/annot/json/', methods=['POST'])
def add_annots_json(ibs, image_uuid_list, annot_uuid_list, annot_bbox_list,
                    annot_theta_list=None, annot_species_list=None,
                    annot_name_list=None, annot_notes_list=None, **kwargs):
    """
    Add annotations to IBEIS images (looked up by UUID), returning the new
    annotation UUIDs.

    REST:
        Method: POST
        URL: /api/annot/json/

    Ignore:
        sudo pip install boto

    Args:
        image_uuid_list (list of str) : list of image UUIDs to be used in IBEIS IA
        annot_uuid_list (list of str) : list of annotations UUIDs to be used in IBEIS IA
        annot_bbox_list (list of 4-tuple) : list of bounding box coordinates encoded as
            a 4-tuple of the values (xtl, ytl, width, height) where xtl is the
            'top left corner, x value' and ytl is the 'top left corner, y value'.
        annot_theta_list (list of float) : list of radian rotation around center.
            Defaults to 0.0 (no rotation).
        annot_species_list (list of str) : list of species for the annotation, if known.
            If the list is partially known, use None (null in JSON) for unknown entries.
        annot_name_list (list of str) : list of names for the annotation, if known.
            If the list is partially known, use None (null in JSON) for unknown entries.
        annot_notes_list (list of str) : list of notes to be added to the annotation.
        **kwargs : key-value pairs passed to the ibs.add_annots() function.

    Returns:
        list: UUIDs of the added annotations (resolved back from the new aids).

    CommandLine:
        python -m ibeis.web.app --test-add_annots_json

    Example:
        >>> import ibeis
        >>> from ibeis.control.IBEISControl import *  # NOQA
        >>> web_instance = ibeis.opendb(db='testdb1')
        >>> _payload = {
        >>>     'image_uuid_list': [
        >>>         uuid.UUID('7fea8101-7dec-44e3-bf5d-b8287fd231e2'),
        >>>         uuid.UUID('c081119a-e08e-4863-a710-3210171d27d6'),
        >>>     ],
        >>>     'annot_uuid_list': [
        >>>         uuid.UUID('fe1547c5-1425-4757-9b8f-b2b4a47f552d'),
        >>>         uuid.UUID('86d3959f-7167-4822-b99f-42d453a50745'),
        >>>     ],
        >>>     'annot_bbox_list': [
        >>>         [0, 0, 1992, 1328],
        >>>         [0, 0, 1194, 401],
        >>>     ],
        >>> }
        >>> aid_list = ibeis.web.app.add_annots_json(web_instance, **_payload)
        >>> print(aid_list)
        >>> print(web_instance.get_annot_image_uuids(aid_list))
        >>> print(web_instance.get_annot_uuids(aid_list))
        >>> print(web_instance.get_annot_bboxes(aid_list))
    """
    # Accept UUIDs either as strings or as uuid.UUID instances
    image_uuid_list = [
        uuid.UUID(uuid_) if isinstance(uuid_, six.string_types) else uuid_
        for uuid_ in image_uuid_list
    ]
    annot_uuid_list = [
        uuid.UUID(uuid_) if isinstance(uuid_, six.string_types) else uuid_
        for uuid_ in annot_uuid_list
    ]
    # Resolve the parent image rowids, then add the annotations
    gid_list = ibs.get_image_gids_from_uuid(image_uuid_list)
    aid_list = ibs.add_annots(gid_list, annot_uuid_list=annot_uuid_list,  # NOQA
                              bbox_list=annot_bbox_list, theta_list=annot_theta_list,
                              species_list=annot_species_list, name_list=annot_name_list,
                              notes_list=annot_notes_list, **kwargs)
    # return aid_list
    # Respond with UUIDs (stable across databases) rather than raw rowids
    annot_uuid_list = ibs.get_annot_uuids(aid_list)
    return annot_uuid_list
@register_api('/api/image/json/', methods=['DELETE'])
def delete_images_json(ibs, image_uuid_list):
    """
    Delete the images with the given UUIDs from IBEIS.

    REST:
        Method: DELETE
        URL: /api/image/json/

    Args:
        image_uuid_list (list of str) : list of image UUIDs to be delete from IBEIS

    Returns:
        bool: always True (failures propagate as exceptions)
    """
    # Resolve UUIDs to internal image rowids, then delete
    gid_list = ibs.get_image_gids_from_uuid(image_uuid_list)
    ibs.delete_images(gid_list)
    return True
@register_api('/api/annot/json/', methods=['DELETE'])
def delete_annots_json(ibs, annot_uuid_list):
    """
    Delete the annotations with the given UUIDs from IBEIS.

    REST:
        Method: DELETE
        URL: /api/annot/json/

    Args:
        annot_uuid_list (list of str) : list of annot UUIDs to be delete from IBEIS

    Returns:
        bool: always True (failures propagate as exceptions)
    """
    # Resolve UUIDs to internal annotation rowids, then delete
    aid_list = ibs.get_annot_aids_from_uuid(annot_uuid_list)
    ibs.delete_annots(aid_list)
    return True
@register_api('/api/image/', methods=['POST'])
def image_upload(cleanup=True, **kwargs):
    r"""
    Returns the gid for an uploaded image.

    Args:
        image (image binary): the POST variable containing the binary
            (multi-form) image data
        cleanup (bool): if True, remove the temporary upload file after the
            image has been added to the database
        **kwargs: Arbitrary keyword arguments; the kwargs are passed down to
            the add_images function

    Returns:
        gid (rowids): gid corresponding to the image submitted.
            lexigraphical order.

    RESTful:
        Method: POST
        URL: /api/image/
    """
    ibs = current_app.ibs
    print('request.files = %s' % (request.files,))
    filestore = request.files.get('image', None)
    if filestore is None:
        raise IOError('Image not given')
    # Save the upload under a unique timestamped name in the uploads dir
    uploads_path = ibs.get_uploadsdir()
    ut.ensuredir(uploads_path)
    current_time = time.strftime('%Y_%m_%d_%H_%M_%S')
    modifier = 1
    upload_filename = 'upload_%s.png' % (current_time)
    # BUGFIX: probe for name collisions inside the uploads directory; the
    # old code called exists() on the bare filename (resolved relative to
    # the CWD), so collisions were never actually detected
    while exists(join(uploads_path, upload_filename)):
        upload_filename = 'upload_%s_%04d.png' % (current_time, modifier)
        modifier += 1
    upload_filepath = join(uploads_path, upload_filename)
    filestore.save(upload_filepath)
    gid_list = ibs.add_images([upload_filepath], **kwargs)
    gid = gid_list[0]
    if cleanup:
        # NOTE(review): ut.remove_dirs is called on a file path here —
        # presumably it also handles plain files; confirm against utool
        ut.remove_dirs(upload_filepath)
    return gid
@register_api('/api/core/helloworld/', methods=['GET', 'POST', 'DELETE', 'PUT'])
def hello_world(*args, **kwargs):
    """
    Debugging endpoint: dumps the received args/kwargs and the full request
    state to stdout; the response body is empty (returns None).

    CommandLine:
        python -m ibeis.web.app --exec-hello_world

    Example:
        >>> # SCRIPT
        >>> from ibeis.web.app import *  # NOQA
        >>> import ibeis
        >>> web_ibs = ibeis.opendb_bg_web(browser=True, start_job_queue=False, url_suffix='/api/core/helloworld/')
    """
    print('------------------ HELLO WORLD ------------------')
    print('Args: %r' % (args,))
    print('Kwargs: %r' % (kwargs,))
    print('request.args: %r' % (request.args,))
    print('request.form: %r' % (request.form,))
    print('request.url; %r' % (request.url,))
    print('request.environ: %s' % (ut.repr3(request.environ),))
    print('request: %s' % (ut.repr3(request.__dict__),))
# (endpoint name, display label) pairs accepted by the group-review pages
VALID_TURK_MODES = [
    ('turk_viewpoint', 'Viewpoint'),
    ('turk_quality', 'Quality'),
]
@register_route('/group_review/')
def group_review():
    """Render the group-review page.

    Candidate annotations come from one of three sources:
      * ?prefill=...   — run the species/viewpoint CNN validator over all
        annotations and propose the flagged ones;
      * ?aid_list=[..] — an explicit literal list of annotation ids;
      * otherwise      — no candidates (empty string).
    """
    prefill = request.args.get('prefill', '')
    if len(prefill) > 0:
        ibs = current_app.ibs
        aid_list = ibs.get_valid_aids()
        bad_species_list, bad_viewpoint_list = ibs.validate_annot_species_viewpoint_cnn(aid_list)
        GROUP_BY_PREDICTION = True
        if GROUP_BY_PREDICTION:
            # Group flagged annots by the value in column 3 of each entry,
            # then flatten the groups — presumably ordered by group size via
            # ut.sortedby; confirm against utool
            grouped_dict = ut.group_items(bad_viewpoint_list, ut.get_list_column(bad_viewpoint_list, 3))
            grouped_list = grouped_dict.values()
            regrouped_items = ut.flatten(ut.sortedby(grouped_list, map(len, grouped_list)))
            candidate_aid_list = ut.get_list_column(regrouped_items, 0)
        else:
            candidate_aid_list = [ bad_viewpoint[0] for bad_viewpoint in bad_viewpoint_list]
    elif request.args.get('aid_list', None) is not None:
        # Parse a literal list like '[1, 2, 3]' from the query string
        aid_list = request.args.get('aid_list', '')
        if len(aid_list) > 0:
            aid_list = aid_list.replace('[', '')
            aid_list = aid_list.replace(']', '')
            aid_list = aid_list.strip().split(',')
            candidate_aid_list = [ int(aid_.strip()) for aid_ in aid_list ]
        else:
            candidate_aid_list = ''
    else:
        candidate_aid_list = ''
    return ap.template(None, 'group_review', candidate_aid_list=candidate_aid_list, mode_list=VALID_TURK_MODES)
@register_route('/group_review/submit/', methods=['POST'])
def group_review_submit():
    """
    Handle the group-review form: either bounce back to the referring page
    with CNN prefill enabled, or build a src/dst annotgroup review pair and
    redirect to the selected turking mode.

    CommandLine:
        python -m ibeis.web.app --exec-group_review_submit

    Example:
        >>> # UNSTABLE_DOCTEST
        >>> from ibeis.web.app import *  # NOQA
        >>> import ibeis
        >>> import ibeis.web
        >>> ibs = ibeis.opendb('testdb1')
        >>> aid_list = ibs.get_valid_aids()[::2]
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    method = request.form.get('group-review-submit', '')
    if method.lower() == 'populate':
        # Redirect back to the referrer with prefill=true appended
        redirection = request.referrer
        if 'prefill' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&prefill=true' % (redirection, )
            else:
                redirection = '%s?prefill=true' % (redirection, )
        return redirect(redirection)
    # Parse a literal list like '[1, 2, 3]' posted from the form
    aid_list = request.form.get('aid_list', '')
    if len(aid_list) > 0:
        aid_list = aid_list.replace('[', '')
        aid_list = aid_list.replace(']', '')
        aid_list = aid_list.strip().split(',')
        aid_list = [ int(aid_.strip()) for aid_ in aid_list ]
    else:
        aid_list = []
    src_ag, dst_ag = ibs.prepare_annotgroup_review(aid_list)
    # The requested mode must be one of the registered turk endpoints
    valid_modes = ut.get_list_column(VALID_TURK_MODES, 0)
    mode = request.form.get('group-review-mode', None)
    assert mode in valid_modes
    return redirect(url_for(mode, src_ag=src_ag, dst_ag=dst_ag))
@register_route('/ajax/annotation/src/<aid>')
def annotation_src(aid=None):
    """Serve the chip image for annotation <aid>."""
    ibs = current_app.ibs
    chip_fpath = ibs.get_annot_chip_fpath(aid)
    return ap.return_src(chip_fpath)
@register_api('/api/annot/<aid>/', methods=['GET'])
def annotation_src_api(aid=None):
    r"""
    Return the base64 encoded image of annotation <aid>.

    RESTful:
        Method: GET
        URL: /api/annot/<aid>/
    """
    # Thin wrapper over the shared annotation_src helper
    return annotation_src(aid)
@register_route('/display/sightings')
def display_sightings(html_encode=True):
    """Render the sightings report, optionally with HTML line breaks."""
    ibs = current_app.ibs
    complete = request.args.get('complete', None) is not None
    sightings = ibs.report_sightings_str(complete=complete, include_images=True)
    if html_encode:
        return sightings.replace('\n', '<br/>')
    return sightings
@register_route('/download/sightings')
def download_sightings():
    """Send the sightings report as a CSV download."""
    csv_text = display_sightings(html_encode=False)
    return ap.send_csv_file(csv_text, 'sightings.csv')
@register_route('/graph/sightings')
def graph_sightings():
    # Graphing is handled by the generic 'view' page; just redirect there
    return redirect(url_for('view'))
@register_route('/dbinfo')
def dbinfo():
    """Render the database info string as preformatted HTML."""
    try:
        ibs = current_app.ibs
        dbinfo_str = ibs.get_dbinfo_str()
    except Exception:
        # BUGFIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); fall back to an empty report
        dbinfo_str = ''
    dbinfo_str_formatted = '<pre>%s</pre>' % (dbinfo_str, )
    return dbinfo_str_formatted
@register_route('/api')
def api():
    """Render the API index page: every /api/ route grouped by HTTP method."""
    rules = current_app.url_map.iter_rules()
    rule_dict = {}
    for rule in rules:
        url = str(rule)
        if '/api/' in url:
            # BUGFIX: build a fresh set instead of `methods -= ...`, which
            # mutated the shared rule.methods set on the live routing map
            methods = set(rule.methods) - set(['HEAD', 'OPTIONS'])
            if len(methods) == 0:
                continue
            if len(methods) > 1:
                print('methods = %r' % (methods,))
            # Only the first remaining method is displayed per rule
            method = list(methods)[0]
            rule_dict.setdefault(method, []).append((method, url, ))
    for method in rule_dict.keys():
        rule_dict[method].sort()
    url = '%s/api/core/dbname/' % (current_app.server_url, )
    app_auth = controller_inject.get_url_authorization(url)
    return ap.template(None, 'api',
                       app_url=url,
                       app_name=controller_inject.GLOBAL_APP_NAME,
                       app_secret=controller_inject.GLOBAL_APP_SECRET,
                       app_auth=app_auth,
                       rule_list=rule_dict)
@register_route('/upload')
def upload():
    # Render the static image-upload page
    return ap.template(None, 'upload')
@register_route('/404')
def error404(exception=None):
    """Render the 404 error page with the exception and current traceback.

    NOTE(review): traceback.format_exc() only yields a real traceback when
    called while an exception is being handled; otherwise it formats None.
    """
    import traceback
    exception_str = str(exception)
    traceback_str = str(traceback.format_exc())
    print('[web] %r' % (exception_str, ))
    print('[web] %r' % (traceback_str, ))
    return ap.template(None, '404', exception_str=exception_str,
                       traceback_str=traceback_str)
################################################################################
def test_html_error():
    r"""
    This test will show what our current errors look like

    CommandLine:
        python -m ibeis.web.app --exec-test_html_error

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.web.app import *  # NOQA
        >>> import ibeis
        >>> web_ibs = ibeis.opendb_bg_web(browser=True, start_job_queue=False, url_suffix='/api/image/imagesettext/?__format__=True')
    """
    # Intentionally empty: the doctest above exercises the error rendering
    pass
def start_tornado(ibs, port=None, browser=None, url_suffix=None):
    """
    Initialize the web server.  Blocks until the tornado IOLoop stops.

    Args:
        ibs: IBEIS controller, attached to the Flask app as app.ibs
        port (int): port to listen on; defaults to DEFAULT_WEB_API_PORT
        browser (bool): open a web browser on the server URL; defaults to
            the --browser command line flag
        url_suffix (str): appended to the URL opened in the browser
    """
    if browser is None:
        browser = ut.get_argflag('--browser')
    if url_suffix is None:
        url_suffix = ''

    def _start_tornado(ibs_, port_):
        # Get Flask app
        app = controller_inject.get_flask_app()
        app.ibs = ibs_
        # Try to ascertain the socket's domain name
        try:
            app.server_domain = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            # Fall back to loopback when the hostname cannot be resolved
            app.server_domain = '127.0.0.1'
        app.server_port = port_
        # URL for the web instance
        app.server_url = 'http://%s:%s' % (app.server_domain, app.server_port)
        print('[web] Tornado server starting at %s' % (app.server_url,))
        # Launch the web browser to view the web interface and API
        if browser:
            url = app.server_url + url_suffix
            import webbrowser
            print('[web] opening browser with url = %r' % (url,))
            webbrowser.open(url)
        # Start the tornado web handler
        # WSGI = Web Server Gateway Interface
        # WSGI is Python standard described in detail in PEP 3333
        http_server = tornado.httpserver.HTTPServer(
            tornado.wsgi.WSGIContainer(app))
        http_server.listen(app.server_port)
        # Blocks here until the IOLoop is stopped
        tornado.ioloop.IOLoop.instance().start()
    # Set logging level
    logging.getLogger().setLevel(logging.INFO)
    # Get the port if unspecified
    if port is None:
        port = DEFAULT_WEB_API_PORT
    # Launch the web handler
    _start_tornado(ibs, port)
def start_from_ibeis(ibs, port=None, browser=None, precache=None,
                     url_suffix=None, start_job_queue=True):
    """
    Parse command line options and start the server.

    Args:
        ibs: IBEIS controller to serve
        port (int): web server port (see start_tornado)
        browser (bool): open a browser on startup (see start_tornado)
        precache (bool): precompute thumbnails and chips before serving;
            defaults to the --precache command line flag
        url_suffix (str): appended to the browser URL
        start_job_queue (bool): start the ZMQ job manager before serving

    CommandLine:
        python -m ibeis --db PZ_MTEST --web
        python -m ibeis --db PZ_MTEST --web --browser
    """
    print('[web] start_from_ibeis()')
    if precache is None:
        precache = ut.get_argflag('--precache')
    if precache:
        print('[web] Pre-computing all image thumbnails (with annots)...')
        ibs.preprocess_image_thumbs()
        print('[web] Pre-computing all image thumbnails (without annots)...')
        ibs.preprocess_image_thumbs(draw_annots=False)
        print('[web] Pre-computing all annotation chips...')
        ibs.check_chip_existence()
        ibs.compute_all_chips()
    if start_job_queue:
        print('[web] opening job manager')
        ibs.load_plugin_module(zmq_task_queue)
        #import time
        #time.sleep(1)
        ibs.initialize_job_manager()
        #time.sleep(10)
    print('[web] starting tornado')
    # Blocks here until the tornado IOLoop terminates
    start_tornado(ibs, port, browser, url_suffix)
    print('[web] closing job manager')
    # NOTE(review): close_job_manager() runs even when start_job_queue is
    # False — confirm this is intended
    ibs.close_job_manager()
if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis.web.app
        python -m ibeis.web.app --allexamples
        python -m ibeis.web.app --allexamples --noface --nosrc
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    # Run this module's doctests when executed directly
    ut.doctest_funcs()
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2015 Mozilla Corporation
import json
import os
import sys
import traceback
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch, SubnetMatch, QueryStringMatch as QSMatch
from mozdef_util.utilities.logger import logger
import geomodel.alert as alert
import geomodel.config as config
import geomodel.locality as locality
# Path to the geomodel alert's JSON configuration, resolved next to this file
_CONFIG_FILE = os.path.join(
    os.path.dirname(__file__),
    'geomodel_location.json')
class AlertGeoModel(AlertTask):
    '''GeoModel alert runs a set of configured queries for events and
    constructs locality state for users performing authenticated actions.
    When activity is found that indicates a potential compromise of an
    account, an alert is produced.
    '''

    def main(self):
        '''Entry point: ensure the locality index exists, then run each
        configured event query in turn.'''
        cfg = self._load_config()

        if not self.es.index_exists('localities'):
            # Map every string field as a non-analyzed keyword so exact
            # matches (e.g. on usernames) behave as expected.
            settings = {
                "mappings": {
                    "_doc": {
                        "dynamic_templates": [
                            {
                                "string_fields": {
                                    "mapping": {
                                        "type": "keyword"
                                    },
                                    "match": "*",
                                    "match_mapping_type": "string"
                                }
                            },
                        ]
                    }
                }
            }
            self.es.create_index('localities', settings)

        for query_index in range(len(cfg.events)):
            try:
                self._process(cfg, query_index)
            except Exception as err:
                # One failing query must not prevent the others running.
                traceback.print_exc(file=sys.stdout)
                logger.error(
                    'Error process events; query="{0}"; error={1}'.format(
                        cfg.events[query_index].lucene_query,
                        err))

    def onAggregation(self, agg):
        '''Update locality state for one aggregated username and return an
        alert dict if suspicious activity is detected, else None.'''
        username = agg['value']
        events = agg['events']
        cfg = agg['config']

        query = locality.wrap_query(self.es)
        journal = locality.wrap_journal(self.es)

        # Localities extracted from the new events; events that do not
        # yield a locality are dropped.
        locs_from_evts = list(filter(
            lambda state: state is not None,
            map(locality.from_event, events)))

        entry_from_es = locality.find(query, username, cfg.localities.es_index)
        new_state = locality.State('locality', username, locs_from_evts)

        if entry_from_es is None:
            # No recorded state for this user yet; start from empty.
            entry_from_es = locality.Entry(
                '', locality.State('locality', username, []))

        # BUGFIX: drop localities older than the configured window BEFORE
        # deciding whether to alert.  Previously outdated entries were
        # removed only after the alert decision, so stale localities could
        # trigger false positives.
        cleaned = locality.remove_outdated(
            entry_from_es.state, cfg.localities.valid_duration_days)

        # Determine if we should trigger an alert before updating the state.
        new_alert = alert.alert(
            cleaned.state.username,
            new_state.localities + cleaned.state.localities)

        updated = locality.update(cleaned.state, new_state)

        if updated.did_update:
            # Persist the merged locality state back to Elasticsearch.
            entry_from_es = locality.Entry(entry_from_es.identifier, updated.state)
            journal(entry_from_es, cfg.localities.es_index)

        if new_alert is not None:
            # TODO: When we update to Python 3.7+, change to asdict(alert_produced)
            # NOTE(review): assumes at least two localities are recorded when
            # an alert fires -- confirm alert.alert() guarantees this.
            summary = "{0} is now active in {1},{2}. Previously {3},{4}".format(
                username,
                entry_from_es.state.localities[-1].city,
                entry_from_es.state.localities[-1].country,
                entry_from_es.state.localities[-2].city,
                entry_from_es.state.localities[-2].country,
            )
            alert_dict = self.createAlertDict(
                summary,
                'geomodel',
                ['geomodel'],
                events,
                'INFO')

            alert_dict['details'] = {
                'username': new_alert.username,
                'sourceipaddress': new_alert.sourceipaddress,
                'origin': dict(new_alert.origin._asdict())
            }

            return alert_dict

        return None

    def _process(self, cfg: config.Config, qindex: int):
        '''Build and run the Elasticsearch query for one configured event
        source, aggregating matching events by username.'''
        evt_cfg = cfg.events[qindex]

        search = SearchQuery(**evt_cfg.search_window)
        search.add_must(QSMatch(evt_cfg.lucene_query))

        # Ignore empty usernames
        search.add_must_not(TermMatch(evt_cfg.username_path, ''))

        # Ignore whitelisted usernames
        for whitelisted_username in cfg.whitelist.users:
            search.add_must_not(TermMatch(evt_cfg.username_path, whitelisted_username))

        # Ignore whitelisted subnets
        for whitelisted_subnet in cfg.whitelist.cidrs:
            search.add_must_not(SubnetMatch('details.sourceipaddress', whitelisted_subnet))

        self.filtersManual(search)
        self.searchEventsAggregated(evt_cfg.username_path, samplesLimit=1000)
        self.walkAggregations(threshold=1, config=cfg)

    def _load_config(self):
        '''Parse geomodel_location.json into a typed config.Config record.'''
        with open(_CONFIG_FILE) as cfg_file:
            cfg = json.load(cfg_file)

            cfg['localities'] = config.Localities(**cfg['localities'])

            cfg['events'] = [
                config.Events(**evt_cfg)
                for evt_cfg in cfg['events']
            ]

            cfg['whitelist'] = config.Whitelist(**cfg['whitelist'])

            return config.Config(**cfg)
Remove outdated localities before making the alert decision, so that stale localities can no longer trigger false alerts (previously they were removed only after alerting).
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2015 Mozilla Corporation
import json
import os
import sys
import traceback
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch, SubnetMatch, QueryStringMatch as QSMatch
from mozdef_util.utilities.logger import logger
import geomodel.alert as alert
import geomodel.config as config
import geomodel.locality as locality
_CONFIG_FILE = os.path.join(
os.path.dirname(__file__),
'geomodel_location.json')
class AlertGeoModel(AlertTask):
    '''GeoModel alert runs a set of configured queries for events and
    constructs locality state for users performing authenticated actions.
    When activity is found that indicates a potential compromise of an
    account, an alert is produced.
    '''

    def main(self):
        # Entry point: make sure the locality index exists, then run each
        # configured event query in turn.
        cfg = self._load_config()

        if not self.es.index_exists('localities'):
            # Map every string field as a non-analyzed keyword so exact
            # matches (e.g. on usernames) behave as expected.
            settings = {
                "mappings": {
                    "_doc": {
                        "dynamic_templates": [
                            {
                                "string_fields": {
                                    "mapping": {
                                        "type": "keyword"
                                    },
                                    "match": "*",
                                    "match_mapping_type": "string"
                                }
                            },
                        ]
                    }
                }
            }
            self.es.create_index('localities', settings)

        for query_index in range(len(cfg.events)):
            try:
                self._process(cfg, query_index)
            except Exception as err:
                # One failing query must not prevent the others running.
                traceback.print_exc(file=sys.stdout)
                logger.error(
                    'Error process events; query="{0}"; error={1}'.format(
                        cfg.events[query_index].lucene_query,
                        err))

    def onAggregation(self, agg):
        # Called once per aggregated username with the events that matched;
        # returns an alert dict, or None if no alert should fire.
        username = agg['value']
        events = agg['events']
        cfg = agg['config']

        query = locality.wrap_query(self.es)
        journal = locality.wrap_journal(self.es)

        # Localities extracted from the new events; events that do not
        # yield a locality are dropped.
        locs_from_evts = list(filter(
            lambda state: state is not None,
            map(locality.from_event, events)))

        entry_from_es = locality.find(query, username, cfg.localities.es_index)
        new_state = locality.State('locality', username, locs_from_evts)

        if entry_from_es is None:
            # No recorded state for this user yet; start from empty.
            entry_from_es = locality.Entry(
                '', locality.State('locality', username, []))

        # Drop stored localities older than the configured window before
        # making the alert decision, so stale activity cannot fire alerts.
        cleaned = locality.remove_outdated(
            entry_from_es.state, cfg.localities.valid_duration_days)

        # Determine if we should trigger an alert before updating the state.
        new_alert = alert.alert(
            cleaned.state.username,
            new_state.localities + cleaned.state.localities)

        updated = locality.update(cleaned.state, new_state)

        if updated.did_update:
            # Persist the merged locality state back to Elasticsearch.
            entry_from_es = locality.Entry(entry_from_es.identifier, updated.state)
            journal(entry_from_es, cfg.localities.es_index)

        if new_alert is not None:
            # TODO: When we update to Python 3.7+, change to asdict(alert_produced)
            # NOTE(review): assumes at least two localities are recorded when
            # an alert fires -- confirm alert.alert() guarantees this.
            summary = "{0} is now active in {1},{2}. Previously {3},{4}".format(
                username,
                entry_from_es.state.localities[-1].city,
                entry_from_es.state.localities[-1].country,
                entry_from_es.state.localities[-2].city,
                entry_from_es.state.localities[-2].country,
            )
            alert_dict = self.createAlertDict(
                summary,
                'geomodel',
                ['geomodel'],
                events,
                'INFO')

            alert_dict['details'] = {
                'username': new_alert.username,
                'sourceipaddress': new_alert.sourceipaddress,
                'origin': dict(new_alert.origin._asdict())
            }

            return alert_dict

        return None

    def _process(self, cfg: config.Config, qindex: int):
        # Build and run the Elasticsearch query for one configured event
        # source, aggregating matching events by username.
        evt_cfg = cfg.events[qindex]

        search = SearchQuery(**evt_cfg.search_window)
        search.add_must(QSMatch(evt_cfg.lucene_query))

        # Ignore empty usernames
        search.add_must_not(TermMatch(evt_cfg.username_path, ''))

        # Ignore whitelisted usernames
        for whitelisted_username in cfg.whitelist.users:
            search.add_must_not(TermMatch(evt_cfg.username_path, whitelisted_username))

        # Ignore whitelisted subnets
        for whitelisted_subnet in cfg.whitelist.cidrs:
            search.add_must_not(SubnetMatch('details.sourceipaddress', whitelisted_subnet))

        self.filtersManual(search)
        self.searchEventsAggregated(evt_cfg.username_path, samplesLimit=1000)
        self.walkAggregations(threshold=1, config=cfg)

    def _load_config(self):
        # Parse geomodel_location.json into typed config records.
        with open(_CONFIG_FILE) as cfg_file:
            cfg = json.load(cfg_file)

            cfg['localities'] = config.Localities(**cfg['localities'])

            cfg['events'] = [
                config.Events(**evt_cfg)
                for evt_cfg in cfg['events']
            ]

            cfg['whitelist'] = config.Whitelist(**cfg['whitelist'])

            return config.Config(**cfg)
|
import swapper
from django.conf import settings
from django.db import models
from accelerator_abstract.models.accelerator_model import AcceleratorModel
class BaseUserDeferrableModal(AcceleratorModel):
    """Abstract link between a user and a deferrable modal dialog.

    Records whether the user has deferred the modal and, when set, the
    timestamp stored in ``deferred_to``.
    """

    # The user this deferral state belongs to.
    user = models.ForeignKey(
        to=settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE)
    # The (swappable) DeferrableModal this record refers to.
    deferrable_modal = models.ForeignKey(
        to=swapper.get_model_name(
            AcceleratorModel.Meta.app_label, 'DeferrableModal'),
        on_delete=models.CASCADE)
    # True once the user has chosen to defer the modal.
    is_deferred = models.BooleanField(default=False)
    # Timestamp associated with the deferral; may be unset (null/blank).
    deferred_to = models.DateTimeField(null=True, blank=True)

    class Meta(AcceleratorModel.Meta):
        abstract = True
        verbose_name = 'User Deferrable Modal'

    def __str__(self):
        return 'User Deferrable Modal: {}'.format(
            self.deferrable_modal.name)
[AC-8416] Address PR review feedback
import swapper
from django.conf import settings
from django.db import models
from accelerator_abstract.models.accelerator_model import AcceleratorModel
class BaseUserDeferrableModal(AcceleratorModel):
    """Abstract link between a user and a deferrable modal dialog.

    Records whether the user has deferred the modal and, when set, the
    timestamp stored in ``deferred_to``.
    """

    # The user this deferral state belongs to.
    user = models.ForeignKey(
        to=settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE)
    # The (swappable) DeferrableModal this record refers to.
    deferrable_modal = models.ForeignKey(
        to=swapper.get_model_name(
            AcceleratorModel.Meta.app_label, 'DeferrableModal'),
        on_delete=models.CASCADE)
    # True once the user has chosen to defer the modal.
    is_deferred = models.BooleanField(default=False)
    # Timestamp associated with the deferral; may be unset (null/blank).
    deferred_to = models.DateTimeField(null=True, blank=True)

    class Meta(AcceleratorModel.Meta):
        abstract = True
        verbose_name = 'User Deferrable Modal'

    def __repr__(self):
        return 'User Deferrable Modal: {}'.format(
            self.deferrable_modal.name)
|
Delete the test directory's __init__ file
|
from django.contrib import admin
from django.http import HttpResponseRedirect
class ButtonAdmin(admin.ModelAdmin):
    """
    A subclass of this admin will let you add buttons (like history) in the
    change view of an entry.

    ex.

    class FooAdmin(ButtonAdmin):
        ...

        def bar(self, request, obj=None):
            if obj != None: obj.bar()
            return None # Redirect or Response or None
        bar.short_description='Example button'

        list_buttons = [ bar ]
        change_buttons = [ bar ]

    you can then put the following in your admin/change_form.html template:

    {% block object-tools %}
    {% if change %}{% if not is_popup %}
    <ul class="object-tools">
    {% for button in buttons %}
    <li><a href="{{ button.func_name }}/">{{ button.short_description }}</a></li>
    {% endfor %}
    <li><a href="history/" class="historylink">History</a></li>
    {% if has_absolute_url %}<li><a href="../../../r/{{ content_type_id }}/{{ object_id }}/" class="viewsitelink">View on site</a></li>{% endif%}
    </ul>
    {% endif %}{% endif %}
    {% endblock %}
    """

    # Buttons shown on the change view / changelist view respectively.
    change_buttons = []
    list_buttons = []

    def _redirect_back(self, request):
        # BUGFIX: HTTP_REFERER may be absent from request.META (e.g. for
        # requests made through Django's test client), so use .get() with a
        # fallback instead of indexing, which raised KeyError.
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', ''))

    def button_view_dispatcher(self, request, url):
        # Dispatch the url to a function call
        if url is not None:
            import re
            # Raw strings for the regexes so '\d' is not an escape sequence.
            res = re.match(r'(.*/)?(?P<id>\d+)/(?P<command>.*)', url)
            if res:
                # "<pk>/<command>" -> a change-view button on one object.
                if res.group('command') in [b.func_name for b in self.change_buttons]:
                    obj = self.model._default_manager.get(pk=res.group('id'))
                    response = getattr(self, res.group('command'))(request, obj)
                    if response is None:
                        return self._redirect_back(request)
                    return response
            else:
                # "<command>" alone -> a changelist-view button.
                res = re.match(r'(.*/)?(?P<command>.*)', url)
                if res:
                    if res.group('command') in [b.func_name for b in self.list_buttons]:
                        response = getattr(self, res.group('command'))(request)
                        if response is None:
                            return self._redirect_back(request)
                        return response

        # Delegate to the appropriate method, based on the URL.
        from django.contrib.admin.util import unquote
        if url is None:
            return self.changelist_view(request)
        elif url == "add":
            return self.add_view(request)
        elif url.endswith('/history'):
            return self.history_view(request, unquote(url[:-8]))
        elif url.endswith('/delete'):
            return self.delete_view(request, unquote(url[:-7]))
        else:
            return self.change_view(request, unquote(url))

    def get_urls(self):
        from django.conf.urls import url, patterns
        from functools import update_wrapper

        # Define a wrapper view that enforces admin permissions.
        def wrap(view):
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)

        # Add the custom button url ahead of the stock admin urls.
        urlpatterns = patterns('', url(r'^(.+)/$', wrap(self.button_view_dispatcher), ))
        return urlpatterns + super(ButtonAdmin, self).get_urls()

    def change_view(self, request, object_id, form_url='', extra_context=None):
        # Expose change_buttons to the template as 'buttons'.
        if not extra_context:
            extra_context = {}
        if hasattr(self, 'change_buttons'):
            extra_context['buttons'] = self._convert_buttons(self.change_buttons)
        # Strip any trailing path component from the object id.
        if '/' in object_id:
            object_id = object_id[:object_id.find('/')]
        return super(ButtonAdmin, self).change_view(request, object_id, form_url, extra_context)

    def changelist_view(self, request, extra_context=None):
        # Expose list_buttons to the template as 'buttons'.
        if not extra_context:
            extra_context = {}
        if hasattr(self, 'list_buttons'):
            extra_context['buttons'] = self._convert_buttons(self.list_buttons)
        return super(ButtonAdmin, self).changelist_view(request, extra_context)

    def _convert_buttons(self, orig_buttons):
        # Flatten button callables into the dicts the template iterates.
        buttons = []
        for b in orig_buttons:
            buttons.append({'func_name': b.func_name, 'short_description': b.short_description})
        return buttons
Subtle bug found and fixed in ButtonAdmin!
Apparently it is possible for HTTP_REFERER to not be set, which caused
the ButtonAdmin class to fail when accessed via Django's test client.
from django.contrib import admin
from django.http import HttpResponseRedirect
class ButtonAdmin(admin.ModelAdmin):
    """
    A subclass of this admin will let you add buttons (like history) in the
    change view of an entry.

    ex.

    class FooAdmin(ButtonAdmin):
        ...

        def bar(self, request, obj=None):
            if obj != None: obj.bar()
            return None # Redirect or Response or None
        bar.short_description='Example button'

        list_buttons = [ bar ]
        change_buttons = [ bar ]

    you can then put the following in your admin/change_form.html template:

    {% block object-tools %}
    {% if change %}{% if not is_popup %}
    <ul class="object-tools">
    {% for button in buttons %}
    <li><a href="{{ button.func_name }}/">{{ button.short_description }}</a></li>
    {% endfor %}
    <li><a href="history/" class="historylink">History</a></li>
    {% if has_absolute_url %}<li><a href="../../../r/{{ content_type_id }}/{{ object_id }}/" class="viewsitelink">View on site</a></li>{% endif%}
    </ul>
    {% endif %}{% endif %}
    {% endblock %}
    """

    # Buttons shown on the change view / changelist view respectively.
    change_buttons = []
    list_buttons = []

    def button_view_dispatcher(self, request, url):
        # Dispatch the url to a function call
        if url is not None:
            import re
            res = re.match('(.*/)?(?P<id>\d+)/(?P<command>.*)', url)
            if res:
                # "<pk>/<command>" -> a change-view button on one object.
                if res.group('command') in [b.func_name for b in self.change_buttons]:
                    obj = self.model._default_manager.get(pk=res.group('id'))
                    response = getattr(self, res.group('command'))(request, obj)
                    if response is None:
                        # HTTP_REFERER may be absent (e.g. via Django's
                        # test client); fall back to an empty redirect.
                        referer = request.META.get('HTTP_REFERER', '')
                        return HttpResponseRedirect(referer)
                    return response
            else:
                # "<command>" alone -> a changelist-view button.
                res = re.match('(.*/)?(?P<command>.*)', url)
                if res:
                    if res.group('command') in [b.func_name for b in self.list_buttons]:
                        response = getattr(self, res.group('command'))(request)
                        if response is None:
                            # Same fallback as above for a missing referer.
                            referer = request.META.get('HTTP_REFERER', '')
                            return HttpResponseRedirect(referer)
                        return response

        # Delegate to the appropriate method, based on the URL.
        from django.contrib.admin.util import unquote
        if url is None:
            return self.changelist_view(request)
        elif url == "add":
            return self.add_view(request)
        elif url.endswith('/history'):
            return self.history_view(request, unquote(url[:-8]))
        elif url.endswith('/delete'):
            return self.delete_view(request, unquote(url[:-7]))
        else:
            return self.change_view(request, unquote(url))

    def get_urls(self):
        from django.conf.urls import url, patterns
        from functools import update_wrapper

        # Define a wrapper view that enforces admin permissions.
        def wrap(view):
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)

        # Add the custom button url ahead of the stock admin urls.
        urlpatterns = patterns('', url(r'^(.+)/$', wrap(self.button_view_dispatcher), ))
        return urlpatterns + super(ButtonAdmin, self).get_urls()

    def change_view(self, request, object_id, form_url='', extra_context=None):
        # Expose change_buttons to the template as 'buttons'.
        if not extra_context:
            extra_context = {}
        if hasattr(self, 'change_buttons'):
            extra_context['buttons'] = self._convert_buttons(self.change_buttons)
        # Strip any trailing path component from the object id.
        if '/' in object_id:
            object_id = object_id[:object_id.find('/')]
        return super(ButtonAdmin, self).change_view(request, object_id, form_url, extra_context)

    def changelist_view(self, request, extra_context=None):
        # Expose list_buttons to the template as 'buttons'.
        if not extra_context:
            extra_context = {}
        if hasattr(self, 'list_buttons'):
            extra_context['buttons'] = self._convert_buttons(self.list_buttons)
        return super(ButtonAdmin, self).changelist_view(request, extra_context)

    def _convert_buttons(self, orig_buttons):
        # Flatten button callables into the dicts the template iterates.
        buttons = []
        for b in orig_buttons:
            buttons.append({'func_name': b.func_name, 'short_description': b.short_description})
        return buttons
|
#!/usr/bin/env python
# Mosflm.py
# Copyright (C) 2006 CCLRC, Graeme Winter
#
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
#
# 23rd June 2006
#
# A wrapper for the data processing program Mosflm, with the following
# methods to provide functionality:
#
# index: autoindexing functionality (implemented)
# integrate: process a frame or a dataset (implemented)
#
# Internally this will also require cell refinement and so on, but this
# will be implicit - the cell refinement is a local requirement for
# mosflm only - though this will provide some useful functionality
# for diagnosing wrong indexing solutions.
#
# Input requirements:
#
# At the minimum the indexing needs a beam centre, some images to index
# from and the template and directory where these images may be found.
# The indexing will return the most likely solution, or the one specified
# if this has been done. The interface should look like indexing with
# labelit. However, a matrix file to index against could optionally be
# supplied, to help with processing MAD data.
#
# For integration, an appropriate matrix file, beam centre, lattice
# (spacegroup) and mosaic spread are needed, along with (optionally) a
# gain and definitely images to process. A resolution limit may also be
# supplied.
#
# The following are good example scripts of how this could work:
#
# [autoindexing + cell refinement]
#
# ipmosflm << eof
# beam 108.9 105.0
# directory /data/graeme/12287
# template 12287_1_E1_###.img
# autoindex dps image 1 ! can add refine after dps
# autoindex dps image 60 ! can add refine after dps
# mosaic estimate
# newmat index.mat
# go
# ! cell refinement stuff - needs more than 2 images
# newmat refined.mat
# postref multi segments 2
# process 1 3
# go
# process 58 60
# go
# eof
#
# [integration]
#
# ipmosflm hklout 12287_1_E1.mtz << eof
# resolution 1.65
# beam 108.9 105.0
# directory /data/graeme/12287
# template 12287_1_E1_###.img
# matrix refined.mat
# mosaic 0.51
# limits exclude 0.9 103.9 208.9 105.5
# limits exclude 103.5 4.4 106.0 209.0
# limits quadrilateral 110.6 105.5 107.2 105.8 104.4 4.7 108.7 4.7
# gain 0.13
# separation close
# postref fix all
# process 1 60
# go
# eof
#
# FIXED 16/AUG/06 the distortion & raster parameters decided on in the
# cell refinement stages need to be recycled to use in integration. This is
# demonstrated by running an interactive (through the GUI) mosflm autoindex
# and refine job, then dumping the runit script. The important information is
# in the following records:
#
# Final optimised raster parameters: 15 17 12 5 6
# => RASTER keyword
# Separation parameters updated to 0.71mm in X and 0.71mm in Y
# => SEPARATION keyword
# XCEN YCEN XTOFRA XTOFD YSCALE TILT TWIST
# 108.97 105.31 0.9980 149.71 0.9984 -13 -46
# => BEAM, DISTANCE, DISTORTION keywords (note that the numbers
# are on the next line here)
#
# This should make the resulting integration more effective. The idea
# for this implementation is that the numbers end up in the "integrate
# set parameter" dictionary and are therefore recycled, in the same way
# that the GAIN currently works.
#
# FIXED 23/AUG/06 If the mosaic spread is refined to a negative number
# during the cell refinement, raise an exception asserting
# that the lattice is wrong. This should eliminate that
# lattice and all possibilities above it in symmetry from
# the list of possible lattices, and the next one down
# should be selected. This will require the "list of allowed
# lattices" stuff to be implemented, which is another
# FIXME all of it's own...
#
# FIXED 23/AUG/06 Another one - the raster parameters decided in indexing
# should be used in the cell refinement if the indexer was
# a mosflm and so is the refiner/integrater - which means
# that the indexer needs to be able to store integration
# parameters in the same way that the integrater does...
# Aha - this can go in the payload as something like
# "mosflm integration parameters" - excellent! Here are the
# complaints I am trying to correct:
#
# **** Information ****
# No RASTER keyword has been given.
# (Gives the starting parameters for the measurement box).
# Suitable parameters will be determined automatically.
#
#
# **** Information ****
# No SEPARATION keyword has been given.
# (Gives minimum spot separation before spots are flagged as overlapping.
# Suitable parameters will be determined automatically.
#
# FIXED 23/AUG/06 Yet another one, though this may apply more to a higher
# level application than this module - there should be an
# "estimate resolution" during the integration, so that
# the final set contains good measurements, good profiles.
#
# FIXME 4/SEP/06 Make sure that the SUMMARY files & friends are written
# to named files, to make sure they don't get overwritten.
# Also more careful naming of integrate.log &c. needed.
#
# FIXME 08/SEP/06 Look at the DELX, DELY of profiles in the output, since
# this can be an indicator of funky things going on in
# the integration. I seem to recall that TS00 complains
# about this, with the allegation of crystal slippage.
#
# FIXME 11/SEP/06 Need to mask "dead" areas of the detector. E.g. a static
# mask from the detector class, plus some kind of mask
# computed from the image [the latter is research!]
#
# FIXME 11/SEP/06 Also want to check that the resolution of the data is
# better than (say) 3.5A, because below that Mosflm has
# trouble refining the cell etc. Could add a resolution
# estimate to the output of Indexer, which could either
# invoke labelit.stats_distl or grep the results from
# the Mosflm output...
#
# Look for record "99% have resolution less than"...
#
# FIXED 27/SEP/06 GAIN & detectors - all data processed for one crystal on
# one detector should have the same value for the GAIN -
# this will mean that this has to be recycled. Add a framework
# to integrater to allow parameters to be exported, in
# the same way as they can be recycled via the integrater
# parameter framework. This is done - look at Integrater.
#
# FIXME 19/OCT/06 it may be more reliable to do the indexing first then run a
# separate job to estimate the mosaic spread. Also important
# if this is to be used in DNA... this will need the matrix,
# resolution, raster parameters, refined beam.
#
# FIXED 23/OCT/06 need to be able to do something useful when the cell
# refinement gives a "large" error in something... in
# particular be able to use more images for cell refinement
# and have another go! Done.
#
# FIXME 28/NOV/06 need to rerun integration with the correct GAIN set before
# assessing I/sigma limits, since these will depend on the
# GAIN (however this could be weak - assess the benefit in
# repeating the integration.)
#
# FIXED 06/FEB/07 need to be able to track the autoindex solution number,
# so in cases where I want an exact solution I can fetch
# it out from the list of solutions and FORCE mosflm
# to give me the right answer.
#
# This is going to have to work as follows. If there is
# a "horrible" exception, then the "correct" solution number
# needs to be obtained and set. The indexing done flag needs
# to be set as False, then the _index method should return.
# On the next pass the correct solution should be selected
# and everything should be peachy. On this correct solution
# the recorded solution number should be reset to 0.
#
# FIXME 29/JUN/07 add functionality just to use this as a replacement for
# Diffdump in highly extreme circumstances - note well that
# this could be very slow...
import os
import sys
import math
# Fail fast with a clear message if the xia2 environment is not configured;
# everything imported below depends on these two roots being on sys.path.
# ('KEY' in os.environ) replaces the deprecated dict.has_key(), and the call
# form of raise replaces the old comma form - both also work on Python 2.
if 'XIA2CORE_ROOT' not in os.environ:
    raise RuntimeError('XIA2CORE_ROOT not defined')

if 'XIA2_ROOT' not in os.environ:
    raise RuntimeError('XIA2_ROOT not defined')

_xia2core_python = os.path.join(os.environ['XIA2CORE_ROOT'], 'Python')
if _xia2core_python not in sys.path:
    sys.path.append(_xia2core_python)

if os.environ['XIA2_ROOT'] not in sys.path:
    sys.path.append(os.environ['XIA2_ROOT'])
from Driver.DriverFactory import DriverFactory
from Decorators.DecoratorFactory import DecoratorFactory
# interfaces that this will present
from Schema.Interfaces.FrameProcessor import FrameProcessor
from Schema.Interfaces.Indexer import Indexer
from Schema.Interfaces.Integrater import Integrater
# output streams &c.
from Handlers.Streams import Admin, Science, Status, Chatter, Debug
from Handlers.Citations import Citations
from Handlers.Flags import Flags
# helpers
from MosflmHelpers import _happy_integrate_lp, \
_parse_mosflm_integration_output, decide_integration_resolution_limit, \
_parse_mosflm_index_output, standard_mask, \
_get_indexing_solution_number
from Modules.GainEstimater import gain
from Handlers.Files import FileHandler
from lib.Guff import auto_logfiler, mean_sd
from lib.SymmetryLib import lattice_to_spacegroup
from Experts.MatrixExpert import transmogrify_matrix
# exceptions
from Schema.Exceptions.BadLatticeError import BadLatticeError
from Schema.Exceptions.IntegrationError import IntegrationError
from Schema.Exceptions.IndexingError import IndexingError
# other classes which are necessary to implement the integrater
# interface (e.g. new version, with reindexing as the finish...)
from Wrappers.CCP4.Reindex import Reindex
def Mosflm(DriverType = None):
'''A factory for MosflmWrapper classes.'''
DriverInstance = DriverFactory.Driver(DriverType)
CCP4DriverInstance = DecoratorFactory.Decorate(DriverInstance, 'ccp4')
class MosflmWrapper(CCP4DriverInstance.__class__,
FrameProcessor,
Indexer,
Integrater):
'''A wrapper for Mosflm, using the CCP4-ified Driver.'''
def __init__(self):
    '''Initialise the CCP4-decorated driver and the FrameProcessor,
    Indexer and Integrater interfaces this wrapper implements.'''

    # generic things
    CCP4DriverInstance.__class__.__init__(self)
    self.set_executable('ipmosflm')

    FrameProcessor.__init__(self)
    Indexer.__init__(self)
    Integrater.__init__(self)

    # local parameters used in autoindexing
    # (0 means "let Mosflm pick the solution")
    self._mosflm_autoindex_sol = 0

    # local parameters used in cell refinement
    self._mosflm_cell_ref_images = None

    # local parameters used in integration
    self._mosflm_rerun_integration = False
    self._mosflm_hklout = ''

    # estimated detector gain; populated lazily by _estimate_gain()
    self._mosflm_gain = None

    return
def diffdump(self, image):
    '''Run a diffdump style dump to check the parameters in the
    image header...'''
    # Placeholder - not implemented yet (see FIXME 29/JUN/07 in the
    # module header about using Mosflm as a diffdump replacement).
    pass
def _estimate_gain(self):
    '''Estimate a GAIN appropriate for reducing this set.'''

    # A gain supplied through the frameprocessor interface always wins -
    # bug # 2333.
    if self.get_gain():
        self._mosflm_gain = self.get_gain()

    if self._mosflm_gain:
        return

    images = self.get_matching_images()

    # Sample every image when there are only a few, otherwise take five
    # from each end of the sweep.
    if len(images) < 10:
        sample = images
    else:
        sample = images[:5] + images[-5:]

    estimates = [gain(self.get_image_name(image)) for image in sample]

    self._mosflm_gain = sum(estimates) / len(estimates)

    Chatter.write('Estimate gain of %5.2f' % self._mosflm_gain)

    return
def _index_prepare(self):
    # prepare to do some autoindexing
    # Only pick images if the caller has not already supplied a wedge
    # list; note the explicit == [] comparison, which deliberately
    # leaves any non-empty (or non-list) value untouched.
    if self._indxr_images == []:
        self._index_select_images()

    return
def _index_select_images(self):
    '''Select correct images based on image headers.'''

    # FIXME perhaps this should be somewhere central, because
    # LabelitScreen will share the same implementation

    oscillation = self.get_header_item('phi_width')
    candidates = self.get_matching_images()

    # Guard against a zero oscillation width in the header - warn and
    # fall back to 1.0 degree per image.
    if oscillation == 0.0:
        Chatter.write('Phi width 0.0? Assuming 1.0!')
        oscillation = 1.0

    # Always index from the first image, plus a second wedge as close to
    # 90 degrees away as the sweep allows (else the last image).
    self.add_indexer_image_wedge(candidates[0])

    ninety_degrees = int(90.0 / oscillation)
    if ninety_degrees in candidates:
        self.add_indexer_image_wedge(ninety_degrees)
    else:
        self.add_indexer_image_wedge(candidates[-1])

    return
def _refine_select_images(self, num_wedges, mosaic):
    '''Select images for cell refinement based on image headers.

    Returns a list of (first, last) image-number tuples, one per wedge.
    Raises IntegrationError for more than 3 wedges and RuntimeError when
    there are not enough images for two-wedge refinement.'''

    # first select the images to use for cell refinement
    # if spacegroup >= 75 use one wedge of 2-3 * mosaic spread, min
    # 3 images, else use two wedges of this size as near as possible
    # to 90 degrees separated. However, is this reliable enough?
    # FIXME this needs to be established, in particular in the case
    # where the lattice is wrongly assigned

    # WARNING this will fail if phi width was 0 - should
    # never happen though

    if num_wedges > 3:
        # allow a rerun later on, perhaps? c/f integrating TS01
        # where this failure is an indication that lattice != oI
        self._mosflm_cell_ref_images = None
        raise IntegrationError, 'cannot cope with more than 3 wedges'

    phi_width = self.get_header_item('phi_width')

    # FIXME what to do if phi_width is 0.0? set it
    # to 1.0! This should be safe enough... though a warning
    # would not go amiss...
    if phi_width == 0.0:
        Chatter.write('Phi width 0.0? Assuming 1.0!')
        phi_width = 1.0

    # Wedge size: at least 4 images, or enough to cover twice the
    # mosaic spread.
    min_images = max(4, int(2 * mosaic / phi_width))

    # next select what we need from the list...
    images = self.get_matching_images()

    # bug # 2344 - does this every really help, other than
    # being totally literal? if num_wedges == 3 then this
    # will probably just end up using all images...
    if len(images) < num_wedges * min_images and num_wedges == 2:
        raise RuntimeError, 'not enough images to refine unit cell'

    # First wedge always starts at the beginning of the sweep.
    cell_ref_images = []
    cell_ref_images.append((images[0], images[min_images - 1]))

    # FIXME 23/OCT/06 need to be able to cope with more than two
    # wedges - in this case have the spread evenly between 0 and
    # 90 degrees as that measures all of the required unit cell
    # vectors..

    if num_wedges == 2:
        ideal_last = int(90.0 / phi_width) + min_images
        if ideal_last in images:
            cell_ref_images.append(
                (images[ideal_last - min_images + 1],
                 images[ideal_last]))
        else:
            # there aren't 90 degrees of images
            cell_ref_images.append((images[-min_images],
                                    images[-1]))

    elif num_wedges == 3:
        # Middle wedge near 45 degrees when available.
        ideal_middle = int(45.0 / phi_width) + min_images
        if ideal_middle in images:
            cell_ref_images.append((images[ideal_middle - min_images],
                                    images[ideal_middle - 1]))
        else:
            # there aren't 45 degrees of images
            # bug # 2344 - we may be trying to reduce data from
            # a partial data set, in which case it is important to
            # give this a proper go... now Mosflm can take
            # up to 30 frames for postrefinement and I have
            # found that 3 x 10 is better than 2 x 15, so
            # if this is all you have, then go ahead. Now,
            # if the spacegroup is less than 75 then it is
            # likely that the refined cell parameters may not
            # be perfect, but they will probably be good enough,
            # so allow for a little slack (like say up to 0.2A
            # or 1% or something...)

            # raise RuntimeError, \
            # 'not enough data to do 3 wedge cell refinement'

            lattice = self.get_integrater_indexer().get_indexer_lattice()
            spacegroup_number = lattice_to_spacegroup(lattice)

            Chatter.write('Less than 45 degrees so using %d images!' %
                          min(30, len(images)))

            if len(images) <= 30:
                # use all 30 images for cell refinement
                cell_ref_images = [(min(images), max(images))]
            else:
                # set this to first ten, middle ten and last ten images
                # NOTE: integer division under Python 2 - this module is
                # Python 2 code (see the comma-form raise above).
                middle = len(images) / 2
                cell_ref_images = [(images[0], images[9]),
                                   (images[middle - 4], images[middle + 5]),
                                   (images[-10], images[-1])]

            # Early return: the fallback layouts above fully describe
            # the wedges, so skip the final-90-degree wedge below.
            return cell_ref_images

        ideal_last = int(90.0 / phi_width) + min_images

        if ideal_last in images:
            cell_ref_images.append((images[ideal_last - min_images],
                                    images[ideal_last]))
        else:
            # there aren't 90 degrees of images
            cell_ref_images.append((images[-min_images],
                                    images[-1]))

    return cell_ref_images
def _index(self):
    '''Implement the indexer interface.

    Drives a Mosflm autoindex run over the images recorded in
    self._indxr_images, then parses the log to populate the indexer
    results: cell, lattice, mosaic spread, refined beam / distance and
    a payload of integration parameters (raster, separation) for later
    reuse. May lower the indexer "done" flag and return early if the
    selected solution does not match a user-supplied target cell, so
    that indexing is re-run with an explicit solution number.

    Raises:
        RuntimeError: if a target cell was given and no matching
            solution can be selected.
        IndexingError: if Mosflm's mosaicity estimation fails outright.
    '''
    Citations.cite('mosflm')
    self.reset()
    # gather the unique image numbers from all recorded wedges,
    # in ascending order
    _images = []
    for i in self._indxr_images:
        for j in i:
            if not j in _images:
                _images.append(j)
    _images.sort()
    task = 'Autoindex from images:'
    for i in _images:
        task += ' %s' % self.get_image_name(i)
    self.set_task(task)
    auto_logfiler(self)
    self.start()
    self.input('template "%s"' % self.get_template())
    self.input('directory "%s"' % self.get_directory())
    self.input('newmat xiaindex.mat')
    # only pass user-provided experimental geometry through to Mosflm;
    # otherwise let it read the values from the image headers
    if self.get_beam_prov() == 'user':
        self.input('beam %f %f' % self.get_beam())
    if self.get_wavelength_prov() == 'user':
        self.input('wavelength %f' % self.get_wavelength())
    if self.get_distance_prov() == 'user':
        self.input('distance %f' % self.get_distance())
    # FIXME need to be able to handle an input
    # unit cell here - should also be able to
    # handle writing in the crystal orientation (which
    # would be useful) but I may save that one for
    # later... c/f TS02/1VK8
    if self._indxr_input_cell:
        self.input('cell %f %f %f %f %f %f' % \
                   self._indxr_input_cell)
    if self._indxr_input_lattice != None:
        spacegroup_number = lattice_to_spacegroup(
            self._indxr_input_lattice)
        self.input('symmetry %d' % spacegroup_number)
    # FIXME 25/OCT/06 have found that a threshold of 10 works
    # better for TS01/LREM - need to make sure that this is
    # generally applicable...
    for i in _images:
        if self._mosflm_autoindex_sol:
            # a specific solution number was requested by a previous
            # (failed) pass - ask Mosflm for exactly that solution
            self.input(
                'autoindex dps refine image %d thresh 10 solu %d' % \
                (i, self._mosflm_autoindex_sol))
        else:
            self.input(
                'autoindex dps refine image %d thresh 10' % i)
    # now forget this to prevent weird things happening later on
    if self._mosflm_autoindex_sol:
        self._mosflm_autoindex_sol = 0
    self.input('mosaic estimate')
    self.input('go')
    self.close_wait()
    output = self.get_all_output()
    intgr_params = { }
    # look up other possible indexing solutions (not well - in
    # standard settings only!) This is moved earlier as it could
    # result in returning if Mosflm has selected the wrong
    # solution!
    try:
        self._indxr_other_lattice_cell = _parse_mosflm_index_output(
            output)
        # check that the selected unit cell matches - and if
        # not raise a "horrible" exception
        if self._indxr_input_cell:
            for o in output:
                if 'Final cell (after refinement)' in o:
                    indxr_cell = tuple(map(float, o.split()[-6:]))
                    # any axis or angle more than 2.0 out from the
                    # target is treated as the wrong solution
                    for j in range(6):
                        if math.fabs(self._indxr_input_cell[j] -
                                     indxr_cell[j]) > 2.0:
                            Chatter.write(
                                'Mosflm autoindexing did not select ' +
                                'correct (target) unit cell')
                            raise RuntimeError, \
                                  'something horrible happened in indexing'
    except RuntimeError, e:
        # check if mosflm rejected a solution we have it
        # NOTE: matched on the 'horribl' substring of the message
        # raised above
        if 'horribl' in str(e):
            # ok it did - time to break out the big guns...
            if not self._indxr_input_cell:
                raise RuntimeError, \
                      'error in solution selection when not preset'
            self._mosflm_autoindex_sol = _get_indexing_solution_number(
                output,
                self._indxr_input_cell,
                self._indxr_input_lattice)
            # set the fact that we are not done...
            self.set_indexer_done(False)
            # and return - hopefully this will restart everything
            return
        else:
            raise e
    # harvest the indexing results from the log
    for o in output:
        if 'Final cell (after refinement)' in o:
            self._indxr_cell = tuple(map(float, o.split()[-6:]))
        if 'Beam coordinates of' in o:
            self._indxr_refined_beam = tuple(map(float, o.split(
                )[-2:]))
        # FIXED this may not be there if this is a repeat indexing!
        if 'Symmetry:' in o:
            self._indxr_lattice = o.split(':')[1].split()[0]
        # so we have to resort to this instead...
        if 'Refining solution #' in o:
            # derive the lattice from the refined spacegroup number
            # via an inverted lattice -> spacegroup lookup table
            spagnum = int(o.split(')')[0].split()[-1])
            lattice_to_spacegroup_dict = {'aP':1, 'mP':3, 'mC':5,
                                          'oP':16, 'oC':20, 'oF':22,
                                          'oI':23, 'tP':75, 'tI':79,
                                          'hP':143, 'hR':146,
                                          'cP':195, 'cF':196,
                                          'cI':197}
            spacegroup_to_lattice = { }
            for k in lattice_to_spacegroup_dict.keys():
                spacegroup_to_lattice[
                    lattice_to_spacegroup_dict[k]] = k
            self._indxr_lattice = spacegroup_to_lattice[spagnum]
        # in here I need to check if the mosaic spread estimation
        # has failed. If it has it is likely that the selected
        # lattice has too high symmetry, and the "next one down"
        # is needed
        if 'The mosaicity has been estimated' in o:
            self._indxr_mosaic = float(o.split('>')[1].split()[0])
        # alternatively this could have failed - which happens
        # sometimes...
        if 'The mosaicity estimation has not worked for some' in o:
            # this is a problem... in particular with the
            # mosflm built on linux in CCP4 6.0.1...
            # FIXME this should be a specific kind of
            # exception e.g. an IndexError
            raise IndexingError, 'mosaicity estimation failed'
        # or it may alternatively look like this...
        if 'The mosaicity has NOT been estimated' in o:
            # then consider setting it do a default value...
            # equal to the oscillation width (a good guess)
            phi_width = self.get_header_item('phi_width')
            Chatter.write(
                'Mosaic estimation failed, so guessing at %4.2f' % \
                phi_width)
            self._indxr_mosaic = phi_width
        # mosflm doesn't refine this in autoindexing...
        if 'Crystal to detector distance of' in o:
            self._indxr_refined_distance = float(o.split(
                )[5].replace('mm', ''))
        # record raster parameters and so on, useful for the
        # cell refinement etc - this will be added to a
        # payload dictionary of mosflm integration keywords
        # look for "measurement box parameters"
        if 'parameters have been set to' in o:
            intgr_params['raster'] = map(
                int, o.split()[-5:])
        if '(currently SEPARATION' in o:
            intgr_params['separation'] = map(
                float, o.replace(')', '').split()[-2:])
        # get the resolution estimate out...
        if '99% have resolution' in o:
            self._indxr_resolution_estimate = float(
                o.split()[-2])
            # remove this as useless, see bug # 2072
            # Science.write('Resolution estimated to be %5.2f A' % \
            # self._indxr_resolution_estimate)
    # FIXME this needs to be picked up by the integrater
    # interface which uses this Indexer, if it's a mosflm
    # implementation
    self._indxr_payload['mosflm_integration_parameters'] = intgr_params
    self._indxr_payload['mosflm_orientation_matrix'] = open(
        os.path.join(self.get_working_directory(),
                     'xiaindex.mat'), 'r').readlines()
    return
def _integrate_prepare(self):
    '''Prepare for integration - note that if there is a reason
    why this is needed to be run again, set self._intgr_prepare_done
    as False.

    Refines the unit cell in the selected lattice and again in P1,
    logging a per-image comparison of the RMS deviations, then makes
    sure an integration wedge has been assigned.
    '''
    # pick up a known gain from the FrameProcessor interface if one
    # is not already set (bug # 2333); the old gain estimation was
    # removed as it only made things less reliable (bug 2199)
    if not self._mosflm_gain and self.get_gain():
        self._mosflm_gain = self.get_gain()
    # cell refinement in the lattice selected by autoindexing...
    self.reset()
    auto_logfiler(self)
    rms_deviations = self._mosflm_refine_cell()
    # ...and repeated in triclinic P1, so the per-image RMS
    # deviations of the two runs can be compared below
    self.reset()
    auto_logfiler(self)
    rms_deviations_p1 = self._mosflm_test_refine_cell('aP')
    # expand the cell refinement wedges into a flat image list
    images = [frame
              for wedge in self._mosflm_cell_ref_images
              for frame in range(wedge[0], wedge[1] + 1)]
    if rms_deviations_p1:
        Debug.write('Cell refinement comparison:')
        Debug.write('Image correct triclinic')
        total = 0.0
        total_p1 = 0.0
        for idx, frame in enumerate(images):
            Debug.write('. %4d %.2f %.2f' % \
                        (frame, rms_deviations[idx],
                         rms_deviations_p1[idx]))
            total += rms_deviations[idx]
            total_p1 += rms_deviations_p1[idx]
        Debug.write('Average ratio: %.2f' % (total / total_p1))
    else:
        Debug.write('Cell refinement in P1 failed...')
    # also look for the images we want to integrate... since this
    # is part of the preparation and was causing fun with
    # bug # 2040 - going quickly! this resets the integration done
    # flag...
    if not self._intgr_wedge:
        matching = self.get_matching_images()
        self.set_integrater_wedge(min(matching),
                                  max(matching))
    return
def _integrate(self):
    '''Implement the integrater interface.

    Runs the actual Mosflm integration (cell refinement is handled
    separately in the prepare stage) and records the resulting
    reflection file in self._intgr_hklout / self._mosflm_hklout.
    If the integration run decided it must be repeated (and we are
    not in "quick" mode) the integrater done flag is lowered so the
    Integrater interface while-loop will call this again.

    Returns:
        The name of the integrated reflection file.
    '''
    # cite the program
    Citations.cite('mosflm')
    # FIXME in here I want to be able to work "fast" or "slow"
    # if fast, ignore cell refinement (i.e. to get the pointless
    # output quickly.) 30/OCT/06 decide that this is is not
    # appropriate for xia2.
    # this means that the integration must be able to know
    # what "state" it is being run from... this is perhaps best
    # achieved by repopulating the indexing results with the output
    # of the cell refinement, which have the same prototype.
    # FIXME 20/OCT/06 this needs to be able to check if (1) the
    # cell refinement has already been performed
    # for the correct lattice and (2) if there
    # if a good reason for rerunning the integration.
    # See changes to Integrater.py in Schema /
    # Interfaces as to why this is suddenly
    # important (in a nutshell, this will handle
    # all of the necessary rerunning in a while-loop.)
    # by default we don't want to rerun, or we could be here forever
    # (and that did happen! :o( )
    self._mosflm_rerun_integration = False
    # (removed an unused local that fetched the working directory -
    # the value was never read)
    self.reset()
    auto_logfiler(self)
    self._intgr_hklout = self._mosflm_integrate()
    self._mosflm_hklout = self._intgr_hklout
    if self._mosflm_rerun_integration and not Flags.get_quick():
        # make sure that this is run again...
        Chatter.write('Need to rerun the integration...')
        self.set_integrater_done(False)
    return self._intgr_hklout
def _integrate_finish(self):
    '''Finish the integration - if necessary performing reindexing
    based on the pointgroup and the reindexing operator.

    Returns the final reflection file name (reindexed copy when a
    reindex was required, otherwise the raw Mosflm output).
    '''
    # no reindexing is needed when no operator has been assigned and
    # the spacegroup is either unset (0) or already consistent with
    # the lattice selected by the indexer
    if self._intgr_reindex_operator is None:
        lattice_spacegroup = lattice_to_spacegroup(
            self.get_integrater_indexer().get_indexer_lattice())
        if self._intgr_spacegroup_number == lattice_spacegroup:
            return self._mosflm_hklout
        if self._intgr_spacegroup_number == 0:
            return self._mosflm_hklout
    Chatter.write('Reindexing to spacegroup %d (%s)' % \
                  (self._intgr_spacegroup_number,
                   self._intgr_reindex_operator))
    mtz_in = self._mosflm_hklout
    reindexer = Reindex()
    reindexer.set_working_directory(self.get_working_directory())
    auto_logfiler(reindexer)
    reindexer.set_operator(self._intgr_reindex_operator)
    if self._intgr_spacegroup_number:
        reindexer.set_spacegroup(self._intgr_spacegroup_number)
    # name the reindexed file after the input, dropping '.mtz'
    mtz_out = '%s_reindex.mtz' % mtz_in[:-4]
    reindexer.set_hklin(mtz_in)
    reindexer.set_hklout(mtz_out)
    reindexer.reindex()
    self._intgr_hklout = mtz_out
    return mtz_out
def _mosflm_test_refine_cell(self, test_lattice):
    '''Test performing cell refinement in with a different
    lattice to the one which was selected by the autoindex
    procedure.

    Runs a throw-away Mosflm cell refinement using test_lattice
    (typically "aP" for triclinic) and returns the per-image RMS
    positional errors from the final refinement cycle, or None if
    no RMS table was found in the log. Unlike _mosflm_refine_cell
    this does not update any indexer or integrater state.
    '''
    # this version will not actually *change* anything in the class.
    # note well that this will need the unit cell to be
    # transformed from a centred to a primitive lattice, perhaps.
    # yes that is definately the case - the matrix will also
    # need to be transformed :o( this is fine, see below.
    # assert that this is called after the initial call to
    # cell refinement in the correct PG so a lot of this can
    # be ignored...
    indxr = self.get_integrater_indexer()
    lattice = indxr.get_indexer_lattice()
    mosaic = indxr.get_indexer_mosaic()
    beam = indxr.get_indexer_beam()
    distance = indxr.get_indexer_distance()
    matrix = indxr.get_indexer_payload('mosflm_orientation_matrix')
    input_matrix = ''
    for m in matrix:
        input_matrix += '%s\n' % m
    # transform the orientation matrix from the autoindexed lattice
    # into the test lattice setting
    new_matrix = transmogrify_matrix(lattice, input_matrix,
                                     test_lattice)
    spacegroup_number = lattice_to_spacegroup(test_lattice)
    if not self._mosflm_cell_ref_images:
        raise RuntimeError, 'wedges must be assigned already'
    open(os.path.join(self.get_working_directory(),
                      'test-xiaindex-%s.mat' % lattice),
         'w').write(new_matrix)
    self.start()
    if self._mosflm_gain:
        self.input('gain %5.2f' % self._mosflm_gain)
    self.input('template "%s"' % self.get_template())
    self.input('directory "%s"' % self.get_directory())
    self.input('matrix test-xiaindex-%s.mat' % lattice)
    self.input('newmat test-xiarefine.mat')
    self.input('beam %f %f' % beam)
    self.input('distance %f' % distance)
    self.input('symmetry %s' % spacegroup_number)
    self.input('mosaic %f' % mosaic)
    # note well that the beam centre is coming from indexing so
    # should be already properly handled
    if self.get_wavelength_prov() == 'user':
        self.input('wavelength %f' % self.get_wavelength())
    # get all of the stored parameter values
    parameters = self.get_integrater_parameters('mosflm')
    # FIXME 27/SEP/06:
    # have to make sure that these are correctly applied -
    # that is, be sure that these come actually from autoindexing
    # not somehow from a previous instance of data integration...
    self.input('!parameters from autoindex run')
    for p in parameters.keys():
        self.input('%s %s' % (p, str(parameters[p])))
    # fudge factors to prevent Mosflm from being too fussy
    self.input('refinement residual 10.0')
    # set up the cell refinement - allowing quite a lot of
    # refinement for tricky cases (e.g. 7.2 SRS insulin SAD
    # data collected on MAR IP)
    self.input('postref multi segments %d repeat 10' % \
               len(self._mosflm_cell_ref_images))
    for cri in self._mosflm_cell_ref_images:
        self.input('process %d %d' % cri)
    self.input('go')
    # that should be everything
    self.close_wait()
    # get the log file
    output = self.get_all_output()
    rms_values_last = None
    # parse the "Rms positional error" table: image numbers come on
    # 'Image' rows, per-cycle values on 'Cycle' rows
    for i in range(len(output)):
        o = output[i]
        if 'Rms positional error (mm) as a function of' in o and True:
            images = []
            cycles = []
            rms_values = { }
            j = i + 1
            # the table is terminated by a blank line
            while output[j].split():
                if 'Image' in output[j]:
                    for image in map(int, output[j].replace(
                        'Image', '').split()):
                        images.append(image)
                else:
                    cycle = int(output[j].replace(
                        'Cycle', '').split()[0])
                    if not cycle in cycles:
                        cycles.append(cycle)
                        rms_values[cycle] = []
                    # NOTE(review): values are sliced out as fixed
                    # 6-character columns starting at position 11 -
                    # assumes the Mosflm log table layout; confirm
                    # against current Mosflm output
                    record = [output[j][k:k + 6] \
                              for k in range(
                        11, len(output[j]), 6)]
                    data = []
                    for r in record:
                        if r.strip():
                            data.append(r.strip())
                    record = data
                    try:
                        values = map(float, record)
                        for v in values:
                            rms_values[cycle].append(v)
                    except ValueError, e:
                        Chatter.write(
                            'Error parsing %s as floats' % \
                            output[j][12:])
                j += 1
            # keep only the values from the final refinement cycle
            rms_values_last = rms_values[max(cycles)]
    return rms_values_last
def _mosflm_refine_cell(self, set_spacegroup = None):
    '''Perform the refinement of the unit cell. This will populate
    all of the information needed to perform the integration.

    Runs Mosflm POSTREF cell refinement over the assigned wedges,
    traps the known failure modes (abort, large weighted residual,
    inaccurate cell parameters, unstable refinement, negative mosaic
    spread) - in which case it either retries with more wedges by
    lowering the prepare-done flag and returning early, or raises
    BadLatticeError - and on success copies the refined cell, beam,
    distance, mosaic and distortion parameters back into the indexer
    and integrater state.

    Args:
        set_spacegroup: optional explicit spacegroup to use instead
            of the one implied by the indexer lattice (used e.g. for
            testing refinement in P1).

    Returns:
        The per-image RMS positional errors from the final refinement
        cycle, or None if none were parsed (or on an early retry
        return).

    Raises:
        BadLatticeError: when refinement fails and no more wedges can
            be added.
        RuntimeError: when inaccurate cell parameters are reported
            before any RMS deviation table was seen.
    '''
    # self.reset()
    if not self.get_integrater_indexer():
        # this wrapper can present the indexer interface
        # if needed, so do so. if this set command has
        # been called already this should not be used...
        self.set_integrater_indexer(self)
    # get the things we need from the indexer - beware that if
    # the indexer has not yet been run this may spawn other
    # jobs...
    indxr = self.get_integrater_indexer()
    if not indxr.get_indexer_payload('mosflm_orientation_matrix'):
        # we will have to do some indexing ourselves - the
        # existing indexing job doesn't provide an orientation
        # matrix
        # FIXME this needs implementing - copy information
        # from this indexer to myself, then reset my indexer too me
        pass
    lattice = indxr.get_indexer_lattice()
    mosaic = indxr.get_indexer_mosaic()
    cell = indxr.get_indexer_cell()
    beam = indxr.get_indexer_beam()
    # check to see if there is a special mosflm beam around!
    if indxr.get_indexer_payload('mosflm_beam_centre'):
        beam = indxr.get_indexer_payload('mosflm_beam_centre')
    distance = indxr.get_indexer_distance()
    matrix = indxr.get_indexer_payload('mosflm_orientation_matrix')
    # check to see if there are parameters which I should be using for
    # cell refinement etc in here - if there are, use them - this
    # will also appear in integrate, for cases where that will
    # be called without cell refinemnt
    integration_params = indxr.get_indexer_payload(
        'mosflm_integration_parameters')
    if integration_params:
        # copy them somewhere useful... into the dictionary?
        # yes - that way they can be recycled...
        # after that, zap them because they will be obsolete!
        if integration_params.has_key('separation'):
            self.set_integrater_parameter(
                'mosflm', 'separation',
                '%f %f' % tuple(integration_params['separation']))
        if integration_params.has_key('raster'):
            self.set_integrater_parameter(
                'mosflm', 'raster',
                '%d %d %d %d %d' % tuple(integration_params['raster']))
    indxr.set_indexer_payload('mosflm_integration_params', None)
    # copy these into myself for later reference, if indexer
    # is not myself - everything else is copied via the
    # cell refinement process...
    if indxr != self:
        self.set_indexer_input_lattice(lattice)
        self.set_indexer_beam(beam)
    # here need to check the LATTICE - which will be
    # something like tP etc. FIXME how to cope when the
    # spacegroup has been explicitly stated?
    spacegroup_number = lattice_to_spacegroup(lattice)
    # FIXME 11/SEP/06 have an example set of data which will
    # make cell refinement "fail" - that is
    # not work very well - 9485/3[1VPX]. Therefore
    # allow for more image wedges, read output.
    #
    # What we are looking for in the output is:
    #
    # INACCURATE CELL PARAMETERS
    #
    # followed by the dodgy cell parameters, along with the
    # associated standard errors. Based on these need to decide
    # what extra data would be helpful. Will also want to record
    # these standard deviations to decide if the next run of
    # cell refinement makes things better... Turns out that this
    # example is very low resolution, so don't worry too hard
    # about it!
    # tetragonal or higher symmetry needs only one wedge; lower
    # symmetry constrains fewer cell parameters so use two
    if spacegroup_number >= 75:
        num_wedges = 1
    else:
        num_wedges = 2
    # FIXME 23/OCT/06 should only do this if the images are not
    # already assigned - for instance, in the case where the cell
    # refinement fails and more images are added after that failure
    # need to be able to cope with not changing them at this stage...
    # self._mosflm_cell_ref_images = None
    if not self._mosflm_cell_ref_images:
        self._mosflm_cell_ref_images = self._refine_select_images(
            num_wedges, mosaic)
    # write the matrix file in xiaindex.mat
    f = open(os.path.join(self.get_working_directory(),
                          'xiaindex-%s.mat' % lattice), 'w')
    for m in matrix:
        f.write(m)
    f.close()
    # then start the cell refinement
    task = 'Refine cell from %d wedges' % \
           len(self._mosflm_cell_ref_images)
    self.set_task(task)
    self.start()
    if self._mosflm_gain:
        self.input('gain %5.2f' % self._mosflm_gain)
    self.input('template "%s"' % self.get_template())
    self.input('directory "%s"' % self.get_directory())
    self.input('matrix xiaindex-%s.mat' % lattice)
    self.input('newmat xiarefine.mat')
    self.input('beam %f %f' % beam)
    self.input('distance %f' % distance)
    # FIXED is this the correct form? - it is now.
    # want to be able to test cell refinement in P1
    # as a way of investigating how solid the autoindex
    # solution is... therefore allow spacegroup to
    # be explicitly set...
    if set_spacegroup:
        self.input('symmetry %s' % set_spacegroup)
    else:
        self.input('symmetry %s' % spacegroup_number)
    self.input('mosaic %f' % mosaic)
    # note well that the beam centre is coming from indexing so
    # should be already properly handled
    if self.get_wavelength_prov() == 'user':
        self.input('wavelength %f' % self.get_wavelength())
    # get all of the stored parameter values
    parameters = self.get_integrater_parameters('mosflm')
    # FIXME 27/SEP/06:
    # have to make sure that these are correctly applied -
    # that is, be sure that these come actually from autoindexing
    # not somehow from a previous instance of data integration...
    self.input('!parameters from autoindex run')
    for p in parameters.keys():
        self.input('%s %s' % (p, str(parameters[p])))
    # fudge factors to prevent Mosflm from being too fussy
    self.input('refinement residual 10.0')
    # set up the cell refinement - allowing quite a lot of
    # refinement for tricky cases (e.g. 7.2 SRS insulin SAD
    # data collected on MAR IP)
    self.input('postref multi segments %d repeat 10' % \
               len(self._mosflm_cell_ref_images))
    for cri in self._mosflm_cell_ref_images:
        self.input('process %d %d' % cri)
    self.input('go')
    # that should be everything
    self.close_wait()
    # get the log file
    output = self.get_all_output()
    # then look to see if the cell refinement worked ok - if it
    # didn't then this may indicate that the lattice was wrongly
    # selected.
    cell_refinement_ok = False
    for o in output:
        if 'Cell refinement is complete' in o:
            cell_refinement_ok = True
    if not cell_refinement_ok:
        Chatter.write(
            'Looks like cell refinement failed - more follows...')
    # how best to handle this, I don't know... could
    #
    # (1) raise an exception
    # (2) try to figure out the solution myself
    #
    # probably (1) is better, because this will allow the higher
    # level of intelligence to sort it out. don't worry too hard
    # about this in the initial version, since labelit indexing
    # is pretty damn robust.
    # if it succeeded then populate the indexer output (myself)
    # with the new information - this can then be used
    # transparently in the integration.
    # here I need to get the refined distance, mosaic spread, unit
    # cell and matrix - should also look the yscale and so on, as
    # well as the final rms deviation in phi and distance
    # FIRST look for errors, and analysis stuff which may be
    # important...
    rmsd_range = None
    rms_values_last = None
    for i in range(len(output)):
        o = output[i]
        # FIXME 01/NOV/06 dump this stuff from the top (error trapping)
        # into a trap_cell_refinement_errors method which is called
        # before the rest of the output is parsed...
        # look for overall cell refinement failure
        if 'Processing will be aborted' in o:
            # perhaps try this with more images?
            if len(self._mosflm_cell_ref_images) <= 3:
                # set this up to be more images
                new_cell_ref_images = self._refine_select_images(
                    len(self._mosflm_cell_ref_images) + 1,
                    mosaic)
                self._mosflm_cell_ref_images = new_cell_ref_images
                self.set_integrater_prepare_done(False)
                Science.write(
                    'Repeating cell refinement with more data.')
                return
            else:
                raise BadLatticeError, 'cell refinement failed'
        # look to store the rms deviations on a per-image basis
        # this may be used to decide what to do about "inaccurate
        # cell parameters" below... may also want to record
        # them for comparison with cell refinement with a lower
        # spacegroup for solution elimination purposes...
        if 'Rms positional error (mm) as a function of' in o and True:
            images = []
            cycles = []
            rms_values = { }
            j = i + 1
            # the table is terminated by a blank line; image numbers
            # appear on 'Image' rows, per-cycle values on 'Cycle' rows
            while output[j].split():
                if 'Image' in output[j]:
                    for image in map(int, output[j].replace(
                        'Image', '').split()):
                        images.append(image)
                else:
                    cycle = int(output[j].replace(
                        'Cycle', '').split()[0])
                    if not cycle in cycles:
                        cycles.append(cycle)
                        rms_values[cycle] = []
                    # NOTE(review): fixed 6-character columns from
                    # position 11 - assumes the Mosflm log table
                    # layout; confirm against current Mosflm output
                    record = [output[j][k:k + 6] \
                              for k in range(
                        11, len(output[j]), 6)]
                    data = []
                    for r in record:
                        if r.strip():
                            data.append(r.strip())
                    record = data
                    try:
                        values = map(float, record)
                        for v in values:
                            rms_values[cycle].append(v)
                    except ValueError, e:
                        Chatter.write(
                            'Error parsing %s as floats' % \
                            output[j][12:])
                j += 1
            # by now we should have recorded everything so...print!
            # Chatter.write('Final RMS deviations per image')
            # for j in range(len(images)):
            # Chatter.write('- %4d %5.3f' % (images[j],
            # rms_values_last[j]))
            if cycles:
                rms_values_last = rms_values[max(cycles)]
            else:
                rms_values_last = None
            if rms_values_last:
                rmsd_range = max(rms_values_last), min(rms_values_last)
            else:
                # there must have been a bigger problem than this!
                rmsd_range = 1.0, 1.0
        # look for "error" type problems
        if 'is greater than the maximum allowed' in o and \
           'FINAL weighted residual' in o:
            # the weighted residual is too high - this suggests
            # a poor indexing solution - jump out and redo
            Science.write('Large weighted residual...')
            if len(self._mosflm_cell_ref_images) < 3:
                # set this up to be more images
                new_cell_ref_images = self._refine_select_images(
                    len(self._mosflm_cell_ref_images) + 1,
                    mosaic)
                self._mosflm_cell_ref_images = new_cell_ref_images
                # set a flag to say cell refinement needs rerunning
                # c/f Integrator.py
                self.set_integrater_prepare_done(False)
                # tell the user what is going on
                Science.write(
                    'Repeating cell refinement with more data.')
                # don't update the indexer - the results could be
                # wrong!
                return
            else:
                Science.write(
                    'Integration will be aborted because of this.')
                raise BadLatticeError, 'cell refinement failed: ' + \
                      'inaccurate cell parameters'
        if 'INACCURATE CELL PARAMETERS' in o:
            # get the inaccurate cell parameters in question
            parameters = output[i + 3].lower().split()
            # and the standard deviations - so we can decide
            # if it really has failed
            sd_record = output[i + 5].replace(
                'A', ' ').replace(',', ' ').split()
            sds = map(float, [sd_record[j] for j in range(1, 12, 2)])
            Science.write('Standard deviations:')
            Science.write('A %4.2f B %4.2f C %4.2f' % \
                          (tuple(sds[:3])))
            Science.write('Alpha %4.2f Beta %4.2f Gamma %4.2f' % \
                          (tuple(sds[3:6])))
            # FIXME 01/NOV/06 this needs to be toned down a little -
            # perhaps looking at the relative error in the cell
            # parameter, or a weighted "error" of the two combined,
            # because this may give rise to an error: TS01 NATIVE LR
            # failed in integration with this, because the error
            # in a was > 0.1A in 228. Assert perhaps that the error
            # should be less than 1.0e-3 * cell axis and less than
            # 0.15A?
            # inspect rmsd_range
            if rmsd_range is None:
                raise RuntimeError, 'no rms deviation information'
            # interested if max > 2 * min... 2 - 1 / (2 + 1)= 1 / 3
            large_rmsd_range = False
            if ((rmsd_range[0] - rmsd_range[1]) /
                (rmsd_range[0] + rmsd_range[1])) > 0.3333:
                large_rmsd_range = True
                Science.write(
                    'Large range in RMSD variation per image')
            # and warn about them
            Science.write(
                'In cell refinement, the following cell parameters')
            Science.write(
                'have refined poorly:')
            for p in parameters:
                Science.write('... %s' % p)
            # decide what to do about this...
            # if this is all cell parameters, abort, else
            # consider using more data...
            # see how many wedges we are using - if it's 3 already
            # then there is probably something more important
            # wrong. If it is fewer than this then try again!
            if len(self._mosflm_cell_ref_images) < 3:
                # set this up to be more images
                new_cell_ref_images = self._refine_select_images(
                    len(self._mosflm_cell_ref_images) + 1,
                    mosaic)
                self._mosflm_cell_ref_images = new_cell_ref_images
                # set a flag to say cell refinement needs rerunning
                # c/f Integrator.py
                self.set_integrater_prepare_done(False)
                # tell the user what is going on
                Science.write(
                    'Repeating cell refinement with more data.')
                # don't update the indexer - the results could be
                # wrong!
                return
            else:
                if large_rmsd_range:
                    Science.write(
                        'Integration will be aborted because of this.')
                    raise BadLatticeError, 'cell refinement failed: ' + \
                          'inaccurate cell parameters'
                Science.write(
                    'However, will continue to integration.')
        if 'One or more cell parameters has changed by more' in o:
            # this is a more severe example of the above problem...
            Science.write(
                'Cell refinement is unstable...')
            # so decide what to do about it...
            if len(self._mosflm_cell_ref_images) <= 3:
                # set this up to be more images
                new_cell_ref_images = self._refine_select_images(
                    len(self._mosflm_cell_ref_images) + 1,
                    mosaic)
                self._mosflm_cell_ref_images = new_cell_ref_images
                self.set_integrater_prepare_done(False)
                Science.write(
                    'Repeating cell refinement with more data.')
                return
            else:
                Science.write(
                    'Integration will be aborted because of this.')
                raise BadLatticeError, 'cell refinement failed: ' + \
                      'unstable cell refinement'
        # other possible problems in the cell refinement - a
        # negative mosaic spread, for instance
        if 'Refined mosaic spread (excluding safety factor)' in o:
            mosaic = float(o.split()[-1])
            if mosaic < 0.0:
                Science.write('Negative mosaic spread (%5.2f)' %
                              mosaic)
                if len(self._mosflm_cell_ref_images) <= 3:
                    # set this up to be more images
                    new_cell_ref_images = self._refine_select_images(
                        len(self._mosflm_cell_ref_images) + 1,
                        mosaic)
                    self._mosflm_cell_ref_images = new_cell_ref_images
                    self.set_integrater_prepare_done(False)
                    Science.write(
                        'Repeating cell refinement with more data.')
                    return
                else:
                    Science.write(
                        'Integration will be aborted because of this.')
                    raise BadLatticeError, 'cell refinement failed: ' + \
                          'negative mosaic spread'
        # look generally at the RMS deviation range - is this is
        # large then there may be something properly wrong...
        # switch this off for a moment as it may be more appropriate
        # for this test to look at the results from integration...
        # NOTE: the "and False" makes this branch permanently dead
        # by design
        if rmsd_range and False:
            if ((rmsd_range[0] - rmsd_range[1]) /
                (rmsd_range[0] + rmsd_range[1])) > 0.3333:
                Science.write(
                    'Large range in RMSD variation per image')
                if len(self._mosflm_cell_ref_images) <= 3:
                    # set this up to be more images
                    new_cell_ref_images = self._refine_select_images(
                        len(self._mosflm_cell_ref_images) + 1,
                        mosaic)
                    self._mosflm_cell_ref_images = new_cell_ref_images
                    self.set_integrater_prepare_done(False)
                    Science.write(
                        'Repeating cell refinement with more data.')
                    return
                else:
                    Science.write(
                        'Integration will be aborted because of this.')
                    raise BadLatticeError, 'cell refinement failed: ' + \
                          'negative mosaic spread'
    # AFTER that, read the refined parameters
    for i in range(len(output)):
        o = output[i]
        # FIXED for all of these which follow - the refined values
        # for these parameters should only be stored if the cell
        # refinement were 100% successful - therefore gather
        # them up here and store them at the very end (e.g. once
        # success has been confirmed.) 01/NOV/06
        # FIXME will these get lost if the indexer in question is
        # not this program...? Find out... would be nice to write
        # this to Chatter too...
        if 'Refined cell' in o:
            # feed these back to the indexer
            indxr._indxr_cell = tuple(map(float, o.split()[-6:]))
            # record the refined cell parameters for getting later
            self._intgr_cell = tuple(map(float, o.split()[-6:]))
        # FIXME do I need this? I think that the refined distance
        # is passed in as an integration parameter (see below)
        if 'Detector distance as a' in o:
            # look through the "cycles" to get the final refined
            # distance - average the values on the last table row
            j = i + 1
            while output[j].strip() != '':
                j += 1
            distances = map(float, output[j - 1].split()[2:])
            distance = 0.0
            for d in distances:
                distance += d
            distance /= len(distances)
            indxr._indxr_refined_distance = distance
        if 'YSCALE as a function' in o:
            # look through the "cycles" to get the final refined
            # yscale value - average the values on the last table row
            j = i + 1
            while output[j].strip() != '':
                j += 1
            yscales = map(float, output[j - 1].split()[2:])
            yscale = 0.0
            for y in yscales:
                yscale += y
            yscale /= len(yscales)
            self.set_integrater_parameter('mosflm',
                                          'distortion yscale',
                                          yscale)
        # next look for the distortion & raster parameters
        # see FIXME at the top of this file from 16/AUG/06
        if 'Final optimised raster parameters:' in o:
            self.set_integrater_parameter('mosflm',
                                          'raster',
                                          o.split(':')[1].strip())
        if 'Separation parameters updated to' in o:
            tokens = o.replace('mm', ' ').split()
            self.set_integrater_parameter('mosflm',
                                          'separation',
                                          '%s %s' % \
                                          (tokens[4], tokens[8]))
        if 'XCEN YCEN XTOFRA' in o:
            numbers = output[i + 1].split()
            # this should probably be done via the FrameProcessor
            # interface...
            self.set_integrater_parameter('mosflm',
                                          'beam',
                                          '%s %s' % \
                                          (numbers[0], numbers[1]))
            # FIXME should this go through the FP interface?
            # this conflicts with the calculation above
            # of the average distance as well...
            self.set_integrater_parameter('mosflm',
                                          'distance',
                                          numbers[3])
            self.set_integrater_parameter('mosflm',
                                          'distortion tilt',
                                          numbers[5])
            self.set_integrater_parameter('mosflm',
                                          'distortion twist',
                                          numbers[6])
        # FIXME does this work if this mosflm is not
        # the one being used as an indexer? - probably not -
        # I will need a getIndexer.setMosaic() or something...
        if 'Refined mosaic spread' in o:
            indxr._indxr_mosaic = float(o.split()[-1])
    # hack... FIXME (maybe?)
    # self._indxr_done = True
    self.set_indexer_done(True)
    # shouldn't need this.. remember that Python deals in pointers!
    self.set_indexer_payload('mosflm_orientation_matrix', open(
        os.path.join(self.get_working_directory(),
                     'xiarefine.mat'), 'r').readlines())
    indxr.set_indexer_payload('mosflm_orientation_matrix', open(
        os.path.join(self.get_working_directory(),
                     'xiarefine.mat'), 'r').readlines())
    return rms_values_last
def _mosflm_integrate(self):
    '''Perform the actual integration, based on the results of the
    cell refinement or indexing (they have the equivalent form.)

    Returns the path of the integrated MTZ file. Side effects: records
    the integrated batch range and reflection count, may pick up a
    corrected GAIN (and flag a rerun), and may set the integrater
    resolution limit.

    Raises RuntimeError on a serious mosflm failure and
    BadLatticeError when the refined mosaic spread goes negative or
    the mean weighted residual is large.'''

    # the only way to get here is through the cell refinement,
    # unless we're trying to go fast - which means that we may
    # have to create an indexer if fast - if we're going slow
    # then this should have been done by the cell refinement
    # stage...

    # FIXME add "am I going fast" check here

    if not self.get_integrater_indexer():
        # this wrapper can present the indexer interface
        # if needed, so do so. if this set command has
        # been called already this should not be used...
        self.set_integrater_indexer(self)

    # get the things we need from the indexer - beware that if
    # the indexer has not yet been run this may spawn other
    # jobs...

    indxr = self.get_integrater_indexer()

    if not indxr.get_indexer_payload('mosflm_orientation_matrix'):
        # we will have to do some indexing ourselves - the
        # existing indexing job doesn't provide an orientation
        # matrix

        # FIXME this needs implementing - copy information
        # from this indexer to myself, then reset my indexer too me

        # FIXME this should probably raise an exception...
        pass

    lattice = indxr.get_indexer_lattice()
    mosaic = indxr.get_indexer_mosaic()
    cell = indxr.get_indexer_cell()
    beam = indxr.get_indexer_beam()
    distance = indxr.get_indexer_distance()
    matrix = indxr.get_indexer_payload('mosflm_orientation_matrix')

    # check to see if there are parameters which I should be using for
    # integration etc in here - if there are, use them - this will
    # only happen when the integration is "fast" and they haven't
    # been eaten by the cell refinement process

    integration_params = indxr.get_indexer_payload(
        'mosflm_integration_parameters')

    if integration_params:
        # copy them somewhere useful... into the dictionary?
        # yes - that way they can be recycled...
        # after that, zap them because they will be obsolete!
        if 'separation' in integration_params:
            self.set_integrater_parameter(
                'mosflm', 'separation',
                '%f %f' % tuple(integration_params['separation']))
        if 'raster' in integration_params:
            self.set_integrater_parameter(
                'mosflm', 'raster',
                '%d %d %d %d %d' % tuple(integration_params['raster']))

    # NOTE the payload key here previously read
    # 'mosflm_integration_params', which matches neither the key
    # written by _index() nor the one read above - so the stale
    # parameters were never actually cleared. Fixed to the key that
    # is really in use.
    indxr.set_indexer_payload('mosflm_integration_parameters', None)

    # here need to check the LATTICE - which will be
    # something like tP etc. FIXME how to cope when the
    # spacegroup has been explicitly stated?

    spacegroup_number = lattice_to_spacegroup(lattice)

    # write the orientation matrix out for mosflm to read back -
    # use a context manager so the handle is closed even on error
    with open(os.path.join(self.get_working_directory(),
                           'xiaintegrate.mat'), 'w') as matrix_file:
        for record in matrix:
            matrix_file.write(record)

    # then start the integration

    task = 'Integrate frames %d to %d' % self._intgr_wedge
    self.set_task(task)

    summary_file = 'summary_%s.log' % spacegroup_number

    self.add_command_line('SUMMARY')
    self.add_command_line(summary_file)

    self.start()

    # if the integrater interface has the project, crystal, dataset
    # information available, pass this in to mosflm and also switch
    # on the harvesting output. warning! if the harvesting is switched
    # on then this means that the files will all go to the same
    # place - for the moment move this to cwd.

    pname, xname, dname = self.get_integrater_project_info()
    if pname is not None and xname is not None and dname is not None:
        Chatter.write('Harvesting: %s/%s/%s' % (pname, xname, dname))
        # harvest file name will be %s.mosflm_run_start_end % dname
        self.input('harvest on')
        self.input('pname %s' % pname)
        self.input('xname %s' % xname)
        self.input('dname %s' % dname)
        self.input('ucwd')

    self.input('template "%s"' % self.get_template())
    self.input('directory "%s"' % self.get_directory())

    # generate the mask information from the detector class
    mask = standard_mask(self._fp_header['detector_class'])
    for m in mask:
        self.input(m)

    self.input('matrix xiaintegrate.mat')

    self.input('beam %f %f' % beam)
    self.input('distance %f' % distance)
    self.input('symmetry %s' % spacegroup_number)
    self.input('mosaic %f' % mosaic)

    # note well that the beam centre is coming from indexing so
    # should be already properly handled - likewise the distance

    if self.get_wavelength_prov() == 'user':
        self.input('wavelength %f' % self.get_wavelength())

    # get all of the stored parameter values
    parameters = self.get_integrater_parameters('mosflm')
    for name in parameters:
        self.input('%s %s' % (name, str(parameters[name])))

    # in here I need to get the GAIN parameter from the sweep
    # or from somewhere in memory....

    if self._mosflm_gain:
        self.input('gain %5.2f' % self._mosflm_gain)

    # check for resolution limits
    if self._intgr_reso_high > 0.0:
        self.input('resolution %f' % self._intgr_reso_high)

    # set up the integration
    self.input('postref fix all')
    self.input('separation close')

    # FIXME this is a horrible hack - I at least need to
    # sand box this ...
    if self.get_header_item('detector') == 'raxis':
        self.input('adcoffset 0')

    self.input('process %d %d' % self._intgr_wedge)

    self.input('go')

    # that should be everything
    self.close_wait()

    # get the log file
    output = self.get_all_output()

    # record a copy of it, perhaps
    if self.get_integrater_sweep_name():
        pname, xname, dname = self.get_integrater_project_info()
        FileHandler.record_log_file('%s %s %s %s mosflm integrate' % \
                                    (self.get_integrater_sweep_name(),
                                     pname, xname, dname),
                                    self.get_log_file())

    # look for things that we want to know...
    # that is, the output reflection file name, the updated
    # value for the gain (if present,) any warnings, errors,
    # or just interesting facts.

    # sentinel values - any real batch number will replace these
    integrated_images_first = 1.0e6
    integrated_images_last = -1.0e6

    # look for major errors first, so they are not masked by any
    # secondary parse failure below
    for o in output:
        if 'LWBAT: error in ccp4_lwbat' in o:
            raise RuntimeError('serious mosflm error - inspect %s' %
                               self.get_log_file())

    for i, o in enumerate(output):
        if 'Integrating Image' in o:
            batch = int(o.split()[2])
            if batch < integrated_images_first:
                integrated_images_first = batch
            if batch > integrated_images_last:
                integrated_images_last = batch

        if 'ERROR IN DETECTOR GAIN' in o:
            # mosflm reports the gain it believes is correct on one
            # of the following lines as "... set to <value>,"
            for j in range(i, i + 10):
                if output[j].split()[:2] == ['set', 'to']:
                    gain = float(output[j].split()[-1][:-1])
                    self.set_integrater_parameter('mosflm',
                                                  'gain',
                                                  gain)
                    self.set_integrater_export_parameter('mosflm',
                                                         'gain',
                                                         gain)
                    Science.write('GAIN found to be %f' % gain)

                    # this should probably override the input
                    self._mosflm_gain = gain
                    self._mosflm_rerun_integration = True

        if 'Smoothed value for refined mosaic spread' in o:
            mosaic = float(o.split()[-1])
            if mosaic < 0.0:
                raise BadLatticeError('negative mosaic spread')

        if 'WRITTEN OUTPUT MTZ FILE' in o:
            self._mosflm_hklout = os.path.join(
                self.get_working_directory(),
                output[i + 1].split()[-1])

            Science.write('Integration output: %s' %
                          self._mosflm_hklout)

        if 'Number of Reflections' in o:
            self._intgr_n_ref = int(o.split()[-1])

        if 'MOSFLM HAS TERMINATED EARLY' in o:
            Chatter.write('Mosflm has failed in integration')
            message = 'The input was:\n\n'
            # renamed from "input" - do not shadow the builtin
            for input_record in self.get_all_input():
                message += ' %s' % input_record
            Chatter.write(message)
            raise RuntimeError(
                'integration failed: reason unknown (log %s)' %
                self.get_log_file())

    self._intgr_batches_out = (integrated_images_first,
                               integrated_images_last)

    Chatter.write('Processed batches %d to %d' % \
                  self._intgr_batches_out)

    # write the report for each image as .*-#$ to Chatter -
    # detailed report will be written automagically to science...

    parsed_output = _parse_mosflm_integration_output(output)
    spot_status = _happy_integrate_lp(parsed_output)

    # inspect the output for e.g. very high weighted residuals

    images = sorted(parsed_output)

    max_weighted_residual = 0.0

    # FIXME bug 2175 this should probably look at the distribution
    # of values rather than the peak, since this is probably a better
    # diagnostic of a poor lattice.

    residuals = [parsed_output[i]['weighted_residual'] for i in images]
    mean, sd = mean_sd(residuals)
    Chatter.write('Weighted RMSD: %.2f (%.2f)' % \
                  (mean, sd))

    for i in images:
        data = parsed_output[i]
        if data['weighted_residual'] > max_weighted_residual:
            max_weighted_residual = data['weighted_residual']

    # NOTE this check is deliberately disabled ("and False") in the
    # original - kept as-is pending a decision on bug 2175
    if max_weighted_residual > 3.0 and False:
        raise BadLatticeError('large weighted residual (%4.2f)' %
                              max_weighted_residual)

    if mean > 2.0:
        raise BadLatticeError('large mean residual (%.2f)' % mean)

    if len(spot_status) > 60:
        Chatter.write('Integration status per image (60/record):')
    else:
        Chatter.write('Integration status per image:')

    for chunk in [spot_status[i:i + 60] \
                  for i in range(0, len(spot_status), 60)]:
        Chatter.write(chunk)

    Chatter.write(
        '"o" => ok "%" => iffy rmsd "!" => bad rmsd')
    Chatter.write(
        '"O" => overloaded "#" => many bad "." => blank')

    # if we have not processed to a given resolution, fix
    # the limit for future reference

    # bug # 2040 - this is one place where we could cut out the
    # middle-man and make things quicker, by not resetting the
    # resolution limit...

    if not self._intgr_reso_high and not Flags.get_quick():
        resolution = decide_integration_resolution_limit(output)
        self.set_integrater_high_resolution(resolution)
        Chatter.write('Set resolution limit: %5.2f' % resolution)

    return self._mosflm_hklout
return MosflmWrapper()
if __name__ == '__main_old__':
    # run a demo test - note this entry point is deliberately
    # disabled (__name__ can never equal '__main_old__'); kept for
    # reference only

    if 'XIA2_ROOT' not in os.environ:
        raise RuntimeError('XIA2_ROOT not defined')

    m = Mosflm()
    directory = os.path.join(os.environ['XIA2_ROOT'],
                             'Data', 'Test', 'Images')

    # from Labelit
    m.set_beam((108.9, 105.0))

    m.setup_from_image(os.path.join(directory, '12287_1_E1_001.img'))

    # FIXME 16/AUG/06 this should be set automatically - there is no
    # reason to manually specify the images
    m.add_indexer_image_wedge(1)
    m.add_indexer_image_wedge(90)
    # m.set_indexer_input_lattice('aP')

    # report the refined indexing solution - print() syntax works
    # under both python 2 and python 3
    print('Refined beam is: %6.2f %6.2f' % m.get_indexer_beam())
    print('Distance: %6.2f' % m.get_indexer_distance())
    print('Cell: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % m.get_indexer_cell())
    print('Lattice: %s' % m.get_indexer_lattice())
    print('Mosaic: %6.2f' % m.get_indexer_mosaic())

    print('Matrix:')
    for record in m.get_indexer_payload('mosflm_orientation_matrix'):
        print(record[:-1])
if __name__ == '__main__':
    # run a demo test

    if 'XIA2_ROOT' not in os.environ:
        raise RuntimeError('XIA2_ROOT not defined')

    m = Mosflm()
    directory = os.path.join(os.environ['XIA2_ROOT'],
                             'Data', 'Test', 'Images')

    # from Labelit
    m.set_beam((108.9, 105.0))

    m.setup_from_image(os.path.join(directory, '12287_1_E1_001.img'))

    # FIXME 16/AUG/06 this should be set automatically - there is no
    # reason to manually specify the images
    m.add_indexer_image_wedge(1)
    m.add_indexer_image_wedge(90)
    # m.set_indexer_input_lattice('aP')

    # to test the awkward indexing problems -
    # this is not the default solution

    # m.set_indexer_input_lattice('mP')
    # m.set_indexer_input_cell((51.72, 51.66, 157.89, 90.00, 90.00, 90.00))

    # report the refined indexing solution - print() syntax works
    # under both python 2 and python 3
    print('Refined beam is: %6.2f %6.2f' % m.get_indexer_beam())
    print('Distance: %6.2f' % m.get_indexer_distance())
    print('Cell: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % m.get_indexer_cell())
    print('Lattice: %s' % m.get_indexer_lattice())
    print('Mosaic: %6.2f' % m.get_indexer_mosaic())

    print('Matrix:')
    for record in m.get_indexer_payload('mosflm_orientation_matrix'):
        print(record[:-1])

    # integration half of the demo - deliberately disabled in the
    # original with "if False:"; kept disabled here
    if False:
        n = Mosflm()
        n.setup_from_image(os.path.join(directory, '12287_1_E1_001.img'))
        n.set_integrater_indexer(m)

        n.integrate()

        print('Refined beam is: %6.2f %6.2f' % n.get_indexer_beam())
        print('Distance: %6.2f' % n.get_indexer_distance())
        print('Cell: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % n.get_indexer_cell())
        print('Lattice: %s' % n.get_indexer_lattice())
        print('Mosaic: %6.2f' % n.get_indexer_mosaic())

        print('Matrix:')
        for record in n.get_indexer_payload('mosflm_orientation_matrix'):
            print(record[:-1])
# Require both correct and p1 results
#!/usr/bin/env python
# Mosflm.py
# Copyright (C) 2006 CCLRC, Graeme Winter
#
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
#
# 23rd June 2006
#
# A wrapper for the data processing program Mosflm, with the following
# methods to provide functionality:
#
# index: autoindexing functionality (implemented)
# integrate: process a frame or a dataset (implemented)
#
# Internally this will also require cell refinement and so on, but this
# will be implicit - the cell refinement is a local requirement for
# mosflm only - though this will provide some useful functionality
# for diagnosing wrong indexing solutions.
#
# Input requirements:
#
# At the minimum the indexing needs a beam centre, some images to index
# from and the template and directory where these images may be found.
# The indexing will return the most likely solution, or the one specified
# if this has been done. The interface should look like indexing with
# labelit. However, a matrix file to index against could optionally be
# supplied, to help with processing MAD data.
#
# For integration, an appropriate matrix file, beam centre, lattice
# (spacegroup) and mosaic spread are needed, along with (optionally) a
# gain and definitely images to process. A resolution limit may also be
# supplied.
#
# The following are good example scripts of how this could work:
#
# [autoindexing + cell refinement]
#
# ipmosflm << eof
# beam 108.9 105.0
# directory /data/graeme/12287
# template 12287_1_E1_###.img
# autoindex dps image 1 ! can add refine after dps
# autoindex dps image 60 ! can add refine after dps
# mosaic estimate
# newmat index.mat
# go
# ! cell refinement stuff - needs more than 2 images
# newmat refined.mat
# postref multi segments 2
# process 1 3
# go
# process 58 60
# go
# eof
#
# [integration]
#
# ipmosflm hklout 12287_1_E1.mtz << eof
# resolution 1.65
# beam 108.9 105.0
# directory /data/graeme/12287
# template 12287_1_E1_###.img
# matrix refined.mat
# mosaic 0.51
# limits exclude 0.9 103.9 208.9 105.5
# limits exclude 103.5 4.4 106.0 209.0
# limits quadrilateral 110.6 105.5 107.2 105.8 104.4 4.7 108.7 4.7
# gain 0.13
# separation close
# postref fix all
# process 1 60
# go
# eof
#
# FIXED 16/AUG/06 the distortion & raster parameters decided on in the
# cell refinement stages need to be recycled to use in integration. This is
# demonstrated by running an interactive (through the GUI) mosflm autoindex
# and refine job, then dumping the runit script. The important information is
# in the following records:
#
# Final optimised raster parameters: 15 17 12 5 6
# => RASTER keyword
# Separation parameters updated to 0.71mm in X and 0.71mm in Y
# => SEPARATION keyword
# XCEN YCEN XTOFRA XTOFD YSCALE TILT TWIST
# 108.97 105.31 0.9980 149.71 0.9984 -13 -46
# => BEAM, DISTANCE, DISTORTION keywords (note that the numbers
# are on the next line here)
#
# This should make the resulting integration more effective. The idea
# for this implementation is that the numbers end up in the "integrate
# set parameter" dictionary and are therefore recycled, in the same way
# that the GAIN currently works.
#
# FIXED 23/AUG/06 If the mosaic spread is refined to a negative number
# during the cell refinement, raise an exception asserting
# that the lattice is wrong. This should eliminate that
# lattice and all possibilities above it in symmetry from
# the list of possible lattices, and the next one down
# should be selected. This will require the "list of allowed
# lattices" stuff to be implemented, which is another
# FIXME all of it's own...
#
# FIXED 23/AUG/06 Another one - the raster parameters decided in indexing
# should be used in the cell refinement if the indexer was
# a mosflm and so is the refiner/integrater - which means
# that the indexer needs to be able to store integration
# parameters in the same way that the integrater does...
# Aha - this can go in the payload as something like
# "mosflm integration parameters" - excellent! Here are the
# complaints I am trying to correct:
#
# **** Information ****
# No RASTER keyword has been given.
# (Gives the starting parameters for the measurement box).
# Suitable parameters will be determined automatically.
#
#
# **** Information ****
# No SEPARATION keyword has been given.
# (Gives minimum spot separation before spots are flagged as overlapping.
# Suitable parameters will be determined automatically.
#
# FIXED 23/AUG/06 Yet another one, though this may apply more to a higher
# level application than this module - there should be an
# "estimate resolution" during the integration, so that
# the final set contains good measurements, good profiles.
#
# FIXME 4/SEP/06 Make sure that the SUMMARY files & friends are written
# to named files, to make sure they don't get overwritten.
# Also more careful naming of integrate.log &c. needed.
#
# FIXME 08/SEP/06 Look at the DELX, DELY of profiles in the output, since
# this can be an indicator of funky things going on in
# the integration. I seem to recall that TS00 complains
# about this, with the allegation of crystal slippage.
#
# FIXME 11/SEP/06 Need to mask "dead" areas of the detector. E.g. a static
# mask from the detector class, plus some kind of mask
# computed from the image [the latter is research!]
#
# FIXME 11/SEP/06 Also want to check that the resolution of the data is
# better than (say) 3.5A, because below that Mosflm has
# trouble refining the cell etc. Could add a resolution
# estimate to the output of Indexer, which could either
# invoke labelit.stats_distl or grep the results from
# the Mosflm output...
#
# Look for record "99% have resolution less than"...
#
# FIXED 27/SEP/06 GAIN & detectors - all data processed for one crystal on
# one detector should have the same value for the GAIN -
# this will mean that this has to be recycled. Add a framework
# to integrater to allow parameters to be exported, in
# the same way as they can be recycled via the integrater
# parameter framework. This is done - look at Integrater.
#
# FIXME 19/OCT/06 it may be more reliable to do the indexing first then run a
# separate job to estimate the mosaic spread. Also important
# if this is to be used in DNA... this will need the matrix,
# resolution, raster parameters, refined beam.
#
# FIXED 23/OCT/06 need to be able to do something useful when the cell
# refinement gives a "large" error in something... in
# particular be able to use more images for cell refinement
# and have another go! Done.
#
# FIXME 28/NOV/06 need to rerun integration with the correct GAIN set before
# assessing I/sigma limits, since these will depend on the
# GAIN (however this could be weak - assess the benefit in
# repeating the integration.)
#
# FIXED 06/FEB/07 need to be able to track the autoindex solution number,
# so in cases where I want an exact solution I can fetch
# it out from the list of solutions and FORCE mosflm
# to give me the right answer.
#
# This is going to have to work as follows. If there is
# a "horrible" exception, then the "correct" solution number
# needs to be obtained and set. The indexing done flag needs
# to be set as False, then the _index method should return.
# On the next pass the correct solution should be selected
# and everything should be peachy. On this correct solution
# the recorded solution number should be reset to 0.
#
# FIXME 29/JUN/07 add functionality just to use this as a replacement for
# Diffdump in highly extreme circumstances - note well that
# this could be very slow...
import os
import sys
import math

# make sure the xia2 environment is configured and on sys.path
# before importing any project modules below.  membership tests and
# raise-with-call syntax work under both python 2 and python 3
# (dict.has_key and "raise E, msg" are python-2-only)
if 'XIA2CORE_ROOT' not in os.environ:
    raise RuntimeError('XIA2CORE_ROOT not defined')

if 'XIA2_ROOT' not in os.environ:
    raise RuntimeError('XIA2_ROOT not defined')

if not os.path.join(os.environ['XIA2CORE_ROOT'], 'Python') in sys.path:
    sys.path.append(os.path.join(os.environ['XIA2CORE_ROOT'],
                                 'Python'))

if not os.environ['XIA2_ROOT'] in sys.path:
    sys.path.append(os.environ['XIA2_ROOT'])

from Driver.DriverFactory import DriverFactory
from Decorators.DecoratorFactory import DecoratorFactory

# interfaces that this will present
from Schema.Interfaces.FrameProcessor import FrameProcessor
from Schema.Interfaces.Indexer import Indexer
from Schema.Interfaces.Integrater import Integrater

# output streams &c.
from Handlers.Streams import Admin, Science, Status, Chatter, Debug
from Handlers.Citations import Citations
from Handlers.Flags import Flags

# helpers
from MosflmHelpers import _happy_integrate_lp, \
     _parse_mosflm_integration_output, decide_integration_resolution_limit, \
     _parse_mosflm_index_output, standard_mask, \
     _get_indexing_solution_number

from Modules.GainEstimater import gain
from Handlers.Files import FileHandler

from lib.Guff import auto_logfiler, mean_sd
from lib.SymmetryLib import lattice_to_spacegroup

from Experts.MatrixExpert import transmogrify_matrix

# exceptions
from Schema.Exceptions.BadLatticeError import BadLatticeError
from Schema.Exceptions.IntegrationError import IntegrationError
from Schema.Exceptions.IndexingError import IndexingError

# other classes which are necessary to implement the integrater
# interface (e.g. new version, with reindexing as the finish...)
from Wrappers.CCP4.Reindex import Reindex
def Mosflm(DriverType = None):
'''A factory for MosflmWrapper classes.'''
DriverInstance = DriverFactory.Driver(DriverType)
CCP4DriverInstance = DecoratorFactory.Decorate(DriverInstance, 'ccp4')
class MosflmWrapper(CCP4DriverInstance.__class__,
FrameProcessor,
Indexer,
Integrater):
'''A wrapper for Mosflm, using the CCP4-ified Driver.'''
def __init__(self):
    '''Initialise the Mosflm wrapper: set up the CCP4-ified Driver,
    the FrameProcessor / Indexer / Integrater interfaces, and the
    wrapper-local bookkeeping attributes.'''

    # generic things
    CCP4DriverInstance.__class__.__init__(self)
    self.set_executable('ipmosflm')

    FrameProcessor.__init__(self)
    Indexer.__init__(self)
    Integrater.__init__(self)

    # local parameters used in autoindexing
    self._mosflm_autoindex_sol = 0

    # local parameters used in cell refinement
    self._mosflm_cell_ref_images = None

    # local parameters used in integration
    self._mosflm_rerun_integration = False
    self._mosflm_hklout = ''
    self._mosflm_gain = None
def diffdump(self, image):
    '''Run a diffdump style dump to check the parameters in the
    image header...

    Currently a stub - no checking is performed (c/f the
    FIXME 29/JUN/07 note in the module header about using this as a
    Diffdump replacement).'''
    pass
def _estimate_gain(self):
    '''Estimate a GAIN appropriate for reducing this set.'''

    # prefer a gain passed in through the frameprocessor
    # interface - bug # 2333
    if self.get_gain():
        self._mosflm_gain = self.get_gain()

    # already known (either just set, or from a previous call) -
    # nothing more to do
    if self._mosflm_gain:
        return

    images = self.get_matching_images()

    if len(images) < 10:
        # few images - sample every one of them
        sample = list(images)
    else:
        # plenty of images - sample 5 from the start and 5 from
        # the end of the sweep
        sample = list(images[:5]) + list(images[-5:])

    gains = [gain(self.get_image_name(i)) for i in sample]

    self._mosflm_gain = sum(gains) / len(gains)

    Chatter.write('Estimate gain of %5.2f' % self._mosflm_gain)

    return
def _index_prepare(self):
    '''Get ready for autoindexing: if no images have yet been
    selected, choose some from the image headers.'''

    # exact comparison with the empty list, as in the original -
    # other falsy values are deliberately left untouched
    if self._indxr_images == []:
        self._index_select_images()

    return
def _index_select_images(self):
    '''Select correct images based on image headers.'''

    # FIXME perhaps this should be somewhere central, because
    # LabelitScreen will share the same implementation

    phi_width = self.get_header_item('phi_width')
    images = self.get_matching_images()

    # FIXME what to do if phi_width is 0.0? set it
    # to 1.0! This should be safe enough... though a warning
    # would not go amiss...
    if phi_width == 0.0:
        Chatter.write('Phi width 0.0? Assuming 1.0!')
        phi_width = 1.0

    # always use the first image, plus the one nearest to 90
    # degrees away - or the last image when the sweep is shorter
    self.add_indexer_image_wedge(images[0])

    ninety_deg_image = int(90.0 / phi_width)
    if ninety_deg_image in images:
        self.add_indexer_image_wedge(ninety_deg_image)
    else:
        self.add_indexer_image_wedge(images[-1])

    return
def _refine_select_images(self, num_wedges, mosaic):
    '''Select images for cell refinement based on image headers.

    num_wedges - how many wedges to refine with (at most 3).
    mosaic - current mosaic spread estimate, used to size each wedge.

    Returns a list of (first, last) image-number pairs.
    Raises IntegrationError for more than 3 wedges and RuntimeError
    when there are too few images for 2-wedge refinement.'''

    # first select the images to use for cell refinement
    # if spacegroup >= 75 use one wedge of 2-3 * mosaic spread, min
    # 3 images, else use two wedges of this size as near as possible
    # to 90 degrees separated. However, is this reliable enough?
    # FIXME this needs to be established, in particular in the case
    # where the lattice is wrongly assigned

    # WARNING this will fail if phi width was 0 - should
    # never happen though

    if num_wedges > 3:
        # allow a rerun later on, perhaps? c/f integrating TS01
        # where this failure is an indication that lattice != oI
        self._mosflm_cell_ref_images = None
        raise IntegrationError('cannot cope with more than 3 wedges')

    phi_width = self.get_header_item('phi_width')

    # FIXME what to do if phi_width is 0.0? set it
    # to 1.0! This should be safe enough... though a warning
    # would not go amiss...
    if phi_width == 0.0:
        Chatter.write('Phi width 0.0? Assuming 1.0!')
        phi_width = 1.0

    min_images = max(4, int(2 * mosaic / phi_width))

    # next select what we need from the list...
    images = self.get_matching_images()

    # bug # 2344 - does this every really help, other than
    # being totally literal? if num_wedges == 3 then this
    # will probably just end up using all images...

    if len(images) < num_wedges * min_images and num_wedges == 2:
        raise RuntimeError('not enough images to refine unit cell')

    cell_ref_images = []
    cell_ref_images.append((images[0], images[min_images - 1]))

    # FIXME 23/OCT/06 need to be able to cope with more than two
    # wedges - in this case have the spread evenly between 0 and
    # 90 degrees as that measures all of the required unit cell
    # vectors..

    if num_wedges == 2:
        ideal_last = int(90.0 / phi_width) + min_images
        if ideal_last in images:
            cell_ref_images.append(
                (images[ideal_last - min_images + 1],
                 images[ideal_last]))
        else:
            # there aren't 90 degrees of images
            cell_ref_images.append((images[-min_images],
                                    images[-1]))

    elif num_wedges == 3:
        ideal_middle = int(45.0 / phi_width) + min_images
        if ideal_middle in images:
            cell_ref_images.append((images[ideal_middle - min_images],
                                    images[ideal_middle - 1]))
        else:
            # there aren't 45 degrees of images

            # bug # 2344 - we may be trying to reduce data from
            # a partial data set, in which case it is important to
            # give this a proper go... now Mosflm can take
            # up to 30 frames for postrefinement and I have
            # found that 3 x 10 is better than 2 x 15, so
            # if this is all you have, then go ahead. Now,
            # if the spacegroup is less than 75 then it is
            # likely that the refined cell parameters may not
            # be perfect, but they will probably be good enough,
            # so allow for a little slack (like say up to 0.2A
            # or 1% or something...)

            # raise RuntimeError, \
            # 'not enough data to do 3 wedge cell refinement'

            # NOTE lattice / spacegroup_number look unused here, but
            # get_integrater_indexer() may trigger indexing as a side
            # effect, so the calls are kept
            lattice = self.get_integrater_indexer().get_indexer_lattice()
            spacegroup_number = lattice_to_spacegroup(lattice)

            Chatter.write('Less than 45 degrees so using %d images!' %
                          min(30, len(images)))

            if len(images) <= 30:
                # use all 30 images for cell refinement
                cell_ref_images = [(min(images), max(images))]
            else:
                # set this to first ten, middle ten and last ten images
                # NB floor division, so the index stays an int under
                # python 3 as well ("/" there would yield a float)
                middle = len(images) // 2
                cell_ref_images = [(images[0], images[9]),
                                   (images[middle - 4], images[middle + 5]),
                                   (images[-10], images[-1])]

            return cell_ref_images

        # add the final (near-90-degree) wedge for the 3-wedge case
        ideal_last = int(90.0 / phi_width) + min_images

        if ideal_last in images:
            cell_ref_images.append((images[ideal_last - min_images],
                                    images[ideal_last]))
        else:
            # there aren't 90 degrees of images
            cell_ref_images.append((images[-min_images],
                                    images[-1]))

    return cell_ref_images
def _index(self):
    '''Implement the indexer interface.

    Runs mosflm autoindexing on the selected image wedges, then
    parses the output to record the refined cell, beam, lattice,
    mosaic spread, distance and integration hints. If mosflm picked
    the wrong solution against a preset cell, records the correct
    solution number, clears the "done" flag and returns so that
    indexing is re-run.'''

    Citations.cite('mosflm')

    self.reset()

    # collect the unique image numbers from the wedge list
    _images = []
    for i in self._indxr_images:
        for j in i:
            if not j in _images:
                _images.append(j)

    _images.sort()

    task = 'Autoindex from images:'
    for i in _images:
        task += ' %s' % self.get_image_name(i)
    self.set_task(task)

    auto_logfiler(self)
    self.start()

    self.input('template "%s"' % self.get_template())
    self.input('directory "%s"' % self.get_directory())
    self.input('newmat xiaindex.mat')

    if self.get_beam_prov() == 'user':
        self.input('beam %f %f' % self.get_beam())

    if self.get_wavelength_prov() == 'user':
        self.input('wavelength %f' % self.get_wavelength())

    if self.get_distance_prov() == 'user':
        self.input('distance %f' % self.get_distance())

    # FIXME need to be able to handle an input
    # unit cell here - should also be able to
    # handle writing in the crystal orientation (which
    # would be useful) but I may save that one for
    # later... c/f TS02/1VK8

    if self._indxr_input_cell:
        self.input('cell %f %f %f %f %f %f' % \
                   self._indxr_input_cell)

    if self._indxr_input_lattice is not None:
        spacegroup_number = lattice_to_spacegroup(
            self._indxr_input_lattice)
        self.input('symmetry %d' % spacegroup_number)

    # FIXME 25/OCT/06 have found that a threshold of 10 works
    # better for TS01/LREM - need to make sure that this is
    # generally applicable...
    for i in _images:
        if self._mosflm_autoindex_sol:
            self.input(
                'autoindex dps refine image %d thresh 10 solu %d' % \
                (i, self._mosflm_autoindex_sol))
        else:
            self.input(
                'autoindex dps refine image %d thresh 10' % i)

    # now forget this to prevent weird things happening later on
    if self._mosflm_autoindex_sol:
        self._mosflm_autoindex_sol = 0

    self.input('mosaic estimate')
    self.input('go')

    self.close_wait()

    output = self.get_all_output()

    intgr_params = { }

    # look up other possible indexing solutions (not well - in
    # standard settings only!) This is moved earlier as it could
    # result in returning if Mosflm has selected the wrong
    # solution!

    try:
        self._indxr_other_lattice_cell = _parse_mosflm_index_output(
            output)

        # check that the selected unit cell matches - and if
        # not raise a "horrible" exception

        if self._indxr_input_cell:
            for o in output:
                if 'Final cell (after refinement)' in o:
                    indxr_cell = tuple(map(float, o.split()[-6:]))

                    for j in range(6):
                        if math.fabs(self._indxr_input_cell[j] -
                                     indxr_cell[j]) > 2.0:
                            Chatter.write(
                                'Mosflm autoindexing did not select ' +
                                'correct (target) unit cell')
                            raise RuntimeError(
                                'something horrible happened in indexing')

    except RuntimeError as e:
        # check if mosflm rejected a solution we have it
        if 'horribl' in str(e):
            # ok it did - time to break out the big guns...
            if not self._indxr_input_cell:
                raise RuntimeError(
                    'error in solution selection when not preset')

            self._mosflm_autoindex_sol = _get_indexing_solution_number(
                output,
                self._indxr_input_cell,
                self._indxr_input_lattice)

            # set the fact that we are not done...
            self.set_indexer_done(False)

            # and return - hopefully this will restart everything
            return
        else:
            # unrelated error - re-raise, keeping the original
            # traceback ("raise e" would discard it)
            raise

    for o in output:
        if 'Final cell (after refinement)' in o:
            self._indxr_cell = tuple(map(float, o.split()[-6:]))
        if 'Beam coordinates of' in o:
            self._indxr_refined_beam = tuple(map(float, o.split(
                )[-2:]))

        # FIXED this may not be there if this is a repeat indexing!
        if 'Symmetry:' in o:
            self._indxr_lattice = o.split(':')[1].split()[0]

        # so we have to resort to this instead...
        if 'Refining solution #' in o:
            spagnum = int(o.split(')')[0].split()[-1])
            lattice_to_spacegroup_dict = {'aP':1, 'mP':3, 'mC':5,
                                          'oP':16, 'oC':20, 'oF':22,
                                          'oI':23, 'tP':75, 'tI':79,
                                          'hP':143, 'hR':146,
                                          'cP':195, 'cF':196,
                                          'cI':197}

            # invert the table: spacegroup number -> lattice symbol
            spacegroup_to_lattice = dict(
                (number, symbol) for symbol, number in
                lattice_to_spacegroup_dict.items())

            self._indxr_lattice = spacegroup_to_lattice[spagnum]

        # in here I need to check if the mosaic spread estimation
        # has failed. If it has it is likely that the selected
        # lattice has too high symmetry, and the "next one down"
        # is needed

        if 'The mosaicity has been estimated' in o:
            self._indxr_mosaic = float(o.split('>')[1].split()[0])

        # alternatively this could have failed - which happens
        # sometimes...

        if 'The mosaicity estimation has not worked for some' in o:
            # this is a problem... in particular with the
            # mosflm built on linux in CCP4 6.0.1...
            # FIXME this should be a specific kind of
            # exception e.g. an IndexError
            raise IndexingError('mosaicity estimation failed')

        # or it may alternatively look like this...

        if 'The mosaicity has NOT been estimated' in o:
            # then consider setting it do a default value...
            # equal to the oscillation width (a good guess)

            phi_width = self.get_header_item('phi_width')
            Chatter.write(
                'Mosaic estimation failed, so guessing at %4.2f' % \
                phi_width)
            self._indxr_mosaic = phi_width

        # mosflm doesn't refine this in autoindexing...
        if 'Crystal to detector distance of' in o:
            self._indxr_refined_distance = float(o.split(
                )[5].replace('mm', ''))

        # record raster parameters and so on, useful for the
        # cell refinement etc - this will be added to a
        # payload dictionary of mosflm integration keywords
        # look for "measurement box parameters"

        if 'parameters have been set to' in o:
            # list() so the payload is a concrete sequence under
            # python 3 as well (map returns an iterator there,
            # which would break the later tuple-formatting)
            intgr_params['raster'] = list(map(
                int, o.split()[-5:]))

        if '(currently SEPARATION' in o:
            intgr_params['separation'] = list(map(
                float, o.replace(')', '').split()[-2:]))

        # get the resolution estimate out...
        if '99% have resolution' in o:
            self._indxr_resolution_estimate = float(
                o.split()[-2])
            # remove this as useless, see bug # 2072
            # Science.write('Resolution estimated to be %5.2f A' % \
            # self._indxr_resolution_estimate)

    # FIXME this needs to be picked up by the integrater
    # interface which uses this Indexer, if it's a mosflm
    # implementation

    self._indxr_payload['mosflm_integration_parameters'] = intgr_params

    # read the orientation matrix back in, closing the file handle
    # when done
    with open(os.path.join(self.get_working_directory(),
                           'xiaindex.mat'), 'r') as matrix_file:
        self._indxr_payload['mosflm_orientation_matrix'] = \
            matrix_file.readlines()

    return
def _integrate_prepare(self):
    '''Prepare for integration - note that if there is a reason
    why this is needed to be run again, set self._intgr_prepare_done
    as False.

    Runs the cell refinement twice - once in the lattice selected
    by autoindexing and once in P1 (aP) - and reports the per-image
    RMS deviation comparison as a sanity check on the chosen
    lattice.  Finally assigns the integration wedge from the
    matching images, if it is not already set.'''

    # bug 2199: do not estimate the gain here, it only makes things
    # less reliable - instead read from the FP interface if known
    # (bug # 2333)
    if not self._mosflm_gain and self.get_gain():
        self._mosflm_gain = self.get_gain()

    # try performing the cell refinement in P1 first and then
    # in the correct setting ... not sure how to "insulate" this
    # from errors in the cell refinement though... if this
    # phase decided that the results were much worse a
    # BadLatticeError would have to be raised...

    self.reset()
    auto_logfiler(self)
    rms_deviations = self._mosflm_refine_cell()

    # next test the cell refinement with the correct lattice
    # and P1 and see how the numbers stack up...

    self.reset()
    auto_logfiler(self)
    rms_deviations_p1 = self._mosflm_test_refine_cell('aP')

    # expand the cell refinement wedges to the full list of
    # image numbers they cover
    images = []
    for cri in self._mosflm_cell_ref_images:
        images.extend(range(cri[0], cri[1] + 1))

    if rms_deviations and rms_deviations_p1:
        Debug.write('Cell refinement comparison:')
        Debug.write('Image   correct   triclinic')
        mean = 0.0
        mean_p1 = 0.0
        # zip truncates safely should either RMS list turn out
        # shorter than the image list (previously an IndexError)
        for image, rmsd, rmsd_p1 in zip(
            images, rms_deviations, rms_deviations_p1):
            Debug.write('. %4d %.2f %.2f' % (image, rmsd, rmsd_p1))
            mean += rmsd
            mean_p1 += rmsd_p1
        # guard against a degenerate all-zero triclinic result,
        # which would previously raise ZeroDivisionError
        if mean_p1:
            Debug.write('Average ratio: %.2f' % (mean / mean_p1))
    else:
        Debug.write('Cell refinement in P1 failed...')

    # also look for the images we want to integrate... since this
    # is part of the preparation and was causing fun with
    # bug # 2040 - going quickly! this resets the integration done
    # flag...

    if not self._intgr_wedge:
        images = self.get_matching_images()
        self.set_integrater_wedge(min(images),
                                  max(images))

    return
def _integrate(self):
    '''Implement the integrater interface.

    Runs the actual Mosflm integration (the cell refinement or
    indexing results must already be in place - they have the
    equivalent form) and returns the name of the output reflection
    file.  If the integration run flagged itself for repetition
    (e.g. an updated gain value was found) and we are not running
    in "quick" mode, mark the integration as not done so the
    controlling while-loop in the Integrater interface reruns it.'''

    # cite the program
    Citations.cite('mosflm')

    # by default we don't want to rerun, or we could be here forever
    # (and that did happen! :o( )
    self._mosflm_rerun_integration = False

    # (removed an unused local which captured the working directory
    # but was never read)
    self.reset()
    auto_logfiler(self)
    self._intgr_hklout = self._mosflm_integrate()
    self._mosflm_hklout = self._intgr_hklout

    if self._mosflm_rerun_integration and not Flags.get_quick():
        # make sure that this is run again...
        Chatter.write('Need to rerun the integration...')
        self.set_integrater_done(False)

    return self._intgr_hklout
def _integrate_finish(self):
    '''Finish the integration - if necessary performing reindexing
    based on the pointgroup and the reindexing operator.

    Returns the name of the final reflection file - either the raw
    Mosflm output (when no reindexing is needed) or the reindexed
    copy written alongside it.'''

    # no reindexing is needed if we have no reindexing operator and
    # the spacegroup already matches the indexed lattice...
    if self._intgr_reindex_operator is None and \
       self._intgr_spacegroup_number == lattice_to_spacegroup(
        self.get_integrater_indexer().get_indexer_lattice()):
        return self._mosflm_hklout

    # ... or if no spacegroup has been assigned as yet
    if self._intgr_reindex_operator is None and \
       self._intgr_spacegroup_number == 0:
        return self._mosflm_hklout

    Chatter.write('Reindexing to spacegroup %d (%s)' % \
                  (self._intgr_spacegroup_number,
                   self._intgr_reindex_operator))

    hklin = self._mosflm_hklout
    reindex = Reindex()
    reindex.set_working_directory(self.get_working_directory())
    auto_logfiler(reindex)

    reindex.set_operator(self._intgr_reindex_operator)

    if self._intgr_spacegroup_number:
        reindex.set_spacegroup(self._intgr_spacegroup_number)

    # derive the output name from the input - use splitext rather
    # than blindly chopping the last four characters, which assumed
    # a three-letter extension
    hklout = '%s_reindex.mtz' % os.path.splitext(hklin)[0]

    reindex.set_hklin(hklin)
    reindex.set_hklout(hklout)
    reindex.reindex()

    self._intgr_hklout = hklout
    return hklout
def _mosflm_test_refine_cell(self, test_lattice):
    '''Test performing cell refinement in with a different
    lattice to the one which was selected by the autoindex
    procedure.

    Runs a throw-away Mosflm cell refinement using the current
    indexing solution transformed into test_lattice (e.g. 'aP')
    and returns the per-image RMS positional deviations (mm) from
    the final refinement cycle, or None if the table was not found
    in the output.  Nothing on this class is modified.'''

    # this version will not actually *change* anything in the class.

    # note well that this will need the unit cell to be
    # transformed from a centred to a primitive lattice, perhaps.
    # yes that is definately the case - the matrix will also
    # need to be transformed :o( this is fine, see below.

    # assert that this is called after the initial call to
    # cell refinement in the correct PG so a lot of this can
    # be ignored...

    indxr = self.get_integrater_indexer()

    # current refined model from the indexer
    lattice = indxr.get_indexer_lattice()
    mosaic = indxr.get_indexer_mosaic()
    beam = indxr.get_indexer_beam()
    distance = indxr.get_indexer_distance()
    matrix = indxr.get_indexer_payload('mosflm_orientation_matrix')

    input_matrix = ''
    for m in matrix:
        input_matrix += '%s\n' % m

    # transform the orientation matrix into the test lattice setting
    new_matrix = transmogrify_matrix(lattice, input_matrix,
                                     test_lattice)

    spacegroup_number = lattice_to_spacegroup(test_lattice)

    if not self._mosflm_cell_ref_images:
        raise RuntimeError, 'wedges must be assigned already'

    # write the transformed matrix for mosflm to read
    # NOTE(review): the file handle is not explicitly closed here
    open(os.path.join(self.get_working_directory(),
                      'test-xiaindex-%s.mat' % lattice),
         'w').write(new_matrix)

    # compose the mosflm input deck
    self.start()

    if self._mosflm_gain:
        self.input('gain %5.2f' % self._mosflm_gain)

    self.input('template "%s"' % self.get_template())
    self.input('directory "%s"' % self.get_directory())
    self.input('matrix test-xiaindex-%s.mat' % lattice)
    self.input('newmat test-xiarefine.mat')
    self.input('beam %f %f' % beam)
    self.input('distance %f' % distance)
    self.input('symmetry %s' % spacegroup_number)
    self.input('mosaic %f' % mosaic)

    # note well that the beam centre is coming from indexing so
    # should be already properly handled

    if self.get_wavelength_prov() == 'user':
        self.input('wavelength %f' % self.get_wavelength())

    # get all of the stored parameter values
    parameters = self.get_integrater_parameters('mosflm')

    # FIXME 27/SEP/06:
    # have to make sure that these are correctly applied -
    # that is, be sure that these come actually from autoindexing
    # not somehow from a previous instance of data integration...

    self.input('!parameters from autoindex run')
    for p in parameters.keys():
        self.input('%s %s' % (p, str(parameters[p])))

    # fudge factors to prevent Mosflm from being too fussy
    self.input('refinement residual 10.0')

    # set up the cell refinement - allowing quite a lot of
    # refinement for tricky cases (e.g. 7.2 SRS insulin SAD
    # data collected on MAR IP)
    self.input('postref multi segments %d repeat 10' % \
               len(self._mosflm_cell_ref_images))
    for cri in self._mosflm_cell_ref_images:
        self.input('process %d %d' % cri)

    self.input('go')

    # that should be everything
    self.close_wait()

    # get the log file
    output = self.get_all_output()

    rms_values_last = None

    # parse the "Rms positional error" table - a fixed-width table
    # with one 'Image ...' header row and one 'Cycle N ...' row per
    # refinement cycle, terminated by a blank line
    for i in range(len(output)):
        o = output[i]

        if 'Rms positional error (mm) as a function of' in o and True:
            images = []
            cycles = []
            rms_values = { }

            j = i + 1
            while output[j].split():
                if 'Image' in output[j]:
                    for image in map(int, output[j].replace(
                        'Image', '').split()):
                        images.append(image)
                else:
                    cycle = int(output[j].replace(
                        'Cycle', '').split()[0])

                    if not cycle in cycles:
                        cycles.append(cycle)
                        rms_values[cycle] = []

                    # values live in 6-character columns starting
                    # at column 11
                    record = [output[j][k:k + 6] \
                              for k in range(
                        11, len(output[j]), 6)]

                    data = []
                    for r in record:
                        if r.strip():
                            data.append(r.strip())
                    record = data

                    try:
                        values = map(float, record)
                        for v in values:
                            rms_values[cycle].append(v)
                    except ValueError, e:
                        Chatter.write(
                            'Error parsing %s as floats' % \
                            output[j][12:])

                j += 1

            # keep only the values from the last (highest) cycle
            rms_values_last = rms_values[max(cycles)]

    return rms_values_last
def _mosflm_refine_cell(self, set_spacegroup = None):
    '''Perform the refinement of the unit cell. This will populate
    all of the information needed to perform the integration.

    If set_spacegroup is given it overrides the spacegroup derived
    from the indexed lattice (used e.g. to test refinement in P1).
    On success the refined cell, distance, mosaic and orientation
    matrix are pushed back to the indexer, and the per-image RMS
    positional deviations from the final refinement cycle are
    returned.  May return None (after requesting a rerun with more
    wedges via set_integrater_prepare_done(False)) or raise
    BadLatticeError if refinement fails outright.'''

    # self.reset()

    if not self.get_integrater_indexer():
        # this wrapper can present the indexer interface
        # if needed, so do so. if this set command has
        # been called already this should not be used...
        self.set_integrater_indexer(self)

    # get the things we need from the indexer - beware that if
    # the indexer has not yet been run this may spawn other
    # jobs...

    indxr = self.get_integrater_indexer()

    if not indxr.get_indexer_payload('mosflm_orientation_matrix'):
        # we will have to do some indexing ourselves - the
        # existing indexing job doesn't provide an orientation
        # matrix

        # FIXME this needs implementing - copy information
        # from this indexer to myself, then reset my indexer too me

        pass

    lattice = indxr.get_indexer_lattice()
    mosaic = indxr.get_indexer_mosaic()
    cell = indxr.get_indexer_cell()
    beam = indxr.get_indexer_beam()

    # check to see if there is a special mosflm beam around!

    if indxr.get_indexer_payload('mosflm_beam_centre'):
        beam = indxr.get_indexer_payload('mosflm_beam_centre')

    distance = indxr.get_indexer_distance()
    matrix = indxr.get_indexer_payload('mosflm_orientation_matrix')

    # check to see if there are parameters which I should be using for
    # cell refinement etc in here - if there are, use them - this
    # will also appear in integrate, for cases where that will
    # be called without cell refinemnt

    integration_params = indxr.get_indexer_payload(
        'mosflm_integration_parameters')

    if integration_params:
        # copy them somewhere useful... into the dictionary?
        # yes - that way they can be recycled...
        # after that, zap them because they will be obsolete!

        if integration_params.has_key('separation'):
            self.set_integrater_parameter(
                'mosflm', 'separation',
                '%f %f' % tuple(integration_params['separation']))
        if integration_params.has_key('raster'):
            self.set_integrater_parameter(
                'mosflm', 'raster',
                '%d %d %d %d %d' % tuple(integration_params['raster']))

    # NOTE(review): key differs from the payload read above
    # ('mosflm_integration_params' vs '..._parameters') - confirm
    # this actually clears the stored payload
    indxr.set_indexer_payload('mosflm_integration_params', None)

    # copy these into myself for later reference, if indexer
    # is not myself - everything else is copied via the
    # cell refinement process...

    if indxr != self:
        self.set_indexer_input_lattice(lattice)
        self.set_indexer_beam(beam)

    # here need to check the LATTICE - which will be
    # something like tP etc. FIXME how to cope when the
    # spacegroup has been explicitly stated?

    spacegroup_number = lattice_to_spacegroup(lattice)

    # FIXME 11/SEP/06 have an example set of data which will
    #                 make cell refinement "fail" - that is
    #                 not work very well - 9485/3[1VPX]. Therefore
    #                 allow for more image wedges, read output.
    #
    # What we are looking for in the output is:
    #
    # INACCURATE CELL PARAMETERS
    #
    # followed by the dodgy cell parameters, along with the
    # associated standard errors. Based on these need to decide
    # what extra data would be helpful. Will also want to record
    # these standard deviations to decide if the next run of
    # cell refinement makes things better... Turns out that this
    # example is very low resolution, so don't worry too hard
    # about it!

    # tetragonal and above (spacegroup 75+) needs only one wedge;
    # lower symmetry gets two
    if spacegroup_number >= 75:
        num_wedges = 1
    else:
        num_wedges = 2

    # FIXME 23/OCT/06 should only do this if the images are not
    # already assigned - for instance, in the case where the cell
    # refinement fails and more images are added after that failure
    # need to be able to cope with not changing them at this stage...

    # self._mosflm_cell_ref_images = None

    if not self._mosflm_cell_ref_images:
        self._mosflm_cell_ref_images = self._refine_select_images(
            num_wedges, mosaic)

    # write the matrix file in xiaindex.mat

    f = open(os.path.join(self.get_working_directory(),
                          'xiaindex-%s.mat' % lattice), 'w')
    for m in matrix:
        f.write(m)
    f.close()

    # then start the cell refinement

    task = 'Refine cell from %d wedges' % \
           len(self._mosflm_cell_ref_images)

    self.set_task(task)

    self.start()

    if self._mosflm_gain:
        self.input('gain %5.2f' % self._mosflm_gain)

    self.input('template "%s"' % self.get_template())
    self.input('directory "%s"' % self.get_directory())
    self.input('matrix xiaindex-%s.mat' % lattice)
    self.input('newmat xiarefine.mat')
    self.input('beam %f %f' % beam)
    self.input('distance %f' % distance)

    # FIXED is this the correct form? - it is now.
    # want to be able to test cell refinement in P1
    # as a way of investigating how solid the autoindex
    # solution is... therefore allow spacegroup to
    # be explicitly set...

    if set_spacegroup:
        self.input('symmetry %s' % set_spacegroup)
    else:
        self.input('symmetry %s' % spacegroup_number)

    self.input('mosaic %f' % mosaic)

    # note well that the beam centre is coming from indexing so
    # should be already properly handled

    if self.get_wavelength_prov() == 'user':
        self.input('wavelength %f' % self.get_wavelength())

    # get all of the stored parameter values
    parameters = self.get_integrater_parameters('mosflm')

    # FIXME 27/SEP/06:
    # have to make sure that these are correctly applied -
    # that is, be sure that these come actually from autoindexing
    # not somehow from a previous instance of data integration...

    self.input('!parameters from autoindex run')
    for p in parameters.keys():
        self.input('%s %s' % (p, str(parameters[p])))

    # fudge factors to prevent Mosflm from being too fussy
    self.input('refinement residual 10.0')

    # set up the cell refinement - allowing quite a lot of
    # refinement for tricky cases (e.g. 7.2 SRS insulin SAD
    # data collected on MAR IP)
    self.input('postref multi segments %d repeat 10' % \
               len(self._mosflm_cell_ref_images))
    for cri in self._mosflm_cell_ref_images:
        self.input('process %d %d' % cri)

    self.input('go')

    # that should be everything
    self.close_wait()

    # get the log file
    output = self.get_all_output()

    # then look to see if the cell refinement worked ok - if it
    # didn't then this may indicate that the lattice was wrongly
    # selected.

    cell_refinement_ok = False

    for o in output:
        if 'Cell refinement is complete' in o:
            cell_refinement_ok = True

    if not cell_refinement_ok:
        Chatter.write(
            'Looks like cell refinement failed - more follows...')

        # how best to handle this, I don't know... could
        #
        # (1) raise an exception
        # (2) try to figure out the solution myself
        #
        # probably (1) is better, because this will allow the higher
        # level of intelligence to sort it out. don't worry too hard
        # about this in the initial version, since labelit indexing
        # is pretty damn robust.

    # if it succeeded then populate the indexer output (myself)
    # with the new information - this can then be used
    # transparently in the integration.

    # here I need to get the refined distance, mosaic spread, unit
    # cell and matrix - should also look the yscale and so on, as
    # well as the final rms deviation in phi and distance

    # FIRST look for errors, and analysis stuff which may be
    # important...

    rmsd_range = None
    rms_values_last = None

    # first pass: trap error conditions - each handler either
    # requests a rerun with more wedges (returning None) or raises
    # BadLatticeError when enough wedges have already been tried
    for i in range(len(output)):
        o = output[i]

        # FIXME 01/NOV/06 dump this stuff from the top (error trapping)
        # into a trap_cell_refinement_errors method which is called
        # before the rest of the output is parsed...

        # look for overall cell refinement failure

        if 'Processing will be aborted' in o:

            # perhaps try this with more images?

            if len(self._mosflm_cell_ref_images) <= 3:
                # set this up to be more images
                new_cell_ref_images = self._refine_select_images(
                    len(self._mosflm_cell_ref_images) + 1,
                    mosaic)
                self._mosflm_cell_ref_images = new_cell_ref_images
                self.set_integrater_prepare_done(False)
                Science.write(
                    'Repeating cell refinement with more data.')
                return

            else:
                raise BadLatticeError, 'cell refinement failed'

        # look to store the rms deviations on a per-image basis
        # this may be used to decide what to do about "inaccurate
        # cell parameters" below... may also want to record
        # them for comparison with cell refinement with a lower
        # spacegroup for solution elimination purposes...

        if 'Rms positional error (mm) as a function of' in o and True:
            images = []
            cycles = []
            rms_values = { }

            j = i + 1

            # walk the fixed-width table until a blank line
            while output[j].split():
                if 'Image' in output[j]:
                    for image in map(int, output[j].replace(
                        'Image', '').split()):
                        images.append(image)
                else:
                    cycle = int(output[j].replace(
                        'Cycle', '').split()[0])

                    if not cycle in cycles:
                        cycles.append(cycle)
                        rms_values[cycle] = []

                    # values are in 6-character columns from
                    # column 11
                    record = [output[j][k:k + 6] \
                              for k in range(
                        11, len(output[j]), 6)]

                    data = []
                    for r in record:
                        if r.strip():
                            data.append(r.strip())
                    record = data

                    try:
                        values = map(float, record)
                        for v in values:
                            rms_values[cycle].append(v)
                    except ValueError, e:
                        Chatter.write(
                            'Error parsing %s as floats' % \
                            output[j][12:])

                j += 1

            # by now we should have recorded everything so...print!
            # Chatter.write('Final RMS deviations per image')
            # for j in range(len(images)):
            # Chatter.write('- %4d %5.3f' % (images[j],
            # rms_values_last[j]))

            if cycles:
                rms_values_last = rms_values[max(cycles)]
            else:
                rms_values_last = None

            if rms_values_last:
                rmsd_range = max(rms_values_last), min(rms_values_last)
            else:
                # there must have been a bigger problem than this!
                rmsd_range = 1.0, 1.0

        # look for "error" type problems

        if 'is greater than the maximum allowed' in o and \
           'FINAL weighted residual' in o:

            # the weighted residual is too high - this suggests
            # a poor indexing solution - jump out and redo

            Science.write('Large weighted residual...')

            # NOTE(review): strict '< 3' here, other handlers use
            # '<= 3' - confirm the asymmetry is intentional
            if len(self._mosflm_cell_ref_images) < 3:
                # set this up to be more images
                new_cell_ref_images = self._refine_select_images(
                    len(self._mosflm_cell_ref_images) + 1,
                    mosaic)
                self._mosflm_cell_ref_images = new_cell_ref_images

                # set a flag to say cell refinement needs rerunning
                # c/f Integrator.py
                self.set_integrater_prepare_done(False)

                # tell the user what is going on
                Science.write(
                    'Repeating cell refinement with more data.')

                # don't update the indexer - the results could be
                # wrong!

                return

            else:
                Science.write(
                    'Integration will be aborted because of this.')

                raise BadLatticeError, 'cell refinement failed: ' + \
                      'inaccurate cell parameters'

        if 'INACCURATE CELL PARAMETERS' in o:

            # get the inaccurate cell parameters in question
            # (note: shadows the mosflm parameter dictionary above)
            parameters = output[i + 3].lower().split()

            # and the standard deviations - so we can decide
            # if it really has failed

            sd_record = output[i + 5].replace(
                'A', ' ').replace(',', ' ').split()
            sds = map(float, [sd_record[j] for j in range(1, 12, 2)])

            Science.write('Standard deviations:')
            Science.write('A     %4.2f  B     %4.2f  C     %4.2f' % \
                          (tuple(sds[:3])))
            Science.write('Alpha %4.2f  Beta  %4.2f  Gamma %4.2f' % \
                          (tuple(sds[3:6])))

            # FIXME 01/NOV/06 this needs to be toned down a little -
            # perhaps looking at the relative error in the cell
            # parameter, or a weighted "error" of the two combined,
            # because this may give rise to an error: TS01 NATIVE LR
            # failed in integration with this, because the error
            # in a was > 0.1A in 228. Assert perhaps that the error
            # should be less than 1.0e-3 * cell axis and less than
            # 0.15A?

            # inspect rmsd_range

            if rmsd_range is None:
                raise RuntimeError, 'no rms deviation information'

            # interested if max > 2 * min... 2 - 1 / (2 + 1)= 1 / 3

            large_rmsd_range = False

            if ((rmsd_range[0] - rmsd_range[1]) /
                (rmsd_range[0] + rmsd_range[1])) > 0.3333:
                large_rmsd_range = True
                Science.write(
                    'Large range in RMSD variation per image')

            # and warn about them
            Science.write(
                'In cell refinement, the following cell parameters')
            Science.write(
                'have refined poorly:')
            for p in parameters:
                Science.write('... %s' % p)

            # decide what to do about this...
            # if this is all cell parameters, abort, else
            # consider using more data...

            # see how many wedges we are using - if it's 3 already
            # then there is probably something more important
            # wrong. If it is fewer than this then try again!

            if len(self._mosflm_cell_ref_images) < 3:
                # set this up to be more images
                new_cell_ref_images = self._refine_select_images(
                    len(self._mosflm_cell_ref_images) + 1,
                    mosaic)
                self._mosflm_cell_ref_images = new_cell_ref_images

                # set a flag to say cell refinement needs rerunning
                # c/f Integrator.py
                self.set_integrater_prepare_done(False)

                # tell the user what is going on
                Science.write(
                    'Repeating cell refinement with more data.')

                # don't update the indexer - the results could be
                # wrong!

                return

            else:
                # only abort when the RMSD spread is also large;
                # otherwise push on to integration regardless
                if large_rmsd_range:
                    Science.write(
                        'Integration will be aborted because of this.')

                    raise BadLatticeError, 'cell refinement failed: ' + \
                          'inaccurate cell parameters'

                Science.write(
                    'However, will continue to integration.')

        if 'One or more cell parameters has changed by more' in o:
            # this is a more severe example of the above problem...
            Science.write(
                'Cell refinement is unstable...')

            # so decide what to do about it...

            if len(self._mosflm_cell_ref_images) <= 3:
                # set this up to be more images
                new_cell_ref_images = self._refine_select_images(
                    len(self._mosflm_cell_ref_images) + 1,
                    mosaic)
                self._mosflm_cell_ref_images = new_cell_ref_images
                self.set_integrater_prepare_done(False)
                Science.write(
                    'Repeating cell refinement with more data.')
                return

            else:
                Science.write(
                    'Integration will be aborted because of this.')

                raise BadLatticeError, 'cell refinement failed: ' + \
                      'unstable cell refinement'

        # other possible problems in the cell refinement - a
        # negative mosaic spread, for instance

        if 'Refined mosaic spread (excluding safety factor)' in o:
            mosaic = float(o.split()[-1])

            if mosaic < 0.0:
                Science.write('Negative mosaic spread (%5.2f)' %
                              mosaic)

                if len(self._mosflm_cell_ref_images) <= 3:
                    # set this up to be more images
                    new_cell_ref_images = self._refine_select_images(
                        len(self._mosflm_cell_ref_images) + 1,
                        mosaic)
                    self._mosflm_cell_ref_images = new_cell_ref_images
                    self.set_integrater_prepare_done(False)
                    Science.write(
                        'Repeating cell refinement with more data.')
                    return

                else:
                    Science.write(
                        'Integration will be aborted because of this.')

                    raise BadLatticeError, 'cell refinement failed: ' + \
                          'negative mosaic spread'

        # look generally at the RMS deviation range - is this is
        # large then there may be something properly wrong...
        # switch this off for a moment as it may be more appropriate
        # for this test to look at the results from integration...
        # ('and False' keeps this whole branch dead - note also the
        # error message here still says "negative mosaic spread")

        if rmsd_range and False:
            if ((rmsd_range[0] - rmsd_range[1]) /
                (rmsd_range[0] + rmsd_range[1])) > 0.3333:
                Science.write(
                    'Large range in RMSD variation per image')

                if len(self._mosflm_cell_ref_images) <= 3:
                    # set this up to be more images
                    new_cell_ref_images = self._refine_select_images(
                        len(self._mosflm_cell_ref_images) + 1,
                        mosaic)
                    self._mosflm_cell_ref_images = new_cell_ref_images
                    self.set_integrater_prepare_done(False)
                    Science.write(
                        'Repeating cell refinement with more data.')
                    return

                else:
                    Science.write(
                        'Integration will be aborted because of this.')

                    raise BadLatticeError, 'cell refinement failed: ' + \
                          'negative mosaic spread'

    # AFTER that, read the refined parameters

    # second pass: harvest the refined values and push them back to
    # the indexer / integrater parameter store
    for i in range(len(output)):
        o = output[i]

        # FIXED for all of these which follow - the refined values
        # for these parameters should only be stored if the cell
        # refinement were 100% successful - therefore gather
        # them up here and store them at the very end (e.g. once
        # success has been confirmed.) 01/NOV/06

        # FIXME will these get lost if the indexer in question is
        # not this program...? Find out... would be nice to write
        # this to Chatter too...

        if 'Refined cell' in o:
            # feed these back to the indexer
            indxr._indxr_cell = tuple(map(float, o.split()[-6:]))

            # record the refined cell parameters for getting later
            self._intgr_cell = tuple(map(float, o.split()[-6:]))

        # FIXME do I need this? I think that the refined distance
        # is passed in as an integration parameter (see below)

        if 'Detector distance as a' in o:
            # look through the "cycles" to get the final refined
            # distance - average the values on the last non-blank row

            j = i + 1
            while output[j].strip() != '':
                j += 1
            distances = map(float, output[j - 1].split()[2:])
            distance = 0.0
            for d in distances:
                distance += d
            distance /= len(distances)
            indxr._indxr_refined_distance = distance

        if 'YSCALE as a function' in o:
            # look through the "cycles" to get the final refined
            # yscale value - again averaging the last row

            j = i + 1
            while output[j].strip() != '':
                j += 1
            yscales = map(float, output[j - 1].split()[2:])
            yscale = 0.0
            for y in yscales:
                yscale += y
            yscale /= len(yscales)
            self.set_integrater_parameter('mosflm',
                                          'distortion yscale',
                                          yscale)

        # next look for the distortion & raster parameters
        # see FIXME at the top of this file from 16/AUG/06

        if 'Final optimised raster parameters:' in o:
            self.set_integrater_parameter('mosflm',
                                          'raster',
                                          o.split(':')[1].strip())

        if 'Separation parameters updated to' in o:
            tokens = o.replace('mm', ' ').split()
            self.set_integrater_parameter('mosflm',
                                          'separation',
                                          '%s %s' % \
                                          (tokens[4], tokens[8]))

        if 'XCEN YCEN XTOFRA' in o:
            numbers = output[i + 1].split()

            # this should probably be done via the FrameProcessor
            # interface...
            self.set_integrater_parameter('mosflm',
                                          'beam',
                                          '%s %s' % \
                                          (numbers[0], numbers[1]))

            # FIXME should this go through the FP interface?
            # this conflicts with the calculation above
            # of the average distance as well...
            self.set_integrater_parameter('mosflm',
                                          'distance',
                                          numbers[3])

            self.set_integrater_parameter('mosflm',
                                          'distortion tilt',
                                          numbers[5])
            self.set_integrater_parameter('mosflm',
                                          'distortion twist',
                                          numbers[6])

        # FIXME does this work if this mosflm is not
        # the one being used as an indexer? - probably not -
        # I will need a getIndexer.setMosaic() or something...

        if 'Refined mosaic spread' in o:
            indxr._indxr_mosaic = float(o.split()[-1])

    # hack... FIXME (maybe?)
    # self._indxr_done = True
    self.set_indexer_done(True)

    # shouldn't need this.. remember that Python deals in pointers!
    # NOTE(review): file handles below are not explicitly closed
    self.set_indexer_payload('mosflm_orientation_matrix', open(
        os.path.join(self.get_working_directory(),
                     'xiarefine.mat'), 'r').readlines())
    indxr.set_indexer_payload('mosflm_orientation_matrix', open(
        os.path.join(self.get_working_directory(),
                     'xiarefine.mat'), 'r').readlines())

    return rms_values_last
def _mosflm_integrate(self):
'''Perform the actual integration, based on the results of the
cell refinement or indexing (they have the equivalent form.)'''
# self.reset()
# the only way to get here is through the cell refinement,
# unless we're trying to go fast - which means that we may
# have to create an indexer if fast - if we're going slow
# then this should have been done by the cell refinement
# stage...
# FIXME add "am I going fast" check here
if not self.get_integrater_indexer():
# this wrapper can present the indexer interface
# if needed, so do so. if this set command has
# been called already this should not be used...
self.set_integrater_indexer(self)
# get the things we need from the indexer - beware that if
# the indexer has not yet been run this may spawn other
# jobs...
indxr = self.get_integrater_indexer()
if not indxr.get_indexer_payload('mosflm_orientation_matrix'):
# we will have to do some indexing ourselves - the
# existing indexing job doesn't provide an orientation
# matrix
# FIXME this needs implementing - copy information
# from this indexer to myself, then reset my indexer too me
# FIXME this should probably raise an exception...
pass
lattice = indxr.get_indexer_lattice()
mosaic = indxr.get_indexer_mosaic()
cell = indxr.get_indexer_cell()
beam = indxr.get_indexer_beam()
distance = indxr.get_indexer_distance()
matrix = indxr.get_indexer_payload('mosflm_orientation_matrix')
# check to see if there are parameters which I should be using for
# integration etc in here - if there are, use them - this will
# only happen when the integration is "fast" and they haven't
# been eaten by the cell refinemnt process
integration_params = indxr.get_indexer_payload(
'mosflm_integration_parameters')
if integration_params:
# copy them somewhere useful... into the dictionary?
# yes - that way they can be recycled...
# after that, zap them because they will be obsolete!
if integration_params.has_key('separation'):
self.set_integrater_parameter(
'mosflm', 'separation',
'%f %f' % tuple(integration_params['separation']))
if integration_params.has_key('raster'):
self.set_integrater_parameter(
'mosflm', 'raster',
'%d %d %d %d %d' % tuple(integration_params['raster']))
indxr.set_indexer_payload('mosflm_integration_params', None)
# here need to check the LATTICE - which will be
# something like tP etc. FIXME how to cope when the
# spacegroup has been explicitly stated?
spacegroup_number = lattice_to_spacegroup(lattice)
f = open(os.path.join(self.get_working_directory(),
'xiaintegrate.mat'), 'w')
for m in matrix:
f.write(m)
f.close()
# then start the integration
task = 'Integrate frames %d to %d' % self._intgr_wedge
self.set_task(task)
summary_file = 'summary_%s.log' % spacegroup_number
self.add_command_line('SUMMARY')
self.add_command_line(summary_file)
self.start()
# if the integrater interface has the project, crystal, dataset
# information available, pass this in to mosflm and also switch
# on the harvesrng output. warning! if the harvesting is switched
# on then this means that the files will all go to the same
# place - for the moment move this to cwd.
pname, xname, dname = self.get_integrater_project_info()
if pname != None and xname != None and dname != None:
Chatter.write('Harvesting: %s/%s/%s' % (pname, xname, dname))
# harvest file name will be %s.mosflm_run_start_end % dname
self.input('harvest on')
self.input('pname %s' % pname)
self.input('xname %s' % xname)
self.input('dname %s' % dname)
self.input('ucwd')
self.input('template "%s"' % self.get_template())
self.input('directory "%s"' % self.get_directory())
# generate the mask information from the detector class
mask = standard_mask(self._fp_header['detector_class'])
for m in mask:
self.input(m)
self.input('matrix xiaintegrate.mat')
self.input('beam %f %f' % beam)
self.input('distance %f' % distance)
self.input('symmetry %s' % spacegroup_number)
self.input('mosaic %f' % mosaic)
# note well that the beam centre is coming from indexing so
# should be already properly handled - likewise the distance
if self.get_wavelength_prov() == 'user':
self.input('wavelength %f' % self.get_wavelength())
# get all of the stored parameter values
parameters = self.get_integrater_parameters('mosflm')
for p in parameters.keys():
self.input('%s %s' % (p, str(parameters[p])))
# in here I need to get the GAIN parameter from the sweep
# or from somewhere in memory....
if self._mosflm_gain:
self.input('gain %5.2f' % self._mosflm_gain)
# check for resolution limits
if self._intgr_reso_high > 0.0:
self.input('resolution %f' % self._intgr_reso_high)
# set up the integration
self.input('postref fix all')
self.input('separation close')
# FIXME this is a horrible hack - I at least need to
# sand box this ...
if self.get_header_item('detector') == 'raxis':
self.input('adcoffset 0')
self.input('process %d %d' % self._intgr_wedge)
self.input('go')
# that should be everything
self.close_wait()
# get the log file
output = self.get_all_output()
# record a copy of it, perhaps
if self.get_integrater_sweep_name():
pname, xname, dname = self.get_integrater_project_info()
FileHandler.record_log_file('%s %s %s %s mosflm integrate' % \
(self.get_integrater_sweep_name(),
pname, xname, dname),
self.get_log_file())
# look for things that we want to know...
# that is, the output reflection file name, the updated
# value for the gain (if present,) any warnings, errors,
# or just interesting facts.
integrated_images_first = 1.0e6
integrated_images_last = -1.0e6
# look for major errors
for i in range(len(output)):
o = output[i]
if 'LWBAT: error in ccp4_lwbat' in o:
raise RuntimeError, 'serious mosflm error - inspect %s' % \
self.get_log_file()
for i in range(len(output)):
o = output[i]
if 'Integrating Image' in o:
batch = int(o.split()[2])
if batch < integrated_images_first:
integrated_images_first = batch
if batch > integrated_images_last:
integrated_images_last = batch
if 'ERROR IN DETECTOR GAIN' in o:
# look for the correct gain
for j in range(i, i + 10):
if output[j].split()[:2] == ['set', 'to']:
gain = float(output[j].split()[-1][:-1])
self.set_integrater_parameter('mosflm',
'gain',
gain)
self.set_integrater_export_parameter('mosflm',
'gain',
gain)
Science.write('GAIN found to be %f' % gain)
# this should probably override the input
self._mosflm_gain = gain
self._mosflm_rerun_integration = True
if 'Smoothed value for refined mosaic spread' in o:
mosaic = float(o.split()[-1])
if mosaic < 0.0:
raise BadLatticeError, 'negative mosaic spread'
if 'WRITTEN OUTPUT MTZ FILE' in o:
self._mosflm_hklout = os.path.join(
self.get_working_directory(),
output[i + 1].split()[-1])
Science.write('Integration output: %s' % \
self._mosflm_hklout)
if 'Number of Reflections' in o:
self._intgr_n_ref = int(o.split()[-1])
if 'MOSFLM HAS TERMINATED EARLY' in o:
Chatter.write('Mosflm has failed in integration')
message = 'The input was:\n\n'
for input in self.get_all_input():
message += ' %s' % input
Chatter.write(message)
raise RuntimeError, \
'integration failed: reason unknown (log %s)' % \
self.get_log_file()
self._intgr_batches_out = (integrated_images_first,
integrated_images_last)
Chatter.write('Processed batches %d to %d' % \
self._intgr_batches_out)
# write the report for each image as .*-#$ to Chatter -
# detailed report will be written automagically to science...
parsed_output = _parse_mosflm_integration_output(output)
spot_status = _happy_integrate_lp(parsed_output)
# inspect the output for e.g. very high weighted residuals
images = parsed_output.keys()
images.sort()
max_weighted_residual = 0.0
# FIXME bug 2175 this should probably look at the distribution
# of values rather than the peak, since this is probably a better
# diagnostic of a poor lattice.
residuals = [parsed_output[i]['weighted_residual'] for i in images]
mean, sd = mean_sd(residuals)
Chatter.write('Weighted RMSD: %.2f (%.2f)' % \
(mean, sd))
for i in images:
data = parsed_output[i]
if data['weighted_residual'] > max_weighted_residual:
max_weighted_residual = data['weighted_residual']
if max_weighted_residual > 3.0 and False:
raise BadLatticeError, 'large weighted residual (%4.2f)' % \
max_weighted_residual
if mean > 2.0:
raise BadLatticeError, 'large mean residual (%.2f)' % mean
if len(spot_status) > 60:
Chatter.write('Integration status per image (60/record):')
else:
Chatter.write('Integration status per image:')
for chunk in [spot_status[i:i + 60] \
for i in range(0, len(spot_status), 60)]:
Chatter.write(chunk)
Chatter.write(
'"o" => ok "%" => iffy rmsd "!" => bad rmsd')
Chatter.write(
'"O" => overloaded "#" => many bad "." => blank')
# if we have not processed to a given resolution, fix
# the limit for future reference
# bug # 2040 - this is one place where we could cut out the
# middle-man and make things quicker, by not resetting the
# resolution limit...
if not self._intgr_reso_high and not Flags.get_quick():
resolution = decide_integration_resolution_limit(output)
self.set_integrater_high_resolution(resolution)
Chatter.write('Set resolution limit: %5.2f' % resolution)
return self._mosflm_hklout
return MosflmWrapper()
if __name__ == '__main_old__':
# run a demo test
if not os.environ.has_key('XIA2_ROOT'):
raise RuntimeError, 'XIA2_ROOT not defined'
m = Mosflm()
directory = os.path.join(os.environ['XIA2_ROOT'],
'Data', 'Test', 'Images')
# from Labelit
m.set_beam((108.9, 105.0))
m.setup_from_image(os.path.join(directory, '12287_1_E1_001.img'))
# FIXME 16/AUG/06 this should be set automatically - there is no
# reason to manually specify the images
m.add_indexer_image_wedge(1)
m.add_indexer_image_wedge(90)
# m.set_indexer_input_lattice('aP')
print 'Refined beam is: %6.2f %6.2f' % m.get_indexer_beam()
print 'Distance: %6.2f' % m.get_indexer_distance()
print 'Cell: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % m.get_indexer_cell()
print 'Lattice: %s' % m.get_indexer_lattice()
print 'Mosaic: %6.2f' % m.get_indexer_mosaic()
print 'Matrix:'
for l in m.get_indexer_payload('mosflm_orientation_matrix'):
print l[:-1]
if __name__ == '__main__':
# run a demo test
if not os.environ.has_key('XIA2_ROOT'):
raise RuntimeError, 'XIA2_ROOT not defined'
m = Mosflm()
directory = os.path.join(os.environ['XIA2_ROOT'],
'Data', 'Test', 'Images')
# from Labelit
m.set_beam((108.9, 105.0))
m.setup_from_image(os.path.join(directory, '12287_1_E1_001.img'))
# FIXME 16/AUG/06 this should be set automatically - there is no
# reason to manually specify the images
m.add_indexer_image_wedge(1)
m.add_indexer_image_wedge(90)
# m.set_indexer_input_lattice('aP')
# to test the awkward indexing problems -
# this is not the default solution
# m.set_indexer_input_lattice('mP')
# m.set_indexer_input_cell((51.72, 51.66, 157.89, 90.00, 90.00, 90.00))
print 'Refined beam is: %6.2f %6.2f' % m.get_indexer_beam()
print 'Distance: %6.2f' % m.get_indexer_distance()
print 'Cell: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % m.get_indexer_cell()
print 'Lattice: %s' % m.get_indexer_lattice()
print 'Mosaic: %6.2f' % m.get_indexer_mosaic()
print 'Matrix:'
for l in m.get_indexer_payload('mosflm_orientation_matrix'):
print l[:-1]
if False:
n = Mosflm()
n.setup_from_image(os.path.join(directory, '12287_1_E1_001.img'))
n.set_integrater_indexer(m)
n.integrate()
print 'Refined beam is: %6.2f %6.2f' % n.get_indexer_beam()
print 'Distance: %6.2f' % n.get_indexer_distance()
print 'Cell: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % n.get_indexer_cell()
print 'Lattice: %s' % n.get_indexer_lattice()
print 'Mosaic: %6.2f' % n.get_indexer_mosaic()
print 'Matrix:'
for l in n.get_indexer_payload('mosflm_orientation_matrix'):
print l[:-1]
# --- file boundary (extraction artifact: stray table-cell separator) ---
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous training utility functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import os
from collections import defaultdict
from pydoc import locate
import json
import tensorflow as tf
from tensorflow import gfile
from seq2seq.contrib import rnn_cell
class TrainOptions(object):
  """A collection of options that are passed to the training script
  and can be saved to perform inference later.

  Args:
    model_class: Name of the model class.
    model_params: A dictionary of parameters passed to the model.
  """

  def __init__(self, model_class, model_params):
    self._model_class = model_class
    self._model_params = model_params

  @property
  def model_class(self):
    """Returns the model class name."""
    # DOC FIX: the docstrings of this property and model_params were swapped.
    return self._model_class

  @property
  def model_params(self):
    """Returns the model parameter dictionary."""
    return self._model_params

  @staticmethod
  def path(model_dir):
    """Returns the path to the options file.

    Args:
      model_dir: The model directory
    """
    return os.path.join(model_dir, "train_options.json")

  def dump(self, model_dir):
    """Dumps the options to a file in the model directory.

    Args:
      model_dir: Path to the model directory. The options will be
        dumped into a file in this directory.
    """
    gfile.MakeDirs(model_dir)
    options_dict = {
        "model_class": self.model_class,
        "model_params": self.model_params,
    }
    # Renamed the handle so it no longer shadows the builtin `file`.
    with gfile.GFile(TrainOptions.path(model_dir), "wb") as options_file:
      options_file.write(json.dumps(options_dict).encode("utf-8"))

  @staticmethod
  def load(model_dir):
    """Loads options from the given model directory.

    Args:
      model_dir: Path to the model directory.
    """
    with gfile.GFile(TrainOptions.path(model_dir), "rb") as options_file:
      options_dict = json.loads(options_file.read().decode("utf-8"))
    # BUG FIX: `defaultdict(None, d)` supplies no defaults (a None
    # default_factory just re-raises KeyError), so a missing key crashed.
    # Use .get() so absent keys default to None as intended.
    return TrainOptions(
        model_class=options_dict.get("model_class"),
        model_params=options_dict.get("model_params"))
def cell_from_spec(cell_classname, cell_params):
  """Create a RNN Cell instance from a class name and parameter dict.

  Args:
    cell_classname: Name of the cell class, e.g. "BasicLSTMCell".
    cell_params: A dictionary of parameters to pass to the cell constructor.

  Returns:
    A RNNCell instance.

  Raises:
    ValueError: If `cell_params` contains an argument that the cell
      constructor does not accept.
  """
  cell_params = cell_params.copy()

  # Find the cell class: fully-qualified names first, then the local
  # rnn_cell module.
  cell_class = locate(cell_classname) or getattr(rnn_cell, cell_classname)

  # Make sure additional arguments are valid.
  # FIX: inspect.getargspec is deprecated and removed in Python 3.11;
  # prefer getfullargspec when it exists (the `or` keeps the old
  # attribute lookup lazy, so Python 2 still works).
  argspec_fn = getattr(inspect, "getfullargspec", None) or inspect.getargspec
  cell_args = set(argspec_fn(cell_class.__init__).args[1:])
  for key in cell_params.keys():
    if key not in cell_args:
      raise ValueError(
          """{} is not a valid argument for {} class. Available arguments
          are: {}""".format(key, cell_class.__name__, cell_args))

  # Create cell
  return cell_class(**cell_params)
def get_rnn_cell(cell_class,
                 cell_params,
                 num_layers=1,
                 dropout_input_keep_prob=1.0,
                 dropout_output_keep_prob=1.0,
                 residual_connections=False,
                 residual_combiner="add",
                 residual_dense=False):
  """Creates a new RNN Cell.

  Args:
    cell_class: Name of the cell class, e.g. "BasicLSTMCell".
    cell_params: A dictionary of parameters to pass to the cell constructor.
    num_layers: Number of layers. Multiple layers are wrapped into a
      single multi-layer cell.
    dropout_input_keep_prob: Dropout keep probability applied
      to the input of cell *at each layer*
    dropout_output_keep_prob: Dropout keep probability applied
      to the output of cell *at each layer*
    residual_connections: If true, add residual connections
      between all cells
    residual_combiner: How residual inputs are combined with cell outputs.
    residual_dense: Whether residual connections are dense.

  Returns:
    An instance of `tf.contrib.rnn.RNNCell`.
  """
  #pylint: disable=redefined-variable-type
  apply_dropout = (dropout_input_keep_prob < 1.0
                   or dropout_output_keep_prob < 1.0)

  def _build_layer():
    """Creates one cell instance, dropout-wrapped when requested."""
    layer = cell_from_spec(cell_class, cell_params)
    if apply_dropout:
      layer = tf.contrib.rnn.DropoutWrapper(
          cell=layer,
          input_keep_prob=dropout_input_keep_prob,
          output_keep_prob=dropout_output_keep_prob)
    return layer

  cells = [_build_layer() for _ in range(num_layers)]

  if len(cells) > 1:
    return rnn_cell.ExtendedMultiRNNCell(
        cells=cells,
        residual_connections=residual_connections,
        residual_combiner=residual_combiner,
        residual_dense=residual_dense)
  return cells[0]
def create_learning_rate_decay_fn(decay_type,
                                  decay_steps,
                                  decay_rate,
                                  start_decay_at=0,
                                  stop_decay_at=1e9,
                                  min_learning_rate=None,
                                  staircase=False):
  """Creates a function that decays the learning rate.

  Args:
    decay_steps: How often to apply decay.
    decay_rate: A Python number. The decay rate.
    start_decay_at: Don't decay before this step
    stop_decay_at: Don't decay after this step
    min_learning_rate: Don't decay below this number
    decay_type: A decay function name defined in `tf.train`
    staircase: Whether to apply decay in a discrete staircase,
      as opposed to continuous, fashion.

  Returns:
    A function that takes (learning_rate, global_step) as inputs
    and returns the learning rate for the given step.
    Returns `None` if decay_type is empty or None.
  """
  if decay_type is None or decay_type == "":
    return None

  start_decay_at = tf.to_int32(start_decay_at)
  stop_decay_at = tf.to_int32(stop_decay_at)

  def decay_fn(learning_rate, global_step):
    """The computed learning rate decay function.
    """
    # BUG FIX: also cast the global step. The estimator's global step
    # tensor is int64 by default, while start/stop_decay_at above are
    # int32; tf.minimum and the piecewise_constant boundaries require
    # matching integer dtypes.
    global_step = tf.to_int32(global_step)

    decay_type_fn = getattr(tf.train, decay_type)
    decayed_learning_rate = decay_type_fn(
        learning_rate=learning_rate,
        global_step=tf.minimum(global_step, stop_decay_at) - start_decay_at,
        decay_steps=decay_steps,
        decay_rate=decay_rate,
        staircase=staircase,
        name="decayed_learning_rate")

    # Hold the rate constant until start_decay_at, then use the decayed value.
    final_lr = tf.train.piecewise_constant(
        x=global_step,
        boundaries=[start_decay_at],
        values=[learning_rate, decayed_learning_rate])

    if min_learning_rate:
      final_lr = tf.maximum(final_lr, min_learning_rate)

    return final_lr

  return decay_fn
def create_input_fn(pipeline,
                    batch_size,
                    bucket_boundaries=None,
                    allow_smaller_final_batch=False):
  """Creates an input function that can be used with tf.learn estimators.
  Note that you must pass "factory functions" for both the data provider and
  featurizer to ensure that everything will be created in the same graph.

  Args:
    pipeline: An instance of `seq2seq.data.InputPipeline`.
    batch_size: Create batches of this size. A queue to hold a
      reasonable number of batches in memory is created.
    bucket_boundaries: int list, increasing non-negative numbers.
      If None, no bucket is performed.
    allow_smaller_final_batch: If True, the last batch may contain fewer
      than `batch_size` examples.

  Returns:
    An input function that returns `(feature_batch, labels_batch)`
    tuples when called.
  """
  def input_fn():
    """Creates features and labels.
    """
    data_provider = pipeline.make_data_provider()
    features_and_labels = pipeline.read_from_data_provider(data_provider)

    if bucket_boundaries:
      # Group examples of similar source length into the same batch to
      # reduce padding; keep_input drops empty (length-0) sources.
      _, batch = tf.contrib.training.bucket_by_sequence_length(
          input_length=features_and_labels["source_len"],
          bucket_boundaries=bucket_boundaries,
          tensors=features_and_labels,
          batch_size=batch_size,
          keep_input=features_and_labels["source_len"] >= 1,
          dynamic_pad=True,
          capacity=5000 + 16 * batch_size,
          allow_smaller_final_batch=allow_smaller_final_batch,
          name="bucket_queue")
    else:
      # Plain FIFO batching with dynamic padding to the longest example.
      batch = tf.train.batch(
          tensors=features_and_labels,
          enqueue_many=False,
          batch_size=batch_size,
          dynamic_pad=True,
          capacity=5000 + 16 * batch_size,
          allow_smaller_final_batch=allow_smaller_final_batch,
          name="batch_queue")

    # Separate features and labels; labels_batch is None when the batch
    # carries no label keys (e.g. at inference time).
    features_batch = {k: batch[k] for k in pipeline.feature_keys}
    if set(batch.keys()).intersection(pipeline.label_keys):
      labels_batch = {k: batch[k] for k in pipeline.label_keys}
    else:
      labels_batch = None

    return features_batch, labels_batch

  return input_fn
# NOTE: stray commit message from extraction -- "Also cast global step".
# The duplicate of this module that follows adds a tf.to_int32(global_step)
# cast inside decay_fn; that is the change this message described.
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous training utility functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import os
from collections import defaultdict
from pydoc import locate
import json
import tensorflow as tf
from tensorflow import gfile
from seq2seq.contrib import rnn_cell
class TrainOptions(object):
  """A collection of options that are passed to the training script
  and can be saved to perform inference later.

  Args:
    model_class: Name of the model class.
    model_params: A dictionary of parameters passed to the model.
  """

  def __init__(self, model_class, model_params):
    self._model_class = model_class
    self._model_params = model_params

  @property
  def model_class(self):
    """Returns the model class name."""
    # DOC FIX: the docstrings of this property and model_params were swapped.
    return self._model_class

  @property
  def model_params(self):
    """Returns the model parameter dictionary."""
    return self._model_params

  @staticmethod
  def path(model_dir):
    """Returns the path to the options file.

    Args:
      model_dir: The model directory
    """
    return os.path.join(model_dir, "train_options.json")

  def dump(self, model_dir):
    """Dumps the options to a file in the model directory.

    Args:
      model_dir: Path to the model directory. The options will be
        dumped into a file in this directory.
    """
    gfile.MakeDirs(model_dir)
    options_dict = {
        "model_class": self.model_class,
        "model_params": self.model_params,
    }
    # Renamed the handle so it no longer shadows the builtin `file`.
    with gfile.GFile(TrainOptions.path(model_dir), "wb") as options_file:
      options_file.write(json.dumps(options_dict).encode("utf-8"))

  @staticmethod
  def load(model_dir):
    """Loads options from the given model directory.

    Args:
      model_dir: Path to the model directory.
    """
    with gfile.GFile(TrainOptions.path(model_dir), "rb") as options_file:
      options_dict = json.loads(options_file.read().decode("utf-8"))
    # BUG FIX: `defaultdict(None, d)` supplies no defaults (a None
    # default_factory just re-raises KeyError), so a missing key crashed.
    # Use .get() so absent keys default to None as intended.
    return TrainOptions(
        model_class=options_dict.get("model_class"),
        model_params=options_dict.get("model_params"))
def cell_from_spec(cell_classname, cell_params):
  """Create a RNN Cell instance from a class name and parameter dict.

  Args:
    cell_classname: Name of the cell class, e.g. "BasicLSTMCell".
    cell_params: A dictionary of parameters to pass to the cell constructor.

  Returns:
    A RNNCell instance.

  Raises:
    ValueError: If `cell_params` contains an argument that the cell
      constructor does not accept.
  """
  cell_params = cell_params.copy()

  # Find the cell class: fully-qualified names first, then the local
  # rnn_cell module.
  cell_class = locate(cell_classname) or getattr(rnn_cell, cell_classname)

  # Make sure additional arguments are valid.
  # FIX: inspect.getargspec is deprecated and removed in Python 3.11;
  # prefer getfullargspec when it exists (the `or` keeps the old
  # attribute lookup lazy, so Python 2 still works).
  argspec_fn = getattr(inspect, "getfullargspec", None) or inspect.getargspec
  cell_args = set(argspec_fn(cell_class.__init__).args[1:])
  for key in cell_params.keys():
    if key not in cell_args:
      raise ValueError(
          """{} is not a valid argument for {} class. Available arguments
          are: {}""".format(key, cell_class.__name__, cell_args))

  # Create cell
  return cell_class(**cell_params)
def get_rnn_cell(cell_class,
                 cell_params,
                 num_layers=1,
                 dropout_input_keep_prob=1.0,
                 dropout_output_keep_prob=1.0,
                 residual_connections=False,
                 residual_combiner="add",
                 residual_dense=False):
  """Creates a new RNN Cell.

  Args:
    cell_class: Name of the cell class, e.g. "BasicLSTMCell".
    cell_params: A dictionary of parameters to pass to the cell constructor.
    num_layers: Number of layers. Multiple layers are wrapped into a
      single multi-layer cell.
    dropout_input_keep_prob: Dropout keep probability applied
      to the input of cell *at each layer*
    dropout_output_keep_prob: Dropout keep probability applied
      to the output of cell *at each layer*
    residual_connections: If true, add residual connections
      between all cells
    residual_combiner: How residual inputs are combined with cell outputs.
    residual_dense: Whether residual connections are dense.

  Returns:
    An instance of `tf.contrib.rnn.RNNCell`.
  """
  #pylint: disable=redefined-variable-type
  apply_dropout = (dropout_input_keep_prob < 1.0
                   or dropout_output_keep_prob < 1.0)

  def _build_layer():
    """Creates one cell instance, dropout-wrapped when requested."""
    layer = cell_from_spec(cell_class, cell_params)
    if apply_dropout:
      layer = tf.contrib.rnn.DropoutWrapper(
          cell=layer,
          input_keep_prob=dropout_input_keep_prob,
          output_keep_prob=dropout_output_keep_prob)
    return layer

  cells = [_build_layer() for _ in range(num_layers)]

  if len(cells) > 1:
    return rnn_cell.ExtendedMultiRNNCell(
        cells=cells,
        residual_connections=residual_connections,
        residual_combiner=residual_combiner,
        residual_dense=residual_dense)
  return cells[0]
def create_learning_rate_decay_fn(decay_type,
                                  decay_steps,
                                  decay_rate,
                                  start_decay_at=0,
                                  stop_decay_at=1e9,
                                  min_learning_rate=None,
                                  staircase=False):
  """Creates a function that decays the learning rate.

  Args:
    decay_steps: How often to apply decay.
    decay_rate: A Python number. The decay rate.
    start_decay_at: Don't decay before this step
    stop_decay_at: Don't decay after this step
    min_learning_rate: Don't decay below this number
    decay_type: A decay function name defined in `tf.train`
    staircase: Whether to apply decay in a discrete staircase,
      as opposed to continuous, fashion.

  Returns:
    A function that takes (learning_rate, global_step) as inputs
    and returns the learning rate for the given step.
    Returns `None` if decay_type is empty or None.
  """
  if decay_type is None or decay_type == "":
    return None

  start_decay_at = tf.to_int32(start_decay_at)
  stop_decay_at = tf.to_int32(stop_decay_at)

  def decay_fn(learning_rate, global_step):
    """The computed learning rate decay function.
    """
    # Cast to int32 so it matches start/stop_decay_at above in
    # tf.minimum and in the piecewise_constant boundaries.
    global_step = tf.to_int32(global_step)

    decay_type_fn = getattr(tf.train, decay_type)
    decayed_learning_rate = decay_type_fn(
        learning_rate=learning_rate,
        global_step=tf.minimum(global_step, stop_decay_at) - start_decay_at,
        decay_steps=decay_steps,
        decay_rate=decay_rate,
        staircase=staircase,
        name="decayed_learning_rate")

    # Hold the rate constant until start_decay_at, then use the decayed value.
    final_lr = tf.train.piecewise_constant(
        x=global_step,
        boundaries=[start_decay_at],
        values=[learning_rate, decayed_learning_rate])

    if min_learning_rate:
      final_lr = tf.maximum(final_lr, min_learning_rate)

    return final_lr

  return decay_fn
def create_input_fn(pipeline,
                    batch_size,
                    bucket_boundaries=None,
                    allow_smaller_final_batch=False):
  """Creates an input function that can be used with tf.learn estimators.
  Note that you must pass "factory functions" for both the data provider and
  featurizer to ensure that everything will be created in the same graph.

  Args:
    pipeline: An instance of `seq2seq.data.InputPipeline`.
    batch_size: Create batches of this size. A queue to hold a
      reasonable number of batches in memory is created.
    bucket_boundaries: int list, increasing non-negative numbers.
      If None, no bucket is performed.
    allow_smaller_final_batch: If True, the last batch may contain fewer
      than `batch_size` examples.

  Returns:
    An input function that returns `(feature_batch, labels_batch)`
    tuples when called.
  """
  def input_fn():
    """Creates features and labels.
    """
    data_provider = pipeline.make_data_provider()
    features_and_labels = pipeline.read_from_data_provider(data_provider)

    if bucket_boundaries:
      # Group examples of similar source length into the same batch to
      # reduce padding; keep_input drops empty (length-0) sources.
      _, batch = tf.contrib.training.bucket_by_sequence_length(
          input_length=features_and_labels["source_len"],
          bucket_boundaries=bucket_boundaries,
          tensors=features_and_labels,
          batch_size=batch_size,
          keep_input=features_and_labels["source_len"] >= 1,
          dynamic_pad=True,
          capacity=5000 + 16 * batch_size,
          allow_smaller_final_batch=allow_smaller_final_batch,
          name="bucket_queue")
    else:
      # Plain FIFO batching with dynamic padding to the longest example.
      batch = tf.train.batch(
          tensors=features_and_labels,
          enqueue_many=False,
          batch_size=batch_size,
          dynamic_pad=True,
          capacity=5000 + 16 * batch_size,
          allow_smaller_final_batch=allow_smaller_final_batch,
          name="batch_queue")

    # Separate features and labels; labels_batch is None when the batch
    # carries no label keys (e.g. at inference time).
    features_batch = {k: batch[k] for k in pipeline.feature_keys}
    if set(batch.keys()).intersection(pipeline.label_keys):
      labels_batch = {k: batch[k] for k in pipeline.label_keys}
    else:
      labels_batch = None

    return features_batch, labels_batch

  return input_fn
# --- file boundary (extraction artifact: stray table-cell separator) ---
#!/usr/bin/env python
# encoding: utf-8
"""
visualization.py
Created by Nikolas Tezak on 2011-05-12.
Copyright (c) 2011 . All rights reserved.
Visualize Circuit objects.
"""
import algebra.circuit_algebra as ca
from circuit_components.component import Component, SubComponent
import pyx
from itertools import izip
# Shared TeX runner used by pyx to typeset the circuit symbol labels.
texrunner = pyx.text.texrunner(mode = 'latex')

HUNIT = +4      # Basic unit for the width of a single Circuit object
                # the positive value corresponds to visualizing the channel
                # 'flow' from left to right
VUNIT = -1.     # Basic unit for the height of a single Circuit object,
                # the negative value makes the effective y-axis point downwards
RHMARGIN = .1   # Relative horizontal margin between gridline and Circuit object
RVMARGIN = .2   # Relative vertical margin between gridline and Circuit object
RPLENGTH = .4   # Relative width of a channel permutation

# helper function
def _curve(x1, y1, x2, y2, hunit = HUNIT, vunit = VUNIT):
    """
    Return a PyX curved path from (x1, y1) to (x2, y2),
    such that the slope at either end is zero.
    """
    # Scale relative coordinates into absolute canvas units.
    xa = x1 * hunit
    xb = x2 * hunit
    xm = (x1 + x2) * hunit / 2
    ya = y1 * vunit
    yb = y2 * vunit
    # Both control points sit at the horizontal midpoint, at the start
    # and end heights respectively, which forces a zero slope at the ends.
    return pyx.path.curve(xa, ya, xm, ya, xm, yb, xb, yb)
def draw_circuit_canvas(circuit, hunit = HUNIT, vunit = VUNIT, rhmargin = RHMARGIN, rvmargin = RVMARGIN, rpermutation_length = RPLENGTH, draw_boxes = True, permutation_arrows = False):
    """
    Return a PyX canvas, (rwidth, rheight), in_port_positions, out_port_positions visualizing circuit.
    (rwidth, rheight) are the relative width and height of the full objects visualization.
    in_port_positions, out_port_positions are tuples that list the (relative) vertical coordinates of the ports of circuit.

    The function recurses over the circuit-algebra expression tree: each
    structural branch below (identity, atomic symbol, permutation, series,
    concatenation, feedback) draws its sub-circuits and composes their
    canvases and port coordinates.
    """
    # A plain string is first parsed into a circuit expression.
    if isinstance(circuit, str):
        try:
            import parse_circuit_strings
            parsed = parse_circuit_strings.parse_circuit_string(circuit)
            if len(parsed) > 1:
                raise Exception('Can currently only process a single expression.')
            circuit = parsed[0]
        # NOTE(review): only a missing parser module is converted to
        # ValueError here; actual parse failures propagate unchanged.
        except ImportError, e:
            raise ValueError("Could not parse string %r into CircuitExpression: %s" % (circuit, e))
    if not isinstance(circuit, ca.Circuit):
        raise ValueError()

    nc = circuit.cdim          # channel dimension = number of ports per side
    c = pyx.canvas.canvas()

    if isinstance(circuit, ca.CIdentity):
        # simply create a line going through
        c.stroke(pyx.path.line(0, vunit/2, hunit, vunit/2))
        return c, (1, 1), (.5,), (.5,)
    elif isinstance(circuit, (ca.CSymbol, ca.SeriesInverse, ca.SLH, Component, SubComponent)):
        # Atomic object: a labelled box with nc ports on either side.
        # draw box
        b = pyx.path.rect(rhmargin * hunit, rvmargin * vunit, hunit - 2 * rhmargin * hunit, nc * vunit - 2 * rvmargin * vunit)
        c.stroke(b)
        # draw symbol name (typeset as TeX math)
        c.text(hunit/2., nc * vunit/2., "$%s$" % circuit.tex() , [pyx.text.halign.boxcenter, pyx.text.valign.middle])
        # draw connectors at half-unit positions
        connector_positions = tuple((.5 + k) for k in xrange(nc))
        for y in connector_positions:
            c.stroke(pyx.path.line(0, y * vunit, rhmargin * hunit, y * vunit), [pyx.deco.earrow()])
            c.stroke(pyx.path.line(hunit * (1 - rhmargin), y * vunit, hunit, y * vunit))
        return c, (1, nc), connector_positions, connector_positions
    elif isinstance(circuit, ca.CPermutation):
        # Channel permutation: connect each input to its permuted output
        # with an s-shaped curve.
        permutation = circuit.permutation
        connector_positions = tuple((k + 0.5) for k in xrange(nc))
        target_positions = [connector_positions[permutation[k]] for k in range(nc)]
        # draw curves
        for y1, y2 in izip(connector_positions, target_positions):
            if permutation_arrows:
                c.stroke(_curve(0, y1, rpermutation_length, y2, hunit = hunit, vunit = vunit), [pyx.deco.earrow()])
            else:
                c.stroke(_curve(0, y1, rpermutation_length, y2, hunit = hunit, vunit = vunit))
        if draw_boxes:
            # dashed green box marks the permutation region
            b = pyx.path.rect(.5* rhmargin * hunit, .5* rvmargin * vunit, rpermutation_length * hunit - rhmargin * hunit, nc * vunit - rvmargin * vunit)
            c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.green])
        return c, (rpermutation_length, nc), connector_positions, connector_positions
    elif isinstance(circuit, ca.SeriesProduct):
        # Series composition: operands are drawn left-to-right; they are
        # reversed so the drawing order follows the signal flow direction.
        # generate graphics of operand subsystems
        sub_graphics = [draw_circuit_canvas(op, hunit = hunit,
                            vunit = vunit, rhmargin = rhmargin,
                            rvmargin = rvmargin,
                            rpermutation_length = rpermutation_length,
                            draw_boxes = draw_boxes,
                            permutation_arrows = permutation_arrows) for op in reversed(circuit.operands)]
        # set up first one
        previous_csub, previous_dims, previous_c_in, previous_c_out = sub_graphics[0]
        hoffset = 0
        c.insert(previous_csub)
        hoffset += previous_dims[0]
        max_height = nc
        # this will later become the full series in-port coordinate tuple
        first_c_in = previous_c_in
        # now add all other operand subsystems
        for csub, dims, c_in, c_out in sub_graphics[1:]:
            max_height = max(dims[1], max_height)
            if previous_c_out != c_in: # vertical port locations don't agree, map signals correspondingly
                x1 = hoffset
                x2 = hoffset + rpermutation_length
                # draw connection curves
                for y1, y2 in zip(previous_c_out, c_in):
                    c.stroke(_curve(x1, y1, x2, y2, hunit = hunit, vunit = vunit))
                hoffset += rpermutation_length
            previous_c_in, previous_c_out = c_in, c_out
            # now insert current system
            c.insert(csub, [pyx.trafo.translate(hunit * hoffset, 0)])
            hoffset += dims[0]
        if draw_boxes:
            # dashed red box marks the whole series region
            b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, hoffset * hunit - 1. * rhmargin * hunit, max_height * vunit - rvmargin * vunit)
            c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.red])
        return c, (hoffset, max_height), first_c_in, c_out
    elif isinstance(circuit, ca.Concatenation):
        # Parallel composition: stack operand subsystems vertically.
        voffset = 0
        total_cin, total_cout = (), ()
        widths = [] # stores the component width for each channel(!)
        # generate all operand subsystem graphics and stack them vertically
        for op in circuit.operands:
            csub, dims, c_in, c_out = draw_circuit_canvas(op, hunit = hunit,
                                        vunit = vunit, rhmargin = rhmargin,
                                        rvmargin = rvmargin,
                                        rpermutation_length = rpermutation_length,
                                        draw_boxes = draw_boxes,
                                        permutation_arrows = permutation_arrows)
            # add appropriate offsets to vertical port coordinates
            total_cin += tuple(y + voffset for y in c_in)
            total_cout += tuple(y + voffset for y in c_out)
            c.insert(csub, [pyx.trafo.translate(0, vunit * voffset)])
            # keep track of width in all channels for this subsystem
            widths += [dims[0]] * op.cdim
            voffset += dims[1]
        max_width = max(widths)
        if max_width > min(widths): # components differ in width => we must extend the narrow component output lines
            for x,y in zip(widths, total_cout):
                if x == max_width:
                    continue
                ax, ax_to = x * hunit, max_width * hunit
                ay = y * vunit
                c.stroke(pyx.path.line(ax, ay, ax_to, ay))
        if draw_boxes:
            # dashed blue box marks the whole concatenation region
            b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, max_width * hunit - 1. * rhmargin * hunit, voffset * vunit - rvmargin * vunit)
            c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.blue])
        return c, (max_width, voffset), total_cin, total_cout
    elif isinstance(circuit, ca.Feedback):
        # Feedback: draw the operand, then route the fed-back output
        # around the top of the subsystem back to the fed-back input.
        # generate and insert graphics of subsystem
        csub, dims, c_in, c_out = draw_circuit_canvas(circuit.operand, hunit = hunit,
                                    vunit = vunit, rhmargin = rhmargin,
                                    rvmargin = rvmargin,
                                    rpermutation_length = rpermutation_length,
                                    draw_boxes = draw_boxes,
                                    permutation_arrows = permutation_arrows)
        c.insert(csub, [pyx.trafo.translate(hunit * .5 * rhmargin, 0)])
        width, height = dims
        # create feedback loop
        fb_out, fb_in = circuit.out_in_pair
        out_coords = (width + .5 * rhmargin) * hunit, c_out[fb_out] * vunit
        in_coords = .5 * rhmargin * hunit, c_in[fb_in] * vunit
        upper_y = (height) * vunit
        feedback_line = pyx.path.path(pyx.path.moveto(*out_coords), pyx.path.lineto(out_coords[0], upper_y),
                            pyx.path.lineto(in_coords[0], upper_y), pyx.path.lineto(*in_coords))
        c.stroke(feedback_line)
        # remove feedback port coordinates
        new_c_in = c_in[:fb_in] + c_in[fb_in + 1 :]
        new_c_out = c_out[:fb_out] + c_out[fb_out + 1 :]
        # extend port connectors a little bit outward,
        # such that the feedback loop is not at the edge anymore
        for y in new_c_in:
            c.stroke(pyx.path.line(0, y * vunit, .5 * rhmargin * hunit, y * vunit))
        for y in new_c_out:
            c.stroke(pyx.path.line((width + .5 * rhmargin) * hunit, y * vunit, (width + rhmargin) * hunit, y * vunit))
        return c, (width + rhmargin, height + rhmargin), new_c_in, new_c_out
    # Reached only for Circuit subtypes not handled by any branch above.
    raise Exception('Visualization not implemented for type %s' % type(circuit))
def draw_circuit(circuit, filename, direction = 'lr',
hunit = HUNIT, vunit = VUNIT,
rhmargin = RHMARGIN, rvmargin = RVMARGIN,
rpermutation_length = RPLENGTH,
draw_boxes = True,
permutation_arrows = False):
"""
Generate a graphic representation of circuit and store them in a file.
The graphics format is determined from the file extension.
direction may be either 'lr'= left-to-right or 'rl' = right-to-left. In the first case,
the hunit parameter is passed to the draw_circuit_canvas function as its absolute value.
In the second case it is passed as its negative absolute value.
"""
if direction == 'lr':
hunit = abs(hunit)
elif direction == 'rl':
hunit = -abs(hunit)
try:
c, dims, c_in, c_out = draw_circuit_canvas(circuit, hunit = hunit, vunit = vunit,
rhmargin = rhmargin, rvmargin = rvmargin,
rpermutation_length = rpermutation_length,
draw_boxes = draw_boxes,
permutation_arrows = permutation_arrows)
except ValueError:
"Print no graphics returned for circuit %r" % circuit
return False
if any(filename.endswith(suffix) for suffix in ('.pdf', '.eps', '.ps')):
c.writetofile(filename)
elif any(filename.endswith(suffix) for suffix in ('.png','.jpg')):
c.pipeGS(filename)
return True
def display_circuit(circuit):
    """
    Render circuit to a fresh PDF in the system temp directory and open
    it with Quick Look (qlmanage), logging the viewer output.
    """
    import os, subprocess, tempfile
    tmp_dir = tempfile.gettempdir()

    # Pick the first index whose PDF file does not exist yet.
    index = 0
    while os.path.exists(tmp_dir + "/visualize_circuit_%d.pdf" % index):
        index += 1

    fname = tmp_dir + "/visualize_circuit_%d.pdf" % index
    logname = tmp_dir + "/visualize_circuit_%d-python.log" % index

    if draw_circuit(circuit, fname):
        with open(logname, 'w') as logfile:
            subprocess.call(("qlmanage", "-p", fname), stdout = logfile, stderr = logfile)
def test():
    """Ad-hoc visual smoke test: render one example circuit expression.

    The commented-out calls below are earlier test cases kept for
    reference; only the final display_circuit call is active.
    """
    # s = """
    # P_sigma(1,2,3,0)
    # a_nice_name(3)   # test comment
    # (a(3) + b(5))
    # (c(3) << d(3))
    # ((e(3) + cid(1)) << f(4))
    # [cid(1) + a(3)]_(1->2)
    # """
    # for q in filter(None, s.splitlines()):
    #     display_circuit(q)

    # NOTE(review): s2 and s3 are currently unused -- the active call
    # below passes a different literal directly.
    s2 = "(P_sigma(4, 0, 2, 3, 5, 1) << FB((((P_sigma(3, 4, 0, 5, 1, 2) << (NAND2(4) + cid(2))) + cid(1)) << (cid(2) + ((P_sigma(1, 0) + cid(1) + P_sigma(1, 0)) << (cid(1) + NAND1(4))))), 3, 5) << P_sigma(3, 4, 5, 0, 1, 2))"
    s3 = "((cid(1) + (P_sigma(0, 2, 3, 4, 1) << (((cid(1) + FB(NAND2(4), 0, 0)) << P_sigma(0, 2, 3, 1) << NAND1(4)) + cid(1)))) << P_sigma(5, 0, 1, 2, 3, 4))"

    # ps2 = parse_circuit_strings.parse_circuit_string(s2).pop().simplify()
    # display_circuit(s2)
    # display_circuit(ps2)
    # display_circuit(s3)
    # display_circuit("((cid(2) + ((cid(3) + P_sigma(1, 0)) << ((P_sigma(0, 3, 2, 1) << NAND1(4)) + cid(1)))) << (cid(2) + P_sigma(1, 2, 3, 4, 0)))")
    # display_circuit("(((P_sigma(0, 3, 4, 5, 1, 2) << ((P_sigma(1, 2, 0, 3) << NAND2(4)) + cid(2))) + cid(1)) << P_sigma(2, 4, 5, 0, 1, 3, 6)) << ((cid(2) + ((cid(3) + P_sigma(1, 0)) << ((P_sigma(0, 3, 2, 1) << NAND1(4)) + cid(1)))) << (cid(2) + P_sigma(1, 2, 3, 4, 0)))")
    # display_circuit("(((P_sigma(0, 3, 4, 5, 1, 2) << ((P_sigma(1, 2, 0, 3) << NAND2(4)) + cid(2))) + cid(1)) << (cid(1) + ((cid(3) + P_sigma(1, 2, 0)) << (((cid(3) + P_sigma(1, 0)) << ((P_sigma(3, 1, 2, 0) << NAND1(4)) + cid(1))) + cid(1)))) << P_sigma(5, 6, 1, 2, 3, 4, 0))")
    # display_circuit("((cid(1) + (P_sigma(0, 2, 3, 4, 1) << (((cid(1) + FB(NAND2(4), 0, 0)) << P_sigma(0, 2, 3, 1) << NAND1(4)) + cid(1)))) << P_sigma(5, 0, 1, 2, 3, 4))")
    # display_circuit("((cid(1) + (P_sigma(0, 2, 3, 4, 1) << (((cid(1) + FB(NAND2(4), 0, 0)) << P_sigma(0, 2, 3, 1) << NAND1(4)) + cid(1)))) << P_sigma(5, 0, 1, 2, 3, 4))")
    # display_circuit("FB(((BS1(2) + cid(1)) << (cid(1) + (BS2(2) << P_sigma(1, 0)))), 1, 2)")
    # display_circuit("(P_sigma(4, 0, 2, 3, 5, 1) << FB(((cid(5) + P_sigma(1, 0)) << ((P_sigma(0, 3, 4, 5, 1, 2) << ((P_sigma(3, 1, 0, 2) << NAND2(4)) + cid(2))) + cid(1)) << (cid(1) + ((cid(3) + P_sigma(1, 2, 0)) << (((cid(3) + P_sigma(1, 0)) << ((P_sigma(3, 1, 2, 0) << NAND1(4)) + cid(1))) + cid(1)))) << P_sigma(5, 6, 1, 2, 3, 4, 0))))")
    display_circuit("(P_sigma(4, 0, 2, 3, 5, 1) << FB((((P_sigma(0, 3, 4, 5, 1, 2) << ((P_sigma(3, 1, 0, 2) << NAND2(4)) + cid(2))) + cid(1)) << (cid(2) + (P_sigma(0, 2, 3, 4, 1) << (((cid(2) + P_sigma(1, 0)) << NAND1(4)) + cid(1))))), 5, 4) << P_sigma(2, 3, 4, 0, 1, 5))")
if __name__ == '__main__':
    # Run the visual smoke test when executed as a script.
    # FIX: removed the dead `pass` statement that followed the call.
    test()
# NOTE: stray commit message from extraction -- "additional edits for last commit".
#!/usr/bin/env python
# encoding: utf-8
"""
visualization.py
Created by Nikolas Tezak on 2011-05-12.
Copyright (c) 2011 . All rights reserved.
Visualize Circuit objects.
"""
import algebra.circuit_algebra as ca
from circuit_components.component import Component, SubComponent
import pyx
from itertools import izip
texrunner = pyx.text.texrunner(mode = 'latex')
HUNIT = +4 # Basic unit for the width of a single Circuit object
# the positive value corresponds to visualizing the channel
# 'flow' from left to right
VUNIT = -1. # Basic unit for the height of a single Circuit object,
# the negative value makes the effective y-axis point downwards
RHMARGIN = .1 # Relative horizontal margin between gridline and Circuit object
RVMARGIN = .2 # Relative vertical margin between gridline and Circuit object
RPLENGTH = .4 # Relative width of a channel permutation
# helper function
def _curve(x1, y1, x2, y2, hunit = HUNIT, vunit = VUNIT):
    """
    Build a PyX cubic Bezier path from (x1, y1) to (x2, y2) whose tangents
    are horizontal at both endpoints (zero slope), scaled to absolute canvas
    units via hunit/vunit.
    """
    start_x = x1 * hunit
    end_x = x2 * hunit
    mid_x = (x1 + x2) * hunit / 2
    start_y = y1 * vunit
    end_y = y2 * vunit
    return pyx.path.curve(start_x, start_y, mid_x, start_y,
                          mid_x, end_y, end_x, end_y)
def draw_circuit_canvas(circuit, hunit = HUNIT, vunit = VUNIT, rhmargin = RHMARGIN, rvmargin = RVMARGIN, rpermutation_length = RPLENGTH, draw_boxes = True, permutation_arrows = False):
    """
    Return a PyX canvas, (rwidth, rheight), in_port_positions, out_port_positions visualizing circuit.
    (rwidth, rheight) are the relative width and height of the full objects visualization.
    in_port_positions, out_port_positions are tuples that list the (relative) vertical coordinates of the ports of circuit.

    circuit may be a ca.Circuit instance or a string parsable by the
    parse_circuit_strings module. The function recurses over the algebraic
    structure of the expression (identity, symbol box, channel permutation,
    series product, concatenation, feedback) and composes the sub-canvases.
    """
    # Accept a textual circuit expression and parse it first.
    if isinstance(circuit, str):
        try:
            import parse_circuit_strings
            parsed = parse_circuit_strings.parse_circuit_string(circuit)
            if len(parsed) > 1:
                raise Exception('Can currently only process a single expression.')
            circuit = parsed[0]
        except ImportError, e:
            raise ValueError("Could not parse string %r into CircuitExpression: %s" % (circuit, e))
    if not isinstance(circuit, ca.Circuit):
        raise ValueError()
    nc = circuit.cdim  # channel count == vertical extent in grid units
    c = pyx.canvas.canvas()
    if isinstance(circuit, ca.CIdentity):
        # simply create a line going through
        c.stroke(pyx.path.line(0, vunit/2, hunit, vunit/2))
        return c, (1, 1), (.5,), (.5,)
    elif isinstance(circuit, (ca.CSymbol, ca.SeriesInverse, ca.SLH, Component, SubComponent)):
        # draw box
        b = pyx.path.rect(rhmargin * hunit, rvmargin * vunit, hunit - 2 * rhmargin * hunit, nc * vunit - 2 * rvmargin * vunit)
        c.stroke(b)
        # draw symbol name
        c.text(hunit/2., nc * vunit/2., "$%s$" % circuit.tex() , [pyx.text.halign.boxcenter, pyx.text.valign.middle])
        # draw connectors at half-unit positions
        connector_positions = tuple((.5 + k) for k in xrange(nc))
        for y in connector_positions:
            # incoming arrow on the left, plain stub on the right
            c.stroke(pyx.path.line(0, y * vunit, rhmargin * hunit, y * vunit), [pyx.deco.earrow()])
            c.stroke(pyx.path.line(hunit * (1 - rhmargin), y * vunit, hunit, y * vunit))
        return c, (1, nc), connector_positions, connector_positions
    elif isinstance(circuit, ca.CPermutation):
        permutation = circuit.permutation
        connector_positions = tuple((k + 0.5) for k in xrange(nc))
        # target_positions[k]: vertical coordinate input channel k is routed to
        target_positions = [connector_positions[permutation[k]] for k in range(nc)]
        # draw curves
        for y1, y2 in izip(connector_positions, target_positions):
            if permutation_arrows:
                c.stroke(_curve(0, y1, rpermutation_length, y2, hunit = hunit, vunit = vunit), [pyx.deco.earrow()])
            else:
                c.stroke(_curve(0, y1, rpermutation_length, y2, hunit = hunit, vunit = vunit))
        if draw_boxes:
            # dashed green box marks a permutation block
            b = pyx.path.rect(.5* rhmargin * hunit, .5* rvmargin * vunit, rpermutation_length * hunit - rhmargin * hunit, nc * vunit - rvmargin * vunit)
            c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.green])
        return c, (rpermutation_length, nc), connector_positions, connector_positions
    elif isinstance(circuit, ca.SeriesProduct):
        # generate graphics of operad subsystems
        # NOTE: operands are reversed so drawing runs in the direction of
        # signal flow (first applied system leftmost).
        sub_graphics = [draw_circuit_canvas(op, hunit = hunit,
                            vunit = vunit, rhmargin = rhmargin,
                            rvmargin = rvmargin,
                            rpermutation_length = rpermutation_length,
                            draw_boxes = draw_boxes,
                            permutation_arrows = permutation_arrows) for op in reversed(circuit.operands)]
        # set up first one
        previous_csub, previous_dims, previous_c_in, previous_c_out = sub_graphics[0]
        hoffset = 0
        c.insert(previous_csub)
        hoffset += previous_dims[0]
        max_height = nc
        # this will later become the full series in-port coordinate tuple
        first_c_in = previous_c_in
        # now add all other operand subsystems
        for csub, dims, c_in, c_out in sub_graphics[1:]:
            max_height = max(dims[1], max_height)
            if previous_c_out != c_in: # vertical port locations don't agree, map signals correspondingly
                x1 = hoffset
                x2 = hoffset + rpermutation_length
                # draw connection curves
                for y1, y2 in zip(previous_c_out, c_in):
                    c.stroke(_curve(x1, y1, x2, y2, hunit = hunit, vunit = vunit))
                hoffset += rpermutation_length
            previous_c_in, previous_c_out = c_in, c_out
            # now insert current system
            c.insert(csub, [pyx.trafo.translate(hunit * hoffset, 0)])
            hoffset += dims[0]
        if draw_boxes:
            # dashed red box marks a series product
            b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, hoffset * hunit - 1. * rhmargin * hunit, max_height * vunit - rvmargin * vunit)
            c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.red])
        return c, (hoffset, max_height), first_c_in, c_out
    elif isinstance(circuit, ca.Concatenation):
        voffset = 0
        total_cin, total_cout = (), ()
        widths = [] # stores the component width for each channel(!)
        # generate all operand subsystem graphics and stack them vertically
        for op in circuit.operands:
            csub, dims, c_in, c_out = draw_circuit_canvas(op, hunit = hunit,
                                vunit = vunit, rhmargin = rhmargin,
                                rvmargin = rvmargin,
                                rpermutation_length = rpermutation_length,
                                draw_boxes = draw_boxes,
                                permutation_arrows = permutation_arrows)
            # add appropriatly offsets to vertical port coordinates
            total_cin += tuple(y + voffset for y in c_in)
            total_cout += tuple(y + voffset for y in c_out)
            c.insert(csub, [pyx.trafo.translate(0, vunit * voffset)])
            # keep track of width in all channel for this subsystem
            widths += [dims[0]] * op.cdim
            voffset += dims[1]
        max_width = max(widths)
        if max_width > min(widths): # components differ in width => we must extend the narrow component output lines
            for x,y in zip(widths, total_cout):
                if x == max_width:
                    continue
                ax, ax_to = x * hunit, max_width * hunit
                ay = y * vunit
                c.stroke(pyx.path.line(ax, ay, ax_to, ay))
        if draw_boxes:
            # dashed blue box marks a concatenation
            b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, max_width * hunit - 1. * rhmargin * hunit, voffset * vunit - rvmargin * vunit)
            c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.blue])
        return c, (max_width, voffset), total_cin, total_cout
    elif isinstance(circuit, ca.Feedback):
        # generate and insert graphics of subsystem
        csub, dims, c_in, c_out = draw_circuit_canvas(circuit.operand, hunit = hunit,
                            vunit = vunit, rhmargin = rhmargin,
                            rvmargin = rvmargin,
                            rpermutation_length = rpermutation_length,
                            draw_boxes = draw_boxes,
                            permutation_arrows = permutation_arrows)
        c.insert(csub, [pyx.trafo.translate(hunit * .5 * rhmargin, 0)])
        width, height = dims
        # create feedback loop
        fb_out, fb_in = circuit.out_in_pair
        out_coords = (width + .5 * rhmargin) * hunit, c_out[fb_out] * vunit
        in_coords = .5 * rhmargin * hunit, c_in[fb_in] * vunit
        upper_y = (height) * vunit
        # route the loop around the subsystem: out -> up -> across -> down -> in
        feedback_line = pyx.path.path(pyx.path.moveto(*out_coords), pyx.path.lineto(out_coords[0], upper_y),
                                      pyx.path.lineto(in_coords[0], upper_y), pyx.path.lineto(*in_coords))
        c.stroke(feedback_line)
        # remove feedback port coordinates
        new_c_in = c_in[:fb_in] + c_in[fb_in + 1 :]
        new_c_out = c_out[:fb_out] + c_out[fb_out + 1 :]
        # extend port connectors a little bit outward,
        # such that the feedback loop is not at the edge anymore
        for y in new_c_in:
            c.stroke(pyx.path.line(0, y * vunit, .5 * rhmargin * hunit, y * vunit))
        for y in new_c_out:
            c.stroke(pyx.path.line((width + .5 * rhmargin) * hunit, y * vunit, (width + rhmargin) * hunit, y * vunit))
        return c, (width + rhmargin, height + rhmargin), new_c_in, new_c_out
    # fell through every supported algebraic case
    raise Exception('Visualization not implemented for type %s' % type(circuit))
def draw_circuit(circuit, filename, direction = 'lr',
                 hunit = HUNIT, vunit = VUNIT,
                 rhmargin = RHMARGIN, rvmargin = RVMARGIN,
                 rpermutation_length = RPLENGTH,
                 draw_boxes = True,
                 permutation_arrows = False):
    """
    Generate a graphic representation of circuit and store it in a file.
    The graphics format is determined from the file extension:
    .pdf/.eps/.ps are written directly by PyX, .png/.jpg are rendered via
    ghostscript.

    direction may be either 'lr' = left-to-right or 'rl' = right-to-left.
    In the first case, the hunit parameter is passed to draw_circuit_canvas
    as its absolute value; in the second case as its negative absolute value.

    Returns True when a canvas was produced, False when the circuit could
    not be visualized (draw_circuit_canvas raised ValueError).
    """
    if direction == 'lr':
        hunit = abs(hunit)
    elif direction == 'rl':
        hunit = -abs(hunit)
    try:
        c, dims, c_in, c_out = draw_circuit_canvas(circuit, hunit = hunit, vunit = vunit,
                                                   rhmargin = rhmargin, rvmargin = rvmargin,
                                                   rpermutation_length = rpermutation_length,
                                                   draw_boxes = draw_boxes,
                                                   permutation_arrows = permutation_arrows)
    except ValueError:
        # BUG FIX: this message used to be a bare string expression
        # ("Print no graphics ...") and was never actually printed.
        print("No graphics returned for circuit %r" % circuit)
        return False
    if any(filename.endswith(suffix) for suffix in ('.pdf', '.eps', '.ps')):
        c.writetofile(filename)
    elif any(filename.endswith(suffix) for suffix in ('.png', '.jpg')):
        c.pipeGS(filename)
    # NOTE(review): an unrecognized extension writes nothing but still
    # returns True -- callers relying on the return value should be aware.
    return True
def display_circuit(circuit):
    """
    Render ``circuit`` to a fresh PDF in the system temp directory and open
    it with OS X Quick Look (``qlmanage``), capturing the viewer's output
    in a sibling log file.
    """
    import os
    import subprocess
    import tempfile
    base = tempfile.gettempdir()
    index = 0
    # Pick the first index whose PDF does not exist yet.
    while os.path.exists(base + "/visualize_circuit_%d.pdf" % index):
        index += 1
    pdf_path = base + "/visualize_circuit_%d.pdf" % index
    log_path = base + "/visualize_circuit_%d-python.log" % index
    if not draw_circuit(circuit, pdf_path):
        return
    with open(log_path, 'w') as log_file:
        subprocess.call(("qlmanage", "-p", pdf_path), stdout = log_file, stderr = log_file)
def test():
    """Manual smoke test: render one example circuit string via Quick Look."""
    # Representative circuit strings retained for interactive experimentation;
    # only the display_circuit call at the end is active.
    example_a = "(P_sigma(4, 0, 2, 3, 5, 1) << FB((((P_sigma(3, 4, 0, 5, 1, 2) << (NAND2(4) + cid(2))) + cid(1)) << (cid(2) + ((P_sigma(1, 0) + cid(1) + P_sigma(1, 0)) << (cid(1) + NAND1(4))))), 3, 5) << P_sigma(3, 4, 5, 0, 1, 2))"
    example_b = "((cid(1) + (P_sigma(0, 2, 3, 4, 1) << (((cid(1) + FB(NAND2(4), 0, 0)) << P_sigma(0, 2, 3, 1) << NAND1(4)) + cid(1)))) << P_sigma(5, 0, 1, 2, 3, 4))"
    display_circuit("(P_sigma(4, 0, 2, 3, 5, 1) << FB((((P_sigma(0, 3, 4, 5, 1, 2) << ((P_sigma(3, 1, 0, 2) << NAND2(4)) + cid(2))) + cid(1)) << (cid(2) + (P_sigma(0, 2, 3, 4, 1) << (((cid(2) + P_sigma(1, 0)) << NAND1(4)) + cid(1))))), 5, 4) << P_sigma(2, 3, 4, 0, 1, 5))")
# Script entry point: render the example circuit when run directly.
if __name__ == '__main__':
    test()
    pass
|
import numpy
from braces.views import CsrfExemptMixin
from braces.views import JsonRequestResponseMixin
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from matplotlib import pyplot
from matplotlib import style
from matplotlib import ticker
from matplotlib.backends.backend_agg import FigureCanvas
from matplotlib.ticker import AutoMinorLocator
from core.managers.panda_manager import PandaManager
style.use('ggplot')
@cache_page(60 * 60)
def render_plot(request):
    """
    Render an example price chart (price vs. strength for 32-year-old 'MS'
    players, capped at 30M) as a PNG HttpResponse. Cached for one hour.
    """
    panda_manager = PandaManager()
    prices = panda_manager.get_grouped_prices('Strength', positions=['MS'], ages=[32], max_price=3 * 10 ** 7)
    fig = pyplot.figure(figsize=(16, 9), dpi=120)
    try:
        ax = fig.add_subplot(1, 1, 1)
        x = numpy.array(prices.mean().index)
        y = prices.mean()
        y_error = prices.std()
        pyplot.xticks(x)
        ax.yaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
        ax.yaxis.set_minor_locator(AutoMinorLocator())
        ax.tick_params(which='both', direction='out', length=4, width=1)
        ax.grid(which='minor', alpha=0.4)
        ax.grid(which='major', alpha=0.7)
        pyplot.errorbar(x, y, yerr=y_error, fmt='o', color='g')
        pyplot.ylabel('Preis')
        pyplot.title('Spielerpreise')
        canvas = FigureCanvas(fig)
        response = HttpResponse(content_type='image/png')
        canvas.print_png(response)
        return response
    finally:
        # BUG FIX: figures created through pyplot are retained by pyplot's
        # global state; without closing, every (uncached) request leaks one.
        pyplot.close(fig)
@method_decorator(login_required, name='dispatch')
class TransfersChartView(CsrfExemptMixin, JsonRequestResponseMixin, View):
    """Serve box-plot-ready transfer price data as JSON."""

    def get(self, request):
        """
        Read the filter parameters from the query string, group prices via
        PandaManager and answer with a JSON payload containing one box-plot
        series plus the integer category labels.
        """
        group_by = request.GET.get('group_by', default='Age')
        ages = self._to_int_list(request.GET.get('ages', default=None))
        strengths = self._to_int_list(request.GET.get('strengths', default=None))
        positions = self._to_list(request.GET.get('positions', default=None))
        seasons = self._to_int_list(request.GET.get('seasons', default=None))
        matchdays = self._to_int_list(request.GET.get('matchdays', default=None))
        min_price = self._to_int(request.GET.get('min_price', default=None))
        max_price = self._to_int(request.GET.get('max_price', default=None))
        # BUG FIX: positions has already been split into a list, so the old
        # comparison against the string 'All' was always False. '?positions=All'
        # arrives here as ['All'] and means "no position filter".
        if positions == ['All']:
            positions = None
        panda_manager = PandaManager()
        prices = panda_manager.get_grouped_prices(group_by,
                                                  ages=ages,
                                                  strengths=strengths,
                                                  positions=positions,
                                                  seasons=seasons,
                                                  matchdays=matchdays,
                                                  min_price=min_price,
                                                  max_price=max_price,
                                                  )
        chart_json = {
            "series": [
                {
                    "name": 'Preise',
                    "data": self._get_data_from_dataframe(prices)
                },
            ],
            "categories":
                list(map(int, numpy.array(prices.mean().index)))
        }
        return self.render_json_response(chart_json)

    @staticmethod
    def _get_data_from_dataframe(prices):
        """Build one [min, q1, median, q3, max] row per group in *prices*."""
        mins = prices.min()
        quantiles = prices.quantile([0.25, 0.75])
        medians = prices.median()
        maxs = prices.max()
        data = []
        for x_index in numpy.array(prices.mean().index):
            data.append([float(mins[x_index]),
                         float(quantiles[x_index][0.25]),
                         float(medians[x_index]),
                         float(quantiles[x_index][0.75]),
                         float(maxs[x_index])
                         ])
        return data

    @staticmethod
    def _to_int_list(value):
        """Parse a comma-separated string into a list of ints, or None."""
        if value:
            # The former ``lambda x: int(x)`` was redundant -- ``int`` itself
            # is the mapping function.
            return list(map(int, value.split(',')))
        return None

    @staticmethod
    def _to_list(value):
        """Split a comma-separated string into a list, or None if empty."""
        if value:
            return value.split(',')
        return None

    @staticmethod
    def _to_int(value):
        """Convert to int; falsy input passes through as None."""
        if value:
            return int(value)
        return None
@method_decorator(login_required, name='dispatch')
class TransfersView(TemplateView):
    """Render the transfers page template (login required)."""
    template_name = 'core/ofm/transfers.html'
#19 remove unnecessary lambda function
import numpy
from braces.views import CsrfExemptMixin
from braces.views import JsonRequestResponseMixin
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from matplotlib import pyplot
from matplotlib import style
from matplotlib import ticker
from matplotlib.backends.backend_agg import FigureCanvas
from matplotlib.ticker import AutoMinorLocator
from core.managers.panda_manager import PandaManager
style.use('ggplot')
@cache_page(60 * 60)
def render_plot(request):
    """
    Render an example price chart (price vs. strength for 32-year-old 'MS'
    players, capped at 30M) as a PNG HttpResponse. Cached for one hour.
    """
    panda_manager = PandaManager()
    prices = panda_manager.get_grouped_prices('Strength', positions=['MS'], ages=[32], max_price=3 * 10 ** 7)
    fig = pyplot.figure(figsize=(16, 9), dpi=120)
    try:
        ax = fig.add_subplot(1, 1, 1)
        x = numpy.array(prices.mean().index)
        y = prices.mean()
        y_error = prices.std()
        pyplot.xticks(x)
        ax.yaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
        ax.yaxis.set_minor_locator(AutoMinorLocator())
        ax.tick_params(which='both', direction='out', length=4, width=1)
        ax.grid(which='minor', alpha=0.4)
        ax.grid(which='major', alpha=0.7)
        pyplot.errorbar(x, y, yerr=y_error, fmt='o', color='g')
        pyplot.ylabel('Preis')
        pyplot.title('Spielerpreise')
        canvas = FigureCanvas(fig)
        response = HttpResponse(content_type='image/png')
        canvas.print_png(response)
        return response
    finally:
        # BUG FIX: figures created through pyplot are retained by pyplot's
        # global state; without closing, every (uncached) request leaks one.
        pyplot.close(fig)
@method_decorator(login_required, name='dispatch')
class TransfersChartView(CsrfExemptMixin, JsonRequestResponseMixin, View):
    """Serve box-plot-ready transfer price data as JSON."""

    def get(self, request):
        """
        Read the filter parameters from the query string, group prices via
        PandaManager and answer with a JSON payload containing one box-plot
        series plus the integer category labels.
        """
        group_by = request.GET.get('group_by', default='Age')
        ages = self._to_int_list(request.GET.get('ages', default=None))
        strengths = self._to_int_list(request.GET.get('strengths', default=None))
        positions = self._to_list(request.GET.get('positions', default=None))
        seasons = self._to_int_list(request.GET.get('seasons', default=None))
        matchdays = self._to_int_list(request.GET.get('matchdays', default=None))
        min_price = self._to_int(request.GET.get('min_price', default=None))
        max_price = self._to_int(request.GET.get('max_price', default=None))
        # BUG FIX: positions has already been split into a list, so the old
        # comparison against the string 'All' was always False. '?positions=All'
        # arrives here as ['All'] and means "no position filter".
        if positions == ['All']:
            positions = None
        panda_manager = PandaManager()
        prices = panda_manager.get_grouped_prices(group_by,
                                                  ages=ages,
                                                  strengths=strengths,
                                                  positions=positions,
                                                  seasons=seasons,
                                                  matchdays=matchdays,
                                                  min_price=min_price,
                                                  max_price=max_price,
                                                  )
        chart_json = {
            "series": [
                {
                    "name": 'Preise',
                    "data": self._get_data_from_dataframe(prices)
                },
            ],
            "categories":
                list(map(int, numpy.array(prices.mean().index)))
        }
        return self.render_json_response(chart_json)

    @staticmethod
    def _get_data_from_dataframe(prices):
        """Build one [min, q1, median, q3, max] row per group in *prices*."""
        mins = prices.min()
        quantiles = prices.quantile([0.25, 0.75])
        medians = prices.median()
        maxs = prices.max()
        data = []
        for x_index in numpy.array(prices.mean().index):
            data.append([float(mins[x_index]),
                         float(quantiles[x_index][0.25]),
                         float(medians[x_index]),
                         float(quantiles[x_index][0.75]),
                         float(maxs[x_index])
                         ])
        return data

    @staticmethod
    def _to_int_list(value):
        """Parse a comma-separated string into a list of ints, or None."""
        if value:
            return list(map(int, value.split(',')))
        return None

    @staticmethod
    def _to_list(value):
        """Split a comma-separated string into a list, or None if empty."""
        if value:
            return value.split(',')
        return None

    @staticmethod
    def _to_int(value):
        """Convert to int; falsy input passes through as None."""
        if value:
            return int(value)
        return None
@method_decorator(login_required, name='dispatch')
class TransfersView(TemplateView):
    """Render the transfers page template (login required)."""
    template_name = 'core/ofm/transfers.html'
|
# This file is part of allegedb, an object-relational mapper for versioned graphs.
# Copyright (C) Zachary Spector. public@zacharyspector.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""WindowDict, the core data structure used by allegedb's caching system.
It resembles a dictionary, more specifically a defaultdict-like where retrieving
a key that isn't set will get the highest set key that is lower than the key
you asked for (and thus, keys must be orderable). It is optimized for retrieval
of the same key and neighboring ones repeatedly and in sequence.
"""
from collections import deque
# BUG FIX: the collection ABCs moved to collections.abc in Python 3.3 and
# importing them from ``collections`` is removed entirely in Python 3.10.
from collections.abc import Mapping, MutableMapping, KeysView, ItemsView, ValuesView
from operator import itemgetter, lt, le
from itertools import chain
try:
    import cython
except ImportError:
    # Stand-in so the @cython.locals decorators below are harmless no-ops
    # when Cython is not installed.
    class cython:
        def locals(**kwargs):
            def passthru(fun):
                return fun
            return passthru
        cfunc = locals
        int = None
        bint = None
# Fast accessors for the (rev, value) pairs used throughout this module.
get0 = itemgetter(0)
get1 = itemgetter(1)
# TODO: cancel changes that would put something back to where it was at the start
# This will complicate the update_window functions though, and I don't think it'll
# improve much apart from a bit of efficiency in that the deltas are smaller
# sometimes.
def update_window(turn_from, tick_from, turn_to, tick_to, updfun, branchd):
    """Iterate over a window of time in ``branchd`` and call ``updfun`` on the values"""
    if turn_from in branchd:
        # Skip the exact starting tick: deltas describe *changes* after it.
        for state in branchd[turn_from][tick_from + 1:]:
            updfun(*state)
    for turn in range(turn_from + 1, turn_to):
        if turn not in branchd:
            continue
        for state in branchd[turn][:]:
            updfun(*state)
    if turn_to in branchd:
        for state in branchd[turn_to][:tick_to]:
            updfun(*state)
def update_backward_window(turn_from, tick_from, turn_to, tick_to, updfun, branchd):
    """Iterate backward over a window of time in ``branchd`` and call ``updfun`` on the values"""
    if turn_from in branchd:
        for state in reversed(branchd[turn_from][:tick_from]):
            updfun(*state)
    for turn in range(turn_from - 1, turn_to, -1):
        if turn not in branchd:
            continue
        for state in reversed(branchd[turn][:]):
            updfun(*state)
    if turn_to in branchd:
        for state in reversed(branchd[turn_to][tick_to + 1:]):
            updfun(*state)
class HistoryError(KeyError):
    """You tried to access the past in a bad way."""

    def __init__(self, *args, deleted=False):
        super().__init__(*args)
        # True when the key existed but was deleted at the revision queried.
        self.deleted = deleted
def within_history(rev, windowdict):
    """Return whether the windowdict has history at the revision."""
    if not windowdict:
        return False
    past = windowdict._past
    future = windowdict._future
    # Earliest known rev: front of past, else the last (soonest) future entry.
    begin = past[0][0] if past else future[-1][0]
    # Latest known rev: front of future, else the newest past entry.
    end = future[0][0] if future else past[-1][0]
    return begin <= rev <= end
class WindowDictKeysView(KeysView):
    """Look through all the keys a WindowDict contains."""

    def __contains__(self, rev):
        return rev in self._mapping._keys

    def __iter__(self):
        # Past runs in chronological order already; the future stack keeps
        # its soonest entry last, so walk it backwards.
        for rev, _ in self._mapping._past:
            yield rev
        for rev, _ in reversed(self._mapping._future):
            yield rev
class WindowDictItemsView(ItemsView):
    """Look through everything a WindowDict contains."""

    def __contains__(self, item):
        (rev, v) = item
        mapp = self._mapping
        if not within_history(rev, mapp):
            return False
        for mrev, mv in mapp._past:
            if mrev == rev:
                return mv == v
        for mrev, mv in mapp._future:
            if mrev == rev:
                return mv == v
        return False

    def __iter__(self):
        past = self._mapping._past
        future = self._mapping._future
        if past:
            yield from past
        if future:
            # BUG FIX: the future stack keeps its soonest entry last, so it
            # must be iterated in reverse to yield items in revision order
            # (consistent with WindowDictKeysView.__iter__).
            yield from reversed(future)
class WindowDictPastFutureKeysView(KeysView):
    """View on a WindowDict's keys relative to last lookup"""

    def __iter__(self):
        stack = self._mapping.stack
        if not stack:
            return
        for rev, _ in reversed(stack):
            yield rev

    def __contains__(self, item):
        return item in self._mapping._keys
class WindowDictPastFutureItemsView(ItemsView):
    """Abstract view on past or future (rev, value) pairs relative to last lookup"""

    def __iter__(self):
        stack = self._mapping.stack
        if not stack:
            return
        yield from reversed(stack)

    def __contains__(self, item):
        stack = self._mapping.stack
        # _out_of_range is supplied by the Past/Future subclasses.
        if not stack or self._out_of_range(item, stack):
            return False
        return item in stack
class WindowDictPastItemsView(WindowDictPastFutureItemsView):
    """View on a WindowDict's past items relative to last lookup"""

    @staticmethod
    def _out_of_range(item, stack):
        # Past stacks run in ascending rev order: oldest first, newest last.
        rev = item[0]
        return rev < stack[0][0] or rev > stack[-1][0]
class WindowDictFutureItemsView(WindowDictPastFutureItemsView):
    """View on a WindowDict's future items relative to last lookup"""

    @staticmethod
    def _out_of_range(item, stack):
        # Future stacks run in descending rev order: soonest entry last.
        rev = item[0]
        return rev < stack[-1][0] or rev > stack[0][0]
class WindowDictPastFutureValuesView(ValuesView):
    """Abstract class for views on the past or future values of a WindowDict"""

    def __iter__(self):
        stack = self._mapping.stack
        if not stack:
            return
        for _, value in reversed(stack):
            yield value

    def __contains__(self, item):
        stack = self._mapping.stack
        if not stack:
            return False
        return any(value == item for _, value in stack)
class WindowDictValuesView(ValuesView):
    """Look through all the values that a WindowDict contains."""

    def __contains__(self, value):
        past = self._mapping._past
        future = self._mapping._future
        if past:
            for rev, v in past:
                if v == value:
                    return True
        if future:
            for rev, v in future:
                if v == value:
                    return True
        return False

    def __iter__(self):
        past = self._mapping._past
        future = self._mapping._future
        if past:
            for _, value in past:
                yield value
        if future:
            # BUG FIX: the future stack keeps its soonest entry last, so it
            # must be iterated in reverse to yield values in revision order
            # (consistent with WindowDictKeysView.__iter__).
            for _, value in reversed(future):
                yield value
class WindowDictPastFutureView(Mapping):
    """Abstract class for historical views on WindowDict"""
    __slots__ = ('stack',)

    def __init__(self, stack):
        self.stack = stack

    def __len__(self):
        # A None/empty stack counts as zero entries.
        if not self.stack:
            return 0
        return len(self.stack)
class WindowDictPastView(WindowDictPastFutureView):
    """Read-only mapping of just the past of a WindowDict"""

    def __iter__(self):
        stack = self.stack
        if not stack:
            return
        for rev, _ in reversed(stack):
            yield rev

    def __getitem__(self, key):
        stack = self.stack
        # Past stacks run oldest-to-newest; reject keys outside that span.
        if not stack or not stack[0][0] <= key <= stack[-1][0]:
            raise KeyError
        for rev, value in stack:
            if rev == key:
                return value
        raise KeyError

    def keys(self):
        return WindowDictPastFutureKeysView(self)

    def items(self):
        return WindowDictPastItemsView(self)

    def values(self):
        return WindowDictPastFutureValuesView(self)
class WindowDictFutureView(WindowDictPastFutureView):
    """Read-only mapping of just the future of a WindowDict"""

    def __iter__(self):
        stack = self.stack
        if not stack:
            return
        for rev, _ in reversed(stack):
            yield rev

    def __getitem__(self, key):
        stack = self.stack
        # Future stacks run newest-to-soonest; the soonest rev sits last.
        if not stack or not stack[-1][0] <= key <= stack[0][0]:
            raise KeyError
        for rev, value in stack:
            if rev == key:
                return value
        raise KeyError

    def keys(self):
        return WindowDictPastFutureKeysView(self)

    def items(self):
        return WindowDictFutureItemsView(self)

    def values(self):
        return WindowDictPastFutureValuesView(self)
class WindowDictSlice:
    """A slice of history in which the start is earlier than the stop.

    Iterating yields the *values* within the slice bounds, in chronological
    order.
    """
    __slots__ = ['dict', 'slice']

    def __init__(self, dict, slice):
        self.dict = dict
        self.slice = slice

    def __reversed__(self):
        return iter(WindowDictReverseSlice(self.dict, self.slice))

    def __iter__(self):
        dic = self.dict
        if not dic:
            return
        slic = self.slice
        if slic.step is not None:
            for i in range(slic.start or dic.beginning, slic.stop or dic.end, slic.step):
                yield dic[i]
            # BUG FIX: without this return, a stepped slice fell through to
            # the branches below and yielded the whole window a second time.
            return
        if slic.start is None and slic.stop is None:
            yield from map(get1, dic._past)
            yield from map(get1, reversed(dic._future))
        elif None not in (slic.start, slic.stop):
            if slic.stop == slic.start:
                yield dic[slic.stop]
                return
            past = dic._past
            future = dic._future
            if slic.start < slic.stop:
                left, right = slic.start, slic.stop
                dic.seek(right)
                if not past:
                    return
                # Exclude the right endpoint itself for half-open semantics.
                if past[-1][0] == right:
                    future.append(past.pop())
                cmp = lt
            else:
                left, right = slic.stop, slic.start
                dic.seek(right)
                if not past:
                    return
                cmp = le
            # Skip entries up to the left bound, then yield the remainder.
            # NOTE(review): next(it) may raise StopIteration inside this
            # generator, which PEP 479 (Python 3.7+) turns into RuntimeError
            # when the left bound exceeds all revs -- confirm callers never
            # hit that case.
            it = iter(past)
            p0, p1 = next(it)
            while cmp(p0, left):
                p0, p1 = next(it)
            else:
                yield p1
            yield from map(get1, it)
        elif slic.start is None:
            stac = dic._past + list(reversed(dic._future))
            while stac and stac[-1][0] > slic.stop:
                stac.pop()
            yield from map(get1, stac)
            return
        else:  # slic.stop is None
            if not dic._past and not dic._future:
                return
            chan = chain(dic._past, reversed(dic._future))
            nxt = next(chan)
            while nxt[0] < slic.start:
                try:
                    nxt = next(chan)
                except StopIteration:
                    return
            yield get1(nxt)
            yield from map(get1, chan)
class WindowDictReverseSlice:
    """A slice of history in which the start is later than the stop.

    Iterating yields the *values* within the slice bounds, newest first.
    """
    __slots__ = ['dict', 'slice']

    def __init__(self, dict, slice):
        self.dict = dict
        self.slice = slice

    def __reversed__(self):
        return iter(WindowDictSlice(self.dict, self.slice))

    def __iter__(self):
        dic = self.dict
        if not dic:
            return
        slic = self.slice
        if slic.step is not None:
            for i in range(slic.start or dic.end, slic.stop or dic.beginning, slic.step):
                yield dic[i]
            # BUG FIX: without this return, a stepped slice fell through to
            # the branches below and yielded the whole window a second time.
            return
        if slic.start is None and slic.stop is None:
            yield from map(get1, dic._future)
            yield from map(get1, reversed(dic._past))
        elif None not in (slic.start, slic.stop):
            if slic.start == slic.stop:
                yield dic[slic.stop]
                return
            if slic.start < slic.stop:
                left, right = slic.start, slic.stop
                dic.seek(right)
                it = reversed(dic._past)
                # Skip the right endpoint itself for half-open semantics.
                next(it)
                cmp = lt
            else:
                left, right = slic.stop, slic.start
                dic.seek(right)
                it = reversed(dic._past)
                cmp = le
            for frev, fv in it:
                if cmp(frev, left):
                    return
                yield fv
        elif slic.start is None:
            stac = dic._past + list(reversed(dic._future))
            while stac and stac[-1][0] > slic.stop:
                stac.pop()
            yield from map(get1, reversed(stac))
        else:  # slic.stop is None
            stac = deque(dic._past)
            stac.extend(reversed(dic._future))
            while stac and stac[0][0] < slic.start:
                stac.popleft()
            yield from map(get1, reversed(stac))
class WindowDict(MutableMapping):
"""A dict that keeps every value that a variable has had over time.
Look up a revision number in this dict and it will give you the
effective value as of that revision. Keys should always be
revision numbers.
Optimized for the cases where you look up the same revision
repeatedly, or its neighbors.
This supports slice notation to get all values in a given
time-frame. If you do not supply a step, you'll just get the
values, with no indication of when they're from exactly --
so explicitly supply a step of 1 to get the value at each point in
the slice, or use the ``future`` and ``past`` methods to get read-only
mappings of data relative to a particular revision.
Unlike slices of eg. lists, you can slice with a start greater than the stop
even if you don't supply a step. That will get you values in reverse order.
"""
__slots__ = ('_future', '_past', '_keys')
    def future(self, rev=None):
        """Return a Mapping of items after the given revision.

        Default revision is the last one looked up.
        """
        if rev is not None:
            self.seek(rev)
        # The view shares the live _future stack, so later seeks are visible.
        return WindowDictFutureView(self._future)
    def past(self, rev=None):
        """Return a Mapping of items at or before the given revision.

        Default revision is the last one looked up.
        """
        if rev is not None:
            self.seek(rev)
        # The view shares the live _past stack, so later seeks are visible.
        return WindowDictPastView(self._past)
    @cython.locals(rev=cython.int, past_end=cython.int, future_start=cython.int)
    def seek(self, rev):
        """Arrange the caches to help look up the given revision.

        Moves entries between the _past and _future stacks so that
        everything at or before ``rev`` is in _past (newest last) and
        everything after it is in _future (soonest last).
        """
        # TODO: binary search? Perhaps only when one or the other
        # stack is very large?
        if not self:
            return
        if type(rev) is not int:
            raise TypeError("rev must be int")
        past = self._past
        future = self._future
        # -1 is a sentinel for "stack empty"; the real values are refreshed
        # inside the loops below as entries move.
        past_end = -1 if not past else past[-1][0]
        future_start = -1 if not future else future[-1][0]
        # Already positioned: newest past entry is at or before rev and the
        # soonest future entry (if any) is strictly after it.
        if past and past_end <= rev and (
            not future or future_start > rev
        ):
            return
        if future:
            appender = past.append
            popper = future.pop
            # Shift entries that have now "happened" from future into past.
            while future_start <= rev:
                appender(popper())
                if future:
                    future_start = future[-1][0]
                else:
                    break
        if past:
            popper = past.pop
            appender = future.append
            # Shift entries later than rev from past back into future.
            while past_end > rev:
                appender(popper())
                if past:
                    past_end = past[-1][0]
                else:
                    break
def rev_gettable(self, rev: int) -> bool:
if self._past:
return rev >= self._past[0][0]
elif self._future:
return rev >= self._future[0][0]
else:
return False
    def rev_before(self, rev: int) -> int:
        """Return the latest past rev on which the value changed.

        Implicitly returns None when nothing is at or before ``rev``.
        """
        self.seek(rev)
        if self._past:
            return self._past[-1][0]

    def rev_after(self, rev: int) -> int:
        """Return the earliest future rev on which the value will change.

        Implicitly returns None when nothing changes after ``rev``.
        """
        self.seek(rev)
        if self._future:
            # The future stack keeps its soonest rev at the end.
            return self._future[-1][0]
def truncate(self, rev: int) -> None:
"""Delete everything after the given revision."""
self.seek(rev)
self._keys.difference_update(map(get0, self._future))
self._future = []
@property
def beginning(self) -> int:
if self._past:
return self._past[0][0]
elif self._future:
return self._future[-1][0]
else:
raise HistoryError("No history yet")
@property
def end(self) -> int:
    """The latest revision recorded anywhere in this dict.

    Raises:
        HistoryError: when the dict is empty.
    """
    if self._future:
        # _future is stored newest-first, so its latest entry is first.
        return self._future[0][0]
    elif self._past:
        return self._past[-1][0]
    else:
        raise HistoryError("No history yet")
def keys(self):
return WindowDictKeysView(self)
def items(self):
return WindowDictItemsView(self)
def values(self):
return WindowDictValuesView(self)
def __bool__(self):
return bool(self._past) or bool(self._future)
def __init__(self, data=None):
    """Initialize from an optional mapping or iterable of (rev, value) pairs."""
    if not data:
        self._past = []
    elif hasattr(data, 'items'):
        self._past = list(sorted(data.items()))
    else:
        # assume it's an orderable sequence of pairs
        self._past = list(sorted(data))
    # Everything starts out "past"; seek() migrates entries to _future.
    self._future = []
    # Set of every revision present, for O(1) membership tests.
    self._keys = set(map(get0, self._past or ()))
def __iter__(self):
if not self:
return
if self._past:
yield from map(get0, self._past)
if self._future:
yield from map(get0, self._future)
def __contains__(self, item):
return item in self._keys
def __len__(self):
return len(self._past or ()) + len(self._future or ())
def __getitem__(self, rev):
    """Return the effective value at ``rev``, or a lazy slice of history.

    A slice with both endpoints given and start > stop iterates in
    reverse order.

    Raises:
        HistoryError: if the dict is empty or ``rev`` precedes all
            recorded history.
    """
    if not self:
        raise HistoryError("No history yet")
    if isinstance(rev, slice):
        if None not in (rev.start, rev.stop) and rev.start > rev.stop:
            return WindowDictReverseSlice(self, rev)
        return WindowDictSlice(self, rev)
    self.seek(rev)
    past = self._past
    if not past:
        raise HistoryError(
            "Revision {} is before the start of history".format(rev)
        )
    # After seek, the top of _past is the latest entry at or before rev.
    return past[-1][1]
@cython.locals(past_start=cython.int, past_end=cython.int, future_start=cython.int, have_past=cython.bint, have_future=cython.bint, rev=cython.int)
def __setitem__(self, rev, v):
    """Set the value at ``rev``, preserving the _past/_future ordering."""
    # Store plain values rather than wrappers that support unwrap().
    if hasattr(v, 'unwrap') and not hasattr(v, 'no_unwrap'):
        v = v.unwrap()
    past = self._past
    future = self._future
    have_past = bool(past)
    have_future = bool(future)
    # -1 is a "stack empty" sentinel, guarded by have_past/have_future.
    past_start = -1 if not have_past else past[0][0]
    past_end = -1 if not have_past else past[-1][0]
    future_start = -1 if not have_future else future[-1][0]
    # Fast paths that avoid a seek() for the common write positions:
    if not have_past and not have_future:
        past.append((rev, v))
    elif have_past and rev < past_start:
        past.insert(0, (rev, v))
    elif have_past and rev == past_start:
        past[0] = (rev, v)
    elif have_past and rev == past_end:
        past[-1] = (rev, v)
    elif have_past and (
        not have_future or
        rev < future_start
    ) and rev > past_end:
        past.append((rev, v))
    else:
        # General case: position the stacks at rev, then place the entry
        # on top of _past (overwriting an existing entry at rev, if any).
        self.seek(rev)
        past = self._past
        future = self._future
        past_end = -1 if not past else past[-1][0]
        if not past:
            past.append((rev, v))
        elif past_end == rev:
            past[-1] = (rev, v)
        else:
            assert past_end < rev
            past.append((rev, v))
    self._keys.add(rev)
@cython.locals(rev=cython.int, past_end=cython.int)
def __delitem__(self, rev):
    """Delete the entry at exactly ``rev``.

    Raises:
        HistoryError: if the dict is empty, ``rev`` falls outside the
            recorded span, or no entry exists at exactly ``rev``.
    """
    # Not checking for rev's presence at the beginning because
    # to do so would likely require iterating thru history,
    # which I have to do anyway in deleting.
    # But handle degenerate case.
    if not self:
        raise HistoryError("Tried to delete from an empty WindowDict")
    if not self.beginning <= rev <= self.end:
        raise HistoryError("Rev outside of history: {}".format(rev))
    self.seek(rev)
    past = self._past
    past_end = -1 if not past else past[-1][0]
    if not past or past_end != rev:
        raise HistoryError("Rev not present: {}".format(rev))
    # After seek, the entry for rev (when present) is on top of _past.
    del self._past[-1]
    self._keys.remove(rev)
def __repr__(self):
    """Render as ``ClassName({rev: value, ...})`` over all history."""
    # Merge both stacks into one plain dict for display.
    contents = dict(self._past)
    contents.update(self._future)
    return "{}({})".format(type(self).__name__, contents)
class FuturistWindowDict(WindowDict):
    """A WindowDict that does not let you rewrite the past."""
    __slots__ = ('_future', '_past')

    def __setitem__(self, rev, v):
        """Set the value at ``rev``.

        Raises:
            HistoryError: when any history already exists after ``rev``.
        """
        if hasattr(v, 'unwrap') and not hasattr(v, 'no_unwrap'):
            v = v.unwrap()
        # Fast path: brand-new dict, or appending past the end with no
        # pending future.  BUGFIX: the old test was ``not self._past or
        # (...)``, which let a write land directly on _past even when
        # _future held entries that should have forbidden it; both stacks
        # must be empty for the unconditional append.
        if not (self._past or self._future) or (
            self._past and (
                not self._future and
                rev > self._past[-1][0]
            )):
            self._past.append((rev, v))
            self._keys.add(rev)
            return
        self.seek(rev)
        past = self._past
        future = self._future
        if future:
            raise HistoryError(
                "Already have some history after {}".format(rev)
            )
        if not past or rev > past[-1][0]:
            past.append((rev, v))
        elif rev == past[-1][0]:
            past[-1] = (rev, v)
        else:
            raise HistoryError(
                "Already have some history after {} "
                "(and my seek function is broken?)".format(rev)
            )
        self._keys.add(rev)
class TurnDict(FuturistWindowDict):
    """A FuturistWindowDict whose values are FuturistWindowDicts."""
    __slots__ = ('_future', '_past')
    cls = FuturistWindowDict

    def __setitem__(self, turn, value):
        # Coerce anything that isn't already a FuturistWindowDict.
        if type(value) is not FuturistWindowDict:
            value = FuturistWindowDict(value)
        FuturistWindowDict.__setitem__(self, turn, value)
class SettingsTurnDict(WindowDict):
    """A WindowDict whose values are themselves WindowDicts."""
    __slots__ = ('_future', '_past')
    cls = WindowDict

    def __setitem__(self, turn, value):
        # Coerce anything that isn't already a WindowDict.
        if type(value) is not WindowDict:
            value = WindowDict(value)
        WindowDict.__setitem__(self, turn, value)
Fix an oversight in FuturistWindowDict.__setitem__
# This file is part of allegedb, an object-relational mapper for versioned graphs.
# Copyright (C) Zachary Spector. public@zacharyspector.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""WindowDict, the core data structure used by allegedb's caching system.
It resembles a dictionary, more specifically a defaultdict-like where retrieving
a key that isn't set will get the highest set key that is lower than the key
you asked for (and thus, keys must be orderable). It is optimized for retrieval
of the same key and neighboring ones repeatedly and in sequence.
"""
from collections import deque
# BUGFIX: the ABCs moved to collections.abc in Python 3.3 and were removed
# from the collections namespace in Python 3.10; import from the new
# location, falling back for very old interpreters.
try:
    from collections.abc import Mapping, MutableMapping, KeysView, ItemsView, ValuesView
except ImportError:  # pragma: no cover - pre-3.3 fallback
    from collections import Mapping, MutableMapping, KeysView, ItemsView, ValuesView
from operator import itemgetter, lt, le
from itertools import chain
try:
    import cython
except ImportError:
    # Stub out the cython decorators so the pure-Python module still runs.
    class cython:
        def locals(**kwargs):
            def passthru(fun):
                return fun
            return passthru
        cfunc = locals
        int = None
        bint = None

# Itemgetters for the (rev, value) pairs stored on the stacks.
get0 = itemgetter(0)
get1 = itemgetter(1)
# TODO: cancel changes that would put something back to where it was at the start
# This will complicate the update_window functions though, and I don't think it'll
# improve much apart from a bit of efficiency in that the deltas are smaller
# sometimes.
def update_window(turn_from, tick_from, turn_to, tick_to, updfun, branchd):
    """Iterate over a window of time in ``branchd`` and call ``updfun`` on the values"""
    # Changes within the starting turn, strictly after tick_from
    # (deltas are *changes*, so the starting tick itself is excluded).
    if turn_from in branchd:
        for state in branchd[turn_from][tick_from + 1:]:
            updfun(*state)
    # Whole turns strictly between the two endpoints.
    for turn in range(turn_from + 1, turn_to):
        if turn in branchd:
            for state in branchd[turn][:]:
                updfun(*state)
    # Changes within the final turn, up to (but excluding) tick_to.
    if turn_to in branchd:
        for state in branchd[turn_to][:tick_to]:
            updfun(*state)
def update_backward_window(turn_from, tick_from, turn_to, tick_to, updfun, branchd):
    """Iterate backward over a window of time in ``branchd`` and call ``updfun`` on the values"""
    # Changes in the starting turn before tick_from, newest first.
    if turn_from in branchd:
        for state in reversed(branchd[turn_from][:tick_from]):
            updfun(*state)
    # Whole turns strictly between the endpoints, walking backward.
    for turn in range(turn_from - 1, turn_to, -1):
        if turn in branchd:
            for state in reversed(branchd[turn][:]):
                updfun(*state)
    # Changes in the destination turn strictly after tick_to, newest first.
    if turn_to in branchd:
        for state in reversed(branchd[turn_to][tick_to + 1:]):
            updfun(*state)
class HistoryError(KeyError):
    """You tried to access the past in a bad way."""

    def __init__(self, *args, deleted=False):
        super().__init__(*args)
        # True when the revision exists but its value was deleted.
        self.deleted = deleted
def within_history(rev, windowdict):
    """Return whether the windowdict has history at the revision."""
    if not windowdict:
        return False
    past, future = windowdict._past, windowdict._future
    # _future is stored newest-first: earliest at [-1], latest at [0].
    begin = past[0][0] if past else future[-1][0]
    end = future[0][0] if future else past[-1][0]
    return begin <= rev <= end
class WindowDictKeysView(KeysView):
"""Look through all the keys a WindowDict contains."""
def __contains__(self, rev):
return rev in self._mapping._keys
def __iter__(self):
past = self._mapping._past
future = self._mapping._future
if past:
yield from map(get0, past)
if future:
yield from map(get0, reversed(future))
class WindowDictItemsView(ItemsView):
"""Look through everything a WindowDict contains."""
def __contains__(self, item):
(rev, v) = item
mapp = self._mapping
if not within_history(rev, mapp):
return False
for mrev, mv in mapp._past:
if mrev == rev:
return mv == v
for mrev, mv in mapp._future:
if mrev == rev:
return mv == v
return False
def __iter__(self):
past = self._mapping._past
future = self._mapping._future
if past:
yield from past
if future:
yield from future
class WindowDictPastFutureKeysView(KeysView):
"""View on a WindowDict's keys relative to last lookup"""
def __iter__(self):
if not self._mapping.stack:
return
yield from map(get0, reversed(self._mapping.stack))
def __contains__(self, item):
return item in self._mapping._keys
class WindowDictPastFutureItemsView(ItemsView):
def __iter__(self):
if not self._mapping.stack:
return
yield from reversed(self._mapping.stack)
def __contains__(self, item):
stack = self._mapping.stack
if not stack or self._out_of_range(item, stack):
return False
return item in stack
class WindowDictPastItemsView(WindowDictPastFutureItemsView):
@staticmethod
def _out_of_range(item, stack):
return item[0] < stack[0][0] or item[0] > stack[-1][0]
class WindowDictFutureItemsView(WindowDictPastFutureItemsView):
"""View on a WindowDict's future items relative to last lookup"""
@staticmethod
def _out_of_range(item, stack):
return item[0] < stack[-1][0] or item[0] > stack[0][0]
class WindowDictPastFutureValuesView(ValuesView):
"""Abstract class for views on the past or future values of a WindowDict"""
def __iter__(self):
stack = self._mapping.stack
if not stack:
return
yield from map(get1, reversed(stack))
def __contains__(self, item):
stack = self._mapping.stack
if not stack:
return False
for v in map(get1, stack):
if v == item:
return True
return False
class WindowDictValuesView(ValuesView):
"""Look through all the values that a WindowDict contains."""
def __contains__(self, value):
past = self._mapping._past
future = self._mapping._future
if past:
for rev, v in past:
if v == value:
return True
if future:
for rev, v in future:
if v == value:
return True
return False
def __iter__(self):
past = self._mapping._past
future = self._mapping._future
if past:
yield from map(get1, past)
if future:
yield from map(get1, future)
class WindowDictPastFutureView(Mapping):
"""Abstract class for historical views on WindowDict"""
__slots__ = ('stack',)
def __init__(self, stack):
self.stack = stack
def __len__(self):
stack = self.stack
if not stack:
return 0
return len(stack)
class WindowDictPastView(WindowDictPastFutureView):
"""Read-only mapping of just the past of a WindowDict"""
def __iter__(self):
stack = self.stack
if not stack:
return
yield from map(get0, reversed(stack))
def __getitem__(self, key):
stack = self.stack
if not stack or key < stack[0][0] or key > stack[-1][0]:
raise KeyError
for rev, value in stack:
if rev == key:
return value
raise KeyError
def keys(self):
return WindowDictPastFutureKeysView(self)
def items(self):
return WindowDictPastItemsView(self)
def values(self):
return WindowDictPastFutureValuesView(self)
class WindowDictFutureView(WindowDictPastFutureView):
"""Read-only mapping of just the future of a WindowDict"""
def __iter__(self):
stack = self.stack
if not stack:
return
yield from map(get0, reversed(stack))
def __getitem__(self, key):
stack = self.stack
if not stack or key < stack[-1][0] or key > stack[0][0]:
raise KeyError
for rev, value in stack:
if rev == key:
return value
raise KeyError
def keys(self):
return WindowDictPastFutureKeysView(self)
def items(self):
return WindowDictFutureItemsView(self)
def values(self):
return WindowDictPastFutureValuesView(self)
class WindowDictSlice:
    """A slice of history in which the start is earlier than the stop"""
    __slots__ = ['dict', 'slice']

    def __init__(self, dict, slice):
        self.dict = dict
        self.slice = slice

    def __reversed__(self):
        return iter(WindowDictReverseSlice(self.dict, self.slice))

    def __iter__(self):
        dic = self.dict
        if not dic:
            return
        slic = self.slice
        if slic.step is not None:
            # Explicit step: visit the requested revisions directly.
            for i in range(slic.start or dic.beginning, slic.stop or dic.end, slic.step):
                yield dic[i]
            # BUGFIX: return after the stepped walk; previously control
            # fell through into the step-less branches below and yielded
            # every value a second time.
            return
        if slic.start is None and slic.stop is None:
            # Whole history, oldest to newest (_future is stored reversed).
            yield from map(get1, dic._past)
            yield from map(get1, reversed(dic._future))
        elif None not in (slic.start, slic.stop):
            if slic.stop == slic.start:
                yield dic[slic.stop]
                return
            past = dic._past
            future = dic._future
            if slic.start < slic.stop:
                left, right = slic.start, slic.stop
                dic.seek(right)
                if not past:
                    return
                # The slice excludes its stop; push an exact hit to _future.
                if past[-1][0] == right:
                    future.append(past.pop())
                cmp = lt
            else:
                left, right = slic.stop, slic.start
                dic.seek(right)
                if not past:
                    return
                cmp = le
            # Skip entries before the left bound, then yield the rest.
            it = iter(past)
            p0, p1 = next(it)
            while cmp(p0, left):
                p0, p1 = next(it)
            else:
                yield p1
            yield from map(get1, it)
        elif slic.start is None:
            stac = dic._past + list(reversed(dic._future))
            while stac and stac[-1][0] > slic.stop:
                stac.pop()
            yield from map(get1, stac)
            return
        else:  # slic.stop is None
            if not dic._past and not dic._future:
                return
            chan = chain(dic._past, reversed(dic._future))
            nxt = next(chan)
            while nxt[0] < slic.start:
                try:
                    nxt = next(chan)
                except StopIteration:
                    return
            yield get1(nxt)
            yield from map(get1, chan)
class WindowDictReverseSlice:
    """A slice of history in which the start is later than the stop"""
    __slots__ = ['dict', 'slice']

    def __init__(self, dict, slice):
        self.dict = dict
        self.slice = slice

    def __reversed__(self):
        return iter(WindowDictSlice(self.dict, self.slice))

    def __iter__(self):
        dic = self.dict
        if not dic:
            return
        slic = self.slice
        if slic.step is not None:
            # Explicit (negative) step: visit the requested revisions.
            for i in range(slic.start or dic.end, slic.stop or dic.beginning, slic.step):
                yield dic[i]
            # BUGFIX: return after the stepped walk; previously control
            # fell through into the step-less branches below and yielded
            # every value a second time.
            return
        if slic.start is None and slic.stop is None:
            # Whole history, newest to oldest (_future is stored reversed).
            yield from map(get1, dic._future)
            yield from map(get1, reversed(dic._past))
        elif None not in (slic.start, slic.stop):
            if slic.start == slic.stop:
                yield dic[slic.stop]
                return
            if slic.start < slic.stop:
                left, right = slic.start, slic.stop
                dic.seek(right)
                it = reversed(dic._past)
                # The slice excludes its later bound; drop the exact hit.
                next(it)
                cmp = lt
            else:
                left, right = slic.stop, slic.start
                dic.seek(right)
                it = reversed(dic._past)
                cmp = le
            for frev, fv in it:
                if cmp(frev, left):
                    return
                yield fv
        elif slic.start is None:
            stac = dic._past + list(reversed(dic._future))
            while stac and stac[-1][0] > slic.stop:
                stac.pop()
            yield from map(get1, reversed(stac))
        else:  # slic.stop is None
            stac = deque(dic._past)
            stac.extend(reversed(dic._future))
            while stac and stac[0][0] < slic.start:
                stac.popleft()
            yield from map(get1, reversed(stac))
class WindowDict(MutableMapping):
"""A dict that keeps every value that a variable has had over time.
Look up a revision number in this dict and it will give you the
effective value as of that revision. Keys should always be
revision numbers.
Optimized for the cases where you look up the same revision
repeatedly, or its neighbors.
This supports slice notation to get all values in a given
time-frame. If you do not supply a step, you'll just get the
values, with no indication of when they're from exactly --
so explicitly supply a step of 1 to get the value at each point in
the slice, or use the ``future`` and ``past`` methods to get read-only
mappings of data relative to a particular revision.
Unlike slices of eg. lists, you can slice with a start greater than the stop
even if you don't supply a step. That will get you values in reverse order.
"""
__slots__ = ('_future', '_past', '_keys')
def future(self, rev=None):
"""Return a Mapping of items after the given revision.
Default revision is the last one looked up.
"""
if rev is not None:
self.seek(rev)
return WindowDictFutureView(self._future)
def past(self, rev=None):
"""Return a Mapping of items at or before the given revision.
Default revision is the last one looked up.
"""
if rev is not None:
self.seek(rev)
return WindowDictPastView(self._past)
@cython.locals(rev=cython.int, past_end=cython.int, future_start=cython.int)
def seek(self, rev):
"""Arrange the caches to help look up the given revision."""
# TODO: binary search? Perhaps only when one or the other
# stack is very large?
if not self:
return
if type(rev) is not int:
raise TypeError("rev must be int")
past = self._past
future = self._future
past_end = -1 if not past else past[-1][0]
future_start = -1 if not future else future[-1][0]
if past and past_end <= rev and (
not future or future_start > rev
):
return
if future:
appender = past.append
popper = future.pop
while future_start <= rev:
appender(popper())
if future:
future_start = future[-1][0]
else:
break
if past:
popper = past.pop
appender = future.append
while past_end > rev:
appender(popper())
if past:
past_end = past[-1][0]
else:
break
def rev_gettable(self, rev: int) -> bool:
if self._past:
return rev >= self._past[0][0]
elif self._future:
return rev >= self._future[0][0]
else:
return False
def rev_before(self, rev: int) -> int:
"""Return the latest past rev on which the value changed."""
self.seek(rev)
if self._past:
return self._past[-1][0]
def rev_after(self, rev: int) -> int:
"""Return the earliest future rev on which the value will change."""
self.seek(rev)
if self._future:
return self._future[-1][0]
def truncate(self, rev: int) -> None:
"""Delete everything after the given revision."""
self.seek(rev)
self._keys.difference_update(map(get0, self._future))
self._future = []
@property
def beginning(self) -> int:
if self._past:
return self._past[0][0]
elif self._future:
return self._future[-1][0]
else:
raise HistoryError("No history yet")
@property
def end(self) -> int:
if self._future:
return self._future[0][0]
elif self._past:
return self._past[-1][0]
else:
raise HistoryError("No history yet")
def keys(self):
return WindowDictKeysView(self)
def items(self):
return WindowDictItemsView(self)
def values(self):
return WindowDictValuesView(self)
def __bool__(self):
return bool(self._past) or bool(self._future)
def __init__(self, data=None):
if not data:
self._past = []
elif hasattr(data, 'items'):
self._past = list(sorted(data.items()))
else:
# assume it's an orderable sequence of pairs
self._past = list(sorted(data))
self._future = []
self._keys = set(map(get0, self._past or ()))
def __iter__(self):
if not self:
return
if self._past:
yield from map(get0, self._past)
if self._future:
yield from map(get0, self._future)
def __contains__(self, item):
return item in self._keys
def __len__(self):
return len(self._past or ()) + len(self._future or ())
def __getitem__(self, rev):
if not self:
raise HistoryError("No history yet")
if isinstance(rev, slice):
if None not in (rev.start, rev.stop) and rev.start > rev.stop:
return WindowDictReverseSlice(self, rev)
return WindowDictSlice(self, rev)
self.seek(rev)
past = self._past
if not past:
raise HistoryError(
"Revision {} is before the start of history".format(rev)
)
return past[-1][1]
@cython.locals(past_start=cython.int, past_end=cython.int, future_start=cython.int, have_past=cython.bint, have_future=cython.bint, rev=cython.int)
def __setitem__(self, rev, v):
if hasattr(v, 'unwrap') and not hasattr(v, 'no_unwrap'):
v = v.unwrap()
past = self._past
future = self._future
have_past = bool(past)
have_future = bool(future)
past_start = -1 if not have_past else past[0][0]
past_end = -1 if not have_past else past[-1][0]
future_start = -1 if not have_future else future[-1][0]
if not have_past and not have_future:
past.append((rev, v))
elif have_past and rev < past_start:
past.insert(0, (rev, v))
elif have_past and rev == past_start:
past[0] = (rev, v)
elif have_past and rev == past_end:
past[-1] = (rev, v)
elif have_past and (
not have_future or
rev < future_start
) and rev > past_end:
past.append((rev, v))
else:
self.seek(rev)
past = self._past
future = self._future
past_end = -1 if not past else past[-1][0]
if not past:
past.append((rev, v))
elif past_end == rev:
past[-1] = (rev, v)
else:
assert past_end < rev
past.append((rev, v))
self._keys.add(rev)
@cython.locals(rev=cython.int, past_end=cython.int)
def __delitem__(self, rev):
# Not checking for rev's presence at the beginning because
# to do so would likely require iterating thru history,
# which I have to do anyway in deleting.
# But handle degenerate case.
if not self:
raise HistoryError("Tried to delete from an empty WindowDict")
if not self.beginning <= rev <= self.end:
raise HistoryError("Rev outside of history: {}".format(rev))
self.seek(rev)
past = self._past
past_end = -1 if not past else past[-1][0]
if not past or past_end != rev:
raise HistoryError("Rev not present: {}".format(rev))
del self._past[-1]
self._keys.remove(rev)
def __repr__(self):
me = dict(self._past)
me.update(self._future)
return "{}({})".format(self.__class__.__name__, me)
class FuturistWindowDict(WindowDict):
"""A WindowDict that does not let you rewrite the past."""
__slots__ = ('_future', '_past')
def __setitem__(self, rev, v):
if hasattr(v, 'unwrap') and not hasattr(v, 'no_unwrap'):
v = v.unwrap()
if not (self._past or self._future) or (
self._past and (
not self._future and
rev > self._past[-1][0]
)):
self._past.append((rev, v))
self._keys.add(rev)
return
self.seek(rev)
past = self._past
future = self._future
if future:
raise HistoryError(
"Already have some history after {}".format(rev)
)
if not past or rev > past[-1][0]:
past.append((rev, v))
elif rev == past[-1][0]:
past[-1] = (rev, v)
else:
raise HistoryError(
"Already have some history after {} "
"(and my seek function is broken?)".format(rev)
)
self._keys.add(rev)
class TurnDict(FuturistWindowDict):
__slots__ = ('_future', '_past')
cls = FuturistWindowDict
def __setitem__(self, turn, value):
if type(value) is not FuturistWindowDict:
value = FuturistWindowDict(value)
FuturistWindowDict.__setitem__(self, turn, value)
class SettingsTurnDict(WindowDict):
__slots__ = ('_future', '_past')
cls = WindowDict
def __setitem__(self, turn, value):
if type(value) is not WindowDict:
value = WindowDict(value)
WindowDict.__setitem__(self, turn, value)
|
# fMBT, free Model Based Testing tool
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
#
# View._parseDump method contains code that has been published as part
# of the TEMA tool, under the MIT open source license:
#
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
This library provides a test interface to Android devices.
Device class implements a test interface that is based on Android
Debug Bridge (adb) and Android monkey.
Device's refreshScreenshot() returns a Screenshot object, from which
bitmaps can be searched for.
Device's refreshView() returns a View object, from which UI elements
can be searched according to their id, class, text and other
properties.
Using this library requires that adb is in PATH.
Tips & tricks
-------------
Take a screenshot and save it to a file
import fmbtandroid
fmbtandroid.Device().refreshScreenshot().save("/tmp/screen.png")
* * *
Print view items on device display
import fmbtandroid
print fmbtandroid.Device().refreshView().dumpTree()
* * *
Open application grid from the home screen, unlock screen if necessary
import fmbtandroid
import time
d = fmbtandroid.Device()
d.pressHome()
time.sleep(1)
whatISee = d.waitAnyBitmap(["lockscreen-lock.png", "home-appgrid.png"])
if "lockscreen-lock.png" in whatISee:
d.swipeBitmap("lockscreen-lock.png", "east")
time.sleep(1)
d.pressHome()
whatISee = d.waitAnyBitmap(["home-appgrid.png"])
assert "home-appgrid.png" in whatISee, "Cannot find appgrid bitmap"
d.tapBitmap("home-appgrid.png")
* * *
Save generated device ini for modifications
import fmbtandroid
file("/tmp/mydevice.ini", "w").write(fmbtandroid.Device().dumpIni())
* * *
Connect to device based on an ini file
import fmbtandroid
d = fmbtandroid.Device(iniFile=file("/tmp/mydevice.ini"))
d.pressHome()
* * *
Open screenlock by swiping lock.png bitmap on the display to the
east. The lock.png file needs to be in bitmapPath defined in
mydevice.ini.
import fmbtandroid
d = fmbtandroid.Device(iniFile=file("/tmp/mydevice.ini"))
d.refreshScreenshot()
d.swipeBitmap("lock.png", "east")
* * *
Execute a shell command on Android device, show exit status, standard
output and standard error:
import fmbtandroid
status, out, err = fmbtandroid.Device().shellSOE("mkdir /proc/foo")
print 'status: %s, stdout: "%s", stderr: "%s"' % (status, out, err)
* * *
Enable extensive logging with screenshots and highlighted content:
import fmbtandroid, time
d = fmbtandroid.Device()
d.enableVisualLog("example.html")
d.pressHome(); time.sleep(1)
d.refreshScreenshot()
d.tapOcrText("Google"); time.sleep(1)
d.refreshScreenshot()
then view the log:
$ chromium example.html
"""
DEVICE_INI_DEFAULTS = '''
[objects]
appsButtonId = id/0x0
appsButtonClass = BubbleTextView
; [application.NAME] sections:
; gridname = exact caption of the application in application grid (text
; property)
; window = string included in topWindow() when application is running
[homescreen]
window = Launcher
'''
import commands
import os
import random
import re
import shutil
import socket
import StringIO
import subprocess
import tempfile
import time
import uu
import fmbt
import fmbtgti
def _adapterLog(msg):
    # Route adapter-level messages to the fmbt log with a module prefix.
    fmbt.adapterlog("fmbtandroid: %s" % (msg,))
def _logFailedCommand(source, command, exitstatus, stdout, stderr):
    # Shared helper: log a failed shell/adb command with all its outputs.
    _adapterLog('in %s command "%s" failed:\n output: %s\n error: %s\n status: %s' %
                (source, command, stdout, stderr, exitstatus))
def _run(command, expectedExitStatus = None):
if type(command) == str: shell=True
else: shell=False
try:
p = subprocess.Popen(command, shell=shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=False)
if expectedExitStatus != None:
out, err = p.communicate()
else:
out, err = ('', None)
except Exception, e:
class fakeProcess(object): pass
p = fakeProcess
p.returncode = 127
out, err = ('', e)
exitStatus = p.returncode
if expectedExitStatus != None:
if ((type(expectedExitStatus) in [list, tuple] and
not exitStatus in expectedExitStatus) or
(type(expectedExitStatus) == int and
not exitStatus == expectedExitStatus)):
msg = 'Unexpected exit status %s from command "%s".\n Output: %s\n Error: %s' % (
exitStatus, command, out, err)
_adapterLog(msg)
if "error: device not found" in err:
raise AndroidDeviceNotFound(msg)
else:
raise Exception(msg)
return (exitStatus, out, err)
class Device(fmbtgti.GUITestInterface):
"""
The Device class provides
- keywords as its methods
- device properties from device's INI file
- view() returns the most recently refreshed View, that contains
items parsed from window dump.
- screenshot() returns the most recently refreshed Screenshot,
bitmaps can be searched from this.
"""
_PARSE_VIEW_RETRY_LIMIT = 10
def __init__(self, deviceName=None, iniFile=None, connect=True):
"""
Connect to given device, or the first not-connected Android
device in the "adb devices" list, if nothing is defined.
Parameters:
deviceName (string, optional):
If deviceName is a device serial number (an item in
the left most column in "adb devices"), connect to
that device. Device information is read from
$FMBTANDROIDHOME/etc/SERIALNUMBER.ini, if it exists.
If deviceName is a nick name, device information is
looked for from $FMBTANDROIDHOME/etc/deviceName.ini,
and the connection is established to the device with
the serial number given in the ini file.
The default is None. The first disconnected device
in the "adb devices" list is connected to. Device
information is read from
$FMBTANDROIDHOME/etc/SERIALNUMBER.ini, if it exists.
iniFile (file object, optional):
A file object that contains device information
ini. Connect to the device with a serial number
given in this file. The default is None.
To create an ini file for a device, use dumpIni. Example:
file("/tmp/test.ini", "w").write(fmbtandroid.Device().dumpIni())
"""
fmbtgti.GUITestInterface.__init__(self)
self._fmbtAndroidHomeDir = os.getenv("FMBTANDROIDHOME", os.getcwd())
self._platformVersion = None
self._lastView = None
self._conf = Ini()
self._loadDeviceAndTestINIs(self._fmbtAndroidHomeDir, deviceName, iniFile)
if deviceName == None:
deviceName = self._conf.value("general", "serial", "")
if connect == False and deviceName == "":
deviceName = "nodevice"
self.setConnection(None)
elif deviceName == "":
# Connect to an unspecified device.
# Go through devices in "adb devices".
listDevicesCommand = ["adb.exe", "devices"]
status, output, err = _run(listDevicesCommand, expectedExitStatus = [0, 127])
if status == 127:
raise Exception('adb not found in PATH. Check your Android SDK installation.')
outputLines = [l.strip() for l in output.splitlines()]
try: deviceLines = outputLines[outputLines.index("List of devices attached")+1:]
except: deviceLines = []
deviceLines = [l for l in deviceLines if l.strip() != ""]
if deviceLines == []:
raise Exception('No devices found with "%s"' % (listDevicesCommand,))
potentialDevices = [line.split()[0] for line in deviceLines]
for deviceName in potentialDevices:
try:
self.serialNumber = deviceName
self._conf.set("general", "serial", self.serialNumber)
self.setConnection(_AndroidDeviceConnection(self.serialNumber))
break
except AndroidConnectionError, e:
continue
else:
raise AndroidConnectionError("Could not connect to device(s): %s." % (
", ".join(potentialDevices)))
# Found a device (deviceName).
self._loadDeviceAndTestINIs(self._fmbtAndroidHomeDir, deviceName, iniFile)
else:
# Device name given, find out the serial number to connect to.
# It may be given in device or test run INI files.
self.serialNumber = self._conf.value("general", "serial", deviceName)
if connect:
self.setConnection(_AndroidDeviceConnection(self.serialNumber))
_deviceIniFilename = self._fmbtAndroidHomeDir + os.sep + "etc" + os.sep + deviceName + ".ini"
self.loadConfig(_deviceIniFilename, override=True, level="device")
# Fetch properties from device configuration
self.nickName = self._conf.value("general", "name", deviceName)
self.phoneNumber = self._conf.value("general", "phonenumber")
# Loading platform-specific configuration requires a
# connection to the device for checking the platform version.
_platformIniFilename = self._fmbtAndroidHomeDir + os.sep + "etc" + os.sep + "android" + self.platformVersion() + ".ini"
# would we need a form-factor ini, too?
self.loadConfig(_platformIniFilename, override=False, level="platform")
self.loadConfig(StringIO.StringIO(DEVICE_INI_DEFAULTS), override=False, level="global default")
self.wlanAP = self._conf.value("environment", "wlanAP")
self.wlanPass = self._conf.value("environment", "wlanPass")
self.btName = self._conf.value("environment", "BTName")
self.btAccessory = self._conf.value("environment", "BTAccessory")
self.serverIP = self._conf.value("environment", "ServerIP")
self.androidUser = self._conf.value("environment", "AndroidUser")
self.voiceMailNumber = self._conf.value("environment", "VoiceMailNumber")
if self._conn: hw = self._conn.recvVariable("build.device")
else: hw = "nohardware"
self.hardware = self._conf.value("general", "hardware", hw)
self.setBitmapPath(self._conf.value("paths", "bitmapPath", self._fmbtAndroidHomeDir + os.sep + "bitmaps" + os.sep + self.hardware + "-" + self.platformVersion() + ":."), self._fmbtAndroidHomeDir)
self.setScreenshotDir(self._conf.value("paths", "screenshotDir", self._fmbtAndroidHomeDir + os.sep + "screenshots"))
def callContact(self, contact):
    """
    Start a phone call to the given contact.

    Returns True if the call command succeeded, otherwise False.
    """
    cmd = 'service call phone 1 s16 "%s"' % (contact,)
    exitStatus, stdout, stderr = self.shellSOE(cmd)
    if exitStatus == 0:
        return True
    _logFailedCommand("callContact", cmd, exitStatus, stdout, stderr)
    return False
def callNumber(self, number):
    """
    Start a phone call to the given phone number.

    Returns True if the call command succeeded, otherwise False.
    """
    cmd = "service call phone 2 s16 %s" % (number,)
    exitStatus, stdout, stderr = self.shellSOE(cmd)
    if exitStatus == 0:
        return True
    _logFailedCommand("callNumber", cmd, exitStatus, stdout, stderr)
    return False
def close(self):
    """Close the test interface and drop device-related resources."""
    fmbtgti.GUITestInterface.close(self)
    # release the connection and the cached view, if they exist
    for attrName in ("_conn", "_lastView"):
        if hasattr(self, attrName):
            delattr(self, attrName)
    import gc
    gc.collect()
def dumpIni(self):
    """
    Return the current device configuration as a string in INI
    format.
    """
    return self._conf.dump()
def ini(self):
    """
    Return the Ini object holding the effective device
    configuration.
    """
    return self._conf
def loadConfig(self, filenameOrObj, override=True, level=""):
    """
    Load configuration values from an INI file or file-like object.

    Parameters:
      filenameOrObj (string or file object):
          path of an INI file, or an open file-like object.
          NOTE(review): a unicode filename falls into the
          file-object branch (type check is == str) -- confirm
          whether that is intended.
      override (boolean, optional):
          if True, loaded values replace existing ones.
          The default is True.
      level (string, optional):
          configuration level name ("device", "platform", ...)
          used only in log messages.

    Errors are logged and swallowed; this method never raises.
    """
    try:
        if type(filenameOrObj) == str:
            filename = filenameOrObj
            fileObj = file(filenameOrObj)  # Python 2 builtin open
        else:
            fileObj = filenameOrObj
            filename = getattr(fileObj, "name", "<string>")
            # rewind reusable objects (e.g. StringIO) before parsing
            if hasattr(fileObj, "seek"):
                fileObj.seek(0)
        self._conf.addFile(fileObj, override=override)
    except Exception, e:
        _adapterLog('Loading %s configuration from "%s" failed: %s' % (level, filename, e))
        return
    _adapterLog('Loaded %s configuration from "%s"' % (level, filename))
def platformVersion(self):
    """
    Return the Android platform version of the device.

    The value is queried once and cached; "nosoftware" stands in
    when there is no device connection.
    """
    if self._platformVersion is None:
        if self._conn:
            self._platformVersion = self._conn.recvVariable("build.version.release")
        else:
            self._platformVersion = "nosoftware"
    return self._platformVersion
def pressAppSwitch(self, **pressKeyKwArgs):
    """
    Press the app switch button. Accepts the same optional
    parameters as pressKey.
    """
    return self.pressKey("KEYCODE_APP_SWITCH", **pressKeyKwArgs)
def pressBack(self, **pressKeyKwArgs):
    """
    Press the back button. Accepts the same optional parameters
    as pressKey.
    """
    return self.pressKey("KEYCODE_BACK", **pressKeyKwArgs)
def pressHome(self, **pressKeyKwArgs):
    """
    Press the home button. Accepts the same optional parameters
    as pressKey.
    """
    return self.pressKey("KEYCODE_HOME", **pressKeyKwArgs)
def pressKey(self, keyName, long=False, hold=0.0):
    """
    Press a key on the device.

    Parameters:
      keyName (string):
          Android key name such as KEYCODE_HOME. The KEYCODE_
          prefix is added if missing; case does not matter.
          Refer to Android KeyEvent documentation.
      long (boolean, optional):
          if True, press the key for a long time.
      hold (float, optional):
          time in seconds to hold the key down.
    """
    # normalize: uppercase and ensure the KEYCODE_ prefix
    normalized = keyName.upper()
    if not normalized.startswith("KEYCODE_"):
        normalized = "KEYCODE_" + normalized
    return fmbtgti.GUITestInterface.pressKey(self, normalized, long, hold)
def pressMenu(self, **pressKeyKwArgs):
    """
    Press the menu button. Accepts the same optional parameters
    as pressKey.
    """
    return self.pressKey("KEYCODE_MENU", **pressKeyKwArgs)
def pressPower(self, **pressKeyKwArgs):
    """
    Press the power button. Accepts the same optional parameters
    as pressKey.
    """
    return self.pressKey("KEYCODE_POWER", **pressKeyKwArgs)
def pressVolumeUp(self, **pressKeyKwArgs):
    """
    Press the volume up button. Accepts the same optional
    parameters as pressKey.
    """
    return self.pressKey("KEYCODE_VOLUME_UP", **pressKeyKwArgs)
def pressVolumeDown(self, **pressKeyKwArgs):
    """
    Press the volume down button. Accepts the same optional
    parameters as pressKey.
    """
    return self.pressKey("KEYCODE_VOLUME_DOWN", **pressKeyKwArgs)
def reboot(self, reconnect=True, firstBoot=False):
    """
    Reboot the device.

    Parameters:
      reconnect (boolean, optional):
          if True (the default), block until the device has been
          reconnected after boot; otherwise return as soon as the
          reboot command has been sent.
      firstBoot (boolean, optional):
          if True, boot as if the device had just been flashed.
          Requires working "adb root". The default is False.

    Returns True on success, otherwise False.
    """
    # 120 s is the timeout the connection layer waits for reconnect
    return self._conn.reboot(reconnect, firstBoot, 120)
def reconnect(self):
"""
Close connections to the device and reconnect.
"""
self.setConnection(None)
import gc
gc.collect()
try:
self.setConnection(_AndroidDeviceConnection(self.serialNumber))
return True
except Exception, e:
_adapterLog("reconnect failed: %s" % (e,))
return False
def refreshView(self, forcedView=None):
    """
    (Re)reads view items on display and updates the latest View
    object.

    Parameters:
      forcedView (View or filename, optional):
          use given View object or view file instead of reading
          items from the device.

    Returns created View object, or None if the window dump could
    not be read from the device.
    """
    def formatErrors(errors):
        # each error is a (lineNumber, lineText, message) tuple
        return "refreshView parse errors:\n %s" % (
            "\n ".join(["line %s: %s error: %s" % e for e in errors]),)
    if forcedView != None:
        if isinstance(forcedView, View):
            self._lastView = forcedView
        elif type(forcedView) == str:
            # treat the argument as a previously saved dump file
            self._lastView = View(self.screenshotDir(), self.serialNumber, file(forcedView).read())
            _adapterLog(formatErrors(self._lastView.errors()))
        else:
            raise ValueError("forcedView must be a View object or a filename")
        return self._lastView
    retryCount = 0
    while True:
        dump = self._conn.recvViewData()
        if dump == None: # dump unreadable
            return None
        view = View(self.screenshotDir(), self.serialNumber, dump)
        # retry a few times on parse errors: the dump may have been
        # taken while the UI was changing
        if len(view.errors()) > 0 and retryCount < self._PARSE_VIEW_RETRY_LIMIT:
            _adapterLog(formatErrors(view.errors()))
            retryCount += 1
            time.sleep(0.2) # sleep before retry
        else:
            # successfully parsed or parsed with errors but no more retries
            self._lastView = view
            return view
def shell(self, shellCommand):
    """
    Run shellCommand through "adb shell" and return its output.

    shellCommand is a single string; arguments are separated by
    whitespace. Use shellSOE() if you need the exit status or
    stdout/stderr separated.
    """
    return self._conn._runAdb(["shell", shellCommand])[1]
def shellSOE(self, shellCommand):
    """
    Run shellCommand through "adb shell".

    Returns the tuple (exitStatus, standardOutput, standardError).

    The fast path requires tar and uuencode on the device.
    """
    return self._conn.shellSOE(shellCommand)
def smsNumber(self, number, message):
    """
    Send message using SMS to given number.

    Parameters:
      number (string)
          phone number to which the SMS will be sent
      message (string)
          the message to be sent.

    Returns True on success, otherwise False.
    """
    smsCommand = ('am start -a android.intent.action.SENDTO ' +
                  '-d sms:%s --es sms_body "%s"' +
                  ' --ez exit_on_sent true') % (number, message)
    status, out, err = self.shellSOE(smsCommand)
    if status != 0:
        _logFailedCommand("sms", smsCommand, status, out, err)
        return False
    _adapterLog("SMS command returned %s" % (out + err,))
    # The intent only opens the messaging UI; drive it with key
    # presses to reach and hit the Send button.
    # NOTE(review): the fixed key sequence and sleeps assume the
    # stock messaging app layout -- confirm on other UIs.
    time.sleep(2)
    self.pressKey("KEYCODE_DPAD_RIGHT")
    time.sleep(1)
    self.pressKey("KEYCODE_ENTER")
    return True
def supportsView(self):
    """
    Check whether the connected device supports reading view data.

    View data is needed by refreshView(), view(), verifyText() and
    waitText(); it is produced by the Android window dump.

    Returns True if view data can be read, otherwise False.
    """
    try:
        self._conn.recvViewData()
    except AndroidConnectionError:
        return False
    return True
def systemProperty(self, propertyName):
    """
    Return an Android Monkey device property value, such as
    "clock.uptime". Refer to Android Monkey documentation.
    """
    return self._conn.recvVariable(propertyName)
def tapId(self, viewItemId, **tapKwArgs):
    """
    Find the item with the given id from the latest view and tap it.

    Returns True if the item was found and tapped, otherwise False.
    Requires that refreshView() has been called.
    """
    assert self._lastView != None, "View required."
    items = self._lastView.findItemsById(viewItemId, count=1)
    if items:
        return self.tapItem(items[0], **tapKwArgs)
    # Fixed: the log line previously referred to a non-existent
    # method name "tapItemById", which made failures hard to trace.
    _adapterLog("tapId(%s): no items found" % (viewItemId,))
    return False
def tapText(self, text, partial=False, **tapKwArgs):
    """
    Find an item with the given text from the latest view and tap it.

    Parameters:
      partial (boolean, optional):
          refer to verifyText documentation. The default is
          False.
      tapPos (pair of floats (x,y)):
          refer to tapItem documentation.
      long, hold (optional):
          refer to tap documentation.

    Returns True if successful, otherwise False.
    """
    assert self._lastView != None, "View required."
    matches = self._lastView.findItemsByText(text, partial=partial, count=1)
    if not matches:
        return False
    return self.tapItem(matches[0], **tapKwArgs)
def topApp(self):
    """
    Return the name of the top (focused) application.
    """
    appName, _ = self._conn.recvTopAppWindow()
    return appName
def topWindow(self):
    """
    Return the name of the top (focused) window.
    """
    _, windowName = self._conn.recvTopAppWindow()
    return windowName
def verifyText(self, text, partial=False):
    """
    Return True if the last view has at least one item with the
    given text.

    Parameters:
      text (string):
          text to be searched for in items.
      partial (boolean, optional):
          if True, match items whose text contains the given
          text; otherwise require an exact match. The default is
          False.
    """
    assert self._lastView != None, "View required."
    found = self._lastView.findItemsByText(text, partial=partial, count=1)
    return found != []
def view(self):
    """
    Return the most recently refreshed View object.
    """
    return self._lastView
def waitText(self, text, partial=False, **waitKwArgs):
    """
    Wait until the given text appears in any view item.

    Parameters:
      text (string):
          text to be waited for.
      partial (boolean, optional):
          refer to verifyText. The default is False.
      waitTime, pollDelay (float, optional):
          refer to wait.

    Returns True if the text appeared within the time limit,
    otherwise False. Updates the last view as a side effect.
    """
    return self.wait(
        self.refreshView,
        self.verifyText, (text,), {'partial': partial},
        **waitKwArgs)
def _loadDeviceAndTestINIs(self, homeDir, deviceName, iniFile):
    """Load the device-specific and the test-run INI files, if any."""
    if deviceName != None:
        deviceIni = homeDir + os.sep + "etc" + os.sep + deviceName + ".ini"
        self.loadConfig(deviceIni, override=True, level="device")
    if iniFile:
        self.loadConfig(iniFile, override=True, level="test")
class Ini:
    """
    Container for device configuration loaded from INI files.

    INI file syntax:
    [section1]
    key1 = value1
    ; commented = out
    # commented = out
    """
    def __init__(self, iniFile=None):
        """
        Initialise the container, optionally with an initial configuration.

        Parameters:
          iniFile (file object, optional):
              load the initial configuration from iniFile.
              The default is None: start with empty configuration.
        """
        # _conf is a dictionary: (section, key) -> value
        self._conf = {}
        if iniFile:
            self.addFile(iniFile)

    def addFile(self, iniFile, override=True):
        """
        Add values from a file to the current configuration.

        Parameters:
          iniFile (file object or iterable of lines):
              load values from this object.
          override (boolean, optional):
              If True, loaded values override existing values.
              Otherwise, only currently undefined values are
              loaded. The default is True.

        Key/value lines that appear before the first [section]
        header are ignored. (Fixed: such lines previously raised
        NameError because no section was bound yet.)
        """
        section = None
        for line in iniFile:
            line = line.strip()
            if line.startswith('[') and line.endswith(']'):
                section = line[1:-1].strip()
            elif line.startswith(";") or line.startswith("#"):
                continue  # comment line
            elif '=' in line and section is not None:
                key, value = line.split('=', 1)
                if override or (section, key.strip()) not in self._conf:
                    self._conf[(section, key.strip())] = value.strip()

    def sections(self):
        """
        Returns list of sections in the current configuration.
        """
        return list(set([k[0] for k in self._conf.keys()]))

    def keys(self, section):
        """
        Returns list of keys in a section in the current configuration.

        Parameters:
          section (string):
              the name of the section.
        """
        return [k[1] for k in self._conf.keys() if k[0] == section]

    def dump(self):
        """
        Returns the current configuration as a single string in the
        INI format.
        """
        lines = []
        for section in sorted(self.sections()):
            lines.append("[%s]" % (section,))
            for key in sorted(self.keys(section)):
                lines.append("%-16s = %s" % (key, self._conf[(section, key)]))
            lines.append("")
        return "\n".join(lines)

    def set(self, section, key, value):
        """
        Set new value for a key in a section.

        Parameters:
          section, key (strings):
              the section, the key.
          value (string):
              the new value. If not string already, it will be
              converted to string, and it will be loaded as a
              string when loaded from file object.
        """
        self._conf[(section, key)] = str(value)

    def value(self, section, key, default=""):
        """
        Returns the value (string) associated with a key in a section.

        Parameters:
          section, key (strings):
              the section and the key.
          default (string, optional):
              the default value to be used and stored if there is
              no value associated to the key in the section. The
              default is the empty string.

        Reading a value of an undefined key in an undefined section
        adds the key and the section to the configuration with the
        returned (the default) value. This makes all returned values
        visible in dump().
        """
        if not (section, key) in self._conf:
            self._conf[(section, key)] = default
        return self._conf[(section, key)]

# For backward compatibility, someone might be using old _DeviceConf
_DeviceConf = Ini
class ViewItem(fmbtgti.GUIItem):
    """
    ViewItem holds the information of a single GUI element.
    """
    def __init__(self, className, code, indent, properties, parent, rawProps, dumpFilename):
        self._p = properties
        self._parent = parent
        self._className = className
        self._code = code
        self._indent = indent
        self._children = []
        # Fixed: rawProps was discarded (always set to ""), which
        # made View.findItemsByRawProps never match a non-empty
        # search string.
        self._rawProps = rawProps
        # default missing scroll offsets to 0 so _calculateBbox can
        # always subtract them
        if not "scrolling:mScrollX" in self._p:
            self._p["scrolling:mScrollX"] = 0
            self._p["scrolling:mScrollY"] = 0
        fmbtgti.GUIItem.__init__(self, className, self._calculateBbox(), dumpFilename)

    def addChild(self, child):
        # called by View while building the item tree
        self._children.append(child)

    def _calculateBbox(self):
        # Coordinates in the dump are relative to the parent item;
        # accumulate parent offsets (minus scroll) to get absolute
        # screen coordinates.
        left = int(self._p["layout:mLeft"])
        top = int(self._p["layout:mTop"])
        parent = self._parent
        while parent:
            pp = parent._p
            left += int(pp["layout:mLeft"]) - int(pp["scrolling:mScrollX"])
            top += int(pp["layout:mTop"]) - int(pp["scrolling:mScrollY"])
            parent = parent._parent
        height = int(self._p["layout:getHeight()"])
        width = int(self._p["layout:getWidth()"])
        return (left, top, left + width, top + height)

    def children(self):   return self._children
    def className(self):  return self._className
    def code(self):       return self._code
    def indent(self):     return self._indent
    def id(self):         return self.property("mID")
    def parent(self):     return self._parent
    def properties(self): return self._p

    def property(self, propertyName):
        # None if the property was not present in the dump
        return self._p.get(propertyName, None)

    def text(self):
        return self.property("text:mText")

    def visible(self):
        return self._p.get("getVisibility()", "") == "VISIBLE"

    def dump(self):
        p = self._p
        return ("ViewItem(\n\tchildren = %d\n\tclassName = '%s'\n\tcode = '%s'\n\t" +
                "indent = %d\n\tproperties = {\n\t\t%s\n\t})") % (
            len(self._children), self._className, self._code, self._indent,
            '\n\t\t'.join(['"%s": %s' % (key, p[key]) for key in sorted(p.keys())]))

    def __str__(self):
        return ("ViewItem(className='%s', id=%s, bbox=%s)" % (
            self._className, self.id(), self.bbox()))
class View(object):
"""
View provides interface to screen dumps from Android. It parses
the dump to a hierarchy of ViewItems. find* methods enable searching
for ViewItems based on their properties.
"""
def __init__(self, screenshotDir, serialNumber, dump):
self.screenshotDir = screenshotDir
self.serialNumber = serialNumber
self._viewItems = []
self._errors = []
self._lineRegEx = re.compile("(?P<indent>\s*)(?P<class>[\w.$]+)@(?P<id>[0-9A-Fa-f]{8} )(?P<properties>.*)")
self._olderAndroidLineRegEx = re.compile("(?P<indent>\s*)(?P<class>[\w.$]+)@(?P<id>\w)(?P<properties>.*)")
self._propRegEx = re.compile("(?P<prop>(?P<name>[^=]+)=(?P<len>\d+),)(?P<data>[^\s]* ?)")
self._dump = dump
self._rawDumpFilename = self.screenshotDir + os.sep + fmbtgti._filenameTimestamp() + "-" + self.serialNumber + ".view"
file(self._rawDumpFilename, "w").write(self._dump)
try: self._parseDump(dump, self._rawDumpFilename)
except Exception, e:
self._errors.append((-1, "", "Parser error"))
def viewItems(self): return self._viewItems
def errors(self): return self._errors
def dumpRaw(self): return self._dump
def dumpItems(self, itemList = None):
if itemList == None: itemList = self._viewItems
l = []
for i in itemList:
l.append(self._dumpItem(i))
return '\n'.join(l)
def dumpTree(self, rootItem = None):
l = []
if rootItem != None:
l.extend(self._dumpSubTree(rootItem, 0))
else:
for i in self._viewItems:
if i._indent == 0:
l.extend(self._dumpSubTree(i, 0))
return '\n'.join(l)
def _dumpSubTree(self, viewItem, indent):
l = []
i = viewItem
l.append(" "*indent + self._dumpItem(viewItem))
for i in viewItem.children():
l.extend(self._dumpSubTree(i, indent + 4))
return l
def _dumpItem(self, viewItem):
i = viewItem
if i.text() != None: t = '"%s"' % (i.text(),)
else: t = None
return "id=%s cls=%s text=%s bbox=%s" % (
i.id(), i.className(), t, i.bbox())
def findItems(self, comparator, count=-1, searchRootItem=None, searchItems=None):
foundItems = []
if count == 0: return foundItems
if searchRootItem != None:
# find from searchRootItem and its children
if comparator(searchRootItem):
foundItems.append(searchRootItem)
for c in searchRootItem.children():
foundItems.extend(self.findItems(comparator, count=count-len(foundItems), searchRootItem=c))
else:
if searchItems != None:
# find from listed items only
searchDomain = searchItems
else:
# find from all items
searchDomain = self._viewItems
for i in searchDomain:
if comparator(i):
foundItems.append(i)
if count > 0 and len(foundItems) >= count:
break
return foundItems
def findItemsByText(self, text, partial=False, count=-1, searchRootItem=None, searchItems=None):
"""
Searches the GUI hiearhy for a object with a given text
"""
if partial:
c = lambda item: (
item.properties().get("text:mText", "").find(text) != -1 )
else:
c = lambda item: (
item.properties().get("text:mText", None) == text )
return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems)
def findItemsById(self, id, count=-1, searchRootItem=None, searchItems=None):
c = lambda item: item.properties().get("mID", "") == id
return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems)
def findItemsByClass(self, className, partial=True, count=-1, searchRootItem=None, searchItems=None):
if partial: c = lambda item: item.className().find(className) != -1
else: c = lambda item: item.className() == className
return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems)
def findItemsByIdAndClass(self, id, className, partial=True, count=-1, searchRootItem=None, searchItems=None):
idOk = self.findItemsById(id, count=-1, searchRootItem=searchRootItem)
return self.findItemsByClass(className, partial=partial, count=count, searchItems=idOk)
def findItemsByRawProps(self, s, count=-1, searchRootItem=None, searchItems=None):
c = lambda item: item._rawProps.find(s) != -1
return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems)
def save(self, fileOrDirName):
shutil.copy(self._rawDumpFilename, fileOrDirName)
def _parseDump(self, dump, rawDumpFilename):
"""
Process the raw dump data and create a tree of ViewItems
"""
# This code originates from tema-android-adapter-3.2,
# AndroidAdapter/guireader.py.
self._viewItems = []
cellLayout = ""
parent = None
previousItem = None
currentIndent = 0
visible = True
self.TOP_PAGED_VIEW = ""
for lineIndex, line in enumerate(dump.splitlines()):
if line == "DONE.":
break
# separate indent, class and properties for each GUI object
# TODO: branch here according to self._androidVersion
matcher = self._lineRegEx.match(line)
if not matcher:
# FIXME: this hack falls back to old format,
# should branch according to self._androidVersion!
matcher = self._olderAndroidLineRegEx.match(line)
if not matcher:
self._errors.append((lineIndex + 1, line, "Illegal line"))
continue # skip this line
className = matcher.group("class")
# Indent specifies the hierarchy level of the object
indent = len(matcher.group("indent"))
# If the indent is bigger that previous, this object is a
# child for the previous object
if indent > currentIndent:
parent = self._viewItems[-1]
elif indent < currentIndent:
for tmp in range(0, currentIndent - indent):
parent = parent.parent()
currentIndent = indent
propertiesData = matcher.group("properties")
properties = {}
index = 0
x = 0
y = 0
# Process the properties of each GUI object
while index < len(propertiesData):
# Separate name and value for each property [^=]*=
propMatch = self._propRegEx.match(propertiesData[index:-1])
if not propMatch or len(propMatch.group("data")) < int(propMatch.group("len")):
if not propMatch.group("data"):
self._errors.append((lineIndex, propertiesData[index:-1], "Illegal property"))
return None
startFrom = index + propertiesData[index:-1].find(propMatch.group("data"))
currFixedData = propertiesData[startFrom:(startFrom + int(propMatch.group("len")))]
length = int(propMatch.group("len"))
# [^=]+=?, == data
properties[propMatch.group("name")] = currFixedData[0:length].lstrip()
else:
length = int(propMatch.group("len"))
# [^=]+=?, == data
properties[propMatch.group("name")] = propMatch.group("data")[0:length].lstrip()
index += len(propMatch.group("prop")) + length + 1
self._viewItems.append(ViewItem(matcher.group("class"), matcher.group("id"), indent, properties, parent, matcher.group("properties"), self._rawDumpFilename))
if parent:
parent.addChild(self._viewItems[-1])
return self._viewItems
def __str__(self):
return 'View(items=%s, dump="%s")' % (
len(self._viewItems), self._rawDumpFilename)
class _AndroidDeviceConnection:
"""
Connection to the Android Device being tested.
"""
_m_host = 'localhost'
_m_port = random.randint(20000, 29999)
_w_host = 'localhost'
_w_port = _m_port + 1
def __init__(self, serialNumber, stopOnError=True):
self._serialNumber = serialNumber
self._stopOnError = stopOnError
self._shellSupportsTar = False
try:
self._resetMonkey()
self._resetWindow()
# check supported features
outputLines = self._runAdb(["shell","tar"])[1].splitlines()
if len(outputLines) == 1 and "bin" in outputLines[0]:
self._shellSupportsTar = False
else:
self._shellSupportsTar = True
finally:
# Next _AndroidDeviceConnection instance will use different ports
self._w_port = _AndroidDeviceConnection._w_port
self._m_port = _AndroidDeviceConnection._m_port
_AndroidDeviceConnection._w_port += 100
_AndroidDeviceConnection._m_port += 100
def __del__(self):
try: self._monkeySocket.close()
except: pass
def target(self):
return self._serialNumber
def _cat(self, remoteFilename):
fd, filename = tempfile.mkstemp("fmbtandroid-cat-")
os.close(fd)
self._runAdb("pull '%s' %s" % (remoteFilename, filename), 0)
contents = file(filename).read()
os.remove(filename)
return contents
def _runAdb(self, command, expectedExitStatus=0):
if not self._stopOnError:
expect = None
else:
expect = expectedExitStatus
if type(command) == list:
command = ["adb", "-s", self._serialNumber] + command
else:
command = ["adb", "-s", self._serialNumber, command]
return _run(command, expectedExitStatus = expect)
def _runSetupCmd(self, cmd, expectedExitStatus = 0):
_adapterLog('setting up connections: "%s"' % (cmd,))
exitStatus, _, _ = self._runAdb(cmd, expectedExitStatus)
if exitStatus == 0: return True
else: return True
def _resetWindow(self):
setupCommands = [["shell", "service" , "call", "window", "1", "i32", "4939"],
["forward", "tcp:"+str(self._w_port), "tcp:4939"]]
for c in setupCommands:
self._runSetupCmd(c)
def _resetMonkey(self, timeout=3, pollDelay=.25):
self._runSetupCmd(["shell","monkey","--port","1080"], None)
time.sleep(pollDelay)
endTime = time.time() + timeout
while time.time() < endTime:
self._runSetupCmd(["forward","tcp:"+str(self._m_port), "tcp:1080"])
try:
self._monkeySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._monkeySocket.connect((self._m_host, self._m_port))
self._monkeySocket.setblocking(0)
self._monkeySocket.settimeout(1.0)
self._platformVersion = self._monkeyCommand("getvar build.version.release", retry=0)[1]
if len(self._platformVersion) > 0:
self._monkeySocket.settimeout(5.0)
return True
except Exception, e:
pass
time.sleep(pollDelay)
if self._stopOnError:
msg = 'Android monkey error: cannot connect to "adb shell monkey --port 1080" to device %s' % (self._serialNumber)
_adapterLog(msg)
raise AndroidConnectionError(msg)
else:
return False
def _monkeyCommand(self, command, retry=3):
try:
self._monkeySocket.sendall(command + "\n")
data = self._monkeySocket.recv(4096).strip()
if len(data) == 0 and retry > 0:
return self._monkeyCommand(command, retry-1)
if data == "OK":
return True, None
elif data.startswith("OK:"):
return True, data.split("OK:")[1]
_adapterLog("monkeyCommand failing... command: '%s' response: '%s'" % (command, data))
return False, None
except socket.error:
try: self._monkeySocket.close()
except: pass
if retry > 0:
self._resetMonkey()
return self._monkeyCommand(command, retry=retry-1)
else:
raise AndroidConnectionError('Android monkey socket connection lost while sending command "%s"' % (command,))
def reboot(self, reconnect, firstBootAfterFlashing, timeout):
if firstBootAfterFlashing:
self._runAdb("root")
time.sleep(2)
self._runAdb(["shell","rm","/data/data/com.android.launcher/shared_prefs/com.android.launcher2.prefs.xml"])
self._runAdb("reboot")
_adapterLog("rebooting " + self._serialNumber)
if reconnect:
self._runAdb("wait-for-device")
endTime = time.time() + timeout
while time.time() < endTime:
try:
if self._resetMonkey(timeout=1, pollDelay=1):
break
except AndroidConnectionError:
pass
time.sleep(1)
else:
_adapterLog("reboot: reconnecting to " + self._serialNumber + " failed")
return False
self._resetWindow()
return True
def recvVariable(self, variableName):
ok, value = self._monkeyCommand("getvar " + variableName)
if ok: return value
else:
# LOG: getvar variableName failed
return None
def recvScreenSize(self):
try:
height = int(self.recvVariable("display.height"))
width = int(self.recvVariable("display.width"))
except TypeError:
return None, None
return width, height
def recvTopAppWindow(self):
_, output, _ = self._runAdb(["shell","dumpsys","window"], 0)
if self._platformVersion >= "4.2":
s = re.findall("mCurrentFocus=Window\{(#?[0-9A-Fa-f]{8})( [^ ]*)? (?P<winName>[^}]*)\}", output)
else:
s = re.findall("mCurrentFocus=Window\{(#?[0-9A-Fa-f]{8}) (?P<winName>[^ ]*) [^ ]*\}", output)
if s and len(s[0][-1].strip()) > 1: topWindowName = s[0][-1]
else: topWindowName = None
s = re.findall("mFocusedApp=AppWindowToken.*ActivityRecord\{#?[0-9A-Fa-f]{8}( [^ ]*)? (?P<appName>[^}]*)\}", output)
if s and len(s[0][-1].strip()) > 1:
topAppName = s[0][-1].strip()
else:
topAppName = None
return topAppName, topWindowName
def sendTap(self, xCoord, yCoord):
return self._monkeyCommand("tap " + str(xCoord) + " " + str(yCoord))[0]
def sendKeyUp(self, key):
return self._monkeyCommand("key up " + key)[0]
def sendKeyDown(self, key):
return self._monkeyCommand("key down " + key)[0]
def sendTouchUp(self, xCoord, yCoord):
return self._monkeyCommand("touch up " + str(xCoord) + " " + str(yCoord))[0]
def sendTouchDown(self, xCoord, yCoord):
return self._monkeyCommand("touch down " + str(xCoord) + " " + str(yCoord))[0]
def sendTouchMove(self, xCoord, yCoord):
return self._monkeyCommand("touch move " + str(xCoord) + " " + str(yCoord))[0]
def sendTrackBallMove(self, dx, dy):
return self._monkeyCommand("trackball " + str(dx) + " " + str(dy))[0]
def sendPress(self, key):
return self._monkeyCommand("press " + key)[0]
def sendType(self, text):
for lineIndex, line in enumerate(text.split('\n')):
if lineIndex > 0: self.sendPress("KEYCODE_ENTER")
for wordIndex, word in enumerate(line.split(' ')):
if wordIndex > 0: self.sendPress("KEYCODE_SPACE")
if len(word) > 0 and not self._monkeyCommand("type " + word)[0]:
_adapterLog('sendType("%s") failed when sending word "%s"' %
(text, word))
return False
return True
def recvScreenshot(self, filename):
"""
Capture a screenshot and copy the image file to given path or
system temp folder.
Returns True on success, otherwise False.
"""
remotefile = '/sdcard/' + os.path.basename(filename)
self._runAdb(['shell', 'screencap', '-p', remotefile], 0)
status, out, err = self._runAdb(['pull', remotefile, filename], [0, 1])
if status != 0:
raise FMBTAndroidError("Failed to fetch screenshot from the device: %s. SD card required." % ((out + err).strip(),))
status, _, _ = self._runAdb(['shell','rm', remotefile], 0)
return True
def shellSOE(self, shellCommand):
fd, filename = tempfile.mkstemp(prefix="fmbtandroid-shellcmd-")
remotename = '/sdcard/' + os.path.basename(filename)
os.write(fd, shellCommand + "\n")
os.close(fd)
self._runAdb(["push",filename,remotename],0)
cmd = "shell 'source %s >%s.out 2>%s.err; echo $? > %s.status" % ((remotename,)*4)
if self._shellSupportsTar:
# do everything we can in one command to minimise adb
# commands: execute command, record results, package,
# print uuencoded package and remove remote temp files
cmd += "; cd %s; tar czf - %s.out %s.err %s.status | uuencode %s.tar.gz; rm -f %s*'" % (
(os.path.dirname(remotename),) + ((os.path.basename(remotename),) * 5))
status, output, error = self._runAdb(cmd, 0)
file(filename, "w").write(output)
uu.decode(filename, out_file=filename + ".tar.gz")
import tarfile
tar = tarfile.open(filename + ".tar.gz")
basename = os.path.basename(filename)
stdout = tar.extractfile(basename + ".out").read()
stderr = tar.extractfile(basename + ".err").read()
try: exitstatus = int(tar.extractfile(basename + ".status").read())
except: exitstatus = None
os.remove(filename)
os.remove(filename + ".tar.gz")
else:
# need to pull files one by one, slow.
cmd += "'"
self._runAdb(cmd, 0)
stdout = self._cat(remotename + ".out")
stderr = self._cat(remotename + ".err")
try: exitstatus = int(self._cat(remotename + ".status"))
except: exitstatus = None
self._runAdb(["shell","rm -f "+remotename])
return exitstatus, stdout, stderr
def recvViewData(self, retry=3):
_dataBufferLen = 4096 * 16
try:
self._windowSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._windowSocket.connect( (self._w_host, self._w_port) )
# DUMP -1: get foreground window info
if self._windowSocket.sendall("DUMP -1\n") == 0:
# LOG: readGUI cannot write to window socket
raise AndroidConnectionError("writing socket failed")
# Read until a "DONE" line
data = ""
while True:
try: newData = self._windowSocket.recv(_dataBufferLen)
except socket.timeout:
continue
data += newData
if data.splitlines()[-1] == "DONE" or newData == '':
break
return data
except Exception, msg:
_adapterLog("recvViewData: window socket error: %s" % (msg,))
if retry > 0:
self._resetWindow()
return self.recvViewData(retry=retry-1)
else:
msg = "recvViewData: cannot read window socket"
_adapterLog(msg)
raise AndroidConnectionError(msg)
finally:
try: self._windowSocket.close()
except: pass
# Exception hierarchy of this module, rooted at FMBTAndroidError.
class FMBTAndroidError(Exception): pass
# Raised when connecting to the device (monkey/window services) fails.
class AndroidConnectionError(FMBTAndroidError): pass
# Raised when an established device connection drops mid-session.
class AndroidConnectionLost(AndroidConnectionError): pass
# Raised when no device with the given serial number is available.
class AndroidDeviceNotFound(AndroidConnectionError): pass
# fixed: fmbtandroid (broken by windows port)
# fMBT, free Model Based Testing tool
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
#
# View._parseDump method contains code that has been published as part
# of the TEMA tool, under the MIT open source license:
#
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
This library provides a test interface to Android devices.
Device class implements a test interface that is based on Android
Debug Bridge (adb) and Android monkey.
Device's refreshScreenshot() returns a Screenshot object, from which
bitmaps can be searched for.
Device's refreshView() returns a View object, from which UI elements
can be searched according to their id, class, text and other
properties.
Using this library requires that adb is in PATH.
Tips & tricks
-------------
Take a screenshot and save it to a file
import fmbtandroid
fmbtandroid.Device().refreshScreenshot().save("/tmp/screen.png")
* * *
Print view items on device display
import fmbtandroid
print fmbtandroid.Device().refreshView().dumpTree()
* * *
Open application grid from the home screen, unlock screen if necessary
import fmbtandroid
import time
d = fmbtandroid.Device()
d.pressHome()
time.sleep(1)
whatISee = d.waitAnyBitmap(["lockscreen-lock.png", "home-appgrid.png"])
if "lockscreen-lock.png" in whatISee:
d.swipeBitmap("lockscreen-lock.png", "east")
time.sleep(1)
d.pressHome()
whatISee = d.waitAnyBitmap(["home-appgrid.png"])
assert "home-appgrid.png" in whatISee, "Cannot find appgrid bitmap at the home screen"
d.tapBitmap("home-appgrid.png")
* * *
Save generated device ini for modifications
import fmbtandroid
file("/tmp/mydevice.ini", "w").write(fmbtandroid.Device().dumpIni())
* * *
Connect to device based on an ini file
import fmbtandroid
d = fmbtandroid.Device(iniFile=file("/tmp/mydevice.ini"))
d.pressHome()
* * *
Open screenlock by swiping lock.png bitmap on the display to the
east. The lock.png file needs to be in bitmapPath defined in
mydevice.ini.
import fmbtandroid
d = fmbtandroid.Device(iniFile=file("/tmp/mydevice.ini"))
d.refreshScreenshot()
d.swipeBitmap("lock.png", "east")
* * *
Execute a shell command on Android device, show exit status, standard
output and standard error:
import fmbtandroid
status, out, err = fmbtandroid.Device().shellSOE("mkdir /proc/foo")
print 'status: %s, stdout: "%s", stderr: "%s"' % (status, out, err)
* * *
Enable extensive logging with screenshots and highlighted content:
import fmbtandroid, time
d = fmbtandroid.Device()
d.enableVisualLog("example.html")
d.pressHome(); time.sleep(1)
d.refreshScreenshot()
d.tapOcrText("Google"); time.sleep(1)
d.refreshScreenshot()
then view the log:
$ chromium example.html
"""
# Built-in fallback configuration. Loaded last (override=False) in
# Device.__init__, so values here apply only when neither the device,
# platform nor test INI files define them.
DEVICE_INI_DEFAULTS = '''
[objects]
appsButtonId = id/0x0
appsButtonClass = BubbleTextView
; [application.NAME] sections:
; gridname = exact caption of the application in application grid (text
; property)
; window = string included in topWindow() when application is running
[homescreen]
window = Launcher
'''
import commands
import os
import platform
import random
import re
import shutil
import socket
import StringIO
import subprocess
import tempfile
import time
import uu
import fmbt
import fmbtgti
def _adapterLog(msg):
    """Write msg to the fmbt adapter log, prefixed with this module's name."""
    entry = "fmbtandroid: %s" % (msg,)
    fmbt.adapterlog(entry)
def _logFailedCommand(source, command, exitstatus, stdout, stderr):
    """Record a failed command, with its output and status, in the adapter log."""
    details = 'in %s command "%s" failed:\n output: %s\n error: %s\n status: %s' % (
        source, command, stdout, stderr, exitstatus)
    _adapterLog(details)
# Platform-specific process settings: on Windows the adb binary has an
# .exe suffix, and close_fds cannot be combined with redirected
# stdout/stderr on older Python versions, so it is disabled there.
if platform.system() == "Windows":
    _g_closeFds = False
    _g_adbExecutable = "adb.exe"
else:
    _g_closeFds = True
    _g_adbExecutable = "adb"
def _run(command, expectedExitStatus = None):
    """
    Run a command and return (exitStatus, stdout, stderr).

    Parameters:

      command (string or list):
              command to execute. A string is run through the shell,
              a list is executed directly.

      expectedExitStatus (int, list of ints, or None, optional):
              if given, wait for the command to finish and raise an
              exception unless the exit status matches. If None
              (the default), the command is launched fire-and-forget:
              output is not collected and the exit status is not
              checked. The default is None.

    Raises AndroidDeviceNotFound if adb reports a missing device,
    otherwise Exception on unexpected exit status.
    """
    if type(command) == str: shell=True
    else: shell=False
    try:
        p = subprocess.Popen(command, shell=shell,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             close_fds=_g_closeFds)
        if expectedExitStatus != None:
            # Caller cares about the result: wait and collect output.
            out, err = p.communicate()
        else:
            # Fire-and-forget: do not wait; p.returncode may still be None.
            out, err = ('', None)
    except Exception, e:
        # Launching failed (e.g. executable not found). Fake a process
        # object: note that p is the class itself with a returncode
        # attribute, not an instance, and err carries the exception.
        class fakeProcess(object): pass
        p = fakeProcess
        p.returncode = 127
        out, err = ('', e)
    exitStatus = p.returncode
    if expectedExitStatus != None:
        if ((type(expectedExitStatus) in [list, tuple] and
             not exitStatus in expectedExitStatus) or
            (type(expectedExitStatus) == int and
             not exitStatus == expectedExitStatus)):
            msg = 'Unexpected exit status %s from command "%s".\n Output: %s\n Error: %s' % (
                exitStatus, command, out, err)
            _adapterLog(msg)
            # NOTE(review): err may be an Exception instance (from the
            # except branch above) rather than a string; the "in" test
            # below assumes string-like err -- confirm.
            if "error: device not found" in err:
                raise AndroidDeviceNotFound(msg)
            else:
                raise Exception(msg)
    return (exitStatus, out, err)
class Device(fmbtgti.GUITestInterface):
    """
    The Device class provides

    - keywords as its methods

    - device properties from device's INI file

    - view() returns the most recently refreshed View, that contains
      items parsed from window dump.

    - screenshot() returns the most recently refreshed Screenshot,
      bitmaps can be searched from this.
    """
    # How many times refreshView() re-reads a window dump that failed to
    # parse before accepting the view together with its parse errors.
    _PARSE_VIEW_RETRY_LIMIT = 10

    def __init__(self, deviceName=None, iniFile=None, connect=True):
        """
        Connect to given device, or the first not-connected Android
        device in the "adb devices" list, if nothing is defined.

        Parameters:

          deviceName (string, optional):
                  If deviceName is a device serial number (an item in
                  the left most column in "adb devices"), connect to
                  that device. Device information is read from
                  $FMBTANDROIDHOME/etc/SERIALNUMBER.ini, if it exists.

                  If deviceName is a nick name, device information is
                  looked for from $FMBTANDROIDHOME/etc/deviceName.ini,
                  and the connection is established to the device with
                  the serial number given in the ini file.

                  The default is None. The first disconnected device
                  in the "adb devices" list is connected to. Device
                  information is read from
                  $FMBTANDROIDHOME/etc/SERIALNUMBER.ini, if it exists.

          iniFile (file object, optional):
                  A file object that contains device information
                  ini. Connect to the device with a serial number
                  given in this file. The default is None.

        To create an ini file for a device, use dumpIni. Example:

        file("/tmp/test.ini", "w").write(fmbtandroid.Device().dumpIni())
        """
        fmbtgti.GUITestInterface.__init__(self)
        self._fmbtAndroidHomeDir = os.getenv("FMBTANDROIDHOME", os.getcwd())
        self._platformVersion = None
        self._lastView = None
        self._conf = Ini()
        # Load device/test INIs up front: they may define the serial number.
        self._loadDeviceAndTestINIs(self._fmbtAndroidHomeDir, deviceName, iniFile)
        if deviceName == None:
            deviceName = self._conf.value("general", "serial", "")

        if connect == False and deviceName == "":
            # Offline mode: no device name and no connection wanted.
            deviceName = "nodevice"
            self.setConnection(None)
        elif deviceName == "":
            # Connect to an unspecified device.
            # Go through devices in "adb devices".
            listDevicesCommand = [_g_adbExecutable, "devices"]
            # 127 is tolerated here so that a missing adb can be reported
            # with a clearer message below.
            status, output, err = _run(listDevicesCommand, expectedExitStatus = [0, 127])
            if status == 127:
                raise Exception('adb not found in PATH. Check your Android SDK installation.')
            outputLines = [l.strip() for l in output.splitlines()]
            try: deviceLines = outputLines[outputLines.index("List of devices attached")+1:]
            except: deviceLines = []

            deviceLines = [l for l in deviceLines if l.strip() != ""]

            if deviceLines == []:
                raise Exception('No devices found with "%s"' % (listDevicesCommand,))

            potentialDevices = [line.split()[0] for line in deviceLines]

            # Try each listed device until one accepts a connection.
            for deviceName in potentialDevices:
                try:
                    self.serialNumber = deviceName
                    self._conf.set("general", "serial", self.serialNumber)
                    self.setConnection(_AndroidDeviceConnection(self.serialNumber))
                    break
                except AndroidConnectionError, e:
                    continue
            else:
                raise AndroidConnectionError("Could not connect to device(s): %s." % (
                    ", ".join(potentialDevices)))

            # Found a device (deviceName).
            self._loadDeviceAndTestINIs(self._fmbtAndroidHomeDir, deviceName, iniFile)
        else:
            # Device name given, find out the serial number to connect to.
            # It may be given in device or test run INI files.
            self.serialNumber = self._conf.value("general", "serial", deviceName)
            if connect:
                self.setConnection(_AndroidDeviceConnection(self.serialNumber))
            _deviceIniFilename = self._fmbtAndroidHomeDir + os.sep + "etc" + os.sep + deviceName + ".ini"
            self.loadConfig(_deviceIniFilename, override=True, level="device")

        # Fetch properties from device configuration
        self.nickName = self._conf.value("general", "name", deviceName)
        self.phoneNumber = self._conf.value("general", "phonenumber")

        # Loading platform-specific configuration requires a
        # connection to the device for checking the platform version.
        _platformIniFilename = self._fmbtAndroidHomeDir + os.sep + "etc" + os.sep + "android" + self.platformVersion() + ".ini"
        # would we need a form-factor ini, too?
        self.loadConfig(_platformIniFilename, override=False, level="platform")
        self.loadConfig(StringIO.StringIO(DEVICE_INI_DEFAULTS), override=False, level="global default")

        # Environment-specific settings (test-bed WLAN, Bluetooth, etc.)
        # read from the [environment] section.
        self.wlanAP = self._conf.value("environment", "wlanAP")
        self.wlanPass = self._conf.value("environment", "wlanPass")
        self.btName = self._conf.value("environment", "BTName")
        self.btAccessory = self._conf.value("environment", "BTAccessory")
        self.serverIP = self._conf.value("environment", "ServerIP")
        self.androidUser = self._conf.value("environment", "AndroidUser")
        self.voiceMailNumber = self._conf.value("environment", "VoiceMailNumber")

        # NOTE(review): self._conn is presumably set by setConnection()
        # in fmbtgti.GUITestInterface -- confirm.
        if self._conn: hw = self._conn.recvVariable("build.device")
        else: hw = "nohardware"
        self.hardware = self._conf.value("general", "hardware", hw)
        self.setBitmapPath(self._conf.value("paths", "bitmapPath", self._fmbtAndroidHomeDir + os.sep + "bitmaps" + os.sep + self.hardware + "-" + self.platformVersion() + ":."), self._fmbtAndroidHomeDir)
        self.setScreenshotDir(self._conf.value("paths", "screenshotDir", self._fmbtAndroidHomeDir + os.sep + "screenshots"))

    def callContact(self, contact):
        """
        Call to given contact.

        Return True if successful, otherwise False.
        """
        callCommand = 'service call phone 1 s16 "%s"' % (contact,)
        status, out, err = self.shellSOE(callCommand)
        if status != 0:
            _logFailedCommand("callContact", callCommand, status, out, err)
            return False
        else:
            return True

    def callNumber(self, number):
        """
        Call to given phone number.

        Return True if successful, otherwise False.
        """
        callCommand = "service call phone 2 s16 %s" % (number,)
        status, out, err = self.shellSOE(callCommand)
        if status != 0:
            _logFailedCommand("callNumber", callCommand, status, out, err)
            return False
        else:
            return True

    def close(self):
        """
        Close the connection to the device and drop the cached view.
        """
        fmbtgti.GUITestInterface.close(self)
        if hasattr(self, "_conn"):
            del self._conn
        if hasattr(self, "_lastView"):
            del self._lastView
        # Force collection of the dropped connection/view right away.
        import gc
        gc.collect()

    def dumpIni(self):
        """
        Returns contents of current device configuration as a string (in
        INI format).
        """
        return self._conf.dump()

    def ini(self):
        """
        Returns an Ini object containing effective device
        configuration.
        """
        return self._conf

    def loadConfig(self, filenameOrObj, override=True, level=""):
        """
        Load values from a file name or a file-like object into the
        device configuration. Failures are logged and ignored, so a
        missing optional INI file does not abort construction.

        Parameters:

          filenameOrObj (string or file object):
                  source of the configuration values.

          override (boolean, optional):
                  if True, loaded values replace existing ones.
                  The default is True.

          level (string, optional):
                  free-form label ("device", "platform", ...) used
                  only in log messages.
        """
        try:
            if type(filenameOrObj) == str:
                filename = filenameOrObj
                fileObj = file(filenameOrObj)
            else:
                fileObj = filenameOrObj
                filename = getattr(fileObj, "name", "<string>")
                # Rewind reused file objects so the whole file is read.
                if hasattr(fileObj, "seek"):
                    fileObj.seek(0)
            self._conf.addFile(fileObj, override=override)
        except Exception, e:
            _adapterLog('Loading %s configuration from "%s" failed: %s' % (level, filename, e))
            return
        _adapterLog('Loaded %s configuration from "%s"' % (level, filename))

    def platformVersion(self):
        """
        Returns the platform version of the device.
        """
        # Lazily fetched from the device; "nosoftware" when not connected.
        if self._platformVersion == None:
            if self._conn:
                self._platformVersion = self._conn.recvVariable("build.version.release")
            else:
                self._platformVersion = "nosoftware"
        return self._platformVersion

    def pressAppSwitch(self, **pressKeyKwArgs):
        """
        Press the app switch button.

        Optional parameters are the same as for pressKey.
        """
        return self.pressKey("KEYCODE_APP_SWITCH", **pressKeyKwArgs)

    def pressBack(self, **pressKeyKwArgs):
        """
        Press the back button.

        Optional parameters are the same as for pressKey.
        """
        return self.pressKey("KEYCODE_BACK", **pressKeyKwArgs)

    def pressHome(self, **pressKeyKwArgs):
        """
        Press the home button.

        Optional parameters are the same as for pressKey.
        """
        return self.pressKey("KEYCODE_HOME", **pressKeyKwArgs)

    def pressKey(self, keyName, long=False, hold=0.0):
        """
        Press a key on the device.

        Parameters:

          keyName (string):
                  the name of the key, like KEYCODE_HOME. If KEYCODE_
                  prefix is not given, it is added. Refer to Android
                  KeyEvent documentation.

          long (boolean, optional):
                  if True, press the key for long time.

          hold (float, optional):
                  time in seconds to hold the key down.
        """
        # Normalize to the full upper-case KEYCODE_* form expected by
        # the generic pressKey implementation.
        if not keyName.upper().startswith("KEYCODE_"):
            keyName = "KEYCODE_" + keyName
        keyName = keyName.upper()
        return fmbtgti.GUITestInterface.pressKey(self, keyName, long, hold)

    def pressMenu(self, **pressKeyKwArgs):
        """
        Press the menu button.

        Optional parameters are the same as for pressKey.
        """
        return self.pressKey("KEYCODE_MENU", **pressKeyKwArgs)

    def pressPower(self, **pressKeyKwArgs):
        """
        Press the power button.

        Optional parameters are the same as for pressKey.
        """
        return self.pressKey("KEYCODE_POWER", **pressKeyKwArgs)

    def pressVolumeUp(self, **pressKeyKwArgs):
        """
        Press the volume up button.

        Optional parameters are the same as for pressKey.
        """
        return self.pressKey("KEYCODE_VOLUME_UP", **pressKeyKwArgs)

    def pressVolumeDown(self, **pressKeyKwArgs):
        """
        Press the volume down button.

        Optional parameters are the same as for pressKey.
        """
        return self.pressKey("KEYCODE_VOLUME_DOWN", **pressKeyKwArgs)

    def reboot(self, reconnect=True, firstBoot=False):
        """
        Reboot the device.

        Parameters

          reconnect (boolean, optional)
                  If True, do not return until the device has been
                  connected after boot. Otherwise return once reboot
                  command has been sent. The default is True.

          firstBoot (boolean, optional)
                  If True, the device boots like it would have been
                  flashed. Requires that "adb root" works. The default
                  is False.

        Returns True on success, otherwise False.
        """
        # 120 s is the maximum time to wait for the device to come back.
        return self._conn.reboot(reconnect, firstBoot, 120)

    def reconnect(self):
        """
        Close connections to the device and reconnect.
        """
        self.setConnection(None)
        # Collect the old connection so its forwarded ports are freed
        # before opening a new one.
        import gc
        gc.collect()
        try:
            self.setConnection(_AndroidDeviceConnection(self.serialNumber))
            return True
        except Exception, e:
            _adapterLog("reconnect failed: %s" % (e,))
            return False

    def refreshView(self, forcedView=None):
        """
        (Re)reads view items on display and updates the latest View
        object.

        Parameters:

          forcedView (View or filename, optional):
                use given View object or view file instead of reading
                items from the device.

        Returns created View object.
        """
        def formatErrors(errors):
            # errors is a list of (line number, line, message) tuples.
            return "refreshView parse errors:\n %s" % (
                "\n ".join(["line %s: %s error: %s" % e for e in errors]),)
        if forcedView != None:
            if isinstance(forcedView, View):
                self._lastView = forcedView
            elif type(forcedView) == str:
                # Treat the string as a filename of a saved window dump.
                self._lastView = View(self.screenshotDir(), self.serialNumber, file(forcedView).read())
                _adapterLog(formatErrors(self._lastView.errors()))
            else:
                raise ValueError("forcedView must be a View object or a filename")
            return self._lastView
        retryCount = 0
        while True:
            dump = self._conn.recvViewData()
            if dump == None: # dump unreadable
                return None
            view = View(self.screenshotDir(), self.serialNumber, dump)
            if len(view.errors()) > 0 and retryCount < self._PARSE_VIEW_RETRY_LIMIT:
                _adapterLog(formatErrors(view.errors()))
                retryCount += 1
                time.sleep(0.2) # sleep before retry
            else:
                # successfully parsed or parsed with errors but no more retries
                self._lastView = view
                return view

    def shell(self, shellCommand):
        """
        Execute shellCommand in adb shell.

        shellCommand is a string (arguments separated by whitespace).

        Returns output of "adb shell" command.

        If you wish to receive exitstatus or standard output and error
        separated from shellCommand, refer to shellSOE().
        """
        return self._conn._runAdb(["shell", shellCommand])[1]

    def shellSOE(self, shellCommand):
        """
        Execute shellCommand in adb shell.

        shellCommand is a string (arguments separated by whitespace).

        Returns tuple (exitStatus, standardOutput, standardError).

        Requires tar and uuencode to be available on the device.
        """
        return self._conn.shellSOE(shellCommand)

    def smsNumber(self, number, message):
        """
        Send message using SMS to given number.

        Parameters:

          number (string)
                  phone number to which the SMS will be sent

          message (string)
                  the message to be sent.

        Returns True on success, otherwise False.
        """
        smsCommand = ('am start -a android.intent.action.SENDTO ' +
                      '-d sms:%s --es sms_body "%s"' +
                      ' --ez exit_on_sent true') % (number, message)
        status, out, err = self.shellSOE(smsCommand)
        if status != 0:
            _logFailedCommand("sms", smsCommand, status, out, err)
            return False
        _adapterLog("SMS command returned %s" % (out + err,))
        # The intent opens the messaging UI; navigate to the Send
        # button and press it. NOTE(review): the key sequence assumes a
        # particular messaging app layout -- confirm on target device.
        time.sleep(2)
        self.pressKey("KEYCODE_DPAD_RIGHT")
        time.sleep(1)
        self.pressKey("KEYCODE_ENTER")
        return True

    def supportsView(self):
        """
        Check if connected device supports reading view data.

        View data is needed by refreshView(), view(), verifyText() and
        waitText(). It is produced by Android window dump.

        Returns True if view data can be read, otherwise False.
        """
        try:
            self._conn.recvViewData()
            return True
        except AndroidConnectionError:
            return False

    def systemProperty(self, propertyName):
        """
        Returns Android Monkey Device properties, such as
        "clock.uptime", refer to Android Monkey documentation.
        """
        return self._conn.recvVariable(propertyName)

    def tapId(self, viewItemId, **tapKwArgs):
        """
        Find an item with given id from the latest view, and tap it.
        """
        assert self._lastView != None, "View required."
        items = self._lastView.findItemsById(viewItemId, count=1)
        if len(items) > 0:
            return self.tapItem(items[0], **tapKwArgs)
        else:
            _adapterLog("tapItemById(%s): no items found" % (viewItemId,))
            return False

    def tapText(self, text, partial=False, **tapKwArgs):
        """
        Find an item with given text from the latest view, and tap it.

        Parameters:

          partial (boolean, optional):
                  refer to verifyText documentation. The default is
                  False.

          tapPos (pair of floats (x, y)):
                  refer to tapItem documentation.

          long, hold (optional):
                  refer to tap documentation.

        Returns True if successful, otherwise False.
        """
        assert self._lastView != None, "View required."
        items = self._lastView.findItemsByText(text, partial=partial, count=1)
        if len(items) == 0: return False
        return self.tapItem(items[0], **tapKwArgs)

    def topApp(self):
        """
        Returns the name of the top application.
        """
        return self._conn.recvTopAppWindow()[0]

    def topWindow(self):
        """
        Returns the name of the top window.
        """
        return self._conn.recvTopAppWindow()[1]

    def verifyText(self, text, partial=False):
        """
        Verify that the last view has at least one item with given
        text.

        Parameters:

          text (string):
                  text to be searched for in items.

          partial (boolean, optional):
                  if True, match items if item text contains given
                  text, otherwise match only if item text is equal to
                  the given text. The default is False (exact match).
        """
        assert self._lastView != None, "View required."
        return self._lastView.findItemsByText(text, partial=partial, count=1) != []

    def view(self):
        """
        Returns the last view (the most recently refreshed view).
        """
        return self._lastView

    def waitText(self, text, partial=False, **waitKwArgs):
        """
        Wait until text appears in any view item.

        Parameters:

          text (string):
                text to be waited for.

          partial (boolean, optional):
                refer to verifyText. The default is False.

          waitTime, pollDelay (float, optional):
                refer to wait.

        Returns True if text appeared within given time limit,
        otherwise False.

        Updates the last view.
        """
        return self.wait(self.refreshView,
                         self.verifyText, (text,), {'partial': partial},
                         **waitKwArgs)

    def _loadDeviceAndTestINIs(self, homeDir, deviceName, iniFile):
        """
        Load the device INI ($FMBTANDROIDHOME/etc/<deviceName>.ini, if
        deviceName is given) and then the test-run INI (iniFile, if
        given), both overriding previously loaded values.
        """
        if deviceName != None:
            _deviceIniFilename = homeDir + os.sep + "etc" + os.sep + deviceName + ".ini"
            self.loadConfig(_deviceIniFilename, override=True, level="device")
        if iniFile:
            self.loadConfig(iniFile, override=True, level="test")
class Ini:
    """
    Container for device configuration loaded from INI files.

    INI file syntax:
    [section1]
    key1 = value1
    ; commented = out
    # commented = out
    """
    def __init__(self, iniFile=None):
        """
        Initialise the container, optionally with an initial configuration.

        Parameters:

          iniFile (file object, optional):
                  load the initial configuration from iniFile.
                  The default is None: start with empty configuration.
        """
        # _conf is a dictionary:
        # (section, key) -> value
        self._conf = {}
        if iniFile:
            self.addFile(iniFile)

    def addFile(self, iniFile, override=True):
        """
        Add values from a file to the current configuration.

        Parameters:

          iniFile (file object):
                  load values from this file object (any iterable of
                  lines works).

          override (boolean, optional):
                  If True, loaded values override existing values.
                  Otherwise, only currently undefined values are
                  loaded. The default is True.
        """
        # Bug fix: previously "section" was unbound until the first
        # [section] header, so a key=value line before any header raised
        # NameError. Such keys now go to the "" (empty-name) section.
        section = ""
        for line in iniFile:
            line = line.strip()
            if line.startswith('[') and line.endswith(']'):
                section = line[1:-1].strip()
            elif line.startswith(";") or line.startswith("#"):
                continue  # comment line
            elif '=' in line:
                key, value = line.split('=', 1)
                if override or (section, key.strip()) not in self._conf:
                    self._conf[(section, key.strip())] = value.strip()

    def sections(self):
        """
        Returns list of sections in the current configuration.
        """
        return list(set([k[0] for k in self._conf.keys()]))

    def keys(self, section):
        """
        Returns list of keys in a section in the current configuration.

        Parameters:

          section (string):
                  the name of the section.
        """
        return [k[1] for k in self._conf.keys() if k[0] == section]

    def dump(self):
        """
        Returns the current configuration as a single string in the
        INI format.
        """
        lines = []
        for section in sorted(self.sections()):
            lines.append("[%s]" % (section,))
            for key in sorted(self.keys(section)):
                # Pad keys to a 16-character column for readability.
                lines.append("%-16s = %s" % (key, self._conf[(section, key)]))
            lines.append("")
        return "\n".join(lines)

    def set(self, section, key, value):
        """
        Set new value for a key in a section.

        Parameters:

          section, key (strings):
                  the section, the key.

          value (string):
                  the new value. If not string already, it will be
                  converted to string, and it will be loaded as a
                  string when loaded from file object.
        """
        self._conf[(section, key)] = str(value)

    def value(self, section, key, default=""):
        """
        Returns the value (string) associated with a key in a section.

        Parameters:

          section, key (strings):
                  the section and the key.

          default (string, optional):
                  the default value to be used and stored if there is
                  no value associated to the key in the section. The
                  default is the empty string.

        Reading a value of an undefined key in an undefined section
        adds the key and the section to the configuration with the
        returned (the default) value. This makes all returned values
        visible in dump().
        """
        if not (section, key) in self._conf:
            self._conf[(section, key)] = default
        return self._conf[(section, key)]
# Backward-compatibility alias: older code may still refer to the
# configuration container by its previous name _DeviceConf.
_DeviceConf = Ini
class ViewItem(fmbtgti.GUIItem):
    """
    ViewItem holds the information of a single GUI element.
    """
    def __init__(self, className, code, indent, properties, parent, rawProps, dumpFilename):
        """
        Parameters: className and code identify the element, indent is
        its depth in the dump hierarchy, properties is the parsed
        property dict, parent is the enclosing ViewItem (or None),
        rawProps is the unparsed property string, dumpFilename names
        the raw dump file the item came from.
        """
        self._p = properties
        self._parent = parent
        self._className = className
        self._code = code
        self._indent = indent
        self._children = []
        # Bug fix: keep the raw property string; it was previously
        # discarded (always ""), which made findItemsByRawProps()
        # unable to match anything non-trivial.
        self._rawProps = rawProps
        # Default the scroll offsets independently so a dump carrying
        # only one of the two keys cannot cause a KeyError in
        # _calculateBbox() of a child item.
        if not "scrolling:mScrollX" in self._p:
            self._p["scrolling:mScrollX"] = 0
        if not "scrolling:mScrollY" in self._p:
            self._p["scrolling:mScrollY"] = 0
        fmbtgti.GUIItem.__init__(self, className, self._calculateBbox(), dumpFilename)

    def addChild(self, child): self._children.append(child)

    def _calculateBbox(self):
        """
        Return (left, top, right, bottom) in absolute coordinates:
        the item's own layout offsets plus those of every ancestor,
        each corrected by the ancestor's scroll position.
        """
        left = int(self._p["layout:mLeft"])
        top = int(self._p["layout:mTop"])
        parent = self._parent
        while parent:
            pp = parent._p
            left += int(pp["layout:mLeft"]) - int(pp["scrolling:mScrollX"])
            top += int(pp["layout:mTop"]) - int(pp["scrolling:mScrollY"])
            parent = parent._parent
        height = int(self._p["layout:getHeight()"])
        width = int(self._p["layout:getWidth()"])
        return (left, top, left + width, top + height)

    # Simple accessors for the parsed dump data.
    def children(self): return self._children
    def className(self): return self._className
    def code(self): return self._code
    def indent(self): return self._indent
    def id(self): return self.property("mID")
    def parent(self): return self._parent
    def properties(self): return self._p

    def property(self, propertyName):
        """Return the value of propertyName, or None if not present."""
        return self._p.get(propertyName, None)

    def text(self): return self.property("text:mText")

    def visible(self):
        """Return True if the dump marks this element VISIBLE."""
        return self._p.get("getVisibility()", "") == "VISIBLE"

    def dump(self):
        """Return a multi-line debug dump of this item and its properties."""
        p = self._p
        return ("ViewItem(\n\tchildren = %d\n\tclassName = '%s'\n\tcode = '%s'\n\t" +
                "indent = %d\n\tproperties = {\n\t\t%s\n\t})") % (
            len(self._children), self._className, self._code, self._indent,
            '\n\t\t'.join(['"%s": %s' % (key, p[key]) for key in sorted(p.keys())]))

    def __str__(self):
        return ("ViewItem(className='%s', id=%s, bbox=%s)" % (
            self._className, self.id(), self.bbox()))
class View(object):
    """
    View provides interface to screen dumps from Android. It parses
    the dump to a hierarchy of ViewItems. find* methods enable searching
    for ViewItems based on their properties.
    """
    def __init__(self, screenshotDir, serialNumber, dump):
        """
        Parse dump (raw window dump text from the device) into a list
        of ViewItems. The raw dump is also written to a timestamped
        .view file under screenshotDir for later inspection/replay.
        Parse problems are collected into errors() instead of raised.
        """
        self.screenshotDir = screenshotDir
        self.serialNumber = serialNumber
        self._viewItems = []
        self._errors = []
        # Current dump format: class@8-hex-digit-id followed by properties.
        self._lineRegEx = re.compile("(?P<indent>\s*)(?P<class>[\w.$]+)@(?P<id>[0-9A-Fa-f]{8} )(?P<properties>.*)")
        # Older Android dump format with a shorter id field.
        self._olderAndroidLineRegEx = re.compile("(?P<indent>\s*)(?P<class>[\w.$]+)@(?P<id>\w)(?P<properties>.*)")
        # Property syntax: name=<length>,<data>; len gives the byte count
        # of the data field.
        self._propRegEx = re.compile("(?P<prop>(?P<name>[^=]+)=(?P<len>\d+),)(?P<data>[^\s]* ?)")
        self._dump = dump
        self._rawDumpFilename = self.screenshotDir + os.sep + fmbtgti._filenameTimestamp() + "-" + self.serialNumber + ".view"
        file(self._rawDumpFilename, "w").write(self._dump)
        try: self._parseDump(dump, self._rawDumpFilename)
        except Exception, e:
            # Any unexpected parser crash is recorded as a single error
            # entry with line number -1; the View stays usable but empty.
            self._errors.append((-1, "", "Parser error"))

    # Accessors for parse results.
    def viewItems(self): return self._viewItems
    def errors(self): return self._errors
    def dumpRaw(self): return self._dump

    def dumpItems(self, itemList = None):
        """Return a one-line-per-item string for itemList (default: all items)."""
        if itemList == None: itemList = self._viewItems
        l = []
        for i in itemList:
            l.append(self._dumpItem(i))
        return '\n'.join(l)

    def dumpTree(self, rootItem = None):
        """Return an indented tree dump starting from rootItem (default: all roots)."""
        l = []
        if rootItem != None:
            l.extend(self._dumpSubTree(rootItem, 0))
        else:
            for i in self._viewItems:
                if i._indent == 0:
                    l.extend(self._dumpSubTree(i, 0))
        return '\n'.join(l)

    def _dumpSubTree(self, viewItem, indent):
        # Recursively collect dump lines, indenting 4 spaces per level.
        l = []
        i = viewItem
        l.append(" "*indent + self._dumpItem(viewItem))
        for i in viewItem.children():
            l.extend(self._dumpSubTree(i, indent + 4))
        return l

    def _dumpItem(self, viewItem):
        # Single-line summary of one item: id, class, text, bbox.
        i = viewItem
        if i.text() != None: t = '"%s"' % (i.text(),)
        else: t = None
        return "id=%s cls=%s text=%s bbox=%s" % (
            i.id(), i.className(), t, i.bbox())

    def findItems(self, comparator, count=-1, searchRootItem=None, searchItems=None):
        """
        Return up to count items for which comparator(item) is true.
        count=-1 means unlimited. Search scope: searchRootItem's subtree,
        or the explicit searchItems list, or all items of this view.
        """
        foundItems = []
        if count == 0: return foundItems
        if searchRootItem != None:
            # find from searchRootItem and its children
            if comparator(searchRootItem):
                foundItems.append(searchRootItem)
            for c in searchRootItem.children():
                foundItems.extend(self.findItems(comparator, count=count-len(foundItems), searchRootItem=c))
        else:
            if searchItems != None:
                # find from listed items only
                searchDomain = searchItems
            else:
                # find from all items
                searchDomain = self._viewItems
            for i in searchDomain:
                if comparator(i):
                    foundItems.append(i)
                    if count > 0 and len(foundItems) >= count:
                        break
        return foundItems

    def findItemsByText(self, text, partial=False, count=-1, searchRootItem=None, searchItems=None):
        """
        Searches the GUI hierarchy for an object with the given text
        """
        if partial:
            c = lambda item: (
                item.properties().get("text:mText", "").find(text) != -1 )
        else:
            c = lambda item: (
                item.properties().get("text:mText", None) == text )
        return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems)

    def findItemsById(self, id, count=-1, searchRootItem=None, searchItems=None):
        """Find items whose mID property equals id."""
        c = lambda item: item.properties().get("mID", "") == id
        return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems)

    def findItemsByClass(self, className, partial=True, count=-1, searchRootItem=None, searchItems=None):
        """Find items by class name; partial=True matches substrings."""
        if partial: c = lambda item: item.className().find(className) != -1
        else: c = lambda item: item.className() == className
        return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems)

    def findItemsByIdAndClass(self, id, className, partial=True, count=-1, searchRootItem=None, searchItems=None):
        """Find items matching both an id and a class name."""
        idOk = self.findItemsById(id, count=-1, searchRootItem=searchRootItem)
        return self.findItemsByClass(className, partial=partial, count=count, searchItems=idOk)

    def findItemsByRawProps(self, s, count=-1, searchRootItem=None, searchItems=None):
        """Find items whose raw (unparsed) property string contains s."""
        c = lambda item: item._rawProps.find(s) != -1
        return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems)

    def save(self, fileOrDirName):
        """Copy the raw dump file to the given file or directory."""
        shutil.copy(self._rawDumpFilename, fileOrDirName)

    def _parseDump(self, dump, rawDumpFilename):
        """
        Process the raw dump data and create a tree of ViewItems
        """
        # This code originates from tema-android-adapter-3.2,
        # AndroidAdapter/guireader.py.
        self._viewItems = []
        cellLayout = ""
        parent = None
        previousItem = None
        currentIndent = 0
        visible = True
        self.TOP_PAGED_VIEW = ""

        for lineIndex, line in enumerate(dump.splitlines()):
            if line == "DONE.":
                break

            # separate indent, class and properties for each GUI object
            # TODO: branch here according to self._androidVersion
            matcher = self._lineRegEx.match(line)
            if not matcher:
                # FIXME: this hack falls back to old format,
                # should branch according to self._androidVersion!
                matcher = self._olderAndroidLineRegEx.match(line)
                if not matcher:
                    self._errors.append((lineIndex + 1, line, "Illegal line"))
                    continue # skip this line

            className = matcher.group("class")

            # Indent specifies the hierarchy level of the object
            indent = len(matcher.group("indent"))

            # If the indent is bigger that previous, this object is a
            # child for the previous object
            if indent > currentIndent:
                parent = self._viewItems[-1]
            elif indent < currentIndent:
                # Step back up one hierarchy level per removed indent.
                for tmp in range(0, currentIndent - indent):
                    parent = parent.parent()
            currentIndent = indent

            propertiesData = matcher.group("properties")
            properties = {}
            index = 0
            x = 0
            y = 0

            # Process the properties of each GUI object
            while index < len(propertiesData):
                # Separate name and value for each property [^=]*=
                propMatch = self._propRegEx.match(propertiesData[index:-1])
                # NOTE(review): if propMatch is None the .group() call
                # below raises AttributeError; it is swallowed by the
                # try/except in __init__ as a generic "Parser error".
                if not propMatch or len(propMatch.group("data")) < int(propMatch.group("len")):
                    if not propMatch.group("data"):
                        self._errors.append((lineIndex, propertiesData[index:-1], "Illegal property"))
                        return None
                    # Data was shorter than its declared length (it
                    # contained whitespace): re-extract it by offset.
                    startFrom = index + propertiesData[index:-1].find(propMatch.group("data"))
                    currFixedData = propertiesData[startFrom:(startFrom + int(propMatch.group("len")))]
                    length = int(propMatch.group("len"))
                    # [^=]+=?, == data
                    properties[propMatch.group("name")] = currFixedData[0:length].lstrip()
                else:
                    length = int(propMatch.group("len"))
                    # [^=]+=?, == data
                    properties[propMatch.group("name")] = propMatch.group("data")[0:length].lstrip()
                index += len(propMatch.group("prop")) + length + 1

            self._viewItems.append(ViewItem(matcher.group("class"), matcher.group("id"), indent, properties, parent, matcher.group("properties"), self._rawDumpFilename))
            if parent:
                parent.addChild(self._viewItems[-1])
        return self._viewItems

    def __str__(self):
        return 'View(items=%s, dump="%s")' % (
            len(self._viewItems), self._rawDumpFilename)
class _AndroidDeviceConnection:
    """
    Connection to the Android Device being tested.

    Talks to the device through two adb-forwarded TCP ports: one for the
    "monkey" tool (input events, getvar) and one for the window service
    (view hierarchy dumps). Class-level port counters are bumped for every
    instance so that parallel connections do not collide.
    """
    _m_host = 'localhost'
    _m_port = random.randint(20000, 29999)  # local port forwarded to monkey
    _w_host = 'localhost'
    _w_port = _m_port + 1                   # local port forwarded to window service

    def __init__(self, serialNumber, stopOnError=True):
        self._serialNumber = serialNumber
        self._stopOnError = stopOnError
        self._shellSupportsTar = False
        try:
            self._resetMonkey()
            self._resetWindow()
            # check supported features: on devices without a tar binary,
            # "adb shell tar" prints a single "... not found in .../bin" line
            outputLines = self._runAdb(["shell", "tar"])[1].splitlines()
            if len(outputLines) == 1 and "bin" in outputLines[0]:
                self._shellSupportsTar = False
            else:
                self._shellSupportsTar = True
        finally:
            # Next _AndroidDeviceConnection instance will use different ports
            self._w_port = _AndroidDeviceConnection._w_port
            self._m_port = _AndroidDeviceConnection._m_port
            _AndroidDeviceConnection._w_port += 100
            _AndroidDeviceConnection._m_port += 100

    def __del__(self):
        # best effort: the socket may never have been opened
        try: self._monkeySocket.close()
        except: pass

    def target(self):
        """Return the serial number of the connected device."""
        return self._serialNumber

    def _cat(self, remoteFilename):
        """Fetch a remote file via "adb pull" and return its contents."""
        fd, filename = tempfile.mkstemp("fmbtandroid-cat-")
        os.close(fd)
        self._runAdb(["pull", remoteFilename, filename], 0)
        with open(filename) as f:
            contents = f.read()
        os.remove(filename)
        return contents

    def _runAdb(self, command, expectedExitStatus=0):
        """Run an adb command against this device.

        Returns the (exitStatus, stdout, stderr) triple from _run. When
        stopOnError is False, any exit status is accepted.
        """
        if not self._stopOnError:
            expect = None
        else:
            expect = expectedExitStatus
        if isinstance(command, list):
            command = ["adb", "-s", self._serialNumber] + command
        else:
            command = ["adb", "-s", self._serialNumber, command]
        return _run(command, expectedExitStatus = expect)

    def _runSetupCmd(self, cmd, expectedExitStatus = 0):
        """Run an adb setup command; True on success, False on failure."""
        _adapterLog('setting up connections: "%s"' % (cmd,))
        exitStatus, _, _ = self._runAdb(cmd, expectedExitStatus)
        if exitStatus == 0: return True
        # BUG FIX: this branch previously returned True as well, so setup
        # failures were silently reported as successes.
        else: return False

    def _resetWindow(self):
        """(Re)start the window service and forward a local port to it."""
        setupCommands = [["shell", "service" , "call", "window", "1", "i32", "4939"],
                         ["forward", "tcp:"+str(self._w_port), "tcp:4939"]]
        for c in setupCommands:
            self._runSetupCmd(c)

    def _resetMonkey(self, timeout=3, pollDelay=.25):
        """(Re)start the monkey tool on the device and connect to it.

        Polls until a getvar round-trip succeeds or ``timeout`` expires.
        Returns True on success; raises AndroidConnectionError (or returns
        False when stopOnError is unset) on failure.
        """
        # exit status of a backgrounded monkey is meaningless, accept any
        self._runSetupCmd(["shell", "monkey", "--port", "1080"], None)
        time.sleep(pollDelay)
        endTime = time.time() + timeout
        while time.time() < endTime:
            self._runSetupCmd(["forward", "tcp:"+str(self._m_port), "tcp:1080"])
            try:
                self._monkeySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self._monkeySocket.connect((self._m_host, self._m_port))
                self._monkeySocket.setblocking(0)
                self._monkeySocket.settimeout(1.0)
                # a successful getvar proves the monkey end is alive
                self._platformVersion = self._monkeyCommand("getvar build.version.release", retry=0)[1]
                if len(self._platformVersion) > 0:
                    self._monkeySocket.settimeout(5.0)
                    return True
            except Exception:
                pass
            time.sleep(pollDelay)
        if self._stopOnError:
            msg = 'Android monkey error: cannot connect to "adb shell monkey --port 1080" to device %s' % (self._serialNumber)
            _adapterLog(msg)
            raise AndroidConnectionError(msg)
        else:
            return False

    def _monkeyCommand(self, command, retry=3):
        """Send one monkey protocol command, reconnecting on socket errors.

        Returns (True, payload-or-None) on "OK"/"OK:..." responses and
        (False, None) otherwise. Empty responses are retried.
        """
        try:
            self._monkeySocket.sendall(command + "\n")
            data = self._monkeySocket.recv(4096).strip()
            if len(data) == 0 and retry > 0:
                return self._monkeyCommand(command, retry-1)
            if data == "OK":
                return True, None
            elif data.startswith("OK:"):
                return True, data.split("OK:")[1]
            _adapterLog("monkeyCommand failing... command: '%s' response: '%s'" % (command, data))
            return False, None
        except socket.error:
            try: self._monkeySocket.close()
            except: pass
            if retry > 0:
                self._resetMonkey()
                return self._monkeyCommand(command, retry=retry-1)
            else:
                raise AndroidConnectionError('Android monkey socket connection lost while sending command "%s"' % (command,))

    def reboot(self, reconnect, firstBootAfterFlashing, timeout):
        """Reboot the device; optionally wait and reconnect.

        firstBootAfterFlashing additionally removes the launcher prefs
        (requires adb root). Returns False when reconnecting times out.
        """
        if firstBootAfterFlashing:
            self._runAdb("root")
            time.sleep(2)
            self._runAdb(["shell", "rm", "/data/data/com.android.launcher/shared_prefs/com.android.launcher2.prefs.xml"])
        self._runAdb("reboot")
        _adapterLog("rebooting " + self._serialNumber)
        if reconnect:
            self._runAdb("wait-for-device")
            endTime = time.time() + timeout
            while time.time() < endTime:
                try:
                    if self._resetMonkey(timeout=1, pollDelay=1):
                        break
                except AndroidConnectionError:
                    pass
                time.sleep(1)
            else:
                # while-loop ran to completion without break: gave up
                _adapterLog("reboot: reconnecting to " + self._serialNumber + " failed")
                return False
            self._resetWindow()
        return True

    def recvVariable(self, variableName):
        """Return the value of a monkey "getvar" variable, or None."""
        ok, value = self._monkeyCommand("getvar " + variableName)
        if ok: return value
        else:
            # LOG: getvar variableName failed
            return None

    def recvScreenSize(self):
        """Return (width, height) in pixels, or (None, None) on failure."""
        try:
            height = int(self.recvVariable("display.height"))
            width = int(self.recvVariable("display.width"))
        except TypeError:
            # recvVariable returned None
            return None, None
        return width, height

    def recvTopAppWindow(self):
        """Return (topAppName, topWindowName) parsed from dumpsys window.

        Either element may be None when parsing fails; the regex differs
        between platform versions before/after 4.2.
        """
        _, output, _ = self._runAdb(["shell", "dumpsys", "window"], 0)
        if self._platformVersion >= "4.2":
            s = re.findall("mCurrentFocus=Window\{(#?[0-9A-Fa-f]{8})( [^ ]*)? (?P<winName>[^}]*)\}", output)
        else:
            s = re.findall("mCurrentFocus=Window\{(#?[0-9A-Fa-f]{8}) (?P<winName>[^ ]*) [^ ]*\}", output)
        if s and len(s[0][-1].strip()) > 1: topWindowName = s[0][-1]
        else: topWindowName = None
        s = re.findall("mFocusedApp=AppWindowToken.*ActivityRecord\{#?[0-9A-Fa-f]{8}( [^ ]*)? (?P<appName>[^}]*)\}", output)
        if s and len(s[0][-1].strip()) > 1:
            topAppName = s[0][-1].strip()
        else:
            topAppName = None
        return topAppName, topWindowName

    def sendTap(self, xCoord, yCoord):
        """Send a tap at pixel coordinates; True on success."""
        return self._monkeyCommand("tap " + str(xCoord) + " " + str(yCoord))[0]

    def sendKeyUp(self, key):
        """Release a key (e.g. "KEYCODE_HOME"); True on success."""
        return self._monkeyCommand("key up " + key)[0]

    def sendKeyDown(self, key):
        """Press a key down without releasing it; True on success."""
        return self._monkeyCommand("key down " + key)[0]

    def sendTouchUp(self, xCoord, yCoord):
        """Lift the touch at the given coordinates; True on success."""
        return self._monkeyCommand("touch up " + str(xCoord) + " " + str(yCoord))[0]

    def sendTouchDown(self, xCoord, yCoord):
        """Start a touch at the given coordinates; True on success."""
        return self._monkeyCommand("touch down " + str(xCoord) + " " + str(yCoord))[0]

    def sendTouchMove(self, xCoord, yCoord):
        """Move an ongoing touch to the given coordinates; True on success."""
        return self._monkeyCommand("touch move " + str(xCoord) + " " + str(yCoord))[0]

    def sendTrackBallMove(self, dx, dy):
        """Send a relative trackball move; True on success."""
        return self._monkeyCommand("trackball " + str(dx) + " " + str(dy))[0]

    def sendPress(self, key):
        """Press and release a key; True on success."""
        return self._monkeyCommand("press " + key)[0]

    def sendType(self, text):
        """Type text, translating newlines and spaces to key presses.

        Returns False (and logs) as soon as sending any word fails.
        """
        for lineIndex, line in enumerate(text.split('\n')):
            if lineIndex > 0: self.sendPress("KEYCODE_ENTER")
            for wordIndex, word in enumerate(line.split(' ')):
                if wordIndex > 0: self.sendPress("KEYCODE_SPACE")
                if len(word) > 0 and not self._monkeyCommand("type " + word)[0]:
                    _adapterLog('sendType("%s") failed when sending word "%s"' %
                                (text, word))
                    return False
        return True

    def recvScreenshot(self, filename):
        """
        Capture a screenshot and copy the image file to given path or
        system temp folder.

        Returns True on success, otherwise False.
        """
        remotefile = '/sdcard/' + os.path.basename(filename)
        self._runAdb(['shell', 'screencap', '-p', remotefile], 0)
        status, out, err = self._runAdb(['pull', remotefile, filename], [0, 1])
        if status != 0:
            raise FMBTAndroidError("Failed to fetch screenshot from the device: %s. SD card required." % ((out + err).strip(),))
        status, _, _ = self._runAdb(['shell', 'rm', remotefile], 0)
        return True

    def shellSOE(self, shellCommand):
        """Run a shell command on the device.

        Returns (exitStatus, stdout, stderr); exitStatus is None when it
        could not be read back from the device.
        """
        fd, filename = tempfile.mkstemp(prefix="fmbtandroid-shellcmd-")
        remotename = '/sdcard/' + os.path.basename(filename)
        os.write(fd, shellCommand + "\n")
        os.close(fd)
        self._runAdb(["push", filename, remotename], 0)
        # run the command, capturing stdout/stderr/exit status in files
        cmd = "shell 'source %s >%s.out 2>%s.err; echo $? > %s.status" % ((remotename,)*4)
        if self._shellSupportsTar:
            # do everything we can in one command to minimise adb
            # commands: execute command, record results, package,
            # print uuencoded package and remove remote temp files
            cmd += "; cd %s; tar czf - %s.out %s.err %s.status | uuencode %s.tar.gz; rm -f %s*'" % (
                (os.path.dirname(remotename),) + ((os.path.basename(remotename),) * 5))
            status, output, error = self._runAdb(cmd, 0)
            with open(filename, "w") as f:
                f.write(output)
            uu.decode(filename, out_file=filename + ".tar.gz")
            import tarfile
            tar = tarfile.open(filename + ".tar.gz")
            basename = os.path.basename(filename)
            stdout = tar.extractfile(basename + ".out").read()
            stderr = tar.extractfile(basename + ".err").read()
            try: exitstatus = int(tar.extractfile(basename + ".status").read())
            except: exitstatus = None
            os.remove(filename)
            os.remove(filename + ".tar.gz")
        else:
            # need to pull files one by one, slow.
            cmd += "'"
            self._runAdb(cmd, 0)
            stdout = self._cat(remotename + ".out")
            stderr = self._cat(remotename + ".err")
            try: exitstatus = int(self._cat(remotename + ".status"))
            except: exitstatus = None
            self._runAdb(["shell", "rm -f "+remotename])
        return exitstatus, stdout, stderr

    def recvViewData(self, retry=3):
        """Dump the foreground window's view hierarchy via the window port.

        Returns the raw dump text (up to the "DONE" line); retries with a
        window-service reset, then raises AndroidConnectionError.
        """
        _dataBufferLen = 4096 * 16
        try:
            self._windowSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._windowSocket.connect( (self._w_host, self._w_port) )
            # DUMP -1: get foreground window info
            # NOTE(review): sendall() returns None on success and raises on
            # error, so this "== 0" check can never fire -- confirm intent.
            if self._windowSocket.sendall("DUMP -1\n") == 0:
                # LOG: readGUI cannot write to window socket
                raise AndroidConnectionError("writing socket failed")
            # Read until a "DONE" line
            data = ""
            while True:
                try: newData = self._windowSocket.recv(_dataBufferLen)
                except socket.timeout:
                    continue
                data += newData
                if data.splitlines()[-1] == "DONE" or newData == '':
                    break
            return data
        except Exception as msg:
            _adapterLog("recvViewData: window socket error: %s" % (msg,))
            if retry > 0:
                self._resetWindow()
                return self.recvViewData(retry=retry-1)
            else:
                msg = "recvViewData: cannot read window socket"
                _adapterLog(msg)
                raise AndroidConnectionError(msg)
        finally:
            try: self._windowSocket.close()
            except: pass
class FMBTAndroidError(Exception):
    """Base class for all fMBT Android adapter errors."""

class AndroidConnectionError(FMBTAndroidError):
    """Raised when communication with the device fails."""

class AndroidConnectionLost(AndroidConnectionError):
    """Raised when an established device connection drops."""

class AndroidDeviceNotFound(AndroidConnectionError):
    """Raised when the requested device cannot be located."""
|
import vtk, sys
class Scene3D:
'''A class to manage a 3D scene using VTK actors.
Each instance of this class has its own `vtkRenderer`, it can be used
to display the scene interactlively and/or save a still image in png
format. The actual 3D rendering is done by calling the `render` method.
'''
def __init__(self, display=True, ren_size=(600, 600), name='scene_3d', background=(1., 1., 1.)):
'''Initialization called when creating a new `Scene3D` object.
*Parameters*
**display**: a boolean to control if the scene has to be displayed
interactively to the user (default True). If True, a frame counter
is used when saving images using the 's' key pressed callback. If
False a single image is save using the base name.
**ren_size**: a tuple with two value to set the size of the image in
pixels (defalut 600x600).
**name**: a string to used to describe the scene, it is used in
particular when saving the scene as an image (default is 'scene_3d').
'''
ren = vtk.vtkRenderer()
ren.SetBackground(background)
self.renderer = ren
# Create a window for the renderer
self.renWin = vtk.vtkRenderWindow()
self.renWin.AddRenderer(self.renderer)
self.renWin.SetSize(ren_size)
self.display = display
self.name = name
self.frame_counter = 0
self.verbose = True
def add(self, actor):
'''Add a given actor to the 3D scene.
*Parameters*
**actor** a VTK actor to add to the renderer.
'''
self.renderer.AddActor(actor)
def get_renderer(self):
'''Get the vtk renderer attached to this 3d scene.'''
return self.renderer
def set_camera(self, cam):
'''Set the camera for the 3D scene.
*Parameters*
**cam** a VTK camera to attach to the renderer.
'''
self.renderer.SetActiveCamera(cam)
def save_frame(self):
'''Render the 3D scene and save a png image.
When using the internal frame counter, it is incremented by 1 each
time this method is called.'''
w2i = vtk.vtkWindowToImageFilter()
writer = vtk.vtkPNGWriter()
w2i.SetInput(self.renWin)
w2i.Update()
writer.SetInputConnection(w2i.GetOutputPort())
if self.display:
file_name = '%s_%04d.png' % (self.name, self.frame_counter)
else:
file_name = '%s.png' % self.name
if self.verbose:
print 'writing still image ' + file_name
writer.SetFileName(file_name)
self.renWin.Render()
writer.Write()
self.frame_counter += 1
del writer, w2i
def print_camera_settings(self):
'''Print out the active camera settings.'''
cam = self.renderer.GetActiveCamera()
print 'Camera settings:'
print ' * position: %s' % (cam.GetPosition(),)
print ' * focal point: %s' % (cam.GetFocalPoint(),)
print ' * up vector: %s' % (cam.GetViewUp(),)
print ' * clipping range: %s' % (cam.GetViewUp(),)
def pymicro_callback(self, obj, event):
'''Standard key pressed callback to attach to the 3d scene.
This fuction can be used directly to be attached to the rendering
window `vtkRenderWindowInteractor`. It handles user events by
pressing keys:
*s save a png image of the scene
*c print the current camera settings
*q exit the interactive rendering
'''
key = obj.GetKeySym()
if key == 's':
self.save_frame()
elif key == 'c':
self.print_camera_settings()
elif key == 'q':
if self.verbose:
print "Bye, thanks for using pymicro."
sys.exit(0)
def render(self, key_pressed_callback=None):
'''Render the VTK scene in 3D.
This function does the actual 3D rendering using the `vtkRenderer`
of the object. It can be used to display the scene interactlively
and/or save a still image in png format.
*Parameters*
**key_pressed_callback** a function (functions are first class variables)
called in interactive mode when a key is pressed.
'''
if self.display:
# start the initialization and rendering
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(self.renWin)
if key_pressed_callback:
iren.AddObserver("KeyPressEvent", self.pymicro_callback)
self.renWin.Render()
iren.Initialize()
iren.Start()
else:
self.save_frame()
Make Scene3D simpler to use, with better defaults
import vtk, sys, os
class Scene3D:
'''A class to manage a 3D scene using VTK actors.
Each instance of this class has its own `vtkRenderer`, it can be used
to display the scene interactlively and/or save a still image in png
format. The actual 3D rendering is done by calling the `render` method.
'''
def __init__(self, display=True, ren_size=(600, 600), name=None, background=(1., 1., 1.)):
'''Initialization called when creating a new `Scene3D` object.
:param display: a boolean to control if the scene has to be displayed
interactively to the user (default True). If True, a frame counter
is used when saving images using the 's' key pressed callback. If
False a single image is save using the base name.
:param ren_size: a tuple with two value to set the size of the image
in pixels (defalut 600x600).
:param name: a string to used to describe the scene, it is used in
particular when saving the scene as an image. If not set, the file
name of the Python script will be used or 'scene_3d' if run
interactively.
:param background: the background of the scene (white by default).
'''
ren = vtk.vtkRenderer()
ren.SetBackground(background)
self.renderer = ren
# Create a window for the renderer
self.renWin = vtk.vtkRenderWindow()
self.renWin.AddRenderer(self.renderer)
self.renWin.SetSize(ren_size)
self.display = display
if name == None:
if '__file__' in globals():
self.name = os.path.splitext(__file__)[0]
else:
self.name = 'scene_3d'
else:
self.name = name
self.frame_counter = 0
self.verbose = True
def add(self, actor):
'''Add a given actor to the 3D scene.
*Parameters*
**actor** a VTK actor to add to the renderer.
'''
self.renderer.AddActor(actor)
def get_renderer(self):
'''Get the vtk renderer attached to this 3d scene.'''
return self.renderer
def set_camera(self, cam):
'''Set the camera for the 3D scene.
*Parameters*
**cam** a VTK camera to attach to the renderer.
'''
self.renderer.SetActiveCamera(cam)
def save_frame(self):
'''Render the 3D scene and save a png image.
When using the internal frame counter, it is incremented by 1 each
time this method is called.'''
w2i = vtk.vtkWindowToImageFilter()
writer = vtk.vtkPNGWriter()
w2i.SetInput(self.renWin)
w2i.Update()
writer.SetInputConnection(w2i.GetOutputPort())
if self.display:
file_name = '%s_%04d.png' % (self.name, self.frame_counter)
else:
file_name = '%s.png' % self.name
if self.verbose:
print 'writing still image ' + file_name
writer.SetFileName(file_name)
self.renWin.Render()
writer.Write()
self.frame_counter += 1
del writer, w2i
def print_camera_settings(self):
'''Print out the active camera settings.'''
cam = self.renderer.GetActiveCamera()
print 'Camera settings:'
print ' * position: %s' % (cam.GetPosition(),)
print ' * focal point: %s' % (cam.GetFocalPoint(),)
print ' * up vector: %s' % (cam.GetViewUp(),)
print ' * clipping range: %s' % (cam.GetViewUp(),)
def pymicro_callback(self, obj, event):
'''Standard key pressed callback to attach to the 3d scene.
This fuction can be used directly to be attached to the rendering
window `vtkRenderWindowInteractor`. It handles user events by
pressing keys:
*s save a png image of the scene
*c print the current camera settings
*q exit the interactive rendering
'''
key = obj.GetKeySym()
if key == 's':
self.save_frame()
elif key == 'c':
self.print_camera_settings()
elif key == 'q':
if self.verbose:
print "Bye, thanks for using pymicro."
sys.exit(0)
def render(self, key_pressed_callback=None):
'''Render the VTK scene in 3D.
This function does the actual 3D rendering using the `vtkRenderer`
of the object. It can be used to display the scene interactlively
and/or save a still image in png format.
*Parameters*
**key_pressed_callback** a function (functions are first class variables)
called in interactive mode when a key is pressed.
'''
if self.display:
# start the initialization and rendering
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(self.renWin)
if key_pressed_callback:
iren.AddObserver("KeyPressEvent", self.pymicro_callback)
self.renWin.Render()
iren.Initialize()
iren.Start()
else:
self.save_frame()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from cryptography.fernet import Fernet
from girder.utility import config
from girder.utility.model_importer import ModelImporter
from girder.exceptions import AccessException
from girder.plugins.minerva.constants import PluginSettings
def findNamedFolder(currentUser, user, parent, parentType, name, create=False,
                    joinShareGroup=None, public=False):
    """Find a child folder of ``parent`` named ``name``, creating it on demand.

    Returns the filtered folder document, or None when no such folder exists
    and ``create`` is falsy (or there is no current user).
    NOTE(review): ``user`` is unused; kept for interface compatibility.
    """
    folderModel = ModelImporter.model('folder')
    matches = [folderModel.filter(f, currentUser)
               for f in folderModel.childFolders(
                   parent=parent, parentType=parentType,
                   user=currentUser, filters={'name': name})]
    # At most one match is expected: we look up a uniquely named child folder.
    if matches:
        return matches[0]
    if not (create and currentUser):
        return None
    newFolder = folderModel.createFolder(
        parent, name, parentType=parentType, public=public,
        creator=currentUser)
    if joinShareGroup:
        datasetSharingGroup = ModelImporter.model('group').findOne(query={
            'name': PluginSettings.DATASET_SHARING_GROUP_NAME
        })
        # Grant the sharing group read access (level 0) on the new folder.
        folderModel.setGroupAccess(
            newFolder, datasetSharingGroup, 0, currentUser=currentUser, save=True)
    return newFolder
def findMinervaFolder(currentUser, user, create=False):
    """Return the user's top-level Minerva folder (None if absent)."""
    return findNamedFolder(
        currentUser, user, user, 'user', PluginSettings.MINERVA_FOLDER, create)
def findPublicFolder(currentUser, user, create=False):
    """Return the user's 'Public' folder (None if absent)."""
    return findNamedFolder(currentUser, user, user, 'user', 'Public', create)
def findSharedFolder(currentUser, user, create=False):
    """Return the user's shared Minerva dataset folder.

    On creation the folder is private and the dataset-sharing group is
    granted access.
    """
    return findNamedFolder(
        currentUser, user, user, 'user', PluginSettings.MINERVA_SHARED_DATASET,
        create, joinShareGroup=True, public=False)
def findDatasetFolder(currentUser, user, create=False):
    """Return the dataset folder inside the user's Minerva folder."""
    minervaFolder = findMinervaFolder(currentUser, user, create)
    if minervaFolder is None:
        return None
    return findNamedFolder(currentUser, user, minervaFolder, 'folder',
                           PluginSettings.DATASET_FOLDER, create)
def findSharedDatasetFolders(currentUser):
    """Return private shared Minerva dataset folders visible to the user.

    :raises AccessException: when the sharing group does not exist or the
        current user is not a member of it.
    """
    groupName = PluginSettings.DATASET_SHARING_GROUP_NAME
    datasetSharingGroup = ModelImporter.model('group').findOne(
        query={'name': groupName})
    if not datasetSharingGroup:
        raise AccessException('user group "{0}" doesn\'t exist'.format(groupName))
    if datasetSharingGroup['_id'] not in currentUser['groups']:
        raise AccessException(
            'user doesn\'t belong to user group "{0}"'.format(groupName))
    return ModelImporter.model('folder').find({
        'public': False,
        'baseParentType': 'user',
        'parentCollection': 'user',
        'access.groups.id': datasetSharingGroup['_id'],
        'name': PluginSettings.MINERVA_SHARED_DATASET
    })
def findSourceFolder(currentUser, user, create=False):
    """Return the source folder inside the user's Minerva folder."""
    minervaFolder = findMinervaFolder(currentUser, user, create)
    if minervaFolder is None:
        return None
    return findNamedFolder(currentUser, user, minervaFolder, 'folder',
                           PluginSettings.SOURCE_FOLDER, create)
def findSessionFolder(currentUser, user, create=False):
    """Return the session folder inside the user's Minerva folder."""
    minervaFolder = findMinervaFolder(currentUser, user, create)
    if minervaFolder is None:
        return None
    return findNamedFolder(currentUser, user, minervaFolder, 'folder',
                           PluginSettings.SESSION_FOLDER, create)
def findNamedCollection(currentUser, name, create=False):
    """Find a collection named ``name``; optionally create a public one."""
    collectionModel = ModelImporter.model('collection')
    matches = [collectionModel.filter(c, currentUser)
               for c in collectionModel.textSearch(name, user=currentUser)]
    # Text search on the name is expected to yield at most one collection.
    if matches:
        return matches[0]
    if create:
        return collectionModel.createCollection(
            name, description='', public=True, creator=currentUser)
    return None
def findMinervaCollection(currentUser, create=False):
    """Return the global Minerva collection (create it when asked)."""
    return findNamedCollection(
        currentUser, PluginSettings.MINERVA_COLLECTION, create)
def findAnalysisFolder(currentUser, create=False):
    """Return the public 'analysis' folder in the Minerva collection."""
    minervaCollection = findMinervaCollection(currentUser, create)
    if minervaCollection is None:
        return None
    return findNamedFolder(currentUser, currentUser, minervaCollection,
                           'collection', 'analysis', create, public=True)
def findAnalysisByName(currentUser, name):
    """Return the first analysis item matching ``name``, or None."""
    analysisFolder = findAnalysisFolder(currentUser)
    filters = {'$text': {'$search': name}}
    analyses = [ModelImporter.model('item').filter(item, currentUser)
                for item in ModelImporter.model('folder').childItems(
                    folder=analysisFolder, filters=filters)]
    return analyses[0] if analyses else None
def mM(item, minerva_metadata=None):
    """Get or set the 'minerva' metadata namespace on an item.

    With no metadata given, return the current namespace ({} when unset);
    otherwise store the given metadata and return it.
    """
    if minerva_metadata is not None:
        return updateMinervaMetadata(item, minerva_metadata)
    return item.get('meta', {}).get('minerva', {})
def updateMinervaMetadata(item, minerva_metadata):
    """Replace the 'minerva' metadata namespace on ``item`` and persist it."""
    item.setdefault('meta', {})['minerva'] = minerva_metadata
    ModelImporter.model('item').setMetadata(item, item['meta'])
    return item['meta']['minerva']
def decryptCredentials(credentials):
    """Decrypt credentials with the Minerva-configured Fernet key."""
    key = config.getConfig()['minerva']['crypto_key']
    return Fernet(key).decrypt(bytes(credentials))
def encryptCredentials(credentials):
    """Encrypt credentials with the Minerva-configured Fernet key."""
    key = config.getConfig()['minerva']['crypto_key']
    return Fernet(key).encrypt(bytes(credentials))
def jobMM(job, minerva_metadata=None, save=True):
    """Get or set the 'minerva' metadata namespace on a job document.

    With no metadata given, return the current namespace ({} when unset);
    otherwise store the metadata, optionally save the job, and return it.
    """
    if minerva_metadata is None:
        if 'meta' not in job or 'minerva' not in job['meta']:
            return {}
        return job['meta']['minerva']
    job.setdefault('meta', {})['minerva'] = minerva_metadata
    if save:
        ModelImporter.model('job', 'jobs').save(job)
    return job['meta']['minerva']
def addJobOutput(job, output, output_type='dataset', save=True):
    """Append an output record to the job's minerva metadata.

    :raises NotImplementedError: for any output type other than 'dataset'.
    """
    if output_type != 'dataset':
        raise NotImplementedError('unknown job output %s' % output_type)
    mm = jobMM(job)
    outputs = mm.get('outputs', [])
    outputs.append({'type': 'dataset', 'dataset_id': output.get('_id')})
    mm['outputs'] = outputs
    jobMM(job, mm, save)
Remove an unnecessary requirement when looking up shared folders
and apply a format update
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from cryptography.fernet import Fernet
from girder.utility import config
from girder.utility.model_importer import ModelImporter
from girder.exceptions import AccessException
from girder.plugins.minerva.constants import PluginSettings
def findNamedFolder(currentUser, user, parent, parentType, name, create=False,
                    joinShareGroup=None, public=False):
    """Find a child folder of ``parent`` named ``name``, creating it on demand.

    Returns the filtered folder document, or None when no such folder exists
    and ``create`` is falsy (or there is no current user).
    NOTE(review): ``user`` is unused; kept for interface compatibility.
    """
    folderModel = ModelImporter.model('folder')
    matches = [folderModel.filter(f, currentUser)
               for f in folderModel.childFolders(
                   parent=parent, parentType=parentType,
                   user=currentUser, filters={'name': name})]
    # At most one match is expected: we look up a uniquely named child folder.
    if matches:
        return matches[0]
    if not (create and currentUser):
        return None
    newFolder = folderModel.createFolder(
        parent, name, parentType=parentType, public=public,
        creator=currentUser)
    if joinShareGroup:
        datasetSharingGroup = ModelImporter.model('group').findOne(query={
            'name': PluginSettings.DATASET_SHARING_GROUP_NAME
        })
        # Grant the sharing group read access (level 0) on the new folder.
        folderModel.setGroupAccess(
            newFolder, datasetSharingGroup, 0, currentUser=currentUser, save=True)
    return newFolder
def findMinervaFolder(currentUser, user, create=False):
    """Return the user's top-level Minerva folder (None if absent)."""
    return findNamedFolder(
        currentUser, user, user, 'user', PluginSettings.MINERVA_FOLDER, create)
def findPublicFolder(currentUser, user, create=False):
    """Return the user's 'Public' folder (None if absent)."""
    return findNamedFolder(currentUser, user, user, 'user', 'Public', create)
def findSharedFolder(currentUser, user, create=False):
    """Return the user's shared Minerva dataset folder.

    On creation the folder is private and the dataset-sharing group is
    granted access.
    """
    return findNamedFolder(
        currentUser, user, user, 'user', PluginSettings.MINERVA_SHARED_DATASET,
        create, joinShareGroup=True, public=False)
def findDatasetFolder(currentUser, user, create=False):
    """Return the dataset folder inside the user's Minerva folder."""
    minervaFolder = findMinervaFolder(currentUser, user, create)
    if minervaFolder is None:
        return None
    return findNamedFolder(currentUser, user, minervaFolder, 'folder',
                           PluginSettings.DATASET_FOLDER, create)
def findSharedDatasetFolders(currentUser):
    """Return shared Minerva dataset folders visible to the user.

    :raises AccessException: when the sharing group does not exist or the
        current user is not a member of it.
    """
    groupName = PluginSettings.DATASET_SHARING_GROUP_NAME
    datasetSharingGroup = ModelImporter.model('group').findOne(
        query={'name': groupName})
    if not datasetSharingGroup:
        raise AccessException('user group "{0}" doesn\'t exist'.format(groupName))
    if datasetSharingGroup['_id'] not in currentUser['groups']:
        raise AccessException(
            'user doesn\'t belong to user group "{0}"'.format(groupName))
    return ModelImporter.model('folder').find({
        'baseParentType': 'user',
        'parentCollection': 'user',
        'access.groups.id': datasetSharingGroup['_id'],
        'name': PluginSettings.MINERVA_SHARED_DATASET
    })
def findSourceFolder(currentUser, user, create=False):
    """Return the source folder inside the user's Minerva folder."""
    minervaFolder = findMinervaFolder(currentUser, user, create)
    if minervaFolder is None:
        return None
    return findNamedFolder(currentUser, user, minervaFolder, 'folder',
                           PluginSettings.SOURCE_FOLDER, create)
def findSessionFolder(currentUser, user, create=False):
    """Return the session folder inside the user's Minerva folder."""
    minervaFolder = findMinervaFolder(currentUser, user, create)
    if minervaFolder is None:
        return None
    return findNamedFolder(currentUser, user, minervaFolder, 'folder',
                           PluginSettings.SESSION_FOLDER, create)
def findNamedCollection(currentUser, name, create=False):
    """Find a collection named ``name``; optionally create a public one."""
    collectionModel = ModelImporter.model('collection')
    matches = [collectionModel.filter(c, currentUser)
               for c in collectionModel.textSearch(name, user=currentUser)]
    # Text search on the name is expected to yield at most one collection.
    if matches:
        return matches[0]
    if create:
        return collectionModel.createCollection(
            name, description='', public=True, creator=currentUser)
    return None
def findMinervaCollection(currentUser, create=False):
    """Return the global Minerva collection (create it when asked)."""
    return findNamedCollection(
        currentUser, PluginSettings.MINERVA_COLLECTION, create)
def findAnalysisFolder(currentUser, create=False):
    """Return the public 'analysis' folder in the Minerva collection."""
    minervaCollection = findMinervaCollection(currentUser, create)
    if minervaCollection is None:
        return None
    return findNamedFolder(currentUser, currentUser, minervaCollection,
                           'collection', 'analysis', create, public=True)
def findAnalysisByName(currentUser, name):
    """Return the first analysis item matching ``name``, or None."""
    analysisFolder = findAnalysisFolder(currentUser)
    filters = {'$text': {'$search': name}}
    analyses = [ModelImporter.model('item').filter(item, currentUser)
                for item in ModelImporter.model('folder').childItems(
                    folder=analysisFolder, filters=filters)]
    return analyses[0] if analyses else None
def mM(item, minerva_metadata=None):
    """Get or set the 'minerva' metadata namespace on an item.

    With no metadata given, return the current namespace ({} when unset);
    otherwise store the given metadata and return it.
    """
    if minerva_metadata is not None:
        return updateMinervaMetadata(item, minerva_metadata)
    return item.get('meta', {}).get('minerva', {})
def updateMinervaMetadata(item, minerva_metadata):
    """Replace the 'minerva' metadata namespace on ``item`` and persist it."""
    item.setdefault('meta', {})['minerva'] = minerva_metadata
    ModelImporter.model('item').setMetadata(item, item['meta'])
    return item['meta']['minerva']
def decryptCredentials(credentials):
    """Decrypt credentials with the Minerva-configured Fernet key."""
    key = config.getConfig()['minerva']['crypto_key']
    return Fernet(key).decrypt(bytes(credentials))
def encryptCredentials(credentials):
    """Encrypt credentials with the Minerva-configured Fernet key."""
    key = config.getConfig()['minerva']['crypto_key']
    return Fernet(key).encrypt(bytes(credentials))
def jobMM(job, minerva_metadata=None, save=True):
    """Get or set the 'minerva' metadata namespace on a job document.

    With no metadata given, return the current namespace ({} when unset);
    otherwise store the metadata, optionally save the job, and return it.
    """
    if minerva_metadata is None:
        if 'meta' not in job or 'minerva' not in job['meta']:
            return {}
        return job['meta']['minerva']
    job.setdefault('meta', {})['minerva'] = minerva_metadata
    if save:
        ModelImporter.model('job', 'jobs').save(job)
    return job['meta']['minerva']
def addJobOutput(job, output, output_type='dataset', save=True):
    """Append an output record to the job's minerva metadata.

    :raises NotImplementedError: for any output type other than 'dataset'.
    """
    if output_type != 'dataset':
        raise NotImplementedError('unknown job output %s' % output_type)
    mm = jobMM(job)
    outputs = mm.get('outputs', [])
    outputs.append({'type': 'dataset', 'dataset_id': output.get('_id')})
    mm['outputs'] = outputs
    jobMM(job, mm, save)
|
import logging
import string
import os
from matplotlib import pyplot as plt
plt.ioff()
AXIS_FONT = {'fontname': 'Arial', 'size': '14'}
from math import log as mlog2
from collections import Counter, defaultdict
from read import map_to_precursors, map_to_precursors_on_fly, precursor_sequence
from utils import safe_dirs
from progressbar import ProgressBar
from seqcluster.libs.utils import file_exists
from seqcluster.function.rnafold import run_rnafold
from seqcluster.html import HTML
from seqcluster import templates
logger = logging.getLogger('html')
def _get_link(c):
"""Gives html link tag for cluster link information"""
return "<a href=%s/maps.html>%s</a>" % (c, c)
def _get_ann(dbs, features):
"""
Gives format to annotation for html table output
"""
value = ""
for db, feature in zip(dbs, features):
value += db + ":" + feature
return value
def _parse(profile, size):
total = Counter()
for sample in profile:
for pos in range(len(size)):
total[pos] += profile[sample][pos]
return total.values()
def make_profile(data, out_dir, args):
    """
    Make data report for each cluster.

    For every cluster: create its output directory, map its sequences onto
    the precursor to build a positional expression profile, and attach the
    precursor sequence, per-position colors and RNAfold output to ``data``.

    :param data: seqcluster data object; data[0] maps cluster id -> info dict
    :param out_dir: directory where per-cluster folders are created
    :param args: command-line arguments (``ref`` is used here)
    :returns: the updated ``data`` object
    """
    # NOTE: the previously assigned main_table/header locals were never
    # used and have been removed (dead code).
    n = len(data[0])
    bar = ProgressBar(maxval=n)
    bar.start()
    bar.update(0)
    for itern, c in enumerate(data[0]):
        bar.update(itern)
        logger.debug("creating cluser: {}".format(c))
        safe_dirs(os.path.join(out_dir, c))
        valid, ann, pos_structure = _single_cluster(c, data, args)
        data[0][c].update({'profile': pos_structure})
        loci = data[0][c]['loci']
        data[0][c]['precursor'] = {"seq": precursor_sequence(loci[0][0:5], args.ref)}
        data[0][c]['precursor']["colors"] = _parse(data[0][c]['profile'], data[0][c]['precursor']["seq"])
        data[0][c]['precursor'].update(run_rnafold(data[0][c]['precursor']['seq']))
    return data
def _expand(dat, counts, start, end):
"""
expand the same counts from start to end
"""
for pos in range(start, end):
for s in counts:
dat[s][pos] += counts[s]
return dat
def _convert_to_df(in_file, freq, raw_file):
    """
    Convert mapped positions into per-sample, per-position coverage.

    :param in_file: either a BED-like file name or a dict name -> (start, end)
    :param freq: dict sequence name -> per-sample counts
    :param raw_file: optional path where raw rows are dumped (dict input only)
    :returns: dict sample -> Counter position -> log2(count + 1)
    """
    dat = defaultdict(Counter)
    if isinstance(in_file, (str, unicode)):
        with open(in_file) as in_handle:
            for line in in_handle:
                cols = line.strip().split("\t")
                counts = freq[cols[3]]
                dat = _expand(dat, counts, int(cols[1]), int(cols[2]))
    else:
        # BUG FIX: raw_file handle was opened and never closed; it is now
        # closed in a finally block (py2-only "print >>" replaced by write).
        out_handle = open(raw_file, "w") if raw_file else None
        try:
            for name in in_file:
                counts = freq[name]
                if out_handle:
                    out_handle.write("%s\t%s\t%s\t%s\t%s\t%s\n" % (
                        "chr", in_file[name][0], in_file[name][1],
                        name, sum(counts.values()), "+"))
                dat = _expand(dat, counts, in_file[name][0], in_file[name][1])
        finally:
            if out_handle:
                out_handle.close()
    # log-scale the accumulated counts
    for s in dat:
        for p in dat[s]:
            dat[s][p] = mlog2(dat[s][p] + 1)
    return dat
def _make(c):
    """
    Collect annotation summaries for one cluster.

    Returns the cluster's list of valid annotation databases and, for each
    annotated database present in that list, a comma-separated string of its
    unique annotations.
    """
    ann = defaultdict(list)
    for pos in c['ann']:
        for db in pos:
            ann[db] += list(pos[db])
    logger.debug(ann)
    valid = list(c['valid'])
    ann_list = [", ".join(list(set(ann[feature])))
                for feature in ann if feature in valid]
    return valid, ann_list
def _single_cluster(c, data, args):
    """
    Map sequences on precursors and create
    expression profile

    :param c: cluster id (key into ``data[0]``)
    :param data: seqcluster data structure
    :param args: command-line options (``razer``, ``debug``, ``ref``, ...)
    :return: tuple ``(valid, ann, profile)``; ``(0, 0, {})`` when the locus
        is skipped

    NOTE(review): ``out_file`` is read (``file_exists(out_file)`` and the
    razer branch) before any assignment in this function, which raises a
    NameError at runtime.  Presumably a path such as
    ``<out_dir>/<c>/maps.tsv`` was dropped in an earlier edit -- TODO confirm
    against upstream seqcluster before relying on this code path.
    """
    valid, ann = 0, 0
    raw_file = None
    # freq maps read name -> {sample: count}; built from a list of
    # single-entry dicts (Python-2 style ``keys()[0]`` indexing).
    freq = defaultdict()
    [freq.update({s.keys()[0]: s.values()[0]}) for s in data[0][c]['freq']]
    names = [s.keys()[0] for s in data[0][c]['seqs']]
    seqs = [s.values()[0] for s in data[0][c]['seqs']]
    loci = data[0][c]['loci']
    # Very large loci (> 500 nt) are skipped entirely.
    if loci[0][3] - loci[0][2] > 500:
        logger.info("locus bigger > 500 nt, skipping: %s" % loci)
        return valid, ann, {}
    if not file_exists(out_file):
        if args.razer:
            logger.debug("map with razer all sequences to all loci %s " % loci)
            map_to_precursors(seqs, names, {loci[0][0]: [loci[0][0:5]]}, out_file, args)
        else:
            logger.debug("map with C fn all sequences to all loci %s " % loci)
            if args.debug:
                # Debug mode keeps the raw positions alongside the profile.
                raw_file = out_file
            out_file = map_to_precursors_on_fly(seqs, names, loci[0][0:5], args)
    logger.debug("plot sequences on loci")
    df = _convert_to_df(out_file, freq, raw_file)
    if df:
        valid, ann = _make(data[0][c])
    return valid, ann, df
Remove unused matplotlib import
import logging
import string
import os
from math import log as mlog2
from collections import Counter, defaultdict
from read import map_to_precursors, map_to_precursors_on_fly, precursor_sequence
from utils import safe_dirs
from progressbar import ProgressBar
from seqcluster.libs.utils import file_exists
from seqcluster.function.rnafold import run_rnafold
from seqcluster.html import HTML
from seqcluster import templates
logger = logging.getLogger('html')
def _get_link(c):
"""Gives html link tag for cluster link information"""
return "<a href=%s/maps.html>%s</a>" % (c, c)
def _get_ann(dbs, features):
"""
Gives format to annotation for html table output
"""
value = ""
for db, feature in zip(dbs, features):
value += db + ":" + feature
return value
def _parse(profile, size):
total = Counter()
for sample in profile:
for pos in range(len(size)):
total[pos] += profile[sample][pos]
return total.values()
def make_profile(data, out_dir, args):
    """
    Make data report for each cluster.

    For every cluster id in ``data[0]``: create its output directory, map the
    cluster sequences onto the precursor (``_single_cluster``), then attach
    the expression profile, precursor sequence, per-position coverage colors
    and RNAfold output to the cluster entry.

    :param data: seqcluster structure; ``data[0]`` maps cluster id -> info dict
    :param out_dir: base directory; one sub-directory is created per cluster
    :param args: command-line options (``args.ref`` is used here)
    :return: the updated ``data`` structure

    Fixes: removed unused locals ``main_table``/``header``; corrected the
    "cluser" typo in the debug message.
    """
    n = len(data[0])
    bar = ProgressBar(maxval=n)
    bar.start()
    bar.update(0)
    for itern, c in enumerate(data[0]):
        bar.update(itern)
        logger.debug("creating cluster: {}".format(c))
        safe_dirs(os.path.join(out_dir, c))
        valid, ann, pos_structure = _single_cluster(c, data, args)
        data[0][c].update({'profile': pos_structure})
        loci = data[0][c]['loci']
        data[0][c]['precursor'] = {"seq": precursor_sequence(loci[0][0:5], args.ref)}
        data[0][c]['precursor']["colors"] = _parse(data[0][c]['profile'], data[0][c]['precursor']["seq"])
        data[0][c]['precursor'].update(run_rnafold(data[0][c]['precursor']['seq']))
    return data
def _expand(dat, counts, start, end):
"""
expand the same counts from start to end
"""
for pos in range(start, end):
for s in counts:
dat[s][pos] += counts[s]
return dat
def _convert_to_df(in_file, freq, raw_file):
    """
    Build a per-sample, per-position coverage table.

    :param in_file: either a path to a BED-like TSV (chrom, start, end, name,
        ...) or a mapping name -> (start, end) of already-mapped reads
    :param freq: mapping read name -> {sample: count}
    :param raw_file: optional output path; when set (mapping input only) the
        raw positions are also written out in BED format
    :return: dict sample -> Counter(position -> log(coverage + 1))

    Fixes: the raw output handle is now closed (it leaked before), and the
    Python-2-only ``print >>`` / bare ``unicode`` usages were replaced with
    equivalents that also run on Python 3.
    """
    dat = defaultdict(Counter)
    try:
        # Python 2: accept both str and unicode paths.
        string_types = (str, unicode)
    except NameError:
        # Python 3: ``unicode`` no longer exists.
        string_types = (str,)
    if isinstance(in_file, string_types):
        with open(in_file) as in_handle:
            for line in in_handle:
                cols = line.strip().split("\t")
                counts = freq[cols[3]]
                dat = _expand(dat, counts, int(cols[1]), int(cols[2]))
    else:
        out_handle = open(raw_file, "w") if raw_file else None
        try:
            for name in in_file:
                counts = freq[name]
                if out_handle:
                    # Same BED line the old ``print >>`` emitted.
                    out_handle.write("%s\t%s\t%s\t%s\t%s\t%s\n" % (
                        "chr", in_file[name][0], in_file[name][1], name,
                        sum(counts.values()), "+"))
                dat = _expand(dat, counts, in_file[name][0], in_file[name][1])
        finally:
            if out_handle:
                out_handle.close()
    # Log-scale coverage; mlog2 is ``math.log`` aliased at module level.
    for s in dat:
        for p in dat[s]:
            dat[s][p] = mlog2(dat[s][p] + 1)
    return dat
def _make(c):
    """
    Collect annotation summaries for one cluster.

    Returns the cluster's list of valid annotation databases and, for each
    annotated database present in that list, a comma-separated string of its
    unique annotations.
    """
    ann = defaultdict(list)
    for pos in c['ann']:
        for db in pos:
            ann[db] += list(pos[db])
    logger.debug(ann)
    valid = list(c['valid'])
    ann_list = [", ".join(list(set(ann[feature])))
                for feature in ann if feature in valid]
    return valid, ann_list
def _single_cluster(c, data, args):
    """
    Map sequences on precursors and create
    expression profile

    :param c: cluster id (key into ``data[0]``)
    :param data: seqcluster data structure
    :param args: command-line options (``razer``, ``debug``, ``ref``, ...)
    :return: tuple ``(valid, ann, profile)``; ``(0, 0, {})`` when the locus
        is skipped

    NOTE(review): ``out_file`` is read (``file_exists(out_file)`` and the
    razer branch) before any assignment in this function, which raises a
    NameError at runtime.  Presumably a path such as
    ``<out_dir>/<c>/maps.tsv`` was dropped in an earlier edit -- TODO confirm
    against upstream seqcluster before relying on this code path.
    """
    valid, ann = 0, 0
    raw_file = None
    # freq maps read name -> {sample: count}; built from a list of
    # single-entry dicts (Python-2 style ``keys()[0]`` indexing).
    freq = defaultdict()
    [freq.update({s.keys()[0]: s.values()[0]}) for s in data[0][c]['freq']]
    names = [s.keys()[0] for s in data[0][c]['seqs']]
    seqs = [s.values()[0] for s in data[0][c]['seqs']]
    loci = data[0][c]['loci']
    # Very large loci (> 500 nt) are skipped entirely.
    if loci[0][3] - loci[0][2] > 500:
        logger.info("locus bigger > 500 nt, skipping: %s" % loci)
        return valid, ann, {}
    if not file_exists(out_file):
        if args.razer:
            logger.debug("map with razer all sequences to all loci %s " % loci)
            map_to_precursors(seqs, names, {loci[0][0]: [loci[0][0:5]]}, out_file, args)
        else:
            logger.debug("map with C fn all sequences to all loci %s " % loci)
            if args.debug:
                # Debug mode keeps the raw positions alongside the profile.
                raw_file = out_file
            out_file = map_to_precursors_on_fly(seqs, names, loci[0][0:5], args)
    logger.debug("plot sequences on loci")
    df = _convert_to_df(out_file, freq, raw_file)
    if df:
        valid, ann = _make(data[0][c])
    return valid, ann, df
|
from datetime import datetime
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
)
from django.urls import reverse
from memoized import memoized
from tastypie import fields
from tastypie.authentication import Authentication
from tastypie.bundle import Bundle
from tastypie.exceptions import BadRequest
from casexml.apps.case.xform import get_case_updates
from corehq.apps.api.query_adapters import GroupQuerySetAdapter
from corehq.apps.api.resources.pagination import DoesNothingPaginatorCompat
from corehq.apps.api.es import ElasticAPIQuerySet, FormESView, es_query_from_get_params
from corehq.apps.api.fields import (
ToManyDictField,
ToManyDocumentsField,
ToManyListDictField,
UseIfRequested,
)
from corehq.apps.api.models import ESCase, ESXFormInstance
from corehq.apps.api.resources import (
CouchResourceMixin,
DomainSpecificResourceMixin,
HqBaseResource,
SimpleSortableResourceMixin,
v0_1,
v0_3,
)
from corehq.apps.api.resources.auth import (
DomainAdminAuthentication,
LoginAndDomainAuthentication,
RequirePermissionAuthentication,
)
from corehq.apps.api.resources.meta import CustomResourceMeta
from corehq.apps.api.resources.v0_1 import _safe_bool
from corehq.apps.api.serializers import (
CommCareCaseSerializer,
XFormInstanceSerializer,
)
from corehq.apps.api.util import get_obj, get_object_or_not_exist
from corehq.apps.app_manager.app_schemas.case_properties import (
get_all_case_properties,
)
from corehq.apps.app_manager.dbaccessors import (
get_all_built_app_results,
get_apps_in_domain,
)
from corehq.apps.app_manager.models import Application, RemoteApp, LinkedApplication
from corehq.apps.groups.models import Group
from corehq.apps.users.models import CouchUser, HqPermissions
from corehq.apps.users.util import format_username
from corehq.motech.repeaters.models import CommCareCase, Repeater, get_all_repeater_types
from corehq.util.view_utils import absolute_reverse
from no_exceptions.exceptions import Http400
# By the time a test case is running, the resource is already instantiated,
# so as a hack until this can be remedied, there is a global that
# can be set to provide a mock.
# When non-None, ``XFormInstanceResource.xform_es`` returns this object
# instead of constructing a real ``FormESView``.
MOCK_XFORM_ES = None
class XFormInstanceResource(SimpleSortableResourceMixin, HqBaseResource, DomainSpecificResourceMixin):
    """This version of the form resource is built of Elasticsearch data
    which gets wrapped by ``ESXFormInstance``.
    No type conversion is done e.g. dates and some fields are named differently than in the
    Python models.
    """

    id = fields.CharField(attribute='_id', readonly=True, unique=True)
    domain = fields.CharField(attribute='domain')
    form = fields.DictField(attribute='form_data')
    type = fields.CharField(attribute='type')
    version = fields.CharField(attribute='version')
    uiversion = fields.CharField(attribute='uiversion', blank=True, null=True)
    metadata = fields.DictField(attribute='metadata', blank=True, null=True)
    received_on = fields.CharField(attribute="received_on")
    edited_on = fields.CharField(attribute="edited_on", null=True)
    server_modified_on = fields.CharField(attribute="server_modified_on")
    indexed_on = fields.CharField(attribute='inserted_at')
    app_id = fields.CharField(attribute='app_id', null=True)
    build_id = fields.CharField(attribute='build_id', null=True)
    initial_processing_complete = fields.BooleanField(
        attribute='initial_processing_complete', null=True)
    problem = fields.CharField(attribute='problem', null=True)

    # Derived field, populated by dehydrate_archived below.
    archived = fields.CharField(readonly=True)

    def dehydrate_archived(self, bundle):
        # tastypie magic-name method for the ``archived`` field.
        return bundle.obj.is_archived

    # Related cases are expensive to fetch; only serialized when the client
    # explicitly requests them (UseIfRequested).
    cases = UseIfRequested(
        ToManyDocumentsField(
            'corehq.apps.api.resources.v0_4.CommCareCaseResource',
            attribute=lambda xform: _cases_referenced_by_xform(xform)
        )
    )

    attachments = fields.DictField(readonly=True, null=True)

    def dehydrate_attachments(self, bundle):
        # Build {name: {content_type, length, url}} from the form's blobs;
        # empty dict when the form has no attachments.
        attachments_dict = getattr(bundle.obj, 'blobs', None)
        if not attachments_dict:
            return {}
        domain = bundle.obj.domain
        form_id = bundle.obj._id

        def _normalize_meta(name, meta):
            return {
                'content_type': meta.content_type,
                'length': meta.content_length,
                'url': absolute_reverse('api_form_attachment', args=(domain, form_id, name))
            }

        return {
            name: _normalize_meta(name, meta) for name, meta in attachments_dict.items()
        }

    is_phone_submission = fields.BooleanField(readonly=True)

    def dehydrate_is_phone_submission(self, bundle):
        # A submission is considered "from a phone" when OpenRosa headers
        # were recorded with it.
        headers = getattr(bundle.obj, 'openrosa_headers', None)
        if not headers:
            return False
        return headers.get('HTTP_X_OPENROSA_VERSION') is not None

    edited_by_user_id = fields.CharField(readonly=True, null=True)

    def dehydrate_edited_by_user_id(self, bundle):
        # Only edited forms carry the editing user's id; implicitly None
        # otherwise.
        if bundle.obj.edited_on:
            return (getattr(bundle.obj, 'auth_context') or {}).get('user_id', None)

    def obj_get(self, bundle, **kwargs):
        # Detail view: fetch a single form document from ES by id.
        instance_id = kwargs['pk']
        domain = kwargs['domain']
        return self.xform_es(domain).get_document(instance_id)

    def xform_es(self, domain):
        # Test hook: module-global MOCK_XFORM_ES overrides the real ES view.
        return MOCK_XFORM_ES or FormESView(domain)

    def obj_get_list(self, bundle, domain, **kwargs):
        try:
            es_query = es_query_from_get_params(bundle.request.GET, domain, ['include_archived'])
        except Http400 as e:
            # Surface query-parameter errors as a tastypie 400.
            raise BadRequest(str(e))
        # Note that FormESView is used only as an ES client, for `run_query` against the proper index
        return ElasticAPIQuerySet(
            payload=es_query,
            model=ESXFormInstance,
            es_client=self.xform_es(domain)
        ).order_by('-received_on')

    def detail_uri_kwargs(self, bundle_or_obj):
        return {
            'pk': get_obj(bundle_or_obj).form_id
        }

    class Meta(CustomResourceMeta):
        authentication = RequirePermissionAuthentication(HqPermissions.edit_data)
        object_class = ESXFormInstance
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
        resource_name = 'form'
        ordering = ['received_on', 'server_modified_on', 'indexed_on']
        serializer = XFormInstanceSerializer(formats=['json'])
def _cases_referenced_by_xform(esxform):
    """Get a list of cases referenced by ESXFormInstance

    Note: this does not load cases referenced in stock transactions
    because ESXFormInstance does not have access to form XML, which
    is needed to find stock transactions.
    """
    assert esxform.domain, esxform.form_id
    case_ids = {case_update.id for case_update in get_case_updates(esxform)}
    return CommCareCase.objects.get_cases(list(case_ids), esxform.domain)
class RepeaterResource(CouchResourceMixin, HqBaseResource, DomainSpecificResourceMixin):
    """CRUD API over a domain's data-forwarding (repeater) configurations."""

    id = fields.CharField(attribute='_id', readonly=True, unique=True)
    type = fields.CharField(attribute='doc_type')
    domain = fields.CharField(attribute='domain')
    url = fields.CharField(attribute='url')
    version = fields.CharField(attribute='version', null=True)

    def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
        # Detail URIs need the domain from the object itself, so build the
        # reverse() kwargs by hand instead of relying on tastypie's default.
        if isinstance(bundle_or_obj, Bundle):
            obj = bundle_or_obj.obj
        elif bundle_or_obj is None:
            return None
        else:
            obj = bundle_or_obj
        return reverse('api_dispatch_detail', kwargs=dict(resource_name=self._meta.resource_name,
                                                          domain=obj.domain,
                                                          api_name=self._meta.api_name,
                                                          pk=obj._id))

    def obj_get_list(self, bundle, domain, **kwargs):
        repeaters = Repeater.by_domain(domain)
        return list(repeaters)

    def obj_get(self, bundle, **kwargs):
        # Accept any registered repeater subtype, not only the base doc type.
        return get_object_or_not_exist(Repeater, kwargs['pk'], kwargs['domain'],
                                       additional_doc_types=list(get_all_repeater_types()))

    def obj_create(self, bundle, request=None, **kwargs):
        bundle.obj.domain = kwargs['domain']
        bundle = self._update(bundle)
        bundle.obj.save()
        return bundle

    def obj_update(self, bundle, **kwargs):
        bundle.obj = Repeater.get(kwargs['pk'])
        # Guard against cross-domain edits both before and after hydration.
        assert bundle.obj.domain == kwargs['domain']
        bundle = self._update(bundle)
        assert bundle.obj.domain == kwargs['domain']
        bundle.obj.save()
        return bundle

    def _update(self, bundle):
        # Copy the submitted payload onto the object, then run tastypie
        # hydration on the result.
        for key, value in bundle.data.items():
            setattr(bundle.obj, key, value)
        bundle = self.full_hydrate(bundle)
        return bundle

    class Meta(CustomResourceMeta):
        authentication = DomainAdminAuthentication()
        object_class = Repeater
        resource_name = 'data-forwarding'
        detail_allowed_methods = ['get', 'put', 'delete']
        list_allowed_methods = ['get', 'post']
class CommCareCaseResource(SimpleSortableResourceMixin, v0_3.CommCareCaseResource, DomainSpecificResourceMixin):
    """v0.4 case resource: extends v0.3 with related forms/cases and
    ES-backed detail lookup (object_class is ESCase)."""

    # Related documents are expensive; only serialized when the client
    # explicitly requests them (UseIfRequested).
    xforms_by_name = UseIfRequested(ToManyListDictField(
        'corehq.apps.api.resources.v0_4.XFormInstanceResource',
        attribute='xforms_by_name'
    ))
    xforms_by_xmlns = UseIfRequested(ToManyListDictField(
        'corehq.apps.api.resources.v0_4.XFormInstanceResource',
        attribute='xforms_by_xmlns'
    ))
    child_cases = UseIfRequested(
        ToManyDictField(
            'corehq.apps.api.resources.v0_4.CommCareCaseResource',
            attribute='child_cases'
        )
    )
    parent_cases = UseIfRequested(
        ToManyDictField(
            'corehq.apps.api.resources.v0_4.CommCareCaseResource',
            attribute='parent_cases'
        )
    )

    domain = fields.CharField(attribute='domain')

    # Date fields fall back to a sentinel so ordering never sees a missing
    # value.
    date_modified = fields.CharField(attribute='modified_on', default="1900-01-01")
    indexed_on = fields.CharField(attribute='inserted_at', default="1900-01-01")
    server_date_modified = fields.CharField(attribute='server_modified_on', default="1900-01-01")
    server_date_opened = fields.CharField(attribute='server_opened_on', default="1900-01-01")
    opened_by = fields.CharField(attribute='opened_by', null=True)
    closed_by = fields.CharField(attribute='closed_by', null=True)

    def obj_get(self, bundle, **kwargs):
        # Detail view: fetch the case document from ES by id.
        case_id = kwargs['pk']
        domain = kwargs['domain']
        return self.case_es(domain).get_document(case_id)

    class Meta(v0_3.CommCareCaseResource.Meta):
        max_limit = 5000
        serializer = CommCareCaseSerializer()
        ordering = ['server_date_modified', 'date_modified', 'indexed_on']
        object_class = ESCase
class GroupResource(CouchResourceMixin, HqBaseResource, DomainSpecificResourceMixin):
    """Read-only API over user groups in a domain."""

    id = fields.CharField(attribute='get_id', unique=True, readonly=True)
    domain = fields.CharField(attribute='domain')
    name = fields.CharField(attribute='name')
    users = fields.ListField(attribute='get_user_ids')
    case_sharing = fields.BooleanField(attribute='case_sharing', default=False)
    reporting = fields.BooleanField(default=True, attribute='reporting')
    metadata = fields.DictField(attribute='metadata', null=True, blank=True)

    def obj_get(self, bundle, **kwargs):
        return get_object_or_not_exist(Group, kwargs['pk'], kwargs['domain'])

    def obj_get_list(self, bundle, domain, **kwargs):
        # Adapter provides queryset-like (sliceable) access to the domain's
        # groups for tastypie pagination.
        return GroupQuerySetAdapter(domain)

    class Meta(CustomResourceMeta):
        authentication = RequirePermissionAuthentication(HqPermissions.edit_commcare_users)
        object_class = Group
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
        resource_name = 'group'
class SingleSignOnResource(HqBaseResource, DomainSpecificResourceMixin):
    """
    This resource does not require "authorization" per se, but
    rather allows a POST of username and password and returns
    just the authenticated user, if the credentials and domain
    are correct.
    """

    def post_list(self, request, **kwargs):
        """Authenticate the POSTed credentials; return the serialized user
        on success, 400 on missing parameters, 403 on failure."""
        domain = kwargs.get('domain')
        request.domain = domain
        username = request.POST.get('username')
        password = request.POST.get('password')
        if username is None:
            return HttpResponseBadRequest('Missing required parameter: username')
        if password is None:
            return HttpResponseBadRequest('Missing required parameter: password')
        # Bare usernames are qualified against the domain.
        if '@' not in username:
            username = format_username(username, domain)
        # Convert to the appropriate type of user
        couch_user = CouchUser.get_by_username(username)
        if couch_user is None or not couch_user.is_member_of(domain) or not couch_user.check_password(password):
            return HttpResponseForbidden()
        # Serialize with the resource matching the user type.
        if couch_user.is_commcare_user():
            user_resource = v0_1.CommCareUserResource()
        elif couch_user.is_web_user():
            user_resource = v0_1.WebUserResource()
        else:
            return HttpResponseForbidden()
        bundle = user_resource.build_bundle(obj=couch_user, request=request)
        bundle = user_resource.full_dehydrate(bundle)
        return user_resource.create_response(request, bundle, response_class=HttpResponse)

    def get_list(self, bundle, **kwargs):
        # GET is not supported; credentials must be POSTed.
        return HttpResponseForbidden()

    def get_detail(self, bundle, **kwargs):
        return HttpResponseForbidden()

    class Meta(CustomResourceMeta):
        authentication = Authentication()
        resource_name = 'sso'
        detail_allowed_methods = []
        list_allowed_methods = ['post']
class BaseApplicationResource(CouchResourceMixin, HqBaseResource, DomainSpecificResourceMixin):
    """Shared plumbing for application resources: list and detail lookup by
    domain."""

    def obj_get_list(self, bundle, domain, **kwargs):
        # Oldest first; apps without a creation date sort before the rest.
        return sorted(get_apps_in_domain(domain, include_remote=False),
                      key=lambda app: app.date_created or datetime.min)

    def obj_get(self, bundle, **kwargs):
        # support returning linked applications upon receiving an application request
        return get_object_or_not_exist(Application, kwargs['pk'], kwargs['domain'],
                                       additional_doc_types=[LinkedApplication._doc_type])

    class Meta(CustomResourceMeta):
        authentication = LoginAndDomainAuthentication(allow_session_auth=True)
        object_class = Application
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
        resource_name = 'application'
        paginator_class = DoesNothingPaginatorCompat
class ApplicationResource(BaseApplicationResource):
    """Serializes applications, including their modules/forms and build
    history."""

    id = fields.CharField(attribute='_id')
    name = fields.CharField(attribute='name')
    version = fields.IntegerField(attribute='version')
    is_released = fields.BooleanField(attribute='is_released', null=True)
    built_on = fields.DateTimeField(attribute='built_on', null=True)
    build_comment = fields.CharField(attribute='build_comment', null=True)
    built_from_app_id = fields.CharField(attribute='copy_of', null=True)
    modules = fields.ListField()
    versions = fields.ListField()

    @staticmethod
    def dehydrate_versions(bundle):
        # Saved builds of this app; empty for build copies themselves
        # (anything with copy_of set).
        app = bundle.obj
        if app.copy_of:
            return []
        results = get_all_built_app_results(app.domain, app.get_id)
        return [
            {
                'id': result['value']['_id'],
                'built_on': result['value']['built_on'],
                'build_comment': result['value']['build_comment'],
                'is_released': result['value']['is_released'],
                'version': result['value']['version'],
            }
            for result in results
        ]

    @memoized
    def get_all_case_properties_local(self, app):
        # Memoized: the computation is reused for every module of the app.
        return get_all_case_properties(app, exclude_invalid_properties=False)

    def dehydrate_module(self, app, module, langs):
        """
        Convert a Module object to a JValue representation
        with just the good parts.

        NOTE: This is not a tastypie "magic"-name method to
        dehydrate the "module" field; there is no such field.
        """
        try:
            dehydrated = {}
            dehydrated['name'] = module.name
            dehydrated['case_type'] = module.case_type
            all_case_properties = self.get_all_case_properties_local(app)
            dehydrated['case_properties'] = all_case_properties[module.case_type]
            dehydrated['unique_id'] = module.unique_id
            dehydrated['forms'] = []
            for form in module.forms:
                form_unique_id = form.unique_id
                form_jvalue = {
                    'xmlns': form.xmlns,
                    'name': form.name,
                    'questions': form.get_questions(
                        langs,
                        include_triggers=True,
                        include_groups=True,
                        include_translations=True,
                        include_fixtures=True,
                    ),
                    'unique_id': form_unique_id,
                }
                dehydrated['forms'].append(form_jvalue)
            return dehydrated
        except Exception as e:
            # Deliberate best-effort: report the error in-band rather than
            # failing the whole application payload.
            return {
                'error': str(e)
            }

    def dehydrate_modules(self, bundle):
        app = bundle.obj
        # support returning linked applications upon receiving an application list request
        if app.doc_type in [Application._doc_type, LinkedApplication._doc_type]:
            return [self.dehydrate_module(app, module, app.langs) for module in bundle.obj.modules]
        elif app.doc_type == RemoteApp._doc_type:
            return []

    def dehydrate(self, bundle):
        # When the "extras" flag is requested, return the raw couch doc
        # merged with the normally-serialized fields instead of the standard
        # payload.
        if not _safe_bool(bundle, "extras"):
            return super(ApplicationResource, self).dehydrate(bundle)
        else:
            app_data = {}
            app_data.update(bundle.obj._doc)
            app_data.update(bundle.data)
            return app_data
Remove RepeaterResource code
from datetime import datetime
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
)
from memoized import memoized
from tastypie import fields
from tastypie.authentication import Authentication
from tastypie.exceptions import BadRequest
from casexml.apps.case.xform import get_case_updates
from corehq.apps.api.query_adapters import GroupQuerySetAdapter
from corehq.apps.api.resources.pagination import DoesNothingPaginatorCompat
from corehq.apps.api.es import ElasticAPIQuerySet, FormESView, es_query_from_get_params
from corehq.apps.api.fields import (
ToManyDictField,
ToManyDocumentsField,
ToManyListDictField,
UseIfRequested,
)
from corehq.apps.api.models import ESCase, ESXFormInstance
from corehq.apps.api.resources import (
CouchResourceMixin,
DomainSpecificResourceMixin,
HqBaseResource,
SimpleSortableResourceMixin,
v0_1,
v0_3,
)
from corehq.apps.api.resources.auth import (
LoginAndDomainAuthentication,
RequirePermissionAuthentication,
)
from corehq.apps.api.resources.meta import CustomResourceMeta
from corehq.apps.api.resources.v0_1 import _safe_bool
from corehq.apps.api.serializers import (
CommCareCaseSerializer,
XFormInstanceSerializer,
)
from corehq.apps.api.util import get_obj, get_object_or_not_exist
from corehq.apps.app_manager.app_schemas.case_properties import (
get_all_case_properties,
)
from corehq.apps.app_manager.dbaccessors import (
get_all_built_app_results,
get_apps_in_domain,
)
from corehq.apps.app_manager.models import Application, RemoteApp, LinkedApplication
from corehq.apps.groups.models import Group
from corehq.apps.users.models import CouchUser, HqPermissions
from corehq.apps.users.util import format_username
from corehq.motech.repeaters.models import CommCareCase
from corehq.util.view_utils import absolute_reverse
from no_exceptions.exceptions import Http400
# By the time a test case is running, the resource is already instantiated,
# so as a hack until this can be remedied, there is a global that
# can be set to provide a mock.
# When non-None, ``XFormInstanceResource.xform_es`` returns this object
# instead of constructing a real ``FormESView``.
MOCK_XFORM_ES = None
class XFormInstanceResource(SimpleSortableResourceMixin, HqBaseResource, DomainSpecificResourceMixin):
    """This version of the form resource is built of Elasticsearch data
    which gets wrapped by ``ESXFormInstance``.
    No type conversion is done e.g. dates and some fields are named differently than in the
    Python models.
    """

    id = fields.CharField(attribute='_id', readonly=True, unique=True)
    domain = fields.CharField(attribute='domain')
    form = fields.DictField(attribute='form_data')
    type = fields.CharField(attribute='type')
    version = fields.CharField(attribute='version')
    uiversion = fields.CharField(attribute='uiversion', blank=True, null=True)
    metadata = fields.DictField(attribute='metadata', blank=True, null=True)
    received_on = fields.CharField(attribute="received_on")
    edited_on = fields.CharField(attribute="edited_on", null=True)
    server_modified_on = fields.CharField(attribute="server_modified_on")
    indexed_on = fields.CharField(attribute='inserted_at')
    app_id = fields.CharField(attribute='app_id', null=True)
    build_id = fields.CharField(attribute='build_id', null=True)
    initial_processing_complete = fields.BooleanField(
        attribute='initial_processing_complete', null=True)
    problem = fields.CharField(attribute='problem', null=True)

    # Derived field, populated by dehydrate_archived below.
    archived = fields.CharField(readonly=True)

    def dehydrate_archived(self, bundle):
        # tastypie magic-name method for the ``archived`` field.
        return bundle.obj.is_archived

    # Related cases are expensive to fetch; only serialized when the client
    # explicitly requests them (UseIfRequested).
    cases = UseIfRequested(
        ToManyDocumentsField(
            'corehq.apps.api.resources.v0_4.CommCareCaseResource',
            attribute=lambda xform: _cases_referenced_by_xform(xform)
        )
    )

    attachments = fields.DictField(readonly=True, null=True)

    def dehydrate_attachments(self, bundle):
        # Build {name: {content_type, length, url}} from the form's blobs;
        # empty dict when the form has no attachments.
        attachments_dict = getattr(bundle.obj, 'blobs', None)
        if not attachments_dict:
            return {}
        domain = bundle.obj.domain
        form_id = bundle.obj._id

        def _normalize_meta(name, meta):
            return {
                'content_type': meta.content_type,
                'length': meta.content_length,
                'url': absolute_reverse('api_form_attachment', args=(domain, form_id, name))
            }

        return {
            name: _normalize_meta(name, meta) for name, meta in attachments_dict.items()
        }

    is_phone_submission = fields.BooleanField(readonly=True)

    def dehydrate_is_phone_submission(self, bundle):
        # A submission is considered "from a phone" when OpenRosa headers
        # were recorded with it.
        headers = getattr(bundle.obj, 'openrosa_headers', None)
        if not headers:
            return False
        return headers.get('HTTP_X_OPENROSA_VERSION') is not None

    edited_by_user_id = fields.CharField(readonly=True, null=True)

    def dehydrate_edited_by_user_id(self, bundle):
        # Only edited forms carry the editing user's id; implicitly None
        # otherwise.
        if bundle.obj.edited_on:
            return (getattr(bundle.obj, 'auth_context') or {}).get('user_id', None)

    def obj_get(self, bundle, **kwargs):
        # Detail view: fetch a single form document from ES by id.
        instance_id = kwargs['pk']
        domain = kwargs['domain']
        return self.xform_es(domain).get_document(instance_id)

    def xform_es(self, domain):
        # Test hook: module-global MOCK_XFORM_ES overrides the real ES view.
        return MOCK_XFORM_ES or FormESView(domain)

    def obj_get_list(self, bundle, domain, **kwargs):
        try:
            es_query = es_query_from_get_params(bundle.request.GET, domain, ['include_archived'])
        except Http400 as e:
            # Surface query-parameter errors as a tastypie 400.
            raise BadRequest(str(e))
        # Note that FormESView is used only as an ES client, for `run_query` against the proper index
        return ElasticAPIQuerySet(
            payload=es_query,
            model=ESXFormInstance,
            es_client=self.xform_es(domain)
        ).order_by('-received_on')

    def detail_uri_kwargs(self, bundle_or_obj):
        return {
            'pk': get_obj(bundle_or_obj).form_id
        }

    class Meta(CustomResourceMeta):
        authentication = RequirePermissionAuthentication(HqPermissions.edit_data)
        object_class = ESXFormInstance
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
        resource_name = 'form'
        ordering = ['received_on', 'server_modified_on', 'indexed_on']
        serializer = XFormInstanceSerializer(formats=['json'])
def _cases_referenced_by_xform(esxform):
    """Get a list of cases referenced by ESXFormInstance

    Note: this does not load cases referenced in stock transactions
    because ESXFormInstance does not have access to form XML, which
    is needed to find stock transactions.
    """
    assert esxform.domain, esxform.form_id
    case_ids = {case_update.id for case_update in get_case_updates(esxform)}
    return CommCareCase.objects.get_cases(list(case_ids), esxform.domain)
class CommCareCaseResource(SimpleSortableResourceMixin, v0_3.CommCareCaseResource, DomainSpecificResourceMixin):
    """v0.4 case resource: extends v0.3 with related forms/cases and
    ES-backed detail lookup (object_class is ESCase)."""

    # Related documents are expensive; only serialized when the client
    # explicitly requests them (UseIfRequested).
    xforms_by_name = UseIfRequested(ToManyListDictField(
        'corehq.apps.api.resources.v0_4.XFormInstanceResource',
        attribute='xforms_by_name'
    ))
    xforms_by_xmlns = UseIfRequested(ToManyListDictField(
        'corehq.apps.api.resources.v0_4.XFormInstanceResource',
        attribute='xforms_by_xmlns'
    ))
    child_cases = UseIfRequested(
        ToManyDictField(
            'corehq.apps.api.resources.v0_4.CommCareCaseResource',
            attribute='child_cases'
        )
    )
    parent_cases = UseIfRequested(
        ToManyDictField(
            'corehq.apps.api.resources.v0_4.CommCareCaseResource',
            attribute='parent_cases'
        )
    )

    domain = fields.CharField(attribute='domain')

    # Date fields fall back to a sentinel so ordering never sees a missing
    # value.
    date_modified = fields.CharField(attribute='modified_on', default="1900-01-01")
    indexed_on = fields.CharField(attribute='inserted_at', default="1900-01-01")
    server_date_modified = fields.CharField(attribute='server_modified_on', default="1900-01-01")
    server_date_opened = fields.CharField(attribute='server_opened_on', default="1900-01-01")
    opened_by = fields.CharField(attribute='opened_by', null=True)
    closed_by = fields.CharField(attribute='closed_by', null=True)

    def obj_get(self, bundle, **kwargs):
        # Detail view: fetch the case document from ES by id.
        case_id = kwargs['pk']
        domain = kwargs['domain']
        return self.case_es(domain).get_document(case_id)

    class Meta(v0_3.CommCareCaseResource.Meta):
        max_limit = 5000
        serializer = CommCareCaseSerializer()
        ordering = ['server_date_modified', 'date_modified', 'indexed_on']
        object_class = ESCase
class GroupResource(CouchResourceMixin, HqBaseResource, DomainSpecificResourceMixin):
    """Read-only API over user groups in a domain."""

    id = fields.CharField(attribute='get_id', unique=True, readonly=True)
    domain = fields.CharField(attribute='domain')
    name = fields.CharField(attribute='name')
    users = fields.ListField(attribute='get_user_ids')
    case_sharing = fields.BooleanField(attribute='case_sharing', default=False)
    reporting = fields.BooleanField(default=True, attribute='reporting')
    metadata = fields.DictField(attribute='metadata', null=True, blank=True)

    def obj_get(self, bundle, **kwargs):
        return get_object_or_not_exist(Group, kwargs['pk'], kwargs['domain'])

    def obj_get_list(self, bundle, domain, **kwargs):
        # Adapter provides queryset-like (sliceable) access to the domain's
        # groups for tastypie pagination.
        return GroupQuerySetAdapter(domain)

    class Meta(CustomResourceMeta):
        authentication = RequirePermissionAuthentication(HqPermissions.edit_commcare_users)
        object_class = Group
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
        resource_name = 'group'
class SingleSignOnResource(HqBaseResource, DomainSpecificResourceMixin):
    """
    This resource does not require "authorization" per se, but
    rather allows a POST of username and password and returns
    just the authenticated user, if the credentials and domain
    are correct.
    """

    def post_list(self, request, **kwargs):
        """Authenticate the POSTed credentials; return the serialized user
        on success, 400 on missing parameters, 403 on failure."""
        domain = kwargs.get('domain')
        request.domain = domain
        username = request.POST.get('username')
        password = request.POST.get('password')
        if username is None:
            return HttpResponseBadRequest('Missing required parameter: username')
        if password is None:
            return HttpResponseBadRequest('Missing required parameter: password')
        # Bare usernames are qualified against the domain.
        if '@' not in username:
            username = format_username(username, domain)
        # Convert to the appropriate type of user
        couch_user = CouchUser.get_by_username(username)
        if couch_user is None or not couch_user.is_member_of(domain) or not couch_user.check_password(password):
            return HttpResponseForbidden()
        # Serialize with the resource matching the user type.
        if couch_user.is_commcare_user():
            user_resource = v0_1.CommCareUserResource()
        elif couch_user.is_web_user():
            user_resource = v0_1.WebUserResource()
        else:
            return HttpResponseForbidden()
        bundle = user_resource.build_bundle(obj=couch_user, request=request)
        bundle = user_resource.full_dehydrate(bundle)
        return user_resource.create_response(request, bundle, response_class=HttpResponse)

    def get_list(self, bundle, **kwargs):
        # GET is not supported; credentials must be POSTed.
        return HttpResponseForbidden()

    def get_detail(self, bundle, **kwargs):
        return HttpResponseForbidden()

    class Meta(CustomResourceMeta):
        authentication = Authentication()
        resource_name = 'sso'
        detail_allowed_methods = []
        list_allowed_methods = ['post']
class BaseApplicationResource(CouchResourceMixin, HqBaseResource, DomainSpecificResourceMixin):
    """Shared plumbing for application resources: list and detail lookup by
    domain."""

    def obj_get_list(self, bundle, domain, **kwargs):
        # Oldest first; apps without a creation date sort before the rest.
        return sorted(get_apps_in_domain(domain, include_remote=False),
                      key=lambda app: app.date_created or datetime.min)

    def obj_get(self, bundle, **kwargs):
        # support returning linked applications upon receiving an application request
        return get_object_or_not_exist(Application, kwargs['pk'], kwargs['domain'],
                                       additional_doc_types=[LinkedApplication._doc_type])

    class Meta(CustomResourceMeta):
        authentication = LoginAndDomainAuthentication(allow_session_auth=True)
        object_class = Application
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
        resource_name = 'application'
        paginator_class = DoesNothingPaginatorCompat
class ApplicationResource(BaseApplicationResource):
    """Serializes applications, including their modules/forms and build
    history."""

    id = fields.CharField(attribute='_id')
    name = fields.CharField(attribute='name')
    version = fields.IntegerField(attribute='version')
    is_released = fields.BooleanField(attribute='is_released', null=True)
    built_on = fields.DateTimeField(attribute='built_on', null=True)
    build_comment = fields.CharField(attribute='build_comment', null=True)
    built_from_app_id = fields.CharField(attribute='copy_of', null=True)
    modules = fields.ListField()
    versions = fields.ListField()

    @staticmethod
    def dehydrate_versions(bundle):
        # Saved builds of this app; empty for build copies themselves
        # (anything with copy_of set).
        app = bundle.obj
        if app.copy_of:
            return []
        results = get_all_built_app_results(app.domain, app.get_id)
        return [
            {
                'id': result['value']['_id'],
                'built_on': result['value']['built_on'],
                'build_comment': result['value']['build_comment'],
                'is_released': result['value']['is_released'],
                'version': result['value']['version'],
            }
            for result in results
        ]

    @memoized
    def get_all_case_properties_local(self, app):
        # Memoized: the computation is reused for every module of the app.
        return get_all_case_properties(app, exclude_invalid_properties=False)

    def dehydrate_module(self, app, module, langs):
        """
        Convert a Module object to a JValue representation
        with just the good parts.

        NOTE: This is not a tastypie "magic"-name method to
        dehydrate the "module" field; there is no such field.
        """
        try:
            dehydrated = {}
            dehydrated['name'] = module.name
            dehydrated['case_type'] = module.case_type
            all_case_properties = self.get_all_case_properties_local(app)
            dehydrated['case_properties'] = all_case_properties[module.case_type]
            dehydrated['unique_id'] = module.unique_id
            dehydrated['forms'] = []
            for form in module.forms:
                form_unique_id = form.unique_id
                form_jvalue = {
                    'xmlns': form.xmlns,
                    'name': form.name,
                    'questions': form.get_questions(
                        langs,
                        include_triggers=True,
                        include_groups=True,
                        include_translations=True,
                        include_fixtures=True,
                    ),
                    'unique_id': form_unique_id,
                }
                dehydrated['forms'].append(form_jvalue)
            return dehydrated
        except Exception as e:
            # Deliberate best-effort: report the error in-band rather than
            # failing the whole application payload.
            return {
                'error': str(e)
            }

    def dehydrate_modules(self, bundle):
        app = bundle.obj
        # support returning linked applications upon receiving an application list request
        if app.doc_type in [Application._doc_type, LinkedApplication._doc_type]:
            return [self.dehydrate_module(app, module, app.langs) for module in bundle.obj.modules]
        elif app.doc_type == RemoteApp._doc_type:
            return []

    def dehydrate(self, bundle):
        # When the "extras" flag is requested, return the raw couch doc
        # merged with the normally-serialized fields instead of the standard
        # payload.
        if not _safe_bool(bundle, "extras"):
            return super(ApplicationResource, self).dehydrate(bundle)
        else:
            app_data = {}
            app_data.update(bundle.obj._doc)
            app_data.update(bundle.data)
            return app_data
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import urllib.error
import urllib.parse
import urllib.request
import ssl
from typing import Optional
from flask import (
Blueprint,
redirect,
flash,
request,
render_template,
abort,
url_for,
Response,
send_file,
make_response,
g,
)
from sqlalchemy import or_
from bouncer.constants import EDIT, CREATE, DELETE, READ # type: ignore
import cfg
from app import flash_error
from app.auth.acls import requires, skip_authorization, ensure
from app.exceptions import InvalidIdentifierException, InvalidProducts
from app.vulnerability.views.details import VulnerabilityDetails
from data.database import DEFAULT_DATABASE as db
from data.forms import (VulnerabilityDeleteForm, VulnerabilityDetailsForm,
VulnerabilityProposalReject,
VulnerabilityProposalApprove,
VulnerabilityProposalAssign,
VulnerabilityProposalPublish,
VulnerabilityProposalUnassign)
from data.models import RepositoryFilesSchema, Vulnerability
from data.models.vulnerability import (ANNOTATE, VulnerabilityState, ASSIGN,
APPROVE, REJECT)
from lib.utils import (create_json_response, update_products,
get_vulnerability_details, clean_vulnerability_changes)
from lib.vcs_management import get_vcs_handler
# All vulnerability views hang off the site root (e.g. /<vcdb_id>, /create).
bp = Blueprint("vuln", __name__, url_prefix="/")
def view_vuln(vcdb_id, use_template):
    """Render *use_template* for the vulnerability identified by *vcdb_id*.

    Flashes an error and redirects to the index on an invalid identifier;
    aborts with 404 when no view data exists for the entry.
    """
    try:
        details = VulnerabilityDetails(vcdb_id)
        details.validate_and_simplify_id()
    except InvalidIdentifierException as err:
        return flash_error(str(err), "frontend.serve_index")
    # abort() raises an HTTPException, not InvalidIdentifierException, so it
    # behaves identically outside the try block.
    if not details.vulnerability_view:
        abort(404)
    return render_template(use_template, vulnerability_details=details)
@bp.route("/vuln", methods=["POST"])
def vuln_view_post():
return view_vuln(None, "vulnerability/view_overview.html")
@bp.route("/<vcdb_id>/review/<vuln_id>", methods=["GET", "POST"])
@skip_authorization # authz is done inline
def vuln_review(vcdb_id, vuln_id):
vulnerability_details = get_vulnerability_details(vcdb_id,
simplify_id=False)
vuln = vulnerability_details.get_or_create_vulnerability()
proposal_vulnerability_details = get_vulnerability_details(
None, vuln_id=vuln_id, simplify_id=False)
proposal_vuln = proposal_vulnerability_details \
.get_or_create_vulnerability()
ensure(READ, proposal_vuln)
form_reject = VulnerabilityProposalReject()
form_approve = VulnerabilityProposalApprove()
form_assign = VulnerabilityProposalAssign()
form_unassign = VulnerabilityProposalUnassign()
form_publish = VulnerabilityProposalPublish()
if request.method == 'POST':
if (request.form["review_response"] == "assign"
and form_assign.validate_on_submit()):
ensure(ASSIGN, proposal_vuln)
if proposal_vuln.is_reviewable():
proposal_vuln.accept_review(g.user)
db.session.add(proposal_vuln)
db.session.commit()
flash("The review was successfully assigned to you.",
"success")
return redirect(request.url)
flash_error("This entry is not in a reviewable state.")
if (request.form["review_response"] == "unassign"
and form_unassign.validate_on_submit()):
ensure(ASSIGN, proposal_vuln)
if proposal_vuln.is_reviewer(g.user):
proposal_vuln.deny_review()
db.session.add(proposal_vuln)
db.session.commit()
flash("You successfully unassigned yourself from this review.",
"success")
return redirect(request.url)
flash_error("This entry is not assigned to you.")
if (request.form["review_response"] == "approve"
and form_approve.validate_on_submit()):
ensure(APPROVE, proposal_vuln)
proposal_vuln.accept_change()
db.session.add(proposal_vuln)
db.session.commit()
flash(
"You approved the proposal. "
"Waiting for the entry to be published by an admin.",
"success")
return redirect(request.url)
if (request.form["review_response"] == "reject"
and form_reject.validate_on_submit()):
ensure(REJECT, proposal_vuln)
proposal_vuln.deny_change(g.user,
form_reject.data["review_feedback"])
db.session.add(proposal_vuln)
db.session.commit()
flash("Waiting for the author to address your feedback.",
"success")
return redirect(request.url)
if (request.form["review_response"] == "publish"
and form_publish.validate_on_submit()):
ensure('PUBLISH', proposal_vuln)
proposal_vuln.publish_change()
db.session.add(proposal_vuln)
db.session.commit()
# This might be the first entry of its kind
# so no archiving is necessary.
if vuln.state:
vuln.archive_entry()
db.session.add(vuln)
db.session.commit()
flash("Entry was successfully published.", "success")
return redirect(request.url)
# Published entries can't be reviewed.
# if view.state == VulnerabilityState.PUBLISHED:
# raise RequestRedirect("/" + str(vcdb_id))
return render_template(
"vulnerability/review/review.html",
proposal_vulnerability_details=proposal_vulnerability_details,
vulnerability_details=vulnerability_details,
form_assign=form_assign,
form_unassign=form_unassign,
form_reject=form_reject,
form_approve=form_approve,
form_publish=form_publish)
# Catch-all route for vulnerability identifiers.
@bp.route("/<vcdb_id>")
@skip_authorization
def vuln_view(vcdb_id=None):
    """Show the overview page for annotated entries, details otherwise."""
    details = get_vulnerability_details(vcdb_id)
    template = ("vulnerability/view_overview.html"
                if details.vulnerability_view.annotated
                else "vulnerability/view_details.html")
    return render_template(template, vulnerability_details=details)
@bp.route("/<vcdb_id>/details")
@skip_authorization
def vuln_view_details(vcdb_id):
return view_vuln(vcdb_id, "vulnerability/view_details.html")
@bp.route("/<vcdb_id>/editor")
@skip_authorization
def vuln_editor(vcdb_id):
vulnerability_details = get_vulnerability_details(vcdb_id)
ensure(ANNOTATE, vulnerability_details.get_vulnerability())
return view_vuln(vcdb_id, "vulnerability/code_editor.html")
@bp.route("/<vcdb_id>/tree")
@skip_authorization
def vuln_file_tree(vcdb_id):
vulnerability_details = get_vulnerability_details(vcdb_id)
view = vulnerability_details.vulnerability_view
master_commit = view.master_commit
if not master_commit:
abort(404)
status_code = 200
content_type = "text/json"
response_msg = master_commit.tree_cache
if not response_msg:
try:
vulnerability_details.fetch_tree_cache(skip_errors=False,
max_timeout=10)
response_msg = master_commit.tree_cache
except urllib.error.HTTPError as err:
status_code = err.code
response_msg = "".join([
"VCS proxy is unreachable (it might be down).",
"\r\nHTTPError\r\n",
err.read(),
])
content_type = "text/plain"
except urllib.error.URLError as err:
status_code = 400
response_msg = "".join([
"VCS proxy is unreachable (it might be down).",
"\r\nURLError\r\n",
str(err.reason),
])
content_type = "text/plain"
except Exception: # pylint: disable=broad-except
status_code = 400
content_type = "text/plain"
response_msg = "VCS proxy is unreachable (it might be down)."
return Response(response=response_msg,
status=status_code,
content_type=content_type)
@bp.route("/<vcdb_id>/annotation_data")
@skip_authorization
def annotation_data(vcdb_id):
vulnerability_details = get_vulnerability_details(vcdb_id)
vulnerability_details.validate_and_simplify_id()
view = vulnerability_details.vulnerability_view
master_commit = view.master_commit
if not master_commit:
logging.error("Vuln (id: %r) has no linked Git commits!", view.id)
return create_json_response("Entry has no linked Git link!", 404)
master_commit = vulnerability_details.get_master_commit()
files_schema = RepositoryFilesSchema(many=True)
return files_schema.jsonify(master_commit.repository_files)
@bp.route("/<vcdb_id>/file_provider")
@skip_authorization
def file_provider(vcdb_id):
vulnerability_details = get_vulnerability_details(vcdb_id)
vulnerability_details.validate_and_simplify_id()
item_hash = request.args.get("item_hash", 0, type=str)
item_path = request.args.get("item_path", None, type=str)
proxy_target = (cfg.GCE_VCS_PROXY_URL + url_for(
"vcs_proxy.main_api",
repo_url=vulnerability_details.repo_url,
item_path=item_path,
item_hash=item_hash,
)[1:])
try:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.load_verify_locations(cafile=cfg.APP_CERT_FILE)
ctx.verify_mode = ssl.CERT_REQUIRED
result = urllib.request.urlopen(proxy_target, context=ctx) # nosec
except urllib.error.HTTPError as err:
return Response(response=err.read(),
status=err.code,
content_type="text/plain")
return send_file(result, mimetype="application/octet-stream")
@bp.route("/<vcdb_id>/embed")
@skip_authorization
def embed(vcdb_id):
try:
section_id = int(request.args.get("sid", -1))
start_line = int(request.args.get("start_line", 1))
end_line = int(request.args.get("end_line", -1))
vulnerability_details = VulnerabilityDetails(vcdb_id)
vulnerability_details.validate_and_simplify_id()
view = vulnerability_details.vulnerability_view
if not view:
return make_response(("No vulnerability found", 404))
if not view.master_commit:
return make_response(
(f"Vuln (id: {view.id}) has no linked Git commits!", 404))
master_commit = vulnerability_details.get_master_commit()
files_schema = RepositoryFilesSchema(many=True)
# Hack to quickly retrieve the full data.
custom_data = json.loads(
files_schema.jsonify(master_commit.repository_files).data)
settings = {
"section_id": section_id,
"startLine": start_line,
"endLine": end_line,
"entry_data": custom_data,
}
return render_template(
"vulnerability/embedded.html",
vulnerability_details=vulnerability_details,
embed_settings=settings,
)
except (ValueError, InvalidIdentifierException):
return make_response(("No vulnerability found", 404))
@bp.route("/<vcdb_id>/create", methods=["GET", "POST"])
@bp.route("/create", methods=["GET", "POST"])
@requires(CREATE, Vulnerability)
def create_vuln(vcdb_id=None):
return _create_vuln_internal(vcdb_id)
def _create_vuln_internal(vcdb_id=None):
    """Create or update a vulnerability entry; also handles deletion of a
    preexisting entry when the delete form validates."""
    try:
        vulnerability_details = VulnerabilityDetails(vcdb_id)
        vulnerability = vulnerability_details.get_or_create_vulnerability()
    except InvalidIdentifierException as err:
        return flash_error(str(err), "frontend.serve_index")
    if vulnerability.id:
        logging.debug("Preexisting vulnerability entry found: %r",
                      vulnerability.id)
        delete_form = VulnerabilityDeleteForm()
        if delete_form.validate_on_submit():
            db.session.delete(vulnerability)
            # Remove the entry.
            db.session.commit()
            flash("The entry was deleted.", "success")
            return redirect("/")
    form = VulnerabilityDetailsForm(obj=vulnerability)
    commit = form.data["commits"][0]
    if not commit["repo_name"]:
        # Best-effort: derive the repository name from the repo URL.
        logging.info("Empty repository name. %r", commit)
        repo_url = commit["repo_url"]
        vcs_handler = get_vcs_handler(None, repo_url)
        if vcs_handler:
            logging.info("Found name. %r", vcs_handler.repo_name)
            form.commits[0].repo_name.process_data(vcs_handler.repo_name)
    if form.validate_on_submit():
        try:
            form.populate_obj(vulnerability)
            db.session.add(vulnerability)
            db.session.commit()
            # TODO: Improve this hack to assign a new vcdb_id here.
            # Currently, we are just piggy backing on the auto increment
            # of the primary key to ensure uniqueness.
            # This will likely be prone to race conditions.
            vulnerability.vcdb_id = vulnerability.id
            db.session.add(vulnerability)
            db.session.commit()
            logging.debug("Successfully created/updated entry: %r",
                          vulnerability.id)
            flash("Successfully created/updated entry.", "success")
            return redirect(
                url_for("vuln.vuln_view", vcdb_id=vulnerability.vcdb_id))
        except InvalidIdentifierException as err:
            flash_error(str(err))
    return render_template("vulnerability/create.html",
                           vulnerability_details=vulnerability_details,
                           form=form)
def add_proposal(vuln: Vulnerability,
                 form: VulnerabilityDetailsForm) -> Optional[Vulnerability]:
    """
    Attempts to create a proposal entry which is basically a copy of an
    existing Vulnerability entry.
    :param vuln: the entry the proposal is based on.
    :param form: validated form data used to populate the proposal copy.
    :return: A new Vulnerability copy of the existing entry, or None when
             the products are invalid or no effective changes were made.
    """
    vuln_clone = vuln.copy()
    form.populate_obj(vuln_clone)
    try:
        update_products(vuln_clone)
    except InvalidProducts as ex:
        flash_error(ex.args[0])
        return None
    with db.session.no_autoflush:
        changes = vuln.diff(vuln_clone)
        # ignore metadata
        clean_vulnerability_changes(changes)
    if not changes:
        flash_error("No changes detected. "
                    "Please modify the entry first to propose a change")
        return None
    logging.debug("Detected changes: %r", changes)
    vuln_clone.version = None
    vuln_clone.prev_version = vuln.version
    vuln_clone.state = VulnerabilityState.READY
    vuln_clone.creator = g.user
    # Reset any previous feedback data.
    vuln_clone.reviewer_id = None
    vuln_clone.review_feedback = None
    db.session.add(vuln_clone)
    db.session.commit()
    if not vuln_clone.vcdb_id:
        # TODO: Improve this hack to assign a new vcdb_id here.
        # Currently, we are just piggy backing on the auto increment of
        # the primary key to ensure uniqueness.
        # This will likely be prone to race conditions.
        vuln_clone.vcdb_id = vuln_clone.id
        db.session.add(vuln_clone)
        db.session.commit()
    flash("Your proposal will be reviewed soon.", "success")
    return vuln_clone
@bp.route("/<vcdb_id>/edit", methods=["GET", "POST"])
@bp.route("/edit", methods=["GET", "POST"])
@requires(EDIT, Vulnerability)
def edit_vuln(vcdb_id=None):
return _edit_vuln_internal(vcdb_id)
def _can_add_proposal(vuln):
    """Check whether the current user may open a new proposal for *vuln*.

    Conditions for creating a proposal:
     - No pending open proposals by the same user.
     - Proposals can only be made for currently PUBLISHED entries only.
    """
    # TODO: Simplify or move away the query below.
    pending_proposal = Vulnerability.query.filter(
        or_(Vulnerability.vcdb_id == vuln.vcdb_id,
            Vulnerability.cve_id == vuln.cve_id),
        Vulnerability.creator == g.user,
        Vulnerability.state != VulnerabilityState.PUBLISHED,
        Vulnerability.state != VulnerabilityState.ARCHIVED).first()
    if not pending_proposal:
        return True
    flash_error("You already have a pending proposal for this entry. "
                "Please go to your proposals section.")
    return False
def _edit_vuln_internal(vcdb_id: str = None):
    """Edit flow: render the edit form and, on valid submission, turn the
    changes into a review proposal via add_proposal()."""
    vulnerability_details = get_vulnerability_details(vcdb_id,
                                                      simplify_id=False)
    view = vulnerability_details.vulnerability_view
    vuln = vulnerability_details.get_or_create_vulnerability()
    if not _can_add_proposal(vuln):
        return redirect(url_for("vuln.vuln_view", vcdb_id=vcdb_id))
    # Populate the form data from the vulnerability view if necessary.
    # Updating the vuln instance allows to easier diff the changes.
    if vuln.comment == "":
        vuln.comment = view.comment
    form = VulnerabilityDetailsForm(obj=vuln)
    form_submitted = form.validate_on_submit()
    commit = form.data["commits"][0]
    if form_submitted and commit["commit_link"]:
        # Derive repo name/url/hash from the submitted commit link.
        vcs_handler = get_vcs_handler(None, commit["commit_link"])
        if not vcs_handler:
            flash_error("Invalid commit link specified.")
            return render_template("vulnerability/edit.html",
                                   vulnerability_details=vulnerability_details,
                                   form=form)
        logging.info("Found name. %r", vcs_handler.repo_name)
        form.commits[0].repo_name.process_data(vcs_handler.repo_name)
        form.commits[0].repo_url.process_data(vcs_handler.repo_url)
        form.commits[0].commit_hash.process_data(vcs_handler.commit_hash)
    if form_submitted:
        proposal_vuln = add_proposal(vuln, form)
        if proposal_vuln:
            return redirect(
                url_for('vuln.vuln_review',
                        vcdb_id=view.id,
                        vuln_id=proposal_vuln.vcdb_id))
    with db.session.no_autoflush:
        return render_template("vulnerability/edit.html",
                               vulnerability_details=vulnerability_details,
                               form=form)
@bp.route("/<vcdb_id>/delete", methods=["DELETE", "POST", "GET"])
@bp.route("/delete", methods=["DELETE", "POST", "GET"])
@requires(DELETE, Vulnerability)
def delete_vuln(vcdb_id=None):
# TODO implement revert & delete
del vcdb_id
abort(404)
vulnerability_details = get_vulnerability_details(vcdb_id,
None,
simplify_id=False)
vuln = vulnerability_details.get_vulnerability()
if not vuln:
abort(404)
if vuln.state == VulnerabilityState.PUBLISHED:
flash_error("Can't delete a published entry w/o reverting it first")
return redirect(url_for('vuln.vuln_view'))
if vuln.state == VulnerabilityState.ARCHIVED:
flash_error("Can't delete an archived")
return redirect(url_for('vuln.vuln_view'))
# repr VCS proxy response on error
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import urllib.error
import urllib.parse
import urllib.request
import ssl
from typing import Optional
from flask import (
Blueprint,
redirect,
flash,
request,
render_template,
abort,
url_for,
Response,
send_file,
make_response,
g,
)
from sqlalchemy import or_
from bouncer.constants import EDIT, CREATE, DELETE, READ # type: ignore
import cfg
from app import flash_error
from app.auth.acls import requires, skip_authorization, ensure
from app.exceptions import InvalidIdentifierException, InvalidProducts
from app.vulnerability.views.details import VulnerabilityDetails
from data.database import DEFAULT_DATABASE as db
from data.forms import (VulnerabilityDeleteForm, VulnerabilityDetailsForm,
VulnerabilityProposalReject,
VulnerabilityProposalApprove,
VulnerabilityProposalAssign,
VulnerabilityProposalPublish,
VulnerabilityProposalUnassign)
from data.models import RepositoryFilesSchema, Vulnerability
from data.models.vulnerability import (ANNOTATE, VulnerabilityState, ASSIGN,
APPROVE, REJECT)
from lib.utils import (create_json_response, update_products,
get_vulnerability_details, clean_vulnerability_changes)
from lib.vcs_management import get_vcs_handler
# All vulnerability views hang off the site root (e.g. /<vcdb_id>, /create).
bp = Blueprint("vuln", __name__, url_prefix="/")
def view_vuln(vcdb_id, use_template):
    """Render *use_template* for the entry *vcdb_id*; flash+redirect on an
    invalid identifier, 404 when no view data exists."""
    try:
        vulnerability_details = VulnerabilityDetails(vcdb_id)
        vulnerability_details.validate_and_simplify_id()
        if not vulnerability_details.vulnerability_view:
            abort(404)
    except InvalidIdentifierException as err:
        return flash_error(str(err), "frontend.serve_index")
    return render_template(use_template,
                           vulnerability_details=vulnerability_details)
@bp.route("/vuln", methods=["POST"])
def vuln_view_post():
return view_vuln(None, "vulnerability/view_overview.html")
@bp.route("/<vcdb_id>/review/<vuln_id>", methods=["GET", "POST"])
@skip_authorization # authz is done inline
def vuln_review(vcdb_id, vuln_id):
vulnerability_details = get_vulnerability_details(vcdb_id,
simplify_id=False)
vuln = vulnerability_details.get_or_create_vulnerability()
proposal_vulnerability_details = get_vulnerability_details(
None, vuln_id=vuln_id, simplify_id=False)
proposal_vuln = proposal_vulnerability_details \
.get_or_create_vulnerability()
ensure(READ, proposal_vuln)
form_reject = VulnerabilityProposalReject()
form_approve = VulnerabilityProposalApprove()
form_assign = VulnerabilityProposalAssign()
form_unassign = VulnerabilityProposalUnassign()
form_publish = VulnerabilityProposalPublish()
if request.method == 'POST':
if (request.form["review_response"] == "assign"
and form_assign.validate_on_submit()):
ensure(ASSIGN, proposal_vuln)
if proposal_vuln.is_reviewable():
proposal_vuln.accept_review(g.user)
db.session.add(proposal_vuln)
db.session.commit()
flash("The review was successfully assigned to you.",
"success")
return redirect(request.url)
flash_error("This entry is not in a reviewable state.")
if (request.form["review_response"] == "unassign"
and form_unassign.validate_on_submit()):
ensure(ASSIGN, proposal_vuln)
if proposal_vuln.is_reviewer(g.user):
proposal_vuln.deny_review()
db.session.add(proposal_vuln)
db.session.commit()
flash("You successfully unassigned yourself from this review.",
"success")
return redirect(request.url)
flash_error("This entry is not assigned to you.")
if (request.form["review_response"] == "approve"
and form_approve.validate_on_submit()):
ensure(APPROVE, proposal_vuln)
proposal_vuln.accept_change()
db.session.add(proposal_vuln)
db.session.commit()
flash(
"You approved the proposal. "
"Waiting for the entry to be published by an admin.",
"success")
return redirect(request.url)
if (request.form["review_response"] == "reject"
and form_reject.validate_on_submit()):
ensure(REJECT, proposal_vuln)
proposal_vuln.deny_change(g.user,
form_reject.data["review_feedback"])
db.session.add(proposal_vuln)
db.session.commit()
flash("Waiting for the author to address your feedback.",
"success")
return redirect(request.url)
if (request.form["review_response"] == "publish"
and form_publish.validate_on_submit()):
ensure('PUBLISH', proposal_vuln)
proposal_vuln.publish_change()
db.session.add(proposal_vuln)
db.session.commit()
# This might be the first entry of its kind
# so no archiving is necessary.
if vuln.state:
vuln.archive_entry()
db.session.add(vuln)
db.session.commit()
flash("Entry was successfully published.", "success")
return redirect(request.url)
# Published entries can't be reviewed.
# if view.state == VulnerabilityState.PUBLISHED:
# raise RequestRedirect("/" + str(vcdb_id))
return render_template(
"vulnerability/review/review.html",
proposal_vulnerability_details=proposal_vulnerability_details,
vulnerability_details=vulnerability_details,
form_assign=form_assign,
form_unassign=form_unassign,
form_reject=form_reject,
form_approve=form_approve,
form_publish=form_publish)
# Create a catch all route for vulnerability identifiers.
@bp.route("/<vcdb_id>")
@skip_authorization
def vuln_view(vcdb_id=None):
    """Show the overview page for annotated entries, details otherwise."""
    vulnerability_details = get_vulnerability_details(vcdb_id)
    view = vulnerability_details.vulnerability_view
    use_template = "vulnerability/view_details.html"
    if view.annotated:
        use_template = "vulnerability/view_overview.html"
    return render_template(use_template,
                           vulnerability_details=vulnerability_details)
@bp.route("/<vcdb_id>/details")
@skip_authorization
def vuln_view_details(vcdb_id):
return view_vuln(vcdb_id, "vulnerability/view_details.html")
@bp.route("/<vcdb_id>/editor")
@skip_authorization
def vuln_editor(vcdb_id):
vulnerability_details = get_vulnerability_details(vcdb_id)
ensure(ANNOTATE, vulnerability_details.get_vulnerability())
return view_vuln(vcdb_id, "vulnerability/code_editor.html")
@bp.route("/<vcdb_id>/tree")
@skip_authorization
def vuln_file_tree(vcdb_id):
vulnerability_details = get_vulnerability_details(vcdb_id)
view = vulnerability_details.vulnerability_view
master_commit = view.master_commit
if not master_commit:
abort(404)
status_code = 200
content_type = "text/json"
response_msg = master_commit.tree_cache
if not response_msg:
try:
vulnerability_details.fetch_tree_cache(skip_errors=False,
max_timeout=10)
response_msg = master_commit.tree_cache
except urllib.error.HTTPError as err:
status_code = err.code
with err:
body = repr(err.read())
response_msg = "".join([
"VCS proxy is unreachable (it might be down).",
"\r\nHTTPError\r\n",
body,
])
content_type = "text/plain"
except urllib.error.URLError as err:
status_code = 400
response_msg = "".join([
"VCS proxy is unreachable (it might be down).",
"\r\nURLError\r\n",
str(err.reason),
])
content_type = "text/plain"
except Exception: # pylint: disable=broad-except
status_code = 400
content_type = "text/plain"
response_msg = "VCS proxy is unreachable (it might be down)."
return Response(response=response_msg,
status=status_code,
content_type=content_type)
@bp.route("/<vcdb_id>/annotation_data")
@skip_authorization
def annotation_data(vcdb_id):
vulnerability_details = get_vulnerability_details(vcdb_id)
vulnerability_details.validate_and_simplify_id()
view = vulnerability_details.vulnerability_view
master_commit = view.master_commit
if not master_commit:
logging.error("Vuln (id: %r) has no linked Git commits!", view.id)
return create_json_response("Entry has no linked Git link!", 404)
master_commit = vulnerability_details.get_master_commit()
files_schema = RepositoryFilesSchema(many=True)
return files_schema.jsonify(master_commit.repository_files)
@bp.route("/<vcdb_id>/file_provider")
@skip_authorization
def file_provider(vcdb_id):
vulnerability_details = get_vulnerability_details(vcdb_id)
vulnerability_details.validate_and_simplify_id()
item_hash = request.args.get("item_hash", 0, type=str)
item_path = request.args.get("item_path", None, type=str)
proxy_target = (cfg.GCE_VCS_PROXY_URL + url_for(
"vcs_proxy.main_api",
repo_url=vulnerability_details.repo_url,
item_path=item_path,
item_hash=item_hash,
)[1:])
try:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.load_verify_locations(cafile=cfg.APP_CERT_FILE)
ctx.verify_mode = ssl.CERT_REQUIRED
result = urllib.request.urlopen(proxy_target, context=ctx) # nosec
except urllib.error.HTTPError as err:
return Response(response=err.read(),
status=err.code,
content_type="text/plain")
return send_file(result, mimetype="application/octet-stream")
@bp.route("/<vcdb_id>/embed")
@skip_authorization
def embed(vcdb_id):
try:
section_id = int(request.args.get("sid", -1))
start_line = int(request.args.get("start_line", 1))
end_line = int(request.args.get("end_line", -1))
vulnerability_details = VulnerabilityDetails(vcdb_id)
vulnerability_details.validate_and_simplify_id()
view = vulnerability_details.vulnerability_view
if not view:
return make_response(("No vulnerability found", 404))
if not view.master_commit:
return make_response(
(f"Vuln (id: {view.id}) has no linked Git commits!", 404))
master_commit = vulnerability_details.get_master_commit()
files_schema = RepositoryFilesSchema(many=True)
# Hack to quickly retrieve the full data.
custom_data = json.loads(
files_schema.jsonify(master_commit.repository_files).data)
settings = {
"section_id": section_id,
"startLine": start_line,
"endLine": end_line,
"entry_data": custom_data,
}
return render_template(
"vulnerability/embedded.html",
vulnerability_details=vulnerability_details,
embed_settings=settings,
)
except (ValueError, InvalidIdentifierException):
return make_response(("No vulnerability found", 404))
@bp.route("/<vcdb_id>/create", methods=["GET", "POST"])
@bp.route("/create", methods=["GET", "POST"])
@requires(CREATE, Vulnerability)
def create_vuln(vcdb_id=None):
return _create_vuln_internal(vcdb_id)
def _create_vuln_internal(vcdb_id=None):
    """Create or update a vulnerability entry; also handles deletion of a
    preexisting entry when the delete form validates."""
    try:
        vulnerability_details = VulnerabilityDetails(vcdb_id)
        vulnerability = vulnerability_details.get_or_create_vulnerability()
    except InvalidIdentifierException as err:
        return flash_error(str(err), "frontend.serve_index")
    if vulnerability.id:
        logging.debug("Preexisting vulnerability entry found: %r",
                      vulnerability.id)
        delete_form = VulnerabilityDeleteForm()
        if delete_form.validate_on_submit():
            db.session.delete(vulnerability)
            # Remove the entry.
            db.session.commit()
            flash("The entry was deleted.", "success")
            return redirect("/")
    form = VulnerabilityDetailsForm(obj=vulnerability)
    commit = form.data["commits"][0]
    if not commit["repo_name"]:
        # Best-effort: derive the repository name from the repo URL.
        logging.info("Empty repository name. %r", commit)
        repo_url = commit["repo_url"]
        vcs_handler = get_vcs_handler(None, repo_url)
        if vcs_handler:
            logging.info("Found name. %r", vcs_handler.repo_name)
            form.commits[0].repo_name.process_data(vcs_handler.repo_name)
    if form.validate_on_submit():
        try:
            form.populate_obj(vulnerability)
            db.session.add(vulnerability)
            db.session.commit()
            # TODO: Improve this hack to assign a new vcdb_id here.
            # Currently, we are just piggy backing on the auto increment
            # of the primary key to ensure uniqueness.
            # This will likely be prone to race conditions.
            vulnerability.vcdb_id = vulnerability.id
            db.session.add(vulnerability)
            db.session.commit()
            logging.debug("Successfully created/updated entry: %r",
                          vulnerability.id)
            flash("Successfully created/updated entry.", "success")
            return redirect(
                url_for("vuln.vuln_view", vcdb_id=vulnerability.vcdb_id))
        except InvalidIdentifierException as err:
            flash_error(str(err))
    return render_template("vulnerability/create.html",
                           vulnerability_details=vulnerability_details,
                           form=form)
def add_proposal(vuln: Vulnerability,
                 form: VulnerabilityDetailsForm) -> Optional[Vulnerability]:
    """
    Attempts to create a proposal entry which is basically a copy of an
    existing Vulnerability entry.
    :param vuln: the entry the proposal is based on.
    :param form: validated form data used to populate the proposal copy.
    :return: A new Vulnerability copy of the existing entry, or None when
             the products are invalid or no effective changes were made.
    """
    vuln_clone = vuln.copy()
    form.populate_obj(vuln_clone)
    try:
        update_products(vuln_clone)
    except InvalidProducts as ex:
        flash_error(ex.args[0])
        return None
    with db.session.no_autoflush:
        changes = vuln.diff(vuln_clone)
        # ignore metadata
        clean_vulnerability_changes(changes)
    if not changes:
        flash_error("No changes detected. "
                    "Please modify the entry first to propose a change")
        return None
    logging.debug("Detected changes: %r", changes)
    vuln_clone.version = None
    vuln_clone.prev_version = vuln.version
    vuln_clone.state = VulnerabilityState.READY
    vuln_clone.creator = g.user
    # Reset any previous feedback data.
    vuln_clone.reviewer_id = None
    vuln_clone.review_feedback = None
    db.session.add(vuln_clone)
    db.session.commit()
    if not vuln_clone.vcdb_id:
        # TODO: Improve this hack to assign a new vcdb_id here.
        # Currently, we are just piggy backing on the auto increment of
        # the primary key to ensure uniqueness.
        # This will likely be prone to race conditions.
        vuln_clone.vcdb_id = vuln_clone.id
        db.session.add(vuln_clone)
        db.session.commit()
    flash("Your proposal will be reviewed soon.", "success")
    return vuln_clone
@bp.route("/<vcdb_id>/edit", methods=["GET", "POST"])
@bp.route("/edit", methods=["GET", "POST"])
@requires(EDIT, Vulnerability)
def edit_vuln(vcdb_id=None):
return _edit_vuln_internal(vcdb_id)
def _can_add_proposal(vuln):
    # Conditions for creating a proposal:
    """
    - No pending open proposals by the same user.
    - Proposals can only be made for currently PUBLISHED entries only.
    """
    # TODO: Simplify or move away the query below.
    existing_user_proposals = Vulnerability.query.filter(
        or_(Vulnerability.vcdb_id == vuln.vcdb_id,
            Vulnerability.cve_id == vuln.cve_id),
        Vulnerability.creator == g.user,
        Vulnerability.state != VulnerabilityState.PUBLISHED,
        Vulnerability.state != VulnerabilityState.ARCHIVED).first()
    if existing_user_proposals:
        flash_error("You already have a pending proposal for this entry. "
                    "Please go to your proposals section.")
        return False
    return True
def _edit_vuln_internal(vcdb_id: str = None):
    """Shared implementation behind the edit routes.

    Renders the edit form for the entry identified by *vcdb_id* and, on a
    valid submission, turns the edits into a change proposal via
    ``add_proposal``. Redirects to the review page on success, otherwise
    re-renders the form.
    """
    vulnerability_details = get_vulnerability_details(vcdb_id,
                                                      simplify_id=False)
    view = vulnerability_details.vulnerability_view
    vuln = vulnerability_details.get_or_create_vulnerability()
    # Bail out early (back to the view page) if the user already has a
    # pending proposal for this entry.
    if not _can_add_proposal(vuln):
        return redirect(url_for("vuln.vuln_view", vcdb_id=vcdb_id))
    # Populate the form data from the vulnerability view if necessary.
    # Updating the vuln instance allows to easier diff the changes.
    if vuln.comment == "":
        vuln.comment = view.comment
    form = VulnerabilityDetailsForm(obj=vuln)
    form_submitted = form.validate_on_submit()
    # NOTE(review): commits[0] is read unconditionally — assumes the form
    # always carries at least one commit entry (WTForms min_entries?);
    # TODO confirm, otherwise this raises IndexError on an empty list.
    commit = form.data["commits"][0]
    if form_submitted and commit["commit_link"]:
        # Derive repo name/url and commit hash from the pasted link; the
        # first argument (resource path) is intentionally None here.
        vcs_handler = get_vcs_handler(None, commit["commit_link"])
        if not vcs_handler:
            flash_error("Invalid commit link specified.")
            return render_template("vulnerability/edit.html",
                                   vulnerability_details=vulnerability_details,
                                   form=form)
        logging.info("Found name. %r", vcs_handler.repo_name)
        # Backfill the parsed VCS fields into the first commit subform so
        # they are persisted with the proposal.
        form.commits[0].repo_name.process_data(vcs_handler.repo_name)
        form.commits[0].repo_url.process_data(vcs_handler.repo_url)
        form.commits[0].commit_hash.process_data(vcs_handler.commit_hash)
    if form_submitted:
        proposal_vuln = add_proposal(vuln, form)
        # add_proposal returns None when no changes were detected or the
        # products were invalid; in that case fall through and re-render.
        if proposal_vuln:
            return redirect(
                url_for('vuln.vuln_review',
                        vcdb_id=view.id,
                        vuln_id=proposal_vuln.vcdb_id))
    # no_autoflush: rendering must not flush the locally mutated (but
    # uncommitted) vuln instance back to the database.
    with db.session.no_autoflush:
        return render_template("vulnerability/edit.html",
                               vulnerability_details=vulnerability_details,
                               form=form)
@bp.route("/<vcdb_id>/delete", methods=["DELETE", "POST", "GET"])
@bp.route("/delete", methods=["DELETE", "POST", "GET"])
@requires(DELETE, Vulnerability)
def delete_vuln(vcdb_id=None):
    """Delete a vulnerability entry — not implemented yet; always 404s.

    Intended flow once revert & delete lands: load the entry for
    *vcdb_id* (404 when missing), refuse deletion of PUBLISHED entries
    (they must be reverted first) and of ARCHIVED entries, flashing an
    error and redirecting back to the view page in those cases.
    """
    # TODO: implement revert & delete per the docstring above.
    # The previous implementation sketch below the abort() was unreachable
    # dead code (and would have raised NameError, since vcdb_id is deleted
    # here before being used); it has been removed.
    del vcdb_id
    abort(404)
# NOTE(review): stray dataset-viewer text ("Subsets and Splits / No community
# queries yet ...") was accidentally appended here during extraction; it is
# not part of the program and has been commented out.