input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>fbmeshd/linkstatsd/collector.py
#!/usr/bin/env python3
# Copyright (c) 2004-present, Facebook, Inc.
# All rights reserved.
import asyncio
import json
import logging
import os
import random
import re
import time
import typing
from collections import defaultdict
from datetime import MINYEAR, datetime, timedelta
from distutils.util import strtobool
from pathlib import Path
from typing import Any, DefaultDict, Dict, List, Optional, Tuple
import psutil
from magma.common.sdwatchdog import SDWatchdogTask
from magma.magmad.check.network_check import ping
from marconi.lib import meshquery, wifiproperties
from prometheus_client import REGISTRY, Gauge
from thrift.transport.TTransport import TTransportException
from wifi.protos.mconfig.wifi_mconfigs_pb2 import Linkstatsd as LinkstatsMconfig
# ---------------------------------------------------------------------------
# Prometheus metric definitions. All Gauges below are registered in the
# default REGISTRY at import time and are set by LinkstatsCollector.
# ---------------------------------------------------------------------------

# Per-link 802.11s station statistics (label: "link" = peer identifier).
TX_BYTES = Gauge("tx_bytes", "bytes sent on the link", ["link"])
RX_BYTES = Gauge("rx_bytes", "bytes received on the link", ["link"])
TX_RETRIED = Gauge("tx_retried_packets", "packets retried", ["link"])
TX_FAILED = Gauge("tx_failed_packets", "packets failed", ["link"])
RX_PACKETS = Gauge("rx_packets", "packets received on the link", ["link"])
RX_DROP_MISC = Gauge("rx_drop_misc", "dropped rx packets", ["link"])
RX_DROP_MISC_PCT = Gauge("rx_drop_misc_pct", "dropped rx packets percentage", ["link"])
TX_LAST_BITRATE = Gauge("tx_last_bitrate", "last tx frame bitrate (Mbps)", ["link"])
RX_LAST_BITRATE = Gauge("rx_last_bitrate", "last rx frame bitrate (Mbps)", ["link"])
EXPECTED_THROUGHPUT = Gauge(
    "expected_throughput", "expected throughput (Mbps)", ["link"]
)
FAIL_AVERAGE = Gauge("fail_average", "tx fail average (pct)", ["link"])
# Per-radio statistics (label: "dev" = wireless device/interface).
FCS_ERRORS = Gauge("fcs_errors", "mpdus with frame check errors", ["dev"])
RX_MPDUS = Gauge("rx_mpdus", "mpdus received on this device", ["dev"])
INACTIVE_TIME = Gauge("inactive_time", "link inactive time (ms)", ["link"])
SIGNAL = Gauge("signal", "link signal power (dbm)", ["link"])
# Ping results keyed by target host and metric name.
PING_STATS = Gauge("ping", "ping metrics", ["host", "metric"])
TMPFS_KBS = Gauge("tmpfs_kbs", "tmpfs filesystem size")
# Wired/AP interface byte counters and client counts.
ETH0_TX_BYTES = Gauge("eth0_tx_bytes", "bytes sent on eth0")
ETH0_RX_BYTES = Gauge("eth0_rx_bytes", "bytes received on eth0")
WLAN_TX_BYTES = Gauge("wlan_tx_bytes", "total bytes sent on wlan_soma")
WLAN_RX_BYTES = Gauge("wlan_rx_bytes", "total bytes received on wlan_soma")
WLAN_CLIENTS = Gauge("wlan_clients", "total number of clients on wlan_soma")
WLAN_DHCP_LEASES = Gauge("wlan_dhcp_leases", "total number of dhcp leases on wlan_soma")
# Mesh peer population on mesh0.
PEER_COUNT = Gauge("peer_count", "number of mesh0 peers")
PEER_ESTAB_COUNT = Gauge("peer_estab_count", "number of mesh0 peers in estab")
FREQUENCY = Gauge("frequency", "frequency (hz) in use", ["dev"])
RSSI_THRESHOLD = Gauge("rssi_threshold", "RSSI threshold for new peers")
# Channel survey counters, reported as deltas since the previous read.
CHANNEL_ACTIVE_TIME = Gauge(
    "channel_active_time", "channel active time (ms) since last read", ["dev"]
)
CHANNEL_BUSY_TIME = Gauge(
    "channel_busy_time", "channel busy time (ms) since last read", ["dev"]
)
CHANNEL_TRANSMIT_TIME = Gauge(
    "channel_transmit_time", "channel transmit time (ms) since last read", ["dev"]
)
CHANNEL_RECEIVE_TIME = Gauge(
    "channel_receive_time", "channel receive time (ms) since last read", ["dev"]
)
CHANNEL_NOISE_LEVEL = Gauge("channel_noise_level", "channel noise level (dBm)", ["dev"])
AIRTIME_METRIC = Gauge("airtime_metric", "802.11s metric to nexthop", ["link"])
TX_QUEUE_BUSY_RATE = Gauge(
    "txq_busy_rate", "Fraction of time that firmware tx queues are non-empty", ["dev"]
)
IMAGE_BUILD_TIME = Gauge("image_build_time", "image build time (epoch)")
# Path of the image metadata file used to populate IMAGE_BUILD_TIME.
IMAGE_METADATA_FILE = "/METADATA"
# Kernel mesh-forwarding debug counters for mesh0.
DROPPED_FRAMES_CONG = Gauge(
    "kernel_dropped_frames_congestion",
    "kernel debug: dropped frames (congestion) on mesh0",
)
DROPPED_FRAMES_NOROUTE = Gauge(
    "kernel_dropped_frames_no_route", "kernel debug: dropped frames( no-route) on mesh0"
)
DROPPED_FRAMES_TTL = Gauge(
    "kernel_dropped_frames_ttl", "kernel debug: dropped frames (ttl) on mesh0"
)
FWDED_MCAST = Gauge(
    "kernel_fwded_mcast", "kernel debug: forwarded multicast frames on mesh0"
)
FWDED_UNICAST = Gauge(
    "kernel_fwded_unicast", "kernel debug: forwarded unicast frames on mesh0"
)
# Gateway topology / routing metrics.
HOPS_TO_GATEWAY = Gauge("ap_hops_to_gateway", "number of hops to gateway")
HOPS_FROM_GATEWAY = Gauge("ap_hops_from_gateway", "number of hops from gateway")
ASYMMETRIC_ROUTE = Gauge(
    "asymmetric_route", "first/last hop to/from gateway is different"
)
METRIC_TO_GATEWAY = Gauge(
    "metric_to_gateway_11s", "a11s shortest path metric to gateway"
)
# iperf throughput measurements towards the gate.
IPERF_TO_GATE_TRAFFIC = Gauge(
    "iperf_to_gate", "iperf traffic results to gate right now"
)
IPERF_TO_GATE_RESULT = Gauge(
    "iperf_to_gate_result",
    "iperf throughput results to gate, runs from last 24 hours",
    ["agg"],
)
# System health metrics.
UPTIME = Gauge("uptime", "uptime of the device")
TX_QUEUE_SIZE = Gauge("tx_queue_size", "frames in tx queue", ["queue"])
PERSISTENT_FREE = Gauge("persistent_free_bytes", "free bytes in /persistent")
SERVICE_COUNTERS = Gauge(
    "service_counters",
    "number or systemd service starts in the last sampling period",
    ["starts"],
)
IS_SYSTEM_DEGRADED = Gauge(
    "is_system_degraded", "returns 0 if system is up and running, 1 otherwise"
)
DNS_REQUESTS = Gauge(
    "dns_requests", "number of DNS requests in the last sampling period", ["source"]
)
PROCESS_MEMORY_RSS = Gauge("process_memory_rss", "process RSS", ["process"])
# Control-plane overhead accounting per originating service.
OVERHEAD_PACKETS = Gauge(
    "overhead_packets",
    "Overheads per service (i.e. source of overhead) in packets",
    ["source"],
)
OVERHEAD_BYTES = Gauge(
    "overhead_bytes",
    "Overheads per service (i.e. source of overhead) in bytes",
    ["source"],
)
TTL_EXCEEDED = Gauge(
    "ttl_exceeded", "TTL exceeded messages (minus traceroute and LLMNR)"
)
# Mesh traffic breakdowns (packet-size histogram, QoS level, protocol).
MESH_HISTOGRAM_PACKETS = Gauge(
    "mesh_packets", "Histogram of packet sizes on the mesh", ["len"]
)
MESH_HISTOGRAM_BYTES = Gauge(
    "mesh_bytes", "Histogram of total bytes per packet size", ["len"]
)
MESH_QOS_PACKETS = Gauge("mesh_qos_packets", "Packet counts per IP QOS level", ["prio"])
MESH_QOS_BYTES = Gauge("mesh_qos_bytes", "Total bytes per IP QOS level", ["prio"])
MESH_PROTOCOL_PACKETS = Gauge(
    "mesh_proto_packets", "Packet counts per protocol", ["proto"]
)
MESH_PROTOCOL_BYTES = Gauge("mesh_proto_bytes", "Total bytes per protocol", ["proto"])
PROCESS_UPTIME = Gauge("process_uptime", "process uptime", ["process"])
# Gate ping latency distribution per priority class.
GATE_PING_AVG = Gauge("gate_ping_avg", "Gate ping average (ms)", ["prio"])
GATE_PING_P50 = Gauge("gate_ping_p50", "Gate ping median (ms)", ["prio"])
GATE_PING_P90 = Gauge("gate_ping_p90", "Gate ping 90 pctile (ms)", ["prio"])
GATE_PING_P99 = Gauge("gate_ping_p99", "Gate ping 99 pctile (ms)", ["prio"])
GATE_PING_LOSS = Gauge("gate_ping_loss", "Gate ping loss rate", ["prio"])
MESH_STABILITY = Gauge(
    "mesh_stability", "Counts the number of route/peer changes", ["type"]
)
RATE_LIMITING_MSG_CT = Gauge(
    "rate_limiting_message_count", "Rate limiting messages in past 120 seconds"
)
# 1-second-sampled averages for signal and tx bitrate.
AVG_SIGNAL = Gauge(
    "avg_signal", "signal averaged over samples collected every sec", ["link"]
)
AVG_TX_BITRATE = Gauge(
    "avg_tx_bitrate", "tx_bitrate averaged over samples collected every sec", ["link"]
)
# High-resolution session throughput aggregates.
AVG_HIRES_IN_SPEED = Gauge(
    "avg_hires_in_speed", "input speed in B/s averaged across all active sessions"
)
AVG_HIRES_OUT_SPEED = Gauge(
    "avg_hires_out_speed", "output speed in B/s averaged across all active sessions"
)
MIN_HIRES_IN_SPEED = Gauge(
    "min_hires_in_speed", "minimum input speed in B/s across all active sessions"
)
MIN_HIRES_OUT_SPEED = Gauge(
    "min_hires_out_speed", "minimum output speed in B/s across all active sessions"
)
# TCP retransmission percentages (overall and percentiles).
TCP_PKT_RETR_PCT = Gauge(
    "tcp_pkt_retr_pct",
    "percent of sampled TCP packets retransmitted across all connected sessions",
)
TCP_PKT_RETR_PCT_P10 = Gauge(
    "tcp_pkt_retr_pct_p10", "percent of sampled TCP packets retransmitted 10th pctile"
)
TCP_PKT_RETR_PCT_P50 = Gauge(
    "tcp_pkt_retr_pct_p50", "percent of sampled TCP packets retransmitted 50th pctile"
)
TCP_PKT_RETR_PCT_P90 = Gauge(
    "tcp_pkt_retr_pct_p90", "percent of sampled TCP packets retransmitted 90th pctile"
)
TCP_PKT_RETR_PCT_P99 = Gauge(
    "tcp_pkt_retr_pct_p99", "percent of sampled TCP packets retransmitted 99th pctile"
)
COREDUMPS = Gauge("coredumps", "Number of coredumps on a device")
# Drop-monitor activity rates.
DROP_MONITOR_ACTION = Gauge(
    "drop_monitor_action", "Rate of neighbor drops by the drop-monitor"
)
DROP_MONITOR_PING_FAIL = Gauge(
    "drop_monitor_ping_fail", "Rate of ping fails on nexthop links"
)
DROP_MONITOR_NO_PEER = Gauge(
    "drop_monitor_no_peer", "Rate of non-existent peers on nexthop links"
)
# fbmeshd gateway-connectivity-monitor counters, windowed by "period".
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_ROUTE_DAMPENER_DEFAULT_ROUTE_DAMPENED_AVG = Gauge(
    "fbmeshd_gateway_connectivity_monitor_route_dampener_default_route_dampened_avg",
    "Default route dampened average over time (s)",
    ["period"],
)
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_ROUTE_DAMPENER_DEFAULT_ROUTE_HISTORY_AVG = Gauge(
    "fbmeshd_gateway_connectivity_monitor_route_dampener_default_route_history_avg",
    "Default route dampened average over time (s)",
    ["period"],
)
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_SUCCESS_SUM = Gauge(
    "fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_success_sum",
    "Probe WAN connectivity success sum over time (s)",
    ["period"],
)
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_IN_USE_SUM = Gauge(
    "fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_in_use_sum",
    "Probe WAN connectivity failure sum over time (s)",
    ["period"],
)
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_SOCKET_SUM = Gauge(
    "fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_socket_sum",
    "Probe WAN connectivity failure sum over time (s)",
    ["period"],
)
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_SETSOCKOPT_REUSEADDR_SUM = Gauge(
    "fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_setsockopt_reuseaddr_sum",
    "Probe WAN connectivity failure sum over time (s)",
    ["period"],
)
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_FCNTL_GETFL_SUM = Gauge(
    "fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_fcntl_getfl_sum",
    "Probe WAN connectivity failure sum over time (s)",
    ["period"],
)
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_FCNTL_SETFL_SUM = Gauge(
    "fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_fcntl_setfl_sum",
    "Probe WAN connectivity failure sum over time (s)",
    ["period"],
)
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_SETSOCKOPT_BINDTODEVICE_SUM = Gauge(
    "fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_setsockopt_bindtodevice_sum",
    "Probe WAN connectivity failure sum over time (s)",
    ["period"],
)
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_NOT_EINPROGRESS_SUM = Gauge(
    "fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_not_einprogress_sum",
    "Probe WAN connectivity failure sum over time (s)",
    ["period"],
)
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_TIMEOUT_SUM = Gauge(
    "fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_timeout_sum",
    "Probe WAN connectivity failure sum over time (s)",
    ["period"],
)
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_GETSOCKOPT_ERROR_SUM = Gauge(
    "fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_getsockopt_error_sum",
    "Probe WAN connectivity failure sum over time (s)",
    ["period"],
)
FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_ERR_NON_ZERO_SUM = Gauge(
    "fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_err_non_zero_sum",
    "Probe WAN connectivity failure sum over time (s)",
    ["period"],
)
CPU_AVG_USAGE_PCT = Gauge(
    "cpu_avg_percent", "System-wide average CPU utilization as a percentage"
)
# Assuming all SoMA internal mesh addresses are in the 172.16.0.0/24 subnet
MESH_PREFIX = "172.16.0"
# Location where the most recent iperf run's output is persisted.
IPERF_RESULT_PATH = Path("/persistent/last_iperf_result")
class LinkstatsCollector(SDWatchdogTask):
"""
Periodically polls mesh interfaces for link/network stats
And if enable_ping flag is set in the config, periodically pings
list of hosts (set in the config)
"""
def __init__(
self,
loop: asyncio.AbstractEventLoop,
config: Dict[str, str],
mconfig: LinkstatsMconfig,
) -> None:
super().__init__(int(config["sampling_period"]), loop)
self.config = config
self.mconfig = mconfig
self.mesh0_hw_addr = ""
self.mesh0_ipv4 = ""
# Ping parameters
self.gateway = ""
self.gateway_hop_count = 0
self.external_ping_params = [
ping.PingCommandParams(
host, self.mconfig.ping_num_packets, self.mconfig.ping_timeout_secs
)
for host in self.mconfig.ping_host_list
]
self._cpu_usage_prev = None
self._cpu_time_prev = 0.0
# For each way of determining the nexthop, store
# MAC address of nexthop on default route, without colons
self._nexthop_mac_sources: Dict[str, str] = {}
# Absolute metrics
# Values of these metrics are set to the values of their corresponding
# stats
self._abs_metrics = {
"tx bitrate": TX_LAST_BITRATE,
"rx bitrate": RX_LAST_BITRATE,
"expected throughput": EXPECTED_THROUGHPUT,
"fail_avg": FAIL_AVERAGE,
"inactive time": INACTIVE_TIME,
"signal": SIGNAL,
"wlan clients": WLAN_CLIENTS,
"wlan dhcp leases": WLAN_DHCP_LEASES,
"frequency": FREQUENCY,
"channel active time": CHANNEL_ACTIVE_TIME,
"channel busy time": CHANNEL_BUSY_TIME,
"channel receive time": CHANNEL_RECEIVE_TIME,
"channel transmit time": CHANNEL_TRANSMIT_TIME,
"noise": CHANNEL_NOISE_LEVEL,
"tx queue size": TX_QUEUE_SIZE,
"rssi_threshold": RSSI_THRESHOLD,
"is_system_degraded": IS_SYSTEM_DEGRADED,
"VmRSS": PROCESS_MEMORY_RSS,
"process_uptime": PROCESS_UPTIME,
"gate_ping_avg": GATE_PING_AVG,
"gate_ping_p50": GATE_PING_P50,
"gate_ping_p90": GATE_PING_P90,
"gate_ping_p99": GATE_PING_P99,
"gate_ping_loss": GATE_PING_LOSS,
"avg_tx_bitrate": AVG_TX_BITRATE,
"avg_signal": AVG_SIGNAL,
"rx drop misc pct": RX_DROP_MISC_PCT,
"tmpfs_kbs": TMPFS_KBS,
"fbmeshd_gateway_connectivity_monitor_route_dampener_default_route_dampened_avg": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_ROUTE_DAMPENER_DEFAULT_ROUTE_DAMPENED_AVG,
"fbmeshd_gateway_connectivity_monitor_route_dampener_default_route_history_avg": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_ROUTE_DAMPENER_DEFAULT_ROUTE_HISTORY_AVG,
"fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_success_sum": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_SUCCESS_SUM,
"fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_in_use_sum": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_IN_USE_SUM,
"fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_socket_sum": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_SOCKET_SUM,
"fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_setsockopt_reuseaddr_sum": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_SETSOCKOPT_REUSEADDR_SUM,
"fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_fcntl_getfl_sum": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_FCNTL_GETFL_SUM,
"fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_fcntl_setfl_sum": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_FCNTL_SETFL_SUM,
"fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_setsockopt_bindtodevice_sum": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_SETSOCKOPT_BINDTODEVICE_SUM,
"fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_not_einprogress_sum": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_NOT_EINPROGRESS_SUM,
"fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_timeout_sum": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_TIMEOUT_SUM,
"fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_getsockopt_error_sum": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_GETSOCKOPT_ERROR_SUM,
"fbmeshd_gateway_connectivity_monitor_probe_wan_connectivity_failed_err_non_zero_sum": FBMESHD_GATEWAY_CONNECTIVITY_MONITOR_PROBE_WAN_CONNECTIVITY_FAILED_ERR_NON_ZERO_SUM,
}
# Delta metrics
# Values of these metrics are set to the differences of consecutive
# samples' values
self._delta_metrics = {
"rx bytes": [RX_BYTES, {}],
"tx bytes": [TX_BYTES, {}],
"tx retries": [TX_RETRIED, {}],
"tx failed": [TX_FAILED, {}],
"rx packets": [RX_PACKETS, {}],
"rx drop misc": [RX_DROP_MISC, {}],
"eth0 tx bytes": [ETH0_TX_BYTES, {}],
| |
KMinor: u.dimensionless or unitless
:return: inner diameter of pipe
:rtype: u.m
"""
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[KMinor, ">=0", "K minor"],
[HeadLossMinor.magnitude, ">0", "Headloss due to expansion"])
return (np.sqrt(4 * FlowRate / np.pi)
* (KMinor / (2 * u.gravity * HeadLossMinor)) ** (1/4)
).to(u.m)
@ut.list_handler()
def diam_pipe(FlowRate, HeadLoss, Length, Nu, PipeRough, KMinor):
    """Return the pipe inner diameter that would result in the given total head
    loss.

    This function applies to both laminar and turbulent flow and
    incorporates both minor and major losses.  It iterates: the total head
    loss is split into a major (friction) share each round, and the
    diameter is re-solved from that share until successive diameters agree
    to within 0.1%.

    :param FlowRate: flow rate of pipe
    :type FlowRate: u.m**3/u.s
    :param HeadLoss: total head loss from major and minor losses
    :type HeadLoss: u.m
    :param Length: length of pipe
    :type Length: u.m
    :param Nu: kinematic viscosity of fluid
    :type Nu: u.m**2/u.s
    :param PipeRough: roughness of pipe
    :type PipeRough: u.m
    :param KMinor: minor loss coefficient
    :type KMinor: u.dimensionless or unitless
    :return: inner diameter of pipe
    :rtype: u.m
    """
    # Initial guess: with no minor losses the major-loss solution is exact;
    # otherwise start from the larger of the two single-mechanism solutions
    # so the iteration approaches from above.
    if KMinor == 0:
        Diam = diam_major_pipe(FlowRate, HeadLoss, Length, Nu,
                               PipeRough)
    else:
        Diam = max(diam_major_pipe(FlowRate, HeadLoss,
                                   Length, Nu, PipeRough),
                   diam_minor_pipe(FlowRate, HeadLoss, KMinor))
    err = 1.00
    while err > 0.001:
        DiamPrev = Diam
        # Fraction of the total head loss attributable to friction at the
        # current diameter estimate, scaled onto HeadLoss.
        HLFricNew = (HeadLoss * headloss_major_pipe(FlowRate, Diam, Length,
                                                    Nu, PipeRough
                                                    )
                     / (headloss_major_pipe(FlowRate, Diam, Length,
                                            Nu, PipeRough
                                            )
                        + headloss_minor_pipe(FlowRate, Diam, KMinor
                                              )
                        )
                     )
        Diam = diam_major_pipe(FlowRate, HLFricNew, Length, Nu, PipeRough
                               )
        # Relative change between successive iterates (mean-normalized).
        err = abs(Diam - DiamPrev) / ((Diam + DiamPrev) / 2)
    return Diam.to(u.m)
@ut.list_handler()
def pipe_ID(FlowRate, Pressure):
    """Return the inner diameter of a pipe for a given pressure
    recovery constraint.

    :param FlowRate: flow rate of pipe
    :type FlowRate: u.m**3/u.s
    :param Pressure: pressure recovery constraint
    :type Pressure: u.m
    :return: inner diameter of pipe
    :rtype: u.m
    """
    ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
                   [Pressure.magnitude, ">0", "Pressure"])
    # Flow area follows from the velocity implied by the pressure
    # recovery constraint; the diameter follows from the area.
    area_factor = (np.pi / 4) * np.sqrt(2 * u.gravity * Pressure)
    return np.sqrt(FlowRate / area_factor).to(u.m)
############################ Weirs ############################
@ut.list_handler()
def width_rect_weir(FlowRate, Height):
    """Return the width of a rectangular weir.

    .. deprecated::
        `width_rect_weir` is deprecated; use `width_weir_rect` instead.
    """
    # stacklevel=2 attributes the warning to the caller rather than to
    # this compatibility shim.
    warnings.warn('width_rect_weir is deprecated; use '
                  'width_weir_rect instead.', UserWarning, stacklevel=2)
    return width_weir_rect(FlowRate, Height)
@ut.list_handler()
def width_weir_rect(FlowRate, Height):
    """Return the width of a rectangular weir from its flow rate and the
    height of water above the crest.  For a weir that is a vertical pipe,
    this value is the circumference.

    :param FlowRate: flow rate over weir
    :type FlowRate: u.m**3/u.s
    :param Height: height of water above weir
    :type Height: u.m
    :return: width of weir
    :rtype: u.m
    """
    ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
                   [Height.magnitude, ">0", "Height"])
    # Rearranged rectangular-weir equation, solved for width.
    crest_term = con.VC_ORIFICE_RATIO * np.sqrt(2 * u.gravity) * Height ** (3 / 2)
    return ((3 / 2) * FlowRate / crest_term).to(u.m)
@ut.list_handler()
def headloss_weir(FlowRate, Width):
    """Return the head loss of a weir.

    .. deprecated::
        `headloss_weir` is deprecated; use `headloss_weir_rect` instead.
    """
    # stacklevel=2 attributes the warning to the caller rather than to
    # this compatibility shim.
    warnings.warn('headloss_weir is deprecated; use '
                  'headloss_weir_rect instead.', UserWarning, stacklevel=2)
    return headloss_weir_rect(FlowRate, Width)
@ut.list_handler()
def headloss_weir_rect(FlowRate, Width):
    """Return the head loss of a rectangular or vertical pipe weir.

    Head loss for a weir is the difference in height between the water
    upstream of the weir and the top of the weir.

    :param FlowRate: flow rate over weir
    :type FlowRate: u.m**3/u.s
    :param Width: width of weir (circumference for a vertical pipe)
    :type Width: u.m
    :return: head loss of weir
    :rtype: u.m
    """
    ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
                   [Width.magnitude, ">0", "Width"])
    # Invert the rectangular-weir equation for the height above the crest.
    unit_flow = ((3/2) * FlowRate
                 / (con.VC_ORIFICE_RATIO * np.sqrt(2 * u.gravity) * Width))
    return ((unit_flow ** 2).to(u.m**3)) ** (1/3)
@ut.list_handler()
def flow_rect_weir(Height, Width):
    """Return the flow rate of a weir.

    .. deprecated::
        `flow_rect_weir` is deprecated; use `flow_weir_rect` instead.
    """
    # stacklevel=2 attributes the warning to the caller rather than to
    # this compatibility shim.
    warnings.warn('flow_rect_weir is deprecated; use '
                  'flow_weir_rect instead.', UserWarning, stacklevel=2)
    return flow_weir_rect(Height, Width)
@ut.list_handler()
def flow_weir_rect(Height, Width):
    """Return the flow rate of a rectangular or vertical pipe weir.

    :param Height: height of water above weir
    :type Height: u.m
    :param Width: width of weir (circumference for a vertical pipe)
    :type Width: u.m
    :return: flow of weir
    :rtype: u.m**3/u.s
    """
    ut.check_range([Height.magnitude, ">0", "Height"],
                   [Width.magnitude, ">0", "Width"])
    # Standard rectangular-weir discharge equation.
    head_term = np.sqrt(2*u.gravity) * Height**(3/2)
    return ((2/3) * con.VC_ORIFICE_RATIO * head_term * Width).to(u.m**3/u.s)
######################## Porous Media ########################
class DeprecatedFunctionError(Exception):
    """Raised when a hard-deprecated function is called.

    The original ``__init__`` never called ``super().__init__``, so
    ``str(exc)`` was empty and ``exc.args`` was ``()``; forwarding the
    message to the base class fixes both while keeping the ``message``
    attribute for existing callers.
    """

    def __init__(self, message):
        super().__init__(message)
        self.message = message
@ut.list_handler()
def headloss_kozeny(Length, DiamMedia=None, ApproachVel=None, Porosity=None, Nu=None, *, Diam=None, Vel=None):
    """
    .. deprecated::
        `headloss_kozeny` is deprecated; use `headloss_ergun` instead.
    """
    # Hard deprecation: raise unconditionally so callers migrate.  The
    # parameters are kept only so legacy call sites still bind; none of
    # them are used.
    raise DeprecatedFunctionError("This function is deprecated. Please use headloss_ergun.")
@ut.list_handler()
def re_ergun(ApproachVel, DiamMedia, Temperature, Porosity):
    """Return the Reynolds number for flow through porous media.

    :param ApproachVel: approach velocity or superficial fluid velocity
    :type ApproachVel: u.m/u.s
    :param DiamMedia: particle diameter
    :type DiamMedia: u.m
    :param Temperature: temperature of porous medium
    :type Temperature: u.degK
    :param Porosity: porosity of porous medium
    :type Porosity: u.dimensionless or unitless
    :return: Reynolds number for flow through porous media
    :rtype: u.dimensionless
    :raises ValueError: if Porosity is exactly 1, which would divide by
        zero in the (1 - Porosity) term below
    """
    ut.check_range([ApproachVel.magnitude, ">0", "ApproachVel"],
                   [DiamMedia.magnitude, ">0", "DiamMedia"],
                   [Porosity, "0-1", "Porosity"])
    if Porosity == 1:
        # Original message embedded a backslash-continuation inside the
        # string literal (a run of raw spaces) and read "great than".
        raise ValueError("Porosity is " + str(Porosity) + "; it must be "
                         "greater than or equal to 0 and less than 1")
    return (ApproachVel * DiamMedia /
            (viscosity_kinematic_water(Temperature)
             * (1 - Porosity))).to(u.dimensionless)
@ut.list_handler()
def fric_ergun(ApproachVel, DiamMedia, Temperature, Porosity):
    """Return the friction factor for flow through porous media.

    :param ApproachVel: superficial fluid velocity (VelSuperficial?)
    :type ApproachVel: u.m/u.s
    :param DiamMedia: particle diameter
    :type DiamMedia: u.m
    :param Temperature: temperature of porous medium
    :type Temperature: u.degK
    :param Porosity: porosity of porous medium
    :type Porosity: u.dimensionless or unitless
    :return: friction factor for flow through porous media
    :rtype: u.dimensionless
    """
    # Ergun correlation: viscous term (300/Re) plus inertial constant.
    reynolds = re_ergun(ApproachVel, DiamMedia, Temperature, Porosity)
    return 300 / reynolds + 3.5 * u.dimensionless
@ut.list_handler()
def headloss_ergun(ApproachVel, DiamMedia, Temperature, Porosity, Length):
    """Return the frictional head loss for flow through porous media.

    :param ApproachVel: superficial fluid velocity (VelSuperficial?)
    :type ApproachVel: u.m/u.s
    :param DiamMedia: particle diameter
    :type DiamMedia: u.m
    :param Temperature: temperature of porous medium
    :type Temperature: u.degK
    :param Porosity: porosity of porous medium
    :type Porosity: u.dimensionless or unitless
    :param Length: length of pipe or duct
    :type Length: u.m
    :return: frictional head loss for flow through porous media
    :rtype: u.m
    """
    # Ergun head loss: friction factor scaled by bed geometry, velocity
    # head, and the porosity correction (1-e)/e^3.
    friction = fric_ergun(ApproachVel, DiamMedia, Temperature, Porosity)
    return (friction * Length / DiamMedia * ApproachVel**2 / (2*u.gravity)
            * (1-Porosity) / Porosity**3).to(u.m)
@ut.list_handler()
def g_cs_ergun(ApproachVel, DiamMedia, Temperature, Porosity):
    """Camp Stein velocity gradient for flow through porous media.

    :param ApproachVel: superficial fluid velocity (VelSuperficial?)
    :type ApproachVel: u.m/u.s
    :param DiamMedia: particle diameter
    :type DiamMedia: u.m
    :param Temperature: temperature of porous medium
    :type Temperature: u.degK
    :param Porosity: porosity of porous medium
    :type Porosity: u.dimensionless or unitless
    :return: Camp Stein velocity gradient for flow through porous media
    :rtype: u.Hz
    """
    # G = sqrt(energy dissipation rate / kinematic viscosity), expressed
    # through the Ergun friction factor.
    friction = fric_ergun(ApproachVel, DiamMedia, Temperature, Porosity)
    dissipation = friction * ApproachVel**3 * (1-Porosity)
    return np.sqrt(dissipation
                   / (2 * viscosity_kinematic_water(Temperature) * DiamMedia
                      * Porosity**4)).to(u.Hz)
######################## Miscellaneous ########################
@ut.list_handler()
def height_water_critical(FlowRate, Width):
    """Return the critical local water height.

    :param FlowRate: flow rate of water
    :type FlowRate: u.m**3/u.s
    :param Width: width of channel (????????)
    :type Width: u.m
    :return: critical water height
    :rtype: u.m
    """
    ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
                   [Width.magnitude, ">0", "Width"])
    # Critical depth from the unit discharge: y_c = (q / sqrt(g))^(2/3).
    unit_discharge_term = FlowRate / (Width * np.sqrt(1*u.gravity))
    return (unit_discharge_term ** (2/3)).to(u.m)
@ut.list_handler()
def vel_horizontal(HeightWaterCritical):
    """Return the horizontal velocity. (at the critical water depth??????)

    :param HeightWaterCritical: critical water height
    :type HeightWaterCritical: u.m
    :return: horizontal velocity
    :rtype: u.m/u.s
    """
    ut.check_range([HeightWaterCritical.magnitude, ">0", "Critical height of water"])
    # Wave celerity at critical depth: v = sqrt(g * y_c).
    critical_velocity = np.sqrt(u.gravity * HeightWaterCritical)
    return critical_velocity.to(u.m/u.s)
@ut.list_handler()
def manifold_id_alt(q, pr_max):
    """Return the inner diameter of a manifold when major losses are
    negligible.

    :param q: flow rate through the manifold
    :param pr_max: maximum pressure recovery constraint
    """
    # Diameter of the circular section that passes q at the velocity
    # implied by the pressure-recovery constraint.
    velocity_term = np.sqrt(2 * u.gravity * pr_max)
    return np.sqrt(4 * q / (np.pi * velocity_term))
@ut.list_handler()
def manifold_id(q, h, l, q_ratio, nu, eps, k, n):
    """Return the manifold inner diameter by fixed-point iteration.

    Re-solves the manifold design equation with the friction factor from
    the previous diameter estimate until successive diameters agree to
    within 1%.  ``l`` and ``k`` are accepted for interface compatibility
    but are not used in the calculation.
    """
    diam = 2 * u.inch
    rel_change = 1
    while rel_change > 0.01:
        diam_prev = diam
        diam = (
            ((8 * q ** 2) / (u.gravity * np.pi ** 2 * h)) *
            (
                (
                    1 + fric_pipe(q, diam_prev, nu, eps) *
                    (1 / 3 + 1 / (2 * n) + 1 / (6 * n ** 2))
                ) /
                (1 - q_ratio ** 2)
            )
        ) ** (1 / 4)
        rel_change = np.abs(diam_prev - diam) / diam
    return diam
@ut.list_handler()
def manifold_nd(q, h, l, q_ratio, nu, eps, k, n, sdr):
    """Return the nominal diameter for a manifold pipe.

    Computes the required inner diameter with ``manifold_id`` and maps it
    to an available nominal diameter via ``pipe.ND_SDR_available``.
    """
    inner_diam = manifold_id(q, h, l, q_ratio, nu, eps, k, n)
    return pipe.ND_SDR_available(inner_diam, sdr)
@ut.list_handler()
def horiz_chan_w(q, depth, hl, l, nu, eps, manifold, k):
hl = min(hl, depth / 3)
horiz_chan_w_new = q / ((depth - hl) * np.sqrt(2 * u.gravity * hl))
error = 1
i = 0
while error > 0.001 and i < 20:
w = horiz_chan_w_new
i = i + 1
horiz_chan_w_new = np.sqrt(
(
1 + k +
fric_rect(q, w, depth - hl, nu, eps, True) *
(l / (4 * radius_hydraulic_rect(w, depth - hl, True))) *
(1 - (2 * (int(manifold) / 3)))
) / | |
dataonly = False: If True only the resulting dataframes are returned
"""
try:
if keep_dim:
dfs = self.keep_get_dict(pat, start, slut, start_ofset, slut_ofset)
else:
dfs = self.keep_var_dict(pat, start, slut, start_ofset, slut_ofset)
if showtype == 'growth':
dfs = {v: vdf.pct_change()*100. for v, vdf in dfs.items()}
dftype = 'Growth'
elif showtype == 'change':
dfs = {v: vdf.diff() for v, vdf in dfs.items()}
dftype = 'Change'
else:
dftype = 'Level'
if keep_dim:
if diff:
dfsres = {v: (vdf.subtract(
vdf.iloc[:, 0], axis=0)).iloc[:, 1:] for v, vdf in dfs.items()}
aspct=' '
elif diffpct:
dfsres = {v: (vdf.subtract(
vdf.iloc[:, 0], axis=0).divide(
vdf.iloc[:, 0], axis=0)*100).iloc[:, 1:] for v, vdf in dfs.items()}
aspct= ' in percent '
else:
dfsres = dfs
else:
first_scenario = dfs[list(dfs.keys())[0]]
if diff:
dfsres = {v: vdf - first_scenario
for i,(v, vdf) in enumerate(dfs.items()) if i >= 1}
aspct=' '
elif diffpct:
dfsres = {v: (vdf/first_scenario-1.)*100
for i,(v, vdf) in enumerate(dfs.items()) if i >= 1}
aspct= ' in percent '
else:
dfsres = dfs
assert not(diff and diffpct) ,"keep_plot can't be called with both diff and diffpct"
if dataonly:
return dfsres
xtrans = trans if trans else self.var_description
figs = {v: self.plot_basis(v, df*mul, legend=legend,
scale=scale, trans=xtrans,
title=f'Difference{aspct}to "{df.columns[0] if keep_dim else list(self.keep_solutions.keys())[0] }" for {dftype}:' if (diff or diffpct) else f'{dftype}:',
yunit=yunit,
ylabel='Percent' if showtype == 'growth' else ylabel,
xlabel='',
dec=2 if (showtype == 'growth' or diffpct) and not dec else dec)
for v, df in dfsres.items()}
if type(vline) == type(None): # to delete vline
if hasattr(self, 'vline'):
del self.vline
else:
if vline or hasattr(self, 'vline'):
if vline:
self.vline = vline
for xtime, text in self.vline:
model.keep_add_vline(figs, xtime, text)
if savefig:
figpath = Path(savefig)
suffix = figpath.suffix if figpath.suffix else '.png'
stem = figpath.stem
parent = figpath.parent
parent.mkdir(parents=True, exist_ok=True)
for v, f in figs.items():
# breakpoint()
location = parent / f'{stem}_{v}{suffix}'
f.savefig(location)
return figs
except ZeroDivisionError:
print('no keept solution')
@staticmethod
def keep_add_vline(figs, time, text=' Calibration time'):
''' adds a vertical line with text to figs a dict with matplotlib figures) from keep_plot'''
# breakpoint()
for keep, fig in figs.items():
ymax = fig.axes[0].get_ylim()[1]
try:
fig.axes[0].axvline(time, linewidth=3, color='r', ls='dashed')
fig.axes[0].annotate(text, xy=(time, ymax),
fontsize=13, va='top')
except:
fig.axes[0].axvline(pd.to_datetime(
time), linewidth=3, color='r', ls='dashed')
fig.axes[0].annotate(
text, xy=(pd.to_datetime(time), ymax), fontsize=13, va='top')
def keep_viz(self, pat='*', smpl=('', ''), selectfrom={}, legend=1, dec='', use_descriptions=True,
             select_width='', select_height='200px', vline=[]):
    """
    Interactive (ipywidgets) plot of the kept solution dataframes.

    Args:
        pat (str, optional): pattern of variables selected by default. Defaults to '*'.
        smpl (tuple with 2 elements, optional): the selected sample; has to match the dataframe index used. Defaults to ('', '').
        selectfrom (iterable, optional): the variables to select from. Defaults to {} -> all endogenous variables.
        legend (bool, optional): 1 places legends in the chart, 0 places labels to the right of the curve. Defaults to 1.
        dec (str, optional): decimals on the y-axis. Defaults to ''.
        use_descriptions (bool, optional): use the variable descriptions from the model in the selector.
        select_width (str, optional): CSS width of the variable selector; a pattern-dependent default is used when empty.
        select_height (str, optional): CSS height of the variable selector. Defaults to '200px'.
        vline (list, optional): vertical marker lines passed through to keep_plot. Defaults to [].

    Returns:
        None.

        self.keep_wiz_figs is set to a dictionary containing the figures. Can be used to produce
        publication quality files.
    """
    # NOTE(review): selectfrom={} and vline=[] are mutable defaults — harmless here
    # because they are only read, but worth confirming they are never mutated.
    from ipywidgets import interact, Dropdown, Checkbox, IntRangeSlider, SelectMultiple, Layout
    from ipywidgets import interactive, ToggleButtons, SelectionRangeSlider, RadioButtons
    from ipywidgets import interactive_output, HBox, VBox, link, Dropdown,Output
    # Slider bounds cover the whole index of the last solution dataframe.
    minper = self.lastdf.index[0]
    maxper = self.lastdf.index[-1]
    # Slider options map each index label to its integer position.
    options = [(ind, nr) for nr, ind in enumerate(self.lastdf.index)]
    with self.set_smpl(*smpl):
        # Translate the requested sample into integer slider start/end positions.
        show_per = self.current_per[:]
        init_start = self.lastdf.index.get_loc(show_per[0])
        init_end = self.lastdf.index.get_loc(show_per[-1])
    defaultvar = self.vlist(pat)
    # Candidate variables: the caller-supplied list (upper-cased) or every
    # column of the first kept solution.
    _selectfrom = [s.upper() for s in selectfrom] if selectfrom else sorted(
        list(list(self.keep_solutions.values())[0].columns))
    var_maxlen = max(len(v) for v in _selectfrom)
    if use_descriptions and self.var_description:
        # Show "NAME :description" entries; a wider selector is needed.
        select_display = [
            f'{v} :{self.var_description[v]}' for v in _selectfrom]
        defaultvar = [
            f'{v} :{self.var_description[v]}' for v in self.vlist(pat)]
        width = select_width if select_width else '90%'
    else:
        select_display = [fr'{v}' for v in _selectfrom]
        defaultvar = [fr'{v}' for v in self.vlist(pat)]
        width = select_width if select_width else '50%'
    def explain(i_smpl, selected_vars, diff, showtype, scale, legend):
        # Callback invoked by interactive_output whenever a widget changes.
        # Strip the description part, keeping only the variable names.
        vars = ' '.join(v.split(' ', 1)[0] for v in selected_vars)
        smpl = (self.lastdf.index[i_smpl[0]], self.lastdf.index[i_smpl[1]])
        # The diff radio button yields False / True / 'pct'; the string case
        # means "difference in percent".
        if type(diff) == str:
            diffpct = True
            ldiff = False
        else:
            ldiff = diff
            diffpct = False
        # print(ldiff,diffpct)
        with self.set_smpl(*smpl):
            # Keep the produced figures so the caller can save them afterwards.
            self.keep_wiz_figs = self.keep_plot(vars, diff=ldiff, diffpct = diffpct, scale=scale, showtype=showtype,
                                                legend=legend, dec=dec, vline=vline)
            plt.show()
    description_width = 'initial'
    description_width_long = 'initial'
    keep_keys = list(self.keep_solutions.keys())
    # The first kept solution is the baseline that differences are taken against.
    keep_first = keep_keys[0]
    # breakpoint()
    i_smpl = SelectionRangeSlider(value=[init_start, init_end], continuous_update=False, options=options, min=minper,
                                  max=maxper, layout=Layout(width='75%'), description='Show interval')
    selected_vars = SelectMultiple(value=defaultvar, options=select_display, layout=Layout(width=width, height=select_height, font="monospace"),
                                   description='Select one or more', style={'description_width': description_width})
    selected_vars2 = SelectMultiple(value=defaultvar, options=select_display, layout=Layout(width=width, height=select_height),
                                    description='Select one or more', style={'description_width': description_width})
    diff = RadioButtons(options=[('No', False), ('Yes', True), ('In percent', 'pct')], description=fr'Difference to: "{keep_first}"',
                        value=False, style={'description_width': 'auto'}, layout=Layout(width='auto'))
    # diff_select = Dropdown(options=keep_keys,value=keep_first, description = fr'to:')
    showtype = RadioButtons(options=[('Level', 'level'), ('Growth', 'growth')],
                            description='Data type', value='level', style={'description_width': description_width})
    scale = RadioButtons(options=[('Linear', 'linear'), ('Log', 'log')], description='Y-scale',
                         value='linear', style={'description_width': description_width})
    # legend = ToggleButtons(options=[('Yes',1),('No',0)], description = 'Legends',value=1,style={'description_width': description_width})
    legend = RadioButtons(options=[('Yes', 1), ('No', 0)], description='Legends', value=legend, style={
        'description_width': description_width})
    # breakpoint()
    # Mirror the selection into the unused second selector (kept for parity).
    l = link((selected_vars, 'value'),
             (selected_vars2, 'value'))  # not used
    select = HBox([selected_vars])
    options1 = diff
    options2 = HBox([scale, legend, showtype])
    ui = VBox([select, options1, options2, i_smpl])
    # Re-run explain() whenever any of the bound widget values change.
    show = interactive_output(explain, {'i_smpl': i_smpl, 'selected_vars': selected_vars, 'diff': diff, 'showtype': showtype,
                                        'scale': scale, 'legend': legend})
    # display(ui, show)
    display(ui)
    display(show)
    return
def keep_viz_prefix(self, pat='*', smpl=('', ''), selectfrom={}, legend=1, dec='', use_descriptions=True,
select_width='', select_height='200px', vline=[],prefix_dict={},add_var_name=False,short=False):
"""
Plots the keept dataframes
Args:
pat (str, optional): a string of variables to select pr default. Defaults to '*'.
smpl (tuple with 2 elements, optional): the selected smpl, has to match the dataframe index used. Defaults to ('','').
selectfrom (list, optional): the variables to select from, Defaults to [] -> all keept variables .
legend (bool, optional)c: DESCRIPTION. legends or to the right of the curve. Defaults to 1.
dec (string, optional): decimals on the y-axis. Defaults to '0'.
use_descriptions : Use the variable descriptions from the model
Returns:
None.
self.keep_wiz_figs is set to a dictionary containing the figures. Can be used to produce publication
quality files.
"""
from ipywidgets import interact, Dropdown, Checkbox, IntRangeSlider, SelectMultiple, Layout
from ipywidgets import Select
from ipywidgets import interactive, ToggleButtons, SelectionRangeSlider, RadioButtons
from ipywidgets import interactive_output, HBox, VBox, link, Dropdown,Output
minper = self.lastdf.index[0]
maxper = self.lastdf.index[-1]
options = [(ind, nr) for nr, ind in enumerate(self.lastdf.index)]
with self.set_smpl(*smpl):
show_per = self.current_per[:]
init_start = self.lastdf.index.get_loc(show_per[0])
init_end = self.lastdf.index.get_loc(show_per[-1])
keepvar = sorted (list(self.keep_solutions.values())[0].columns)
defaultvar = [v for v in self.vlist(pat) if v in keepvar]
_selectfrom = [s.upper() for s in selectfrom] if selectfrom else keepvar
gross_selectfrom = [(f'{(v+" ") if add_var_name else ""}{self.var_description[v] if use_descriptions else v}',v) for v in _selectfrom]
width = select_width if select_width else '50%' if use_descriptions else '50%'
def explain(i_smpl, selected_vars, diff, showtype, scale, legend):
vars = ' '.join(v for v in selected_vars)
smpl = (self.lastdf.index[i_smpl[0]], self.lastdf.index[i_smpl[1]])
if type(diff) == str:
diffpct = True
ldiff = False
else:
ldiff = diff
diffpct = False
with self.set_smpl(*smpl):
self.keep_wiz_figs = self.keep_plot(vars, diff=ldiff, diffpct = diffpct, scale=scale, showtype=showtype,
legend=legend, dec=dec, vline=vline)
plt.show()
description_width = 'initial'
description_width_long = 'initial'
keep_keys = list(self.keep_solutions.keys())
keep_first = keep_keys[0]
select_prefix = [(c,iso) for iso,c in prefix_dict.items()]
# breakpoint()
i_smpl = SelectionRangeSlider(value=[init_start, init_end], continuous_update=False, options=options, min=minper,
max=maxper, layout=Layout(width='75%'), description='Show interval')
selected_vars = SelectMultiple(value=defaultvar, options=gross_selectfrom, layout=Layout(width=width, height=select_height, font="monospace"),
description='Select one or more', style={'description_width': description_width})
diff = RadioButtons(options=[('No', False), ('Yes', True), ('In percent', 'pct')], description=fr'Difference to: "{keep_first}"',
value=False, style={'description_width': 'auto'}, layout=Layout(width='auto'))
showtype = RadioButtons(options=[('Level', 'level'), ('Growth', 'growth')],
description='Data type', value='level', style={'description_width': description_width})
scale = RadioButtons(options=[('Linear', 'linear'), ('Log', 'log')], description='Y-scale',
value='linear', style={'description_width': description_width})
legend = RadioButtons(options=[('Yes', 1), ('No', 0)], description='Legends', value=legend, style={
'description_width': description_width})
# breakpoint()
def get_prefix(g):
try:
current_suffix = {v[len(g['old'][0]):] for v in selected_vars.value}
except:
current_suffix = ''
new_prefix = g['new']
selected_prefix_var = [(des,variable) for des,variable in gross_selectfrom
if any([variable.startswith(n) for n in new_prefix])]
selected_vars.options = selected_prefix_var
if current_suffix:
new_selection = [f'{n}{c}' for c in current_suffix for n in new_prefix
if f'{n}{c}' in {s | |
<gh_stars>0
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
import sys
from types import GeneratorType
from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError
from libcloud.common.dimensiondata import DimensionDataAPIException, NetworkDomainServicePlan
from libcloud.common.dimensiondata import DimensionDataServerCpuSpecification, DimensionDataServerDisk, DimensionDataServerVMWareTools
from libcloud.common.dimensiondata import DimensionDataTag, DimensionDataTagKey
from libcloud.common.dimensiondata import DimensionDataIpAddress, \
DimensionDataIpAddressList, DimensionDataChildIpAddressList, \
DimensionDataPortList, DimensionDataPort, DimensionDataChildPortList
from libcloud.common.dimensiondata import TYPES_URN
from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver as DimensionData
from libcloud.compute.drivers.dimensiondata import DimensionDataNic
from libcloud.compute.base import Node, NodeAuthPassword, NodeLocation
from libcloud.test import MockHttp, unittest, MockRawResponse, StorageMockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS
from libcloud.utils.xml import fixxpath, findtext, findall
class DimensionData_v2_4_Tests(unittest.TestCase, TestCaseMixin):
def setUp(self):
    """Point the driver at the 2.4 mock HTTP transport before each test."""
    DimensionData.connectionCls.active_api_version = '2.4'
    DimensionData.connectionCls.conn_class = DimensionDataMockHttp
    DimensionData.connectionCls.rawResponseCls = \
        DimensionDataMockRawResponse
    # Reset the fixture selector so a test that set a special `type`
    # (e.g. 'PAGINATED') does not leak into the next test.
    DimensionDataMockHttp.type = None
    self.driver = DimensionData(*DIMENSIONDATA_PARAMS)
def test_invalid_region(self):
    """An unknown region name must be rejected at construction time."""
    with self.assertRaises(ValueError):
        DimensionData(*DIMENSIONDATA_PARAMS, region='blah')

def test_invalid_creds(self):
    """The 'UNAUTHORIZED' fixture must surface as InvalidCredsError."""
    DimensionDataMockHttp.type = 'UNAUTHORIZED'
    with self.assertRaises(InvalidCredsError):
        self.driver.list_nodes()

def test_get_account_details(self):
    """Account details parse out of the default account fixture."""
    DimensionDataMockHttp.type = None
    ret = self.driver.connection.get_account_details()
    self.assertEqual(ret.full_name, '<NAME>')
    self.assertEqual(ret.first_name, 'Test')
    self.assertEqual(ret.email, '<EMAIL>')
def test_list_locations_response(self):
    """The locations fixture parses into 5 locations with expected fields."""
    DimensionDataMockHttp.type = None
    ret = self.driver.list_locations()
    self.assertEqual(len(ret), 5)
    first_loc = ret[0]
    self.assertEqual(first_loc.id, 'NA3')
    self.assertEqual(first_loc.name, 'US - West')
    self.assertEqual(first_loc.country, 'US')

def test_list_nodes_response(self):
    """The default server fixture contains 7 nodes."""
    DimensionDataMockHttp.type = None
    ret = self.driver.list_nodes()
    self.assertEqual(len(ret), 7)

def test_node_extras(self):
    """Node.extra carries typed VMware-tools, CPU and disk objects."""
    DimensionDataMockHttp.type = None
    ret = self.driver.list_nodes()
    self.assertTrue(isinstance(ret[0].extra['vmWareTools'], DimensionDataServerVMWareTools))
    self.assertTrue(isinstance(ret[0].extra['cpu'], DimensionDataServerCpuSpecification))
    self.assertTrue(isinstance(ret[0].extra['disks'], list))
    self.assertTrue(isinstance(ret[0].extra['disks'][0], DimensionDataServerDisk))
    self.assertEqual(ret[0].extra['disks'][0].size_gb, 10)
    self.assertTrue(isinstance(ret[1].extra['disks'], list))
    self.assertTrue(isinstance(ret[1].extra['disks'][0], DimensionDataServerDisk))
    self.assertEqual(ret[1].extra['disks'][0].size_gb, 10)
def test_server_states(self):
    """Each fixture server maps to the expected libcloud node state."""
    DimensionDataMockHttp.type = None
    ret = self.driver.list_nodes()
    # assertEqual (rather than assertTrue on a comparison) reports the
    # actual vs expected state when a mapping breaks, instead of the
    # useless "False is not true".
    self.assertEqual(ret[0].state, 'running')
    self.assertEqual(ret[1].state, 'starting')
    self.assertEqual(ret[2].state, 'stopping')
    self.assertEqual(ret[3].state, 'reconfiguring')
    self.assertEqual(ret[4].state, 'running')
    self.assertEqual(ret[5].state, 'terminated')
    self.assertEqual(ret[6].state, 'stopped')
    self.assertEqual(len(ret), 7)
def test_list_nodes_response_PAGINATED(self):
    """Paginated fixtures are transparently merged: 9 nodes across pages."""
    DimensionDataMockHttp.type = 'PAGINATED'
    ret = self.driver.list_nodes()
    self.assertEqual(len(ret), 9)
def test_paginated_mcp2_call_EMPTY(self):
    """An empty paginated response yields no nodes at all."""
    # cache org
    self.driver.connection._get_orgId()
    DimensionDataMockHttp.type = 'EMPTY'
    node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2('server/server')
    empty_node_list = []
    for node_list in node_list_generator:
        empty_node_list.extend(node_list)
    # assertEqual reports the stray length on failure, unlike
    # assertTrue(len(...) == 0) which only reports "False is not true".
    self.assertEqual(len(empty_node_list), 0)

def test_paginated_mcp2_call_PAGED_THEN_EMPTY(self):
    """Pagination stops cleanly when a data page is followed by an empty one."""
    # cache org
    self.driver.connection._get_orgId()
    DimensionDataMockHttp.type = 'PAGED_THEN_EMPTY'
    node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2('server/server')
    final_node_list = []
    for node_list in node_list_generator:
        final_node_list.extend(node_list)
    self.assertEqual(len(final_node_list), 2)
def test_paginated_mcp2_call_with_page_size(self):
    """page_size is accepted and the call stays lazy (returns a generator)."""
    # cache org
    self.driver.connection._get_orgId()
    DimensionDataMockHttp.type = 'PAGESIZE50'
    node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2('server/server', page_size=50)
    self.assertTrue(isinstance(node_list_generator, GeneratorType))

# We're making sure here the filters make it to the URL
# See _caas_2_4_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_ALLFILTERS for asserts
def test_list_nodes_response_strings_ALLFILTERS(self):
    """All ex_* filters may be combined; node/size/image/disk details parse."""
    DimensionDataMockHttp.type = 'ALLFILTERS'
    ret = self.driver.list_nodes(ex_location='fake_loc', ex_name='fake_name',
                                 ex_ipv6='fake_ipv6', ex_ipv4='fake_ipv4', ex_vlan='fake_vlan',
                                 ex_image='fake_image', ex_deployed=True,
                                 ex_started=True, ex_state='fake_state',
                                 ex_network='fake_network', ex_network_domain='fake_network_domain')
    self.assertTrue(isinstance(ret, list))
    self.assertEqual(len(ret), 7)
    node = ret[3]
    self.assertTrue(isinstance(node.extra['disks'], list))
    self.assertTrue(isinstance(node.extra['disks'][0], DimensionDataServerDisk))
    self.assertEqual(node.size.id, '1')
    self.assertEqual(node.image.id, '3ebf3c0f-90fe-4a8b-8585-6e65b316592c')
    self.assertEqual(node.image.name, 'WIN2008S/32')
    disk = node.extra['disks'][0]
    self.assertEqual(disk.id, "c2e1f199-116e-4dbc-9960-68720b832b0a")
    self.assertEqual(disk.scsi_id, 0)
    self.assertEqual(disk.size_gb, 50)
    self.assertEqual(disk.speed, "STANDARD")
    self.assertEqual(disk.state, "NORMAL")
def test_list_nodes_response_LOCATION(self):
    """Filtering by a location object restricts nodes to that datacenter."""
    DimensionDataMockHttp.type = None
    ret = self.driver.list_locations()
    first_loc = ret[0]
    ret = self.driver.list_nodes(ex_location=first_loc)
    for node in ret:
        self.assertEqual(node.extra['datacenterId'], 'NA3')

def test_list_nodes_response_LOCATION_STR(self):
    """Filtering by a location id string behaves like the object form."""
    DimensionDataMockHttp.type = None
    ret = self.driver.list_nodes(ex_location='NA3')
    for node in ret:
        self.assertEqual(node.extra['datacenterId'], 'NA3')

def test_list_sizes_response(self):
    """The driver exposes a single synthetic 'default' size."""
    DimensionDataMockHttp.type = None
    ret = self.driver.list_sizes()
    self.assertEqual(len(ret), 1)
    size = ret[0]
    self.assertEqual(size.name, 'default')
def test_reboot_node_response(self):
    """A reboot request against the mock API reports success."""
    node = Node(id='11', name=None, state=None,
                public_ips=None, private_ips=None, driver=self.driver)
    ret = node.reboot()
    # assertIs pins the exact True singleton and prints both operands on
    # failure, unlike assertTrue(ret is True) which reports nothing useful.
    self.assertIs(ret, True)

def test_reboot_node_response_INPROGRESS(self):
    """Rebooting a busy server surfaces the API exception."""
    DimensionDataMockHttp.type = 'INPROGRESS'
    node = Node(id='11', name=None, state=None,
                public_ips=None, private_ips=None, driver=self.driver)
    with self.assertRaises(DimensionDataAPIException):
        node.reboot()

def test_destroy_node_response(self):
    """A destroy request against the mock API reports success."""
    node = Node(id='11', name=None, state=None,
                public_ips=None, private_ips=None, driver=self.driver)
    ret = node.destroy()
    self.assertIs(ret, True)

def test_destroy_node_response_RESOURCE_BUSY(self):
    """Destroying a busy server surfaces the API exception."""
    DimensionDataMockHttp.type = 'INPROGRESS'
    node = Node(id='11', name=None, state=None,
                public_ips=None, private_ips=None, driver=self.driver)
    with self.assertRaises(DimensionDataAPIException):
        node.destroy()
def test_list_images(self):
    """Base OS images parse with location, CPU count and OS metadata."""
    images = self.driver.list_images()
    self.assertEqual(len(images), 3)
    self.assertEqual(images[0].name, 'RedHat 6 64-bit 2 CPU')
    self.assertEqual(images[0].id, 'c14b1a46-2428-44c1-9c1a-b20e6418d08c')
    self.assertEqual(images[0].extra['location'].id, 'NA9')
    self.assertEqual(images[0].extra['cpu'].cpu_count, 2)
    self.assertEqual(images[0].extra['OS_displayName'], 'REDHAT6/64')

def test_clean_failed_deployment_response_with_node(self):
    """ex_clean_failed_deployment accepts a Node object."""
    node = Node(id='11', name=None, state=None,
                public_ips=None, private_ips=None, driver=self.driver)
    ret = self.driver.ex_clean_failed_deployment(node)
    self.assertTrue(ret is True)

def test_clean_failed_deployment_response_with_node_id(self):
    """ex_clean_failed_deployment also accepts a bare server id string."""
    node = 'e75ead52-692f-4314-8725-c8a4f4d13a87'
    ret = self.driver.ex_clean_failed_deployment(node)
    self.assertTrue(ret is True)

def test_ex_list_customer_images(self):
    """Customer-imported images parse with the same metadata as base images."""
    images = self.driver.ex_list_customer_images()
    self.assertEqual(len(images), 3)
    self.assertEqual(images[0].name, 'ImportedCustomerImage')
    self.assertEqual(images[0].id, '5234e5c7-01de-4411-8b6e-baeb8d91cf5d')
    self.assertEqual(images[0].extra['location'].id, 'NA9')
    self.assertEqual(images[0].extra['cpu'].cpu_count, 4)
    self.assertEqual(images[0].extra['OS_displayName'], 'REDHAT6/64')
def test_create_mcp1_node_optional_param(self):
    """create_node forwards every optional MCP1 parameter (CPU, disks, memory, DNS)."""
    root_pw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    network = self.driver.ex_list_networks()[0]
    cpu_spec = DimensionDataServerCpuSpecification(cpu_count='4',
                                                   cores_per_socket='2',
                                                   performance='STANDARD')
    disks = [DimensionDataServerDisk(scsi_id='0', speed='HIGHPERFORMANCE')]
    node = self.driver.create_node(name='test2', image=image, auth=root_pw,
                                   ex_description='test2 node',
                                   ex_network=network,
                                   ex_is_started=False,
                                   ex_memory_gb=8,
                                   ex_disks=disks,
                                   ex_cpu_specification=cpu_spec,
                                   ex_primary_dns='10.0.0.5',
                                   ex_secondary_dns='10.0.0.6'
                                   )
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')

def test_create_mcp1_node_response_no_pass_random_gen(self):
    """With auth=None a password is generated server-side and exposed in extra."""
    image = self.driver.list_images()[0]
    network = self.driver.ex_list_networks()[0]
    node = self.driver.create_node(name='test2', image=image, auth=None,
                                   ex_description='test2 node',
                                   ex_network=network,
                                   ex_is_started=False)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
    self.assertTrue('password' in node.extra)
def test_create_mcp1_node_response_no_pass_customer_windows(self):
    """Windows customer image + auth=None: a generated password is exposed."""
    image = self.driver.ex_list_customer_images()[1]
    network = self.driver.ex_list_networks()[0]
    node = self.driver.create_node(name='test2', image=image, auth=None,
                                   ex_description='test2 node', ex_network=network,
                                   ex_is_started=False)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
    self.assertTrue('password' in node.extra)

def test_create_mcp1_node_response_no_pass_customer_windows_STR(self):
    """Same as above, but the image is given as an id string."""
    image = self.driver.ex_list_customer_images()[1].id
    network = self.driver.ex_list_networks()[0]
    node = self.driver.create_node(name='test2', image=image, auth=None,
                                   ex_description='test2 node', ex_network=network,
                                   ex_is_started=False)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
    self.assertTrue('password' in node.extra)

def test_create_mcp1_node_response_no_pass_customer_linux(self):
    """Linux customer image + auth=None: no password is returned."""
    image = self.driver.ex_list_customer_images()[0]
    network = self.driver.ex_list_networks()[0]
    node = self.driver.create_node(name='test2', image=image, auth=None,
                                   ex_description='test2 node', ex_network=network,
                                   ex_is_started=False)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
    self.assertTrue('password' not in node.extra)

def test_create_mcp1_node_response_no_pass_customer_linux_STR(self):
    """Same as above, but the image is given as an id string."""
    image = self.driver.ex_list_customer_images()[0].id
    network = self.driver.ex_list_networks()[0]
    node = self.driver.create_node(name='test2', image=image, auth=None,
                                   ex_description='test2 node', ex_network=network,
                                   ex_is_started=False)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
    self.assertTrue('password' not in node.extra)

def test_create_mcp1_node_response_STR(self):
    """Image and network may both be id strings; auth may be a plain string."""
    rootPw = '<PASSWORD>'
    image = self.driver.list_images()[0].id
    network = self.driver.ex_list_networks()[0].id
    node = self.driver.create_node(name='test2', image=image, auth=rootPw,
                                   ex_description='test2 node', ex_network=network,
                                   ex_is_started=False)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_response_network_domain(self):
    """MCP2 create with network-domain and VLAN objects plus a CPU spec."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    location = self.driver.ex_get_location_by_id('NA9')
    image = self.driver.list_images(location=location)[0]
    network_domain = self.driver.ex_list_network_domains(location=location)[0]
    vlan = self.driver.ex_list_vlans(location=location)[0]
    cpu = DimensionDataServerCpuSpecification(
        cpu_count=4,
        cores_per_socket=1,
        performance='HIGHPERFORMANCE'
    )
    node = self.driver.create_node(name='test2', image=image, auth=rootPw,
                                   ex_description='test2 node',
                                   ex_network_domain=network_domain,
                                   ex_vlan=vlan,
                                   ex_is_started=False, ex_cpu_specification=cpu,
                                   ex_memory_gb=4)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')

def test_create_node_response_network_domain_STR(self):
    """Same as above, but network domain and VLAN are given as id strings."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    location = self.driver.ex_get_location_by_id('NA9')
    image = self.driver.list_images(location=location)[0]
    network_domain = self.driver.ex_list_network_domains(location=location)[0].id
    vlan = self.driver.ex_list_vlans(location=location)[0].id
    cpu = DimensionDataServerCpuSpecification(
        cpu_count=4,
        cores_per_socket=1,
        performance='HIGHPERFORMANCE'
    )
    node = self.driver.create_node(name='test2', image=image, auth=rootPw,
                                   ex_description='test2 node',
                                   ex_network_domain=network_domain,
                                   ex_vlan=vlan,
                                   ex_is_started=False, ex_cpu_specification=cpu,
                                   ex_memory_gb=4)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_mcp1_node_no_network(self):
    """MCP1 create without a network must be rejected."""
    # NOTE(review): InvalidRequestError does not appear in this module's
    # visible import block — verify it is imported (presumably from
    # libcloud.common.dimensiondata), otherwise this test raises NameError.
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    with self.assertRaises(InvalidRequestError):
        self.driver.create_node(name='test2',
                                image=image,
                                auth=rootPw,
                                ex_description='test2 node',
                                ex_network=None,
                                ex_is_started=False)

def test_create_node_mcp1_ipv4(self):
    """MCP1 create with an explicit primary IPv4 address."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    node = self.driver.create_node(name='test2',
                                   image=image,
                                   auth=rootPw,
                                   ex_description='test2 node',
                                   ex_network='fakenetwork',
                                   ex_primary_ipv4='10.0.0.1',
                                   ex_is_started=False)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')

def test_create_node_mcp1_network(self):
    """MCP1 create identified only by network name."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    node = self.driver.create_node(name='test2',
                                   image=image,
                                   auth=rootPw,
                                   ex_description='test2 node',
                                   ex_network='fakenetwork',
                                   ex_is_started=False)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')

def test_create_node_mcp2_vlan(self):
    """MCP2 create identified by network domain + VLAN."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    node = self.driver.create_node(name='test2',
                                   image=image,
                                   auth=rootPw,
                                   ex_description='test2 node',
                                   ex_network_domain='fakenetworkdomain',
                                   ex_vlan='fakevlan',
                                   ex_is_started=False)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')

def test_create_node_mcp2_ipv4(self):
    """MCP2 create identified by network domain + primary IPv4."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    node = self.driver.create_node(name='test2',
                                   image=image,
                                   auth=rootPw,
                                   ex_description='test2 node',
                                   ex_network_domain='fakenetworkdomain',
                                   ex_primary_ipv4='10.0.0.1',
                                   ex_is_started=False)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')

def test_create_node_network_domain_no_vlan_or_ipv4(self):
    """A network domain without a VLAN or IPv4 is ambiguous and rejected."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    with self.assertRaises(ValueError):
        self.driver.create_node(name='test2',
                                image=image,
                                auth=rootPw,
                                ex_description='test2 node',
                                ex_network_domain='fake_network_domain',
                                ex_is_started=False)
def test_create_node_response(self):
    """Minimal MCP2 create via the ex_primary_nic_vlan shorthand."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    node = self.driver.create_node(
        name='test3',
        image=image,
        auth=rootPw,
        ex_network_domain='fakenetworkdomain',
        ex_primary_nic_vlan='fakevlan'
    )
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')

def test_create_node_ms_time_zone(self):
    """The Microsoft time-zone code is forwarded to the deploy request."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    node = self.driver.create_node(
        name='test3',
        image=image,
        auth=rootPw,
        ex_network_domain='fakenetworkdomain',
        ex_primary_nic_vlan='fakevlan',
        ex_microsoft_time_zone='040'
    )
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')

def test_create_node_ambigious_mcps_fail(self):
    """Mixing MCP1 (ex_network) and MCP2 (ex_network_domain) args is rejected."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    with self.assertRaises(ValueError):
        self.driver.create_node(
            name='test3',
            image=image,
            auth=rootPw,
            ex_network_domain='fakenetworkdomain',
            ex_network='fakenetwork',
            ex_primary_nic_vlan='fakevlan'
        )
def test_create_node_no_network_domain_fail(self):
    """A primary-NIC VLAN without a network domain is rejected."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    with self.assertRaises(ValueError):
        self.driver.create_node(
            name='test3',
            image=image,
            auth=rootPw,
            ex_primary_nic_vlan='fakevlan'
        )

def test_create_node_no_primary_nic_fail(self):
    """A network domain without any primary-NIC specification is rejected."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    with self.assertRaises(ValueError):
        self.driver.create_node(
            name='test3',
            image=image,
            auth=rootPw,
            ex_network_domain='fakenetworkdomain'
        )

def test_create_node_primary_vlan_nic(self):
    """Primary NIC may carry an explicit network adapter type."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    node = self.driver.create_node(
        name='test3',
        image=image,
        auth=rootPw,
        ex_network_domain='fakenetworkdomain',
        ex_primary_nic_vlan='fakevlan',
        ex_primary_nic_network_adapter='v1000'
    )
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')

def test_create_node_primary_ipv4(self):
    """Primary NIC may be specified by private IPv4 instead of a VLAN."""
    rootPw = '<PASSWORD>'
    image = self.driver.list_images()[0]
    node = self.driver.create_node(
        name='test3',
        image=image,
        auth=rootPw,
        ex_network_domain='fakenetworkdomain',
        ex_primary_nic_private_ipv4='10.0.0.1'
    )
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_both_primary_nic_and_vlan_fail(self):
    """Specifying both a primary-NIC IPv4 and a VLAN is ambiguous and rejected."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    with self.assertRaises(ValueError):
        self.driver.create_node(
            name='test3',
            image=image,
            auth=rootPw,
            ex_network_domain='fakenetworkdomain',
            ex_primary_nic_private_ipv4='10.0.0.1',
            ex_primary_nic_vlan='fakevlan'
        )

def test_create_node_cpu_specification(self):
    """A CPU specification object is forwarded on an MCP2 create."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    cpu_spec = DimensionDataServerCpuSpecification(cpu_count='4',
                                                   cores_per_socket='2',
                                                   performance='STANDARD')
    node = self.driver.create_node(name='test2',
                                   image=image,
                                   auth=rootPw,
                                   ex_description='test2 node',
                                   ex_network_domain='fakenetworkdomain',
                                   ex_primary_nic_private_ipv4='10.0.0.1',
                                   ex_is_started=False,
                                   ex_cpu_specification=cpu_spec)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')

def test_create_node_memory(self):
    """An explicit memory size (GB) is forwarded on an MCP2 create."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    node = self.driver.create_node(name='test2',
                                   image=image,
                                   auth=rootPw,
                                   ex_description='test2 node',
                                   ex_network_domain='fakenetworkdomain',
                                   ex_primary_nic_private_ipv4='10.0.0.1',
                                   ex_is_started=False,
                                   ex_memory_gb=8)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')

def test_create_node_disks(self):
    """A list of disk specifications is forwarded on an MCP2 create."""
    rootPw = NodeAuthPassword('<PASSWORD>')
    image = self.driver.list_images()[0]
    disks = [DimensionDataServerDisk(scsi_id='0', speed='HIGHPERFORMANCE')]
    node = self.driver.create_node(name='test2',
                                   image=image,
                                   auth=rootPw,
                                   ex_description='test2 node',
                                   ex_network_domain='fakenetworkdomain',
                                   ex_primary_nic_private_ipv4='10.0.0.1',
                                   ex_is_started=False,
                                   ex_disks=disks)
    self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
    self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_disks_fail(self):
root_pw = NodeAuthPassword('<PASSWORD>123')
image = self.driver.list_images()[0]
disks = 'blah'
with self.assertRaises(TypeError):
self.driver.create_node(name='test2',
image=image,
auth=root_pw,
ex_description='test2 node',
ex_network_domain='fakenetworkdomain',
| |
import pytest
from plums.plot.engine.position_generator import (
SimpleImagePositionGenerator,
LayoutImagePositionGenerator,
AdaptiveImagePositionGenerator,
LegendItemPositionGenerator
)
class TestSimpleImagePositionGenerator:
    """Tests for SimpleImagePositionGenerator: a row-major grid with a max column count."""

    def test_constructor(self):
        """Cell size and grid dimensions are derived from margins and image size."""
        dummy_datapoints = list(range(150))
        dummy_max_cols = 20
        dummy_margins = (5, 10)
        dummy_max_image_size = (100, 200)
        position_generator = SimpleImagePositionGenerator(
            data_points=dummy_datapoints,
            max_cols=dummy_max_cols,
            margins=dummy_margins,
            max_image_size=dummy_max_image_size
        )
        # Check attributes
        assert position_generator._width == 110
        assert position_generator._height == 220
        assert position_generator._n_cols == 20
        assert position_generator._n_rows == 7
        assert position_generator._remainder == 10
        # Change number of datapoints (not enough to completely fill a row)
        dummy_datapoints = list(range(15))
        position_generator = SimpleImagePositionGenerator(
            data_points=dummy_datapoints,
            max_cols=dummy_max_cols,
            margins=dummy_margins,
            max_image_size=dummy_max_image_size
        )
        # Check attributes
        assert position_generator._width == 110
        assert position_generator._height == 220
        assert position_generator._n_cols == 15
        assert position_generator._n_rows == 1
        assert position_generator._remainder == 0

    def test__iter(self):
        """Positions are emitted row-major with the configured margins."""
        dummy_datapoints = list(range(5))
        dummy_max_cols = 2
        dummy_margins = (5, 10)
        dummy_max_image_size = (100, 200)
        position_generator = SimpleImagePositionGenerator(
            data_points=dummy_datapoints,
            max_cols=dummy_max_cols,
            margins=dummy_margins,
            max_image_size=dummy_max_image_size
        )
        expected_positions = [(5, 10), (115, 10), (5, 230), (115, 230), (5, 450)]
        # Compare whole sequences: the previous index-wise loop passed
        # silently when the generator yielded fewer positions than expected.
        assert list(position_generator) == expected_positions

    def test_mosaic_size(self):
        """Mosaic size is (cols * cell_width, rows * cell_height)."""
        dummy_datapoints = list(range(150))
        dummy_max_cols = 20
        dummy_margins = (5, 10)
        dummy_max_image_size = (100, 200)
        position_generator = SimpleImagePositionGenerator(
            data_points=dummy_datapoints,
            max_cols=dummy_max_cols,
            margins=dummy_margins,
            max_image_size=dummy_max_image_size
        )
        # Check size (150 points at 20 cols -> 8 rows including the partial one)
        assert position_generator.mosaic_size == (20 * 110, 8 * 220)
        dummy_datapoints = list(range(15))
        position_generator = SimpleImagePositionGenerator(
            data_points=dummy_datapoints,
            max_cols=dummy_max_cols,
            margins=dummy_margins,
            max_image_size=dummy_max_image_size
        )
        # Check size
        assert position_generator.mosaic_size == (15 * 110, 1 * 220)
class TestLayoutImagePositionGenerator:
    """Tests for LayoutImagePositionGenerator: per-row column counts taken from the data layout."""

    def test_constructor(self):
        """The layout tuple and grid dimensions follow the nested data shape."""
        dummy_datapoints = [list(range(5)) for _ in range(20)]
        dummy_margins = (5, 10)
        dummy_max_image_size = (100, 200)
        position_generator = LayoutImagePositionGenerator(
            data_points=dummy_datapoints,
            margins=dummy_margins,
            max_image_size=dummy_max_image_size
        )
        # Check attributes
        assert position_generator._layout == tuple(5 for _ in range(20))
        assert position_generator._width == 110
        assert position_generator._height == 220
        assert position_generator._n_cols == 5
        assert position_generator._n_rows == 20
        # With unequal number of column per row
        dummy_datapoints = [list(range(3 + j % 3)) for j in range(20)]
        position_generator = LayoutImagePositionGenerator(
            data_points=dummy_datapoints,
            margins=dummy_margins,
            max_image_size=dummy_max_image_size
        )
        # Check attributes
        assert position_generator._layout == tuple(3 + j % 3 for j in range(20))
        assert position_generator._width == 110
        assert position_generator._height == 220
        assert position_generator._n_cols == 5
        assert position_generator._n_rows == 20

    def test__iter(self):
        """Rows with fewer images get larger, centering horizontal margins."""
        tolerance = 1e-8
        dummy_datapoints = [list(range(3 + (j + 2) % 3)) for j in range(4)]
        dummy_margins = (5, 10)
        dummy_max_image_size = (100, 200)
        position_generator = LayoutImagePositionGenerator(
            data_points=dummy_datapoints,
            margins=dummy_margins,
            max_image_size=dummy_max_image_size
        )
        first_margin = 5
        second_margin = (250.0 / 3) / 2
        third_margin = (150.0 / 4) / 2
        fourth_margin = 5
        expected_positions = [(first_margin, 10), (100 + 3 * first_margin, 10), (200 + 5 * first_margin, 10),
                              (300 + 7 * first_margin, 10), (400 + 9 * first_margin, 10),
                              (second_margin, 230), (100 + 3 * second_margin, 230), (200 + 5 * second_margin, 230),
                              (third_margin, 450), (100 + 3 * third_margin, 450), (200 + 5 * third_margin, 450),
                              (300 + 7 * third_margin, 450),
                              (fourth_margin, 670), (100 + 3 * fourth_margin, 670), (200 + 5 * fourth_margin, 670),
                              (300 + 7 * fourth_margin, 670), (400 + 9 * fourth_margin, 670)]
        positions = list(position_generator)
        # Assert the count explicitly: the previous index-wise loop passed
        # silently when the generator yielded fewer positions than expected.
        assert len(positions) == len(expected_positions)
        for actual, expected in zip(positions, expected_positions):
            assert (abs(actual[0] - expected[0]) < tolerance) and \
                (abs(actual[1] - expected[1]) < tolerance)

    def test_mosaic_size(self):
        """Mosaic size uses the widest row and the total row count."""
        dummy_datapoints = [list(range(5)) for _ in range(20)]
        dummy_margins = (5, 10)
        dummy_max_image_size = (100, 200)
        position_generator = LayoutImagePositionGenerator(
            data_points=dummy_datapoints,
            margins=dummy_margins,
            max_image_size=dummy_max_image_size
        )
        # Check size
        assert position_generator.mosaic_size == (5 * 110, 20 * 220)
        dummy_datapoints = [list(range(3 + j % 3)) for j in range(20)]
        position_generator = LayoutImagePositionGenerator(
            data_points=dummy_datapoints,
            margins=dummy_margins,
            max_image_size=dummy_max_image_size
        )
        # Check size
        assert position_generator.mosaic_size == (5 * 110, 20 * 220)
class TestAdaptiveImagePositionGenerator:
    """Unit tests for AdaptiveImagePositionGenerator.

    The cell size is max_image_size plus margins on both sides: (110, 220)
    here. The grid shrinks when there are too few points for a full row,
    and the trailing partial row is centered horizontally.
    """

    def test_constructor(self):
        """Grid shape adapts to the number of data points."""
        margins = (5, 10)
        image_size = (100, 200)
        # 150 points at up to 20 columns -> a full 20 x 8 grid.
        generator = AdaptiveImagePositionGenerator(
            data_points=list(range(150)),
            max_cols=20,
            margins=margins,
            max_image_size=image_size,
        )
        assert generator._width == 110
        assert generator._height == 220
        assert generator._n_cols == 20
        assert generator._n_rows == 8
        # 15 points cannot fill a single row -> the grid shrinks to 15 x 1.
        generator = AdaptiveImagePositionGenerator(
            data_points=list(range(15)),
            max_cols=20,
            margins=margins,
            max_image_size=image_size,
        )
        assert generator._width == 110
        assert generator._height == 220
        assert generator._n_cols == 15
        assert generator._n_rows == 1

    def test__iter(self):
        """The trailing partial row is centered along the x axis."""
        generator = AdaptiveImagePositionGenerator(
            data_points=list(range(5)),
            max_cols=2,
            margins=(5, 10),
            max_image_size=(100, 200),
        )
        # Two full rows of two images, then a single centered one at x = 60.
        expected = [(5, 10), (115, 10), (5, 230), (115, 230), (60, 450)]
        for idx, got in enumerate(list(generator)):
            assert got == expected[idx]

    def test_mosaic_size(self):
        """mosaic_size tracks the adapted grid shape."""
        margins = (5, 10)
        image_size = (100, 200)
        cases = [
            (150, (20 * 110, 8 * 220)),
            (15, (15 * 110, 1 * 220)),
        ]
        for n_points, expected_size in cases:
            generator = AdaptiveImagePositionGenerator(
                data_points=list(range(n_points)),
                max_cols=20,
                margins=margins,
                max_image_size=image_size,
            )
            assert generator.mosaic_size == expected_size
class TestLegendItemPositionGenerator:
    """Unit tests for LegendItemPositionGenerator.

    The generator packs items of heterogeneous sizes into a grid of uniform
    cells; the cell size is the per-axis maximum over all item sizes, which
    is (200, 300) for the item set shared by these tests.
    """

    @staticmethod
    def _items():
        """Item sizes shared by every test case."""
        return [(100, 100), (50, 100), (100, 50), (200, 100), (20, 300)]

    def test_constructor(self):
        """Grid bookkeeping is derived from the item sizes; bad input raises."""
        generator = LegendItemPositionGenerator(
            items_sizes=self._items(),
            axis=0,
            max_size_along_axis=350,
            main_axis_align='start',
            minor_axis_align='start',
        )
        assert generator._cell_size == (200, 300)
        assert generator._new_positions == (1, 0)
        assert generator._n_items_along_main_axis == 1
        assert generator._n_items_along_minor_axis == 5
        assert generator._remainder == 0
        # An empty or missing items list is rejected outright.
        for bad_items in ([], None):
            with pytest.raises(ValueError):
                LegendItemPositionGenerator(
                    items_sizes=bad_items,
                    axis=0,
                    max_size_along_axis=200,
                    main_axis_align='start',
                    minor_axis_align='start',
                )
        # A legend too small to hold even one cell is rejected as well.
        with pytest.raises(ValueError):
            LegendItemPositionGenerator(
                items_sizes=self._items(),
                axis=0,
                max_size_along_axis=200,
                main_axis_align='start',
                minor_axis_align='start',
            )

    def test__iter(self):
        """Cells are filled along the requested axis, then wrap."""
        expected_by_axis = {
            0: [(0, 0), (0, 300), (200, 0), (200, 300), (400, 0)],  # vertical
            1: [(0, 0), (200, 0), (400, 0), (0, 300), (200, 300)],  # horizontal
        }
        for axis, expected in expected_by_axis.items():
            generator = LegendItemPositionGenerator(
                items_sizes=self._items(),
                axis=axis,
                max_size_along_axis=600,
                main_axis_align='start',
                minor_axis_align='start',
            )
            for idx, got in enumerate(list(generator)):
                assert got == expected[idx]

    def test_align_cell_in_box(self):
        """Items are aligned inside their cell per the two alignment flags."""
        top_left = [(0, 0), (0, 300), (200, 0), (200, 300), (400, 0)]
        # (main_axis_align, minor_axis_align) -> expected aligned positions
        # for the five shared items placed at the top_left cell corners.
        cases = [
            ('start', 'start',
             [(0, 0), (0, 300), (200, 0), (200, 300), (400, 0)]),
            ('center', 'start',
             [(0, 100), (0, 400), (200, 125), (200, 400), (400, 0)]),
            ('end', 'start',
             [(0, 200), (0, 500), (200, 250), (200, 500), (400, 0)]),
            ('start', 'center',
             [(50, 0), (75, 300), (250, 0), (200, 300), (490, 0)]),
            ('start', 'end',
             [(100, 0), (150, 300), (300, 0), (200, 300), (580, 0)]),
            ('center', 'end',
             [(100, 100), (150, 400), (300, 125), (200, 400), (580, 0)]),
        ]
        for main_align, minor_align, expected in cases:
            generator = LegendItemPositionGenerator(
                items_sizes=self._items(),
                axis=0,
                max_size_along_axis=600,
                main_axis_align=main_align,
                minor_axis_align=minor_align,
            )
            for idx, item_size in enumerate(self._items()):
                coordinates = generator.align_cell_in_box(
                    main_axis_coordinate=top_left[idx][1],
                    minor_axis_coordinate=top_left[idx][0],
                    item_size=item_size,
                )
                assert expected[idx] == coordinates

    def test_cell_size(self):
        """cell_size is the elementwise maximum over all item sizes."""
        generator = LegendItemPositionGenerator(
            items_sizes=self._items(),
            axis=0,
            max_size_along_axis=600,
        )
        assert generator.cell_size == (200, 300)

    def test_legend_size(self):
        """legend_size covers the full grid, including a partial last column."""
        generator = LegendItemPositionGenerator(
            items_sizes=self._items(),
            axis=0,
            max_size_along_axis=600,
        )
        assert generator.legend_size == (600, 600)
| |
if digit not in digits:
# result.append(digit)
# digits.add(digit)
# return result
#
# def compare(logins, d1, d2):
# for login in logins:
# if d1 in login and d2 in login:
# i1 = login.find(d1)
# i2 = login.find(d2)
# if i1 < i2:
# return -1
# elif i1 > i2:
# return +1
# else:
# raise Exception, "Invalid comparison for " + d1 + "," + d2
#
# def sort_digits():
# logins = read_logins()
# digits = get_digits(logins)
# for i in range(0, len(digits)-1):
# for j in range(i+1, len(digits)):
# comp = compare(logins, digits[i], digits[j])
# if comp > 0:
# digits[i], digits[j] = digits[j], digits[i]
# return digits
#
# digits = sort_digits()
# print digits
'''
Problem 80
It is well known that if the square root of a natural number is not an integer, then it is irrational.
The decimal expansion of such square roots is infinite without any repeating pattern at all.
The square root of two is 1.41421356237309504880..., and the digital sum of the first one hundred decimal digits is 475.
For the first one hundred natural numbers, find the total of the digital sums of the first one hundred decimal digits for all the irrational square roots.
'''
def p80():
    """Project Euler 80: sum the digital sums of the first 100 decimal digits
    of sqrt(n) for every irrational square root with 1 <= n <= 100.

    Returns the total (and prints it, matching the other solvers here).
    """
    from math import isqrt  # stdlib perfect-square test, replaces hand-rolled is_square

    # Two guard digits beyond 100 so Decimal's rounding of the last place
    # cannot disturb the digits we actually sum.
    getcontext().prec = 102
    total = 0
    for i in range(1, 101):
        if isqrt(i) ** 2 == i:
            continue  # perfect square -> rational root, excluded by the problem
        digits = str(Decimal(i).sqrt())
        # First 100 significant digits: the single integer digit plus the
        # first 99 decimals (index 1 is the '.', so it is skipped).
        first_hundred = digits[0] + digits[2:101]
        total += sum(map(int, first_hundred))
    print(total)
    return total
'''
Problem 81
In the 5 by 5 matrix below, the minimal path sum
from the top left to the bottom right,
by only moving to the right and down, is indicated in bold red and is equal to 2427.
131} 673 234 103 18
201} 96} 342} 965 150
630 803 746} 422} 111
537 699 497 121} 956
805 732 524 37} 331}
Find the minimal path sum, in matrix.txt (right click and "Save Link/Target As..."),
a 31K text file containing an 80 by 80 matrix,
from the top left to the bottom right by only moving right and down.
'''
def get_matrix():
    """Read the Problem 81 matrix from text/p081_matrix.txt.

    Returns a list of rows, each a list of ints.
    """
    # Context manager closes the handle even on error (the original
    # leaked the open file); strip() also tolerates CRLF line endings.
    with open('text/p081_matrix.txt') as f:
        return [list(map(int, line.strip().split(','))) for line in f]
# http://www.geeksforgeeks.org/dynamic-programming-set-6-min-cost-path/
def minCost(matrix, m, n):
    """Minimal path sum from matrix[0][0] to matrix[m][n], moving only
    right and down (Project Euler 81).

    :param matrix: 2D list of ints, at least (m+1) x (n+1)
    :param m: target row index
    :param n: target column index
    :return: the minimal path sum

    Fixes vs. the original: the DP table is sized from m and n instead of a
    hard-coded 80x80 (which broke any non-80x80 input), and the debug print
    of the whole table is removed.
    """
    rows, cols = m + 1, n + 1
    # tc[i][j] = cheapest cost of reaching cell (i, j) from (0, 0).
    tc = [[0] * cols for _ in range(rows)]
    tc[0][0] = matrix[0][0]
    # First column: cells are only reachable from above.
    for i in range(1, rows):
        tc[i][0] = tc[i - 1][0] + matrix[i][0]
    # First row: cells are only reachable from the left.
    for j in range(1, cols):
        tc[0][j] = tc[0][j - 1] + matrix[0][j]
    # Interior: arrive from above or from the left, whichever is cheaper.
    for i in range(1, rows):
        for j in range(1, cols):
            tc[i][j] = min(tc[i - 1][j], tc[i][j - 1]) + matrix[i][j]
    return tc[m][n]
def p81():
    """Project Euler 81: minimal right/down path sum through the matrix file.

    Returns the answer (and prints it).
    """
    matrix = get_matrix()
    # Derive the target corner from the matrix itself instead of
    # hard-coding 79, so any rectangular input works.
    ret = minCost(matrix, len(matrix) - 1, len(matrix[0]) - 1)
    print(ret)
    return ret
'''
Problem 82
NOTE: This problem is a more challenging version of Problem 81.
The minimal path sum in the 5 by 5 matrix below,
by starting in any cell in the left column and
finishing in any cell in the right column,
and only moving up, down, and right, is indicated in red and bold; the sum is equal to 994.
131 673 234} 103} 18}
201} 96} 342} 965 150
630 803 746 422 111
537 699 497 121 956
805 732 524 37 331
Find the minimal path sum, in matrix.txt (right click and "Save Link/Target As..."), a 31K text file containing an 80 by 80 matrix, from the left column to the right column.
'''
def p82_get_matrix():
    """Read the Problem 82 matrix from text/p082_matrix.txt.

    Returns a list of rows, each a list of ints.
    """
    # Context manager closes the handle even on error (the original
    # leaked the open file); strip() also tolerates CRLF line endings.
    with open('text/p082_matrix.txt') as f:
        return [list(map(int, line.strip().split(','))) for line in f]
# http://www.geeksforgeeks.org/dynamic-programming-set-6-min-cost-path/
def p82_minCost(matrix):
    """Minimal left-to-right path sum, moving up, down and right only
    (Project Euler 82).

    Sweeps column by column from the right edge: `best[row]` holds the
    cheapest cost of reaching the right edge from (row, current column).
    """
    rows, cols = len(matrix), len(matrix[0])
    best = [row[-1] for row in matrix]  # seed with the rightmost column
    for col in range(cols - 2, -1, -1):
        # Top cell can only go right.
        best[0] += matrix[0][col]
        # Downward pass: enter from the right, or from the cell above.
        for row in range(1, rows):
            best[row] = matrix[row][col] + min(best[row], best[row - 1])
        # Upward pass: allow entering from the cell below as well.
        for row in range(rows - 2, -1, -1):
            best[row] = min(best[row], best[row + 1] + matrix[row][col])
    return min(best)
def p82():
    """Project Euler 82: print the minimal left-to-right path sum."""
    print(p82_minCost(p82_get_matrix()))
'''
Problem 83
NOTE: This problem is a significantly more challenging version of Problem 81.
In the 5 by 5 matrix below, the minimal path sum
from the top left to the bottom right,
by moving left, right, up, and down, is indicated in bold red and is equal to 2297.
131} 673 234} 103} 18}
201} 96} 342} 965 150}
630 803 746 422} 111}
537 699 497 121} 956
805 732 524 37} 331}
Find the minimal path sum, in matrix.txt (right click and
"Save Link/Target As..."), a 31K text file containing a 80 by 80 matrix, from the top left to the bottom right by moving left, right, up, and down.
'''
def p83():
    """Project Euler 83: minimal 4-directional path sum via shortest_path.

    Builds a graph whose nodes are 'rrcc' strings (zero-padded row and
    column) and whose edge weight is the value of the cell being entered,
    then runs shortest_path from the top-left to the bottom-right corner.
    The start cell's own value is added afterwards, since edge weights only
    account for cells being entered.
    """
    with open('text/p083_matrix.txt') as f:
        matrix = [list(map(int, row.split(','))) for row in f]
    n_rows, n_cols = len(matrix), len(matrix[0])
    graph = Graph()
    for r in range(n_rows):
        for c in range(n_cols):
            node = f'{r:02d}{c:02d}'
            graph.add_node(node)
            # Connect to the orthogonal neighbours that exist on the grid.
            for dr, dc in ((-1, 0), (0, -1), (1, 0), (0, 1)):
                nr, nc = r + dr, c + dc
                if 0 <= nr < n_rows and 0 <= nc < n_cols:
                    graph.add_edge(node, f'{nr:02d}{nc:02d}', matrix[nr][nc])
    print('Answer = ', shortest_path(graph, '0000', '7979')[0] + matrix[0][0])
    return
'''
Problem 84
In the game, Monopoly, the standard board is set up in the following way:
GO A1 CC1 A2 T1 R1 B1 CH1 B2 B3 JAIL
H2 C1
T2 U1
H1 C2
CH3 C3
R4 R2
G3 D1
CC3 CC2
G2 D2
G1 D3
G2J F3 U2 F2 F1 R3 E3 E2 CH2 E1 FP
A player starts on the GO square and adds the scores on two 6-sided dice to determine the number of squares they advance in a clockwise direction.
Without any further rules we would expect to visit each square with equal probability: 2.5%.
However, landing on G2J (Go To Jail), CC (community chest), and CH (chance) changes this distribution.
In addition to G2J, and one card from each of CC and CH, that orders the player to go directly to jail,
if a player rolls three consecutive doubles, they do not advance the result of their 3rd roll.
Instead they proceed directly to jail.
At the beginning of the game, the CC and CH cards are shuffled.
When a player lands on CC or CH they take a card from the top of the respective pile and, after following the instructions, it is returned to the bottom of the pile.
There are sixteen cards in each pile, but for the purpose of this problem we are only concerned with cards that order a movement;
any instruction not concerned with movement will be ignored and the player will remain on the CC/CH square.
Community Chest (2/16 cards):
Advance to GO
Go to JAIL
Chance (10/16 cards):
Advance to GO
Go to JAIL
Go to C1
Go to E3
Go to H2
Go to R1
Go to next R (railway company)
Go to next R
Go to next U (utility company)
Go back 3 squares.
The heart of this problem concerns the likelihood of visiting a particular square.
That is, the probability of finishing at that square after a roll.
For this reason it should be clear that, with the exception of G2J for which the probability of finishing on it is zero,
the CH squares will have the lowest probabilities, as 5/8 request a movement to another square,
and it is the final square that the player finishes at on each roll that we are interested in.
We shall make no distinction between "Just Visiting" and being sent to JAIL,
and we shall also ignore the rule about requiring a double to "get out of jail", assuming that they pay to get out on their next turn.
By starting at GO and numbering the | |
= 2048
elif device_type == "SAHD":
expected_block_size = 256
# Attempt to load the device properties file:
# same file name with PROPERTIES_SUFFIX appended
drive_properties = f"{CFG_DIR}/{file_name}.{PROPERTIES_SUFFIX}"
if Path(drive_properties).is_file():
process = file_cmds.read_drive_properties(drive_properties)
process = ReturnCodeMapper.add_msg(process)
if not process["status"]:
flash(process["msg"], "error")
return redirect(url_for("index"))
conf = process["conf"]
kwargs["vendor"] = conf["vendor"]
kwargs["product"] = conf["product"]
kwargs["revision"] = conf["revision"]
kwargs["block_size"] = conf["block_size"]
expected_block_size = conf["block_size"]
process = ractl.attach_image(scsi_id, **kwargs)
process = ReturnCodeMapper.add_msg(process)
if process["status"]:
flash(_("Attached %(file_name)s to SCSI ID %(id_number)s LUN %(unit_number)s",
file_name=file_name, id_number=scsi_id, unit_number=unit))
if int(file_size) % int(expected_block_size):
flash(_("The image file size %(file_size)s bytes is not a multiple of "
u"%(block_size)s. RaSCSI will ignore the trailing data. "
u"The image may be corrupted, so proceed with caution.",
file_size=file_size, block_size=expected_block_size), "error")
return redirect(url_for("index"))
flash(_("Failed to attach %(file_name)s to SCSI ID %(id_number)s LUN %(unit_number)s",
file_name=file_name, id_number=scsi_id, unit_number=unit), "error")
flash(process["msg"], "error")
return redirect(url_for("index"))
@APP.route("/scsi/detach_all", methods=["POST"])
@login_required
def detach_all_devices():
"""
Detaches all currently attached devices
"""
process = ractl.detach_all()
if process["status"]:
flash(_("Detached all SCSI devices"))
return redirect(url_for("index"))
flash(process["msg"], "error")
return redirect(url_for("index"))
@APP.route("/scsi/detach", methods=["POST"])
@login_required
def detach():
"""
Detaches a specified device
"""
scsi_id = request.form.get("scsi_id")
unit = request.form.get("unit")
process = ractl.detach_by_id(scsi_id, unit)
if process["status"]:
flash(_("Detached SCSI ID %(id_number)s LUN %(unit_number)s",
id_number=scsi_id, unit_number=unit))
return redirect(url_for("index"))
flash(_("Failed to detach SCSI ID %(id_number)s LUN %(unit_number)s",
id_number=scsi_id, unit_number=unit), "error")
flash(process["msg"], "error")
return redirect(url_for("index"))
@APP.route("/scsi/eject", methods=["POST"])
@login_required
def eject():
"""
Ejects a specified removable device image, but keeps the device attached
"""
scsi_id = request.form.get("scsi_id")
unit = request.form.get("unit")
process = ractl.eject_by_id(scsi_id, unit)
if process["status"]:
flash(_("Ejected SCSI ID %(id_number)s LUN %(unit_number)s",
id_number=scsi_id, unit_number=unit))
return redirect(url_for("index"))
flash(_("Failed to eject SCSI ID %(id_number)s LUN %(unit_number)s",
id_number=scsi_id, unit_number=unit), "error")
flash(process["msg"], "error")
return redirect(url_for("index"))
@APP.route("/scsi/info", methods=["POST"])
def device_info():
"""
Displays detailed info for a specific device
"""
scsi_id = request.form.get("scsi_id")
unit = request.form.get("unit")
devices = ractl.list_devices(scsi_id, unit)
# First check if any device at all was returned
if not devices["status"]:
flash(devices["msg"], "error")
return redirect(url_for("index"))
# Looking at the first dict in list to get
# the one and only device that should have been returned
device = devices["device_list"][0]
if str(device["id"]) == scsi_id:
flash(_("DEVICE INFO"))
flash("===========")
flash(_("SCSI ID: %(id_number)s", id_number=device["id"]))
flash(_("LUN: %(unit_number)s", unit_number=device["unit"]))
flash(_("Type: %(device_type)s", device_type=device["device_type"]))
flash(_("Status: %(device_status)s", device_status=device["status"]))
flash(_("File: %(image_file)s", image_file=device["image"]))
flash(_("Parameters: %(value)s", value=device["params"]))
flash(_("Vendor: %(value)s", value=device["vendor"]))
flash(_("Product: %(value)s", value=device["product"]))
flash(_("Revision: %(revision_number)s", revision_number=device["revision"]))
flash(_("Block Size: %(value)s bytes", value=device["block_size"]))
flash(_("Image Size: %(value)s bytes", value=device["size"]))
return redirect(url_for("index"))
flash(devices["msg"], "error")
return redirect(url_for("index"))
@APP.route("/scsi/reserve", methods=["POST"])
@login_required
def reserve_id():
"""
Reserves a SCSI ID and stores the memo for that reservation
"""
scsi_id = request.form.get("scsi_id")
memo = request.form.get("memo")
reserved_ids = ractl.get_reserved_ids()["ids"]
reserved_ids.extend(scsi_id)
process = ractl.reserve_scsi_ids(reserved_ids)
if process["status"]:
RESERVATIONS[int(scsi_id)] = memo
flash(_("Reserved SCSI ID %(id_number)s", id_number=scsi_id))
return redirect(url_for("index"))
flash(_("Failed to reserve SCSI ID %(id_number)s", id_number=scsi_id))
flash(process["msg"], "error")
return redirect(url_for("index"))
@APP.route("/scsi/unreserve", methods=["POST"])
@login_required
def unreserve_id():
"""
Removes the reservation of a SCSI ID as well as the memo for the reservation
"""
scsi_id = request.form.get("scsi_id")
reserved_ids = ractl.get_reserved_ids()["ids"]
reserved_ids.remove(scsi_id)
process = ractl.reserve_scsi_ids(reserved_ids)
if process["status"]:
RESERVATIONS[int(scsi_id)] = ""
flash(_("Released the reservation for SCSI ID %(id_number)s", id_number=scsi_id))
return redirect(url_for("index"))
flash(_("Failed to release the reservation for SCSI ID %(id_number)s", id_number=scsi_id))
flash(process["msg"], "error")
return redirect(url_for("index"))
@APP.route("/pi/reboot", methods=["POST"])
@login_required
def restart():
"""
Restarts the Pi
"""
ractl.shutdown_pi("reboot")
return redirect(url_for("index"))
@APP.route("/pi/shutdown", methods=["POST"])
@login_required
def shutdown():
"""
Shuts down the Pi
"""
ractl.shutdown_pi("system")
return redirect(url_for("index"))
@APP.route("/files/download_to_iso", methods=["POST"])
@login_required
def download_to_iso():
"""
Downloads a remote file and creates a CD-ROM image formatted with HFS that contains the file
"""
scsi_id = request.form.get("scsi_id")
url = request.form.get("url")
iso_args = request.form.get("type").split()
process = file_cmds.download_file_to_iso(url, *iso_args)
process = ReturnCodeMapper.add_msg(process)
if process["status"]:
flash(process["msg"])
flash(_("Saved image as: %(file_name)s", file_name=process['file_name']))
else:
flash(_("Failed to create CD-ROM image from %(url)s", url=url), "error")
flash(process["msg"], "error")
return redirect(url_for("index"))
process_attach = ractl.attach_image(scsi_id, device_type="SCCD", image=process["file_name"])
process_attach = ReturnCodeMapper.add_msg(process_attach)
if process_attach["status"]:
flash(_("Attached to SCSI ID %(id_number)s", id_number=scsi_id))
return redirect(url_for("index"))
flash(_("Failed to attach image to SCSI ID %(id_number)s. Try attaching it manually.",
id_number=scsi_id), "error")
flash(process_attach["msg"], "error")
return redirect(url_for("index"))
@APP.route("/files/download_to_images", methods=["POST"])
@login_required
def download_img():
"""
Downloads a remote file onto the images dir on the Pi
"""
url = request.form.get("url")
server_info = ractl.get_server_info()
process = file_cmds.download_to_dir(url, server_info["image_dir"], Path(url).name)
process = ReturnCodeMapper.add_msg(process)
if process["status"]:
flash(process["msg"])
return redirect(url_for("index"))
flash(_("Failed to download file from %(url)s", url=url), "error")
flash(process["msg"], "error")
return redirect(url_for("index"))
@APP.route("/files/download_to_afp", methods=["POST"])
@login_required
def download_afp():
"""
Downloads a remote file onto the AFP shared dir on the Pi
"""
url = request.form.get("url")
file_name = Path(url).name
process = file_cmds.download_to_dir(url, AFP_DIR, file_name)
process = ReturnCodeMapper.add_msg(process)
if process["status"]:
flash(process["msg"])
return redirect(url_for("index"))
flash(_("Failed to download file from %(url)s", url=url), "error")
flash(process["msg"], "error")
return redirect(url_for("index"))
@APP.route("/files/upload", methods=["POST"])
def upload_file():
"""
Uploads a file from the local computer to the images dir on the Pi
Depending on the Dropzone.js JavaScript library
"""
# Due to the embedded javascript library, we cannot use the @login_required decorator
auth = auth_active()
if auth["status"] and "username" not in session:
return make_response(auth["msg"], 403)
from werkzeug.utils import secure_filename
from os import path
log = logging.getLogger("pydrop")
file_object = request.files["file"]
file_name = secure_filename(file_object.filename)
server_info = ractl.get_server_info()
save_path = path.join(server_info["image_dir"], file_name)
current_chunk = int(request.form['dzchunkindex'])
# Makes sure not to overwrite an existing file,
# but continues writing to a file transfer in progress
if path.exists(save_path) and current_chunk == 0:
return make_response(_("The file already exists!"), 400)
try:
with open(save_path, "ab") as save:
save.seek(int(request.form["dzchunkbyteoffset"]))
save.write(file_object.stream.read())
except OSError:
log.exception("Could not write to file")
return make_response(_("Unable to write the file to disk!"), 500)
total_chunks = int(request.form["dztotalchunkcount"])
if current_chunk + 1 == total_chunks:
# Validate the resulting file size after writing the last chunk
if path.getsize(save_path) != int(request.form["dztotalfilesize"]):
log.error(
"Finished transferring %s, "
"but it has a size mismatch with the original file. "
"Got %s but we expected %s.",
file_object.filename,
path.getsize(save_path),
request.form['dztotalfilesize'],
)
return make_response(_("Transferred file corrupted!"), 500)
log.info("File %s has been uploaded successfully", file_object.filename)
log.debug("Chunk %s of %s for file %s completed.",
current_chunk + 1, total_chunks, file_object.filename)
return make_response(_("File upload successful!"), 200)
@APP.route("/files/create", methods=["POST"])
@login_required
def create_file():
"""
Creates an empty image file in the images dir
"""
file_name = request.form.get("file_name")
size = (int(request.form.get("size")) * 1024 * 1024)
file_type = request.form.get("type")
full_file_name = file_name + "." + file_type
process = file_cmds.create_new_image(file_name, file_type, size)
if process["status"]:
flash(_("Image file created: %(file_name)s", file_name=full_file_name))
return redirect(url_for("index"))
flash(process["msg"], "error")
return redirect(url_for("index"))
@APP.route("/files/download", methods=["POST"])
@login_required
def download():
"""
Downloads a file from the Pi to the local computer
"""
image = request.form.get("file")
return send_file(image, as_attachment=True)
@APP.route("/files/delete", methods=["POST"])
@login_required
def delete():
"""
Deletes a specified file in the images dir
"""
file_name = request.form.get("file_name")
process = file_cmds.delete_image(file_name)
if process["status"]:
flash(_("Image file deleted: %(file_name)s", file_name=file_name))
else:
flash(process["msg"], "error")
return redirect(url_for("index"))
# Delete the drive properties file, if it exists
prop_file_path = f"{CFG_DIR}/{file_name}.{PROPERTIES_SUFFIX}"
if Path(prop_file_path).is_file():
process = file_cmds.delete_file(prop_file_path)
process = ReturnCodeMapper.add_msg(process)
if process["status"]:
flash(process["msg"])
return redirect(url_for("index"))
flash(process["msg"], "error")
return redirect(url_for("index"))
return redirect(url_for("index"))
@APP.route("/files/rename", methods=["POST"])
@login_required
def rename():
"""
Renames a specified file in the images dir
"""
file_name = request.form.get("file_name")
new_file_name = request.form.get("new_file_name")
process = file_cmds.rename_image(file_name, new_file_name)
if process["status"]:
flash(_("Image file renamed to: %(file_name)s", file_name=new_file_name))
else:
flash(process["msg"], "error")
return redirect(url_for("index"))
# Rename the drive properties file, if it exists
prop_file_path = f"{CFG_DIR}/{file_name}.{PROPERTIES_SUFFIX}"
new_prop_file_path = f"{CFG_DIR}/{new_file_name}.{PROPERTIES_SUFFIX}"
if Path(prop_file_path).is_file():
process = file_cmds.rename_file(prop_file_path, new_prop_file_path)
process = ReturnCodeMapper.add_msg(process)
if process["status"]:
flash(process["msg"])
return redirect(url_for("index"))
flash(process["msg"], "error")
return redirect(url_for("index"))
return redirect(url_for("index"))
@APP.route("/files/unzip", methods=["POST"])
@login_required
def unzip():
"""
Unzips all files in a specified zip archive, or a single file in the zip archive
"""
zip_file = request.form.get("zip_file")
zip_member = request.form.get("zip_member") or False
zip_members = request.form.get("zip_members") or False
from ast import literal_eval
if zip_members:
zip_members = literal_eval(zip_members)
process = file_cmds.unzip_file(zip_file, zip_member, zip_members)
if process["status"]:
if not process["msg"]:
flash(_("Aborted unzip: File(s) with the same name already exists."), "error")
return redirect(url_for("index"))
flash(_("Unzipped the following files:"))
for msg in process["msg"]:
flash(msg)
if process["prop_flag"]:
flash(_("Properties file(s) have been moved to %(directory)s", directory=CFG_DIR))
return redirect(url_for("index"))
flash(_("Failed to unzip %(zip_file)s", zip_file=zip_file), "error")
flash(process["msg"], "error")
return redirect(url_for("index"))
@APP.route("/language", methods=["POST"])
def change_language():
"""
Changes the session language locale and refreshes the Flask app context
"""
locale = request.form.get("locale")
session["language"] = locale
ractl.locale = session["language"]
file_cmds.locale = session["language"]
refresh()
language = Locale.parse(locale)
language_name = language.get_language_name(locale)
flash(_("Changed Web Interface language to %(locale)s", locale=language_name))
return redirect(url_for("index"))
@APP.before_first_request
def detect_locale():
    """
    Get the detected locale to use for UI string translations.
    This requires the Flask app to have started first.
    """
    # Seed the session language from the detected locale and keep both
    # command backends in sync with it.
    session["language"] = get_locale()
    ractl.locale = session["language"]
    file_cmds.locale = session["language"]
if __name__ == "__main__":
APP.secret_key = "rascsi_is_awesome_insecure_secret_key"
APP.config["SESSION_TYPE"] = "filesystem"
APP.config["MAX_CONTENT_LENGTH"] | |
# Repository: pulumi/pulumi-alicloud
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['AccountArgs', 'Account']
@pulumi.input_type
class AccountArgs:
    """Input arguments accepted when constructing an `Account` resource (tfgen-generated code)."""
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 db_cluster_id: pulumi.Input[str],
                 account_description: Optional[pulumi.Input[str]] = None,
                 account_password: Optional[pulumi.Input[str]] = None,
                 kms_encrypted_password: Optional[pulumi.Input[str]] = None,
                 kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None):
        """
        The set of arguments for constructing a Account resource.
        :param pulumi.Input[str] account_name: Operation account requiring a uniqueness check. It may consist of lower case letters, numbers, and underlines, and must start with a letter and have no more than 16 characters.
        :param pulumi.Input[str] db_cluster_id: The Id of cluster in which account belongs.
        :param pulumi.Input[str] account_description: Account description. It cannot begin with https://. It must start with a Chinese character or English letter. It can include Chinese and English characters, underlines (_), hyphens (-), and numbers. The length may be 2-256 characters.
        :param pulumi.Input[str] account_password: Operation password. It may consist of letters, digits, or underlines, with a length of 6 to 32 characters. You have to specify one of `account_password` and `kms_encrypted_password` fields.
        :param pulumi.Input[str] kms_encrypted_password: An KMS encrypts password used to a db account. If the `account_password` is filled in, this field will be ignored.
        :param pulumi.Input[Mapping[str, Any]] kms_encryption_context: An KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating a db account with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
        """
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "db_cluster_id", db_cluster_id)
        # Optional arguments are stored only when explicitly provided by the caller.
        if account_description is not None:
            pulumi.set(__self__, "account_description", account_description)
        if account_password is not None:
            pulumi.set(__self__, "account_password", account_password)
        if kms_encrypted_password is not None:
            pulumi.set(__self__, "kms_encrypted_password", kms_encrypted_password)
        if kms_encryption_context is not None:
            pulumi.set(__self__, "kms_encryption_context", kms_encryption_context)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        Operation account requiring a uniqueness check. It may consist of lower case letters, numbers, and underlines, and must start with a letter and have no more than 16 characters.
        """
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter(name="dbClusterId")
    def db_cluster_id(self) -> pulumi.Input[str]:
        """
        The Id of cluster in which account belongs.
        """
        return pulumi.get(self, "db_cluster_id")
    @db_cluster_id.setter
    def db_cluster_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "db_cluster_id", value)
    @property
    @pulumi.getter(name="accountDescription")
    def account_description(self) -> Optional[pulumi.Input[str]]:
        """
        Account description. It cannot begin with https://. It must start with a Chinese character or English letter. It can include Chinese and English characters, underlines (_), hyphens (-), and numbers. The length may be 2-256 characters.
        """
        return pulumi.get(self, "account_description")
    @account_description.setter
    def account_description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_description", value)
    @property
    @pulumi.getter(name="accountPassword")
    def account_password(self) -> Optional[pulumi.Input[str]]:
        """
        Operation password. It may consist of letters, digits, or underlines, with a length of 6 to 32 characters. You have to specify one of `account_password` and `kms_encrypted_password` fields.
        """
        return pulumi.get(self, "account_password")
    @account_password.setter
    def account_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_password", value)
    @property
    @pulumi.getter(name="kmsEncryptedPassword")
    def kms_encrypted_password(self) -> Optional[pulumi.Input[str]]:
        """
        An KMS encrypts password used to a db account. If the `account_password` is filled in, this field will be ignored.
        """
        return pulumi.get(self, "kms_encrypted_password")
    @kms_encrypted_password.setter
    def kms_encrypted_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_encrypted_password", value)
    @property
    @pulumi.getter(name="kmsEncryptionContext")
    def kms_encryption_context(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        An KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating a db account with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
        """
        return pulumi.get(self, "kms_encryption_context")
    @kms_encryption_context.setter
    def kms_encryption_context(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "kms_encryption_context", value)
@pulumi.input_type
class _AccountState:
    """State inputs used for looking up and filtering `Account` resources (tfgen-generated code)."""
    def __init__(__self__, *,
                 account_description: Optional[pulumi.Input[str]] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 account_password: Optional[pulumi.Input[str]] = None,
                 db_cluster_id: Optional[pulumi.Input[str]] = None,
                 kms_encrypted_password: Optional[pulumi.Input[str]] = None,
                 kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None):
        """
        Input properties used for looking up and filtering Account resources.
        :param pulumi.Input[str] account_description: Account description. It cannot begin with https://. It must start with a Chinese character or English letter. It can include Chinese and English characters, underlines (_), hyphens (-), and numbers. The length may be 2-256 characters.
        :param pulumi.Input[str] account_name: Operation account requiring a uniqueness check. It may consist of lower case letters, numbers, and underlines, and must start with a letter and have no more than 16 characters.
        :param pulumi.Input[str] account_password: Operation password. It may consist of letters, digits, or underlines, with a length of 6 to 32 characters. You have to specify one of `account_password` and `kms_encrypted_password` fields.
        :param pulumi.Input[str] db_cluster_id: The Id of cluster in which account belongs.
        :param pulumi.Input[str] kms_encrypted_password: An KMS encrypts password used to a db account. If the `account_password` is filled in, this field will be ignored.
        :param pulumi.Input[Mapping[str, Any]] kms_encryption_context: An KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating a db account with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
        """
        # Every state field is optional; store only those explicitly provided.
        if account_description is not None:
            pulumi.set(__self__, "account_description", account_description)
        if account_name is not None:
            pulumi.set(__self__, "account_name", account_name)
        if account_password is not None:
            pulumi.set(__self__, "account_password", account_password)
        if db_cluster_id is not None:
            pulumi.set(__self__, "db_cluster_id", db_cluster_id)
        if kms_encrypted_password is not None:
            pulumi.set(__self__, "kms_encrypted_password", kms_encrypted_password)
        if kms_encryption_context is not None:
            pulumi.set(__self__, "kms_encryption_context", kms_encryption_context)
    @property
    @pulumi.getter(name="accountDescription")
    def account_description(self) -> Optional[pulumi.Input[str]]:
        """
        Account description. It cannot begin with https://. It must start with a Chinese character or English letter. It can include Chinese and English characters, underlines (_), hyphens (-), and numbers. The length may be 2-256 characters.
        """
        return pulumi.get(self, "account_description")
    @account_description.setter
    def account_description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_description", value)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> Optional[pulumi.Input[str]]:
        """
        Operation account requiring a uniqueness check. It may consist of lower case letters, numbers, and underlines, and must start with a letter and have no more than 16 characters.
        """
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter(name="accountPassword")
    def account_password(self) -> Optional[pulumi.Input[str]]:
        """
        Operation password. It may consist of letters, digits, or underlines, with a length of 6 to 32 characters. You have to specify one of `account_password` and `kms_encrypted_password` fields.
        """
        return pulumi.get(self, "account_password")
    @account_password.setter
    def account_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_password", value)
    @property
    @pulumi.getter(name="dbClusterId")
    def db_cluster_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Id of cluster in which account belongs.
        """
        return pulumi.get(self, "db_cluster_id")
    @db_cluster_id.setter
    def db_cluster_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "db_cluster_id", value)
    @property
    @pulumi.getter(name="kmsEncryptedPassword")
    def kms_encrypted_password(self) -> Optional[pulumi.Input[str]]:
        """
        An KMS encrypts password used to a db account. If the `account_password` is filled in, this field will be ignored.
        """
        return pulumi.get(self, "kms_encrypted_password")
    @kms_encrypted_password.setter
    def kms_encrypted_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_encrypted_password", value)
    @property
    @pulumi.getter(name="kmsEncryptionContext")
    def kms_encryption_context(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        An KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating a db account with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
        """
        return pulumi.get(self, "kms_encryption_context")
    @kms_encryption_context.setter
    def kms_encryption_context(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "kms_encryption_context", value)
class Account(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_description: Optional[pulumi.Input[str]] = None,
account_name: Optional[pulumi.Input[str]] = None,
account_password: Optional[pulumi.Input[str]] = None,
db_cluster_id: Optional[pulumi.Input[str]] = None,
kms_encrypted_password: Optional[pulumi.Input[str]] = None,
kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None,
__props__=None):
"""
Provides a [ADB](https://www.alibabacloud.com/help/product/92664.htm) account resource and used to manage databases.
> **NOTE:** Available in v1.71.0+.
## Example Usage
```python
import pulumi
import pulumi_alicloud as alicloud
config = pulumi.Config()
creation = config.get("creation")
if creation is None:
creation = "ADB"
name = config.get("name")
if name is None:
name = "adbaccountmysql"
default_zones = alicloud.get_zones(available_resource_creation=creation)
default_network = alicloud.vpc.Network("defaultNetwork", cidr_block="172.16.0.0/16")
default_switch = alicloud.vpc.Switch("defaultSwitch",
vpc_id=default_network.id,
cidr_block="172.16.0.0/24",
zone_id=default_zones.zones[0].id)
cluster = alicloud.adb.Cluster("cluster",
db_cluster_version="3.0",
db_cluster_category="Cluster",
db_node_class="C8",
db_node_count=2,
db_node_storage=200,
pay_type="PostPaid",
vswitch_id=default_switch.id,
description=name)
account = alicloud.adb.Account("account",
db_cluster_id=cluster.id,
account_name="tftestnormal",
account_password="<PASSWORD>",
account_description=name)
```
## Import
ADB account can be imported using the id, e.g.
```sh
$ pulumi import alicloud:adb/account:Account example "am-12345:tf_account"
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_description: Account description. It cannot begin with https://. It must start with a Chinese character or English letter. It can include Chinese and English characters, underlines (_), hyphens (-), and numbers. The length may be 2-256 characters.
:param pulumi.Input[str] account_name: Operation account requiring a uniqueness check. It may consist of lower case letters, numbers, | |
== "silva":
execute("identify_chimeric_seqs.py -i %s -m usearch61 -o %s -r %s"
% (inFolder + i, temp + i, PR['silva_chim_ref']),
shell=True)
else:
execute("identify_chimeric_seqs.py -i %s -m usearch61 -o %s -r %s" % (
inFolder + i, temp + i, PR['gg_chim_ref']),
shell=True)
execute("filter_fasta.py -f %s -o %s -s %s/non_chimeras.txt" % (inFolder + i, outFolder + i, temp + i),
shell=True)
call("rm -r %s" % temp, shell=True)
if PR['remove_intermediate']:
os.remove(inFolder+i)
p = Pool(PR['number_of_cores'])
p.map(process, files)
if PR['remove_intermediate']:
os.removedirs(inFolder)
def pickotus(inFolder, outFolder, rdb="silva", fungus=False):
    """
    Run QIIME open-reference OTU picking on every .fasta file in ``inFolder``.

    :param inFolder: folder containing the chimera-filtered .fasta files
    :param outFolder: destination folder for the OTU-picking output
    :param rdb: reference database, "silva" or greengenes (anything else)
    :param fungus: when True, skip alignment/tree building (ITS/fungal data)
    """
    # TODO : add no parallel option
    global PR
    inFolder = asfolder(inFolder)
    outFolder = asfolder(outFolder)
    inFolder_fasta = inFolder + "*.fasta"
    print("Otu picking...")
    # Single core -> run serially; otherwise let QIIME spawn parallel jobs.
    if PR['np']:
        parallel_string = ""
    else:
        parallel_string = "-a -O %d" % PR['number_of_cores']
    if PR['c_ref'] != "none":
        # Continuation run: reuse the reference set built by a previous analysis.
        if rdb == "silva":
            execute("pick_open_reference_otus.py -i %s -o %s -p %s -r %s %s -n %s"
                    % (
                        inFolder_fasta, outFolder, PR['parameter_file_name'], PR['c_ref'], parallel_string, PR['c_otu_id']),
                    shell=True)
        elif fungus:
            execute("pick_open_reference_otus.py -i %s -o %s -p %s %s -n %s --suppress_align_and_tree"
                    % (inFolder_fasta, outFolder, PR['parameter_file_name'], parallel_string, PR['c_otu_id']), shell=True)
        else:
            execute("pick_open_reference_otus.py -i %s -o %s -r %s -p %s %s -n %s"
                    % (inFolder_fasta, outFolder,
                       PR['c_ref'], PR['parameter_file_name'],
                       parallel_string, PR['c_otu_id']), shell=True)
    else:
        if rdb == "silva":
            execute("pick_open_reference_otus.py -i %s -o %s -p %s -r %s %s -n %s"
                    % (inFolder_fasta, outFolder, PR['parameter_file_name'], PR['silva_reference_seqs'], parallel_string,
                       PR['c_otu_id']),
                    shell=True)
            execute("filter_otus_from_otu_table.py -i %s -o %s --negate_ids_to_exclude -e %s"
                    % (outFolder + "otu_table_mc2_w_tax_no_pynast_failures.biom",
                       outFolder + "otu_table_mc2_w_tax_no_pynast_failures_close_reference.biom",
                       PR['silva_reference_seqs']), shell=True)
        elif fungus:
            # BUG FIX: there was no space between the -n value and
            # --suppress_align_and_tree, producing a malformed command line.
            execute("pick_open_reference_otus.py -i %s -o %s -p %s %s -n %s --suppress_align_and_tree"
                    % (inFolder_fasta, outFolder, PR['parameter_file_name'], parallel_string,
                       PR['c_otu_id']), shell=True)
        else:
            # BUG FIX: the format string was missing the %s for
            # parallel_string (5 placeholders, 6 arguments -> TypeError).
            execute("pick_open_reference_otus.py -i %s -o %s -r %s -p %s %s -n %s"
                    % (inFolder_fasta, outFolder,
                       PR['gg_reference_seqs'], PR['parameter_file_name'],
                       parallel_string, PR['c_otu_id']), shell=True)
            execute("filter_otus_from_otu_table.py -i %s -o %s --negate_ids_to_exclude -e %s"
                    % (outFolder + "otu_table_mc2_w_tax_no_pynast_failures.biom",
                       outFolder + "otu_table_mc2_w_tax_no_pynast_failures_close_reference.biom",
                       PR['gg_reference_seqs']), shell=True)
    if PR['remove_intermediate']:
        os.removedirs(inFolder)
def writedf(outFile, ids, sampleIds):
    """
    Write a QIIME-style mapping file pairing each sample id with its file.

    :param outFile: path of the mapping file to create (overwritten if present)
    :param ids: sample identifiers, one per input file
    :param sampleIds: file names corresponding one-to-one with ``ids``
    """
    # "with" guarantees the handle is closed even if a write raises.
    with open(outFile, "w") as f:
        f.write("#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tRead\tFile\tDescription\n")
        for sample_id, file_name in zip(ids, sampleIds):
            f.write("%s\t\t\tR1\t%s\tsingle_file\n" % (sample_id, file_name))
def create_map(inFolder, outFile):
    """
    Build a mapping file listing every file in ``inFolder`` as a sample.

    Sample ids are derived from file names by stripping the ".fasta" suffix
    and keeping only the part before the first underscore.

    :param inFolder: folder whose files are listed as samples
    :param outFile: path of the mapping file to write
    """
    inFolder = asfolder(inFolder)
    print("Writing mapping file")
    sampleIds = os.listdir(inFolder)
    # e.g. "S1_L001.fasta" -> "S1"
    ids = [x.replace(".fasta", "").split("_")[0] for x in sampleIds]
    writedf(outFile, ids, sampleIds)
def corediv(inFolder, outFolder, mappingFile, depth):
    """
    Run QIIME's core diversity analyses on a picked-OTU folder.
    """
    print("Core diversity analyses...")
    inFolder = asfolder(inFolder)
    outFolder = asfolder(outFolder)
    # Fungal (ITS) runs have no alignment/tree, so use the plain biom table
    # and skip phylogenetic metrics; otherwise include the rep-set tree.
    if PR['fungus']:
        biom = inFolder + "otu_table_mc2_w_tax.biom"
        command = "core_diversity_analyses.py -i %s -o %s -m %s -e %d --nonphylogenetic_diversity" % (
            biom, outFolder, mappingFile, depth)
    else:
        biom = inFolder + "otu_table_mc2_w_tax_no_pynast_failures.biom"
        tree = inFolder + "rep_set.tre"
        command = "core_diversity_analyses.py -i %s -o %s -m %s -t %s -e %d" % (biom, outFolder, mappingFile, tree, depth)
    execute(command, shell=True)
def full_analysis(inFolder, outFolder, depth, rdb, trimq, joining_method,
                  qcq, maxloose, fastq_p):
    """
    Run the whole pipeline: trim, merge, quality control, chimera removal,
    OTU picking and core diversity analyses.

    :param inFolder: folder with the raw fastq files
    :param outFolder: root output folder; one sub-folder is created per step
    :param depth: sampling depth for the diversity analyses
    :param rdb: reference database ("silva", "greengenes" or "unite")
    :param trimq: phred threshold used for trimming
    :param joining_method: "fastq-join" or "bbmerge"
    :param qcq: phred threshold used for quality control
    :param maxloose: bbmerge "maxloose" flag
    :param fastq_p: fastq-join mismatch percentage
    :raises ValueError: if ``joining_method`` is not a known merger
    """
    global PR
    trimmed = asfolder(outFolder + PR['Ftrimmed'])
    merged = asfolder(outFolder + PR['Fmerged'])
    qc = asfolder(outFolder + PR['Fqc'])
    chi = asfolder(outFolder + PR['Fchi'])
    otus = asfolder(outFolder + PR['Fotus'])
    div = asfolder(outFolder + PR['Fdiv'])
    trimfolder(inFolder, trimmed, trimq)
    if joining_method == "fastq-join":
        mergefolderfastq(trimmed, merged, fastq_p)
    elif joining_method == "bbmerge":
        mergefolderbb(trimmed, merged, maxloose=maxloose)
    else:
        # BUG FIX: `raise ("Wrong method")` raised a bare string, which is a
        # TypeError (exceptions must derive from BaseException).
        raise ValueError("%s: unknown merging method" % joining_method)
    qualitycontrol(merged, qc, qcq)
    removechimera(qc, chi, rdb)
    pickotus(chi, otus, rdb)
    # NOTE(review): create_mapping_file looks like a module-level flag defined
    # elsewhere in the file — confirm it is set before this runs.
    if create_mapping_file:
        create_map(qc, PR['mapping_file'])
    corediv(otus, div, PR['mapping_file'], depth)
def stop_at_merging(inFolder, outFolder, trimq, joining_method, maxloose, fastq_p):
    """
    Run only the trimming and merging steps of the pipeline.

    :raises ValueError: if ``joining_method`` is not a known merger
    """
    global PR
    trimmed = asfolder(outFolder + PR['Ftrimmed'])
    # CONSISTENCY FIX: build the merged path like every sibling function,
    # so the trailing separator is applied to the full path.
    merged = asfolder(outFolder + PR['Fmerged'])
    trimfolder(inFolder, trimmed, trimq)
    if joining_method == "fastq-join":
        mergefolderfastq(trimmed, merged, fastq_p)
    elif joining_method == "bbmerge":
        mergefolderbb(trimmed, merged, maxloose=maxloose)
    else:
        # BUG FIX: raising a plain string is a TypeError; raise a real exception.
        raise ValueError("%s: unknown merging method" % joining_method)
def stop_at_quality_control(inFolder, outFolder, joining_method, trimq,
                            qcq, maxloose, fastq_p):
    """
    Run the pipeline up to and including the quality-control step.

    :raises ValueError: if ``joining_method`` is not a known merger
    """
    global PR
    trimmed = asfolder(outFolder + PR['Ftrimmed'])
    merged = asfolder(outFolder + PR['Fmerged'])
    qc = asfolder(outFolder + PR['Fqc'])
    trimfolder(inFolder, trimmed, trimq)
    if joining_method == "fastq-join":
        mergefolderfastq(trimmed, merged, fastq_p)
    elif joining_method == "bbmerge":
        mergefolderbb(trimmed, merged, maxloose=maxloose)
    else:
        # BUG FIX: raising a plain string is a TypeError; raise a real exception.
        raise ValueError("%s: unknown merging method" % joining_method)
    qualitycontrol(merged, qc, qcq)
def stop_at_chimera_removal(inFolder, outFolder, rdb, trimq, joining_method,
                            qcq, maxloose, fastq_p):
    """
    Run the pipeline up to and including the chimera-removal step.

    :raises ValueError: if ``joining_method`` is not a known merger
    """
    global PR
    trimmed = asfolder(outFolder + PR['Ftrimmed'])
    merged = asfolder(outFolder + PR['Fmerged'])
    qc = asfolder(outFolder + PR['Fqc'])
    chi = asfolder(outFolder + PR['Fchi'])
    trimfolder(inFolder, trimmed, trimq)
    if joining_method == "fastq-join":
        mergefolderfastq(trimmed, merged, fastq_p)
    elif joining_method == "bbmerge":
        mergefolderbb(trimmed, merged, maxloose=maxloose)
    else:
        # BUG FIX: raising a plain string is a TypeError; raise a real exception.
        raise ValueError("%s: unknown merging method" % joining_method)
    qualitycontrol(merged, qc, qcq)
    removechimera(qc, chi, rdb)
def start_at_chimera_removal(inFolder, outFolder, rdb, depth):
    """
    Resume the pipeline at the chimera-removal step and run it to the end.
    """
    global PR
    qc_folder = asfolder(inFolder)
    chimera_folder = asfolder(outFolder + PR['Fchi'])
    otu_folder = asfolder(outFolder + PR['Fotus'])
    diversity_folder = asfolder(outFolder + PR['Fdiv'])
    removechimera(qc_folder, chimera_folder, rdb)
    pickotus(chimera_folder, otu_folder, rdb)
    if create_mapping_file:
        create_map(qc_folder, PR['mapping_file'])
    corediv(otu_folder, diversity_folder, PR['mapping_file'], depth)
def start_otu_pickng(inFolder, outFolder, depth, rdb):
    """
    Resume the pipeline at the OTU-picking step and run it to the end.
    """
    global PR
    chimera_folder = asfolder(inFolder)
    otu_folder = asfolder(outFolder + PR['Fotus'])
    diversity_folder = asfolder(outFolder + PR['Fdiv'])
    pickotus(chimera_folder, otu_folder, rdb)
    if create_mapping_file:
        create_map(chimera_folder, PR['mapping_file'])
    corediv(otu_folder, diversity_folder, PR['mapping_file'], depth)
def start_diversity_analysis(inFolder, outFolder, mapping_file, depth):
    """
    Resume the pipeline at the core-diversity-analyses step only.
    """
    corediv(inFolder=asfolder(inFolder),
            outFolder=asfolder(outFolder + PR['Fdiv']),
            mappingFile=mapping_file,
            depth=depth)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Microbiome analysis using multiple methods
Version: %s
Date: %s """ % (__version__, __date__))
parser.add_argument("-i", # "--input",
dest="input",
# type=str,
help="the input sequences filepath (fastq files) [REQUIRED]",
metavar="Input folder",
required=True)
parser.add_argument("-o",
# "--output",
dest="output",
type=str,
metavar="Output folder",
help="the output directory [REQUIRED]",
required=True)
parser.add_argument("-t",
dest="trim_threshold",
type=int,
metavar="trim_phred_threshold",
help="phred quality threshold for trimming [default: 12]",
default=12)
parser.add_argument("-p",
type=int,
dest="fastq_p",
metavar="fastq-join p",
help="fastq-join's percentage of mismatch [default: 16]",
default=16)
parser.add_argument("--adapter",
metavar=None,
dest="adapter_reference",
help="Adapters reference file",
type=str)
parser.add_argument("-b",
dest="beginwith",
type=str,
metavar="starting step",
choices=['otu_picking', 'diversity_analysis', 'chimera_removal'],
help="starting the analysis in the middle: (otu_picking), (diversity_analysis), (chimera_removal)")
parser.add_argument("-s",
dest="stop_at",
type=str,
metavar="stop at",
choices = ['merging', 'quality_control','chimera_removal'],
help='terminate the analysis at this step [choices: (merging), (quality_control), (chimera_'
'removal))')
parser.add_argument("-j",
dest='joining_method',
help="choose the merging method (fastq-join) or (bbmerge) [default: fastq-join]",
type=str,
metavar="joining method",
choices = ['fastq-join', "bbmerge"],
default="fastq-join")
parser.add_argument("-m",
dest="maxloose",
help="Assign maxloose to be true for bbmerge [default: False]",
action="store_true")
parser.add_argument("-q",
dest="qc_threshold",
type=int,
metavar="quality control threshold",
help="quality control phred threshold [default: 19]",
default=19)
parser.add_argument("--continuation_reference",
dest="c_ref",
type=str,
metavar="newref_seq.fna",
help="reference sequence for continuation. If you want to continue analysis using the reference "
"data set from previous analysis. you can find it in the last sample otus folder new_refseqs.fna",
default="none")
parser.add_argument("--continuation_otu_id",
dest="c_otu_id",
type=str,
metavar=None,
help="continuation reference new otus ids",
default="New")
parser.add_argument("-r",
dest="rdb",
metavar="Reference database",
help="silva, greengenes [default: silva]",
choices=['silva', 'greengenes', 'unite'],
type=str,
default="silva")
parser.add_argument("-c",
dest="ConfigFile",
type=str,
metavar="Configuration file name",
default='qiime.cfg',
help="Configuration file name [default: qiime.cfg]")
parser.add_argument("-a",
dest="mapping_file",
help="Mapping file name",
metavar="Mapping file name",
type=str)
parser.add_argument("--parameter_file_name",
help="The name of the parameter file [if not assigned is automatically produced using "
"configuration file",
type=str,
metavar=None,
dest="parameter_file_name")
parser.add_argument("-n",
# "--number_of_cores",
help="Specify the number of jobs to start with [default: 2]",
type=int,
metavar='Number of jobs',
dest="number_of_cores",
default=2)
parser.add_argument("-e",
dest="depth",
type=int,
metavar="Sampling depth",
help="sampling depth for diversity analyses [default: 10000]",
default=10000)
parser.add_argument("--remove_intermediate_files",
help="To remove intermediate files, to reduce the disk space",
dest="remove_intermediate",
action="store_true")
# parser.add_argument("--decompress",
# help="Copy input files to outputfolder/fastq and decompress them",
# dest="decompress",
# action="store_true")
parser.add_argument("--ml",
dest="minimum_length",
metavar='Minimum length',
type=int,
help="Minimum length of reads kept after merging [default: 380]",
default=380)
parser.add_argument("--primer-trim-f",
dest="primertrim_forward",
metavar='Primer Trim',
type=int,
help="length of the forward primer [17]",
default=17)
parser.add_argument("--primer-trim-r",
dest="primertrim_reverse",
metavar='Primer Trim',
type=int,
help="length of the reverse primer [21]",
default=21)
#x = parser.format_usage()
#parser.usage = starting_message #+ x
arg = parser.parse_args()
PR.update({
'in_folder': asfolder(arg.input),
'out_folder': asfolder(arg.output),
# 'decompress': arg.aaa
# ress,
'rdb': arg.rdb,
'qcq': arg.qc_threshold,
'maxloose': arg.maxloose,
'trimq': arg.trim_threshold,
'joining_method': arg.joining_method,
'fastq_p': arg.fastq_p,
'depth': arg.depth,
'ConfigFile': arg.ConfigFile,
'parameter_file_name': arg.parameter_file_name,
'remove_intermediate': arg.remove_intermediate,
'beginwith': arg.beginwith,
'mapping_file': arg.mapping_file,
'adapter_ref': arg.adapter_reference,
'minimum_length': arg.minimum_length,
'c_ref': arg.c_ref,
'c_otu_id': arg.c_otu_id,
'primertrim_forward': arg.primertrim_forward,
'primertrim_reverse': arg.primertrim_reverse})
## parameter_file
get_configuration()
check_before_start()
if PR['rdb'] == 'unite':
PR['fungus'] = True
else:
PR['fungus'] = False
PR['others'] = asfolder(PR['out_folder'] + PR['Fothers'])
PR['number_of_cores'] = arg.number_of_cores
if PR['number_of_cores'] == 1:
PR['np'] = True
else:
PR['np'] = False
if (os.path.isdir(PR['out_folder'])):
sys.exit()
else:
os.mkdir(PR['out_folder'])
if not os.path.isdir(PR['others']):
os.mkdir(PR['others'])
logging.basicConfig(filename=PR['others'] + "log.txt",
format='%(levelname)s \n %(message)s',
level=logging.DEBUG)
loginfo('started')
[loginfo(str(P) + ": " + str(PR[P])) for P in PR]
# if PR['decompress']:
# copyfilesanddecompress(PR['in_folder'], asfolder(PR['out_folder']+"fastq"))
# PR['in_folder'] = asfolder(PR['out_folder'])+'fastq/'
if arg.parameter_file_name == None:
| |
dossier partagé',
'Audio' : 'Son',
'Enable Audio' : 'Activer le son',
'Host Audio Driver' : 'Pilote audio hôte',
'Audio Controller' : 'Controleur audio',
'WinMM' : 'Windows multimedia',
'Null Audio Driver' : 'Null Audio Driver',
'OSS' : 'Open Sound System',
'ALSA' : 'Advanced Linux Sound Architecture',
'DirectSound' : 'Microsoft DirectSound',
'CoreAudio' : 'Core Audio',
'MMPM' : 'Reserved for historical reasons.',  # In API; may never see it in the real world
'Pulse' : 'Pulse Audio',
'SolAudio' : 'Solaris Audio',
'AC97' : 'ICH AC97',
'SB16' : 'SoundBlaster 16',
'Network' : 'Réseau',
'Adapter' : 'Interface',
'Network Adapter' : 'Carte réseau',
'Enable Network Adapter' : 'Activer la carte réseau',
'Adapter Type' : 'Type de carte',
'adapter' : 'carte',
'Bridged' : 'Pont',
'Bridged Adapter' : 'Accès par pont',
'HostOnly' : 'privé hôte',
'Internal' : 'interne',
'Internal Network' : 'Réseau interne',
'Host-only Adapter' : 'Réseau privé hôte',
'NAT' : 'NAT',
'network' : 'réseau',
'Ethernet' : 'Ethernet',
'PPP' : 'PPP',
'SLIP' : 'SLIP',
'IPv4Addr' : 'Adresse IPv4',
'IPv6Addr' : 'Adresse IPv6',
'Mac Address' : 'Adresse MAC',
'Cable connected' : 'Câble branché',
'netMediumType' : 'Type',
'Guest Network Adapters' : 'Carte réseau invité',
'Am79C970A' : 'AMD PCNet-PCI II network card',
'Am79C973' : 'AMD PCNet-FAST III network card',
'I82540EM' : 'Intel PRO/1000 MT Desktop network card',
'I82543GC' : 'Intel PRO/1000 T Server network card',
'I82545EM' : 'Intel PRO/1000 MT Server network card',
'Virtio' : 'Réseau para-virtuel (virtio-net)',
# Machine states
'PoweredOff' : 'Éteinte',
'Saved' : 'Sauvegardée',
'Teleported' : 'Téléporté',
'Aborted' : 'Avortée',
'Running' : 'En fonction',
'Paused' : 'En pause',
'Stuck' : 'Collé',
'Teleporting' : 'En téléportation',
'LiveSnapshotting' : 'Instantané en direct',
'Starting' : 'Démarrage',
'Stopping' : 'Extinction',
'Saving' : 'Enregistrement',
'Restoring' : 'Restauration',
'TeleportingPausedVM' : 'En pause pour la téléportation',
'TeleportingIn' : 'Téléportation vers',
'RestoringSnapshot' : 'Restauration de l\'instantané',
'DeletingSnapshot' : 'Suppression de l\'instantané',
'SettingUp' : 'Initialisation',
'FirstOnline' : 'First Online',
'LastOnline' : 'Last Online',
'FirstTransient' : 'First Transient',
'LastTransient' : 'Last Transient',
# Mount dialog
'Mount' : 'Insérer',
# list separator
'LIST_SEP' : ', ',
# Sizes
'B' : 'o',
'KB' : 'Kio',
'MB' : 'Mio',
'GB' : 'Gio',
'TB' : 'Tio',
# Virtual Media Manager
'Open Virtual Media Manager' : 'Ouvrir le Gestionnaire de médias virtuels',
'Virtual Media Manager' : 'Gestionnaire de médias',
'Are you sure remove medium' : 'Êtes-vous sûr de vouloir supprimer le média %s de la liste des médias inconnus?',
'Medium remove note' : 'Le conteneur de ce média ne sera pas supprimé et il sera possible de le rajouter à la liste ultérieurement.',
'Are you sure release medium' : 'Êtes-vous sûr de vouloir libérer le média %s?',
'This will detach from' : 'Il sera détaché de (ou des) machines virtuelles suivantes : %s.',
'Please select a medium.' : 'Sélectionnez un média.',
'VMM Remove Media Message1' : 'Voulez-vous supprimer le conteneur du disque dur %s?',
'VMM Remove Media Message2' : 'Si vous choisissez Supprimer le conteneur sera supprimé. <b>Cette opération est irréversible.</b>',
'VMM Remove Media Message3' : 'Si vous choisissez Conserver il sera seulement enlevé de la liste des disques connus, et le conteneur sera laissé tel quel. Il sera donc possible de rajouter le disque dur à la liste ultérieurement.',
'Normal' : 'Normal',
'Writethrough' : 'Hors instantanés',
'Immutable' : 'Immuable',
'Actions' : 'Actions',
'Add' : 'Ajouter',
'Clone' : 'Cloner',
'Remove' : 'Enlever',
'Release' : 'Libérer',
'Hard Disks' : 'Disques durs',
'CD/DVD Images' : 'Images CD/DVD',
'Floppy Images' : 'Images de disquette',
""" New hard disk wizard """
'Create New Virtual Disk' : 'Créer un nouveau disque virtuel',
'newDisk Welcome' : 'Bienvenue dans l\'assistant de création de disque virtuel!',
'newDisk Step1 Message1' : 'Cet assistant vous aidera à créer un nouveau disque dur virtuel pour votre machine.',
'newDisk Step1 Message2' : 'Utilisez le bouton Suivant pour atteindre la page suivante de l\'assistant et le bouton Précédent pour revenir à la page précédente. Vous pouvez également interrompre l\'exécution de l\'assistant avec le bouton Annuler.',
'Hard Disk Storage Type' : 'Type de conteneur pour le disque dur',
'newDisk Step2 Message1' : 'Choisissez le type d\'image qui contiendra le disque dur virtuel que vous voulez créer.',
'newDisk Step2 dynamic' : 'Au début une <b>image de taille variable</b> prend peu de place sur votre vrai disque dur. L\'espace occupé augmentera en fonction des besoins du système d\'exploitation invité, jusqu\'à la taille limite spécifiée.',
'newDisk Step2 fixed' : 'Une <b>image de taille fixe</b> occupe un espace constant. La taille du fichier image correspond approximativement à l\'espace du disque virtuel. La création d\'une image de taille fixe peut prendre un certain temps, qui dépend de la taille choisie et des performances en écriture de votre vrai disque dur.',
'Storage Type' : 'Type de l\'image',
'Dynamically expanding storage' : 'Image de taille variable',
'Fixed-size storage' : 'Image de taille fixe',
'Virtual Disk Location and Size' : 'Emplacement et taille du disque virtuel',
'newDisk Step3 Message1' : 'Entrez le chemin du fichier qui contiendra les données du disque dur ou cliquez sur le bouton pour choisir son emplacement.',
'newDisk Step3 Message2' : 'Choisissez la taille maximale du disque dur virtuel. Le système d\'exploitation invité verra cette taille comme taille maximale de ce disque dur.',
'Summary' : 'Récapitulatif',
'newDisk Step4 Message1' : 'Vous êtes sur le point de créer un disque dur virtuel avec les paramètres suivants :',
'newDisk Step4 Message2' : 'Si ces paramètres vous conviennent cliquez sur Terminer pour créer le nouveau disque dur.',
""" New virtual machine wizard """
'Create New Virtual Machine' : 'Créer une nouvelle machine virtuelle',
'New Virtual Machine Wizard' : 'Assistant de création d\'une nouvelle machine virtuelle',
'newVM Welcome' : 'Bienvenue dans l\'assistant de création de machine virtuelle!',
'newVM Step1 Message1' : 'Cet assistant aidera à créer une nouvelle machine virtuelle pour VirtualBox.',
'newVM Step1 Message2' : 'Utilisez le bouton Suivant pour atteindre la page suivante de l\'assistant et le bouton Précédent pour revenir à la page précédente. Vous pouvez également interrompre l\'exécution de l\'assistant avec le bouton Annuler.',
'VM Name and OS Type' : 'Nom et système d\'exploitation',
'newVM Step2 Message1' : 'Choisissez un nom pour la nouvelle machine virtuelle et le type du système d\'exploitation invité que vous désirez installer sur cette machine.',
'newVM Step2 Message2' : 'Le nom de la machine virtuelle peut servir à indiquer la configuration matérielle et logicielle. Il sera utilisé par tous les composants de VirtualBox pour l\'identifier.',
'newVM Step3 Message1' : 'Choisissez la quantité de la mémoire vive (RAM) à allouer à la machine virtuelle, en mégaoctets.',
'newVM Step3 Message2' : 'La quantité recommandée est de %s Mio.', """ %s will be replaced with the recommended memory size at run time """
'Virtual Hard Disk' : 'Disque dur virtuel',
'Boot Hard Disk' : 'Disque dur d\'amorçage',
'Create new hard disk' : 'Créer un nouveau disque dur',
'Use existing hard disk' : 'Utiliser un disque dur existant',
'newVM Step4 Message1' : 'Choisissez une image de disque dur à utiliser pour l\'amorçage de la machine virtuelle. Vous pouvez soit créer une nouvelle image en cliquant sur Nouveau soit choisir une image existante dans le Gestionnaire de médias virtuels avec le bouton Existant.',
'newVM Step4 Message2' : 'Si vous avez besoin d\'une configuration de disques plus complexe, vous pouvez sauter cette étape et allouer des disques plus tard dans la Configuration de la machine.',
'newVM Step4 Message3' : 'La taille recommandée pour le disque dur d\'amorçage est de %s Mio.', """ %s will be replaced with the recommended memory size at run time """
'newVM Step5 Message1' : 'Vous êtes sur le point de créer une nouvelle machine virtuelle avec les paramètres suivants :',
'newVM Step5 Message2' : 'Si cette configuration vous convient cliquez sur Terminer pour créer la nouvelle machine virtuelle.',
'newVM Step5 Message3' : 'Vous pourrez modifier ces paramètres ainsi que d\'autres à tout moment avec la fenêtre Configuration du menu de la fenêtre principale.',
""" VM Log files """
'Show Log' : 'Afficher le journal',
'Logs' : 'Journaux',
'No logs found.' : 'Aucun journal trouvé.',
""" Import / Export Appliances """
'Export Appliance' : 'Exporter application virtuelle',
'Appliance Export Wizard' : 'Assistant d\'exportation d\'application virtuelle',
'Appliance Export Wizard Welcome' : 'Bienvenue dans l\'assistant d\'exportation d\'application virtuelle!',
'appExport Step1 Message1' : 'Cet assistant va vous aider à exporter une application virtuelle.',
'appExport Step1 Message2' : 'Utilisez le bouton Suivant pour atteindre la page suivante de l\'assistant et le bouton Précédent pour revenir à la page précédente. Vous pouvez également interrompre l\'exécution de l\'assistant avec le bouton Annuler.',
'appExport Step1 Message3' : 'Choisissez les machines virtuelles à ajouter à l\'application virtuelle. Vous pouvez en choisir plusieurs, mais elles doivent être éteintes avant d\'être exportées.',
'Appliance Export Settings' : 'Paramètre d\'exportation d\'application virtuelle',
'appExport Step2 Message1' : 'Vous pouvez effectuer des | |
# ========== pointer distribution (decoder self-attention) mask ==========
if self.args.apply_tgt_actnode_masks:
assert tgt_actnode_masks is not None
if self.args.shift_pointer_value:
tgt_actnode_masks[:, 1:] = tgt_actnode_masks[:, :-1]
tgt_actnode_masks[:, 0] = 0
ptr_self_attn_mask = tgt_actnode_masks.unsqueeze(dim=1).repeat(1, tgt_actnode_masks.size(1), 1)
ptr_self_attn_mask = ptr_self_attn_mask.unsqueeze(dim=1).repeat(
1, self.args.pointer_dist_decoder_selfattn_heads, 1, 1).view(
-1, tgt_actnode_masks.size(1), tgt_actnode_masks.size(1))
# NOTE need to include the causal mask as well in case some rows are completely masked out
# in which case we need to do the post mask
ptr_self_attn_mask &= (self.buffered_future_mask(x) != -float('inf')).byte().unsqueeze(dim=0)
# NOTE when one row out of bsz * num_heads (tgt_max_len, src_max_len) masks is full zeros, after softmax the
# distribution will be all "nan"s, which will cause problem when calculating gradients.
# Thus, we mask these positions after softmax
ptr_self_attn_mask_post_softmax = ptr_self_attn_mask.new_ones(*ptr_self_attn_mask.size()[:2], 1,
dtype=torch.float)
ptr_self_attn_mask_post_softmax[ptr_self_attn_mask.sum(dim=2) == 0] = 0
# we need to modify the pre-softmax as well, since after we get nan, multiplying by 0 is still nan
ptr_self_attn_mask[(ptr_self_attn_mask.sum(dim=2, keepdim=True) == 0).
repeat(1, 1, tgt_actnode_masks.size(1))] = 1
# NOTE must use torch.bool for mask for PyTorch >= 1.2, otherwise there will be problems around ~mask
# for compatibility of PyTorch 1.1
if version.parse(torch.__version__) < version.parse('1.2.0'):
ptr_self_attn_mask = (ptr_self_attn_mask, ptr_self_attn_mask_post_softmax)
else:
ptr_self_attn_mask = (ptr_self_attn_mask.to(torch.bool), ptr_self_attn_mask_post_softmax)
else:
ptr_self_attn_mask = None
# ========================================================================
# import pdb; pdb.set_trace()
# breakpoint()
# TODO tgt_src_align_layers are not really controlled!!!
# decoder layers
for layer_index, layer in enumerate(self.layers):
# Encode state of state machine as attention masks and encoded
# token positions changing for each target action
if self.encode_state_machine is None:
head_attention_masks = None
head_positions = None
else:
raise NotImplementedError('Deprecated: use stack-transformer branch')
# import pdb; pdb.set_trace()
x, attn = layer(
x,
encoder_out['encoder_out'] if encoder_out is not None else None,
encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
incremental_state,
self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
head_attention_masks=head_attention_masks,
head_positions=head_positions,
cross_attention_mask=(cross_attention_mask
if layer_index in self.args.tgt_src_align_layers
else None),
ptr_self_attn_mask=(ptr_self_attn_mask
if layer_index in self.args.pointer_dist_decoder_selfattn_layers
else None),
graph_self_attn_mask=(graph_self_attn_mask
if layer_index in self.args.tgt_graph_layers
else None)
)
inner_states.append(x)
if layer_index not in self.args.pointer_dist_decoder_selfattn_layers:
continue
# attn is tgt self-attention of size (bsz, num_heads, tgt_len, tgt_len) with future masks
if self.args.pointer_dist_decoder_selfattn_heads == 1:
attn = attn[:, 0, :, :]
attn_all.append(attn)
else:
attn = attn[:, :self.args.pointer_dist_decoder_selfattn_heads, :, :]
if self.args.pointer_dist_decoder_selfattn_avg == 1:
# arithmetic mean
attn = attn.sum(dim=1) / self.args.pointer_dist_decoder_selfattn_heads
attn_all.append(attn)
elif self.args.pointer_dist_decoder_selfattn_avg == 0:
# geometric mean
attn = attn.prod(dim=1).pow(1 / self.args.pointer_dist_decoder_selfattn_heads)
# TODO there is an nan bug when backward for the above power
attn_all.append(attn)
elif self.args.pointer_dist_decoder_selfattn_avg == -1:
# no mean
pointer_dists = list(map(lambda x: x.squeeze(1),
torch.chunk(attn, self.args.pointer_dist_decoder_selfattn_heads, dim=1)))
# for decoding: using a single pointer distribution
attn = attn.prod(dim=1).pow(1 / self.args.pointer_dist_decoder_selfattn_heads)
attn_all.extend(pointer_dists)
else:
raise ValueError
# for decoding: which pointer distribution to use
attn = attn_all[self.args.pointer_dist_decoder_selfattn_layers.index(
self.args.pointer_dist_decoder_selfattn_infer)]
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
# NOTE here 'attn' is used for inference pointer prediction, 'attn_all' is used for loss calculation
# TODO change the names to be more straightforward, such as 'pointer_dist_infer', 'pointer_dist_list'
# TODO add teacher forcing; this will change the backward behavior
return x, {'attn': attn, 'inner_states': inner_states, 'attn_all': attn_all}
def output_layer(self, features, logits_mask=None, logits_indices=None,
                 tgt_vocab_masks=None,
                 **kwargs):
    """Project decoder features to vocabulary-sized logits.

    Args:
        features: decoder output of shape (batch, tgt_len, embed_dim).
        logits_mask: optional mask over the active logits; positions equal
            to 0 are forced to -inf (only used with ``logits_indices``).
        logits_indices: optional mapping whose keys are the vocabulary
            indices to actually compute; every other logit is set to -inf.
        tgt_vocab_masks: per-position vocabulary mask; entries equal to 0
            are forced to -inf when ``self.args.apply_tgt_vocab_masks`` is
            set.

    Returns:
        Tensor of shape (batch, tgt_len, vocab_size) with the logits, or
        the unprojected ``features`` when an adaptive softmax is in use
        (it projects internally).
    """
    if self.adaptive_softmax is not None:
        # adaptive softmax does its own projection downstream
        assert not logits_mask
        return features
    # project back to size of vocabulary
    if self.share_input_output_embed:
        emb_weights = self.embed_tokens.weight
    else:
        emb_weights = self.embed_out
    if logits_indices:
        # indices of active logits; keep them on the features' device so
        # advanced indexing also works when weights/features live on GPU
        indices = torch.tensor(list(logits_indices.keys()),
                               device=features.device)
        # compute only the active logits:
        # (batch_size, target_size, num_active)
        active_output = F.linear(features, emb_weights[indices, :])
        # forbid masked elements
        active_output[logits_mask == 0] = float("-inf")
        # scatter the active logits into a full-vocabulary tensor that is
        # -inf everywhere else (new_full avoids the new_ones * -inf detour)
        emb_size = emb_weights.shape[0]
        batch_size, target_size, _ = features.shape
        output = features.new_full((batch_size, target_size, emb_size),
                                   float("-inf"))
        output[:, :, indices] = active_output
    else:
        output = F.linear(features, emb_weights)
        # TODO fix this when decoding
        if self.args.apply_tgt_vocab_masks:
            assert tgt_vocab_masks is not None
            output[tgt_vocab_masks == 0] = float('-inf')
    return output
def max_positions(self):
    """Maximum output length supported by the decoder.

    The limit is ``max_target_positions``, further capped by the
    positional-embedding capacity when positional embeddings are used.
    """
    limit = self.max_target_positions
    if self.embed_positions is not None:
        limit = min(limit, self.embed_positions.max_positions)
    return limit
def buffered_future_mask(self, tensor):
    """Return a causal (upper-triangular -inf) mask of shape (dim, dim).

    The mask is cached on ``self._future_mask`` and rebuilt only when it
    is missing, sits on a different device than ``tensor``, or is smaller
    than the requested size.
    """
    dim = tensor.size(0)
    cached = getattr(self, '_future_mask', None)
    stale = (
        cached is None
        or cached.device != tensor.device
        or cached.size(0) < dim
    )
    if stale:
        self._future_mask = torch.triu(
            utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
    return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
    """Upgrade a (possibly old) state dict for new versions of fairseq.

    Args:
        state_dict: flat checkpoint state dict; modified in place.
        name: key prefix of this decoder inside ``state_dict``
            (e.g. ``'decoder'``).

    Returns:
        The upgraded ``state_dict`` (same object, mutated in place).
    """
    if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
        # Sinusoidal positions are deterministic, so the stored weights are
        # dropped and replaced with a dummy tensor under the new key name.
        weights_key = '{}.embed_positions.weights'.format(name)
        if weights_key in state_dict:
            del state_dict[weights_key]
        state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
    for i in range(len(self.layers)):
        # update layer norms: old checkpoints stored them in an indexed
        # ``layer_norms`` container; remap each index to its named attribute
        layer_norm_map = {
            '0': 'self_attn_layer_norm',
            '1': 'encoder_attn_layer_norm',
            '2': 'final_layer_norm'
        }
        for old, new in layer_norm_map.items():
            for m in ('weight', 'bias'):
                k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
                if k in state_dict:
                    state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
                    del state_dict[k]
    version_key = '{}.version'.format(name)
    if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
        # earlier checkpoints did not normalize after the stack of layers,
        # so disable the final layer norm to reproduce their behavior
        self.layer_norm = None
        self.normalize = False
        state_dict[version_key] = torch.Tensor([1])
    return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Build an ``nn.Embedding`` initialized for transformer training.

    Weights are drawn from N(0, embedding_dim ** -0.5) and the padding
    row is zeroed out.
    """
    emb = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    std = embedding_dim ** -0.5
    nn.init.normal_(emb.weight, mean=0, std=std)
    nn.init.constant_(emb.weight[padding_idx], 0)
    return emb
def Linear(in_features, out_features, bias=True):
    """Build an ``nn.Linear`` with Xavier-uniform weights and zero bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.constant_(layer.bias, 0.)
    return layer
# @register_model_architecture('transformer', 'transformer')
def base_architecture(args):
    """Fill in transformer hyper-parameter defaults on ``args`` in place.

    Attributes already present on ``args`` are left untouched; several
    decoder defaults mirror the (possibly just-defaulted) encoder values,
    so the call order below matters.
    """
    def _default(attr, value):
        # mimic args.attr = getattr(args, attr, value)
        if not hasattr(args, attr):
            setattr(args, attr, value)

    _default('encoder_embed_path', None)
    _default('encoder_embed_dim', 512)
    _default('encoder_ffn_embed_dim', 2048)
    _default('encoder_layers', 6)
    _default('encoder_attention_heads', 8)
    _default('encoder_normalize_before', False)
    _default('encoder_learned_pos', False)
    _default('decoder_embed_path', None)
    _default('decoder_embed_dim', args.encoder_embed_dim)
    _default('decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    _default('decoder_layers', 6)
    _default('decoder_attention_heads', 8)
    _default('decoder_normalize_before', False)
    _default('decoder_learned_pos', False)
    _default('attention_dropout', 0.)
    _default('activation_dropout', 0.)
    _default('activation_fn', 'relu')
    _default('dropout', 0.1)
    _default('adaptive_softmax_cutoff', None)
    _default('adaptive_softmax_dropout', 0)
    _default('share_decoder_input_output_embed', False)
    _default('share_all_embeddings', False)
    _default('no_token_positional_embeddings', False)
    _default('adaptive_input', False)
    _default('decoder_output_dim', args.decoder_embed_dim)
    _default('decoder_input_dim', args.decoder_embed_dim)
# @register_model_architecture('transformer', 'transformer_iwslt_de_en')
# def transformer_iwslt_de_en(args):
# args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
# args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
# args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
# args.encoder_layers = getattr(args, 'encoder_layers', 6)
# args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
# args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
# args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
# args.decoder_layers = getattr(args, 'decoder_layers', 6)
# base_architecture(args)
# @register_model_architecture('transformer', 'transformer_wmt_en_de')
# def transformer_wmt_en_de(args):
# base_architecture(args)
# # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
# @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')
# def transformer_vaswani_wmt_en_de_big(args):
# args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
# args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
# args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
# args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
# args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
# args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
# args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
# args.dropout = getattr(args, 'dropout', 0.3)
# base_architecture(args)
# @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big')
# def transformer_vaswani_wmt_en_fr_big(args):
# args.dropout = getattr(args, 'dropout', 0.1)
# transformer_vaswani_wmt_en_de_big(args)
# @register_model_architecture('transformer', 'transformer_wmt_en_de_big')
# def transformer_wmt_en_de_big(args):
# args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
# transformer_vaswani_wmt_en_de_big(args)
# # default parameters used in tensor2tensor implementation
# @register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')
# def transformer_wmt_en_de_big_t2t(args):
# args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)
# args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True)
# args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
# args.activation_dropout = getattr(args, 'activation_dropout', 0.1)
# transformer_vaswani_wmt_en_de_big(args)
# @register_model_architecture('transformer', 'transformer_2x2')
# def transformer_2x2(args):
# args.encode_state_machine = getattr(args, 'encode_state_machine', None)
# #
# args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
# args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 512)
# args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
# args.encoder_layers = getattr(args, 'encoder_layers', 2)
# args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
# args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 512)
# args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
# args.decoder_layers = getattr(args, 'decoder_layers', 2)
# base_architecture(args)
# @register_model_architecture('transformer', 'transformer_6x6')
# def transformer_6x6(args):
# args.encode_state_machine = getattr(args, 'encode_state_machine', None)
# #
# args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
# args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 512)
# args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
# args.encoder_layers = getattr(args, 'encoder_layers', 6)
# args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
# args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 512)
# args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
# args.decoder_layers = getattr(args, 'decoder_layers', 6)
# base_architecture(args)
# @register_model_architecture('transformer', 'transformer_3x8')
# def transformer_3x8(args):
# args.encode_state_machine = getattr(args, 'encode_state_machine', None)
# #
# args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
# args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 512)
# args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
# args.encoder_layers = getattr(args, 'encoder_layers', 3)
# args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
# args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 512)
# args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
# args.decoder_layers = getattr(args, 'decoder_layers', 8)
# base_architecture(args)
# | |
possibly several levels
proposals (list[BoxList]): proposal boxes
targets (list[BoxList], optional): the ground-truth targets.
Returns:
x (Tensor): the result of the feature extractor
proposals (list[BoxList]): during training, the subsampled proposals
are returned. During testing, the predicted boxlists are returned
losses (dict[Tensor]): During training, returns the losses for the
head. During testing, returns an empty dict.
"""
# during TRAINING:
# (features is a list of 5 elements)
# (features[0] has shape: (16, 256, h/4, w/4)) (not always exactly h/4, w/4)
# (features[1] has shape: (16, 256, h/8, w/8))
# (features[2] has shape: (16, 256, h/16, w/16))
# (features[3] has shape: (16, 256, h/32, w/32))
# (features[4] has shape: (16, 256, h/64, w/64))
#
# (targets is a list of 16 elements, each element is a BoxList (e.g. [BoxList(num_boxes=3, image_width=800, image_height=1066, mode=xyxy), BoxList(num_boxes=19, image_width=800, image_height=1201, mode=xyxy),...]))
if self.training:
with torch.no_grad():
proposals = self.loss_evaluator.sample_jittered_boxes(targets) #######################################################
# (proposals is a list of 16 elements, each element is a BoxList, num_boxes in each BoxList is M*{num_boxes for the corresponding BoxList in targets})
for proposal in proposals:
proposal.bbox.requires_grad = True
x = self.feature_extractor(features, proposals)
iou_score = self.predictor(x)
# (shape: (num_gt_bboxes_in_batch*M, 81)) (81 is the number of classes)
if not self.training:
if self.cfg.MODEL.ROI_IOU_HEAD.PERFORM_FILTERING and self.cfg.MODEL.ROI_IOU_HEAD.NMS_BEFORE:
result = self.post_processor(proposals, iou_score)
else:
result = proposals
with torch.enable_grad():
result = self.optimize_boxes(features, result)
if self.cfg.MODEL.ROI_IOU_HEAD.PERFORM_FILTERING and not self.cfg.MODEL.ROI_IOU_HEAD.NMS_BEFORE:
x = self.feature_extractor(features, result)
# final classifier that converts the features into predictions
iou_score = self.predictor(x)
result = self.post_processor(result, iou_score)
return x, result, {}
if self.training:
loss_iou = self.loss_evaluator(iou_score, targets, proposals)
# (loss_iou is just a tensor of a single value)
return (
x,
proposals,
dict(loss_iou=loss_iou),
)
else:
return x, iou_score, {}
class ROIIoUHead_mlmcmc(torch.nn.Module): ############################################################################
"""
"""
def __init__(self, cfg, in_channels):
    """Build the IoU head sub-modules and read its hyper-parameters.

    Args:
        cfg: global config node (uses the ``MODEL.ROI_IOU_HEAD`` section).
        in_channels: number of channels of the incoming feature maps.
    """
    super(ROIIoUHead_mlmcmc, self).__init__()
    self.cfg = cfg
    self.mode = cfg.MODEL.ROI_IOU_HEAD.LOSS_TYPE
    # Langevin-dynamics sampling settings: step count L and step size alpha
    self.L = cfg.MODEL.ROI_IOU_HEAD.LANGEVIN_L
    self.alpha = cfg.MODEL.ROI_IOU_HEAD.LANGEVIN_alpha
    # predictor consumes the extractor's output channels, so build it second
    self.feature_extractor = make_roi_iou_feature_extractor(cfg, in_channels)
    self.predictor = make_roi_iou_predictor(
        cfg, self.feature_extractor.out_channels)
    self.post_processor = make_roi_iou_post_processor(cfg)
    self.loss_evaluator = make_roi_iou_loss_evaluator(cfg)
def optimize_boxes(self, features, boxes):
    # Optimize iounet boxes
    """Refine candidate boxes by gradient ascent on the predicted IoU score.

    Args:
        features: list of feature maps (one per FPN level).
        boxes: list[BoxList], one per image; each must carry the
            "box_labels" and "scores" fields.

    Returns:
        list[BoxList] of refined boxes (xyxy) with "scores", "labels" and
        "box_labels" fields re-attached.

    NOTE(review): refinement runs either directly on xyxy coordinates
    ("default", when LOSS_TYPE == "L2") or in a size-normalized relative
    parameterization ("relative"); both loops keep a per-box step length
    that is decayed whenever a step fails to improve the predicted IoU.
    """
    # per-coordinate step length; a scalar pair/single is broadcast to
    # (1, 4) = (x1, y1, x2, y2) on the feature device
    step_length = self.cfg.MODEL.ROI_IOU_HEAD.STEP_LENGTH
    if isinstance(step_length, (tuple, list)):
        if len(step_length) == 1:
            step_length = torch.Tensor([step_length[0], step_length[0], step_length[0], step_length[0]]).to(
                features[0].device).view(1, 4)
        elif len(step_length) == 2:
            # first value for x/y positions, second for widths/heights
            step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]]).to(
                features[0].device).view(1, 4)
        else:
            raise ValueError
    if self.mode == "L2":
        box_refinement_space = "default"
    else:
        box_refinement_space = 'relative'
    box_refinement_iter = self.cfg.MODEL.ROI_IOU_HEAD.NUM_REFINE_ITER
    # remember per-image box counts so flat tensors can be split back
    boxes_per_image = [b.bbox.shape[0] for b in boxes]
    # one (num_boxes, 4) step-length tensor per image
    step_length = [step_length.clone().expand(b.bbox.shape[0], -1).contiguous() for b in boxes]
    labels_list = [b.get_field("box_labels") for b in boxes]
    labels = torch.cat(labels_list)
    scores = [b.get_field("scores") for b in boxes]
    # presumably needed so backward() can flow through the feature
    # extractor to the box coordinates — TODO confirm
    for f in features:
        f.requires_grad = True
    if box_refinement_space == 'default':
        # raise NotImplementedError
        # omega1 = 0.001
        # omega2 = -0.01
        for i_ in range(box_refinement_iter):
            # forward pass
            # Assume box format is xyxy
            bb_init = [BoxList(b.bbox.clone().detach(), b.size, b.mode) for b in boxes]
            for b in bb_init:
                b.bbox.requires_grad = True
            x = self.feature_extractor(features, bb_init)
            iou_score = self.predictor(x)
            # keep only the score of each box's own class label
            iou_score = iou_score[torch.arange(iou_score.shape[0]), labels]
            iou_score.backward(gradient = torch.ones_like(iou_score))
            # Update proposal: gradient step scaled by the box size
            # (w, h, w, h) so the step is roughly scale-invariant
            bb_refined = [BoxList((b.bbox + s * b.bbox.grad * (b.bbox[:, 2:] - b.bbox[:, :2]).repeat(1, 2)).detach(),
                                  b.size, b.mode) for b, s in zip(bb_init, step_length)]
            with torch.no_grad():
                # re-score the refined boxes without building a graph
                x = self.feature_extractor(features, bb_refined)
                new_iou_score = self.predictor(x)
                new_iou_score = new_iou_score[torch.arange(new_iou_score.shape[0]), labels]
                refinement_failed = (new_iou_score < iou_score)
                refinement_failed = refinement_failed.view(-1, 1)
                refinement_failed = refinement_failed.split(boxes_per_image, dim=0)
                # keep the original box where the step hurt the score
                boxes = [BoxList(b_i.bbox * r_f.float() + b_r.bbox * (1 - r_f).float(), b_i.size, b_i.mode)
                         for b_i, b_r, r_f in zip(bb_init, bb_refined, refinement_failed)]
                # decay step length for failures
                decay_factor = self.cfg.MODEL.ROI_IOU_HEAD.STEP_LENGTH_DECAY
                step_length = [s * (1 - r_f).float() + s * decay_factor * r_f.float()
                               for s, r_f in zip(step_length, refinement_failed)]
    elif box_refinement_space == 'relative':
        # optimize in a size-normalized (relative) box parameterization
        boxes = [b.convert("xywh") for b in boxes]
        sz_norm = [b.bbox[:, 2:].clone() for b in boxes]
        # TODO test this
        boxes_rel = [BoxList(rect_to_rel(b.bbox, s), b.size, b.mode) for b, s in zip(boxes, sz_norm)]
        for i_ in range(box_refinement_iter):
            # forward pass: map relative boxes back to absolute xyxy for scoring
            bb_init_rel = [BoxList(b.bbox.clone().detach(), b.size, b.mode) for b in boxes_rel]
            for b in bb_init_rel:
                b.bbox.requires_grad = True
            bb_init = [BoxList(rel_to_rect(b.bbox, s), b.size, b.mode) for b, s in zip(bb_init_rel, sz_norm)]
            bb_init = [b.convert('xyxy') for b in bb_init]
            x = self.feature_extractor(features, bb_init)
            iou_score = self.predictor(x)
            iou_score = iou_score[torch.arange(iou_score.shape[0]), labels]
            iou_score.backward(gradient=torch.ones_like(iou_score))
            # Update proposal in relative space, then convert for re-scoring
            bb_refined_rel = [BoxList((b.bbox + s * b.bbox.grad).detach(), b.size, b.mode)
                              for b, s in zip(bb_init_rel, step_length)]
            bb_refined = [BoxList(rel_to_rect(b.bbox, s), b.size, b.mode) for b, s in zip(bb_refined_rel, sz_norm)]
            bb_refined = [b.convert('xyxy') for b in bb_refined]
            with torch.no_grad():
                x = self.feature_extractor(features, bb_refined)
                new_iou_score = self.predictor(x)
                new_iou_score = new_iou_score[torch.arange(new_iou_score.shape[0]), labels]
                refinement_failed = (new_iou_score < iou_score)
                refinement_failed = refinement_failed.view(-1, 1)
                refinement_failed = refinement_failed.split(boxes_per_image, dim=0)
                # keep the previous relative box where the step hurt the score
                boxes_rel = [BoxList(b_i.bbox * r_f.float() + b_r.bbox * (1 - r_f).float(), b_i.size, b_i.mode)
                             for b_i, b_r, r_f in zip(bb_init_rel, bb_refined_rel, refinement_failed)]
                # decay step length for failures
                decay_factor = self.cfg.MODEL.ROI_IOU_HEAD.STEP_LENGTH_DECAY
                step_length = [s*(1 - r_f).float() + s*decay_factor*r_f.float()
                               for s, r_f in zip(step_length, refinement_failed)]
        # map the final relative boxes back to absolute xyxy coordinates
        boxes = [BoxList(rel_to_rect(b.bbox, s), b.size, b.mode) for b, s in zip(boxes_rel, sz_norm)]
        boxes = [b.convert("xyxy") for b in boxes]
    # BoxList construction drops extra fields, so re-attach them here
    for b, s, l in zip(boxes, scores, labels_list):
        b.add_field("scores", s)
        b.add_field("labels", l)
        b.add_field("box_labels", l)
    return boxes
def forward(self, features, proposals=None, targets=None, iteration=None, original_image_ids=None): ###############################################################
"""
Arguments:
features (list[Tensor]): feature-maps from possibly several levels
proposals (list[BoxList]): proposal boxes
targets (list[BoxList], optional): the ground-truth targets.
Returns:
x (Tensor): the result of the feature extractor
proposals (list[BoxList]): during training, the subsampled proposals
are returned. During testing, the predicted boxlists are returned
losses (dict[Tensor]): During training, returns the losses for the
head. During testing, returns an empty dict.
"""
# during TRAINING:
# (features is a list of 5 elements)
# (features[0] has shape: (16, 256, h/4, w/4)) (not always exactly h/4, w/4)
# (features[1] has shape: (16, 256, h/8, w/8))
# (features[2] has shape: (16, 256, h/16, w/16))
# (features[3] has shape: (16, 256, h/32, w/32))
# (features[4] has shape: (16, 256, h/64, w/64))
#
# (targets is a list of 16 elements, each element is a BoxList (e.g. [BoxList(num_boxes=3, image_width=800, image_height=1066, mode=xyxy), BoxList(num_boxes=19, image_width=800, image_height=1201, mode=xyxy),...]))
print (self.L)
print (self.alpha)
if self.training:
target_labels_list = [b.get_field("labels") for b in targets]
target_labels = torch.cat(target_labels_list).long() # (shape: (num_gt_bboxes_in_batch))
print (target_labels.size())
fs = self.predictor(self.feature_extractor(features, targets)) # (shape: (num_gt_bboxes_in_batch, 81)) (81 is the number of classes)
# print (fs.size())
fs = fs[torch.arange(fs.shape[0]), target_labels] # (shape: (num_gt_bboxes_in_batch))
# print (fs.size())
if self.training:
proposals = self.loss_evaluator.copy_targets(targets)
# (proposals is a list of 16 elements, each element is a BoxList, num_boxes in each BoxList is M*{num_boxes for the corresponding BoxList in targets})
# (proposals are just M copies of each target)
proposal_labels_list = [b.get_field("labels") for b in proposals]
proposal_labels = torch.cat(proposal_labels_list).long() # (shape: (num_gt_bboxes_in_batch*M))
# print (proposal_labels.size())
proposals = [b.convert("xywh") for b in proposals]
sz_norm = [b.bbox[:, 2:].clone() for b in proposals]
# print (proposals[0].bbox[0:10])
# print ("@@@@@@@@@@@@@@@@@")
proposals_rel = [BoxList(rect_to_rel(b.bbox, s), b.size, b.mode) for b, s in zip(proposals, sz_norm)]
# print (proposals_rel[0].bbox[0:10])
for l in range(self.L):
# print (l)
bb_init_rel = [BoxList(b.bbox.clone().detach(), b.size, b.mode) for b in proposals_rel]
for b in bb_init_rel:
b.bbox.requires_grad = True
bb_init = [BoxList(rel_to_rect(b.bbox, s), b.size, b.mode) for b, s in zip(bb_init_rel, sz_norm)]
bb_init = [b.convert('xyxy') for b in bb_init]
f_proposals = self.predictor(self.feature_extractor(features, bb_init)) # (shape: (num_gt_bboxes_in_batch*M, 81))
# print (f_proposals.size())
f_proposals = f_proposals[torch.arange(f_proposals.shape[0]), proposal_labels] # (shape: (num_gt_bboxes_in_batch*M))
# print (f_proposals.size())
f_proposals.backward(gradient=torch.ones_like(f_proposals))
# print (bb_init_rel[0].bbox.grad[0:10])
proposals_rel = [BoxList((b.bbox + (0.5*self.alpha**2)*b.bbox.grad).detach() + self.alpha*torch.randn(b.bbox.size()).cuda(), b.size, b.mode) for b in bb_init_rel]
# print (proposals_rel[0].bbox[0:10])
proposals = [BoxList(rel_to_rect(b.bbox, s), b.size, b.mode) for b, s in zip(proposals_rel, sz_norm)]
# print ("@@@@@@@@@@@@@@@@@")
# print (proposals[0].bbox[0:10])
proposals = [b.convert("xyxy") for b in proposals]
for b, l in zip(proposals, proposal_labels_list):
b.add_field("labels", l)
if self.training:
proposal_labels_list = [b.get_field("labels") for b in proposals]
proposal_labels = torch.cat(proposal_labels_list).long() # (shape: (num_gt_bboxes_in_batch*M))
# print (proposal_labels.size())
x = self.feature_extractor(features, proposals)
f_samples = self.predictor(x)
# (shape: (num_gt_bboxes_in_batch*M, 81)) (81 is the number of classes)
# print (f_samples.size())
f_samples = f_samples[torch.arange(f_samples.shape[0]), proposal_labels] # (shape: (num_gt_bboxes_in_batch*M))
# print (f_samples.size())
else:
# extract features that will be fed to the final classifier. The
# feature_extractor generally corresponds to the pooler + heads
x = self.feature_extractor(features, proposals)
# (x has shape: (num_preds, 1024)) (num_preds is different from batch to batch, e.g. | |
# <gh_stars>1-10  (scraper artifact; commented out so the module parses)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from enum import Enum
class THOST_TE_RESUME_TYPE(Enum):
    """Data-stream resume mode.

    NOTE(review): mirrors the CTP ``THOST_TE_RESUME_TYPE`` C enum —
    restart from the beginning, resume from the last received point, or
    quick (new data only) — confirm against the CTP API headers.
    """
    # values 0, 1, 2 in declaration order, matching the C enum
    TERT_RESTART, TERT_RESUME, TERT_QUICK = range(3)
class TThostFtdcExchangePropertyType(Enum):
    """Exchange property type (交易所属性类型).

    NOTE(review): member values look like ASCII codes of the original
    single-character CTP flags ('0' == 48, '1' == 49) — confirm against
    the CTP API headers.
    """
    THOST_FTDC_EXP_Normal = 48  # normal
    """正常"""
    THOST_FTDC_EXP_GenOrderByTrade = 49  # generate orders from trades
    """根据成交生成报单"""
class TThostFtdcIdCardTypeType(Enum):
    """Identity-document type."""
    THOST_FTDC_ICT_EID = 48  # organization code
    """组织机构代码"""
    THOST_FTDC_ICT_IDCard = 49  # Chinese citizen ID card
    """中国公民身份证"""
    THOST_FTDC_ICT_OfficerIDCard = 50  # military officer ID
    """军官证"""
    THOST_FTDC_ICT_PoliceIDCard = 51  # police officer ID
    """警官证"""
    THOST_FTDC_ICT_SoldierIDCard = 52  # soldier ID
    """士兵证"""
    THOST_FTDC_ICT_HouseholdRegister = 53  # household register
    """户口簿"""
    THOST_FTDC_ICT_Passport = 54  # passport
    """护照"""
    THOST_FTDC_ICT_TaiwanCompatriotIDCard = 55  # Taiwan compatriot ID
    """台胞证"""
    THOST_FTDC_ICT_HomeComingCard = 56  # home-return permit
    """回乡证"""
    THOST_FTDC_ICT_LicenseNo = 57  # business license number
    """营业执照号"""
    THOST_FTDC_ICT_TaxNo = 65  # tax registration number / local tax ID
    """税务登记号/当地纳税ID"""
    THOST_FTDC_ICT_HMMainlandTravelPermit = 66  # HK/Macau resident mainland travel permit
    """港澳居民来往内地通行证"""
    THOST_FTDC_ICT_TwMainlandTravelPermit = 67  # Taiwan resident mainland travel permit
    """台湾居民来往大陆通行证"""
    THOST_FTDC_ICT_DrivingLicense = 68  # driving license
    """驾照"""
    THOST_FTDC_ICT_SocialID = 70  # local social-insurance ID
    """当地社保ID"""
    THOST_FTDC_ICT_LocalID = 71  # local ID card
    """当地身份证"""
    THOST_FTDC_ICT_BusinessRegistration = 72  # business registration certificate
    """商业登记证"""
    THOST_FTDC_ICT_HKMCIDCard = 73  # HK/Macau permanent resident ID card
    """港澳永久性居民身份证"""
    THOST_FTDC_ICT_AccountsPermits = 74  # PBoC account-opening permit
    """人行开户许可证"""
    THOST_FTDC_ICT_FrgPrmtRdCard = 75  # foreigner permanent-residence card
    """外国人永久居留证"""
    THOST_FTDC_ICT_CptMngPrdLetter = 76  # asset-management product filing letter
    """资管产品备案函"""
    THOST_FTDC_ICT_OtherCard = 120  # other document
    """其他证件"""
class TThostFtdcInvestorRangeType(Enum):
    """Investor range type."""
    THOST_FTDC_IR_All = 49  # all investors
    """所有"""
    THOST_FTDC_IR_Group = 50  # investor group
    """投资者组"""
    THOST_FTDC_IR_Single = 51  # single investor
    """单一投资者"""
class TThostFtdcDepartmentRangeType(Enum):
    """Department range type (original Chinese docstring reused the investor-range wording)."""
    THOST_FTDC_DR_All = 49  # all
    """所有"""
    THOST_FTDC_DR_Group = 50  # organizational unit
    """组织架构"""
    THOST_FTDC_DR_Single = 51  # single investor
    """单一投资者"""
class TThostFtdcDataSyncStatusType(Enum):
    """Data synchronization status."""
    THOST_FTDC_DS_Asynchronous = 49  # not synchronized
    """未同步"""
    THOST_FTDC_DS_Synchronizing = 50  # synchronizing
    """同步中"""
    THOST_FTDC_DS_Synchronized = 51  # synchronized
    """已同步"""
class TThostFtdcBrokerDataSyncStatusType(Enum):
    """Broker data synchronization status."""
    THOST_FTDC_BDS_Synchronized = 49  # synchronized
    """已同步"""
    THOST_FTDC_BDS_Synchronizing = 50  # synchronizing
    """同步中"""
class TThostFtdcExchangeConnectStatusType(Enum):
    """Exchange connection status."""
    THOST_FTDC_ECS_NoConnection = 49  # no connection at all
    """没有任何连接"""
    THOST_FTDC_ECS_QryInstrumentSent = 50  # instrument query request sent
    """已经发出合约查询请求"""
    THOST_FTDC_ECS_GotInformation = 57  # information received
    """已经获取信息"""
class TThostFtdcTraderConnectStatusType(Enum):
    """Exchange trader connection status."""
    THOST_FTDC_TCS_NotConnected = 49  # no connection at all
    """没有任何连接"""
    THOST_FTDC_TCS_Connected = 50  # connected
    """已经连接"""
    THOST_FTDC_TCS_QryInstrumentSent = 51  # instrument query request sent
    """已经发出合约查询请求"""
    THOST_FTDC_TCS_SubPrivateFlow = 52  # subscribed to the private flow
    """订阅私有流"""
class TThostFtdcFunctionCodeType(Enum):
    """Function code type."""
    THOST_FTDC_FC_DataAsync = 49  # data asynchronization
    """数据异步化"""
    THOST_FTDC_FC_ForceUserLogout = 50  # force user logout
    """强制用户登出"""
    THOST_FTDC_FC_UserPasswordUpdate = 51  # change admin user password
    """变更管理用户口令"""
    THOST_FTDC_FC_BrokerPasswordUpdate = 52  # change broker password
    """变更经纪公司口令"""
    THOST_FTDC_FC_InvestorPasswordUpdate = 53  # change investor password
    """变更投资者口令"""
    THOST_FTDC_FC_OrderInsert = 54  # order insert
    """报单插入"""
    THOST_FTDC_FC_OrderAction = 55  # order action (cancel/modify)
    """报单操作"""
    THOST_FTDC_FC_SyncSystemData = 56  # synchronize system data
    """同步系统数据"""
    THOST_FTDC_FC_SyncBrokerData = 57  # synchronize broker data
    """同步经纪公司数据"""
    THOST_FTDC_FC_BachSyncBrokerData = 65  # batch-synchronize broker data
    """批量同步经纪公司数据"""
    THOST_FTDC_FC_SuperQuery = 66  # super query
    """超级查询"""
    THOST_FTDC_FC_ParkedOrderInsert = 67  # parked order insert
    """预埋报单插入"""
    THOST_FTDC_FC_ParkedOrderAction = 68  # parked order action
    """预埋报单操作"""
    THOST_FTDC_FC_SyncOTP = 69  # synchronize OTP token
    """同步动态令牌"""
    THOST_FTDC_FC_DeleteOrder = 70  # delete unknown order
    """删除未知单"""
class TThostFtdcBrokerFunctionCodeType(Enum):
    """Broker function code type."""
    THOST_FTDC_BFC_ForceUserLogout = 49  # force user logout
    """强制用户登出"""
    THOST_FTDC_BFC_UserPasswordUpdate = 50  # change user password
    """变更用户口令"""
    THOST_FTDC_BFC_SyncBrokerData = 51  # synchronize broker data
    """同步经纪公司数据"""
    THOST_FTDC_BFC_BachSyncBrokerData = 52  # batch-synchronize broker data
    """批量同步经纪公司数据"""
    THOST_FTDC_BFC_OrderInsert = 53  # order insert
    """报单插入"""
    THOST_FTDC_BFC_OrderAction = 54  # order action
    """报单操作"""
    THOST_FTDC_BFC_AllQuery = 55  # full query
    """全部查询"""
    THOST_FTDC_BFC_log = 97  # system functions: login/logout/password change etc.
    """系统功能:登入/登出/修改密码等"""
    THOST_FTDC_BFC_BaseQry = 98  # basic query: base data such as instruments, exchanges
    """基本查询:查询基础数据,如合约,交易所等常量"""
    THOST_FTDC_BFC_TradeQry = 99  # trading queries: fills, orders
    """交易查询:如查成交,委托"""
    THOST_FTDC_BFC_Trade = 100  # trading: order insert / cancel
    """交易功能:报单,撤单"""
    THOST_FTDC_BFC_Virement = 101  # bank-futures transfer
    """银期转账"""
    THOST_FTDC_BFC_Risk = 102  # risk monitoring
    """风险监控"""
    THOST_FTDC_BFC_Session = 103  # session query/management: list sessions, kick users
    """查询/管理:查询会话,踢人等"""
    THOST_FTDC_BFC_RiskNoticeCtl = 104  # risk-notice control
    """风控通知控制"""
    THOST_FTDC_BFC_RiskNotice = 105  # risk-notice sending
    """风控通知发送"""
    THOST_FTDC_BFC_BrokerDeposit = 106  # view broker deposit permission
    """察看经纪公司资金权限"""
    THOST_FTDC_BFC_QueryFund = 107  # fund query
    """资金查询"""
    THOST_FTDC_BFC_QueryOrder = 108  # order query
    """报单查询"""
    THOST_FTDC_BFC_QueryTrade = 109  # trade (fill) query
    """成交查询"""
    THOST_FTDC_BFC_QueryPosition = 110  # position query
    """持仓查询"""
    THOST_FTDC_BFC_QueryMarketData = 111  # market-data query
    """行情查询"""
    THOST_FTDC_BFC_QueryUserEvent = 112  # user-event query
    """用户事件查询"""
    THOST_FTDC_BFC_QueryRiskNotify = 113  # risk-notice query
    """风险通知查询"""
    THOST_FTDC_BFC_QueryFundChange = 114  # deposit/withdrawal query
    """出入金查询"""
    THOST_FTDC_BFC_QueryInvestor = 115  # investor-info query
    """投资者信息查询"""
    THOST_FTDC_BFC_QueryTradingCode = 116  # trading-code query
    """交易编码查询"""
    THOST_FTDC_BFC_ForceClose = 117  # forced liquidation
    """强平"""
    THOST_FTDC_BFC_PressTest = 118  # stress test
    """压力测试"""
    THOST_FTDC_BFC_RemainCalc = 119  # equity back-calculation
    """权益反算"""
    THOST_FTDC_BFC_NetPositionInd = 120  # net-position margin indicator
    """净持仓保证金指标"""
    THOST_FTDC_BFC_RiskPredict = 121  # risk forecast
    """风险预算"""
    THOST_FTDC_BFC_DataExport = 122  # data export
    """数据导出"""
    THOST_FTDC_BFC_RiskTargetSetup = 65  # risk-target setup
    """风控指标设置"""
    THOST_FTDC_BFC_MarketDataWarn = 66  # market-data alert
    """行情预警"""
    THOST_FTDC_BFC_QryBizNotice = 67  # business-notice query
    """业务通知查询"""
    THOST_FTDC_BFC_CfgBizNotice = 68  # business-notice template setup
    """业务通知模板设置"""
    THOST_FTDC_BFC_SyncOTP = 69  # synchronize OTP token
    """同步动态令牌"""
    THOST_FTDC_BFC_SendBizNotice = 70  # send business notice
    """发送业务通知"""
    THOST_FTDC_BFC_CfgRiskLevelStd = 71  # risk-level standard setup
    """风险级别标准设置"""
    THOST_FTDC_BFC_TbCommand = 72  # trading-terminal emergency command
    """交易终端应急功能"""
    THOST_FTDC_BFC_DeleteOrder = 74  # delete unknown order
    """删除未知单"""
    THOST_FTDC_BFC_ParkedOrderInsert = 75  # parked order insert
    """预埋报单插入"""
    THOST_FTDC_BFC_ParkedOrderAction = 76  # parked order action
    """预埋报单操作"""
    THOST_FTDC_BFC_ExecOrderNoCheck = 77  # allow exercise even with insufficient funds
    """资金不够仍允许行权"""
    THOST_FTDC_BFC_Designate = 78  # designation
    """指定"""
    THOST_FTDC_BFC_StockDisposal = 79  # stock disposal
    """证券处置"""
    THOST_FTDC_BFC_BrokerDepositWarn = 81  # seat-fund alert
    """席位资金预警"""
    THOST_FTDC_BFC_CoverWarn = 83  # insufficient covered-position alert
    """备兑不足预警"""
    THOST_FTDC_BFC_PreExecOrder = 84  # exercise trial calculation
    """行权试算"""
    THOST_FTDC_BFC_ExecOrderRisk = 80  # exercise settlement risk
    """行权交收风险"""
    THOST_FTDC_BFC_PosiLimitWarn = 85  # position-limit alert
    """持仓限额预警"""
    THOST_FTDC_BFC_QryPosiLimit = 86  # position-limit query
    """持仓限额查询"""
    THOST_FTDC_BFC_FBSign = 87  # bank-futures sign-in/sign-out
    """银期签到签退"""
    THOST_FTDC_BFC_FBAccount = 88  # bank-futures account signing/termination
    """银期签约解约"""
class TThostFtdcOrderActionStatusType(Enum):
    """Order action status."""
    THOST_FTDC_OAS_Submitted = 97  # submitted
    """已经提交"""
    THOST_FTDC_OAS_Accepted = 98  # accepted
    """已经接受"""
    THOST_FTDC_OAS_Rejected = 99  # rejected
    """已经被拒绝"""
class TThostFtdcOrderStatusType(Enum):
    """Order status."""
    THOST_FTDC_OST_AllTraded = 48  # fully traded
    """全部成交"""
    THOST_FTDC_OST_PartTradedQueueing = 49  # partially traded, still queueing
    """部分成交还在队列中"""
    THOST_FTDC_OST_PartTradedNotQueueing = 50  # partially traded, no longer in queue
    """部分成交不在队列中"""
    THOST_FTDC_OST_NoTradeQueueing = 51  # no trade, still queueing
    """未成交还在队列中"""
    THOST_FTDC_OST_NoTradeNotQueueing = 52  # no trade, not in queue
    """未成交不在队列中"""
    THOST_FTDC_OST_Canceled = 53  # canceled
    """撤单"""
    THOST_FTDC_OST_Unknown = 97  # unknown
    """未知"""
    THOST_FTDC_OST_NotTouched = 98  # not yet triggered
    """尚未触发"""
    THOST_FTDC_OST_Touched = 99  # triggered
    """已触发"""
class TThostFtdcOrderSubmitStatusType(Enum):
    """Order submit status."""
    THOST_FTDC_OSS_InsertSubmitted = 48  # insert submitted
    """已经提交"""
    THOST_FTDC_OSS_CancelSubmitted = 49  # cancel submitted
    """撤单已经提交"""
    THOST_FTDC_OSS_ModifySubmitted = 50  # modify submitted
    """修改已经提交"""
    THOST_FTDC_OSS_Accepted = 51  # accepted
    """已经接受"""
    THOST_FTDC_OSS_InsertRejected = 52  # insert rejected
    """报单已经被拒绝"""
    THOST_FTDC_OSS_CancelRejected = 53  # cancel rejected
    """撤单已经被拒绝"""
    THOST_FTDC_OSS_ModifyRejected = 54  # modify rejected
    """改单已经被拒绝"""
class TThostFtdcPositionDateType(Enum):
    """Position date type."""
    THOST_FTDC_PSD_Today = 49  # today's position
    """今日持仓"""
    THOST_FTDC_PSD_History = 50  # historical position
    """历史持仓"""
class TThostFtdcPositionDateTypeType(Enum):
    """Position-date handling type (whether history positions are used)."""
    THOST_FTDC_PDT_UseHistory = 49  # use historical positions
    """使用历史持仓"""
    THOST_FTDC_PDT_NoUseHistory = 50  # do not use historical positions
    """不使用历史持仓"""
class TThostFtdcTradingRoleType(Enum):
    """Trading role."""
    THOST_FTDC_ER_Broker = 49  # broker (agency)
    """代理"""
    THOST_FTDC_ER_Host = 50  # proprietary
    """自营"""
    THOST_FTDC_ER_Maker = 51  # market maker
    """做市商"""
class TThostFtdcProductClassType(Enum):
    """Product class."""
    THOST_FTDC_PC_Futures = 49  # futures
    """期货"""
    THOST_FTDC_PC_Options = 50  # options on futures
    """期货期权"""
    THOST_FTDC_PC_Combination = 51  # combination
    """组合"""
    THOST_FTDC_PC_Spot = 52  # spot
    """即期"""
    THOST_FTDC_PC_EFP = 53  # exchange of futures for physicals
    """期转现"""
    THOST_FTDC_PC_SpotOption = 54  # spot option
    """现货期权"""
class TThostFtdcInstLifePhaseType(Enum):
    """Instrument life-cycle phase."""
    THOST_FTDC_IP_NotStart = 48  # not yet listed
    """未上市"""
    THOST_FTDC_IP_Started = 49  # listed
    """上市"""
    THOST_FTDC_IP_Pause = 50  # suspended
    """停牌"""
    THOST_FTDC_IP_Expired = 51  # expired
    """到期"""
class TThostFtdcDirectionType(Enum):
    """Buy/sell direction."""
    THOST_FTDC_D_Buy = 48  # buy
    """买"""
    THOST_FTDC_D_Sell = 49  # sell
    """卖"""
class TThostFtdcPositionTypeType(Enum):
    """Position type."""
    THOST_FTDC_PT_Net = 49  # net position
    """净持仓"""
    THOST_FTDC_PT_Gross = 50  # gross position
    """综合持仓"""
class TThostFtdcPosiDirectionType(Enum):
    """Position direction (long/short)."""
    THOST_FTDC_PD_Net = 49  # net
    """净"""
    THOST_FTDC_PD_Long = 50  # long
    """多头"""
    THOST_FTDC_PD_Short = 51  # short
    """空头"""
class TThostFtdcSysSettlementStatusType(Enum):
    """System settlement status."""
    THOST_FTDC_SS_NonActive = 49  # inactive
    """不活跃"""
    THOST_FTDC_SS_Startup = 50  # starting up
    """启动"""
    THOST_FTDC_SS_Operating = 51  # operating
    """操作"""
    THOST_FTDC_SS_Settlement = 52  # settling
    """结算"""
    THOST_FTDC_SS_SettlementFinished = 53  # settlement finished
    """结算完成"""
class TThostFtdcRatioAttrType(Enum):
    """Fee-ratio attribute."""
    THOST_FTDC_RA_Trade = 48  # trading ratio
    """交易费率"""
    THOST_FTDC_RA_Settlement = 49  # settlement ratio
    """结算费率"""
class TThostFtdcHedgeFlagType(Enum):
    """Hedge flag (speculation / arbitrage / hedge)."""
    THOST_FTDC_HF_Speculation = 49  # speculation
    """投机"""
    THOST_FTDC_HF_Arbitrage = 50  # arbitrage
    """套利"""
    THOST_FTDC_HF_Hedge = 51  # hedge
    """套保"""
    THOST_FTDC_HF_MarketMaker = 53  # market maker
    """做市商"""
    THOST_FTDC_HF_SpecHedge = 54  # leg1 speculation / leg2 hedge (DCE only)
    """第一腿投机第二腿套保 大商所专用"""
    THOST_FTDC_HF_HedgeSpec = 55  # leg1 hedge / leg2 speculation (DCE only)
    """第一腿套保第二腿投机 大商所专用"""
class TThostFtdcBillHedgeFlagType(Enum):
    """Bill hedge flag."""
    THOST_FTDC_BHF_Speculation = 49  # speculation
    """投机"""
    THOST_FTDC_BHF_Arbitrage = 50  # arbitrage
    """套利"""
    THOST_FTDC_BHF_Hedge = 51  # hedge
    """套保"""
class TThostFtdcClientIDTypeType(Enum):
    """Trading-code (client ID) type."""
    THOST_FTDC_CIDT_Speculation = 49  # speculation
    """投机"""
    THOST_FTDC_CIDT_Arbitrage = 50  # arbitrage
    """套利"""
    THOST_FTDC_CIDT_Hedge = 51  # hedge
    """套保"""
    THOST_FTDC_CIDT_MarketMaker = 53  # market maker
    """做市商"""
class TThostFtdcOrderPriceTypeType(Enum):
    """Order price condition."""
    THOST_FTDC_OPT_AnyPrice = 49  # any price (market order)
    """任意价"""
    THOST_FTDC_OPT_LimitPrice = 50  # limit price
    """限价"""
    THOST_FTDC_OPT_BestPrice = 51  # best price
    """最优价"""
    THOST_FTDC_OPT_LastPrice = 52  # last price
    """最新价"""
    THOST_FTDC_OPT_LastPricePlusOneTicks = 53  # last price + 1 tick
    """最新价浮动上浮1个ticks"""
    THOST_FTDC_OPT_LastPricePlusTwoTicks = 54  # last price + 2 ticks
    """最新价浮动上浮2个ticks"""
    THOST_FTDC_OPT_LastPricePlusThreeTicks = 55  # last price + 3 ticks
    """最新价浮动上浮3个ticks"""
    THOST_FTDC_OPT_AskPrice1 = 56  # best ask
    """卖一价"""
    THOST_FTDC_OPT_AskPrice1PlusOneTicks = 57  # best ask + 1 tick
    """卖一价浮动上浮1个ticks"""
    THOST_FTDC_OPT_AskPrice1PlusTwoTicks = 65  # best ask + 2 ticks
    """卖一价浮动上浮2个ticks"""
    THOST_FTDC_OPT_AskPrice1PlusThreeTicks = 66  # best ask + 3 ticks
    """卖一价浮动上浮3个ticks"""
    THOST_FTDC_OPT_BidPrice1 = 67  # best bid
    """买一价"""
    THOST_FTDC_OPT_BidPrice1PlusOneTicks = 68  # best bid + 1 tick
    """买一价浮动上浮1个ticks"""
    THOST_FTDC_OPT_BidPrice1PlusTwoTicks = 69  # best bid + 2 ticks
    """买一价浮动上浮2个ticks"""
    THOST_FTDC_OPT_BidPrice1PlusThreeTicks = 70  # best bid + 3 ticks
    """买一价浮动上浮3个ticks"""
    THOST_FTDC_OPT_FiveLevelPrice = 71  # five-level price
    """五档价"""
class TThostFtdcOffsetFlagType(Enum):
    """Open/close (offset) flag."""
    THOST_FTDC_OF_Open = 48  # open position
    """开仓"""
    THOST_FTDC_OF_Close = 49  # close position
    """平仓"""
    THOST_FTDC_OF_ForceClose = 50  # forced liquidation
    """强平"""
    THOST_FTDC_OF_CloseToday = 51  # close today's position
    """平今"""
    THOST_FTDC_OF_CloseYesterday = 52  # close yesterday's position
    """平昨"""
    THOST_FTDC_OF_ForceOff = 53  # forced reduction
    """强减"""
    THOST_FTDC_OF_LocalForceClose = 54  # local forced liquidation
    """本地强平"""
class TThostFtdcForceCloseReasonType(Enum):
    """Forced-liquidation reason."""
    THOST_FTDC_FCC_NotForceClose = 48  # not a forced liquidation
    """非强平"""
    THOST_FTDC_FCC_LackDeposit = 49  # insufficient funds
    """资金不足"""
    THOST_FTDC_FCC_ClientOverPositionLimit = 50  # client over position limit
    """客户超仓"""
    THOST_FTDC_FCC_MemberOverPositionLimit = 51  # member over position limit
    """会员超仓"""
    THOST_FTDC_FCC_NotMultiple = 52  # position not an integer multiple
    """持仓非整数倍"""
    THOST_FTDC_FCC_Violation = 53  # rule violation
    """违规"""
    THOST_FTDC_FCC_Other = 54  # other
    """其它"""
    THOST_FTDC_FCC_PersonDeliv = 55  # natural person approaching delivery
    """自然人临近交割"""
class TThostFtdcOrderTypeType(Enum):
    """Order type."""
    THOST_FTDC_ORDT_Normal = 48  # normal
    """正常"""
    THOST_FTDC_ORDT_DeriveFromQuote = 49  # derived from a quote
    """报价衍生"""
    THOST_FTDC_ORDT_DeriveFromCombination = 50  # derived from a combination
    """组合衍生"""
    THOST_FTDC_ORDT_Combination = 51  # combination order
    """组合报单"""
    THOST_FTDC_ORDT_ConditionalOrder = 52  # conditional order
    """条件单"""
    THOST_FTDC_ORDT_Swap = 53  # swap order
    """互换单"""
    THOST_FTDC_ORDT_DeriveFromBlockTrade = 54  # derived from a block trade
    """大宗交易成交衍生"""
    THOST_FTDC_ORDT_DeriveFromEFPTrade = 55  # derived from an EFP trade
    """期转现成交衍生"""
class TThostFtdcTimeConditionType(Enum):
    """Time-in-force condition."""
    THOST_FTDC_TC_IOC = 49  # immediate or cancel
    """立即完成,否则撤销"""
    THOST_FTDC_TC_GFS = 50  # good for this session
    """本节有效"""
    THOST_FTDC_TC_GFD = 51  # good for day
    """当日有效"""
    THOST_FTDC_TC_GTD = 52  # good till date
    """指定日期前有效"""
    THOST_FTDC_TC_GTC = 53  # good till canceled
    """撤销前有效"""
    THOST_FTDC_TC_GFA = 54  # good for auction
    """集合竞价有效"""
class TThostFtdcVolumeConditionType(Enum):
    """Volume condition."""
    THOST_FTDC_VC_AV = 49  # any volume
    """任何数量"""
    THOST_FTDC_VC_MV = 50  # minimum volume
    """最小数量"""
    THOST_FTDC_VC_CV = 51  # complete volume
    """全部数量"""
class TThostFtdcContingentConditionType(Enum):
    """Trigger (contingent) condition."""
    THOST_FTDC_CC_Immediately = 49  # immediately
    """立即"""
    THOST_FTDC_CC_Touch = 50  # stop loss
    """止损"""
    THOST_FTDC_CC_TouchProfit = 51  # take profit
    """止赢"""
    THOST_FTDC_CC_ParkedOrder = 52  # parked order
    """预埋单"""
    THOST_FTDC_CC_LastPriceGreaterThanStopPrice = 53  # last price > stop price
    """最新价大于条件价"""
    THOST_FTDC_CC_LastPriceGreaterEqualStopPrice = 54  # last price >= stop price
    """最新价大于等于条件价"""
    THOST_FTDC_CC_LastPriceLesserThanStopPrice = 55  # last price < stop price
    """最新价小于条件价"""
    THOST_FTDC_CC_LastPriceLesserEqualStopPrice = 56  # last price <= stop price
    """最新价小于等于条件价"""
    THOST_FTDC_CC_AskPriceGreaterThanStopPrice = 57  # best ask > stop price
    """卖一价大于条件价"""
    THOST_FTDC_CC_AskPriceGreaterEqualStopPrice = 65  # best ask >= stop price
    """卖一价大于等于条件价"""
    THOST_FTDC_CC_AskPriceLesserThanStopPrice = 66  # best ask < stop price
    """卖一价小于条件价"""
    THOST_FTDC_CC_AskPriceLesserEqualStopPrice = 67  # best ask <= stop price
    """卖一价小于等于条件价"""
    THOST_FTDC_CC_BidPriceGreaterThanStopPrice = 68  # best bid > stop price
    """买一价大于条件价"""
    THOST_FTDC_CC_BidPriceGreaterEqualStopPrice = 69  # best bid >= stop price
    """买一价大于等于条件价"""
    THOST_FTDC_CC_BidPriceLesserThanStopPrice = 70  # best bid < stop price
    """买一价小于条件价"""
    THOST_FTDC_CC_BidPriceLesserEqualStopPrice = 72  # best bid <= stop price
    """买一价小于等于条件价"""
class TThostFtdcActionFlagType(Enum):
    """Action flag."""
    THOST_FTDC_AF_Delete = 48  # delete (cancel)
    """删除"""
    THOST_FTDC_AF_Modify = 51  # modify
    """修改"""
class TThostFtdcTradingRightType(Enum):
    """Trading right."""
    THOST_FTDC_TR_Allow = 48  # trading allowed
    """可以交易"""
    THOST_FTDC_TR_CloseOnly = 49  # close-only
    """只能平仓"""
    THOST_FTDC_TR_Forbidden = 50  # trading forbidden
    """不能交易"""
class TThostFtdcOrderSourceType(Enum):
    """Order source."""
    THOST_FTDC_OSRC_Participant = 48  # from a participant
    """来自参与者"""
    THOST_FTDC_OSRC_Administrator = 49  # from an administrator
    """来自管理员"""
class TThostFtdcTradeTypeType(Enum):
    """Trade type."""
    THOST_FTDC_TRDT_SplitCombinatio = 110  # combination split into single positions; should not appear at init
    """组合持仓拆分为单一持仓,初始化不应包含该类型的持仓"""
    THOST_FTDC_TRDT_Common = 48  # ordinary trade
    """普通成交"""
    THOST_FTDC_TRDT_OptionsExecution = 49  # option exercise
    """期权执行"""
    THOST_FTDC_TRDT_OTC = 50  # OTC trade
    """OTC成交"""
    THOST_FTDC_TRDT_EFPDerived = 51  # EFP-derived trade
    """期转现衍生成交"""
    THOST_FTDC_TRDT_CombinationDerived = 52  # combination-derived trade
    """组合衍生成交"""
    THOST_FTDC_TRDT_BlockTrade = 53  # block trade
    """大宗交易成交"""
class TThostFtdcPriceSourceType(Enum):
    """Trade price source."""
    THOST_FTDC_PSRC_LastPrice = 48  # previous trade price
    """前成交价"""
    THOST_FTDC_PSRC_Buy = 49  # buy order price
    """买委托价"""
    THOST_FTDC_PSRC_Sell = 50  # sell order price
    """卖委托价"""
    THOST_FTDC_PSRC_OTC = 51  # off-exchange trade price
    """场外成交价"""
class TThostFtdcInstrumentStatusType(Enum):
    """Instrument trading status."""
    THOST_FTDC_IS_BeforeTrading = 48  # before trading (pre-open)
    """开盘前"""
    THOST_FTDC_IS_NoTrading = 49  # non-trading
    """非交易"""
    THOST_FTDC_IS_Continous = 50  # continuous trading
    """连续交易"""
    THOST_FTDC_IS_AuctionOrdering = 51  # call-auction order entry
    """集合竞价报单"""
    THOST_FTDC_IS_AuctionBalance = 52  # call-auction price balancing
    """集合竞价价格平衡"""
    THOST_FTDC_IS_AuctionMatch = 53  # call-auction matching
    """集合竞价撮合"""
    THOST_FTDC_IS_Closed = 54  # closed
    """收盘"""
class TThostFtdcInstStatusEnterReasonType(Enum):
    """Reason for entering an instrument trading status."""
    THOST_FTDC_IER_Automatic = 49  # automatic switch
    """自动切换"""
    THOST_FTDC_IER_Manual = 50  # manual switch
    """手动切换"""
    THOST_FTDC_IER_Fuse = 51  # circuit breaker
    """熔断"""
class TThostFtdcBatchStatusType(Enum):
    """Batch processing status."""
    THOST_FTDC_BS_NoUpload = 49  # not uploaded
    """未上传"""
    THOST_FTDC_BS_Uploaded = 50  # uploaded
    """已上传"""
    THOST_FTDC_BS_Failed = 51  # audit failed
    """审核失败"""
class TThostFtdcReturnStyleType(Enum):
    """Rebate style (per-product granularity)."""
    THOST_FTDC_RS_All = 49  # across all products
    """按所有品种"""
    THOST_FTDC_RS_ByProduct = 50  # per product
    """按品种"""
class TThostFtdcReturnPatternType(Enum):
    """Rebate pattern."""
    THOST_FTDC_RP_ByVolume = 49  # by traded volume
    """按成交手数"""
    THOST_FTDC_RP_ByFeeOnHand = 50  # by retained fee
    """按留存手续费"""
class TThostFtdcReturnLevelType(Enum):
    """Rebate level."""
    THOST_FTDC_RL_Level1 = 49  # level 1
    """级别1"""
    THOST_FTDC_RL_Level2 = 50  # level 2
    """级别2"""
    THOST_FTDC_RL_Level3 = 51  # level 3
    """级别3"""
    THOST_FTDC_RL_Level4 = 52  # level 4
    """级别4"""
    THOST_FTDC_RL_Level5 = 53  # level 5
    """级别5"""
    THOST_FTDC_RL_Level6 = 54  # level 6
    """级别6"""
    THOST_FTDC_RL_Level7 = 55  # level 7
    """级别7"""
    THOST_FTDC_RL_Level8 = 56  # level 8
    """级别8"""
    THOST_FTDC_RL_Level9 = 57  # level 9
    """级别9"""
class TThostFtdcReturnStandardType(Enum):
    """Rebate standard."""
    THOST_FTDC_RSD_ByPeriod = 49  # staged (per-period) rebate
    """分阶段返还"""
    THOST_FTDC_RSD_ByStandard = 50  # by a given standard
    """按某一标准"""
class TThostFtdcMortgageTypeType(Enum):
    """Pledge (mortgage) type."""
    THOST_FTDC_MT_Out = 48  # pledge out
    """质出"""
    THOST_FTDC_MT_In = 49  # pledge in
    """质入"""
class TThostFtdcInvestorSettlementParamIDType(Enum):
    """Investor settlement parameter ID."""
    THOST_FTDC_ISPI_MortgageRatio = 52  # pledge ratio
    """质押比例"""
    THOST_FTDC_ISPI_MarginWay = 53  # margin algorithm
    """保证金算法"""
    THOST_FTDC_ISPI_BillDeposit = 57  # whether settlement balance includes pledge
    """结算单结存是否包含质押"""
class TThostFtdcExchangeSettlementParamIDType(Enum):
"""交易所结算参数代码类型"""
THOST_FTDC_ESPI_MortgageRatio = 49
"""质押比例"""
THOST_FTDC_ESPI_OtherFundItem = | |
= argToList(args['file'])
extended_data = argToBoolean(args.get('extended_data'))
results: List[CommandResults] = list()
for file in files:
raise_if_hash_not_valid(file)
try:
raw_response = client.file(file, relationships)
results.append(build_file_output(client, score_calculator, file, raw_response, extended_data))
except Exception as exc:
# If anything happens, just keep going
results.append(CommandResults(readable_output=f'Could not process file: "{file}"\n {str(exc)}'))
return results
def url_command(client: Client, score_calculator: ScoreCalculator, args: dict, relationships: str) -> List[CommandResults]:
    """
    1 API Call for regular
    1-4 API Calls for premium subscriptions
    """
    extended_data = argToBoolean(args.get('extended_data'))
    command_results: List[CommandResults] = []
    for url in argToList(args['url']):
        try:
            raw_response = client.url(url, relationships)
        except Exception as exception:
            # Best effort: log the failure and move on to the next URL.
            demisto.debug(f'Could not process URL: "{url}".\n {str(exception)}')
            continue
        command_results.append(build_url_output(client, score_calculator, url, raw_response, extended_data))
    return command_results
def domain_command(client: Client, score_calculator: ScoreCalculator, args: dict, relationships: str) -> List[CommandResults]:
    """
    1 API Call for regular
    1-4 API Calls for premium subscriptions
    """
    extended_data = argToBoolean(args.get('extended_data'))
    command_results: List[CommandResults] = []
    for domain in argToList(args['domain']):
        try:
            raw_response = client.domain(domain, relationships)
        except Exception as exception:
            # Best effort: log the failure and move on to the next domain.
            demisto.debug(f'Could not process domain: "{domain}"\n {str(exception)}')
            continue
        command_results.append(
            build_domain_output(client, score_calculator, domain, raw_response, extended_data)
        )
    return command_results
# endregion
# region Scan commands
def file_rescan_command(client: Client, args: dict) -> CommandResults:
    """
    1 API Call
    """
    file_hash = args['file']
    raise_if_hash_not_valid(file_hash)
    raw_response = client.file_rescan(file_hash)
    submission = raw_response['data']
    # Record which hash the submission belongs to, for the context output.
    submission['hash'] = file_hash
    readable = tableToMarkdown(
        f'File "{file_hash}" resubmitted.',
        submission,
        removeNull=True,
        headerTransform=underscoreToCamelCase
    )
    return CommandResults(
        readable_output=readable,
        outputs={
            f'{INTEGRATION_ENTRY_CONTEXT}.Submission(val.id && val.id === obj.id)': submission,
            'vtScanID': submission.get('id')  # BC preservation
        },
        raw_response=raw_response
    )
def get_md5_by_entry_id(entry_id: str) -> str:
    """Look up the MD5 of the file with the given War Room entry ID in the context."""
    md5 = demisto.dt(demisto.context(), f'File(val.EntryID === "{entry_id}").MD5')
    if md5:
        return md5
    raise DemistoException('Could not find MD5')
def encode_to_base64(md5: str, id_: Union[str, int]) -> str:
    """Return base64("md5:id_").

    The API sometimes returns the id as a bare number; other endpoints expect
    the id joined with the file MD5 and base64-encoded, which is done here.

    Args:
        md5: The MD5 of the file sent to scan
        id_: The id returned from the file scan

    Returns:
        base64 encoded of md5:id_
    """
    joined = f'{md5}:{id_}'
    return base64.b64encode(joined.encode('utf-8')).decode('utf-8')
def get_working_id(id_: str, entry_id: str) -> str:
    """Validate the analysis id returned by a file scan.

    The VirusTotal v3 API sometimes returns a bare number instead of a usable
    analysis id; such an id cannot be used by the other commands, so we raise
    and ask the user to resend the file.

    Args:
        entry_id: the entry id connected to the file
        id_: id given from the API

    Returns:
        A working ID that we can use in other commands.
    """
    looks_numeric = isinstance(id_, int) or (isinstance(id_, str) and id_.isnumeric())
    if looks_numeric:
        demisto.debug(f'Got an integer id from file-scan. {id_=}, {entry_id=}\n')
        raise DemistoException(
            f'Got an int {id_=} as analysis report. This is a bug in VirusTotal v3 API.\n'
            f'While VirusTotal team is fixing the problem, try to resend the file.'
        )
    return id_
def file_scan(client: Client, args: dict) -> List[CommandResults]:
    """
    1 API Call

    Uploads one or more files (referenced by War Room entry ID) to VirusTotal
    for scanning. A failure on one entry is reported as an error entry and
    does not stop the remaining uploads.
    """
    entry_ids = argToList(args['entryID'])
    upload_url = args.get('uploadURL')
    # One explicit upload URL cannot be applied to several files.
    if len(entry_ids) > 1 and upload_url:
        raise DemistoException('You can supply only one entry ID with an upload URL.')
    results = list()
    for entry_id in entry_ids:
        try:
            file_obj = demisto.getFilePath(entry_id)
            file_path = file_obj['path']
            raw_response = client.file_scan(file_path, upload_url=upload_url)
            data = raw_response.get('data', {})
            # add current file as identifiers
            data.update(
                get_file_context(entry_id)
            )
            id_ = data.get('id')
            demisto.debug(f'Result from vt-scan-file {entry_id=} {id_=} {data.get("type")=}')
            # Guard against the VT v3 bug where a bare number is returned as the id.
            id_ = get_working_id(id_, entry_id)
            data['id'] = id_
            context = {
                f'{INTEGRATION_ENTRY_CONTEXT}.Submission(val.id && val.id === obj.id)': data,
                'vtScanID': id_  # BC preservation
            }
            results.append(CommandResults(
                readable_output=tableToMarkdown(
                    f'The file has been submitted "{file_obj["name"]}"',
                    data,
                    headers=['id', 'EntryID', 'MD5', 'SHA1', 'SHA256'],
                ),
                outputs=context,
                raw_response=raw_response
            ))
        except Exception as exc:
            # Report the failure for this entry as an error entry, keep going.
            err = f'Could not process {entry_id=}.\n{str(exc)}'
            demisto.debug(err)
            demisto.results({
                'Type': entryTypes['error'],
                'ContentsFormat': formats['text'],
                'Contents': err
            })
    return results
def get_upload_url(client: Client) -> CommandResults:
    """
    1 API Call
    """
    raw_response = client.get_upload_url()
    upload_url = raw_response['data']
    readable = tableToMarkdown(
        'New upload url acquired!',
        {'Upload url': upload_url}
    )
    return CommandResults(
        readable_output=readable,
        outputs={
            f'{INTEGRATION_ENTRY_CONTEXT}.FileUploadURL': upload_url,
            'vtUploadURL': upload_url  # BC preservation
        },
        raw_response=raw_response
    )
def scan_url_command(client: Client, args: dict) -> CommandResults:
    """
    1 API Call
    """
    url = args['url']
    raw_response = client.url_scan(url)
    submission = raw_response['data']
    # Record which URL the submission belongs to, for the context output.
    submission['url'] = url
    readable = tableToMarkdown(
        'New url submission:',
        submission,
        headers=['id', 'url']
    )
    return CommandResults(
        readable_output=readable,
        outputs={
            f'{INTEGRATION_ENTRY_CONTEXT}.Submission(val.id && val.id === obj.id)': submission,
            'vtScanID': submission.get('id')  # BC preservation
        },
        raw_response=raw_response
    )
# endregion
# region Comments commands
def get_comments_command(client: Client, args: dict) -> CommandResults:
    """
    1 API Call
    BC Break - No NotBefore argument
    added limit

    Fetches up to `limit` comments for a file/url/ip/domain resource and,
    optionally, filters out comments newer than the `before` date.
    """
    limit = arg_to_number_must_int(
        args.get('limit'),
        arg_name='limit',
        required=True
    )
    resource = args['resource']
    if before := args.get('before'):
        before = parse(before)
        assert before is not None, f'Could not parse the before date "{before}"'
        # Comments dates are compared naive, so strip the tzinfo here too.
        before = before.replace(tzinfo=None)
    resource_type = args.get('resource_type')
    # When no explicit type is given, infer it: a valid hash means 'file',
    # anything else is treated as a URL.
    if not resource_type:
        try:
            raise_if_hash_not_valid(resource)
            resource_type = 'file'
        except ValueError:
            resource_type = 'url'
    resource_type = resource_type.lower()
    # Will find if there's one and only one True in the list.
    if resource_type == 'ip':
        raise_if_ip_not_valid(resource)
        raw_response = client.get_ip_comments(resource, limit)
    elif resource_type == 'url':
        raw_response = client.get_url_comments(resource, limit)
    elif resource_type == 'hash':
        raise_if_hash_not_valid(resource)
        raw_response = client.get_hash_comments(resource, limit)
    elif resource_type == 'domain':
        raw_response = client.get_domain_comments(resource, limit)
    else:
        raise DemistoException(f'Could not find resource type of "{resource_type}"')
    data = raw_response.get('data', {})
    context = {
        'indicator': resource,
        'comments': data
    }
    comments = []
    for comment in data:
        attributes = comment.get('attributes', {})
        votes = attributes.get('votes', {})
        if date := parse(str(attributes.get('date'))):
            date = date.replace(tzinfo=None)
        # Skip comments newer than the requested 'before' cutoff.
        if date and before and date > before:
            continue
        comments.append({
            'Date': epoch_to_timestamp(attributes.get('date')),
            'Text': attributes.get('text'),
            'Positive Votes': votes.get('positive'),
            'Abuse Votes': votes.get('abuse'),
            'Negative Votes': votes.get('negative')
        })
    # First two positional args are outputs_prefix and outputs_key_field.
    return CommandResults(
        f'{INTEGRATION_ENTRY_CONTEXT}.Comments',
        'id',
        readable_output=tableToMarkdown(
            f'Virus Total comments of {resource_type}: "{resource}"',
            comments,
            headers=['Date', 'Text', 'Positive Votes', 'Abuse Votes', 'Negative Votes']
        ),
        outputs=context,
        raw_response=raw_response
    )
def add_comments_command(client: Client, args: dict) -> CommandResults:
    """
    1 API Call
    """
    resource = args['resource']
    comment = args['comment']
    resource_type = args.get('resource_type')
    # When no explicit type is given, infer it: a valid hash means 'file',
    # anything else is treated as a URL.
    if not resource_type:
        try:
            raise_if_hash_not_valid(resource)
            resource_type = 'file'
        except ValueError:
            resource_type = 'url'
    resource_type = resource_type.lower()
    if resource_type == 'ip':
        raise_if_ip_not_valid(resource)
        add_comment = client.add_comment_to_ip
    elif resource_type == 'url':
        add_comment = client.add_comment_to_url
    elif resource_type == 'domain':
        add_comment = client.add_comment_to_domain
    elif resource_type == 'file':
        raise_if_hash_not_valid(resource)
        add_comment = client.add_comment_to_file
    else:
        raise DemistoException(f'Could not find resource type of "{resource_type}"')
    raw_response = add_comment(resource, comment)
    data = raw_response['data']
    attributes = data.get('attributes', {})
    votes = attributes.get('votes', {})
    return CommandResults(
        f'{INTEGRATION_ENTRY_CONTEXT}.Comments.comments',
        'id',
        readable_output=tableToMarkdown(
            'Comment has been added!',
            {
                'Date': epoch_to_timestamp(attributes.get('date')),
                'Text': attributes.get('text'),
                'Positive Votes': votes.get('positive'),
                'Abuse Votes': votes.get('abuse'),
                'Negative Votes': votes.get('negative')
            },
            headers=['Date', 'Text', 'Positive Votes', 'Abuse Votes', 'Negative Votes']
        ),
        outputs=data,
        raw_response=raw_response
    )
def get_comments_by_id_command(client: Client, args: dict) -> CommandResults:
    """
    1 API Call
    """
    comment_id = args['id']
    raw_response = client.get_comment_by_id(comment_id)
    comment = raw_response['data']
    attributes = comment.get('attributes', {})
    votes = attributes.get('votes', {})
    table = {
        'Date': epoch_to_timestamp(attributes.get('date')),
        'Text': attributes.get('text'),
        'Positive Votes': votes.get('positive'),
        'Abuse Votes': votes.get('abuse'),
        'Negative Votes': votes.get('negative')
    }
    return CommandResults(
        f'{INTEGRATION_ENTRY_CONTEXT}.Comments.comments',
        'id',
        readable_output=tableToMarkdown(
            f'Comment of ID {comment_id}',
            table,
            headers=['Date', 'Text', 'Positive Votes', 'Abuse Votes', 'Negative Votes']
        ),
        outputs=comment,
        raw_response=raw_response
    )
# endregion
def file_sandbox_report_command(client: Client, args: dict) -> CommandResults:
    """
    1 API Call

    Fetches up to `limit` sandbox behaviour reports for a file hash.
    """
    file_hash = args['file']
    # arg_to_number_must_int always returns an int (and raises otherwise),
    # matching the sibling commands. The previous arg_to_number + `assert`
    # combination relied on an assert for validation, which is stripped when
    # Python runs with -O.
    limit = arg_to_number_must_int(
        args['limit'],
        arg_name='limit',
        required=True
    )
    raise_if_hash_not_valid(file_hash)
    raw_response = client.file_sandbox_report(file_hash, limit)
    data = raw_response['data']
    return CommandResults(
        f'{INTEGRATION_ENTRY_CONTEXT}.SandboxReport',
        'id',
        readable_output=tableToMarkdown(
            f'Sandbox Reports for file hash: {file_hash}',
            [
                {
                    'id': item['id'],
                    **item['attributes'],
                    'link': item['links']['self']
                } for item in data
            ],
            headers=['analysis_date', 'last_modification_date', 'sandbox_name', 'link'],
            removeNull=True,
            headerTransform=underscoreToCamelCase
        ),
        outputs=data,
        raw_response=raw_response
    )
def passive_dns_data(client: Client, args: dict) -> CommandResults:
    """
    1 API Call
    """
    ip = args['ip']
    limit = arg_to_number_must_int(
        args['limit'],
        arg_name='limit',
        required=True
    )
    raw_response = client.passive_dns_data(ip, limit)
    records = raw_response['data']
    # Flatten each record's attributes next to its id for the markdown table.
    table = [{'id': record['id'], **record['attributes']} for record in records]
    return CommandResults(
        f'{INTEGRATION_ENTRY_CONTEXT}.PassiveDNS',
        'id',
        readable_output=tableToMarkdown(
            f'Passive DNS data for IP {ip}',
            table,
            headers=['id', 'date', 'host_name', 'ip_address', 'resolver'],
            removeNull=True,
            headerTransform=underscoreToCamelCase
        ),
        outputs=records,
        raw_response=raw_response
    )
def search_command(client: Client, args: dict) -> CommandResults:
    """
    1 API Call
    """
    query = args['query']
    limit = arg_to_number_must_int(args.get('limit'), 'limit', required=True)
    raw_response = client.search(query, limit)
    data = raw_response.get('data', [])
    if not argToBoolean(args.get('extended_data')):
        # Trim the response to the commonly-used fields.
        data = decrease_data_size(data)
    readable = tableToMarkdown(
        f'Search result of query {query}',
        [entry.get('attributes') for entry in data],
        removeNull=True,
        headerTransform=underscoreToCamelCase
    )
    return CommandResults(
        f'{INTEGRATION_ENTRY_CONTEXT}.SearchResults',
        'id',
        readable_output=readable,
        outputs=data,
        raw_response=raw_response
    )
def get_analysis_command(client: Client, args: dict) -> CommandResults:
"""
1 API Call
"""
analysis_id = args['id']
raw_response = client.get_analysis(analysis_id)
data = raw_response.get('data', {})
if not argToBoolean(args.get('extended_data', False)):
data = decrease_data_size(data)
return CommandResults(
f'{INTEGRATION_ENTRY_CONTEXT}.Analysis',
'id',
readable_output=tableToMarkdown(
'Analysis results:',
{
**data.get('attributes', {}),
'id': analysis_id
},
headers=['id', 'stats', 'status'],
headerTransform=underscoreToCamelCase
),
outputs={
**raw_response,
'id': analysis_id
},
raw_response=raw_response
| |
# <gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ____
# __ _ ___ ____ / __/__ __ __
# / ' \/ _ `/ _ \_\ \/ _ \/ // /
#/_/_/_/\_,_/_//_/___/ .__/\_, /
# /_/ /___/
#
# Auteur : <NAME>
# Outil : ManSpy
# Usage : ./manspy.py 'exemple.com' (ou) python manspy.py 'exemple.com'.
# La description : cet outil permet de automatiser le processus d'analyse de sécurité à la multitude
# d’outils de sécurité Linux disponibles et certains scripts personnalisés.
# Importer les librairies
import sys
import socket
import subprocess
import os
import time
import signal
import random
import string
import threading
import re
from urlparse import urlsplit
# Elapsed scan-time formatting helpers
# Units used when rendering an elapsed duration, largest first.
intervals = (
    ('h', 3600),
    ('m', 60),
    ('s', 1),
)
def display_time(seconds, granularity=3):
    """Format *seconds* as e.g. '1h 2m 3s', keeping at most *granularity* parts.

    One second is added before formatting (preserves the original scan-timer
    rounding behaviour, so 0 renders as '1s').
    """
    remaining = seconds + 1
    parts = []
    for unit, unit_seconds in intervals:
        amount = remaining // unit_seconds
        if amount:
            remaining -= amount * unit_seconds
            parts.append("{}{}".format(amount, unit))
    return ' '.join(parts[:granularity])
def url_maker(url):
    """Normalise *url* to a bare host name (scheme added if missing, leading 'www.' stripped)."""
    if not re.match(r'http(s?)\:', url):
        url = 'http://' + url
    host = urlsplit(url).netloc
    return host[4:] if host.startswith('www.') else host
def verifier_internet():
    """Return 1 when a single ICMP ping to google.com succeeds, 0 otherwise.

    Shells out to `ping`, captures its output in a scratch file (ms_net),
    greps for "0% packet loss", then removes the scratch file.
    """
    os.system('ping -c1 google.com > ms_net 2>&1')
    if "0% packet loss" in open('ms_net').read():
        val = 1
    else:
        val = 0
    os.system('rm ms_net > /dev/null 2>&1')
    return val
# ANSI colour / background escape sequences used for terminal output.
class bcolors:
    HEADER = '\033[95m'
    TBLUE = '\033[94m'
    TGREEN = '\033[92m'
    TLRED = '\033[91m'
    WARNING = '\033[93m'
    BADFAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    BG_YLL = "\033[103m"
    BG_LB = "\033[105m"
    BG_Cyan = '\033[46m'
    BG_ERR_TXT = '\033[41m'  # for critical errors and crashes
    BG_HEAD_TXT = '\033[100m'
    BG_ENDL_TXT = '\033[46m'
    BG_CRIT_TXT = '\033[45m'
    BG_HIGH_TXT = '\033[41m'
    BG_MED_TXT = '\033[43m'
    BG_LOW_TXT = '\033[44m'
    BG_INFO_TXT = '\033[42m'
# Classifies the severity of a vulnerability.
def vul_info(val):
    """Return a colour-coded severity label for a one-letter severity code.

    'c' = critical, 'e' = high, 'm' = medium, 'f' = low; anything else is
    rendered as an informational label.
    """
    labels = {
        'c': bcolors.BG_CRIT_TXT + " critique " + bcolors.ENDC,
        'e': bcolors.BG_HIGH_TXT + " élevé " + bcolors.ENDC,
        'm': bcolors.BG_MED_TXT + " moyen " + bcolors.ENDC,
        'f': bcolors.BG_LOW_TXT + " faible " + bcolors.ENDC,
    }
    return labels.get(val, bcolors.BG_INFO_TXT + " info " + bcolors.ENDC)
# Index markers: coloured bullets indicating the expected scan duration.
proc_haut = bcolors.BADFAIL + "●" + bcolors.ENDC  # long / unpredictable scan
proc_med = bcolors.WARNING + "●" + bcolors.ENDC  # scan under ~10 minutes
proc_fible = bcolors.TGREEN + "●" + bcolors.ENDC  # scan of a minute or two
# Ties a vulnerability to its threat level and prints definition/remediation.
def vul_as_info(v1, v2, v3):
    """Print the threat level, definition and remediation of a finding.

    v1: index into rep_outil (tool-report table, defined elsewhere in this file)
    v2: one-letter severity code understood by vul_info
    v3: 1-based index into outils_correctifs (fix table, defined elsewhere)
    """
    print (bcolors.BOLD + "Niveau de menace de vulnérabilité" + bcolors.ENDC)
    print ("\t" + vul_info(v2) + " " + bcolors.WARNING + str(rep_outil[v1][0]) + bcolors.ENDC)
    print (bcolors.BOLD + "Définition de la vulnérabilité" + bcolors.ENDC)
    print ("\t" + bcolors.BADFAIL + str(outils_correctifs[v3 - 1][1]) + bcolors.ENDC)
    print (bcolors.BOLD + "Assainissement de la vulnérabilité" + bcolors.ENDC)
    print ("\t" + bcolors.TGREEN + str(outils_correctifs[v3 - 1][2]) + bcolors.ENDC)
# ManSpy Help
def helper():
    """Print usage, interactive keys, duration legend and severity legend (in French)."""
    print (bcolors.TBLUE + "Les informations:" + bcolors.ENDC)
    print ("------------")
    print ("\t./manSpy.py exemple.com: analyse le domaine 'exemple.com'")
    print ("\t./manSpy.py --help : Affiche ce contexte d'aide.")
    print (bcolors.TBLUE + "Interactives:" + bcolors.ENDC)
    print ("------------")
    print (bcolors.TLRED +"\tCtrl+C:"+bcolors.ENDC+" Ignore le test en cours.")
    print (bcolors.TLRED +"\tCtrl+Z:"+bcolors.ENDC+" Quitte ManSpy.")
    print (bcolors.TBLUE + "Les index:" + bcolors.ENDC)
    print ("--------")
    print ("\t[" + proc_haut + "]: Le processus de numérisation peut prendre plus de temps (non prévisible).")
    print ("\t[" + proc_med + "]: Le processus de numérisation peut prendre moins de 10 minutes.")
    print ("\t[" + proc_fible + "]: Le processus de numérisation peut prendre moins d’une minute ou deux.")
    print (bcolors.BG_Cyan + "Les informations de vulnérabilité" + bcolors.ENDC)
    print ("--------------------------")
    print ("\t" + vul_info(
        'c') + ": A besion une attention immédiate car cela peut entraîner des compromissions ou une indisponibilité du service.")
    print ("\t" + vul_info(
        'e') + " : Peut ne pas conduire à un compromis immédiat, mais les chances de probabilité sont grandes.")
    print ("\t" + vul_info(
        'm') + " : L'attaquant peut mettre en corrélation plusieurs vulnérabilités de ce type pour lancer une attaque sophistiquée.")
    print ("\t" + vul_info('f') + " : Pas un problème grave, mais il est recommandé d'assister à la conclusion.")
    print ("\t" + vul_info(
        'i') + " : Ne pas classé comme une vulnérabilité,tout simplement une alerte informationnelle utile à prendre en compte.\n")
# Erase the previous terminal line (cursor up + clear to end of line).
def clear():
    sys.stdout.write("\033[F\033[K")
# ManSpy Logo — prints the ASCII-art banner in the warning color.
def logo():
    """Print the ManSpy ASCII-art banner and credits to stdout."""
    print (bcolors.WARNING)
    print("""\
 _____ ______ ________ ________ ________ ________ ___ ___
|\ _ \ _ \|\ __ \|\ ___ \|\ ____\|\ __ \|\ \ / /|
\ \ \\\__\ \ \ \ \|\ \ \ \\ \ \ \ \___|\ \ \|\ \ \ \/ / /
 \ \ \\|__| \ \ \ __ \ \ \\ \ \ \_____ \ \ ____\ \ / /
  \ \ \ \ \ \ \ \ \ \ \ \\ \ \|____|\ \ \ \___|\/ / /
   \ \__\ \ \__\ \__\ \__\ \__\\ \__\____\_\ \ \__\ __/ / /
    \|__| \|__|\|__|\|__|\|__| \|__|\_________\|__||\___/ /
                                     \|_________| \|___|/
 """ + bcolors.TLRED + """(<NAME> - <NAME> - <NAME> '4isi')
 """)
    print (bcolors.ENDC)
class Spinner:
    """Console busy-indicator animated by a background thread (Python 2 code)."""
    occupe = False  # True while the spinner thread should keep animating
    retard = 0.05   # delay between animation frames, in seconds
    @staticmethod
    def spinning_cursor():
        # Endless generator cycling through the cursor animation frames.
        while 1:
            for cursor in '|/\\': yield cursor #←↑↓→
            #for cursor in '←↑↓→': yield cursor  # !! display problem !!
    def __init__(self, retard=None):
        self.spinner_generator = self.spinning_cursor()
        # NOTE: a retard of 0 is falsy and therefore ignored (default kept).
        if retard and float(retard): self.retard = retard
    def spinner_task(self):
        # Worker-thread loop: draw a frame, wait, backspace over it, repeat.
        try:
            while self.occupe:
                #sys.stdout.write(next(self.spinner_generator))
                print bcolors.BG_ERR_TXT+next(self.spinner_generator)+bcolors.ENDC,
                sys.stdout.flush()
                time.sleep(self.retard)
                sys.stdout.write('\b')
                sys.stdout.flush()
        except (KeyboardInterrupt, SystemExit):
            #clear()
            print "\n\t"+ bcolors.BG_ERR_TXT+"ManSpy à reçu une série des clicks sur Ctrl + C. Quitter..." +bcolors.ENDC
            sys.exit(1)
    def start(self):
        # Launch the animation on a background thread.
        self.occupe = True
        threading.Thread(target=self.spinner_task).start()
    def stop(self):
        try:
            self.occupe = False
            # Give the worker one frame interval to notice the flag and exit.
            time.sleep(self.retard)
        except (KeyboardInterrupt, SystemExit):
            #clear()
            print "\n\t"+ bcolors.BG_ERR_TXT+"ManSpy à reçu une série des clicks sur Ctrl + C. Quitter..." +bcolors.ENDC
            sys.exit(1)
spinner = Spinner()
noms_outils = [
["host", "host - Vérifie l'existence d'une adresse IPV6.", "host", 1],
["aspnet_config_err", "ASP.Net Misconfiguration - Vérifie si ASP.Net Misconfiguration.", "wget", 1],
["wp_check", "WordPress Checker - Vérifie l'installation de WordPress.", "wget", 1],
["drp_check", "Drupal Checker - Vérifie l’installation de Drupal.", "wget", 1],
["joom_check", "Joomla Checker - Vérifie l’installation de Joomla.", "wget", 1],
["uniscan", "Uniscan - Vérifie les fichiers robots.txt et sitemap.xml", "uniscan", 1],
["wafw00f", "Wafw00f - Vérifications des pare-feu applicatifs.", "wafw00f", 1],
["nmap", "Nmap - Analyse rapide [seulement quelques vérifications de ports] "," nmap ", 1],
["theharvester", "The Harvester - Analyse les emails en utilisant la recherche passive de Google.", "theharvester", 1],
["dnsrecon", "DNSRecon - tente plusieurs transferts de zone sur des serveurs de noms.", "dnsrecon", 1],
["féroce", "Féroce - Tentatives de transfert de zone [Pas de force brutale]", "féroce", 1],
["dnswalk", "DNSWalk - Tentative de transfert de zone.", "dnswalk", 1],
["whois", "WHOis - Vérifications des informations de contact de l'administrateur.", "whois", 1],
["nmap_header", "Nmap [Vérification du filtre XSS] - Vérifie si l'en-tête de protection XSS est présent.", "nmap", 1],
["nmap_sloris", "Nmap [Slowloris DoS] - Vérifications de la vulnérabilité de déni de service de Slowloris.", "nmap", 1],
["sslyze_hbleed", "SSLyze - Vérifie uniquement la vulnérabilité Heartbleed.", "sslyze", 1],
["nmap_hbleed", "Nmap [Heartbleed] - Vérifie uniquement la vulnérabilité de Heartbleed.", "nmap", 1],
["nmap_poodle", "Nmap [POODLE] - Vérifie uniquement la vulnérabilité du caniche.", "nmap", 1],
["nmap_ccs", "Nmap [Injection OpenSSL CCS] - Vérifie uniquement l'injection CCS.", "nmap", 1],
["nmap_freak", "Nmap [FREAK] - Vérifie uniquement la vulnérabilité de FREAK.", "nmap", 1],
["nmap_logjam", "Nmap [LOGJAM] - Vérifications de la vulnérabilité de LOGJAM.", "nmap", 1],
["sslyze_ocsp", "SSLyze - Vérifie l'agrafage OCSP.", "sslyze", 1],
["sslyze_zlib", "SSLyze - Vérifications de la compression ZLib Deflate.", "sslyze", 1],
["sslyze_reneg", "SSLyze - Vérifie la prise en charge de la renégociation sécurisée et la renégociation du client.", "sslyze", 1],
["sslyze_resum", "SSLyze - Vérifie la prise en charge de la reprise de session avec [ID de session / tickets TLS].", "sslyze", 1],
["lbd", "LBD - Vérifications des équilibreurs de charge DNS / HTTP.", "lbd", 1],
["golismero_dns_malware", "Golismero - Vérifie si le domaine est spoofé ou détourné.", "golismero", 1],
["golismero_heartbleed", "Golismero - Recherche uniquement la vulnérabilité Heartbleed.", "golismero", 1],
["golismero_brute_url_predictables", "Golismero - BruteForces pour certains fichiers du domaine.", "golismero", 1],
["golismero_brute_directories", "Golismero - BruteForces pour certains répertoires du domaine.", "golismero", 1],
["golismero_sqlmap", "Golismero - SQLMap [ne récupère que la bannière DB]", "golismero", 1],
["dirb", "DirB - Brute la cible pour les répertoires | |
# <gh_stars>10-100
import unittest
import numpy as np
import pandas as pd
from powersimdata.design.transmission.upgrade import (
_construct_composite_allow_list,
_find_branches_connected_to_bus,
_find_capacity_at_bus,
_find_first_degree_branches,
_find_stub_degree,
_identify_mesh_branch_upgrades,
_increment_branch_scaling,
get_branches_by_area,
scale_renewable_stubs,
)
from powersimdata.tests.mock_change_table import MockChangeTable
from powersimdata.tests.mock_grid import MockGrid
from powersimdata.tests.mock_scenario import MockScenario
"""
This test network is a ring, with several spurs coming off of it. The central
ring is buses {1, 2, 3, 4}, and has three spurs coming off of it:
bus 2 ----- bus 5 (with a wind generator).
bus 4 ----- bus 6 ----- bus 7 (with two solar generators and one ng)
bus 3 ----- bus 8 (with a wind generator)
"""
# Branch table for the mock grid described in the docstring above:
# a 4-bus ring (buses 1-4) plus three spurs (103, 104, and 107-108).
mock_branch = {
    "branch_id": [101, 102, 103, 104, 105, 106, 107, 108],
    "from_bus_id": [1, 2, 2, 3, 3, 4, 4, 6],
    "to_bus_id": [2, 3, 5, 8, 4, 1, 6, 7],
    "rateA": [0.25, 1, 8, 25, 100, 100, 15, 25],
    "from_lat": [47, 47, 47, 46, 46, 46, 46, 46],
    "from_lon": [122, 122, 122, 122, 122, 123, 123, 124],
    "to_lat": [47, 46, 47, 46, 46, 47, 46, 46],
    "to_lon": [122, 122, 112, 121, 123, 122, 124, 125],
}
# Bus-to-zone mapping; zones are split between Washington and Oregon so the
# area-based tests can exercise internal/bridging/either classification.
mock_bus = {
    "bus_id": [1, 2, 3, 4, 5, 6, 7, 8],
    "zone_id": [
        "Washington",
        "Oregon",
        "Oregon",
        "Washington",
        "Oregon",
        "Washington",
        "Washington",
        "Oregon",
    ],
}
# Generators: wind on the spur buses 5 and 8, solar/ng cluster on bus 7.
mock_plant = {
    "plant_id": ["A", "B", "C", "D", "E", "F", "G"],
    "bus_id": [1, 1, 5, 7, 7, 7, 8],
    "type": ["solar", "coal", "wind", "solar", "solar", "ng", "wind"],
    "Pmax": [15, 30, 10, 12, 8, 20, 15],
}
# Shared grid fixture; note some test classes annotate this object in place.
mock_grid = MockGrid(
    grid_attrs={"branch": mock_branch, "bus": mock_bus, "plant": mock_plant}
)
class TestStubTopologyHelpers(unittest.TestCase):
    """Unit tests for the stub-topology helper functions on the mock grid."""

    def setUp(self):
        self.branch = mock_grid.branch
        self.plant = mock_grid.plant

    def test_find_branches_connected_to_bus_1(self):
        result = _find_branches_connected_to_bus(self.branch, 1)
        self.assertEqual(result, {101, 106})

    def test_find_branches_connected_to_bus_4(self):
        result = _find_branches_connected_to_bus(self.branch, 4)
        self.assertEqual(result, {105, 106, 107})

    def test_find_branches_connected_to_bus_5(self):
        result = _find_branches_connected_to_bus(self.branch, 5)
        self.assertEqual(result, {103})

    def test_find_first_degree_branches_101(self):
        result = _find_first_degree_branches(self.branch, 101)
        self.assertEqual(result, {101, 102, 103, 106})

    def test_find_first_degree_branches_108(self):
        result = _find_first_degree_branches(self.branch, 108)
        self.assertEqual(result, {107, 108})

    def test_find_stub_degree_1(self):
        # Bus 1 is on the ring, so it has no stub branches at all.
        degree, stubs = _find_stub_degree(self.branch, 1)
        self.assertEqual(degree, 0)
        self.assertEqual(stubs, set())

    def test_find_stub_degree_5(self):
        degree, stubs = _find_stub_degree(self.branch, 5)
        self.assertEqual(degree, 1)
        self.assertEqual(stubs, {103})

    def test_find_stub_degree_7(self):
        # Bus 7 hangs off a two-branch spur: 4 -- 6 -- 7.
        degree, stubs = _find_stub_degree(self.branch, 7)
        self.assertEqual(degree, 2)
        self.assertEqual(stubs, {107, 108})

    def test_find_capacity_at_bus_1_solar_tuple(self):
        capacity = _find_capacity_at_bus(self.plant, 1, ("solar",))
        self.assertEqual(capacity, 15)

    def test_find_capacity_at_bus_1_solar_str(self):
        # A bare string type filter behaves the same as a one-element tuple.
        capacity = _find_capacity_at_bus(self.plant, 1, "solar")
        self.assertEqual(capacity, 15)

    def test_find_capacity_at_bus_2_wind(self):
        capacity = _find_capacity_at_bus(self.plant, 2, ("wind",))
        self.assertEqual(capacity, 0)

    def test_find_capacity_at_bus_7_solar(self):
        capacity = _find_capacity_at_bus(self.plant, 7, ("solar",))
        self.assertEqual(capacity, 20)

    def test_find_capacity_at_bus_7_solar_ng(self):
        capacity = _find_capacity_at_bus(self.plant, 7, ("solar", "ng"))
        self.assertEqual(capacity, 40)
class TestGetBranchesByArea(unittest.TestCase):
    """Tests for get_branches_by_area: area classification and input checks."""

    def setUp(self):
        # NOTE: this annotates the shared module-level mock_grid in place;
        # the added columns/attributes are idempotent across tests.
        self.grid = mock_grid
        from_zone_name = [
            self.grid.bus.loc[i, "zone_id"] for i in self.grid.branch.from_bus_id
        ]
        to_zone_name = [
            self.grid.bus.loc[i, "zone_id"] for i in self.grid.branch.to_bus_id
        ]
        self.grid.branch["from_zone_name"] = from_zone_name
        self.grid.branch["to_zone_name"] = to_zone_name
        # Bug fix: id2zone previously read "Wahington" (typo), leaving it
        # inconsistent with zone2id below.
        self.grid.id2zone = {201: "Washington", 202: "Oregon"}
        self.grid.zone2id = {"Washington": 201, "Oregon": 202}

    def test_internal_washington(self):
        branch_idxs = get_branches_by_area(self.grid, {"Washington"}, method="internal")
        assert branch_idxs == {106, 107, 108}

    def test_internal_oregon(self):
        branch_idxs = get_branches_by_area(self.grid, ["Oregon"], method="internal")
        assert branch_idxs == {102, 103, 104}

    def test_internal_multi_state(self):
        branch_idxs = get_branches_by_area(
            self.grid, ("Washington", "Oregon"), "internal"
        )
        assert branch_idxs == {102, 103, 104, 106, 107, 108}

    def test_bridging_washington(self):
        branch_idxs = get_branches_by_area(self.grid, ["Washington"], method="bridging")
        assert branch_idxs == {101, 105}

    def test_bridging_oregon(self):
        branch_idxs = get_branches_by_area(self.grid, {"Oregon"}, method="bridging")
        assert branch_idxs == {101, 105}

    def test_bridging_multi_state(self):
        branch_idxs = get_branches_by_area(
            self.grid, ("Washington", "Oregon"), "bridging"
        )
        assert branch_idxs == {101, 105}

    def test_either_washington(self):
        branch_idxs = get_branches_by_area(self.grid, ("Washington",), method="either")
        assert branch_idxs == {101, 105, 106, 107, 108}

    def test_either_oregon(self):
        branch_idxs = get_branches_by_area(self.grid, ("Oregon",), method="either")
        assert branch_idxs == {101, 102, 103, 104, 105}

    def test_either_multi_state(self):
        branch_idxs = get_branches_by_area(
            self.grid, ("Oregon", "Washington"), "either"
        )
        assert branch_idxs == {101, 102, 103, 104, 105, 106, 107, 108}

    def test_bad_grid_type(self):
        with self.assertRaises(TypeError):
            get_branches_by_area("grid", ["Oregon"], "either")

    def test_bad_area_type(self):
        # A bare string is rejected; areas must come in a collection.
        with self.assertRaises(TypeError):
            get_branches_by_area(self.grid, "Oregon", "either")

    def test_bad_area_name(self):
        with self.assertRaises(ValueError):
            get_branches_by_area(self.grid, ["S"], "internal")

    def test_bad_method_type(self):
        with self.assertRaises(TypeError):
            get_branches_by_area(self.grid, ["Oregon"], ["bridging"])

    def test_bad_method_name(self):
        with self.assertRaises(ValueError):
            get_branches_by_area(self.grid, ["Oregon"], "purple")
class TestIdentifyMesh(unittest.TestCase):
    """Tests for _identify_mesh_branch_upgrades across congestion metrics
    ('branch' default, 'MW', 'MWmiles', 'mean') and allow/deny lists."""

    def setUp(self):
        # Build dummy congu and congl dataframes, containing barrier cruft
        # (tiny baseline values that must NOT register as real congestion).
        num_hours = 100
        branch_indices = mock_branch["branch_id"]
        num_branches = len(branch_indices)
        congu_data = np.ones((num_hours, num_branches)) * 1e-9
        congl_data = np.ones((num_hours, num_branches)) * 1e-10
        columns = mock_branch["branch_id"]
        congu = pd.DataFrame(congu_data, index=range(num_hours), columns=columns)
        congl = pd.DataFrame(congl_data, index=range(num_hours), columns=columns)
        # Populate with dummy data, added in different hours for thorough testing
        # NOTE(review): `congu[101].iloc[...] = ...` is chained assignment; it
        # relies on the column access returning a view and will not mutate the
        # frame under pandas copy-on-write — confirm against pinned pandas.
        # Branch 101 will have frequent, low congestion
        congu[101].iloc[-15:] = 1
        # Branch 102 will have less frequent, but greater congestion
        congu[102].iloc[:8] = 6
        # Branch 103 will have only occasional congestion, but very high
        congu[103].iloc[10:13] = 20
        congl[103].iloc[20:23] = 30
        # Branch 105 will have extremely high congestion in only one hour
        congl[105].iloc[49] = 9000
        # Build dummy change table
        ct = {"branch": {"branch_id": {b: 1 for b in branch_indices}}}
        # Finally, combine all of this into a MockScenario
        self.mock_scenario = MockScenario(
            grid_attrs={
                "branch": mock_branch,
                "bus": mock_bus,
                "plant": mock_plant,
            },
            congu=congu,
            congl=congl,
            ct=ct,
        )

    # These tests use the default 'branch' ranking: [103, 102, 101]
    def test_identify_mesh_branch_upgrades_default(self):
        # Not enough branches
        with self.assertRaises(ValueError):
            _identify_mesh_branch_upgrades(self.mock_scenario)

    def test_identify_mesh_branch_upgrades_n_4(self):
        # Not enough congest branches (barrier cruft values don't count)
        with self.assertRaises(ValueError):
            _identify_mesh_branch_upgrades(self.mock_scenario, upgrade_n=4)

    def test_identify_mesh_branch_upgrades_n_3(self):
        expected_return = {101, 102, 103}
        branches = _identify_mesh_branch_upgrades(self.mock_scenario, upgrade_n=3)
        self.assertEqual(branches, expected_return)

    def test_identify_mesh_branch_upgrades_n_2(self):
        expected_return = {102, 103}
        branches = _identify_mesh_branch_upgrades(self.mock_scenario, upgrade_n=2)
        self.assertEqual(branches, expected_return)

    def test_identify_mesh_branch_upgrades_quantile90(self):
        # Fewer branches are congested for >= 10% of the time
        expected_return = {101}
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, upgrade_n=1, quantile=0.9
        )
        self.assertEqual(branches, expected_return)

    # These tests use the 'MW' ranking: [102, 101, 103]
    # This happens because 101 is very small, 102 is small (compared to 103)
    def test_identify_mesh_MW_n_3(self):  # noqa: N802
        expected_return = {101, 102, 103}
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, upgrade_n=3, cost_metric="MW"
        )
        self.assertEqual(branches, expected_return)

    def test_identify_mesh_MW_n_2(self):  # noqa: N802
        expected_return = {101, 102}
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, upgrade_n=2, cost_metric="MW"
        )
        self.assertEqual(branches, expected_return)

    def test_identify_mesh_MW_n_2_allow_list(self):  # noqa: N802
        # 101 is excluded by the allow list, so 103 takes its slot.
        expected_return = {102, 103}
        allow_list = {102, 103, 104}
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, upgrade_n=2, cost_metric="MW", allow_list=allow_list
        )
        self.assertEqual(branches, expected_return)

    def test_identify_mesh_MW_n_2_deny_list(self):  # noqa: N802
        # 102 is denied, so 103 takes its slot.
        expected_return = {101, 103}
        deny_list = [102, 105]
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, upgrade_n=2, cost_metric="MW", deny_list=deny_list
        )
        self.assertEqual(branches, expected_return)

    def test_identify_mesh_MW_n_1(self):  # noqa: N802
        expected_return = {102}
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, upgrade_n=1, cost_metric="MW"
        )
        self.assertEqual(branches, expected_return)

    # These tests use the 'MWmiles' ranking: [101, 102, 103]
    # This happens because 101 is zero-distance, 102 is short (compared to 103)
    def test_identify_mesh_MWmiles_n_3(self):  # noqa: N802
        expected_return = {101, 102, 103}
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, upgrade_n=3, cost_metric="MWmiles"
        )
        self.assertEqual(branches, expected_return)

    def test_identify_mesh_MWmiles_n_2(self):  # noqa: N802
        expected_return = {101, 102}
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, upgrade_n=2, cost_metric="MWmiles"
        )
        self.assertEqual(branches, expected_return)

    def test_identify_mesh_MWmiles_n_1(self):  # noqa: N802
        expected_return = {101}
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, upgrade_n=1, cost_metric="MWmiles"
        )
        self.assertEqual(branches, expected_return)

    def test_identify_mesh_mean(self):
        # Not enough branches
        with self.assertRaises(ValueError):
            _identify_mesh_branch_upgrades(self.mock_scenario, congestion_metric="mean")

    def test_identify_mesh_mean_n_4_specify_quantile(self):
        # `quantile` is incompatible with the 'mean' congestion metric.
        with self.assertRaises(ValueError):
            _identify_mesh_branch_upgrades(
                self.mock_scenario, congestion_metric="mean", upgrade_n=4, quantile=0.99
            )

    def test_identify_mesh_mean_n_4(self):
        expected_return = {101, 102, 103, 105}
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, congestion_metric="mean", upgrade_n=4
        )
        self.assertEqual(branches, expected_return)

    def test_identify_mesh_mean_n_3(self):
        expected_return = {102, 103, 105}
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, congestion_metric="mean", upgrade_n=3
        )
        self.assertEqual(branches, expected_return)

    def test_identify_mesh_mean_n_2(self):
        expected_return = {103, 105}
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, congestion_metric="mean", upgrade_n=2
        )
        self.assertEqual(branches, expected_return)

    def test_identify_mesh_mean_n_1(self):
        expected_return = {105}
        branches = _identify_mesh_branch_upgrades(
            self.mock_scenario, congestion_metric="mean", upgrade_n=1
        )
        self.assertEqual(branches, expected_return)

    # What about a made-up method?
    def test_identify_mesh_bad_method(self):
        with self.assertRaises(ValueError):
            _identify_mesh_branch_upgrades(
                self.mock_scenario, upgrade_n=2, cost_metric="does not exist"
            )
class TestConstructCompositeAllowlist(unittest.TestCase):
    """Tests for _construct_composite_allow_list validation and composition."""

    def test_none_none(self):
        # With neither list given, every branch id is allowed.
        branch_ids = mock_branch["branch_id"].copy()
        result = _construct_composite_allow_list(branch_ids, None, None)
        self.assertEqual(result, set(mock_branch["branch_id"]))

    def test_good_allow_list(self):
        allow_list = list(range(101, 105))
        branch_ids = mock_branch["branch_id"].copy()
        result = _construct_composite_allow_list(branch_ids, allow_list, None)
        self.assertEqual(result, set(allow_list))

    def test_good_deny_list(self):
        # A deny list yields the complement within the known branch ids.
        deny_list = list(range(101, 105))
        branch_ids = mock_branch["branch_id"].copy()
        result = _construct_composite_allow_list(branch_ids, None, deny_list)
        self.assertEqual(result, set(range(105, 109)))

    def test_allow_list_and_deny_list_failure(self):
        # Supplying both lists at once is rejected.
        allow_list = list(range(101, 105))
        deny_list = list(range(105, 109))
        with self.assertRaises(ValueError):
            _construct_composite_allow_list(
                mock_branch["branch_id"].copy(), allow_list, deny_list
            )

    def test_bad_allow_list_value(self):
        # 109 is not a known branch id.
        allow_list = list(range(101, 110))
        with self.assertRaises(ValueError):
            _construct_composite_allow_list(
                mock_branch["branch_id"].copy(), allow_list, None
            )

    def test_bad_allow_list_entry_type(self):
        allow_list = [str(i) for i in range(101, 105)]
        with self.assertRaises(ValueError):
            _construct_composite_allow_list(
                mock_branch["branch_id"].copy(), allow_list, None
            )

    def test_bad_deny_list_value(self):
        # 109 is not a known branch id.
        deny_list = list(range(108, 110))
        with self.assertRaises(ValueError):
            _construct_composite_allow_list(
                mock_branch["branch_id"].copy(), None, deny_list
            )

    def test_bad_deny_list_type(self):
        with self.assertRaises(TypeError):
            _construct_composite_allow_list(
                mock_branch["branch_id"].copy(), None, "108"
            )
class TestIncrementBranch(unittest.TestCase):
def setUp(self):
self.ct = {
# These data aren't used, but we make sure they don't get changed.
"demand": {"zone_id": {"Washington": 1.1, "Oregon": 1.2}},
"solar": {"zone_id": {"Washington": 1.5, "Oregon": 1.7}},
"wind": {"zone_id": {"Oregon": 1.3, "Washington": 2.1}},
}
self.ref_scenario = MockScenario(
grid_attrs={
"branch": mock_branch,
"bus": mock_bus,
| |
self._exclude = exclude
self._pattern = pattern
self._ratio = ratio
self._hass = hass
self._ui = ui
self._tasks = {}
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
self._dic_friendly_name = {}
self._dic_operation_en_to_cn = {
'on':'开',
'off':'关',
'temporary_on':'关⇌开',
'temporary_off': '开⇌关',
'custom:*': '调服务'
}
self._dic_operation_cn_to_en = {v : k for k, v in self._dic_operation_en_to_cn.items()}
self._dic_domain_en_to_cn = {
'light': '灯',
'switch': '开关',
'input_boolean': '二元选择器',
'automation': '自动化',
'script': '脚本'
}
self._dic_domain_cn_to_en = {v : k for k, v in self._dic_domain_en_to_cn.items()}
self._dic_icon = {'light': 'mdi:lightbulb', 'switch': 'mdi:toggle-switch', 'automation': 'mdi:playlist-play', 'script': 'mdi:script', 'input_boolean': 'mdi:toggle-switch'}
self._domain = None
self._entity_id = None
self._queue = DelayQueue(60) # create a queue
self._running_tasks = None
self._running_tasks_ids = None
self._last_running_tasks_ids = None
self._info_config = info_config
    def refresh_ui(self):
        """Re-render the operation selector for the currently selected entity.

        Appears to work around the input_select not refreshing when set to an
        unchanged option: it first selects a dummy option ('开'), then the real
        one — TODO confirm this double-set is still needed.
        """
        _LOGGER.debug('refresh_ui()')
        if self._entity_id is None:
            return
        task = self._get_task(self._entity_id)
        self.set_state(self._ui[UI_INPUT_OPERATION], state = {'option': '开'}, service = 'select_option', context = CONTEXT_IGNORE)
        self.set_state(self._ui[UI_INPUT_OPERATION], state = {'option': self.get_operation(task = task)}, service = 'select_option', context = CONTEXT_IGNORE)
@asyncio.coroutine
def start(self):
"""prepare task list and default ui. """
pattern = re.compile(self._pattern)
states = self._hass.states.async_all()
data = yield from self._store.async_load() # load task config from disk
tasks = {
user_dict['entity_id']: {'entity_id':user_dict['entity_id'],'duration':user_dict.get('duration','0:00:00'),'operation':user_dict.get('operation','off'),'count':user_dict.get('count',0),'ratio':user_dict.get('ratio',self._ratio)} for user_dict in data['tasks'] if 'entity_id' in user_dict
} if data else {}
# _LOGGER.debug('[start()] load task config: <tasks=%s>',tasks)
for state in states:
domain = state.domain
object_id = state.object_id
entity_id = '{}.{}'.format(domain, object_id)
if domain not in self._domains or entity_id in self._exclude:
pass
else:
friendly_name = state.name
if not self._pattern or pattern.search(friendly_name):
_LOGGER.debug("添加设备:{}({})".format(friendly_name, entity_id))
self._dic_friendly_name.setdefault(friendly_name, entity_id)
self._tasks.setdefault(domain,{}).setdefault(entity_id,{})
self._tasks[domain][entity_id]['friendly_name'] = friendly_name
self._tasks[domain][entity_id]['icon'] = self.get_attributes(entity_id).get('icon', self._dic_icon[domain])
self._tasks[domain][entity_id]['entity_id'] = entity_id
self._tasks[domain][entity_id]['remaining'] = '0:00:00'
self._tasks[domain][entity_id]['handle'] = None
self._tasks[domain][entity_id]['next_operation'] = None
if tasks.get(entity_id) is not None:
self._tasks[domain][entity_id]['duration'] = tasks.get(entity_id).get('duration')
self._tasks[domain][entity_id]['operation'] = tasks.get(entity_id).get('operation')
self._tasks[domain][entity_id]['count'] = tasks.get(entity_id).get('count')
self._tasks[domain][entity_id]['ratio'] = tasks.get(entity_id).get('ratio')
else:
self._tasks[domain][entity_id]['duration'] = '0:00:00'
self._tasks[domain][entity_id]['operation'] = 'on' if domain == 'autonmation' or domain == 'script' else 'off'
self._tasks[domain][entity_id]['count'] = 0
self._tasks[domain][entity_id]['ratio'] = self._ratio
else:
_LOGGER.debug("忽略设备:{}({})".format(friendly_name, entity_id))
options = [self._dic_domain_en_to_cn.get(d) for d in self._tasks.keys()]
options.insert(0,'---请选择设备类型---')
data = {
'entity_id':self._ui[UI_INPUT_DOMAIN],
'options': options
}
self._hass.async_add_job(self._hass.services.async_call('input_select', SERVICE_SET_OPTIONS, data))
async_track_time_change(self._hass, self.update) # update every second
@asyncio.coroutine
def save_tasks(self):
"""save task config to disk"""
tasks = [
{
'entity_id': attrs['entity_id'],
'duration': attrs['duration'],
'operation': attrs['operation'],
'count': attrs['count'],
'ratio': attrs['ratio']
}
for entities in self._tasks.values() for entity_id, attrs in entities.items() if attrs['duration'] != '0:00:00' or attrs['count'] != 0
]
# _LOGGER.debug('[stop()] save task config: <tasks=%s>',tasks)
if not tasks:
return
data = {
'tasks':tasks
}
yield from self._store.async_save(data)
def choose_domain(self, domain):
"""refresh entity input list """
domain = self._dic_domain_cn_to_en.get(domain, domain)
self._domain = domain
if domain == '---请选择设备类型---':
options = ['---请选择设备---']
else:
entities = [attrs for entity_id, attrs in self._tasks[domain].items() ]
options = [self._get_task(entity['entity_id'])['friendly_name'] for entity in sorted(entities, key = operator.itemgetter('count'), reverse = True)] #show friendly_name
options.insert(0,'---请选择设备---')
self.set_options(self._ui[UI_INPUT_ENTITY], options)
# self.set_options(self._ui[UI_INPUT_OPERATION], DEFAULT_OPERATION_OPTIONS)
    def choose_entity(self, friendly_name):
        """Load the selected entity's task parameters into the control UI.

        Selecting the placeholder clears the panel; otherwise the duration,
        switch state and operation selector reflect the task's current state.
        """
        # self.set_state(self._ui[UI_INPUT_OPERATION], state = '-', force_update = True, context = CONTEXT_IGNORE)
        if friendly_name == '---请选择设备---':
            # Placeholder chosen: reset the panel to its defaults.
            self._entity_id = None
            self.set_state(self._ui[UI_INPUT_DURATION], state= '0:00:00')
            self.set_state(self._ui[UI_SWITCH], state = 'off')
            self.set_options(self._ui[UI_INPUT_OPERATION], DEFAULT_OPERATION_OPTIONS)
            self.set_state(self._ui[UI_INPUT_OPERATION], state = {'option': '关'}, service = 'select_option', context = CONTEXT_IGNORE)
        else:
            entity_id = self._entity_id = self._dic_friendly_name.get(friendly_name, None)
            task = self._get_task(entity_id)
            if task is None:
                _LOGGER.info("Function choose_entity: friendly_name not found in dic !")
                return
            remaining_time = self._queue.get_remaining_time(task['handle'])
            # task's running
            if remaining_time is not None:
                duration = str(remaining_time)
                self.set_state(self._ui[UI_INPUT_DURATION], state= duration)
                self.set_state(self._ui[UI_SWITCH], state = 'on')
            else:
                # Idle: show leftover time if any, else the configured duration.
                duration = task['remaining'] if task['remaining'] != '0:00:00' else task['duration']
                self.set_state(self._ui[UI_INPUT_DURATION], state= duration)
                self.set_state(self._ui[UI_SWITCH], state = 'off')
            options = ['开','关','开⇌关 [1:{}]'.format(task['ratio']),'关⇌开 [1:{}]'.format(task['ratio']), '调服务']
            self.set_options(self._ui[UI_INPUT_OPERATION], options)
            self.set_state(self._ui[UI_INPUT_OPERATION], state = {'option': self.get_operation(task = task)}, service = 'select_option', context = CONTEXT_IGNORE)
def choose_operation(self, operation, context = None):
""" set operation param """
task = self._get_task(self._entity_id)
if task is None:
_LOGGER.debug("no entity selected, pass.")
return
# save operation if task is not running, besides setting options will cause a operation change.
if self.get_state(self._ui[UI_SWITCH]) == 'off' and context != CONTEXT_IGNORE:
task['operation'] = self.get_operation(ui_operation = operation)
    def switch(self, state, context = None):
        """Start ('on') or stop ('off') the countdown task for the selection.

        Starting inserts the entity into the delay queue and, for the loop
        ('temporary_*') operations, immediately drives the entity to the
        opposite state. Stopping removes it from the queue and restores the
        control-panel UI.
        """
        # _LOGGER.debug('switch()')
        if self._domain != '---请选择设备类型---':
            entity_id = self._entity_id
            task = self._get_task(self._entity_id)
            if task is None:
                _LOGGER.debug("未选择设备/未找到对应entity_id")
                self.set_state(self._ui[UI_SWITCH], state = 'off')
                return
            else:
                duration = self.get_state(self._ui[UI_INPUT_DURATION])
                operation = self.get_operation(ui_operation = self.get_state(self._ui[UI_INPUT_OPERATION]))
                if duration == '0:00:00':
                    # Nothing to schedule without a duration.
                    return
                # start timer
                if state == 'on':
                    task['count'] += 1
                    if task['handle'] is None:
                        if task['remaining'] != duration:
                            task['duration'] = duration # set duration attr
                        task['handle'] = self._queue.insert(entity_id, duration, self.handle_task, operation = operation) # initialize queue task
                        task['operation'] = operation #set operation attr
                        # sync state for loop task
                        if 'temporary' in operation:
                            task['next_operation'] = operation.split('_')[1] # set next_operation attr, used in info panenl to show state
                            # Drive the entity opposite to its target end state.
                            state = 'off' if task['next_operation'] == 'on' else 'on'
                            self.set_state(entity_id, service = 'turn_'+state, force_update = True)
                            #service.call()
                        else:
                            task['next_operation'] = operation
                        task['exec_time'] = datetime.now() + self._queue.get_remaining_time(task['handle'])
                # stop timer
                else:
                    self._queue.remove(task['handle'])
                    task['handle'] = None
                    task['next_operation'] = None
                    if 'temporary' in task['operation']:
                        task['remaining'] = '0:00:00'
                    else:
                        task['remaining'] = duration
                        self.set_state(self._ui[UI_INPUT_DURATION], state = task['duration']) # reset control panel ui
                    _LOGGER.debug("---switch---")
                    self.set_state(self._ui[UI_INPUT_OPERATION], state = self.get_operation(task = task)) # reset control panel ui
        else:
            _LOGGER.debug("no device type selected")
            self.set_state(self._ui[UI_SWITCH], state = 'off')
        if context != CONTEXT_IGNORE: # UI_SWITCH will be reset after a task finish, this shouldn't trigger a update twice
            self._hass.async_add_job(self.update_info) # refresh info panel
def _get_task(self, entity_id):
""" return task base info """
if entity_id is None:
return None
domain = entity_id.split('.')[0]
if self._tasks.get(domain, None) is not None:
return self._tasks.get(domain, None).get(entity_id, None)
else:
return None
def get_operation(self,ui_operation = None, task = None):
""" transform operation string between ui and task"""
if ui_operation is not None:
# _LOGGER.debug("get_operation from ui:{}|{}".format(ui_operation,self._dic_operation.get(ui_operation.split(' ')[0])))
return self._dic_operation_cn_to_en.get(ui_operation.split(' ')[0])
if task is not None:
if 'custom:' in task['operation']:
return '调服务'
if task['operation'] in ['on','off']:
# _LOGGER.debug("get_operation from task:{}|{}".format(task['operation'],self._dic_operation.get(task['operation'])))
return self._dic_operation_en_to_cn.get(task['operation'])
else:
# _LOGGER.debug("get_operation from task:{}|{}".format(task['operation'],'{} [1:{}]'.format(self._dic_operation.get(task['operation']),task['ratio'])))
return '{} [1:{}]'.format(self._dic_operation_en_to_cn.get(task['operation']),task['ratio'])
else:
return '关'
def get_state(self, entity_id):
""" return entity state. """
state = self._hass.states.get(entity_id)
if state:
return state.as_dict()['state']
else:
return None
def get_attributes(self, entity_id):
""" return entity attributes. """
state = self._hass.states.get(entity_id)
if state:
return state.as_dict().get('attributes',{})
else:
return None
    def set_state(self, entity_id, state = None, attributes = None, service = None, force_update = False, context = None):
        """Set an entity's state, either directly or via a service call.

        Without *service*, the state machine is written directly (attributes
        merged over the existing ones). With *service*, the corresponding
        domain service is scheduled; *state* is then treated as extra service
        data merged into the call payload.
        """
        if context is None:
            context = CONTEXT
        if service is None:
            # Direct state-machine write.
            _LOGGER.debug("[set_state] state machine: entity_id= {}, from {} to {}, context = {}".format(entity_id, self.get_state(entity_id), state, context))
            attr = self.get_attributes(entity_id)
            if attributes is not None:
                attr.update(attributes)
            self._hass.states.async_set(entity_id, state, attr, force_update = force_update, context = context)
        else:
            domain = entity_id.split('.')[0]
            _LOGGER.debug('[set_state] call service: entity_id =%s, context = %s',entity_id, context)
            # unused, after 0.78.0 fixed.
            # attr = self.get_attributes(entity_id)
            # if attributes is not None:
            #     attr.update(attributes)
            # change state directly with a context identification since call service can't pass context in code.
            # self._hass.states.async_set(entity_id, state, attr, force_update = force_update, context = CONTEXT)
            data = {'entity_id': entity_id}
            if state is not None:
                # Here *state* is a dict of extra service data (e.g. {'option': ...}).
                data.update(state)
            # call service to controll device
            # self._hass.services.async_call(domain, service, data, blocking = True, context = context )
            self._hass.async_add_job(self._hass.services.async_call(domain, service, data, context = context ))
def set_options(self, entity_id, options, current_option = None, context = CONTEXT_IGNORE):
""" set options for input select """
domain = entity_id.split('.')[0]
if domain != 'input_select':
_LOGGER.debug('wrong service')
return
data = {'entity_id': entity_id,'options': options}
if current_option is not None:
data['current_option'] = current_option
self._hass.async_add_job(self._hass.services.async_call(domain, SERVICE_SET_OPTIONS, data , blocking = True, context = context)) # set blocking to wait till service is executed
@callback
def update(self, time):
""" queue step forward and refresh timer display.
define callback to run in main thread.
"""
self._queue.next() # queue handler
# refresh timer display when task is running
if self.get_state(self._ui[UI_SWITCH]) == 'on':
entity_id = self._entity_id
if entity_id is None:
_LOGGER.info("Function task: friendly_name(%s) not found in dic !", entity_id)
return
task = self._get_task(entity_id)
remaining_time = self._queue.get_remaining_time(task['handle'])
# task finish
if remaining_time is None:
remaining_time = task['remaining']
if remaining_time == '0:00:00':
self.set_state(self._ui[UI_INPUT_DURATION], state = task['duration'])
else:
self.set_state(self._ui[UI_INPUT_DURATION], state = str(remaining_time))
self.set_state(self._ui[UI_SWITCH], state = 'off', context = CONTEXT_IGNORE)
# task waite
else:
self.set_state(self._ui[UI_INPUT_DURATION], state | |
>>> _ = plt.title('Using `fftcoef` for FFT curve fit')
>>> _ = plt.xlabel('Time (s)')
"""
if coef not in ("mag", "ab", "complex"):
raise ValueError(
f"invalid `coef` ({coef!r}). Must be one of 'mag', 'ab', or 'complex'."
)
x = np.atleast_1d(x)
n = x.shape[axis]
if isinstance(window, (str, tuple)):
window = signal.get_window(window, n)
else:
window = np.atleast_1d(window)
if len(window) != n:
raise ValueError(
f"window size is {len(window)}; expected {n} to match signal"
)
window *= n / window.sum()
if not (axis == -1 or axis == x.ndim - 1):
# Move the axis containing the data to the end
x = np.swapaxes(x, axis, x.ndim - 1)
window = _vector_to_axis(window, x.ndim, -1)
if dodetrend:
x = signal.detrend(x) * window
else:
x = x * window
N = _fftsize(n, sr, maxdf)
if N > n:
shape = [*x.shape]
shape[-1] = N
X = np.empty(shape)
X[..., :n] = x
X[..., n:] = 0.0
else:
X = x
F = np.fft.rfft(X)
f = np.fft.rfftfreq(N, 1.0 / sr)
# or, could do this to get same result:
# F = np.fft.fft(X)
# m = N // 2 + 1
# f = np.arange(0.0, m) * (sr / N)
# F = F[:m]
if fold:
a = 2.0 * F.real / n
a[..., 0] /= 2.0
if not N & 1: # if N is an even number
a[..., -1] /= 2.0
b = -2.0 * F.imag / n
else:
a = F.real / n
b = -F.imag / n
if not (axis == -1 or axis == x.ndim - 1):
# Move the axis containing the data to the end
a = np.swapaxes(a, axis, x.ndim - 1)
b = np.swapaxes(b, axis, x.ndim - 1)
if coef == "mag":
return np.sqrt(a ** 2 + b ** 2), np.arctan2(-a, b), f
elif coef == "complex":
return a + 1j * b, None, f
return a, b, f
def fftmap(
    timeslice, tsoverlap, sig, sr, window="hann", dodetrend=False, fold=True, maxdf=None
):
    """
    Make an FFT map ('waterfall') over time and frequency.

    Parameters
    ----------
    timeslice : scalar or string-integer
        If scalar, it is the length in seconds for each slice. If
        string, it contains the integer number of points for each
        slice. For example, if `sr` is 1000 samples/second,
        ``timeslice=0.75`` is equivalent to ``timeslice="750"``.
    tsoverlap : scalar in [0, 1) or string-integer
        If scalar, is the fraction of each time-slice to overlap. If
        string, it contains the integer number of points to
        overlap. For example, if `sr` is 1000 samples/second,
        ``tsoverlap=0.5`` and ``tsoverlap="500"`` each specify 50%
        overlap.
    sig : 1d array_like
        Signal to compute FFT map of.
    sr : scalar
        The sample rate (samples/sec)
    window : string, tuple, or 1d array_like; optional
        Specifies window function. If a string or tuple, it is passed
        to :func:`scipy.signal.get_window` to get the window. If 1d
        array_like, it must be length ``len(x)`` and is used directly.
    dodetrend : bool; optional
        If True, remove a linear fit from `x`; otherwise, no
        detrending is done.
    fold : bool; optional
        If true, "fold" negative frequencies on top of positive
        frequencies such that the coefficients at frequencies that
        have a negative counterpart are doubled (magnitude is also
        doubled).
    maxdf : scalar or None; optional
        If scalar, this is the maximum allowed frequency step; zero
        padding will be done if necessary to enforce this. Note that
        this is for providing more points between peaks only. If None,
        the delta frequency is simply ``sr/len(x)``.

    Returns
    -------
    mp : 2d ndarray
        The FFT map; columns span time, rows span frequency (so each
        column is an FFT curve). Time increases going across the
        columns and frequency increases going down the rows.
    t : 1d ndarray
        Time vector of center times; corresponds to columns in map.
        Signal is assumed to start at time = 0.
    f : 1d ndarray
        Frequency vector; corresponds to rows in map.

    Notes
    -----
    This routine calls :func:`fftcoef` for each time slice. `mp` is a
    matrix where each column is the FFT magnitude at all discrete
    frequencies for a certain time-slice. That is, time increases
    going across the columns and frequency increases going down the
    rows.

    See also
    --------
    :func:`fftcoef`

    Examples
    --------
    .. plot::
        :context: close-figs

        >>> import numpy as np
        >>> import matplotlib.pyplot as plt
        >>> from matplotlib import cm, colors
        >>> from pyyeti import dsp, ytools
        >>> sig, t, f = ytools.gensweep(10, 1, 50, 4)
        >>> sr = 1/t[1]
        >>> mp, t, f = dsp.fftmap(2, .1, sig, sr)
        >>> pv = f <= 50.0
        >>> cs = plt.contour(t, f[pv], mp[pv], 40, cmap=cm.plasma)
        >>> # This doesn't work in matplotlib 3.5.0:
        >>> # cbar = plt.colorbar()
        >>> # cbar.filled = True
        >>> # cbar.draw_all()
        >>> # But this does:
        >>> norm = colors.Normalize(
        ...     vmin=cs.cvalues.min(), vmax=cs.cvalues.max()
        ... )
        >>> sm = plt.cm.ScalarMappable(norm=norm, cmap=cs.cmap)
        >>> cb = plt.colorbar(sm)  # , ticks=cs.levels)
        >>> #
        >>> _ = plt.xlabel('Time (s)')
        >>> _ = plt.ylabel('Frequency (Hz)')
        >>> ttl = 'FFT Map of Sine-Sweep @ 4 oct/min'
        >>> _ = plt.title(ttl)
    """
    # Delegate the time-slicing/overlap bookkeeping to `waterfall`, running
    # `fftcoef` on each slice.  `which=0` picks the first return value of
    # `fftcoef` as the map value (the magnitude for the default `coef`) and
    # `freq=2` marks its third return value as the frequency vector.
    fft_kwargs = dict(sr=sr, window=window, dodetrend=dodetrend, fold=fold, maxdf=maxdf)
    return waterfall(
        sig,
        sr,
        timeslice,
        tsoverlap,
        fftcoef,
        which=0,
        freq=2,
        kwargs=fft_kwargs,
    )
def transmissibility(
in_data,
out_data,
sr,
timeslice=1.0,
tsoverlap=0.5,
window="hann",
getmap=False,
**kwargs,
):
r"""
Compute transmissibility transfer function using the FFT
Transmissibility is a common transfer function measurement of
``output / input``. It is a type of frequency response function
where the gain (magnitude) vs frequency is typically of primary
interest. Note that the phase can be computed from the output of
this routine as well.
Parameters
----------
in_data : 1d array_like
Time series of measurement values for the input data
out_data : 1d array_like
Time series of measurement values for the output data
sr : scalar
Sample rate.
timeslice : scalar or string-integer
If scalar, it is the length in seconds for each slice. If
string, it contains the integer number of points for each
slice. For example, if `sr` is 1000 samples/second,
``timeslice=0.75`` is equivalent to ``timeslice="750"``.
tsoverlap : scalar in [0, 1) or string-integer
If scalar, is the fraction of each time-slice to overlap. If
string, it contains the integer number of points to
overlap. For example, if `sr` is 1000 samples/second,
``tsoverlap=0.5`` and ``tsoverlap="500"`` each specify 50%
overlap.
window : string, tuple, or 1d array_like; optional
Specifies window function. If a string or tuple, it is passed
to :func:`scipy.signal.get_window` to get the window. If 1d
array_like, it must be length ``len(x)`` and is used directly.
getmap : bool, optional
If True, get the transfer function map outputs (see below).
*kwargs : optional
Named arguments to pass to :func:`fftcoef`. Note that `x`,
`sr`, `coef` and `window` arguments are passed automatically,
and that `fold` is irrelevant (due to computing a
ratio). Therefore, at the time of this writing, only
`dodetrend`, and `maxdf` are really valid entries in `kwargs`.
Returns
-------
A SimpleNamespace with the members:
f : 1d ndarray
Array of sample frequencies.
mag : 1d ndarray
Average magnitude of transmissibility transfer function across
all time slices of ``out_data / in_data``; length is
``len(f)``::
mag = abs(tr_map).mean(axis=1)
phase : 1d ndarray
Average phase in degrees of transmissibility transfer function
across all time slices of ``out_data / in_data``; length is
``len(f)``. Computing the average of angles is tricky; for
example, the average of 15 degrees and 355 degrees is 5
degrees. To get this result, the approach used here is to
compute the average of cartesian coordinates of points on a
unit circle at each angle, and then compute the angle to that
average location::
phase = np.angle(
(tr_map / abs(tr_map)).mean(axis=1), deg=True
)
This definition of phase follows the negative sign convention
of phase (as in :func:`fftcoef`): ``sin(theta - phase)``.
tr_map : complex 2d ndarray; optional
The complex transmissibility transfer function map. Each
column is the transmissibility of ``out_data / in_data``
computed from the FFT ratio (from :func:`fftcoef`) for the
corresponding time slice. Rows correspond to | |
"""
To add a new kind of graphic items create a child class
of VisualizerGraphicItem in this file and add it in the
model.create_item function to the model. This is the only function
in the model that should be changed. The behavior and values of the
object should be defined inside of its own class.
To add new properties to a class modify the attributes of a class
and modify the 'parse_init_value', the 'to_init_str', the 'restart',
the 'do_action' and the 'undo_action' methods to adjust the object's
behavior. To adjust the appearance of an item modify the set_rect method.
Note: The set_color function is called automatically by the modelView for
every item if the colors are defined in the configuration.
Look at the method definitions of other classes for examples.
"""
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QGraphicsItem
from PyQt5.QtWidgets import QGraphicsTextItem
from PyQt5.QtWidgets import QGraphicsRectItem
from PyQt5.QtWidgets import QGraphicsEllipseItem
from PyQt5.QtGui import QFont
from PyQt5.QtGui import QColor
from PyQt5.QtGui import QBrush
from . import modelView
from . import visualizerItem
from .configuration import *
# Bit flags for VisualizerGraphicItem._state (combined with bitwise OR).
# NOTE(review): the names suggest past-tense flags (MOVED, DELIVERED, ...)
# mark completed actions while present-tense flags (MOVE, DELIVER, ...)
# mark actions in progress, and VIZ_STATE_PUT_DOWN (0x0008) /
# VIZ_STATE_PUT_DOWN2 (0x0080) look like the completed/pending pair of the
# same action -- confirm against the item subclasses before relying on this.
VIZ_STATE_MOVED = 0x0001
VIZ_STATE_DELIVERED = 0x0002
VIZ_STATE_PICKED_UP = 0x0004
VIZ_STATE_PUT_DOWN = 0x0008
VIZ_STATE_MOVE = 0x0010
VIZ_STATE_DELIVER = 0x0020
VIZ_STATE_PICK_UP = 0x0040
VIZ_STATE_PUT_DOWN2 = 0x0080
VIZ_STATE_CHARGED = 0x0100
VIZ_STATE_CHARGE = 0x1000
# Mask covering all action-state bits.
VIZ_STATE_ACTION = 0xffff
def calculate_color(first_color, second_color, multiplier):
    """
    Interpolate between two color values, component-wise.

    For each RGB channel the result is ``low + (high - low) * multiplier``
    where ``low``/``high`` are the smaller/larger of the two inputs for
    that channel.  Note that this means the interpolation always runs from
    the component-wise minimum towards the maximum, regardless of the
    argument order.

    Parameters:
        first_color: QColor
        second_color: QColor
        multiplier: float
            Interpolation factor, expected in [0.0, 1.0].
    """
    red = (min(first_color.red(), second_color.red()),
           max(first_color.red(), second_color.red()))
    green = (min(first_color.green(), second_color.green()),
             max(first_color.green(), second_color.green()))
    blue = (min(first_color.blue(), second_color.blue()),
            max(first_color.blue(), second_color.blue()))
    # QColor(int, int, int) rejects float arguments in PyQt5; the
    # interpolation above produces floats, so truncate explicitly.
    return QColor(
        int(red[0] + (red[1] - red[0]) * multiplier),
        int(green[0] + (green[1] - green[0]) * multiplier),
        int(blue[0] + (blue[1] - blue[0]) * multiplier))
class VisualizerGraphicItem(QGraphicsItem, visualizerItem.VisualizerItem):
"""
This is the template class for visualizer graphic items.
There should never be an instance of this class.
A visualizer graphic item is a part of the visualizer model
that is drawn on the model view and can perform actions.
Attributes:
_kind_name: str
The name of this kind of item.
_ID: int
The id of this item.
_model:
The model this item belongs to
_position: tuple
A tuple that consists of two integers representing
the position of the item on the grid.
_start_position: tuple
The position of this item at timestep 0.
_dragged: tuple
This is the position of this item before a drag
action started.
_enable_drag: bool
This is true if drag and drop is enabled for this item.
This will be set by the model. It will be set to false
as soon as the first action atom occurs.
_graphics_item: QGraphicsItem
This is the main graphic item of this item
and is used for some default functions.
_text: QGraphicsTextItem
This is a text item that is drawn
on the model view and represents primarily the id of the item.
_actions: list
This is a sorted list of the actions of this item. The index
of every action is the time step at which the action occurs.
_color: QColor
The current main color of the item.
_colors: list
A list of QColor values. Contains several colors for multi-colored
items and color interpolation.
_display_mode: int
This is set by the model view and determines whether
the item text should be rendered and whether the item
should be rendered in the whole grid field.
It is primarily defined by the zoom factor and the grid size.
_draw_path: bool
This is true if the path of the item should be drawn on the
model view. Should only be used for items that can have a path
like robots.
_state: int
Consists of one or more state flags that describe which action
an item is currently doing.
_highlighted: bool
This is true if the item will be highlighted. Highlighted items
are drawn larger.
"""
def __init__(self, ID = '0', x = 0, y = 0):
    """
    Parameters:
        ID : int, optional
            The ID of the item
        x: int, optional
            The x coordinate on the grid
        y: int, optional
            The y coordinate on the grid
    """
    # Initialize both base classes explicitly: Qt graphics machinery first,
    # then the generic visualizer-item bookkeeping.
    QGraphicsItem.__init__(self)
    visualizerItem.VisualizerItem.__init__(self)
    self._kind_name = ''              # name of this kind of item; set by subclasses
    self._id = ID
    self._model = None                # model this item belongs to
    self._position = (x, y)           # current position on the grid
    self._start_position = (x, y)     # position at time step 0
    self._dragged = None              # position before a drag action started
    self._enable_drag = False         # drag & drop enabled (model editor)
    self._graphics_item = None        # main QGraphicsItem; set by subclasses
    self._text = None                 # text item showing (primarily) the id
    self._actions = []                # actions indexed by time step
    self._color = None                # current main color
    self._colors = [QColor(0,0,0)]    # palette for multi-colored items
    self._display_mode = 0            # set by the model view (zoom/grid size)
    self._draw_path = False           # whether to draw the item's path
    # 1 is presumably Qt.LeftButton, i.e. accept only left clicks -- confirm.
    self.setAcceptedMouseButtons(Qt.MouseButtons(1))
    self._state = 0x00                # bitwise OR of VIZ_STATE_* flags
    self._highlighted = False         # highlighted items are drawn larger
def set_starting_position(self, x, y):
    """Remember (x, y) as this item's grid position at time step 0."""
    self._start_position = (x, y)
def set_position(self, x, y):
    """Move this item to grid position (x, y)."""
    self._position = (x, y)
def set_color(self, color, color_id = 0):
    """
    Store ``color`` at palette index ``color_id``.

    The palette is grown with black placeholder colors if it is
    currently too short to hold that index.

    Parameters:
        color: QColor
            The color to store.
        color_id: int, optional
            Index of the palette slot to overwrite.
    """
    missing = color_id + 1 - len(self._colors)
    if missing > 0:
        self._colors.extend(QColor(0, 0, 0) for _ in range(missing))
    self._colors[color_id] = color
def set_rect(self, rect):
    """
    Sets the rectangle that an item can use to draw things in.
    This usually equals one field of the grid in the model view, and
    overriding this method is how subclasses define their appearance
    on the grid.

    The base implementation deliberately does nothing.

    Parameters:
        rect: QRectF
            The rectangle available for drawing.
    """
    return
def set_action(self, action, time_step):
    """
    Sets the action for the specific time step.

    Overrides an existing action at the time step, but prints a
    warning since this should never happen.

    Parameters:
        action: clingo.Symbol
            This is the action that will be performed.
        time_step: int
            This is the time step at which the action
            should be performed.
    """
    if time_step < 0:
        # BUG FIX: this used self._ID, which is never defined (__init__
        # sets self._id), so the warning path raised AttributeError.
        print(('Warning: invalid time step in occurs(object('
                + str(self._kind_name) + ','
                + str(self._id) + '),' + str(action)
                + ',' + str(time_step) + ')'))
        print('time step is less than 0')
        return
    # Grow the action list so that index time_step exists.
    for ii in range((time_step + 1) - len(self._actions)):
        self._actions.append(None)
    if self._actions[time_step] is not None:
        print(('Warning: for object(' + str(self._kind_name)
                + ', ' + str(self._id)
                + ') multiple actions are defined at time step '
                + str(time_step)))
    self._actions[time_step] = action
def set_display_mode(self, display_mode):
    """Store the display mode chosen by the model view (zoom/grid dependent)."""
    self._display_mode = display_mode
def set_draw_path(self, draw_path):
    """Enable or disable drawing of this item's path on the model view."""
    self._draw_path = draw_path
def parse_init_value(self, name, value):
    """
    Handle one init value phrase for this item.

    The model calls this for every phrase it receives of the form
    ``init(object([object type], [object ID]), value([value name], [value])).``
    where [object type] matches self._kind_name and [object ID] matches
    self._id; the model routes the phrase to this object based on those.

    Parameters:
        name: str
            The name of the value.
        value: clingo.Symbol
            The actual value, i.e. the [value] part of the phrase.

    Returns:
        -1 if a parameter is invalid, 0 on success and 1 if the
        phrase cannot be parsed here.
    """
    if name is None or value is None:
        return -1
    if name != 'at':
        return 1
    x_coord = value.arguments[0].number
    y_coord = value.arguments[1].number
    self.set_starting_position(x_coord, y_coord)
    self.set_position(x_coord, y_coord)
    return 0
def enable_drag(self, enable):
    """Turn the model editor's drag-and-drop support for this item on or off."""
    self._enable_drag = enable
def restart(self):
    """Reset the item to its original values by moving it back to its
    starting position.
    """
    self._position = self._start_position
def do_action(self, time_step):
    """
    Action handler hook; the base implementation does nothing.

    Must be implemented by items that can perform actions. This is
    called every time the model steps one time step forward.

    Parameters:
        time_step: int
            The time step whose action should be performed.
    """
    return
def undo_action(self, time_step):
    """
    Reverse action handler hook; the base implementation does nothing.

    Must be implemented by items that can perform actions. This is
    called every time the model steps one time step backwards.

    Parameters:
        time_step: int
            The time step whose action should be reverted.
    """
    return
def clear_actions(self):
    """Drop every recorded action of this item."""
    self._actions = []
def to_init_str(self):
    """
    Serialize this item to an ``init(...)`` atom string.

    Used to send the whole model to a solver and to save an
    instance to a file.
    """
    return 'init(object({},{}), value(at,({},{}))).'.format(
        self._kind_name, self._id,
        self._position[0], self._position[1])
def to_occurs_str(self):
"""
This function returns a list | |
= Constraint(expr= m.b120 - m.b127 + m.b161 <= 1)
# ---------------------------------------------------------------------------
# Machine-generated Pyomo constraint section (part of a larger generated
# model); do not edit by hand.  Every constraint has the shape
#     b_i - b_j + b_k <= 1
# over binary variables.  NOTE(review): this looks like a family of
# transitivity/"triangle" cuts linking pairwise decision variables --
# confirm against the generator that produced this model.
# ---------------------------------------------------------------------------
m.c907 = Constraint(expr= m.b121 - m.b122 + m.b162 <= 1)
m.c908 = Constraint(expr= m.b121 - m.b123 + m.b163 <= 1)
m.c909 = Constraint(expr= m.b121 - m.b124 + m.b164 <= 1)
m.c910 = Constraint(expr= m.b121 - m.b125 + m.b165 <= 1)
m.c911 = Constraint(expr= m.b121 - m.b126 + m.b166 <= 1)
m.c912 = Constraint(expr= m.b121 - m.b127 + m.b167 <= 1)
m.c913 = Constraint(expr= m.b122 - m.b123 + m.b168 <= 1)
m.c914 = Constraint(expr= m.b122 - m.b124 + m.b169 <= 1)
m.c915 = Constraint(expr= m.b122 - m.b125 + m.b170 <= 1)
m.c916 = Constraint(expr= m.b122 - m.b126 + m.b171 <= 1)
m.c917 = Constraint(expr= m.b122 - m.b127 + m.b172 <= 1)
m.c918 = Constraint(expr= m.b123 - m.b124 + m.b173 <= 1)
m.c919 = Constraint(expr= m.b123 - m.b125 + m.b174 <= 1)
m.c920 = Constraint(expr= m.b123 - m.b126 + m.b175 <= 1)
m.c921 = Constraint(expr= m.b123 - m.b127 + m.b176 <= 1)
m.c922 = Constraint(expr= m.b124 - m.b125 + m.b177 <= 1)
m.c923 = Constraint(expr= m.b124 - m.b126 + m.b178 <= 1)
m.c924 = Constraint(expr= m.b124 - m.b127 + m.b179 <= 1)
m.c925 = Constraint(expr= m.b125 - m.b126 + m.b180 <= 1)
m.c926 = Constraint(expr= m.b125 - m.b127 + m.b181 <= 1)
m.c927 = Constraint(expr= m.b126 - m.b127 + m.b182 <= 1)
m.c928 = Constraint(expr= m.b128 - m.b129 + m.b138 <= 1)
m.c929 = Constraint(expr= m.b128 - m.b130 + m.b139 <= 1)
m.c930 = Constraint(expr= m.b128 - m.b131 + m.b140 <= 1)
m.c931 = Constraint(expr= m.b128 - m.b132 + m.b141 <= 1)
m.c932 = Constraint(expr= m.b128 - m.b133 + m.b142 <= 1)
m.c933 = Constraint(expr= m.b128 - m.b134 + m.b143 <= 1)
m.c934 = Constraint(expr= m.b128 - m.b135 + m.b144 <= 1)
m.c935 = Constraint(expr= m.b128 - m.b136 + m.b145 <= 1)
m.c936 = Constraint(expr= m.b128 - m.b137 + m.b146 <= 1)
m.c937 = Constraint(expr= m.b129 - m.b130 + m.b147 <= 1)
m.c938 = Constraint(expr= m.b129 - m.b131 + m.b148 <= 1)
m.c939 = Constraint(expr= m.b129 - m.b132 + m.b149 <= 1)
m.c940 = Constraint(expr= m.b129 - m.b133 + m.b150 <= 1)
m.c941 = Constraint(expr= m.b129 - m.b134 + m.b151 <= 1)
m.c942 = Constraint(expr= m.b129 - m.b135 + m.b152 <= 1)
m.c943 = Constraint(expr= m.b129 - m.b136 + m.b153 <= 1)
m.c944 = Constraint(expr= m.b129 - m.b137 + m.b154 <= 1)
m.c945 = Constraint(expr= m.b130 - m.b131 + m.b155 <= 1)
m.c946 = Constraint(expr= m.b130 - m.b132 + m.b156 <= 1)
m.c947 = Constraint(expr= m.b130 - m.b133 + m.b157 <= 1)
m.c948 = Constraint(expr= m.b130 - m.b134 + m.b158 <= 1)
m.c949 = Constraint(expr= m.b130 - m.b135 + m.b159 <= 1)
m.c950 = Constraint(expr= m.b130 - m.b136 + m.b160 <= 1)
m.c951 = Constraint(expr= m.b130 - m.b137 + m.b161 <= 1)
m.c952 = Constraint(expr= m.b131 - m.b132 + m.b162 <= 1)
m.c953 = Constraint(expr= m.b131 - m.b133 + m.b163 <= 1)
m.c954 = Constraint(expr= m.b131 - m.b134 + m.b164 <= 1)
m.c955 = Constraint(expr= m.b131 - m.b135 + m.b165 <= 1)
m.c956 = Constraint(expr= m.b131 - m.b136 + m.b166 <= 1)
m.c957 = Constraint(expr= m.b131 - m.b137 + m.b167 <= 1)
m.c958 = Constraint(expr= m.b132 - m.b133 + m.b168 <= 1)
m.c959 = Constraint(expr= m.b132 - m.b134 + m.b169 <= 1)
m.c960 = Constraint(expr= m.b132 - m.b135 + m.b170 <= 1)
m.c961 = Constraint(expr= m.b132 - m.b136 + m.b171 <= 1)
m.c962 = Constraint(expr= m.b132 - m.b137 + m.b172 <= 1)
m.c963 = Constraint(expr= m.b133 - m.b134 + m.b173 <= 1)
m.c964 = Constraint(expr= m.b133 - m.b135 + m.b174 <= 1)
m.c965 = Constraint(expr= m.b133 - m.b136 + m.b175 <= 1)
m.c966 = Constraint(expr= m.b133 - m.b137 + m.b176 <= 1)
m.c967 = Constraint(expr= m.b134 - m.b135 + m.b177 <= 1)
m.c968 = Constraint(expr= m.b134 - m.b136 + m.b178 <= 1)
m.c969 = Constraint(expr= m.b134 - m.b137 + m.b179 <= 1)
m.c970 = Constraint(expr= m.b135 - m.b136 + m.b180 <= 1)
m.c971 = Constraint(expr= m.b135 - m.b137 + m.b181 <= 1)
m.c972 = Constraint(expr= m.b136 - m.b137 + m.b182 <= 1)
m.c973 = Constraint(expr= m.b138 - m.b139 + m.b147 <= 1)
m.c974 = Constraint(expr= m.b138 - m.b140 + m.b148 <= 1)
m.c975 = Constraint(expr= m.b138 - m.b141 + m.b149 <= 1)
m.c976 = Constraint(expr= m.b138 - m.b142 + m.b150 <= 1)
m.c977 = Constraint(expr= m.b138 - m.b143 + m.b151 <= 1)
m.c978 = Constraint(expr= m.b138 - m.b144 + m.b152 <= 1)
m.c979 = Constraint(expr= m.b138 - m.b145 + m.b153 <= 1)
m.c980 = Constraint(expr= m.b138 - m.b146 + m.b154 <= 1)
m.c981 = Constraint(expr= m.b139 - m.b140 + m.b155 <= 1)
m.c982 = Constraint(expr= m.b139 - m.b141 + m.b156 <= 1)
m.c983 = Constraint(expr= m.b139 - m.b142 + m.b157 <= 1)
m.c984 = Constraint(expr= m.b139 - m.b143 + m.b158 <= 1)
m.c985 = Constraint(expr= m.b139 - m.b144 + m.b159 <= 1)
m.c986 = Constraint(expr= m.b139 - m.b145 + m.b160 <= 1)
m.c987 = Constraint(expr= m.b139 - m.b146 + m.b161 <= 1)
m.c988 = Constraint(expr= m.b140 - m.b141 + m.b162 <= 1)
m.c989 = Constraint(expr= m.b140 - m.b142 + m.b163 <= 1)
m.c990 = Constraint(expr= m.b140 - m.b143 + m.b164 <= 1)
m.c991 = Constraint(expr= m.b140 - m.b144 + m.b165 <= 1)
m.c992 = Constraint(expr= m.b140 - m.b145 + m.b166 <= 1)
m.c993 = Constraint(expr= m.b140 - m.b146 + m.b167 <= 1)
m.c994 = Constraint(expr= m.b141 - m.b142 + m.b168 <= 1)
m.c995 = Constraint(expr= m.b141 - m.b143 + m.b169 <= 1)
m.c996 = Constraint(expr= m.b141 - m.b144 + m.b170 <= 1)
m.c997 = Constraint(expr= m.b141 - m.b145 + m.b171 <= 1)
m.c998 = Constraint(expr= m.b141 - m.b146 + m.b172 <= 1)
m.c999 = Constraint(expr= m.b142 - m.b143 + m.b173 <= 1)
m.c1000 = Constraint(expr= m.b142 - m.b144 + m.b174 <= 1)
m.c1001 = Constraint(expr= m.b142 - m.b145 + m.b175 <= 1)
m.c1002 = Constraint(expr= m.b142 - m.b146 + m.b176 <= 1)
m.c1003 = Constraint(expr= m.b143 - m.b144 + m.b177 <= 1)
m.c1004 = Constraint(expr= m.b143 - m.b145 + m.b178 <= 1)
m.c1005 = Constraint(expr= m.b143 - m.b146 + m.b179 <= 1)
m.c1006 = Constraint(expr= m.b144 - m.b145 + m.b180 <= 1)
m.c1007 = Constraint(expr= m.b144 - m.b146 + m.b181 <= 1)
m.c1008 = Constraint(expr= m.b145 - m.b146 + m.b182 <= 1)
m.c1009 = Constraint(expr= m.b147 - m.b148 + m.b155 <= 1)
m.c1010 = Constraint(expr= m.b147 - m.b149 + m.b156 <= 1)
m.c1011 = Constraint(expr= m.b147 - m.b150 + m.b157 <= 1)
m.c1012 = Constraint(expr= m.b147 - m.b151 + m.b158 <= 1)
m.c1013 = Constraint(expr= m.b147 - m.b152 + m.b159 <= 1)
m.c1014 = Constraint(expr= m.b147 - m.b153 + m.b160 <= 1)
m.c1015 = Constraint(expr= m.b147 - m.b154 + m.b161 <= 1)
m.c1016 = Constraint(expr= m.b148 - m.b149 + m.b162 <= 1)
m.c1017 = Constraint(expr= m.b148 - m.b150 + m.b163 <= 1)
m.c1018 = Constraint(expr= m.b148 - m.b151 + m.b164 <= 1)
m.c1019 = Constraint(expr= m.b148 - m.b152 + m.b165 <= 1)
m.c1020 = Constraint(expr= m.b148 - m.b153 + m.b166 <= 1)
m.c1021 = Constraint(expr= m.b148 - m.b154 + m.b167 <= 1)
m.c1022 = Constraint(expr= m.b149 - m.b150 + m.b168 <= 1)
m.c1023 = Constraint(expr= m.b149 - m.b151 + m.b169 <= 1)
m.c1024 = Constraint(expr= m.b149 - m.b152 + m.b170 <= 1)
m.c1025 = Constraint(expr= m.b149 - m.b153 + m.b171 <= 1)
m.c1026 = Constraint(expr= m.b149 - m.b154 + m.b172 <= 1)
m.c1027 = Constraint(expr= m.b150 - m.b151 + m.b173 <= 1)
m.c1028 = Constraint(expr= m.b150 - m.b152 + m.b174 <= 1)
m.c1029 = Constraint(expr= m.b150 - m.b153 + m.b175 <= 1)
m.c1030 = Constraint(expr= m.b150 - m.b154 + m.b176 <= 1)
m.c1031 = Constraint(expr= m.b151 - m.b152 + m.b177 <= 1)
m.c1032 = Constraint(expr= m.b151 - m.b153 + m.b178 <= 1)
m.c1033 = Constraint(expr= m.b151 - m.b154 + m.b179 <= 1)
m.c1034 = Constraint(expr= m.b152 - m.b153 + m.b180 <= 1)
m.c1035 = Constraint(expr= m.b152 - m.b154 + m.b181 <= 1)
m.c1036 = Constraint(expr= m.b153 - m.b154 + m.b182 <= 1)
m.c1037 = Constraint(expr= m.b155 - m.b156 + m.b162 <= 1)
m.c1038 = Constraint(expr= m.b155 - m.b157 + m.b163 <= 1)
m.c1039 = Constraint(expr= m.b155 - m.b158 + m.b164 <= 1)
m.c1040 = Constraint(expr= m.b155 - m.b159 + m.b165 <= 1)
m.c1041 = Constraint(expr= m.b155 - m.b160 + m.b166 <= 1)
m.c1042 = Constraint(expr= m.b155 - m.b161 + m.b167 <= 1)
m.c1043 = Constraint(expr= m.b156 - m.b157 + m.b168 <= 1)
m.c1044 = Constraint(expr= m.b156 - m.b158 + m.b169 <= 1)
m.c1045 = Constraint(expr= m.b156 - m.b159 + m.b170 <= 1)
m.c1046 = Constraint(expr= m.b156 - m.b160 + m.b171 <= 1)
m.c1047 = Constraint(expr= m.b156 - m.b161 + m.b172 <= 1)
m.c1048 = Constraint(expr= | |
<filename>cli/cros/lint_unittest.py
# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test the lint module."""
from __future__ import print_function
import collections
import io
import os
from chromite.cli.cros import lint
from chromite.lib import cros_test_lib
from chromite.lib import osutils
pytestmark = cros_test_lib.pytestmark_inside_only
# pylint: disable=protected-access
class DocStringSectionDetailsTest(cros_test_lib.TestCase):
    """Basic DocStringSectionDetails class tests."""

    def testInit(self):
        """Verify constructor behavior."""
        section = lint.DocStringSectionDetails()
        self.assertEqual(None, section.name)
        self.assertEqual(None, section.header)
        self.assertEqual([], section.lines)
        self.assertEqual(None, section.lineno)

        section = lint.DocStringSectionDetails(
            name='Args', header=' Args:', lines=[' foo: Yes.'], lineno=2)
        self.assertEqual('Args', section.name)
        self.assertEqual(' Args:', section.header)
        self.assertEqual([' foo: Yes.'], section.lines)
        self.assertEqual(2, section.lineno)

    def testStr(self):
        """Sanity check __str__."""
        self.assertNotEqual(None, str(lint.DocStringSectionDetails()))

    def testRepr(self):
        """Sanity check __repr__."""
        self.assertNotEqual(None, repr(lint.DocStringSectionDetails()))

    def testEqual(self):
        """Sanity check __eq__."""
        left = lint.DocStringSectionDetails()
        right = lint.DocStringSectionDetails()
        self.assertEqual(left, right)

        # Differing in any single field must break equality.
        right = lint.DocStringSectionDetails(name='Args')
        self.assertNotEqual(left, right)
        right = lint.DocStringSectionDetails(header=' Args:')
        self.assertNotEqual(left, right)
        right = lint.DocStringSectionDetails(lineno=2)
        self.assertNotEqual(left, right)

        left = lint.DocStringSectionDetails(name='n', header='h', lineno=0)
        right = lint.DocStringSectionDetails(name='n', header='h', lineno=0)
        self.assertEqual(left, right)
class PylintrcConfigTest(cros_test_lib.TempDirTestCase):
    """Basic _PylintrcConfig tests."""

    def testEmptySettings(self):
        """Check default empty names behavior."""
        lint._PylintrcConfig('/dev/null', '', ())

    def testDefaultValue(self):
        """Check we can read a default."""
        rc_path = os.path.join(self.tempdir, 'pylintrc')
        osutils.WriteFile(rc_path, '[sect]\nkey = " "\n')
        config = lint._PylintrcConfig(rc_path, 'sect', (
            ('key', {'default': 'KEY', 'type': 'string'}),
            ('foo', {'default': 'FOO', 'type': 'string'}),
        ))
        # 'key' comes from the file (a quoted space); 'foo' falls back to
        # its declared default.
        self.assertEqual(' ', config.option_value('key'))
        self.assertEqual('FOO', config.option_value('foo'))
class TestNode(object):
    """Object good enough to stand in for lint funcs"""

    Args = collections.namedtuple('Args', ('args', 'vararg', 'kwarg'))
    Arg = collections.namedtuple('Arg', ('name',))

    def __init__(self, doc='', fromlineno=0, path='foo.py', args=(), vararg='',
                 kwarg='', names=None, lineno=0, name='module',
                 display_type='Module', col_offset=None):
        self.doc = doc
        self.lines = doc.split('\n')
        self.fromlineno = fromlineno
        self.lineno = lineno
        self.file = path
        self.args = self.Args(
            args=[self.Arg(name=arg_name) for arg_name in args],
            vararg=vararg, kwarg=kwarg)
        # Default mirrors what the real nodes carry for simple cases.
        self.names = [('name', None)] if names is None else names
        self.name = name
        self._display_type = display_type
        self.col_offset = col_offset

    def argnames(self):
        return [arg.name for arg in self.args.args]

    def display_type(self):
        return self._display_type
class StatStub(object):
    """Dummy object to stand in for stat checks."""

    def __init__(self, size=0, mode=0o644):
        # Only the two fields the lint checks actually read.
        self.st_size = size
        self.st_mode = mode
class CheckerTestCase(cros_test_lib.TestCase):
    """Helpers for Checker modules"""

    def add_message(self, msg_id, node=None, line=None, args=None):
        """Capture lint checks"""
        # Record node.doc explicitly so the pretty assert message
        # includes it in the output automatically.
        node_doc = node.doc if node else ''
        self.results.append((msg_id, node_doc, line, args))

    def setUp(self):
        assert hasattr(self, 'CHECKER'), 'TestCase must set CHECKER'
        self.results = []
        self.checker = self.CHECKER()
        # Redirect the checker's reporting into our capture list.
        self.checker.add_message = self.add_message

    def assertLintPassed(self, msg='Checks failed'):
        """Assert that no lint results have been queued."""
        msg = '{}\nChecks failed: {}'.format(msg, [x[0] for x in self.results])
        self.assertEqual(self.results, [], msg=msg)

    def assertLintFailed(self, msg='Checks incorrectly passed', expected=()):
        """Assert that failed results matching |expected| have been queued."""
        if expected:
            self.assertEqual(list(expected), [x[0] for x in self.results])
        else:
            self.assertNotEqual(len(self.results), 0, msg=msg)
class DocStringCheckerTest(CheckerTestCase):
"""Tests for DocStringChecker module"""
# NOTE(review): these fixtures exercise docstring *formatting*; the exact
# leading whitespace inside each triple-quoted string is significant and may
# have been mangled upstream — verify against the canonical source.
# Function docstrings that the checker must accept unchanged.
GOOD_FUNC_DOCSTRINGS = (
'Some string',
"""Short summary
Body of text.
""",
"""line o text
Body and comments on
more than one line.
Args:
moo: cow
Returns:
some value
Raises:
something else
""",
"""Short summary.
Args:
fat: cat
Yields:
a spoon
""",
"""Don't flag args variables as sections.
Args:
return: Foo!
""",
"""the indentation is extra special
Returns:
First line is two spaces which is ok.
Then we indent some more!
""",
"""Arguments with same names as sections.
Args:
result: Valid arg, invalid section.
return: Valid arg, invalid section.
returns: Valid arg, valid section.
arg: Valid arg, invalid section.
args: Valid arg, valid section.
attribute: Valid arg, invalid section.
attributes: Valid arg, valid section.
""",
)
# Function docstrings that the checker must reject.
BAD_FUNC_DOCSTRINGS = (
"""
bad first line
""",
"""The first line is good
but the second one isn't
""",
""" whitespace is wrong""",
"""whitespace is wrong """,
""" whitespace is wrong
Multiline tickles differently.
""",
"""First line is OK, but too much trailing whitespace
""",
"""Should be no trailing blank lines
Returns:
a value
""",
"""ok line
cuddled end""",
"""we want Args, not Arguments
Arguments:
some: arg
""",
"""we want Args, not Params
Params:
some: arg
""",
"""section order is wrong here
Raises:
It raised.
Returns:
It returned
""",
"""sections are duplicated
Returns:
True
Returns:
or was it false
""",
"""sections lack whitespace between them
Args:
foo: bar
Returns:
yeah
""",
"""yields is misspelled
Yield:
a car
""",
"""We want Examples, not Usage.
Usage:
a car
""",
"""Section name has bad spacing
Args:\x20\x20\x20
key: here
""",
"""too many blank lines
Returns:
None
""",
"""wrongly uses javadoc
@returns None
""",
"""the indentation is incorrect
Args:
some: day
""",
"""the final indentation is incorrect
Blah.
""",
"""the indentation is incorrect
Returns:
one space but should be two
""",
"""the indentation is incorrect
Returns:
three spaces but should be two (and we have just one line)
""",
"""the indentation is incorrect
Args:
some: has three spaces but should be two
""",
"""the indentation is incorrect
Args:
some: has one space but should be two
""",
"""the indentation is incorrect
Args:
some: has four spaces but should be two
""",
""""Extra leading quotes.""",
"""Class-only sections aren't allowed.
Attributes:
foo: bar.
""",
"""No lines between section headers & keys.
Args:
arg: No blank line above!
""",
)
# The current linter isn't good enough yet to detect these.
TODO_BAD_FUNC_DOCSTRINGS = (
"""The returns section isn't a proper section
Args:
bloop: de
returns something
""",
"""Too many spaces after header.
Args:
arg: too many spaces
""",
)
# We don't need to test most scenarios as the func & class checkers share
# code. Only test the differences.
GOOD_CLASS_DOCSTRINGS = (
"""Basic class.""",
"""Class with attributes.
Attributes:
foo: bar
""",
"""Class with examples.
Examples:
Do stuff.
""",
"""Class with examples & attributes.
Examples:
Do stuff.
Attributes:
foo: bar
""",
"""Attributes with same names as sections.
Attributes:
result: Valid arg, invalid section.
return: Valid arg, invalid section.
returns: Valid arg, valid section.
arg: Valid arg, invalid section.
args: Valid arg, valid section.
attribute: Valid arg, invalid section.
attributes: Valid arg, valid section.
""",
)
# Class docstrings that the checker must reject.
BAD_CLASS_DOCSTRINGS = (
"""Class with wrong attributes name.
Members:
foo: bar
""",
"""Class with func-specific section.
These sections aren't valid for classes.
Args:
foo: bar
""",
"""Class with examples & attributes out of order.
Attributes:
foo: bar
Examples:
Do stuff.
""",
)
# The checker class under test; CheckerTestCase instantiates it.
CHECKER = lint.DocStringChecker
def testGood_visit_functiondef(self):
    """Verify the checker accepts every known-good function docstring."""
    for docstring in self.GOOD_FUNC_DOCSTRINGS:
        # Reset queued results before each fixture so failures don't leak.
        self.results = []
        fake_node = TestNode(doc=docstring, display_type=None, col_offset=4)
        self.checker.visit_functiondef(fake_node)
        self.assertLintPassed(
            msg='docstring was not accepted:\n"""%s"""' % docstring)
def testBad_visit_functiondef(self):
    """Verify the checker rejects every known-bad function docstring."""
    for docstring in self.BAD_FUNC_DOCSTRINGS:
        # Reset queued results before each fixture so failures don't leak.
        self.results = []
        fake_node = TestNode(doc=docstring, display_type=None, col_offset=4)
        self.checker.visit_functiondef(fake_node)
        self.assertLintFailed(
            msg='docstring was not rejected:\n"""%s"""' % docstring)
def testSmoke_visit_module(self):
    """Smoke-test module visiting for the two basic cases."""
    # A module with a plain docstring is accepted.
    module_node = TestNode(doc='foo')
    self.checker.visit_module(module_node)
    self.assertLintPassed()
    # A package __init__.py may omit the docstring entirely.
    init_node = TestNode(doc='', path='/foo/__init__.py')
    self.checker.visit_module(init_node)
    self.assertLintPassed()
def testGood_visit_classdef(self):
    """Verify the checker accepts every known-good class docstring."""
    for docstring in self.GOOD_CLASS_DOCSTRINGS:
        # Reset queued results before each fixture so failures don't leak.
        self.results = []
        fake_node = TestNode(doc=docstring, display_type=None, col_offset=4)
        self.checker.visit_classdef(fake_node)
        self.assertLintPassed(
            msg='docstring was not accepted:\n"""%s"""' % docstring)
def testBad_visit_classdef(self):
    """Verify the checker rejects every known-bad class docstring."""
    for docstring in self.BAD_CLASS_DOCSTRINGS:
        # Reset queued results before each fixture so failures don't leak.
        self.results = []
        fake_node = TestNode(doc=docstring, display_type=None, col_offset=4)
        self.checker.visit_classdef(fake_node)
        self.assertLintFailed(
            msg='docstring was not rejected:\n"""%s"""' % docstring)
def testSmoke_visit_classdef(self):
    """Smoke test for classes.

    Fix: the original visited the node but never asserted the outcome, so
    the test could not fail even if the checker rejected a valid docstring.
    Mirror testSmoke_visit_module and assert the lint passed.
    """
    self.checker.visit_classdef(TestNode(doc='bar'))
    self.assertLintPassed()
def testGood_check_first_line(self):
    """Verify _check_first_line accepts well-formed first lines."""
    good_inputs = ('Some string',)
    for docstring in good_inputs:
        self.results = []
        node = TestNode(doc=docstring)
        self.checker._check_first_line(node, node.lines)
        self.assertLintPassed(
            msg='docstring was not accepted:\n"""%s"""' % docstring)
def testBad_check_first_line(self):
    """Verify _check_first_line rejects a blank leading line (C9009)."""
    bad_inputs = ('\nSome string\n',)
    for docstring in bad_inputs:
        self.results = []
        node = TestNode(doc=docstring)
        self.checker._check_first_line(node, node.lines)
        self.assertLintFailed(expected=('C9009',))
def testGood_check_second_line_blank(self):
    """Verify _check_second_line_blank accepts valid layouts."""
    good_inputs = (
        # Multi-line docstring with the required blank second line.
        'Some string\n\nThis is the third line',
        # One-line docstring has no second line to check.
        'Some string',
    )
    for docstring in good_inputs:
        self.results = []
        node = TestNode(doc=docstring)
        self.checker._check_second_line_blank(node, node.lines)
        self.assertLintPassed(
            msg='docstring was not accepted:\n"""%s"""' % docstring)
def testBad_check_second_line_blank(self):
    """Verify _check_second_line_blank flags a non-blank second line (C9014)."""
    bad_inputs = ('Some string\nnonempty secondline',)
    for docstring in bad_inputs:
        self.results = []
        node = TestNode(doc=docstring)
        self.checker._check_second_line_blank(node, node.lines)
        self.assertLintFailed(expected=('C9014',))
def testGoodFuncVarKwArg(self):
    """Accept all valid *args/**kwargs naming combinations."""
    valid_varargs = (None, 'args', '_args')
    valid_kwargs = (None, 'kwargs', '_kwargs')
    for vararg in valid_varargs:
        for kwarg in valid_kwargs:
            self.results = []
            node = TestNode(vararg=vararg, kwarg=kwarg)
            self.checker._check_func_signature(node)
            self.assertLintPassed()
def testMisnamedFuncVarKwArg(self):
"""Reject anything but *args and **kwargs"""
for vararg in ('arg', 'params', 'kwargs', '_moo'):
self.results = | |
<reponame>carolgaudeoso/PyTTa
# -*- coding: utf-8 -*-
"""
This module does calculations compliant with ISO 3382-1 in order to obtain
room acoustic parameters.
It has an implementation of Lundeby et al. [1] algorithm to estimate the
correction factor for the cumulative integral, as suggested by the ISO 3382-1.
"""
import numpy as np
import matplotlib.pyplot as plt
from numba import njit
from pytta import SignalObj, OctFilter, Analysis, ImpulsiveResponse
from pytta.utils import fractional_octave_frequencies as FOF
import traceback
import copy as cp
def _filter(signal,
            order: int = 4,
            nthOct: int = 3,
            minFreq: float = 20,
            maxFreq: float = 20000,
            refFreq: float = 1000,
            base: int = 10):
    """Run `signal` through a fractional-octave filter bank.

    Builds an OctFilter matching the signal's sampling rate and returns the
    first element of the filtered result.
    """
    filter_bank = OctFilter(order=order,
                            nthOct=nthOct,
                            samplingRate=signal.samplingRate,
                            minFreq=minFreq,
                            maxFreq=maxFreq,
                            refFreq=refFreq,
                            base=base)
    filtered = filter_bank.filter(signal)
    return filtered[0]
@njit
def _level_profile(timeSignal, samplingRate,
                   numSamples, numChannels, blockSamples=None):
    """Compute a blockwise mean-square level profile of the signal.

    Gets h(t) in octave bands and does the local time averaging in nblocks;
    returns h^2_averaged(block).

    Args:
        timeSignal: 2-D array indexed as [sample, channel].
        samplingRate: sample rate in Hz (used only for the time stamps).
        numSamples: number of samples per channel.
        numChannels: number of channels to process.
        blockSamples: averaging window length in samples (defaults to 100).

    Returns:
        (profile, timeStamp): float32 mean-square level per block, and the
        start time of each block in seconds.
    """
    # Mean of the squared samples over one block.
    def mean_squared(x):
        return np.mean(x**2)
    if blockSamples is None:
        blockSamples = 100
    # Only whole blocks are kept; trailing remainder samples are dropped.
    nblocks = int(numSamples // blockSamples)
    profile = np.zeros((nblocks, numChannels), dtype=np.float32)
    timeStamp = np.zeros((nblocks, 1))
    for ch in range(numChannels):
        # if numChannels == 1:
        #     tmp = timeSignal
        # else:
        tmp = timeSignal[:, ch]
        for idx in range(nblocks):
            profile[idx, ch] = mean_squared(tmp[:blockSamples])
            timeStamp[idx, 0] = idx*blockSamples/samplingRate
            # Advance by slicing off the block just consumed.
            tmp = tmp[blockSamples:]
    return profile, timeStamp
@njit
def _start_sample_ISO3382(timeSignal, threshold) -> np.ndarray:
    """Find the sample where the impulse response effectively starts.

    Looks for the last sample before the IR peak whose level lies more than
    |threshold| dB below the peak, per the ISO 3382-1 truncation approach.
    Returns 0 when the SNR is too low to treat the signal as an IR.
    """
    squaredIR = timeSignal**2
    # assume the last 10% of the IR is noise, and calculate its noise level
    last10Idx = -int(len(squaredIR)//10)
    noiseLevel = np.mean(squaredIR[last10Idx:])
    # get the maximum of the signal, that is the assumed IR peak
    max_val = np.max(squaredIR)
    max_idx = np.argmax(squaredIR)
    # check if the SNR is enough to assume that the signal is an IR. If not,
    # the signal is probably not an IR, so it starts at sample 1
    idxNoShift = np.asarray([max_val < 100*noiseLevel or
                             max_idx > int(0.9*squaredIR.shape[0])])
    # less than 20dB SNR or in the "noisy" part
    if idxNoShift.any():
        print("noiseLevelCheck: The SNR too bad or this is not an " +
              "impulse response.")
        return 0
    # find the first sample that lies under the given threshold
    threshold = abs(threshold)
    startSample = 1
    # # TODO - envelope mar/pdi - check!
    # if idxNoShift:
    #     print("Something wrong!")
    #     return
    # if maximum lies on the first point, then there is no point in searching
    # for the beginning of the IR. Just return this position.
    if max_idx > 0:
        # Levels relative to the peak, in dB, over the pre-peak region only.
        abs_dat = 10*np.log10(squaredIR[:max_idx]) \
                  - 10.*np.log10(max_val)
        thresholdNotOk = True
        thresholdShift = 0
        # Relax the threshold 1 dB at a time until some sample satisfies it.
        while thresholdNotOk:
            if len(np.where(abs_dat < (-threshold+thresholdShift))[0]) > 0:
                lastBelowThreshold = \
                    np.where(abs_dat < (-threshold+thresholdShift))[0][-1]
                thresholdNotOk = False
            else:
                thresholdShift += 1
        if thresholdShift > 0:
            print("_start_sample_ISO3382: 20 dB threshold too high. " +
                  "Decreasing it.")
        if lastBelowThreshold > 0:
            startSample = lastBelowThreshold
        else:
            startSample = 1
    return startSample
@njit
def _circular_time_shift(timeSignal, threshold=20):
    """Trim the pre-arrival samples of an impulse response.

    Returns the trimmed signal and the number of samples removed.
    """
    # find the first sample where inputSignal level > 20 dB or > bgNoise level
    startSample = _start_sample_ISO3382(timeSignal, threshold)
    newTimeSignal = timeSignal[startSample:]
    return (newTimeSignal, startSample)
@njit
def _Lundeby_correction(band, timeSignal, samplingRate, numSamples,
                        numChannels, timeLength):
    """Estimate the EDC truncation point via the Lundeby et al. algorithm.

    Iteratively fits a late-decay regression line and intersects it with the
    estimated background-noise level to find the truncation sample.

    Returns:
        (c0, c1, interIdx, BGL): regression intercept and slope, truncation
        sample index, and the linear background-noise level. The zero-filled
        tuple is returned on any failure (low SNR, failed regression, ...).
    """
    returnTuple = (np.float32(0), np.float32(0), np.int32(0), np.float32(0))
    timeSignal, sampleShift = _circular_time_shift(timeSignal)
    if sampleShift is None:
        return returnTuple
    winTimeLength = 0.03  # 30 ms window
    numSamples -= sampleShift  # discount shifted samples
    numParts = 5  # number of parts per 10 dB decay. N = any([3, 10])
    dBtoNoise = 7  # stop point 7 dB above first estimated background noise
    useDynRange = 15  # dynamic range
    # 1) local time average:
    blockSamples = int(winTimeLength * samplingRate)
    timeWinData, timeVecWin = _level_profile(timeSignal, samplingRate,
                                             numSamples, numChannels,
                                             blockSamples)
    # 2) estimate noise from h^2_averaged(block):
    bgNoiseLevel = 10 * \
                   np.log10(
                       np.mean(timeWinData[-int(timeWinData.size/10):]))
    # 3) Calculate preliminary slope
    startIdx = np.argmax(np.abs(timeWinData/np.max(np.abs(timeWinData))))
    stopIdx = startIdx + np.where(10*np.log10(timeWinData[startIdx+1:])
                                  >= bgNoiseLevel + dBtoNoise)[0][-1]
    dynRange = 10*np.log10(timeWinData[stopIdx]) \
               - 10*np.log10(timeWinData[startIdx])
    if (stopIdx == startIdx) or (dynRange > -5)[0]:
        print(band, "[Hz] band: SNR too low for the preliminar slope",
              "calculation.")
        return returnTuple
    # X*c = EDC (energy decaying curve)
    X = np.ones((stopIdx-startIdx, 2), dtype=np.float32)
    X[:, 1] = timeVecWin[startIdx:stopIdx, 0]
    c = np.linalg.lstsq(X, 10*np.log10(timeWinData[startIdx:stopIdx]),
                        rcond=-1)[0]
    if (c[1] == 0)[0] or np.isnan(c).any():
        print(band, "[Hz] band: regression failed. T would be inf.")
        return returnTuple
    # 4) preliminary intersection
    crossingPoint = (bgNoiseLevel - c[0]) / c[1]  # [s]
    if (crossingPoint > 2*(timeLength + sampleShift/samplingRate))[0]:
        print(band, "[Hz] band: preliminary intersection point between",
              "bgNoiseLevel and the decay slope greater than signal length.")
        return returnTuple
    # 5) new local time interval length
    nBlocksInDecay = numParts * dynRange[0] / -10
    dynRangeTime = timeVecWin[stopIdx] - timeVecWin[startIdx]
    blockSamples = int(samplingRate * dynRangeTime[0] / nBlocksInDecay)
    # 6) average
    timeWinData, timeVecWin = _level_profile(timeSignal, samplingRate,
                                             numSamples, numChannels,
                                             blockSamples)
    oldCrossingPoint = 11+crossingPoint  # arbitrary higher value to enter loop
    loopCounter = 0
    # Iterate noise estimate / late-slope fit until the crossing converges.
    while (np.abs(oldCrossingPoint - crossingPoint) > 0.001)[0]:
        # 7) estimate background noise level (BGL)
        bgNoiseMargin = 7
        idxLast10Percent = int(len(timeWinData)-(len(timeWinData)//10))
        bgStartTime = crossingPoint - bgNoiseMargin/c[1]
        if (bgStartTime > timeVecWin[-1:][0])[0]:
            idx10dBDecayBelowCrossPoint = len(timeVecWin)-1
        else:
            idx10dBDecayBelowCrossPoint = \
                np.where(timeVecWin >= bgStartTime)[0][0]
        BGL = np.mean(timeWinData[np.min(
            np.array([idxLast10Percent,
                      idx10dBDecayBelowCrossPoint])):])
        bgNoiseLevel = 10*np.log10(BGL)
        # 8) estimate late decay slope
        stopTime = (bgNoiseLevel + dBtoNoise - c[0])/c[1]
        if (stopTime > timeVecWin[-1])[0]:
            stopIdx = 0
        else:
            stopIdx = int(np.where(timeVecWin >= stopTime)[0][0])
        startTime = (bgNoiseLevel + dBtoNoise + useDynRange - c[0])/c[1]
        if (startTime < timeVecWin[0])[0]:
            startIdx = 0
        else:
            startIdx = int(np.where(timeVecWin <= startTime)[0][0])
        lateDynRange = np.abs(10*np.log10(timeWinData[stopIdx]) \
                       - 10*np.log10(timeWinData[startIdx]))
        # where returns empty
        if stopIdx == startIdx or (lateDynRange < useDynRange)[0]:
            print(band, "[Hz] band: SNR for the Lundeby late decay slope too",
                  "low. Skipping!")
            # c[1] = np.inf
            c[1] = 0
            break
        X = np.ones((stopIdx-startIdx, 2), dtype=np.float32)
        X[:, 1] = timeVecWin[startIdx:stopIdx, 0]
        c = np.linalg.lstsq(X, 10*np.log10(timeWinData[startIdx:stopIdx]),
                            rcond=-1)[0]
        if (c[1] >= 0)[0]:
            print(band, "[Hz] band: regression did not work, T -> inf.",
                  "Setting slope to 0!")
            # c[1] = np.inf
            c[1] = 0
            break
        # 9) find crosspoint
        oldCrossingPoint = crossingPoint
        crossingPoint = (bgNoiseLevel - c[0]) / c[1]
        loopCounter += 1
        if loopCounter > 30:
            print(band, "[Hz] band: more than 30 iterations on regression.",
                  "Canceling!")
            break
    interIdx = crossingPoint * samplingRate  # [sample]
    return c[0][0], c[1][0], np.int32(interIdx[0]), BGL
@njit
def energy_decay_calculation(band, timeSignal, timeVector, samplingRate,
                             numSamples, numChannels, timeLength, bypassLundeby):
    """Calculate the Energy Decay Curve.

    Applies the Lundeby truncation/correction unless `bypassLundeby` is set,
    then computes the normalized Schroeder backward integral.

    Returns:
        (energyDecay, truncatedTimeVector, lundebyParams) where lundebyParams
        is the (c0, c1, interIdx, BGL) tuple from _Lundeby_correction.
    """
    if not bypassLundeby:
        lundebyParams = \
            _Lundeby_correction(band,
                                timeSignal,
                                samplingRate,
                                numSamples,
                                numChannels,
                                timeLength)
        _, c1, interIdx, BGL = lundebyParams
        # A zero slope means Lundeby failed; report no late reverberation.
        lateRT = -60/c1 if c1 != 0 else 0
    else:
        # BUG FIX: lundebyParams was previously left undefined on this path
        # but is returned unconditionally below, which raised an error
        # whenever bypassLundeby was True. Use the same zero-filled tuple
        # _Lundeby_correction returns on failure.
        lundebyParams = (np.float32(0), np.float32(0), np.int32(0),
                         np.float32(0))
        interIdx = 0
        lateRT = 1
    if interIdx == 0:
        interIdx = -1
    truncatedTimeSignal = timeSignal[:interIdx, 0]
    truncatedTimeVector = timeVector[:interIdx]
    if lateRT != 0.0:
        if not bypassLundeby:
            # Correction term for the truncated tail (ISO 3382-1 Annex-style).
            C = samplingRate*BGL*lateRT/(6*np.log(10))
        else:
            C = 0
        # Schroeder backward integration with the tail correction C.
        sqrInv = truncatedTimeSignal[::-1]**2
        energyDecayFull = np.cumsum(sqrInv)[::-1] + C
        energyDecay = energyDecayFull/energyDecayFull[0]
    else:
        print(band, "[Hz] band: could not estimate C factor")
        C = 0
        energyDecay = np.zeros(truncatedTimeVector.size)
    return (energyDecay, truncatedTimeVector, lundebyParams)
def cumulative_integration(inputSignal,
                           bypassLundeby,
                           plotLundebyResults,
                           **kwargs):
    """Cumulative integration with proper corrections.

    Filters `inputSignal` into fractional-octave bands and computes the
    energy decay curve for each band channel.

    Args:
        inputSignal: impulse-response SignalObj to analyse.
        bypassLundeby: skip the Lundeby truncation correction when True.
        plotLundebyResults: plot the Lundeby fit for each band when True.
        **kwargs: filter-bank parameters forwarded to _filter (must include
            'nthOct', 'minFreq' and 'maxFreq').

    Returns:
        List of (energyDecay, energyVector) tuples, one per band.
    """
    # NOTE(review): this closure reads band/timeVector/timeSignal/
    # samplingRate/lundebyParams from the enclosing loop below, so it must
    # only be called after they are assigned inside the loop.
    def plot_lundeby():
        c0, c1, interIdx, BGL = lundebyParams
        fig = plt.figure(figsize=(10, 5))
        ax = fig.add_axes([0.08, 0.15, 0.75, 0.8], polar=False,
                          projection='rectilinear', xscale='linear')
        line = c1*timeVector + c0
        ax.plot(timeVector, 10*np.log10(timeSignal**2),label='IR')
        ax.axhline(y=10*np.log10(BGL), color='#1f77b4', label='BG Noise')
        ax.plot(timeVector, line,label='Late slope')
        ax.axvline(x=interIdx/samplingRate, label='Truncation point')
        plt.title('{0:.0f} [Hz]'.format(band))
        ax.legend(loc='upper center', shadow=True, fontsize='x-large')
    timeSignal = inputSignal.timeSignal[:]
    # Substituted by SignalObj.crop in analyse function
    # timeSignal, sampleShift = _circular_time_shift(timeSignal)
    # del sampleShift
    hSignal = SignalObj(timeSignal,
                        inputSignal.lengthDomain,
                        inputSignal.samplingRate)
    hSignal = _filter(hSignal, **kwargs)
    # Center frequency of each filter band (one filtered channel per band).
    bands = FOF(nthOct=kwargs['nthOct'],
                freqRange=[kwargs['minFreq'],kwargs['maxFreq']])[:,1]
    listEDC = []
    for ch in range(hSignal.numChannels):
        signal = hSignal[ch]
        band = bands[ch]
        timeSignal = cp.copy(signal.timeSignal[:])
        timeVector = signal.timeVector[:]
        samplingRate = signal.samplingRate
        numSamples = signal.numSamples
        numChannels = signal.numChannels
        timeLength = signal.timeLength
        energyDecay, energyVector, lundebyParams = \
            energy_decay_calculation(band,
                                     timeSignal,
                                     timeVector,
                                     samplingRate,
                                     numSamples,
                                     numChannels,
                                     timeLength,
                                     bypassLundeby)
        listEDC.append((energyDecay, energyVector))
        if plotLundebyResults:  # Placed here because Numba can't handle plots.
            # plot_lundeby(band, timeVector, timeSignal, samplingRate,
            #              lundebyParams)
            plot_lundeby()
    return listEDC
@njit
def reverb_time_regression(energyDecay, energyVector, upperLim, lowerLim):
    """Interpolate the EDC to get the reverberation time.

    Fits a line to the EDC (in dB) between `upperLim` and `lowerLim` and
    extrapolates it to -60 dB. Returns 0 when the decay range is unusable.
    """
    if not np.any(energyDecay):
        return 0
    # Indices bounding the evaluation range on the dB-scaled decay curve.
    first = np.where(10*np.log10(energyDecay) >= upperLim)[0][-1]
    last = np.where(10*np.log10(energyDecay) >= lowerLim)[0][-1]
    if last <= first:
        # return np.nan
        return 0
    # Least-squares fit of level [dB] vs. time over the selected range.
    X = np.ones((last-first, 2))
    X[:, 1] = energyVector[first:last]
    c = np.linalg.lstsq(X, 10*np.log10(energyDecay[first:last]), rcond=-1)[0]
    # Time to decay 60 dB along the fitted slope.
    return -60/c[1]
def reverberation_time(decay, nthOct, samplingRate, listEDC):
"""Call the reverberation time regression."""
try:
decay = int(decay)
y1 = -5
y2 = y1 - decay
except ValueError:
if decay in ['EDT', 'edt']:
y1 = 0
y2 = -10
else:
raise | |
export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='PostShipmentUploadDetail', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('PostShipmentUploadDetail')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'PostShipmentUploadDetail':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PostShipmentUploadDetail')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='PostShipmentUploadDetail', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PostShipmentUploadDetail'):
    # Generated hook: this element type defines no XML attributes.
    pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='PostShipmentUploadDetail', fromsubclass_=False, pretty_print=True):
    """Write this element's child elements (TrackingNumber) to `outfile`."""
    if pretty_print:
        eol_ = '\n'
    else:
        eol_ = ''
    if self.TrackingNumber is not None:
        namespaceprefix_ = self.TrackingNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.TrackingNumber_nsprefix_) else ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%sTrackingNumber>%s</%sTrackingNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.TrackingNumber), input_name='TrackingNumber')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
    """Populate this object from an ElementTree/lxml `node` and return self."""
    self.gds_collector_ = gds_collector_
    if SaveElementTreeNode:
        self.gds_elementtree_node_ = node
    already_processed = set()
    self.ns_prefix_ = node.prefix
    self.buildAttributes(node, node.attrib, already_processed)
    for child in node:
        # Strip any namespace qualifier from the child tag name.
        nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
        self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
    return self
def buildAttributes(self, node, attrs, already_processed):
    # Generated hook: this element type defines no XML attributes to parse.
    pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
    """Parse one child element of PostShipmentUploadDetail."""
    if nodeName_ == 'TrackingNumber':
        value_ = child_.text
        value_ = self.gds_parse_string(value_, node, 'TrackingNumber')
        value_ = self.gds_validate_string(value_, node, 'TrackingNumber')
        self.TrackingNumber = value_
        self.TrackingNumber_nsprefix_ = child_.prefix
# end class PostShipmentUploadDetail
class TransactionDetail(GeneratedsSuper):
    """Generated XML binding for the TransactionDetail element.

    Holds a client-supplied CustomerTransactionId echoed back in replies,
    plus an optional Localization child. Code layout follows the generateDS
    conventions used throughout this module.
    """
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None
    def __init__(self, CustomerTransactionId=None, Localization=None, gds_collector_=None, **kwargs_):
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.CustomerTransactionId = CustomerTransactionId
        self.CustomerTransactionId_nsprefix_ = None
        self.Localization = Localization
        self.Localization_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been installed.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, TransactionDetail)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if TransactionDetail.subclass:
            return TransactionDetail.subclass(*args_, **kwargs_)
        else:
            return TransactionDetail(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors for namespace prefix and child element values.
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_CustomerTransactionId(self):
        return self.CustomerTransactionId
    def set_CustomerTransactionId(self, CustomerTransactionId):
        self.CustomerTransactionId = CustomerTransactionId
    def get_Localization(self):
        return self.Localization
    def set_Localization(self, Localization):
        self.Localization = Localization
    def hasContent_(self):
        # True when any child element value is set (element is non-empty).
        if (
            self.CustomerTransactionId is not None or
            self.Localization is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='TransactionDetail', pretty_print=True):
        """Serialize this object as XML to `outfile` at indent `level`."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('TransactionDetail')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'TransactionDetail':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TransactionDetail')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='TransactionDetail', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # Empty element: self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='TransactionDetail'):
        # Generated hook: this element type defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='TransactionDetail', fromsubclass_=False, pretty_print=True):
        """Write the child elements (CustomerTransactionId, Localization)."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.CustomerTransactionId is not None:
            namespaceprefix_ = self.CustomerTransactionId_nsprefix_ + ':' if (UseCapturedNS_ and self.CustomerTransactionId_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCustomerTransactionId>%s</%sCustomerTransactionId>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.CustomerTransactionId), input_name='CustomerTransactionId')), namespaceprefix_ , eol_))
        if self.Localization is not None:
            namespaceprefix_ = self.Localization_nsprefix_ + ':' if (UseCapturedNS_ and self.Localization_nsprefix_) else ''
            self.Localization.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Localization', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree/lxml `node`; return self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Generated hook: no XML attributes to parse for this element type.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Parse one child element of TransactionDetail."""
        if nodeName_ == 'CustomerTransactionId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'CustomerTransactionId')
            value_ = self.gds_validate_string(value_, node, 'CustomerTransactionId')
            self.CustomerTransactionId = value_
            self.CustomerTransactionId_nsprefix_ = child_.prefix
        elif nodeName_ == 'Localization':
            obj_ = Localization.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.Localization = obj_
            obj_.original_tagname_ = 'Localization'
# end class TransactionDetail
class UploadDocumentDetail(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, LineNumber=None, CustomerReference=None, DocumentType=None, FileName=None, DocumentContent=None, ExpirationDate=None, gds_collector_=None, **kwargs_):
    """Initialize an UploadDocumentDetail binding.

    ExpirationDate accepts either a date object or an ISO 'YYYY-MM-DD'
    string, which is parsed to a date. DocumentType is validated against
    the UploadDocumentType enumeration.
    """
    self.gds_collector_ = gds_collector_
    self.gds_elementtree_node_ = None
    self.original_tagname_ = None
    self.parent_object_ = kwargs_.get('parent_object_')
    self.ns_prefix_ = None
    self.LineNumber = LineNumber
    self.LineNumber_nsprefix_ = None
    self.CustomerReference = CustomerReference
    self.CustomerReference_nsprefix_ = None
    self.DocumentType = DocumentType
    self.validate_UploadDocumentType(self.DocumentType)
    self.DocumentType_nsprefix_ = None
    self.FileName = FileName
    self.FileName_nsprefix_ = None
    self.DocumentContent = DocumentContent
    self.DocumentContent_nsprefix_ = None
    # Accept an ISO-format string for convenience; store a date object.
    if isinstance(ExpirationDate, BaseStrType_):
        initvalue_ = datetime_.datetime.strptime(ExpirationDate, '%Y-%m-%d').date()
    else:
        initvalue_ = ExpirationDate
    self.ExpirationDate = initvalue_
    self.ExpirationDate_nsprefix_ = None
def factory(*args_, **kwargs_):
    """Create an UploadDocumentDetail, honoring any registered subclass."""
    if CurrentSubclassModule_ is not None:
        subclass = getSubclassFromModule_(
            CurrentSubclassModule_, UploadDocumentDetail)
        if subclass is not None:
            return subclass(*args_, **kwargs_)
    if UploadDocumentDetail.subclass:
        return UploadDocumentDetail.subclass(*args_, **kwargs_)
    else:
        return UploadDocumentDetail(*args_, **kwargs_)
factory = staticmethod(factory)
# Generated accessor pairs for the namespace prefix and child element values.
def get_ns_prefix_(self):
    return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
    self.ns_prefix_ = ns_prefix
def get_LineNumber(self):
    return self.LineNumber
def set_LineNumber(self, LineNumber):
    self.LineNumber = LineNumber
def get_CustomerReference(self):
    return self.CustomerReference
def set_CustomerReference(self, CustomerReference):
    self.CustomerReference = CustomerReference
def get_DocumentType(self):
    return self.DocumentType
def set_DocumentType(self, DocumentType):
    self.DocumentType = DocumentType
def get_FileName(self):
    return self.FileName
def set_FileName(self, FileName):
    self.FileName = FileName
def get_DocumentContent(self):
    return self.DocumentContent
def set_DocumentContent(self, DocumentContent):
    self.DocumentContent = DocumentContent
def get_ExpirationDate(self):
    return self.ExpirationDate
def set_ExpirationDate(self, ExpirationDate):
    self.ExpirationDate = ExpirationDate
def validate_UploadDocumentType(self, value):
    """Validate `value` against the UploadDocumentType XSD enumeration.

    Records any violation on the collector; returns False when invalid.
    Validation only runs when a collector is attached and the module-level
    Validate_simpletypes_ flag is set.
    """
    result = True
    # Validate type UploadDocumentType, a restriction on xs:string.
    if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
        if not isinstance(value, str):
            lineno = self.gds_get_node_lineno_()
            self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
            return False
        value = value  # no-op retained from the code generator's template
        enumerations = ['CERTIFICATE_OF_ORIGIN', 'COMMERCIAL_INVOICE', 'ETD_LABEL', 'NAFTA_CERTIFICATE_OF_ORIGIN', 'OTHER', 'PRO_FORMA_INVOICE']
        if value not in enumerations:
            lineno = self.gds_get_node_lineno_()
            self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on UploadDocumentType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
            result = False
    return result
def hasContent_(self):
if (
self.LineNumber is not None or
self.CustomerReference is not None or
self.DocumentType is not None or
self.FileName is not None or
self.DocumentContent is not None or
self.ExpirationDate is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='UploadDocumentDetail', pretty_print=True):
    """Serialize this object as XML to `outfile` at indent `level`."""
    imported_ns_def_ = GenerateDSNamespaceDefs_.get('UploadDocumentDetail')
    if imported_ns_def_ is not None:
        namespacedef_ = imported_ns_def_
    if pretty_print:
        eol_ = '\n'
    else:
        eol_ = ''
    if self.original_tagname_ is not None and name_ == 'UploadDocumentDetail':
        name_ = self.original_tagname_
    if UseCapturedNS_ and self.ns_prefix_:
        namespaceprefix_ = self.ns_prefix_ + ':'
    showIndent(outfile, level, pretty_print)
    outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
    already_processed = set()
    self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='UploadDocumentDetail')
    if self.hasContent_():
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='UploadDocumentDetail', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
    else:
        # Empty element: self-closing tag.
        outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='UploadDocumentDetail'):
    # Generated hook: this element type defines no XML attributes.
    pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='UploadDocumentDetail', fromsubclass_=False, pretty_print=True):
    """Write this element's child elements in schema order."""
    if pretty_print:
        eol_ = '\n'
    else:
        eol_ = ''
    if self.LineNumber is not None:
        namespaceprefix_ = self.LineNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.LineNumber_nsprefix_) else ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%sLineNumber>%s</%sLineNumber>%s' % (namespaceprefix_ , self.gds_format_integer(self.LineNumber, input_name='LineNumber'), namespaceprefix_ , eol_))
    if self.CustomerReference is not None:
        namespaceprefix_ = self.CustomerReference_nsprefix_ + ':' if (UseCapturedNS_ and self.CustomerReference_nsprefix_) else ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%sCustomerReference>%s</%sCustomerReference>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.CustomerReference), input_name='CustomerReference')), namespaceprefix_ , eol_))
    if self.DocumentType is not None:
        namespaceprefix_ = self.DocumentType_nsprefix_ + ':' if (UseCapturedNS_ and self.DocumentType_nsprefix_) else ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%sDocumentType>%s</%sDocumentType>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.DocumentType), input_name='DocumentType')), namespaceprefix_ , eol_))
    if self.FileName is not None:
        namespaceprefix_ = self.FileName_nsprefix_ + ':' if (UseCapturedNS_ and self.FileName_nsprefix_) else ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%sFileName>%s</%sFileName>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.FileName), input_name='FileName')), namespaceprefix_ , eol_))
    if self.DocumentContent is not None:
        namespaceprefix_ = self.DocumentContent_nsprefix_ + ':' if (UseCapturedNS_ and self.DocumentContent_nsprefix_) else ''
        showIndent(outfile, level, pretty_print)
        # Document payload is emitted base64-encoded.
        outfile.write('<%sDocumentContent>%s</%sDocumentContent>%s' % (namespaceprefix_ , self.gds_format_base64(self.DocumentContent, input_name='DocumentContent'), namespaceprefix_ , eol_))
    if self.ExpirationDate is not None:
        namespaceprefix_ = self.ExpirationDate_nsprefix_ + ':' if (UseCapturedNS_ and self.ExpirationDate_nsprefix_) else ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%sExpirationDate>%s</%sExpirationDate>%s' % (namespaceprefix_ , self.gds_format_date(self.ExpirationDate, input_name='ExpirationDate'), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
    """Populate this object from an ElementTree/lxml `node`; return self."""
    self.gds_collector_ = gds_collector_
    if SaveElementTreeNode:
        self.gds_elementtree_node_ = node
    already_processed = set()
    self.ns_prefix_ = node.prefix
    self.buildAttributes(node, node.attrib, already_processed)
    for child in node:
        # Strip any namespace qualifier from the child tag name.
        nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
        self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
    return self
def buildAttributes(self, node, attrs, already_processed):
    # Generated hook: no XML attributes to parse for this element type.
    pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'LineNumber' and child_.text:
| |
<reponame>chrhenning/snn_global_pattern_induction
#!/usr/bin/env python3
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
@title :recordings.py
@author :ch
@contact :<EMAIL>
@created :04/24/2017
@version :1.0
@python_version :3.5.2
This class takes care of recording state variables during simulation.
"""
import configuration as config
from util.config_exception import ConfigException
from util import utils
from pypatterns.singleton import Singleton
from pypatterns.observer import Observer
from simulation import Simulation
import brian2 as b2
import os
import _pickle as pickle
import matplotlib.pyplot as plt
import numpy as np
import logging
logger = logging.getLogger(config.logging_name)
class Recordings(Observer, metaclass=Singleton):
"""To understand the dynamics of a network, its internal state and
variables must be recordable. This class shall allow one to analyse the
dynamics during and after simulation according to the configs.
Under the hood, this class simply creates instances of the Brian classes
StateMonitor, PopulationRateMonitor and SpikeMonitor. However,
SpikeMonitors are not instantiated in this class due to efficiency
considerations. Moreover, the instantiation in the class NetworkModel is
modified. This introduces an unnecessary interweaving of recording code
with simulation code, but ensures that SpikeMonitors are only instantiated
once.
Attributes:
"""
def __init__(self, network):
    """Generate all recording objects and add them to the network.

    Note, SpikeMonitors have already been instantiated (in NetworkModel);
    they are only looked up and registered here.

    Args:
        network: An instance of class NetworkModel.

    Raises:
        ConfigException: If a configured recording refers to a
            non-existing layer, variable or neuron index.
    """
    super().__init__()

    self._network = network

    Recordings._check_state_var_recordings()
    Recordings._check_population_rate_recordings()
    Recordings._check_spike_event_recordings()

    # FIXME Following two methods are dirty and misplaced.
    # Check that chosen layers exist (cannot be done in static methods, as
    # network has to be known).
    def layer_exists(tup, layer):
        if not (0 <= layer < network.num_layers):
            raise ConfigException('Recording %s has non-existing layer.' \
                                  % (str(tup)))

    # Make sure, indices and vars exist in layer.
    def vars_exists(tup, layer, source, var, inds=None):
        if var is not None and isinstance(var, list):
            for v in var:
                if not hasattr(source, v):
                    # Log the available state variables to ease debugging a
                    # misconfigured recording (was a bare print() to stdout).
                    logger.error('Available state variables: %s' \
                                 % str(list(source.get_states().keys())))
                    raise ConfigException('Variable ' + v + ' does not ' \
                                          + 'exist from recording %s.' \
                                          % (str(tup)))
        if inds is not None and isinstance(inds, list):
            for i in inds:
                if not (0 <= i < source.N):
                    raise ConfigException('Recording %s cannot have ' \
                                          % (str(tup)) + 'index %d.' % (i))

    self._state_monitors = dict()
    self._pop_rate_monitors = dict()
    self._spike_monitors = dict()

    # State-variable recordings: attach one Brian2 StateMonitor per config
    # tuple (type, layer, variables, indices, dt, duration).
    for tup in config.state_var_recordings:
        typ, layer, var, inds, dt, _ = tup
        layer_exists(tup, layer)
        exn, inn, eis, ies, ees = network.brian_objects(layer)
        source = None
        if typ == 'ne':
            source = exn
        elif typ == 'ni':
            source = inn
        elif typ == 'ei':
            source = eis
        elif typ == 'ie':
            source = ies
        else:
            source = ees
        vars_exists(tup, layer, source, var, inds)
        # dt is configured in milliseconds; None selects the default clock.
        dt = dt * b2.ms if dt is not None else dt
        state_mon = b2.StateMonitor(source, var, inds, dt=dt)
        self._state_monitors[str(tup)] = state_mon
        network.add_component(state_mon)

    # Population-rate recordings: one PopulationRateMonitor per tuple.
    for tup in config.population_rate_recordings:
        typ, layer, _, _, _ = tup
        layer_exists(tup, layer)
        exn, inn, _, _, _ = network.brian_objects(layer)
        source = None
        if typ == 'ne':
            source = exn
        else:
            source = inn
        pop_rmon = b2.PopulationRateMonitor(source)
        self._pop_rate_monitors[str(tup)] = pop_rmon
        network.add_component(pop_rmon)

    # Spike-event recordings reuse the monitors created by NetworkModel.
    for tup in config.spike_event_recordings:
        typ, layer, var, _ = tup
        layer_exists(tup, layer)
        sp_mon = None
        if typ == 'ne':
            sp_mon = network.exc_spike_monitor(layer)
        else:
            sp_mon = network.inh_spike_monitor(layer)
        vars_exists(tup, layer, sp_mon.source, var)
        self._spike_monitors[str(tup)] = sp_mon

    # For online recordings, we need to know when the network state has
    # changed, so register as an observer of the Simulation singleton.
    if config.online_recording:
        sim = Simulation()
        sim.register(self)
def update(self, *args, **kwargs):
    """Observer callback for online recordings.

    TODO: In future, one could incrementally write recordings to a file.

    Args:
        *args: args[0] is the name of the notifying subject; only
            'Simulation' is expected.
        **kwargs: Subject-specific payload (e.g. 'curr_sim_time').

    Raises:
        ValueError: If notified by an unknown subject (programming error).
    """
    if args[0] == 'Simulation':
        # TODO online plotting of recordings.
        #print(kwargs['curr_sim_time'])
        pass
    else:
        # Was `assert(False)`: asserts are stripped under `python -O`,
        # so raise explicitly to surface the programming error.
        raise ValueError('Recordings.update() notified by unknown '
                         'subject: %s' % str(args))
"""
Static class attribute, that contains the attributes passed to
SpikeMonitors.
"""
_spike_monitor_args = None
def store_recordings(self):
"""Store the whole recordings made during simulation into files and
optionally into plots.
Args:
Returns:
"""
plt.close('all')
if not os.path.isdir(config.recording_dir):
os.makedirs(config.recording_dir)
### Handle StateMonitors.
for tup in config.state_var_recordings:
state_mon = self._state_monitors[str(tup)]
typ, layer, var, inds, dt, duration = tup
var_str = utils.list_to_str(var)
inds_str = '_'+str(inds) if isinstance(inds, bool) \
else utils.list_to_str(inds)
folder_name = 'state_monitor_%s_%d_vars%s_indices%s_%s_%d' \
% (typ, layer, var_str, inds_str, str(dt), duration)
folder_name = os.path.join(config.recording_dir, folder_name)
os.mkdir(folder_name)
logger.info("StateMonitor recordings %s are stored in %s." \
% (str(tup), folder_name))
dump_obj = dict()
dump_obj['type'] = typ
dump_obj['layer'] = layer
dump_obj['variables'] = var
dump_obj['indices'] = inds
dump_obj['dt'] = dt
dump_obj['recordings'] = dict()
recs = dump_obj['recordings']
recs['t'] = np.array(getattr(state_mon, 't_'))
for v in var:
recs[v] = np.array(getattr(state_mon, '%s_' % v))
assert(len(recs[v].shape) == 2)
# Store recordings in file.
dump_file = os.path.join(folder_name, 'recordings.pickle')
with open(dump_file, 'wb') as f:
pickle.dump(dump_obj, f)
# Generate recording plots.
if config.save_recording_plots:
# Note, that duration is in ms, but as recs['t'] is
# dimensionless, its values are interpretable as seconds.
slice_gen = utils.list_to_val_dependent_slices(recs['t'],
duration/1000)
# For each slice, a variables with all its recorded indices
# will be part of a plot (one plot for each duration and
# variable).
# Compute min and max to scale y-axis uniformly per var.
mins = dict()
maxs = dict()
for v in var:
mins[v] = np.min(recs[v])
maxs[v] = np.max(recs[v])
for sind, eind in slice_gen:
for v in var:
# Note, that inds might be boolean.
ind_labels = inds
if not isinstance(inds, list):
ind_labels = list(range(recs[v].shape[0]))
vunit = getattr(state_mon.source,v).unit
Recordings._plot_slice(recs['t'], recs[v], sind, eind,
v, ind_labels, str(tup), vunit,
folder=folder_name,
miny=mins[v], maxy=maxs[v])
### Handle PopulationRateMonitors.
for tup in config.population_rate_recordings:
prate_mon = self._pop_rate_monitors[str(tup)]
typ, layer, duration, swin, swidth = tup
folder_name = 'pop_rate_monitor_%s_%d_%d_%s_%s' \
% (typ, layer, duration, str(swin), str(swidth))
folder_name = os.path.join(config.recording_dir, folder_name)
os.mkdir(folder_name)
logger.info("PopulationRate recordings %s are stored in %s." \
% (str(tup), folder_name))
dump_obj = dict()
dump_obj['type'] = typ
dump_obj['layer'] = layer
dump_obj['t'] = np.array(getattr(prate_mon, 't_'))
dump_obj['rate'] = np.array(getattr(prate_mon, 'rate_'))
# Store recordings in file.
dump_file = os.path.join(folder_name, 'recordings.pickle')
with open(dump_file, 'wb') as f:
pickle.dump(dump_obj, f)
# Generate recording plots.
if config.save_recording_plots:
slice_gen = utils.list_to_val_dependent_slices(dump_obj['t'],
duration/1000)
if swin is not None:
rates = np.array(prate_mon.smooth_rate(swin, swidth*b2.ms))
else:
rates = dump_obj['rate']
# Compute min and max to scale y-axis uniformly for rates.
miny = np.min(rates)
maxy = np.max(rates)
for sind, eind in slice_gen:
Recordings._plot_slice(dump_obj['t'], rates, sind, eind,
'rate', None, str(tup), b2.Hz,
folder=folder_name, miny=miny,
maxy=maxy)
### Handle SpikeMonitors.
for tup in config.spike_event_recordings:
spike_mon = self._spike_monitors[str(tup)]
typ, layer, var, duration = tup
var_str = '_None' if var is None else utils.list_to_str(var)
folder_name = 'spike_monitor_%s_%d_vars%s_%d' \
% (typ, layer, var_str, duration)
folder_name = os.path.join(config.recording_dir, folder_name)
os.mkdir(folder_name)
logger.info("Spike recordings %s are stored in %s." \
% (str(tup), folder_name))
dump_obj = dict()
dump_obj['type'] = typ
dump_obj['layer'] = layer
dump_obj['recordings'] = dict()
recs = dump_obj['recordings']
recs['t'] = np.array(getattr(spike_mon, 't_'))
recs['i'] = np.array(getattr(spike_mon, 'i_'))
if var is not None:
for v in var:
recs[v] = np.array(getattr(spike_mon, '%s_' % v))
# Store recordings in file.
dump_file = os.path.join(folder_name, 'recordings.pickle')
with open(dump_file, 'wb') as f:
pickle.dump(dump_obj, f)
# Generate recording plots.
if config.save_recording_plots and len(recs['i']) > 0:
# We need to keep track of the time to scale the x-axis.
# etime = stime + duration/1000
stime = 0
slice_gen = utils.list_to_val_dependent_slices(recs['t'],
duration/1000)
# Compute min and max values to properly and uniformly color
# code vars.
if var is not None:
mins = dict()
maxs = dict()
for v in var:
mins[v] = np.min(recs[v])
maxs[v] = np.max(recs[v])
# We need to know the number of neurons, to set ymax.
ymin = -0.5
ymax = spike_mon.source.N - 0.5
for sind, eind in slice_gen:
minx = stime
maxx = minx + duration/1000
stime = maxx
# Plot pure spike events.
Recordings._scatter_slice(recs['t'], recs['i'], sind, eind,
minx, maxx, ymin, ymax, str(tup),
folder=folder_name)
if var is None:
continue
for v in var:
vunit = getattr(spike_mon,v).unit
Recordings._scatter_slice(recs['t'], recs['i'], sind,
eind, minx, maxx, ymin, ymax,
str(tup), var=recs[v],
var_min=mins[v],
var_max=maxs[v], var_name=v,
var_unit=vunit,
folder=folder_name)
elif len(recs['i']) == 0:
logger.warning('Could | |
Jersey'},
{'city': 'Barrington', 'state': 'New Hampshire'},
{'city': 'Barrington', 'state': 'Illinois'},
{'city': 'Barstow', 'state': 'California'},
{'city': 'Bartlesville', 'state': 'Oklahoma'},
{'city': 'Bartlett', 'state': 'Illinois'},
{'city': 'Bartlett', 'state': 'Tennessee'},
{'city': 'Barton', 'state': 'New York'},
{'city': 'Bartonville', 'state': 'Illinois'},
{'city': 'Bartow', 'state': 'Florida'},
{'city': 'Bastrop', 'state': 'Louisiana'},
{'city': 'Batavia', 'state': 'Illinois'},
{'city': 'Batavia', 'state': 'New York'},
{'city': 'Batesville', 'state': 'Mississippi'},
{'city': 'Batesville', 'state': 'Indiana'},
{'city': 'Batesville', 'state': 'Arkansas'},
{'city': 'Bath', 'state': 'Maine'},
{'city': 'Bath', 'state': 'New York'},
{'city': 'Baton Rouge', 'state': 'Louisiana'},
{'city': 'Battle Creek', 'state': 'Michigan'},
{'city': 'Battle Ground', 'state': 'Washington'},
{'city': 'Bay City', 'state': 'Texas'},
{'city': 'Bay City', 'state': 'Michigan'},
{'city': 'Bay Minette', 'state': 'Alabama'},
{'city': 'Bay Point', 'state': 'California'},
{'city': 'Bay Shore', 'state': 'New York'},
{'city': 'Bay St. Louis', 'state': 'Mississippi'},
{'city': 'Bay Village', 'state': 'Ohio'},
{'city': 'Bayonet Point', 'state': 'Florida'},
{'city': 'Bayonne', 'state': 'New Jersey'},
{'city': 'Bayou Cane', 'state': 'Louisiana'},
{'city': 'Bayport', 'state': 'New York'},
{'city': 'Bayshore Gardens', 'state': 'Florida'},
{'city': 'Baytown', 'state': 'Texas'},
{'city': 'Bayville', 'state': 'New York'},
{'city': 'Baywood', 'state': 'New York'},
{'city': 'Baywood-Los Osos', 'state': 'California'},
{'city': 'Beach Park', 'state': 'Illinois'},
{'city': 'Beachwood', 'state': 'Ohio'},
{'city': 'Beachwood', 'state': 'New Jersey'},
{'city': 'Beacon', 'state': 'New York'},
{'city': 'Beacon Square', 'state': 'Florida'},
{'city': 'Bear', 'state': 'Delaware'},
{'city': 'Beatrice', 'state': 'Nebraska'},
{'city': 'Beaufort', 'state': 'South Carolina'},
{'city': 'Beaumont', 'state': 'Texas'},
{'city': 'Beaumont', 'state': 'California'},
{'city': 'Beaver Dam', 'state': 'Wisconsin'},
{'city': 'Beaver Falls', 'state': 'Pennsylvania'},
{'city': 'Beavercreek', 'state': 'Ohio'},
{'city': 'Beaverton', 'state': 'Oregon'},
{'city': 'Beckett Ridge', 'state': 'Ohio'},
{'city': 'Beckley', 'state': 'West Virginia'},
{'city': 'Bedford', 'state': 'Virginia'},
{'city': 'Bedford', 'state': 'Texas'},
{'city': 'Bedford', 'state': 'Ohio'},
{'city': 'Bedford', 'state': 'New York'},
{'city': 'Bedford', 'state': 'New Hampshire'},
{'city': 'Bedford', 'state': 'Massachusetts'},
{'city': 'Bedford', 'state': 'Indiana'},
{'city': 'Bedford Heights', 'state': 'Ohio'},
{'city': 'Bee Ridge', 'state': 'Florida'},
{'city': 'Beech Grove', 'state': 'Indiana'},
{'city': 'Beecher', 'state': 'Michigan'},
{'city': 'Beekman', 'state': 'New York'},
{'city': 'Beeville', 'state': 'Texas'},
{'city': 'Bel Air', 'state': 'Maryland'},
{'city': 'Bel Air North', 'state': 'Maryland'},
{'city': 'Bel Air South', 'state': 'Maryland'},
{'city': 'Belchertown', 'state': 'Massachusetts'},
{'city': 'Belen', 'state': 'New Mexico'},
{'city': 'Belfast', 'state': 'Maine'},
{'city': 'Bell', 'state': 'California'},
{'city': 'Bell Gardens', 'state': 'California'},
{'city': 'Bella Vista', 'state': 'Arkansas'},
{'city': 'Bellair-Meadowbrook Terrace', 'state': 'Florida'},
{'city': 'Bellaire', 'state': 'Texas'},
{'city': 'Bellbrook', 'state': 'Ohio'},
{'city': 'Belle Chasse', 'state': 'Louisiana'},
{'city': 'Belle Glade', 'state': 'Florida'},
{'city': 'Belle Haven', 'state': 'Virginia'},
{'city': 'Bellefontaine', 'state': 'Ohio'},
{'city': 'Bellefontaine Neighbors', 'state': 'Missouri'},
{'city': 'Bellefonte', 'state': 'Pennsylvania'},
{'city': 'Belleville', 'state': 'New Jersey'},
{'city': 'Belleville', 'state': 'Illinois'},
{'city': 'Bellevue', 'state': 'Kentucky'},
{'city': 'Bellevue', 'state': 'Nebraska'},
{'city': 'Bellevue', 'state': 'Ohio'},
{'city': 'Bellevue', 'state': 'Pennsylvania'},
{'city': 'Bellevue', 'state': 'Wisconsin'},
{'city': 'Bellevue', 'state': 'Washington'},
{'city': 'Bellevue Town', 'state': 'Wisconsin'},
{'city': 'Bellflower', 'state': 'California'},
{'city': 'Bellingham', 'state': 'Massachusetts'},
{'city': 'Bellingham', 'state': 'Washington'},
{'city': 'Bellmawr', 'state': 'New Jersey'},
{'city': 'Bellmead', 'state': 'Texas'},
{'city': 'Bellmore', 'state': 'New York'},
{'city': 'Bellview', 'state': 'Florida'},
{'city': 'Bellwood', 'state': 'Illinois'},
{'city': 'Belmar', 'state': 'New Jersey'},
{'city': 'Belmont', 'state': 'Massachusetts'},
{'city': 'Belmont', 'state': 'Massachusetts'},
{'city': 'Belmont', 'state': 'New Hampshire'},
{'city': 'Belmont', 'state': 'North Carolina'},
{'city': 'Belmont', 'state': 'California'},
{'city': 'Beloit', 'state': 'Wisconsin'},
{'city': 'Beloit', 'state': 'Wisconsin'},
{'city': 'Belpre', 'state': 'Ohio'},
{'city': 'Belton', 'state': 'Missouri'},
{'city': 'Belton', 'state': 'Texas'},
{'city': 'Beltsville', 'state': 'Maryland'},
{'city': 'Belvedere Park', 'state': 'Georgia'},
{'city': 'Belvidere', 'state': 'Illinois'},
{'city': 'Bemidji', 'state': 'Minnesota'},
{'city': 'Benbrook', 'state': 'Texas'},
{'city': 'Bend', 'state': 'Oregon'},
{'city': 'Benicia', 'state': 'California'},
{'city': 'Bennettsville', 'state': 'South Carolina'},
{'city': 'Bennington', 'state': 'Vermont'},
{'city': 'Bennington', 'state': 'Vermont'},
{'city': 'Bennsville', 'state': 'Maryland'},
{'city': 'Bensenville', 'state': 'Illinois'},
{'city': 'Benton', 'state': 'Illinois'},
{'city': 'Benton', 'state': 'Arkansas'},
{'city': 'Benton Harbor', 'state': 'Michigan'},
{'city': 'Bentonville', 'state': 'Arkansas'},
{'city': 'Berea', 'state': 'Kentucky'},
{'city': 'Berea', 'state': 'Ohio'},
{'city': 'Berea', 'state': 'South Carolina'},
{'city': 'Bergenfield', 'state': 'New Jersey'},
{'city': 'Berkeley', 'state': 'Missouri'},
{'city': 'Berkeley', 'state': 'California'},
{'city': 'Berkeley Heights', 'state': 'New Jersey'},
{'city': 'Berkley', 'state': 'Michigan'},
{'city': 'Berkley', 'state': 'Colorado'},
{'city': 'Berlin', 'state': 'Connecticut'},
{'city': 'Berlin', 'state': 'New Jersey'},
{'city': 'Berlin', 'state': 'New Hampshire'},
{'city': '<NAME>', 'state': 'California'},
{'city': 'Bernalillo', 'state': 'New Mexico'},
{'city': 'Bernardsville', 'state': 'New Jersey'},
{'city': 'Berwick', 'state': 'Maine'},
{'city': 'Berwick', 'state': 'Pennsylvania'},
{'city': 'Berwyn', 'state': 'Illinois'},
{'city': 'Bessemer', 'state': 'Alabama'},
{'city': 'Bethalto', 'state': 'Illinois'},
{'city': 'Bethany', 'state': 'Oklahoma'},
{'city': 'Bethel', 'state': 'Connecticut'},
{'city': 'Bethel', 'state': 'Connecticut'},
{'city': 'Bethel Park', 'state': 'Pennsylvania'},
{'city': 'Bethesda', 'state': 'Maryland'},
{'city': 'Bethlehem', 'state': 'New York'},
{'city': 'Bethlehem', 'state': 'Pennsylvania'},
{'city': 'Bethpage', 'state': 'New York'},
{'city': 'Bettendorf', 'state': 'Iowa'},
{'city': 'Beverly', 'state': 'Massachusetts'},
{'city': 'Beverly Hills', 'state': 'Michigan'},
{'city': 'Beverly Hills', 'state': 'Florida'},
{'city': 'Beverly Hills', 'state': 'California'},
{'city': 'Bexley', 'state': 'Ohio'},
{'city': 'Biddeford', 'state': 'Maine'},
{'city': 'Big Flats', 'state': 'New York'},
{'city': 'Big Lake', 'state': 'Minnesota'},
{'city': 'Big Rapids', 'state': 'Michigan'},
{'city': 'Big Spring', 'state': 'Texas'},
{'city': 'Billerica', 'state': 'Massachusetts'},
{'city': 'Billings', 'state': 'Montana'},
{'city': 'Biloxi', 'state': 'Mississippi'},
{'city': 'Binghamton', 'state': 'New York'},
{'city': 'Birmingham', 'state': 'Michigan'},
{'city': 'Birmingham', 'state': 'Alabama'},
{'city': 'Bisbee', 'state': 'Arizona'},
{'city': 'Bismarck', 'state': 'North Dakota'},
{'city': 'Bixby', 'state': 'Oklahoma'},
{'city': 'Black Forest', 'state': 'Colorado'},
{'city': 'Black Jack', 'state': 'Missouri'},
{'city': 'Black Mountain', 'state': 'North Carolina'},
{'city': 'Blackfoot', 'state': 'Idaho'},
{'city': 'Blackhawk-Camino Tassajara', 'state': 'California'},
{'city': 'Blacklick Estates', 'state': 'Ohio'},
{'city': 'Blacksburg', 'state': 'Virginia'},
{'city': 'Blackstone', 'state': 'Massachusetts'},
{'city': 'Blackwell', 'state': 'Oklahoma'},
{'city': 'Bladensburg', 'state': 'Maryland'},
{'city': 'Blaine', 'state': 'Minnesota'},
{'city': 'Blair', 'state': 'Nebraska'},
{'city': 'Blakely', 'state': 'Pennsylvania'},
{'city': 'Bloomfield', 'state': 'New Jersey'},
{'city': 'Bloomfield', 'state': 'New Mexico'},
{'city': 'Bloomfield', 'state': 'Connecticut'},
{'city': 'Bloomfield Township', 'state': 'Michigan'},
{'city': 'Blooming Grove', 'state': 'New York'},
{'city': 'Bloomingdale', 'state': 'New Jersey'},
{'city': 'Bloomingdale', 'state': 'Florida'},
{'city': 'Bloomingdale', 'state': 'Illinois'},
{'city': 'Bloomingdale', 'state': 'Tennessee'},
{'city': 'Bloomington', 'state': 'Illinois'},
{'city': 'Bloomington', 'state': 'Indiana'},
{'city': 'Bloomington', 'state': 'California'},
{'city': 'Bloomington', 'state': 'Minnesota'},
{'city': 'Bloomsburg', 'state': 'Pennsylvania'},
{'city': 'Blue Ash', 'state': 'Ohio'},
{'city': 'Blue Bell', 'state': 'Pennsylvania'},
{'city': 'Blue Island', 'state': 'Illinois'},
{'city': 'Blue Springs', 'state': 'Missouri'},
{'city': 'Bluefield', 'state': 'West Virginia'},
{'city': 'Bluffton', 'state': 'Indiana'},
{'city': 'Blythe', 'state': 'California'},
{'city': 'Blytheville', 'state': 'Arkansas'},
{'city': 'Boardman', 'state': 'Ohio'},
{'city': 'Boaz', 'state': 'Alabama'},
{'city': 'Boca Del Mar', 'state': 'Florida'},
{'city': 'Boca Raton', 'state': 'Florida'},
{'city': 'Boerne', 'state': 'Texas'},
{'city': 'Bogalusa', 'state': 'Louisiana'},
{'city': 'Bogota', 'state': 'New Jersey'},
{'city': 'Bohemia', 'state': 'New York'},
{'city': 'Boise City', 'state': 'Idaho'},
{'city': 'Bolingbrook', 'state': 'Illinois'},
{'city': 'Bolivar', 'state': 'Missouri'},
{'city': 'Bon Air', 'state': 'Virginia'},
{'city': 'Bonadelle Ranchos-Madera Ranchos', 'state': 'California'},
{'city': 'Bonham', 'state': 'Texas'},
{'city': 'Bonita', 'state': 'California'},
{'city': 'Bonita Springs', 'state': 'Florida'},
{'city': 'Bonner Springs', 'state': 'Kansas'},
{'city': 'Bonney Lake', 'state': 'Washington'},
{'city': 'Boone', 'state': 'Iowa'},
{'city': 'Boone', 'state': 'North Carolina'},
{'city': 'Booneville', 'state': 'Mississippi'},
{'city': 'Boonton', 'state': 'New Jersey'},
{'city': 'Boonville', 'state': 'Missouri'},
{'city': 'Boonville', 'state': 'Indiana'},
{'city': 'Borger', 'state': 'Texas'},
{'city': 'Bossier City', 'state': 'Louisiana'},
{'city': 'Boston', 'state': 'Massachusetts'},
{'city': 'Boston', 'state': 'New York'},
{'city': 'Bostonia', 'state': 'California'},
{'city': 'Bothell', 'state': 'Washington'},
{'city': 'Boulder', 'state': 'Colorado'},
{'city': 'Boulder City', 'state': 'Nevada'},
{'city': 'Boulder Hill', 'state': 'Illinois'},
{'city': 'Bound Brook', 'state': 'New Jersey'},
{'city': 'Bountiful', 'state': 'Utah'},
{'city': 'Bourbonnais', 'state': 'Illinois'},
{'city': 'Bourne', 'state': 'Massachusetts'},
{'city': 'Bow', 'state': 'New Hampshire'},
{'city': 'Bowie', 'state': 'Maryland'},
{'city': 'Bowleys Quarters', 'state': 'Maryland'},
{'city': 'Bowling Green', 'state': 'Kentucky'},
{'city': 'Bowling Green', 'state': 'Ohio'},
{'city': 'Boxford', 'state': 'Massachusetts'},
{'city': 'Boyes Hot Springs', 'state': 'California'},
{'city': 'Boynton Beach', 'state': 'Florida'},
{'city': 'Bozeman', 'state': 'Montana'},
{'city': 'Bradenton', 'state': 'Florida'},
{'city': 'Bradford', 'state': 'Pennsylvania'},
{'city': 'Bradley', 'state': 'Illinois'},
{'city': 'Brainerd', 'state': 'Minnesota'},
{'city': 'Braintree', 'state': 'Massachusetts'},
{'city': 'Braintree', 'state': 'Massachusetts'},
{'city': 'Brandon', 'state': 'Mississippi'},
{'city': 'Brandon', 'state': 'Florida'},
{'city': 'Branford', 'state': 'Connecticut'},
{'city': 'Branson', 'state': 'Missouri'},
{'city': 'Brattleboro', 'state': 'Vermont'},
{'city': 'Brattleboro', 'state': 'Vermont'},
{'city': 'Brawley', 'state': 'California'},
{'city': 'Brazil', 'state': 'Indiana'},
{'city': 'Brea', 'state': 'California'},
{'city': 'Breaux Bridge', 'state': 'Louisiana'},
{'city': 'Brecksville', 'state': 'Ohio'},
{'city': 'Bremerton', 'state': 'Washington'},
{'city': 'Brenham', 'state': 'Texas'},
{'city': 'Brent', 'state': 'Florida'},
{'city': 'Brentwood', 'state': 'California'},
{'city': 'Brentwood', 'state': 'New York'},
{'city': 'Brentwood', 'state': 'Missouri'},
{'city': 'Brentwood', 'state': 'Pennsylvania'},
{'city': 'Brentwood', 'state': 'Tennessee'},
{'city': 'Brevard', 'state': 'North Carolina'},
{'city': 'Brewer', 'state': 'Maine'},
{'city': 'Brewster', 'state': 'Massachusetts'},
{'city': 'Briarcliff Manor', 'state': 'New York'},
{'city': 'Bridge City', 'state': 'Louisiana'},
{'city': 'Bridge | |
import asyncio
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union, cast
from asgiref.sync import async_to_sync
from django.apps import apps
from django.core.exceptions import ValidationError as DjangoValidationError
from mypy_extensions import TypedDict
from ..utils.cache import element_cache
from ..utils.validate import validate_html_permissive, validate_html_strict
from .exceptions import ConfigError, ConfigNotFound
from .models import ConfigStore
# Maps a ConfigVariable input_type to the Python type its value must have
# (or be convertible to) when set via ConfigHandler.__setitem__.
INPUT_TYPE_MAPPING = {
    "string": str,
    "text": str,
    "markupText": str,
    "integer": int,
    "boolean": bool,
    "choice": str,
    "colorpicker": str,
    "datetimepicker": int,
    "static": dict,
    "translations": list,
    "groups": list,
}

# Input types for which None is accepted as a value.
ALLOWED_NONE = ("datetimepicker",)

# Guards the lazy construction of ConfigHandler.key_to_id so that only one
# coroutine builds the index at a time.
build_key_to_id_lock = asyncio.Lock()
class ConfigHandler:
    """
    A simple object class to wrap the config variables. It is a container
    object. To get a config variable use x = config[...], to set it use
    config[...] = x.
    """

    def __init__(self) -> None:
        # Dict, that keeps all ConfigVariable objects. Has to be set at
        # startup. See the ready() method in openslides.core.apps.
        # (Annotation is quoted: ConfigVariable is defined later in this
        # module, and this annotation is evaluated at call time.)
        self.config_variables: Dict[str, "ConfigVariable"] = {}

        # Index to get the database id from a given config key.
        self.key_to_id: Optional[Dict[str, int]] = None

    def __getitem__(self, key: str) -> Any:
        """
        Returns the value of the config variable.

        Raises:
            ConfigNotFound: If the variable was never defined.
        """
        if not self.exists(key):
            raise ConfigNotFound(f"The config variable {key} was not found.")

        return async_to_sync(element_cache.get_element_data)(
            self.get_collection_string(), self.get_key_to_id()[key]
        )["value"]

    def get_key_to_id(self) -> Dict[str, int]:
        """
        Returns the key_to_id dict. Builds it, if it does not exist.
        """
        if self.key_to_id is None:
            async_to_sync(self.build_key_to_id)()
            self.key_to_id = cast(Dict[str, int], self.key_to_id)
        return self.key_to_id

    async def async_get_key_to_id(self) -> Dict[str, int]:
        """
        Like get_key_to_id but in an async context.
        """
        if self.key_to_id is None:
            await self.build_key_to_id()
            self.key_to_id = cast(Dict[str, int], self.key_to_id)
        return self.key_to_id

    async def build_key_to_id(self) -> None:
        """
        Build the key_to_id dict.

        This uses the element_cache. It expects that the config values are
        in the database before this is called.
        """
        async with build_key_to_id_lock:
            # Another worker could have built the dict while we waited for
            # the lock; check and return early.
            if self.key_to_id is not None:
                return

            config_full_data = await element_cache.get_collection_data(
                self.get_collection_string()
            )
            self.key_to_id = {
                element["key"]: element["id"]
                for element in config_full_data.values()
            }

    def exists(self, key: str) -> bool:
        """
        Returns True, if the config variable was defined.
        """
        return key in self.config_variables

    # TODO: Remove the Any by using right types in INPUT_TYPE_MAPPING.
    def __setitem__(self, key: str, value: Any) -> None:
        """
        Sets the new value. First it validates the input.

        Raises:
            ConfigNotFound: If the variable was never defined.
            ConfigError: If the value has a wrong datatype or fails one of
                the variable's validators.
        """
        # Check if the variable is defined.
        try:
            config_variable = self.config_variables[key]
        except KeyError:
            raise ConfigNotFound(f"The config variable {key} was not found.")

        # Validate datatype and run validators.
        expected_type = INPUT_TYPE_MAPPING[config_variable.input_type]

        # Try to convert value into the expected datatype.
        if value is None and config_variable.input_type not in ALLOWED_NONE:
            raise ConfigError(f"Got None for {key}")
        elif value is not None:
            try:
                value = expected_type(value)
            except (ValueError, TypeError):
                raise ConfigError(
                    f"Wrong datatype. Expected {expected_type}, got {type(value)}."
                )

        if config_variable.input_type == "choice":
            # Choices can be a callable. In this case call it at this place.
            if callable(config_variable.choices):
                choices = config_variable.choices()
            else:
                choices = config_variable.choices
            if choices is None or value not in map(
                lambda choice: choice["value"], choices
            ):
                raise ConfigError("Invalid input. Choice does not match.")

        if config_variable.input_type == "groups":
            # Imported here to avoid a circular import at module load time.
            from ..users.models import Group

            groups = set(group.id for group in Group.objects.all())
            if not groups.issuperset(set(value)):
                raise ConfigError("Invalid input. Chosen group does not exist.")

        for validator in config_variable.validators:
            try:
                validator(value)
            except DjangoValidationError as err:
                raise ConfigError(err.messages[0])

        if config_variable.input_type == "static":
            if not isinstance(value, dict):
                raise ConfigError("This has to be a dict.")
            whitelist = ("path", "display_name")
            for required_entry in whitelist:
                if required_entry not in value:
                    raise ConfigError(f"{required_entry} has to be given.")
                if not isinstance(value[required_entry], str):
                    raise ConfigError(f"{required_entry} has to be a string.")

        if config_variable.input_type == "translations":
            if not isinstance(value, list):
                raise ConfigError("Translations has to be a list.")
            for entry in value:
                if not isinstance(entry, dict):
                    raise ConfigError(
                        f"Every value has to be a dict, not {type(entry)}."
                    )
                whitelist = ("original", "translation")
                for required_entry in whitelist:
                    if required_entry not in entry:
                        raise ConfigError(f"{required_entry} has to be given.")
                    if not isinstance(entry[required_entry], str):
                        raise ConfigError(f"{required_entry} has to be a string.")

        if config_variable.input_type == "markupText":
            # The welcome text is the only markup value validated with the
            # permissive HTML rules; everything else is strict.
            if config_variable.name == "general_event_welcome_text":
                value = validate_html_permissive(value)
            else:
                value = validate_html_strict(value)

        # Save the new value to the database.
        db_value = ConfigStore.objects.get(key=key)
        db_value.value = value
        db_value.save()

        # Call on_change callback.
        if config_variable.on_change:
            config_variable.on_change()

    def collect_config_variables_from_apps(self) -> None:
        """
        Collects config variables from every installed app that provides
        a get_config_variables method.
        """
        for app in apps.get_app_configs():
            try:
                # Each app can deliver config variables when implementing the
                # get_config_variables method.
                get_config_variables = app.get_config_variables
            except AttributeError:
                # The app doesn't have this method. Continue to next app.
                continue
            self.update_config_variables(get_config_variables())

    def update_config_variables(self, items: Iterable["ConfigVariable"]) -> None:
        """
        Updates the config_variables dict.

        Raises:
            ConfigError: If a name in items is already registered.
        """
        # Build an index from variable name to the variable.
        item_index = dict((variable.name, variable) for variable in items)

        # Check that all ConfigVariables are unique. So no key from items can
        # be already in self.config_variables.
        intersection = set(item_index.keys()).intersection(self.config_variables.keys())
        if intersection:
            raise ConfigError(
                f"Too many values for config variables {intersection} found."
            )

        self.config_variables.update(item_index)

    def save_default_values(self) -> bool:
        """
        Saves the default values to the database. Does also build the
        dictionary key_to_id.

        Returns True, if something in the DB was changed.
        """
        self.key_to_id = {}
        altered_config = False
        for item in self.config_variables.values():
            try:
                db_value = ConfigStore.objects.get(key=item.name)
            except ConfigStore.DoesNotExist:
                db_value = ConfigStore()
                db_value.key = item.name
                db_value.value = item.default_value
                db_value.save(skip_autoupdate=True)
                altered_config = True
            self.key_to_id[db_value.key] = db_value.id
        return altered_config

    def increment_version(self) -> None:
        """
        Increments the config key "config_version"
        """
        db_value = ConfigStore.objects.get(key="config_version")
        db_value.value = db_value.value + 1
        db_value.save(skip_autoupdate=True)

    def cleanup_old_config_values(self) -> bool:
        """
        Deletes all config variables in the database, if the keys are not
        in key_to_id. This requires a fully built key_to_id!

        Returns True, if something in the DB was changed.
        """
        # Fixed: the original had an accidental duplicated assignment
        # (`key_to_id = key_to_id = cast(...)`).
        key_to_id = cast(Dict[str, int], self.key_to_id)
        queryset = ConfigStore.objects.exclude(key__in=key_to_id.keys())
        altered_config = queryset.exists()
        queryset.delete()
        return altered_config

    def get_collection_string(self) -> str:
        """
        Returns the collection_string from the CollectionStore.
        """
        return ConfigStore.get_collection_string()
config = ConfigHandler()
"""
Final entry point to get and set config variables. To get a config variable
use x = config[...], to set it use config[...] = x.
"""
T = TypeVar("T")
ChoiceType = Optional[Iterable[Dict[str, str]]]
ChoiceCallableType = Union[ChoiceType, Callable[[], ChoiceType]]
ValidatorsType = Iterable[Callable[[T], None]]
OnChangeType = Callable[[], None]
ConfigVariableDict = TypedDict(
"ConfigVariableDict",
{
"defaultValue": Any,
"inputType": str,
"label": str,
"helpText": str,
"choices": ChoiceType,
"weight": int,
"group": str,
"subgroup": Optional[str],
},
)
class ConfigVariable:
"""
A simple object class to wrap new config variables.
The keyword arguments 'name' and 'default_value' are required.
The keyword arguments 'input_type', 'label', 'help_text' and 'hidden'
are for rendering a HTML form element. The 'input_type is also used for
validation. If you set 'input_type' to 'choice' you have to provide
'choices', which is a list of dictionaries containing a value and a
display_name of every possible choice.
The keyword arguments 'weight', 'group' and 'subgroup' are for sorting
and grouping.
The keyword argument validators expects an iterable of validator
functions. Such a function gets the value and raises Django's
ValidationError if the value is invalid.
The keyword argument 'on_change' can be a callback which is called
every time, the variable is changed.
If the argument 'translatable' is set, OpenSlides is able to translate
the value during setup of the database if the admin uses the respective
command line option.
"""
def __init__(
self,
name: str,
default_value: T,
input_type: str = "string",
label: str = None,
help_text: str = None,
choices: ChoiceCallableType = None,
hidden: bool = False,
weight: int = 0,
group: str = "General",
subgroup: str = "General",
validators: ValidatorsType = None,
on_change: OnChangeType = None,
) -> None:
if input_type not in INPUT_TYPE_MAPPING:
raise ValueError("Invalid value for config attribute input_type.")
if input_type == "choice" and choices is None:
raise ConfigError(
"Either config attribute 'choices' must not be None or "
"'input_type' must not be 'choice'."
)
elif input_type != "choice" and choices is not None:
raise ConfigError(
"Either config attribute 'choices' must be None or "
"'input_type' must be 'choice'."
)
self.name = name
self.default_value = default_value
self.input_type = input_type
self.label = label or name
self.help_text = help_text or ""
self.choices = choices
self.hidden = hidden
self.weight = weight
self.group = group
self.subgroup = subgroup
| |
#!/usr/bin/python
"""
atom_energy_reporter.py
MIT License
Copyright (c) 2018
Weill Cornell Medicine, Memorial Sloan Kettering Cancer Center, and Authors
Authors:
<NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from openmmtools import testsystems
import mdtraj as md
import netCDF4
from netCDF4 import Dataset
import warnings
import time
# NOTE:
# - currently only the most common energy terms are implemented
# TODO:
# - implement AMOEBA forces
class AtomEnergyReporter(object):
"""
AtomEnergyReporter outputs information about every atom in a simulation,
including the energy breakdown, to a file.
to use it, create an AtomEnergyReporter, then add it to the list of reporters
of the list of the Simulation. by default the data is written in CSV format.
this module is written in order to implement the algorithm developed by
<NAME> and <NAME> in University of Cambridge.
this calculates the Eq.11 in the paper:
$$
u_{X_a} = \\
\frac12(u_{electrostatic} + u_{Lennard-Jones} + u_{bonded} + u_{Urey-Bradley}) \\
+ \frac13 u_{angle} \\
+ \frac14 u_{dihedral} + u_{improper}
$$
further data analysis is needed
ref:
https://pubs.acs.org/doi/abs/10.1021/acs.jctc.8b00027
"""
    def __init__(self, file_path, reportInterval, idxs = None):
        """
        create a AtomEnergyReporter

        parameters
        ----------
        file_path : str
            path of the netCDF4 file to write to
        reportInterval : int
            the interval (in steps) at which to write a report
        idxs : list of int, optional
            indices of the atoms to analyze; when None, the smallest
            molecule with more than four atoms is auto-detected on the
            first report (see find_small_mol)
        """
        self._reportInterval = reportInterval
        self.idxs = idxs
        # Dispatch table mapping an OpenMM force class name to its analysis
        # method; get_energy() looks forces up here by __class__.__name__.
        self.force_map = {
            'AmoebaAngleForce' : self.analyze_amoeba_angle_force,
            'AmoebaBondForce' : self.analyze_amoeba_bond_force,
            'AmoebaGeneralizedKirkwoodForce' : self.analyze_amoeba_generalized_kirkwood_force,
            'AmoebaInPlaneAngleForce' : self.analyze_amoeba_in_plane_angle_force,
            'AmoebaMultipoleForce' : self.analyze_amoeba_multipole_force,
            'AmoebaOutOfPlaneBendForce' : self.analyze_amoeba_out_of_plane_bend_force,
            'AmoebaPiTorsionForce' : self.analyze_amoeba_pi_torsion_force,
            'AmoebaStretchBendForce' : self.analyze_amoeba_stretch_bend_force,
            'AmoebaTorsionTorsionForce' : self.analyze_amoeba_torsion_torsion_force,
            'AmoebaVdwForce' : self.analyze_amoeba_vdw_force,
            'AmoebaWcaDispersionForce' : self.analyze_amoeba_wca_dispersion_force,
            'AndersenThermostat' : self.analyze_andersen_thermostat,
            'CMAPTorsionForce' : self.analyze_cmap_torsion_force,
            'CMMotionRemover' : self.analyze_cmm_motion_remover,
            'CustomAngleForce' : self.analyze_custom_angle_force,
            'CustomBondForce' : self.analyze_custom_bond_force,
            'CustomCVForce' : self.analyze_custom_cv_force,
            'CustomCentroidBondForce' : self.analyze_centroid_bond_force,
            'CustomCompoundBondForce' : self.analyze_custom_compound_bond_force,
            'CustomExternalForce' : self.analyze_custom_external_force,
            'CustomGBForce' : self.analyze_gb_force,
            'CustomHbondForce' : self.analyze_hbond_force,
            'CustomManyParticleForce' : self.analyze_custom_many_particle_force,
            'CustomNonbondedForce' : self.analyze_custom_nonbonded_force,
            'CustomTorsionForce' : self.analyze_custom_torsion_force,
            'DrudeForce' : self.analyze_drude_force,
            'GBSAOBCForce' : self.analyze_gbsaobc_force,
            'GayBerneForce' : self.analyze_gay_berne_force,
            'HarmonicAngleForce' : self.analyze_harmonic_angle_force,
            'HarmonicBondForce' : self.analyze_harmonic_bond_force,
            'MonteCarloAnisotropicBarostat' : self.analyze_monte_carlo_anisotropic_barostat,
            'MonteCarloBarostat' : self.analyze_monte_carlo_barostat,
            'MonteCarloMembraneBarostat' : self.analyze_monte_carlo_membrane_barostat,
            'NonbondedForce' : self.analyze_nonbonded_force,
            'PeriodicTorsionForce' : self.analyze_periodic_torsion_force,
            'RBTorsionForce' : self.analyze_rb_torsion_force,
            'RPMDMonteCarloBarostat' : self.analyze_rpmd_monte_carlo_barostat
        }
        # create a netCDF4 Dataset to record the energy
        self._out = Dataset(file_path ,'w')
        # unlimited "time" dimension; one entry is appended per report
        self._out.createDimension("time", None)
        times = self._out.createVariable("time", "i8", ("time",))
        times.unit = str(self._reportInterval)
        self.time = 0  # index of the next report along the "time" dimension
        # let the analyzer register for once
        self.registered = False
def describeNextReport(self, simulation):
"""
adopted from:
openmm/wrappers/python/simtk/openmm/app/statedatareporter.py
Get information about the next report this object will generate.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
Returns
-------
tuple
A five element tuple. The first element is the number of steps
until the next report. The remaining elements specify whether
that report will require positions, velocities, forces, and
energies respectively.
"""
steps = self._reportInterval - simulation.currentStep%self._reportInterval
return (steps, True, False, True, True)
    def report(self, simulation, state):
        """
        generate a report: append per-atom, per-force energies and the atom
        positions for the current frame to the netCDF dataset

        parameters
        ----------
        simulation : an OpenMM simulation object
        state : an OpenMM state object
        """
        # find the small molecule to analyze
        if self.registered == False: # if the system is not registered, register the system
            if self.idxs == None:
                # no atom indices supplied - auto-detect the small molecule
                self.find_small_mol(simulation, state)
            # set the attributes in Dataset
            self._out.description = 'record of an OpenMM run'
            self._out.history = 'created ' + time.ctime(time. time())
            # initialize the Dataset
            self._out.createDimension("atom", len(self.idxs))
            self._out.createVariable("atom", "i8", ("atom", ))
            atoms_name = ["idx = %s; mass = %s" % (idx, simulation.system.getParticleMass(idx)) for idx in self.idxs]
            self._out.setncattr('atoms_name', atoms_name)
            # get the forces
            self.forces = simulation.system.getForces()
            self.force_idx_mapping = [force for force in self.forces]
            forces_name = [force.__class__.__name__ for force in self.forces]
            self._out.setncattr('forces_name', forces_name)
            # create a force dimension, using idxs
            # and initialize the forces
            self._out.createDimension("force", len(self.forces))
            self._out.createVariable("force", "i8", ("force", ))
            # initialize the energy variable
            # that stands on the dimensions of: time, atom, and force
            self.energy_var = self._out.createVariable("energy", "f4", ("time", "atom", "force"))
            self.energy_var.units = 'kJ/mol'
            # keep a copy of all the positions
            self._out.createDimension("xyz", 3)
            self.pos_var = self._out.createVariable("pos", "f4", ("time", "atom", "xyz"))
            # keep a copy of the parameters of atoms
            param_array = np.zeros((len(self.idxs), 3))
            for force in self.forces:
                if force.__class__.__name__ == "NonbondedForce":
                    for idx in self.idxs:
                        charge, sigma, epsilon = force.getParticleParameters(idx)
                        param_array[idx, 0], param_array[idx, 1], param_array[idx, 2] = charge._value, sigma._value, epsilon._value
                        # note that the units here are: elementary charge, nanometer, kilojoule/mole
                        # NOTE(review): param_array rows are indexed by the absolute atom idx,
                        # which assumes self.idxs starts at 0 - confirm for offset molecules
            self._out.setncattr('param_array', param_array)
            # set the registered flag to True,
            # since you only need to do this once
            self.registered = True
        # point these objects to the class, and update them
        self.simulation = simulation
        self.state = state
        # get the positions of the small molecules
        self.pos = tuple([state.getPositions()[idx] for idx in self.idxs])
        pos_matrix = np.array([state.getPositions(asNumpy=True)[idx]._value for idx in self.idxs])
        self.pos_var[self.time, :, :] = pos_matrix
        # analyze each force in the system
        for force_idx, force in enumerate(self.force_idx_mapping):
            energy_dict = self.get_energy(force)
            if energy_dict == None:
                # force type without an energy breakdown - record nothing for it
                warnings.warn("no force information could be extracted from %s" % force.__class__.__name__)
                continue
            for atom_idx, energy in energy_dict.items():
                self.energy_var[self.time, atom_idx, force_idx] = energy._value
                # note that the unit here is kilojoule/mole
        # increase the time dimension by one
        self.time += 1
def find_small_mol(self, simulation, state):
"""
find the atoms of the smallest molecule, which is most likely to be
the region of greates interest for a simulation
parameters
----------
simulation : an OpenMM Simulation object
state : an OpenMM State object
returns
-------
atoms : a tuple of indicies of atoms that belongs to the small molecule
"""
context = simulation.context
mols = context.getMolecules()
small_mol = sorted([mol for mol in mols if len(mol) > 4],
key = lambda mol : len(mol), reverse = False)[0]
# register the atoms and idxs in the class
self.idxs = small_mol
return small_mol
def get_energy(self, force):
"""
anlyzes force and return the energy,
to be more specific, match the force with a certain type of analysis function
"""
name = str(force.__class__.__name__) # get the name of the force
energy_dict = self.force_map[name](force) # map the force to its specific analyze function and get the energy
return energy_dict
    #################################################
    # helper functions to calculate distances, angles,
    # and dihedral angles from positions of atoms
    #################################################
def dist(self, atom0, atom1):
"""
calculate the distance between two atoms
require that self.pos is defined
parameters
----------
atom0 : the idx of the first atom
atom1 : the idx of the second atom
returns
-------
dist : a float representing the distance between the two atoms
"""
pos0 = self.pos[atom0]
pos1 = self.pos[atom1]
dist = np.linalg.norm(pos0 - pos1)
return dist
def angle(self, center_atom, atom0, atom1):
"""
calculate the angle between bond:
center_atom -- atom0
and
center_atom -- atom1
$ cos(<v0, v1>) = (v0 \dot v1) / |v0||v1| $
parameters
----------
center_atom : the idx of the center atom
atom0 : the idx of the first atom involved in the angle
atom1 : the idx of the second atom involved in the angle
returns
-------
angle : the value of the angle in rads
"""
# get all the positions
pos_center = self.pos[center_atom]
pos0 = self.pos[atom0]
pos1 = self.pos[atom1]
# express the distance in vectors
v0 = np.array(pos0) - np.array(pos_center)
v1 = np.array(pos1) - np.array(pos_center)
# to calculate:
# $ cos(<v0, v1>) = (v0 \dot v1) / |v0||v1| $
v0_dot_v1 = np.dot(v0, v1)
v0_norm = np.linalg.norm(v0)
v1_norm = np.linalg.norm(v1)
angle = | |
import os
import logging
log = logging.getLogger("amaptor")
import arcpy
from amaptor.version_check import PRO, ARCMAP, mapping, mp
from amaptor.classes.map import Map
from amaptor.classes.layout import Layout
from amaptor.classes.map_frame import MapFrame
from amaptor.constants import _TEMPLATES, _PRO_BLANK_LAYOUT
from amaptor.functions import _import_mxd_to_new_pro_project
from amaptor.errors import *
class Project(object):
"""
An ArcGIS Pro Project or an ArcMap map document - maps in ArcGIS Pro and data frames in ArcMap are Map class attached to this project
Access to the underlying object is provided using name ArcGISProProject and ArcMapDocument
"""
    def __init__(self, path):
        """
        Opens (or converts and then opens) the project or map document at `path`.

        :param path: "CURRENT", a .aprx path, or a .mxd path. What is accepted depends on
            which ArcGIS generation is running: Pro accepts .mxd by converting it first;
            ArcMap cannot open .aprx at all.
        """
        self.maps = []  # stores list of included maps/dataframes
        self.layouts = []
        self.path = None  # will be set after any conversion to current version of ArcGIS is done (aprx->mxd or vice versa)
        self.map_document = None
        self.arcgis_pro_project = None
        self.primary_document = None  # will be an alias for either self.map_document or self.arcgis_pro_project depending on what we're working with - makes it easier for items where API isn't different
        # this conditional tree is getting a little beefy now - could probably be refactored
        if PRO:
            if path == "CURRENT":
                self.path = "CURRENT"  # will be redirected to actual path after setup
            elif path.endswith("aprx"):
                self.path = path
            elif path.endswith("mxd"):
                # transparently convert the map document into a new Pro project
                self.path = _import_mxd_to_new_pro_project(path)
            else:
                raise ValueError("Project or MXD path not recognized as an ArcGIS compatible file (.aprx or .mxd)")
            self._pro_setup()
        else:  # ArcMap
            if path == "CURRENT":
                self.path = "CURRENT"
                self._arcmap_setup()
            elif path.endswith("mxd"):
                self.path = path  # we'll overwrite once set up for "CURRENT"
                self._arcmap_setup()
            elif path.endswith("aprx"):
                # I need to find a way to create blank ArcGIS Pro projects here - may need to include one as a template to copy, but that seems silly/buggy.
                # planned approach is to create a Pro project in a temporary location, and import the map document provided.
                raise MapNotImplementedError("Support for Pro Projects in ArcMap is not possible. Please provide an MXD template to work with.")
            else:
                raise ValueError("Project or MXD path not recognized as an ArcGIS compatible file (.aprx or .mxd)")
        if path == "CURRENT":
            # now that the document is open, resolve "CURRENT" to the real on-disk path
            self.path = self.primary_document.filePath
def _pro_setup(self):
"""
Sets up the data based on the ArcGIS Pro Project. Only called if working with arcpy.mp and after any needed
conversion from Map Document to Pro Project is done.
:return: None
"""
self.arcgis_pro_project = mp.ArcGISProject(self.path)
self.primary_document = self.arcgis_pro_project
for l_map in self.arcgis_pro_project.listMaps():
self.maps.append(Map(self, l_map))
for layout in self.primary_document.listLayouts():
self.layouts.append(Layout(layout, self))
for map in self.maps:
map._index_frames()
def _arcmap_setup(self):
"""
Sets up data based on an ArcGIS Map Document. Only called if working with arcpy.mapping and after any
needed conversion from Pro Project to map docusment is done (can we go that way?)
:return: None
"""
self.map_document = mapping.MapDocument(self.path)
self.primary_document = self.map_document
for l_map in mapping.ListDataFrames(self.map_document):
self.maps.append(Map(self, l_map))
def list_maps(self):
"""
Provided to give a similar interface to ArcGIS Pro - Project.maps is also publically accessible
:return:
"""
return self.maps
def find_layer(self, path, find_all=True):
"""
Finds a layer in all maps by searching for the path. By default finds all, but can find just the first one too
:param path: the full path of the data source for the layer
:param find_all: When True, reutrns a list of amaptor.Map instances that match. When false, returns only the first match
:return: list of amaptor.map instances or a single amaptor.map instance.
"""
layers = []
for map in self.maps:
try:
new_layers = map.find_layer(path=path, find_all=find_all)
except LayerNotFoundError:
continue
if find_all: # if it didn't find any, we would have raised an exception, so we have something
layers += new_layers
else: # if we were only supposed to get one, return it
return new_layers
if len(layers) == 0:
raise LayerNotFoundError()
return layers
@property
def active_map(self):
"""
Returns the active map object or data frame as determined by get_active_map()
:return:
"""
return self.get_active_map()
@property
def map_names(self):
"""
A convenience function to get a list of map names
:return:
"""
return [l_map.name for l_map in self.maps]
@property
def default_geodatabase(self):
"""
Returns the Project's default geodatabase in Pro, and the current workspace (arcpy.env.workspace) in ArcMap.
If arcpy.env.workspace is None, creates a GDB in same folder as map document and returns that value, to ensure
that this function always returns a usable workspace. If a GDB is created, this function does NOT set arcpy.env.workspace
to that, so as not to interfere with other operations. Do that explicitly if that behavior is desired.
:return:
"""
if PRO:
return self.arcgis_pro_project.defaultGeodatabase
else:
if arcpy.env.workspace is not None:
return arcpy.env.workspace
else:
folder_path = os.path.split(self.path)[0]
name = "amaptor_default_gdb"
arcpy.CreateFileGDB_management(folder_path, name)
return os.path.join(folder_path, name)
@default_geodatabase.setter
def default_geodatabase(self, value):
"""
Sets the default geodatabase in Pro and sets arcpy.env.workspace in ArcMap
:param value:
:return:
"""
if PRO:
self.arcgis_pro_project.defaultGeodatabase = value
else:
arcpy.env.workspace = value
def find_map(self, name):
"""
Given a map name, returns the map object or raises MapNotFoundError
:param name: name of map to find.
:return: amaptor.Map instance
"""
for l_map in self.maps:
if l_map.name == name:
return l_map
else:
raise MapNotFoundError(name)
def check_map_name(self, name):
"""
Checks to see if the project or map document already has a map or data frame with a given name.
Since names must be unique in ArcGIS Pro, this code helps check before adding new maps
:param name: name of map to check for
:return: None. Raises an error if name is taken
"""
try:
self.find_map(name) # it finds one, then raise a MapExists error
raise MapExists(name)
except MapNotFoundError:
pass # it's great if it's not found
    def new_map(self, name, template_map=os.path.join(_TEMPLATES, "arcmap", "pro_import_map_template.mxd"), template_df_name="_rename_template_amaptor"):
        """
        PRO ONLY. Creates a new map in the current project using a hack (importing a blank map document, and renaming its data frame).
        Warning: Only works in Pro due to workaround. There isn't a way to add a data frame from arcpy.mapping.
        In the future, this could potentially work in arcmap by transparently working with a separate map document
        in the background (creating a project, map, and layout for those items and linking them into this project).

        :param name: The name to give the imported map
        :param template_map: The map document to import. If we're just going with a blank new map, leave as default. To
            import some other template as your base, provide a path to a document importable by ArcGIS Pro's
            .importDocument function for projects.
        :param template_df_name: The current name of the data frame in the imported map document, used to find and
            rename it. Only needs to be set if template_map is overridden.
        :return: amaptor.Map instance - also added to the project, but returned for immediate use.
        """
        if ARCMAP:
            raise MapNotImplementedError("ArcMap doesn't suppport adding data frames to map documents from Python")
        # refuse duplicate names up front - map names must be unique in Pro
        self.check_map_name(name)
        # step 1: import
        self.primary_document.importDocument(template_map, include_layout=False)
        # step 2: set up for amaptor and rename to match passed value
        for l_map in self.primary_document.listMaps():
            if l_map.name == template_df_name:
                l_map.name = name
                new_map = Map(self, l_map)
                self.maps.append(new_map)
                return new_map
        else:  # for/else: runs when no map matched template_df_name
            raise MapNotFoundError(template_df_name, "Map was inserted, but could not be found after insertion. If you provided a custom" \
                "template, check that the name you provided for template_df_name matches the name of " \
                "the data frame you want to use from the map document.")
def check_layout_name(self, name):
"""
PRO ONLY. Given the name of a layout, confirms it doesn't exist and raised amaptor.LayoutExists if it's found
:param name: the case sensitive name of an existing layout to find
:return: None. Raises amaptor.LayoutExists if layout with name exists.
"""
try:
self.find_layout(name) # it finds one, then raise a MapExists error
raise LayoutExists(name)
except LayoutNotFoundError:
pass # it's great if it's not found
def find_layout(self, name):
"""
PRO ONLY. Given a layout name, returns the amaptor.Layout object or raises LayoutNotFoundError
:param name: the name of the layout to find.
:return: amaptor.Layout instance with given name.
"""
for layout in self.layouts:
if layout.name == name:
return layout
else:
raise LayoutNotFoundError(name)
def new_layout(self, name, template_layout=_PRO_BLANK_LAYOUT, template_name="_pro_blank_layout_template"):
"""
PRO ONLY. Adds a new layout to an ArcGIS Pro Project by importing a saved blank layout. Alternatively,
you can provide an importable layout document (.pagx) for ArcGIS Pro, and then provide that layout's name
as template_name so that it can be renamed, and the provided template will be used instead of a blank.
:param name: The name to give the new layout
:param template_layout: The template to use for creating the layout (an ArcGIS Pro .pagx file).
If none is provided, uses a blank template
:param template_name: The name of the layout in the template. Only define this value if you also provide a new
template layout and the name should match the layout name in the template. This parameter is used to find
the inserted template and rename it. Strange things will happen if this value does not match the name of the
layout in the template_layout.
:return: amaptor.Layout instance. This layout will already have been added to the project, but is returned for
convenience.
"""
if ARCMAP:
raise MapNotImplementedError("ArcMap doesn't suppport adding data frames to map documents from Python")
# step 1: import
self.primary_document.importDocument(template_layout)
# step 2: set up for amaptor and rename to match passed value
for layout in self.primary_document.listLayouts():
if layout.name == template_name:
layout.name = name
new_layout = Layout(layout, self)
self.layouts.append(new_layout)
return new_layout
else:
raise LayoutNotFoundError("Layout was inserted, but could not be found after insertion. If you provided a custom" \
"template, check that the name you provided for template_name matches the name | |
<reponame>00schen/asha<filename>assistive-gym/assistive_gym/envs/util.py
import numpy as np
import pybullet as p
class Util:
def __init__(self, pid, np_random):
self.id = pid
self.ik_lower_limits = {}
self.ik_upper_limits = {}
self.ik_joint_ranges = {}
self.ik_rest_poses = {}
self.np_random = np_random
    def ik_random_restarts(self, body, target_joint, target_pos, target_orient, world_creation, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, best_ik_joints = None, ik_indices=range(29, 29+7), max_iterations=1000, max_ik_random_restarts=50, random_restart_threshold=0.01, half_range=False, step_sim=False, check_env_collisions=False):
        """
        Repeatedly solve IK with randomized rest poses until the link `target_joint`
        reaches target_pos (and target_orient, when given) within random_restart_threshold.

        Returns (True, joint_positions) on success, otherwise (False, best_joint_positions),
        where "best" is the attempt whose link position came closest to target_pos.
        Side effect: the robot's joints are left set to the returned configuration.
        """
        orient_orig = target_orient
        best_ik_distance = 0  # distance of the best attempt recorded so far
        for r in range(max_ik_random_restarts):
            target_joint_positions = self.ik(body, target_joint, target_pos, target_orient, mean_rest_pose=best_ik_joints, ik_indices=ik_indices, max_iterations=max_iterations, half_range=half_range)
            # apply the candidate configuration so we can measure where the link ends up
            world_creation.setup_robot_joints(body, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, randomize_joint_positions=False, default_positions=np.array(target_joint_positions), tool=None)
            if step_sim:
                # settle the simulation so contact information is meaningful
                for _ in range(5):
                    p.stepSimulation(physicsClientId=self.id)
                if len(p.getContactPoints(bodyA=body, bodyB=body, physicsClientId=self.id)) > 0 and orient_orig is not None:
                    # The robot's arm is in contact with itself. Continually randomize end effector orientation until a solution is found
                    target_orient = p.getQuaternionFromEuler(p.getEulerFromQuaternion(orient_orig, physicsClientId=self.id) + np.deg2rad(self.np_random.uniform(-45, 45, size=3)), physicsClientId=self.id)
            if check_env_collisions:
                for _ in range(25):
                    p.stepSimulation(physicsClientId=self.id)
            gripper_pos, gripper_orient = p.getLinkState(body, target_joint, computeForwardKinematics=True, physicsClientId=self.id)[:2]
            # success: position within threshold and, if requested, orientation within
            # threshold of the target quaternion or its negation (difference norm near 2)
            if np.linalg.norm(target_pos - np.array(gripper_pos)) < random_restart_threshold and (target_orient is None or np.linalg.norm(target_orient - np.array(gripper_orient)) < random_restart_threshold or np.isclose(np.linalg.norm(target_orient - np.array(gripper_orient)), 2, atol=random_restart_threshold)):
                return True, np.array(target_joint_positions)
            # track the closest attempt to fall back on if every restart fails
            # NOTE(review): when best_ik_joints is passed in, best_ik_distance starts at 0,
            # so the fallback is never replaced by a closer attempt - confirm this is intended
            if best_ik_joints is None or np.linalg.norm(target_pos - np.array(gripper_pos)) < best_ik_distance:
                best_ik_joints = target_joint_positions
                best_ik_distance = np.linalg.norm(target_pos - np.array(gripper_pos))
        world_creation.setup_robot_joints(body, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, randomize_joint_positions=False, default_positions=np.array(best_ik_joints), tool=None)
        return False, np.array(best_ik_joints)
    def ik_jlwki(self, body, target_joint, target_pos, target_orient, world_creation, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, ik_indices=range(29, 29+7), max_iterations=100, success_threshold=0.03, half_range=False, step_sim=False, check_env_collisions=False):
        """
        Single IK attempt (no random restarts): solve IK, apply the resulting joint
        configuration to the robot, then verify the link reached the target and the
        arm is not in self-contact.

        Returns (success, joint_positions); the IK joint positions are returned either way.
        """
        target_joint_positions = self.ik(body, target_joint, target_pos, target_orient, ik_indices=ik_indices, max_iterations=max_iterations, half_range=half_range)
        # apply the candidate configuration so we can measure where the link ends up
        world_creation.setup_robot_joints(body, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, randomize_joint_positions=False, default_positions=np.array(target_joint_positions), tool=None)
        if step_sim:
            # settle the simulation so contact reporting is meaningful
            for _ in range(5):
                p.stepSimulation(physicsClientId=self.id)
            if len(p.getContactPoints(bodyA=body, bodyB=body, physicsClientId=self.id)) > 0:
                # The robot's arm is in contact with itself.
                return False, np.array(target_joint_positions)
        if check_env_collisions:
            for _ in range(25):
                p.stepSimulation(physicsClientId=self.id)
        gripper_pos, gripper_orient = p.getLinkState(body, target_joint, computeForwardKinematics=True, physicsClientId=self.id)[:2]
        # success: position within threshold and, if an orientation was requested, the link
        # quaternion matches the target or its negation (difference norm near 2)
        if np.linalg.norm(target_pos - np.array(gripper_pos)) < success_threshold and (target_orient is None or np.linalg.norm(target_orient - np.array(gripper_orient)) < success_threshold or np.isclose(np.linalg.norm(target_orient - np.array(gripper_orient)), 2, atol=success_threshold)):
            return True, np.array(target_joint_positions)
        return False, np.array(target_joint_positions)
def ik(self, body, target_joint, target_pos, target_orient, mean_rest_pose=None, ik_indices=range(29, 29+7), max_iterations=1000, half_range=False):
key = '%d_%d' % (body, target_joint)
if key not in self.ik_lower_limits:
self.ik_lower_limits[key] = []
self.ik_upper_limits[key] = []
self.ik_joint_ranges[key] = []
self.ik_rest_poses[key] = []
j_names = []
for j in range(p.getNumJoints(body, physicsClientId=self.id)):
if p.getJointInfo(body, j, physicsClientId=self.id)[2] != p.JOINT_FIXED:
joint_info = p.getJointInfo(body, j, physicsClientId=self.id)
lower_limit = joint_info[8]
upper_limit = joint_info[9]
if lower_limit == 0 and upper_limit == -1:
lower_limit = -2*np.pi
upper_limit = 2*np.pi
self.ik_lower_limits[key].append(lower_limit)
self.ik_upper_limits[key].append(upper_limit)
if not half_range:
self.ik_joint_ranges[key].append(upper_limit - lower_limit)
else:
self.ik_joint_ranges[key].append((upper_limit - lower_limit)/2.0)
j_names.append([len(j_names)] + list(joint_info[:2]))
self.ik_rest_poses[key] = self.np_random.uniform(self.ik_lower_limits[key], self.ik_upper_limits[key])
if mean_rest_pose is not None:
self.ik_rest_poses[key][ik_indices] = np.clip(self.np_random.normal(mean_rest_pose,0.5),np.array(self.ik_lower_limits[key])[ik_indices], np.array(self.ik_upper_limits[key])[ik_indices])
self.ik_rest_poses[key] = self.ik_rest_poses[key].tolist()
if target_orient is not None:
ik_joint_poses = np.array(p.calculateInverseKinematics(body, target_joint, targetPosition=target_pos, targetOrientation=target_orient, lowerLimits=self.ik_lower_limits[key], upperLimits=self.ik_upper_limits[key], jointRanges=self.ik_joint_ranges[key], restPoses=self.ik_rest_poses[key], maxNumIterations=max_iterations, physicsClientId=self.id))
else:
ik_joint_poses = np.array(p.calculateInverseKinematics(body, target_joint, targetPosition=target_pos, lowerLimits=self.ik_lower_limits[key], upperLimits=self.ik_upper_limits[key], jointRanges=self.ik_joint_ranges[key], restPoses=self.ik_rest_poses[key], maxNumIterations=max_iterations, physicsClientId=self.id))
target_joint_positions = ik_joint_poses[ik_indices]
return target_joint_positions
def points_in_cylinder(self, pt1, pt2, r, q):
vec = pt2 - pt1
const = r * np.linalg.norm(vec)
return np.dot(q - pt1, vec) >= 0 and np.dot(q - pt2, vec) <= 0 and np.linalg.norm(np.cross(q - pt1, vec)) <= const
def point_on_capsule(self, p1, p2, radius, theta_range=(0, np.pi*2)):
'''
Pick a random point along the outer surface of a capsule (cylinder)
'''
# Pick a random point along the length of the capsule
axis_vector = p2 - p1
random_length = self.np_random.uniform(radius, np.linalg.norm(axis_vector))
# Normalize axis vector to unit length
axis_vector = axis_vector / np.linalg.norm(axis_vector)
ortho_vector = self.orthogonal_vector(axis_vector)
# Normalize orthogonal vector to unit length
ortho_vector = ortho_vector / np.linalg.norm(ortho_vector)
# Determine normal vector through cross product (this will be of unit length)
normal_vector = np.cross(axis_vector, ortho_vector)
# Pick a random rotation along the cylinder
theta = self.np_random.uniform(theta_range[0], theta_range[1])
point = p1 + random_length*axis_vector + radius*np.cos(theta)*ortho_vector + radius*np.sin(theta)*normal_vector
return point
def capsule_points(self, p1, p2, radius, distance_between_points=0.05):
'''
Creates a set of points around a capsule.
Check out: http://mathworld.wolfram.com/ConicalFrustum.html
and: http://math.stackexchange.com/questions/73237/parametric-equation-of-a-circle-in-3d-space
sphere = [x, y, z, r]
'''
points = []
p1, p2 = np.array(p1), np.array(p2)
axis_vector = p2 - p1
# Normalize axis vector to unit length
axis_vector = axis_vector / np.linalg.norm(axis_vector)
ortho_vector = self.orthogonal_vector(axis_vector)
# Normalize orthogonal vector to unit length
ortho_vector = ortho_vector / np.linalg.norm(ortho_vector)
# Determine normal vector through cross product (this will be of unit length)
normal_vector = np.cross(axis_vector, ortho_vector)
# Determine the section positions along the frustum at which we will create point around in a circular fashion
sections = int(np.linalg.norm(p2 - p1) / distance_between_points)
section_positions = [(p2 - p1) / (sections + 1) * (i + 1) for i in range(sections)]
for i, section_pos in enumerate(section_positions):
# Determine radius and circumference of this section
circumference = 2*np.pi*radius
# Determine the angle difference (in radians) between points
theta_dist = distance_between_points / radius
for j in range(int(circumference / distance_between_points)):
theta = theta_dist * j
# Determine cartesian coordinates for the point along the circular section of the frustum
point_on_circle = p1 + section_pos + radius*np.cos(theta)*ortho_vector + radius*np.sin(theta)*normal_vector
points.append(point_on_circle)
return points
def orthogonal_vector(self, v):
'''
Two Euclidean vectors are orthogonal if and only if their dot product is zero.
'''
# Find first element in v that is nonzero
m = np.argmax(np.abs(v))
y = np.zeros(len(v))
y[(m+1) % len(v)] = 1
return np.cross(v, y)
def line_intersects_triangle(self, p0, p1, p2, q0, q1):
    # Segment q0-q1 intersects triangle (p0, p1, p2) iff the endpoints lie
    # on opposite sides of the triangle's plane AND the crossing falls inside
    # the triangle (the three tetrahedra built from the segment and each
    # triangle edge all share the same orientation).
    # https://stackoverflow.com/questions/42740765/intersection-between-line-and-triangle-in-3d
    def signed_volume(a, b, c, d):
        return (1.0/6.0) * np.dot(np.cross(b-a, c-a), d-a)
    if np.sign(signed_volume(q0, p0, p1, p2)) == np.sign(signed_volume(q1, p0, p1, p2)):
        return False
    s1 = np.sign(signed_volume(q0, q1, p0, p1))
    s2 = np.sign(signed_volume(q0, q1, p1, p2))
    s3 = np.sign(signed_volume(q0, q1, p2, p0))
    return bool(s1 == s2 == s3)
def sleeve_on_arm_reward(self, triangle1_points, triangle2_points, human, hand_radius, elbow_radius, shoulder_radius):
shoulder_pos, shoulder_orient = p.getLinkState(human, 15, computeForwardKinematics=True, physicsClientId=self.id)[:2]
elbow_pos, elbow_orient = p.getLinkState(human, 17, computeForwardKinematics=True, physicsClientId=self.id)[:2]
wrist_pos, wrist_orient = p.getLinkState(human, 19, computeForwardKinematics=True, physicsClientId=self.id)[4:6]
# Use full length of arm, rather than from hand center to elbow center
wrist_pos, elbow_pos, shoulder_pos = np.array(wrist_pos), np.array(elbow_pos), np.array(shoulder_pos)
hand_end_pos = wrist_pos + (wrist_pos - elbow_pos) / np.linalg.norm(wrist_pos - elbow_pos) * hand_radius*2
elbow_end_pos = elbow_pos + (elbow_pos - wrist_pos) / np.linalg.norm(wrist_pos - elbow_pos) * elbow_radius
shoulder_end_pos = shoulder_pos + (shoulder_pos - elbow_pos) / np.linalg.norm(shoulder_pos - elbow_pos) * shoulder_radius
# Given the central axis of the arm, find the plane through the axis and one vector perpendicular to the axis
# and the plane through the axis and the second vector perpendicular to the other two.
# There must be points above and below both of these two planes
# https://math.stackexchange.com/questions/7931/point-below-a-plane
normal_forearm = hand_end_pos - elbow_end_pos
normal_forearm = normal_forearm / np.linalg.norm(normal_forearm)
# Normalized Tangent Vector, assumes arm axis not parallel to vector [1, 1, 0]
tangent_forearm = np.cross(np.array([1, 1, 0]), normal_forearm)
tangent_forearm = tangent_forearm / np.linalg.norm(tangent_forearm)
# Normalized Binormal_forearm or Bitangent_forearm vector
binormal_forearm = np.cross(tangent_forearm, normal_forearm)
binormal_forearm = binormal_forearm / np.linalg.norm(binormal_forearm)
# Check if at least one point exists above and below both planes
# v.dot(p - p0), p0 on plane, v is normal_forearm of a plane. v = tangent_forearm, v = binormal_forearm, p0 = elbow_end_pos
all_points = np.concatenate([triangle1_points, triangle2_points], axis=0)
tangent_forearm_points = np.dot(tangent_forearm, (all_points - elbow_end_pos).T)
binormal_forearm_points = np.dot(binormal_forearm, (all_points - elbow_end_pos).T)
points_above_below_forearm = np.any(tangent_forearm_points > 0) and np.any(tangent_forearm_points < 0) and np.any(binormal_forearm_points > 0) and np.any(binormal_forearm_points < 0)
normal_upperarm = elbow_end_pos - shoulder_end_pos
normal_upperarm = normal_upperarm / np.linalg.norm(normal_upperarm)
tangent_upperarm = np.cross(np.array([1, 1, 0]), normal_upperarm)
tangent_upperarm = tangent_upperarm / np.linalg.norm(tangent_upperarm)
binormal_upperarm = np.cross(tangent_upperarm, normal_upperarm)
binormal_upperarm = binormal_upperarm / np.linalg.norm(binormal_upperarm)
tangent_upperarm_points = np.dot(tangent_upperarm, (all_points - shoulder_end_pos).T)
binormal_upperarm_points = np.dot(binormal_upperarm, (all_points - shoulder_end_pos).T)
points_above_below_upperarm = np.any(tangent_upperarm_points > 0) and np.any(tangent_upperarm_points < 0) and np.any(binormal_upperarm_points > 0) and np.any(binormal_upperarm_points < 0)
# Check that the arm line segment intersects two different triangles defined by points around the sleeve.
# https://stackoverflow.com/questions/42740765/intersection-between-line-and-triangle-in-3d
forearm_intersects_triangle1 = self.line_intersects_triangle(triangle1_points[0], triangle1_points[1], triangle1_points[2], hand_end_pos, elbow_end_pos)
forearm_intersects_triangle2 = self.line_intersects_triangle(triangle2_points[0], triangle2_points[1], triangle2_points[2], hand_end_pos, elbow_end_pos)
upperarm_intersects_triangle1 = self.line_intersects_triangle(triangle1_points[0], triangle1_points[1], triangle1_points[2], elbow_end_pos, shoulder_end_pos)
upperarm_intersects_triangle2 = self.line_intersects_triangle(triangle2_points[0], triangle2_points[1], triangle2_points[2], elbow_end_pos, shoulder_end_pos)
sleeve_center = np.mean(all_points, axis=0)
distance_to_shoulder = np.linalg.norm(shoulder_end_pos - sleeve_center)
distance_to_elbow = np.linalg.norm(elbow_end_pos - sleeve_center)
distance_to_hand = np.linalg.norm(hand_end_pos - sleeve_center)
# Reward forward movement along the arm, away from the hand (pulling the sleeve onto the arm)
distance_along_forearm | |
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import os, argparse
import csv
from run1 import get_params_office_world, get_params_traffic_world, get_params_craft_world
def smooth(y, box_pts):
    """Box-filter smooth a 1-D reward curve.

    Applies a moving-average (box) filter of width ``box_pts`` and then
    flattens the last five samples to the value at index -6, suppressing
    the convolution's edge artifact at the end of the curve.

    Args:
        y: sequence of reward values; must contain at least 6 samples.
        box_pts: width of the box filter.

    Returns:
        numpy array of smoothed values, same length as ``y``.

    Fixes over the previous revision: the input list is no longer mutated
    (it used to gain five appended elements as a side effect), and the
    appended values — which were excluded again by ``y[0:-5]`` before the
    convolution — are no longer computed at all.
    """
    box = np.ones(box_pts) / box_pts
    # mode='same' keeps the output aligned with (and as long as) the input.
    y_smooth = np.convolve(y, box, mode='same')
    # The box filter is unreliable near the right boundary; clamp the tail.
    y_smooth[-5:] = y_smooth[-6]
    return y_smooth
def export_results_traffic_world(task_id, algorithm):
files = os.listdir("../plotdata/")
step_unit = get_params_traffic_world('../experiments/traffic/tests/ground_truth.txt')[0].num_steps
max_step = get_params_traffic_world('../experiments/traffic/tests/ground_truth.txt')[3].total_steps
steps = np.linspace(0, max_step, (max_step / step_unit) + 1, endpoint=True)
if task_id>0:
p25 = [0]
p50 = [0]
p75 = [0]
p25s = [0]
p50s = [0]
p75s = [0]
p25_q = [0]
p50_q = [0]
p75_q = [0]
p25_hrl = [0]
p50_hrl = [0]
p75_hrl = [0]
p25_dqn = [0]
p50_dqn = [0]
p75_dqn = [0]
files_of_interest = list()
for file in files:
if (("traffic" in file) and (".csv" in file) and (str(task_id) in file)):
files_of_interest.append(file)
for file in files_of_interest:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_q.append(np.percentile(row, 25))
p50_q.append(np.percentile(row, 50))
p75_q.append(np.percentile(row, 75))
elif 'hrl' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_hrl.append(np.percentile(row, 25))
p50_hrl.append(np.percentile(row, 50))
p75_hrl.append(np.percentile(row, 75))
elif 'dqn' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_dqn.append(np.percentile(row, 25))
p50_dqn.append(np.percentile(row, 50))
p75_dqn.append(np.percentile(row, 75))
elif 'rpni' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25.append(np.percentile(row, 25))
p50.append(np.percentile(row, 50))
p75.append(np.percentile(row, 75))
elif 'sat' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25s.append(np.percentile(row, 25))
p50s.append(np.percentile(row, 50))
p75s.append(np.percentile(row, 75))
fig, ax = plt.subplots()
fig.set_figheight(6)
fig.set_figwidth(8)
if algorithm == "jirprpni" or algorithm == "all":
p25 = smooth(p25, 5)
p50 = smooth(p50, 5)
p75 = smooth(p75, 5)
steps = np.linspace(0, (len(p25)-1) * step_unit, len(p25), endpoint=True)
plt.xlim(0, (len(p25)-1) * step_unit)
ax.plot(steps, p25, alpha=0)
ax.plot(steps, p50, color='black', label='JIRP RPNI')
ax.plot(steps, p75, alpha=0)
plt.fill_between(steps, p50, p25, color='black', alpha=0.25)
plt.fill_between(steps, p50, p75, color='black', alpha=0.25)
if algorithm == "jirpsat" or algorithm == "all":
p25s = smooth(p25s, 5)
p50s = smooth(p50s, 5)
p75s = smooth(p75s, 5)
steps = np.linspace(0, (len(p25s)-1) * step_unit, len(p25s), endpoint=True)
plt.xlim(0, (len(p25s) - 1) * step_unit)
ax.plot(steps, p25s, alpha=0)
ax.plot(steps, p50s, color='green', label='JIRP SAT')
ax.plot(steps, p75s, alpha=0)
plt.fill_between(steps, p50s, p25s, color='green', alpha=0.25)
plt.fill_between(steps, p50s, p75s, color='green', alpha=0.25)
if algorithm == "qlearning" or algorithm == "all":
p25_q = smooth(p25_q, 5)
p50_q = smooth(p50_q, 5)
p75_q = smooth(p75_q, 5)
steps = np.linspace(0, (len(p25_q)-1) * step_unit, len(p25_q), endpoint=True)
plt.xlim(0, (len(p25_q) - 1) * step_unit)
ax.plot(steps, p25_q, alpha=0)
ax.plot(steps, p50_q, color='red', label='QAS')
ax.plot(steps, p75_q, alpha=0)
plt.fill_between(steps, p50_q, p25_q, color='red', alpha=0.25)
plt.fill_between(steps, p50_q, p75_q, color='red', alpha=0.25)
if algorithm == "hrl" or algorithm == "all":
p25_hrl = smooth(p25_hrl, 5)
p50_hrl = smooth(p50_hrl, 5)
p75_hrl = smooth(p75_hrl, 5)
steps = np.linspace(0, (len(p25_hrl)-1) * step_unit, len(p25_hrl), endpoint=True)
plt.xlim(0, (len(p25_hrl) - 1) * step_unit)
ax.plot(steps, p25_hrl, alpha=0)
ax.plot(steps, p50_hrl, color='blue', label='HRL')
ax.plot(steps, p75_hrl, alpha=0)
plt.fill_between(steps, p50_hrl, p25_hrl, color='blue', alpha=0.25)
plt.fill_between(steps, p50_hrl, p75_hrl, color='blue', alpha=0.25)
if algorithm == "ddqn" or algorithm == "all":
p25_dqn = smooth(p25_dqn, 5)
p50_dqn = smooth(p50_dqn, 5)
p75_dqn = smooth(p75_dqn, 5)
steps = np.linspace(0, (len(p25_dqn)-1) * step_unit, len(p25_dqn), endpoint=True)
plt.xlim(0, (len(p25_dqn)-1) * step_unit)
ax.plot(steps, p25_dqn, alpha=0)
ax.plot(steps, p50_dqn, color='purple', label='D-DQN')
ax.plot(steps, p75_dqn, alpha=0)
plt.fill_between(steps, p50_dqn, p25_dqn, color='purple', alpha=0.25)
plt.fill_between(steps, p50_dqn, p75_dqn, color='purple', alpha=0.25)
ax.grid()
ax.set_xlabel('number of training steps', fontsize=22)
ax.set_ylabel('reward', fontsize=22)
plt.ylim(-0.1, 1.1)
if algorithm == "all":
plt.xlim(0,max_step)
plt.locator_params(axis='x', nbins=5)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
plt.gcf().subplots_adjust(bottom=0.15)
plt.gca().legend(('', 'JIRP RPNI', '', '', 'JIRP SAT', '', '', 'QAS', '', '', 'D-DQN','','','HRL', ''))
plt.legend(loc='upper right', bbox_to_anchor=(1, 0.8), prop={'size': 14})
ax.tick_params(axis='both', which='major', labelsize=22)
plt.savefig('../plotdata/figure.png', dpi=600)
plt.show()
else:
step = 0
p25dict = dict()
p50dict = dict()
p75dict = dict()
p25sdict = dict()
p50sdict = dict()
p75sdict = dict()
p25_qdict = dict()
p50_qdict = dict()
p75_qdict = dict()
p25_hrldict = dict()
p50_hrldict = dict()
p75_hrldict = dict()
p25_dqndict = dict()
p50_dqndict = dict()
p75_dqndict = dict()
p25 = list()
p50 = list()
p75 = list()
p25s = list()
p50s = list()
p75s = list()
p25_q = list()
p50_q = list()
p75_q = list()
p25_hrl = list()
p50_hrl = list()
p75_hrl = list()
p25_dqn = list()
p50_dqn = list()
p75_dqn = list()
p25dict[0] = [0,0,0,0]
p50dict[0] = [0,0,0,0]
p75dict[0] = [0,0,0,0]
p25sdict[0] = [0,0,0,0]
p50sdict[0] = [0,0,0,0]
p75sdict[0] = [0,0,0,0]
p25_qdict[0] = [0,0,0,0]
p50_qdict[0] = [0,0,0,0]
p75_qdict[0] = [0,0,0,0]
p25_hrldict[0] = [0,0,0,0]
p50_hrldict[0] = [0,0,0,0]
p75_hrldict[0] = [0,0,0,0]
p25_dqndict[0] = [0,0,0,0]
p50_dqndict[0] = [0,0,0,0]
p75_dqndict[0] = [0,0,0,0]
files_dict = dict()
for file in files:
if (("traffic" in file) and (".csv" in file)):
if "1" in file:
task = 1
if "2" in file:
task = 2
if "3" in file:
task = 3
if "4" in file:
task = 4
if task not in files_dict:
files_dict[task] = [file]
else:
files_dict[task].append(file)
for task in files_dict:
for file in files_dict[task]:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_qdict:
p25_qdict[step].append(np.percentile(row, 25))
p50_qdict[step].append(np.percentile(row, 50))
p75_qdict[step].append(np.percentile(row, 75))
else:
p25_qdict[step] = [np.percentile(row, 25)]
p50_qdict[step] = [np.percentile(row, 50)]
p75_qdict[step] = [np.percentile(row, 75)]
elif 'hrl' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_hrldict:
p25_hrldict[step].append(np.percentile(row, 25))
p50_hrldict[step].append(np.percentile(row, 50))
p75_hrldict[step].append(np.percentile(row, 75))
else:
p25_hrldict[step] = [np.percentile(row, 25)]
p50_hrldict[step] = [np.percentile(row, 50)]
p75_hrldict[step] = [np.percentile(row, 75)]
elif 'dqn' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_dqndict:
p25_dqndict[step].append(np.percentile(row, 25))
p50_dqndict[step].append(np.percentile(row, 50))
p75_dqndict[step].append(np.percentile(row, 75))
else:
p25_dqndict[step] = [np.percentile(row, 25)]
p50_dqndict[step] = [np.percentile(row, 50)]
p75_dqndict[step] = [np.percentile(row, 75)]
elif 'rpni' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25dict:
p25dict[step].append(np.percentile(row, 25))
p50dict[step].append(np.percentile(row, 50))
p75dict[step].append(np.percentile(row, 75))
else:
p25dict[step] = [np.percentile(row, 25)]
p50dict[step] = [np.percentile(row, 50)]
p75dict[step] = [np.percentile(row, 75)]
elif 'sat' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25sdict:
p25sdict[step].append(np.percentile(row, 25))
p50sdict[step].append(np.percentile(row, 50))
p75sdict[step].append(np.percentile(row, 75))
else:
p25sdict[step] = [np.percentile(row, 25)]
p50sdict[step] = [np.percentile(row, 50)]
p75sdict[step] = [np.percentile(row, 75)]
for step in steps:
if step in p25_qdict:
p25_q.append(sum(p25_qdict[step]) / len(p25_qdict[step]))
p50_q.append(sum(p50_qdict[step]) / len(p50_qdict[step]))
p75_q.append(sum(p75_qdict[step]) / len(p75_qdict[step]))
if step in p25_hrldict:
p25_hrl.append(sum(p25_hrldict[step]) / len(p25_hrldict[step]))
p50_hrl.append(sum(p50_hrldict[step]) / len(p50_hrldict[step]))
p75_hrl.append(sum(p75_hrldict[step]) / len(p75_hrldict[step]))
if step in p25dict:
p25.append(sum(p25dict[step]) / len(p25dict[step]))
p50.append(sum(p50dict[step]) / len(p50dict[step]))
p75.append(sum(p75dict[step]) / len(p75dict[step]))
if step in p25sdict:
p25s.append(sum(p25sdict[step]) / len(p25sdict[step]))
p50s.append(sum(p50sdict[step]) / len(p50sdict[step]))
p75s.append(sum(p75sdict[step]) / len(p75sdict[step]))
if step in p25_dqndict:
p25_dqn.append(sum(p25_dqndict[step]) / len(p25_dqndict[step]))
p50_dqn.append(sum(p50_dqndict[step]) / len(p50_dqndict[step]))
p75_dqn.append(sum(p75_dqndict[step]) / len(p75_dqndict[step]))
fig, ax = plt.subplots()
fig.set_figheight(6)
fig.set_figwidth(8)
if algorithm == "jirprpni" or algorithm == "all":
p25 = smooth(p25, 5)
p50 = smooth(p50, 5)
p75 = smooth(p75, 5)
steps = np.linspace(0, (len(p25) - 1) * step_unit, len(p25), endpoint=True)
plt.xlim(0, (len(p25) - 1) * step_unit)
ax.plot(steps, p25, alpha=0)
ax.plot(steps, p50, color='black', label='JIRP RPNI')
ax.plot(steps, p75, alpha=0)
plt.fill_between(steps, p50, p25, color='black', alpha=0.25)
plt.fill_between(steps, p50, p75, color='black', alpha=0.25)
if algorithm == "jirpsat" or algorithm == "all":
p25s = smooth(p25s, 5)
p50s = smooth(p50s, 5)
p75s = smooth(p75s, 5)
steps | |
# --- generateDS-style accessors for DistanceAndLocationDetail children ---
def get_Distance(self):
    """Return the Distance child element (or None if unset)."""
    return self.Distance
def set_Distance(self, Distance):
    """Set the Distance child element."""
    self.Distance = Distance
def get_ReservationAvailabilityDetail(self):
    """Return the ReservationAvailabilityDetail child element."""
    return self.ReservationAvailabilityDetail
def set_ReservationAvailabilityDetail(self, ReservationAvailabilityDetail):
    """Set the ReservationAvailabilityDetail child element."""
    self.ReservationAvailabilityDetail = ReservationAvailabilityDetail
def get_SupportedRedirectToHoldServices(self):
    """Return the list of SupportedRedirectToHoldServices values."""
    return self.SupportedRedirectToHoldServices
def set_SupportedRedirectToHoldServices(self, SupportedRedirectToHoldServices):
    """Replace the whole SupportedRedirectToHoldServices list."""
    self.SupportedRedirectToHoldServices = SupportedRedirectToHoldServices
def add_SupportedRedirectToHoldServices(self, value):
    """Append one value to the SupportedRedirectToHoldServices list."""
    self.SupportedRedirectToHoldServices.append(value)
def insert_SupportedRedirectToHoldServices_at(self, index, value):
    """Insert a value into SupportedRedirectToHoldServices at *index*."""
    self.SupportedRedirectToHoldServices.insert(index, value)
def replace_SupportedRedirectToHoldServices_at(self, index, value):
    """Overwrite the SupportedRedirectToHoldServices entry at *index*."""
    self.SupportedRedirectToHoldServices[index] = value
def get_LocationDetail(self):
    """Return the LocationDetail child element."""
    return self.LocationDetail
def set_LocationDetail(self, LocationDetail):
    """Set the LocationDetail child element."""
    self.LocationDetail = LocationDetail
def validate_SupportedRedirectToHoldServiceType(self, value):
    """Validate *value* against the SupportedRedirectToHoldServiceType XSD enum.

    Returns False and records a message on self.gds_collector_ when the
    value is not a str or not one of the enumerated carriers; otherwise
    True. Validation is skipped entirely (returns True) when value is
    None, the module-level Validate_simpletypes_ flag is falsy, or no
    collector is attached.
    """
    result = True
    # Validate type SupportedRedirectToHoldServiceType, a restriction on xs:string.
    if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
        if not isinstance(value, str):
            lineno = self.gds_get_node_lineno_()
            self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
            return False
        value = value
        enumerations = ['FEDEX_EXPRESS', 'FEDEX_GROUND', 'FEDEX_GROUND_HOME_DELIVERY']
        if value not in enumerations:
            lineno = self.gds_get_node_lineno_()
            self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on SupportedRedirectToHoldServiceType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
            result = False
    return result
def hasContent_(self):
    """Report whether any child element is populated.

    Used by export() to decide between a self-closing tag and an
    open/close tag pair.
    """
    has_children = (
        self.Distance is not None
        or self.ReservationAvailabilityDetail is not None
        or bool(self.SupportedRedirectToHoldServices)
        or self.LocationDetail is not None
    )
    return has_children
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DistanceAndLocationDetail', pretty_print=True):
    """Serialize this object as an XML element to *outfile* at indent *level*."""
    imported_ns_def_ = GenerateDSNamespaceDefs_.get('DistanceAndLocationDetail')
    if imported_ns_def_ is not None:
        namespacedef_ = imported_ns_def_
    if pretty_print:
        eol_ = '\n'
    else:
        eol_ = ''
    # A tag name captured at parse time overrides the default element name.
    if self.original_tagname_ is not None and name_ == 'DistanceAndLocationDetail':
        name_ = self.original_tagname_
    if UseCapturedNS_ and self.ns_prefix_:
        namespaceprefix_ = self.ns_prefix_ + ':'
    showIndent(outfile, level, pretty_print)
    outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
    already_processed = set()
    self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='DistanceAndLocationDetail')
    if self.hasContent_():
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='DistanceAndLocationDetail', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
    else:
        # No children: emit a self-closing element.
        outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='DistanceAndLocationDetail'):
    # DistanceAndLocationDetail declares no XML attributes.
    pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DistanceAndLocationDetail', fromsubclass_=False, pretty_print=True):
    """Serialize the child elements (Distance, ReservationAvailabilityDetail,
    SupportedRedirectToHoldServices*, LocationDetail) in schema order."""
    if pretty_print:
        eol_ = '\n'
    else:
        eol_ = ''
    if self.Distance is not None:
        namespaceprefix_ = self.Distance_nsprefix_ + ':' if (UseCapturedNS_ and self.Distance_nsprefix_) else ''
        self.Distance.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Distance', pretty_print=pretty_print)
    if self.ReservationAvailabilityDetail is not None:
        namespaceprefix_ = self.ReservationAvailabilityDetail_nsprefix_ + ':' if (UseCapturedNS_ and self.ReservationAvailabilityDetail_nsprefix_) else ''
        self.ReservationAvailabilityDetail.export(outfile, level, namespaceprefix_, namespacedef_='', name_='ReservationAvailabilityDetail', pretty_print=pretty_print)
    # Repeated simple-type element: one tag per list entry, XML-escaped.
    for SupportedRedirectToHoldServices_ in self.SupportedRedirectToHoldServices:
        namespaceprefix_ = self.SupportedRedirectToHoldServices_nsprefix_ + ':' if (UseCapturedNS_ and self.SupportedRedirectToHoldServices_nsprefix_) else ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%sSupportedRedirectToHoldServices>%s</%sSupportedRedirectToHoldServices>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(SupportedRedirectToHoldServices_), input_name='SupportedRedirectToHoldServices')), namespaceprefix_ , eol_))
    if self.LocationDetail is not None:
        namespaceprefix_ = self.LocationDetail_nsprefix_ + ':' if (UseCapturedNS_ and self.LocationDetail_nsprefix_) else ''
        self.LocationDetail.export(outfile, level, namespaceprefix_, namespacedef_='', name_='LocationDetail', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
    """Populate this object from an ElementTree *node*; returns self."""
    self.gds_collector_ = gds_collector_
    if SaveElementTreeNode:
        self.gds_elementtree_node_ = node
    already_processed = set()
    self.ns_prefix_ = node.prefix
    self.buildAttributes(node, node.attrib, already_processed)
    for child in node:
        # Strip any namespace from the tag before dispatching.
        nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
        self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
    return self
def buildAttributes(self, node, attrs, already_processed):
    # DistanceAndLocationDetail declares no XML attributes.
    pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
    """Deserialize one child element into the matching attribute."""
    if nodeName_ == 'Distance':
        obj_ = Distance.factory(parent_object_=self)
        obj_.build(child_, gds_collector_=gds_collector_)
        self.Distance = obj_
        obj_.original_tagname_ = 'Distance'
    elif nodeName_ == 'ReservationAvailabilityDetail':
        obj_ = ReservationAvailabilityDetail.factory(parent_object_=self)
        obj_.build(child_, gds_collector_=gds_collector_)
        self.ReservationAvailabilityDetail = obj_
        obj_.original_tagname_ = 'ReservationAvailabilityDetail'
    elif nodeName_ == 'SupportedRedirectToHoldServices':
        # Repeated simple-type element: parse, append, then validate the
        # freshly appended value against the XSD enumeration.
        value_ = child_.text
        value_ = self.gds_parse_string(value_, node, 'SupportedRedirectToHoldServices')
        value_ = self.gds_validate_string(value_, node, 'SupportedRedirectToHoldServices')
        self.SupportedRedirectToHoldServices.append(value_)
        self.SupportedRedirectToHoldServices_nsprefix_ = child_.prefix
        # validate type SupportedRedirectToHoldServiceType
        self.validate_SupportedRedirectToHoldServiceType(self.SupportedRedirectToHoldServices[-1])
    elif nodeName_ == 'LocationDetail':
        obj_ = LocationDetail.factory(parent_object_=self)
        obj_.build(child_, gds_collector_=gds_collector_)
        self.LocationDetail = obj_
        obj_.original_tagname_ = 'LocationDetail'
# end class DistanceAndLocationDetail
class Holiday(GeneratedsSuper):
    """A named holiday date (generateDS binding; Name + Date children)."""
    __hash__ = GeneratedsSuper.__hash__
    # No locally registered subclass/superclass; factory() may still
    # dispatch to a subclass registered via CurrentSubclassModule_.
    subclass = None
    superclass = None
    def __init__(self, Name=None, Date=None, gds_collector_=None, **kwargs_):
        """Build a Holiday; Date may be an ISO 'YYYY-MM-DD' string or a date."""
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.Name = Name
        self.Name_nsprefix_ = None
        # Accept Date either as an ISO string or a date object.
        if isinstance(Date, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Date, '%Y-%m-%d').date()
        else:
            initvalue_ = Date
        self.Date = initvalue_
        self.Date_nsprefix_ = None
    def factory(*args_, **kwargs_):
        """Instantiate Holiday or a registered subclass (generateDS hook)."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, Holiday)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if Holiday.subclass:
            return Holiday.subclass(*args_, **kwargs_)
        else:
            return Holiday(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        """Return the namespace prefix captured at parse time (or None)."""
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        """Set the namespace prefix used when exporting."""
        self.ns_prefix_ = ns_prefix
    def get_Name(self):
        """Return the holiday name."""
        return self.Name
    def set_Name(self, Name):
        """Set the holiday name."""
        self.Name = Name
    def get_Date(self):
        """Return the holiday date."""
        return self.Date
    def set_Date(self, Date):
        """Set the holiday date."""
        self.Date = Date
    def hasContent_(self):
        # True when at least one child element should be serialized.
        if (
            self.Name is not None or
            self.Date is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Holiday', pretty_print=True):
        """Serialize this object as an XML element to *outfile* at indent *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('Holiday')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # A tag name captured at parse time overrides the default element name.
        if self.original_tagname_ is not None and name_ == 'Holiday':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Holiday')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='Holiday', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing element.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Holiday'):
        # Holiday declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Holiday', fromsubclass_=False, pretty_print=True):
        """Serialize the Name and Date child elements."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Name is not None:
            namespaceprefix_ = self.Name_nsprefix_ + ':' if (UseCapturedNS_ and self.Name_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sName>%s</%sName>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Name), input_name='Name')), namespaceprefix_ , eol_))
        if self.Date is not None:
            namespaceprefix_ = self.Date_nsprefix_ + ':' if (UseCapturedNS_ and self.Date_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDate>%s</%sDate>%s' % (namespaceprefix_ , self.gds_format_date(self.Date, input_name='Date'), namespaceprefix_ , eol_))
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree *node*; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace from the tag before dispatching.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Holiday declares no XML attributes.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Deserialize one child element into the matching attribute."""
        if nodeName_ == 'Name':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Name')
            value_ = self.gds_validate_string(value_, node, 'Name')
            self.Name = value_
            self.Name_nsprefix_ = child_.prefix
        elif nodeName_ == 'Date':
            sval_ = child_.text
            dval_ = self.gds_parse_date(sval_)
            self.Date = dval_
            self.Date_nsprefix_ = child_.prefix
# end class Holiday
class LatestDropOffDetail(GeneratedsSuper):
"""Specifies the latest time by which a package can be dropped off at a
FedEx location."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, DayOfWeek=None, Time=None, Overlays=None, gds_collector_=None, **kwargs_):
    """Build a LatestDropOffDetail.

    Time may be given as an 'HH:MM:SS' string or a datetime.time object;
    Overlays defaults to a fresh list when omitted.
    """
    self.gds_collector_ = gds_collector_
    self.gds_elementtree_node_ = None
    self.original_tagname_ = None
    self.parent_object_ = kwargs_.get('parent_object_')
    self.ns_prefix_ = None
    self.DayOfWeek = DayOfWeek
    # Eagerly validate the enumerated day-of-week value.
    self.validate_DayOfWeekType(self.DayOfWeek)
    self.DayOfWeek_nsprefix_ = None
    # Accept Time either as an 'HH:MM:SS' string or a time object.
    if isinstance(Time, BaseStrType_):
        initvalue_ = datetime_.datetime.strptime(Time, '%H:%M:%S').time()
    else:
        initvalue_ = Time
    self.Time = initvalue_
    self.Time_nsprefix_ = None
    # Avoid sharing a mutable default list between instances.
    if Overlays is None:
        self.Overlays = []
    else:
        self.Overlays = Overlays
    self.Overlays_nsprefix_ = None
def factory(*args_, **kwargs_):
    """Instantiate LatestDropOffDetail or a registered subclass (generateDS hook)."""
    if CurrentSubclassModule_ is not None:
        subclass = getSubclassFromModule_(
            CurrentSubclassModule_, LatestDropOffDetail)
        if subclass is not None:
            return subclass(*args_, **kwargs_)
    if LatestDropOffDetail.subclass:
        return LatestDropOffDetail.subclass(*args_, **kwargs_)
    else:
        return LatestDropOffDetail(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
    """Return the namespace prefix captured at parse time (or None)."""
    return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
    """Set the namespace prefix used when exporting."""
    self.ns_prefix_ = ns_prefix
# --- generateDS-style accessors for LatestDropOffDetail children ---
def get_DayOfWeek(self):
    """Return the DayOfWeek value."""
    return self.DayOfWeek
def set_DayOfWeek(self, DayOfWeek):
    """Set the DayOfWeek value."""
    self.DayOfWeek = DayOfWeek
def get_Time(self):
    """Return the drop-off Time value."""
    return self.Time
def set_Time(self, Time):
    """Set the drop-off Time value."""
    self.Time = Time
def get_Overlays(self):
    """Return the list of Overlays values."""
    return self.Overlays
def set_Overlays(self, Overlays):
    """Replace the whole Overlays list."""
    self.Overlays = Overlays
def add_Overlays(self, value):
    """Append one value to the Overlays list."""
    self.Overlays.append(value)
def insert_Overlays_at(self, index, value):
    """Insert a value into Overlays at *index*."""
    self.Overlays.insert(index, value)
def replace_Overlays_at(self, index, value):
    """Overwrite the Overlays entry at *index*."""
    self.Overlays[index] = value
def validate_DayOfWeekType(self, value):
    """Validate *value* against the DayOfWeekType XSD enumeration.

    Returns False and records a message on self.gds_collector_ when the
    value is not a str or not a three-letter day code; otherwise True.
    Validation is skipped (returns True) when value is None, the
    module-level Validate_simpletypes_ flag is falsy, or no collector
    is attached.
    """
    result = True
    # Validate type DayOfWeekType, a restriction on xs:string.
    if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
        if not isinstance(value, str):
            lineno = self.gds_get_node_lineno_()
            self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
            return False
        value = value
        enumerations = ['FRI', 'MON', 'SAT', 'SUN', 'THU', 'TUE', 'WED']
        if value not in enumerations:
            lineno = self.gds_get_node_lineno_()
            self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on DayOfWeekType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
            result = False
    return result
def hasContent_(self):
    """True when at least one child (DayOfWeek, Time, Overlays) is set.

    Used by export() to decide between a self-closing tag and an
    open/close tag pair.
    """
    if self.DayOfWeek is not None:
        return True
    if self.Time is not None:
        return True
    if self.Overlays:
        return True
    return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='LatestDropOffDetail', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('LatestDropOffDetail')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'LatestDropOffDetail':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, | |
# routing_changes/hornet/hornet.py
#!/usr/bin/env python3
import argparse
import atexit
from collections import Counter, ChainMap, defaultdict
import datetime
import functools
import hashlib
import ipaddress
import itertools
import logging
import random
import re
import sys
import ujson
from tempest import sample_generation
from boundary_search import boundary_search
import caida_routeviews
import persistent_lru
import prefix_to_as
import ripe_atlas
import serial
# Regex alternation used to split AS-set / confederation strings into
# individual ASNs (presumably consumed by split_asn_set — defined elsewhere;
# confirm).
ASN_DELIM_RE = ",|_"
# Sentinel for a traceroute hop whose ASN could not be resolved.
BAD_RESOLVE = "*"
# Column header for the per-(measurement, probe) output rows.
HORNET_HDR = ("MSM_ID", "PROBE_ID", "T0", "T1", "T0_OBS", "T1_OBS",
              "#_ALL_PROBES", "#_ALL_PFX", "#_ALL_ADDR",
              "PCT_ALL_ADDR", "NUM_ALL_ASES",
              "#_VALID_PROBES", "#_VALID_PFX", "#_VALID_ADDR",
              "PCT_VALID_ADDR", "NUM_VALID_ASES",
              "#_PRV_PROBE", "#_PRV_PFX", "#_PRV_ADDR",
              "PCT_PRV_ADDR", "NUM_PRV_ASES",
              "#_ITC_PROBES", "#_ITC_PFX", "#_ITC_ADDR",
              "PCT_ITC_ADDR", "NUM_ITC_ASES")
def all_known_probes(msm_id, datetimes):
    """Return (probe_id, datetime) pairs for every probe with a cached
    path at each of the given datetimes."""
    return [(probe, dt)
            for dt in datetimes
            for probe in probe_paths_cached(msm_id, dt).keys()]
def all_probes_with_valid_obs(msm_id, observation_fn, observation_valid_fn,
                              datetimes):
    """Return (probe, datetime) pairs whose path observation passes the
    caller-supplied validity check."""
    valid_probes = []
    for dt in datetimes:
        for probe, path in probe_paths_cached(msm_id, dt).items():
            observation = observation_fn(path)
            if observation_valid_fn(observation):
                valid_probes.append((probe, dt))
    return valid_probes
def analysis_interval_points(start_datetime, end_datetime, tick_width, msm_id):
    """Measurement-aligned datetimes from start to end, stepped by tick_width."""
    assert start_datetime <= end_datetime
    points = [start_datetime]
    candidate = round_datetime_to_interval(start_datetime + tick_width, msm_id)
    while candidate <= end_datetime:
        points.append(candidate)
        candidate = round_datetime_to_interval(candidate + tick_width, msm_id)
    return points
def anonymity_set(msm_id, observation, observation_fn, observation_eq_fn, dt):
    """Probes at dt whose observation is indistinguishable from `observation`."""
    snapshot = probe_paths_cached(msm_id, dt)
    return {
        (probe_id, dt)
        for probe_id, path in snapshot.items()
        if observation_eq_fn(observation_fn(path), observation)
    }
def anonymity_set_wide(msm_id, observation, observation_fn, observation_eq_fn,
                       datetimes):
    """Union of per-tick anonymity sets across all of `datetimes`."""
    combined = set()
    for when in datetimes:
        combined |= anonymity_set(msm_id, observation, observation_fn,
                                  observation_eq_fn, when)
    return combined
def asns_are_indistinguishable(x, y):
    """True when ASN strings x and y could refer to the same AS.

    Unresolved hops (BAD_RESOLVE, i.e. "*") match only each other;
    otherwise the (possibly compound) ASN sets must intersect.
    """
    # Fix: compare against the None singleton with `is`, not `==`
    # (PEP 8; `==` can be hijacked by a custom __eq__).
    if x is None or y is None:
        return False
    if x == BAD_RESOLVE and y == BAD_RESOLVE:
        return True
    x_set = split_asn_set(x)
    y_set = split_asn_set(y)
    # Indistinguishable when the candidate-ASN sets share any member.
    return len(x_set.intersection(y_set)) > 0
def asn_is_unambig(asn):
    """True when `asn` is a non-empty, resolved ASN string."""
    if not asn:  # None or empty string
        return False
    return asn != BAD_RESOLVE
def common_probes(probe_paths_t1, probe_paths_t2):
    """Sorted list of probe ids present in both snapshot dicts."""
    shared = probe_paths_t1.keys() & probe_paths_t2.keys()
    return sorted(shared)
def common_probe_locations(msm_id, analysis_interval):
    """Per-probe sequence of network locations over the interval.

    Only probes present at both the first and last tick are tracked;
    ticks where a tracked probe is missing contribute nothing.
    """
    first_snapshot = probe_paths_cached(msm_id, analysis_interval[0])
    last_snapshot = probe_paths_cached(msm_id, analysis_interval[-1])
    tracked = common_probes(first_snapshot, last_snapshot)
    locations = defaultdict(list)
    for when in analysis_interval:
        snapshot = probe_paths_cached(msm_id, when)
        for probe_id in tracked:
            if probe_id in snapshot:
                locations[probe_id].append(snapshot[probe_id][0])
    return locations
# Module-level slot for the cached variant of common_probe_locations();
# populated by define_common_probe_locations_cached() at startup.
common_probe_locations_cached = None
def datetimes_from_stride(dt, stride_seconds, num_strides, stride_factor=1):
    """`dt` followed by num_strides offsets of stride_seconds*stride_factor.

    A negative stride_factor walks backwards in time.
    """
    offsets = [
        datetime.timedelta(seconds=stride_seconds * k * stride_factor)
        for k in range(1, num_strides + 1)
    ]
    return [dt] + [dt + off for off in offsets]
def define_common_probe_locations_cached(cache):
    """Install the persistent-LRU wrapper for common_probe_locations()."""
    global common_probe_locations_cached

    def key_fn(msm_id, analysis_interval):
        digest = hashlib.md5(
            ujson.dumps(analysis_interval).encode('utf-8')).hexdigest()
        return "{} - {}".format(msm_id, digest)

    common_probe_locations_cached = persistent_lru.add_persistent_caching(
        common_probe_locations, "common_probe_locations", key_fn, cache, False)
def define_hornet_obs_seqs_cached(cache):
    """Install the persistent-LRU wrapper for hornet_obs_seqs()."""
    global hornet_obs_seqs_cached

    def key_fn(probes, analysis_interval, msm_id):
        probe_digest = hashlib.md5(
            ujson.dumps(sorted(probes)).encode('utf-8')).hexdigest()
        interval_digest = hashlib.md5(
            ujson.dumps(analysis_interval).encode('utf-8')).hexdigest()
        return "{} - {} - {}".format(msm_id, probe_digest, interval_digest)

    hornet_obs_seqs_cached = persistent_lru.add_persistent_caching(
        hornet_obs_seqs, "hornet_obs_seqs", key_fn, cache, False)
def define_pfx2as_cached(cache):
    """Install the persistent-LRU wrapper for the CAIDA pfx2as download."""
    global pfx2as_cached

    def key_fn(dt):
        # Key on the concrete snapshot URL so equal downloads hit the cache.
        return caida_routeviews.best_pfx2as_url(dt)

    pfx2as_cached = persistent_lru.add_persistent_caching(
        caida_routeviews.dl_pfx2as_closest_to_datetime, "pfx2as", key_fn,
        cache, False)
def define_probe_paths_cached(cache):
    """Install the persistent-LRU wrapper for probe_paths()."""
    global probe_paths_cached

    def key_fn(msm_id, start_datetime):
        # Align to the measurement interval so nearby times share one entry.
        aligned = round_datetime_to_interval(start_datetime, msm_id)
        return "{} {}".format(msm_id, str(aligned))

    probe_paths_cached = persistent_lru.add_persistent_caching(
        probe_paths, "paths", key_fn, cache, True)
def hornet_adv_obs(path):
    """The adversary's observation for a path: its second-to-last hop.

    Returns None for a missing path.
    """
    return None if path is None else path[1][-2]
def hornet_analyze_boundary_asn(msm_id, probe_id, boundary, num_strides=4):
    """AS-level anonymity around a boundary: (|t0 ASes|, |t0 ∩ t1 ASes|)."""
    t0, t1 = boundary
    obs_before = hornet_adv_obs(probe_path_at_time(msm_id, probe_id, t0))
    obs_after = hornet_adv_obs(probe_path_at_time(msm_id, probe_id, t1))
    interval = ripe_atlas.msm_interval(msm_id)
    # Walk backwards from t1 and forwards from t0 so the windows straddle
    # the boundary.
    window_before = datetimes_from_stride(t1, interval, num_strides, -1)
    window_after = datetimes_from_stride(t0, interval, num_strides, 1)
    anon_before = anonymity_set_wide(msm_id, obs_before, hornet_adv_obs,
                                     asns_are_indistinguishable, window_before)
    anon_after = anonymity_set_wide(msm_id, obs_after, hornet_adv_obs,
                                    asns_are_indistinguishable, window_after)
    ases_before = probe_times_to_uniq_ases(msm_id, anon_before)
    ases_after = probe_times_to_uniq_ases(msm_id, anon_after)
    return len(ases_before), len(ases_before.intersection(ases_after))
def hornet_analyze_boundary_pfx(msm_id, probe_id, boundary, num_strides=4):
    """Prefix-level anonymity around a boundary, in IPv4 address counts.

    Returns (addresses in the t0 set, addresses in the t0 ∩ t1 set).
    """
    t0, t1 = boundary
    obs_before = hornet_adv_obs(probe_path_at_time(msm_id, probe_id, t0))
    obs_after = hornet_adv_obs(probe_path_at_time(msm_id, probe_id, t1))
    interval = ripe_atlas.msm_interval(msm_id)
    window_before = datetimes_from_stride(t1, interval, num_strides, -1)
    window_after = datetimes_from_stride(t0, interval, num_strides, 1)
    anon_before = anonymity_set_wide(msm_id, obs_before, hornet_adv_obs,
                                     asns_are_indistinguishable, window_before)
    anon_after = anonymity_set_wide(msm_id, obs_after, hornet_adv_obs,
                                    asns_are_indistinguishable, window_after)
    pfxs_before = probe_times_to_uniq_pfxs(msm_id, anon_before)
    pfxs_after = probe_times_to_uniq_pfxs(msm_id, anon_after)
    shared_pfxs = pfxs_before.intersection(pfxs_after)
    return num_ipv4_addrs(pfxs_before), num_ipv4_addrs(shared_pfxs)
def hornet_analyze_boundaries(msm_id, probe_id, boundaries, num_strides=4):
    """Print one CSV stats row for the first analyzable boundary of a probe.

    For each candidate boundary (t0, t1): skip it when either endpoint is
    missing, the probe changed network location, or either observation is
    ambiguous.  Otherwise compute anonymity-set statistics around the
    boundary, print the CSV row matching HORNET_HDR, and stop.  When no
    boundary qualifies, print a "NIL" row instead.
    """
    found_result = False
    for (t0, t1) in boundaries:
        if t0 is None or t1 is None:
            continue
        p0 = probe_path_at_time(msm_id, probe_id, t0)
        p1 = probe_path_at_time(msm_id, probe_id, t1)
        if p0 is None or p1 is None or (p0[0] != p1[0]):
            s = "Ignoring boundary {} - {} with location {} change to {}"
            logging.info(s.format(str(t0), str(t1), p0, p1))
            # BUG FIX: this branch logged the boundary as ignored but then
            # fell through and analyzed it anyway; actually skip it.
            continue
        t0_obs = hornet_adv_obs(p0)
        t1_obs = hornet_adv_obs(p1)
        if not asn_is_unambig(t0_obs) or not asn_is_unambig(t1_obs):
            s = "Ignoring boundary {} - {} with hop {} change to {}"
            logging.info(s.format(str(t0), str(t1), t0_obs, t1_obs))
            continue
        interval = ripe_atlas.msm_interval(msm_id)
        # Overlapping intervals?
        t0_date_range = datetimes_from_stride(t1, interval, num_strides, -1)
        t1_date_range = datetimes_from_stride(t0, interval, num_strides, 1)
        all_probes = all_known_probes(msm_id, t0_date_range)
        all_valid = all_probes_with_valid_obs(msm_id, hornet_adv_obs,
                                              asn_is_unambig, t0_date_range)
        out_0 = probe_times_stats(msm_id, all_probes)
        out_1 = probe_times_stats(msm_id, all_valid)
        # "prv": anonymity set of the pre-boundary observation.
        prv = anonymity_set_wide(msm_id, t0_obs, hornet_adv_obs,
                                 asns_are_indistinguishable, t0_date_range)
        out_2 = probe_times_stats(msm_id, prv)
        prv_ases = probe_times_to_uniq_ases(msm_id, prv)
        prv_pfxs = probe_times_to_uniq_pfxs(msm_id, prv)
        prv_probes = probe_times_to_uniq_probes(prv)
        # "nxt": anonymity set of the post-boundary observation.
        nxt = anonymity_set_wide(msm_id, t1_obs, hornet_adv_obs,
                                 asns_are_indistinguishable, t1_date_range)
        nxt_ases = probe_times_to_uniq_ases(msm_id, nxt)
        nxt_pfxs = probe_times_to_uniq_pfxs(msm_id, nxt)
        nxt_probes = probe_times_to_uniq_probes(nxt)
        # "itc": the intersection — what the adversary actually learns.
        itc_ases = prv_ases.intersection(nxt_ases)
        itc_pfxs = prv_pfxs.intersection(nxt_pfxs)
        itc_probes = prv_probes.intersection(nxt_probes)
        itc_addrs = num_ipv4_addrs(itc_pfxs)
        itc_pct = pct_ipv4_addrs(itc_pfxs)
        out_3 = (len(itc_probes), len(itc_pfxs), itc_addrs, itc_pct,
                 len(itc_ases))
        out = (msm_id, probe_id, t0, t1, t0_obs, t1_obs) + out_0 + out_1 + \
            out_2 + out_3
        print(",".join(map(str, out)))
        found_result = True
        break
    if not found_result:
        out = (msm_id, probe_id, "NIL")
        print(",".join(map(str, out)))
def hornet_candidate_score(probe_path_t1, probe_path_t2,
                           t1_obs_frq, t2_obs_frq):
    """Score a probe as a HORNET deanonymization candidate.

    0.0 when the probe moved, when either observation is an unresolved
    hop, or when the observations are indistinguishable; otherwise the
    ratio of the two observations' frequencies.
    """
    if probe_path_t1[0] != probe_path_t2[0]:
        # The probe changed network location between the two snapshots.
        return 0.0
    obs_1 = hornet_adv_obs(probe_path_t1)
    obs_2 = hornet_adv_obs(probe_path_t2)
    if BAD_RESOLVE in (obs_1, obs_2):
        # Skip traceroutes where the previous hop was a "*".
        return 0.0
    if asns_are_indistinguishable(obs_1, obs_2):
        return 0.0
    return t1_obs_frq[obs_1] / t2_obs_frq[obs_2]
def hornet_diffs(msm_id, start_datetime, end_datetime):
    """Probes whose adversary observation clearly changed between two ticks.

    Probes that moved location or had an unresolved hop are excluded.
    """
    start_paths = probe_paths_cached(msm_id, start_datetime)
    end_paths = probe_paths_cached(msm_id, end_datetime)
    changed = []
    for probe_id in common_probes(start_paths, end_paths):
        before = start_paths[probe_id]
        after = end_paths[probe_id]
        if before[0] != after[0]:  # probe changed network location
            continue
        obs_before = hornet_adv_obs(before)
        obs_after = hornet_adv_obs(after)
        if BAD_RESOLVE in (obs_before, obs_after):
            continue
        if not asns_are_indistinguishable(obs_before, obs_after):
            changed.append(probe_id)
    return changed
# Module-level slot for the cached variant of hornet_obs_seqs();
# populated by define_hornet_obs_seqs_cached() at startup.
hornet_obs_seqs_cached = None
def hornet_obs_seqs(probes, analysis_interval, msm_id):
    """Per-probe time series of (observation, datetime) pairs.

    Ticks where a probe is absent are recorded as (None, dt).
    """
    series = defaultdict(list)
    for when in analysis_interval:
        snapshot = probe_paths_cached(msm_id, when)
        for probe_id in probes:
            path = snapshot.get(probe_id)
            obs = None if path is None else hornet_adv_obs(path)
            series[probe_id].append((obs, when))
    return series
def hornet_obs_frq(probe_paths):
    """Relative frequency of each adversary observation across the paths."""
    counts = Counter(hornet_adv_obs(path) for path in probe_paths.values())
    total = sum(counts.values())
    return Counter({obs: n / total for obs, n in counts.items()})
def hornet_scores(msm_id, start_datetime, end_datetime):
    """(probe_id, score) pairs for all common probes, best score first."""
    start_paths = probe_paths_cached(msm_id, start_datetime)
    end_paths = probe_paths_cached(msm_id, end_datetime)
    frq_start = hornet_obs_frq(start_paths)
    frq_end = hornet_obs_frq(end_paths)
    scored = [
        (probe_id,
         hornet_candidate_score(start_paths[probe_id], end_paths[probe_id],
                                frq_start, frq_end))
        for probe_id in common_probes(start_paths, end_paths)
    ]
    scored.sort(key=lambda item: item[1], reverse=True)
    return scored
def ip_addr_to_asn(ip_addr, prefix_tree):
    """Look up the origin ASN covering ip_addr; None on miss/empty input."""
    if not ip_addr:
        return None
    if ip_addr in prefix_tree:
        return prefix_tree[ip_addr]
    return None
def ip_addr_to_pfx(ip_addr, prefix_tree):
    """Look up the covering prefix for ip_addr; None on miss/empty input."""
    if not ip_addr:
        return None
    if ip_addr in prefix_tree:
        return prefix_tree.get_key(ip_addr)
    return None
def jaccard_idx(a, b):
    """Jaccard similarity |a ∩ b| / |a ∪ b| of two sets.

    Two empty sets are defined as identical (1.0); previously that edge
    case raised ZeroDivisionError.
    """
    union = a.union(b)
    if not union:
        return 1.0
    return len(a.intersection(b)) / len(union)
def list_of_boundaries(hornet_obs_seq):
    """UTC datetime pairs where consecutive observations clearly differ.

    Entries with a missing or unresolved observation never form a boundary.
    """
    boundaries = []
    for (obs_a, ts_a), (obs_b, ts_b) in zip(hornet_obs_seq,
                                            hornet_obs_seq[1:]):
        if obs_a is None or obs_b is None:
            continue
        if BAD_RESOLVE in (obs_a, obs_b):
            continue
        if not asns_are_indistinguishable(obs_a, obs_b):
            boundaries.append(
                (datetime.datetime.fromtimestamp(ts_a, datetime.timezone.utc),
                 datetime.datetime.fromtimestamp(ts_b, datetime.timezone.utc)))
    return boundaries
def main(args):
    """Entry point: find probes with HORNET-observable path changes and
    analyze the anonymity impact of each sampled probe's boundaries.

    BUG FIX: the analysis window now comes from the required start_date /
    end_date CLI arguments (format YYYY-mm-dd, interpreted as UTC);
    previously they were parsed but ignored in favor of hard-coded
    January 2016 datetimes.
    """
    random.seed(313)  # deterministic sampling across runs
    std_format = (
        "[%(asctime)s %(process)d %(filename)s %(funcName)s %(levelname)s"
        "] -- %(message)s")
    logging.basicConfig(format=std_format, stream=sys.stderr,
                        level=logging.INFO)
    cache = persistent_lru.PersistentLRU(args.cache_filename, 8096)
    cache.load()
    atexit.register(cache.close)
    define_probe_paths_cached(cache)
    define_pfx2as_cached(cache)
    msm_id = args.msm_id
    start_datetime = datetime.datetime.strptime(
        args.start_date, "%Y-%m-%d").replace(tzinfo=datetime.timezone.utc)
    end_datetime = datetime.datetime.strptime(
        args.end_date, "%Y-%m-%d").replace(tzinfo=datetime.timezone.utc)
    print(",".join(HORNET_HDR))
    diff_probes = hornet_diffs(msm_id, start_datetime, end_datetime)
    logging.info("{} probes with a HORNET diff".format(len(diff_probes)))
    as_thresh = 50
    sampled_probes = sample_probes_by_as_thresh(msm_id, diff_probes,
                                                start_datetime, as_thresh)
    for probe_id in sampled_probes:
        logging.info("PROBE ID: {}".format(probe_id))
        start_path = probe_path_at_time(msm_id, probe_id, start_datetime)
        end_path = probe_path_at_time(msm_id, probe_id, end_datetime)
        logging.info("Start path {} at {}".format(start_path,
                                                  str(start_datetime)))
        logging.info("End path {} at {}".format(end_path, str(end_datetime)))
        boundaries = search_for_boundaries(msm_id, probe_id, start_datetime,
                                           end_datetime)
        hornet_analyze_boundaries(msm_id, probe_id, boundaries)
def make_pfx_to_probes(msm_id, probes, dt):
    """Group probe ids by the prefix of their network location at dt."""
    grouped = defaultdict(list)
    snapshot = probe_paths_cached(msm_id, dt)
    for probe_id in probes:
        pfx = snapshot[probe_id][0][1]
        grouped[pfx].append(probe_id)
    return grouped
def num_ipv4_addrs(pfxs):
    """Total number of addresses covered by the given CIDR prefixes."""
    return sum(ipaddress.ip_network(pfx).num_addresses for pfx in pfxs)
def parse_args():
    """Parse command-line arguments for a HORNET analysis run.

    Positional: msm_id (RIPE Atlas measurement id), start_date and
    end_date (both "YYYY-mm-dd").  Optional: --cache_filename for the
    persistent LRU database, --num_top_probes for result truncation.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("msm_id", type=int)
    parser.add_argument("start_date", help="Format: YYYY-mm-dd")
    parser.add_argument("end_date", help="Format: YYYY-mm-dd")
    parser.add_argument("--cache_filename", default="cache.db")
    parser.add_argument("--num_top_probes", type=int, default=50)
    return parser.parse_args()
def pct_ipv4_addrs(pfxs):
    """Fraction of the allocable IPv4 space covered by `pfxs`."""
    covered = num_ipv4_addrs(pfxs)
    return covered / num_allocable_ipv4_addrs()
def num_allocable_ipv4_addrs():
    """Size of the full IPv4 space minus the reserved ranges."""
    total_ipv4 = 2 ** 32
    return total_ipv4 - num_reserved_ipv4_addrs()
def num_reserved_ipv4_addrs():
num_reserved_addrs = 0
reserved_ranges = [
"0.0.0.0/8",
"10.0.0.0/8",
"192.168.127.12/10",
"127.0.0.0/8",
"169.254.0.0/16",
"172.16.0.0/12",
"192.0.0.0/24",
"192.0.2.0/24",
"192.168.3.11/24",
"192.168.0.0/16",
"198.18.0.0/15",
"198.51.100.0/24",
| |
0xb4)
return 1
class Sorcerer_fix(Fix):
    """Defeats the Infotater copy protection in Sorcerer.

    The patch replaces the trunk's button check with an unconditional
    jump so any button press opens the trunk (see self.desc).
    """
    needed = True

    def __init__(self, gf):
        self.gamefile = gf
        self.contents = bytearray(gf.contents)
        self.gamesize = gf.gamesize
        # BUG FIX: was `elf.game_name`, a typo that raised NameError.
        self.game_name = "Sorcerer"
        self.desc = ("In Sorcerer, the copy protection consists of an Infotater wheel "
                     "where the player looks up one of 12 monsters and gets a color code "
                     "for pushing buttons to open a trunk. "
                     "We fix this by making an unconditional jump, so that no matter what "
                     "button you press, it will always open the trunk.")

    # All version verified to work - 2021-01-31
    def fix(self):
        """Patch the release-specific check into an unconditional jump.

        Returns 1 on success, 0 for an unknown release.
        """
        zw = make_zword(-58)  # signed branch offset as a z-machine word
        if self.gamefile.release == 67:  # both versions the same
            start = 0xfadc
        elif self.gamefile.release == 85:
            start = 0xfcc6
        elif self.gamefile.release == 4:
            start = 0xfbf6
        elif self.gamefile.release == 6:
            start = 0xfb92
        elif self.gamefile.release == 13:
            start = 0xfae2
        elif self.gamefile.release == 15:
            start = 0xfad8
        elif self.gamefile.release == 18:
            start = 0x10434
        else:
            print("BUG?? Unknown Sorcerer version.")
            return 0
        # Write the jump opcode followed by the two offset bytes.
        self.set_byte(start, 0x8c)
        self.set_byte(start + 1, zw[0])
        self.set_byte(start + 2, zw[1])
        return 1
def word(w):
    """Big-endian 16-bit value from the first two bytes of `w`."""
    high, low = w[0], w[1]
    return (high << 8) + low
# I removed the part from z1 and z2. For full version, see my ztools
class Zscii:
    """Encode and decode the Z-machine's packed 5-bit ZSCII text.

    Only the "modern" alphabet tables are kept here; the v1/v2 variants
    were removed (see the file comment above this class).
    """

    # Alphabet rows A0 (lowercase), A1 (uppercase), A2 (punctuation).
    # Index 0 is space, indexes 1-5 are control codes ("^" placeholders),
    # printable characters start at index 6.
    modern_zscii = [
        " ^^^^^abcdefghijklmnopqrstuvwxyz ",
        " ^^^^^ABCDEFGHIJKLMNOPQRSTUVWXYZ ",
        " ^^^^^ \n0123456789.,!?_#'\"/\\-:() ",
    ]
    # Owning Story object; rebound in __init__.
    story = None

    def __init__(self, s_obj):
        """Bind this codec to a Story and select its alphabet tables."""
        self.story = s_obj
        # Number of bytes consumed by the most recent read_text() call.
        self.bytes_read = None
        v = s_obj.header["version"]  # NOTE(review): read but currently unused
        self.zscii = self.modern_zscii

    def convert_zscii_bytes(self, bytes):
        """Decode a sequence of 5-bit z-characters into a Python string."""
        zstring = ""
        # shift: pending alphabet row (-1 = default A0); abbrev_flag: the
        # previous byte selected an abbreviation bank; ascii_flag: the
        # previous bytes started a literal 10-bit character escape.
        shift, abbrev_flag, ascii_flag = -1, False, False
        v = self.story.header["version"]
        zscii = self.zscii
        for i, b in enumerate(bytes):
            if ascii_flag:
                # NOTE(review): the `i += 1` below does not affect the
                # enumerate loop, so bytes[i-1] ends up being b itself —
                # this escape path looks suspicious; confirm against a
                # story file containing escaped characters.
                ascii_flag = False
                i += 1
                if i == len(bytes):
                    return zstring
                zstring += chr(bytes[i-1] << 5 | b)
                continue
            if abbrev_flag:
                # Previous byte (1-3) chose an abbreviation bank of 32;
                # b indexes into that bank.
                ndx = 32 * (bytes[i-1]-1) + b
                zstring += self.story.abbreviations[ndx]
                #print("ABBREV", self.story.abbreviations[ndx], ndx, "pre-byte", bytes[i-1], "byte", b)
                abbrev_flag = False
                shift = -1
                continue
            if b == 0:
                zstring += " "
                continue
            elif b == 1:
                if v < 2:
                    # In version 1, code 1 is a newline, not an abbreviation.
                    zstring += "\n"
                else:
                    abbrev_flag = True
                continue
            elif b == 2:
                # NOTE(review): codes 2/3 double as shift codes in some
                # versions; here they always select abbreviation banks —
                # confirm this is intended for the targeted versions.
                abbrev_flag = True
                continue
            elif b == 3:
                abbrev_flag = True
                continue
            elif b == 4:
                shift = 1  # next character from A1 (uppercase)
                abbrev_flag = False
                continue
            elif b == 5:
                shift = 2  # next character from A2 (punctuation)
                abbrev_flag = False
                continue
            elif b == 6:
                if shift == 2:
                    # A2 code 6 starts a literal character escape.
                    shift = -1
                    ascii_flag = True
                    continue
            # Plain character: use the pending row, else default A0.
            if shift > -1:
                zstring += zscii[shift][b]
            else:
                zstring += zscii[0][b]
            shift = -1
            abbrev_flag = False
        return zstring

    # Reverse-lookup alphabets for encoding (no control-code placeholders).
    modern_zscii_convert = [
        "abcdefghijklmnopqrstuvwxyz ",
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ ",
        " \n0123456789.,!?_#'\"/\\-:() ",
    ]

    def find_char(self, c):
        """Return (alphabet_row, z-char index) for c, or (None, None)."""
        for i, alphabet in enumerate(self.modern_zscii_convert):
            if c in alphabet:
                # +6 because printable z-chars start after the control codes.
                return i, alphabet.index(c) + 6
        return None, None

    def convert_to_bytes(self, text):
        """Encode text into a list of unpacked 5-bit z-characters.

        Returns None when a character has no alphabet entry (literal
        escapes are not implemented).
        """
        i = 0
        bytes = []
        while i < len(text):
            aflag = False
            # Prefer an abbreviation whenever one matches at this position.
            for ndx, abbr in enumerate(self.story.abbreviations):
                if text[i:].startswith(abbr):
                    a, b = divmod(ndx, 32)
                    #print("ABBR", abbr, a, b)
                    bytes.append(a+1)  # abbreviation bank selector (1-3)
                    bytes.append(b)
                    i += len(abbr)
                    aflag = True
                    break
            if aflag:
                continue
            if text[i] == ' ':
                bytes.append(0)
                i += 1
                continue
            alphabet, ndx = self.find_char(text[i])
            if alphabet == None:
                print("Char `{}': ASCII functionality not yet added. See zscii class.".format(text[i]))
                return None
            if alphabet:
                # Non-A0 characters need a shift code first (4 = A1, 5 = A2).
                bytes.append(3 + alphabet)
            bytes.append(ndx)
            i += 1
        if len(bytes) % 3:
            # Pad to a multiple of 3 (z-chars pack three per word).
            bytes += [5] * (3 - len(bytes) % 3)
            #bytes.append(5)
        return bytes

    def encode_bytes(self, bytes):
        """Pack 5-bit z-chars, three per big-endian 16-bit word.

        The top bit of the final word marks end-of-string.
        """
        it = iter(bytes)
        zs = bytearray()
        for c1 in it:
            c2 = next(it)
            c3 = next(it)
            w = (c1 << 10) | (c2 << 5) | c3
            zs.append(w >> 8)
            zs.append(w & 255)
        zs[-2] |= 2 ** 7  # set the stop bit on the last word
        return zs

    def encode_text(self, text):
        """Encode a string straight to packed z-machine words."""
        bytes = self.convert_to_bytes(text)
        return self.encode_bytes(bytes)

    def read_text(self, addr, len, inform_escapes=False, full_return=False):
        """Decode the z-string at addr, reading at most `len` words.

        Stops at the word whose top bit is set and records the number of
        bytes consumed in self.bytes_read.
        NOTE(review): the parameter name shadows the builtin len(); it is
        only used via range() inside this method.
        """
        bytes = []
        real_bytes = []
        i = 0
        for i in range(len):
            w = word(self.story.contents[addr + i * 2:addr + i * 2 + 2])
            real_bytes = real_bytes + [(w >> 8, w & 255)]
            bit = w >> 15  # end-of-string marker
            c3 = w & 31
            c2 = (w & 0x3e0) >> 5
            c1 = (w & 0x7c00) >> 10
            bytes += [ c1, c2, c3 ]
            if bit:
                i += 1
                break
        self.bytes_read = i * 2
        zs = self.convert_zscii_bytes(bytes)
        if inform_escapes:
            # Inform source conventions: ~ for double quote, ^ for newline.
            zs = zs.replace('"', "~").replace("\n", "^")
        if full_return:
            return self.bytes_read, bytes, real_bytes, zs
        return zs
class Story:
    """A loaded Z-machine story file: raw bytes, parsed header, text codec."""

    # Text codec (Zscii instance); created in __init__.
    zscii = False
    configuration = None

    def fatal(self, s):
        """Print a "filename: message" error and exit the process."""
        print("{0}: {1}".format(self.filename, s))
        sys.exit(1)

    def parse_header(self):
        """Parse the 64-byte story header into self.header and attributes.

        Exits via fatal() when the version byte or file length is invalid.
        """
        h = self.header = dict()
        c = self.contents
        if 0 < c[0] < 9:
            h["version"] = version = c[0]
            self.zcode_version = version
        else:
            self.fatal("unknown zmachine version (byte 0x00={:d}, expecting 1-8)".format(c[0]))
        h["flags"] = c[1]
        h["release"] = word(c[2:4])
        h["highmem"] = word(c[4:6])
        h["pc"] = word(c[6:8])
        h["dict"] = word(c[8:10])
        h["otable"] = word(c[0xa:0xc])
        h["globals"] = word(c[0xc:0xe])
        h["static"] = word(c[0xe:0x10])
        h["gflags"] = word(c[0x10:0x12])
        h["serial"] = c[18:24].decode("utf-8")
        if version >= 2:
            h["abbr"] = word(c[0x18:0x1a])
        else:
            h["abbr"] = None
        h["filelen"] = word(c[0x1a:0x1c])
        h["cksum"] = word(c[0x1c:0x1e])
        # Convenience aliases used throughout the fixer classes.
        self.release = h["release"]
        self.serial = h["serial"]
        self.gamesize = h["filelen"]
        # The header stores the file length divided by a version-dependent
        # factor; scale it back up to bytes.
        if 1 <= self.zcode_version <= 3:
            self.gamesize *= 2
        elif 4 <= self.zcode_version <= 5:
            self.gamesize *= 4
        else:
            self.gamesize *= 8
        if len(self.contents) < self.gamesize:
            # BUG FIX: this branch formatted an undefined name `fn`
            # (NameError) instead of reporting the error.  Route it
            # through fatal(), which prints the same "filename: message"
            # form and exits with status 1.
            self.fatal("file is truncated (less than header gamesize)")

    def read_high(self, addr):
        """Decode the z-string at addr; return (z-char list, decoded str)."""
        n, b, rb, s = self.zscii.read_text(addr, 0xffff, full_return=True)
        return b, s

    def __init__(self, storyfile):
        """Load the story file, parse its header, and set up the codec."""
        self.filename = storyfile
        # BUG FIX: the file handle was never closed; use a context manager.
        try:
            with open(storyfile, "rb") as fd:
                self.contents = fd.read()
        except OSError as err:
            self.fatal(err)
        if len(self.contents) < 0x40:
            self.fatal("story file too short to be zmachine file")
        self.abbreviations = None
        self.addr_to_dict = dict()
        self.parse_header()
        self.zscii = Zscii(self)
        self.read_abbreviations()

    def read_abbreviations(self):
        """Load the abbreviation table into self.abbreviations (v2+ only)."""
        v = self.header["version"]
        hi, lo = -1, 0x7ffff  # extent of abbreviation text (debug only)
        if v == 1:
            return
        z = self.zscii
        addr = self.header["abbr"]
        if not addr:
            return
        max_a = 32 if v == 2 else 96
        abbr = self.abbreviations = [0] * max_a
        for i in range(max_a):
            # Each table entry is a word address (hence *2) of a z-string.
            abbr[i] = z.read_text(word(self.contents[addr:addr+2]) * 2, 753)
            lo = min(word(self.contents[addr:addr+2]) * 2, lo)
            hi = max(word(self.contents[addr:addr+2]) * 2 + z.bytes_read - 1, hi)
            addr += 2
games = {
("841226", 1) : AMFV_fix,
("850313", 47) : AMFV_fix,
("850516", 84) : AMFV_fix,
("850628", 131) : AMFV_fix,
("850814", 77) : AMFV_fix,
("851122", 79) : AMFV_fix,
("830524", 13) : Witness_fix,
("830910", 18) : Witness_fix,
("831119", 20) : Witness_fix,
("831208", 21) : Witness_fix,
("840924", 22) : Witness_fix,
("840925", 23) : Witness_fix,
("870506", 203) : Lurking_fix,
("870912", 219) : Lurking_fix,
("870918", 221) : Lurking_fix,
("000000", 67) : Sorcerer_fix,
("831208", 67) : Sorcerer_fix,
("840106", 85) : Sorcerer_fix,
("840131", 4) : Sorcerer_fix,
("840508", 6) : Sorcerer_fix,
("851021", 13) : Sorcerer_fix,
("851108", 15) : Sorcerer_fix,
("860904", 18) : Sorcerer_fix,
("861017", 1) : Stationfall_fix,
("870218", 63) : Stationfall_fix,
("870326", 87) : Stationfall_fix,
("870430", 107) : Stationfall_fix,
("890502", 40) : Arthur_fix,
("890504", 41) : Arthur_fix,
("890606", 54) : Arthur_fix,
("890622", 63) : Arthur_fix,
("890714", 74) : Arthur_fix,
("820311", 18) : Deadline_fix,
("820427", 19) : Deadline_fix,
("820512", 21) : Deadline_fix,
("820809", 22) : Deadline_fix,
("821108", 26) : Deadline_fix,
("831005", 27) : Deadline_fix,
("850129", 28) : Deadline_fix,
("830810", 10) : Enchanter_fix,
("831107", 15) : Enchanter_fix,
("831118", 16) : Enchanter_fix,
("840518", 16) : Enchanter_fix,
("851118", 24) : Enchanter_fix,
("860820", 29) : Enchanter_fix,
("850916", 63) : Spellbreaker_fix,
("860829", 86) : Spellbreaker_fix,
("860904", 87) : Spellbreaker_fix,
("820901", 15) : Starcross_fix,
("821021", 17) : Starcross_fix,
("830114", 18) : Starcross_fix,
("000000", 65) : Moonmist_fix,
("860918", 4) : Moonmist_fix,
("861022", 9) : Moonmist_fix,
("880501", 13) : Moonmist_fix,
("000000", 5) : Zork_fix,
("000000", 20) : Zork_fix,
("000000", 20) : Zork_fix,
("820428", 23) : Zork_fix,
("820515", 25) : Zork_fix,
("820803", 26) : Zork_fix,
("821013", 28) : Zork_fix,
("830330", 30) : Zork_fix,
("830929", 75) : Zork_fix,
("840509", 76) : Zork_fix,
("840509", 76) : Zork_fix,
("840726", 88) : Zork_fix,
("840726", 88) : Zork_fix,
("871125", 52) : Zork_fix,
("880113", 3) : Zork_fix,
("880429", 119): Zork_fix,
("890613", 15) : Zork_fix,
("AS000C", 2) : Zork_fix,
("840330", 15) : Zork_fix,
("UG3AU5", 15) : Zork_fix,
("820308", 15) : Zork_fix,
("820427", | |
<reponame>pearsontechnology/k8sv1<filename>test/test_apiv_api.py<gh_stars>1-10
# coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import k8sv1
from k8sv1.rest import ApiException
from k8sv1.apis.apiv_api import ApivApi
class TestApivApi(unittest.TestCase):
""" ApivApi unit test stubs """
def setUp(self):
self.api = k8sv1.apis.apiv_api.ApivApi()
    def tearDown(self):
        # Nothing to clean up: the API client holds no external resources here.
        pass
    # NOTE: Swagger-Codegen-generated test stubs.  Each test intentionally
    # passes until a real assertion against a live or fake API server is
    # implemented.
    def test_connect_delete_namespaced_pod_proxy(self):
        """
        Test case for connect_delete_namespaced_pod_proxy
        connect DELETE requests to proxy of Pod
        """
        pass

    def test_connect_delete_namespaced_pod_proxy_0(self):
        """
        Test case for connect_delete_namespaced_pod_proxy_0
        connect DELETE requests to proxy of Pod
        """
        pass

    def test_connect_delete_namespaced_service_proxy(self):
        """
        Test case for connect_delete_namespaced_service_proxy
        connect DELETE requests to proxy of Service
        """
        pass

    def test_connect_delete_namespaced_service_proxy_0(self):
        """
        Test case for connect_delete_namespaced_service_proxy_0
        connect DELETE requests to proxy of Service
        """
        pass

    def test_connect_delete_node_proxy(self):
        """
        Test case for connect_delete_node_proxy
        connect DELETE requests to proxy of Node
        """
        pass

    def test_connect_delete_node_proxy_0(self):
        """
        Test case for connect_delete_node_proxy_0
        connect DELETE requests to proxy of Node
        """
        pass

    def test_connect_get_namespaced_pod_attach(self):
        """
        Test case for connect_get_namespaced_pod_attach
        connect GET requests to attach of Pod
        """
        pass

    def test_connect_get_namespaced_pod_exec(self):
        """
        Test case for connect_get_namespaced_pod_exec
        connect GET requests to exec of Pod
        """
        pass

    def test_connect_get_namespaced_pod_portforward(self):
        """
        Test case for connect_get_namespaced_pod_portforward
        connect GET requests to portforward of Pod
        """
        pass

    def test_connect_get_namespaced_pod_proxy(self):
        """
        Test case for connect_get_namespaced_pod_proxy
        connect GET requests to proxy of Pod
        """
        pass

    def test_connect_get_namespaced_pod_proxy_0(self):
        """
        Test case for connect_get_namespaced_pod_proxy_0
        connect GET requests to proxy of Pod
        """
        pass

    def test_connect_get_namespaced_service_proxy(self):
        """
        Test case for connect_get_namespaced_service_proxy
        connect GET requests to proxy of Service
        """
        pass

    def test_connect_get_namespaced_service_proxy_0(self):
        """
        Test case for connect_get_namespaced_service_proxy_0
        connect GET requests to proxy of Service
        """
        pass

    def test_connect_get_node_proxy(self):
        """
        Test case for connect_get_node_proxy
        connect GET requests to proxy of Node
        """
        pass

    def test_connect_get_node_proxy_0(self):
        """
        Test case for connect_get_node_proxy_0
        connect GET requests to proxy of Node
        """
        pass

    def test_connect_head_namespaced_pod_proxy(self):
        """
        Test case for connect_head_namespaced_pod_proxy
        connect HEAD requests to proxy of Pod
        """
        pass

    def test_connect_head_namespaced_pod_proxy_0(self):
        """
        Test case for connect_head_namespaced_pod_proxy_0
        connect HEAD requests to proxy of Pod
        """
        pass

    def test_connect_head_namespaced_service_proxy(self):
        """
        Test case for connect_head_namespaced_service_proxy
        connect HEAD requests to proxy of Service
        """
        pass

    def test_connect_head_namespaced_service_proxy_0(self):
        """
        Test case for connect_head_namespaced_service_proxy_0
        connect HEAD requests to proxy of Service
        """
        pass

    def test_connect_head_node_proxy(self):
        """
        Test case for connect_head_node_proxy
        connect HEAD requests to proxy of Node
        """
        pass

    def test_connect_head_node_proxy_0(self):
        """
        Test case for connect_head_node_proxy_0
        connect HEAD requests to proxy of Node
        """
        pass

    def test_connect_options_namespaced_pod_proxy(self):
        """
        Test case for connect_options_namespaced_pod_proxy
        connect OPTIONS requests to proxy of Pod
        """
        pass

    def test_connect_options_namespaced_pod_proxy_0(self):
        """
        Test case for connect_options_namespaced_pod_proxy_0
        connect OPTIONS requests to proxy of Pod
        """
        pass

    def test_connect_options_namespaced_service_proxy(self):
        """
        Test case for connect_options_namespaced_service_proxy
        connect OPTIONS requests to proxy of Service
        """
        pass

    def test_connect_options_namespaced_service_proxy_0(self):
        """
        Test case for connect_options_namespaced_service_proxy_0
        connect OPTIONS requests to proxy of Service
        """
        pass

    def test_connect_options_node_proxy(self):
        """
        Test case for connect_options_node_proxy
        connect OPTIONS requests to proxy of Node
        """
        pass

    def test_connect_options_node_proxy_0(self):
        """
        Test case for connect_options_node_proxy_0
        connect OPTIONS requests to proxy of Node
        """
        pass

    def test_connect_post_namespaced_pod_attach(self):
        """
        Test case for connect_post_namespaced_pod_attach
        connect POST requests to attach of Pod
        """
        pass

    def test_connect_post_namespaced_pod_exec(self):
        """
        Test case for connect_post_namespaced_pod_exec
        connect POST requests to exec of Pod
        """
        pass

    def test_connect_post_namespaced_pod_portforward(self):
        """
        Test case for connect_post_namespaced_pod_portforward
        connect POST requests to portforward of Pod
        """
        pass

    def test_connect_post_namespaced_pod_proxy(self):
        """
        Test case for connect_post_namespaced_pod_proxy
        connect POST requests to proxy of Pod
        """
        pass

    def test_connect_post_namespaced_pod_proxy_0(self):
        """
        Test case for connect_post_namespaced_pod_proxy_0
        connect POST requests to proxy of Pod
        """
        pass

    def test_connect_post_namespaced_service_proxy(self):
        """
        Test case for connect_post_namespaced_service_proxy
        connect POST requests to proxy of Service
        """
        pass

    def test_connect_post_namespaced_service_proxy_0(self):
        """
        Test case for connect_post_namespaced_service_proxy_0
        connect POST requests to proxy of Service
        """
        pass

    def test_connect_post_node_proxy(self):
        """
        Test case for connect_post_node_proxy
        connect POST requests to proxy of Node
        """
        pass

    def test_connect_post_node_proxy_0(self):
        """
        Test case for connect_post_node_proxy_0
        connect POST requests to proxy of Node
        """
        pass

    def test_connect_put_namespaced_pod_proxy(self):
        """
        Test case for connect_put_namespaced_pod_proxy
        connect PUT requests to proxy of Pod
        """
        pass

    def test_connect_put_namespaced_pod_proxy_0(self):
        """
        Test case for connect_put_namespaced_pod_proxy_0
        connect PUT requests to proxy of Pod
        """
        pass

    def test_connect_put_namespaced_service_proxy(self):
        """
        Test case for connect_put_namespaced_service_proxy
        connect PUT requests to proxy of Service
        """
        pass

    def test_connect_put_namespaced_service_proxy_0(self):
        """
        Test case for connect_put_namespaced_service_proxy_0
        connect PUT requests to proxy of Service
        """
        pass

    def test_connect_put_node_proxy(self):
        """
        Test case for connect_put_node_proxy
        connect PUT requests to proxy of Node
        """
        pass

    def test_connect_put_node_proxy_0(self):
        """
        Test case for connect_put_node_proxy_0
        connect PUT requests to proxy of Node
        """
        pass

    def test_create_namespace(self):
        """
        Test case for create_namespace
        create a Namespace
        """
        pass

    def test_create_namespaced_binding(self):
        """
        Test case for create_namespaced_binding
        create a Binding
        """
        pass

    def test_create_namespaced_binding_binding(self):
        """
        Test case for create_namespaced_binding_binding
        create binding of a Binding
        """
        pass

    def test_create_namespaced_config_map(self):
        """
        Test case for create_namespaced_config_map
        create a ConfigMap
        """
        pass

    def test_create_namespaced_endpoints(self):
        """
        Test case for create_namespaced_endpoints
        create a Endpoints
        """
        pass

    def test_create_namespaced_event(self):
        """
        Test case for create_namespaced_event
        create a Event
        """
        pass

    def test_create_namespaced_limit_range(self):
        """
        Test case for create_namespaced_limit_range
        create a LimitRange
        """
        pass

    def test_create_namespaced_persistent_volume_claim(self):
        """
        Test case for create_namespaced_persistent_volume_claim
        create a PersistentVolumeClaim
        """
        pass

    def test_create_namespaced_pod(self):
        """
        Test case for create_namespaced_pod
        create a Pod
        """
        pass

    def test_create_namespaced_pod_template(self):
        """
        Test case for create_namespaced_pod_template
        create a PodTemplate
        """
        pass

    def test_create_namespaced_replication_controller(self):
        """
        Test case for create_namespaced_replication_controller
        create a ReplicationController
        """
        pass

    def test_create_namespaced_resource_quota(self):
        """
        Test case for create_namespaced_resource_quota
        create a ResourceQuota
        """
        pass

    def test_create_namespaced_secret(self):
        """
        Test case for create_namespaced_secret
        create a Secret
        """
        pass

    def test_create_namespaced_service(self):
        """
        Test case for create_namespaced_service
        create a Service
        """
        pass

    def test_create_namespaced_service_account(self):
        """
        Test case for create_namespaced_service_account
        create a ServiceAccount
        """
        pass

    def test_create_node(self):
        """
        Test case for create_node
        create a Node
        """
        pass

    def test_create_persistent_volume(self):
        """
        Test case for create_persistent_volume
        create a PersistentVolume
        """
        pass

    def test_delete_namespace(self):
        """
        Test case for delete_namespace
        delete a Namespace
        """
        pass

    def test_delete_namespaced_config_map(self):
        """
        Test case for delete_namespaced_config_map
        delete a ConfigMap
        """
        pass

    def test_delete_namespaced_endpoints(self):
        """
        Test case for delete_namespaced_endpoints
        delete a Endpoints
        """
        pass

    def test_delete_namespaced_event(self):
        """
        Test case for delete_namespaced_event
        delete a Event
        """
        pass

    def test_delete_namespaced_limit_range(self):
        """
        Test case for delete_namespaced_limit_range
        delete a LimitRange
        """
        pass

    def test_delete_namespaced_persistent_volume_claim(self):
        """
        Test case for delete_namespaced_persistent_volume_claim
        delete a PersistentVolumeClaim
        """
        pass

    def test_delete_namespaced_pod(self):
        """
        Test case for delete_namespaced_pod
        delete a Pod
        """
        pass

    def test_delete_namespaced_pod_template(self):
        """
        Test case for delete_namespaced_pod_template
        delete a PodTemplate
        """
        pass

    def test_delete_namespaced_replication_controller(self):
        """
        Test case for delete_namespaced_replication_controller
        delete a ReplicationController
        """
        pass

    def test_delete_namespaced_resource_quota(self):
        """
        Test case for delete_namespaced_resource_quota
        delete a ResourceQuota
        """
        pass

    def test_delete_namespaced_secret(self):
        """
        Test case for delete_namespaced_secret
        delete a Secret
        """
        pass

    def test_delete_namespaced_service(self):
        """
        Test case for delete_namespaced_service
        delete a Service
        """
        pass

    def test_delete_namespaced_service_account(self):
        """
        Test case for delete_namespaced_service_account
        delete a ServiceAccount
        """
        pass

    def test_delete_node(self):
        """
        Test case for delete_node
        delete a Node
        """
        pass

    def test_delete_persistent_volume(self):
        """
        Test case for delete_persistent_volume
        delete a PersistentVolume
        """
        pass

    def test_deletecollection_namespace(self):
        """
        Test case for deletecollection_namespace
        delete collection of Namespace
        """
        pass

    def test_deletecollection_namespaced_config_map(self):
        """
        Test case for deletecollection_namespaced_config_map
        delete collection of ConfigMap
        """
        pass

    def test_deletecollection_namespaced_endpoints(self):
        """
        Test case for deletecollection_namespaced_endpoints
        delete collection of Endpoints
        """
        pass
def test_deletecollection_namespaced_event(self):
"""
Test case for deletecollection_namespaced_event
delete collection of Event
"""
pass
def test_deletecollection_namespaced_limit_range(self):
"""
Test case for deletecollection_namespaced_limit_range
delete collection of LimitRange
"""
pass
def test_deletecollection_namespaced_persistent_volume_claim(self):
"""
Test case for deletecollection_namespaced_persistent_volume_claim
delete collection of PersistentVolumeClaim
"""
pass
def test_deletecollection_namespaced_pod(self):
"""
Test case for deletecollection_namespaced_pod
delete collection of Pod
"""
pass
def test_deletecollection_namespaced_pod_template(self):
"""
Test case for | |
<filename>nipype/interfaces/mrtrix3/utils.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# -*- coding: utf-8 -*-
import os.path as op
from ...utils.filemanip import split_filename
from ..base import (
CommandLineInputSpec,
CommandLine,
traits,
TraitedSpec,
File,
InputMultiPath,
isdefined,
)
from .base import MRTrix3BaseInputSpec, MRTrix3Base
class BrainMaskInputSpec(MRTrix3BaseInputSpec):
    """Input specification for :class:`BrainMask` (MRtrix3 ``dwi2mask``)."""

    # DWI series passed as the second-to-last positional argument.
    in_file = File(
        exists=True,
        argstr="%s",
        mandatory=True,
        position=-2,
        desc="input diffusion weighted images",
    )
    # Output mask file name; defaults to 'brainmask.mif' in the working dir.
    out_file = File(
        "brainmask.mif",
        argstr="%s",
        mandatory=True,
        position=-1,
        usedefault=True,
        desc="output brain mask",
    )
class BrainMaskOutputSpec(TraitedSpec):
    """Output specification for :class:`BrainMask`."""

    # desc fixed: ``dwi2mask`` produces a binary brain mask image, not a
    # response file (the old text was copy-pasted from a response-estimation spec).
    out_file = File(exists=True, desc="the output brain mask image")
class BrainMask(CommandLine):
    """
    Compute a whole-brain mask from a diffusion-weighted image series using
    the MRtrix3 ``dwi2mask`` command.

    (The previous summary — "Convert a mesh surface to a partial volume
    estimation image" — described :class:`Mesh2PVE`, not this interface.)

    Example
    -------
    >>> import nipype.interfaces.mrtrix3 as mrt
    >>> bmsk = mrt.BrainMask()
    >>> bmsk.inputs.in_file = 'dwi.mif'
    >>> bmsk.cmdline  # doctest: +ELLIPSIS
    'dwi2mask dwi.mif brainmask.mif'
    >>> bmsk.run()  # doctest: +SKIP
    """

    _cmd = "dwi2mask"
    input_spec = BrainMaskInputSpec
    output_spec = BrainMaskOutputSpec

    def _list_outputs(self):
        # Resolve the (possibly relative) output file name to an absolute path.
        outputs = self.output_spec().get()
        outputs["out_file"] = op.abspath(self.inputs.out_file)
        return outputs
class MRCatInputSpec(MRTrix3BaseInputSpec):
    """Input specification for :class:`MRCat` (MRtrix3 ``mrcat``)."""

    # All input images, passed as positional arguments before the output.
    in_files = traits.List(
        File(exists=True),
        argstr="%s",
        position=-2,
        mandatory=True,
        desc="files to concatenate",
    )
    out_file = File(
        "concatenated.mif",
        argstr="%s",
        mandatory=True,
        position=-1,
        usedefault=True,
        desc="output concatenated image",
    )
    axis = traits.Int(
        argstr="-axis %s",
        desc="""specify axis along which concatenation should be performed. By default,
    the program will use the last non-singleton, non-spatial axis of any of
    the input images - in other words axis 3 or whichever axis (greater than
    3) of the input images has size greater than one""",
    )
    # Output voxel datatype; values mirror mrcat's -datatype choices.
    datatype = traits.Enum(
        "float32",
        "float32le",
        "float32be",
        "float64",
        "float64le",
        "float64be",
        "int64",
        "uint64",
        "int64le",
        "uint64le",
        "int64be",
        "uint64be",
        "int32",
        "uint32",
        "int32le",
        "uint32le",
        "int32be",
        "uint32be",
        "int16",
        "uint16",
        "int16le",
        "uint16le",
        "int16be",
        "uint16be",
        "cfloat32",
        "cfloat32le",
        "cfloat32be",
        "cfloat64",
        "cfloat64le",
        "cfloat64be",
        "int8",
        "uint8",
        "bit",
        argstr="-datatype %s",
        desc="specify output image data type",
    )
class MRCatOutputSpec(TraitedSpec):
    """Output specification for :class:`MRCat`."""

    out_file = File(exists=True, desc="the output concatenated image")
class MRCat(CommandLine):
    """
    Concatenate several images into one (MRtrix3 ``mrcat``).

    Example
    -------
    >>> import nipype.interfaces.mrtrix3 as mrt
    >>> mrcat = mrt.MRCat()
    >>> mrcat.inputs.in_files = ['dwi.mif','mask.mif']
    >>> mrcat.cmdline  # doctest: +ELLIPSIS
    'mrcat dwi.mif mask.mif concatenated.mif'
    >>> mrcat.run()  # doctest: +SKIP
    """

    _cmd = "mrcat"
    input_spec = MRCatInputSpec
    output_spec = MRCatOutputSpec

    def _list_outputs(self):
        # Report the concatenated image resolved to an absolute path.
        result = self.output_spec().get()
        result["out_file"] = op.abspath(self.inputs.out_file)
        return result
class Mesh2PVEInputSpec(CommandLineInputSpec):
    """Input specification for :class:`Mesh2PVE` (MRtrix3 ``mesh2pve``)."""

    in_file = File(
        exists=True, argstr="%s", mandatory=True, position=-3, desc="input mesh"
    )
    reference = File(
        exists=True,
        argstr="%s",
        mandatory=True,
        position=-2,
        desc="input reference image",
    )
    in_first = File(
        exists=True,
        argstr="-first %s",
        desc="indicates that the mesh file is provided by FSL FIRST",
    )
    # desc fixed: mesh2pve writes a partial volume estimation image, not SH
    # coefficients (the old text was copy-pasted from another interface).
    out_file = File(
        "mesh2volume.nii.gz",
        argstr="%s",
        mandatory=True,
        position=-1,
        usedefault=True,
        desc="output partial volume estimation image",
    )
class Mesh2PVEOutputSpec(TraitedSpec):
    """Output specification for :class:`Mesh2PVE`."""

    # desc fixed: the output is the PVE image, not a response file.
    out_file = File(exists=True, desc="the output partial volume estimation image")
class Mesh2PVE(CommandLine):
    """
    Convert a mesh surface to a partial volume estimation image
    (MRtrix3 ``mesh2pve``).

    Example
    -------
    >>> import nipype.interfaces.mrtrix3 as mrt
    >>> m2p = mrt.Mesh2PVE()
    >>> m2p.inputs.in_file = 'surf1.vtk'
    >>> m2p.inputs.reference = 'dwi.mif'
    >>> m2p.inputs.in_first = 'T1.nii.gz'
    >>> m2p.cmdline  # doctest: +ELLIPSIS
    'mesh2pve -first T1.nii.gz surf1.vtk dwi.mif mesh2volume.nii.gz'
    >>> m2p.run()  # doctest: +SKIP
    """

    _cmd = "mesh2pve"
    input_spec = Mesh2PVEInputSpec
    output_spec = Mesh2PVEOutputSpec

    def _list_outputs(self):
        # Expand the default/user-supplied output name to an absolute path.
        result = self.output_spec().get()
        result["out_file"] = op.abspath(self.inputs.out_file)
        return result
class Generate5ttInputSpec(MRTrix3BaseInputSpec):
    """Input specification for :class:`Generate5tt` (MRtrix3 ``5ttgen``)."""

    # Segmentation backend passed as the first positional argument of 5ttgen.
    algorithm = traits.Enum(
        "fsl",
        "gif",
        "freesurfer",
        argstr="%s",
        position=-3,
        mandatory=True,
        desc="tissue segmentation algorithm",
    )
    in_file = File(
        exists=True, argstr="%s", mandatory=True, position=-2, desc="input image"
    )
    out_file = File(argstr="%s", mandatory=True, position=-1, desc="output image")
class Generate5ttOutputSpec(TraitedSpec):
    """Output specification for :class:`Generate5tt`."""

    out_file = File(exists=True, desc="output image")
class Generate5tt(MRTrix3Base):
    """
    Generate a 5TT image suitable for ACT using the selected algorithm
    (MRtrix3 ``5ttgen``).

    Example
    -------
    >>> import nipype.interfaces.mrtrix3 as mrt
    >>> gen5tt = mrt.Generate5tt()
    >>> gen5tt.inputs.in_file = 'T1.nii.gz'
    >>> gen5tt.inputs.algorithm = 'fsl'
    >>> gen5tt.inputs.out_file = '5tt.mif'
    >>> gen5tt.cmdline  # doctest: +ELLIPSIS
    '5ttgen fsl T1.nii.gz 5tt.mif'
    >>> gen5tt.run()  # doctest: +SKIP
    """

    _cmd = "5ttgen"
    input_spec = Generate5ttInputSpec
    output_spec = Generate5ttOutputSpec

    def _list_outputs(self):
        # Report the absolute path of the requested output image.
        results = self.output_spec().get()
        results["out_file"] = op.abspath(self.inputs.out_file)
        return results
class TensorMetricsInputSpec(CommandLineInputSpec):
    """Input specification for :class:`TensorMetrics` (MRtrix3 ``tensor2metric``)."""

    in_file = File(
        exists=True,
        argstr="%s",
        mandatory=True,
        position=-1,
        desc="input DTI image",
    )
    # Optional per-metric output files; a flag is emitted only when set.
    out_fa = File(argstr="-fa %s", desc="output FA file")
    out_adc = File(argstr="-adc %s", desc="output ADC file")
    out_ad = File(argstr="-ad %s", desc="output AD file")
    out_rd = File(argstr="-rd %s", desc="output RD file")
    out_cl = File(argstr="-cl %s", desc="output CL file")
    out_cp = File(argstr="-cp %s", desc="output CP file")
    out_cs = File(argstr="-cs %s", desc="output CS file")
    out_evec = File(argstr="-vector %s", desc="output selected eigenvector(s) file")
    out_eval = File(argstr="-value %s", desc="output selected eigenvalue(s) file")
    component = traits.List(
        [1],
        usedefault=True,
        argstr="-num %s",
        sep=",",
        desc=(
            "specify the desired eigenvalue/eigenvector(s). Note that "
            "several eigenvalues can be specified as a number sequence"
        ),
    )
    in_mask = File(
        exists=True,
        argstr="-mask %s",
        desc=(
            "only perform computation within the specified binary" " brain mask image"
        ),
    )
    modulate = traits.Enum(
        "FA",
        "none",
        "eval",
        argstr="-modulate %s",
        desc=("how to modulate the magnitude of the" " eigenvectors"),
    )
class TensorMetricsOutputSpec(TraitedSpec):
    """Output specification for :class:`TensorMetrics`.

    Each field is populated only when the matching input trait was set.
    """

    out_fa = File(desc="output FA file")
    out_adc = File(desc="output ADC file")
    out_ad = File(desc="output AD file")
    out_rd = File(desc="output RD file")
    out_cl = File(desc="output CL file")
    out_cp = File(desc="output CP file")
    out_cs = File(desc="output CS file")
    out_evec = File(desc="output selected eigenvector(s) file")
    out_eval = File(desc="output selected eigenvalue(s) file")
class TensorMetrics(CommandLine):
    """
    Compute metrics from tensors (MRtrix3 ``tensor2metric``).

    Example
    -------
    >>> import nipype.interfaces.mrtrix3 as mrt
    >>> comp = mrt.TensorMetrics()
    >>> comp.inputs.in_file = 'dti.mif'
    >>> comp.inputs.out_fa = 'fa.mif'
    >>> comp.cmdline  # doctest: +ELLIPSIS
    'tensor2metric -num 1 -fa fa.mif dti.mif'
    >>> comp.run()  # doctest: +SKIP
    """

    _cmd = "tensor2metric"
    input_spec = TensorMetricsInputSpec
    output_spec = TensorMetricsOutputSpec

    def _list_outputs(self):
        # Only outputs whose matching input trait was set are reported,
        # resolved to absolute paths.
        outputs = self.output_spec().get()
        for name in list(outputs):
            value = getattr(self.inputs, name)
            if isdefined(value):
                outputs[name] = op.abspath(value)
        return outputs
class ComputeTDIInputSpec(CommandLineInputSpec):
    """Input specification for :class:`ComputeTDI` (MRtrix3 ``tckmap``)."""

    in_file = File(
        exists=True, argstr="%s", mandatory=True, position=-2, desc="input tractography"
    )
    out_file = File(
        "tdi.mif", argstr="%s", usedefault=True, position=-1, desc="output TDI file"
    )
    reference = File(
        exists=True,
        argstr="-template %s",
        # desc fixed: adjacent string literals were missing a separating
        # space ("a referenceimage to be used as template").
        desc="a reference image to be used as template",
    )
    vox_size = traits.List(
        traits.Int, argstr="-vox %s", sep=",", desc="voxel dimensions"
    )
    data_type = traits.Enum(
        "float",
        "unsigned int",
        argstr="-datatype %s",
        desc="specify output image data type",
    )
    use_dec = traits.Bool(argstr="-dec", desc="perform mapping in DEC space")
    dixel = File(
        argstr="-dixel %s",
        # desc fixed: missing spaces at literal joins ("todixels", "asazimuth").
        desc="map streamlines to dixels within each voxel. Directions are "
        "stored as azimuth elevation pairs.",
    )
    max_tod = traits.Int(
        argstr="-tod %d",
        desc="generate a Track Orientation " "Distribution (TOD) in each voxel.",
    )
    contrast = traits.Enum(
        "tdi",
        "length",
        "invlength",
        "scalar_map",
        # "scalar_map_count" is the spelling tckmap actually accepts; the
        # misspelled "scalar_map_conut" is kept for interface compatibility.
        "scalar_map_count",
        "scalar_map_conut",
        "fod_amp",
        "curvature",
        # argstr fixed: the tckmap option is spelled ``-contrast``; the old
        # ``-constrast`` made the command fail whenever this trait was set.
        argstr="-contrast %s",
        desc="define the desired " "form of contrast for the output image",
    )
    in_map = File(
        exists=True,
        argstr="-image %s",
        # desc fixed: missing space at literal join ("thescalar").
        desc="provide the scalar image map for generating images with "
        "'scalar_map' contrasts, or the SHs image for fod_amp",
    )
    stat_vox = traits.Enum(
        "sum",
        "min",
        "mean",
        "max",
        argstr="-stat_vox %s",
        # desc fixed: missing space ("finalvoxel") and typo "intesities".
        desc="define the statistic for choosing the final "
        "voxel intensities for a given contrast",
    )
    stat_tck = traits.Enum(
        "mean",
        "sum",
        "min",
        "max",
        "median",
        "mean_nonzero",
        "gaussian",
        "ends_min",
        "ends_mean",
        "ends_max",
        "ends_prod",
        argstr="-stat_tck %s",
        desc="define the statistic for choosing "
        "the contribution to be made by each streamline as a function of"
        " the samples taken along their lengths.",
    )
    fwhm_tck = traits.Float(
        argstr="-fwhm_tck %f",
        desc="define the statistic for choosing the"
        " contribution to be made by each streamline as a function of the "
        "samples taken along their lengths",
    )
    map_zero = traits.Bool(
        argstr="-map_zero",
        desc="if a streamline has zero contribution based "
        "on the contrast & statistic, typically it is not mapped; use this "
        "option to still contribute to the map even if this is the case "
        "(these non-contributing voxels can then influence the mean value in "
        "each voxel of the map)",
    )
    upsample = traits.Int(
        argstr="-upsample %d",
        # desc fixed: typo "mappping".
        desc="upsample the tracks by"
        " some ratio using Hermite interpolation before "
        "mapping",
    )
    precise = traits.Bool(
        argstr="-precise",
        desc="use a more precise streamline mapping "
        "strategy, that accurately quantifies the length through each voxel "
        "(these lengths are then taken into account during TWI calculation)",
    )
    ends_only = traits.Bool(
        argstr="-ends_only", desc="only map the streamline" " endpoints to the image"
    )
    tck_weights = File(
        exists=True,
        argstr="-tck_weights_in %s",
        desc="specify" " a text scalar file containing the streamline weights",
    )
    nthreads = traits.Int(
        argstr="-nthreads %d",
        desc="number of threads. if zero, the number" " of available cpus will be used",
        nohash=True,
    )
class ComputeTDIOutputSpec(TraitedSpec):
    """Output specification for :class:`ComputeTDI`."""

    out_file = File(desc="output TDI file")
class ComputeTDI(MRTrix3Base):
"""
Use track data as a form of contrast for producing a high-resolution
image.
.. admonition:: References
* For TDI or DEC TDI: <NAME>.; <NAME>.; <NAME>. &
<NAME>. Track-density imaging (TDI): Super-resolution white
matter imaging using whole-brain track-density mapping. NeuroImage,
2010, 53, 1233-1243
* If using -contrast length | |
:returns:
"""
pass
def list_device_definition_versions(self, DeviceDefinitionId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
    """Lists the versions of a device definition.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListDeviceDefinitionVersions>`_

    :type DeviceDefinitionId: string
    :param DeviceDefinitionId: **[REQUIRED]** The ID of the device definition.
    :type MaxResults: string
    :param MaxResults: The maximum number of results to be returned per request.
    :type NextToken: string
    :param NextToken: The token for the next set of results, or ''null'' if there are no additional results.
    :rtype: dict
    :returns: A dict with 'NextToken' and 'Versions', where each version
        summary carries 'Arn', 'CreationTimestamp', 'Id' and 'Version'.
    """
    pass
def list_device_definitions(self, MaxResults: str = None, NextToken: str = None) -> Dict:
    """Retrieves a list of device definitions.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListDeviceDefinitions>`_

    :type MaxResults: string
    :param MaxResults: The maximum number of results to be returned per request.
    :type NextToken: string
    :param NextToken: The token for the next set of results, or ''null'' if there are no additional results.
    :rtype: dict
    :returns: A dict with 'Definitions' (each with 'Arn', 'CreationTimestamp',
        'Id', 'LastUpdatedTimestamp', 'LatestVersion', 'LatestVersionArn',
        'Name' and 'Tags') and 'NextToken'.
    """
    pass
def list_function_definition_versions(self, FunctionDefinitionId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
    """Lists the versions of a Lambda function definition.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListFunctionDefinitionVersions>`_

    :type FunctionDefinitionId: string
    :param FunctionDefinitionId: **[REQUIRED]** The ID of the Lambda function definition.
    :type MaxResults: string
    :param MaxResults: The maximum number of results to be returned per request.
    :type NextToken: string
    :param NextToken: The token for the next set of results, or ''null'' if there are no additional results.
    :rtype: dict
    :returns: A dict with 'NextToken' and 'Versions', where each version
        summary carries 'Arn', 'CreationTimestamp', 'Id' and 'Version'.
    """
    pass
def list_function_definitions(self, MaxResults: str = None, NextToken: str = None) -> Dict:
    """Retrieves a list of Lambda function definitions.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListFunctionDefinitions>`_

    :type MaxResults: string
    :param MaxResults: The maximum number of results to be returned per request.
    :type NextToken: string
    :param NextToken: The token for the next set of results, or ''null'' if there are no additional results.
    :rtype: dict
    :returns: A dict with 'Definitions' (each with 'Arn', 'CreationTimestamp',
        'Id', 'LastUpdatedTimestamp', 'LatestVersion', 'LatestVersionArn',
        'Name' and 'Tags') and 'NextToken'.
    """
    pass
def list_group_certificate_authorities(self, GroupId: str) -> Dict:
    """Retrieves the current CAs for a group.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListGroupCertificateAuthorities>`_

    :type GroupId: string
    :param GroupId: **[REQUIRED]** The ID of the Greengrass group.
    :rtype: dict
    :returns: A dict with 'GroupCertificateAuthorities', a list of dicts each
        carrying 'GroupCertificateAuthorityArn' and 'GroupCertificateAuthorityId'.
    """
    pass
def list_group_versions(self, GroupId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
    """Lists the versions of a group.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListGroupVersions>`_

    :type GroupId: string
    :param GroupId: **[REQUIRED]** The ID of the Greengrass group.
    :type MaxResults: string
    :param MaxResults: The maximum number of results to be returned per request.
    :type NextToken: string
    :param NextToken: The token for the next set of results, or ''null'' if there are no additional results.
    :rtype: dict
    :returns: A dict with 'NextToken' and 'Versions', where each version
        summary carries 'Arn', 'CreationTimestamp', 'Id' and 'Version'.
    """
    pass
def list_groups(self, MaxResults: str = None, NextToken: str = None) -> Dict:
"""
Retrieves a list of groups.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListGroups>`_
| |
<< (ClickHouseParser.K_LEFT - 64))
| (1 << (ClickHouseParser.K_LIKE - 64))
| (1 << (ClickHouseParser.K_LIMIT - 64))
| (1 << (ClickHouseParser.K_MAIN - 64))
| (1 << (ClickHouseParser.K_MATERIALIZED - 64))
| (1 << (ClickHouseParser.K_MODIFY - 64))
| (1 << (ClickHouseParser.K_NOT - 64))
| (1 << (ClickHouseParser.K_NULL - 64))
| (1 << (ClickHouseParser.K_NULLS - 64))
| (1 << (ClickHouseParser.K_OFFSET - 64))
| (1 << (ClickHouseParser.K_ON - 64))
| (1 << (ClickHouseParser.K_OPTIMIZE - 64))
| (1 << (ClickHouseParser.K_ORDER - 64))
| (1 << (ClickHouseParser.K_OR - 64))
| (1 << (ClickHouseParser.K_OUTFILE - 64))
| (1 << (ClickHouseParser.K_PARTITION - 64))
| (1 << (ClickHouseParser.K_POPULATE - 64))
| (1 << (ClickHouseParser.K_PREWHERE - 64))
| (1 << (ClickHouseParser.K_PROCESSLIST - 64))
| (1 << (ClickHouseParser.K_QUERY - 64))
| (1 << (ClickHouseParser.K_RENAME - 64))
| (1 << (ClickHouseParser.K_RETURN - 64))
| (1 << (ClickHouseParser.K_RIGHT - 64))
| (1 << (ClickHouseParser.K_SAMPLE - 64))
| (1 << (ClickHouseParser.K_SELECT - 64))
| (1 << (ClickHouseParser.K_SET - 64))
| (1 << (ClickHouseParser.K_SETTINGS - 64))
| (1 << (ClickHouseParser.K_SHOW - 64))
| (1 << (ClickHouseParser.K_SYNC - 64))
| (1 << (ClickHouseParser.K_TABLE - 64))
| (1 << (ClickHouseParser.K_TABLES - 64))
| (1 << (ClickHouseParser.K_TEMPORARY - 64))
| (1 << (ClickHouseParser.K_TEST - 64))
| (1 << (ClickHouseParser.K_THEN - 64))
| (1 << (ClickHouseParser.K_TOTALS - 64))
| (1 << (ClickHouseParser.K_TO - 64))
| (1 << (ClickHouseParser.K_OUTER - 64))
| (1 << (ClickHouseParser.K_VALUES - 64))
| (1 << (ClickHouseParser.K_VIEW - 64))
| (1 << (ClickHouseParser.K_UNION - 64))
| (1 << (ClickHouseParser.K_USE - 64))
| (1 << (ClickHouseParser.K_USING - 64))
| (1 << (ClickHouseParser.K_WHEN - 64))
| (1 << (ClickHouseParser.K_WHERE - 64))
| (1 << (ClickHouseParser.K_WITH - 64))
| (1 << (ClickHouseParser.LPAREN - 64))
| (1 << (ClickHouseParser.STAR - 64))
)
)
!= 0
)
or (
(((_la - 132)) & ~0x3F) == 0
and (
(1 << (_la - 132))
& (
(1 << (ClickHouseParser.MINUS - 132))
| (1 << (ClickHouseParser.LBRAKET - 132))
| (1 << (ClickHouseParser.T_FLOAT32 - 132))
| (1 << (ClickHouseParser.T_FLOAT64 - 132))
| (1 << (ClickHouseParser.T_UINT8 - 132))
| (1 << (ClickHouseParser.T_UINT16 - 132))
| (1 << (ClickHouseParser.T_UINT32 - 132))
| (1 << (ClickHouseParser.T_UINT64 - 132))
| (1 << (ClickHouseParser.T_INT8 - 132))
| (1 << (ClickHouseParser.T_INT16 - 132))
| (1 << (ClickHouseParser.T_INT32 - 132))
| (1 << (ClickHouseParser.T_INT64 - 132))
| (1 << (ClickHouseParser.T_ENUM8 - 132))
| (1 << (ClickHouseParser.T_ENUM16 - 132))
| (1 << (ClickHouseParser.T_UUID - 132))
| (1 << (ClickHouseParser.T_DATE - 132))
| (1 << (ClickHouseParser.T_DATETIME - 132))
| (1 << (ClickHouseParser.T_STRING - 132))
| (1 << (ClickHouseParser.T_FIXEDSTRING - 132))
| (1 << (ClickHouseParser.T_NULL - 132))
| (1 << (ClickHouseParser.T_INTERVAL_YEAR - 132))
| (1 << (ClickHouseParser.T_INTERVAL_MONTH - 132))
| (1 << (ClickHouseParser.T_INTERVAL_WEEK - 132))
| (1 << (ClickHouseParser.T_INTERVAL_DAY - 132))
| (1 << (ClickHouseParser.T_INTERVAL_HOUR - 132))
| (1 << (ClickHouseParser.T_INTERVAL_MINUTE - 132))
| (1 << (ClickHouseParser.T_INTERVAL_SECOND - 132))
| (1 << (ClickHouseParser.F_COUNT - 132))
| (1 << (ClickHouseParser.F_SUM - 132))
| (1 << (ClickHouseParser.IDENTIFIER - 132))
| (1 << (ClickHouseParser.NUMERIC_LITERAL - 132))
| (1 << (ClickHouseParser.STRING_LITERAL - 132))
| (1 << (ClickHouseParser.QUOTED_LITERAL - 132))
)
)
!= 0
)
):
self.state = 1118
self.expr(0)
self.state = 1123
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la == ClickHouseParser.COMMA:
self.state = 1119
self.match(ClickHouseParser.COMMA)
self.state = 1120
self.expr(0)
self.state = 1125
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1128
self.match(ClickHouseParser.RPAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Function_argumentsContext(ParserRuleContext):
    """Parse-tree context for the ``function_arguments`` grammar rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def LPAREN(self):
        return self.getToken(ClickHouseParser.LPAREN, 0)

    def RPAREN(self):
        return self.getToken(ClickHouseParser.RPAREN, 0)

    def expr(self, i: int = None):
        # No index: all child expr contexts; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ClickHouseParser.ExprContext)
        return self.getTypedRuleContext(ClickHouseParser.ExprContext, i)

    def COMMA(self, i: int = None):
        # No index: all COMMA tokens; otherwise the i-th one.
        if i is None:
            return self.getTokens(ClickHouseParser.COMMA)
        return self.getToken(ClickHouseParser.COMMA, i)

    def getRuleIndex(self):
        return ClickHouseParser.RULE_function_arguments

    def enterRule(self, listener: ParseTreeListener):
        # Dispatch only when the listener implements the hook.
        if hasattr(listener, "enterFunction_arguments"):
            listener.enterFunction_arguments(self)

    def exitRule(self, listener: ParseTreeListener):
        # Dispatch only when the listener implements the hook.
        if hasattr(listener, "exitFunction_arguments"):
            listener.exitFunction_arguments(self)
def function_arguments(self):
localctx = ClickHouseParser.Function_argumentsContext(self, self._ctx, self.state)
self.enterRule(localctx, 156, self.RULE_function_arguments)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1130
self.match(ClickHouseParser.LPAREN)
self.state = 1139
self._errHandler.sync(self)
_la = self._input.LA(1)
if (
(
((_la) & ~0x3F) == 0
and (
(1 << _la)
& (
(1 << ClickHouseParser.K_ADD)
| (1 << ClickHouseParser.K_AFTER)
| (1 << ClickHouseParser.K_ALL)
| (1 << ClickHouseParser.K_ALIAS)
| (1 << ClickHouseParser.K_ALTER)
| (1 << ClickHouseParser.K_AND)
| (1 << ClickHouseParser.K_ANY)
| (1 << ClickHouseParser.K_ARRAY)
| (1 << ClickHouseParser.K_AS)
| (1 << ClickHouseParser.K_ASCENDING)
| (1 << ClickHouseParser.K_ASC)
| (1 << ClickHouseParser.K_ASYNC)
| (1 << ClickHouseParser.K_ATTACH)
| (1 << ClickHouseParser.K_BETWEEN)
| (1 << ClickHouseParser.K_BY)
| (1 << ClickHouseParser.K_CASE)
| (1 << ClickHouseParser.K_CAST)
| (1 << ClickHouseParser.K_CHECK)
| (1 << ClickHouseParser.K_COLUMN)
| (1 << ClickHouseParser.K_COLLATE)
| (1 << ClickHouseParser.K_CREATE)
| (1 << ClickHouseParser.K_CROSS)
| (1 << ClickHouseParser.K_DESCRIBE)
| (1 << ClickHouseParser.K_DESCENDING)
| (1 << ClickHouseParser.K_DESC)
| (1 << ClickHouseParser.K_DATABASE)
| (1 << ClickHouseParser.K_DATABASES)
| (1 << ClickHouseParser.K_DEFAULT)
| (1 << ClickHouseParser.K_DETACH)
| (1 << ClickHouseParser.K_DISTINCT)
| (1 << ClickHouseParser.K_DROP)
| (1 << ClickHouseParser.K_ELSE)
| (1 << ClickHouseParser.K_END)
| (1 << ClickHouseParser.K_ENGINE)
| (1 << ClickHouseParser.K_EXISTS)
| (1 << ClickHouseParser.K_FINAL)
| (1 << ClickHouseParser.K_FIRST)
| (1 << ClickHouseParser.K_FROM)
| (1 << ClickHouseParser.K_FORMAT)
| (1 << ClickHouseParser.K_FULL)
| (1 << ClickHouseParser.K_GLOBAL)
| (1 << ClickHouseParser.K_GROUP)
| (1 << ClickHouseParser.K_HAVING)
| (1 << ClickHouseParser.K_ID)
| (1 << ClickHouseParser.K_IF)
| (1 << ClickHouseParser.K_INNER)
| (1 << ClickHouseParser.K_INSERT)
| (1 << ClickHouseParser.K_INTERVAL)
| (1 << ClickHouseParser.K_INTO)
| (1 << ClickHouseParser.K_IN)
| (1 << ClickHouseParser.K_IS)
| (1 << ClickHouseParser.K_JOIN)
| (1 << ClickHouseParser.K_KILL)
| (1 << ClickHouseParser.K_LAST)
)
)
!= 0
)
or (
(((_la - 64)) & ~0x3F) == 0
and (
(1 << (_la - 64))
& (
(1 << (ClickHouseParser.K_LEFT - 64))
| (1 << (ClickHouseParser.K_LIKE - 64))
| (1 << (ClickHouseParser.K_LIMIT - 64))
| (1 << (ClickHouseParser.K_MAIN - 64))
| (1 << (ClickHouseParser.K_MATERIALIZED - 64))
| (1 << (ClickHouseParser.K_MODIFY - 64))
| (1 << (ClickHouseParser.K_NOT - 64))
| (1 << (ClickHouseParser.K_NULL - 64))
| (1 << (ClickHouseParser.K_NULLS - 64))
| (1 << (ClickHouseParser.K_OFFSET - 64))
| (1 << (ClickHouseParser.K_ON - 64))
| (1 << (ClickHouseParser.K_OPTIMIZE - 64))
| (1 << (ClickHouseParser.K_ORDER - 64))
| (1 << (ClickHouseParser.K_OR - 64))
| (1 << (ClickHouseParser.K_OUTFILE - 64))
| (1 << (ClickHouseParser.K_PARTITION - 64))
| (1 << (ClickHouseParser.K_POPULATE - 64))
| (1 << (ClickHouseParser.K_PREWHERE - 64))
| (1 << (ClickHouseParser.K_PROCESSLIST - 64))
| (1 << (ClickHouseParser.K_QUERY - 64))
| (1 << (ClickHouseParser.K_RENAME - 64))
| (1 << (ClickHouseParser.K_RETURN - 64))
| (1 << (ClickHouseParser.K_RIGHT - 64))
| (1 << (ClickHouseParser.K_SAMPLE - 64))
| (1 << (ClickHouseParser.K_SELECT - 64))
| (1 << (ClickHouseParser.K_SET - 64))
| (1 << (ClickHouseParser.K_SETTINGS - 64))
| (1 << (ClickHouseParser.K_SHOW - 64))
| (1 << (ClickHouseParser.K_SYNC - 64))
| (1 << (ClickHouseParser.K_TABLE - 64))
| (1 << (ClickHouseParser.K_TABLES - 64))
| (1 << (ClickHouseParser.K_TEMPORARY - 64))
| (1 << (ClickHouseParser.K_TEST - 64))
| (1 << (ClickHouseParser.K_THEN - 64))
| (1 << (ClickHouseParser.K_TOTALS - 64))
| (1 << (ClickHouseParser.K_TO - 64))
| (1 << (ClickHouseParser.K_OUTER - 64))
| (1 << (ClickHouseParser.K_VALUES - 64))
| (1 << (ClickHouseParser.K_VIEW - 64))
| (1 << (ClickHouseParser.K_UNION - 64))
| (1 << (ClickHouseParser.K_USE - 64))
| (1 << (ClickHouseParser.K_USING - 64))
| (1 << (ClickHouseParser.K_WHEN - 64))
| (1 << (ClickHouseParser.K_WHERE - 64))
| (1 << (ClickHouseParser.K_WITH - 64))
| (1 << (ClickHouseParser.LPAREN - 64))
| (1 << (ClickHouseParser.STAR - 64))
)
)
!= 0
)
or (
(((_la - 132)) & ~0x3F) == 0
and (
(1 << (_la - 132))
& (
(1 << (ClickHouseParser.MINUS - 132))
| (1 << (ClickHouseParser.LBRAKET - 132))
| (1 << (ClickHouseParser.T_FLOAT32 - 132))
| (1 << (ClickHouseParser.T_FLOAT64 - 132))
| (1 << (ClickHouseParser.T_UINT8 - 132))
| (1 << (ClickHouseParser.T_UINT16 - 132))
| (1 << (ClickHouseParser.T_UINT32 - 132))
| (1 << (ClickHouseParser.T_UINT64 - 132))
| (1 << (ClickHouseParser.T_INT8 - 132))
| (1 << (ClickHouseParser.T_INT16 - | |
u"cadmium",
u"showers: angular distribution",
u"magnetic field: measurement methods",
u"cross section: high energy behavior",
u"Walecka model",
u"neutrino: Dirac: mass",
u"nuclear matter: saturation",
u"Delta(1232): width",
u"damage",
u"Sp(2)",
u"neutralino: direct detection",
u"analyzing power: tensor",
u"n: interference",
u"compacton",
u"charmed meson: rare decay",
u"nucleon: interaction",
u"interference: laser",
u"exchange: one-meson",
u"spin: wave",
u"symmetry breaking: translation",
u"detector: acceleration",
u"quantization: topological",
u"hadron: final state",
u"action: local",
u"equation of state: time dependence",
u"membrane model: interaction",
u"cosmological model: ekpyrotic",
u"pi: mass difference",
u"gravitation: validity test",
u"set theory",
u"group: Mathieu",
u"tau-: width",
u"Kondo model",
u"boundary condition: anti-de Sitter",
u"antineutrino: energy spectrum",
u"glueball: hadronic decay",
u"action: topological",
u"Riccati equation",
u"intermediate boson: hadronic decay",
u"electromagnetic field: effect",
u"MIMAC",
u"muonium: hyperfine structure",
u"axion: density",
u"oscillation: spectrum",
u"vortex: moduli space",
u"sparticle: associated production",
u"mass number",
u"regeneration",
u"astrophysics: relativistic",
u"entropy: fluctuation",
u"top: angular distribution",
u"helium: density",
u"gluon: associated production",
u"electromagnetic decay",
u"quantum mechanics: deformation",
u"bottom meson: decay",
u"atomic physics: parity",
u"photon nucleus: inelastic scattering",
u"charmed baryon: width",
u"membrane: BPS",
u"nucleus: mass",
u"CREAM",
u"sulfur: fluorine",
u"energy: absorption",
u"expansion: nonrelativistic",
u"stability: topological",
u"top: multiple production",
u"Z': electroproduction",
u"geometry: deformation",
u"factorization: dependence",
u"geometry: Poisson",
u"spinor: massless",
u"quark: doublet",
u"background: warped",
u"rho(1700)",
u"effect: acoustic",
u"induction",
u"gauge field theory: quantization",
u"muon+: polarized beam",
u"omega(783): branching ratio",
u"surface: lunar",
u"string: scattering",
u"baryon antibaryon: asymmetry",
u"forward production",
u"K0: rare decay",
u"operator: transition",
u"tau: electroproduction",
u"quantum number: exotic",
u"higher-order: 5",
u"Lambda/c+: hadroproduction",
u"scattering: relativistic",
u"Tolman-Oppenheimer-Volkoff equation",
u"chi/c0(3415): hadronic decay",
u"hyperon: width",
u"scattering: W W",
u"a0(980): hadronic decay",
u"quantum gravity: simplex",
u"approximation: infrared",
u"neutral particle: long-lived",
u"detector: calibration",
u"sfermion: heavy",
u"expansion: momentum",
u"h/c(3526)",
u"approximation: dipole",
u"gap equation: solution",
u"spectrum: tensor",
u"effective potential: scalar",
u"parity: Z(2)",
u"Z': hadronic decay",
u"tritium: hypernucleus",
u"lepton: particle identification",
u"threshold: energy",
u"effect: beam-beam",
u"deuteron: beam",
u"Lambda Lambda",
u"general relativity: solution",
u"time: calibration",
u"symmetry: SU(6) x O(3)",
u"vector meson: form factor",
u"soliton: scalar",
u"magnetic monopole: Dirac",
u"D0: radiative decay",
u"M-theory: solution",
u"energy: Coulomb",
u"symmetry: Kac-Moody",
u"group theory: deformation",
u"ghost: scalar",
u"resonance: spectrum",
u"bottom: decay modes",
u"matter: wave",
u"Spin(7)",
u"gluon: interaction",
u"algebra: octonion",
u"fermion: confinement",
u"meson: neutral particle",
u"free electron laser: X-ray",
u"transformation: nonlinear",
u"resonance: model",
u"neutron star: collapse",
u"unparticle: tensor",
u"gauge boson: scattering",
u"scattering amplitude: parametrization",
u"quarkonium: mass difference",
u"domain wall: tension",
u"regularization: ultraviolet",
u"isospin: triplet",
u"jet: correlation",
u"current: conformal",
u"Sommerfeld enhancement",
u"germanium: nuclide",
u"Lipatov equation: solution",
u"electron: drift velocity",
u"string: fluid",
u"intermediate boson: mass",
u"K*(892): polarization",
u"coupling: kinetic",
u"model: confinement",
u"width: branching ratio",
u"static",
u"charged particle: long-lived",
u"positron: yield",
u"model: Dirac",
u"anisotropy: dipole",
u"ANITA",
u"CUORE",
u"jet: top",
u"K: multiplicity",
u"symmetry: nonlinear",
u"Boulby",
u"Fierz-Pauli equation",
u"B+",
u"bound state: energy spectrum",
u"energy: scaling",
u"hodoscope",
u"manual",
u"charmonium: width",
u"vector meson: spectral representation",
u"quark: energy spectrum",
u"dimuon: charge",
u"pseudoparticle",
u"plasma: frequency",
u"muon nucleus: nuclear reaction",
u"false vacuum: bubble",
u"exchange: Regge",
u"differential cross section: asymmetry",
u"topology: defect",
u"stacking",
u"fermion: valence",
u"Lambda Lambda: interaction",
u"nucleon nucleon: force",
u"muon deuteron: deep inelastic scattering",
u"polarization: power spectrum",
u"space: S(7)",
u"beam",
u"color: exchange",
u"neutrino nucleon: exclusive reaction",
u"beam dynamics: longitudinal",
u"axion: decay",
u"string: effect",
u"pi+ pi-: annihilation",
u"pi K: scattering length",
u"pi p: interaction",
u"geometry: discrete",
u"dilepton: thermal",
u"mass: sea",
u"metric: deformation",
u"string: fusion",
u"algebra: Temperley-Lieb",
u"tracking detector: performance",
u"gauge field theory: thermal",
u"nucleus: fission",
u"midisuperspace",
u"bremsstrahlung: emission",
u"p: showers",
u"Chan-Paton factor",
u"dibaryon: mass",
u"synchrotron oscillation",
u"resonance: massive",
u"charged particle: trajectory",
u"algebra: SU(3)",
u"space-time: Einstein-Cartan",
u"radiation: flux",
u"Vaidya",
u"critical phenomena: stability",
u"field equations: bounce",
u"power spectrum: oscillation",
u"p: interaction",
u"symmetry: Killing",
u"antimatter: production",
u"p: size",
u"cosmic radiation: density",
u"astrophysics: perturbation",
u"R-hadron",
u"pi pi: phase shift",
u"exchange: multiple",
u"D: excited state",
u"helium: yield",
u"color: 3",
u"neutrino: radiation",
u"pseudoscalar meson: wave function",
u"antideuteron",
u"hidden variable",
u"gauge field theory: Z(N)",
u"evolution equation: solution",
u"pi0: mass",
u"bending magnet: superconductivity",
u"eta(958): wave function",
u"correction: Yukawa",
u"Holst term",
u"bilepton: mass",
u"fluid: charge",
u"quark: correlation",
u"gravitino: lifetime",
u"toponium",
u"D0 anti-D0: oscillation",
u"operator: Virasoro",
u"n: confinement",
u"B+: lifetime",
u"space-time: S(3) x R(1)",
u"particle: charge",
u"pi0: electromagnetic decay",
u"jet: suppression",
u"quark quark: scattering amplitude",
u"isospin: 3/2",
u"radiation: length",
u"scaling: correction",
u"superfluid: relativistic",
u"form factor: momentum dependence",
u"pi: decay modes",
u"pseudoscalar meson: mixing",
u"fermion: symplectic",
u"potential: complex",
u"fragmentation: model",
u"space-time: bubble",
u"neutrino: right-handed: mass",
u"partial wave: interference",
u"slepton: hadroproduction",
u"fermion: technicolor",
u"Higgs particle: quantum number",
u"form factor: momentum transfer",
u"Hubble constant: time dependence",
u"muon: drift chamber",
u"eta: electromagnetic decay",
u"beam dynamics: nonlinear",
u"Lambda/b0: hadroproduction",
u"algebra: SO(2,1)",
u"neutrino/tau: flux",
u"pi: angular distribution",
u"pulsar: emission",
u"expansion: topological",
u"effective action: nonlocal",
u"torus: fuzzy",
u"local",
u"positron: annihilation",
u"viscosity: correction",
u"electron: coupling",
u"optics: background",
u"triplet: SU(2)",
u"quantum chromodynamics: action",
u"eta/c(2980): radiative decay",
u"aberration",
u"geometry: fluctuation",
u"conifold: transition",
u"bottom meson: rare decay",
u"baryon resonance: photoproduction",
u"vector meson: interaction",
u"pentaquark: wave function",
u"gravastar",
u"carbon: crystal",
u"lattice field theory: compact",
u"symmetry: SU(2) x SU(2) x U(1)",
u"B/c+: hadronic decay",
u"channel cross section: mass dependence",
u"color: charge",
u"detector: geometry",
u"W': hadroproduction",
u"momentum: fluctuation",
u"superfield: Higgs",
u"FINUDA",
u"p n: radiative capture",
u"Bethe-Salpeter equation: coupled channel",
u"lepton: momentum",
u"hyperon: pair production",
u"experimental methods: sensitivity",
u"field theory: messenger",
u"pi-: photoproduction",
u"superpotential: twist",
u"resistive plate chamber: glass",
u"velocity: spectrum",
u"p deuteron: inelastic scattering",
u"pi: absorption",
u"scalar particle: heavy",
u"beam: stability",
u"field equations: Toda",
u"K-: condensation",
u"quantum gravity: linear",
u"atom: gas",
u"symmetry: SU(N) x SU(N)",
u"Ward identity: conformal",
u"nucleon: mass spectrum",
u"nucleus: ground state",
u"tensor: conformal",
u"interpretation of experiments: DAMA",
u"deuteron nucleus: scattering",
u"charged particle: rapidity",
u"beam: orbit",
u"throat: warped",
u"gauge boson: multiple production",
u"nucleon nucleon: scattering length",
u"pi: associated production",
u"wormhole: Lorentz",
u"hadron: spin",
u"flux tube: electric",
u"pi pi: inelastic scattering",
u"anti-p p: ratio",
u"lepton: mass formula",
u"renormalization group: higher-order",
u"supersymmetry: vector",
u"pi: width",
u"star: energy loss",
u"Nambu bracket",
u"Ward identity: chiral",
u"quarkonium: hybrid",
u"B-L number: invariance",
u"transformation: local",
u"bottom baryon: hadronic decay",
u"potassium",
u"vorton",
u"twist: 4",
u"dual resonance model",
u"optics: communications",
u"pressure: Casimir",
u"pi nucleus: inclusive reaction",
u"model: nonrelativistic",
u"charm: semileptonic decay",
u"partition function: torus",
u"lepton: model",
u"B/s0: decay",
u"photon axion: oscillation",
u"smuon",
u"catastrophe theory",
u"membrane: production",
u"space-time: asymmetry",
u"plasma: oscillation",
u"scintillation counter: liquid argon",
u"capture: solar",
u"vacuum state: instanton",
u"Born-Infeld model: nonlinear",
u"dyon: condensation",
u"thermodynamics: critical phenomena",
u"pi+ nucleus: nuclear reaction",
u"magnet: multipole",
u"algebra: Cartan",
u"eta: pair production",
u"D/s+: leptonic decay",
u"Mathieu",
u"vacuum state: quantum",
u"Z': signature",
u"matter: rotation",
u"eta/c(2980): mass",
u"p deuteron: elastic scattering",
u"spin: fluctuation",
u"cesium: atom",
u"Chern-Simons term: induced",
u"fluctuation: stochastic",
u"field theory: monopole",
u"condensation: magnetic",
u"isospin: conservation law",
u"water: solids",
u"antineutrino: mixing angle",
u"p: relativistic",
u"radioactivity: background",
u"isospin: 1",
u"photon deuteron: inclusive reaction",
u"final focus",
u"muon+ muon-: storage ring",
u"instanton: gas",
u"final state: two-pion",
u"gradient",
u"anti-D0",
u"ruthenium",
u"vector boson: massive",
u"nucleus: spin",
u"tetraquark: hadronic decay",
u"muon+ muon-: ratio",
u"Hall effect: spin",
u"lepton: energy",
u"CERN SPS Coll",
u"Oak Ridge SNS PS",
u"lectures: introductory",
u"invisible decay",
u"quintessence: coupling",
u"quark quark: scattering",
u"p: electroproduction",
u"conservation law: Noether",
u"hadron: correlation",
u"Z0: pole",
u"superfield: vector",
u"string: pair production",
u"coupling: gravitation",
u"dissociation: electromagnetic",
u"production: strangeness",
u"field equations: relativistic",
u"D: form factor",
u"quantum number: conservation law",
u"p-adic",
u"photon: momentum",
u"p: radiation",
u"gravitino: density",
u"dilaton: coupling constant",
u"acceleration: stochastic",
u"condensation: vector",
u"black hole: semiclassical",
u"generalized parton distribution: moment",
u"model: liquid",
u"Ponzano-Regge model",
u"trigger: design",
u"zero mode: chiral",
u"programming: manual",
u"dark matter: power spectrum",
u"dark energy: decay",
u"pressure: perturbation",
u"quadrupole lens: superconductivity",
u"B/s: decay modes",
u"strange particle: yield",
u"microstate",
u"quantum gravity: renormalizable",
u"chargino: production",
u"supersymmetry: twist",
u"parity: operator",
u"magnetic field: time dependence",
u"sparticle: heavy",
u"bound state: Majorana",
u"fluctuation: vector",
u"operator: dimension: 4",
u"elements",
u"critical phenomena: conformal",
u"many-body problem: relativistic",
u"new particle: decay modes",
u"J/psi(3100): final state",
u"Yang-Baxter",
u"scale: compactification",
u"symmetry: Z(2) x Z(2)",
u"scalar particle: propagator",
u"bottom particle: hadroproduction",
u"screening: magnetic",
u"radiation: quantum",
u"scalar particle: triplet",
u"bottom: hadronic decay",
u"astrophysics: plasma",
u"mechanics: stability",
u"scattering: WIMP nucleus",
u"group: Coxeter",
u"second-class current",
u"magnetic field: color",
u"coupling: Coulomb",
u"perturbation: effect",
u"analysis",
u"efficiency: angular dependence",
u"transformation: CP",
u"quark hadron: duality",
u"baryon: propagator",
u"quantization: symplectic",
u"gauge boson: scattering amplitude",
u"Meissner effect: duality",
u"mechanics: action",
u"bottom particle: decay modes",
u"positron: spectrum",
u"loop equation",
u"tachyon: coupling",
u"gluon: angular momentum",
u"lattice field theory: finite temperature",
u"particle separator",
u"SU(3) x SU(2) x U(1)",
u"cross section: angular dependence",
u"matter: nonrelativistic",
u"jet: yield",
u"Z0: rare decay",
u"Higgs | |
# Simulates a network with nodes, where each node can be either a
# transmitter or receiver (but not both) at any time step. The simulation
# examines the coverage based on the signal-to-interference ratio (SINR).
# The network has a random medium access control (MAC) scheme based on a
# determinantal point process, as outlined in the paper[1] by
# B\laszczyszyn, Brochard and Keeler. This code validates by simulation
# Propositions IV.1 and IV.2 in the paper[1]. This result gives the
# probability of coverage based on the SINR value of a transmitter-receiver
# pair in a non-random network of transmitter-or-receiver nodes such as a
# realization of a random point process.
#
# More specifically, the code estimates the probability of x and y being
# connected (i.e. SINR(x,y) > tau) given that x is transmitting and
# y isn't.
#
# The simulation section estimates the empirical probability of SINR-based
# coverage. For a large enough number of simulations, this empirical result
# will agree with the analytic results given in the paper[1].
#
# By coverage, it is assumed that the SINR of the transmitter is larger
# than some threshold at the corresponding receiver.
#
# Probabilities for other events are calculated/estimated including:
#
# Event A=SINR(x,y) > tau
# Event B=Transmitter exists
# Event C=Receiver exists
#
# This code was originally written by <NAME> for the paper by
# B\laszczyszyn, Brochard and Keeler[1].
#
# If you use this code in published research, please cite paper[1].
#
# References:
#
# [1] B\laszczyszyn, Brochard and Keeler, "Coverage probability in
# wireless networks with determinantal scheduling", 2020.
#
# Author: <NAME>, 2020.
from funProbCovTXRXDet import funProbCovTXRXDet
import numpy as np # NumPy package for arrays, random number generation, etc
import matplotlib.pyplot as plt # for plotting
# simulate determinantal point process
from funSimSimpleDPP import funSimSimpleDPP
from funPalmK import funPalmK # find Palm distribution (for a single point)
from funLtoK import funLtoK # convert L kernel to a (normalized) K kernel
plt.close("all")  # close all open figures
# Set the random seed so every run draws the same network and fading values.
np.random.seed(1)
###START -- Parameters -- START###
choiceExample = 1 # 1 or 2 for a random (uniform) or deterministic example
numbSim = 10**4 # number of Monte Carlo simulation runs
numbNodes = 10 # number of nodes (each is a transmitter OR a receiver)
indexTrans = 0 # index of the tagged transmitter
indexRec = 1 # index of the tagged receiver
# The two indices above must be distinct and lie in [0, numbNodes).
# Fading model: exponential power gains (Rayleigh amplitude fading).
muFading = 1/3 # mean of the exponential fading variables
# Path loss model: ell(r) = (kappaPath*(1+r))**(-betaPath), see funPathloss.
betaPath = 2 # pathloss exponent
kappaPath = 1 # rescaling constant for pathloss function
thresholdSINR = 0.1 # SINR threshold value (tau in paper[1])
constNoise = 0 # noise constant; 0 makes the model interference-limited (SIR)
# Determinantal kernel parameters
choiceKernel = 1 # 1 for Gaussian (ie squared exponential); 2 for Cauchy
# (an independent/binomial model with pAloha would be choice 3, but it is
# not implemented below -- any value other than 1 or 2 raises an exception)
sigma = 1 # bandwidth parameter for Gaussian and Cauchy kernels
alpha = 1 # shape parameter for Cauchy kernel only
pAloha = 0.5 # parameter for independent kernel (ie proportion transmitting)
# Simulation window parameters (rectangle [xMin,xMax] x [yMin,yMax])
xMin = -1
xMax = 1 # x dimensions
yMin = -1
yMax = 1 # y dimensions
xDelta = xMax-xMin # rectangle width
yDelta = yMax-yMin # rectangle height
###END -- Parameters -- END###
# Build the fixed network configuration (node locations) and the DPP kernel.
# interferer section
if (choiceExample == 1):
    # Random (uniform) x/y coordinates inside the simulation window;
    # each node will later act as a transmitter or a receiver.
    xx = xDelta*(np.random.rand(numbNodes))+xMin
    yy = yDelta*(np.random.rand(numbNodes))+yMin
else:
    # Deterministic x/y coordinates traced on Lissajous-like curves.
    # NOTE(review): these land in [0,1]x[0,1] regardless of the window
    # parameters xMin/xMax/yMin/yMax above -- confirm that is intended.
    t = 2*np.pi*np.linspace(0, (numbNodes-1)/numbNodes, numbNodes)
    xx = (1+np.cos(5*t+1))/2
    yy = (1+np.sin(3*t+2))/2
# Tagged transmitter location
xxTX = xx[indexTrans]
yyTX = yy[indexTrans]
# Tagged receiver location
xxRX = xx[indexRec]
yyRX = yy[indexRec]
# START -- CREATE L matrix -- START
sizeL = numbNodes
# Pairwise squared distances between all nodes (x and y differences).
xxDiff = np.outer(xx, np.ones((sizeL,)))-np.outer(np.ones((sizeL,)), xx)
yyDiff = np.outer(yy, np.ones((sizeL,)))-np.outer(np.ones((sizeL,)), yy)
rrDiffSquared = (xxDiff**2+yyDiff**2)
if choiceKernel == 1:
    # Gaussian / squared-exponential similarity kernel
    L = np.exp(-(rrDiffSquared)/sigma**2)
elif choiceKernel == 2:
    # Cauchy similarity kernel
    L = 1/(1+rrDiffSquared/sigma**2)**(alpha+1/2)
else:
    raise Exception('choiceKernel has to be equal to 1 or 2.')
L = 10*L # scale matrix up (increases the eigenvalues ie number of points)
# END-- CREATE L matrix -- # END
# Eigendecomposition of L, used by the DPP sampler in the simulation loop.
eigenValL, eigenVecL = np.linalg.eig(L)
#Helper functions
def funPathloss(r):
    """Pathloss function ell(r) = (kappaPath*(1+r))**(-betaPath).

    Accepts a scalar or NumPy array distance r and returns the attenuation;
    uses the module-level constants kappaPath and betaPath.
    """
    scaled = kappaPath * (1 + r)
    return scaled ** (-betaPath)
# Functions for the probability of being connected
def fun_h(s, r):
    """Interference factor h(s, r) = 1 / (1 + tau * ell(s)/ell(r)).

    s is the interferer-to-receiver distance, r the signal distance; tau is
    the module-level SINR threshold. Works element-wise on arrays.
    """
    lossRatio = funPathloss(s) / funPathloss(r)
    return 1 / (thresholdSINR * lossRatio + 1)
def fun_w(r):
    """Noise factor w(r) = exp(-(tau/mu) * N / ell(r)).

    With constNoise == 0 this is identically 1 (interference-limited case).
    Works element-wise on arrays.
    """
    exponent = -(thresholdSINR / muFading) * constNoise / funPathloss(r)
    return np.exp(exponent)
# Boolean result vectors, one entry per simulation run.
booleA = np.zeros(numbSim, dtype=bool) # transmitter is connected (SINR > tau)
booleB = np.zeros(numbSim, dtype=bool) # transmitter exists (indexTrans sampled)
booleC = np.zeros(numbSim, dtype=bool) # receiver exists (indexRec NOT sampled)
# Monte Carlo loop: sample the medium-access DPP and, when the tagged
# transmitter is active, draw fading and evaluate its SINR at the receiver.
for ss in range(numbSim):
    # Sample the set of active (transmitting) node indices from the DPP.
    indexDPP = funSimSimpleDPP(eigenVecL, eigenValL)
    booleB[ss] = any(indexDPP == indexTrans) # transmitter is in the subset
    booleC[ss] = all(indexDPP != indexRec) # receiver is not in the subset
    # Only when the transmitter is active does its SINR need evaluating.
    if booleB[ss]:
        # Mask of active interferers: the sampled subset minus the transmitter.
        booleInter = np.zeros(numbNodes, dtype=bool)
        booleInter[indexDPP] = True
        booleInter[indexTrans] = False # exclude transmitter
        # x/y values of interfering nodes
        xxInter = xx[booleInter]
        yyInter = yy[booleInter]
        # number of interferers
        numbInter = np.sum(booleInter)
        # Received interference power: exponential fading times pathloss
        # from each interferer to the tagged receiver.
        fadeRandInter = np.random.exponential(muFading, numbInter) # fading
        distPathInter = np.hypot(xxInter-xxRX, yyInter-yyRX) # path distance
        proplossInter = fadeRandInter*funPathloss(distPathInter) # pathloss
        # Received signal power from the tagged transmitter.
        fadeRandSig = np.random.exponential(muFading) # fading
        distPathSig = np.hypot(xxTX-xxRX, yyTX-yyRX) # path distance
        proplossSig = fadeRandSig*funPathloss(distPathSig) # pathloss
        # SINR = signal / (total interference + noise).
        SINR = proplossSig/(np.sum(proplossInter)+constNoise)
        # Connection event: SINR exceeds the threshold.
        booleA[ss] = (SINR > thresholdSINR)
# Derived per-run events used by the empirical estimates below.
booleBandC = booleB & booleC # transmitter-receiver pair exists
booleNotC = ~booleC # receiver does not exist (a point sits at indexRec)
booleBandNotC = booleB & booleNotC # transmitter exists, receiver does not
###START Create kernels and Palm kernels START###
K = funLtoK(L) # calculate the (normalized) K kernel from the L kernel
sizeK = K.shape[0] # number of columns/rows in kernel matrix K
# Distances from every node j to the tagged receiver (column-replicated so
# they can be combined element-wise with the signal distance below).
dist_ji_xx = np.outer(xx, np.ones((sizeK,)))-np.outer(np.ones((sizeK,)), xxRX)
dist_ji_yy = np.outer(yy, np.ones((sizeK,)))-np.outer(np.ones((sizeK,)), yyRX)
dist_ji = np.hypot(dist_ji_xx, dist_ji_yy) # Euclidean distances
# Distance from the tagged transmitter to the tagged receiver.
dist_ii_xx = xxTX-xxRX
dist_ii_yy = yyTX-yyRX
dist_ii = np.hypot(dist_ii_xx, dist_ii_yy) # Euclidean distances
# repeat cols for element-wise evaluation
dist_ii = np.tile(dist_ii, (sizeK, 1))
# Apply the h and w helper functions from Section IV of paper[1].
hMatrix = fun_h(dist_ji, dist_ii) # matrix H for all h_{x_i}(x_j) values
W_x = fun_w(np.hypot(xx-xxRX, yy-yyRX)) # noise factor per node
# Build the h-vector corresponding to the tagged transmitter.
booleAll = np.ones(sizeK, dtype=bool)
# NOTE: this is an alias, not a copy -- the next line also flips booleAll;
# harmless here because booleAll is never read again.
booleReduced = booleAll
booleReduced[indexTrans] = False # remove transmitter
# choose transmitter-receiver row
hVectorReduced = hMatrix[booleReduced, indexTrans]
# repeat vector hVectorReduced as rows
hMatrixReduced = np.tile(hVectorReduced, (sizeK-1, 1))
hMatrixReduced = hMatrixReduced.transpose()
# Palm kernels conditioned on the transmitter's point existing
# (reduced version drops that point's row/column).
KPalmReducedTX, KPalmTX = funPalmK(K, indexTrans)
# Palm kernels conditioned on a point existing at the receiver index.
KPalmRXReduced, KPalmRX = funPalmK(K, indexRec)
# Palm kernel conditioned on points at BOTH the transmitter and receiver.
_, KPalmTXRX = funPalmK(KPalmTX, indexRec)
# Reduced (by transmitter) Palm kernel conditioned on transmitter AND
# receiver existing: gather the rows/columns excluding the transmitter.
indexReduced = np.arange(sizeK)[booleReduced]
KPalmSemiReducedTXRX = np.eye(sizeK-1)
for i in range(KPalmTXRX.shape[0]-1):
    KPalmSemiReducedTXRX[:, i] = KPalmTXRX[indexReduced, indexReduced[i]]
# Final symmetrized kernels sqrt(1-h) K sqrt(1-h) (element-wise products).
# For the transmitter:
KReduced_hTX = np.sqrt(1-hMatrixReduced.transpose()) * \
    KPalmReducedTX*np.sqrt(1-hMatrixReduced)
# For receiver and transmitter jointly:
KReduced_hRX = np.sqrt(1-hMatrixReduced.transpose()) * \
    KPalmSemiReducedTXRX*np.sqrt(1-hMatrixReduced)
###END Create kernels and Palm kernels END###
###START Connection Probability (ie SINR>thresholdConst) START###
# Compute the probability that the transmitter's signal at the receiver
# has SINR > threshold, given the pair is active (ie transmitting and
# receiving); see Section IV in paper[1]. Each analytic quantity is paired
# with its empirical (Monte Carlo) counterpart from the loop above.
# Probability the transmitter exists (point at indexTrans) -- event B.
probB = K[indexTrans, indexTrans]
probB_Emp = np.mean(booleB)
# Probability the receiver exists (NO point at indexRec) -- event C.
probC = 1-K[indexRec, indexRec]
probC_Emp = np.mean(booleC)
# Probability transmitter exists but receiver does not: for a DPP the
# 2x2 principal minor of K is P(points at both indices), ie P(B and not C).
indexPair = np.array([indexTrans, indexRec])
probBNotC = np.linalg.det(K[indexPair, :][:, indexPair])
probBNotC_Emp = np.mean(booleBandNotC)
#
# Probability transmitter and receiver both exist: P(B) - P(B and not C).
probBandC = probB-probBNotC
probBandC_Emp = np.mean(booleBandC)
# Probability of SINR>threshold (ie transmitter is connected) given B.
probA_GivenB = np.linalg.det(np.eye(sizeK-1)-KReduced_hTX)*W_x[indexTrans]
probA_GivenB_Emp = np.mean(booleA[booleB])
# Probability of SINR>threshold given B and NOT C (a point at indexRec).
probA_GivenBNotC = np.linalg.det(np.eye(sizeK-1)-KReduced_hRX)*W_x[indexTrans]
# NOTE(review): this conditions on booleNotC alone, not booleBandNotC --
# runs with B false contribute A=False; confirm against the analytic
# quantity above, which appears to condition on B AND not C.
probA_GivenBNotC_Emp = np.mean(booleA[booleNotC])
# Probability of B given NOT C (ie a transmitter exists at indexRec).
probB_GivenNotC = KPalmRX[indexTrans, indexTrans]
probB_GivenNotC_Emp = np.mean(booleB[booleNotC])
# Probability of B given C, via the law of total probability.
probB_GivenC = (probB-(1-probC)*probB_GivenNotC)/probC
probB_GivenC_Emp = np.mean(booleB[booleC])
# Probability of NOT C (ie a transmitter exists at indexRec) given B.
probNotC_GivenB = KPalmTX[indexRec, indexRec]
probNotC_GivenB_Emp = np.mean(booleNotC[booleB])
# Probability of C given B (complement of the previous quantity).
probC_GivenB_Emp = np.mean(booleC[booleB])
probC_GivenB = 1-probNotC_GivenB
print('Conditional coverage probability (ie A given B and C).')
# Coverage probability: P(A | B and C) via total probability over C.
probA_GivenBandC = (probA_GivenB-probNotC_GivenB*probA_GivenBNotC)/probC_GivenB
print('probA_GivenBandC = ', probA_GivenBandC)
# Estimate the empirical probability two different ways.
# Directly from the joint event:
probA_GivenBandC_Emp1 = np.mean(booleA[booleBandC])
print('probA_GivenBandC_Emp1 = ', probA_GivenBandC_Emp1)
# Indirectly from the conditional estimates (computed but not printed):
probA_GivenBandC_Emp2 = (probA_GivenB_Emp-probNotC_GivenB_Emp*probA_GivenBNotC_Emp)\
    / probC_GivenB_Emp
print('Coverage probability (ie A given B and C).')
# Unconditional connection probability: P(A and B and C).
probCov = probA_GivenBandC*probBandC
print('probCov = ', probCov)
probCov_Emp1 = np.mean(booleA & booleB & booleC)
print('probCov_Emp1 = ', probCov_Emp1)
#probCov_Emp2=probA_GivenBandC_Emp2*probBandC_Emp
#probCovCond=probA_GivenBandC #conditional coverage probability
#probTXRX=probBandC #probability of pair existing
#connection probability
#probCov=probCovCond*probTXRX
###END Connection Probability (ie SINR>thresholdConst) END###
# Cross-check: recompute the same three probabilities with the packaged
# helper; its results should match the values derived above.
probCov, probTXRX, probCovCond = funProbCovTXRXDet(
    xx, yy, fun_h, fun_w, L, indexTrans, indexRec)
if indexDPP.size > 0:
### START -- Plotting -- START ###
markerSize = 13
#random color vector
vectorColor = np.random.rand(3) # random vector for colors of marker
#Plot point process
plt.plot(xx, yy, 'ko', markerfacecolor="None", markersize=markerSize)
#Plot determinantally-thinned point process
plt.plot(xx[indexDPP], yy[indexDPP], 'k.', markerfacecolor=vectorColor,
markersize=1.1*markerSize, markeredgecolor='none')
plt.axis('equal')
plt.axis('off')
plt.legend(('Original point process', | |
= precfloat((float(Operation_info[h]['swap_asks_price5'][0]) + float(Operation_info[h]['swap_bids_price5'][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_close_price = precfloat(float(Operation_info[h]['swap_bids_price5'][0]), Necessary_info[h]['swap_tick_digit'])
mode_take_close_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_asks_price5'][4]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': swap_close_price,
'spot_close_size': spot_close_size,
'swap_close_size': swap_close_size,
'spot_order_type': 'LIMIT_MAKER',
'swap_order_type': 'LIMIT'}
elif Operation_info[h]['swap_position'] + Operation_info[h]['spot_balance'] < -1*tolerate_limit / swap_present_price:
# 平仓时合约仓位多于现货仓位,合约要加速平空,合约买在对手价 0607 swap买在中间价
#如果swap best ask-best bid>1个tick_size,表示可以挂,不然就要挂best bid
if float(Operation_info[h]['swap_bids_price5'][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_asks_price5'][0]):
swap_close_price = precfloat((float(Operation_info[h]['swap_asks_price5'][0]) + float(Operation_info[h]['swap_bids_price5'][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_close_price = precfloat(float(Operation_info[h]['swap_bids_price5'][0]), Necessary_info[h]['swap_tick_digit'])
mode_take_close_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_asks_price5'][4]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': swap_close_price,
'spot_close_size': '0',
'swap_close_size': swap_close_size,
'spot_order_type': 'LIMIT_MAKER',
'swap_order_type': 'LIMIT'}
else: # 平时设置在best,spot平多卖出,swap平空买入
mode_take_close_long_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'spot_instrument_id': Necessary_info[h]['spot_instrument_id'],
'spot_close_price': precfloat(float(Operation_info[h]['spot_asks_price5'][0]), Necessary_info[h]['spot_tick_digit']),
'swap_close_price': precfloat(float(Operation_info[h]['swap_bids_price5'][0]), Necessary_info[h]['swap_tick_digit']),
'spot_close_size': spot_close_size,
'swap_close_size': swap_close_size,
'spot_order_type': 'LIMIT_MAKER',
'swap_order_type': 'LIMIT'}
# swap_min_size = precfloat(Necessary_info[h]['swap_min_notional']/swap_present_price, Necessary_info[h]['quantityPrecision'])
if open_long_final_open_mode == 'on': # swap继续开仓开空,swap_position<0,spot_balance>0
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount,swap是open_long_final_open_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position']) * swap_present_price) + '美金'
sendmessage(tutu)
if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'])>= swap_min_size:
swap_size= swap_min_size
else:
swap_size=0
#如果swap best ask-best bid>1个tick_size,表示可以挂,不然就要挂best ask
if float(Operation_info[h]['swap_bids_price5'][0]) + Necessary_info[h]['swap_tick_size']< float(Operation_info[h]['swap_asks_price5'][0]):
swap_price = precfloat((float(Operation_info[h]['swap_asks_price5'][0]) + float(Operation_info[h]['swap_bids_price5'][0]))/2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat(float(Operation_info[h]['swap_asks_price5'][0]), Necessary_info[h]['swap_tick_digit'])
mode_take_open_long_final_open_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_price': swap_price, #买在best价
'swap_size': swap_size,
'swap_order_type': 'LIMIT'}
if open_short_final_open_mode == 'on': # swap继续开仓开多,swap_position>0,spot_balance<0
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount,swap是open_short_final_open_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] +Operation_info[h]['swap_position']) * swap_present_price) + '美金'
sendmessage(tutu)
if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'])>= swap_min_size:
swap_size= swap_min_size
else:
swap_size=0
# 因为成交慢,试著开多在对手价, 0607改成挂在中间价
#如果swap best ask-best bid>1个tick_size,表示可以挂,不然就要挂best ask
if float(Operation_info[h]['swap_bids_price5'][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_asks_price5'][0]):
swap_price = precfloat((float(Operation_info[h]['swap_asks_price5'][0]) + float(Operation_info[h]['swap_bids_price5'][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat(float(Operation_info[h]['swap_bids_price5'][0]), Necessary_info[h]['swap_tick_digit'])
mode_take_open_short_final_open_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_price': swap_price, #买在best价
'swap_size': swap_size,
'swap_order_type': 'LIMIT'}
if open_long_final_close_mode == 'on': # swap仓位多于spot,要平空,swap_position<0,spot_balance>0
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount,swap是open_long_final_close_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] +Operation_info[h]['swap_position']) * swap_present_price) + '美金'
sendmessage(tutu)
# print('spot_balance_23',Operation_info[h]['spot_balance'])
# print('swap_position_34',Operation_info[h]['swap_position'])
if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'])>= swap_min_size:
swap_size= swap_min_size
else:
swap_size=0
# 因为成交慢,试著平空/买在对手价, 0607改成挂在中间价
#如果swap best ask-best bid>1个tick_size,表示可以挂,不然就要挂best bid
if float(Operation_info[h]['swap_bids_price5'][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_asks_price5'][0]):
swap_price = precfloat((float(Operation_info[h]['swap_asks_price5'][0]) + float(Operation_info[h]['swap_bids_price5'][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat(float(Operation_info[h]['swap_bids_price5'][0]), Necessary_info[h]['swap_tick_digit'])
mode_take_open_long_final_close_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_price': swap_price, # 买在best价
'swap_size': swap_size,
'swap_order_type': 'LIMIT'}
if open_short_final_close_mode == 'on':  # swap position exceeds spot: close the swap long (swap_position>0, spot_balance<0)
    tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount,swap是open_short_final_close_mode,现在相差' + \
        str((Operation_info[h]['spot_balance'] +Operation_info[h]['swap_position']) * swap_present_price) + '美金'
    sendmessage(tutu)
    # Only place an order when the residual position exceeds the exchange minimum size.
    if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'])>= swap_min_size:
        swap_size= swap_min_size
    else:
        swap_size=0
    # Fills were slow selling at the touch, so (since 06/07) quote at the mid
    # price when the spread is wider than one tick; otherwise quote at best ask.
    # BUG FIX: 'swap_asks_price5' is a flat list of floats (see the depth5
    # refresh below, which appends float(...) per level), so the original
    # extra ['asks'] subscript raised TypeError at runtime.
    if float(Operation_info[h]['swap_bids_price5'][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_asks_price5'][0]):
        swap_price = precfloat((float(Operation_info[h]['swap_asks_price5'][0]) + float(Operation_info[h]['swap_bids_price5'][0])) / 2, Necessary_info[h]['swap_tick_digit'])
    else:
        swap_price = precfloat(float(Operation_info[h]['swap_asks_price5'][0]), Necessary_info[h]['swap_tick_digit'])
    mode_take_open_short_final_close_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
                                              'swap_price': swap_price,  # sell at best/mid price
                                              'swap_size': swap_size,
                                              'swap_order_type': 'LIMIT'}
if close_long_final_close_mode == 'on': # spot平多,swap继续平空a买在对手价,
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount_Close,swap是close_long_final_close_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position']) * swap_present_price) + '美金'
sendmessage(tutu)
if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'])>= swap_min_size:
swap_close_size= swap_min_size
else:
swap_close_size=0
# 因为成交慢,试著平空/买在对手价, 0607改成挂在中间价
#如果swap best ask-best bid>1个tick_size,表示可以挂,不然就要挂best bid
if float(Operation_info[h]['swap_bids_price5'][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_asks_price5'][0]):
swap_price = precfloat((float(Operation_info[h]['swap_asks_price5'][0]) + float(Operation_info[h]['swap_bids_price5'][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_price = precfloat(float(Operation_info[h]['swap_bids_price5'][0]), Necessary_info[h]['swap_tick_digit'])
mode_take_close_long_final_close_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_close_price': swap_price, # 买在best价
'swap_close_size': swap_close_size,
'swap_order_type': 'LIMIT'}
if close_short_final_close_mode == 'on': # swap继续平多,
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount_Close,swap是close_short_final_close_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position']) * swap_present_price) + '美金'
sendmessage(tutu)
if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'])>= swap_min_size:
swap_close_size= swap_min_size
else:
swap_close_size=0
# 因为成交慢,试著平多/卖在对手价, 0607改成挂在中间价
#如果swap best ask-best bid>1个tick_size,表示可以挂,不然就要挂best ask
if float(Operation_info[h]['swap_bids_price5'][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_asks_price5'][0]):
swap_close_price = precfloat((float(Operation_info[h]['swap_asks_price5'][0]) + float(Operation_info[h]['swap_bids_price5'][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_close_price = precfloat(float(Operation_info[h]['swap_asks_price5'][0]), Necessary_info[h]['swap_tick_digit'])
mode_take_close_short_final_close_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_close_price': swap_close_price, # 买在best价
'swap_close_size': swap_close_size,
'swap_order_type': 'LIMIT'}
if close_long_final_open_mode == 'on': # swap继续开空,
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount_Close,swap是close_final_open_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position']) * swap_present_price) + '美金'
sendmessage(tutu)
if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'])>= swap_min_size:
swap_close_size= swap_min_size
else:
swap_close_size=0
# 因为成交慢,试著开空/卖在对手价, 0607改成挂在中间价
#如果swap best ask-best bid>1个tick_size,表示可以挂,不然就要挂best ask
if float(Operation_info[h]['swap_bids_price5'][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_asks_price5'][0]):
swap_close_price = precfloat((float(Operation_info[h]['swap_asks_price5'][0]) + float(Operation_info[h]['swap_bids_price5'][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_close_price = precfloat(float(Operation_info[h]['swap_asks_price5'][0]), Necessary_info[h]['swap_tick_digit'])
mode_take_close_long_final_open_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_close_price': swap_close_price, # 买在best价
'swap_close_size': swap_close_size,
'swap_order_type': 'LIMIT'}
if close_short_final_open_mode == 'on': # swap开多,
tutu = Necessary_info[h]['swap_instrument_id'] + ":" + 'spot已接近Target_Amount_Close,swap是close_final_open_mode,现在相差' + \
str((Operation_info[h]['spot_balance'] + Operation_info[h]['swap_position'] ) * swap_present_price) + '美金'
sendmessage(tutu)
if abs(Operation_info[h]['spot_balance']+Operation_info[h]['swap_position'])>= swap_min_size:
swap_close_size= swap_min_size
else:
swap_close_size=0
# 因为成交慢,试著开多/买在对手价, 0607改成挂在中间价
#如果swap best ask-best bid>1个tick_size,表示可以挂,不然就要挂best bid
if float(Operation_info[h]['swap_bids_price5'][0]) + Necessary_info[h]['swap_tick_size'] < float(Operation_info[h]['swap_asks_price5'][0]):
swap_close_price = precfloat((float(Operation_info[h]['swap_asks_price5'][0]) + float(Operation_info[h]['swap_bids_price5'][0])) / 2, Necessary_info[h]['swap_tick_digit'])
else:
swap_close_price = precfloat((float(Operation_info[h]['swap_bids_price5'][0])), Necessary_info[h]['swap_tick_digit'])
mode_take_close_short_final_open_order = {'swap_instrument_id': Necessary_info[h]['swap_instrument_id'],
'swap_close_price': swap_close_price, # 买在best价
'swap_close_size': swap_close_size,
'swap_order_type': 'LIMIT'}
#print('funding_rate_result_1',funding_rate_result)
if open_long_mode == 'on':
# 下单,买spot,卖swap
spot_order_result, swap_order_result = take_long_order(mode_take_long_order)
# print('spot_order_result_1',spot_order_result)
# print('swap_order_result_1',swap_order_result)
time.sleep(0.1)
if spot_order_result == 'none':
Operation_info[h]['spot_pending_list_left'] = 'off'
elif spot_order_result != 'none':
if int(spot_order_result['orderId'])!=0:
Operation_info[h]['spot_pending_list_left'] = 'on'
#记录买进spot几单了
Operation_info[h]['spot_buy_trading_orders']=Operation_info[h]['spot_buy_trading_orders']+1
else:
Operation_info[h]['spot_pending_list_left'] = 'off'
if swap_order_result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif swap_order_result !='none':
if int(swap_order_result['orderId'])!=0:
Operation_info[h]['swap_pending_list_left'] = 'on'
#记录卖出swap几单
Operation_info[h]['swap_sell_trading_orders']=Operation_info[h]['swap_sell_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
if open_short_mode == 'on':
# 下单,买swap,卖spot
spot_order_result, swap_order_result = take_short_order(mode_take_short_order)
time.sleep(0.1)
if spot_order_result == 'none':
Operation_info[h]['spot_pending_list_left'] = 'off'
elif spot_order_result != 'none':
if int(spot_order_result['orderId'])!=0:
Operation_info[h]['spot_pending_list_left'] = 'on'
#记录卖出spot几单了
Operation_info[h]['spot_sell_trading_orders']=Operation_info[h]['spot_sell_trading_orders']+1
else:
Operation_info[h]['spot_pending_list_left'] = 'off'
if swap_order_result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif swap_order_result !='none':
if int(swap_order_result['orderId'])!=0:
Operation_info[h]['swap_pending_list_left'] = 'on'
#记录买进swap几单了
Operation_info[h]['swap_buy_trading_orders'] = Operation_info[h]['swap_buy_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
if close_long_mode == 'on':
#卖spot,买swap
spot_close_order_result, swap_close_order_result = take_close_long_order(mode_take_close_long_order)
#print('spot_close_order_result_3',spot_close_order_result)
#print('swap_close_order_result_3',swap_close_order_result)
time.sleep(0.1)
if spot_close_order_result == 'none':
Operation_info[h]['spot_pending_list_left'] = 'off'
elif spot_close_order_result != 'none':
if int(spot_close_order_result['orderId'])!=0:
Operation_info[h]['spot_pending_list_left'] = 'on'
#记录卖出spot几单
Operation_info[h]['spot_sell_trading_orders']=Operation_info[h]['spot_sell_trading_orders']+1
else:
Operation_info[h]['spot_pending_list_left'] = 'off'
if swap_close_order_result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif swap_close_order_result !='none':
if int(swap_close_order_result['orderId'])!=0:
Operation_info[h]['swap_pending_list_left'] = 'on'
#记录买进swap几单
Operation_info[h]['swap_buy_trading_orders'] = Operation_info[h]['swap_buy_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
if close_short_mode == 'on':
#买spot,卖swap
spot_close_order_result, swap_close_order_result = take_close_short_order(mode_take_close_short_order)
time.sleep(0.1)
if spot_close_order_result == 'none':
Operation_info[h]['spot_pending_list_left'] = 'off'
elif spot_close_order_result != 'none':
if int(spot_close_order_result['orderId'])!=0:
Operation_info[h]['spot_pending_list_left'] = 'on'
#记录买进spot几单
Operation_info[h]['spot_buy_trading_orders']=Operation_info[h]['spot_buy_trading_orders']+1
else:
Operation_info[h]['spot_pending_list_left'] = 'off'
if swap_close_order_result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif swap_close_order_result !='none':
if int(swap_close_order_result['orderId'])!=0:
Operation_info[h]['swap_pending_list_left'] = 'on'
#记录卖出swap几单
Operation_info[h]['swap_sell_trading_orders']=Operation_info[h]['swap_sell_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
if open_long_final_open_mode == 'on':
#下單,卖swap
result = take_open_long_final_open_order(mode_take_open_long_final_open_order)
time.sleep(0.1)
#print('mode_take_open_long_final_open_order',mode_take_open_long_final_open_order)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['orderId'])!=0:
Operation_info[h]['swap_pending_list_left'] = 'on'
#记录卖出swap几单
Operation_info[h]['swap_sell_trading_orders']=Operation_info[h]['swap_sell_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if open_short_final_open_mode == 'on':
#下單,买swap
result = take_open_short_final_open_order(mode_take_open_short_final_open_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['orderId'])!=0:
Operation_info[h]['swap_pending_list_left'] = 'on'
#记录买进swap几单
Operation_info[h]['swap_buy_trading_orders'] = Operation_info[h]['swap_buy_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if open_long_final_close_mode == 'on':
# 下单,买swap
result = take_open_long_final_close_order(mode_take_open_long_final_close_order)
#print('open_long_close_result',result)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['orderId'])!=0:
Operation_info[h]['swap_pending_list_left'] = 'on'
#记录买进swap几单
Operation_info[h]['swap_buy_trading_orders'] = Operation_info[h]['swap_buy_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if open_short_final_close_mode == 'on':
# 下单,卖出swap
result = take_open_short_final_close_order(mode_take_open_short_final_close_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['orderId'])!=0:
Operation_info[h]['swap_pending_list_left'] = 'on'
#记录卖出swap几单
Operation_info[h]['swap_sell_trading_orders']=Operation_info[h]['swap_sell_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if close_long_final_close_mode == 'on':
# 下单
result = take_close_long_final_close_order(mode_take_close_long_final_close_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['orderId'])!=0:
Operation_info[h]['swap_pending_list_left'] = 'on'
#记录买进swap几单
Operation_info[h]['swap_buy_trading_orders'] = Operation_info[h]['swap_buy_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if close_short_final_close_mode == 'on':
# 下单,卖出swap
result = take_close_short_final_close_order(mode_take_close_short_final_close_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['orderId'])!=0:
Operation_info[h]['swap_pending_list_left'] = 'on'
#记录卖出swap几单
Operation_info[h]['swap_sell_trading_orders']=Operation_info[h]['swap_sell_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if close_long_final_open_mode == 'on':
# 下单
result = take_close_long_final_open_order(mode_take_close_long_final_open_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['orderId'])!=0:
Operation_info[h]['swap_pending_list_left'] = 'on'
#记录卖出swap几单
Operation_info[h]['swap_sell_trading_orders']=Operation_info[h]['swap_sell_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
if close_short_final_open_mode == 'on':
# 下单
result = take_close_short_final_open_order(mode_take_close_short_final_open_order)
time.sleep(0.1)
if result == 'none':
Operation_info[h]['swap_pending_list_left'] = 'off'
elif result != 'none':
if int(result['orderId'])!=0:
Operation_info[h]['swap_pending_list_left'] = 'on'
#记录买进swap几单
Operation_info[h]['swap_buy_trading_orders'] = Operation_info[h]['swap_buy_trading_orders']+1
else:
Operation_info[h]['swap_pending_list_left'] = 'off'
Operation_info[h]['spot_pending_list_left'] = 'off'
# 更新新的spot五档
Operation_info[h]['spot_bids_price5'] = []
Operation_info[h]['spot_asks_price5'] = []
try:
spot_depth5 = MQ[h]["DEPTH5_SPOT"].get(timeout=0.2)
# for i in range(5):
# Operation_info[h]['spot_bids_price5'].append(float(spot_depth5['b'][i][0]))
# Operation_info[h]['spot_asks_price5'].append(float(spot_depth5['a'][i][0]))
except:
spot_depth5 = client.get_order_book(symbol=h,limit='5')
print('我spot_depth5还是用restAPI___#')
time.sleep(0.1)
for i in range(5):
Operation_info[h]['spot_bids_price5'].append(float(spot_depth5['bids'][i][0]))
Operation_info[h]['spot_asks_price5'].append(float(spot_depth5['asks'][i][0]))
# print('h_',h)
#更新新的swap五档,注意ws跟restAPI数据型态不同
Operation_info[h]['swap_bids_price5'] = []
Operation_info[h]['swap_asks_price5'] = []
try:
swap_depth5 = MQ[h]["DEPTH5_SWAP"].get(timeout=0.2)
for i in range(5):
Operation_info[h]['swap_bids_price5'].append(float(swap_depth5['b'][i][0]))
Operation_info[h]['swap_asks_price5'].append(float(swap_depth5['a'][i][0]))
except:
swap_depth5 = client.futures_order_book(symbol=h,limit='5')
for i in range(5):
Operation_info[h]['swap_bids_price5'].append(float(swap_depth5['bids'][i][0]))
Operation_info[h]['swap_asks_price5'].append(float(swap_depth5['asks'][i][0]))
time.sleep(0.1)
Nowtime = datetime.now()
| |
DESTROYED.
destroyTime: Output only. The time this CryptoKeyVersion's key material is
scheduled for destruction. Only present if state is DESTROY_SCHEDULED.
externalProtectionLevelOptions: ExternalProtectionLevelOptions stores a
group of additional fields for configuring a CryptoKeyVersion that are
specific to the EXTERNAL protection level.
generateTime: Output only. The time this CryptoKeyVersion's key material
was generated.
importFailureReason: Output only. The root cause of the most recent import
failure. Only present if state is IMPORT_FAILED.
importJob: Output only. The name of the ImportJob used in the most recent
import of this CryptoKeyVersion. Only present if the underlying key
material was imported.
importTime: Output only. The time at which this CryptoKeyVersion's key
material was most recently imported.
name: Output only. The resource name for this CryptoKeyVersion in the
format
`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.
protectionLevel: Output only. The ProtectionLevel describing how crypto
operations are performed with this CryptoKeyVersion.
reimportEligible: Output only. Whether or not this key version is eligible
for reimport, by being specified as a target in
ImportCryptoKeyVersionRequest.crypto_key_version.
state: The current state of the CryptoKeyVersion.
"""
class AlgorithmValueValuesEnum(_messages.Enum):
r"""Output only. The CryptoKeyVersionAlgorithm that this CryptoKeyVersion
supports.
Values:
CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED: Not specified.
GOOGLE_SYMMETRIC_ENCRYPTION: Creates symmetric encryption keys.
RSA_SIGN_PSS_2048_SHA256: RSASSA-PSS 2048 bit key with a SHA256 digest.
RSA_SIGN_PSS_3072_SHA256: RSASSA-PSS 3072 bit key with a SHA256 digest.
RSA_SIGN_PSS_4096_SHA256: RSASSA-PSS 4096 bit key with a SHA256 digest.
RSA_SIGN_PSS_4096_SHA512: RSASSA-PSS 4096 bit key with a SHA512 digest.
RSA_SIGN_PKCS1_2048_SHA256: RSASSA-PKCS1-v1_5 with a 2048 bit key and a
SHA256 digest.
RSA_SIGN_PKCS1_3072_SHA256: RSASSA-PKCS1-v1_5 with a 3072 bit key and a
SHA256 digest.
RSA_SIGN_PKCS1_4096_SHA256: RSASSA-PKCS1-v1_5 with a 4096 bit key and a
SHA256 digest.
RSA_SIGN_PKCS1_4096_SHA512: RSASSA-PKCS1-v1_5 with a 4096 bit key and a
SHA512 digest.
RSA_SIGN_RAW_PKCS1_2048: RSASSA-PKCS1-v1_5 signing without encoding,
with a 2048 bit key.
RSA_SIGN_RAW_PKCS1_3072: RSASSA-PKCS1-v1_5 signing without encoding,
with a 3072 bit key.
RSA_SIGN_RAW_PKCS1_4096: RSASSA-PKCS1-v1_5 signing without encoding,
with a 4096 bit key.
RSA_DECRYPT_OAEP_2048_SHA256: RSAES-OAEP 2048 bit key with a SHA256
digest.
RSA_DECRYPT_OAEP_3072_SHA256: RSAES-OAEP 3072 bit key with a SHA256
digest.
RSA_DECRYPT_OAEP_4096_SHA256: RSAES-OAEP 4096 bit key with a SHA256
digest.
RSA_DECRYPT_OAEP_4096_SHA512: RSAES-OAEP 4096 bit key with a SHA512
digest.
RSA_DECRYPT_OAEP_2048_SHA1: RSAES-OAEP 2048 bit key with a SHA1 digest.
RSA_DECRYPT_OAEP_3072_SHA1: RSAES-OAEP 3072 bit key with a SHA1 digest.
RSA_DECRYPT_OAEP_4096_SHA1: RSAES-OAEP 4096 bit key with a SHA1 digest.
EC_SIGN_P256_SHA256: ECDSA on the NIST P-256 curve with a SHA256 digest.
EC_SIGN_P384_SHA384: ECDSA on the NIST P-384 curve with a SHA384 digest.
EC_SIGN_SECP256K1_SHA256: ECDSA on the non-NIST secp256k1 curve. This
curve is only supported for HSM protection level.
HMAC_SHA256: HMAC-SHA256 signing with a 256 bit key.
EXTERNAL_SYMMETRIC_ENCRYPTION: Algorithm representing symmetric
encryption by an external key manager.
"""
CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED = 0
GOOGLE_SYMMETRIC_ENCRYPTION = 1
RSA_SIGN_PSS_2048_SHA256 = 2
RSA_SIGN_PSS_3072_SHA256 = 3
RSA_SIGN_PSS_4096_SHA256 = 4
RSA_SIGN_PSS_4096_SHA512 = 5
RSA_SIGN_PKCS1_2048_SHA256 = 6
RSA_SIGN_PKCS1_3072_SHA256 = 7
RSA_SIGN_PKCS1_4096_SHA256 = 8
RSA_SIGN_PKCS1_4096_SHA512 = 9
RSA_SIGN_RAW_PKCS1_2048 = 10
RSA_SIGN_RAW_PKCS1_3072 = 11
RSA_SIGN_RAW_PKCS1_4096 = 12
RSA_DECRYPT_OAEP_2048_SHA256 = 13
RSA_DECRYPT_OAEP_3072_SHA256 = 14
RSA_DECRYPT_OAEP_4096_SHA256 = 15
RSA_DECRYPT_OAEP_4096_SHA512 = 16
RSA_DECRYPT_OAEP_2048_SHA1 = 17
RSA_DECRYPT_OAEP_3072_SHA1 = 18
RSA_DECRYPT_OAEP_4096_SHA1 = 19
EC_SIGN_P256_SHA256 = 20
EC_SIGN_P384_SHA384 = 21
EC_SIGN_SECP256K1_SHA256 = 22
HMAC_SHA256 = 23
EXTERNAL_SYMMETRIC_ENCRYPTION = 24
class ProtectionLevelValueValuesEnum(_messages.Enum):
r"""Output only. The ProtectionLevel describing how crypto operations are
performed with this CryptoKeyVersion.
Values:
PROTECTION_LEVEL_UNSPECIFIED: Not specified.
SOFTWARE: Crypto operations are performed in software.
HSM: Crypto operations are performed in a Hardware Security Module.
EXTERNAL: Crypto operations are performed by an external key manager.
"""
PROTECTION_LEVEL_UNSPECIFIED = 0
SOFTWARE = 1
HSM = 2
EXTERNAL = 3
class StateValueValuesEnum(_messages.Enum):
r"""The current state of the CryptoKeyVersion.
Values:
CRYPTO_KEY_VERSION_STATE_UNSPECIFIED: Not specified.
PENDING_GENERATION: This version is still being generated. It may not be
used, enabled, disabled, or destroyed yet. Cloud KMS will
automatically mark this version ENABLED as soon as the version is
ready.
ENABLED: This version may be used for cryptographic operations.
DISABLED: This version may not be used, but the key material is still
available, and the version can be placed back into the ENABLED state.
DESTROYED: This version is destroyed, and the key material is no longer
stored. This version may only become ENABLED again if this version is
reimport_eligible and the original key material is reimported with a
call to KeyManagementService.ImportCryptoKeyVersion.
DESTROY_SCHEDULED: This version is scheduled for destruction, and will
be destroyed soon. Call RestoreCryptoKeyVersion to put it back into
the DISABLED state.
PENDING_IMPORT: This version is still being imported. It may not be
used, enabled, disabled, or destroyed yet. Cloud KMS will
automatically mark this version ENABLED as soon as the version is
ready.
IMPORT_FAILED: This version was not imported successfully. It may not be
used, enabled, disabled, or destroyed. The submitted key material has
been discarded. Additional details can be found in
CryptoKeyVersion.import_failure_reason.
"""
CRYPTO_KEY_VERSION_STATE_UNSPECIFIED = 0
PENDING_GENERATION = 1
ENABLED = 2
DISABLED = 3
DESTROYED = 4
DESTROY_SCHEDULED = 5
PENDING_IMPORT = 6
IMPORT_FAILED = 7
algorithm = _messages.EnumField('AlgorithmValueValuesEnum', 1)
attestation = _messages.MessageField('KeyOperationAttestation', 2)
createTime = _messages.StringField(3)
destroyEventTime = _messages.StringField(4)
destroyTime = _messages.StringField(5)
externalProtectionLevelOptions = _messages.MessageField('ExternalProtectionLevelOptions', 6)
generateTime = _messages.StringField(7)
importFailureReason = _messages.StringField(8)
importJob = _messages.StringField(9)
importTime = _messages.StringField(10)
name = _messages.StringField(11)
protectionLevel = _messages.EnumField('ProtectionLevelValueValuesEnum', 12)
reimportEligible = _messages.BooleanField(13)
state = _messages.EnumField('StateValueValuesEnum', 14)
class CryptoKeyVersionTemplate(_messages.Message):
r"""A CryptoKeyVersionTemplate specifies the properties to use when creating
a new CryptoKeyVersion, either manually with CreateCryptoKeyVersion or
automatically as a result of auto-rotation.
Enums:
AlgorithmValueValuesEnum: Required. Algorithm to use when creating a
CryptoKeyVersion based on this template. For backwards compatibility,
GOOGLE_SYMMETRIC_ENCRYPTION is implied if both this field is omitted and
CryptoKey.purpose is ENCRYPT_DECRYPT.
ProtectionLevelValueValuesEnum: ProtectionLevel to use when creating a
CryptoKeyVersion based on this template. Immutable. Defaults to
SOFTWARE.
Fields:
algorithm: Required. Algorithm to use when creating a CryptoKeyVersion
based on this template. For backwards compatibility,
GOOGLE_SYMMETRIC_ENCRYPTION is implied if both this field is omitted and
CryptoKey.purpose is ENCRYPT_DECRYPT.
protectionLevel: ProtectionLevel to use when creating a CryptoKeyVersion
based on this template. Immutable. Defaults to SOFTWARE.
"""
class AlgorithmValueValuesEnum(_messages.Enum):
r"""Required. Algorithm to use when creating a CryptoKeyVersion based on
this template. For backwards compatibility, GOOGLE_SYMMETRIC_ENCRYPTION is
implied if both this field is omitted and CryptoKey.purpose is
ENCRYPT_DECRYPT.
Values:
CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED: Not specified.
GOOGLE_SYMMETRIC_ENCRYPTION: Creates symmetric encryption keys.
RSA_SIGN_PSS_2048_SHA256: RSASSA-PSS 2048 bit key with a SHA256 digest.
RSA_SIGN_PSS_3072_SHA256: RSASSA-PSS 3072 bit key with a SHA256 digest.
RSA_SIGN_PSS_4096_SHA256: RSASSA-PSS 4096 bit key with a SHA256 digest.
RSA_SIGN_PSS_4096_SHA512: RSASSA-PSS 4096 bit key with a SHA512 digest.
RSA_SIGN_PKCS1_2048_SHA256: RSASSA-PKCS1-v1_5 with a 2048 bit key and a
SHA256 digest.
RSA_SIGN_PKCS1_3072_SHA256: RSASSA-PKCS1-v1_5 with a 3072 bit key and a
SHA256 digest.
RSA_SIGN_PKCS1_4096_SHA256: RSASSA-PKCS1-v1_5 with a 4096 bit key and a
SHA256 digest.
RSA_SIGN_PKCS1_4096_SHA512: RSASSA-PKCS1-v1_5 with a 4096 bit key and a
SHA512 digest.
RSA_SIGN_RAW_PKCS1_2048: RSASSA-PKCS1-v1_5 signing without encoding,
with a 2048 bit key.
RSA_SIGN_RAW_PKCS1_3072: RSASSA-PKCS1-v1_5 signing without encoding,
with a 3072 bit key.
RSA_SIGN_RAW_PKCS1_4096: RSASSA-PKCS1-v1_5 signing without encoding,
with a 4096 bit key.
RSA_DECRYPT_OAEP_2048_SHA256: RSAES-OAEP 2048 bit key with a SHA256
digest.
RSA_DECRYPT_OAEP_3072_SHA256: RSAES-OAEP 3072 bit key with a SHA256
digest.
RSA_DECRYPT_OAEP_4096_SHA256: RSAES-OAEP 4096 bit key with a SHA256
digest.
RSA_DECRYPT_OAEP_4096_SHA512: RSAES-OAEP 4096 bit key with a SHA512
digest.
RSA_DECRYPT_OAEP_2048_SHA1: RSAES-OAEP 2048 bit key with a SHA1 digest.
RSA_DECRYPT_OAEP_3072_SHA1: RSAES-OAEP 3072 bit key with a SHA1 digest.
RSA_DECRYPT_OAEP_4096_SHA1: RSAES-OAEP 4096 bit key with a SHA1 digest.
EC_SIGN_P256_SHA256: ECDSA on the NIST P-256 curve with a SHA256 digest.
EC_SIGN_P384_SHA384: ECDSA on the NIST P-384 curve with a SHA384 digest.
EC_SIGN_SECP256K1_SHA256: ECDSA on the non-NIST secp256k1 curve. This
curve is only supported for HSM protection level.
HMAC_SHA256: HMAC-SHA256 signing with a 256 bit key.
EXTERNAL_SYMMETRIC_ENCRYPTION: Algorithm representing symmetric
encryption by an external key manager.
"""
CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED = 0
GOOGLE_SYMMETRIC_ENCRYPTION = 1
RSA_SIGN_PSS_2048_SHA256 = 2
RSA_SIGN_PSS_3072_SHA256 = 3
RSA_SIGN_PSS_4096_SHA256 = 4
RSA_SIGN_PSS_4096_SHA512 = 5
RSA_SIGN_PKCS1_2048_SHA256 = 6
RSA_SIGN_PKCS1_3072_SHA256 = 7
RSA_SIGN_PKCS1_4096_SHA256 = 8
RSA_SIGN_PKCS1_4096_SHA512 = 9
RSA_SIGN_RAW_PKCS1_2048 = 10
RSA_SIGN_RAW_PKCS1_3072 = 11
RSA_SIGN_RAW_PKCS1_4096 = 12
RSA_DECRYPT_OAEP_2048_SHA256 = 13
RSA_DECRYPT_OAEP_3072_SHA256 = 14
RSA_DECRYPT_OAEP_4096_SHA256 = 15
RSA_DECRYPT_OAEP_4096_SHA512 = 16
RSA_DECRYPT_OAEP_2048_SHA1 = 17
RSA_DECRYPT_OAEP_3072_SHA1 = 18
RSA_DECRYPT_OAEP_4096_SHA1 = 19
EC_SIGN_P256_SHA256 = 20
EC_SIGN_P384_SHA384 = 21
EC_SIGN_SECP256K1_SHA256 = 22
HMAC_SHA256 = 23
EXTERNAL_SYMMETRIC_ENCRYPTION = | |
# update_I()
#-------------------------------------------------------------------
    def update_q0(self):
        """Update the surface-layer soil moisture, q0 (no-op here).

        Placeholder for the base infiltration component: most infiltration
        methods do not compute q0, so this does nothing.  The Richards 1D
        component overrides this method with a real update.
        """
        if (self.DEBUG):
            print 'Calling update_q0()...'
        #-----------------------------------------------
        # Note: self.q0 = np.float64(0) in __init__().
        # Most infil methods don't compute q0.
        # This method is over-ridden for Richards 1D
        #-----------------------------------------------
        pass
# update_q0()
#-------------------------------------------------------------------
    def check_infiltration(self):
        """Abort the model run if any infiltration rate is non-finite.

        Scans self.IN (scalar or grid) for NaN/Inf values.  If all values
        are finite, returns silently; otherwise prints an error banner,
        sets self.status = 'failed' and self.DONE = True so the driving
        loop terminates.
        """
        if (self.DEBUG):
            print 'Calling check_infiltration()...'
        #--------------------------------------
        # Check for NaNs in infiltration rate
        #--------------------------------------
        # NB!  Don't set DONE = False, it may
        # already have been set to True
        #--------------------------------------
        if (np.size( self.IN ) == 1):
            # Scalar case: nbad is only reported when OK is False,
            # so setting it to 1 here is correct.
            OK = np.isfinite( self.IN )
            nbad = 1
        else:
            wbad = np.where( np.logical_not(np.isfinite( self.IN )) )
            nbad = np.size( wbad[0] )
            ### nbad = np.size(wbad, 0)
            OK = (nbad == 0)
        if (OK):
            return

        #------------------------------------------
        # Issue warning message and abort the run
        #------------------------------------------
        msg = np.array(['ERROR: Aborting model run.', \
                        ' NaNs found in infiltration rates.', \
                        ' Number of NaN values = ' + str(nbad) ])
        ## GUI_Error_Message(msg) #########
        print '##############################################'
        for line in msg:
            print line
        print '##############################################'
        print ' '
        self.status = 'failed'
        self.DONE = True
# check_infiltration
#-----------------------------------------------------------------------
def check_low_rainrate(self):
#------------------------------------------------------------
# Notes: If (P_total < Ks), then we need to set the infil
# rate to P_total. P_total = (P + SM).
#
# This needs to be called by Green-Ampt and Smith-
# Parlange methods for computing IN; perhaps by
# any method based on total infiltrated depth, I.
# This isn't needed for Richards' equation method.
#------------------------------------------------------------
#--------------------------------------
# Is P_total less than Ks anywhere ?
# If so, set IN = P_total there.
#--------------------------------------
nPt = np.size( self.P_total )
nK = np.size( self.Ks[0] )
if ((nPt == 1) and (nK == 1)):
#----------------------------------
# P_total and Ks are both scalars
#----------------------------------
if (self.P_total < self.Ks[0]):
self.IN = self.P_total
else:
#---------------------------------
# Either P_total or Ks is a grid
# so IN will have become a grid
#---------------------------------
w = np.where( self.P_total < self.Ks[0] )
nw = np.size( w[0] )
if (nw != 0):
if (nPt > 1):
self.IN[w] = self.P_total[w]
else:
self.IN[w] = self.P_total
# check_low_rainrate
#-------------------------------------------------------------------
def open_input_files(self):
#-------------------------------------------------------
# This method works for Green-Ampt and Smith-Parlange
# but must be overridden for Richards 1D.
#-------------------------------------------------------
# NB! Green-Ampt and Smith-Parlange currently only
# support ONE layer (n_layers == 1).
#-------------------------------------------------------
self.Ks_unit = [] # (empty lists to hold file objects)
self.Ki_unit = []
self.qs_unit = []
self.qi_unit = []
self.G_unit = []
self.gam_unit = []
for k in xrange(self.n_layers):
self.Ks_file[k] = self.in_directory + self.Ks_file[k]
self.Ki_file[k] = self.in_directory + self.Ki_file[k]
self.qs_file[k] = self.in_directory + self.qs_file[k]
self.qi_file[k] = self.in_directory + self.qi_file[k]
self.G_file[k] = self.in_directory + self.G_file[k]
self.gam_file[k] = self.in_directory + self.gam_file[k]
self.Ks_unit.append( model_input.open_file(self.Ks_type[k], self.Ks_file[k]) )
self.Ki_unit.append( model_input.open_file(self.Ki_type[k], self.Ki_file[k]) )
self.qs_unit.append( model_input.open_file(self.qs_type[k], self.qs_file[k]) )
self.qi_unit.append( model_input.open_file(self.qi_type[k], self.qi_file[k]) )
self.G_unit.append( model_input.open_file(self.G_type[k], self.G_file[k]) )
self.gam_unit.append( model_input.open_file(self.gam_type[k], self.gam_file[k]) )
# open_input_files()
#-------------------------------------------------------------------
def read_input_files(self):
if (self.DEBUG):
print 'Calling read_input_files()...'
rti = self.rti
#-------------------------------------------------------
# All grids are assumed to have data type of Float32.
#-------------------------------------------------------
# This method works for Green-Ampt and Smith-Parlange
# but must be overridden for Richards 1D.
#-------------------------------------------------------
# NB! Green-Ampt and Smith-Parlange currently only
# support ONE layer (n_layers == 1).
#-------------------------------------------------------
for k in xrange(self.n_layers):
Ks = model_input.read_next(self.Ks_unit[k], self.Ks_type[k], rti)
if (Ks is not None): self.Ks[k] = Ks
Ki = model_input.read_next(self.Ki_unit[k], self.Ki_type[k], rti)
if (Ki is not None): self.Ki[k] = Ki
qs = model_input.read_next(self.qs_unit[k], self.qs_type[k], rti)
if (qs is not None): self.qs[k] = qs
qi = model_input.read_next(self.qi_unit[k], self.qi_type[k], rti)
if (qi is not None): self.qi[k] = qi
G = model_input.read_next(self.G_unit[k], self.G_type[k], rti)
if (G is not None): self.G[k] = G
gam = model_input.read_next(self.gam_unit[k], self.gam_type[k], rti)
if (gam is not None): self.gam[k] = gam
# read_input_files()
#-------------------------------------------------------------------
def close_input_files(self):
#-------------------------------------------------------
# This method works for Green-Ampt and Smith-Parlange
# but must be overridden for Richards 1D.
#-------------------------------------------------------
# NB! Green-Ampt and Smith-Parlange currently only
# support ONE layer (n_layers == 1).
#-------------------------------------------------------
for k in xrange(self.n_layers):
if (self.Ks_type[k] != 'Scalar'): self.Ks_unit[k].close()
if (self.Ki_type[k] != 'Scalar'): self.Ki_unit[k].close()
if (self.qs_type[k] != 'Scalar'): self.qs_unit[k].close()
if (self.qi_type[k] != 'Scalar'): self.qi_unit[k].close()
if (self.G_type[k] != 'Scalar'): self.G_unit[k].close()
if (self.gam_type[k] != 'Scalar'): self.gam_unit[k].close()
#------------------------------------------------------------
## if (self.Ks_file[k] != ''): self.Ks_unit[k].close()
## if (self.Ki_file[k] != ''): self.Ki_unit[k].close()
## if (self.qs_file[k] != ''): self.qs_unit[k].close()
## if (self.qi_file[k] != ''): self.qi_unit[k].close()
## if (self.G_file[k] != ''): self.G_unit[k].close()
## if (self.gam_file[k] != ''): self.gam_unit[k].close()
# close_input_files()
#-------------------------------------------------------------------
def update_outfile_names(self):
#-------------------------------------------------
# Notes: Append out_directory to outfile names.
#-------------------------------------------------
self.v0_gs_file = (self.out_directory + self.v0_gs_file)
self.I_gs_file = (self.out_directory + self.I_gs_file)
self.q0_gs_file = (self.out_directory + self.q0_gs_file)
self.Zw_gs_file = (self.out_directory + self.Zw_gs_file)
#-------------------------------------------------------------
self.v0_ts_file = (self.out_directory + self.v0_ts_file)
self.I_ts_file = (self.out_directory + self.I_ts_file)
self.q0_ts_file = (self.out_directory + self.q0_ts_file)
self.Zw_ts_file = (self.out_directory + self.Zw_ts_file)
#-----------------------------------------------------------------
self.q_ps_file = (self.out_directory + self.q_ps_file)
self.p_ps_file = (self.out_directory + self.p_ps_file)
self.K_ps_file = (self.out_directory + self.K_ps_file)
self.v_ps_file = (self.out_directory + self.v_ps_file)
#-------------------------------------------------------------
self.q_cs_file = (self.out_directory + self.q_cs_file)
self.p_cs_file = (self.out_directory + self.p_cs_file)
self.K_cs_file = (self.out_directory + self.K_cs_file)
self.v_cs_file = (self.out_directory + self.v_cs_file)
## self.v0_gs_file = (self.case_prefix + '_2D-v0.rts')
## self.q0_gs_file = (self.case_prefix + '_2D-q0.rts')
## self.I_gs_file = (self.case_prefix + '_2D-I.rts')
## self.Zw_gs_file = (self.case_prefix + '_2D-Zw.rts')
## #---------------------------------------------------------
## self.v0_ts_file = (self.case_prefix + '_0D-v0.txt')
## self.q0_ts_file = (self.case_prefix + '_0D-q0.txt')
## self.I_ts_file = (self.case_prefix + '_0D-I.txt')
## self.Zw_ts_file = (self.case_prefix + '_0D-Zw.txt')
## #---------------------------------------------------------
## self.q_cs_file = (self.case_prefix + '_3D-q.rt3')
## self.p_cs_file = (self.case_prefix + '_3D-p.rt3')
## self.K_cs_file = (self.case_prefix + '_3D-K.rt3')
## self.v_cs_file = (self.case_prefix + '_3D-v.rt3')
## #---------------------------------------------------------
## self.q_ps_file = (self.case_prefix + '_1D-q.txt')
## self.p_ps_file = (self.case_prefix + '_1D_p.txt')
## self.K_ps_file = (self.case_prefix + '_1D_K.txt')
## self.v_ps_file = (self.case_prefix + '_1D_v.txt')
# update_outfile_names()
#-------------------------------------------------------------------
def open_output_files(self):
#-------------------------------------------------
# Notes: v0 = infiltration rate at surface
# q0 = soil moisture at surface
# I = total infiltrated depth
# Zw = wetting front
# q = soil moisture
# p = pressure head
# K = hydraulic conductivity
# v = vertical flow rate (see v0)
#-------------------------------------------------
model_output.check_netcdf()
self.update_outfile_names()
#--------------------------------------
# Open new files to write grid stacks
#--------------------------------------
if (self.SAVE_V0_GRIDS):
model_output.open_new_gs_file( self, self.v0_gs_file, self.rti,
var_name='v0',
long_name='infiltration_rate_at_surface',
units_name='m/s')
if (self.SAVE_I_GRIDS):
model_output.open_new_gs_file( self, self.I_gs_file, self.rti,
var_name='I',
long_name='total_infiltrated_depth',
units_name='m')
if (self.SAVE_Q0_GRIDS):
model_output.open_new_gs_file( self, self.q0_gs_file, self.rti,
var_name='q0',
long_name='soil_moisture_at_surface',
units_name='none')
if (self.SAVE_ZW_GRIDS):
model_output.open_new_gs_file( self, self.Zw_gs_file, self.rti,
var_name='Zw',
long_name='depth_to_wetting_front',
units_name='m')
#--------------------------------------
# Open new files to write time series
#--------------------------------------
IDs = self.outlet_IDs
if (self.SAVE_V0_PIXELS):
model_output.open_new_ts_file( self, self.v0_ts_file, IDs,
var_name='v0',
long_name='infiltration_rate_at_surface',
units_name='m/s')
if (self.SAVE_I_PIXELS):
model_output.open_new_ts_file( self, self.I_ts_file, IDs,
var_name='I',
long_name='total_infiltrated_depth',
units_name='m')
if (self.SAVE_Q0_PIXELS):
model_output.open_new_ts_file( self, self.q0_ts_file, IDs,
var_name='q0',
long_name='soil_moisture_at_surface',
units_name='none')
if (self.SAVE_ZW_PIXELS):
model_output.open_new_ts_file( self, self.Zw_ts_file, IDs,
var_name='Zw',
long_name='depth_to_wetting_front',
units_name='m')
#-----------------------------------------------------
# Remaining parts are only valid for Richards method
#-----------------------------------------------------
if not(self.RICHARDS):
return
#--------------------------------------------------
# Open "profile files" to write vertical profiles
#--------------------------------------------------
if (self.SAVE_Q_PROFILES):
model_output.open_new_ps_file( self, self.q_ps_file, IDs,
z_values=self.z, z_units='m',
var_name='q',
long_name='soil_water_content',
units_name='none')
if (self.SAVE_P_PROFILES):
model_output.open_new_ps_file( self, self.p_ps_file, IDs,
z_values=self.z, z_units='m',
var_name='p',
long_name='pressure_head',
units_name='m')
#################################################################
# NOTE: Should we convert these units from "m/s" to "mm/hr" ??
#################################################################
if (self.SAVE_K_PROFILES):
model_output.open_new_ps_file( self, self.K_ps_file, IDs,
z_values=self.z, z_units='m',
var_name='K',
long_name='hydraulic_conductivity',
units_name='m/s')
if (self.SAVE_V_PROFILES):
model_output.open_new_ps_file( self, self.v_ps_file, IDs,
z_values=self.z, z_units='m',
var_name='v',
long_name='vertical_flow_rate',
units_name='m/s')
#---------------------------------------------
# Open "cube files" to write 3D grid "cubes"
#---------------------------------------------
if (self.SAVE_Q_CUBES):
model_output.open_new_cs_file( self, self.q_cs_file, self.rti,
var_name='q',
long_name='soil_water_content',
units_name='none')
if (self.SAVE_P_CUBES):
model_output.open_new_cs_file( self, self.p_cs_file, self.rti,
var_name='p',
long_name='pressure_head',
units_name='m')
#################################################################
# NOTE: Should we convert these units from "m/s" to "mm/hr" ??
#################################################################
if (self.SAVE_K_CUBES):
model_output.open_new_cs_file( self, self.K_cs_file, self.rti,
var_name='K',
long_name='hydraulic_conductivity',
units_name='m/s')
if (self.SAVE_V_CUBES):
model_output.open_new_cs_file( self, self.v_cs_file, self.rti,
var_name='v',
long_name='vertical_flow_rate',
units_name='m/s')
#--------------------------------------------------
# Open "profile files" to write vertical profiles
#--------------------------------------------------
## if (self.SAVE_Q_PROFILES):
## self.q_profile_unit = open(self.q_ps_file, 'w')
## write_ps_file_header(self.q_profile_unit, IDs, var_name='q')
##
## if (self.SAVE_P_PROFILES):
## self.p_profile_unit = open(self.p_ps_file, 'w')
## write_ps_file_header(self.p_profile_unit, IDs, var_name='p')
##
## if (self.SAVE_K_PROFILES):
## self.K_profile_unit = open(self.K_ps_file, 'w')
## write_ps_file_header(self.K_profile_unit, IDs, var_name='K')
##
## if (self.SAVE_V_PROFILES):
## self.v_profile_unit = open(self.v_ps_file, 'w')
## write_ps_file_header(self.v_profile_unit, IDs, var_name='v')
#---------------------------------------
# Open RT3 files to write grid "cubes"
#---------------------------------------
## if (self.SAVE_Q_STACKS):
## self.q_stack_unit = open(self.q_cs_file, 'wb')
## if (self.SAVE_P_STACKS): | |
<filename>kivymd/uix/button.py
"""
Components/Button
=================
.. seealso::
`Material Design spec, Buttons <https://material.io/components/buttons>`_
`Material Design spec, Buttons: floating action button <https://material.io/components/buttons-floating-action-button>`_
.. rubric:: Buttons allow users to take actions, and make choices,
with a single tap.
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/buttons.png
:align: center
`KivyMD` provides the following button classes for use:
- MDIconButton_
- MDFloatingActionButton_
- MDFlatButton_
- MDRaisedButton_
- MDRectangleFlatButton_
- MDRectangleFlatIconButton_
- MDRoundFlatButton_
- MDRoundFlatIconButton_
- MDFillRoundFlatButton_
- MDFillRoundFlatIconButton_
- MDTextButton_
- MDFloatingActionButtonSpeedDial_
.. MDIconButton:
MDIconButton
------------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-icon-button.gif
    :align: center
.. code-block:: python
from kivy.lang import Builder
from kivymd.app import MDApp
KV = '''
Screen:
MDIconButton:
icon: "language-python"
pos_hint: {"center_x": .5, "center_y": .5}
'''
class Example(MDApp):
def build(self):
return Builder.load_string(KV)
Example().run()
The :class:`~MDIconButton.icon` parameter must have the name of the icon
from ``kivymd/icon_definitions.py`` file.
You can also use custom icons:
.. code-block:: kv
MDIconButton:
icon: "data/logo/kivy-icon-256.png"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-icon-custom-button.gif
:align: center
By default, :class:`~MDIconButton` button has a size ``(dp(48), dp (48))``.
Use :class:`~BaseButton.user_font_size` attribute to resize the button:
.. code-block:: kv
MDIconButton:
icon: "android"
user_font_size: "64sp"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-icon-button-user-font-size.gif
:align: center
By default, the color of :class:`~MDIconButton`
(depending on the style of the application) is black or white.
You can change the color of :class:`~MDIconButton` as the text color
of :class:`~kivymd.uix.label.MDLabel`:
.. code-block:: kv
MDIconButton:
icon: "android"
theme_text_color: "Custom"
text_color: app.theme_cls.primary_color
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-icon-button-theme-text-color.png
:align: center
.. MDFloatingActionButton:
MDFloatingActionButton
----------------------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-floating-action-button.png
:align: center
The above parameters for :class:`~MDIconButton` apply
to :class:`~MDFloatingActionButton`.
To change :class:`~MDFloatingActionButton` background, use the
``md_bg_color`` parameter:
.. code-block:: kv
MDFloatingActionButton:
icon: "android"
md_bg_color: app.theme_cls.primary_color
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-floating-action-button-md-bg-color.png
:align: center
The length of the shadow is controlled by the ``elevation_normal`` parameter:
.. code-block:: kv
MDFloatingActionButton:
icon: "android"
elevation_normal: 12
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-floating-action-button-elevation-normal.png
:align: center
.. MDFlatButton:
MDFlatButton
------------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-flat-button.gif
:align: center
To change the text color of: class:`~MDFlatButton` use the ``text_color`` parameter:
.. code-block:: kv
MDFlatButton:
text: "MDFLATBUTTON"
text_color: 0, 0, 1, 1
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-flat-button-text-color.png
:align: center
Or use markup:
.. code-block:: kv
MDFlatButton:
text: "[color=#00ffcc]MDFLATBUTTON[/color]"
markup: True
To specify the font size and font name, use the parameters as in the usual
`Kivy` buttons:
.. code-block:: kv
MDFlatButton:
text: "MDFLATBUTTON"
font_size: "18sp"
font_name: "path/to/font"
.. warning:: You cannot use the ``size_hint_x`` parameter for `KivyMD` buttons
(the width of the buttons is set automatically)!
.. MDRaisedButton:
MDRaisedButton
--------------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-raised-button.gif
:align: center
This button is similar to the :class:`~MDFlatButton` button except that you
can set the background color for :class:`~MDRaisedButton`:
.. code-block:: kv
MDRaisedButton:
text: "MDRAISEDBUTTON"
md_bg_color: 1, 0, 1, 1
.. MDRectangleFlatButton:
MDRectangleFlatButton
---------------------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-rectangle-flat-button.gif
:align: center
Button parameters :class:`~MDRectangleFlatButton` are the same as
button :class:`~MDRaisedButton`:
.. code-block:: kv
MDRectangleFlatButton:
text: "MDRECTANGLEFLATBUTTON"
text_color: 0, 0, 1, 1
md_bg_color: 1, 1, 0, 1
.. note:: Note that the frame color will be the same as the text color.
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-rectangle-flat-button-md-bg-color.png
:align: center
.. MDRectangleFlatIconButton:
MDRectangleFlatIconButton
-------------------------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-rectangle-flat-icon-button.png
    :align: center
Button parameters :class:`~MDRectangleFlatIconButton` are the same as
button :class:`~MDRectangleFlatButton`:
.. code-block:: kv
MDRectangleFlatIconButton:
icon: "android"
text: "MDRECTANGLEFLATICONBUTTON"
Without border
--------------
.. code-block:: python
from kivy.uix.screenmanager import Screen
from kivymd.app import MDApp
from kivymd.uix.button import MDRectangleFlatIconButton
class Example(MDApp):
def build(self):
screen = Screen()
screen.add_widget(
MDRectangleFlatIconButton(
text="MDRectangleFlatIconButton",
icon="language-python",
line_color=(0, 0, 0, 0),
pos_hint={"center_x": .5, "center_y": .5},
)
)
return screen
Example().run()
.. code-block:: kv
MDRectangleFlatIconButton:
text: "MDRectangleFlatIconButton"
icon: "language-python"
line_color: 0, 0, 0, 0
pos_hint: {"center_x": .5, "center_y": .5}
.. MDRoundFlatButton:
MDRoundFlatButton
-----------------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-round-flat-button.png
:align: center
Button parameters :class:`~MDRoundFlatButton` are the same as
button :class:`~MDRectangleFlatButton`:
.. code-block:: kv
MDRoundFlatButton:
text: "MDROUNDFLATBUTTON"
.. warning:: The border color does change when using ``text_color`` parameter.
.. code-block:: kv
MDRoundFlatButton:
text: "MDROUNDFLATBUTTON"
text_color: 0, 1, 0, 1
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-round-flat-button-text-color.png
:align: center
.. MDRoundFlatIconButton:
MDRoundFlatIconButton
---------------------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-round-flat-icon-button.png
:align: center
Button parameters :class:`~MDRoundFlatIconButton` are the same as
button :class:`~MDRoundFlatButton`:
.. code-block:: kv
MDRoundFlatIconButton:
icon: "android"
text: "MDROUNDFLATICONBUTTON"
.. MDFillRoundFlatButton:
MDFillRoundFlatButton
---------------------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-fill-round-flat-button.png
:align: center
Button parameters :class:`~MDFillRoundFlatButton` are the same as
button :class:`~MDRaisedButton`.
.. MDFillRoundFlatIconButton:
MDFillRoundFlatIconButton
-------------------------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-fill-round-flat-icon-button.png
:align: center
Button parameters :class:`~MDFillRoundFlatIconButton` are the same as
button :class:`~MDRaisedButton`.
.. note:: Notice that the width of the :class:`~MDFillRoundFlatIconButton`
button matches the size of the button text.
.. MDTextButton:
MDTextButton
------------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/md-text-button.png
:align: center
.. code-block:: kv
MDTextButton:
text: "MDTEXTBUTTON"
custom_color: 0, 1, 0, 1
.. MDFloatingActionButtonSpeedDial:
MDFloatingActionButtonSpeedDial
-------------------------------
.. Note:: See the full list of arguments in the class
:class:`~MDFloatingActionButtonSpeedDial`.
.. code-block:: python
from kivy.lang import Builder
from kivymd.app import MDApp
KV = '''
Screen:
MDFloatingActionButtonSpeedDial:
data: app.data
rotation_root_button: True
'''
class Example(MDApp):
data = {
'language-python': 'Python',
'language-php': 'PHP',
'language-cpp': 'C++',
}
def build(self):
return Builder.load_string(KV)
Example().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/MDFloatingActionButtonSpeedDial.gif
:align: center
Or without KV Language:
.. code-block:: python
from kivy.uix.screenmanager import Screen
from kivymd.app import MDApp
from kivymd.uix.button import MDFloatingActionButtonSpeedDial
class Example(MDApp):
data = {
'language-python': 'Python',
'language-php': 'PHP',
'language-cpp': 'C++',
}
def build(self):
screen = Screen()
speed_dial = MDFloatingActionButtonSpeedDial()
speed_dial.data = self.data
speed_dial.rotation_root_button = True
screen.add_widget(speed_dial)
return screen
Example().run()
You can use various types of animation of labels for buttons on the stack:
.. code-block:: kv
MDFloatingActionButtonSpeedDial:
hint_animation: True
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/MDFloatingActionButtonSpeedDial-hint.gif
:align: center
You can set your color values for background, text of buttons etc:
.. code-block:: kv
MDFloatingActionButtonSpeedDial:
bg_hint_color: app.theme_cls.primary_light
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/MDFloatingActionButtonSpeedDial-hint-color.png
:align: center
.. seealso::
`See full example <https://github.com/kivymd/KivyMD/wiki/Components-Button>`_
"""
__all__ = (
"MDIconButton",
"MDFloatingActionButton",
"MDFlatButton",
"MDRaisedButton",
"MDRectangleFlatButton",
"MDRectangleFlatIconButton",
"MDRoundFlatButton",
"MDRoundFlatIconButton",
"MDFillRoundFlatButton",
"MDFillRoundFlatIconButton",
"MDTextButton",
"MDFloatingActionButtonSpeedDial",
)
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.graphics.context_instructions import Color
from kivy.graphics.stencil_instructions import (
StencilPop,
StencilPush,
StencilUnUse,
StencilUse,
)
from kivy.graphics.vertex_instructions import Ellipse, RoundedRectangle
from kivy.lang import Builder
from kivy.metrics import dp, sp
from kivy.properties import (
BooleanProperty,
BoundedNumericProperty,
DictProperty,
ListProperty,
NumericProperty,
ObjectProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import Image
from kivy.uix.widget import Widget
from kivymd import images_path
from kivymd.theming import ThemableBehavior
from kivymd.uix.behaviors import (
CircularElevationBehavior,
CircularRippleBehavior,
CommonElevationBehavior,
RectangularElevationBehavior,
RectangularRippleBehavior,
SpecificBackgroundColorBehavior,
)
from kivymd.uix.tooltip import MDTooltip
Builder.load_string(
"""
#:import images_path kivymd.images_path
#:import md_icons kivymd.icon_definitions.md_icons
<BaseButton>
size_hint: (None, None)
anchor_x: 'center'
anchor_y: 'center'
<BaseFlatButton>
<BaseRaisedButton>
<BaseRoundButton>
canvas:
Clear
Color:
rgba: self._current_button_color if root.icon in md_icons else (0, 0, 0, 0)
Ellipse:
size: self.size
pos: self.pos
source: self.source if hasattr(self, "source") else ""
size:
(dp(48), dp(48)) \
if not root.user_font_size \
else (dp(root.user_font_size + 23), dp(root.user_font_size + 23))
lbl_txt: lbl_txt
padding: (dp(12), dp(12), dp(12), dp(12)) if root.icon in md_icons else (0, 0, 0, 0)
MDIcon:
id: lbl_txt
icon: root.icon
font_size:
root.user_font_size \
if root.user_font_size \
else self.font_size
font_name: root.font_name if root.font_name else self.font_name
theme_text_color: root.theme_text_color
text_color: root.text_color
disabled: root.disabled
valign: 'middle'
halign: 'center'
opposite_colors: root.opposite_colors
<BaseRectangularButton>
canvas:
Clear
Color:
rgba: self._current_button_color
RoundedRectangle:
size: self.size
pos: self.pos
radius: (root._radius, )
lbl_txt: lbl_txt
height: dp(22) + sp(root.font_size)
width: lbl_txt.texture_size[0] + dp(24)
padding: (dp(8), 0) # For MDRectangleFlatIconButton
theme_text_color: 'Primary' if not root.text_color else 'Custom'
markup: False
MDLabel:
id: lbl_txt
text: root.text if root.button_label else ''
font_size: sp(root.font_size)
font_name: root.font_name if root.font_name else self.font_name
size_hint_x: None
text_size: (None, root.height)
height: self.texture_size[1]
theme_text_color: root.theme_text_color
text_color: root._current_text_color
markup: root.markup
disabled: root.disabled
valign: 'middle'
halign: 'center'
opposite_colors: root.opposite_colors
<MDRoundFlatButton>
canvas.before:
Color:
rgba:
(root.theme_cls.primary_color if not root.text_color else root.text_color) \
if not root.disabled else root.theme_cls.disabled_hint_text_color
Line:
width: root.line_width
rounded_rectangle:
(self.x, self.y, self.width, self.height,\
root._radius, root._radius, root._radius, root._radius,\
self.height)
theme_text_color: 'Custom'
text_color:
(root.theme_cls.primary_color if not root.text_color else root.text_color) \
if not root.disabled else root.theme_cls.disabled_hint_text_color
<MDFillRoundFlatButton>
canvas.before:
Color:
rgba:
(root.theme_cls.primary_color if root.md_bg_color == [0.0, 0.0, 0.0, 0.0] else root.md_bg_color) \
if not root.disabled else root.theme_cls.disabled_hint_text_color
RoundedRectangle:
size: self.size
pos: self.pos
radius: [root._radius, ]
<MDFillRoundFlatIconButton>
md_bg_color:
root.theme_cls.primary_color if root._current_button_color == [0.0, 0.0, 0.0, 0.0] \
else root._current_button_color
line_width: 0.001
<MDRectangleFlatButton>
canvas.before:
Color:
rgba:
root.theme_cls.primary_color if not root.text_color else root.text_color
Line:
width: root.line_width
rectangle: (self.x, self.y, self.width, self.height)
theme_text_color: 'Custom'
text_color: root.theme_cls.primary_color if not root.text_color else root.text_color
<MDRectangleFlatIconButton>
canvas.before:
Color:
rgba:
root.line_color if root.line_color else \
(root.theme_cls.primary_color if not root.text_color else root.text_color) \
if not root.disabled else root.theme_cls.disabled_hint_text_color
Line:
width: 1
rectangle: (self.x, self.y, self.width, self.height)
size_hint_x: None
width: lbl_txt.texture_size[0] + lbl_ic.texture_size[0] + box.spacing * 3
markup: False
BoxLayout:
id: box
spacing: dp(10)
MDIcon:
id: lbl_ic
icon: root.icon
theme_text_color: 'Custom'
text_color:
(root.theme_cls.primary_color if not root.text_color else root.text_color) \
if not root.disabled else root.theme_cls.disabled_hint_text_color
size_hint_x: None
width: self.texture_size[0]
Label:
id: lbl_txt
text: root.text
font_size: sp(root.font_size)
font_name: root.font_name if root.font_name else self.font_name
shorten: True
width: self.texture_size[0]
color:
(root.theme_cls.primary_color if not root.text_color else root.text_color) \
if not root.disabled else root.theme_cls.disabled_hint_text_color
markup: root.markup
<MDRoundFlatIconButton>
size_hint_x: None
width: lbl_txt.texture_size[0] + lbl_ic.texture_size[0] + box.spacing * 3
markup: False
BoxLayout:
id: box
spacing: dp(10)
MDIcon:
id: lbl_ic
icon: root.icon
theme_text_color: 'Custom'
text_color:
root.theme_cls.primary_color \
if not root.text_color else root.text_color
size_hint_x: None
width: self.texture_size[0]
Label:
id: lbl_txt
text: root.text
font_size: sp(root.font_size)
font_name: root.font_name if root.font_name else self.font_name
shorten: True
size_hint_x: None
width: self.texture_size[0]
color: root.theme_cls.primary_color if not root.text_color else root.text_color
markup: root.markup
<MDRaisedButton>
md_bg_color: root.theme_cls.primary_color
theme_text_color: 'Custom'
text_color: root.specific_text_color
<MDFloatingActionButton>
# Defaults to | |
cohen_kappa_score(y_vl, pred_test_y_k,weights='quadratic')
cv_score = rmse(y_vl, y_pred)
cv_scores.append(cv_score)
qwk_scores.append(qwk)
all_coefficients[i, :] = coefficients
print( ' cv score {}: RMSE {} QWK {}'.format(i+1, cv_score, qwk))
print("##"*40)
print('cv mean RMSE score : {}'.format( np.mean(cv_scores)))
print('cv std RMSE score : {}'.format( np.std(cv_scores)))
print('cv mean QWK score : {}'.format( np.mean(qwk_scores)))
print('cv std QWK score : {}'.format( np.std(qwk_scores)))
nn2_train = [r for r in pred_oof]
nn2_test = [r for r in y_test]
del train_X,test_X
gc.collect()
return nn2_train,nn2_test,train_num_feat,test_num_feat
# Model 9: run nn2_model, which returns two prediction lists plus the numeric
# feature matrices that later models in the script reuse.
# NOTE(review): nn2_train/nn2_test appear to be out-of-fold train predictions
# and averaged test predictions — confirm against nn2_model's definition.
nn2_train,nn2_test,train_num_feat,test_num_feat=nn2_model(train1,test1,embedding_matrix,train_img_feat,test_img_feat)
# Image features have been consumed by nn2_model; drop them and force a
# garbage-collection pass to keep peak memory down before the next model.
del train_img_feat,test_img_feat
gc.collect()
# Wall-clock timing for this stage (t8 was recorded after the previous model).
t9=time.time()
print("model9 cost:{} s".format(t9-t8))
####model 10
###nn3
def nn3_model(train,test,embedding_matrix,train_num_feat,test_num_feat):
    """Train a 5-fold hybrid text+numeric NN; return OOF and test predictions.

    The network combines a BiLSTM + multi-kernel CNN branch over the
    tokenized ``concat_text`` column with a dense branch over the numeric
    features, trained as a 5-way softmax; class probabilities are then
    collapsed to an expected-value regression score in [0, 4].

    Returns:
        (nn3_train, nn3_test): out-of-fold predictions for ``train`` and
        fold-averaged predictions for ``test``, both as plain lists.
    """
    maxlen = 200  # pad/truncate every text sequence to 200 tokens
    max_features = None  # no cap on vocabulary size
    train_X = train["concat_text"].values
    test_X = test["concat_text"].values
    tokenizer = Tokenizer(num_words=max_features, filters='')
    tokenizer.fit_on_texts(list(train_X)+list(test_X))
    train_X = tokenizer.texts_to_sequences(train_X)
    test_X = tokenizer.texts_to_sequences(test_X)
    ## Pad the sentences
    train_X = pad_sequences(train_X, maxlen=maxlen)
    test_X = pad_sequences(test_X, maxlen=maxlen)
    ## Get the target values
    train_y = train['AdoptionSpeed'].values
    def hybrid_model(embedding_matrix):
        # Build a fresh text+numeric classifier (new session per fold so
        # GPU memory from the previous fold is released).
        K.clear_session()
        inp_text = Input(shape=(maxlen, ))
        emb = Embedding(
            input_dim=embedding_matrix.shape[0],
            output_dim=embedding_matrix.shape[1],
            weights=[embedding_matrix],
            input_length=maxlen,
            trainable=False)(inp_text)
        x = SpatialDropout1D(rate=0.22)(emb)
        x = Bidirectional(CuDNNLSTM(120, return_sequences=True, kernel_initializer=glorot_uniform(seed=123)))(x)
        # Parallel 1-D convolutions with different kernel sizes over the LSTM output.
        x1 = Conv1D(filters=96, kernel_size=1, kernel_initializer=glorot_uniform(seed=123),
                    padding='same', activation='relu')(x)
        x2 = Conv1D(filters=90, kernel_size=2, kernel_initializer=glorot_uniform(seed=123),
                    padding='same', activation='relu')(x)
        x3 = Conv1D(filters=30, kernel_size=3, kernel_initializer=glorot_uniform(seed=123),
                    padding='same', activation='relu')(x)
        x4 = Conv1D(filters=10, kernel_size=5, kernel_initializer=glorot_uniform(seed=123),
                    padding='same', activation='relu')(x)
        x1 = GlobalMaxPool1D()(x1)
        x2 = GlobalMaxPool1D()(x2)
        x3 = GlobalMaxPool1D()(x3)
        # NOTE(review): x4 is pooled but never used in the concatenate below —
        # confirm whether the kernel_size=5 branch was dropped intentionally.
        x4 = GlobalMaxPool1D()(x4)
        x5 = AttentionWeightedAverage()(x)
        inp_num = Input(shape=(test_num_feat.shape[1], ))
        x = concatenate([x1, x2, x3, x5, inp_num])
        x = Dense(200, kernel_initializer='glorot_uniform', activation=gelu)(x)
        #x = PReLU()(x)
        x = Dropout(0.22)(x)
        x = BatchNormalization()(x)
        x = Dense(200, kernel_initializer='glorot_uniform', activation=gelu)(x)
        #x = PReLU()(x)
        x = Dropout(0.22)(x)
        x = BatchNormalization()(x)
        out = Dense(5, activation="softmax",kernel_initializer=glorot_uniform(seed=123))(x)
        model = Model(inputs=[inp_text, inp_num], outputs=out)
        model.compile(loss='categorical_crossentropy', optimizer=AdamW(weight_decay=0.02))
        return model
    kfold = StratifiedKFold(n_splits=5, random_state=1017, shuffle=True)
    pred_oof=np.zeros((train_X.shape[0], ))
    y_test = np.zeros((test_X.shape[0],))
    cv_scores = []
    qwk_scores = []
    all_coefficients = np.zeros((5, 4))
    y_label= to_categorical(train['AdoptionSpeed'])
    for i, (train_index, test_index) in enumerate(kfold.split(train_X, train_y)):
        print("FOLD | {}/{}".format(i+1,5))
        X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = train_X[train_index], train_X[test_index], train_num_feat[
            train_index], train_num_feat[test_index], y_label[train_index], y_label[test_index]
        filepath="weights_best.h5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=2, save_best_only=True, mode='min')
        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=3, min_lr=0.00001, verbose=2)
        earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=4, verbose=2, mode='auto')
        callbacks = [checkpoint, reduce_lr, earlystopping]
        model = hybrid_model(embedding_matrix)
        if i == 0:print(model.summary())
        model.fit([X_tr, X_tr2], y_tr, batch_size=128, epochs=20, validation_data=([X_vl, X_vl2], y_vl), verbose=2, callbacks=callbacks,)
        # Restore the best (lowest val_loss) checkpoint before predicting.
        model.load_weights(filepath)
        class_list=[0,1,2,3,4]
        y_pred = np.squeeze(model.predict([X_vl, X_vl2], batch_size=256, verbose=2))
        # Collapse the softmax distribution to its expected value over 0..4.
        y_pred=np.array([sum(y_pred[ix]*class_list) for
                         ix in range(len(y_pred[:,0]))])
        pred_oof[test_index] = y_pred
        test_temp = np.squeeze(model.predict([test_X, test_num_feat], batch_size=256, verbose=2))
        test_temp=np.array([sum(test_temp[ix]*class_list) for
                            ix in range(len(test_temp[:,0]))])
        y_test+=np.squeeze(test_temp)/5  # average test predictions over the 5 folds
        y_vl= train_y[test_index]
        # Fit QWK-optimal rounding thresholds on this fold (reporting only).
        optR = OptimizedRounder()
        optR.fit(y_pred, y_vl)
        len_0 = sum([1 for i in y_vl if i==0])
        coefficients = optR.coefficients()
        pred_test_y_k = optR.predict(y_pred, coefficients, len_0)
        print("Valid Counts = ", Counter(y_vl))
        print("Predicted Counts = ", Counter(pred_test_y_k))
        print("Coefficients = ", coefficients)
        qwk = cohen_kappa_score(y_vl, pred_test_y_k,weights='quadratic')
        cv_score = rmse(y_vl, y_pred)
        cv_scores.append(cv_score)
        qwk_scores.append(qwk)
        all_coefficients[i, :] = coefficients
        print( ' cv score {}: RMSE {} QWK {}'.format(i+1, cv_score, qwk))
        print("##"*40)
    print('cv mean RMSE score : {}'.format( np.mean(cv_scores)))
    print('cv std RMSE score : {}'.format( np.std(cv_scores)))
    print('cv mean QWK score : {}'.format( np.mean(qwk_scores)))
    print('cv std QWK score : {}'.format( np.std(qwk_scores)))
    nn3_train = [r for r in pred_oof]
    nn3_test = [r for r in y_test]
    del train_X,test_X
    gc.collect()
    return nn3_train,nn3_test
# Train the third NN, then free the large shared inputs it no longer needs.
nn3_train,nn3_test=nn3_model(train1,test1,embedding_matrix,train_num_feat,test_num_feat)
del embedding_matrix,train_num_feat,test_num_feat
gc.collect()
t10=time.time()
print("model10 cost:{} s".format(t10-t9))
###### weak model ###############################################
# Stack train+test so stacked features can later be aligned by PetID.
data = pd.concat([train_data,test_data])
data.index=range(len(data))
data_id=data['PetID'].values
del train_desc,test_desc
gc.collect()
# Numeric feature columns: everything except ids, raw text, and categoricals.
cols = [x for x in train1.columns if x not in ['Breed1',"breed","color","Breed2","State",'label_description',"lan_type","malai_type","Type","concat_text","is_group","Name",'PetID',"Description",'AdoptionSpeed']]
train1[cols]=train1[cols].fillna(0)
test1[cols]=test1[cols].fillna(0)
############################ split the dataset ##########################
print('开始进行一些前期处理')
train_feature = train1[cols].values
test_feature = test1[cols].values
# 5-fold cross validation
n_folds = 5
print('处理完毕')
# df_stack3 collects the "feat1" stacked predictions, keyed by PetID.
df_stack3 = pd.DataFrame()
df_stack3['PetID']=data['PetID']
# Level-1 "weak" stackers over the feat1 numeric matrix: each model's
# out-of-fold train predictions and fold-averaged test predictions are
# stored as a single stacked column in df_stack3.
for label in ["AdoptionSpeed"]:
    score = train_data[label]
    ########################### SGD (stochastic gradient descent) ################################
    # print('sgd stacking')
    # stack_train = np.zeros((len(train_data),1))
    # stack_test = np.zeros((len(test_data),1))
    # score_va = 0
    # sk = StratifiedKFold( n_splits=5, random_state=1017,shuffle=True)
    # for i, (tr, va) in enumerate(sk.split(train_feature, score)):
    #     print('stack:%d/%d' % ((i + 1), n_folds))
    #     sgd = SGDRegressor(random_state=1017,)
    #     sgd.fit(train_feature[tr], score[tr])
    #     score_va = sgd.predict(train_feature[va])
    #     score_te = sgd.predict(test_feature)
    #     print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))
    #     stack_train[va,0] = score_va
    #     stack_test[:,0]+= score_te
    # stack_test /= n_folds
    # stack = np.vstack([stack_train, stack_test])
    # df_stack3['tfidf_sgd_classfiy_{}'.format("feat1")] = stack[:,0]
    ########################### PAC (PassiveAggressiveRegressor) ################################
    # print('PAC stacking')
    # stack_train = np.zeros((len(train_data),1))
    # stack_test = np.zeros((len(test_data),1))
    # score_va = 0
    # sk = StratifiedKFold( n_splits=5, random_state=1017,shuffle=True)
    # for i, (tr, va) in enumerate(sk.split(train_feature, score)):
    #     print('stack:%d/%d' % ((i + 1), n_folds))
    #     pac = PassiveAggressiveRegressor(random_state=1017)
    #     pac.fit(train_feature[tr], score[tr])
    #     score_va = pac.predict(train_feature[va])
    #     score_te = pac.predict(test_feature)
    #     print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))
    #     stack_train[va,0] = score_va
    #     stack_test[:,0] += score_te
    # stack_test /= n_folds
    # stack = np.vstack([stack_train, stack_test])
    # df_stack3['tfidf_pac_classfiy_{}'.format("feat1")] = stack[:,0]
    ########################### FTRL ################################
    # NOTE: the banner string below is stale — this section actually fits FTRL.
    print('MultinomialNB stacking')
    stack_train = np.zeros((len(train_data),1))
    stack_test = np.zeros((len(test_data),1))
    score_va = 0
    sk = StratifiedKFold( n_splits=5, random_state=1017,shuffle=True)
    for i, (tr, va) in enumerate(sk.split(train_feature, score)):
        print('stack:%d/%d' % ((i + 1), n_folds))
        clf = FTRL(alpha=0.01, beta=0.1, L1=0.00001, L2=1.0, D=train_feature.shape[1], iters=50, inv_link="identity", threads=1)
        clf.fit(train_feature[tr], score[tr])
        score_va = clf.predict(train_feature[va])
        score_te = clf.predict(test_feature)
        print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))
        stack_train[va,0] = score_va
        stack_test[:,0] += score_te
    # Average the per-fold test predictions. (Assumed to sit OUTSIDE the fold
    # loop, matching the commented sections above — indentation was lost in
    # this paste; TODO confirm against the original notebook.)
    stack_test /= n_folds
    stack = np.vstack([stack_train, stack_test])
    df_stack3['tfidf_FTRL_classfiy_{}'.format("feat1")] = stack[:,0]
    ########################### Ridge regression ################################
    print('RidgeClassfiy stacking')
    stack_train = np.zeros((len(train_data),1))
    stack_test = np.zeros((len(test_data),1))
    score_va = 0
    sk = StratifiedKFold( n_splits=5, random_state=1017,shuffle=True)
    for i, (tr, va) in enumerate(sk.split(train_feature, score)):
        print('stack:%d/%d' % ((i + 1), n_folds))
        ridge = Ridge(solver="sag", fit_intercept=True, random_state=42, alpha=30)
        ridge.fit(train_feature[tr], score[tr])
        score_va = ridge.predict(train_feature[va])
        score_te = ridge.predict(test_feature)
        print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))
        stack_train[va,0] = score_va
        stack_test[:,0] += score_te
    stack_test /= n_folds
    stack = np.vstack([stack_train, stack_test])
    df_stack3['tfidf_ridge_classfiy_{}'.format("feat1")] = stack[:,0]
    ############################ LinearSVR ################################
    # print('LinerSVC stacking')
    # stack_train = np.zeros((len(train_data),1))
    # stack_test = np.zeros((len(test_data),1))
    # score_va = 0
    # sk = StratifiedKFold( n_splits=5, random_state=1017,shuffle=True)
    # for i, (tr, va) in enumerate(sk.split(train_feature, score)):
    #     print('stack:%d/%d' % ((i + 1), n_folds))
    #     lsvc = LinearSVR(random_state=1017)
    #     lsvc.fit(train_feature[tr], score[tr])
    #     score_va = lsvc.predict(train_feature[va])
    #     score_te = lsvc.predict(test_feature)
    #     print('得分' + str(mean_squared_error(score[va], lsvc.predict(train_feature[va]))))
    #     stack_train[va,0] = score_va
    #     stack_test[:,0] += score_te
    # stack_test /= n_folds
    # stack = np.vstack([stack_train, stack_test])
    # df_stack3['tfidf_lsvc_classfiy_{}'.format("feat1")] = stack[:,0]
# Free the fold buffers and the feat1 matrices before the feat2 pass.
del stack,stack_train, stack_test,train_feature,test_feature
gc.collect()
# df_stack.to_csv('graph_tfidf_classfiy.csv', index=None, encoding='utf8')
print('tfidf特征已保存\n')
del train1,test1
gc.collect()
# Second feature set: numeric columns of train2/test2 (feat2).
cols = [x for x in train2.columns if x not in ['label_description',"is_group","Name",'PetID',"Description",'AdoptionSpeed']]
train2[cols]=train2[cols].fillna(0)
test2[cols]=test2[cols].fillna(0)
############################ split the dataset ##########################
print('开始进行一些前期处理')
train_feature = train2[cols].values
test_feature = test2[cols].values
# 5-fold cross validation
n_folds = 5
print('处理完毕')
# df_stack4 collects the "feat2" stacked predictions, keyed by PetID.
df_stack4 = pd.DataFrame()
df_stack4['PetID']=data['PetID']
# Level-1 "weak" stackers over the feat2 numeric matrix — mirrors the feat1
# pass above, writing columns into df_stack4 instead of df_stack3.
for label in ["AdoptionSpeed"]:
    score = train_data[label]
    ########################### SGD (stochastic gradient descent) ################################
    # print('sgd stacking')
    # stack_train = np.zeros((len(train_data),1))
    # stack_test = np.zeros((len(test_data),1))
    # score_va = 0
    # sk = StratifiedKFold( n_splits=5, random_state=1017,shuffle=True)
    # for i, (tr, va) in enumerate(sk.split(train_feature, score)):
    #     print('stack:%d/%d' % ((i + 1), n_folds))
    #     sgd = SGDRegressor(random_state=1017,)
    #     sgd.fit(train_feature[tr], score[tr])
    #     score_va = sgd.predict(train_feature[va])
    #     score_te = sgd.predict(test_feature)
    #     print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))
    #     stack_train[va,0] = score_va
    #     stack_test[:,0]+= score_te
    # stack_test /= n_folds
    # stack = np.vstack([stack_train, stack_test])
    # df_stack4['tfidf_sgd_classfiy_{}'.format("feat2")] = stack[:,0]
    ########################### PAC (PassiveAggressiveRegressor) ################################
    # print('PAC stacking')
    # stack_train = np.zeros((len(train_data),1))
    # stack_test = np.zeros((len(test_data),1))
    # score_va = 0
    # sk = StratifiedKFold( n_splits=5, random_state=1017,shuffle=True)
    # for i, (tr, va) in enumerate(sk.split(train_feature, score)):
    #     print('stack:%d/%d' % ((i + 1), n_folds))
    #     pac = PassiveAggressiveRegressor(random_state=1017)
    #     pac.fit(train_feature[tr], score[tr])
    #     score_va = pac.predict(train_feature[va])
    #     score_te = pac.predict(test_feature)
    #     print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))
    #     stack_train[va,0] = score_va
    #     stack_test[:,0] += score_te
    # stack_test /= n_folds
    # stack = np.vstack([stack_train, stack_test])
    # df_stack4['tfidf_pac_classfiy_{}'.format("feat2")] = stack[:,0]
    ########################### FTRL ################################
    # NOTE: the banner string below is stale — this section actually fits FTRL.
    print('MultinomialNB stacking')
    stack_train = np.zeros((len(train_data),1))
    stack_test = np.zeros((len(test_data),1))
    score_va = 0
    sk = StratifiedKFold( n_splits=5, random_state=1017,shuffle=True)
    for i, (tr, va) in enumerate(sk.split(train_feature, score)):
        print('stack:%d/%d' % ((i + 1), n_folds))
        clf = FTRL(alpha=0.01, beta=0.1, L1=0.00001, L2=1.0, D=train_feature.shape[1], iters=50, inv_link="identity", threads=1)
        clf.fit(train_feature[tr], score[tr])
        score_va = clf.predict(train_feature[va])
        score_te = clf.predict(test_feature)
        print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))
        stack_train[va,0] = score_va
        stack_test[:,0] += score_te
    # Average the per-fold test predictions (assumed outside the fold loop —
    # indentation was lost in this paste; TODO confirm).
    stack_test /= n_folds
    stack = np.vstack([stack_train, stack_test])
    df_stack4['tfidf_FTRL_classfiy_{}'.format("feat2")] = stack[:,0]
    ########################### Ridge regression ################################
    print('RidgeClassfiy stacking')
    stack_train = np.zeros((len(train_data),1))
    stack_test = np.zeros((len(test_data),1))
    score_va = 0
    sk = StratifiedKFold( n_splits=5, random_state=1017,shuffle=True)
    for i, (tr, va) in enumerate(sk.split(train_feature, score)):
        print('stack:%d/%d' % ((i + 1), n_folds))
        ridge = Ridge(solver="sag", fit_intercept=True, random_state=42, alpha=30)
        ridge.fit(train_feature[tr], score[tr])
        score_va = ridge.predict(train_feature[va])
        score_te = ridge.predict(test_feature)
        print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))
        stack_train[va,0] = score_va
        stack_test[:,0] += score_te
    stack_test /= n_folds
    stack = np.vstack([stack_train, stack_test])
    df_stack4['tfidf_ridge_classfiy_{}'.format("feat2")] = stack[:,0]
############################ Linersvc(LinerSVC) ################################
# print('LinerSVC stacking')
# stack_train = np.zeros((len(train_data),1))
# stack_test = np.zeros((len(test_data),1))
# score_va = 0
# sk = StratifiedKFold( n_splits=5, random_state=1017,shuffle=True)
# | |
from subprocess import STDOUT
import sys
from tf.transformations import rotation_matrix
sys.path.insert(0, './yolov5')
from yolov5.utils.datasets import LoadImages, LoadStreams,LoadWebcam,LoadRealsense
from yolov5.utils.general import check_img_size, non_max_suppression, scale_coords
from yolov5.utils.torch_utils import select_device, time_synchronized
from deep_sort_pytorch.utils.parser import get_config
from deep_sort_pytorch.deep_sort import DeepSort
import argparse
import os
import platform
import shutil
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
'''
直接再track.py改watchout,在v2.0的基础上加上了
- goodenbox 黄金分割法裁剪框(其实kcf利用了六个点的深度来判断效果更加)
并且和抗的算法匹配
version:3.0
'''
import numpy as np
from visualization_msgs.msg import Marker,MarkerArray
import rospy
from numba import jit
from tf import TransformListener
from APF_BASE_utils import BASE_TOOLS_for_car as To_1
from APF_FOLLOW_utils import FOLLOW_TOOLS_for_car as To_2
# Base constants mixed with a track id to derive a distinct per-id BGR color
# (see compute_color_for_labels below).
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
def bbox_rel(*xyxy):
    """Convert absolute corner coordinates (x1, y1, x2, y2) to a center/size box.

    Each element of ``xyxy`` is a scalar tensor exposing ``.item()``.

    Returns:
        (x_center, y_center, width, height) as Python floats.
    """
    xa, ya, xb, yb = (coord.item() for coord in xyxy)
    width = abs(xb - xa)
    height = abs(yb - ya)
    left = min(xa, xb)
    top = min(ya, yb)
    return left + width / 2, top + height / 2, width, height
def compute_color_for_labels(label, palette=(2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)):
    """Map an integer track id to a deterministic color tuple.

    Args:
        label: integer track id.
        palette: three base constants mixed with the label. Defaults to the
            same values as the module-level ``palette``, so existing callers
            are unaffected; passing a different palette generalizes the
            color scheme.

    Returns:
        Tuple of three ints in [0, 254].
    """
    # The quadratic spreads nearby ids so consecutive tracks get visibly
    # different colors.
    mix = label ** 2 - label + 1
    return tuple(int((p * mix) % 255) for p in palette)
def showdepth(boxes, depth):
    """Print the depth, in meters, of every pixel inside each (x1,y1,x2,y2) box."""
    for box in boxes:
        left, top, right, bottom = (int(coord) for coord in box)
        for col in range(left, right):
            for row in range(top, bottom):
                # The depth image stores millimeters; 0.001 converts to meters.
                print(depth[row, col] * 0.001)
# NOTE: offset shifts each box by the optical-center offset before drawing.
def draw_boxes(img, bbox, identities=None, offset=(0, 0)):
    """Draw a golden-ratio-shrunken rectangle per tracked box, colored by id.

    Mutates ``img`` in place via cv2 and returns it.
    """
    for i, box in enumerate(bbox):
        x1, y1, x2, y2 = [int(i) for i in box]
        x1 += offset[0]
        x2 += offset[0]
        y1 += offset[1]
        y2 += offset[1]
        import math
        # Shrink the box to its central band (0.382..0.618 of each side).
        # NOTE(review): x1 is reassigned before x2 is computed, so the second
        # line uses the already-shifted x1 — this differs from the u1/u2
        # computation in Watchout.create_box; confirm which is intended.
        x1 = x1 + math.ceil((x2-x1)*0.382)
        x2 = x1 + math.ceil((x2-x1)*0.618)
        y1 = y1 + math.ceil((y2-y1)*0.382)
        y2 = y1 + math.ceil((y2-y1)*0.618)
        # print(img.shape)
        # print(x1,y1,x2,y2)
        # box text and bar
        id = int(identities[i]) if identities is not None else 0
        color = compute_color_for_labels(id)
        label = '{}{:d}'.format("", id)
        t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 3)
        # cv2.rectangle(
        #     img, (x1, y1), (x1 + t_size[0] + 3, y1 + t_size[1] + 4), color, -1)
        # cv2.putText(img, label, (x1, y1 +
        #             t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 2)
    return img
class Watchout:
def __init__(self):
    """Set up timing, camera intrinsics, and ROS pub/sub state."""
    # Timestamps of consecutive frames; their difference is the dt used for
    # per-track velocity estimation in publish3dbox.
    self.lasttime = rospy.Time.now()
    self.thistime = rospy.Time.now()
    self.scale = 0.001  # depth image unit conversion: millimeters -> meters
    self.idcenvel = []  # per track: [id, cx, cy, vx, vy]
    self.depth_thres = 10.0  # ignore depth readings beyond 10 m
    # Camera intrinsics (pinhole model focal lengths / principal point).
    fx = 609.2713012695312
    cx = 316.67022705078125
    fy = 608.010498046875
    cy = 244.8178253173828
    # Inverse-intrinsics matrix: maps [u*d, v*d, d] pixel coordinates scaled
    # by depth into camera-frame coordinates.
    self.K = np.array([[1.0/fx,0,-cx/fx],
                       [0,1.0/fy,-cy/fy],
                       [0.0 , 0.0, 1.0]])
    # Vertex-index pairs forming the 12 edges of a 3-D bounding box.
    self.lines = [[0,1],[1,3],[3,2],[2,0],
                  [0,4],[2,6],[1,5],[3,7],
                  [4,5],[5,7],[7,6],[6,4]]
    self.pub = rospy.Publisher('Personbox',MarkerArray,queue_size=1)
    self.rate = rospy.Rate(10)
    self.listener = TransformListener()
def watch(self,opt, save_img=False):
    """Main loop: detect with YOLOv5, track with DeepSORT, plan with APF.

    Reads frames (and a depth image) from a webcam or RealSense stream,
    runs detection + NMS, feeds boxes to DeepSORT, converts tracked boxes
    to obstacle info, and hands them to the APF planner. Shows the
    annotated image until 'q' is pressed or ROS shuts down (raises
    StopIteration to terminate the stream).
    """
    out, source,weights, view_img, save_txt, imgsz = \
        opt.output, opt.source ,opt.weights, opt.view_img, opt.save_txt, opt.img_size
    # initialize deepsort
    cfg = get_config()
    cfg.merge_from_file(opt.config_deepsort)
    deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
                        max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
                        nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
                        max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
                        use_cuda=True)
    # Initialize
    device = select_device(opt.device)
    half = device.type != 'cpu'  # half precision only supported on CUDA
    # Load model
    model = torch.load(weights, map_location=device)['model'].float()  # load to FP32
    model.to(device).eval()
    if half:
        model.half()  # to FP16
    # Set Dataloader
    vid_path, vid_writer = None, None
    if source=='0':
        dataset = LoadWebcam(source,imgsz)
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
    else:
        dataset = LoadRealsense('0',img_size=imgsz)
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    # Run inference
    t0 = time.time()
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    # run once (GPU warm-up pass)
    _ = model(img.half() if half else img) if device.type != 'cpu' else None
    vis, pos_end = To_1.init(mapsize=150, scale=15)
    # vis, pos_end, id_ = To_2.init(mapsize=150, scale=15)
    for frame_idx, (path, img, im0, depth) in enumerate(dataset):
        self.thistime = rospy.Time.now()
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]
        # Apply NMS
        # [xyxy, conf, cls] n*6
        pred = non_max_suppression(
            pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
        t2 = time_synchronized()
        # Print time (inference + NMS)
        print('Done. (%.3fs)' % ( t2 - t1))
        # Process detections
        for i, det in enumerate(pred):  # detections per image
            im0 = im0.copy()
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size (i.e. adjust xyxy)
                det[:, :4] = scale_coords(
                    img.shape[2:], det[:, :4], im0.shape).round()
                bbox_xywh = []
                confs = []
                # Adapt detections to deep sort input format:
                # center_x, center_y, w, h, confidence
                for *xyxy, conf, cls in det:
                    x_c, y_c, bbox_w, bbox_h = bbox_rel(*xyxy)
                    obj = [x_c, y_c, bbox_w, bbox_h]
                    bbox_xywh.append(obj)
                    confs.append([conf.item()])
                xywhs = torch.Tensor(bbox_xywh)
                confss = torch.Tensor(confs)
                # Pass detections to deepsort
                # outputs : x1 y1 x2 y2 id
                outputs = deepsort.update(xywhs, confss, im0)
                # draw boxes for visualization
                if len(outputs) > 0:
                    bbox_xyxy = outputs[:, :4]
                    identities = outputs[:, -1]
                    draw_boxes(im0, bbox_xyxy, identities)
                    t3 = rospy.Time.now()
                    # self.publish3dbox(depth,bbox_xyxy,identities)
                    # if not self.init:
                    #     import threading
                    #     thread = threading.Thread(target=self.publish3dbox,args=(depth,bbox_xyxy,identities))
                    #     thread.start()
                    #     self.init = 1
                    #     print('开启成功')
                    # NOTE(review): self.twodbox is not defined in this file
                    # excerpt — verify it exists on Watchout.
                    blocklist = self.twodbox(depth,bbox_xyxy,identities)
                    pos_now = (0, 0, 0, 0, 0)
                    vx, vy, w, f = To_1.Vis_and_deside(vis=vis, pos_now=pos_now,
                                                      pos_end=pos_end, blocklist=blocklist)
                    # vx, vy, w, f, id_ = To_2.Vis_and_deside(vis=vis, pos_now=pos_now,
                    #                                         pos_end=pos_end, blocklist=blocklist,id_=id_)
                    print(f'Creating markderarrary use {(rospy.Time.now()-t3).to_sec()} s ')
                    print(self.idcenvel)
            else:
                deepsort.increment_ages()
            # Stream results
            if view_img:
                cv2.imshow('watchout', im0)
                if cv2.waitKey(1) == ord('q') or rospy.is_shutdown():  # q to quit
                    # thread.join()
                    print('Done. (%.3fs)' % (time.time() - t0))
                    raise StopIteration
        self.lasttime = self.thistime
def goodenbox(self,bbox_xyxy):
    # TODO: golden-ratio box refinement; currently an unimplemented placeholder.
    pass
# @jit
def create_box(self,depth_img,box,offset=(0,0)):
    """Back-project the golden-ratio core of a 2-D box into a 3-D edge marker.

    Uses x = (u*depth - cx*depth)/fx, y = (v*depth - cy*depth)/fy via the
    precomputed inverse-intrinsics matrix ``self.K``.

    Args:
        depth_img: depth image indexed as [v, u], values in millimeters.
        box: (x1, y1, x2, y2) pixel box.
        offset: unused here; kept for signature parity with draw_boxes.

    Returns:
        (cx, cy, marker): centroid x/y in camera coordinates and a
        LINE_LIST Marker of the 12 box edges, or (0, 0, None) when no
        valid depth pixel was found or the map->camera transform failed.
    """
    import math
    x1,y1,x2,y2 = [int(i) for i in box]
    w = x2 - x1
    h = y2 - y1
    # Golden-ratio crop: keep only the central 0.382..0.618 band of the box
    # so background pixels around the person are discarded.
    u1 = math.ceil(x1+0.382*w)
    u2 = math.ceil(x1+0.618*w)
    v1 = math.ceil(y1+0.382*h)
    v2 = math.ceil(y1+0.618*h)
    uv1 = []
    for u in range(u1,u2):
        for v in range(v1,v2):
            depth = float(depth_img[v,u])*self.scale
            # Skip readings beyond the configured depth range.
            if depth > self.depth_thres:
                continue
            uv1.append([u*depth,v*depth,depth])
    if len(uv1) < 1:
        print("create_error")
        return 0,0,None
    uvd = np.array(uv1).T  # 3 x n: homogeneous pixel coords scaled by depth
    # Camera-frame xyz corresponds to world-frame (y, z, x); hence the
    # column shuffle when reading the result below.
    yzx = self.K.dot(uvd).T  # n x 3
    # Approximate the centroid by the per-axis mean.
    cx = yzx[:,2].mean()
    cy = yzx[:,0].mean()
    # Axis-aligned extremes define the eight corners of the box.
    xmax = yzx[:,2].max()
    xmin = yzx[:,2].min()
    ymax = yzx[:,0].max()
    ymin = yzx[:,0].min()
    zmax = yzx[:,1].max()
    zmin = yzx[:,1].min()
    from sensor_msgs.msg import PointCloud
    pcl = PointCloud()
    pcl.header.frame_id = '/camera'
    # FIX: was `pcl.header.frame_id = self.thistime`, which overwrote the
    # frame id with a timestamp; the timestamp belongs in header.stamp.
    pcl.header.stamp = self.thistime
    pcl.points.append((cx,cy,0))
    pcl.points.append((xmax,ymax,zmax))
    pcl.points.append((xmin,ymin,zmin))
    # Transform the sample points from the camera frame to the map frame.
    try:
        # FIX: rospy.Time(0) requests the latest available transform;
        # the original Time(3) asked for the transform at t=3s.
        self.listener.lookupTransform('/map','/camera',rospy.Time(0))
    except Exception:
        # FIX: the original evaluated the bare name `exit` (a no-op) and
        # fell through; bail out with the same error value as the
        # no-depth case instead.
        return 0,0,None
    self.listener.transformPointCloud('/map',pcl)
    # NOTE(review): the transformed cloud returned above is discarded and the
    # corner values below are still camera-frame numbers — confirm intent.
    from geometry_msgs.msg import Point
    points = [Point(xmin,ymin,zmin),Point(xmax,ymin,zmin),
              Point(xmin,ymax,zmin),Point(xmax,ymax,zmin),
              Point(xmin,ymin,zmax),Point(xmax,ymin,zmax),
              Point(xmin,ymax,zmax),Point(xmax,ymax,zmax)]
    # Build the wireframe bbox marker.
    marker = Marker()
    marker.header.frame_id = 'map'
    marker.header.stamp = rospy.Time.now()
    marker.action = Marker.ADD
    marker.type = Marker.LINE_LIST
    # marker.lifetime = rospy.Duration(0)
    marker.color.r = 1
    marker.color.g = 0
    marker.color.b = 0
    marker.color.a = 1
    marker.scale.x = 0.2
    marker.points = []
    for line in self.lines:
        marker.points.append(points[line[0]])
        marker.points.append(points[line[1]])
    return cx , cy , marker
# @jit
def publish3dbox(self,depth_img,bbox,identities=None,offset=(0,0)):
    """Build and publish a MarkerArray of 3-D boxes; update per-id velocities.

    For each tracked id, back-projects its box via create_box and, if the id
    was seen in the previous frame, estimates (vx, vy) from the centroid
    displacement divided by the inter-frame dt.
    """
    markerarray = MarkerArray()
    dt = (self.thistime - self.lasttime).to_sec()
    idcentvel_tmp = []
    # Build markers and match each id against the previous frame's entries.
    for i,id_ in enumerate(identities):
        marker = Marker()
        cx,cy,marker = self.create_box(depth_img,bbox[i],offset)
        marker.id = id_
        markerarray.markers.append(marker)
        flag = 0
        # Neat detail: self.idcenvel starts as an empty list, so on the very
        # first frame every id falls through to the vx=vy=0 branch — the
        # initialisation comes for free.
        for idcv in self.idcenvel:
            if id_ == idcv[0]:
                vx = (cx - idcv[1])/dt
                vy = (cy - idcv[2])/dt
                idcentvel_tmp.append([id_,cx,cy,vx,vy])
                flag = 1
                break
        if not flag:
            # Unmatched (new) id: no velocity estimate yet.
            vx=vy=0.0
            idcentvel_tmp.append([id_,cx,cy,vx,vy])
    self.idcenvel = idcentvel_tmp
    print('idcenvel',self.idcenvel)
    self.pub.publish(markerarray)
def drawsquare(self,xyxy,depth):
# 计算公式 x = (u*depth - cx*depth)/fx y = (v*depth - cy*depth)/fy
# 先将 像素坐标 uv1 * depth
x1,y1,x2,y2 = [int(i) for i in xyxy]
w = x2 - x1
h = y2 - y1
#黄金比例切割背景
import math
u1 = math.ceil(x1+0.382*w)
u2 = math.ceil(x1+0.618*w)
v1 = math.ceil(y1+0.382*h)
v2 = math.ceil(y1+0.618*h)
uvd = []
for u in range(u1,u2):
for v in range(v1,v2):
depth_ = float(depth[v,u])*self.scale
if depth_ > 10: continue
else: uvd.append([u*depth_,v*depth_,depth_])
yzx = self.K.dot(np.array(uvd).T).T
# 用均值代替质心
cx = yzx[:,2].mean()
cy = yzx[:,0].mean()
# 找到八个顶点
xmax = yzx[:,2].max()
xmin = yzx[:,2].min()
ymax = yzx[:,0].max()
ymin = yzx[:,0].min()
zmax = yzx[:,1].max()
| |
resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesGitRepoArgs' git_repo: GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesGlusterfsArgs' glusterfs: Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesHostPathArgs' host_path: HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesIscsiArgs' iscsi: ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesNfsArgs' nfs: NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesPersistentVolumeClaimArgs' persistent_volume_claim: PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesPhotonPersistentDiskArgs' photon_persistent_disk: PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesPortworxVolumeArgs' portworx_volume: PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesProjectedArgs' projected: Items for all in one resources secrets, configmaps, and downward API
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesQuobyteArgs' quobyte: Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesRbdArgs' rbd: RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesScaleIOArgs' scale_io: ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesSecretArgs' secret: Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesStorageosArgs' storageos: StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
:param 'DatadogAgentSpecClusterChecksRunnerConfigVolumesVsphereVolumeArgs' vsphere_volume: VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
"""
pulumi.set(__self__, "name", name)
if aws_elastic_block_store is not None:
pulumi.set(__self__, "aws_elastic_block_store", aws_elastic_block_store)
if azure_disk is not None:
pulumi.set(__self__, "azure_disk", azure_disk)
if azure_file is not None:
pulumi.set(__self__, "azure_file", azure_file)
if cephfs is not None:
pulumi.set(__self__, "cephfs", cephfs)
if cinder is not None:
pulumi.set(__self__, "cinder", cinder)
if config_map is not None:
pulumi.set(__self__, "config_map", config_map)
if csi is not None:
pulumi.set(__self__, "csi", csi)
if downward_api is not None:
pulumi.set(__self__, "downward_api", downward_api)
if empty_dir is not None:
pulumi.set(__self__, "empty_dir", empty_dir)
if fc is not None:
pulumi.set(__self__, "fc", fc)
if flex_volume is not None:
pulumi.set(__self__, "flex_volume", flex_volume)
if flocker is not None:
pulumi.set(__self__, "flocker", flocker)
if gce_persistent_disk is not None:
pulumi.set(__self__, "gce_persistent_disk", gce_persistent_disk)
if git_repo is not None:
pulumi.set(__self__, "git_repo", git_repo)
if glusterfs is not None:
pulumi.set(__self__, "glusterfs", glusterfs)
if host_path is not None:
pulumi.set(__self__, "host_path", host_path)
if iscsi is not None:
pulumi.set(__self__, "iscsi", iscsi)
if nfs is not None:
pulumi.set(__self__, "nfs", nfs)
if persistent_volume_claim is not None:
pulumi.set(__self__, "persistent_volume_claim", persistent_volume_claim)
if photon_persistent_disk is not None:
pulumi.set(__self__, "photon_persistent_disk", photon_persistent_disk)
if portworx_volume is not None:
pulumi.set(__self__, "portworx_volume", portworx_volume)
if projected is not None:
pulumi.set(__self__, "projected", projected)
if quobyte is not None:
pulumi.set(__self__, "quobyte", quobyte)
if rbd is not None:
pulumi.set(__self__, "rbd", rbd)
if scale_io is not None:
pulumi.set(__self__, "scale_io", scale_io)
if secret is not None:
pulumi.set(__self__, "secret", secret)
if storageos is not None:
pulumi.set(__self__, "storageos", storageos)
if vsphere_volume is not None:
pulumi.set(__self__, "vsphere_volume", vsphere_volume)
@property
@pulumi.getter
def name(self) -> str:
    """
    Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    """
    # Generated getter: the value is resolved by the Pulumi runtime.
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="awsElasticBlockStore")
def aws_elastic_block_store(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesAwsElasticBlockStore']:
    """
    AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
    """
    # Generated getter: the value is resolved by the Pulumi runtime.
    return pulumi.get(self, "aws_elastic_block_store")
@property
@pulumi.getter(name="azureDisk")
def azure_disk(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesAzureDisk']:
    """
    AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
    """
    # Generated getter: the value is resolved by the Pulumi runtime.
    return pulumi.get(self, "azure_disk")
@property
@pulumi.getter(name="azureFile")
def azure_file(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesAzureFile']:
    """
    AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
    """
    # Generated getter: the value is resolved by the Pulumi runtime.
    return pulumi.get(self, "azure_file")
@property
@pulumi.getter
def cephfs(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesCephfs']:
    """
    CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
    """
    # Generated getter: the value is resolved by the Pulumi runtime.
    return pulumi.get(self, "cephfs")
@property
@pulumi.getter
def cinder(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesCinder']:
    """
    Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md
    """
    # Generated getter: the value is resolved by the Pulumi runtime.
    return pulumi.get(self, "cinder")
@property
@pulumi.getter(name="configMap")
def config_map(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesConfigMap']:
    """
    ConfigMap represents a configMap that should populate this volume
    """
    # Generated getter: the value is resolved by the Pulumi runtime.
    return pulumi.get(self, "config_map")
@property
@pulumi.getter
def csi(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesCsi']:
    """
    CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).
    """
    # Generated getter: the value is resolved by the Pulumi runtime.
    return pulumi.get(self, "csi")
@property
@pulumi.getter(name="downwardAPI")
def downward_api(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesDownwardAPI']:
    """
    DownwardAPI represents downward API about the pod that should populate this volume
    """
    # Generated getter: the value is resolved by the Pulumi runtime.
    return pulumi.get(self, "downward_api")
@property
@pulumi.getter(name="emptyDir")
def empty_dir(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesEmptyDir']:
    """
    EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
    """
    # Generated getter: the value is resolved by the Pulumi runtime.
    return pulumi.get(self, "empty_dir")
@property
@pulumi.getter
def fc(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesFc']:
    """
    FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
    """
    # Generated getter: the value is resolved by the Pulumi runtime.
    return pulumi.get(self, "fc")
@property
@pulumi.getter(name="flexVolume")
def flex_volume(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesFlexVolume']:
    """A generic volume resource provisioned/attached using an exec-based plugin."""
    volume_source = pulumi.get(self, "flex_volume")
    return volume_source
@property
@pulumi.getter
def flocker(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesFlocker']:
    """A Flocker volume attached to the kubelet's host machine.
    Depends on the Flocker control service being running.
    """
    volume_source = pulumi.get(self, "flocker")
    return volume_source
@property
@pulumi.getter(name="gcePersistentDisk")
def gce_persistent_disk(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesGcePersistentDisk']:
    """A GCE Disk resource attached to the kubelet's host machine and exposed to the pod.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
    """
    volume_source = pulumi.get(self, "gce_persistent_disk")
    return volume_source
@property
@pulumi.getter(name="gitRepo")
def git_repo(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesGitRepo']:
    """A git repository at a particular revision.

    DEPRECATED: instead, mount an EmptyDir into an InitContainer that clones the
    repo using git, then mount the EmptyDir into the Pod's container.
    """
    volume_source = pulumi.get(self, "git_repo")
    return volume_source
@property
@pulumi.getter
def glusterfs(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesGlusterfs']:
    """A Glusterfs mount on the host that shares the pod's lifetime.
    More info: https://examples.k8s.io/volumes/glusterfs/README.md
    """
    volume_source = pulumi.get(self, "glusterfs")
    return volume_source
@property
@pulumi.getter(name="hostPath")
def host_path(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesHostPath']:
    """A pre-existing file or directory on the host machine exposed directly to the container.

    Generally reserved for system agents or other privileged things allowed to
    see the host machine; most containers will NOT need this.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
    """
    volume_source = pulumi.get(self, "host_path")
    return volume_source
@property
@pulumi.getter
def iscsi(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesIscsi']:
    """An ISCSI Disk resource attached to the kubelet's host machine and exposed to the pod.
    More info: https://examples.k8s.io/volumes/iscsi/README.md
    """
    volume_source = pulumi.get(self, "iscsi")
    return volume_source
@property
@pulumi.getter
def nfs(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesNfs']:
    """An NFS mount on the host that shares the pod's lifetime.
    More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
    """
    volume_source = pulumi.get(self, "nfs")
    return volume_source
@property
@pulumi.getter(name="persistentVolumeClaim")
def persistent_volume_claim(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesPersistentVolumeClaim']:
    """A reference to a PersistentVolumeClaim in the same namespace.
    More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
    """
    volume_source = pulumi.get(self, "persistent_volume_claim")
    return volume_source
@property
@pulumi.getter(name="photonPersistentDisk")
def photon_persistent_disk(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesPhotonPersistentDisk']:
    """A PhotonController persistent disk attached and mounted on the kubelet's host machine."""
    volume_source = pulumi.get(self, "photon_persistent_disk")
    return volume_source
@property
@pulumi.getter(name="portworxVolume")
def portworx_volume(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesPortworxVolume']:
    """A Portworx volume attached and mounted on the kubelet's host machine."""
    volume_source = pulumi.get(self, "portworx_volume")
    return volume_source
@property
@pulumi.getter
def projected(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesProjected']:
    """Items for all-in-one resources: secrets, configmaps, and downward API."""
    volume_source = pulumi.get(self, "projected")
    return volume_source
@property
@pulumi.getter
def quobyte(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerConfigVolumesQuobyte']:
"""
| |
"""Graph visualization with HTML/CSS/JS based on d3.v7.js."""
from ..conversion import _internal
from ..utils.args import check_arg as _ca
from . import data_structures as _ds
from . import template_system as _ts
def d3(data,
graph_height=450, details_height=100,
show_details=False, show_details_toggle_button=True,
show_menu=False, show_menu_toggle_button=True,
show_node=True, node_size_factor=1.0,
node_size_data_source='size', use_node_size_normalization=False,
node_size_normalization_min=5.0, node_size_normalization_max=75.0,
node_drag_fix=False, node_hover_neighborhood=False, node_hover_tooltip=True,
show_node_image=True, node_image_size_factor=1.0,
show_node_label=True, show_node_label_border=True, node_label_data_source='id',
node_label_size_factor=1.0, node_label_rotation=0.0, node_label_font='Arial',
show_edge=True, edge_size_factor=1.0,
edge_size_data_source='size', use_edge_size_normalization=False,
edge_size_normalization_min=0.2, edge_size_normalization_max=5.0,
edge_curvature=0.0, edge_hover_tooltip=True,
show_edge_label=False, show_edge_label_border=True, edge_label_data_source='id',
edge_label_size_factor=1.0, edge_label_rotation=0.0, edge_label_font='Arial',
zoom_factor=0.75, large_graph_threshold=500,
layout_algorithm_active=True,
use_many_body_force=True, many_body_force_strength=-70.0,
many_body_force_theta=0.9,
use_many_body_force_min_distance=False, many_body_force_min_distance=10.0,
use_many_body_force_max_distance=False, many_body_force_max_distance=1000.0,
use_links_force=True, links_force_distance=50.0, links_force_strength=0.5,
use_collision_force=False, collision_force_radius=25.0,
collision_force_strength=0.7,
use_x_positioning_force=False, x_positioning_force_strength=0.2,
use_y_positioning_force=False, y_positioning_force_strength=0.2,
use_centering_force=True):
"""Create an interactive graph visualization with HTML/CSS/JS based on d3.v7.js.
Parameters
----------
data : str, dict, graph object, list
The input data needs to be in a custom format called
:ref:`gravis JSON Graph Format (gJGF) <gJGF-format>`.
It can be provided in following ways:
- *str*: A string in gJGF, or a filepath to a text file in gJGF.
- *dict*: A dict adhering to gJGF.
- *graph object*: An object from a
:ref:`supported graph library <supported-graph-libraries>`,
which internally gets converted to gJGF.
- *list*: Instead of a single graph, it is possible to provide a list of graphs.
They can be all be of the same type, but a mix of different types is also accepted.
The first graph in the list is shown after the visualization has loaded.
The other graphs can be chosen in the data selection menu of the
interactive visualization.
graph_height : int, float
Height of the graph container in pixels (px).
details_height : int, float
Height of the details container in pixels (px).
show_details : bool
If True, the details container is shown on load, otherwise hidden.
show_details_toggle_button : bool
If True, a button is shown that allows to toggle the visibility of the details container.
show_menu : bool
If True, the menu container is shown on load, otherwise hidden.
show_menu_toggle_button : bool
If True, a button is shown that allows to toggle the visibility of the menu container.
show_node : bool
If True, nodes are shown on load, otherwise hidden.
node_size_factor : int, float
A scaling factor that modifies node size.
node_size_data_source : str
Name of the numerical node property that is used as source for node size on load.
use_node_size_normalization : bool
If True, node sizes are normalized to lie in an interval between a
chosen min and max value.
node_size_normalization_min : int, float
Minimum value for node size if node size normalization is active.
node_size_normalization_max : int, float
Maximum value for node size if node size normalization is active.
node_drag_fix : bool
If True, the position of a node becomes fixed after dragging it, i.e. the
layout algorithm does not change its position but the user can drag it again.
node_hover_neighborhood : bool
If True, hovering a node leads to highlighting its neighborhood which consists of
all incident edges and adjacent nodes.
node_hover_tooltip : bool
If True, hovering a node leads to popping up a tooltip if the hover property in the
metadata of this node contains a non-empty string or HTML text.
show_node_image : bool
If True, node images are shown on load for all nodes whose image property in the metadata
contains a valid image URL from which an image can be fetched.
node_image_size_factor : int, float
A scaling factor that modifies node image size.
show_node_label : bool
If True, node labels are shown on load, otherwise hidden.
show_node_label_border : bool
If True, node labels have a small border in the color of the background to better
separate the text from other visual elements like edges or nodes.
node_label_data_source : str
Name of the node property that is used as source for node label text on load.
node_label_size_factor : int, float
A scaling factor that modifies node label size.
node_label_rotation : int, float
An angle that modifies node label orientation.
node_label_font : str
Name of the font that is used for node labels.
show_edge : bool
If True, edges are shown on load, otherwise hidden.
edge_size_factor : int, float
A scaling factor that modifies edge size (=edge width).
edge_size_data_source : str
Name of the edge property that is used as source for edge size on load.
use_edge_size_normalization : bool
If True, edge sizes are normalized to lie in an interval between a
chosen min and max value.
edge_size_normalization_min : int, float
Minimum value for edge size if node size normalization is active.
edge_size_normalization_max : int, float
Maximum value for edge size if node size normalization is active.
edge_curvature : int, float
A factor that modifies edge curvature, where 0.0 means straight lines.
edge_hover_tooltip : bool
If True, hovering an edge leads to popping up a tooltip if the hover property in the
metadata of this edge contains a non-empty string or HTML text.
show_edge_label : bool
If True, edge labels are shown on load, otherwise hidden.
show_edge_label_border : bool
If True, edge labels have a small border in the color of the background to better
separate the text from other visual elements like edges or nodes.
edge_label_data_source : str
Name of the edge property that is used as source for edge label text on load.
edge_label_size_factor : int, float
A scaling factor that modifies edge label size.
edge_label_rotation : int, float
An angle that modifies edge label orientation.
edge_label_font : str
Name of the font that is used for edge labels.
zoom_factor : int, float
Factor that modifies how close the camera is to the drawing area on load.
large_graph_threshold : int, float
Number that determines from when on a graph is considered to be large, which
means that before visualizing it an initial layout is calculated without moving anything.
layout_algorithm_active : bool
If True, the layout algorithm is active on load and leads to movement, otherwise inactive.
use_many_body_force : bool
If True, many body force is active in the layout algorithm.
This force acts between any pair of nodes but can be restricted to only act on nodes
within a certain distance.
many_body_force_strength : int, float
Number that determines the strength of the force. It can be positive to cause attraction
or negative (usual case) to cause repulsion between nodes.
many_body_force_theta : int, float
Number that determines the accuracy of the Barnes–Hut approximation of the
many-body simulation where nodes are grouped instead of treated individually
to improve performance.
use_many_body_force_min_distance : bool
If True, a minimum distance between nodes is used in the many-body force calculation.
This effectively leads to an upper bound on the strength of the force between any two
nodes and avoids instability.
many_body_force_min_distance : int, float
Number that determines the minimum distance between nodes over which the many-body force
is active.
use_many_body_force_max_distance : bool
If True, a maximum distance between nodes is used in the many-body force calculation.
This can improve performance but results in a more localized layout.
many_body_force_max_distance : int, float
Number that determines the maximum distance between nodes over which the many-body force
is active.
use_links_force : bool
If True, link force is active in the layout algorithm.
This force acts between pairs of nodes that are connected by an edge. It pushes them
together or apart in order to come close to a certain distance between connected nodes.
links_force_distance : int, float
Number that determines the preferred distance between connected nodes.
links_force_strength : int, float
Number that determines the strength of the links force.
use_collision_force : bool
If True, collision force is active in the layout algorithm.
This force treats nodes as circles instead of points and acts on pairs of nodes that
overlap in order to push them apart.
collision_force_radius : int, float
Number that determines the radius of the circle around each node.
collision_force_strength : int, float
Number that determines the strength of the force.
use_x_positioning_force : bool
If True, x-positioning force is active in the | |
json.dumps(nds_travel_mode, ensure_ascii=False)
break
else:
travel_mode = ""
if travel_mode:
self.logger.debug(u"Returning travel mode JSON from the network dataset for travel mode name: {}".format(travel_mode_name))
return travel_mode
def _getPortalTravelModeJSON(self, travel_mode_name):
    '''Return a stringified JSON for the travel mode name from the list of travel modes
    supported in the portal.

    If the travel mode name does not exist, an empty string is returned. The travel mode
    name lookup is done in a case-insensitive manner. Also populates
    self.supportedTravelModeNames with the primary names reported by the portal.
    '''
    travel_mode = ""
    self.supportedTravelModeNames = []
    rest_info = get_rest_info()
    try:
        if "owningSystemUrl" in rest_info:
            # Look up travel mode name using GetTravelModes routingUtilities service.
            portal_self = get_portal_self()
            if "helperServices" in portal_self:
                routing_utilities_url = portal_self["helperServices"]["routingUtilities"]["url"]
            else:
                # The server is federated, but something went wrong when determining the URL
                # to the routingUtilities service. So look up travel mode name in the network
                # dataset instead.
                travel_mode = self._getNDSTravelModeJSON(travel_mode_name)
                return travel_mode
        else:
            # As the server is not federated, look up travel mode name in the network dataset.
            travel_mode = self._getNDSTravelModeJSON(travel_mode_name)
            return travel_mode
        # Call GetTravelModes service using REST and get a dict of travel mode names and
        # travel mode JSON.
        gp_server_request_props = json.loads(arcpy.gp._arc_object.serverrequestproperties())
        token = gp_server_request_props.get("token", "")
        referer = gp_server_request_props.get("referer", "")
        get_travel_modes_url = u"{0}/GetTravelModes/execute".format(routing_utilities_url)
        request_parameters = {"token": token}
        get_travel_modes_response = make_http_request(get_travel_modes_url, request_parameters,
                                                      "gzip", referer)
        result_rows = get_travel_modes_response["results"][0]["value"]["features"]
        supported_travel_modes = {}
        for row in result_rows:
            attributes = row["attributes"]
            travel_mode_json = attributes["TravelMode"]
            self.supportedTravelModeNames.append(attributes["Name"])
            # Index by both the primary and the alternate name so either can be matched.
            supported_travel_modes[attributes["Name"].upper()] = travel_mode_json
            supported_travel_modes[attributes["AltName"].upper()] = travel_mode_json
        travel_mode = supported_travel_modes.get(travel_mode_name.upper(), "")
    except Exception:
        # Best-effort: a portal/network failure degrades to "travel mode not found".
        self.logger.warning("Failed to get a list of supported travel modes from the portal")
        if self.logger.DEBUG:
            self._handleException()
    if travel_mode:
        self.logger.debug(u"Returning travel mode JSON from the portal for travel mode name: {}".format(travel_mode_name))
    return travel_mode
def _selectTravelMode(self):
    '''Select a travel mode for the analysis.

    Sets self.travelModeObject, self.portalTravelMode, the custom travel mode
    cost-attribute names and self.walkingRestriction. Raises InputError when the
    requested travel mode or restriction combination is invalid.
    '''
    #Instance attributes that are set in this method
    self.travelModeObject = None
    self.portalTravelMode = None
    self.customTravelModeDistanceAttribute = ""
    self.customTravelModeTimeAttribute = ""
    self.customTravelModeImpedanceAttribute = ""
    self.walkingRestriction = ""
    #Create a mapping of cost attributes from the network dataset and the values of impedance parameter
    #For all network datasets, assume the cost attributes to be same as those that are found in the first section
    impedance_parameter_mappings = {
        "Drive Time" : self.parser.get(self.templateNDS, "time_attribute"),
        "Truck Time" : self.parser.get(self.templateNDS, "truck_time_attribute"),
        "Walk Time" : self.parser.get(self.templateNDS, "walk_time_attribute"),
        "Travel Distance" : self.parser.get(self.templateNDS, "distance_attribute"),
    }
    #Get network dataset travel mode name
    #If the travel mode is JSON string pass the JSON to the big button tool
    # NOTE(review): travel_modes is stored pickled in the service config; pickle.loads
    # assumes the config file is trusted.
    nds_travel_modes = pickle.loads(self.parser.get(self.templateNDS, "travel_modes"))
    self.portalTravelMode = nds_travel_modes.get((self.travelMode.upper(), self.isMeasurementUnitsTimeBased),
                                                 self.travelMode)
    #If travel mode is not a JSON look for the travel mode name in portal.
    if self.isCustomTravelMode:
        self.portalTravelMode = "CUSTOM"
    else:
        try:
            self.travelModeObject = arcpy.na.TravelMode(self.portalTravelMode)
        except ValueError as ex:
            #Get the travel mode JSON from the portal based on its name
            self.portalTravelMode = self._getPortalTravelModeJSON(self.portalTravelMode)
            if self.portalTravelMode:
                self.travelModeObject = arcpy.na.TravelMode(self.portalTravelMode)
            else:
                # Unknown travel mode: report the list of valid names and fail.
                valid_travel_mode_names = u" | ".join(sorted(self.supportedTravelModeNames) + ["Custom"])
                arcpy.AddIDMessage("ERROR", 30158, self.travelMode, valid_travel_mode_names)
                raise InputError
        #If the travel mode has a 0 simplification tolerance, set it to None with unknown units
        # NOTE(review): the second positional argument of json.loads is the Python 2
        # `encoding` parameter; this call is not valid on Python 3 -- confirm runtime.
        portal_travel_mode = json.loads(self.portalTravelMode, "utf-8")
        if portal_travel_mode["simplificationTolerance"] == 0:
            portal_travel_mode["simplificationTolerance"] = None
            portal_travel_mode["simplificationToleranceUnits"] = "esriUnknownUnits"
        self.portalTravelMode = json.dumps(portal_travel_mode, ensure_ascii=False)
    travel_mode_restrictions = self.travelModeObject.restrictions if self.travelModeObject else []
    self.logger.debug(u"Travel Mode used for the analysis: {0}".format(self.portalTravelMode))
    #For custom travel mode always assume a fixed distance attribute and a fixed time attribute if impedance is
    #distance based
    self.customTravelModeDistanceAttribute = impedance_parameter_mappings["Travel Distance"]
    self.customTravelModeTimeAttribute = impedance_parameter_mappings["Drive Time"]
    self.walkingRestriction = self.parser.get(self.templateNDS, "walking_restriction")
    # NOTE(review): trucking_restriction is read but not used anywhere in this method.
    trucking_restriction = self.parser.get(self.templateNDS, "trucking_restriction")
    is_custom_travel_mode_impedance_time_based = False
    self.customTravelModeImpedanceAttribute = self.customTravelModeDistanceAttribute
    if self.impedance != "Travel Distance":
        self.customTravelModeTimeAttribute = impedance_parameter_mappings[self.impedance]
        self.customTravelModeImpedanceAttribute = self.customTravelModeTimeAttribute
        is_custom_travel_mode_impedance_time_based = True
    ##Check for failure conditions when using custom travel modes
    non_walking_restrictions = self.parser.get(self.templateNDS, "non_walking_restrictions").split(";")
    if self.isCustomTravelMode:
        #Fail if break units and impedance are not compatible
        if self.measurementUnits:
            if self.isMeasurementUnitsTimeBased:
                if not is_custom_travel_mode_impedance_time_based:
                    arcpy.AddIDMessage("ERROR", 30148, self.measurementUnits, self.impedance)
                    raise InputError
            else:
                if is_custom_travel_mode_impedance_time_based:
                    arcpy.AddIDMessage("ERROR", 30148, self.measurementUnits, self.impedance)
                    raise InputError
        #Fail if walking and any of "Driving *" restrictions are used
        if set(self.restrictions).intersection(set(non_walking_restrictions)):
            if self.walkingRestriction in self.restrictions:
                arcpy.AddIDMessage("ERROR", 30147, ", ".join(non_walking_restrictions))
                raise InputError
    else:
        #Fail if walking and any of "Driving *" restrictions are used
        if set(travel_mode_restrictions).intersection(set(non_walking_restrictions)):
            if self.walkingRestriction in travel_mode_restrictions:
                arcpy.AddIDMessage("ERROR", 30158, self.travelModeObject.name)
                arcpy.AddIDMessage("ERROR", 30147, ", ".join(non_walking_restrictions))
                raise InputError
def _checkWalkingExtent(self, *analysis_inputs):
    '''When using Walking restriction, fail if inputs are more than maximum supported miles apart.

    Raises InputError when the maximum distance between any two analysis inputs
    exceeds self.MAX_WALKING_MODE_DISTANCE_MILES.
    '''
    #nau.max_distance_between points returns the distance in meters. So convert to miles
    max_distance_inputs = 0
    if self.isCustomTravelMode:
        # Custom travel mode: the walking check applies only when the walking
        # restriction was explicitly requested.
        if self.walkingRestriction in self.restrictions:
            max_distance_inputs = nau.max_distance_between_points(analysis_inputs) * self.METER_TO_MILES
    else:
        # A travel mode object without a "type" attribute is treated as non-walking.
        travel_mode_type = "OTHER"
        if hasattr(self.travelModeObject, "type"):
            travel_mode_type = self.travelModeObject.type
        if self.walkingRestriction in self.travelModeObject.restrictions or travel_mode_type == "WALK":
            max_distance_inputs = nau.max_distance_between_points(analysis_inputs) * self.METER_TO_MILES
    if max_distance_inputs > self.MAX_WALKING_MODE_DISTANCE_MILES:
        # The second message argument is the same limit converted back to kilometers.
        arcpy.AddIDMessage("ERROR", 30145, self.MAX_WALKING_MODE_DISTANCE_MILES,
                           int(self.MAX_WALKING_MODE_DISTANCE_MILES / self.METER_TO_MILES / 1000))
        raise InputError
def _checkMaxOutputFeatures(self, analysis_output, error_message_code=30142):
    '''Fail when the output feature count exceeds the maximum number of records the service
    can successfully return.

    Raises arcpy.ExecuteError with the given message code on overflow.
    '''
    feature_count = int(arcpy.management.GetCount(analysis_output).getOutput(0))
    if feature_count <= self.maxFeatures:
        return
    arcpy.AddIDMessage("ERROR", error_message_code, feature_count, self.maxFeatures)
    raise arcpy.ExecuteError
def _logToolExecutionMessages(self):
    '''Log messages from execution of remote tool or big button tool.

    Severity 1 logs warnings, severity 0 logs debug info (when DEBUG logging is
    on), any other severity is treated as tool failure and raises InputError
    after logging warnings and errors.
    '''
    result_severity = self.toolResult.maxSeverity
    warning_messages = self.toolResult.getMessages(1)
    error_messages = self.toolResult.getMessages(2)
    #for remote tools executed synchronously, maxSeverity and getMessages is determined using arcpy
    if result_severity == -1:
        result_severity = arcpy.GetMaxSeverity()
        warning_messages = arcpy.GetMessages(1)
        error_messages = arcpy.GetMessages(2)
    #print error and warning messages
    if result_severity == 1:
        #Do not output the WARNING 000685 message (empty route edges messages) as they will be always empty
        if self.__class__.__name__ == "SolveLocationAllocation":
            for msg in warning_messages.split("\n"):
                if not msg.startswith("WARNING 000685:"):
                    self.logger.warning(msg)
        else:
            self.logger.warning(warning_messages)
    elif result_severity == 0:
        if self.logger.DEBUG:
            info_messages = self.toolResult.getMessages()
            if not info_messages:
                #For remote tool executed synchronously, get messages from arcpy
                info_messages = arcpy.GetMessages()
            self.logger.debug(info_messages)
    else:
        #Tool failed. Add warning and error messages and raise exception
        self.logger.warning(warning_messages)
        self.logger.error(error_messages)
        raise InputError
def _handleInputErrorException(self, ex):
    '''Exception handler for InputError: mark the solve as failed and log the message.'''
    self.solveSucceeded = False
    #Handle errors due to invalid inputs
    # NOTE(review): ex.message is Python 2 exception style; on Python 3 this only
    # works if InputError defines a .message attribute -- confirm.
    self.logger.error(ex.message)
def _handleArcpyExecuteErrorException(self):
    '''Handle an arcpy.ExecuteError raised by a geoprocessing tool.

    Marks the solve as failed, logs where the error occurred (with the line
    number when DEBUG logging is on) and relays the tool's warning and error
    messages to the service log.
    '''
    self.solveSucceeded = False
    if self.logger.DEBUG:
        # Get the line number at which the GP error occurred.
        traceback_obj = sys.exc_info()[2]
        self.logger.error(u"A geoprocessing error occurred in file {0}, line {1}".format(__file__,
                          traceback_obj.tb_lineno))
    else:
        self.logger.error("A geoprocessing error occurred.")
    for warning_text in arcpy.GetMessages(1).split("\n"):
        self.logger.warning(warning_text)
    for error_text in arcpy.GetMessages(2).split("\n"):
        self.logger.error(error_text)
def _handleException(self):
    '''Handle an unexpected Python exception.

    Marks the solve as failed; with DEBUG logging on, logs a formatted traceback
    (minus the "Traceback (most recent call last)" header), otherwise logs a
    generic message.
    '''
    self.solveSucceeded = False
    if not self.logger.DEBUG:
        self.logger.error("A python error occurred.")
        return
    # Nicely formatted traceback lines, except the first header line.
    formatted_lines = traceback.format_exception(*sys.exc_info())[1:]
    formatted_lines[0] = "A python error occurred in " + formatted_lines[0].lstrip()
    for line in formatted_lines:
        self.logger.error(line.strip())
def _executeBigButtonTool(self, tool_parameters):
    '''Execute the big button tool and store the tool result in self.toolResult.

    tool_parameters -- dict of keyword arguments passed straight through to the
    arcpy tool named by self.TOOL_NAME.
    '''
    # Resolve the tool callable by name on the arcpy module.
    tool = getattr(arcpy, self.TOOL_NAME)
    if self.logger.DEBUG:
        # BUG FIX: the unicode prefix was inside the string literal
        # ("uParameters passed..."), corrupting the log message.
        self.logger.debug(u"Parameters passed when executing {0} tool".format(self.TOOL_NAME))
        for param_name in sorted(tool_parameters):
            self.logger.debug(u"{0}: {1}".format(param_name, tool_parameters[param_name]))
    self.toolResult = tool(**tool_parameters)
    if self.logger.DEBUG:
        # Log the final status message reported by the tool.
        self.logger.debug(u"{0} tool {1}".format(self.TOOL_NAME,
                          self.toolResult.getMessage(self.toolResult.messageCount - 1)))
class FindRoutes(NetworkAnalysisService):
    '''FindRoutes geoprocessing service'''
    # Names of the derived output datasets created in the output geodatabase.
    OUTPUT_ROUTES_NAME = "Routes"
    OUTPUT_ROUTE_EDGES_NAME = "RouteEdges"
    OUTPUT_DIRECTIONS_NAME = "Directions"
    OUTPUT_STOPS_NAME = "Stops"
    # Map the user-facing "Preserve Terminal Stops" parameter values to the
    # keyword equivalents expected by the underlying tool.
    ORDERING_KEYWORDS = {
        "Preserve First and Last" : "PRESERVE_BOTH",
        "Preserve First" : "PRESERVE_FIRST",
        "Preserve Last" : "PRESERVE_LAST",
        "Preserve None": "PRESERVE_NONE",
    }
    # Copy the base class extent fields and point entry 2 at the route service.
    EXTENT_FIELDS = NetworkAnalysisService.EXTENT_FIELDS[:]
    EXTENT_FIELDS[2] = "GPRouteService"
    #MAX_FEATURES = 1000000
    # Index of the restrictions parameter in the remote tool's signature.
    REMOTE_TOOL_RESTRICTIONS_PARAM_INDEX = 14
    TOOL_NAME = "FindRoutes_na"
    # Key under the portal's helperServices entry for the asynchronous route service.
    HELPER_SERVICES_KEY = "asyncRoute"
def __init__(self, *args, **kwargs):
'''Constructor'''
#Call the base class constructor to sets the common tool parameters as instance attributes
super(FindRoutes, self).__init__(*args, **kwargs)
#Store tool parameters as instance attributes
self.stops = kwargs.get("Stops", None)
self.reorderStops = kwargs.get("Reorder_Stops_to_Find_Optimal_Routes", None)
self.orderingType = kwargs.get("Preserve_Terminal_Stops", None)
self.returnToStart = kwargs.get("Return_to_Start", None)
self.useTimeWindows = kwargs.get("Use_Time_Windows", None)
self.timeZoneForTimeWindows = kwargs.get("Time_Zone_for_Time_Windows", None)
self.routeShape = kwargs.get("Route_Shape", None)
self.routeLineSimplicationTolerance = kwargs.get("Route_Line_Simplification_Tolerance", None)
#Set simplification tolerance to None if value is 0 or not specified
if self.routeLineSimplicationTolerance:
if str_to_float(self.routeLineSimplicationTolerance.split(" ")[0]) == 0:
self.routeLineSimplicationTolerance = None
else:
self.routeLineSimplicationTolerance = None
self.populateRouteEdges = kwargs.get("Populate_Route_Edges", None)
self.populateDirections = kwargs.get("Populate_Directions", None)
self.directionsLanguage = kwargs.get("Directions_Language", None)
self.directionsDistanceUnits = kwargs.get("Directions_Distance_Units", None)
self.directionsStyleName = kwargs.get("Directions_Style_Name", None)
self.saveRouteData = kwargs.get("Save_Route_Data", None)
#Print tool parameter values for debugging
if self.logger.DEBUG:
for param in sorted(kwargs):
self.logger.debug(u"{0}: {1}".format(param, kwargs[param]))
#derived outputs
self.outputRoutes = os.path.join(self.outputGeodatabase, self.OUTPUT_ROUTES_NAME)
self.outputRouteEdges = os.path.join(self.outputGeodatabase, self.OUTPUT_ROUTE_EDGES_NAME)
self.outputDirections = os.path.join(self.outputGeodatabase, self.OUTPUT_DIRECTIONS_NAME)
self.outputStops = os.path.join(self.outputGeodatabase, self.OUTPUT_STOPS_NAME)
self.outputRouteData = | |
# -*- coding: utf-8 -*-
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import collections
import datetime
import ipaddress
import os
import pytest
import pytz
import six
from cryptography import utils, x509
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat._der import (
BIT_STRING, CONSTRUCTED, CONTEXT_SPECIFIC, DERReader, GENERALIZED_TIME,
INTEGER, OBJECT_IDENTIFIER, PRINTABLE_STRING, SEQUENCE, SET, UTC_TIME
)
from cryptography.hazmat.backends.interfaces import (
DSABackend, EllipticCurveBackend, RSABackend, X509Backend
)
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import (
dsa, ec, ed25519, ed448, padding, rsa
)
from cryptography.hazmat.primitives.asymmetric.utils import (
decode_dss_signature
)
from cryptography.x509.name import _ASN1Type
from cryptography.x509.oid import (
AuthorityInformationAccessOID, ExtendedKeyUsageOID, ExtensionOID,
NameOID, SignatureAlgorithmOID
)
from ..hazmat.primitives.fixtures_dsa import DSA_KEY_2048
from ..hazmat.primitives.fixtures_ec import EC_KEY_SECP256R1
from ..hazmat.primitives.fixtures_rsa import RSA_KEY_2048, RSA_KEY_512
from ..hazmat.primitives.test_ec import _skip_curve_unsupported
from ..utils import load_vectors_from_file
@utils.register_interface(x509.ExtensionType)
class DummyExtension(object):
    # Minimal ExtensionType stand-in with an arbitrary OID, used by the tests to
    # exercise handling of unknown/unsupported extensions.
    oid = x509.ObjectIdentifier("1.2.3.4")
@utils.register_interface(x509.GeneralName)
class FakeGeneralName(object):
    # Minimal GeneralName implementation wrapping an arbitrary value, used by the
    # tests to exercise handling of unsupported general name types.
    def __init__(self, value):
        self._value = value

    value = utils.read_only_property("_value")
def _load_cert(filename, loader, backend):
    """Load a certificate/CRL fixture from the vectors tree with the given loader."""
    return load_vectors_from_file(
        filename=filename,
        loader=lambda pemfile: loader(pemfile.read(), backend),
        mode="rb",
    )
# Lightweight view of the fields _parse_cert extracts from a DER certificate:
# the ASN.1 tags of the notBefore/notAfter times plus the raw issuer and
# subject SEQUENCE elements.
ParsedCertificate = collections.namedtuple(
    "ParsedCertificate",
    ["not_before_tag", "not_after_tag", "issuer", "subject"]
)
def _parse_cert(der):
    """Parse a DER certificate just far enough to extract the fields the tests need.

    Returns a ParsedCertificate holding the ASN.1 tags of the validity times and
    the raw issuer/subject elements.
    """
    # See the Certificate structure, defined in RFC 5280.
    with DERReader(der).read_single_element(SEQUENCE) as cert:
        tbs_cert = cert.read_element(SEQUENCE)
        # Skip outer signature algorithm
        _ = cert.read_element(SEQUENCE)
        # Skip signature
        _ = cert.read_element(BIT_STRING)

    # Walk the TBSCertificate, discarding everything except issuer, validity
    # and subject. The reads must stay in exact DER field order.
    with tbs_cert:
        # Skip version
        _ = tbs_cert.read_optional_element(CONTEXT_SPECIFIC | CONSTRUCTED | 0)
        # Skip serialNumber
        _ = tbs_cert.read_element(INTEGER)
        # Skip inner signature algorithm
        _ = tbs_cert.read_element(SEQUENCE)
        issuer = tbs_cert.read_element(SEQUENCE)
        validity = tbs_cert.read_element(SEQUENCE)
        subject = tbs_cert.read_element(SEQUENCE)
        # Skip subjectPublicKeyInfo
        _ = tbs_cert.read_element(SEQUENCE)
        # Skip issuerUniqueID
        _ = tbs_cert.read_optional_element(CONTEXT_SPECIFIC | CONSTRUCTED | 1)
        # Skip subjectUniqueID
        _ = tbs_cert.read_optional_element(CONTEXT_SPECIFIC | CONSTRUCTED | 2)
        # Skip extensions
        _ = tbs_cert.read_optional_element(CONTEXT_SPECIFIC | CONSTRUCTED | 3)

    # Only the tags of the two validity times are needed (UTCTime vs GeneralizedTime).
    with validity:
        not_before_tag, _ = validity.read_any_element()
        not_after_tag, _ = validity.read_any_element()

    return ParsedCertificate(
        not_before_tag=not_before_tag,
        not_after_tag=not_after_tag,
        issuer=issuer,
        subject=subject,
    )
@pytest.mark.requires_backend_interface(interface=X509Backend)
class TestCertificateRevocationList(object):
def test_load_pem_crl(self, backend):
    """A PEM CRL loads and exposes fingerprint and signature-algorithm metadata."""
    loaded = _load_cert(
        os.path.join("x509", "custom", "crl_all_reasons.pem"),
        x509.load_pem_x509_crl,
        backend
    )
    assert isinstance(loaded, x509.CertificateRevocationList)
    hex_fingerprint = binascii.hexlify(loaded.fingerprint(hashes.SHA1()))
    assert hex_fingerprint == b"3234b0cb4c0cedf6423724b736729dcfc9e441ef"
    assert isinstance(loaded.signature_hash_algorithm, hashes.SHA256)
    assert loaded.signature_algorithm_oid == SignatureAlgorithmOID.RSA_WITH_SHA256
def test_load_der_crl(self, backend):
    """A DER CRL loads and exposes fingerprint and signature-hash metadata."""
    loaded = _load_cert(
        os.path.join("x509", "PKITS_data", "crls", "GoodCACRL.crl"),
        x509.load_der_x509_crl,
        backend
    )
    assert isinstance(loaded, x509.CertificateRevocationList)
    hex_fingerprint = binascii.hexlify(loaded.fingerprint(hashes.SHA1()))
    assert hex_fingerprint == b"dd3db63c50f4c4a13e090f14053227cb1011a5ad"
    assert isinstance(loaded.signature_hash_algorithm, hashes.SHA256)
def test_invalid_pem(self, backend):
with pytest.raises(ValueError):
x509.load_pem_x509_crl(b"notacrl", backend)
def test_invalid_der(self, backend):
with pytest.raises(ValueError):
x509.load_der_x509_crl(b"notacrl", backend)
def test_unknown_signature_algorithm(self, backend):
crl = _load_cert(
os.path.join(
"x509", "custom", "crl_md2_unknown_crit_entry_ext.pem"
),
x509.load_pem_x509_crl,
backend
)
with pytest.raises(UnsupportedAlgorithm):
crl.signature_hash_algorithm()
def test_issuer(self, backend):
crl = _load_cert(
os.path.join("x509", "PKITS_data", "crls", "GoodCACRL.crl"),
x509.load_der_x509_crl,
backend
)
assert isinstance(crl.issuer, x509.Name)
assert list(crl.issuer) == [
x509.NameAttribute(x509.OID_COUNTRY_NAME, u'US'),
x509.NameAttribute(
x509.OID_ORGANIZATION_NAME, u'Test Certificates 2011'
),
x509.NameAttribute(x509.OID_COMMON_NAME, u'Good CA')
]
assert crl.issuer.get_attributes_for_oid(x509.OID_COMMON_NAME) == [
x509.NameAttribute(x509.OID_COMMON_NAME, u'Good CA')
]
def test_equality(self, backend):
crl1 = _load_cert(
os.path.join("x509", "PKITS_data", "crls", "GoodCACRL.crl"),
x509.load_der_x509_crl,
backend
)
crl2 = _load_cert(
os.path.join("x509", "PKITS_data", "crls", "GoodCACRL.crl"),
x509.load_der_x509_crl,
backend
)
crl3 = _load_cert(
os.path.join("x509", "custom", "crl_all_reasons.pem"),
x509.load_pem_x509_crl,
backend
)
assert crl1 == crl2
assert crl1 != crl3
assert crl1 != object()
def test_update_dates(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "crl_all_reasons.pem"),
x509.load_pem_x509_crl,
backend
)
assert isinstance(crl.next_update, datetime.datetime)
assert isinstance(crl.last_update, datetime.datetime)
assert crl.next_update.isoformat() == "2016-01-01T00:00:00"
assert crl.last_update.isoformat() == "2015-01-01T00:00:00"
def test_revoked_cert_retrieval(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "crl_all_reasons.pem"),
x509.load_pem_x509_crl,
backend
)
for r in crl:
assert isinstance(r, x509.RevokedCertificate)
# Check that len() works for CRLs.
assert len(crl) == 12
def test_get_revoked_certificate_by_serial_number(self, backend):
crl = _load_cert(
os.path.join(
"x509", "PKITS_data", "crls", "LongSerialNumberCACRL.crl"),
x509.load_der_x509_crl,
backend
)
serial_number = 725064303890588110203033396814564464046290047507
revoked = crl.get_revoked_certificate_by_serial_number(serial_number)
assert revoked.serial_number == serial_number
assert crl.get_revoked_certificate_by_serial_number(500) is None
def test_revoked_cert_retrieval_retain_only_revoked(self, backend):
"""
This test attempts to trigger the crash condition described in
https://github.com/pyca/cryptography/issues/2557
PyPy does gc at its own pace, so it will only be reliable on CPython.
"""
revoked = _load_cert(
os.path.join("x509", "custom", "crl_all_reasons.pem"),
x509.load_pem_x509_crl,
backend
)[11]
assert revoked.revocation_date == datetime.datetime(2015, 1, 1, 0, 0)
assert revoked.serial_number == 11
def test_extensions(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "crl_ian_aia_aki.pem"),
x509.load_pem_x509_crl,
backend
)
crl_number = crl.extensions.get_extension_for_oid(
ExtensionOID.CRL_NUMBER
)
aki = crl.extensions.get_extension_for_class(
x509.AuthorityKeyIdentifier
)
aia = crl.extensions.get_extension_for_class(
x509.AuthorityInformationAccess
)
ian = crl.extensions.get_extension_for_class(
x509.IssuerAlternativeName
)
assert crl_number.value == x509.CRLNumber(1)
assert crl_number.critical is False
assert aki.value == x509.AuthorityKeyIdentifier(
key_identifier=(
b'<KEY>'
),
authority_cert_issuer=None,
authority_cert_serial_number=None
)
assert aia.value == x509.AuthorityInformationAccess([
x509.AccessDescription(
AuthorityInformationAccessOID.CA_ISSUERS,
x509.DNSName(u"cryptography.io")
)
])
assert ian.value == x509.IssuerAlternativeName([
x509.UniformResourceIdentifier(u"https://cryptography.io"),
])
def test_delta_crl_indicator(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "crl_delta_crl_indicator.pem"),
x509.load_pem_x509_crl,
backend
)
dci = crl.extensions.get_extension_for_oid(
ExtensionOID.DELTA_CRL_INDICATOR
)
assert dci.value == x509.DeltaCRLIndicator(12345678901234567890)
assert dci.critical is False
def test_signature(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "crl_all_reasons.pem"),
x509.load_pem_x509_crl,
backend
)
assert crl.signature == binascii.unhexlify(
b"536a5a0794f68267361e7bc2f19167a3e667a2ab141535616855d8deb2ba1af"
b"9fd4546b1fe76b454eb436af7b28229fedff4634dfc9dd92254266219ae0ea8"
b"75d9ff972e9a2da23d5945f073da18c50a4265bfed9ca16586347800ef49dd1"
b"6856d7265f4f3c498a57f04dc04404e2bd2e2ada1f5697057aacef779a18371"
b"c621edc9a5c2b8ec1716e8fa22feeb7fcec0ce9156c8d344aa6ae8d1a5d99d0"
b"9386df36307df3b63c83908f4a61a0ff604c1e292ad63b349d1082ddd7ae1b7"
b"c178bba995523ec6999310c54da5706549797bfb1230f5593ba7b4353dade4f"
b"d2be13a57580a6eb20b5c4083f000abac3bf32cd8b75f23e4c8f4b3a79e1e2d"
b"58a472b0"
)
def test_tbs_certlist_bytes(self, backend):
crl = _load_cert(
os.path.join("x509", "PKITS_data", "crls", "GoodCACRL.crl"),
x509.load_der_x509_crl,
backend
)
ca_cert = _load_cert(
os.path.join("x509", "PKITS_data", "certs", "GoodCACert.crt"),
x509.load_der_x509_certificate,
backend
)
ca_cert.public_key().verify(
crl.signature, crl.tbs_certlist_bytes,
padding.PKCS1v15(), crl.signature_hash_algorithm
)
def test_public_bytes_pem(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "crl_empty.pem"),
x509.load_pem_x509_crl,
backend
)
# Encode it to PEM and load it back.
crl = x509.load_pem_x509_crl(crl.public_bytes(
encoding=serialization.Encoding.PEM,
), backend)
assert len(crl) == 0
assert crl.last_update == datetime.datetime(2015, 12, 20, 23, 44, 47)
assert crl.next_update == datetime.datetime(2015, 12, 28, 0, 44, 47)
def test_public_bytes_der(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "crl_all_reasons.pem"),
x509.load_pem_x509_crl,
backend
)
# Encode it to DER and load it back.
crl = x509.load_der_x509_crl(crl.public_bytes(
encoding=serialization.Encoding.DER,
), backend)
assert len(crl) == 12
assert crl.last_update == datetime.datetime(2015, 1, 1, 0, 0, 0)
assert crl.next_update == datetime.datetime(2016, 1, 1, 0, 0, 0)
@pytest.mark.parametrize(
("cert_path", "loader_func", "encoding"),
[
(
os.path.join("x509", "custom", "crl_all_reasons.pem"),
x509.load_pem_x509_crl,
serialization.Encoding.PEM,
),
(
os.path.join("x509", "PKITS_data", "crls", "GoodCACRL.crl"),
x509.load_der_x509_crl,
serialization.Encoding.DER,
),
]
)
def test_public_bytes_match(self, cert_path, loader_func, encoding,
backend):
crl_bytes = load_vectors_from_file(
cert_path, lambda pemfile: pemfile.read(), mode="rb"
)
crl = loader_func(crl_bytes, backend)
serialized = crl.public_bytes(encoding)
assert serialized == crl_bytes
def test_public_bytes_invalid_encoding(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "crl_empty.pem"),
x509.load_pem_x509_crl,
backend
)
with pytest.raises(TypeError):
crl.public_bytes('NotAnEncoding')
def test_verify_bad(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "invalid_signature.pem"),
x509.load_pem_x509_crl,
backend
)
crt = _load_cert(
os.path.join("x509", "custom", "invalid_signature.pem"),
x509.load_pem_x509_certificate,
backend
)
assert not crl.is_signature_valid(crt.public_key())
def test_verify_good(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "valid_signature.pem"),
x509.load_pem_x509_crl,
backend
)
crt = _load_cert(
os.path.join("x509", "custom", "valid_signature.pem"),
x509.load_pem_x509_certificate,
backend
)
assert crl.is_signature_valid(crt.public_key())
def test_verify_argument_must_be_a_public_key(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "valid_signature.pem"),
x509.load_pem_x509_crl,
backend
)
with pytest.raises(TypeError):
crl.is_signature_valid("not a public key")
with pytest.raises(TypeError):
crl.is_signature_valid(object)
@pytest.mark.requires_backend_interface(interface=X509Backend)
class TestRevokedCertificate(object):
    def test_revoked_basics(self, backend):
        """Every entry exposes serial number, revocation date and extensions.

        The vector's serial numbers are sequential starting at 0, all revoked
        on the same date.
        """
        crl = _load_cert(
            os.path.join("x509", "custom", "crl_all_reasons.pem"),
            x509.load_pem_x509_crl,
            backend
        )
        for i, rev in enumerate(crl):
            assert isinstance(rev, x509.RevokedCertificate)
            assert isinstance(rev.serial_number, int)
            assert isinstance(rev.revocation_date, datetime.datetime)
            assert isinstance(rev.extensions, x509.Extensions)
            assert rev.serial_number == i
            assert rev.revocation_date.isoformat() == "2015-01-01T00:00:00"
    def test_revoked_extensions(self, backend):
        """Entry extensions are absent, retrievable, and cover all reasons."""
        crl = _load_cert(
            os.path.join("x509", "custom", "crl_all_reasons.pem"),
            x509.load_pem_x509_crl,
            backend
        )
        # Expected CertificateIssuer value shared by entries that carry one.
        exp_issuer = [
            x509.DirectoryName(x509.Name([
                x509.NameAttribute(x509.OID_COUNTRY_NAME, u"US"),
                x509.NameAttribute(x509.OID_COMMON_NAME, u"cryptography.io"),
            ]))
        ]
        # First revoked cert doesn't have extensions, test if it is handled
        # correctly.
        rev0 = crl[0]
        # It should return an empty Extensions object.
        assert isinstance(rev0.extensions, x509.Extensions)
        assert len(rev0.extensions) == 0
        with pytest.raises(x509.ExtensionNotFound):
            rev0.extensions.get_extension_for_oid(x509.OID_CRL_REASON)
        with pytest.raises(x509.ExtensionNotFound):
            rev0.extensions.get_extension_for_oid(x509.OID_CERTIFICATE_ISSUER)
        with pytest.raises(x509.ExtensionNotFound):
            rev0.extensions.get_extension_for_oid(x509.OID_INVALIDITY_DATE)
        # Test manual retrieval of extension values.
        rev1 = crl[1]
        assert isinstance(rev1.extensions, x509.Extensions)
        reason = rev1.extensions.get_extension_for_class(
            x509.CRLReason).value
        assert reason == x509.CRLReason(x509.ReasonFlags.unspecified)
        issuer = rev1.extensions.get_extension_for_class(
            x509.CertificateIssuer).value
        assert issuer == x509.CertificateIssuer(exp_issuer)
        date = rev1.extensions.get_extension_for_class(
            x509.InvalidityDate).value
        assert date == x509.InvalidityDate(datetime.datetime(2015, 1, 1, 0, 0))
        # Check if all reason flags can be found in the CRL.
        flags = set(x509.ReasonFlags)
        for rev in crl:
            try:
                r = rev.extensions.get_extension_for_class(x509.CRLReason)
            except x509.ExtensionNotFound:
                # Not all revoked certs have a reason extension.
                pass
            else:
                flags.discard(r.value.reason)
        assert len(flags) == 0
def test_no_revoked_certs(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "crl_empty.pem"),
x509.load_pem_x509_crl,
backend
)
assert len(crl) == 0
def test_duplicate_entry_ext(self, backend):
crl = _load_cert(
os.path.join("x509", "custom", "crl_dup_entry_ext.pem"),
x509.load_pem_x509_crl,
backend
)
with pytest.raises(x509.DuplicateExtension):
crl[0].extensions
    def test_unsupported_crit_entry_ext(self, backend):
        """An unknown critical entry extension is exposed as raw DER bytes."""
        crl = _load_cert(
            os.path.join(
                "x509", "custom", "crl_md2_unknown_crit_entry_ext.pem"
            ),
            x509.load_pem_x509_crl,
            backend
        )
        ext = crl[0].extensions.get_extension_for_oid(
            x509.ObjectIdentifier("1.2.3.4")
        )
        assert ext.value.value == b"\n\x01\x00"
def test_unsupported_reason(self, backend):
crl = _load_cert(
os.path.join(
"x509", "custom", "crl_unsupported_reason.pem"
),
x509.load_pem_x509_crl,
backend
)
with pytest.raises(ValueError):
crl[0].extensions
def test_invalid_cert_issuer_ext(self, backend):
crl = _load_cert(
os.path.join(
"x509", "custom", "crl_inval_cert_issuer_entry_ext.pem"
),
x509.load_pem_x509_crl,
backend
)
with pytest.raises(ValueError):
crl[0].extensions
    def test_indexing(self, backend):
        """CRL indexing supports negative indices, slices, and bounds checks."""
        crl = _load_cert(
            os.path.join("x509", "custom", "crl_all_reasons.pem"),
            x509.load_pem_x509_crl,
            backend
        )
        # The vector holds 12 entries, so valid indices are -12..11.
        with pytest.raises(IndexError):
            crl[-13]
        with pytest.raises(IndexError):
            crl[12]
        assert crl[-1].serial_number == crl[11].serial_number
        assert len(crl[2:4]) == 2
        assert crl[2:4][0].serial_number == crl[2].serial_number
        assert crl[2:4][1].serial_number == crl[3].serial_number
def test_get_revoked_certificate_doesnt_reorder(self, backend):
private_key = RSA_KEY_2048.private_key(backend)
last_update = datetime.datetime(2002, 1, 1, 12, 1)
next_update = datetime.datetime(2030, | |
import os
import time
import json
import zipfile
import base64
from distutils.util import strtobool
from uuid import uuid1
import django
from django.core.exceptions import ValidationError
from django.conf import settings
from django.http.response import FileResponse, HttpResponse
from django.db.models import Q
from django.contrib.auth.models import User
from django.contrib.auth import password_validation
from rest_framework import generics, permissions, viewsets, status, pagination
from rest_framework.response import Response
from rest_framework.decorators import action
from neo4j import exceptions as neo4j_exceptions
from polyglotdb import CorpusContext
from polyglotdb.query.base.func import Count
from . import models
from . import serializers
from .utils import get_used_ports
from .tasks import import_corpus_task, run_query_task, run_enrichment_task, reset_enrichment_task, delete_enrichment_task, run_query_export_task, run_query_generate_subset_task, run_spade_script_task
import logging
log = logging.getLogger('polyglot_server')
class UserViewSet(viewsets.ModelViewSet):
    """User management endpoints.

    All standard CRUD actions require an authenticated superuser; the
    ``current_user`` and ``change_password`` actions only require an
    authenticated user.
    """
    model = User
    queryset = User.objects.all()
    serializer_class = serializers.UserSerializer
    def create(self, request, *args, **kwargs):
        """Create a user (superuser only).

        Returns 409 if the username exists, 400 if the password fails
        Django's validators, otherwise 201 with the serialized user.
        """
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not request.user.is_superuser:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        try:
            # Existence probe only; the bound value is intentionally unused.
            user = User.objects.get(username=request.data['username'])
            return Response('Username is taken.', status=status.HTTP_409_CONFLICT)
        except User.DoesNotExist:
            pass
        try:
            password_validation.validate_password(request.data['password'])
        except ValidationError as e:
            return Response(" ".join(e.messages), status=status.HTTP_400_BAD_REQUEST)
        user = User.objects.create_user(username=request.data['username'], password=request.data['password'])
        user.profile.user_type = request.data['user_type']
        user.save()
        # Apply the default permissions for the chosen role.
        user.profile.update_role_permissions()
        serialized = serializers.UserSerializer(user)
        return Response(serialized.data, status=status.HTTP_201_CREATED)
    def list(self, request, *args, **kwargs):
        """List all users with their profiles (superuser only)."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not request.user.is_superuser:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        # select_related avoids a per-user query when serializing profiles.
        users = User.objects.select_related('profile').all()
        return Response(self.serializer_class(users, many=True).data)
    def destroy(self, request, *args, **kwargs):
        """Delete a user and their tutorial-corpus database (superuser only)."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not request.user.is_superuser:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        user = self.get_object()
        # Tear down the per-user tutorial database before the cascade delete.
        tutorial_corpus = user.profile.get_tutorial_corpus()
        if tutorial_corpus is not None:
            tutorial_corpus.database.delete()
        return super(UserViewSet, self).destroy(request, *args, **kwargs)
    def retrieve(self, request, *args, **kwargs):
        """Fetch one user (superuser only)."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not request.user.is_superuser:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        return super(UserViewSet, self).retrieve(request, *args, **kwargs)
    def update(self, request, *args, **kwargs):
        """Update username, role, and per-corpus permissions (superuser only).

        If the role changed, role defaults overwrite the submitted per-corpus
        permission values; otherwise the submitted values are applied field by
        field.
        """
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not request.user.is_superuser:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        user = self.get_object()
        user.username = request.data['username']
        # A role change means the request's permission payload is stale.
        ignore_perms = request.data['user_type'] != user.profile.user_type
        if ignore_perms:
            user.profile.user_type = request.data['user_type']
        user.save()
        for corpus_id, perm_data in request.data['corpus_permissions'].items():
            perm = models.CorpusPermissions.objects.get(user=user, corpus_id=int(corpus_id))
            if ignore_perms:
                perm.set_role_permissions()
            else:
                for k, v in perm_data.items():
                    if k == 'corpus':
                        continue
                    setattr(perm, k, v)
            perm.save()
        # Re-fetch so the response reflects the persisted state.
        user = self.get_object()
        serialized = serializers.UserSerializer(user)
        return Response(serialized.data, status=status.HTTP_200_OK)
    @action(detail=False, methods=['get'])
    def current_user(self, request):
        """Return the authenticated user's own record."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        return Response(self.serializer_class(request.user).data)
    @action(detail=False, methods=['put'])
    def change_password(self, request):
        """Change the authenticated user's own password after validation."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        try:
            password_validation.validate_password(request.data['password'])
        except ValidationError as e:
            return Response(" ".join(e.messages), status=status.HTTP_400_BAD_REQUEST)
        request.user.set_password(request.data['password'])
        request.user.save()
        return Response(self.serializer_class(request.user).data)
    @action(detail=True, methods=['post'])
    def create_tutorial_corpus(self, request, pk=None):
        """Create (or return the existing) tutorial corpus for a user.

        Superuser only; 304 if the user already has one, 201 otherwise.
        """
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not request.user.is_superuser:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        user = self.get_object()
        c = user.profile.get_tutorial_corpus()
        if c:
            return Response(serializers.CorpusSerializer(c).data,
                            status=status.HTTP_304_NOT_MODIFIED)
        c = user.profile.create_tutorial_corpus()
        return Response(serializers.CorpusSerializer(c).data,
                        status=status.HTTP_201_CREATED)
class AppViewSet(viewsets.ViewSet):
    """Read-only listing of installed iscan app short names."""
    def list(self, request):
        """Return ``['base']`` plus every installed app under ``iscan.``."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        iscan_apps = [
            name.replace('iscan.', '')
            for name in settings.INSTALLED_APPS
            if name.startswith('iscan.')
        ]
        return Response(['base'] + iscan_apps)
class RoleChoiceViewSet(viewsets.ViewSet):
    """Superuser-only listing of the available user role choices."""
    def list(self, request):
        """Return Profile.TYPE_CHOICES as ``{'id': ..., 'name': ...}`` dicts."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not request.user.is_superuser:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        return Response(
            [{'id': value, 'name': label}
             for value, label in models.Profile.TYPE_CHOICES]
        )
class CorpusTypeChoiceViewSet(viewsets.ViewSet):
    """Superuser-only listing of the available corpus type choices."""
    def list(self, request):
        """Return Corpus.TYPE_CHOICES as ``{'id': ..., 'name': ...}`` dicts."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not request.user.is_superuser:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        return Response(
            [{'id': value, 'name': label}
             for value, label in models.Corpus.TYPE_CHOICES]
        )
class DatabaseViewSet(viewsets.ModelViewSet):
    """CRUD plus lifecycle management (start/stop/refresh) for databases.

    Non-superusers may only act on databases backing a corpus for which they
    hold ``can_access_database`` permission.
    """
    model = models.Database
    queryset = models.Database.objects.all()
    serializer_class = serializers.DatabaseSerializer
    def create(self, request, *args, **kwargs):
        """Create a database, auto-allocating any ports not supplied.

        Superuser only.  Each unspecified port is assigned the first free
        port at or above the configured base for its service (neo4j or
        influxdb), skipping ports already used on the host or already
        allocated earlier in this request.
        """
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not request.user.is_superuser:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        used_ports = get_used_ports()
        current_ports = []
        data_dict = {'name': request.data.get('name'),
                     'neo4j_http_port': request.data.get('neo4j_http_port', None),
                     'neo4j_https_port': request.data.get('neo4j_https_port', None),
                     'neo4j_bolt_port': request.data.get('neo4j_bolt_port', None),
                     'neo4j_admin_port': request.data.get('neo4j_admin_port', None),
                     'influxdb_http_port': request.data.get('influxdb_http_port', None),
                     'influxdb_meta_port': request.data.get('influxdb_meta_port', None),
                     'influxdb_udp_port': request.data.get('influxdb_udp_port', None),
                     'influxdb_admin_port': request.data.get('influxdb_admin_port', None)}
        # Next candidate port per service, advanced as ports are taken.
        ports = {'neo4j': settings.BASE_NEO4J_PORT, 'influxdb': settings.BASE_INFLUXDB_PORT}
        for k, v in data_dict.items():
            if 'port' not in k:
                continue
            if v is None:
                # BUG FIX: the influxdb branch previously assigned a bogus
                # placeholder key, so any unset influxdb port raised KeyError.
                port_key = 'neo4j' if 'neo4j' in k else 'influxdb'
                # Advance until a port is free both on the host and within
                # this request's allocations.
                while True:
                    if ports[port_key] not in used_ports and ports[port_key] not in current_ports:
                        data_dict[k] = ports[port_key]
                        current_ports.append(ports[port_key])
                        ports[port_key] += 1
                        break
                    ports[port_key] += 1
        serializer = serializers.DatabaseSerializer(data=data_dict)
        if serializer.is_valid():
            database = serializer.save()
            database.install()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def list(self, request, *args, **kwargs):
        """List all databases (superuser) or only permitted ones (others)."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if request.user.is_superuser:
            databases = models.Database.objects.all()
        else:
            databases = models.Database.objects.filter(corpora__user_permissions__user=request.user,
                                                       corpora__user_permissions__can_access_database=True).all()
        return Response(self.serializer_class(databases, many=True).data)
    @action(detail=False, methods=['post'])
    def refresh_databases(self, request):
        """Create Database/Corpus records for new datasets on disk.

        Superuser only.  Scans SOURCE_DATA_DIRECTORY for dataset directories
        without a corpus record and creates one, inferring the input format
        from the dataset's configuration data when present.
        """
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if request.user.is_superuser:
            corpora = models.Corpus.objects.all()
            corpus_names = [x.name for x in corpora]
            # Map configuration strings to Corpus input-format constants.
            # BUG FIX: this block previously referenced bare ``Corpus``,
            # which is not imported in this module (NameError at runtime).
            format_map = {'MFA': models.Corpus.MFA,
                          'MAUS': models.Corpus.MAUS,
                          'FAVE': models.Corpus.FAVE,
                          'LABCAT': models.Corpus.LABCAT,
                          'PARTITUR': models.Corpus.PARTITUR,
                          'TIMIT': models.Corpus.TIMIT,
                          'BUCKEYE': models.Corpus.BUCKEYE}
            for dataset in os.listdir(settings.SOURCE_DATA_DIRECTORY):
                if dataset not in corpus_names:
                    d, _ = models.Database.objects.get_or_create(name=dataset)
                    c = models.Corpus.objects.create(name=dataset, database=d)
                    if 'input_format' in c.configuration_data:
                        input_format = c.configuration_data['input_format'][0].upper()
                        c.input_format = format_map.get(input_format, input_format)
                        c.save()
            databases = models.Database.objects.all()
            return Response(self.serializer_class(databases, many=True).data)
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    @action(detail=True, methods=['post'])
    def start(self, request, pk=None):
        """Start the database; 423 with the error text on failure."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        database = self.get_object()
        if not request.user.is_superuser:
            permissions = models.CorpusPermissions.objects.filter(user=request.user, corpus__database=database,
                                                                  can_access_database=True).all()
            if not len(permissions):
                return Response(status=status.HTTP_401_UNAUTHORIZED)
        try:
            success = database.start()
        except Exception as e:
            return Response(data=str(e), status=status.HTTP_423_LOCKED)
        return Response(data=success)
    @action(detail=True, methods=['post'])
    def stop(self, request, pk=None):
        """Stop the database; 500 with the error text on failure."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        database = self.get_object()
        if not request.user.is_superuser:
            permissions = models.CorpusPermissions.objects.filter(user=request.user, corpus__database=database,
                                                                  can_access_database=True).all()
            if not len(permissions):
                return Response(status=status.HTTP_401_UNAUTHORIZED)
        try:
            success = database.stop()
        except Exception as e:
            return Response(data=str(e), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        return Response(data=success)
    def destroy(self, request, pk=None):
        """Delete a database (superuser only)."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not request.user.is_superuser:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        return super(DatabaseViewSet, self).destroy(request, pk)
    @action(detail=True, methods=['get'])
    def ports(self, request, pk=None):
        """Return the database's port mapping (permission-gated)."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        database = self.get_object()
        if not request.user.is_superuser:
            permissions = models.CorpusPermissions.objects.filter(user=request.user, corpus__database=database).all()
            permissions = [x.can_access_database for x in permissions]
            if not len(permissions) or not any(permissions):
                return Response(status=status.HTTP_401_UNAUTHORIZED)
        data = database.ports
        return Response(data)
    @action(detail=True, methods=['get'])
    def data_directory(self, request, pk=None):
        """Return the database's on-disk directory (permission-gated)."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        database = self.get_object()
        if not request.user.is_superuser:
            permissions = models.CorpusPermissions.objects.filter(user=request.user, corpus__database=database).all()
            permissions = [x.can_access_database for x in permissions]
            if not len(permissions) or not any(permissions):
                return Response(status=status.HTTP_401_UNAUTHORIZED)
        data = database.directory
        return Response(data)
    @action(detail=True, methods=['get'])
    def corpora(self, request, pk=None):
        """List the corpora backed by this database (permission-gated)."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        database = self.get_object()
        if not request.user.is_superuser:
            permissions = models.CorpusPermissions.objects.filter(user=request.user, corpus__database=database).all()
            permissions = [x.can_access_database for x in permissions]
            if not len(permissions) or not any(permissions):
                return Response(status=status.HTTP_401_UNAUTHORIZED)
        corpora = models.Corpus.objects.filter(database=database)
        serializer = serializers.CorpusSerializer(corpora, many=True)
        return Response(serializer.data)
class CorpusViewSet(viewsets.ModelViewSet):
model = models.Corpus
queryset = models.Corpus.objects.all()
serializer_class = serializers.CorpusSerializer
    def list(self, request, *args, **kwargs):
        """List corpora the authenticated user holds can_query permission on."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        corpora = models.Corpus.objects.filter(user_permissions__user=request.user, user_permissions__can_query=True).all()
        return Response(self.serializer_class(corpora, many=True).data)
    def create(self, request, *args, **kwargs):
        """Create a corpus record tied to an existing database (superuser only)."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not request.user.is_superuser:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        data = {k: v for k, v in request.data.items()}
        data['database'] = models.Database.objects.get(pk=int(data['database']))
        # NOTE(review): source_directory is computed but never passed to
        # Corpus.objects.create below — confirm whether it should be.
        data['source_directory'] = os.path.join(settings.SOURCE_DATA_DIRECTORY, data['source_directory'])
        instance = models.Corpus.objects.create(name=data['name'], database=data['database'])
        return Response(self.serializer_class(instance).data)
    @action(detail=True, methods=['post'])
    def import_corpus(self, request, pk=None):
        """Kick off an asynchronous corpus import; 202 with the task id header."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        corpus = self.get_object()
        permissions = corpus.user_permissions.filter(user=request.user, can_query=True).all()
        if not len(permissions):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not corpus.database.is_running:
            return Response("Database is not running, cannot import",
                            status=status.HTTP_400_BAD_REQUEST)
        response = Response('Import started', status=status.HTTP_202_ACCEPTED)
        task_id = import_corpus_task.delay(corpus.pk)
        response["task"] = task_id.task_id
        # NOTE(review): the sleep presumably gives the worker time to pick up
        # the task before the client polls status — confirm it is needed.
        time.sleep(1)
        return response
    @action(detail=True, methods=['get'])
    def status(self, request, pk=None):
        """Report corpus availability as a plain string.

        One of: 'database not running', 'enrichment running',
        'query running', or 'ready'.  The method name shadows the imported
        ``status`` module only as a class attribute; lookups inside the body
        still resolve to the module.
        """
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        corpus = self.get_object()
        permissions = corpus.user_permissions.filter(user=request.user, can_query=True).all()
        if not len(permissions):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        if not corpus.database.is_running:
            return Response("database not running")
        running_enrichments = models.Enrichment.objects.filter(corpus=corpus, running=True).all()
        if len(running_enrichments):
            return Response('enrichment running')
        running_queries = models.Query.objects.filter(corpus=corpus, running=True).all()
        if len(running_queries):
            return Response('query running')
        return Response('ready')
    @action(detail=True, methods=['get'])
    def property_values(self, request, pk=None):
        """Return the sorted distinct values of a property on an annotation type.

        Query params: ``type`` (annotation type name), ``prop`` (default
        'label').  NOTE(review): unlike sibling actions this performs no
        corpus permission check, and the local ``type`` shadows the builtin —
        confirm whether a can_query check should be added.
        """
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        type = request.GET.get('type', None)
        prop = request.GET.get('prop', 'label')
        corpus = self.get_object()
        with CorpusContext(corpus.config) as c:
            ann = getattr(c, type)
            resp = sorted(c.query_metadata(ann).levels(getattr(ann, prop)))
        return Response(resp)
    @action(detail=True, methods=['get'])
    def autocomplete(self, request, pk=None):
        """Return up to 10 distinct property values matching a prefix.

        Query params: ``prefix`` (required), ``category`` ('speaker',
        'discourse', or an annotation type), ``prop`` (default 'label').
        NOTE(review): a missing ``category`` raises TypeError on the
        ``+= '_type'`` line before the prefix validation runs — confirm
        whether category should be validated first.
        """
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        prefix = request.GET.get('prefix', None)
        category = request.GET.get('category', None)
        # Speaker/Discourse are node labels; everything else is a type node.
        if category in ['speaker', 'discourse']:
            category = category.title()
        else:
            category += '_type'
        prop = request.GET.get('prop', 'label')
        if prefix is None:
            return Response("Please provide a prefix",
                            status=status.HTTP_400_BAD_REQUEST)
        for x in ['\'', '\"']:
            #Escape characters
            prefix = prefix.replace(x, '\\{}'.format(x))
        corpus = self.get_object()
        with CorpusContext(corpus.config) as c:
            # Case-insensitive prefix match via Cypher regex.
            statement = """MATCH (n:{category}:{corpus_name})
            WHERE n.{prop} =~ '(?i){prefix}.*'
            RETURN DISTINCT n.{prop}
            LIMIT 10""".format(corpus_name=c.cypher_safe_name, category=category, prop=prop, prefix=prefix)
            resp = c.execute_cypher(statement).value()
        return Response(resp)
@action(detail=True, methods=['get'])
def speakers(self, request, pk=None):
if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
return Response(status=status.HTTP_401_UNAUTHORIZED)
corpus = self.get_object()
permissions = corpus.user_permissions.filter(user=request.user, can_query=True).all()
if not len(permissions):
return Response(status=status.HTTP_401_UNAUTHORIZED)
with CorpusContext(corpus.config) as c:
speakers = c.speakers
return Response(speakers)
@action(detail=True, methods=['get'])
def words(self, request, pk=None):
if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
return Response(status=status.HTTP_401_UNAUTHORIZED)
count = request.GET.get('count', None)
corpus = self.get_object()
permissions = corpus.user_permissions.filter(user=request.user, can_query=True).all()
if not len(permissions):
return Response(status=status.HTTP_401_UNAUTHORIZED)
if count is None or not count.isdigit():
return Response(
'There must be a requested number of words',
status=status.HTTP_400_BAD_REQUEST)
with CorpusContext(corpus.config) as c:
statement | |
<reponame>microtodd/yamledit
#!/usr/bin/python
#
# yamledit.py
# github.com/microtodd/yamledit
#
import os
import sys
import getopt
import ruamel.yaml
from ruamel import yaml
from ruamel.yaml.scalarstring import SingleQuotedScalarString, DoubleQuotedScalarString
__version__ = '0.5'
# TODO
#
# ) merge two yaml files capability
# ) Support input pipe instead of file
#
## printHelp
#
# Print the command-line usage text to STDOUT.  The body of the message is a
# runtime string and must not be reworded without updating the CLI contract.
#
def printHelp():
    print ''' yamledit.py
    Editor for Commandline for YAML
    Options:
        -h Print this help
        -v Version
        -f <filename> Input file
        -o <filename> Output file, if not specified goes to STDOUT
        -y If passed then any user confirmation is assumed 'yes'
        -q If passed then everything is silent. This option implies -y.
        You must pick one and only one: -r or -c or -n or -d or -g
        If you pick -r or -c or -d, you must specify -f as well
        <newvalue> can be a comma-separated list, which is treated as a YAML list
        -r <key> <newvalue> Replace. 'key' is of format foo.bar.biz.baz
                            If key does not exist, returns error.
                            If used it must be the last option used.
        -c <key> <newvalue> Create. 'key' is of format foo.bar.biz.baz.
                            If key already exists, will prompt to overwrite
                            unless -y is selected.
                            If used it must be the last option used.
        -n <key> <value> New file with 'key' with value 'value'.
        -d <key> Delete 'key'
        -g <key> Print the value of <key>, to STDOUT or to the filename
    '''
## printVersion
#
# Print the tool name and module __version__ to STDOUT.
#
def printVersion():
    print ' yamledit.py Version ' + str(__version__)
## createFile
#
# @param[in] filename
# @param[in] data
# @param[in] autoConfirm
# @param[in] quiet
#
def createFile(outputFileName, data, autoConfirm, quiet):
# see if file exists
if os.path.exists(outputFileName):
# See if we autoconfirmed
if autoConfirm or quiet:
pass
else:
userInput = raw_input('File \'' + str(outputFileName) + '\' exists. Overwrite? (y/n): ')
if userInput != 'y' and userInput != 'Y':
print 'Aborting.'
return
# Create the file
newFile = open(outputFileName,'w')
newFile.write( ruamel.yaml.round_trip_dump(data) )
newFile.close()
## createTxtFile
#
# @param[in] filename
# @param[in] data
# @param[in] autoConfirm
# @param[in] quiet
#
def createTxtFile(outputFileName, data, autoConfirm, quiet):
# see if file exists
if os.path.exists(outputFileName):
# See if we autoconfirmed
if autoConfirm or quiet:
pass
else:
userInput = raw_input('File \'' + str(outputFileName) + '\' exists. Overwrite? (y/n): ')
if userInput != 'y' and userInput != 'Y':
print 'Aborting.'
return
# Create the file
newFile = open(outputFileName,'w')
newFile.write( data )
newFile.close()
## replaceValue
#
# @param[in] inputFileName
# @param[in] outputFileName
# @param[in] [keyName,newValue]
# @param[in] autoConfirm
# @param[in] quiet
#
def replaceValue(inputFileName, outputFileName, values, autoConfirm, quiet):
keyName = values[0]
newValue = values[1]
inputFile = None # Handle to input file data
# Open file
try:
inputFile = open(inputFileName)
except Exception as e:
raise Exception('Could not open/parse file \'' + str(inputFileName) + '\': ' + str(e))
# Load it
data = ruamel.yaml.round_trip_load(inputFile, preserve_quotes=True)
# See if the key exists
# TODO move this piece into a method called 'findNode', and let createValue use it as well
keyPath = str(keyName).split('.')
lastNodeName = keyPath.pop()
currentNode = data
for nodeName in keyPath:
if nodeName in currentNode:
currentNode = currentNode[nodeName]
else:
raise Exception('Could not find \'' + str(keyName) + '\' in yaml file')
# Check that last key
if lastNodeName not in currentNode:
raise Exception('Could not find \'' + str(keyName) + '\' in yaml file')
# Update the value
if not quiet:
extra = ''
if str(newValue).find(',') != -1:
extra = ' (a list)'
if isinstance(currentNode[lastNodeName],str):
print 'Updating \'' + str(keyName) + '\' from \'' + currentNode[lastNodeName] + '\' to \'' + newValue + '\'' + extra
else:
print 'Updating \'' + str(keyName) + '\', which is not currently a string, to \'' + newValue + '\'' + extra
if autoConfirm == False and quiet == False:
userInput = raw_input('Continue? (y/n): ')
if userInput != 'y' and userInput != 'Y':
print 'Aborting.'
return
# See if new value is a string or a list
if str(newValue).find(',') == -1:
currentNode[lastNodeName] = newValue
else:
newValueList = str(newValue).split(',')
# If this was a trailing ',', then we treat it as a list but we are not going to add a null entry
if newValueList[-1] == '':
newValueList.pop()
currentNode[lastNodeName] = newValueList
# Output
if outputFileName is None:
print ruamel.yaml.round_trip_dump(data)
else:
createFile(outputFileName, data, autoConfirm, quiet)
## createValue
#
# @param[in] inputFileName
# @param[in] outputFileName
# @param[in] [keyName,newValue]
# @param[in] autoConfirm
# @param[in] quiet
#
def createValue(inputFileName, outputFileName, values, autoConfirm, quiet):
keyName = values[0]
newValue = values[1]
inputFile = None # Handle to input file data
# Open file
try:
inputFile = open(inputFileName)
except Exception as e:
raise Exception('Could not open/parse file \'' + str(inputFileName) + '\': ' + str(e))
# Load it
data = ruamel.yaml.round_trip_load(inputFile, preserve_quotes=True)
# See if the key exists, create the new path if necessary
keyAlreadyExists = True
keyPath = str(keyName).split('.')
lastNodeName = keyPath.pop()
currentNode = data
for nodeName in keyPath:
if nodeName in currentNode:
currentNode = currentNode[nodeName]
else:
keyAlreadyExists = False
currentNode[nodeName] = {}
currentNode = currentNode[nodeName]
if lastNodeName not in currentNode:
keyAlreadyExists = False
currentNode[lastNodeName] = {}
outputMessage = 'Creating '
if keyAlreadyExists:
outputMessage = 'Updating existing key '
if not quiet:
extra = ''
if str(newValue).find(',') != -1:
extra = ' (a list)'
if isinstance(currentNode[lastNodeName],str):
print outputMessage + '\'' + str(keyName) + '\' from \'' + currentNode[lastNodeName] + '\' to \'' + newValue + '\'' + extra
else:
print outputMessage + '\'' + str(keyName) + '\' as \'' + newValue + '\'' + extra
if autoConfirm == False and quiet == False:
userInput = raw_input('Continue? (y/n): ')
if userInput != 'y' and userInput != 'Y':
print 'Aborting.'
return
# See if new value is a string or a list
if str(newValue).find(',') == -1:
currentNode[lastNodeName] = newValue
else:
newValueList = str(newValue).split(',')
# If this was a trailing ',', then we treat it as a list but we are not going to add a null entry
if newValueList[-1] == '':
newValueList.pop()
currentNode[lastNodeName] = newValueList
# Output
if outputFileName is None:
print ruamel.yaml.round_trip_dump(data)
else:
createFile(outputFileName, data, autoConfirm, quiet)
## newFile
#
# @param[in] outputFileName
# @param[in] [keyName,newValue]
# @param[in] autoConfirm
# @param[in] quiet
#
def newFile(outputFileName, values, autoConfirm, quiet):
keyName = values[0]
newValue = values[1]
# New data
newData = ''
# See if the key exists, create the new path if necessary
numTabs = 0
keyPath = str(keyName).split('.')
lastNodeName = keyPath.pop()
for nodeName in keyPath:
# Build out the data
if numTabs == 0:
newData += str(nodeName) + ':'
# Make sure we put the applicable number of tabs in
else:
newData += '\n'
for x in range(0, numTabs):
newData += ' '
newData += str(nodeName) + ':'
numTabs += 1
# Last node, again make sure we do the applicable number of tabs
newData += '\n'
for x in range(0, numTabs):
newData += ' '
newData += lastNodeName + ': ' + newValue + '\n'
# Confirm
if autoConfirm == False and quiet == False:
userInput = raw_input('Create new yaml? (y/n): ')
if userInput != 'y' and userInput != 'Y':
print 'Aborting.'
return
# Prep the yaml object
data = ruamel.yaml.round_trip_load(newData, preserve_quotes=True)
# Output
if outputFileName is None:
print ruamel.yaml.round_trip_dump(data)
else:
createFile(outputFileName, data, autoConfirm, quiet)
## deleteKey
#
# @param[in] inputFileName
# @param[in] outputFileName
# @param[in] keyName
# @param[in] autoConfirm
# @param[in] quiet
#
def deleteKey(inputFileName, outputFileName, keyName, autoConfirm, quiet):
inputFile = None # Handle to input file data
# Open file
try:
inputFile = open(inputFileName)
except Exception as e:
raise Exception('Could not open/parse file \'' + str(inputFileName) + '\': ' + str(e))
# Load it
data = ruamel.yaml.round_trip_load(inputFile, preserve_quotes=True)
# See if the key exists
# TODO move this piece into a method called 'findNode', and let createValue use it as well
keyPath = str(keyName).split('.')
lastNodeName = keyPath.pop()
currentNode = data
for nodeName in keyPath:
if nodeName in currentNode:
currentNode = currentNode[nodeName]
else:
raise Exception('Could not find \'' + str(keyName) + '\' in yaml file')
# Check that last key
if lastNodeName not in currentNode:
raise Exception('Could not find \'' + str(keyName) + '\' in yaml file')
# Update the value
if not quiet:
if isinstance(currentNode[lastNodeName],str):
print 'Removing key \'' + str(keyName) + '\' which has value \'' + currentNode[lastNodeName] +'\''
else:
print 'Removing key \'' + str(keyName) + '\', which is not currently a string'
if autoConfirm == False and quiet == | |
local2canonical ADD INDEX( src ), ADD INDEX( local ), ADD INDEX( canonical )"
execute( c, txt )
txt = "ALTER TABLE local2canonical ADD FULLTEXT( canonical )"
execute( c, txt )
if SQLITE:
txt = "CREATE INDEX local2canonical_index ON local2canonical( src, local, canonical )"
execute( c, txt )
conn.commit()
return 0
# --------------------------------------------------------------------------
def int_build_local2canonical( src, **kwargs ):
c = kwargs[ 'c' ]
# ifile = os.path.join( "..", "Index-Sources", fb.get_source_from_src( src ), fb.Local2Canon )
source = fb.get_source_from_src( src )
ifile = conf.val( 'local2canon', source )
# ifile = Path( conf.get_source_path( source ), conf.val( 'local2canon', source ))
with open( ifile ) as fd:
for line in fd:
line = line.strip()
local, canonical = line.split( '|' )
local=local.strip()
canonical=canonical.strip()
if local and canonical:
data = ( src, local, canonical )
txt = 'INSERT INTO local2canonical ( src, local, canonical ) VALUES( %s, %s, %s )'
txt = fix_query( txt )
execute( c, txt, data )
if FULLTEXT and SQLITE:
txt = 'INSERT INTO local2canonical_fts ( src, local, canonical ) VALUES( ?, ?, ? )'
execute( c, txt, data )
# --------------------------------------------------------------------------
# Build title table from data in the Index.Json directory.
def build_titles( dc, c, conn ):
print( "\nBuilding titles", file=sys.stderr, flush=True )
txt = 'DROP TABLE IF EXISTS titles;'
execute( c, txt )
if FULLTEXT and SQLITE:
txt = 'DROP TABLE IF EXISTS titles_fts;'
execute( c, txt )
if MYSQL:
txt = """CREATE TABLE titles (
src VARCHAR(255),
local VARCHAR(255),
title_id MEDIUMINT(8) UNSIGNED,
composer VARCHAR(255),
lyricist VARCHAR(255),
sheet VARCHAR(10),
id MEDIUMINT UNSIGNED AUTO_INCREMENT,
PRIMARY KEY(id),
UNIQUE( title_id, src, local )
)
ENGINE = MYISAM
CHARACTER SET 'utf8mb4'
"""
execute( c, txt )
if SQLITE:
txt = """CREATE TABLE titles (
src VARCHAR(255),
local VARCHAR(255),
title_id MEDIUMINT(8),
composer VARCHAR(255),
lyricist VARCHAR(255),
sheet VARCHAR(10),
id MEDIUMINT AUTO_INCREMENT,
PRIMARY KEY(id),
UNIQUE( title_id, src, local )
)
"""
execute( c, txt )
if FULLTEXT and SQLITE:
txt = """CREATE VIRTUAL TABLE titles_fts USING fts5(
src,
local,
title_id,
composer UNINDEXED,
lyricist UNINDEXED,
sheet UNINDEXED,
content='titles',
content_rowid='id'
)
"""
execute( c, txt )
fb.traverse_sources( build_titles_from_one_index_source, c=c, dc=dc )
if MYSQL:
txt = "ALTER TABLE titles ADD INDEX( title_id ), ADD INDEX( local ), ADD INDEX( src )"
if SQLITE:
txt = "CREATE INDEX titles_index ON titles( title_id, local, src )"
execute( c, txt )
conn.commit()
return 0
# --------------------------------------------------------------------------
def build_titles_from_one_index_source( src, **kwargs ):
fb.get_music_index_data_by_src( src, proc_one_book, **kwargs )
# --------------------------------------------------------------------------
def proc_one_book( src, data, file, **kwargs ):
c = kwargs[ 'c' ]
dc = kwargs[ 'dc' ]
local = data[ 'local' ]
source = data[ 'source' ]
contents = data[ 'contents' ]
sheet_min = 999999
sheet_max = 0
# pages = []
prior_sheet = -1
prior_title = "prior-title"
# print( f" {local:50} {source:20} ({src})", file=sys.stderr, flush=True )
for content in contents:
title = content[ 'title' ]
title_id = fb.get_title_id_from_title( dc, title )
sheet = content[ 'sheet' ] if not content[ 'sheet' ] == '-' else None
composer = content[ 'composer' ] if 'composer' in content else None
if composer:
composer = composer.strip()
lyricist = content[ 'lyricist' ] if 'lyricist' in content else None
if lyricist:
lyricist = lyricist.strip()
proc_one_book_int( c, src, local, title_id, composer, lyricist, sheet )
if sheet:
sheet = sheet.strip()
if not sheet.isnumeric():
print( f"WARNING: Page number contains non-numerics: <{sheet}>, for {src}, {local}, {title}", file=sys.stderr, flush=True )
continue
sheet_min = min( sheet_min, int(sheet) )
sheet_max = max( sheet_max, int(sheet) )
else:
print( f"WARNING: Missing sheet number for {src}, {local}, prior sheet: {prior_sheet}, prior title: {prior_title}", file=sys.stderr, flush=True )
prior_sheet = sheet
prior_title = title
title_id = fb.get_title_id_from_title( dc, '_TitleFirst' )
proc_one_book_int( c, src, local, title_id, None, None, str(sheet_min) )
# page_mid = int( (page_min + page_max)/2)
# title_id = fb.get_title_id_from_title( dc, '_Test-Title-Mid' )
# proc_one_book_int( c, src, local, title_id, None, None, str(sheet_mid) )
title_id = fb.get_title_id_from_title( dc, '_TitleLast' )
proc_one_book_int( c, src, local, title_id, None, None, str(sheet_max) )
# pages=sorted( pages )
# print( f"src: {src}, local: {local}" )
# print( pages )
# print()
# ----------------------------------------------------------------------------------
# Buffalo contains some duplicate data, same title, different call number I think.
# INSERT IGNORE to resolve that.
def proc_one_book_int( c, src, local, title_id, composer, lyricist, sheet ):
data = ( src, local, title_id, composer, lyricist, sheet )
if MYSQL:
txt = 'INSERT IGNORE INTO titles ( src, local, title_id, composer, lyricist, sheet ) VALUES( %s, %s, %s, %s, %s, %s )'
if SQLITE:
txt = 'INSERT OR IGNORE INTO titles ( src, local, title_id, composer, lyricist, sheet ) VALUES( ?, ?, ?, ?, ?, ? )'
execute( c, txt, data )
if FULLTEXT and SQLITE:
txt = 'INSERT OR IGNORE INTO titles_fts ( src, local, title_id, composer, lyricist, sheet ) VALUES( ?, ?, ?, ?, ?, ? )'
execute( c, txt, data )
# ----------------------------------------------------------------------------------
# Build titles_distinct table from data in the Index.Json directory.
# This is first pass over Index.Json directory. Need titles_distinct to build titles.
# Redo this using set() instead of intermediate int_titles for SQLITE case.
# SELECT DISTINCT in Sqlite considers all columns distinct, Mysql only the specified column.
# No! Doing it the same way for both, working from titles_distinct
def build_titles_distinct( c, conn ):
print( "\nBuilding titles_distinct", file=sys.stderr, flush=True )
titles_distinct = set()
raw_index = []
fb.traverse_sources( build_titles_distinct_from_one_index_source, c=c, titles_distinct = titles_distinct, raw_index = raw_index ) # Builds titles_distinct set()
txt = 'DROP TABLE IF EXISTS raw_index;'
execute( c, txt )
txt = 'DROP TABLE IF EXISTS titles_distinct;'
execute( c, txt )
if FULLTEXT and SQLITE:
txt = 'DROP TABLE IF EXISTS titles_distinct_fts;'
execute( c, txt )
txt = 'DROP TABLE IF EXISTS raw_index_fts;'
execute( c, txt )
if MYSQL:
txt = """CREATE TABLE titles_distinct (
title VARCHAR(255),
title_id MEDIUMINT UNSIGNED,
PRIMARY KEY(title_id) )
ENGINE = MYISAM
CHARACTER SET 'utf8mb4'
"""
execute( c, txt )
txt = """CREATE TABLE raw_index(
title_id MEDIUMINT UNSIGNED,
src VARCHAR(255),
local VARCHAR(255),
file VARCHAR(255),
line VARCHAR(255),
id MEDIUMINT AUTO_INCREMENT,
PRIMARY KEY(id) )
ENGINE = MYISAM
CHARACTER SET 'utf8mb4'
"""
execute( c, txt )
if SQLITE:
txt = """CREATE TABLE titles_distinct (
title VARCHAR(255),
title_id INTEGER PRIMARY KEY
)
"""
execute( c, txt )
txt = """CREATE TABLE raw_index (
src VARCHAR(255),
local VARCHAR(255),
file VARCHAR(255),
line VARCHAR(255),
title_id INTEGER,
id MEDIUMINT AUTO_INCREMENT,
PRIMARY KEY(id)
)
"""
execute( c, txt )
if FULLTEXT and SQLITE:
txt = """CREATE VIRTUAL TABLE titles_distinct_fts USING fts5(
title,
title_id,
content='titles_distinct',
content_rowid='title_id'
)
"""
execute( c, txt )
txt = """CREATE VIRTUAL TABLE raw_index_fts USING fts5(
title_id,
local,
src,
file,
content='raw_index',
content_rowid='id'
)
"""
execute( c, txt )
# --------------------------------------------------------------
# Build titles_distinct table directly from titles_distinct set instead
# of from an intermediate table. Inserting title_id here, remove AUTO INCREMENT in CREATE.
for title_id, title in enumerate( sorted( titles_distinct )):
data = ( title, title_id )
txt = 'INSERT INTO titles_distinct ( title, title_id ) VALUES( %s, %s )'
txt = fix_query( txt )
execute( c, txt, data )
if FULLTEXT and SQLITE:
txt = 'INSERT INTO titles_distinct_fts ( title, title_id ) VALUES( ?, ? )'
execute( c, txt, data )
# --------------------------------------------------------------
# Add index here as using titles_distinct in get_title_id_from_title(), which is used by
# build_titles() and build_title2youtube(). Cut run time in about half.
# add_indexes() adds FULLTEXT index. Need ordinary index for get_title_id_from_title().
# PRIMARY KEY is automatically indexed, don't need to add another here.
if MYSQL:
txt = "ALTER TABLE titles_distinct ADD INDEX( title ), ADD INDEX( title_id )"
execute( c, txt )
if SQLITE:
txt = "CREATE INDEX titles_distinct_index ON titles_distinct( title, title_id )"
execute( c, txt )
# --------------------------------------------------------------
# WRW 1 Apr 2022 - Build raw_index table after add indexes because of the inner SELECT
for item in raw_index:
data = ( item[ 'src' ], item[ 'local' ], item[ 'file' ], item[ 'line' ], item[ 'title' ] )
txt = 'INSERT INTO raw_index( src, local, file, line, title_id ) VALUES( %s, %s, %s, %s, (SELECT title_id from titles_distinct WHERE title | |
# Copyright (c) 2016 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import nuke
import tank
class PluginStudioContextSwitcher(object):
"""
A Toolkit context-switching manager.
This class provides a context switcher for non template based pipeline configurations.
As such, there is no way to find the context of a file by extracting entities from the
path. It is therefore an empty shell.
"""
def __init__(self, engine):
pass
def get_new_context(self, file_path):
return None
def destroy(self):
pass
class ClassicStudioContextSwitcher(object):
"""
A Toolkit context-switching manager.
This class operates by registering an event handler with Nuke Studio,
which allows it to detect when the user has changed from the top-level
"project" view to a Nuke node graph, and vice versa. When changing to
the "Nuke" portion of Nuke Studio, the .nk script being shown will be
checked against Shotgun Toolkit to determine whether it resides in a
known context, and if it does the tk-nuke engine will switch to that
on the fly. When the user comes out of the "Nuke" portion of Nuke Studio
and is once again at the project level, tk-nuke's context will again
be changed to match.
"""
def __init__(self, engine):
"""
Initializes a PluginStudioContextSwitcher object.
:param engine: The running sgtk.engine.Engine to associate the
context switcher with.
"""
self._event_desc = [
dict(
add=nuke.addOnCreate,
remove=nuke.removeOnCreate,
registrar=nuke.callbacks.onCreates,
function=self._startup_node_callback,
),
dict(
add=nuke.addOnScriptSave,
remove=nuke.removeOnScriptSave,
registrar=nuke.callbacks.onScriptSaves,
function=self._on_save_callback,
),
]
self._context_cache = dict()
self._init_project_root = engine.tank.project_path
self._init_context = engine.context
self._is_in_nuke = False
self.register_events(reregister=True)
##########################################################################
# properties
@property
def context(self):
"""
The current sgtk.context.Context.
"""
self._context = self.engine.context
@property
def is_in_nuke(self):
"""
Whether Nuke Studio is current in "Nuke" mode or not.
"""
return self._is_in_nuke
@property
def engine(self):
"""
The current engine that is running.
"""
return tank.platform.current_engine()
@property
def init_context(self):
"""
The sgtk.context.Context that was used at initialization time.
"""
return self._init_context
@property
def init_project_root(self):
"""
The project root directory path at initialization time.
"""
return self._init_project_root
##########################################################################
# private
def _check_if_registered(self, func, registrar):
"""
Checks if a callback is already registered with Nuke Studio.
"""
# The test is made by comparing the name of the functions.
# see: http://docs.thefoundry.co.uk/nuke/90/pythondevguide/callbacks.html
for nodeClass_category in registrar.values():
for (function, args, kwargs, nodeClass) in nodeClass_category:
if func.__name__ == function.__name__:
return True
return False
def _eventHandler(self, event):
"""
Event handler for context switching events in Nuke Studio.
:param event: The Nuke Studio event that was triggered.
"""
# Testing if we actually changed context or if the event got fired without
# the user switching to the node graph. Early exit if it's still the
# same context.
if self._is_in_nuke == event.focusInNuke:
return
# Set the current context to be remembered for the next context
# change.
self._is_in_nuke = event.focusInNuke
if self.is_in_nuke:
# We switched from the project timeline to a Nuke node graph.
try:
script_path = nuke.scriptName()
except Exception:
script_path = None
if script_path:
# Switched to nuke with a script open. We have a path and could try
# to figure out the sgtk context from that.
new_context = self.get_new_context(script_path)
if new_context is not None and new_context != self.engine.context:
self.change_context(new_context)
else:
# There is no script open in the node graph. Because of that, we
# will stay in the current context since we're essentially just in
# a non-special state of Nuke Studio where we're on the empty node
# graph tab.
return
else:
# This is a switch back to the project-level timeline,
# so change to that context based on that project file's
# path.
project_path = self._get_current_project()
if project_path:
new_context = self.get_new_context(project_path)
if new_context:
self.change_context(new_context)
return
# If all else fails here, then we just go back to the init
# context that we have cached. Since we know we're not in
# the Nuke node graph, then we should be fine to go ahead
# with what we had at launch.
self.change_context(self._init_context)
def _get_context_from_script(self, script):
"""
Returns an sgtk.context.Context object from the given script path.
:param script: The path to a script file on disk.
"""
tk = tank.tank_from_path(script)
context = tk.context_from_path(
script,
previous_context=self.engine.context,
)
if context.project is None:
raise tank.TankError(
"The Nuke engine needs at least a project "
"context in order to start! Your context: %s" % context
)
else:
return context
def _get_current_project(self):
"""
Returns the current project based on where in the UI the user clicked.
"""
import hiero.core
import hiero.ui
view = hiero.ui.activeView()
if isinstance(view, hiero.ui.TimelineEditor):
sequence = view.sequence()
if sequence:
bin_item = sequence.binItem()
if bin_item:
return bin_item.project().path()
return None
def _on_save_callback(self):
"""
Callback that fires every time a file is saved.
"""
try:
# Get the new file name.
file_name = nuke.root().name()
try:
# This file could be in another project altogether, so
# create a new Tank instance.
tk = tank.tank_from_path(file_name)
except tank.TankError, e:
self.engine.menu_generator.create_sgtk_disabled_menu(e)
return
# Extract a new context based on the file and change to that
# context.
new_context = tk.context_from_path(
file_name,
previous_context=self.context,
)
self.change_context(new_context)
except Exception:
self.engine.menu_generator.create_sgtk_error_menu()
def _startup_node_callback(self):
"""
Callback that fires every time a node gets created.
"""
try:
# Look for the root node. This is created only when a new or existing
# file is opened.
if nuke.thisNode() != nuke.root():
return
if nuke.root().name() == "Root":
# This is a file->new call, so base it on the context we
# stored from the previous session.
tk = tank.Tank(self.init_project_root)
if self.init_context:
new_ctx = self.init_context
else:
new_ctx = tk.context_empty()
else:
# This is a file->open call, so we can get the new context
# from the file path that was opened.
file_name = nuke.root().name()
try:
tk = tank.tank_from_path(file_name)
except tank.TankError, e:
self.engine.menu_generator.create_sgtk_disabled_menu(e)
return
new_ctx = tk.context_from_path(
file_name,
previous_context=self.context,
)
# Now change the context for the engine and apps.
self.change_context(new_ctx)
except Exception, e:
self.engine.menu_generator.create_sgtk_error_menu(e)
##########################################################################
# public
def change_context(self, new_context):
"""
Changes Toolkit's context, or creates a disabled menu item if
that is not possible.
:param new_context: The sgtk.context.Context to change to.
"""
if new_context == self.engine.context:
return
try:
tank.platform.change_context(new_context)
except tank.TankEngineInitError, e:
# Context was not sufficient!
self.engine.menu_generator.create_sgtk_disabled_menu(e)
def destroy(self):
"""
Tears down the context switcher by deregistering event handlers.
"""
self.unregister_events()
def get_new_context(self, script_path):
"""
Returns a new sgtk.context.Context for the given script path.
If the context exists in the in-memory cache, then that is returned,
otherwise a new Context object is constructed, cached, and returned.
:param script_path: The path to a script file on disk.
"""
context = self._context_cache.get(script_path)
if context:
return context
try:
context = self._get_context_from_script(script_path)
if context:
self._context_cache[script_path] = context
return context
else:
raise tank.TankError(
"Toolkit could not determine the context associated with this script."
)
except Exception, e:
self.engine.menu_generator.create_sgtk_disabled_menu(e)
self.engine.logger.debug(e)
return None
def register_events(self, reregister=False):
"""
Registers context-switching event handlers with Nuke Studio.
:param reregister: If True, previously-registered event handlers will
be removed and new instances of those handlers will
be reregistered with Nuke Studio. If False, any
event handler that has already been registered with
Nuke Studio will be skipped.
"""
import hiero.core
# Event for context switching from Hiero to Nuke.
hiero.core.events.registerInterest(
hiero.core.events.EventType.kContextChanged,
self._eventHandler,
)
for func_desc in self._event_desc:
# This is the variable that stores a dict of currently-registered
# callbacks.
registrar = func_desc.get('registrar')
# The function we wish to register.
function = func_desc.get('function')
# The function used to register the callback.
add = func_desc.get('add')
# Check if the callback is already registered.
if self._check_if_registered(function, registrar):
if reregister:
self._unregister_events(only=[func_desc])
else:
continue
add(function)
def unregister_events(self, only=None):
"""
Unregisters any event handlers that the context switcher
created during a register_events call.
:param only: A list of callback functions to unregister. If
not provided, all known event callbacks | |
wrong because you can mark-for-deployment before deploying there"
)
)
print(
PaastaColors.red(
"but this is probably a typo. Did you mean one of these in-use deploy groups?:"
)
)
print(PaastaColors.red(" %s" % (",").join(in_use_deploy_groups)))
print()
print(PaastaColors.red("Continuing regardless..."))
if args.git_url is None:
args.git_url = get_git_url(service=service, soa_dir=args.soa_dir)
commit = validate_git_sha(sha=args.commit, git_url=args.git_url)
old_git_sha = get_currently_deployed_sha(service=service, deploy_group=deploy_group)
if old_git_sha == commit:
print(
"Warning: The sha asked to be deployed already matches what is set to be deployed:"
)
print(old_git_sha)
print("Continuing anyway.")
if args.verify_image:
if not is_docker_image_already_in_registry(service, args.soa_dir, commit):
raise ValueError(
"Failed to find image in the registry for the following sha %s" % commit
)
deploy_info = get_deploy_info(service=service, soa_dir=args.soa_dir)
deploy_authz_check(deploy_info, service)
deploy_process = MarkForDeploymentProcess(
service=service,
deploy_info=deploy_info,
deploy_group=deploy_group,
commit=commit,
old_git_sha=old_git_sha,
git_url=args.git_url,
auto_rollback=args.auto_rollback,
block=args.block,
soa_dir=args.soa_dir,
timeout=args.timeout,
warn_pct=args.warn,
auto_certify_delay=args.auto_certify_delay,
auto_abandon_delay=args.auto_abandon_delay,
auto_rollback_delay=args.auto_rollback_delay,
authors=args.authors,
polling_interval=args.polling_interval,
diagnosis_interval=args.diagnosis_interval,
time_before_first_diagnosis=args.time_before_first_diagnosis,
)
ret = deploy_process.run()
return ret
class Progress:
waiting_on: Mapping[str, Collection[str]]
percent: float
def __init__(
self, percent: float = 0, waiting_on: Mapping[str, Collection[str]] = None
) -> None:
self.percent = percent
self.waiting_on = waiting_on
def human_readable(self, summary: bool) -> str:
if self.percent != 0 and self.percent != 100 and not summary:
s = f"{round(self.percent)}% (Waiting on {self.human_waiting_on()})"
else:
s = f"{round(self.percent)}%"
return s
def human_waiting_on(self) -> str:
if self.waiting_on is None:
return "N/A"
things = []
for cluster, instances in self.waiting_on.items():
num_instances = len(instances)
if num_instances == 0:
continue
elif num_instances == 1:
(one_instance,) = instances
things.append(f"`{cluster}`: `{one_instance}`")
else:
things.append(f"`{cluster}`: {len(instances)} instances")
return ", ".join(things)
class MarkForDeploymentProcess(SLOSlackDeploymentProcess):
rollback_states = ["start_rollback", "rolling_back", "rolled_back"]
rollforward_states = ["start_deploy", "deploying", "deployed"]
default_slack_channel = DEFAULT_SLACK_CHANNEL
paasta_status_reminder_handle: asyncio.TimerHandle
def __init__(
self,
service: str,
deploy_info: Dict,
deploy_group: str,
commit: str,
old_git_sha: str,
git_url: str,
auto_rollback: bool,
block: bool,
soa_dir: str,
timeout: float,
warn_pct: float,
auto_certify_delay: float,
auto_abandon_delay: float,
auto_rollback_delay: float,
authors: Optional[List[str]] = None,
polling_interval: float = None,
diagnosis_interval: float = None,
time_before_first_diagnosis: float = None,
) -> None:
self.service = service
self.deploy_info = deploy_info
self.deploy_group = deploy_group
self.commit = commit
self.old_git_sha = old_git_sha
self.git_url = git_url
self.auto_rollback = (
auto_rollback and old_git_sha is not None and old_git_sha != commit
)
self.auto_rollbacks_ever_enabled = self.auto_rollback
self.block = block
self.soa_dir = soa_dir
self.timeout = timeout
self.warn_pct = warn_pct
self.mark_for_deployment_return_code = -1
self.auto_certify_delay = auto_certify_delay
self.auto_abandon_delay = auto_abandon_delay
self.auto_rollback_delay = auto_rollback_delay
self.authors = authors
self.polling_interval = polling_interval
self.diagnosis_interval = diagnosis_interval
self.time_before_first_diagnosis = time_before_first_diagnosis
# Keep track of each wait_for_deployment task so we can cancel it.
self.wait_for_deployment_tasks: Dict[str, asyncio.Task] = {}
self.human_readable_status = "Waiting on mark-for-deployment to initialize..."
self.progress = Progress()
self.last_action = None
self.slo_watchers: List[SLOWatcher] = []
self.start_slo_watcher_threads(self.service, self.soa_dir)
# Initialize Slack threads and send the first message
super().__init__()
self.print_who_is_running_this()
def get_progress(self, summary: bool = False) -> str:
return self.progress.human_readable(summary)
def print_who_is_running_this(self) -> None:
build_url = get_jenkins_build_output_url()
if build_url is not None:
message = f"(<{build_url}|Jenkins Job>)"
else:
message = f"(Run by `{getpass.getuser()}` on {socket.getfqdn()})"
self.update_slack_thread(message)
def get_authors(self) -> str:
# In order to avoid notifying people who aren't part of the current
# service push, we calculate authors based on commits different since
# the current production SHA, as opposed to the old SHA on this deploy
# group.
#
# This avoids situations such as:
# * Notifying people from a previous push which went through stagef,
# if the new push goes through stageg.
# * Notifying everybody who has committed to a repo in the past year
# when updating a "legacy" deploy group (e.g. for yelp-main).
prod_deploy_group = self.deploy_info.get("production_deploy_group")
from_sha = None
if prod_deploy_group is not None:
from_sha = get_currently_deployed_sha(
service=self.service, deploy_group=prod_deploy_group
)
# If there's no production deploy group, or the production deploy group
# has never been deployed to, just use the old SHA from this deploy group.
if from_sha is None:
from_sha = self.old_git_sha
return get_authors_to_be_notified(
git_url=self.git_url,
from_sha=from_sha,
to_sha=self.commit,
authors=self.authors,
)
def ping_authors(self, message: str = None) -> None:
if message:
self.update_slack_thread(f"{message}\n{self.get_authors()}")
else:
self.update_slack_thread(self.get_authors())
def get_slack_client(self) -> SlackClient:
return get_slack_client().sc
def get_slack_channel(self) -> str:
""" Safely get some slack channel to post to. Defaults to ``DEFAULT_SLACK_CHANNEL``.
Currently only uses the first slack channel available, and doesn't support
multi-channel notifications. """
if self.deploy_info.get("slack_notify", True):
try:
channel = self.deploy_info.get("slack_channels")[0]
# Nightly jenkins builds will often re-deploy master. This causes Slack noise that wasn't present before
# the auto-rollbacks work.
if self.commit == self.old_git_sha:
print(
f"Rollback SHA matches rollforward SHA: {self.commit}, "
f"Sending slack notifications to {DEFAULT_SLACK_CHANNEL} instead of {channel}."
)
return DEFAULT_SLACK_CHANNEL
else:
return channel
except (IndexError, AttributeError, TypeError):
return DEFAULT_SLACK_CHANNEL
else:
return DEFAULT_SLACK_CHANNEL
def get_deployment_name(self) -> str:
return f"Deploy of `{self.commit[:8]}` of `{self.service}` to `{self.deploy_group}`:"
    def on_enter_start_deploy(self) -> None:
        """State-machine hook for the ``start_deploy`` state.

        Marks the commit for deployment, then fires ``mfd_failed`` or
        ``mfd_succeeded`` depending on the return code.
        """
        self.update_slack_status(
            f"Marking `{self.commit[:8]}` for deployment for {self.deploy_group}..."
        )
        self.mark_for_deployment_return_code = mark_for_deployment(
            git_url=self.git_url,
            deploy_group=self.deploy_group,
            service=self.service,
            commit=self.commit,
        )
        if self.mark_for_deployment_return_code != 0:
            self.trigger("mfd_failed")
        else:
            # Optionally append the author list, depending on the
            # "notify_after_mark" per-deploy-group configuration.
            self.update_slack_thread(
                f"Marked `{self.commit[:8]}` for {self.deploy_group}."
                + (
                    "\n" + self.get_authors()
                    if self.deploy_group_is_set_to_notify("notify_after_mark")
                    else ""
                )
            )
            log.debug("triggering mfd_succeeded")
            self.trigger("mfd_succeeded")
    def schedule_paasta_status_reminder(self) -> None:
        """Schedule a one-shot event-loop callback that, once ``warn_pct``
        percent of ``timeout`` has elapsed, warns the user that the deploy may
        be stuck and suggests ``paasta status`` commands to investigate."""
        def waiting_on_to_status(
            waiting_on: Mapping[str, Collection[str]]
        ) -> List[str]:
            # Render one `paasta status` invocation per cluster that still has
            # instances waiting on the deploy.
            if waiting_on is None:
                # NOTE(review): "--{self.deploy_group}" renders the deploy
                # group value itself as a flag name, and the closing backtick
                # leaves "-vv" outside the code span -- confirm the intended
                # command line here.
                return [
                    f"`paasta status --service {self.service} --{self.deploy_group}` -vv"
                ]
            commands = []
            for cluster, instances in waiting_on.items():
                num_instances = len(instances)
                if num_instances == 0:
                    continue
                else:
                    commands.append(
                        f"`paasta status --service {self.service} --cluster {cluster} --instance {','.join(instances)} -vv`"
                    )
            return commands
        def times_up() -> None:
            # Fired on the event loop; only nag if we are still deploying.
            try:
                if self.state == "deploying":
                    human_max_deploy_time = humanize.naturaldelta(
                        datetime.timedelta(seconds=self.timeout)
                    )
                    stuck_bounce_runbook = os.environ.get(
                        "STUCK_BOUNCE_RUNBOOK", DEFAULT_STUCK_BOUNCE_RUNBOOK,
                    )
                    status_commands = "\n".join(
                        waiting_on_to_status(self.progress.waiting_on)
                    )
                    self.notify_users(
                        (
                            f"It has been {self.warn_pct}% of the "
                            f"maximum deploy time ({human_max_deploy_time}), "
                            "which means the deployment may be stuck. "
                            "Here are some things you can try:\n\n"
                            f"* See {stuck_bounce_runbook} for debugging help\n"
                            f"* Run these commands to see the status of instances that "
                            "have not yet finished deploying:\n\n"
                            f"{status_commands}"
                        )
                    )
            except Exception as e:
                log.error(
                    f"Non-fatal exception encountered when processing the status reminder: {e}"
                )
        def schedule_callback() -> None:
            # Must run on the event-loop thread; the handle is kept so
            # cancel_paasta_status_reminder() can cancel it later.
            time_to_notify = self.timeout * self.warn_pct / 100
            self.paasta_status_reminder_handle = self.event_loop.call_later(
                time_to_notify, times_up
            )
        try:
            self.event_loop.call_soon_threadsafe(schedule_callback)
        except Exception as e:
            log.error(
                f"Non-fatal error encountered scheduling the status reminder callback: {e}"
            )
def cancel_paasta_status_reminder(self) -> None:
try:
handle = self.get_paasta_status_reminder_handle()
if handle is not None:
handle.cancel()
self.paasta_status_reminder_handle = None
except Exception as e:
log.error(
f"Non-fatal error encountered when canceling the paasta status reminder: {e}"
)
def get_paasta_status_reminder_handle(self) -> Optional[asyncio.TimerHandle]:
try:
return self.paasta_status_reminder_handle
except AttributeError:
return None
def states(self) -> Collection[str]:
return [
"_begin",
"start_deploy",
"deploying",
"deployed",
"mfd_failed",
"deploy_errored",
"deploy_cancelled",
"start_rollback",
"rolling_back",
"rolled_back",
"abandon",
"complete",
]
def start_state(self) -> str:
return "_begin"
def start_transition(self) -> str:
return "start_deploy"
def valid_transitions(self) -> Iterator[state_machine.TransitionDefinition]:
rollback_is_possible = (
self.old_git_sha is not None and self.old_git_sha != self.commit
)
yield {"source": "_begin", "dest": "start_deploy", "trigger": "start_deploy"}
yield {
"source": "start_deploy",
"dest": "deploying",
"trigger": "mfd_succeeded",
}
yield {"source": "deploying", "dest": "deployed", "trigger": "deploy_finished"}
yield {
"source": ["start_deploy", "start_rollback"],
"dest": "mfd_failed",
"trigger": "mfd_failed",
}
yield {
"source": [s for s in self.states() if not self.is_terminal_state(s)],
"dest": "deploy_errored",
"trigger": "deploy_errored",
}
yield {
"source": [s for s in self.states() if not self.is_terminal_state(s)],
"dest": "deploy_cancelled",
"trigger": "deploy_cancelled",
}
if rollback_is_possible:
yield {
"source": self.rollforward_states,
"dest": "start_rollback",
"trigger": "rollback_button_clicked",
"before": self.log_user_rollback,
}
yield {
"source": self.rollback_states,
"dest": None, # this makes it an "internal transition", effectively a noop.
"trigger": "rollback_button_clicked",
}
yield {
"source": self.rollforward_states,
"dest": "start_rollback",
"trigger": "rollback_slo_failure",
"before": self.log_slo_rollback,
}
yield {
"source": self.rollback_states,
"dest": None, # this makes it an "internal transition", effectively a noop.
"trigger": "rollback_slo_failure",
}
yield {
"source": self.rollback_states,
"dest": "start_deploy",
"trigger": "forward_button_clicked",
}
yield {
"source": self.rollforward_states,
"dest": None, # this makes it an "internal transition", effectively a noop.
"trigger": "forward_button_clicked",
}
yield {
"source": "start_rollback",
"dest": "rolling_back",
"trigger": "mfd_succeeded",
}
yield {
"source": "rolling_back",
"dest": "rolled_back",
"trigger": "deploy_finished",
}
yield {
"source": "deployed",
"dest": "complete",
"trigger": "complete_button_clicked",
}
yield {"source": "deployed", "dest": "complete", "trigger": "auto_certify"}
yield {
"source": ["rolled_back", "rolling_back"],
"dest": "abandon",
"trigger": "abandon_button_clicked",
}
yield {"source": "rolled_back", "dest": "abandon", "trigger": "auto_abandon"}
if rollback_is_possible:
# Suppress these buttons if it doesn't make sense to roll back.
yield {
"source": "*",
"dest": None, # Don't actually change state, just call the | |
bottom left gridpoint in meters
"""
self.nx = nx
self.ny = ny
self.dx = dx
self.dy = dy
self.xmin = xmin
self.ymin = ymin
# The swiss grid is not technically using a PlateCarree projection
# (in fact it is not using any projection implemented by cartopy),
# however the points returned by the cell_corners() method are in
# WGS84, which PlateCarree defaults to.
super().__init__(name, ccrs.PlateCarree())
def cell_corners(self, i, j):
"""Return the corners of the cell with indices (i,j).
See also the docstring of Grid.cell_corners.
Parameters
----------
i : int
j : int
Returns
-------
tuple(np.array(shape=(4,), dtype=float),
np.array(shape=(4,), dtype=float))
Arrays containing the x and y coordinates of the corners
"""
x1, y1 = self._LV03_to_WGS84(
self.xmin + i * self.dx, self.ymin + j * self.dy
)
x2, y2 = self._LV03_to_WGS84(
self.xmin + (i + 1) * self.dx, self.ymin + (j + 1) * self.dy
)
cell_x = np.array([x2, x2, x1, x1])
cell_y = np.array([y2, y1, y1, y2])
return cell_x, cell_y
def lon_range(self):
"""Return an array containing all the longitudinal points on the grid.
Returns
-------
np.array(shape=(nx,), dtype=float)
"""
return np.array([self.xmin + i * self.dx for i in range(self.nx)])
def lat_range(self):
"""Return an array containing all the latitudinal points on the grid.
Returns
-------
np.array(shape=(ny,), dtype=float)
"""
return np.array([self.ymin + j * self.dy for j in range(self.ny)])
def _LV03_to_WGS84(self, y, x):
"""Convert LV03 to WSG84.
Based on swisstopo approximated solution (0.1" accuracy)
For better comparability with other implementations, here:
x <-> Northing
y <-> Easting,
contrary to the rest of this class.
Parameters
----------
y : float
y coordinate in meters
x : float
x coordinate in meters
Returns
-------
tuple(float, float)
The coordinates of the point in WGS84 (lon, lat)
"""
x = (x - 200_000) / 1_000_000
y = (y - 600_000) / 1_000_000
lon = (
2.6779094
+ 4.728982 * y
+ 0.791484 * y * x
+ 0.1306 * y * x ** 2
- 0.0436 * y ** 3
) / 0.36
lat = (
16.9023892
+ 3.238272 * x
- 0.270978 * y ** 2
- 0.002528 * x ** 2
- 0.0447 * y ** 2 * x
- 0.0140 * x ** 3
) / 0.36
return lon, lat
class COSMOGrid(Grid):
    """Class to manage a COSMO-domain
    This grid is defined as a rotated pole coordinate system.
    The gridpoints are at the center of the cell.
    """
    # Number of cells in the longitudinal / latitudinal direction.
    nx: int
    ny: int
    # Cell size in degrees (rotated-pole coordinates).
    dx: float
    dy: float
    # Coordinates of the bottom-left gridpoint (a cell center), in degrees.
    xmin: float
    ymin: float
    # Location of the rotated pole.
    pollon: float
    pollat: float
    def __init__(self, nx, ny, dx, dy, xmin, ymin, pollon=180, pollat=90):
        """Store the grid information.
        Parameters
        ----------
        nx : int
            Number of cells in longitudinal direction
        ny : int
            Number of cells in latitudinal direction
        dx : float
            Longitudinal size of a gridcell in degrees
        dy : float
            Latitudinal size of a gridcell in degrees
        xmin : float
            Longitude of bottom left gridpoint in degrees
        ymin : float
            Latitude of bottom left gridpoint in degrees
        pollon : float
            Longitude of the rotated pole
        pollat : float
            Latitude of the rotated pole
        """
        self.nx = nx
        self.ny = ny
        self.dx = dx
        self.dy = dy
        self.xmin = xmin
        self.ymin = ymin
        self.pollon = pollon
        self.pollat = pollat
        # Precompute the cell corners once.  Gridpoints are cell centers, so
        # each cell extends half a step in every direction.  cell_x has shape
        # (4, nx) and cell_y shape (4, ny); column i (resp. j) holds the four
        # corner coordinates of that column (resp. row) of cells.
        x = self.xmin + np.arange(self.nx) * self.dx
        y = self.ymin + np.arange(self.ny) * self.dy
        dx2 = self.dx / 2
        dy2 = self.dy / 2
        self.cell_x = np.array([x + dx2, x + dx2, x - dx2, x - dx2])
        self.cell_y = np.array([y + dy2, y - dy2, y - dy2, y + dy2])
        super().__init__(
            "COSMO",
            ccrs.RotatedPole(pole_longitude=pollon, pole_latitude=pollat),
        )
def gridcell_areas(self):
"""Calculate 2D array of the areas (m^2) of a regular rectangular grid
on earth.
Returns
-------
np.array
2D array containing the areas of the gridcells in m^2
shape: (nx, ny)
"""
radius = 6375000.0 # the earth radius in meters
dlon = np.deg2rad(self.dx)
dlat = np.deg2rad(self.dy)
# Cell area at equator
dd = 2.0 * pow(radius, 2) * dlon * np.sin(0.5 * dlat)
# Cell areas in y-direction
areas = dd * np.cos(np.deg2rad(self.ymin) + np.arange(self.ny) * dlat)
return np.broadcast_to(areas, (self.nx, self.ny))
def lon_range(self):
"""Return an array containing all the longitudinal points on the grid.
Returns
-------
np.array(shape=(nx,), dtype=float)
"""
# Because of floating point math the original arange is not guaranteed
# to contain the expected number of points.
# This way we are sure that we generate at least the required number of
# points and discard the possibly generated superfluous one.
# Compared to linspace this method generates more exact steps at
# the cost of a less accurate endpoint.
try:
lon_vals = self.lon_vals
except AttributeError:
self.lon_vals = np.arange(
self.xmin, self.xmin + (self.nx + 0.5) * self.dx, self.dx
)[: self.nx]
lon_vals = self.lon_vals
return lon_vals
def lat_range(self):
"""Return an array containing all the latitudinal points on the grid.
Returns
-------
np.array(shape=(ny,), dtype=float)
"""
# See the comment in lon_range
try:
lat_vals = self.lat_vals
except AttributeError:
self.lat_vals = np.arange(
self.ymin, self.ymin + (self.ny + 0.5) * self.dy, self.dy
)[: self.ny]
lat_vals = self.lat_vals
return lat_vals
def cell_corners(self, i, j):
"""Return the corners of the cell with indices (i,j).
See also the docstring of Grid.cell_corners.
Parameters
----------
i : int
j : int
Returns
-------
tuple(np.array(shape=(4,), dtype=float),
np.array(shape=(4,), dtype=float))
Arrays containing the x and y coordinates of the corners
"""
return self.cell_x[:,i], self.cell_y[:,j]
def indices_of_point(self, lon, lat, proj=ccrs.PlateCarree()):
"""Return the indices of the grid cell that contains the point (lon, lat)
Parameters
----------
lat : float
The latitude of the point source
lon : float
The longitude of the point source
proj : cartopy.crs.Projection
The cartopy projection of the lat/lon of the point source
Default: cartopy.crs.PlateCarree
Returns
-------
tuple(int, int)
(cosmo_indx,cosmo_indy),
the indices of the cosmo grid cell containing the source.
Raises
------
IndexError
If the point lies outside the grid.
"""
point = self.projection.transform_point(lon, lat, proj)
indx = np.floor((point[0] - self.xmin) / self.dx)
indy = np.floor((point[1] - self.ymin) / self.dy)
if indx < 0 or indy < 0 or indx > self.nx - 1 or indy > self.ny - 1:
raise IndexError("Point lies outside the COSMO Grid")
return int(indx), int(indy)
def intersected_cells(self, corners):
"""Given a inventory cell, return a list of cosmo-cell-indices and
intersection fractions.
The inventory cell is specified by it's corners. The output is a list
of tuples, specifying the indices and overlap as a fraction of the
inventory cell area.
Parameters
----------
corners : np.array(shape=(4,2), dtype=float)
The corners of the inventory cell in the COSMO coordinate system
Returns
-------
list(tuple(int, int, float))
A list containing triplets (x,y,r)
- x : longitudinal index of cosmo grid cell
- y : latitudinal index of cosmo grid cell
- r : ratio of the area of the intersection compared to the total
area of the inventory cell.
r is in (0,1] (only nonzero intersections are reported)
"""
# Find around which cosmo grid index the inventory cell lies.
# Since the inventory cell is in general not rectangular because
# of different projections, we add a margin of to the extremal indices.
# This way we're sure we don't miss any intersection.
cell_xmin = min(k[0] for k in corners)
lon_idx_min = int((cell_xmin - self.xmin) / self.dx) - 2
if lon_idx_min > self.nx:
# The inventory cell lies outside the cosmo grid
return []
cell_xmax = max(k[0] for k in corners)
lon_idx_max = int((cell_xmax - self.xmin) / self.dx) + 3
if lon_idx_max < 0:
# The inventory cell lies outside the cosmo grid
return []
cell_ymin = min(k[1] for k in corners)
lat_idx_min = int((cell_ymin - self.ymin) / self.dy) - 2
if lat_idx_min > self.ny:
# The inventory cell lies outside the cosmo grid
return []
cell_ymax = max(k[1] for k in corners)
lat_idx_max = int((cell_ymax - self.ymin) / self.dy) + 3
if lat_idx_max < 0:
# The inventory cell lies outside the cosmo | |
<reponame>nlafl/Network-Level-Adversaries-in-Federated-Learning
import os
import random
import numpy as np
from nlafl import common
def load_emnist():
    """ Load the EMNIST dataset
    Returns:
        tuple: tuple of numpy arrays trn_x, trn_y, tst_x, tst_y
    """
    paths = (
        common.emnist_trn_x_pth,
        common.emnist_trn_y_pth,
        common.emnist_tst_x_pth,
        common.emnist_tst_y_pth,
    )
    trn_x, trn_y, tst_x, tst_y = (
        np.load(os.path.expanduser(p)) for p in paths
    )
    return trn_x, trn_y, tst_x, tst_y
def partition(x, y):
    """ Given a dataset matrix and labels, return the data matrix partitioned by class.
    The list of classes is assumed to be the number of classes for the dataset.
    Example output:
        [ [class 1's x ..], [class 2's x ..] , ... [class 10's x ..] ]
    Args:
        x (numpy.ndarray): data matrix
        y (numpy.ndarray): data labels
    Returns:
        list: Partitioned data matrix, as list of ndarray objects
    """
    return [
        x[np.where(y == label)[0]]
        for label in range(common.num_classes['emnist'])
    ]
# Note: Unused, keeping for reference
def dirichlet_sample(all_x, num_clients, client_size, alpha=100, seed=None):
    """ Sample the dataset using a Dirichlet distribution.
    For each client a class distribution p ~ Dirichlet(alpha) is drawn and the
    per-class sample count is int(client_size * p[class]), so the actual
    client size can be slightly below `client_size` due to truncation.
    `all_x` contains x values of each class in the format:
    [ [class 1's x ..], [class 2's x ..] , ... [class 10's x ..] ]
    Note: `all_x` is mutated in place (used samples are removed).
    Args:
        all_x (list): Partitioned data matrix, as list of ndarray objects
        num_clients (int): number of clients
        client_size (int): desired number of samples per client
        alpha (int, optional): Dirichlet parameter alpha. Defaults to 100.
        seed (int, optional): seed for PRNGs. Defaults to None.
    Returns:
        list: list of tuples, (data, labels)) for each client
    """
    # Seed the PRNGs
    np.random.seed(seed)
    random.seed(seed)
    # Initialize per-client data structures
    num_classes = common.num_classes['emnist']
    clients = []
    all_dirichlets = np.random.dirichlet(
        [alpha for _ in range(num_classes)],
        num_clients
    )
    # shape (num_clients, num_classes)
    for i in range(num_clients):
        this_x, this_y = [], []
        # dirichlet[i] -> distribution of each class for client i;
        # multiplied with client_size gives the expected number of samples
        # for each class for client i
        cur_counts = client_size*all_dirichlets[i]
        for y in range(num_classes):
            # Truncate the float count to an int.  The deprecated `np.int`
            # alias was removed in NumPy 1.24; use the builtin `int` instead.
            y_ct = cur_counts[y].astype(int)
            # take y_ct many samples for this class
            this_x.append(all_x[y][:y_ct])
            # discard the samples already picked, continue with the rest
            all_x[y] = all_x[y][y_ct:]
            this_y.append(np.zeros(y_ct, dtype=int)+y)  # -> [y] * y_ct
        this_x = np.concatenate(this_x)
        this_y = np.concatenate(this_y)
        assert this_x.shape[0] == this_y.shape[0]
        clients.append((this_x, this_y))
    return clients
def fixed_sample(
    all_x,
    num_clients,
    client_size,
    targ_class=0,
    client_targ=5,
    targ_frac=.2,
    alpha=100,
    seed=None
):
    """ Use a Dirichlet distribution to assign target class samples to clients.
    `all_x` -> [ [class 1's x ..], [class 2's x ..] , ... [class 10's x ..] ]
    `client_size` is used to calculate the number of samples for each class
    from a Dirichlet distribution with parameter alpha.
    Note: `all_x` is mutated in place (used samples are removed).
    Args:
        all_x (list): partitioned data matrix, as list of ndarray objects
        num_clients (int): number of clients
        client_size (int): desired number of samples per client
        targ_class (int, optional): identifier of target class. Defaults to 0
        client_targ (int, optional): number of clients having target class points. Defaults to 5
        targ_frac (float, optional): fraction of target class points for clients having them. Defaults to .2
        alpha (int, optional): Dirichlet parameter alpha. Defaults to 100
        seed (int, optional): seed for PRNGs. Defaults to None
    Returns:
        list: list of tuples, (data, labels)) for each client
    """
    # Seed the PRNGs
    np.random.seed(seed)
    random.seed(seed)
    num_classes = common.num_classes['emnist']
    num_nontarget = num_classes - 1
    # Initialize per-client data structures
    clients = []
    # Per-client class distribution over the non-target classes only.
    orig_dirichlets = np.random.dirichlet([alpha] * num_nontarget, num_clients)
    all_dirichlets = np.zeros((num_clients, num_classes))
    # Fill up the columns of `all_dirichlets` up to the target class,
    # and from the one following the target class to the end using the
    # values generated in `orig_dirichlets` (the target-class column stays 0).
    all_dirichlets[:, :targ_class] = orig_dirichlets[:, :targ_class]
    all_dirichlets[:, targ_class+1:] = orig_dirichlets[:, targ_class:]
    # targ_x is the numpy array of all target class samples
    targ_x = all_x[targ_class]
    for i in range(num_clients):
        this_x, this_y = [], []
        total_ct = client_size
        # The first client_targ clients will have the target class samples
        if i < client_targ:
            # number of target class samples for client i
            num_targ = int(total_ct * targ_frac)
            total_ct -= num_targ
            # Assign the target class samples to client i and create a label
            # vector.  The deprecated `np.int` alias was removed in NumPy
            # 1.24; the builtin `int` is used instead.
            this_x.append(targ_x[:num_targ])
            this_y.append(np.zeros(num_targ, dtype=int) + targ_class)
            # Remove the samples used for this client from targ_x
            targ_x = targ_x[num_targ:]
        counts = (total_ct * all_dirichlets[i]).astype(int)
        assert counts[targ_class] == 0
        for y in range(num_classes):
            # Ignore the target class
            if y == targ_class:
                continue
            y_ct = counts[y].astype(int)
            this_x.append(all_x[y][:y_ct])
            all_x[y] = all_x[y][y_ct:]
            this_y.append(np.zeros(y_ct, dtype=int) + y)
        this_x = np.concatenate(this_x)
        this_y = np.concatenate(this_y)
        assert this_x.shape[0] == this_y.shape[0]
        clients.append((this_x, this_y))
    return clients
def fixed_poison(
    all_x,
    num_clients,
    client_size,
    poison_ct,
    targ_class=0,
    client_targ=5,
    targ_frac=.2,
    alpha=100,
    seed=None
):
    """ Like fixed_sample, but additionally gives `poison_ct` clients
    label-flipped ("poisoned") target-class points.
    Note: `all_x` is mutated in place (used samples are removed).
    Args:
        all_x (list): partitioned data matrix, as list of ndarray objects
        num_clients (int): number of clients
        client_size (int): desired number of samples per client
        poison_ct (int): number of clients participating in the poisoning attack
        targ_class (int, optional): identifier of target class. Defaults to 0
        client_targ (int, optional): number of clients having target class points. Defaults to 5
        targ_frac (float, optional): fraction of target class points for clients having them. Defaults to .2
        alpha (int, optional): Dirichlet parameter alpha. Defaults to 100
        seed (int, optional): seed for PRNGs. Defaults to None
    Returns:
        list: list of tuples, (data, labels)) for each client
    """
    # Seed the PRNGs
    np.random.seed(seed)
    random.seed(seed)
    num_classes = common.num_classes['emnist']
    num_nontarget = num_classes - 1
    # Initialize per-client data structures
    clients = []
    orig_dirichlets = np.random.dirichlet([alpha] * num_nontarget, num_clients)
    all_dirichlets = np.zeros((num_clients, num_classes))
    # Fill up the columns of `all_dirichlets` up to the target class,
    # and from the one following the target class to the end using the
    # values generated in `orig_dirichlets` (the target-class column stays 0).
    all_dirichlets[:, :targ_class] = orig_dirichlets[:, :targ_class]
    all_dirichlets[:, targ_class+1:] = orig_dirichlets[:, targ_class:]
    # targ_x is the numpy array of all target class samples.
    # NOTE(review): samples handed out via `targ_x` below are never removed
    # from all_x[targ_class], so poisoning clients may reuse the same
    # target-class points the first clients received -- confirm intended.
    targ_x = all_x[targ_class]
    for i in range(num_clients):
        this_x, this_y = [], []
        total_ct = client_size
        # The first client_targ clients will have the target class samples
        if i < client_targ:
            # number of target class samples for client i
            num_targ = int(total_ct * targ_frac)
            total_ct -= num_targ
            # Assign the target class samples to client i and create a label
            # vector.  The deprecated `np.int` alias was removed in NumPy
            # 1.24; the builtin `int` is used instead.
            this_x.append(targ_x[:num_targ])
            this_y.append(np.zeros(num_targ, dtype=int)+targ_class)
            # Remove the samples used for this client from targ_x
            targ_x = targ_x[num_targ:]
        # The successive `poison_ct` clients will have the poisoned points
        elif i < client_targ + poison_ct:
            num_targ = int(total_ct * targ_frac)
            total_ct -= num_targ
            counts = (total_ct * all_dirichlets[i]).astype(int)
            # Flip the labels for the target class samples
            for y in range(num_classes):
                if y == targ_class:
                    y_ct = num_targ
                    y_local = (y + 1) % num_classes
                else:
                    y_ct = counts[y].astype(int)
                    y_local = y
                # Assign the samples to this client
                this_x.append(all_x[y][:y_ct])
                this_y.append(np.zeros(y_ct, dtype=int) + y_local)
                # Remove the samples used for this client
                all_x[y] = all_x[y][y_ct:]
            this_x = np.concatenate(this_x)
            this_y = np.concatenate(this_y)
            assert this_x.shape[0] == this_y.shape[0]
            clients.append((this_x, this_y))
            continue
        counts = (total_ct*all_dirichlets[i]).astype(int)
        assert counts[targ_class] == 0
        for y in range(num_classes):
            # Ignore the target class
            if y == targ_class:
                continue
            y_ct = counts[y].astype(int)
            this_x.append(all_x[y][:y_ct])
            all_x[y] = all_x[y][y_ct:]
            this_y.append(np.zeros(y_ct, dtype=int) + y)
        this_x = np.concatenate(this_x)
        this_y = np.concatenate(this_y)
        assert this_x.shape[0] == this_y.shape[0]
        clients.append((this_x, this_y))
    return clients
if __name__ == '__main__':
""" Test utility """
target_class = 3
targ_frac = .5
cli_targ = 5
poison_ct = 5
cli_size = 1000
trn_x, trn_y, tst_x, tst_y = load_emnist()
partitioned = partition(trn_x, trn_y)
print('\nPer-class dataset shapes')
print([v.shape for v in partitioned])
if poison_ct > 0:
clients = fixed_poison(
partitioned, 100, cli_size, poison_ct=poison_ct, targ_class=target_class,
client_targ=cli_targ, targ_frac=targ_frac, alpha=1, seed=None
)
else:
clients = fixed_sample(
partitioned, 100, cli_size, targ_class=target_class,
client_targ=cli_targ, targ_frac=targ_frac, alpha=1, seed=0
)
print('\nPer-client local dataset shapes')
print([client[0].shape for client in clients])
print('\nNumber of clients and total number of points selected')
print(len(clients), sum([client[0].shape[0] for client in clients]))
print(f"\nNumber of clients having class {target_class}:", sum(
[(target_class in np.unique(client[1])) for client in clients]))
# Sanity check
for client in clients[:cli_targ]:
assert sum(client[1] == target_class) == int(cli_size * targ_frac)
print('Per-client breakdown of classes and number of points per | |
<reponame>FlamesLLC/GBACopilotv0<gh_stars>0
##
print("Welcome to GBAPY 1.0X")
## please make a gba emulator like no$gba
import socket
import sys
import time
import os
import threading
import time
import random
## please make the framework for the gui and make it look like a gba emulator with the width and height of 800x800
import pygame
from pygame.locals import *
pygame.init()
from tkinter import *
from tkinter import ttk
from tkinter import font
from tkinter import messagebox
from tkinter import filedialog
from tkinter.colorchooser import *
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
from tkinter.filedialog import askdirectory
## make a client gui
class Client(Tk):
    def __init__(self, *args, **kwargs):
        ## NOTE(review): dead code -- this __init__ is immediately shadowed by
        ## the duplicate __init__ definitions that follow it in this class, so
        ## it never runs.  It also stops short: `helpmenu` is created on the
        ## last line but never populated or attached to the menubar.
        ## Consider deleting the duplicates and keeping a single __init__.
        Tk.__init__(self, *args, **kwargs)
        self.geometry("800x803")
        self.title("GBAPY 1.0X")
        self.resizable(0,0)
        self.protocol("WM_DELETE_WINDOW", self.on_closing)
        self.iconbitmap(r'GBAPY.ico')
        #self.update()
        ## make a menu bar
        menubar = Menu(self)
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label="New", command=self.donothing)
        filemenu.add_command(label="Open", command=self.donothing)
        filemenu.add_command(label="Save", command=self.donothing)
        filemenu.add_command(label="Save as...", command=self.donothing)
        filemenu.add_command(label="Close", command=self.donothing)
        filemenu.add_separator()
        filemenu.add_command(label="Exit", command=self.on_closing)
        menubar.add_cascade(label="File", menu=filemenu)
        editmenu = Menu(menubar, tearoff=0)
        editmenu.add_command(label="Undo", command=self.donothing)
        editmenu.add_separator()
        editmenu.add_command(label="Cut", command=self.donothing)
        editmenu.add_command(label="Copy", command=self.donothing)
        editmenu.add_command(label="Paste", command=self.donothing)
        editmenu.add_command(label="Delete", command=self.donothing)
        editmenu.add_command(label="Select All", command=self.donothing)
        menubar.add_cascade(label="Edit", menu=editmenu)
        helpmenu = Menu(menubar, tearoff=0)
        ## with pygame make a gui resmebling a gba
def __init__(self, *args, **kwargs):
Tk.__init__(self, *args, **kwargs)
self.geometry("800x803")
self.title("GBAPY 1.0X")
self.resizable(0,0)
self.protocol("WM_DELETE_WINDOW", self.on_closing)
self.iconbitmap(r'GBAPY.ico')
#self.update()
## make a menu bar
menubar = Menu(self)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="New", command=self.donothing)
filemenu.add_command(label="Open", command=self.donothing)
filemenu.add_command(label="Save", command=self.donothing)
filemenu.add_command(label="Save as...", command=self.donothing)
filemenu.add_command(label="Close", command=self.donothing)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=self.on_closing)
menubar.add_cascade(label="File", menu=filemenu)
editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(label="Undo", command=self.donothing)
editmenu.add_separator()
editmenu.add_command(label="Cut", command=self.donothing)
editmenu.add_command(label="Copy", command=self.donothing)
editmenu.add_command(label="Paste", command=self.donothing)
editmenu.add_command(label="Delete", command=self.donothing)
editmenu.add_command(label="Select All", command=self.donothing)
menubar.add_cascade(label="Edit", menu=editmenu)
helpmenu = Menu(menubar, tearoff=0)
## with pygame make a gui resmebling a gba
helpmenu.add_command(label="Help Index", command=self.donothing)
helpmenu.add_command(label="About...", command=self.donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
self.config(menu=menubar)
## make a toolbar
toolbar = Frame(self, bg="black")
toolbar.pack(side=TOP, fill=X)
## make a frame for the buttons
frame = Frame(self)
frame.pack(fill=BOTH, expand=YES)
## make a frame for the buttons
frame2 = Frame(self)
frame2.pack(fill=BOTH, expand=YES)
## make a frame for the buttons
frame3 = Frame(self)
frame3.pack(fill=BOTH, expand=YES)
## make a frame for the buttons
frame4 = Frame(self)
frame4.pack(fill=BOTH, expand=YES)
## make a frame for the buttons
frame5 = Frame(self)
frame5.pack(fill=BOTH, expand=YES)
## make a frame for the buttons
frame6 = Frame(self)
frame6.pack(fill=BOTH, expand=YES)
## make a frame for the buttons
frame7 = Frame(self)
frame7.pack(fill=BOTH, expand=YES)
## make a frame for the buttons
frame8 = Frame(self)
frame8.pack(fill=BOTH, expand=YES)
## make a frame for the buttons
frame9 = Frame(self)
frame9.pack(fill=BOTH, expand=YES)
## make a frame for the buttons
frame10 = Frame(self)
frame10.pack(fill=BOTH, expand=YES)
## make a frame for the buttons
frame11 = Frame(self)
frame11.pack(fill=BOTH, expand=YES)
## make a frame for the buttons
frame12 = Frame(self)
frame
=======
def __init__(self, *args, **kwargs):
Tk.__init__(self, *args, **kwargs)
self.geometry("800x803")
self.title("GBAPY 1.0X")
self.resizable(0,0)
self.protocol("WM_DELETE_WINDOW", self.on_closing)
self.iconbitmap(r'GBAPY.ico')
#self.update()
## make a menu bar
menubar = Menu(self)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="New", command=self.donothing)
filemenu.add_command(label="Open", command=self.donothing)
filemenu.add_command(label="Save", command=self.donothing)
filemenu.add_command(label="Save as...", command=self.donothing)
filemenu.add_command(label="Close", command=self.donothing)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=self.on_closing)
menubar.add_cascade(label="File", menu=filemenu)
editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(label="Undo", command=self.donothing)
editmenu.add_separator()
editmenu.add_command(label="Cut", command=self.donothing)
editmenu.add_command(label="Copy", command=self.donothing)
editmenu.add_command(label="Paste", command=self.donothing)
editmenu.add_command(label="Delete", command=self.donothing)
editmenu.add_command(label="Select All", command=self.donothing)
menubar.add_cascade(label="Edit", menu=editmenu)
helpmenu = Menu(menubar, tearoff=0)
## with pygame make a gui resmebling a gba
helpmenu.add_command(label="Help Index", command=self.donothing)
helpmenu.add_command(label="About...", command=self.donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
self.config(menu=menubar)
## make a frame
self.frame = Frame(self, width=800, height=800)
self.frame.pack(side=TOP)
## make a canvas
self.canvas = Canvas(self.frame, width=800, height=800, bg="black")
self.canvas.pack(side=TOP)
## make a frame
self.frame2 = Frame(self, width=800, height=800)
self.frame2.pack(side=BOTTOM)
## make a canvas
self.canvas2 = Canvas(self.frame2, width=800, height=800, bg="black")
self.canvas2.pack(side=TOP)
## make a frame
self.frame3 = Frame(self, width=800, height=800)
self.frame3.pack(side=BOTTOM)
## make a canvas
self.canvas3 = Canvas(self.frame3, width=800, height=800, bg="black")
self.canvas3.pack(side=TOP)
## make a frame
self.frame4 = Frame(self, width=800, height=800)
self.frame4.pack(side=BOTTOM)
## make a canvas
self.canvas4 = Canvas(self.frame4, width=800, height=800, bg="black")
self.canvas4.pack(side=TOP)
## make a frame
self.frame5 = Frame(self, width=800, height=800)
self.frame5.pack(side=BOTTOM)
## make a canvas
self.canvas5 = Canvas(self.frame5, width=800, height=800, bg="black")
=======
helpmenu.add_command(label="Help Index", command=self.donothing)
helpmenu.add_command(label="About...", command=self.donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
self.config(menu=menubar)
self.protocol("WM_DELETE_WINDOW", self.on_closing)
self.iconbitmap(r'GBAPY.ico')
self.update()
## make a top frame
self.top_frame = Frame(self)
self.top_frame.pack(side=TOP, fill=X)
## make a bottom frame
self.bottom_frame = Frame(self)
self.bottom_frame.pack(side=BOTTOM, fill=X)
## make a left frame
self.left_frame = Frame(self)
self.left_frame.pack(side=LEFT, fill=Y)
## make a right frame
self.right_frame = Frame(self)
self.right_frame.pack(side=RIGHT, fill=Y)
## make a middle frame
self.middle_frame = Frame(self)
self.middle_frame.pack(side=TOP, fill=X)
## make a bottom frame
self.bottom_frame = Frame(self)
self.bottom_frame.pack(side=BOTTOM, fill=X)
## make a bottom frame
self.bottom_frame = Frame(self)
self.bottom_frame.pack(side=BOTTOM, fill=X)
## make a bottom frame
self.bottom_frame = Frame(self)
self.bottom_frame.pack(side=BOTTOM, fill=X)
## make a bottom frame
self.bottom_frame = Frame(self)
self.bottom_frame.pack(side=BOTTOM, fill=X)
## make a bottom frame
self.bottom_frame = Frame(self)
self.bottom_frame.pack(side=BOTTOM, fill
=======
def __init__(self, *args, **kwargs):
Tk.__init__(self, *args, **kwargs)
self.geometry("800x803")
self.title("GBAPY 1.0X")
self.resizable(0,0)
self.protocol("WM_DELETE_WINDOW", self.on_closing)
self.iconbitmap(r'GBAPY.ico')
#self.update()
## make a menu bar
menubar = Menu(self)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="New", command=self.donothing)
filemenu.add_command(label="Open", command=self.donothing)
filemenu.add_command(label="Save", command=self.donothing)
filemenu.add_command(label="Save as...", command=self.donothing)
filemenu.add_command(label="Close", command=self.donothing)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=self.on_closing)
menubar.add_cascade(label="File", menu=filemenu)
editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(label="Undo", command=self.donothing)
editmenu.add_separator()
editmenu.add_command(label="Cut", command=self.donothing)
editmenu.add_command(label="Copy", command=self.donothing)
editmenu.add_command(label="Paste", command=self.donothing)
editmenu.add_command(label="Delete", command=self.donothing)
editmenu.add_command(label="Select All", command=self.donothing)
menubar.add_cascade(label="Edit", menu=editmenu)
helpmenu = Menu(menubar, tearoff=0)
## with pygame make a gui resmebling a gba
helpmenu.add_command(label="Help Index", command=self.donothing)
helpmenu.add_command(label="About...", command=self.donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
self.config(menu=menubar)
## make a frame for the gui
self.mainframe = Frame(self)
self.mainframe.pack(fill=BOTH, expand=1)
## make a frame for the gui
self.frame = Frame(self.mainframe)
self.frame.pack(fill=BOTH, expand=1)
## make a label for the gui
self.label = Label(self.frame, text="GBAPY 1.0X")
self.label.pack(fill=BOTH, expand=1)
## make a label for the gui
self.label = Label(self.frame, text="")
self.label.pack(fill=BOTH, expand=1)
## make a label for the gui
self.label = Label(self.frame, text="")
self.label.pack(fill=BOTH, expand=1)
## make a label for the gui
self.label = Label(self.frame, text="")
self.label.pack(fill=BOTH, expand=1)
## make a label for the gui
self.label = Label(self.frame, text="")
self.label.pack(fill=BOTH, expand=1)
## make a label for the gui
self.label = Label(self.frame, text="")
self.label.pack(fill=BOTH, expand=1)
## make a label for the gui
self.label = Label(self.frame, text="")
self.label.pack(fill=BOTH, expand=1)
## make a label for the gui
self.label = Label(self.frame, text="")
self.label.pack(fill=BOTH, expand=1)
## make a label for the gui
self.label
=======
helpmenu.add_command(label="Help Index", command=self.donothing)
helpmenu.add_command(label="About...", command=self.donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
self.config(menu=menubar)
## make a frame
self.frame = Frame(self)
self.frame.pack(side=TOP, fill=BOTH, expand=1)
self.frame.grid_rowconfigure(0, weight=1)
self.frame.grid_columnconfigure(0, weight=1)
self.frame.grid_propagate(0)
self.frame.grid_rowconfigure(0, weight=1)
self.frame.grid_columnconfigure(0, weight=1)
## make a canvas
self.canvas = Canvas(self.frame)
self.canvas.grid(row=0, column=0, sticky=N+S+E+W)
self.canvas.config(width=800, height=800)
self.canvas.create_rectangle(0,0,800,800, fill="white")
self.canvas.bind("<Button-1>", self.leftclick)
self.canvas.bind("<Button-2>", self.rightclick)
self.canvas.bind("<Button-3>", self.rightclick)
self.canvas.bind("<B1-Motion>", self.leftclick)
self.canvas.bind("<B2-Motion>", self.rightclick)
self.canvas.bind("<B3-Motion>", self.rightclick)
self.canvas.bind("<ButtonRelease-1>", self.leftclick)
self.canvas.bind("<ButtonRelease-2>", self.rightclick)
self.canvas.bind("<ButtonRelease-3>", self.rightclick)
self.canvas.bind("<Key>", self.key)
self.canvas.bind("<KeyRelease>
=======
helpmenu.add_command(label="Help Index", command=self.donothing)
helpmenu.add_command(label="About...", command=self.donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
self.config(menu=menubar)
## make a menu bar
menubar = Menu(self)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="New", command=self.donothing)
filemenu.add_command(label="Open", command=self.donothing)
filemenu.add_command(label="Save", command=self.donothing)
filemenu.add_command(label="Save as...", command=self.donothing)
filemenu.add_command(label="Close", command=self.donothing)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=self.on_closing)
menubar.add_cascade(label="File", menu=filemenu)
editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(label="Undo", command=self.donothing)
editmenu.add_separator()
editmenu.add_command(label="Cut", command=self.donothing)
editmenu.add_command(label="Copy", command=self.donothing)
editmenu.add_command(label="Paste", command=self.donothing)
editmenu.add_command(label="Delete", command=self.donothing)
editmenu.add_command(label="Select All", command=self.donothing)
menubar.add_cascade(label="Edit", menu=editmenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="Help Index", command=self.donothing)
helpmenu.add_command(label="About...", command=self.donothing)
menubar.add_cascade(label
=======
helpmenu.add_command(label="Help Index", command=self.donothing)
helpmenu.add_command(label="About...", command=self.donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
self.config(menu=menubar)
## make a canvas to draw on
self.canvas = Canvas(self, width=800, height=800, bg="black")
self.canvas.pack(fill=BOTH, expand=YES)
## make a scrollbar
self.scrollbar = Scrollbar(self)
self.scrollbar.pack(side=RIGHT, fill=Y)
self.scrollbar.config(command=self.canvas.yview)
self.canvas.config(yscrollcommand=self.scrollbar.set)
## make a frame
self.frame = Frame(self.canvas)
self.frame.pack()
## make a canvas scrollable with scrollbar
self.canvas.create_window((0,0), window=self.frame, anchor=NW)
self.frame.bind("<Configure>", self.onFrameConfigure)
## make a button
self.button = Button(self.frame, text="Click me!", command=self.donothing)
self.button.pack()
## make a label
self.label = Label(self.frame, text="Hello, World!")
self.label.pack()
## make a listbox
self.listbox = Listbox(self.frame)
self.listbox.pack()
## make a textbox
self.textbox = Text(self.frame)
self.textbox.pack()
## make a canvas
self.canvas = Canvas(self.frame, width=800, height=800, bg="black")
self.canvas.pack()
## make a scrollbar
self.scrollbar = Scrollbar(self.frame)
self.scrollbar.pack(side=RIGHT, fill=Y)
self.
=======
helpmenu.add_command(label="Help Index", command=self.donothing)
helpmenu.add_command(label="About...", command=self.donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
self.config(menu=menubar)
## make a toolbar
toolbar = Frame(self, bg="black")
toolbar.pack(side=TOP, fill=X)
## make a frame
self.frame = Frame(self, bg="black")
self.frame.pack(side=TOP, fill=X)
## make a canvas
self.canvas = Canvas(self.frame, bg="black", width=800, height=800)
self.canvas.pack(fill=X)
## make a textbox
self.textbox = Text(self.frame, bg="black", fg="white", font="Helvetica 10", width=80, height=25)
self.textbox.pack(side=TOP, fill=X)
## make a textbox
self.textbox2 = Text(self.frame, bg="black", fg="white", font="Helvetica 10", width=80, height=20)
self.textbox2.pack(side=TOP, fill=X)
## make a textbox
self.textbox3 = Text(self.frame, bg="black", fg="white", font="Helvetica 10", width=80, height=20)
self.textbox3.pack(side=TOP, fill=X)
## make a textbox
self.textbox4 = Text(self.frame, bg="black", fg="white", font="Helvetica 10", width=80, height=20)
self.textbox4.pack(side=TOP, fill=X)
## make a textbox
self.textbox5 = Text(self.frame, bg="black", fg="white", font="Helvetica 10", width=80, height=20)
self.textbox5.pack(
=======
helpmenu.add_command(label="About", command=self.donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
self.config(menu=menubar)
## make a toolbar
toolbar = Frame(self, bg="grey")
toolbar.pack(side=TOP, fill=X)
## make a | |
# <reponame>edupaz2/Udacity-SelfDCars-CarND-Advanced-Lane-Lines
from glob import glob
import numpy as np
import cv2
import matplotlib.pyplot as plt
import pickle
# Glob pattern locating the chessboard calibration photos.
CALIBRATION_IMAGES_PATH = 'camera_cal/calibration*.jpg'
# Pickle file used to cache the detected calibration points between runs.
CALIBRATION_POINTS_PATH = 'camera_cal/calibration.p'
# Inner-corner count of the calibration chessboard (nx columns, ny rows).
nx = 9
ny = 6
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
def displayImages(imglist):
    """Display a row of images side by side with their titles.

    Parameters
    ----------
    imglist : list
        Each item has the format [img, label, cmap].
    """
    count = len(imglist)
    # squeeze=False guarantees a 2D axes array even when count == 1,
    # so .ravel() is always safe (the original crashed on a single image
    # because plt.subplots(1, 1) returns a bare Axes, not an array).
    f, axes = plt.subplots(1, count, figsize=(10, 4), squeeze=False)
    f.tight_layout()
    axes = axes.ravel()
    for i, (img, label, cmap) in enumerate(imglist):
        axes[i].imshow(img, cmap=cmap)
        axes[i].set_title(label, fontsize=35)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    plt.show()
def readCalibrationPoints():
    """Load cached chessboard calibration points, or detect and cache them.

    Returns
    -------
    (imgpoints, objpoints) : tuple of lists
        Detected 2D corner positions and the matching 3D object points,
        suitable for cv2.calibrateCamera.
    """
    imgpoints = []
    objpoints = []
    # Try the cached calibration first to avoid re-detecting corners.
    try:
        with open(CALIBRATION_POINTS_PATH, 'rb') as pfile:
            print('Reading calibration file', CALIBRATION_POINTS_PATH)
            cal_pickle = pickle.load(pfile)
            objpoints = cal_pickle["objpoints"]
            imgpoints = cal_pickle["imgpoints"]
    except Exception as e:
        print('Unable to read data from', CALIBRATION_POINTS_PATH, ':', e)
    if imgpoints and objpoints:
        return imgpoints, objpoints
    # If there is no previous calibration, do it.
    print('Calibrating camera')
    # Prepare one canonical set of object points for an nx-by-ny board in the
    # z=0 plane: (0,0,0), (1,0,0), ..., (nx-1,ny-1,0).
    objp = np.zeros((nx*ny, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    # Detect chessboard corners in every calibration image.
    for fname in glob(CALIBRATION_IMAGES_PATH):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if ret:
            print('Corners found at', fname)
            imgpoints.append(corners)
            objpoints.append(objp)
    # Cache the result for subsequent runs (best effort: failure to save is
    # logged but does not abort, the points are still returned).
    try:
        with open(CALIBRATION_POINTS_PATH, 'wb+') as pfile:
            print('Saving to calibration file', CALIBRATION_POINTS_PATH)
            pickle.dump(
                {
                    'imgpoints': imgpoints,
                    'objpoints': objpoints,
                },
                pfile, pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        print('Unable to save data to', CALIBRATION_POINTS_PATH, ':', e)
    return imgpoints, objpoints
def calibrateCamera(debug=False):
    """Compute the camera matrix and distortion coefficients.

    Uses the cached/detected chessboard points; one calibration image is
    read only to obtain the image size cv2.calibrateCamera expects.
    """
    imgpoints, objpoints = readCalibrationPoints()
    sample = cv2.imread('camera_cal/calibration2.jpg')
    # sample.shape[1::-1] is (width, height), the size ordering cv2 wants.
    _, mtx, dist, _, _ = cv2.calibrateCamera(
        objpoints, imgpoints, sample.shape[1::-1], None, None)
    return mtx, dist
#src_frustrum = np.float32([[186,720], [606,441], [672,441], [1125,720]])
# Source trapezoid (road region in the camera image) and destination
# rectangle, in pixel coordinates, for the bird's-eye perspective warp.
# Point order: bottom-left, top-left, top-right, bottom-right.
src_frustrum = np.float32([[172,720], [586,450], [690,450], [1160,720]])
dst_frustrum = np.float32([[320, 720], [320, 0], [960, 0], [960, 720]])
def distortionCorrection(img, mtx, dist, debug=False):
    """Undistort `img` using camera matrix `mtx` and distortion coeffs `dist`.

    When `debug` is true, also show the original next to the undistorted
    result. Returns the undistorted image.
    """
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    if debug:
        displayImages([[img, 'Original', None], [undist, 'Undistorted', None]])
    return undist
def perspectiveTransform(img, src=src_frustrum, dst=dst_frustrum, debug=False):
    """Warp `img` so the `src` quadrilateral maps onto the `dst` rectangle.

    Produces the bird's-eye view used for lane detection.

    Returns
    -------
    (warped, M) : the warped image and the 3x3 perspective transform matrix.
    """
    img_size = (img.shape[1], img.shape[0])
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
    if debug:
        displayImages([[img, 'Original', None], [warped, 'Perspective', None]])
    return warped, M
def color_filter(img):
    """Build a binary mask of likely lane-line pixels from color thresholds.

    Combines the HLS L channel (bright/white lines) with the LAB B channel
    (yellow lines): a pixel is 1 if either channel passes its threshold.
    """
    # np.float was removed in NumPy 1.24; use the explicit float64 dtype
    # (np.float was an alias for the builtin float, i.e. float64).
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    hls_l_channel = hls[:, :, 1]
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB).astype(np.float64)
    lab_b_channel = lab[:, :, 2]
    hls_l_thresh = (210, 255)
    lab_b_thres = (192, 255)
    # Threshold color channels
    l_binary = np.zeros_like(hls_l_channel)
    l_binary[(hls_l_channel >= hls_l_thresh[0]) & (hls_l_channel <= hls_l_thresh[1])] = 1
    b_binary = np.zeros_like(lab_b_channel)
    b_binary[(lab_b_channel > lab_b_thres[0]) & (lab_b_channel < lab_b_thres[1])] = 1
    # OR the two channel masks together.
    img_filtered = np.zeros_like(img[:, :, 0])
    img_filtered[(l_binary == 1) | (b_binary == 1)] = 1
    return img_filtered
def sobelx_binary(img, lx_thresh=(25, 60)):
    """Binary mask of strong horizontal gradients in the HLS S channel.

    Parameters
    ----------
    img : RGB image.
    lx_thresh : (low, high) thresholds on the 0-255 scaled Sobel-x response.
    """
    # np.float was removed in NumPy 1.24; use the explicit float64 dtype.
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    hls_s_channel = hls[:, :, 2]
    # Sobel x: the x-derivative accentuates near-vertical lane lines.
    sobelx = cv2.Sobel(hls_s_channel, cv2.CV_64F, 1, 0)
    abs_sobelx = np.absolute(sobelx)
    # Rescale to 0-255 so the thresholds are image-independent.
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    # Threshold x gradient
    lxbinary = np.zeros_like(scaled_sobel)
    lxbinary[(scaled_sobel >= lx_thresh[0]) & (scaled_sobel <= lx_thresh[1])] = 1
    return lxbinary
def get_binary_image(img):
    """Combine color thresholding and the Sobel-x gradient into one mask.

    A pixel is set when either the color filter or the gradient filter
    marks it as a candidate lane-line pixel.
    """
    color_mask = color_filter(img)
    gradient_mask = sobelx_binary(img)
    combined = np.zeros_like(gradient_mask)
    combined[(color_mask == 1) | (gradient_mask == 1)] = 1
    return combined
def laneLinesBlindSearch(binary_warped, debug=False):
    """Find lane-line pixels from scratch with a sliding-window search.

    Starts from the two histogram peaks of the bottom half of the image and
    walks `nwindows` windows upward, re-centering each window on the mean x
    of the pixels it captures.

    Parameters
    ----------
    binary_warped : 2D binary array (bird's-eye view).
    debug : bool, when True also return the visualization image.

    Returns
    -------
    (left_fit, right_fit, vis) : second-order polynomial coefficients
        x = f(y) for each line, and the visualization image when `debug`
        is True, otherwise None.
    """
    # Take a histogram of the bottom half of the image.
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)
    # Create an output image to draw on and visualize the result.
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    # The left/right histogram peaks are the starting points for each line.
    # np.int was removed in NumPy 1.24; the builtin int is the replacement.
    midpoint = int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Sliding-window parameters.
    nwindows = 10
    window_height = int(binary_warped.shape[0]//nwindows)
    # Identify the x and y positions of all nonzero pixels in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window.
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Half-width of the windows.
    margin = 100
    # Minimum number of pixels found to recenter a window.
    minpix = 50
    # Per-window pixel indices, concatenated after the loop.
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom to top.
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image.
        cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                      (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low),
                      (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Identify the nonzero pixels in x and y within each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If more than minpix pixels were found, recenter the next window
        # on their mean position.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the arrays of indices.
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial x = f(y) to each line
    # (a stray debug print of the pixel counts was removed here).
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    if debug:
        # Generate x and y values for plotting and color the lane pixels.
        ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
        # plt.imshow(out_img)
        # plt.plot(left_fitx, ploty, color='yellow')
        # plt.plot(right_fitx, ploty, color='yellow')
        # plt.xlim(0, binary_warped.shape[1])
        # plt.ylim(binary_warped.shape[0], 0)
        # plt.show()
        return left_fit, right_fit, out_img
    return left_fit, right_fit, None
def laneLinesTargetedSearch(binary_warped, left_fit, right_fit, debug=False, margin=100):
    """Refine lane-line fits by searching around the previous frame's fits.

    Much cheaper than the blind sliding-window search: only pixels within
    +/- `margin` of each previous polynomial are considered.

    Parameters
    ----------
    binary_warped : 2D binary array (bird's-eye view).
    left_fit, right_fit : previous second-order coefficients x = f(y).
    debug : bool, when True also build and return a visualization image.
    margin : int, half-width in pixels of the search band around each fit
        (generalized from the previously hard-coded 100).

    Returns
    -------
    (left_fit, right_fit, vis) : refreshed fits, and the visualization
        image when `debug` is True, otherwise None.
    """
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Boolean masks selecting pixels within +/- margin of each polynomial.
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
                      left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
                      left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
                       right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
                       right_fit[1]*nonzeroy + right_fit[2] + margin)))
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each line.
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    if debug:
        # Generate x and y values for plotting.
        ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        # Image to draw on, and an overlay showing the search band.
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
        window_img = np.zeros_like(out_img)
        # Color in left (red) and right (blue) line pixels.
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
        # Build polygons illustrating the search window area, recast into a
        # format usable by cv2.fillPoly().
        left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
                                      ploty])))])
        left_line_pts = np.hstack((left_line_window1, left_line_window2))
        right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
                                       ploty])))])
        right_line_pts = np.hstack((right_line_window1, right_line_window2))
        # Draw the lane onto the warped blank image.
        cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
        cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
        result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
        # plt.imshow(result)
        # plt.plot(left_fitx, ploty, color='yellow')
        # plt.plot(right_fitx, ploty, color='yellow')
        # plt.xlim(0, binary_warped.shape[1])
        # plt.ylim(binary_warped.shape[0], 0)
        # plt.show()
        return left_fit, right_fit, result
    return left_fit, right_fit, None
def curvatureAndOffsetMeasurement(binary_warped, left_fit, right_fit, debug=False):
    """Compute the lane curvature radius and vehicle offset, in meters.

    Parameters
    ----------
    binary_warped : 2D array; only its shape is used.
    left_fit, right_fit : second-order polynomial coefficients x = f(y),
        in pixel space.
    debug : bool, when True plot both fitted lines first.

    Returns
    -------
    (radius_m, offset_m) : mean of the two curvature radii, and the offset
        derived from the image center, both scaled to meters.
    """
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # Plot up the data
    if debug:
        plt.xlim(0, binary_warped.shape[1])
        plt.ylim(binary_warped.shape[0], 0)
        plt.plot(left_fitx, ploty, color='green', linewidth=3)
        plt.plot(right_fitx, ploty, color='green', linewidth=3)
        plt.show()
    # Curvature is evaluated at the maximum y-value, i.e. the bottom of the
    # image, closest to the vehicle. (A pixel-space curvature computation
    # that was immediately overwritten by the meter-space one was removed.)
    y_eval = np.max(ploty)
    # Refit the polynomials in world space (meters).
    left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
    # Radius of curvature: R = (1 + (2Ay + B)^2)^(3/2) / |2A|
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    middleImg = binary_warped.shape[1]/2
    # NOTE(review): (right - left)/2 is half the lane *width* at the image
    # bottom, not the lane center ((right + left)/2). Looks like a bug, but
    # it is preserved as-is to keep existing output unchanged — confirm the
    # expected offset sign/magnitude before fixing.
    middleLane = (right_fitx[-1]-left_fitx[-1])/2
    # Now our radius of curvature is in meters
    print('LeftCurve:', left_curverad, 'm. RightCurve:', right_curverad, 'm. Middle: ', middleLane)
    return (left_curverad+right_curverad)/2, (middleImg-middleLane)*xm_per_pix
def drawFinalLines(image, binary_warped, radius, offset, perspective_M, left_fit, right_fit, debug=False):
##########
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = 100
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy + left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy + right_fit[2] + margin)))
##########
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# Create an image to draw the lines | |
case
stats_df = _stats_numpy(
zones,
values,
zone_ids,
stats_funcs_dict,
nodata_zones,
nodata_values
)
else:
# dask case
stats_df = _stats_dask(
zones,
values,
zone_ids,
stats_funcs_dict,
nodata_zones,
nodata_values
)
return stats_df
def _crosstab_dict(zones, values, unique_zones, cats, nodata_values, agg):
    """Build the crosstab table as a dict of columns.

    The 'zone' column lists the zone ids; every other column is a category
    holding the per-zone cell counts (converted to percentages of the
    zone's valid-cell total when ``agg == "percentage"``).
    """
    unique_zones = [_to_int(z) for z in unique_zones]
    crosstab_dict = {"zone": unique_zones}
    for cat in cats:
        crosstab_dict[cat] = []
    for cat_id, cat in enumerate(cats):
        for zone_id in unique_zones:
            # get category cat values in the selected zone
            zone_cat_data = _zone_cat_data(
                zones, values, zone_id, nodata_values, cat, cat_id
            )
            crosstab_dict[cat].append(_stats_count(zone_cat_data))
    if agg == "percentage":
        # Normalize each count by the total number of valid cells per zone.
        zone_counts = _stats(
            zones,
            values,
            unique_zones,
            {"count": _stats_count},
            nodata_values
        )["count"]
        for cat in cats:
            for z in range(len(unique_zones)):
                crosstab_dict[cat][z] = (
                    crosstab_dict[cat][z] / zone_counts[z] * 100
                )
    return crosstab_dict
def _crosstab_numpy(
    zones,
    values,
    zone_ids,
    cat_ids,
    nodata_zones,
    nodata_values,
    agg
):
    """Crosstab for numpy-backed DataArrays; returns a pandas DataFrame.

    Parameters mirror ``crosstab``; see its docstring for details.
    """
    if cat_ids is not None:
        cats = np.array(cat_ids)
    else:
        # no categories provided, find all possible cats in values raster
        if len(values.shape) == 3:
            # 3D case: categories come from the extra leading coordinate
            cats = values.indexes[values.dims[0]].values
        else:
            # 2D case: mask out all invalid values such as nan, inf.
            # Use numpy here (not dask) — this is the numpy code path, so it
            # should not round-trip through dask; consistent with the zones
            # branch below.
            cats = np.unique(values.data[np.isfinite(values.data)])
            cats = sorted(list(set(cats) - set([nodata_values])))
    if zone_ids is None:
        # do not consider zone with nodata values
        unique_zones = np.unique(zones.data[np.isfinite(zones.data)])
        unique_zones = sorted(list(set(unique_zones) - set([nodata_zones])))
    else:
        unique_zones = np.array(zone_ids)
    crosstab_dict = _crosstab_dict(
        zones, values, unique_zones, cats, nodata_values, agg
    )
    # DataFrame(dict) already names the columns after the dict keys,
    # so no separate .columns assignment is needed.
    crosstab_df = pd.DataFrame(crosstab_dict)
    return crosstab_df
def _crosstab_dask(
    zones,
    values,
    zone_ids,
    cat_ids,
    nodata_zones,
    nodata_values,
    agg
):
    """Crosstab for dask-backed DataArrays.

    Returns an uncomputed dask DataFrame with one column per category plus
    the leading 'zone' column. Parameters mirror ``crosstab``.
    """
    if cat_ids is None:
        # No categories provided: discover them from the values raster.
        if len(values.shape) == 3:
            # 3D case: the extra leading coordinate indexes the categories.
            cats = values.indexes[values.dims[0]].values
        else:
            # 2D case: precompute the finite unique values, drop nodata.
            finite_vals = values.data[da.isfinite(values.data)]
            cats = sorted(set(da.unique(finite_vals).compute()) - {nodata_values})
    else:
        cats = np.array(cat_ids)
    if zone_ids is None:
        # Precompute the unique zones, excluding non-finite and nodata ids.
        zone_vals = da.unique(zones.data[da.isfinite(zones.data)]).compute()
        unique_zones = sorted(set(zone_vals) - {nodata_zones})
    else:
        unique_zones = np.array(zone_ids)
    crosstab_dict = _crosstab_dict(
        zones, values, unique_zones, cats, nodata_values, agg
    )
    # Stack each column's per-zone results into one dask array.
    crosstab_dict = {
        name: da.stack(column, axis=0)
        for name, column in crosstab_dict.items()
    }
    # Assemble a single dask dataframe, one column per key, then name them.
    crosstab_df = dd.concat(
        [dd.from_dask_array(column) for column in crosstab_dict.values()],
        axis=1
    )
    crosstab_df.columns = crosstab_dict.keys()
    return crosstab_df
def crosstab(
zones: xr.DataArray,
values: xr.DataArray,
zone_ids: List[Union[int, float]] = None,
cat_ids: List[Union[int, float]] = None,
layer: Optional[int] = None,
agg: Optional[str] = "count",
nodata_zones: Optional[Union[int, float]] = None,
nodata_values: Optional[Union[int, float]] = None,
) -> Union[pd.DataFrame, dd.DataFrame]:
"""
Calculate cross-tabulated (categorical stats) areas
between two datasets: a zone dataset `zones`, a value dataset `values`
(a value raster). Infinite and NaN values in `zones` and `values` will
be ignored.
Outputs a pandas DataFrame if `zones` and `values` are numpy backed.
Outputs a dask DataFrame if `zones` and `values` are dask with
numpy-backed xarray DataArrays.
Requires a DataArray with a single data dimension, here called the
"values", indexed using either 2D or 3D coordinates.
DataArrays with 3D coordinates are expected to contain values
distributed over different categories that are indexed by the
additional coordinate. Such an array would reduce to the
2D-coordinate case if collapsed across the categories (e.g. if one
did ``aggc.sum(dim='cat')`` for a categorical dimension ``cat``).
Parameters
----------
zones : xr.DataArray
2D data array of integers or floats.
A zone is all the cells in a raster that have the same value,
whether or not they are contiguous. The input `zones` raster defines
the shape, values, and locations of the zones. An unique field
in the zone input is specified to define the zones.
values : xr.DataArray
2D or 3D data array of integers or floats.
The input value raster contains the input values used in
calculating the categorical statistic for each zone.
zone_ids: List of ints, or floats
List of zones to be included in calculation. If no zone_ids provided,
all zones will be used.
cat_ids: List of ints, or floats
List of categories to be included in calculation.
If no cat_ids provided, all categories will be used.
layer: int, default=0
index of the categorical dimension layer inside the `values` DataArray.
agg: str, default = 'count'
Aggregation method.
If the data is 2D, available options are: percentage, count.
If the data is 3D, available option is: count.
nodata_zones: int, float, default=None
Nodata value in `zones` raster.
Cells with `nodata` do not belong to any zone,
and thus excluded from calculation.
nodata_values: int, float, default=None
Nodata value in `values` raster.
Cells with `nodata` do not belong to any zone,
and thus excluded from calculation.
Returns
-------
crosstab_df : Union[pandas.DataFrame, dask.dataframe.DataFrame]
A pandas DataFrame, or an uncomputed dask DataFrame,
where each column is a categorical value and each row is a zone
with zone id. Each entry presents the statistics, which computed
using the specified aggregation method, of the category over the zone.
Examples
--------
.. plot::
:include-source:
import dask.array as da
import numpy as np
import xarray as xr
from xrspatial.zonal import crosstab
values_data = np.asarray([[0, 0, 10, 20],
[0, 0, 0, 10],
[0, np.nan, 20, 50],
[10, 30, 40, np.inf],
[10, 10, 50, 0]])
values = xr.DataArray(values_data)
zones_data = np.asarray([[1, 1, 6, 6],
[1, np.nan, 6, 6],
[3, 5, 6, 6],
[3, 5, 7, np.nan],
[3, 7, 7, 0]])
zones = xr.DataArray(zones_data)
values_dask = xr.DataArray(da.from_array(values, chunks=(3, 3)))
zones_dask = xr.DataArray(da.from_array(zones, chunks=(3, 3)))
.. sourcecode:: python
>>> # Calculate Crosstab, numpy case
>>> df = crosstab(zones=zones, values=values)
>>> print(df)
zone 0.0 10.0 20.0 30.0 40.0 50.0
0 0 1 0 0 0 0 0
1 1 3 0 0 0 0 0
2 3 1 2 0 0 0 0
3 5 0 0 0 1 0 0
4 6 1 2 2 0 0 1
5 7 0 1 0 0 1 1
>>> # Calculate Crosstab, dask case
>>> df = crosstab(zones=zones_dask, values=values_dask)
>>> print(df)
Dask DataFrame Structure:
zone 0.0 10.0 20.0 30.0 40.0 50.0
npartitions=5
0 float64 int64 int64 int64 int64 int64 int64
1 ... ... ... ... ... ... ...
... ... ... ... ... ... ... ...
4 ... ... ... ... ... ... ...
5 ... ... ... ... ... ... ...
Dask Name: astype, 1186 tasks
>>> print(df.compute())
zone 0.0 10.0 20.0 30.0 40.0 50.0
0 0 1 0 0 0 0 0
1 1 3 0 0 0 0 0
2 3 1 2 0 0 0 0
3 5 0 0 0 1 0 0
4 6 1 2 2 0 0 1
5 7 0 1 0 0 1 1
"""
if not isinstance(zones, xr.DataArray):
raise TypeError("zones must be instance of DataArray")
if not isinstance(values, xr.DataArray):
raise TypeError("values must be instance of DataArray")
if zones.ndim != 2:
raise ValueError("zones must be 2D")
if not (
issubclass(zones.data.dtype.type, np.integer)
or issubclass(zones.data.dtype.type, np.floating)
):
raise ValueError("`zones` must be an xarray of integers or floats")
if not issubclass(values.data.dtype.type, np.integer) and not issubclass(
values.data.dtype.type, np.floating
):
raise ValueError("`values` must be an xarray of integers or floats")
if values.ndim not in [2, 3]:
raise ValueError("`values` must use either 2D or 3D coordinates.")
agg_2d = ["percentage", "count"]
agg_3d = ["count"]
if values.ndim == 2 and agg not in agg_2d:
raise ValueError(
f"`agg` method for 2D data array must be one of following {agg_2d}"
)
if values.ndim == 3 and agg not in agg_3d:
raise ValueError(
f"`agg` method for 3D data array must be one of following {agg_3d}"
)
if len(values.shape) == 3:
# 3D case
if layer is None:
layer = 0
try:
values.indexes[values.dims[layer]].values
except (IndexError, KeyError):
raise ValueError("Invalid `layer`")
dims = values.dims
reshape_dims = [dims[layer]] + [d for d in dims if d != dims[layer]]
# transpose by that category dimension
values = values.transpose(*reshape_dims)
if zones.shape != values.shape[1:]:
raise ValueError("Incompatible shapes")
if isinstance(values.data, np.ndarray):
# numpy case
crosstab_df = _crosstab_numpy(
zones, values, zone_ids, cat_ids, nodata_zones, nodata_values, agg
)
else:
# dask case
crosstab_df = _crosstab_dask(
zones, values, zone_ids, cat_ids, nodata_zones, nodata_values, agg
)
return crosstab_df
def apply(
zones: xr.DataArray,
values: xr.DataArray,
func: Callable,
nodata: Optional[int] = 0
):
"""
Apply a function to the `values` agg within zones in `zones` agg.
Change the agg content.
Parameters
----------
zones : xr.DataArray
zones.values is a 2d array of integers. A zone is all the | |
<filename>silx/gui/fit/FitWidgets.py
# coding: utf-8
# /*##########################################################################
# Copyright (C) 2004-2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ######################################################################### */
"""Collection of widgets used to build
:class:`silx.gui.fit.FitWidget.FitWidget`"""
from collections import OrderedDict
from silx.gui import qt
from silx.gui.fit.Parameters import Parameters
QTVERSION = qt.qVersion()
__authors__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__date__ = "13/10/2016"
class FitActionsButtons(qt.QWidget):
    """Widget with 3 ``QPushButton``:

    The buttons can be accessed as public attributes::

        - ``EstimateButton``
        - ``StartFitButton``
        - ``DismissButton``

    You will typically access these attributes to connect the buttons to
    actions. For instance, given 3 functions ``estimate``, ``runfit``
    and ``dismiss``::

        >>> fit_actions_buttons = FitActionsButtons()
        >>> fit_actions_buttons.EstimateButton.clicked.connect(estimate)
        >>> fit_actions_buttons.StartFitButton.clicked.connect(runfit)
        >>> fit_actions_buttons.DismissButton.clicked.connect(dismiss)
    """

    def __init__(self, parent=None):
        qt.QWidget.__init__(self, parent)
        self.resize(234, 53)

        outer_layout = qt.QGridLayout(self)
        outer_layout.setContentsMargins(11, 11, 11, 11)
        outer_layout.setSpacing(6)

        # single horizontal row: Estimate | stretch | Start Fit | stretch | Dismiss
        button_row = qt.QHBoxLayout(None)
        button_row.setContentsMargins(0, 0, 0, 0)
        button_row.setSpacing(6)

        self.EstimateButton = qt.QPushButton(self)
        self.EstimateButton.setText("Estimate")
        button_row.addWidget(self.EstimateButton)

        button_row.addItem(qt.QSpacerItem(20, 20,
                                          qt.QSizePolicy.Expanding,
                                          qt.QSizePolicy.Minimum))

        self.StartFitButton = qt.QPushButton(self)
        self.StartFitButton.setText("Start Fit")
        button_row.addWidget(self.StartFitButton)

        button_row.addItem(qt.QSpacerItem(20, 20,
                                          qt.QSizePolicy.Expanding,
                                          qt.QSizePolicy.Minimum))

        self.DismissButton = qt.QPushButton(self)
        self.DismissButton.setText("Dismiss")
        button_row.addWidget(self.DismissButton)

        outer_layout.addLayout(button_row, 0, 0)
class FitStatusLines(qt.QWidget):
    """Widget with 2 greyed out write-only ``QLineEdit``.

    These text widgets can be accessed as public attributes::

        - ``StatusLine``
        - ``ChisqLine``

    You will typically access these widgets to update the displayed
    text::

        >>> fit_status_lines = FitStatusLines()
        >>> fit_status_lines.StatusLine.setText("Ready")
        >>> fit_status_lines.ChisqLine.setText("%6.2f" % 0.01)
    """

    def __init__(self, parent=None):
        qt.QWidget.__init__(self, parent)
        self.resize(535, 47)

        row = qt.QHBoxLayout(self)
        row.setContentsMargins(0, 0, 0, 0)
        row.setSpacing(6)

        # "Status:" label followed by its read-only line edit
        self.StatusLabel = qt.QLabel(self)
        self.StatusLabel.setText("Status:")
        row.addWidget(self.StatusLabel)

        self.StatusLine = qt.QLineEdit(self)
        self.StatusLine.setText("Ready")
        self.StatusLine.setReadOnly(1)
        row.addWidget(self.StatusLine)

        # "Reduced chisq:" label followed by its read-only line edit
        self.ChisqLabel = qt.QLabel(self)
        self.ChisqLabel.setText("Reduced chisq:")
        row.addWidget(self.ChisqLabel)

        self.ChisqLine = qt.QLineEdit(self)
        self.ChisqLine.setMaximumSize(qt.QSize(16000, 32767))
        self.ChisqLine.setText("")
        self.ChisqLine.setReadOnly(1)
        row.addWidget(self.ChisqLine)
class FitConfigWidget(qt.QWidget):
    """Widget to select a fit theory and a background theory, load new
    fit theory definition files, and open an advanced configuration
    dialog through a "Configure" button.

    This is used in :class:`silx.gui.fit.FitWidget.FitWidget`, to offer
    an interface to quickly modify the main parameters prior to running
    a fit:

        - select a fitting function through :attr:`FunComBox`
        - select a background function through :attr:`BkgComBox`
        - open a dialog for modifying advanced parameters through
          :attr:`FunConfigureButton`
    """

    def __init__(self, parent=None):
        qt.QWidget.__init__(self, parent)
        self.setWindowTitle("FitConfigGUI")

        grid = qt.QGridLayout(self)
        grid.setContentsMargins(0, 0, 0, 0)
        grid.setSpacing(6)

        # row 0: fit function selector
        self.FunLabel = qt.QLabel(self)
        self.FunLabel.setText("Function")
        grid.addWidget(self.FunLabel, 0, 0)

        self.FunComBox = qt.QComboBox(self)
        self.FunComBox.addItem("Add Function(s)")
        fun_item = self.FunComBox.findText("Add Function(s)")
        self.FunComBox.setItemData(fun_item,
                                   "Load fit theories from a file",
                                   qt.Qt.ToolTipRole)
        grid.addWidget(self.FunComBox, 0, 1)

        # row 1: background function selector
        self.BkgLabel = qt.QLabel(self)
        self.BkgLabel.setText("Background")
        grid.addWidget(self.BkgLabel, 1, 0)

        self.BkgComBox = qt.QComboBox(self)
        self.BkgComBox.addItem("Add Background(s)")
        bkg_item = self.BkgComBox.findText("Add Background(s)")
        self.BkgComBox.setItemData(bkg_item,
                                   "Load background theories from a file",
                                   qt.Qt.ToolTipRole)
        grid.addWidget(self.BkgComBox, 1, 1)

        # "Configure" buttons next to each selector
        self.FunConfigureButton = qt.QPushButton(self)
        self.FunConfigureButton.setText("Configure")
        self.FunConfigureButton.setToolTip(
            "Open a configuration dialog for the selected function")
        grid.addWidget(self.FunConfigureButton, 0, 2)

        self.BgConfigureButton = qt.QPushButton(self)
        self.BgConfigureButton.setText("Configure")
        self.BgConfigureButton.setToolTip(
            "Open a configuration dialog for the selected background")
        grid.addWidget(self.BgConfigureButton, 1, 2)

        # weighted-fit toggle, spanning both rows
        self.WeightCheckBox = qt.QCheckBox(self)
        self.WeightCheckBox.setText("Weighted fit")
        self.WeightCheckBox.setToolTip(
            "Enable usage of weights in the least-square problem.\n Use"
            " the uncertainties (sigma) if provided, else use sqrt(y).")
        grid.addWidget(self.WeightCheckBox, 0, 3, 2, 1)

        grid.setColumnStretch(4, 1)
class ParametersTab(qt.QTabWidget):
"""This widget provides tabs to display and modify fit parameters. Each
tab contains a table with fit data such as parameter names, estimated
values, fit constraints, and final fit results.
The usual way to initialize the table is to fill it with the fit
parameters from a :class:`silx.math.fit.fitmanager.FitManager` object, after
the estimation process or after the final fit.
In the following example we use a :class:`ParametersTab` to display the
results of two separate fits::
from silx.math.fit import fittheories
from silx.math.fit import fitmanager
from silx.math.fit import functions
from silx.gui import qt
import numpy
a = qt.QApplication([])
# Create synthetic data
x = numpy.arange(1000)
y1 = functions.sum_gauss(x, 100, 400, 100)
fit = fitmanager.FitManager(x=x, y=y1)
fitfuns = fittheories.FitTheories()
fit.addtheory(theory="Gaussian",
function=functions.sum_gauss,
parameters=("height", "peak center", "fwhm"),
estimate=fitfuns.estimate_height_position_fwhm)
fit.settheory('Gaussian')
fit.configure(PositiveFwhmFlag=True,
PositiveHeightAreaFlag=True,
AutoFwhm=True,)
# Fit
fit.estimate()
fit.runfit()
# Show first fit result in a tab in our widget
w = ParametersTab()
w.show()
w.fillFromFit(fit.fit_results, view='Gaussians')
# new synthetic data
y2 = functions.sum_splitgauss(x,
100, 400, 100, 40,
10, 600, 50, 500,
80, 850, 10, 50)
fit.setData(x=x, y=y2)
# Define new theory
fit.addtheory(theory="Asymetric gaussian",
function=functions.sum_splitgauss,
parameters=("height", "peak center", "left fwhm", "right fwhm"),
estimate=fitfuns.estimate_splitgauss)
fit.settheory('Asymetric gaussian')
# Fit
fit.estimate()
fit.runfit()
# Show first fit result in another tab in our widget
w.fillFromFit(fit.fit_results, view='Asymetric gaussians')
a.exec_()
"""
def __init__(self, parent=None, name="FitParameters"):
"""
:param parent: Parent widget
:param name: Widget title
"""
qt.QTabWidget.__init__(self, parent)
self.setWindowTitle(name)
self.setContentsMargins(0, 0, 0, 0)
self.views = OrderedDict()
"""Dictionary of views. Keys are view names,
items are :class:`Parameters` widgets"""
self.latest_view = None
"""Name of latest view"""
# the widgets/tables themselves
self.tables = {}
"""Dictionary of :class:`silx.gui.fit.parameters.Parameters` objects.
These objects store fit results
"""
self.setContentsMargins(10, 10, 10, 10)
def setView(self, view=None, fitresults=None):
"""Add or update a table. Fill it with data from a fit
:param view: Tab name to be added or updated. If ``None``, use the
latest view.
:param fitresults: Fit data to be added to the table
:raise: KeyError if no view name specified and no latest view
available.
"""
if view is None:
if self.latest_view is not None:
view = self.latest_view
else:
raise KeyError(
"No view available. You must specify a view" +
" name the first time you call this method."
)
if view in self.tables.keys():
table = self.tables[view]
else:
# create the parameters instance
self.tables[view] = Parameters(self)
table = self.tables[view]
self.views[view] = table
self.addTab(table, str(view))
if fitresults is not None:
table.fillFromFit(fitresults)
self.setCurrentWidget(self.views[view])
self.latest_view = view
def renameView(self, oldname=None, newname=None):
"""Rename a view (tab)
:param oldname: Name of the view to be renamed
:param newname: New name of the view"""
error = 1
if newname is not None:
if newname not in self.views.keys():
if oldname in self.views.keys():
parameterlist = self.tables[oldname].getFitResults()
self.setView(view=newname, fitresults=parameterlist)
self.removeView(oldname)
error = 0
return error
def fillFromFit(self, fitparameterslist, view=None):
"""Update a view with data from a fit (alias for :meth:`setView`)
:param view: Tab name to be added or updated (default: latest view)
:param fitparameterslist: Fit data to be added to the table
"""
self.setView(view=view, fitresults=fitparameterslist)
def getFitResults(self, name=None):
"""Call :meth:`getFitResults` for the
:class:`silx.gui.fit.parameters.Parameters` corresponding to the
latest table or to the named table (if ``name`` is not
``None``). This return a list of dictionaries in the format used by
:class:`silx.math.fit.fitmanager.FitManager` to store fit parameter
results.
:param name: View name.
"""
if name is None:
name = self.latest_view
return self.tables[name].getFitResults()
    def removeView(self, name):
        """Remove a view by name.

        Removes the tab from the widget and drops both internal
        references (:attr:`tables` and :attr:`views`) for that name.

        :param name: View name.
        """
        if name in self.views:
            # NOTE(review): setView() stores the same widget in both
            # self.tables and self.views, so the second indexOf() below
            # normally returns -1 after the first removal; Qt's
            # removeTab() ignores an invalid index, making the second
            # call a no-op. Kept as-is to avoid any behavior change.
            index = self.indexOf(self.tables[name])
            self.removeTab(index)
            index = self.indexOf(self.views[name])
            self.removeTab(index)
            # drop both references so the widget can be garbage collected
            del self.tables[name]
            del self.views[name]
def removeAllViews(self, keep=None):
"""Remove all views, except the one specified (argument
``keep``)
:param keep: Name of the view to be kept."""
for view in self.tables:
if view != keep:
self.removeView(view)
def getHtmlText(self, name=None):
"""Return the table data as HTML
:param name: View name."""
if name is None:
name = self.latest_view
table = self.tables[name]
lemon = ("#%x%x%x" % (255, 250, 205)).upper()
hcolor = ("#%x%x%x" % (230, 240, 249)).upper()
text = ""
text += "<nobr>"
text += "<table>"
text += "<tr>"
ncols = table.columnCount()
for l in range(ncols):
text += ('<td align="left" bgcolor="%s"><b>' % hcolor)
if QTVERSION < '4.0.0':
text += (str(table.horizontalHeader().label(l)))
else:
text += (str(table.horizontalHeaderItem(l).text()))
text += "</b></td>"
text += "</tr>"
nrows | |
all specified nodes for the deletion task.
:param nodes:
:param mclient_remove:
:return: dict
"""
nodes_to_delete = []
nodes_to_restore = []
for node in nodes:
nodes_to_delete.append(
cls.format_node_to_delete(node, mclient_remove=mclient_remove)
)
if not node.pending_deletion:
objects.Node.update(node, {'pending_deletion': True})
db().flush()
node_to_restore = cls.format_node_to_restore(node)
if node_to_restore:
nodes_to_restore.append(node_to_restore)
return {
'nodes_to_delete': nodes_to_delete,
'nodes_to_restore': nodes_to_restore,
}
@classmethod
def get_task_nodes_for_cluster(cls, cluster):
return cls.prepare_nodes_for_task(TaskHelper.nodes_to_delete(cluster))
@classmethod
def remove_undeployed_nodes_from_db(cls, nodes_to_delete):
"""Removes undeployed nodes from the given list from the DB.
:param List nodes_to_delete: List of nodes as returned by
:meth:`DeletionTask.format_node_to_delete`
:returns: Remaining (deployed) nodes to delete.
"""
node_names_dict = dict(
(node['id'], node['slave_name']) for node in nodes_to_delete)
node_ids = [n['id'] for n in nodes_to_delete]
discovery_ids = objects.NodeCollection.discovery_node_ids()
objects.NodeCollection.delete_by_ids(
set(discovery_ids) & set(node_ids))
db.commit()
remaining_nodes_db = db().query(
Node.id).filter(Node.id.in_(node_names_dict.keys()))
remaining_nodes_ids = set([
row[0] for row
in remaining_nodes_db
])
remaining_nodes = filter(
lambda node: node['id'] in remaining_nodes_ids,
nodes_to_delete
)
deleted_nodes_ids = set(node_names_dict).difference(
remaining_nodes_ids)
slave_names_joined = ', '.join([slave_name
for id, slave_name
in six.iteritems(node_names_dict)
if id in deleted_nodes_ids])
if len(slave_names_joined):
logger.info("Nodes are not deployed yet, can't clean MBR: %s",
slave_names_joined)
return remaining_nodes
    @classmethod
    def execute(cls, task, nodes=None, respond_to='remove_nodes_resp',
                check_ceph=False):
        """Call remote Astute method to remove nodes from a cluster

        :param task: Task object
        :param nodes: List of nodes to delete
        :param respond_to: RPC method which receives data from remote method
        :param check_ceph: Boolean flag to tell Astute to run (or not run)
            checks to prevent deletion of OSD nodes. If True this task will
            fail if a node to be deleted has Ceph data on it. This flag must
            be False if deleting all nodes.
        """
        logger.debug("DeletionTask.execute(task=%s, nodes=%s)",
                     task.uuid, nodes)

        task_uuid = task.uuid
        logger.debug("Nodes deletion task is running")

        # TODO(ikalnitsky): remove this, let the flow always go through Astute
        # No need to call Astute if no nodes are specified: an empty
        # cluster deletion is acknowledged locally with an immediate
        # "ready" response instead of an RPC round trip.
        if task.name == consts.TASK_NAMES.cluster_deletion and \
                not (nodes and nodes['nodes_to_delete']):
            logger.debug("No nodes specified, exiting")
            rcvr = rpc.receiver.NailgunReceiver()
            rcvr.remove_cluster_resp(
                task_uuid=task_uuid,
                status=consts.TASK_STATUSES.ready,
                progress=100
            )
            return

        nodes_to_delete = nodes['nodes_to_delete']
        nodes_to_restore = nodes['nodes_to_restore']

        # check if there's a Zabbix server in an environment
        # and if there is, remove hosts
        if (task.name != consts.TASK_NAMES.cluster_deletion
                and ZabbixManager.get_zabbix_node(task.cluster)):
            zabbix_credentials = ZabbixManager.get_zabbix_credentials(
                task.cluster
            )
            logger.debug("Removing nodes %s from zabbix", nodes_to_delete)
            try:
                ZabbixManager.remove_from_zabbix(
                    zabbix_credentials, nodes_to_delete
                )
            except (errors.CannotMakeZabbixRequest,
                    errors.ZabbixRequestError) as e:
                # Zabbix cleanup is best-effort: node deletion proceeds
                # even if the monitoring hosts could not be removed.
                logger.warning("%s, skipping removing nodes from Zabbix", e)

        # Undeployed nodes are purged from the DB directly; only the
        # deployed ones are forwarded to Astute below.
        nodes_to_delete = cls.remove_undeployed_nodes_from_db(nodes_to_delete)

        logger.debug(
            "Removing nodes from database and pending them to clean their "
            "MBR: %s",
            ', '.join(node['slave_name'] for node in nodes_to_delete)
        )

        msg_delete = make_astute_message(
            task,
            'remove_nodes',
            respond_to,
            {
                'nodes': nodes_to_delete,
                'check_ceph': check_ceph,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                    'master_ip': settings.MASTER_IP,
                }
            }
        )
        db().flush()

        # only fake tasks
        if cls.use_fake() and nodes_to_restore:
            msg_delete['args']['nodes_to_restore'] = nodes_to_restore
        # /only fake tasks

        logger.debug("Calling rpc remove_nodes method with nodes %s",
                     nodes_to_delete)
        rpc.cast('naily', msg_delete)
@classmethod
def use_fake(cls):
return settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP
class DeleteIBPImagesTask(object):
    """Removes provisioning (IBP) images from the master node."""

    @classmethod
    def message(cls, task, image_data):
        """Build an Astute shell task deleting every image file."""
        # local file path for each image URI in image_data
        image_paths = [
            os.path.join(
                settings.PROVISIONING_IMAGES_PATH,
                os.path.basename(
                    six.moves.urllib.parse.urlsplit(img['uri']).path))
            for img in six.itervalues(image_data)
        ]
        shell_params = {
            'parameters': {
                'cmd': 'rm -f {0}'.format(' '.join(image_paths)),
                'timeout': settings.REMOVE_IMAGES_TIMEOUT,
            }
        }
        return make_astute_message(
            task,
            'execute_tasks',
            'remove_images_resp',
            {
                'tasks': [tasks_templates.make_shell_task(
                    [consts.MASTER_NODE_UID], shell_params
                )]
            }
        )

    @classmethod
    def execute(cls, cluster, image_data):
        """Persist a remove_images task and hand it off to Astute."""
        removal_task = Task(name=consts.TASK_NAMES.remove_images,
                            cluster=cluster)
        db().add(removal_task)
        db().flush()
        rpc.cast('naily', cls.message(removal_task, image_data))
class StopDeploymentTask(object):
    """Aborts a running provisioning/deployment task via Astute."""

    @classmethod
    def message(cls, task, stop_task):
        """Build the Astute message that aborts ``stop_task``.

        :param task: the "stop deployment" Task object
        :param stop_task: the provisioning or deployment task to abort
        :returns: dict ready for ``rpc.cast``
        """
        # Every node that has not reached 'ready' may still be mid-change
        # and must be told to stop.
        nodes_to_stop = db().query(Node).filter(
            Node.cluster_id == task.cluster.id
        ).filter(
            not_(Node.status == 'ready')
        ).yield_per(100)
        rpc_message = make_astute_message(
            task,
            "stop_deploy_task",
            "stop_deployment_resp",
            {
                "stop_task_uuid": stop_task.uuid,
                "nodes": [
                    {
                        'uid': n.uid,
                        'roles': n.roles,
                        'slave_name': objects.Node.get_slave_name(n),
                        'admin_ip': objects.Cluster.get_network_manager(
                            n.cluster
                        ).get_admin_ip_for_node(n.id)
                    } for n in nodes_to_stop
                ],
                "engine": {
                    "url": settings.COBBLER_URL,
                    "username": settings.COBBLER_USER,
                    # Fixed: a corrupted "<PASSWORD>" placeholder stood here
                    # (syntax error); use the same Cobbler credential as the
                    # other Astute messages in this module.
                    "password": settings.COBBLER_PASSWORD,
                    "master_ip": settings.MASTER_IP,
                }
            }
        )
        db().commit()
        return rpc_message

    @classmethod
    def execute(cls, task, deploy_task=None, provision_task=None):
        """Send a stop message for the provision and/or deploy task."""
        if provision_task:
            rpc.cast(
                'naily',
                cls.message(task, provision_task),
                service=True
            )
        if deploy_task:
            rpc.cast(
                'naily',
                cls.message(task, deploy_task),
                service=True
            )
class ResetEnvironmentTask(object):
    """Builds the Astute message that resets every node of a cluster."""

    @classmethod
    def message(cls, task):
        """Build the "reset_environment" message for Astute.

        :param task: the reset Task object
        :returns: dict ready for ``rpc.cast``
        """
        # All nodes of the cluster are reset, regardless of status.
        nodes_to_reset = db().query(Node).filter(
            Node.cluster_id == task.cluster.id
        ).yield_per(100)
        rpc_message = make_astute_message(
            task,
            "reset_environment",
            "reset_environment_resp",
            {
                "nodes": [
                    {
                        'uid': n.uid,
                        'roles': n.roles,
                        'slave_name': objects.Node.get_slave_name(n)
                    } for n in nodes_to_reset
                ],
                "engine": {
                    "url": settings.COBBLER_URL,
                    "username": settings.COBBLER_USER,
                    # Fixed: a corrupted "<PASSWORD>PASSWORD" placeholder
                    # stood here (syntax error); use the Cobbler credential
                    # from settings as elsewhere in this module.
                    "password": settings.COBBLER_PASSWORD,
                    "master_ip": settings.MASTER_IP,
                }
            }
        )
        db().commit()
        return rpc_message
class RemoveClusterKeys(object):
    """Task that deletes all ssh and ssl data for deployed environment

    Meant to be run after environment reset to make sure that new keys will be
    generated.
    """

    @classmethod
    def message(cls, task):
        """Build an Astute shell task wiping the cluster's key directory."""
        shell_task = tasks_templates.make_shell_task(
            [consts.MASTER_NODE_UID],
            {
                "parameters": {
                    "cmd": "rm -rf /var/lib/fuel/keys/{0}".format(
                        task.cluster.id),
                    "timeout": 30
                }
            }
        )
        return make_astute_message(
            task,
            "execute_tasks",
            "reset_environment_resp",
            {"tasks": [shell_task]}
        )
class RemoveIronicBootstrap(object):
    """Task that deletes Ironic's bootstrap images

    Meant to be run after environment reset to make sure that new images will
    be generated.
    """

    @classmethod
    def message(cls, task):
        """Build an Astute shell task wiping the cluster's Ironic images."""
        shell_task = tasks_templates.make_shell_task(
            [consts.MASTER_NODE_UID],
            {
                "parameters": {
                    "cmd": "rm -rf /var/www/nailgun/bootstrap/"
                           "ironic/{0}".format(task.cluster.id),
                    "timeout": 30
                }
            }
        )
        return make_astute_message(
            task,
            "execute_tasks",
            "reset_environment_resp",
            {"tasks": [shell_task]}
        )
class ClusterDeletionTask(object):
    """Deletes a whole cluster: its IBP images (when applicable) and all
    of its nodes."""

    @classmethod
    def execute(cls, task):
        logger.debug("Cluster deletion task is running")
        cluster = task.cluster
        attrs = objects.Attributes.merged_attrs_values(cluster.attributes)
        provision = attrs.get('provision')
        if not provision:
            logger.debug("Skipping IBP images deletion task")
        elif (cluster.release.operating_system == consts.RELEASE_OS.ubuntu
                and provision['method'] == consts.PROVISION_METHODS.image):
            # image-based provisioning on Ubuntu: drop the images first
            logger.debug("Delete IBP images task is running")
            DeleteIBPImagesTask.execute(cluster, provision['image_data'])
        DeletionTask.execute(
            task,
            nodes=DeletionTask.get_task_nodes_for_cluster(cluster),
            respond_to='remove_cluster_resp'
        )
class BaseNetworkVerification(object):
    """Base class for network verification subtasks.

    Builds the per-node interface/VLAN description consumed by the
    net-probe tool to check L2 connectivity.
    """

    def __init__(self, task, config):
        """
        :param task: verification Task object
        :param config: list of network-group dicts (each with 'name' and
            'vlans' keys) describing the networks to verify
        """
        self.task = task
        self.config = config

    def get_ifaces_on_undeployed_node(self, node, node_json, has_public):
        """Fill ``node_json`` with ifaces/vlans for a not-yet-deployed node.

        :param node: Node DB object
        :param node_json: dict mutated in place ('networks',
            'excluded_networks', optionally 'bonds')
        :param has_public: whether public VLANs are checked on this node
        """
        # Save bonds info to be able to check net-probe results w/o
        # need to access nodes in DB (node can be deleted before the test is
        # completed). This info is needed for non-deployed nodes only.
        bonds = {}
        for bond in node.bond_interfaces:
            bonds[bond.name] = sorted(s.name for s in bond.slaves)
        if bonds:
            node_json['bonds'] = bonds

        for iface in node.nic_interfaces:
            assigned_networks = iface.assigned_networks_list
            # In case of present bond interfaces - collect assigned networks
            # against bonds slave NICs. We should skip LACP bonds Fuel
            # do not setup them for network_checker now.
            if iface.bond:
                assigned_networks = iface.bond.assigned_networks_list

            vlans = []
            for ng in assigned_networks:
                # Handle FuelWeb admin network first.
                if ng.group_id is None:
                    vlans.append(0)
                    continue
                if ng.name == consts.NETWORKS.public and not has_public:
                    continue
                # List-comprehension indexing instead of filter(...)[0]:
                # under Python 3 filter() returns a non-subscriptable
                # iterator. Semantics are identical under Python 2.
                data_ng = [cfg for cfg in self.config
                           if cfg['name'] == ng.name][0]
                if data_ng['vlans']:
                    vlans.extend(data_ng['vlans'])
                else:
                    # in case absence of vlans net_probe will
                    # send packages on untagged iface
                    vlans.append(0)

            if not vlans:
                continue
            if iface.bond and iface.bond.mode == consts.BOND_MODES.l_802_3ad:
                # LACP bonds are not probed; record them as excluded
                node_json['excluded_networks'].append(
                    {'iface': iface.name})
            else:
                node_json['networks'].append(
                    {'iface': iface.name, 'vlans': vlans})

    def get_ifaces_on_deployed_node(self, node, node_json, has_public):
        """Fill ``node_json`` with ifaces/vlans for a deployed node.

        :param node: Node DB object
        :param node_json: dict mutated in place ('networks')
        :param has_public: whether public VLANs are checked on this node
        """
        for iface in node.interfaces:
            # In case of present bond interfaces - collect assigned networks
            # against bonds themselves. We can check bonds as they are up on
            # deployed nodes.
            vlans = []
            for ng in iface.assigned_networks_list:
                # Handle FuelWeb admin network first.
                if ng.group_id is None:
                    vlans.append(0)
                    continue
                if ng.name == consts.NETWORKS.public and not has_public:
                    continue
                # See get_ifaces_on_undeployed_node: filter(...)[0] breaks
                # under Python 3.
                data_ng = [cfg for cfg in self.config
                           if cfg['name'] == ng.name][0]
                if data_ng['vlans']:
                    vlans.extend(data_ng['vlans'])
                else:
                    # in case absence of vlans net_probe will
                    # send packages on untagged iface
                    vlans.append(0)

            if vlans:
                node_json['networks'].append(
                    {'iface': iface.name, 'vlans': vlans})

    def get_message_body(self):
        """Assemble the 'nodes'/'offline' payload for the verification RPC."""
        nodes = []
        nodes_w_public = []
        offline_nodes = 0
        # First pass: find online nodes expected to carry a public IP.
        for node in self.task.cluster.nodes:
            if node.online and objects.Node.should_have_public_with_ip(node):
                nodes_w_public.append(node.id)
        if len(nodes_w_public) < 2:
            # don't check public VLANs if there is the only node with public
            nodes_w_public = []
        for node in self.task.cluster.nodes:
            if node.offline:
                offline_nodes += 1
                continue
            node_json = {
                'uid': node.id,
                'name': node.name,
                'status': node.status,
                'networks': [],
                'excluded_networks': [],
            }
            has_public = node.id in nodes_w_public
            # Check bonds on deployed nodes and check bonds slave NICs on
            # undeployed ones.
            if node.status == consts.NODE_STATUSES.ready:
                self.get_ifaces_on_deployed_node(node, node_json, has_public)
            else:
                self.get_ifaces_on_undeployed_node(node, node_json,
                                                   has_public)
            nodes.append(node_json)
        return {
            'nodes': nodes,
            'offline': offline_nodes
        }

    def get_message(self):
        """Wrap the message body into a full Astute message."""
        msg_body = self.get_message_body()
        message = make_astute_message(
            self.task,
            self.task.name,
            '{0}_resp'.format(self.task.name),
            msg_body
        )
        return message

    def execute(self, task=None):
        """Build the verification message and cast it to Astute."""
        # task is there for prev compatibility
        message = self.get_message()
        logger.debug("%s method is called with: %s",
                     self.task.name, message)
        db().commit()
        rpc.cast('naily', message)

    @classmethod
    def enabled(cls, cluster):
        """Verify that subtask is enabled based on cluster configuration."""
        return True
class VerifyNetworksForTemplateMixin(object):
@staticmethod
def _get_private_vlan_range(cluster, template):
if cluster.network_config.segmentation_type == \
consts.NEUTRON_SEGMENT_TYPES.vlan and \
'neutron/private' in template['roles']:
vlan_range = cluster.network_config.vlan_range
return range(vlan_range[0], vlan_range[1] + 1)
return None
@classmethod
def _add_interface(cls, ifaces, ifname, vlan_ids, bond_name=None):
ifname, vlan = cls._parse_template_iface(ifname)
bond_name = bond_name | |
assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
assert batch[0][1].device.index == 0 and batch[0][1].type() == 'torch.cuda.FloatTensor'
# tensor dict
batch = [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)}]
batch = trainer.transfer_batch_to_gpu(batch, 0)
assert batch[0]['a'].device.index == 0 and batch[0]['a'].type() == 'torch.cuda.FloatTensor'
assert batch[0]['b'].device.index == 0 and batch[0]['b'].type() == 'torch.cuda.FloatTensor'
# tuple of tensor list and list of tensor dict
batch = ([torch.rand(2, 3) for _ in range(2)],
[{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)} for _ in range(2)])
batch = trainer.transfer_batch_to_gpu(batch, 0)
assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
assert batch[1][0]['a'].device.index == 0
assert batch[1][0]['a'].type() == 'torch.cuda.FloatTensor'
assert batch[1][0]['b'].device.index == 0
assert batch[1][0]['b'].type() == 'torch.cuda.FloatTensor'
def test_early_stopping_cpu_model():
    """
    Test each of the trainer options
    :return:
    """
    reset_seed()

    early_stop = EarlyStopping(monitor='val_loss')
    options = dict(
        early_stop_callback=early_stop,
        gradient_clip_val=1.0,
        overfit_pct=0.20,
        track_grad_norm=2,
        print_nan_grads=True,
        show_progress_bar=True,
        logger=get_test_tube_logger(),
        train_percent_check=0.1,
        val_percent_check=0.1,
    )

    model, hparams = get_model()
    run_gpu_model_test(options, model, hparams, on_gpu=False)

    # freeze/unfreeze must also work on CPU
    model.freeze()
    model.unfreeze()
def test_no_val_module():
    """
    Tests use case where trainer saves the model, and user loads it from tags
    independently
    :return:
    """
    reset_seed()

    hparams = get_hparams()

    class CurrentTestModel(LightningTestModelBase):
        pass

    model = CurrentTestModel(hparams)

    ckpt_dir = init_save_dir()

    # logger file to get meta
    logger = get_test_tube_logger(False)
    logger.log_hyperparams(hparams)
    logger.save()

    # fit model
    trainer = Trainer(
        max_nb_epochs=1,
        logger=logger,
        checkpoint_callback=ModelCheckpoint(ckpt_dir),
    )
    result = trainer.fit(model)

    # training must finish successfully
    assert result == 1, 'amp + ddp model failed to complete'

    # save model
    new_weights_path = os.path.join(ckpt_dir, 'save_test.ckpt')
    trainer.save_checkpoint(new_weights_path)

    # reload the checkpoint using only saved tags + weights
    exp = logger.experiment
    tags_path = os.path.join(
        exp.get_data_path(exp.name, exp.version), 'meta_tags.csv')
    restored = LightningTestModel.load_from_metrics(
        weights_path=new_weights_path, tags_csv=tags_path)
    restored.eval()

    # make prediction
    clear_save_dir()
def test_no_val_end_module():
    """
    Tests use case where trainer saves the model, and user loads it from tags
    independently
    :return:
    """
    reset_seed()

    class CurrentTestModel(LightningValidationStepMixin, LightningTestModelBase):
        pass

    hparams = get_hparams()
    model = CurrentTestModel(hparams)

    ckpt_dir = init_save_dir()

    # logger file to get meta
    logger = get_test_tube_logger(False)
    logger.log_hyperparams(hparams)
    logger.save()

    # fit model
    trainer = Trainer(
        max_nb_epochs=1,
        logger=logger,
        checkpoint_callback=ModelCheckpoint(ckpt_dir),
    )
    result = trainer.fit(model)

    # training must finish successfully
    assert result == 1, 'amp + ddp model failed to complete'

    # save model
    new_weights_path = os.path.join(ckpt_dir, 'save_test.ckpt')
    trainer.save_checkpoint(new_weights_path)

    # reload the checkpoint using only saved tags + weights
    exp = logger.experiment
    tags_path = os.path.join(
        exp.get_data_path(exp.name, exp.version), 'meta_tags.csv')
    restored = LightningTestModel.load_from_metrics(
        weights_path=new_weights_path, tags_csv=tags_path)
    restored.eval()

    # make prediction
    clear_save_dir()
def test_simple_cpu():
    """
    Verify continue training session on CPU
    :return:
    """
    reset_seed()

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # fit model with a minimal CPU configuration
    trainer = Trainer(
        max_nb_epochs=1,
        val_percent_check=0.1,
        train_percent_check=0.1,
    )
    result = trainer.fit(model)

    # training must finish successfully
    assert result == 1, 'amp + ddp model failed to complete'

    clear_save_dir()
def test_amp_single_gpu():
    """
    Make sure AMP + DDP work on a single GPU.
    :return:
    """
    reset_seed()

    # Only one CUDA device is needed for this test. The original also
    # required device_count() > 1 (copy-pasted from a 2-GPU ddp test),
    # which wrongly skipped this single-GPU test on 1-GPU machines.
    if not torch.cuda.is_available():
        warnings.warn('test_amp_single_gpu cannot run.'
                      ' Rerun on a GPU node to run this test')
        return

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=True,
        max_nb_epochs=1,
        gpus=1,
        distributed_backend='ddp',
        use_amp=True
    )

    run_gpu_model_test(trainer_options, model, hparams)
def test_no_amp_single_gpu():
    """
    Make sure AMP with the 'dp' backend on a single GPU is rejected:
    dp + amp is an unsupported combination and must raise.
    :return:
    """
    reset_seed()

    # Only one CUDA device is needed. The original also required
    # device_count() > 1 (copy-pasted from a 2-GPU ddp test), which
    # wrongly skipped this single-GPU test on 1-GPU machines.
    if not torch.cuda.is_available():
        warnings.warn('test_no_amp_single_gpu cannot run.'
                      ' Rerun on a GPU node to run this test')
        return

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=True,
        max_nb_epochs=1,
        gpus=1,
        distributed_backend='dp',
        use_amp=True
    )

    # dp + amp must be rejected by the trainer
    with pytest.raises((MisconfigurationException, ModuleNotFoundError)):
        run_gpu_model_test(trainer_options, model, hparams)
def test_cpu_restore_training():
    """
    Verify continue training session on CPU.

    Trains for two epochs while checkpointing under a fixed logger version,
    then rebuilds a fresh Trainer/model pair against that same version and
    checks — via the sanity-check hook — that epoch count and weights were
    restored before any new weight update happens.
    :return:
    """
    reset_seed()

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # logger file to get meta; pin the version so the restart below can
    # locate the same experiment again
    test_logger_version = 10
    logger = get_test_tube_logger(False, version=test_logger_version)
    logger.log_hyperparams(hparams)
    logger.save()

    trainer_options = dict(
        max_nb_epochs=2,
        val_check_interval=0.50,
        val_percent_check=0.2,
        train_percent_check=0.2,
        logger=logger,
        checkpoint_callback=ModelCheckpoint(save_dir)
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # remember where training stopped so the restore can be verified
    real_global_epoch = trainer.current_epoch

    # training complete
    assert result == 1, 'amp + ddp model failed to complete'

    # wipe-out trainer and model
    # retrain with not much data... this simulates picking training back up after slurm
    # we want to see if the weights come back correctly
    new_logger = get_test_tube_logger(False, version=test_logger_version)
    trainer_options = dict(
        max_nb_epochs=2,
        val_check_interval=0.50,
        val_percent_check=0.2,
        train_percent_check=0.2,
        logger=new_logger,
        checkpoint_callback=ModelCheckpoint(save_dir),
    )
    trainer = Trainer(**trainer_options)
    model = LightningTestModel(hparams)

    # set the epoch start hook so we can predict before the model does the full training
    def assert_good_acc():
        # the restored trainer must resume at the epoch training stopped at
        assert trainer.current_epoch == real_global_epoch and trainer.current_epoch > 0

        # if model and state loaded correctly, predictions will be good even though we
        # haven't trained with the new loaded model
        trainer.model.eval()
        for dataloader in trainer.get_val_dataloaders():
            run_prediction(dataloader, trainer.model)

    model.on_sanity_check_start = assert_good_acc

    # by calling fit again, we trigger training, loading weights from the cluster
    # and our hook to predict using current model before any more weight updates
    trainer.fit(model)

    clear_save_dir()
def test_amp_gpu_ddp():
    """
    Verify that DDP + AMP training runs on a multi-GPU node.
    :return:
    """
    if not can_run_gpu_test():
        return

    # pick a quasi-random rendezvous port so parallel runs don't collide
    os.environ['MASTER_PORT'] = str(np.random.randint(12000, 19000, 1)[0])

    reset_seed()

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    options = dict(
        show_progress_bar=True,
        max_nb_epochs=1,
        gpus=2,
        distributed_backend='ddp',
        use_amp=True
    )

    run_gpu_model_test(options, model, hparams)
def test_cpu_slurm_save_load():
    """
    Verify model save/load/checkpoint on CPU.

    Trains once, snapshots via hpc_save (simulating a SLURM pre-emption),
    then rebuilds the trainer against the same logger version and asserts —
    through the epoch-start hook — that global step and predictions are
    identical after the restore.
    :return:
    """
    reset_seed()

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # logger file to get meta
    logger = get_test_tube_logger(False)
    logger.log_hyperparams(hparams)
    logger.save()
    version = logger.version

    trainer_options = dict(
        max_nb_epochs=1,
        logger=logger,
        checkpoint_callback=ModelCheckpoint(save_dir)
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    real_global_step = trainer.global_step

    # training complete
    assert result == 1, 'amp + ddp model failed to complete'

    # predict with trained model before saving
    # make a prediction
    # NOTE(review): only the batch from the LAST test dataloader survives
    # this loop — confirm that is intended
    for dataloader in model.test_dataloader():
        for batch in dataloader:
            break

    x, y = batch
    x = x.view(x.size(0), -1)

    model.eval()
    pred_before_saving = model(x)

    # test HPC saving
    # simulate snapshot on slurm
    saved_filepath = trainer.hpc_save(save_dir, logger)
    assert os.path.exists(saved_filepath)

    # new logger file to get meta; reuse the same version so the restore
    # finds the earlier experiment
    logger = get_test_tube_logger(False, version=version)
    logger.log_hyperparams(hparams)
    logger.save()

    trainer_options = dict(
        max_nb_epochs=1,
        logger=logger,
        checkpoint_callback=ModelCheckpoint(save_dir),
    )
    trainer = Trainer(**trainer_options)
    model = LightningTestModel(hparams)

    # set the epoch start hook so we can predict before the model does the full training
    def assert_pred_same():
        # the global step must have been restored from the snapshot
        assert trainer.global_step == real_global_step and trainer.global_step > 0

        # predict with loaded model to make sure answers are the same
        trainer.model.eval()
        new_pred = trainer.model(x)
        assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1

    model.on_epoch_start = assert_pred_same

    # by calling fit again, we trigger training, loading weights from the cluster
    # and our hook to predict using current model before any more weight updates
    trainer.fit(model)

    clear_save_dir()
def test_loading_meta_tags():
    """Round-trip hyperparameters through the logger's meta_tags.csv file."""
    reset_seed()

    from argparse import Namespace

    hparams = get_hparams()

    # save tags: an unrelated namespace first, then the real hparams
    logger = get_test_tube_logger(False)
    logger.log_hyperparams(Namespace(some_str='a_str', an_int=1, a_float=2.0))
    logger.log_hyperparams(hparams)
    logger.save()

    # load tags back from the experiment's data path
    data_path = logger.experiment.get_data_path(
        logger.experiment.name, logger.experiment.version
    )
    tags = trainer_io.load_hparams_from_tags_csv(data_path + '/meta_tags.csv')

    assert tags.batch_size == 32
    assert tags.hidden_dim == 1000

    clear_save_dir()
def test_dp_output_reduce():
    """Check reduce_distributed_output on tensors and nested dicts."""
    reset_seed()

    # identity when only a single gpu contributed
    tensor_out = torch.rand(3, 1)
    assert reduce_distributed_output(tensor_out, nb_gpus=1) is tensor_out

    # mean when several gpus contributed
    assert reduce_distributed_output(tensor_out, nb_gpus=2) == tensor_out.mean()

    # dicts are reduced recursively, value by value
    nested = {
        'a': tensor_out,
        'b': {
            'c': tensor_out
        }
    }
    reduced = reduce_distributed_output(nested, nb_gpus=3)
    assert reduced['a'] == nested['a']
    assert reduced['b']['c'] == nested['b']['c']
def test_model_saving_loading():
    """
    Tests use case where trainer saves the model, and user loads it from tags independently
    :return:
    """
    reset_seed()

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # logger file to get meta
    logger = get_test_tube_logger(False)
    logger.log_hyperparams(hparams)
    logger.save()

    trainer_options = dict(
        max_nb_epochs=1,
        logger=logger,
        checkpoint_callback=ModelCheckpoint(save_dir)
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # training complete
    assert result == 1, 'amp + ddp model failed to complete'

    # make a prediction
    # NOTE(review): only the batch from the LAST test dataloader survives
    # this loop — confirm that is intended
    for dataloader in model.test_dataloader():
        for batch in dataloader:
            break

    x, y = batch
    x = x.view(x.size(0), -1)

    # generate preds before saving model
    model.eval()
    pred_before_saving = model(x)

    # save model
    new_weights_path = os.path.join(save_dir, 'save_test.ckpt')
    trainer.save_checkpoint(new_weights_path)

    # load new model from the saved weights + the logger's meta tags
    tags_path = logger.experiment.get_data_path(logger.experiment.name, logger.experiment.version)
    tags_path = os.path.join(tags_path, 'meta_tags.csv')
    model_2 = LightningTestModel.load_from_metrics(weights_path=new_weights_path,
                                                   tags_csv=tags_path)
    model_2.eval()

    # make prediction: the reloaded model must reproduce the original output
    new_pred = model_2(x)
    assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1

    clear_save_dir()
def test_model_freeze_unfreeze():
    """Smoke test: freeze() followed by unfreeze() must run without error."""
    reset_seed()
    model = LightningTestModel(get_hparams())
    model.freeze()
    model.unfreeze()
def test_amp_gpu_ddp_slurm_managed():
"""
Make sure DDP + AMP work
:return:
"""
if not can_run_gpu_test():
| |
[0, 1, 2]
height = images.shape[2]
width = images.shape[3]
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size]
return cropped, x_offset, y_offset
def uniform_crop(
    images: torch.Tensor,
    size: int,
    spatial_idx: int,
) -> torch.Tensor:
    """
    Uniformly crop a video clip spatially.

    Args:
        images (tensor): clip to crop, with dimension
            `channel` x `num frames` x `height` x `width`.
        size (int): side length of the square crop.
        spatial_idx (int): 0, 1, or 2 for left, center, right crop when the
            width exceeds the height; top, center, bottom crop otherwise.

    Returns:
        The cropped clip with dimension
        `channel` x `num frames` x `height` x `width`.
    """
    # the helper also returns the crop offsets, which are not needed here
    cropped_images, _, _ = _uniform_crop_helper(images, size, spatial_idx)
    return cropped_images
def uniform_crop_with_boxes(
    images: torch.Tensor,
    size: int,
    spatial_idx: int,
    boxes: torch.Tensor,
) -> Tuple[torch.Tensor, np.ndarray]:
    """
    Uniformly crop a video clip and transform its boxes into the crop frame.

    Args:
        images (tensor): clip to crop, with dimension
            `channel` x `num frames` x `height` x `width`.
        size (int): side length of the square crop.
        spatial_idx (int): 0, 1, or 2 for left, center, right crop when the
            width exceeds the height; top, center, bottom crop otherwise.
        boxes (tensor): boxes matching `images`, dimension `num boxes` x 4.

    Returns:
        The cropped clip and the boxes translated by the crop offsets and
        clipped to the crop bounds (`num boxes` x 4).
    """
    cropped_images, offset_x, offset_y = _uniform_crop_helper(images, size, spatial_idx)
    shifted_boxes = crop_boxes(boxes, offset_x, offset_y)
    clipped_boxes = clip_boxes_to_image(
        shifted_boxes, cropped_images.shape[-2], cropped_images.shape[-1]
    )
    return cropped_images, clipped_boxes
def horizontal_flip_with_boxes(
    prob: float, images: torch.Tensor, boxes: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Randomly flip images and their boxes horizontally.

    Args:
        prob (float): probability of performing the flip.
        images (tensor): clip to flip, with dimension
            `channel` x `num frames` x `height` x `width`.
        boxes (tensor): boxes matching `images`, dimension `num boxes` x 4.

    Returns:
        The (possibly flipped) images and the correspondingly flipped boxes
        (`num boxes` x 4).
    """
    out_boxes = copy.deepcopy(boxes)
    do_flip = np.random.uniform() < prob
    if do_flip:
        images = images.flip((-1))
        frame_width = images.shape[3]
        # mirror the x coordinates: new_x1 = W - old_x2 - 1 and vice versa
        out_boxes[:, [0, 2]] = frame_width - boxes[:, [2, 0]] - 1
    return images, out_boxes
def clip_boxes_to_image(boxes: torch.Tensor, height: int, width: int) -> torch.Tensor:
    """
    Clamp boxes into an image of the given height and width.

    Args:
        boxes (tensor): boxes to clip, dimension `num boxes` x 4 as
            (x1, y1, x2, y2).
        height (int): image height.
        width (int): image width.

    Returns:
        The clipped boxes (`num boxes` x 4); the input is left untouched.
    """
    clipped = copy.deepcopy(boxes)
    x_cols = [0, 2]
    y_cols = [1, 3]
    # clamp x into [0, width - 1] and y into [0, height - 1]
    clipped[:, x_cols] = np.minimum(width - 1.0, np.maximum(0.0, boxes[:, x_cols]))
    clipped[:, y_cols] = np.minimum(height - 1.0, np.maximum(0.0, boxes[:, y_cols]))
    return clipped
def crop_boxes(boxes: torch.Tensor, x_offset: int, y_offset: int) -> torch.Tensor:
    """
    Translate boxes into the coordinate frame of a crop.

    Args:
        boxes (torch.Tensor): boxes to shift, dimension `num boxes` x 4 as
            (x1, y1, x2, y2).
        x_offset (int): crop offset along the x axis.
        y_offset (int): crop offset along the y axis.

    Returns:
        The shifted boxes (`num boxes` x 4); the input is left untouched.
    """
    shifted = copy.deepcopy(boxes)
    # subtract the crop origin from the respective coordinates
    shifted[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
    shifted[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
    return shifted
def _get_param_spatial_crop(
scale: Tuple[float, float],
ratio: Tuple[float, float],
height: int,
width: int,
log_uniform_ratio: bool = True,
num_tries: int = 10,
) -> Tuple[int, int, int, int]:
"""
Given scale, ratio, height and width, return sampled coordinates of the videos.
Args:
scale (Tuple[float, float]): Scale range of Inception-style area based
random resizing.
ratio (Tuple[float, float]): Aspect ratio range of Inception-style
area based random resizing.
height (int): Height of the original image.
width (int): Width of the original image.
log_uniform_ratio (bool): Whether to use a log-uniform distribution to
sample the aspect ratio. Default is True.
num_tries (int): The number of times to attempt a randomly resized crop.
Falls back to a central crop after all attempts are exhausted.
Default is 10.
Returns:
Tuple containing i, j, h, w. (i, j) are the coordinates of the top left
corner of the crop. (h, w) are the height and width of the crop.
"""
assert num_tries >= 1, "num_tries must be at least 1"
if scale[0] > scale[1]:
scale = (scale[1], scale[0])
if ratio[0] > ratio[1]:
ratio = (ratio[1], ratio[0])
for _ in range(num_tries):
area = height * width
target_area = area * (scale[0] + torch.rand(1).item() * (scale[1] - scale[0]))
if log_uniform_ratio:
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(
log_ratio[0] + torch.rand(1).item() * (log_ratio[1] - log_ratio[0])
)
else:
aspect_ratio = ratio[0] + torch.rand(1).item() * (ratio[1] - ratio[0])
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < w <= width and 0 < h <= height:
i = torch.randint(0, height - h + 1, (1,)).item()
j = torch.randint(0, width - w + 1, (1,)).item()
return i, j, h, w
# Fallback to central crop.
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def random_resized_crop(
frames: torch.Tensor,
target_height: int,
target_width: int,
scale: Tuple[float, float],
aspect_ratio: Tuple[float, float],
shift: bool = False,
log_uniform_ratio: bool = True,
interpolation: str = "bilinear",
num_tries: int = 10,
) -> torch.Tensor:
"""
Crop the given images to random size and aspect ratio. A crop of random
size relative to the original size and a random aspect ratio is made. This
crop is finally resized to given size. This is popularly used to train the
Inception networks.
Args:
frames (torch.Tensor): Video tensor to be resized with shape (C, T, H, W).
target_height (int): Desired height after cropping.
target_width (int): Desired width after cropping.
scale (Tuple[float, float]): Scale range of Inception-style area based
random resizing. Should be between 0.0 and 1.0.
aspect_ratio (Tuple[float, float]): Aspect ratio range of Inception-style
area based random resizing. Should be between 0.0 and +infinity.
shift (bool): Bool that determines whether or not to sample two different
boxes (for cropping) for the first and last frame. If True, it then
linearly interpolates the two boxes for other frames. If False, the
same box is cropped for every frame. Default is False.
log_uniform_ratio (bool): Whether to use a log-uniform distribution to
sample the aspect ratio. Default is True.
interpolation (str): Algorithm used for upsampling. Currently supports
'nearest', 'bilinear', 'bicubic', 'area'. Default is 'bilinear'.
num_tries (int): The number of times to attempt a randomly resized crop.
Falls back to a central crop after all attempts are exhausted.
Default is 10.
Returns:
cropped (tensor): A cropped video tensor of shape (C, T, target_height, target_width).
"""
assert (
scale[0] > 0 and scale[1] > 0
), "min and max of scale range must be greater than 0"
assert (
aspect_ratio[0] > 0 and aspect_ratio[1] > 0
), "min and max of aspect_ratio range must be greater than 0"
channels = frames.shape[0]
t = frames.shape[1]
height = frames.shape[2]
width = frames.shape[3]
i, j, h, w = _get_param_spatial_crop(
scale, aspect_ratio, height, width, log_uniform_ratio, num_tries
)
if not shift:
| |
either float32, float64, complex64 or
complex128.
Note about BEM++ terminology: a *potential operator* acts on functions
defined on a surface S and produces functions defined at any point of the
space surrounding S, but not necessarily on S itself. In contrast, a
*boundary operator* acts on on functions defined on a surface S and produces
functions defined on the same surface S.
"""
return _constructHelmholtzPotentialOperator(
"helmholtz3dFarFieldDoubleLayerPotentialOperator", context, waveNumber)
def _constructModifiedHelmholtzOperator(
        className, context,
        domain, range, dualToRange, waveNumber,
        label, useInterpolation, interpPtsPerWavelength):
    """Instantiate a modified-Helmholtz boundary operator of class
    `className`, deducing the kernel type from the context and wave number."""
    # all three spaces must share the context's basis function type
    basisFunctionType = context.basisFunctionType()
    spaces = (domain, range, dualToRange)
    if any(s.basisFunctionType() != basisFunctionType for s in spaces):
        raise TypeError("BasisFunctionType of context and all spaces must be the same")

    resultType = context.resultType()
    waveNumberIsComplex = complex(waveNumber).imag != 0
    if waveNumberIsComplex and resultType in ("float32", "float64"):
        raise TypeError("Real result type given for a complex wave number")

    # determine kernelType: complex iff the wave number is complex; its
    # precision follows the requested result precision
    if waveNumberIsComplex:
        kernelType = resultType
    elif resultType in ("float32", "complex64"):
        kernelType = "float32"
    else:
        kernelType = "float64"

    # construct object
    symmetry = 0
    result = _constructObjectTemplatedOnBasisKernelAndResult(
        core, className, basisFunctionType, kernelType, resultType,
        context, domain, range, dualToRange, waveNumber, label or "",
        symmetry, useInterpolation, interpPtsPerWavelength)
    result._context = context
    result._domain = domain
    result._range = range
    result._dualToRange = dualToRange
    return result
def createModifiedHelmholtz3dSingleLayerBoundaryOperator(
        context, domain, range, dualToRange, waveNumber,
        label=None, useInterpolation=False, interpPtsPerWavelength=5000):
    """
    Create and return a single-layer-potential boundary operator for the
    modified Helmholtz equation in 3D (nabla^2 u - k^2 u = 0).

    *Parameters:*
       - context (Context)
            Controls the assembly of the operator's weak form.
       - domain (Space)
            Domain function space of the operator.
       - range (Space)
            Range function space of the operator.
       - dualToRange (Space)
            Function space dual to the range.
       - waveNumber (float or complex)
            The number k; a complex value requires a complex
            context.resultType().
       - label (string)
            Textual label; a unique one is generated when None (default).
       - useInterpolation (bool)
            If True, evaluate the kernel's exponential factor by
            piecewise-cubic interpolation on a precomputed regular grid —
            usually faster, possibly less accurate (experimental). Default
            is False (use exp() directly).
       - interpPtsPerWavelength (int)
            Interpolation-grid density per effective wavelength
            (2 pi / abs(waveNumber)); ignored unless useInterpolation is
            True. The default (5000) normally keeps the relative or
            absolute error, whichever is smaller, below 100 * machine
            precision.

    *Returns* a newly constructed
    BoundaryOperator_BasisFunctionType_ResultType object, with
    BasisFunctionType and ResultType deduced from the context (float32,
    float64, complex64 or complex128).
    """
    return _constructModifiedHelmholtzOperator(
        "modifiedHelmholtz3dSingleLayerBoundaryOperator", context,
        domain, range, dualToRange, waveNumber,
        label, useInterpolation, interpPtsPerWavelength)
def createModifiedHelmholtz3dDoubleLayerBoundaryOperator(
        context, domain, range, dualToRange, waveNumber,
        label=None, useInterpolation=False, interpPtsPerWavelength=5000):
    """
    Create and return a double-layer-potential boundary operator for the
    modified Helmholtz equation in 3D (nabla^2 u - k^2 u = 0).

    *Parameters:*
       - context (Context)
            Controls the assembly of the operator's weak form.
       - domain (Space)
            Domain function space of the operator.
       - range (Space)
            Range function space of the operator.
       - dualToRange (Space)
            Function space dual to the range.
       - waveNumber (float or complex)
            The number k; a complex value requires a complex
            context.resultType().
       - label (string)
            Textual label; a unique one is generated when None (default).
       - useInterpolation (bool)
            If True, evaluate the kernel's exponential factor by
            piecewise-cubic interpolation on a precomputed regular grid —
            usually faster, possibly less accurate (experimental). Default
            is False (use exp() directly).
       - interpPtsPerWavelength (int)
            Interpolation-grid density per effective wavelength
            (2 pi / abs(waveNumber)); ignored unless useInterpolation is
            True. The default (5000) normally keeps the relative or
            absolute error, whichever is smaller, below 100 * machine
            precision.

    *Returns* a newly constructed
    BoundaryOperator_BasisFunctionType_ResultType object, with
    BasisFunctionType and ResultType deduced from the context (float32,
    float64, complex64 or complex128).
    """
    return _constructModifiedHelmholtzOperator(
        "modifiedHelmholtz3dDoubleLayerBoundaryOperator", context,
        domain, range, dualToRange, waveNumber,
        label, useInterpolation, interpPtsPerWavelength)
def createModifiedHelmholtz3dAdjointDoubleLayerBoundaryOperator(
        context, domain, range, dualToRange, waveNumber,
        label=None, useInterpolation=False, interpPtsPerWavelength=5000):
    """
    Create and return an adjoint double-layer-potential boundary operator
    for the modified Helmholtz equation in 3D (nabla^2 u - k^2 u = 0).

    *Parameters:*
       - context (Context)
            Controls the assembly of the operator's weak form.
       - domain (Space)
            Domain function space of the operator.
       - range (Space)
            Range function space of the operator.
       - dualToRange (Space)
            Function space dual to the range.
       - waveNumber (float or complex)
            The number k; a complex value requires a complex
            context.resultType().
       - label (string)
            Textual label; a unique one is generated when None (default).
       - useInterpolation (bool)
            If True, evaluate the kernel's exponential factor by
            piecewise-cubic interpolation on a precomputed regular grid —
            usually faster, possibly less accurate (experimental). Default
            is False (use exp() directly).
       - interpPtsPerWavelength (int)
            Interpolation-grid density per effective wavelength
            (2 pi / abs(waveNumber)); ignored unless useInterpolation is
            True. The default (5000) normally keeps the relative or
            absolute error, whichever is smaller, below 100 * machine
            precision.

    *Returns* a newly constructed
    BoundaryOperator_BasisFunctionType_ResultType object, with
    BasisFunctionType and ResultType deduced from the context (float32,
    float64, complex64 or complex128).
    """
    return _constructModifiedHelmholtzOperator(
        "modifiedHelmholtz3dAdjointDoubleLayerBoundaryOperator", context,
        domain, range, dualToRange, waveNumber,
        label, useInterpolation, interpPtsPerWavelength)
def createModifiedHelmholtz3dHypersingularBoundaryOperator(
context, domain, range, dualToRange, waveNumber,
label=None, useInterpolation=False, interpPtsPerWavelength=5000):
"""
Create and return a hypersingular boundary operator for the modified
Helmholtz equation in 3D.
*Parameters:*
- context (Context)
A Context object to control the assembly of the weak form of the
newly constructed operator.
- domain (Space)
Function space to be taken as the domain of the operator.
- range (Space)
Function space to be taken as the range of the operator.
- dualToRange (Space)
Function space to be taken as the dual to the range of the operator.
- waveNumber (float or complex)
Wave number, i.e. the number k in the modified Helmholtz equation
nabla^2 u - k^2 u = 0.
Only real wave numbers are allowed if context.resultType() is a real
type (float32 or float64).
- label (string)
Textual label of the operator. | |
# Repository: simon-m-mudd/LSDMappingTools
"""
A set of functions to do some simple statistical analyses
Created on Thu Jun 8th 2017
Author: SMM
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
import scipy.stats as ss
# Implementation adapted from
# https://github.com/joferkington/oost_paper_code/blob/master/utilities.py
def is_outlier(points, thresh=3.5):
    """
    Flag outliers using the modified z-score (median absolute deviation).

    Parameters:
    -----------
    points : An numobservations by numdimensions array of observations
    thresh : The modified z-score to use as a threshold. Observations with
        a modified z-score (based on the median absolute deviation) greater
        than this value will be classified as outliers.

    Returns:
    --------
    mask : A numobservations-length boolean array (True marks an outlier).

    References:
    ----------
    Iglewicz and Hoaglin (1993), "Volume 16: How to Detect and
    Handle Outliers", The ASQC Basic References in Quality Control:
    Statistical Techniques.
    """
    if points.ndim == 1:
        points = points[:, None]

    center = np.median(points, axis=0)
    # Euclidean distance of each observation from the per-dimension median
    dist = np.sqrt(np.sum((points - center) ** 2, axis=-1))
    mad = np.median(dist)

    # a zero MAD would divide by zero; in that case nothing is an outlier
    if mad == 0:
        modified_z = dist * 0
    else:
        modified_z = 0.6745 * dist / mad
    return modified_z > thresh
def get_MAD(points):
    """
    Return the median absolute deviation (MAD) of a sampled population.

    Parameters:
    -----------
    points : An numobservations by numdimensions array of observations

    Returns:
    --------
    The MAD of the distances from the per-dimension median.

    References:
    ----------
    Iglewicz and Hoaglin (1993), "Volume 16: How to Detect and
    Handle Outliers", The ASQC Basic References in Quality Control:
    Statistical Techniques.
    """
    if points.ndim == 1:
        points = points[:, None]
    center = np.median(points, axis=0)
    # Euclidean distance of each observation from the per-dimension median
    dist = np.sqrt(np.sum((points - center) ** 2, axis=-1))
    return np.median(dist)
def get_outlier_from_KernelDensityStuff(df, column = "", binning = "", threshold = 6, method = "gaussian", sort_by = ""):
    """
    Keep, per bin, the rows whose kernel-density score is extreme.

    For each unique value of `binning`, fits a KernelDensity estimate to
    `column` and keeps rows whose absolute log-density is >= `threshold`.

    Args:
        df (pandas.DataFrame): data to filter.
        column (str): column to score; "deriv_ksn" triggers the derivative
            computation abs(d ksn / d chi).
        binning (str): column whose unique values define the bins.
            NOTE(review): when left as "" the function returns an empty
            dataframe without scoring anything — confirm this is intended.
        threshold (float): minimum absolute log-density score to keep a row.
        method (str): kernel name passed to sklearn's KernelDensity.
        sort_by (str): optional column to sort each bin by before scoring.

    Returns:
        pandas.DataFrame with the kept rows of every bin.
    """
    # BUGFIX: sklearn.neighbors.kde was removed in scikit-learn 0.24;
    # KernelDensity must be imported from the public location.
    from sklearn.neighbors import KernelDensity

    if(column ==""):
        print("I need a column")
        quit()

    out_df = pd.DataFrame(data = None, columns = df.columns)
    if(binning != ""):
        for bin_value in df[binning].unique():
            tdf = df[df[binning] == bin_value]
            if(sort_by!= ""):
                tdf = tdf.sort_values(sort_by)
            if(column == "deriv_ksn"):
                tdf["deriv_ksn"] = pd.Series(np.abs(tdf.ksn.diff()/tdf.chi.diff()),index = tdf.index)
                # First row has no predecessor: define its derivative as 0.
                # BUGFIX: use .loc instead of chained tdf["deriv_ksn"].iloc[0] = 0,
                # which may silently write to a temporary copy.
                tdf.loc[tdf.index[0], "deriv_ksn"] = 0
            print(tdf.shape[0])

            samples = np.copy(tdf[column].values.reshape((-1,1)))
            kde = KernelDensity(kernel = method).fit(samples)
            scores = np.abs(kde.score_samples(samples))
            # keep rows scoring at or above the threshold
            keep_mask = [score >= threshold for score in scores]
            out_df = pd.concat([out_df, tdf[keep_mask]])
    return out_df
def add_outlier_column_to_PD(df, column = "none", threshold = "none"):
    """
    Takes a pandas dataframe and returns the same with added boolean columns
    named "<column>_outlier" (True marks an outlier), computed with
    is_outlier(). Can also take a list or dict of dataframes.

    Args:
        df (Pandas dataframe): the dataframe, or a list/dict of dataframes
        column (list or string): name of the column(s) you want to outlier-check
        threshold (list or float): one threshold per column (same length as column)

    Returns:
        Pandas.DataFrame when a single dataframe was given, otherwise the
        same container that was passed in.
    """
    # Normalize the input to an indexable container
    if(isinstance(df,list) == False and isinstance(df,dict) == False):
        lst_df = [df]
    else:
        lst_df = df

    # check the data validity
    if(isinstance(column,str) and column =="none"):
        print("you need to give me the name of at least a column, or a list ([])")
        quit()
    if(isinstance(threshold,str) and threshold =="none"):
        print("you need to give me the name of at least a column, or a list ([])")
        quit()

    # Normalize single values to lists once, before the loop
    if(isinstance(column,str)):
        column = [column]
    if(isinstance(threshold,float) or isinstance(threshold,int)):
        threshold = [threshold]
    if(len(threshold) != len(column)):
        print("You need to assign one threshold per columns name")

    # BUGFIX: iterate over KEYS/INDICES of the container. The original
    # looped over the container's VALUES while indexing with them
    # (lst_df[instance]), which raised TypeError for list input and only
    # happened to work for dicts.
    keys = lst_df.keys() if isinstance(lst_df, dict) else range(len(lst_df))
    for key in keys:
        for i in range(len(column)):
            is_outliers = is_outlier(lst_df[key][column[i]], threshold[i])
            coln = column[i] + "_outlier"
            lst_df[key][coln] = pd.Series(is_outliers, index = lst_df[key].index)

    # NOTE(review): for a dict with one entry this returns lst_df[0], which
    # only works when 0 is a key — preserved from the original behavior.
    if len(lst_df) == 1:
        return lst_df[0]
    else:
        return lst_df
def binning_PD(df, column = "", values = [], log = False):
    """
    takes a dataframe (Pandas) and return a dict of dataframes binned by one column.

    Args:
        df: The pandas dataframe
        column (str): name of the column that hold the data
        values (list): _ list of the upper values of each binning, another binning category will incorporate everything superior to the last value
            _ you also can give the value "auto_power_10" to this. it will automatically bin the data each 10**n until the max
            _ "unique" will bin the df for each values of the column (Basin/ source key for example)
        log (bool): if True, the bin bounds are compared against log10 of the column
    return:
        dictionnary of pandas dataframe, the key being the upper value
        (plus a '>last' key holding everything above the last bound)
    """
    # check the function parameters
    if(column == ""):
        print("You need to give a valid column name")
    if(isinstance(values, list) and len(values) < 2):
        print("You need at least two values to bin the dataframe")

    if(isinstance(values,str) and values == "auto_power_10"):
        print("I am automatically choosing the binning values each 10**n, thanks for trusting me")
        max_val = df[column].max()
        min_value = df[column].min()
        po = 0
        values = []
        while(max_val > 10**po):
            if(min_value < 10**po):
                values.append(10**po)
            po += 1
        del values[-1]  # delete the last value to keep last bin > to last value
        print("Your binning values are: ")
        print(values)
    elif(isinstance(values,str) and values == "unique"):
        print("I am automatically choosing the binning values for each unique values, thanks for trusting me")
        values = df[column].unique()
        print("Your binning values are: ")
        print(values)

    cumul_lines = 0  # sanity counter: rows captured across all bins

    if(log):
        # every comparison happens in log10 space
        return_DF = [df[np.log10(df[column]) < values[0]]]
        cumul_lines += return_DF[0].shape[0]
        for i in range(1, len(values)):
            tempdf = df[np.log10(df[column]) < values[i]]
            # BUGFIX: the lower bound must also be tested in log space; the
            # original compared the RAW column against a log10-space bound.
            tempdf = tempdf[np.log10(tempdf[column]) > values[i-1]]
            return_DF.append(tempdf)
            cumul_lines += return_DF[i].shape[0]
        tempdf = df[np.log10(df[column]) >= values[-1]]
        cumul_lines += tempdf.shape[0]
        return_DF.append(tempdf)
    else:
        return_DF = [df[df[column] < values[0]]]
        cumul_lines += return_DF[0].shape[0]
        for i in range(1, len(values)):
            tempdf = df[df[column] < values[i]]
            tempdf = tempdf[tempdf[column] > values[i-1]]
            return_DF.append(tempdf)
            cumul_lines += return_DF[i].shape[0]
        cumul_lines += df[df[column] >= values[-1]].shape[0]
        return_DF.append(df[df[column] >= values[-1]])  # last overweight bin

    print("DEBUG: " + str(cumul_lines) + " lines detected over " + str(df.shape[0]))

    # compile the results in a dictionnary
    dict_return = {}
    for i in range(len(values)):
        dict_return[str(values[i])] = return_DF[i]
    dict_return['>' + str(values[-1])] = return_DF[-1]
    return dict_return
def dixon_test(data, left=True, right=True, q_dict=None):
    """Perform Dixon's Q test for a single outlier at either end of the data.

    Keyword arguments:
        data = A ordered or unordered list of data points (int or float).
        left = Q-test of minimum value in the ordered list if True.
        right = Q-test of maximum value in the ordered list if True.
        q_dict = A dictionary of Q-values for a given confidence level,
            where the dict. keys are sample sizes N, and the associated values
            are the corresponding critical Q values. E.g.,
            {3: 0.97, 4: 0.829, 5: 0.71, 6: 0.625, ...}

    Returns a list of 2 values for the outliers, or None.
    E.g.,
       for [1,1,1] -> [None, None]
       for [5,1,1] -> [None, 5]
       for [5,1,5] -> [1, None]
    """
    # BUG FIX: the previous default (q_dict="") crashed later with an opaque
    # AttributeError on q_dict.keys(); fail fast with a clear message instead.
    assert isinstance(q_dict, dict) and q_dict, 'A dictionary of critical Q values is required.'
    assert(left or right), 'At least one of the variables, `left` or `right`, must be True.'
    assert(len(data) >= 3), 'At least 3 data points are required'
    assert(len(data) <= max(q_dict.keys())), 'Sample size too large'
    sdata = sorted(data)
    # Each entry is (Q_statistic - Q_critical, candidate value); a positive
    # first element flags the candidate as a significant outlier.
    Q_mindiff, Q_maxdiff = (0, 0), (0, 0)
    if left:
        Q_min = (sdata[1] - sdata[0])
        try:
            Q_min /= (sdata[-1] - sdata[0])
        except ZeroDivisionError:
            # All values identical: numerator is 0 as well, keep Q_min = 0.
            pass
        Q_mindiff = (Q_min - q_dict[len(data)], sdata[0])
    if right:
        Q_max = abs((sdata[-2] - sdata[-1]))
        try:
            Q_max /= abs((sdata[0] - sdata[-1]))
        except ZeroDivisionError:
            pass
        Q_maxdiff = (Q_max - q_dict[len(data)], sdata[-1])
    if not Q_mindiff[0] > 0 and not Q_maxdiff[0] > 0:
        outliers = [None, None]
    elif Q_mindiff[0] == Q_maxdiff[0]:
        # Equally significant on both sides: report both candidates.
        outliers = [Q_mindiff[1], Q_maxdiff[1]]
    elif Q_mindiff[0] > Q_maxdiff[0]:
        outliers = [Q_mindiff[1], None]
    else:
        outliers = [None, Q_maxdiff[1]]
    return outliers
def linregress_residuals(xdata, ydata):
    """
    This function performs a linear regression and then gets the residuals

    Args:
        xdata (array-like): The x data
        ydata (array-like): The y data

    Returns:
        residuals: the residuals of the regression (yfit - ydata), always
            returned as a float array
        slope: the slope of regression line
        intercept: intercept of the regression line
        rvalue: correlation coefficient
        pvalue: two-sided p-value for a hypothesis test whose null hypothesis is that the slope is zero.
        stderr: standard error of the estimated gradient

    Author: SMM
    """
    from scipy import stats
    # Get the regression
    (m, b, r, pvalue, stderr) = stats.linregress(xdata, ydata)
    # BUG FIX: the old code did residuals = np.copy(xdata), which inherits
    # xdata's dtype -- for integer inputs the residuals were silently
    # truncated to ints. Compute in float, vectorised, instead.
    x = np.asarray(xdata, dtype=float)
    y = np.asarray(ydata, dtype=float)
    # Same sign convention as before: fitted value minus observation.
    residuals = (m * x + b) - y
    return (residuals, m, b, r, pvalue, stderr)
def remove_outlying_residuals(xdata,ydata,residuals):
"""
This function removes data with outying residuals
Args:
xdata (array-like): The x data
ydata (array-like): The y | |
view.
:type type: object
:param url: Url of the view.
:type url: str
:param visibility: Visibility status of the view.
:type visibility: object
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'},
'visibility': {'key': 'visibility', 'type': 'object'}
}
def __init__(self, _links=None, id=None, name=None, type=None, url=None, visibility=None):
super(FeedView, self).__init__()
self._links = _links
self.id = id
self.name = name
self.type = type
self.url = url
self.visibility = visibility
class GlobalPermission(Model):
    """Permission entry pairing an identity with a feed-service role.

    :param identity_descriptor: Identity of the user with the provided Role.
    :type identity_descriptor: :class:`str <azure.devops.v5_1.packaging.models.str>`
    :param role: Role associated with the Identity.
    :type role: object
    """

    _attribute_map = {
        'identity_descriptor': {'key': 'identityDescriptor', 'type': 'str'},
        'role': {'key': 'role', 'type': 'object'}
    }

    def __init__(self, identity_descriptor=None, role=None):
        super(GlobalPermission, self).__init__()
        for field, value in (('identity_descriptor', identity_descriptor),
                             ('role', role)):
            setattr(self, field, value)
class JsonPatchOperation(Model):
    """A single JSON Patch (RFC 6902 style) operation.

    :param from_: The path to copy from for the Move/Copy operation.
    :type from_: str
    :param op: The patch operation
    :type op: object
    :param path: The path for the operation. In the case of an array, a zero based index can be used to specify the position in the array (e.g. /biscuits/0/name). The "-" character can be used instead of an index to insert at the end of the array (e.g. /biscuits/-).
    :type path: str
    :param value: The value for the operation. This is either a primitive or a JToken.
    :type value: object
    """

    _attribute_map = {
        'from_': {'key': 'from', 'type': 'str'},
        'op': {'key': 'op', 'type': 'object'},
        'path': {'key': 'path', 'type': 'str'},
        'value': {'key': 'value', 'type': 'object'}
    }

    def __init__(self, from_=None, op=None, path=None, value=None):
        super(JsonPatchOperation, self).__init__()
        for field, val in (('from_', from_), ('op', op),
                           ('path', path), ('value', value)):
            setattr(self, field, val)
class MinimalPackageVersion(Model):
    """Compact description of one version of a package in a feed.

    :param direct_upstream_source_id: Upstream source this package was ingested from.
    :type direct_upstream_source_id: str
    :param id: Id for the package.
    :type id: str
    :param is_cached_version: [Obsolete] Used for legacy scenarios and may be removed in future versions.
    :type is_cached_version: bool
    :param is_deleted: True if this package has been deleted.
    :type is_deleted: bool
    :param is_latest: True if this is the latest version of the package by package type sort order.
    :type is_latest: bool
    :param is_listed: (NuGet Only) True if this package is listed.
    :type is_listed: bool
    :param normalized_version: Normalized version using normalization rules specific to a package type.
    :type normalized_version: str
    :param package_description: Package description.
    :type package_description: str
    :param publish_date: UTC Date the package was published to the service.
    :type publish_date: datetime
    :param storage_id: Internal storage id.
    :type storage_id: str
    :param version: Display version.
    :type version: str
    :param views: List of views containing this package version.
    :type views: list of :class:`FeedView <azure.devops.v5_1.packaging.models.FeedView>`
    """

    _attribute_map = {
        'direct_upstream_source_id': {'key': 'directUpstreamSourceId', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'is_cached_version': {'key': 'isCachedVersion', 'type': 'bool'},
        'is_deleted': {'key': 'isDeleted', 'type': 'bool'},
        'is_latest': {'key': 'isLatest', 'type': 'bool'},
        'is_listed': {'key': 'isListed', 'type': 'bool'},
        'normalized_version': {'key': 'normalizedVersion', 'type': 'str'},
        'package_description': {'key': 'packageDescription', 'type': 'str'},
        'publish_date': {'key': 'publishDate', 'type': 'iso-8601'},
        'storage_id': {'key': 'storageId', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
        'views': {'key': 'views', 'type': '[FeedView]'}
    }

    def __init__(self, direct_upstream_source_id=None, id=None, is_cached_version=None, is_deleted=None, is_latest=None, is_listed=None, normalized_version=None, package_description=None, publish_date=None, storage_id=None, version=None, views=None):
        super(MinimalPackageVersion, self).__init__()
        for field, val in (
                ('direct_upstream_source_id', direct_upstream_source_id),
                ('id', id),
                ('is_cached_version', is_cached_version),
                ('is_deleted', is_deleted),
                ('is_latest', is_latest),
                ('is_listed', is_listed),
                ('normalized_version', normalized_version),
                ('package_description', package_description),
                ('publish_date', publish_date),
                ('storage_id', storage_id),
                ('version', version),
                ('views', views)):
            setattr(self, field, val)
class Package(Model):
    """A package in a feed, together with all of its versions.

    :param _links: Related REST links.
    :type _links: :class:`ReferenceLinks <azure.devops.v5_1.packaging.models.ReferenceLinks>`
    :param id: Id of the package.
    :type id: str
    :param is_cached: Used for legacy scenarios and may be removed in future versions.
    :type is_cached: bool
    :param name: The display name of the package.
    :type name: str
    :param normalized_name: The normalized name representing the identity of this package within its package type.
    :type normalized_name: str
    :param protocol_type: Type of the package.
    :type protocol_type: str
    :param star_count: [Obsolete] - this field is unused and will be removed in a future release.
    :type star_count: int
    :param url: Url for this package.
    :type url: str
    :param versions: All versions for this package within its feed.
    :type versions: list of :class:`MinimalPackageVersion <azure.devops.v5_1.packaging.models.MinimalPackageVersion>`
    """

    _attribute_map = {
        '_links': {'key': '_links', 'type': 'ReferenceLinks'},
        'id': {'key': 'id', 'type': 'str'},
        'is_cached': {'key': 'isCached', 'type': 'bool'},
        'name': {'key': 'name', 'type': 'str'},
        'normalized_name': {'key': 'normalizedName', 'type': 'str'},
        'protocol_type': {'key': 'protocolType', 'type': 'str'},
        'star_count': {'key': 'starCount', 'type': 'int'},
        'url': {'key': 'url', 'type': 'str'},
        'versions': {'key': 'versions', 'type': '[MinimalPackageVersion]'}
    }

    def __init__(self, _links=None, id=None, is_cached=None, name=None, normalized_name=None, protocol_type=None, star_count=None, url=None, versions=None):
        super(Package, self).__init__()
        for field, val in (
                ('_links', _links),
                ('id', id),
                ('is_cached', is_cached),
                ('name', name),
                ('normalized_name', normalized_name),
                ('protocol_type', protocol_type),
                ('star_count', star_count),
                ('url', url),
                ('versions', versions)):
            setattr(self, field, val)
class PackageChange(Model):
    """A change event affecting one package and one of its versions.

    :param package: Package that was changed.
    :type package: :class:`Package <azure.devops.v5_1.packaging.models.Package>`
    :param package_version_change: Change that was performed on a package version.
    :type package_version_change: :class:`PackageVersionChange <azure.devops.v5_1.packaging.models.PackageVersionChange>`
    """

    _attribute_map = {
        'package': {'key': 'package', 'type': 'Package'},
        'package_version_change': {'key': 'packageVersionChange', 'type': 'PackageVersionChange'}
    }

    def __init__(self, package=None, package_version_change=None):
        super(PackageChange, self).__init__()
        for field, val in (('package', package),
                           ('package_version_change', package_version_change)):
            setattr(self, field, val)
class PackageChangesResponse(Model):
    """One batch of package changes plus the continuation token for the next batch.

    :param _links: Related REST links.
    :type _links: :class:`ReferenceLinks <azure.devops.v5_1.packaging.models.ReferenceLinks>`
    :param count: Number of changes in this batch.
    :type count: int
    :param next_package_continuation_token: Token that should be used in future calls for this feed to retrieve new changes.
    :type next_package_continuation_token: long
    :param package_changes: List of changes.
    :type package_changes: list of :class:`PackageChange <azure.devops.v5_1.packaging.models.PackageChange>`
    """

    _attribute_map = {
        '_links': {'key': '_links', 'type': 'ReferenceLinks'},
        'count': {'key': 'count', 'type': 'int'},
        'next_package_continuation_token': {'key': 'nextPackageContinuationToken', 'type': 'long'},
        'package_changes': {'key': 'packageChanges', 'type': '[PackageChange]'}
    }

    def __init__(self, _links=None, count=None, next_package_continuation_token=None, package_changes=None):
        super(PackageChangesResponse, self).__init__()
        for field, val in (
                ('_links', _links),
                ('count', count),
                ('next_package_continuation_token', next_package_continuation_token),
                ('package_changes', package_changes)):
            setattr(self, field, val)
class PackageDependency(Model):
    """A dependency declared by a package version.

    :param group: Dependency package group (an optional classification within some package types).
    :type group: str
    :param package_name: Dependency package name.
    :type package_name: str
    :param version_range: Dependency package version range.
    :type version_range: str
    """

    _attribute_map = {
        'group': {'key': 'group', 'type': 'str'},
        'package_name': {'key': 'packageName', 'type': 'str'},
        'version_range': {'key': 'versionRange', 'type': 'str'}
    }

    def __init__(self, group=None, package_name=None, version_range=None):
        super(PackageDependency, self).__init__()
        for field, val in (('group', group),
                           ('package_name', package_name),
                           ('version_range', version_range)):
            setattr(self, field, val)
class PackageFile(Model):
    """A file (possibly a directory node) belonging to a package version.

    :param children: Hierarchical representation of files.
    :type children: list of :class:`PackageFile <azure.devops.v5_1.packaging.models.PackageFile>`
    :param name: File name.
    :type name: str
    :param protocol_metadata: Extended data unique to a specific package type.
    :type protocol_metadata: :class:`ProtocolMetadata <azure.devops.v5_1.packaging.models.ProtocolMetadata>`
    """

    _attribute_map = {
        'children': {'key': 'children', 'type': '[PackageFile]'},
        'name': {'key': 'name', 'type': 'str'},
        'protocol_metadata': {'key': 'protocolMetadata', 'type': 'ProtocolMetadata'}
    }

    def __init__(self, children=None, name=None, protocol_metadata=None):
        super(PackageFile, self).__init__()
        for field, val in (('children', children),
                           ('name', name),
                           ('protocol_metadata', protocol_metadata)):
            setattr(self, field, val)
class PackageMetrics(Model):
    """Aggregate download statistics for a single package id.

    :param download_count: Total count of downloads per package id.
    :type download_count: float
    :param download_unique_users: Number of downloads per unique user per package id.
    :type download_unique_users: float
    :param last_downloaded: UTC date and time when package was last downloaded.
    :type last_downloaded: datetime
    :param package_id: Package id.
    :type package_id: str
    """

    _attribute_map = {
        'download_count': {'key': 'downloadCount', 'type': 'float'},
        'download_unique_users': {'key': 'downloadUniqueUsers', 'type': 'float'},
        'last_downloaded': {'key': 'lastDownloaded', 'type': 'iso-8601'},
        'package_id': {'key': 'packageId', 'type': 'str'}
    }

    def __init__(self, download_count=None, download_unique_users=None, last_downloaded=None, package_id=None):
        super(PackageMetrics, self).__init__()
        for field, val in (('download_count', download_count),
                           ('download_unique_users', download_unique_users),
                           ('last_downloaded', last_downloaded),
                           ('package_id', package_id)):
            setattr(self, field, val)
class PackageMetricsQuery(Model):
    """Request body used to query download metrics for specific packages.

    :param package_ids: List of package ids
    :type package_ids: list of str
    """

    _attribute_map = {
        'package_ids': {'key': 'packageIds', 'type': '[str]'}
    }

    def __init__(self, package_ids=None):
        super(PackageMetricsQuery, self).__init__()
        # Single serialized field; no other state is held.
        self.package_ids = package_ids
class PackageVersion(MinimalPackageVersion):
"""PackageVersion.
:param direct_upstream_source_id: Upstream source this package was ingested from.
:type direct_upstream_source_id: str
:param id: Id for the package.
:type id: str
:param is_cached_version: [Obsolete] Used for legacy scenarios and may be removed in future versions.
:type is_cached_version: bool
:param is_deleted: True if this package has been deleted.
:type is_deleted: bool
:param is_latest: True if this is the latest version of the package by package type sort order.
:type is_latest: bool
:param is_listed: (NuGet Only) True if this package is listed.
:type is_listed: bool
:param normalized_version: Normalized version using normalization rules specific to a package type.
:type normalized_version: str
:param package_description: Package description.
:type package_description: str
:param publish_date: | |
<gh_stars>0
import torch
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from termcolor import cprint
from utils_numpy_filter import NUMPYIEKF
from utils import prepare_data
class InitProcessCovNet(torch.nn.Module):
    """Learns scale factors for the IEKF's initial and process covariances.

    Each head is a bias-free 1->6 linear layer fed a constant input of 1;
    its output is squashed by tanh and mapped through 10**tanh(.), giving a
    learnable multiplier in (0.1, 10) for each covariance block.
    """

    def __init__(self):
        super(InitProcessCovNet, self).__init__()
        # Fixed (non-learned) scaling tensors, kept in double precision.
        self.beta_process = 3 * torch.ones(2).double()
        self.beta_initialization = 3 * torch.ones(2).double()
        # Head producing the 6 multipliers for the initial covariance;
        # weights shrunk so training starts near beta == 1.
        self.factor_initial_covariance = torch.nn.Linear(1, 6, bias=False).double()
        self.factor_initial_covariance.weight.data[:] /= 10
        # Head producing the 6 multipliers for the process noise covariance.
        self.factor_process_covariance = torch.nn.Linear(1, 6, bias=False).double()
        self.factor_process_covariance.weight.data[:] /= 10
        self.tanh = torch.nn.Tanh()

    def forward(self, iekf):
        # Queried through init_cov / init_processcov instead of forward.
        return

    def init_cov(self, iekf):
        """Return the 6 multipliers for the initial covariance, shape (6,)."""
        raw = self.factor_initial_covariance(torch.ones(1).double()).squeeze()
        return 10 ** self.tanh(raw)

    def init_processcov(self, iekf):
        """Return the 6 multipliers for the process noise covariance."""
        raw = self.factor_process_covariance(torch.ones(1).double())
        return 10 ** self.tanh(raw)
### network that outputs the measurement noise covariance
class MesNet(torch.nn.Module):
    """CNN producing the IEKF's per-step measurement noise covariance.

    A dilated 1-D convolutional stack scans the 6-channel IMU input; a small
    linear head maps each time step to two tanh-bounded log-scale factors z,
    and the filter's base covariance is scaled as cov0 * 10**(beta * z).
    """

    def __init__(self):
        super(MesNet, self).__init__()
        self.beta_measurement = 3 * torch.ones(2).double()
        self.tanh = torch.nn.Tanh()
        # Convolutional feature extractor over the IMU channels.
        self.cov_net = torch.nn.Sequential(torch.nn.Conv1d(6, 32, 5),
                                           torch.nn.ReplicationPad1d(4),
                                           torch.nn.ReLU(),
                                           torch.nn.Dropout(p=0.5),
                                           torch.nn.Conv1d(32, 32, 5, dilation=3),
                                           torch.nn.ReplicationPad1d(4),
                                           torch.nn.ReLU(),
                                           torch.nn.Dropout(p=0.5),
                                           ).double()
        # Linear head; weights and bias start small so the initial output
        # stays close to the filter's nominal covariance.
        self.cov_lin = torch.nn.Sequential(torch.nn.Linear(32, 2),
                                           torch.nn.Tanh(),
                                           ).double()
        self.cov_lin[0].bias.data[:] /= 100
        self.cov_lin[0].weight.data[:] /= 100

    def forward(self, u, iekf):
        """Map the IMU sequence u to per-time-step measurement covariances."""
        features = self.cov_net(u).transpose(0, 2).squeeze()
        log_factor = self.cov_lin(features)
        scaled = self.beta_measurement.unsqueeze(0) * log_factor
        return iekf.cov0_measurement.unsqueeze(0) * (10 ** scaled)
class TORCHIEKF(torch.nn.Module, NUMPYIEKF):
    """Invariant EKF implemented with torch ops so that the learned
    covariance networks can be trained end-to-end; mirrors NUMPYIEKF."""
    # Identity matrices cached once at class level (double precision).
    Id1 = torch.eye(1).double()
    Id2 = torch.eye(2).double()
    Id3 = torch.eye(3).double()
    Id6 = torch.eye(6).double()
    IdP = torch.eye(21).double()  # sized to the 21-dim error state
    def __init__(self, parameter_class=None):
        """Set up the torch IEKF.

        :param parameter_class: optional class holding filter parameters
            (e.g. KITTIParameters); when given it is instantiated and its
            attributes are copied onto this instance via set_param_attr().
        """
        torch.nn.Module.__init__(self)
        # Base init is called without parameters; they are applied below.
        NUMPYIEKF.__init__(self, parameter_class=None)
        # mean and standard deviation of parameters for normalizing inputs
        self.u_loc = None
        self.u_std = None
        # Learnable covariance adapter networks.
        self.initprocesscov_net = InitProcessCovNet()
        self.mes_net = MesNet()
        self.cov0_measurement = None
        # modified parameters
        self.IdP = torch.eye(self.P_dim).double()
        if parameter_class is not None:
            self.filter_parameters = parameter_class()
            self.set_param_attr()
    ### get parameter from self.filter_parameters = KITTIParameters
    def set_param_attr(self):
        """Copy every public, non-callable attribute of
        self.filter_parameters onto self, then rebuild the process noise
        matrix Q and the base measurement covariance from those values."""
        # get a list of attribute only
        attr_list = [a for a in dir(self.filter_parameters) if not a.startswith('__')
                and not callable(getattr(self.filter_parameters, a))]
        for attr in attr_list:
            setattr(self, attr, getattr(self.filter_parameters, attr))
        # Diagonal process noise: gyro, accel, gyro bias, accel bias, and
        # IMU-to-body rotation/translation variances (3 entries each).
        self.Q = torch.diag(torch.Tensor([self.cov_omega, self.cov_omega, self. cov_omega,
                            self.cov_acc, self.cov_acc, self.cov_acc,
                            self.cov_b_omega, self.cov_b_omega, self.cov_b_omega,
                            self.cov_b_acc, self.cov_b_acc, self.cov_b_acc,
                            self.cov_Rot_c_i, self.cov_Rot_c_i, self.cov_Rot_c_i,
                            self.cov_t_c_i, self.cov_t_c_i, self.cov_t_c_i])
                            ).double()
        # Base (unscaled) measurement covariance used by MesNet.forward.
        self.cov0_measurement = torch.Tensor([self.cov_lat, self.cov_up]).double()
    ### move, get measurements_covs(which we get values from learning model)
    def run(self, t, u, measurements_covs, v_mes, p_mes, N, ang0):
        """Run the filter over N samples (propagate, then update, per step).

        :param t: timestamps in seconds, length N
        :param u: per-step IMU inputs (gyro in u[:, :3], accel in u[:, 3:6])
        :param measurements_covs: per-step measurement covariances
            (e.g. produced by MesNet), indexed by step i in update()
        :param v_mes: measured velocities; only v_mes[0] is used, to seed
            the initial state in init_run
        :param p_mes: measured positions; passed to init_run (not otherwise
            used there)
        :param N: number of steps
        :param ang0: initial roll/pitch/yaw for the initial rotation
        :return: state trajectories (Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i)
        """
        dt = t[1:] - t[:-1]  # (s)
        Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i, P = self.init_run(dt, u, p_mes, v_mes,
                                                                     N, ang0)
        # P is carried across iterations but intentionally not returned.
        for i in range(1, N):
            Rot_i, v_i, p_i, b_omega_i, b_acc_i, Rot_c_i_i, t_c_i_i, P_i = \
                self.propagate(Rot[i-1], v[i-1], p[i-1], b_omega[i-1], b_acc[i-1], Rot_c_i[i-1],
                               t_c_i[i-1], P, u[i], dt[i-1])
            Rot[i], v[i], p[i], b_omega[i], b_acc[i], Rot_c_i[i], t_c_i[i], P = \
                self.update(Rot_i, v_i, p_i, b_omega_i, b_acc_i, Rot_c_i_i, t_c_i_i, P_i,
                            u[i], i, measurements_covs[i])
        return Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i
    ### init
    def init_run(self, dt, u, p_mes, v_mes, N, ang0):
        """Allocate zeroed state trajectories and set the first sample:
        rotation from the roll/pitch/yaw in ang0, velocity from v_mes[0];
        position and biases start at zero. Returns the states plus the
        initial covariance P."""
        Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i = \
            self.init_saved_state(dt, N, ang0)
        Rot[0] = self.from_rpy(ang0[0], ang0[1], ang0[2])
        v[0] = v_mes[0]
        P = self.init_covariance()
        return Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i, P
    ### init KF covariance P with learning model init_cov
    def init_covariance(self):
        """Build the initial P_dim x P_dim error covariance; each diagonal
        block is scaled by a learned factor beta from
        InitProcessCovNet.init_cov. Position rows (6:9) and the yaw/vz
        entries are deliberately left at zero."""
        beta = self.initprocesscov_net.init_cov(self)
        P = torch.zeros(self.P_dim, self.P_dim).double()
        P[:2, :2] = self.cov_Rot0*beta[0]*self.Id2 # no yaw error
        P[3:5, 3:5] = self.cov_v0*beta[1]*self.Id2  # first two velocity components only
        P[9:12, 9:12] = self.cov_b_omega0*beta[2]*self.Id3  # gyro bias
        P[12:15, 12:15] = self.cov_b_acc0*beta[3]*self.Id3  # accel bias
        P[15:18, 15:18] = self.cov_Rot_c_i0*beta[4]*self.Id3  # IMU-to-body rotation
        P[18:21, 18:21] = self.cov_t_c_i0*beta[5]*self.Id3  # IMU-to-body translation
        return P
    ### makes saved state to zero ...?
    def init_saved_state(self, dt, N, ang0):
        """Allocate zeroed trajectory tensors (same dtype/device as dt)
        for an N-step run; only the first IMU-to-body rotation is set to
        the identity."""
        Rot = dt.new_zeros(N, 3, 3)
        v = dt.new_zeros(N, 3)
        p = dt.new_zeros(N, 3)
        b_omega = dt.new_zeros(N, 3)
        b_acc = dt.new_zeros(N, 3)
        Rot_c_i = dt.new_zeros(N, 3, 3)
        t_c_i = dt.new_zeros(N, 3)
        Rot_c_i[0] = torch.eye(3).double()
        return Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i
    ### same calculations, differences came from using torch framework instead of numpy framework
    def propagate(self, Rot_prev, v_prev, p_prev, b_omega_prev, b_acc_prev, Rot_c_i_prev, t_c_i_prev,
                  P_prev, u, dt):
        """Strapdown IMU propagation of the state and covariance over dt.

        u[:3] is the gyro measurement and u[3:6] the accelerometer; both are
        corrected by the current bias estimates before integration.
        """
        Rot_prev = Rot_prev.clone()
        acc_b = u[3:6] - b_acc_prev  # bias-corrected specific force (body frame)
        acc = Rot_prev.mv(acc_b) + self.g  # world-frame acceleration incl. gravity
        v = v_prev + acc * dt
        p = p_prev + v_prev.clone() * dt + 1/2 * acc * dt**2
        omega = (u[:3] - b_omega_prev)*dt  # bias-corrected rotation increment
        Rot = Rot_prev.mm(self.so3exp(omega))
        # Biases and IMU-to-body extrinsics are modeled as constant here.
        b_omega = b_omega_prev
        b_acc = b_acc_prev
        Rot_c_i = Rot_c_i_prev.clone()
        t_c_i = t_c_i_prev
        P = self.propagate_cov(P_prev, Rot_prev, v_prev, p_prev, b_omega_prev, b_acc_prev,
                               u, dt)
        return Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i, P
    ### same calculations, differences came from using torch framework instead of numpy framework
    def propagate_cov(self, P, Rot_prev, v_prev, p_prev, b_omega_prev, b_acc_prev, u,
                      dt):
        """Propagate the error-state covariance over one step dt.

        Builds the error dynamics matrix F and the noise Jacobian G (mapping
        the 18-dim process noise Q into the 21-dim error state), scales both
        by dt, and applies a third-order Taylor expansion of the transition
        matrix Phi = exp(F*dt).
        NOTE(review): the block layout appears to follow the invariant-EKF
        error parameterization of the companion NUMPYIEKF -- confirm there.
        """
        F = P.new_zeros(self.P_dim, self.P_dim)
        G = P.new_zeros(self.P_dim, self.Q.shape[0])
        Q = self.Q.clone()
        # Couplings of rotation/velocity/position errors with gravity and biases.
        F[3:6, :3] = self.skew(self.g)
        F[6:9, 3:6] = self.Id3
        G[3:6, 3:6] = Rot_prev
        F[3:6, 12:15] = -Rot_prev
        v_skew_rot = self.skew(v_prev).mm(Rot_prev)
        p_skew_rot = self.skew(p_prev).mm(Rot_prev)
        # Noise insertion blocks.
        G[:3, :3] = Rot_prev
        G[3:6, :3] = v_skew_rot
        G[6:9, :3] = p_skew_rot
        F[:3, 9:12] = -Rot_prev
        F[3:6, 9:12] = -v_skew_rot
        F[6:9, 9:12] = -p_skew_rot
        G[9:12, 6:9] = self.Id3
        G[12:15, 9:12] = self.Id3
        G[15:18, 12:15] = self.Id3
        G[18:21, 15:18] = self.Id3
        # Discretize and expand Phi = exp(F*dt) to third order.
        F = F * dt
        G = G * dt
        F_square = F.mm(F)
        F_cube = F_square.mm(F)
        Phi = self.IdP + F + 1/2*F_square + 1/6*F_cube
        P_new = Phi.mm(P + G.mm(Q).mm(G.t())).mm(Phi.t())
        return P_new
    def update(self, Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i, P, u, i, measurement_cov):
        """Measurement update with the pseudo-measurement that the body's
        lateral and vertical velocity components are zero: the 2-dim
        residual is r = -v_body[1:]."""
        # orientation of body frame
        Rot_body = Rot.mm(Rot_c_i)
        # velocity in imu frame
        v_imu = Rot.t().mv(v)
        omega = u[:3] - b_omega
        # velocity in body frame
        v_body = Rot_c_i.t().mv(v_imu) + self.skew(t_c_i).mv(omega)
        Omega = self.skew(omega)
        # Jacobian in car frame
        H_v_imu = Rot_c_i.t().mm(self.skew(v_imu))
        H_t_c_i = self.skew(t_c_i)
        # Only rows 1: (lateral/vertical) of each Jacobian block are observed.
        H = P.new_zeros(2, self.P_dim)
        H[:, 3:6] = Rot_body.t()[1:]
        H[:, 15:18] = H_v_imu[1:]
        H[:, 9:12] = H_t_c_i[1:]
        H[:, 18:21] = -Omega[1:]
        r = - v_body[1:]
        R = torch.diag(measurement_cov)
        Rot_up, v_up, p_up, b_omega_up, b_acc_up, Rot_c_i_up, t_c_i_up, P_up = \
            self.state_and_cov_update(Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i, P, H, r, R)
        return Rot_up, v_up, p_up, b_omega_up, b_acc_up, Rot_c_i_up, t_c_i_up, P_up
    @staticmethod
    def state_and_cov_update(Rot, v, p, b_omega, b_acc, Rot_c_i, t_c_i, P, H, r, R):
        """Compute the Kalman gain and apply the multiplicative state
        correction; returns updated copies of every state component and P."""
        S = H.mm(P).mm(H.t()) + R  # innovation covariance
        # Gain K = P H^T S^-1, obtained by solving S * Kt = (P H^T)^T.
        # NOTE(review): torch.solve was deprecated and later removed;
        # torch.linalg.solve(S, P.mm(H.t()).t()) is the modern equivalent.
        Kt, _ = torch.solve(P.mm(H.t()).t(), S)
        K = Kt.t()
        dx = K.mv(r.view(-1))
        # Exponential of the first 9 error components corrects rotation,
        # velocity and position jointly (see sen3exp).
        dR, dxi = TORCHIEKF.sen3exp(dx[:9])
        dv = dxi[:, 0]
        dp = dxi[:, 1]
        Rot_up = dR.mm(Rot)
        v_up = dR.mv(v) + dv
        p_up = dR.mv(p) + dp
        # Additive corrections for the bias states.
        b_omega_up = b_omega + dx[9:12]
        b_acc_up = b_acc + dx[12:15]
        dR = TORCHIEKF.so3exp(dx[15:18])
        Rot_c_i_up = dR.mm(Rot_c_i)
        t_c_i_up = t_c_i + dx[18:21]
        I_KH = TORCHIEKF.IdP - K.mm(H)
        # Joseph form keeps P positive semi-definite; then symmetrize.
        P_upprev = I_KH.mm(P).mm(I_KH.t()) + K.mm(R).mm(K.t())
        P_up = (P_upprev + P_upprev.t())/2
        return Rot_up, v_up, p_up, b_omega_up, b_acc_up, Rot_c_i_up, t_c_i_up, P_up
@staticmethod
def skew(x):
X = torch.Tensor([[0, -x[2], x[1]],
[x[2], 0, -x[0]],
[-x[1], x[0], 0]]).double()
return X
    @staticmethod
    def rot_from_2_vectors(v1, v2):
        """ Returns a Rotation matrix between vectors 'v1' and 'v2'

        Closed form R = I + [v]_x + [v]_x^2 * (1-cos)/sin^2 with v = v1 x v2
        after normalizing both inputs.
        NOTE(review): divides by sinang**2, so parallel or anti-parallel
        inputs (sinang == 0) yield NaN/inf -- confirm callers avoid that.
        """
        v1 = v1/torch.norm(v1)
        v2 = v2/torch.norm(v2)
        v = torch.cross(v1, v2)
        cosang = v1.matmul(v2)
        sinang = torch.norm(v)
        Rot = TORCHIEKF.Id3 + TORCHIEKF.skew(v) + \
            TORCHIEKF.skew(v).mm(TORCHIEKF.skew(v))*(1-cosang)/(sinang**2)
        return Rot
    @staticmethod
    def sen3exp(xi):
        """Exponential for the 9-dim pose error: returns (Rot, x) where
        Rot = exp([phi]_x) for phi = xi[:3] and x = J @ xi[3:] reshaped to
        two columns (velocity and position corrections), J being the SO(3)
        left Jacobian.

        NOTE(review): `isclose` is not imported in this file's visible
        header; presumably defined elsewhere in the module -- confirm.
        """
        phi = xi[:3]
        angle = torch.norm(phi)
        # Near |phi|==0, use first order Taylor expansion
        if isclose(angle, 0.):
            skew_phi = torch.Tensor([[0, -phi[2], phi[1]],
                                     [phi[2], 0, -phi[0]],
                                     [-phi[1], phi[0], 0]]).double()
            J = TORCHIEKF.Id3 + 0.5 * skew_phi
            Rot = TORCHIEKF.Id3 + skew_phi
        else:
            axis = phi / angle
            skew_axis = torch.Tensor([[0, -axis[2], axis[1]],
                                      [axis[2], 0, -axis[0]],
                                      [-axis[1], axis[0], 0]]).double()
            s = torch.sin(angle)
            c = torch.cos(angle)
            # Rodrigues formula for Rot; closed form for the left Jacobian J.
            J = (s / angle) * TORCHIEKF.Id3 + (1 - s / angle) * TORCHIEKF.outer(axis, axis)\
                + ((1 - c) / angle) * skew_axis
            Rot = c * TORCHIEKF.Id3 + (1 - c) * TORCHIEKF.outer(axis, axis) \
                + s * skew_axis
        x = J.mm(xi[3:].view(-1, 3).t())
        return Rot, x
    @staticmethod
    def so3exp(phi):
        """SO(3) exponential map: rotation matrix for rotation vector phi
        (Rodrigues formula, with a first-order fallback near zero)."""
        angle = phi.norm()
        # Near phi==0, use first order Taylor expansion
        if isclose(angle, 0.):
            skew_phi = torch.Tensor([[0, -phi[2], phi[1]],
                                     [phi[2], 0, -phi[0]],
                                     [-phi[1], phi[0], 0]]).double()
            Xi = TORCHIEKF.Id3 + skew_phi
            return Xi
        axis = phi / angle
        skew_axis = torch.Tensor([[0, -axis[2], axis[1]],
                                  [axis[2], 0, -axis[0]],
                                  [-axis[1], axis[0], 0]]).double()
        c = angle.cos()
        s = angle.sin()
        # Rodrigues: R = cI + (1-c) a a^T + s [a]_x
        Xi = c * TORCHIEKF.Id3 + (1 - c) * TORCHIEKF.outer(axis, axis) \
            + s * skew_axis
        return Xi
@staticmethod
def outer(a, b):
ab = a.view(-1, 1)*b.view(1, -1)
return ab
@staticmethod
def so3left_jacobian(phi):
angle = torch.norm(phi)
# Near |phi|==0, use first order Taylor expansion
if isclose(angle, 0.):
skew_phi = torch.Tensor([[0, -phi[2], phi[1]],
[phi[2], 0, -phi[0]],
[-phi[1], phi[0], 0]]).double()
return TORCHIEKF.Id3 + 0.5 * skew_phi
axis = phi / angle
skew_axis = torch.Tensor([[0, -axis[2], axis[1]],
[axis[2], 0, -axis[0]],
[-axis[1], axis[0], 0]]).double()
s = torch.sin(angle)
c = torch.cos(angle)
return (s / angle) * TORCHIEKF.Id3 + (1 - | |
<reponame>DarthLazar/lenstronomy
__author__ = 'sibirrer'
import numpy as np
import copy
from lenstronomy.Util import class_creator
from lenstronomy.ImSim.image2source_mapping import Image2SourceMapping
from lenstronomy.LensModel.Solver.solver import Solver
from lenstronomy.LensModel.lens_param import LensParam
from lenstronomy.LightModel.light_param import LightParam
from lenstronomy.PointSource.point_source_param import PointSourceParam
from lenstronomy.Sampling.special_param import SpecialParam
__all__ = ['Param']
class Param(object):
"""
class that handles the parameter constraints. In particular when different model profiles share joint constraints.
Options between same model classes:
'joint_lens_with_lens':list [[i_lens, k_lens, ['param_name1', 'param_name2', ...]], [...], ...],
joint parameter between two lens models
'joint_lens_light_with_lens_light':list [[i_lens_light, k_lens_light, ['param_name1', 'param_name2', ...]], [...], ...],
joint parameter between two lens light models, the second adopts the value of the first
'joint_source_with_source':list [[i_source, k_source, ['param_name1', 'param_name2', ...]], [...], ...],
joint parameter between two source surface brightness models, the second adopts the value of the first
Options between different model classes:
'joint_lens_with_light': list [[i_light, k_lens, ['param_name1', 'param_name2', ...]], [...], ...],
joint parameter between lens model and lens light model
'joint_source_with_point_source': list [[i_point_source, k_source], [...], ...],
joint position parameter between lens model and source light model
'joint_lens_light_with_point_source': list [[i_point_source, k_lens_light], [...], ...],
joint position parameter between lens model and lens light model
'joint_extinction_with_lens_light': list [[i_lens_light, k_extinction, ['param_name1', 'param_name2', ...]], [...], ...],
joint parameters between the lens surface brightness and the optical depth models
'joint_lens_with_source_light': [[i_source, k_lens, ['param_name1', 'param_name2', ...]], [...], ...],
joint parameter between lens model and source light model. Samples light model parameter only.
hierarchy is as follows:
1. Point source parameters are inferred
2. Lens light joint parameters are set
3. Lens model joint constraints are set
4. Lens model solver is applied
5. Joint source and point source is applied
Alternatively to the format of the linking of parameters with IDENTICAL names as listed above as:
[[i_1, k_2, ['param_name1', 'param_name2', ...]], [...], ...]
the following format of the arguments are supported to join parameters with DIFFERENT names:
[[i_1, k_2, {'param_old1': 'param_new1', 'ra_0': 'center_x'}], [...], ...]
"""
def __init__(self, kwargs_model,
kwargs_fixed_lens=None, kwargs_fixed_source=None, kwargs_fixed_lens_light=None, kwargs_fixed_ps=None,
kwargs_fixed_special=None, kwargs_fixed_extinction=None,
kwargs_lower_lens=None, kwargs_lower_source=None, kwargs_lower_lens_light=None, kwargs_lower_ps=None,
kwargs_lower_special=None, kwargs_lower_extinction=None,
kwargs_upper_lens=None, kwargs_upper_source=None, kwargs_upper_lens_light=None, kwargs_upper_ps=None,
kwargs_upper_special=None, kwargs_upper_extinction=None,
kwargs_lens_init=None, linear_solver=True, joint_lens_with_lens=[], joint_lens_light_with_lens_light=[],
joint_source_with_source=[], joint_lens_with_light=[], joint_source_with_point_source=[],
joint_lens_light_with_point_source=[], joint_extinction_with_lens_light=[],
joint_lens_with_source_light=[], mass_scaling_list=None, point_source_offset=False,
num_point_source_list=None, image_plane_source_list=None, solver_type='NONE', Ddt_sampling=None,
source_size=False, num_tau0=0, lens_redshift_sampling_indexes=None,
source_redshift_sampling_indexes=None, source_grid_offset=False, num_shapelet_lens=0):
"""
:param kwargs_model:
:param kwargs_fixed_lens:
:param kwargs_fixed_source:
:param kwargs_fixed_lens_light:
:param kwargs_fixed_ps:
:param kwargs_fixed_special:
:param kwargs_fixed_extinction:
:param kwargs_lower_lens:
:param kwargs_lower_source:
:param kwargs_lower_lens_light:
:param kwargs_lower_ps:
:param kwargs_lower_special:
:param kwargs_lower_extinction:
:param kwargs_upper_lens:
:param kwargs_upper_source:
:param kwargs_upper_lens_light:
:param kwargs_upper_ps:
:param kwargs_upper_special:
:param kwargs_upper_extinction:
:param kwargs_lens_init:
:param linear_solver:
:param joint_lens_with_lens:
:param joint_lens_light_with_lens_light:
:param joint_source_with_source:
:param joint_lens_with_light:
:param joint_source_with_point_source:
:param joint_lens_light_with_point_source:
:param joint_extinction_with_lens_light:
:param joint_lens_with_source_light:
:param mass_scaling_list:
:param point_source_offset:
:param num_point_source_list:
:param image_plane_source_list: optional, list of booleans for the source_light components.
If a component is set =True it will parameterized the positions in the image plane and ray-trace the
parameters back to the source position on the fly during the fitting.
:param solver_type:
:param Ddt_sampling:
:param source_size:
:param num_tau0:
:param lens_redshift_sampling_indexes: list of integers corresponding to the lens model components whose redshifts
are a free parameter (only has an effect in multi-plane lensing) with same indexes indicating joint redshift,
in ascending numbering e.g. [-1, 0, 0, 1, 0, 2], -1 indicating not sampled fixed indexes
:param source_redshift_sampling_indexes: list of integers corresponding to the source model components whose redshifts
are a free parameter (only has an effect in multi-plane lensing) with same indexes indicating joint redshift,
in ascending numbering e.g. [-1, 0, 0, 1, 0, 2], -1 indicating not sampled fixed indexes. These indexes are
the sample as for the lens
:param source_grid_offset: optional, if True when using a pixel-based modelling (e.g. with STARLETS-like profiles),
adds two additional sampled parameters describing RA/Dec offsets between data coordinate grid and pixelated source plane coordinate grid.
:param num_shapelet_lens: number of shapelet coefficients in the 'SHAPELETS_CART' or 'SHAPELETS_POLAR' mass profile.
"""
self._lens_model_list = kwargs_model.get('lens_model_list', [])
self._lens_redshift_list = kwargs_model.get('lens_redshift_list', None)
self._source_light_model_list = kwargs_model.get('source_light_model_list', [])
self._source_redshift_list = kwargs_model.get('source_redshift_list', None)
self._lens_light_model_list = kwargs_model.get('lens_light_model_list', [])
self._point_source_model_list = kwargs_model.get('point_source_model_list', [])
self._optical_depth_model_list = kwargs_model.get('optical_depth_model_list', [])
self._kwargs_model = kwargs_model
# check how many redshifts need to be sampled
num_z_sampling = 0
if lens_redshift_sampling_indexes is not None:
num_z_sampling = int(np.max(lens_redshift_sampling_indexes) + 1)
if source_redshift_sampling_indexes is not None:
num_z_source = int(np.max(source_redshift_sampling_indexes) + 1)
num_z_sampling = max(num_z_sampling, num_z_source)
self._num_z_sampling, self._lens_redshift_sampling_indexes, self._source_redshift_sampling_indexes = num_z_sampling, lens_redshift_sampling_indexes, source_redshift_sampling_indexes
self._lens_model_class, self._source_model_class, _, _, _ = class_creator.create_class_instances(all_models=True, **kwargs_model)
self._image2SourceMapping = Image2SourceMapping(lensModel=self._lens_model_class,
sourceModel=self._source_model_class)
if kwargs_fixed_lens is None:
kwargs_fixed_lens = [{} for i in range(len(self._lens_model_list))]
if kwargs_fixed_source is None:
kwargs_fixed_source = [{} for i in range(len(self._source_light_model_list))]
if kwargs_fixed_lens_light is None:
kwargs_fixed_lens_light = [{} for i in range(len(self._lens_light_model_list))]
if kwargs_fixed_ps is None:
kwargs_fixed_ps = [{} for i in range(len(self._point_source_model_list))]
if kwargs_fixed_special is None:
kwargs_fixed_special = {}
self._joint_lens_with_lens = joint_lens_with_lens
self._joint_lens_light_with_lens_light = joint_lens_light_with_lens_light
self._joint_source_with_source = joint_source_with_source
self._joint_lens_with_light = joint_lens_with_light
self._joint_lens_with_source_light = joint_lens_with_source_light
self._joint_source_with_point_source = copy.deepcopy(joint_source_with_point_source)
for param_list in self._joint_source_with_point_source:
if len(param_list) == 2:
param_list.append(['center_x', 'center_y'])
self._joint_lens_light_with_point_source = copy.deepcopy(joint_lens_light_with_point_source)
for param_list in self._joint_lens_light_with_point_source:
if len(param_list) == 2:
param_list.append(['center_x', 'center_y'])
if mass_scaling_list is None:
mass_scaling_list = [False] * len(self._lens_model_list)
self._mass_scaling_list = mass_scaling_list
if 1 in self._mass_scaling_list:
self._num_scale_factor = np.max(self._mass_scaling_list)
self._mass_scaling = True
else:
self._num_scale_factor = 0
self._mass_scaling = False
self._point_source_offset = point_source_offset
if num_point_source_list is None:
num_point_source_list = [1] * len(self._point_source_model_list)
# Attention: if joint coordinates with other source profiles, only indicate one as bool
if image_plane_source_list is None:
image_plane_source_list = [False] * len(self._source_light_model_list)
self._image_plane_source_list = image_plane_source_list
try:
self._num_images = num_point_source_list[0]
except:
self._num_images = 0
self._solver_type = solver_type
if self._solver_type == 'NONE':
self._solver = False
else:
self._solver = True
self._solver_module = Solver(solver_type=self._solver_type, lensModel=self._lens_model_class,
num_images=self._num_images)
source_model_list = self._source_light_model_list
if (len(source_model_list) != 1 or source_model_list[0] not in ['SLIT_STARLETS', 'SLIT_STARLETS_GEN2']):
# source_grid_offset only defined for source profiles compatible with pixel-based solver
source_grid_offset = False
self._joint_extinction_with_lens_light = joint_extinction_with_lens_light
# fix parameters joint within the same model types
kwargs_fixed_lens_updated = self._add_fixed_lens(kwargs_fixed_lens, kwargs_lens_init)
kwargs_fixed_lens_updated = self._fix_joint_param(kwargs_fixed_lens_updated, self._joint_lens_with_lens)
kwargs_fixed_lens_updated = self._fix_joint_param(kwargs_fixed_lens_updated, self._joint_lens_with_source_light)
kwargs_fixed_lens_light_updated = self._fix_joint_param(kwargs_fixed_lens_light, self._joint_lens_light_with_lens_light)
kwargs_fixed_source_updated = self._fix_joint_param(kwargs_fixed_source, self._joint_source_with_source)
kwargs_fixed_ps_updated = copy.deepcopy(kwargs_fixed_ps)
kwargs_fixed_extinction_updated = self._fix_joint_param(kwargs_fixed_extinction, self._joint_extinction_with_lens_light)
# fix parameters joint with other model types
kwargs_fixed_lens_updated = self._fix_joint_param(kwargs_fixed_lens_updated, self._joint_lens_with_light)
kwargs_fixed_source_updated = self._fix_joint_param(kwargs_fixed_source_updated, self._joint_source_with_point_source)
kwargs_fixed_lens_light_updated = self._fix_joint_param(kwargs_fixed_lens_light_updated,
self._joint_lens_light_with_point_source)
self.lensParams = LensParam(self._lens_model_list, kwargs_fixed_lens_updated, num_images=self._num_images,
solver_type=self._solver_type, kwargs_lower=kwargs_lower_lens,
kwargs_upper=kwargs_upper_lens, num_shapelet_lens=num_shapelet_lens)
self.lensLightParams = LightParam(self._lens_light_model_list, kwargs_fixed_lens_light_updated, type='lens_light',
linear_solver=linear_solver, kwargs_lower=kwargs_lower_lens_light,
kwargs_upper=kwargs_upper_lens_light)
self.souceParams = LightParam(self._source_light_model_list, kwargs_fixed_source_updated, type='source_light',
linear_solver=linear_solver, kwargs_lower=kwargs_lower_source,
kwargs_upper=kwargs_upper_source)
self.pointSourceParams = PointSourceParam(self._point_source_model_list, kwargs_fixed_ps_updated,
num_point_source_list=num_point_source_list,
linear_solver=linear_solver, kwargs_lower=kwargs_lower_ps,
kwargs_upper=kwargs_upper_ps)
self.extinctionParams = LightParam(self._optical_depth_model_list, kwargs_fixed_extinction_updated,
kwargs_lower=kwargs_lower_extinction, kwargs_upper=kwargs_upper_extinction,
linear_solver=False)
self.specialParams = SpecialParam(Ddt_sampling=Ddt_sampling, mass_scaling=self._mass_scaling,
kwargs_fixed=kwargs_fixed_special, num_scale_factor=self._num_scale_factor,
kwargs_lower=kwargs_lower_special, kwargs_upper=kwargs_upper_special,
point_source_offset=self._point_source_offset, num_images=self._num_images,
source_size=source_size, num_tau0=num_tau0, num_z_sampling=num_z_sampling,
source_grid_offset=source_grid_offset)
for lens_source_joint in self._joint_lens_with_source_light:
i_source = lens_source_joint[0]
if i_source in self._image_plane_source_list:
raise ValueError("linking a source light model with a lens model AND simultaneously parameterizing the"
" source position in the image plane is not valid!")
    @property
    def num_point_source_images(self):
        """Number of point-source images used by the constraint solver (int).

        Derived from the first entry of ``num_point_source_list`` at construction
        time, or 0 if no point source model is present.
        """
        return self._num_images
def args2kwargs(self, args, bijective=False):
"""
:param args: tuple of parameter values (float, strings, ...)
:param bijective: boolean, if True (default) returns the parameters in the form as they are sampled
(e.g. if image_plane_source_list is set =True it returns the position in the image plane coordinates),
if False, returns the parameters in the form to render a model (e.g. image_plane_source_list positions are
ray-traced back to the source plane).
:return: keyword arguments sorted in lenstronomy conventions
"""
i = 0
args = np.atleast_1d(args)
kwargs_lens, i = self.lensParams.getParams(args, i)
kwargs_source, i = self.souceParams.getParams(args, i)
kwargs_lens_light, i = self.lensLightParams.getParams(args, i)
kwargs_ps, i = self.pointSourceParams.getParams(args, i)
kwargs_special, i = self.specialParams.get_params(args, i)
kwargs_extinction, i = self.extinctionParams.getParams(args, i)
self._update_lens_model(kwargs_special)
# update lens_light joint parameters
kwargs_lens_light = self._update_lens_light_joint_with_point_source(kwargs_lens_light, kwargs_ps)
kwargs_lens_light = self._update_joint_param(kwargs_lens_light, kwargs_lens_light,
self._joint_lens_light_with_lens_light)
# update lens_light joint with lens model parameters
kwargs_lens = self._update_joint_param(kwargs_lens_light, kwargs_lens, self._joint_lens_with_light)
kwargs_lens = self._update_joint_param(kwargs_source, kwargs_lens, self._joint_lens_with_source_light)
# update extinction model with lens light model
kwargs_extinction = self._update_joint_param(kwargs_lens_light, kwargs_extinction,
self._joint_extinction_with_lens_light)
# update lens model joint parameters (including scaling)
kwargs_lens = self._update_joint_param(kwargs_lens, kwargs_lens, self._joint_lens_with_lens)
kwargs_lens = self.update_lens_scaling(kwargs_special, kwargs_lens)
# update point source constraint solver
if self._solver is True:
x_pos, y_pos = kwargs_ps[0]['ra_image'], kwargs_ps[0]['dec_image']
kwargs_lens = self._solver_module.update_solver(kwargs_lens, x_pos, y_pos)
# update source joint with point source
kwargs_source = self._update_source_joint_with_point_source(kwargs_lens, kwargs_source, kwargs_ps,
kwargs_special, image_plane=bijective)
# update source joint with source
kwargs_source = self._update_joint_param(kwargs_source, kwargs_source, self._joint_source_with_source)
# optional revert lens_scaling for bijective
if bijective is True:
kwargs_lens | |
present and of the correct Python type.
parameterTypesBasicFields = swagGl.BASIC_PARAMETER_FIELDS.get(paramSwag['type'],{})
for basicFieldName in parameterTypesBasicFields:
fieldRules = parameterTypesBasicFields[basicFieldName]
if fieldRules['required'] and basicFieldName not in paramSwag:
raise CustomExceptions.SwaggerParamPropMissingException(
"Parameter of type '{!s}' must have Swagger Key '{!s}' defined.".format(
paramSwag['type'], basicFieldName
)
)
if basicFieldName in paramSwag:
logger.trace("Parameter signature has the '{!s}' key.".format(basicFieldName))
if not isinstance(paramSwag[basicFieldName], fieldRules['valueType']):
raise CustomExceptions.SwaggerParamDefinitionInvalidException(
"'{!s}' Swagger Key for parameter of type '{!s}' must be a {!s}".format(
basicFieldName, paramSwag['type'], fieldRules['valueType']
)
)
if ('allowedValues' in fieldRules and
paramSwag[basicFieldName] not in fieldRules['allowedValues']
):
raise CustomExceptions.SwaggerParamDefinitionInvalidException(
"'{!s}' Swagger Key for parameter of type '{!s}'".format(basicFieldName, paramSwag['type'])+
" must be one of the following values: {!r}".format(fieldRules['allowedValues'])
)
sig[basicFieldName] = paramSwag[basicFieldName]
#END IF
#END FOR
#For some other data types, we need to do some "special" processing of the Swagger Definition to create
# an HttpDataSignature. Basically, just 'object' and 'array', which are kinda unique
if paramSwag['type'] == 'object':
sig['signature'] = HttpDataSignature()
#We will NOT throw an Exception if the key 'properties' is not in the Swagger definition for an 'object' type
# parameter. This way, an endpoint can be defined as accepting an object, but not care what kind of data
# is given.
if 'properties' in paramSwag:
for key in paramSwag['properties']:
sig['signature'][key] = extractForSig(paramSwag['properties'][key], dataLocation)
#After generating the signature for each parameter in the object, we overwrite its required-ness
# based on the list of String that define the required parameters in the object.
sig['signature'][key]['required'] = key in paramSwag.get('required',[])
#END IF
elif paramSwag['type'] == 'array':
sig['signature'] = HttpDataSignature()
#We will NOT throw an Exception if the key 'items' is not in the Swagger definition for an 'array' type
# parameter. This way, an endpoint can be defined as accepting an array, but not care what kind of data
# is given.
if 'items' in paramSwag:
if not isinstance(paramSwag['items'], types.DictionaryType):
raise CustomExceptions.SwaggerParamPropMissingException(
"A parameter definiton of type 'array' must contain a Python Dictionary in the key 'items', "+
"the Swagger definition of the values expected."
)
sig['signature']['items'] = extractForSig(paramSwag['items'], dataLocation)
#END IF
#If this 'array' is not data that is expected in an HTTP Body, then it will need to have a "collection
# format". We need to verify that the format is one of our expected types
if dataLocation != 'body':
if 'collectionFormat' not in paramSwag:
raise CustomExceptions.SwaggerParamPropMissingException(
"Parameter of type '{!s}' must have Swagger Key '{!s}' defined.".format(
paramSwag['type'], 'collectionFormat'
)
)
if not isinstance(paramSwag['collectionFormat'], types.StringTypes):
raise CustomExceptions.SwaggerParamDefinitionInvalidException(
"'{!s}' Swagger Key for parameter of type '{!s}' must be a {!s}".format(
'collectionFormat', paramSwag['type'], types.StringTypes
)
)
if paramSwag['collectionFormat'] not in swagGl.VALID_SWAGGER_ARRAY_COLLECTION_FORMATS.keys():
raise CustomExceptions.SwaggerParamDefinitionInvalidException(
"'{!s}' Swagger Key for parameter of type '{!s}'".format(
'collectionFormat', paramSwag['type']
)+
" must be one of the following values: {!r}".format(
swagGl.VALID_SWAGGER_ARRAY_COLLECTION_FORMATS.keys()
)
)
sig['collectionFormat'] = paramSwag['collectionFormat']
#END IF
#END IF/ELIF (special rules per type)
return sig
#END DEF
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#Validating that the callee gave us a valid "direction"
direction = str(direction)
acceptableDirections = ['incoming','outgoing']
if direction not in acceptableDirections:
raise Exception("The `direction` parameter must be one of the accepted values: {!r}".format(acceptableDirections))
datSig = HttpDataSignature()
#
# https://swagger.io/docs/specification/2-0/describing-parameters/
# https://swagger.io/docs/specification/2-0/describing-request-body/
#
if direction == 'incoming':
if qualifier not in swagGl.VALID_SWAGGER_IN.keys():
raise CustomExceptions.Exception(
"The incoming response qualifier '{!s}' is not valid. Accepted Values are {!r}.".format(
qualifier, swagGl.VALID_SWAGGER_IN.keys()
)
)
if swaggerdef.get('parameters',None) is None or not isinstance(swaggerdef.get('parameters',None), types.ListType):
logger.debug(
"'parameters' key in Swagger is not a List. No signature to extract for '{!s}'".format(qualifier)
)
return datSig
for param in swaggerdef['parameters']:
if '$ref' in param:
param = getRef(param['$ref'])
if param['in'] != qualifier:
logger.trace("The parameter we are processing is not expected to come in the '{!s}'".format(qualifier))
continue
#At this point, we know that the param's "in" definition matches the qualifier.
if qualifier == 'body':
#We expect the 'body' type parameter to have a schema. If the 'schema' key is
# not found, we will use an empty dictionary as a default
schema = param.get('schema', {})
#The signature for an HTTP Body will always be of type 'object'
schema['type'] = 'object'
#The 'in:body' type is unique, as it is allowed objects
logger.trace("Extracting incoming signature for Body")
datSig.update( extractForSig(schema, qualifier)['signature'] )
#The requirement of items in an object is defined by the Object, not by the item in the object.
for key in param['schema'].get('properties',{}):
datSig[key]['required'] = key in param['schema'].get('required',[])
else:
#All other "in"s will use a more simple method for extracting the correct
# varaible signature. The "in" qualifier is probably among the following:
# - formData, query, header, path
if 'name' not in param:
raise CustomExceptions.SwaggerParamPropMissingException(
"Every parameter with a definition must have a name. Definition given '{!r}'.".format(param)
)
logger.debug(
"Extracting incoming signature for '{!s}', which is expected to be in '{!s}'".format(
param['name'], qualifier
)
)
datSig[param['name']] = extractForSig(param, qualifier)
#END IF/ELSE
#END FOR
#
# https://swagger.io/docs/specification/2-0/describing-responses/
#
elif direction == 'outgoing':
if ('produces' not in swaggerdef or
not isinstance(swaggerdef.get('produces',None), types.ListType) or
not filter(lambda v:isinstance(v,types.StringTypes), swaggerdef['produces'])
):
raise CustomExceptions.SwaggerParamDefinitionInvalidException(
"You must specify what kind of data will be returned by the endpoint in the 'produces' "+
"key of the Swagger. Please provide a list of Strings."
)
if 'application/json' not in swaggerdef['produces']:
#The developer is allowed to define an endpoint that does not return JSON. However, this means
# that no response validation will be done by the translation process.
logger.debug('Outgoing data is not JSON. No data that we can validate')
return datSig
if (swaggerdef.get('responses',None) is None or
not isinstance(swaggerdef.get('responses',None), types.DictionaryType)
):
logger.debug("No dictionary of possible responses in Swagger definition")
return datSig
if qualifier not in swaggerdef['responses']:
raise Exception(
"The outgoing response qualifier '{!s}'".format(qualifier) +
" does not exist in the Swagger's `responses` dictionary."
)
if '$ref' in swaggerdef['responses'].get(qualifier, {}).keys():
schema = getRef(swaggerdef['responses'][qualifier]['$ref']).get('schema',None)
else:
schema = swaggerdef['responses'].get(qualifier,{}).get('schema',None)
if schema is None or not isinstance(schema, types.DictionaryType):
logger.debug("No schema found for definition of '{!s}' response.".format(qualifier))
return datSig
schemaType = schema.get('type',"string")
if schemaType == 'object':
#When creating a signature for a response, the "data location" is always the body, since we are only
# creating response signatures for JSON responses that have a 'schema' defined, similar to how
# the HTTP Body parameters are defined.
logger.debug("Extracting outgoing signature for definition of '{!s}' object response".format(qualifier))
datSig = extractForSig(schema, 'body')['signature']
for key in schema.get('properties',{}):
datSig[key]['required'] = key in schema.get('required',[])
elif schemaType == 'string':
logger.debug("Outgoing response specified as a type 'string'.")
pass
# # # # # # # # # #
# TODO:
# The logic here should handle different types of responses more gracefully.
# This will likely require some more abstraction into how the `Endpoint` class handles
# the validation of a generated response.
# # # # # # # # # #
else:
raise Exception("The outgoing response needs to have a valid specified 'type'.")
#END IF/ELSE
return datSig
#END DEF
def obscure(data, sig):
    '''
    @FUNC Recursively redacts values in a dictionary according to an HTTP Data Signature.
          Keys whose signature entry carries a truthy 'obscure' flag are replaced with
          the string "REDACTED"; nested dictionaries and lists are processed recursively.
    @PARAM data : Python Dictionary
    @PARAM sig : HttpDataSignature Object
    @RETURN Python Dictionary, the original data with the defined keys obscured (ie. changed to "REDACTED")
    '''
    logger = LIBRARY_LOGGER.getSubLogger('obscure')
    logger.debug("Received data of type {!s}. (see details for signature)".format(type(data)), sig)
    #Validate both arguments up front so the recursion below can assume their types.
    if not isinstance(data, types.DictionaryType):
        raise Exception('Obscuring of data requires a Python Dictionary object for the first parameter.')
    if not isinstance(sig, HttpDataSignature):
        raise Exception('Obscuring of data requires an HttpDataSignature object for the second parameter.')
    logger.trace("Given data and signature are the valid data types.")
    #Operate on a deep copy so the caller's dictionary is never mutated.
    result = copy.deepcopy(data)
    for key in sig:
        logger.trace("Processing key '{!s}'".format(key))
        if key not in result:
            logger.trace("Key '{!s}' not in data. Skipping...".format(key))
            continue
        #Only obscure data if specified in the Data Signature Dictionary. Note that the Swagger uses the
        # key 'custom prefix'+'obscure', which then becomes the key 'obscure'
        # in the Data Signature Dictionary
        if bool(sig[key].get('obscure', False)):
            logger.trace("Redacting value for key '{!s}'!".format(key))
            result[key] = 'REDACTED'
        elif isinstance(result[key], types.DictionaryType):
            logger.trace("Key '{!s}' maps to a dictionary. Making recursive call...".format(key))
            result[key] = obscure(result[key], sig[key].get('signature', HttpDataSignature()))
        elif isinstance(result[key], types.ListType):
            logger.trace("Key '{!s}' maps to a list. Redacting all data!".format(key))
            #Each element is wrapped in a one-key dictionary so the recursive call can
            # apply the 'items' signature to it, then unwrapped again.
            processed = [
                obscure({'items': element}, sig[key].get('signature', HttpDataSignature()))['items']
                for element in result[key]
            ]
            #Collapse all redacted elements into a single leading 'REDACTED' marker,
            # followed by the untouched elements in their original order.
            result[key] = (
                (['REDACTED'] if 'REDACTED' in processed else []) +
                [v for v in processed if v != 'REDACTED']
            )
        #END IF/ELIF/ELIF
    #END FOR
    return result
#END DEF
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# HTTP REQUEST DATA VALIDATION
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class HttpDataValidation(dict):
'''
@CLASS A simple class that acts exactly like a Python Dictionary, just named differently.
'''
BASE_VALIDATION = {
'valid': False,
'found': False,
'type': 'unknown',
'items': None,
'message': None
}
def __init__(self, *args, **kwargs):
super(HttpDataValidation, self).__init__(*args, **kwargs)
#A quick reference to whether the Validation object says that things are good or bad. This
# will be | |
# gh_stars: 0  (dataset-scraper artifact converted to a comment; the bare token was not valid Python)
import sys
#!{sys.executable} -m pip install nltk
import re
import random
from nltk import PCFG, Tree, nonterminals
from typing import *
from generator import generate, create_dataset_json, combine_dataset_jsons
# Nonterminal handles for programmatic access to the grammar symbols.
# NOTE(review): the PCFG below is parsed from its own string, so it does not
# depend on these bindings — they appear to exist for interactive inspection.
S, VP, NP_nom3, NP_acc, PP, V_trans, Person, V_intrans = nonterminals(
    'S, VP, NP_nom3, NP_acc, PP, V_trans, Person, V_intrans'
)
# Probabilistic context-free grammar for generating simple Turkish sentences.
# Terminals are morpheme-segmented: suffixes carry a leading '-' (e.g. '-i'
# accusative, '-iyor'/'-di'/'-ecek' tense, '-im'/'-sin' person) and are later
# fused and vowel-harmonized by the rewrite functions vh / vh_n.
# Rule probabilities on each left-hand side sum to 1, as required by nltk.PCFG.
turkish_grammar = PCFG.fromstring("""
S -> VP Person [.33]
S -> NP_nom1 VP Person1 [.23]
S -> NP_nom2 VP Person2 [.22]
S -> NP_nom3 VP Person3 [.22]
VP -> NP_acc V_trans [.5] | V_intrans [.5]
NP_nom1 -> 'ben' [1]
NP_nom2 -> 'sen' [1]
NP_nom3 -> 'o' [.4] | N [.6]
NP_acc -> N_obj '-i' [.8] | PP N_obj '-i' [.2]
PP -> N_place P [1]
P -> '-de-ki' [1]
N -> 'kedi' [.1] | 'köpek' [.1] | 'veteriner' [.1] | 'memur' [.1] | 'kraliçe' [.1] | 'başkan' [.1] | 'koyun' [.1] | 'yönetmen' [.1] | 'işçi' [.1] | ' balina' [.1]
N_obj -> ' yemek' [.1] | ' ekmek' [.1] | ' goril' [.1] | ' ayı' [.1] | ' eşek' [.1] | ' ördek' [.1] | ' oklukirpi' [.2] | ' gergeden' [.1] | ' maymun' [.1]
N_place -> ' masa' [.1] | ' sandalye' [.1] | ' yer' [.1] | ' kitap' [.1] | ' ev' [.1] | ' sokak' [.1] | ' oda' [.1] | ' köşe' [.1] | ' tavan' [.05] | ' kanape' [.05] | ' atölye' [.05] | ' deniz' [.05]
V_trans -> V_stem_trans Tense [1]
V_stem_trans -> ' iste' [.05] | ' sev' [.05] | ' ara' [.05]| ' gör' [.1] | ' siparış et' [.05] |' güven' [.05] | ' bul' [.05]| ' emret' [.05] |' tut'[.05]| ' kır'[.05]| ' affet'[.05]| ' özgür bırak' [.05]| ' tercih et'[.05]|' gözlemle' [.05]| ' sakla' [.05]| ' çal' [.05] | ' söv' [.05] | ' kurtar' [.05]| ' uyar' [.05]
Tense -> '-iyor' [.5] | '-di' [.25] | '-ecek' [.25]
Person -> Person1 [.2] | Person2 [.2] | Person3 [.6]
Person1 -> '-im' [1]
Person2 -> '-sin' [1]
Person3 -> '-' [1]
V_intrans -> V_stem_intrans Tense [1]
V_stem_intrans -> ' dinlen' [.05]| ' git' [.05] | ' öğren' [.05] | ' izin ver' [.05]|' bekle' [.05] | ' imdat iste' [.05] | ' özür dile'[.05] | ' oy ver' [.05] | ' gül' [.05] |' şikayet et'[.05]| ' övün'[.05]| ' şaşır'[.05] | ' acele et'[.05]| ' hata yap'[.05]| ' otur'[.05]| ' dur'[.05] | ' bağır' [.05]| ' not al'[.05] | ' yüz'[.05]| ' düşün'[.05]
""")
# Language tag consumed by the dataset generator (see generator.create_dataset_json).
setattr(turkish_grammar, 'lang', 'tu')
def vh(expression: str) -> str:
    """Apply Turkish vowel harmony and morpheme fusion to a generated sentence.

    The grammar emits morpheme-segmented strings (suffixes prefixed with '-',
    e.g. ' sev-iyor-im'); this function rewrites them into surface forms
    (' seviyorum') by running an ordered sequence of regex substitutions:
    harmonizing suffix vowels with the preceding stem vowel, inserting buffer
    consonants ('y'), and applying consonant assimilation (t/d, k/ğ).

    :param expression: morpheme-segmented sentence produced by the grammar
    :return: surface form with suffixes fused and harmonized
    """
    # Ordered (pattern, replacement) rules. ORDER MATTERS: later rules operate on
    # the output of earlier ones, so the sequence must not be rearranged.
    # (This replaces a long chain of identical re.sub statements; one exact
    # duplicate rule ('aı' -> 'ı') from the original was dropped as a no-op,
    # and commented-out dead rules were removed.)
    rules = (
        # accusative '-i' after stems whose second-to-last letter is a back/round vowel
        (r'(?<=u.)-i', 'u'),
        (r'(?<=a.)-i', 'ı'),
        (r'(?<=ı.)-i', 'ı'),
        (r'(?<=o.)-i', 'u'),
        (r'(?<=ü.)-i', 'ü'),
        (r'(?<=ö.)-i', 'ü'),
        # buffer consonant 'y' between vowel-final stems and vowel-initial suffixes
        (r'i-i', 'iyi'),
        (r'a-i', 'ayı'),
        (r'ı-i', 'ıyı'),
        (r'i-l', 'iyl'),
        (r'e-l', 'eyl'),
        (r'ü-l', 'üyl'),
        (r'ö-l', 'öyl'),
        (r'ı-le', 'iyla'),
        (r'a-le', 'ayla'),
        (r'u-le', 'uyla'),
        (r'o-le', 'oyla'),
        # past tense '-di' harmony
        (r'(?<=ü.)-di', 'dü'),
        (r'(?<=ö.)-di', 'dü'),
        (r'(?<=a.)-di', 'dı'),
        (r'(?<=ı.)-di', 'dı'),
        (r'(?<=u.)-di', 'du'),
        (r'(?<=i.)-di', 'di'),
        (r'(?<=e.)-di', 'di'),
        (r'(?<=o.)-di', 'du'),
        # present tense '-iyor' harmony
        (r'(?<=ü.)-iyor', 'üyor'),
        (r'(?<=ö.)-iyor', 'üyor'),
        (r'(?<=a.)-iyor', 'ıyor'),
        (r'(?<=ı.)-iyor', 'ıyor'),
        (r'(?<=u.)-iyor', 'uyor'),
        (r'(?<=i.)-iyor', 'iyor'),
        (r'(?<=e.)-iyor', 'iyor'),
        (r'(?<=o.)-iyor', 'uyor'),
        # vowel-final stems followed by tense suffixes
        (r'a-di', 'adı'),
        (r'a-iyor', 'ıyor'),
        (r'e-di', 'edi'),
        (r'e-iyor', 'iyor'),
        (r'e-ecek', 'eyecek'),
        (r'a-ecek', 'ayacak'),
        # future tense '-ecek' harmony
        (r'(?<=ü.)-ecek', 'ecek'),
        (r'(?<=ö.)-ecek', 'ecek'),
        (r'(?<=a.)-ecek', 'acak'),
        (r'(?<=ı.)-ecek', 'acak'),
        (r'(?<=u.)-ecek', 'acak'),
        (r'(?<=i.)-ecek', 'ecek'),
        (r'(?<=e.)-ecek', 'ecek'),
        (r'(?<=o.)-ecek', 'acak'),
        # consonant assimilation and remaining cleanups
        (r'k-i', 'ği'),
        (r'ör-i', 'örü'),
        (r't-d', 'tt'),
        (r'k-d', 'kt'),
        (r'td', 'tt'),
        (r'kd', 'kt'),
        (r'ap-de', 'apta'),
        (r't-i', 'di'),
        (r'a-de-ki', 'adaki'),
        (r'ei', 'i'),
        (r'aı', 'ı'),
        (r'gite', 'gide'),
        (r'ete', 'ede'),
    )
    for pattern, replacement in rules:
        expression = re.sub(pattern, replacement, expression)
    return expression
def vh_n(expression: str) -> str:
    """Apply Turkish vowel-harmony rewrites for negated/nominal forms.

    Runs an ordered sequence of regex substitutions over *expression*,
    resolving '-'-separated morpheme boundaries into harmonized surface
    forms. Rule order matters: earlier rules may consume material that
    later rules would otherwise match, so the table below must be kept
    in exactly this order.
    """
    # (pattern, replacement) pairs, applied strictly in order.
    # All replacements are preserved verbatim from the original rules,
    # including the 'ı-le' -> 'iyla' entry (sic) and the duplicated
    # 'aı' -> 'ı' rule (the second pass matters for inputs like 'aaı').
    rules = (
        ('(?<=u.)-i', 'u'),
        ('(?<=a.)-i', 'ı'),
        ('(?<=ı.)-i', 'ı'),
        ('(?<=o.)-i', 'u'),
        ('(?<=ü.)-i', 'ü'),
        ('(?<=ö.)-i', 'ü'),
        ('i-i', 'iyi'),
        ('a-i', 'ayı'),
        ('ı-i', 'ıyı'),
        ('i-l', 'iyl'),
        ('e-l', 'eyl'),
        ('ü-l', 'üyl'),
        ('ö-l', 'öyl'),
        ('ı-le', 'iyla'),
        ('a-le', 'ayla'),
        ('u-le', 'uyla'),
        ('o-le', 'oyla'),
        ('(?<=ü.)-m--di', 'medi'),
        ('(?<=ö.)-m--di', 'medi'),
        ('(?<=a.)-m--di', 'madı'),
        ('(?<=ı.)-m--di', 'madı'),
        ('(?<=u.)-m--di', 'madı'),
        ('(?<=i.)-m--di', 'medi'),
        ('(?<=e.)-m--di', 'medi'),
        ('(?<=o.)-m--di', 'madı'),
        ('(?<=ü.)-m--iyor', 'müyor'),
        ('(?<=ö.)-m--iyor', 'müyor'),
        ('(?<=a.)-m--iyor', 'mıyor'),
        ('(?<=ı.)-m--iyor', 'mıyor'),
        ('(?<=u.)-m--iyor', 'muyor'),
        ('(?<=i.)-m--iyor', 'miyor'),
        ('(?<=e.)-m--iyor', 'miyor'),
        ('(?<=o.)-m--iyor', 'muyor'),
        ('a-m--di', 'amadı'),
        ('a-m--iyor', 'amıyor'),
        ('e-m--di', 'emedi'),
        ('e-m--iyor', 'emiyor'),
        ('e-m--ecek', 'emeyecek'),
        ('a-m--ecek', 'amayacak'),
        ('(?<=ü.)-m--ecek', 'meyecek'),
        ('(?<=ö.)-m--ecek', 'meyecek'),
        ('(?<=a.)-m--ecek', 'mayacak'),
        ('(?<=ı.)-m--ecek', 'mayacak'),
        ('(?<=u.)-m--ecek', 'mayacak'),
        ('(?<=i.)-m--ecek', 'meyecek'),
        ('(?<=e.)-m--ecek', 'meyecek'),
        ('(?<=o.)-m--ecek', 'mayacak'),
        ('k-i', 'ği'),
        ('ör-i', 'örü'),
        ('t-d', 'tt'),
        ('k-d', 'kt'),
        ('td', 'tt'),
        ('kd', 'kt'),
        ('ap-de', 'apta'),
        ('t-i', 'di'),
        ('a-de-ki', 'adaki'),
        ('ei', 'i'),
        ('aı', 'ı'),
        ('aı', 'ı'),
        ('gite', 'gide'),
        ('ete', 'ede'),
    )
    for pattern, replacement in rules:
        expression = re.sub(pattern, replacement, expression)
    return expression
def vh2(expression: str) -> str:
    """Final cleanup pass after vowel-harmony substitution.

    Strips the remaining morpheme-boundary dashes, fixes consonant
    voicing ('sd' -> 'st', 't' -> 'd' inside -iyor/-ece forms) and
    removes at most one leading whitespace character.
    """
    expression = re.sub('-', '', expression)
    # NOTE: the original also applied re.sub('p-d', 'pt', ...) at this
    # point, but that rule was unreachable: every '-' has already been
    # removed by the line above, so 'p-d' could never match. Dropped as
    # dead code (behavior unchanged).
    expression = re.sub('sd', 'st', expression)
    expression = re.sub('etiyor', 'ediyor', expression)
    expression = re.sub('etece', 'edece', expression)
    expression = re.sub('itiyor', 'idiyor', expression)
    # Raw string avoids the invalid-escape-sequence warning for '\s';
    # still removes only a single leading whitespace char, as before.
    expression = re.sub(r'^\s', '', expression)
    return expression
def vowelharmony(expression: str) -> str:
# expression = re.sub('di-im', 'di-m', expression)
# expression = re.sub('di-sin', 'di-n', expression)
expression = re.sub('(?<=ü.)-di-sin', 'dün', expression)
expression = re.sub('(?<=ö.)-di-sin', 'dün', expression)
expression = re.sub('(?<=a.)-di-sin', 'dın', expression)
expression = re.sub('(?<=ı.)-di-sin', 'dın', expression)
expression = re.sub('(?<=u.)-di-sin', 'dun', expression)
expression = re.sub('(?<=i.)-di-sin', 'din', expression)
expression = re.sub('(?<=e.)-di-sin', 'din', expression)
expression = re.sub('(?<=o.)-di-sin', 'dun', expression)
expression = re.sub('(?<=ü.)-di-im', 'düm', expression)
expression = re.sub('(?<=ö.)-di-im', 'düm', expression)
expression = re.sub('(?<=a.)-di-im', 'dım', expression)
expression = re.sub('(?<=ı.)-di-im', 'dım', expression)
expression = re.sub('(?<=u.)-di-im', 'dum', expression)
expression = re.sub('(?<=i.)-di-im', 'dim', expression)
expression = re.sub('(?<=e.)-di-im', 'dim', expression)
expression = re.sub('(?<=o.)-di-im', 'dum', expression)
expression = re.sub('(?<=ü.)-iyor-sin', 'üyorsun', expression)
| |
# -*- coding: UTF-8 -*-
from mpi4py import MPI
from sympy import pi, cos, sin
import pytest
import os
from sympde.calculus import grad, dot
from sympde.topology import ScalarFunctionSpace, VectorFunctionSpace
from sympde.topology import ProductSpace
from sympde.topology import element_of
from sympde.topology import NormalVector
from sympde.topology import Union
from sympde.topology import Domain
from sympde.expr import BilinearForm, LinearForm, integral
from sympde.expr import Norm
from sympde.expr import find, EssentialBC
from psydac.fem.basic import FemField
from psydac.api.discretization import discretize
# ... get the mesh directory
try:
    # Preferred: explicit mesh directory from the environment.
    mesh_dir = os.environ['PSYDAC_MESH_DIR']
except KeyError:
    # Catch only the missing-variable case (the original bare `except:`
    # would also have swallowed KeyboardInterrupt/SystemExit).
    # Fall back to the 'mesh' directory three levels above this file.
    base_dir = os.path.dirname(os.path.realpath(__file__))
    base_dir = os.path.join(base_dir, '..', '..', '..')
    mesh_dir = os.path.join(base_dir, 'mesh')
# ...
#==============================================================================
def run_poisson_3d_dir(filename, solution, f, comm=None):
    """Solve the 3D Poisson problem with homogeneous Dirichlet BCs.

    Builds the abstract weak formulation on the domain read from
    *filename*, discretizes and solves it, and returns the pair
    (l2_error, h1_error) of the discrete solution measured against the
    exact *solution*.
    """
    # Abstract (symbolic) model -----------------------------------------
    domain = Domain.from_file(filename)
    V = ScalarFunctionSpace('V', domain)
    x, y, z = domain.coordinates

    F = element_of(V, name='F')
    v = element_of(V, name='v')
    u = element_of(V, name='u')

    int_0 = lambda expr: integral(domain, expr)

    a = BilinearForm((v, u), int_0(dot(grad(v), grad(u))))
    l = LinearForm(v, int_0(f * v))

    error = F - solution
    l2norm = Norm(error, domain, kind='l2')
    h1norm = Norm(error, domain, kind='h1')

    # u = 0 on the whole boundary.
    bc = EssentialBC(u, 0, domain.boundary)
    equation = find(u, forall=v, lhs=a(u, v), rhs=l(v), bc=bc)

    # Discretization ----------------------------------------------------
    domain_h = discretize(domain, filename=filename, comm=comm)
    Vh = discretize(V, domain_h)
    equation_h = discretize(equation, domain_h, [Vh, Vh])
    l2norm_h = discretize(l2norm, domain_h, Vh)
    h1norm_h = discretize(h1norm, domain_h, Vh)

    # Solve and measure errors ------------------------------------------
    phi = FemField(Vh, equation_h.solve())
    l2_error = l2norm_h.assemble(F=phi)
    h1_error = h1norm_h.assemble(F=phi)

    return l2_error, h1_error
#==============================================================================
def run_poisson_3d_dirneu(filename, solution, f, boundary, comm=None):
    """Solve the 3D Poisson problem with mixed Dirichlet/Neumann BCs.

    *boundary* is a list of keyword dicts identifying the Neumann
    patches (each dict is forwarded to domain.get_boundary); u = 0 is
    imposed on the complement of their union. Returns (l2_error,
    h1_error) of the discrete solution against the exact *solution*.
    """
    assert isinstance(boundary, (list, tuple))

    # Abstract (symbolic) model -----------------------------------------
    domain = Domain.from_file(filename)
    V = ScalarFunctionSpace('V', domain)

    neumann_patches = [domain.get_boundary(**kw) for kw in boundary]
    if len(neumann_patches) == 1:
        B_neumann = neumann_patches[0]
    else:
        B_neumann = Union(*neumann_patches)

    x, y, z = domain.coordinates

    F = element_of(V, name='F')
    v = element_of(V, name='v')
    u = element_of(V, name='u')
    nn = NormalVector('nn')

    int_0 = lambda expr: integral(domain, expr)
    int_1 = lambda expr: integral(B_neumann, expr)

    a = BilinearForm((v, u), int_0(dot(grad(v), grad(u))))

    # Volume term plus the natural (Neumann) boundary term.
    l0 = LinearForm(v, int_0(f * v))
    l_B_neumann = LinearForm(v, int_1(v * dot(grad(solution), nn)))
    l = LinearForm(v, l0(v) + l_B_neumann(v))

    error = F - solution
    l2norm = Norm(error, domain, kind='l2')
    h1norm = Norm(error, domain, kind='h1')

    # Essential (Dirichlet) data on whatever is not Neumann.
    B_dirichlet = domain.boundary.complement(B_neumann)
    bc = EssentialBC(u, 0, B_dirichlet)

    equation = find(u, forall=v, lhs=a(u, v), rhs=l(v), bc=bc)

    # Discretization ----------------------------------------------------
    domain_h = discretize(domain, filename=filename, comm=comm)
    Vh = discretize(V, domain_h)
    equation_h = discretize(equation, domain_h, [Vh, Vh])
    l2norm_h = discretize(l2norm, domain_h, Vh)
    h1norm_h = discretize(h1norm, domain_h, Vh)

    # Solve and measure errors ------------------------------------------
    phi = FemField(Vh, equation_h.solve())
    l2_error = l2norm_h.assemble(F=phi)
    h1_error = h1norm_h.assemble(F=phi)

    return l2_error, h1_error
#==============================================================================
def run_laplace_3d_neu(filename, solution, f, comm=None):
    """Solve the 3D reaction-diffusion problem with pure Neumann BCs.

    The bilinear form contains the reaction term v*u in addition to the
    diffusion term, and the Neumann data is applied on the whole
    boundary. Returns (l2_error, h1_error) against the exact *solution*.
    """
    # Abstract (symbolic) model -----------------------------------------
    domain = Domain.from_file(filename)
    V = ScalarFunctionSpace('V', domain)
    B_neumann = domain.boundary
    x, y, z = domain.coordinates

    F = element_of(V, name='F')
    v = element_of(V, name='v')
    u = element_of(V, name='u')
    nn = NormalVector('nn')

    int_0 = lambda expr: integral(domain, expr)
    int_1 = lambda expr: integral(B_neumann, expr)

    # Reaction term v*u keeps the pure-Neumann problem well posed.
    a = BilinearForm((v, u), int_0(dot(grad(v), grad(u)) + v * u))

    l0 = LinearForm(v, int_0(f * v))
    l_B_neumann = LinearForm(v, int_1(v * dot(grad(solution), nn)))
    l = LinearForm(v, l0(v) + l_B_neumann(v))

    error = F - solution
    l2norm = Norm(error, domain, kind='l2')
    h1norm = Norm(error, domain, kind='h1')

    # No essential boundary condition.
    equation = find(u, forall=v, lhs=a(u, v), rhs=l(v))

    # Discretization ----------------------------------------------------
    domain_h = discretize(domain, filename=filename, comm=comm)
    Vh = discretize(V, domain_h)
    equation_h = discretize(equation, domain_h, [Vh, Vh])
    l2norm_h = discretize(l2norm, domain_h, Vh)
    h1norm_h = discretize(h1norm, domain_h, Vh)

    # Solve and measure errors ------------------------------------------
    phi = FemField(Vh, equation_h.solve())
    l2_error = l2norm_h.assemble(F=phi)
    h1_error = h1norm_h.assemble(F=phi)

    return l2_error, h1_error
###############################################################################
# SERIAL TESTS
###############################################################################
#==============================================================================
def test_api_poisson_3d_dir_collela():
    """Poisson 3D, pure Dirichlet BCs, Collela mapped mesh."""
    from sympy.abc import x, y, z

    filename = os.path.join(mesh_dir, 'collela_3d.h5')
    solution = sin(pi*x)*sin(pi*y)*sin(pi*z)
    f = 3*pi**2*sin(pi*x)*sin(pi*y)*sin(pi*z)

    l2_error, h1_error = run_poisson_3d_dir(filename, solution, f)

    # Reference values recorded from a known-good run.
    assert abs(l2_error - 0.15687494944868827) < 1.e-7
    assert abs(h1_error - 1.518006054794389) < 1.e-7
#==============================================================================
def test_api_poisson_3d_dirneu_identity_2():
    """Poisson 3D, Neumann on the x=+1 face, identity mapping."""
    from sympy.abc import x, y, z

    filename = os.path.join(mesh_dir, 'identity_3d.h5')
    solution = sin(0.5*pi*x)*sin(pi*y)*sin(pi*z)
    f = (9./4.)*pi**2*solution

    l2_error, h1_error = run_poisson_3d_dirneu(
        filename, solution, f, [{'axis': 0, 'ext': 1}])

    # Reference values recorded from a known-good run.
    assert abs(l2_error - 0.001438835012218704) < 1.e-7
    assert abs(h1_error - 0.03929404299152016) < 1.e-7
#==============================================================================
def test_api_poisson_3d_dirneu_identity_13():
    """Poisson 3D, Neumann on the x=-1 and y=-1 faces, identity mapping."""
    from sympy.abc import x, y, z

    filename = os.path.join(mesh_dir, 'identity_3d.h5')
    solution = cos(0.5*pi*x)*cos(0.5*pi*y)*sin(pi*z)
    f = (3./2.)*pi**2*solution

    neumann = [{'axis': 0, 'ext': -1}, {'axis': 1, 'ext': -1}]
    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f, neumann)

    # Reference values recorded from a known-good run.
    assert abs(l2_error - 0.0010275451113313282) < 1.e-7
    assert abs(h1_error - 0.027938446826372126) < 1.e-7
#==============================================================================
def test_api_poisson_3d_dirneu_identity_24():
    """Poisson 3D, Neumann on the x=+1 and y=+1 faces, identity mapping."""
    from sympy.abc import x, y, z

    filename = os.path.join(mesh_dir, 'identity_3d.h5')
    solution = sin(0.5*pi*x)*sin(0.5*pi*y)*sin(pi*z)
    f = (3./2.)*pi**2*solution

    neumann = [{'axis': 0, 'ext': 1}, {'axis': 1, 'ext': 1}]
    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f, neumann)

    # Reference values recorded from a known-good run.
    assert abs(l2_error - 0.001027545111330973) < 1.e-7
    assert abs(h1_error - 0.027938446826371813) < 1.e-7
##==============================================================================
## TODO DEBUG, not working since merge with devel
#def test_api_poisson_3d_dirneu_identity_123():
# filename = os.path.join(mesh_dir, 'identity_3d.h5')
#
# from sympy.abc import x,y,z
#
# solution = cos(0.25*pi*x)*cos(0.5*pi*y)*sin(pi*z)
# f = (21./16.)*pi**2*solution
#
# l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,
# [{'axis': 0, 'ext': -1},
# {'axis': 0, 'ext': 1},
# {'axis': 1, 'ext': -1}])
#
# expected_l2_error = 0.0013124098938804697
# expected_h1_error = 0.035441679549890456
#
# assert( abs(l2_error - expected_l2_error) < 1.e-7)
# assert( abs(h1_error - expected_h1_error) < 1.e-7)
##==============================================================================
## TODO DEBUG, not working since merge with devel
#def test_api_poisson_3d_dirneu_identity_1235():
# filename = os.path.join(mesh_dir, 'identity_3d.h5')
#
# from sympy.abc import x,y,z
#
# solution = cos(0.25*pi*x)*cos(0.5*pi*y)*cos(0.5*pi*z)
# f = (9./16.)*pi**2*solution
#
# l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,
# [{'axis': 0, 'ext': -1},
# {'axis': 0, 'ext': 1},
# {'axis': 1, 'ext': -1},
# {'axis': 2, 'ext': -1}])
#
# expected_l2_error = 0.00019677816039781896
# expected_h1_error = 0.0058786142515790405
#
# assert( abs(l2_error - expected_l2_error) < 1.e-7)
# assert( abs(h1_error - expected_h1_error) < 1.e-7)
#==============================================================================
def test_api_poisson_3d_dirneu_collela_2():
    """Poisson 3D, Neumann on the x=+1 face, Collela mapped mesh."""
    from sympy.abc import x, y, z

    filename = os.path.join(mesh_dir, 'collela_3d.h5')
    solution = sin(0.25*pi*(x+1.))*sin(pi*y)*sin(pi*z)
    f = (33./16.)*pi**2*solution

    l2_error, h1_error = run_poisson_3d_dirneu(
        filename, solution, f, [{'axis': 0, 'ext': 1}])

    # Reference values recorded from a known-good run.
    assert abs(l2_error - 0.06091240085930318) < 1.e-7
    assert abs(h1_error - 0.6380043932563333) < 1.e-7
##==============================================================================
## TODO DEBUG, not working since merge with devel
#def test_api_poisson_3d_dirneu_collela_13():
# filename = os.path.join(mesh_dir, 'collela_3d.h5')
#
# from sympy.abc import x,y,z
#
# solution = sin(0.25*pi*(1.-x))*sin(0.25*pi*(1.-y))*sin(pi*z)
# f = (9./8.)*pi**2*solution
#
# l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,
# [{'axis': 0, 'ext': -1},
# {'axis': 1, 'ext': -1}])
#
# expected_l2_error = 0.03786854933218588
# expected_h1_error = 0.38437667047918933
#
# assert( abs(l2_error - expected_l2_error) < 1.e-7)
# assert( abs(h1_error - expected_h1_error) < 1.e-7)
#==============================================================================
def test_api_poisson_3d_dirneu_collela_24():
    """Poisson 3D, Neumann on the x=+1 and y=+1 faces, Collela mesh."""
    from sympy.abc import x, y, z

    filename = os.path.join(mesh_dir, 'collela_3d.h5')
    solution = sin(0.25*pi*(x+1.))*sin(0.25*pi*(y+1.))*sin(pi*z)
    f = (9./8.)*pi**2*solution

    neumann = [{'axis': 0, 'ext': 1}, {'axis': 1, 'ext': 1}]
    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f, neumann)

    # Reference values recorded from a known-good run.
    assert abs(l2_error - 0.03793880183960465) < 1.e-7
    assert abs(h1_error - 0.38439642303250143) < 1.e-7
##==============================================================================
## TODO DEBUG, not working since merge with devel
#def test_api_poisson_3d_dirneu_collela_123():
# filename = os.path.join(mesh_dir, 'collela_3d.h5')
#
# from sympy.abc import x,y,z
#
# solution = cos(pi*x)*sin(0.25*pi*(1.-y))*sin(pi*z)
# f = (33./16.)*pi**2*solution
#
# l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,
# [{'axis': 0, 'ext': -1},
# {'axis': 0, 'ext': 1},
# {'axis': 1, 'ext': -1}])
#
# expected_l2_error = 0.11963989196330076
# expected_h1_error = 1.1267766354124575
#
# assert( abs(l2_error - expected_l2_error) < 1.e-7)
# assert( abs(h1_error - expected_h1_error) < 1.e-7)
#
##==============================================================================
## TODO DEBUG, not working since merge with devel
#def test_api_poisson_3d_dirneu_collela_1235():
# filename = os.path.join(mesh_dir, 'collela_3d.h5')
#
# from sympy.abc import x,y,z
#
# solution = cos(pi*x)*sin(0.25*pi*(1.-y))*sin(0.25*pi*(1.-z))
# f = (9./8.)*pi**2*solution
#
# l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,
# [{'axis': 0, 'ext': -1},
# {'axis': 0, 'ext': 1},
# {'axis': 1, 'ext': -1},
# {'axis': 2, 'ext': -1}])
#
# expected_l2_error = 0.13208728319093133
# expected_h1_error = 0.9964934429086868
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#==============================================================================
def test_api_laplace_3d_neu_identity():
filename = os.path.join(mesh_dir, 'identity_3d.h5')
from sympy.abc import x,y,z
solution = cos(pi*x)*cos(pi*y)*cos(pi*z)
f = (3.*pi**2 + 1.)*solution
l2_error, h1_error = run_laplace_3d_neu(filename, solution, f)
expected_l2_error = 0.0016975430150953524
expected_h1_error = 0.047009063231215
assert( abs(l2_error | |
<filename>role_analyzer.py
from dataclasses import dataclass
from enum import Enum
import logging
import re
import sre_constants
import sre_parse
import typing
import z3 # type: ignore
@dataclass
class EntityTypeInfo:
    """
    Information about an entity type (node, k8s cluster, etc.)
    Instances of this class are used as the value payload of the
    EntityType enum members defined below.
    ----------------------------------------------------------------------
    Attributes defined here:
    name: str
        The short human-readable name identifying the entity.
    labels_name: str
        The name used for the entity's labels in role YAML files.
    ----------------------------------------------------------------------
    """
    name: str
    labels_name: str
class EntityType(Enum):
    """
    An enumeration of all supported entity types. New entity types should be
    added here.
    """
    # Each member's value is an EntityTypeInfo(name, labels_name) pair;
    # labels_name is the key used for that entity's labels in role YAML.
    APP: EntityTypeInfo = EntityTypeInfo("app", "app_labels")
    NODE: EntityTypeInfo = EntityTypeInfo("node", "node_labels")
    K8S: EntityTypeInfo = EntityTypeInfo("k8s", "kubernetes_labels")
    DB: EntityTypeInfo = EntityTypeInfo("db", "db_labels")
def other_entity_types(entity_type: EntityType) -> list[EntityType]:
    """
    Gets an enumeration of all entity types except the given one.
    """
    # Comprehension over the enum; preserves declaration order.
    return [candidate for candidate in EntityType if candidate != entity_type]
@dataclass
class EntityAttributes:
    """
    The Z3 variables modeling entities, on which constraints are placed.
    Constructed by AuthzContext.__init__ from either uninterpreted or
    recursively-defined Z3 functions.
    ----------------------------------------------------------------------
    Attributes defined here:
    keys: z3.FuncDeclRef
        The set of all label keys which must be possessed by an entity.
        Should be a Z3 function from string to bool.
    labels: z3.FuncDeclRef
        The actual key/value labels which must be possessed by an entity.
        Should be a Z3 function from string to string.
    ----------------------------------------------------------------------
    """
    keys: z3.FuncDeclRef
    labels: z3.FuncDeclRef
class UserType(Enum):
    """The two user trait namespaces recognized in role templates,
    e.g. {{internal.logins}} vs {{external.email}}."""
    INTERNAL: str = "internal"
    EXTERNAL: str = "external"
def get_other_user_type(user_type: UserType) -> UserType:
    """Return the complementary user type (internal <-> external).

    Raises ValueError for anything that is not a UserType member.
    """
    if user_type == UserType.INTERNAL:
        return UserType.EXTERNAL
    if user_type == UserType.EXTERNAL:
        return UserType.INTERNAL
    raise ValueError(f"Invalid user type {user_type}")
def get_user_type(user_type_str: str) -> UserType:
    """Parse "internal"/"external" into the corresponding UserType.

    Raises ValueError when the string matches no member's value.
    """
    found = next((ut for ut in UserType if ut.value == user_type_str), None)
    if found is None:
        raise ValueError(f"Invalid user type {user_type_str}")
    return found
@dataclass
class AuthzContext:
    """
    The context for a given authorization analysis. Used to encapsulate the
    variables on which constraints are placed.
    ----------------------------------------------------------------------
    Attributes defined here:
    entities: dict[EntityType, EntityAttributes]
        A map from all entity types to their Z3 variables.
    users: dict[UserType, z3.FuncDeclRef]
        A map from all user types to their Z3 variables.
    ----------------------------------------------------------------------
    """
    entities: dict[EntityType, EntityAttributes]
    users: dict[UserType, z3.FuncDeclRef]

    def __init__(self, uninterpreted: bool):
        """
        Initializes a new instance of the AuthzContext object.
        ------------------------------------------------------------------
        Parameters:
        uninterpreted: bool
            If true, construct the Z3 variables as uninterpreted functions
            on which constraints are placed; if false, construct the Z3
            variables as functions with actual definitions (to be provided
            later) against which constraints are checked. Generally
            uninterpreted functions are used when comparing two roles in
            the abstract and defined functions are used when determining
            whether a user has access to a resource through a role.
        """
        make_func = z3.Function if uninterpreted else z3.RecFunction
        # One (keys, labels) function pair per entity type.
        self.entities = {
            entity_type: EntityAttributes(
                make_func(
                    f"{entity_type.value.name}_attribute_keys",
                    z3.StringSort(),
                    z3.BoolSort(),
                ),
                make_func(
                    f"{entity_type.value.name}_attribute_labels",
                    z3.StringSort(),
                    z3.StringSort(),
                ),
            )
            for entity_type in EntityType
        }
        # One (key, value) -> bool traits function per user type.
        self.users = {
            user_type: make_func(
                f"{user_type.value}_traits",
                z3.StringSort(),
                z3.StringSort(),
                z3.BoolSort(),
            )
            for user_type in UserType
        }
@dataclass
class AnyValueConstraint:
    """Wildcard label constraint ("*"): any value is accepted."""
    value: str
@dataclass
class StringConstraint:
    """Plain literal string constraint (no template/regex constructs)."""
    value: str
@dataclass
class RegexConstraint:
    """Constraint holding a parsed regex (sre_parse parse tree)."""
    regex: sre_parse.SubPattern
@dataclass
class UserTraitConstraint:
    """Template constraint of the form {{internal.key}} or
    {{external.key["inner_key"]}} (inner_trait_key may be None)."""
    trait_type: UserType
    trait_key: str
    inner_trait_key: str
@dataclass
class InterpolationConstraint:
    """Template embedded inside literal text, e.g. IAM#{{internal.key}}#x;
    prefix/suffix hold the surrounding literal fragments."""
    prefix: str
    trait_type: UserType
    trait_key: str
    inner_trait_key: str
    suffix: str
@dataclass
class EmailFunctionConstraint:
    """Constraint of the form {{email.local(external.email)}}."""
    trait_type: UserType
    trait_key: str
    inner_trait_key: str
@dataclass
class RegexReplaceFunctionConstraint:
    """Constraint of the form
    {{regexp.replace(external.key, "pattern", "replace")}}."""
    trait_type: UserType
    trait_key: str
    inner_trait_key: str
    pattern: str
    replace: str
def try_parse_regex(value: str) -> typing.Optional[RegexConstraint]:
    """
    Attempts to parse the given value as a regex.

    Returns a RegexConstraint when *value* contains at least one
    non-literal regex construct; returns None for plain literal strings
    or unparseable input.
    """
    try:
        parsed_regex = sre_parse.parse(value)
        # A pattern made solely of LITERAL nodes is just a plain string,
        # not a "real" regex. Generator expression avoids materializing
        # a throwaway list inside any().
        is_regex = any(
            sre_constants.LITERAL != node_type for node_type, _ in parsed_regex.data
        )
        return RegexConstraint(parsed_regex) if is_regex else None
    except Exception as e:
        # Deliberate best-effort: any parse failure means "not a regex".
        # NOTE: sre_parse/sre_constants are deprecated since Python 3.11
        # (re._parser/re._constants) — flagged for a future migration.
        logging.debug(f"Cannot parse regex {value} - {e}")
        return None
# Regex pattern for {{internal.logins}} or {{external.email}} type template values.
# Named groups: 'type' (internal|external), 'key', and an optional
# bracketed, double-quoted 'inner_key'.
template_value_pattern = re.compile(
    r'\{\{(?P<type>internal|external)\.(?P<key>[\w]+)(\["(?P<inner_key>[\w]+)"\])?\}\}'
)
def try_parse_template(value: str) -> typing.Optional[UserTraitConstraint]:
    """
    Attempts to parse template constraints of type {{internal.logins}}
    """
    match = template_value_pattern.match(value)
    if match is None:
        return None
    return UserTraitConstraint(
        get_user_type(match.group("type")),
        match.group("key"),
        match.group("inner_key"),
    )
# Regex pattern for IAM#{{internal.logins}}#user type interpolation values.
# Like template_value_pattern, but also captures arbitrary literal
# 'prefix'/'suffix' text around the template.
interpolation_value_pattern = re.compile(
    r'(?P<prefix>.*)\{\{(?P<type>internal|external)\.(?P<key>[\w]+)(\["(?P<inner_key>[\w]+)"\])?\}\}(?P<suffix>.*)'
)
def try_parse_interpolation(value: str) -> typing.Optional[InterpolationConstraint]:
    """
    Attempts to parse interpolation constraints of type IAM#{external.foo}
    """
    match = interpolation_value_pattern.match(value)
    if match is None:
        return None
    return InterpolationConstraint(
        match.group("prefix"),
        get_user_type(match.group("type")),
        match.group("key"),
        match.group("inner_key"),
        match.group("suffix"),
    )
# Regex pattern for {{email.local(external.email)}}
# Named groups mirror template_value_pattern; whitespace is permitted
# inside the parentheses.
email_function_value_pattern = re.compile(
    r'\{\{email\.local\([\s]*(?P<type>internal|external)\.(?P<key>[\w]+)(\["(?P<inner_key>[\w]+)"\])?[\s]*\)\}\}'
)
def try_parse_email_function(value: str) -> typing.Optional[EmailFunctionConstraint]:
    """
    Attempts to parse email function constraints of type
    {{email.local(external.email)}}
    """
    match = email_function_value_pattern.match(value)
    if match is None:
        return None
    return EmailFunctionConstraint(
        get_user_type(match.group("type")),
        match.group("key"),
        match.group("inner_key"),
    )
# Regex pattern for {{regexp.replace(external.access["env"], "^(staging)$", "$1")}}
# Captures the trait reference plus the double-quoted 'pattern' and
# 'replace' arguments.
regex_function_value_pattern = re.compile(
    r'\{\{regexp\.replace\([\s]*(?P<type>internal|external)\.(?P<key>[\w]+)(\["(?P<inner_key>[\w]+)"\])?[\s]*,[\s]*"(?P<pattern>.*)"[\s]*,[\s]*"(?P<replace>.*)"[\s]*\)\}\}'
)
def try_parse_regexp_replace_function(
    value: str,
) -> typing.Optional[RegexReplaceFunctionConstraint]:
    """
    Attempts to parse regexp replace function constraints of type
    {{regexp.replace(external.access, "foo", "bar")}}
    """
    match = regex_function_value_pattern.match(value)
    if match is None:
        return None
    return RegexReplaceFunctionConstraint(
        get_user_type(match.group("type")),
        match.group("key"),
        match.group("inner_key"),
        match.group("pattern"),
        match.group("replace"),
    )
def requires_user_traits(values: typing.Union[str, list[str]]) -> bool:
    """
    Determines whether the given constraint requires user traits to specify.

    True when any value contains a template, interpolation, email.local,
    or regexp.replace construct referencing internal/external traits.
    """
    if not isinstance(values, list):
        values = [values]
    for value in values:
        # 'is not None' is the correct identity test (PEP 8); '!= None'
        # relies on __eq__. Short-circuiting also avoids running the
        # remaining parsers once one has matched.
        if (
            try_parse_template(value) is not None
            or try_parse_interpolation(value) is not None
            or try_parse_email_function(value) is not None
            or try_parse_regexp_replace_function(value) is not None
        ):
            return True
    return False
def parse_constraint(
    value: str,
) -> typing.Union[
    AnyValueConstraint,
    StringConstraint,
    RegexConstraint,
    UserTraitConstraint,
    InterpolationConstraint,
    EmailFunctionConstraint,
    RegexReplaceFunctionConstraint,
]:
    """
    Determines the category of the constraint value and parses it appropriately.

    Parsers are tried in a fixed order; the first one to succeed wins,
    with a plain StringConstraint as the final fallback.
    """
    if value == "*":
        return AnyValueConstraint(value)
    # Each try_parse_* helper returns its constraint type or None.
    for parser in (
        try_parse_template,
        try_parse_interpolation,
        try_parse_email_function,
        try_parse_regexp_replace_function,
        try_parse_regex,
    ):
        parsed = parser(value)
        if parsed is not None:
            return parsed
    return StringConstraint(value)
def Minus(re1: z3.ReRef, re2: z3.ReRef) -> z3.ReRef:
    """
    The Z3 regex matching all strings accepted by re1 but not re2.
    Formatted in camelcase to mimic Z3 regex API.
    """
    # Regex set difference expressed as: re1 & ~re2.
    return z3.Intersect(re1, z3.Complement(re2))
def AnyChar() -> z3.ReRef:
    """
    The Z3 regex matching any character (currently only ASCII supported).
    Formatted in camelcase to mimic Z3 regex API.
    """
    # ASCII range 0..127; the disabled alternative below would cover the
    # full string character sort.
    return z3.Range(chr(0), chr(127))
    # return z3.AllChar(z3.StringSort())
def category_regex(category: sre_constants._NamedIntConstant) -> z3.ReRef:
    """
    Defines regex categories in Z3.
    """
    if category == sre_constants.CATEGORY_DIGIT:
        # [0-9], as matched by \d.
        return z3.Range("0", "9")
    if category == sre_constants.CATEGORY_SPACE:
        # The six ASCII whitespace characters matched by \s.
        return z3.Union(
            z3.Re(" "), z3.Re("\t"), z3.Re("\n"), z3.Re("\r"), z3.Re("\f"), z3.Re("\v")
        )
    if category == sre_constants.CATEGORY_WORD:
        # [a-zA-Z0-9_], as matched by \w.
        return z3.Union(
            z3.Range("a", "z"), z3.Range("A", "Z"), z3.Range("0", "9"), z3.Re("_")
        )
    raise NotImplementedError(
        f"ERROR: regex category {category} not yet implemented"
    )
def regex_construct_to_z3_expr(regex_construct) -> z3.ReRef:
"""
Translates a specific regex construct into its Z3 equivalent.
"""
node_type, node_value = regex_construct
if sre_constants.LITERAL == node_type: # a
return z3.Re(chr(node_value))
if sre_constants.NOT_LITERAL == node_type: # [^a]
return Minus(AnyChar(), z3.Re(chr(node_value)))
if sre_constants.SUBPATTERN == node_type:
_, _, _, value = node_value
return regex_to_z3_expr(value)
elif sre_constants.ANY == node_type: # .
return AnyChar()
elif sre_constants.MAX_REPEAT == node_type:
low, high, value = node_value
if (0, 1) == (low, high): # a?
return z3.Option(regex_to_z3_expr(value))
elif (0, sre_constants.MAXREPEAT) == (low, high): # a*
return z3.Star(regex_to_z3_expr(value))
elif (1, sre_constants.MAXREPEAT) == (low, high): # a+
return z3.Plus(regex_to_z3_expr(value))
else: # a{3,5}, a{3}
return z3.Loop(regex_to_z3_expr(value), low, high)
elif sre_constants.IN == node_type: # [abc]
first_subnode_type, _ = node_value[0]
if sre_constants.NEGATE == first_subnode_type: # [^abc]
return Minus(
AnyChar(),
z3.Union(
[regex_construct_to_z3_expr(value) for value in node_value[1:]]
),
)
else:
return z3.Union([regex_construct_to_z3_expr(value) for value in node_value])
elif sre_constants.BRANCH == node_type: # ab|cd
_, value = node_value
return z3.Union([regex_to_z3_expr(v) for v in value])
elif sre_constants.RANGE == node_type: # [a-z]
low, high = node_value
return z3.Range(chr(low), chr(high))
elif sre_constants.CATEGORY == node_type: # \d, \s, \w
if sre_constants.CATEGORY_DIGIT == node_value: # \d
return category_regex(node_value)
elif sre_constants.CATEGORY_NOT_DIGIT == node_value: # \D
return Minus(AnyChar(), category_regex(sre_constants.CATEGORY_DIGIT))
elif sre_constants.CATEGORY_SPACE == node_value: # \s
return category_regex(node_value)
elif sre_constants.CATEGORY_NOT_SPACE == node_value: # \S
return Minus(AnyChar(), category_regex(sre_constants.CATEGORY_SPACE))
elif sre_constants.CATEGORY_WORD == node_value: # \w
return category_regex(node_value)
elif sre_constants.CATEGORY_NOT_WORD == node_value: # \W
return Minus(AnyChar(), category_regex(sre_constants.CATEGORY_WORD))
else:
raise NotImplementedError(
f"ERROR: regex category {node_value} not implemented"
)
elif sre_constants.AT == node_type:
if node_value in {
sre_constants.AT_BEGINNING,
sre_constants.AT_BEGINNING_STRING,
}: # ^a, \A
raise NotImplementedError(
f"ERROR: regex position {node_value} not implemented"
)
elif sre_constants.AT_BOUNDARY == node_value: # \b
raise NotImplementedError(
f"ERROR: regex position {node_value} not implemented"
)
elif sre_constants.AT_NON_BOUNDARY == node_value: # \B
raise NotImplementedError(
f"ERROR: regex position {node_value} not implemented"
)
elif node_value in {
sre_constants.AT_END,
sre_constants.AT_END_STRING,
}: # a$, \Z
raise NotImplementedError(
f"ERROR: regex position {node_value} not implemented"
)
| |
USA, which receives no attribution
if zoneGovt.strip() == "":
zoneGovt = "US"
# If we do not already know about this government
if zoneGovt not in govtList:
# Add it now
govtList.append(zoneGovt)
#---------------------------------------------------------------
# Get the impacted zones for this segment
ugcCode = self._tropicalAreaDict[key]["ugcCode"].strip()
#---------------------------------------------------------------
# Expand UGC code if there is more than one zone represented
if len(ugcCode.split("-")) > 1 or ugcCode.find(">") != -1:
ugcList = self.expandComplexUgc(ugcCode)
# Otherwise, just use this single zone
else:
ugcList = [ugcCode]
#---------------------------------------------------------------
# Expand UGC code if there is more than one zone represented
try:
ugcCodeUsa = self._tropicalAreaDict[key]["ugcCodeUsa"].strip()
if len(ugcCodeUsa.split("-")) > 1 or ugcCodeUsa.find(">") != -1:
ugcListUsa = self.expandComplexUgc(ugcCodeUsa)
# Otherwise, just use this single zone
else:
ugcListUsa = [ugcCodeUsa]
except:
ugcListUsa = []
#---------------------------------------------------------------
# If we already have an entry for this government
if zoneDict.has_key(zoneGovt):
# Get the zones already associated
curZoneList = zoneDict[zoneGovt]
# Otherwise make a new list for this governement
else:
curZoneList = []
#---------------------------------------------------------------
# Now add all the new zones
for ugc in ugcList + ugcListUsa:
# If we don't already have this ugc
if ugc not in curZoneList and len(ugc.strip()) > 0:
# Add it
curZoneList.append(ugc)
## print curZoneList
# Sort the UGC list
curZoneList.sort()
# Store the list of zones for this government
zoneDict[zoneGovt] = curZoneList
# Always ensure the USA comes first
if "US" in govtList:
govtList.remove("US")
finalGovtList = ["US"] + govtList
# Return the completed dictionary
return zoneDict, finalGovtList
#===========================================================================
# Define a method to filter a segment list by government
def _filterAreaListByGovernment(self, govtList, areaList):
"""TropicalHazards addition of _filterAreaListByGovernment.
This method will produce a list of all zones managed by a particular
government contained within the specified area list.
Arguments:
govtList -> list of identifiers managed by a government
areaList -> list of edit area identifiers to process
"""
# Initialize a new list
newList = []
# Look through each edit area
for area in areaList:
# If this edit area is managed by this government
if area in govtList:
# Add it to the filtered list
newList.append(area)
# Return the filtered list
return newList
#===========================================================================
# Define a method to filter a segment list by government
def _organizeAreasByType(self, areaList):
"""TropicalHazards addition of _organizeAreasByType.
This method will separate a list of areas into one of four types:
mainland segments, UGC zones, zones and islands. These will be stored
in the processed hazard dictionary for easier access later.
Arguments:
areaList -> list of edit area identifiers to process
"""
# Initialize both lists
segmentList = []
ugcZoneList = []
zoneList = []
islandList = []
waterList = []
# Look through each edit area
for area in areaList:
# Assume this is a "land" area
areaType = "land"
# If the TropicalAreaDictionary has a record for this area
if self._tropicalAreaDict.has_key(area):
# Get the type of this area - if we can
if self._tropicalAreaDict[area].has_key("segmentType"):
areaType = self._tropicalAreaDict[area]["segmentType"]
# Get the type of this area
try:
usaZoneList = self.expandComplexUgc(
self._tropicalAreaDict[area]["ugcCodeUsa"])
except:
usaZoneList = []
#---------------------------------------------------------------
# If this is an island
if areaType == "island":
# If we do not already have a record for this area
if area not in islandList:
# Add it to the list of islands
islandList.append(area)
#---------------------------------------------------------------
# Otherwise, if this is a water area
elif areaType == "water":
# If we do not already have a record for this area
if area not in waterList:
# Add it to the list of islands
waterList.append(area)
#---------------------------------------------------------------
# Otherwise, organize the land-based areas
else:
# If this is a zone-based identifier
if len(area) == 6 and area[2] in ["Z", "C"]:
# Place this zone into the proper list
if area[2] == "Z":
# If this area has not already been recorded
if area not in ugcZoneList:
# Add it to the UGC zone list
ugcZoneList.append(area)
else:
# If this area has not already been recorded
if area not in zoneList:
# Add it to the zone list
zoneList.append(area)
# If there any zones associated with this segment
if len(usaZoneList) > 0:
for usZone in usaZoneList:
if usZone not in ugcZoneList:
# Add it to the UGC zone list
ugcZoneList.append(usZone)
# Otherwise, this is a breakpoint segment
elif area not in segmentList:
segmentList.append(area)
# Get any UGC codes associated with this segment
areaUgc = self._tropicalAreaDict[area]["ugcCode"]
# If there is more than 1 zone associated with segment
if len(areaUgc) > 7:
# Expand the UGC codes
ugcList = self.expandComplexUgc(areaUgc)
# Otherwise, make a simpler list so we can proceed
else:
ugcList = [areaUgc]
#-------------------------------------------------------
# Add each zone code into the list as needed
for ugcCode in ugcList:
# Clean up any extra characters
ugcCode = ugcCode.replace("-", "")
# If this is a zone-based identifier
if len(ugcCode) >= 6 and ugcCode[2] in ["Z", "C"]:
# Place this zone into the proper list
if ugcCode[2] == "Z":
# If this area has not already been recorded
if ugcCode not in ugcZoneList:
# Add it to the UGC zone list
ugcZoneList.append(ugcCode)
else:
# If this area has not already been recorded
if ugcCode not in zoneList:
# Add it to the zone list
zoneList.append(ugcCode)
#-----------------------------------------------------------------------
# Sort all lists to keep them ordered - as needed
if len(segmentList) > 1:
segmentList.sort(self._sortBreakpoints)
if len(ugcZoneList) > 1:
ugcZoneList.sort(self._sortBreakpoints)
if len(zoneList) > 1:
zoneList.sort(self._sortBreakpoints)
if len(islandList) > 1:
islandList.sort(self._sortBreakpoints)
if len(waterList) > 1:
waterList.sort(self._sortBreakpoints)
# Return the compiled lists
return (segmentList, ugcZoneList, zoneList, islandList, waterList)
#===========================================================================
# Define a method to construct a processed hazard dictionary
def _constructHazardDict(self, hazardPhenSig, filterEtn):
"""TropicalHazards addition of _constructHazardDict.
This method will produce a processed dictionary of tropical hazards
for easier use later on.
Arguments:
hazardPhenSig -> dictionary of hazards keyed by phenomenon and
significance. Values are a list of all hazards
which share that same phenomenon and significance.
filterEtn -> Event Tracking Number of interest which will be used
to filter hazards for a particular product.
"""
#-----------------------------------------------------------------------
# Get ready to populate the hazard dictionary for this storm
hazardAreaDict = {}
hazardAreaDictKeyList = []
# Assume this is going to be the last product we issue for this storm
self._allCAN = True
#=======================================================================
# Look for each of the tropical hazards in order
for phenSig in [("SS","W"), ("HU","W"), ("SS","A"), ("HU","A"),
("TR","W"), ("TR","A")]:
if hazardPhenSig.has_key(phenSig):
print "="*90
print "\n\tConstructing -> %s" % (repr(phenSig))
print len(hazardPhenSig[phenSig]), hazardPhenSig[phenSig]
# Look through all the sampled hazards
for phen in hazardPhenSig[phenSig]:
print "-"*90
print "phen = %s" % (phen)
# Set aside the headline for each action in this area
NEW = []
CAN = []
UPG = []
EXA = []
CON = []
#-----------------------------------------------------------
# If we have items for this particular phen.sig
# combination, and this is the storm we are after
if phen["etn"] != filterEtn:
print "\tWrong storm!", phen
# Move on to the next one
continue
# Get the full VTEC code for this phenomena
# curHazardKey = (phen["act"], phen["key"])
curHazardKey = (phen["act"], phen["phensig"])
print "+++++ %s" % (repr(curHazardKey))
# If this action is anything other than "CAN", indicate it
# so we don't delete the JSON file for this storm at end
if phen["act"] != "CAN":
self._allCAN = False
# If we do not have the ETN of this hazard
if re.search(":\d{4}$", curHazardKey[1]) is None:
newHazardType = curHazardKey[1] + ":%d" % \
(phen["etn"])
else:
newHazardType = ""
# If we need to adjust the hazard key
if len(newHazardType) > 0:
# Make the changes
newCurHazardKey = (curHazardKey[0], newHazardType)
curHazardKey = newCurHazardKey
# See if there are upgrades or replacements for this area
areaHazardList = self._hazards.getHazardList(phen["id"])
#-----------------------------------------------------------
# Construct a hazard key which incorporates all hazards
# and actions for this area
tempHazardList = [curHazardKey]
for areaHazard in areaHazardList:
#-------------------------------------------------------
# Record headline for each action we find
if areaHazard["act"] == "NEW" and \
areaHazard["hdln"] not in NEW:
NEW.append(areaHazard["hdln"])
elif areaHazard["act"] | |
# Repository: miklobit/OpenTimelineIO
#
# Copyright 2017 Pixar Animation Studios
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
"""HLS Playlist OpenTimelineIO adapter
This adapter supports authoring of HLS playlists within OpenTimelineIO by using
clips to represent media fragments.
Status:
- Export of Media Playlists well supported
- Export of Master Playlists supported
- Import of Media Playlists well supported
- Import of Master Playlists unsupported
- Explicit Variant Stream controls in Master Playlists unsupported
In general, you can author otio as follows:
t = otio.schema.Timeline()
track = otio.schema.Track("v1")
track.metadata['HLS'] = {
"EXT-X-INDEPENDENT-SEGMENTS": None,
"EXT-X-PLAYLIST-TYPE": "VOD"
}
t.tracks.append(track)
# Make a prototype media ref with the fragment's initialization metadata
fragmented_media_ref = otio.schema.ExternalReference(
target_url='video1.mp4',
metadata={
"streaming": {
"init_byterange": {
"byte_count": 729,
"byte_offset": 0
},
"init_uri": "media-video-1.mp4"
}
}
)
# Make a copy of the media ref specifying the byte range for the fragment
media_ref1 = fragmented_media_ref.copy()
media_ref1.available_range=otio.opentime.TimeRange(
otio.opentime.RationalTime(0, 1),
otio.opentime.RationalTime(2.002, 1)
)
media_ref1.metadata['streaming'].update(
{
"byte_count": 534220,
"byte_offset": 1361
}
)
# make the fragment and append it
fragment1 = otio.schema.Clip(media_reference=media_ref1)
track.append(fragment1)
# (repeat to define each fragment)
The code above would yield an HLS playlist like:
#EXTM3U
#EXT-X-VERSION:7
#EXT-X-TARGETDURATION:2
#EXT-X-PLAYLIST-TYPE:VOD
#EXT-X-INDEPENDENT-SEGMENTS
#EXT-X-MEDIA-SEQUENCE:1
#EXT-X-MAP:BYTERANGE="729@0",URI="media-video-1.mp4"
#EXTINF:2.00200,
#EXT-X-BYTERANGE:534220@1361
video1.mp4
#EXT-X-ENDLIST
If you add min_segment_duration and max_segment_duration to the timeline's
metadata dictionary as RationalTime objects, you can control the rule set
deciding how many fragments to accumulate into a single segment. When nothing
is specified for these metadata keys, the adapter will create one segment per
fragment.
In general, any metadata added to the track metadata dict under the HLS
namespace will be included at the top level of the exported playlist (see
``EXT-X-INDEPENDENT-SEGMENTS`` and ``EXT-X-PLAYLIST-TYPE`` in the example
above). Each segment will pass through any metadata in the HLS namespace from
the media_reference.
If you write a Timeline with more than one track specified, then the adapter
will create an HLS master playlist.
The following track metadata keys will be used to inform exported master
playlist metadata per variant stream:
bandwidth
codec
language
mimeType
group_id (audio)
autoselect (audio)
default (audio)
These values are translated to EXT-X-STREAM-INF and EXT-X-MEDIA
attributes as defined in sections 4.3.4.2 and 4.3.4.1 of
draft-pantos-http-live-streaming, respectively.
"""
import re
import copy
import opentimelineio as otio
# TODO: determine output version based on features used
OUTPUT_PLAYLIST_VERSION = "7"
# TODO: make sure all strings get sanitized through encoding and decoding
PLAYLIST_STRING_ENCODING = "utf-8"
# Enable isinstance(my_instance, basestring) tests in Python 3
# This can be phased out when Python 2 support is dropped. Replace tests with:
# isinstance(my_instance, str)
try:
basestring
except NameError:
basestring = str
"""
Matches a single key/value pair from an HLS Attribute List.
See section 4.2 of draft-pantos-http-live-streaming for more detail.
"""
ATTRIBUTE_RE = re.compile(
r'(?P<AttributeName>[A-Z0-9-]+)' + r'\=' +
r'(?P<AttributeValue>(?:\"[^\r\n"]*\")|[^,]+)' + r',?'
)
"""
Matches AttributeValue of the above regex into appropriate data types.
Note that these are meant to be joined using regex "or" in this order.
"""
_ATTRIBUTE_RE_VALUE_STR_LIST = [
r'(?P<resolution>(?P<width>[0-9]+)x(?P<height>[0-9]+))\Z',
r'(?P<hexcidecimal>0[xX](?P<hex_value>[0-9A-F]+))\Z',
r'(?P<floating_point>-?[0-9]+\.[0-9]+)\Z',
r'(?P<decimal>[0-9]+)\Z',
r'(?P<string>\"(?P<string_value>[^\r\n"]*)\")\Z',
r'(?P<enumerated>[^",\s]+)\Z'
]
ATTRIBUTE_VALUE_RE = re.compile("|".join(_ATTRIBUTE_RE_VALUE_STR_LIST))
"""
Matches a byterange as used in various contexts.
See section 4.3.2.2 of draft-pantos-http-live-streaming for an example use of
this byterange form.
"""
BYTERANGE_RE = re.compile(r'(?P<n>\d+)(?:@(?P<o>\d+))?')
"""
Matches HLS Playlist tags or comments, respective.
See section 4.1 of draft-pantos-http-live-streaming for more detail.
"""
TAG_RE = re.compile(
r'#(?P<tagname>EXT[^:\s]+)(?P<hasvalue>:?)(?P<tagvalue>.*)'
)
COMMENT_RE = re.compile(r'#(?!EXT)(?P<comment>.*)')
class AttributeListEnum(str):
    """A ``str`` subclass marking enumerated values in HLS attribute lists.

    Enumerated values are serialized bare (no surrounding quotes), unlike
    plain strings, so this distinct type lets the writer tell them apart.
    """
def _value_from_raw_attribute_value(raw_attribute_value):
    """
    Takes in a raw AttributeValue and returns an appropriate Python type.

    If there is a problem decoding the value, None is returned.
    """
    value_match = ATTRIBUTE_VALUE_RE.match(raw_attribute_value)
    if value_match is None:
        return None

    groups = value_match.groupdict()

    # The regex alternatives are mutually exclusive, so exactly one of the
    # top-level groups below can match; check each in turn and convert.
    if groups.get('resolution') is not None:
        # resolutions stay as their raw "WxH" string form
        return groups['resolution']
    if groups.get('enumerated') is not None:
        # tag with the enum marker type so serialization omits quotes
        return AttributeListEnum(groups['enumerated'])
    if groups.get('hexcidecimal') is not None:
        return int(groups['hex_value'], base=16)
    if groups.get('floating_point') is not None:
        return float(groups['floating_point'])
    if groups.get('decimal') is not None:
        return int(groups['decimal'])
    if groups.get('string') is not None:
        # grab only the data within the quotes, excluding the quotes
        return groups['string_value']

    return None
class AttributeList(dict):
    """
    Dictionary-like object representing an HLS AttributeList.
    See section 4.2 of draft-pantos-http-live-streaming for more detail.
    """

    def __init__(self, other=None):
        """
        Constructs an :class:`AttributeList`.

        ``other`` can be either another dictionary-like object or a list of
        key/value pairs
        """
        if not other:
            return

        # Accept either a mapping (has .items()) or an iterable of pairs
        try:
            items = other.items()
        except AttributeError:
            items = other

        for k, v in items:
            self[k] = v

    def __str__(self):
        """
        Construct attribute list string as it would exist in an HLS playlist.
        """
        attr_list_entries = []
        # Use a sorted version of the dictionary to ensure consistency
        for k, v in sorted(self.items(), key=lambda i: i[0]):
            out_value = ''
            if isinstance(v, AttributeListEnum):
                # enumerated values are written bare, without quotes
                out_value = v
            elif isinstance(v, basestring):
                out_value = '"{}"'.format(v)
            else:
                out_value = str(v)

            attr_list_entries.append('{}={}'.format(k, out_value))

        return ','.join(attr_list_entries)

    @classmethod
    def from_string(cls, attrlist_string):
        """
        Accepts an attribute list string and returns an :class:`AttributeList`.

        The values will be transformed to Python types.
        """
        attr_list = cls()
        # finditer yields the non-overlapping matches left-to-right, which is
        # equivalent to (and simpler than) repeatedly calling search() with a
        # manually advanced start offset.
        for match in ATTRIBUTE_RE.finditer(attrlist_string):
            group_dict = match.groupdict()
            name = group_dict['AttributeName']
            raw_value = group_dict['AttributeValue']

            # parse the raw value
            attr_list[name] = _value_from_raw_attribute_value(raw_value)

        return attr_list
# some special top-level keys that HLS metadata will be decoded into
FORMAT_METADATA_KEY = 'HLS'
"""
Some concepts are translatable between HLS and other streaming formats (DASH).
These metadata keys are used on OTIO objects outside the HLS namespace because
they are higher level concepts.
"""
# Keys used within the "streaming" metadata namespace on OTIO objects.
STREAMING_METADATA_KEY = 'streaming'
INIT_BYTERANGE_KEY = 'init_byterange'
INIT_URI_KEY = 'init_uri'
SEQUENCE_NUM_KEY = 'sequence_num'
BYTE_OFFSET_KEY = 'byte_offset'
BYTE_COUNT_KEY = 'byte_count'
class Byterange(object):
    """Offers interpretation of HLS byte ranges in various forms."""

    # Number of bytes included in the range (int).
    count = None

    # Byte offset at which the range starts (int), or None when unspecified.
    offset = None

    def __init__(self, count=None, offset=None):
        """Constructs a :class:`Byterange` object.

        :param count: (:class:`int`) Number of bytes included in the range.
        :param offset: (:class:`int`) Byte offset at which the range starts.
        """
        # A missing count is normalized to zero; a missing offset stays None
        # and is then omitted from the string/dict representations.
        self.count = (count if count is not None else 0)
        self.offset = offset

    def __eq__(self, other):
        if not isinstance(other, Byterange):
            # fall back on identity, this should always be False
            return (self is other)
        return (self.count == other.count and self.offset == other.offset)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return '{}(offset = {}, count = {})'.format(
            type(self),
            str(self.offset),
            str(self.count)
        )

    def __str__(self):
        """returns a string in HLS format ("count" or "count@offset")"""
        out_str = str(self.count)
        if self.offset is not None:
            out_str += '@{}'.format(str(self.offset))

        return out_str

    def to_dict(self):
        """Returns a dict suitable for storing in otio metadata.

        :return: (:class:`dict`) serializable version of byterange.
        """
        range_dict = {BYTE_COUNT_KEY: self.count}
        if self.offset is not None:
            range_dict[BYTE_OFFSET_KEY] = self.offset

        return range_dict

    @classmethod
    def from_string(cls, byterange_string):
        """Construct a :class:`Byterange` given a string in HLS format.

        :param byterange_string: (:class:`str`) a byterange string.
        :return: (:class:`Byterange`) The instance for the provided string.
        """
        m = BYTERANGE_RE.match(byterange_string)

        return cls.from_match_dict(m.groupdict())

    @classmethod
    def from_match_dict(cls, match_dict):
        """
        Construct a :class:`Byterange` given a groupdict from ``BYTERANGE_RE``

        :param match_dict: (:class:`dict`) the ``match_dict``.
        :return: (:class:`Byterange`) The instance for the provided string.
        """
        byterange = cls(count=int(match_dict['n']))

        # re.Match.groupdict() always contains the optional "o" key, mapped
        # to None when the offset part is absent.  The previous
        # "except KeyError" therefore never fired, and int(None) raised an
        # uncaught TypeError for offset-less ranges such as "1234"; check for
        # None explicitly instead.
        offset = match_dict.get('o')
        if offset is not None:
            byterange.offset = int(offset)

        return byterange

    @classmethod
    def from_dict(cls, info_dict):
        """ Creates a :class:`Byterange` given a dictionary containing keys
        like generated from the :meth:`to_dict method`.

        :param info_dict: (:class:`dict`) Dictionary byterange.
        :return: (:class:`Byterange`) an equivalent instance.
        """
        byterange = cls(
            count=info_dict.get(BYTE_COUNT_KEY),
            offset=info_dict.get(BYTE_OFFSET_KEY)
        )

        return byterange
"""
For | |
"%s" onbekend',
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": '无法理解参数"%s"',
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Parámetro "%s" no reconocido',
},
],
"display": 'Parameter "%s" not understood',
}
)
"""
Parameter "%s" not understood
"""
msg_resource_example_protected = CodeSystemConcept(
{
"code": "MSG_RESOURCE_EXAMPLE_PROTECTED",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Ресурс с идентификатором "example" не может быть удалён (для случаев тестирования/обучения)',
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Le Risorse aventi l\'identità "example" non possono essere cancellate (per finalità di test/formazione)',
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Zasoby oznaczone jako "example" nie mogą zostać usunięte (dla celów testów/szkoleń)',
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Les ressources ayant l\'identité "example" ne peuvent pas être supprimées (utilisées pour les tests/formations)',
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Resources met identiteit "voorbeeld" kunnen niet worden verwijderd (ten behoeve van testen/training)',
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": '以"example" 为ID的资源不能被删除 (用于测试/培训)',
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Recursos con la identidad "example" no pueden ser borrados (son usados para pruebas/entrenamiento)',
},
],
"display": 'Resources with identity "example" cannot be deleted (for testing/training purposes)',
}
)
"""
Resources with identity "example" cannot be deleted (for testing/training purposes)
"""
msg_resource_id_fail = CodeSystemConcept(
{
"code": "MSG_RESOURCE_ID_FAIL",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "невозможно выделить идентификатор ресурса",
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "impossibile allocare l''id della risorsa",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "nie można nadać identyfikatora zasobu",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "impossible d'allouer l'id de la ressource",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "kan geen resource-id reserveren",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "无法分配资源ID",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "imposible encontrar el id del recurso",
},
],
"display": "unable to allocate resource id",
}
)
"""
unable to allocate resource id
"""
msg_resource_id_mismatch = CodeSystemConcept(
{
"code": "MSG_RESOURCE_ID_MISMATCH",
"designation": [
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Problème de correspondance d'Id de la Ressource",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Resource ID's komen niet overeen",
},
],
"display": "Resource Id Mismatch",
}
)
"""
Resource Id Mismatch
"""
msg_resource_id_missing = CodeSystemConcept(
{
"code": "MSG_RESOURCE_ID_MISSING",
"designation": [
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Id della Risorsa mancante",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Id de la Ressource manquante",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Resource ID ontbreekt",
},
],
"display": "Resource Id Missing",
}
)
"""
Resource Id Missing
"""
msg_resource_not_allowed = CodeSystemConcept(
{
"code": "MSG_RESOURCE_NOT_ALLOWED",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Для данной операции отправка ресурса недопустима",
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Non è consentito sottomettere una risorsa per questa operazione",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Nie można zgłosić zasobu dla tej operacji",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Non autorisé à soumettre une ressource pour cette opération",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Niet toegestaan om een resource in te dienen voor deze bewerking",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "该操作不允许提交资源",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "No se permite el envío de un recurso para esta operación",
},
],
"display": "Not allowed to submit a resource for this operation",
}
)
"""
Not allowed to submit a resource for this operation
"""
msg_resource_required = CodeSystemConcept(
{
"code": "MSG_RESOURCE_REQUIRED",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Требуется ресурс",
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "E'' richiesta una risorsa",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Zasób jest wymagany",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Une ressource est requise",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Een resource is verplicht",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "必须提供一个资源",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Se requiere un recurso",
},
],
"display": "A resource is required",
}
)
"""
A resource is required
"""
msg_resource_type_mismatch = CodeSystemConcept(
{
"code": "MSG_RESOURCE_TYPE_MISMATCH",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Несоответствие типа ресурса",
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Tipo Risorsa non corrispondente",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Niepoprawny typ zasobu",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Type de ressource incorrect",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Verkeerd resourcetype",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "资源类型不匹配",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Los Tipos de los recursos no coinciden",
},
],
"display": "Resource Type Mismatch",
}
)
"""
Resource Type Mismatch
"""
msg_sort_unknown = CodeSystemConcept(
{
"code": "MSG_SORT_UNKNOWN",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Неизвестное имя параметра сортировки "%s"',
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Nome del parametro di ordinamento "%s" non riconosciuto',
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Nieznany parametr sortowania "%s"',
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Nom du paramètre de tri inconnu "%s"',
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Onbekende parameternaam "%s" voor sortering',
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": '未知的排序参数名称"%s"',
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Nombre del parámetro de ordenación "%s" desconocido',
},
],
"display": 'Unknown sort parameter name "%s"',
}
)
"""
Unknown sort parameter name "%s"
"""
msg_transaction_duplicate_id = CodeSystemConcept(
{
"code": "MSG_TRANSACTION_DUPLICATE_ID",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Дублирующий идентификатор в транзакции: %s",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Zdublowany identyfikator w transakcji: %s",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Identifiant en double dans la transaction : %s",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Dubbele identificatie in transactie: %s",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "事务中存在重复Id: %s",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Identificador duplicado en la transacción: %s",
},
],
"display": "Duplicate Identifier in transaction: %s",
}
)
"""
Duplicate Identifier in transaction: %s
"""
msg_transaction_missing_id = CodeSystemConcept(
{
"code": "MSG_TRANSACTION_MISSING_ID",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Отсутствует идентификатор в транзакции - требуется entry.id",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Brak identyfikatora w transakcji - należy podać entry.id",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Identifiant manquant dans la transaction - un élément entry.id doit | |
"""
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
"""
import argparse
import copy
import datetime
import os
import time
import numpy as np
import pandas as pd
import rasterio
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from skimage.transform import rotate
from cafo import models, utils
from cafo.data.StreamingDatasets import StreamingGeospatialDataset
from cafo.data.TileDatasets import TileInferenceDataset
# Apply rasterio/GDAL environment tuning recommended for cloud-hosted tiles.
os.environ.update(utils.RASTERIO_BEST_PRACTICES)
# Favor cuDNN autotuned throughput over bitwise reproducibility.
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description="CAFO model training script")
# General arguments
parser.add_argument(
    "--output_dir",
    type=str,
    required=True,
    help="The path to a directory to store model checkpoints",
)
parser.add_argument(
    "--data_blob_root",
    type=str,
    required=False,
    help="The prefix to append to the paths of the tiles that we are loading",
)
parser.add_argument(
    "--overwrite",
    action="store_true",
    help="Flag for overwriting `output_dir` if that directory already exists",
)
parser.add_argument(
    "--save_most_recent",
    action="store_true",
    help="Flag for saving the most recent version of the model during training",
)
parser.add_argument(
    "--azureml",
    action="store_true",
    help="Whether we are running experiments on Azure ML",
)
parser.add_argument("--gpu", type=int, default=-1, help="The ID of the GPU to use")
parser.add_argument(
    "--debug",
    action="store_true",
    help="This drops all but a few tiles so we can test everything",
)
# Dataloader
parser.add_argument(
    "--num_dataloader_workers",
    type=int,
    default=6,
    help="Number of workers to use in all dataloaders",
)
parser.add_argument(
    "--num_chips_per_tile",
    type=int,
    default=600,
    help="The number of chips we will sample from each tile (we will potentially reject"
    + " some of these, so this isn't fixed)",
)
parser.add_argument(
    "--chip_size",
    type=int,
    default=256,
    help="The size of each chip to pass to the model",
)
parser.add_argument(
    "--inference_padding",
    type=int,
    default=32,
    help="The amount to padding to throw away from each chip during inference (must be"
    + " an even number)",
)
# Experiment arguments
parser.add_argument(
    "--seed", type=int, default=0, help="Random seed to pass to numpy and torch"
)
parser.add_argument(
    "--batch_size", type=int, default=32, help="Batch size to use for training"
)
parser.add_argument("--lr", type=float, default=0.01, help="Initial learning rate")
parser.add_argument(
    "--num_epochs", type=int, default=50, help="Number of epochs to train for"
)
parser.add_argument(
    "--rotation_augmentation",
    action="store_true",
    help="Whether to use rotation augmentation",
)
parser.add_argument(
    "--negative_sample_probability",
    type=float,
    default=1.0,
    help="Probability that we will sample a chip given that it doesn't have some of the"
    + " positive class (we will always sample if there is some positive class)",
)
parser.add_argument(
    "--model",
    default="unet",
    choices=("unet", "unet-large", "fcn"),
    help="Model to use",
)
parser.add_argument(
    "--training_set",
    default="train-all",
    choices=("train-all", "train-single", "train-augment", "all-all", "all-augment"),
    help="Which training set to use",
)
args = parser.parse_args()
# Derived constants used throughout training and inference.
NUM_WORKERS = args.num_dataloader_workers
NUM_CHIPS_PER_TILE = args.num_chips_per_tile
CHIP_SIZE = args.chip_size
# Sample an oversized chip so that after an arbitrary rotation a full
# CHIP_SIZE x CHIP_SIZE window can still be center-cropped without blank
# corners (sqrt(2) covers the worst-case 45-degree rotation).
LARGE_CHIP_SIZE = int(np.ceil(CHIP_SIZE * np.sqrt(2)))
CROP_POINT = (LARGE_CHIP_SIZE - CHIP_SIZE) // 2
PADDING = args.inference_padding
assert PADDING % 2 == 0
HALF_PADDING = PADDING // 2
# Stride inference chips so the discarded PADDING border is covered by the
# neighboring chip's center region.
CHIP_STRIDE = CHIP_SIZE - PADDING
def joint_transform(img, labels):
    """Jointly transform an image chip and its label mask into model-ready tensors.

    Optionally applies the same random rotation to both arrays (when
    ``--rotation_augmentation`` is set), center-crops both from
    ``LARGE_CHIP_SIZE`` down to ``CHIP_SIZE``, and converts them to torch
    tensors: the image as CHW float32 in [0, 1], the labels as int64.
    """
    center = slice(CROP_POINT, CROP_POINT + CHIP_SIZE)
    if args.rotation_augmentation:
        # Rotate image and mask by one shared angle. skimage's rotate returns
        # the uint8 image rescaled to floats in [0, 1], so the mask (rotated
        # with nearest-neighbor, order=0) is scaled back to integer values.
        angle = np.random.randint(0, 360)
        img = rotate(img, angle)
        labels = (rotate(labels, angle, order=0) * 255).astype(np.uint8)
    else:
        # No augmentation: normalize the raw uint8 image to [0, 1] manually.
        img = img / 255.0
    # Center-crop both arrays to the final chip size.
    img = img[center, center]
    labels = labels[center, center]
    # HWC -> CHW float32 for the network; int64 mask for the loss.
    chip_tensor = torch.from_numpy(np.rollaxis(img, 2, 0).astype(np.float32))
    mask_tensor = torch.from_numpy(labels.astype(np.int64))
    return chip_tensor, mask_tensor
def skip_check(img, labels):
    """Decide whether a sampled chip should be rejected.

    Returns True (skip) when the chip contains any nodata pixel; returns
    False (keep) when the chip contains at least one positive label;
    otherwise skips with probability ``1 - args.negative_sample_probability``.
    """
    # A pixel that is zero in every band is NAIP nodata -> reject the chip.
    # Generalized from a hard-coded 4 bands to however many bands `img` has
    # (identical behavior for the 4-band NAIP imagery this script targets).
    if np.any(np.sum(img == 0, axis=2) == img.shape[2]):
        return True
    if np.any(labels == 1):  # any positive labels -> always keep
        return False
    # Negative-only chip: keep with probability `negative_sample_probability`.
    return np.random.random() >= args.negative_sample_probability
def do_validation(
    validation_image_fns, validation_label_fns, model, device, epoch, logger, memo=""
):
    """Evaluate `model` on full validation tiles using overlapping-chip inference.

    For each (image, label) tile pair, the model is run over overlapping chips;
    per-chip softmax outputs are blended with a center-weighted kernel, then
    argmaxed into a hard per-pixel prediction compared against the label raster.

    Args:
        validation_image_fns: paths of validation imagery tiles.
        validation_label_fns: paths of the matching label rasters.
        model: segmentation network already placed on `device`.
        device: torch device used for inference.
        epoch: current epoch number (logging only).
        logger: logger for timing/metric output.
        memo: optional tag included in the log line.

    Returns:
        dict with aggregate ``val_iou``/``val_recall``/``val_precision``
        (recall/precision reported as -1 when undefined) plus per-tile
        iou/recall/precision lists, which may contain NaN for tiles with a
        zero denominator.
    """
    model.eval()
    # Confusion-matrix counts accumulated over every pixel of every tile.
    all_tp = 0
    all_fp = 0
    all_fn = 0
    all_tn = 0
    y_trues = []
    y_preds = []
    per_tile_ious = []
    per_tile_recalls = []
    per_tile_precisions = []
    tic = time.time()
    for validation_image_fn, validation_label_fn in zip(
        validation_image_fns, validation_label_fns
    ):
        val_dataset = TileInferenceDataset(
            validation_image_fn,
            chip_size=CHIP_SIZE,
            stride=CHIP_STRIDE,
            transform=utils.chip_transformer,
            verbose=False,
        )
        val_dataloader = torch.utils.data.DataLoader(
            val_dataset,
            batch_size=args.batch_size,
            num_workers=NUM_WORKERS,
            pin_memory=True,
        )
        with rasterio.open(validation_label_fn) as f:
            y_true = f.read().squeeze()
        input_height, input_width = y_true.shape
        output = np.zeros((2, input_height, input_width), dtype=np.float32)
        # Weight chip centers 5x more than chip borders when blending
        # overlapping predictions (borders are less reliable).
        kernel = np.ones((CHIP_SIZE, CHIP_SIZE), dtype=np.float32)
        kernel[HALF_PADDING:-HALF_PADDING, HALF_PADDING:-HALF_PADDING] = 5
        counts = np.zeros((input_height, input_width), dtype=np.float32)
        for i, (data, coords) in enumerate(val_dataloader):
            data = data.to(device)
            with torch.no_grad():
                t_output = model(data)
                t_output = F.softmax(t_output, dim=1).cpu().numpy()
            for j in range(t_output.shape[0]):
                y, x = coords[j]
                output[:, y : y + CHIP_SIZE, x : x + CHIP_SIZE] += t_output[j] * kernel
                counts[y : y + CHIP_SIZE, x : x + CHIP_SIZE] += kernel
        # Normalize the weighted sums into blended per-pixel probabilities.
        output = output / counts
        y_pred = output.argmax(axis=0).astype(np.uint8)
        gt_positives = y_true == 1
        gt_negatives = y_true == 0
        pred_positives = y_pred == 1
        pred_negatives = y_pred == 0
        tp = np.sum(gt_positives & pred_positives)
        fp = np.sum(gt_negatives & pred_positives)
        fn = np.sum(gt_positives & pred_negatives)
        tn = np.sum(gt_negatives & pred_negatives)
        all_tp += tp
        all_fp += fp
        all_fn += fn
        all_tn += tn
        # Record a sample of pixels to compute more expensive metrics
        # NOTE(review): y_trues/y_preds are collected and concatenated below
        # but are not currently used in any returned metric.
        y_trues.append(y_true.ravel()[::100])
        y_preds.append(output[1].ravel()[::100])
        # Per-tile metrics; NaN when the tile yields a zero denominator.
        iou = tp / (tp + fp + fn)
        recall = tp / (tp + fn)
        precision = tp / (tp + fp)
        per_tile_ious.append(iou)
        per_tile_recalls.append(recall)
        per_tile_precisions.append(precision)
    # Aggregate metrics over all pixels of all tiles.
    iou = all_tp / (all_tp + all_fp + all_fn)
    recall = all_tp / (all_tp + all_fn)
    precision = all_tp / (all_tp + all_fp)
    y_trues = np.concatenate(y_trues)
    y_preds = np.concatenate(y_preds)
    logger.info(
        "[{}] Validation Epoch: {}\t Time elapsed: {:.2f} seconds".format(
            memo, epoch, time.time() - tic
        )
    )
    logger.info("\tIoU: {}".format(iou))
    logger.info("\tPrecision: {}".format(precision))
    logger.info("\tRecall: {}".format(recall))
    return {
        "val_iou": iou,
        # BUG FIX: val_recall was previously gated on np.isnan(precision);
        # each metric is now checked against its own NaN-ness.
        "val_recall": recall if not np.isnan(recall) else -1,
        "val_precision": precision if not np.isnan(precision) else -1,
        "per_tile_ious": per_tile_ious,
        "per_tile_recalls": per_tile_recalls,
        "per_tile_precisions": per_tile_precisions,
    }
def main():
# Setup
if os.path.isfile(args.output_dir):
print("A file was passed as `--output_dir`, please pass a directory!")
return
if os.path.exists(args.output_dir) and len(os.listdir(args.output_dir)):
if args.overwrite:
print(
f"WARNING! The output directory, {args.output_dir}, already exists, we"
+ " might overwrite data in it!" % (args.output_dir)
)
else:
print(
f"The output directory, {args.output_dir}, already exists and isn't"
+ "empty. We don't want to overwrite and existing results, exiting..."
)
return
else:
print("The output directory doesn't exist or is empty.")
os.makedirs(args.output_dir, exist_ok=True)
if args.azureml:
from azureml.core import Run
run = Run.get_context()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
now_str = datetime.datetime.now().strftime("%Y-%m-%d_%X")
logger = utils.setup_log_file_handler(
args.output_dir, "training_{}".format(now_str)
)
logger.info("Starting CAFO training script")
logger.info("Saving results to: {}".format(args.output_dir))
if torch.cuda.is_available():
if args.gpu == -1:
device = torch.device("cuda")
logger.info("Using %d devices" % (torch.cuda.device_count()))
else:
device = torch.device("cuda:%d" % args.gpu)
logger.info("Using a single device")
else:
logger.error(
"WARNING! Torch is reporting that CUDA isn't available, exiting..."
)
return
logger.info("Using device: %s" % (str(device)))
# Load input data
validation_image_fns = [
"naip/v002/de/2011/de_100cm_2011/38075/m_3807505_ne_18_1_20110602.tif",
"naip/v002/de/2013/de_100cm_2013/38075/m_3807505_ne_18_1_20130915.tif",
"naip/v002/de/2015/de_100cm_2015/38075/m_3807505_ne_18_1_20150629.tif",
"naip/v002/de/2017/de_100cm_2017/38075/m_3807505_ne_18_1_20170720.tif",
"naip/v002/de/2018/de_060cm_2018/38075/m_3807505_ne_18_060_20180827.tif",
]
validation_label_fns = [
"train-augment/v002/de/2011/de_100cm_2011/38075/m_3807505_ne_18_1_20110602.tif",
"train-augment/v002/de/2013/de_100cm_2013/38075/m_3807505_ne_18_1_20130915.tif",
"train-augment/v002/de/2015/de_100cm_2015/38075/m_3807505_ne_18_1_20150629.tif",
"train-augment/v002/de/2017/de_100cm_2017/38075/m_3807505_ne_18_1_20170720.tif",
"train-augment/v002/de/2018/de_060cm_2018/38075/m_3807505_ne_18_060_20180827.tif",
]
if args.training_set == "train-all":
input_fn = "data/splits/train-all.csv"
elif args.training_set == "train-single":
input_fn = "data/splits/train-single.csv"
elif args.training_set == "train-augment":
input_fn = "data/splits/train-augment.csv"
elif args.training_set == "all-all":
input_fn = "data/splits/all.csv"
input_dataframe = pd.read_csv(input_fn)
image_fns = input_dataframe["image_fn"].to_list()
label_fns = input_dataframe["label_fn"].to_list()
if args.debug:
image_fns = image_fns[:4]
label_fns = label_fns[:4]
# remove val tile from training set
image_fns = [fn for fn in image_fns if "m_3807505_ne_18" not in fn]
label_fns = [fn for fn in label_fns if "m_3807505_ne_18" not in fn]
if args.data_blob_root is not None:
image_fns = [args.data_blob_root + fn for fn in image_fns]
label_fns = [args.data_blob_root + fn for fn in label_fns]
validation_image_fns = [args.data_blob_root + fn for fn in validation_image_fns]
validation_label_fns = [args.data_blob_root + fn for fn in validation_label_fns]
image_fns = np.array(image_fns)
label_fns = np.array(label_fns)
train_dataset = StreamingGeospatialDataset(
imagery_fns=image_fns,
label_fns=label_fns,
chip_size=LARGE_CHIP_SIZE,
num_chips_per_tile=NUM_CHIPS_PER_TILE,
windowed_sampling=False,
verbose=False,
sample_transform=joint_transform,
nodata_check=skip_check,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
num_workers=NUM_WORKERS,
pin_memory=True,
)
num_training_batches_per_epoch = int(
len(image_fns) * NUM_CHIPS_PER_TILE / args.batch_size
)
logger.info("We will be training with %d different tiles" % (image_fns.shape[0]))
logger.info(
"We will be training with %d batches per epoch"
% (num_training_batches_per_epoch)
)
# Setup training
if args.model == "unet":
model = models.get_unet()
elif args.model == "fcn":
model = models.get_fcn()
elif args.model == "unet-large":
model = models.get_unet_large()
else:
raise ValueError("Invalid model")
if args.gpu == -1:
model = nn.DataParallel(model).to(device)
else:
model = model.to(device)
optimizer = optim.AdamW(model.parameters(), lr=args.lr, amsgrad=True)
criterion = nn.CrossEntropyLoss()
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, "min", patience=2, threshold=0.0001
)
logger.info("Model has %d parameters" % (utils.count_parameters(model)))
# Model training
metrics_per_epoch = []
best_val_iou = 0
num_times_lr_dropped = 0
for epoch in range(args.num_epochs):
lr = utils.get_lr(optimizer)
training_losses = utils.fit(
model,
device,
train_dataloader,
num_training_batches_per_epoch,
optimizer,
criterion,
epoch,
)
metrics = do_validation(
validation_image_fns,
validation_label_fns,
model,
device,
epoch,
logger,
memo="",
)
# Record training loss and val metrics
metrics["training_loss"] = training_losses[0]
metrics_per_epoch.append(metrics)
if args.azureml:
run.log("training_loss", | |
cp = self.names['CP'], site = self.names['site'],
diagnosis ="Not Specified", status = "Not Specified", activity = "Active", unit = "DAYS")
assert str(response).lower().find('error')==-1, "Error creating an Event: "+str(response)
assert bool(response), "Error creating an Event"
self.ID['event']=response['id']
self.logFile.write(str(response)+ ' \n')
# Update an Event
self.logFile.write('-Update CP Event- \n')
self.names['eventLabel']="IntegrationTestLabel1"
response = self.cpe_util.update_event(eventid=self.ID['event'],label = self.names['eventLabel'], point = 0, cp = self.names['CP'],
site = self.names['site'], diagnosis ="Not Specified", status = "Not Specified", activity = "Active", unit = "DAYS")
assert str(response).lower().find('error')==-1, "Error creating an Event: "+str(response)
assert bool(response), "Error creating an Event"
self.ID['event']=response['id']
self.logFile.write(str(response)+ ' \n')
##Visits
# Create a Visit
self.logFile.write('-Create a Visit- \n')
self.names['visit']="IntegrationTestVisit"
response = self.vis_util.add_visit(cprid = self.ID['cpr'], name = self.names['visit'], site = self.names['site'])
assert str(response).lower().find('error')==-1, "Error creating a Visit: "+str(response)
assert bool(response), "Error creating a Visit"
self.ID['visit']=response['id']
self.logFile.write(str(response)+ ' \n')
# searchvisits
self.logFile.write('-search for visits- \n')
response = self.vis_util.search_visit_namespr(visitname = self.names['visit'])
assert str(response).lower().find('error')==-1, "Error searching for a Visit: "+str(response)
self.logFile.write(str(response)+ ' \n')
# Get visit with cprID
self.logFile.write('- Get visit by CPRID- \n')
response = self.vis_util.search_visit_cprid(cprid = self.ID['cpr'])
assert str(response).lower().find('error')==-1, "Error getting visit: "+str(response)
assert bool(response), "Error getting visit"
self.logFile.write(str(response)+ ' \n')
# Get all events
self.logFile.write('-Get all Events- \n')
response = self.cpe.get_all_events(cpid=self.ID['CP'])
assert str(response).lower().find('error')==-1, "Error getting all Events: "+str(response)
assert bool(response), "Error getting all events"
self.logFile.write(str(response)+ ' \n')
# Update a Visit
self.logFile.write('-Update a Visit- \n')
self.names['visit']="IntegrationTestVisit1"
response = self.vis_util.update_visit(visitid = self.ID['visit'], cprid = self.ID['cpr'], name = self.names['visit'], site = self.names['site'])
assert str(response).lower().find('error')==-1, "Error updating a Visit: "+str(response)
assert bool(response), "Error updating a Visit"
self.logFile.write(str(response)+ ' \n')
# Add a Visit and Specimen in one turn
self.logFile.write('-Add Visit and Specimen- \n')
self.names['visit2']="IntegrationTestVisit2"
self.names['speci2']="IntegrationTestSpeci2"
response = self.vis_util.add_visit_speci(name = self.names["visit2"], lineage = "New", av_qty = 10, user = 2, init_qty = 10, spec_class ='Fluid',
spec_type = "Bile", anat_site ="Anal Canal", path='Malignant', site = self.names['site'], speclabel=self.names['speci2'],cpid=self.ID['CP'],
ppid="IntegrationPPID", cprid=self.ID['cpr'],colltime="2021-03-01",rectime='2021-03-01')
assert str(response).lower().find('error')==-1, "Error adding a Visit and a Specimen in one turn: "+str(response)
assert bool(response), "Error adding a Visit and a Specimen in one turn"
self.ID['visit2']=response['visit']['id']
self.ID['speci2']=response['specimens'][0]['id']
self.logFile.write(str(response)+ ' \n')
## Specimens
# Add a Specimen
self.logFile.write('-Add a Specimen- \n')
self.names['speci']="IntegrationTestSpeci"
response = self.spec_util.create_specimen(specimenclass = 'Fluid', specimentype = 'Bile', pathology = 'Malignant', anatomic = 'Anal Canal',
laterality = 'Left', initqty = 10, avaqty =10, visitid = self.ID['visit'], recqlt = 'Acceptable', userid = 2, label = self.names['speci'],
colltime = '2021-03-01', rectime = '2021-03-01')
assert str(response).lower().find('error')==-1, "Error creating Specimen: "+str(response)
assert bool(response), "Error creating Specimen"
self.ID['speci']=response['id']
self.logFile.write(str(response)+ ' \n')
# check label
self.logFile.write('-Check if label exist- \n')
response = self.spec.check_specimen(specimenLabel = self.names['speci'])
assert str(response).lower().find('error')==-1, "Error checking Specimen: "+str(response)
assert bool(response), "Error checking Specimen"
self.logFile.write(str(response)+ ' \n')
# update Specimen
self.logFile.write('-Update Specimen- \n')
self.names['speci']="IntegrationTestSpeci1"
response = self.spec_util.update_specimen(specimenid = self.ID['speci'], specimenclass = 'Fluid', specimentype = 'Bile', pathology = 'Malignant', anatomic = 'Anal Canal',
laterality = 'Left', initqty = 10, avaqty =10, visitid = self.ID['visit'], recqlt = 'Acceptable', userid = 2, label = self.names['speci'],
colltime = '2021-03-01', rectime = '2021-03-01')
assert str(response).lower().find('error')==-1, "Error updating Specimen: "+str(response)
assert bool(response), "Error updating Specimen"
self.logFile.write(str(response) + ' \n')
# search Specimens
self.logFile.write('-Search Specimens- \n')
response = self.spec_util.search_specimens(label=self.names['speci'])
assert str(response).lower().find('error')==-1, "Error searching for a Specimen: "+str(response)
assert bool(response), "Error searching for a Specimen"
self.logFile.write(str(response) + ' \n')
## Container
# Create Container
self.logFile.write("-Create Container-")
response = self.cont_util.create_container(name="Int_test_cont", sitename=self.names["site"], numrows=2, numcols=2, storespecimens="true")
assert str(response).lower().find('error')==-1, "Error creating Container: "+str(response)
assert bool(response), "Error creating Container"
self.logFile.write(str(response) + ' \n')
self.ID['cont'] = response['id']
#Get Container
self.logFile.write("-Get Container-")
response = self.cont.get_container(container_id=self.ID["cont"])
assert str(response).lower().find('error')==-1, "Error getting Container: "+str(response)
assert bool(response), "Error getting Container"
self.logFile.write(str(response) + ' \n')
#Get all Containers
self.logFile.write("-Get all Containers-")
response = self.cont.get_all_containers()
assert str(response).lower().find('error')==-1, "Error getting all Containers: "+str(response)
assert bool(response), "Error getting all Containers"
self.logFile.write(str(response) + ' \n')
# Update Container
self.logFile.write("-Update Container-")
response = self.cont_util.update_container(name="Int_test_cont1", sitename=self.names["site"], numrows=2, numcols=2, storespecimens="true",
container_id=self.ID["cont"])
assert str(response).lower().find('error')==-1, "Error creating Container: "+str(response)
assert bool(response), "Error creating Container"
self.logFile.write(str(response) + ' \n')
## Queries
# Create AQL
self.logFile.write('-Executing AQL- \n')
response = self.qry_util.create_aql(cpid = self.ID['CP'], aql = "select Participant.ppid, SpecimenCollectionGroup.collectionDate, count(distinct Specimen.id) where Specimen.lineage = \"New\"")
assert str(response).lower().find('error')==-1, "Error executing AQL: "+str(response)
assert bool(response), "Error executing AQL"
self.logFile.write(str(response) + ' \n')
# Search Queries
self.logFile.write('-Searching for queries- \n')
response = self.qry_util.search_query()
assert str(response).lower().find('error')==-1, "Error searching for queries: "+str(response)
assert bool(response), "Error searching for queries"
self.logFile.write(str(response) + ' \n')
# Executing saved queries
self.logFile.write('-Executing a saved query- \n')
response = self.qry_util.execute_query(qryid = '1')
assert str(response).lower().find('error')==-1, "Error executing saved query: "+str(response)
assert bool(response), "Error executing saved query"
self.logFile.write(str(response) + ' \n')
######################################################################################
############################## C L E A N U P #######################################
######################################################################################
#delete Container
self.logFile.write("Delete Container")
response = self.cont.disable_container(containerid=self.ID["cont"])
assert str(response).lower().find('error')==-1, "Error deleting Container: "+str(response)
assert bool(response), "Error deleting Container"
self.logFile.write(str(response) + ' \n')
#delete Specimen
self.logFile.write('-Delete a Specimen- \n')
response = self.spec_util.delete_specimens(specimenids = self.ID['speci'])
assert str(response).lower().find('error')==-1, "Error deleting a Specimen: "+str(response)
assert bool(response), "Error deleting a Specimen"
self.logFile.write(str(response) + ' \n')
#get a Specimen
self.logFile.write('-Get Specimen by ID(already deleted)- \n')
response = self.spec.get_specimen(specimenid = self.ID['speci'])
assert str(response).lower().find('error')==-1, "Error getting a Specimen: "+str(response)
assert bool(response), "Error getting a Specimen"
self.logFile.write(str(response) + ' \n')
#delete Specimen
self.logFile.write('-Delete a Specimen- \n')
response = self.spec_util.delete_specimens(specimenids = self.ID['speci2'])
assert str(response).lower().find('error')==-1, "Error deleting a Specimen: "+str(response)
assert bool(response), "Error deleting a Specimen"
self.logFile.write(str(response) + ' \n')
#delete Visit
self.logFile.write('-Delete a Visit- \n')
response = self.vis.delete_visit(visitid = self.ID['visit2'])
assert str(response).lower().find('error')==-1, "Error deleting a Visit: "+str(response)
assert bool(response), "Error deleting a Visit"
self.logFile.write(str(response) + ' \n')
#delete Visit
self.logFile.write('-Delete a Visit- \n')
response = self.vis.delete_visit(visitid = self.ID['visit'])
assert str(response).lower().find('error')==-1, "Error deleting a Visit: "+str(response)
assert bool(response), "Error deleting a Visit"
self.logFile.write(str(response) + ' \n')
#get Visit
self.logFile.write('Get Visit by ID(already deleted)- \n')
response = self.vis.get_visit(visitid = self.ID['visit'])
assert str(response).lower().find('error')==-1, "Error getting a visit: "+str(response)
assert bool(response), "Error getting a visit"
self.logFile.write(str(response) + ' \n')
#delete Event
self.logFile.write('-Delete and Event- \n')
response = self.cpe.delete_event(eventid = self.ID['event'])
assert str(response).lower().find('error')==-1, "Error deleting an event: "+str(response)
assert bool(response), "Error deleting an event"
self.logFile.write(str(response) + ' \n')
#get Event
self.logFile.write('Get event(already deleted)- \n')
response = self.cpe.get_event(eventid = self.ID['event'])
assert str(response).lower().find('error')==-1, "Error getting an Event: "+str(response)
assert bool(response), "Error getting an Event"
self.logFile.write(str(response) + ' \n')
#delete Registration
self.logFile.write('-Delete Registration- \n')
response = self.cpr.delete_participant(cprid = self.ID["cpr"])
assert str(response).lower().find('error')==-1, "Error deleting Participant: "+str(response)
assert bool(response), " Error deleting Participant"
self.logFile.write(str(response) + ' \n')
#get deleted Registration
self.logFile.write('-Get Registration-')
response = self.cpr.get_registration(cprid = self.ID['cpr'])
assert str(response).lower().find('error')==-1, "Error getting Participan: "+str(response)
assert bool(response), "Error getting Participant"
self.logFile.write(str(response)+ ' \n')
#delete CP
self.logFile.write("-Delete CP- \n")
response = self.cp.delete_collection_protocol(cpid = self.ID['CP'])
assert str(response).lower().find('error')==-1, "Error deleting CP: "+str(response)
assert bool(response), "Error deleting CP"
self .logFile.write(str(response) + ' \n')
#get deleted CP
self.logFile.write("-Get CP via Id(is already deleted)- \n")
response = self.cp.get_collection_protocol(cpid = self.ID['CP'])
assert str(response).lower().find('error')==-1, "Error getting a CP: "+str(response)
assert bool(response), "Error getting a CP"
self.logFile.write(str(response) + ' \n')
#delete user
self.logFile.write("-Delete User- \n")
response = self.user.delete_user(userid = self.ID['user'])
assert str(response).lower().find('error')==-1, "Error deleting User: "+str(response)
assert bool(response), "Error deleting User"
self.logFile.write(str(response) + ' \n')
#get deleted user
self.logFile.write("-Get User via ID(is already deleted- \n")
response = self.user.get_user(userId = self.ID['user'])
assert str(response).lower().find('error')==-1, "Error getting User: "+str(response)
assert bool(response), "Error getting User"
self.logFile.write(str(response) + ' \n')
#delete site
self.logFile.write("-Delete Site- \n")
response = self.site.delete_sites(siid = self.ID['site'])
assert str(response).lower().find('error')==-1, "Error deleting Site: "+str(response)
assert bool(response), "Error deleting Site"
self.logFile.write(str(response) + " \n")
#get deleted site
self.logFile.write("-Get Site via ID(is already deleted- \n")
response = self.site.get_site(siteid = self.ID['site'])
assert str(response).lower().find('error')==-1, "Error getting Site: "+str(response)
assert bool(response), "Error getting Site"
self.logFile.write(str(response) + ' \n')
#delete Institute
self.logFile.write("- Delete Institute- \n")
response = self.inst.delete_institute(inid = self.ID['institute'])
assert str(response).lower().find('error')==-1, "Error deleting Site: "+str(response)
assert bool(response), "Error deleting Site"
self.logFile.write(str(response)+ ' \n')
#get Institute
self.logFile.write("-Get Institute- \n")
response = self.inst.get_institute(inid = self.ID['institute'])
assert str(response).lower().find('error')==-1, "Error getting institute: "+str(response)
assert bool(response), "Error getting institute"
self.logFile.write(str(response)+ ' \n')
return "end_of Test"
def cleanUp(self):
if 'cont' in self.ID.keys():
self.cont.disable_container(self.ID["cont"])
if 'speci' in self.ID.keys():
self.spec_util.delete_specimens(specimenids =self.ID['speci'])
if 'speci2' in self.ID.keys():
self.spec_util.delete_specimens(specimenids = self.ID['speci2'])
if 'visit2' in self.ID.keys():
| |
import copy
import operator
import itertools
import scipy.stats
import numpy as np
import pandas as pd
import plum.util.data
import scipy.optimize
import sklearn.metrics
from plum.models.modelABC import Model
#from plum.util.cpruning import getNodeProbs, getNodeProbs_prior_weighted
from plum.util.cpruning import cgetNodeProbs
from plum.util.cfitting import _simulated_annealing, _simulated_annealing_multivariate
class plumRecallPrecision(plum.util.data.plumData):
'''Base class for optimizers that use recall-precision.
Input file should be training data with each pairs having at least one taxon with a known state.
The known labels will be ignored for inferring ancestral state probabilities, but will be used to
compare predictions to when calculating recall-precision'''
def __init__(self,markov_model,error_model,tree,data=None,as_sorted=False,prior_weighted=False):
plum.util.data.plumData.__init__(self,markov_model,error_model,tree,data,as_sorted)
assert type(prior_weighted) is bool, "`prior_weighted` must be boolean"
if prior_weighted:
raise Exception("**option `prior_weighted` needs to be deleted**")
self._prior_weighted = True
self._calc_ancStates = np.vectorize(getNodeProbs_prior_weighted)
else:
self._prior_weighted = False
#self._calc_ancStates = np.vectorize(getNodeProbs)
self._calc_ancStates = cgetNodeProbs
self._outputDF = None
self._blank_knownDs = np.array( [{}] * len(self._featureDs) )
print("Initializing model")
self._update_all(self._param_vec) # calculate initial state
print("Finished initializing model")
self._precision_recall_dataframe = None
self._results_dataframe = None
#@property
#def prior_weighted(self):
# '''Whether to weight the likelihood under the error model by
# the state prior from the stationary frequencies'''
# return self._prior_weighted
#@prior_weighted.setter
#def prior_weighted(self,pw):
# assert type(pw) is bool, "`prior_weighted` must be boolean"
# if pw == True:
# self._prior_weighted = True
# self._calc_ancStates = np.vectorize(getNodeProbs_prior_weighted)
# else:
# self._prior_weighted = False
# self._calc_ancStates = np.vectorize(getNodeProbs)
    @property
    def recall(self):
        '''Recall values along the precision-recall curve under the current model parameters.'''
        return self._recall
    @property
    def precision(self):
        '''Precision values along the precision-recall curve under the current model parameters.'''
        return self._precision
    @property
    def thresholds(self):
        '''Decision thresholds corresponding to the recall-precision values under the current model.'''
        return self._thresholds
    @property
    def aps(self):
        '''Average precision score (area under the recall-precision curve) under the current model.'''
        return self._aps
    @property
    def probabilities(self):
        '''Vector of reconstructed probabilities for the known-state tips under the current model.'''
        return self._classifier_probs
@property
def precision_recall_DF(self):
'''Return a dataframe with the precision recall curve'''
if self._precision_recall_dataframe is None:
self._precision_recall_dataframe = pd.DataFrame( {"precision":self.precision, "recall":self.recall} )
self._precision_recall_dataframe.sort_values("recall", inplace=True)
return self._precision_recall_dataframe
    @property
    def results_DF(self):
        '''Per-prediction results as a DataFrame, built lazily and cached.

        Rows are (ID1, ID2, species, known state, P_1) sorted by predicted
        probability P_1 descending, with an empirical FDR column computed as
        1 - (cumulative positives / rank) down the sorted list.
        '''
        if self._results_dataframe is None:
            self._results_dataframe = pd.DataFrame( {"ID1": self._outID1,
                                                     "ID2": self._outID2,
                                                     "species": self._outspecies,
                                                     "state": self._labels,
                                                     "P_1": self._classifier_probs},
                                                     columns=["ID1","ID2","species","state","P_1"]
                                                     ).sort_values("P_1",ascending=False)
            # Empirical FDR at each rank of the probability-sorted predictions.
            self._results_dataframe["FDR"] = 1 - ( self._results_dataframe["state"].cumsum() / (np.arange(self._results_dataframe.shape[0])+1) )
            # Reindex 0..n-1 after sorting so the index matches rank order.
            self._results_dataframe.index = np.arange(self._results_dataframe.shape[0])
        return self._results_dataframe
    def update_from_dict(self,param_dict,save_state=False):
        '''Update model parameters from a dict of {parameter name: value}.

        Only the parameters present in `param_dict` are updated; all others
        keep their current values. After updating, the error and Markov models
        are refreshed, ancestral states are recomputed for all pairs, and the
        average precision score (`self._aps`) is recalculated.

        If `save_state` is True, the previous state of the model is saved in
        hidden `_last_*` attributes (used internally by MCMC samplers so a
        rejected move can be rolled back via `_reset_last`).
        '''
        if save_state:
            # Snapshot everything _reset_last needs to roll back this update.
            self._last_classifier_probs = copy.deepcopy(self._classifier_probs)
            self._last_params_vec = copy.deepcopy(self._param_vec)
            self._last_paramD = self._paramD.copy()
            self._last_pairAncStates = self._pairAncStates.copy()
            self._last_aps = copy.deepcopy(self._aps)
        for name,val in param_dict.items():
            assert name in self._paramD, "Invalid parameter name for this model: {}".format(name)
            self._paramD[name] = np.float64( val ) # this is only a good idea so far as *every* parameter should have this type
        # Rebuild the flat parameter vector in free-parameter order.
        self._param_vec = [self._paramD[i] for i in self._free_params]
        # use properties to update models (they parse self._paramD)
        self._error_model.updateParams(self.errorModelParams)
        self._markov_model.updateParams(self.markovModelParams)
        self._state_priors = dict(list(zip(*self._markov_model.stationaryFrequencies)))
        # An empty dictionary is passed to knownD argument, so all labels in dataset will be
        # ignored. This is something that I could do differently
        self._pairAncStates = self._calc_ancStates(self.tree,self._featureDs,self._blank_knownDs,
                                                   self._error_model,
                                                   self._markov_model,
                                                   self._state_priors)
        self._aps = self._calc_aps()
def _reset_last(self):
'''Reset to the last set of parameters, likelihoods etc.
This is primarily for MCMC.'''
assert self._last_params_vec != None, "No previous states saved"
self._param_vec = self._last_params_vec
self._paramD = self._last_paramD
self._pairAncStates = self._last_pairAncStates
self._aps = self._last_aps
self._classifier_probs = self._last_classifier_probs
self._error_model.updateParams(self.errorModelParams)
self._markov_model.updateParams(self.markovModelParams)
    def _calc_aps(self):
        '''Calculate the area under the recall-precision curve (average precision)
        over all labeled taxa.

        Side effects: rebuilds `_classifier_probs`, `_labels`, `_outspecies`,
        `_outID1`/`_outID2` and the `_precision`/`_recall`/`_thresholds` arrays
        from the current ancestral state reconstruction (`_pairAncStates`).

        Returns the sklearn average precision score (float).
        '''
        self._classifier_probs = [] # probabilities of known tips from ancestral state reconstruction
        self._outspecies = []
        self._outID1, self._outID2 = [], []
        self._labels = []
        for index,nodeD in enumerate(self._knownLabelDs):
            for taxon,label in nodeD.items():
                if not pd.isnull(label): # only score taxa that actually have a label
                    try:
                        #prob = self._pairAncStates[index][taxon][1]
                        prob = self._pairAncStates[index][taxon]
                    except KeyError:
                        # Surface the reconstruction for this pair to aid debugging a
                        # missing taxon.
                        raise Exception("{}".format(self._pairAncStates[index]))
                    if pd.isnull(prob): # seems that these can be nan, perhaps with certain parameter combinations?
                        prob = 0 # not sure this is the right way to do this. Got almost all nans for both means=0, both sds=.1, alpha/beta=.2
                    self._labels.append(label)
                    self._classifier_probs.append(prob)
                    self._outID1.append(self._pairs[index][0])
                    self._outID2.append(self._pairs[index][1])
                    self._outspecies.append(taxon)
        assert len(self._classifier_probs) == len(self._labels)
        self._precision,self._recall,self._thresholds = sklearn.metrics.precision_recall_curve(self._labels,self._classifier_probs,pos_label=1.)
        return sklearn.metrics.average_precision_score(self._labels,self._classifier_probs)
    def _update_all(self,param_array,save_state=False):
        '''Replace the full free-parameter vector and refresh all derived state.

        1. Optionally snapshot the current state into `_last_*` attributes so
           `_reset_last` can roll back (used by MCMC).
        2. Re-key `self._paramD` from `param_array`, relying on `self._free_params`
           being a list of keys phased to `self._param_vec`.
        3. Push the new values into the error/Markov models via the
           `errorModelParams`/`markovModelParams` properties (they parse
           `self._paramD`) and recompute the stationary state priors.
        4. Re-run the ancestral state reconstruction and recompute the APS.

        - `param_array`: sequence of parameter values, same length and order as
          `self._free_params`
        - `save_state`: <bool> snapshot the previous state before overwriting
        '''
        if save_state:
            # Snapshot everything `_reset_last` needs to restore a rejected MCMC move.
            self._last_classifier_probs = copy.deepcopy(self._classifier_probs)
            self._last_params_vec = copy.deepcopy(self._param_vec)
            self._last_paramD = self._paramD.copy()
            self._last_pairAncStates = self._pairAncStates.copy()
            self._last_aps = copy.deepcopy(self._aps)
        self._param_vec = param_array # could do this via a @property setter method instead
        assert len(self._param_vec) == len(self._free_params), "Why aren't parameter vector and free params same length"
        self._paramD = dict(list(zip(self._free_params,self._param_vec))) # update main holder of parameters
        # use properties to update models (they parse self._paramD)
        self._error_model.updateParams(self.errorModelParams)
        self._markov_model.updateParams(self.markovModelParams)
        self._state_priors = dict(list(zip(*self._markov_model.stationaryFrequencies)))
        # An empty dictionary is passed to the knownD argument, so all labels in the
        # dataset are ignored during reconstruction (labels are only used afterwards
        # for scoring in `_calc_aps`).
        self._pairAncStates = self._calc_ancStates(self.tree,self._featureDs,self._blank_knownDs,
                                                   self._error_model,
                                                   self._markov_model,
                                                   self._state_priors)
        self._aps = self._calc_aps()
def write_parameter_file(self,outfile):
'''Write current parameters to a parameter file
- `outfile`: <str> File to write to (will overwrite an existing file of the same name)'''
plum.util.data.write_parameter_file(error_model=self._error_model,markov_model=self._markov_model,outfile=outfile)
class sweep(plumRecallPrecision):
    '''Perform a simple 1-dimensional sweep over a single model parameter,
    logging the effect on the average precision score (APS).'''
    def __init__(self,markov_model,error_model,param,bound,tree,data=None,as_sorted=False,prior_weighted=False,step=.1):
        '''
        - `param`: <str> name of the free parameter to sweep
        - `bound`: (low, high) bounds of the sweep; `high` is exclusive
        - `step`: increment between successive parameter values (coerced to float)
        Remaining arguments are passed through to plumRecallPrecision.
        '''
        plumRecallPrecision.__init__(self,markov_model,error_model,tree,data,as_sorted,prior_weighted)
        self._param_sweep = []          # parameter values tried, in order
        self._aps_vec = []              # APS at each tried value
        self._best_aps = None           # best APS seen so far (None until sweep() runs)
        self._best_param = None         # parameter value that produced _best_aps
        self._param_to_sweep = param
        self._bound = bound
        self._step = float(step)
    @property
    def bestAPS(self):
        '''Return the best APS value found on the sweep'''
        return self._best_aps
    @property
    def APS_sweep(self):
        '''Return the vector of APS values over the sweep'''
        return self._aps_vec
    @property
    def param_sweep(self):
        '''Return the vector of parameter values swept over'''
        return self._param_sweep
    def sweep(self):
        '''Sweep the parameter across [low, high) in `step` increments, recording
        each APS and tracking the best (APS, parameter) pair.'''
        for i in np.arange(self._bound[0],self._bound[1],self._step):
            self._paramD[self._param_to_sweep] = i
            new_vec = [self._paramD[p] for p in self._free_params]
            self._update_all(new_vec)
            self._param_sweep.append(i)
            self._aps_vec.append(self._aps)
            # BUG FIX: `self._aps > self._best_aps` with the initial value None
            # raises TypeError on Python 3; treat the first result as the best.
            if self._best_aps is None or self._aps > self._best_aps:
                self._best_aps = self._aps
                self._best_param = i
class mcmc(plumRecallPrecision):
'''MCMC sampling using area under the recall-precision curve as a metric'''
def __init__(self,markov_model,error_model,outfile,tree,data,as_sorted=False,prior_weighted=False,n_iters=10000,save_every=10,stringency=1,scale_dict=None):
'''
-`markov_model`: A plum.models.MarkovModels object. Its parameters will be used as a starting point for optimization
-`error_model`: A plum.models.ErrorModels object. Its parameters will be used as a starting point for optimization
-`outfile`: File to write iterations to
-`tree`: Path to the input newick tree
-`data`: A file in tidy data format.
-`as_sorted`: whether input data is presorted on ID1,ID2
-`n_iters`: number of MCMC iterations to perform
-`save_every`: Save state after this many of generations
-`stringency`: Scaling factor for acceptance ratio. Must be between 0 and 1, where 1 does not scale the Hastings ratio, and
0 means a lower APS will never be accepted
-`scale_dict`: Standard deviations of normal distributions to sample parameter proposals from. Dictionary mapping parameter
names to numbers. Defaults to .5 for every parameter
'''
plumRecallPrecision.__init__(self,markov_model,error_model,tree,data,as_sorted,prior_weighted)
# Run params
self._map_aps = None
self._map_params = None
self.n_iters = n_iters
self.save_every = save_every
self.outfile = outfile
self._accepts = []
assert np.logical_and(stringency >= 0, stringency <= 1), "`stringency` must be between 0 and 1"
self._stringency = stringency
self._scale_dict = {p:.5 for p in self.freeParams} #
if scale_dict != None:
for p,val in scale_dict:
assert p in self.freeParams, "Parameter '{}' in scale_dict not found".format(p)
self._scale_dict[p] = val
@property
def best_parameters(self):
'''The free parameters with the highest APS found in the search and that APS value.
Returns a tuple of the parameters dictionary and the APS value'''
return self._map_params, self._map_aps
def _draw_proposal(self):
'''Draw a new parameter value from free parameters and update model'''
# Right now, only move is a normally distributed step. Worth thinking
# about the right way to make | |
for tf in dict_lines_to_keep_per_tf.keys():
adjusted_dict_p_values_per_tf[tf] = adjust_pvales(dict_p_values_per_tf[tf])
adjusted_dict_p_values_overall_per_tf[tf] = adjust_pvales(dict_p_values_overall_per_tf[tf])
#get the sig and write them to output file
for i, tf_motif in enumerate(dict_lines_to_keep_per_tf[tf]):
#if dict_p_values_per_tf[tf][i]<0.05 or dict_p_values_overall_per_tf[tf][i]<0.05:
tf_motif[motif_breaking_score_index+1] = str(dict_p_values_per_tf[tf][i]) + ';' + str(dict_p_values_overall_per_tf[tf][i]) + ";" + str(adjusted_dict_p_values_per_tf[tf][i]) + ";" + str(adjusted_dict_p_values_overall_per_tf[tf][i])
annoted_input_ofile.write('\t'.join(tf_motif) + '\n')
#select the motif if its ChiP-seq signal larger than 0
if tf_motif[tf_binding_index]!="nan":
if float(tf_motif[tf_binding_index]) > 0.0:
annoted_input_ofile_onlysig.write('\t'.join(tf_motif) + '\n')
continue
if filter_on_qval:
if filter_on_signal:
#otherwise check for existence of a Dnase1 peak and Pval<0.05
if adjusted_dict_p_values_overall_per_tf[tf][i]<sig_thresh_fdr and (float(tf_motif[dnase_index])>0.0):# or float(tf_motif[fantom_index])>0.0 or float(tf_motif[num_other_tfs])>0.0
annoted_input_ofile_onlysig.write('\t'.join(tf_motif) + '\n')
else:
if adjusted_dict_p_values_overall_per_tf[tf][i]<sig_thresh_fdr: #or adjusted_dict_p_values_per_tf[tf][i]<0.05:
annoted_input_ofile_onlysig.write('\t'.join(tf_motif) + '\n')
else:
if filter_on_signal:
if dict_p_values_overall_per_tf[tf][i]<sig_thresh and (float(tf_motif[dnase_index])>0.0): #or adjusted_dict_p_values_per_tf[tf][i]<0.05: # or float(tf_motif[fantom_index])>0.0 or float(tf_motif[num_other_tfs])>0.0
#tf_motif[motif_breaking_score_index+1] = str(dict_p_values_per_tf[tf][i]) + ';' + str(dict_p_values_overall_per_tf[tf][i]) + ";" + str(adjusted_dict_p_values_per_tf[tf][i]) + ";" + str(adjusted_dict_p_values_overall_per_tf[tf][i])
annoted_input_ofile_onlysig.write('\t'.join(tf_motif) + '\n')
else:
if dict_p_values_overall_per_tf[tf][i]<sig_thresh: #or adjusted_dict_p_values_per_tf[tf][i]<0.05:
#tf_motif[motif_breaking_score_index+1] = str(dict_p_values_per_tf[tf][i]) + ';' + str(dict_p_values_overall_per_tf[tf][i]) + ";" + str(adjusted_dict_p_values_per_tf[tf][i]) + ";" + str(adjusted_dict_p_values_overall_per_tf[tf][i])
annoted_input_ofile_onlysig.write('\t'.join(tf_motif) + '\n')
del dict_lines_to_keep_per_tf
del dict_p_values_per_tf
del dict_p_values_overall_per_tf
return annoted_output_file_onlysig
def calculate_p_value_motifregions(mutated_regions_list, num_muts_per_sample_dict, total_number_of_regions_tested, index_mutation_frequency=12, index_sample_ids=-1, index_elment_start_coordinate=1, index_elment_stop_coordinate=2, genome_size=3000000000.0):
    '''Attach binomial p-values and BH-corrected q-values to each mutated region.

    For each region, the expected per-base mutation probability `p` is the summed
    genome-wide mutation rate of the unique samples mutated in that region, and the
    observed mutation count is tested against Binomial(region_length, p). The raw
    p-value list is padded with 1s up to `total_number_of_regions_tested` before
    FDR correction so regions tested upstream but not reported here still count
    toward the multiple-testing burden.

    Appends two string columns (raw p-value, BH-corrected p-value) to each element
    of `mutated_regions_list` in place and returns the list.
    '''
    reported_p_values = [] # raw p-values for all regions; the full list is needed for the multiple-testing correction
    for element in mutated_regions_list:
        mutation_frequency = int(element[index_mutation_frequency])
        sample_ids = element[index_sample_ids].split(',')
        avg_proportion_of_mutations_in_the_samples_of_this_region = 0.0
        samples_counted = set() # set instead of list: O(1) per-sample dedup instead of O(n) scans
        for sample_id in sample_ids:
            if sample_id not in samples_counted:
                samples_counted.add(sample_id)
                if sample_id in num_muts_per_sample_dict: # direct dict membership, no .keys() materialization
                    avg_proportion_of_mutations_in_the_samples_of_this_region += ((float(num_muts_per_sample_dict[sample_id]))/genome_size)
        p = avg_proportion_of_mutations_in_the_samples_of_this_region#/(len(samples_counted)*1.0)
        n = (int(element[index_elment_stop_coordinate]) - int(element[index_elment_start_coordinate])) #* len(sample_id_and_number_of_mutations_per_sample_dict.keys()) #region length (end-start) multiplied by the total number of tested samples
        k = mutation_frequency
        p_val_of_this_region = 1 - (binom.cdf(k, n, p))
        reported_p_values.append(p_val_of_this_region)
    '''Extend the number of tested elements according to the given total_number_of_regions_tested; set pval of this regions not selected as 1'''
    n_elements = len(reported_p_values)
    for i in range(n_elements, total_number_of_regions_tested):
        reported_p_values.append(1)
    print("correcting p-values for multiple testing")
    if len(reported_p_values)>0:
        # multipletests returns (reject flags, corrected p-values, Sidak alpha, Bonferroni alpha)
        significant_bool_report, corrected_p_values_array, alphacSidak, alphacBonf = multipletests(reported_p_values, alpha=0.05, method='fdr_bh', returnsorted=False)
        corrected_p_values_list = corrected_p_values_array.tolist()
        for l in range(0, len(mutated_regions_list)):
            mutated_regions_list[l].append(str(reported_p_values[l]))
            mutated_regions_list[l].append(str(corrected_p_values_list[l]))
    return mutated_regions_list
def get_number_of_mutations_per_sample_list_and_write_to_file(mutations_file, numberofmutationspersample_output_file, index_sample_ids=8):
    '''Count mutations per sample and cache the counts in a tab-separated file.

    If `numberofmutationspersample_output_file` does not exist, scan
    `mutations_file` (tab-separated, sample id in column `index_sample_ids`),
    count mutations per sample, and write "sample<TAB>count" lines to the cache.
    Otherwise the cached counts are read back instead of re-scanning.

    Returns a dict mapping sample id -> mutation count.
    '''
    num_muts_per_sample_dict = {}
    if not os.path.exists(numberofmutationspersample_output_file):
        print("Counting number of mutations per sample from the initial mutation file")
        with open(mutations_file, "r") as mutations_infile:
            mutations_line = mutations_infile.readline().strip().split('\t')
            # NOTE: scanning stops at the first line with too few columns (which
            # includes EOF); assumes a uniformly formatted file.
            while len(mutations_line)>index_sample_ids:
                sample_id_of_this_mutation = mutations_line[index_sample_ids].strip()
                try:
                    num_muts_per_sample_dict[sample_id_of_this_mutation] +=1
                except KeyError:
                    num_muts_per_sample_dict[sample_id_of_this_mutation] = 1
                mutations_line = mutations_infile.readline().strip().split('\t')
        with open(numberofmutationspersample_output_file, 'w') as numberofmutationspersample_writefile:
            # iterate items() directly instead of keys() plus a second lookup
            for sample_id, count in num_muts_per_sample_dict.items():
                numberofmutationspersample_writefile.write(sample_id + "\t" + str(count) +"\n")
    else:
        # Counts were already computed on a previous run; just read them back.
        with open(numberofmutationspersample_output_file, 'r') as numberofmutationspersample_readfile:
            for line in numberofmutationspersample_readfile:
                num_muts_per_sample_dict[line.split('\t')[0].strip()] = int(line.split('\t')[1].strip())
    return num_muts_per_sample_dict
'''
'''
def get_unique_muts_from_collection(mutations_file, unique_muts_file, sample_id_index=8, mut_type_index=6, prioritize_SNP_over_indel=True):
    '''Write to `unique_muts_file` only the mutations whose (chrom, position,
    sample id) key occurs exactly once in `mutations_file`; any key seen more
    than once is dropped entirely. Returns `unique_muts_file`.

    Note: `mut_type_index` and `prioritize_SNP_over_indel` are currently unused.
    '''
    seen = {}
    with open(mutations_file, 'r') as ifile:
        fields = ifile.readline().strip().split('\t')
        # stop at the first line with too few columns (EOF included)
        while len(fields) > sample_id_index:
            key = '::'.join([fields[0], fields[1], fields[sample_id_index]])
            if key in seen:
                # mark repeats so the whole key is excluded from the output
                seen[key].append("duplicate")
                seen[key].append(fields)
            else:
                seen[key] = fields
            fields = ifile.readline().strip().split('\t')
    with open(unique_muts_file, 'w') as ofile:
        for record in seen.values():
            if 'duplicate' not in record:
                ofile.write('\t'.join(record) + '\n')
    return unique_muts_file
def get_unique_mutsmotifs_from_collection(mutations_file, unique_muts_file, sample_id_index=8, mut_type_index=6, prioritize_SNP_over_indel=True,
                                          motif_start_index=15, motif_end_index=16, motif_name_index=17):
    '''Write to `unique_muts_file` only mutation-motif lines whose key
    (chrom, position, sample id, motif start, motif end, motif name) occurs
    exactly once in `mutations_file`; duplicated keys are dropped entirely.
    Returns `unique_muts_file`.

    Mirrors get_unique_muts_from_collection but keys on the motif columns too.
    `mut_type_index` and `prioritize_SNP_over_indel` are currently unused.
    BUG FIX: removed leftover debug `print` statements and a `sys.exit(0)` that
    terminated the whole process on the first duplicate encountered.
    '''
    muts_per_position_per_sample = {}
    with open(mutations_file, 'r') as ifile:
        l = ifile.readline().strip().split('\t')
        # stop at the first line with too few columns (EOF included)
        while len(l) > motif_name_index:
            k = '::'.join([l[0], l[1], l[sample_id_index], l[motif_start_index], l[motif_end_index], l[motif_name_index]])
            try:
                # mark repeats so the whole key is excluded from the output
                muts_per_position_per_sample[k].append("duplicate")
                muts_per_position_per_sample[k].append(l)
            except KeyError:
                muts_per_position_per_sample[k] = l
            l = ifile.readline().strip().split('\t')
    with open(unique_muts_file, 'w') as ofile:
        for k in muts_per_position_per_sample.keys():
            if 'duplicate' not in muts_per_position_per_sample[k]:
                ofile.write('\t'.join(muts_per_position_per_sample[k]) + '\n')
    return unique_muts_file
def calculate_pval_for_genesets(geneset_enrichement_results_input_file, index_total_number_genes_per_set=2, index_number_enriched_genes=3, total_number_of_genes_in_the_universe=27000, total_number_of_genes_tried_in_the_search=3135, header_line = True, number_of_tried_gene_sets=24, keywords_to_filter_out_with=[]):#although not all are recognized in the pathways)
    '''Add hypergeometric enrichment p-values and BH-corrected q-values to a
    geneset-enrichment results table.

    Writes three derivatives of the input file: the full table with p/q columns
    appended, a significant-only table (raw p < 0.05), and a significant table
    filtered by `keywords_to_filter_out_with` (matched against the first two
    columns). Returns the three output paths.

    NOTE(review): `keywords_to_filter_out_with=[]` is a mutable default (never
    mutated here, so harmless); `number_of_tried_gene_sets` is unused; the opened
    file handles are never explicitly closed.
    '''
    infile = open(geneset_enrichement_results_input_file, 'r')
    # Output paths are derived from the input filename (suffix inserted before
    # the extension).
    calculated_p_value_out_file = '.'.join(geneset_enrichement_results_input_file.split('.')[0:-1]) + "_calculated_pval." + geneset_enrichement_results_input_file.split('.')[-1]
    calculated_p_value_sig_out_file = '.'.join(geneset_enrichement_results_input_file.split('.')[0:-1]) + "_calculated_pval_sig." + geneset_enrichement_results_input_file.split('.')[-1]
    calculated_p_value_sig_out_file_keywords = '.'.join(geneset_enrichement_results_input_file.split('.')[0:-1]) + "_calculated_pval_sig_keywords." + geneset_enrichement_results_input_file.split('.')[-1]
    outfile = open(calculated_p_value_out_file, "w")
    outfile_sig = open(calculated_p_value_sig_out_file, "w")
    outfile_sig_keywords = open(calculated_p_value_sig_out_file_keywords, "w")
    M = total_number_of_genes_in_the_universe  # population size for the hypergeometric test
    N = total_number_of_genes_tried_in_the_search  # number of draws (genes searched)
    sep = '\t'
    if header_line:
        header = infile.readline()
        outfile.write(header.strip() + sep + "p-value" + sep + "q-value" + "\n")
        outfile_sig.write(header.strip() + sep + "p-value" + sep + "q-value" + "\n")
        outfile_sig_keywords.write(header.strip() + sep + "p-value" + sep + "q-value" + "\n")
    inlines = infile.readlines()
    calculated_pvalues = []
    # NOTE(review): a p-value is appended only for rows whose count columns pass
    # isdigit(); if any row fails, `calculated_pvalues` falls out of register
    # with `inlines` and the write loop below mixes up rows or raises
    # IndexError. Confirm the input always has numeric count columns.
    for line in inlines:
        split_line = line.strip().split(sep)
        if split_line[index_number_enriched_genes].isdigit() and split_line[index_total_number_genes_per_set].isdigit():
            n = int(split_line[index_total_number_genes_per_set])  # geneset size
            x = int(split_line[index_number_enriched_genes])      # observed overlap
            pval = hypergeom.sf(x,M,n,N) #previous 1-h.cdf (but gives negative values
            calculated_pvalues.append(pval)
    corrected_p_values_list = []
    if len(calculated_pvalues)>1: #if only one geneset is tried then there is no need for multiple test correction
        # multipletests returns (reject flags, corrected p-values, Sidak alpha, Bonferroni alpha)
        significant_bool_report, corrected_p_values_array, alphacSidak, alphacBonf = multipletests(calculated_pvalues, alpha=0.05, method='fdr_bh', returnsorted=False)
        corrected_p_values_list = corrected_p_values_array.tolist()
    elif len(calculated_pvalues)==1:
        corrected_p_values_list = calculated_pvalues
    else:
        print "No genesets are reported, check the filters, params and gene names"
    number_of_sig_enriched_genesets = 0
    for l in range(0, len(inlines)):
        outfile.write(sep.join(inlines[l].strip().split(sep)) + sep + str(calculated_pvalues[l]) + sep + str(corrected_p_values_list[l]) +"\n")
        #write only the significant ones and satisfying the given condition
        if calculated_pvalues[l]<0.05:
            number_of_sig_enriched_genesets+=1
            outfile_sig.write(sep.join(inlines[l].strip().split(sep)) + sep + str(calculated_pvalues[l]) + sep + str(corrected_p_values_list[l]) + "\n")
            if len(keywords_to_filter_out_with)>0:
                # report the row in the keywords file when any keyword matches the
                # gene-set ID or description column (case-insensitive)
                for keyword in keywords_to_filter_out_with:
                    if keyword.upper() in inlines[l].split(sep)[0].upper() or keyword.upper() in inlines[l].split(sep)[1].upper():
                        outfile_sig_keywords.write(sep.join(inlines[l].strip().split()) + sep + str(calculated_pvalues[l]) + sep + str(corrected_p_values_list[l]) + "\n")
                        break
    print "Number of significantly enriched genesets: " + str(number_of_sig_enriched_genesets)
    return calculated_p_value_out_file, calculated_p_value_sig_out_file, calculated_p_value_sig_out_file_keywords
def find_overlap_genesets_genelist(geneset_input_file, genelist_input_file, enriched_genes_output_file, total_number_of_genes_in_the_universe=27000,
                                   min_number_of_genes_be_enriched_for_geneset_to_be_reported = 10, index_gene_name=0, index_gene_names_start=3,
                                   keywords_to_filter_out_with=[], only_keep_the_sig_file = True, min_number_of_genes_in_geneset_to_consider_the_geneset = 10, header_line = False,
                                   sample_ids_given=False):
    '''Intersect a gene list with gene sets, write per-set overlap rows, then
    compute hypergeometric enrichment p-values via calculate_pval_for_genesets.

    - `geneset_input_file`: tab-separated gene sets; gene names start at column
      `index_gene_names_start`
    - `genelist_input_file`: tab-separated gene list; gene name in column
      `index_gene_name` (last two columns are sample-id lists when
      `sample_ids_given` is True)
    NOTE(review): `only_keep_the_sig_file` is currently unused, and no return
    statement is visible in this span -- confirm whether callers use a result.
    '''
    with open(geneset_input_file, 'r') as ifile:
        genesets_lines = ifile.readlines()
    with open(genelist_input_file, 'r') as ifile:
        genelist_lines = [x.strip().split('\t') for x in ifile.readlines()]
    enriched_genes_outfile = open(enriched_genes_output_file, 'w')
    #read the gene names from the gene input list
    print "Number of genes provided for search: " + str(len(genelist_lines))
    print "Total number of genesets to try: " + str(len(genesets_lines))
    number_of_tried_genesets = 0
    if header_line:
        # NOTE(review): this header has 5 columns while data rows below write 9
        # (extra sample-count/sample-id columns) -- confirm downstream parsing.
        enriched_genes_outfile.write('ID' + "\t" + 'description' + "\t"+ 'total_number_of_genes_in_this_geneset' + "\t" + 'number_of_enriched_genes' + "\t"+ 'enriched_genes' + "\n")
    for geneset in genesets_lines:
        split_geneset_info = geneset.split('\t')
        total_number_of_genes_in_this_geneset = 0
        if index_gene_names_start>2:
            # When >2 leading metadata columns exist, the column just before the
            # gene names holds the authoritative set size (important for KEGG
            # sets, whose gene-name variants make counting unreliable).
            total_number_of_genes_in_this_geneset = int(split_geneset_info[index_gene_names_start-1])
        else:
            total_number_of_genes_in_this_geneset = len(set(split_geneset_info[index_gene_names_start::]))
        if total_number_of_genes_in_this_geneset > min_number_of_genes_in_geneset_to_consider_the_geneset:
            number_of_tried_genesets +=1
            #only search in those sets that fulfill the criteria
            enriched_genes = []
            regmut_samples = []
            mut_samples = []
            for gene_line in genelist_lines:
                if gene_line[index_gene_name].strip() in split_geneset_info[index_gene_names_start::]:
                    enriched_genes.append(gene_line[index_gene_name])
                    if sample_ids_given:
                        # last column: mutated samples; second-to-last: regulatory-mutation samples
                        mut_samples.extend(gene_line[-1].split(','))
                        regmut_samples.extend(gene_line[-2].split(','))
            if len(set(enriched_genes)) >= min_number_of_genes_be_enriched_for_geneset_to_be_reported:
                enriched_genes_outfile.write(split_geneset_info[0] + "\t" + split_geneset_info[1].replace(' - Homo sapiens (human)','') + "\t"+ str(total_number_of_genes_in_this_geneset) + "\t" + str(len(set(enriched_genes))) + '\t' + str(len(set(regmut_samples))) + "\t" + str(len(set(mut_samples))) + '\t' +','.join(set(regmut_samples)) + "\t" + ','.join(set(mut_samples)) + "\t"+ ','.join(set(enriched_genes)) +"\n")
    enriched_genes_outfile.close()
    #calculate p-values for each gene set/pathway
    calculated_p_value_out_file, calculated_p_value_sig_out_file, calculated_p_value_sig_out_file_keywords = calculate_pval_for_genesets(enriched_genes_output_file, index_total_number_genes_per_set=2, index_number_enriched_genes=3, total_number_of_genes_in_the_universe=total_number_of_genes_in_the_universe, total_number_of_genes_tried_in_the_search=len(genelist_lines), header_line = header_line, number_of_tried_gene_sets=number_of_tried_genesets, keywords_to_filter_out_with=keywords_to_filter_out_with)
    if len(keywords_to_filter_out_with)<1:
        # no keywords requested, so the keywords output file is empty; drop it
        os.remove(calculated_p_value_sig_out_file_keywords)
    del genesets_lines
    del genelist_lines
| |
[val]
for each in val:
if not isinstance(each, Variable):
raise ValueError("input of {0} must be variable".format(
op_type))
if dtype is None:
dtype = each.dtype
elif dtype != each.dtype:
raise ValueError(
"operator {0} must input same dtype. {1} vs {2}".format(
op_type, dtype, each.dtype))
return dtype
def func(**kwargs):
helper = LayerHelper(op_type, **kwargs)
dtype = infer_and_check_dtype(op_proto, **kwargs)
inputs = dict()
for ipt in op_proto.inputs:
name = _convert_(ipt.name)
val = kwargs.pop(name, [])
if not isinstance(val, list) and not isinstance(val, tuple):
val = [val]
inputs[ipt.name] = val
outputs = dict()
out = helper.create_tmp_variable(dtype=dtype)
outputs[o_name] = [out]
for name in intermediate_output_names:
outputs[name] = [helper.create_tmp_variable(dtype=dtype)]
helper.append_op(
type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs)
return helper.append_activation(out)
func.__name__ = op_type
globals()[op_type] = func
func.__doc__ = _generate_doc_string_(op_proto)
global __all__
__all__.append(op_type)
# Register simple ops as module-level layer functions.
# BUG FIX: 'reshape' was registered twice, which redefined the function and
# appended its name to __all__ a second time.
_create_op_func_('mean')
_create_op_func_('mul')
_create_op_func_('elementwise_add')
_create_op_func_('elementwise_div')
_create_op_func_('dropout')
_create_op_func_('reshape')
_create_op_func_('sigmoid')
_create_op_func_('scale')
_create_op_func_('transpose')
_create_op_func_('sigmoid_cross_entropy_with_logits')
def cast(x, dtype, main_program=None):
    """Cast variable `x` to `dtype`.

    Appends a `cast` op mapping `x`'s dtype to a fresh temporary of `dtype`
    and returns that temporary.
    """
    helper = LayerHelper('cast', **locals())  # **locals() forwards exactly the args above
    out = helper.create_tmp_variable(dtype=dtype)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype,
               'out_dtype': out.dtype})
    return out
def concat(input, axis, main_program=None, startup_program=None):
    """Concatenate the variables in `input` along `axis`.

    Returns a new temporary variable holding the concatenation; the dtype is
    inferred from the inputs via helper.input_dtype().
    """
    helper = LayerHelper('concat', **locals())  # **locals() forwards exactly the args above
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='concat',
        inputs={'X': input},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out
def sums(input, out=None, main_program=None, startup_program=None):
    """Elementwise-sum the variables in `input`.

    Writes into `out` when provided, otherwise into a fresh temporary; returns
    the output variable either way.
    """
    helper = LayerHelper('sum', **locals())  # **locals() forwards exactly the args above
    if out is None:
        out = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(type='sum', inputs={'X': input}, outputs={'Out': out})
    return out
def linear_chain_crf(input,
                     label,
                     param_attr=None,
                     main_program=None,
                     startup_program=None):
    """Append a linear-chain CRF layer over emission scores `input` and `label`.

    Creates a learnable transition parameter of shape [size + 2, size] where
    size is input.shape[1] (the extra 2 rows presumably hold start/stop
    transition weights -- confirm against the linear_chain_crf op). Alpha,
    EmissionExps and TransitionExps temporaries are created for the op, but
    only the log-likelihood variable is returned.
    """
    helper = LayerHelper('linear_chain_crf', **locals())
    size = input.shape[1]
    transition = helper.create_parameter(
        attr=helper.param_attr,
        shape=[size + 2, size],
        dtype=helper.input_dtype())
    alpha = helper.create_tmp_variable(dtype=helper.input_dtype())
    emission_exps = helper.create_tmp_variable(dtype=helper.input_dtype())
    transition_exps = helper.create_tmp_variable(dtype=helper.input_dtype())
    log_likelihood = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='linear_chain_crf',
        inputs={"Emission": [input],
                "Transition": transition,
                "Label": label},
        outputs={
            "Alpha": [alpha],
            "EmissionExps": [emission_exps],
            "TransitionExps": transition_exps,
            "LogLikelihood": log_likelihood
        })
    return log_likelihood
def crf_decoding(input,
                 param_attr,
                 label=None,
                 main_program=None,
                 startup_program=None):
    """Viterbi-decode emission scores `input` using the transition parameter
    previously registered under `param_attr.name` (e.g. by linear_chain_crf).

    Returns the decoded path variable.
    """
    helper = LayerHelper('crf_decoding', **locals())
    # Reuse the already-created transition parameter rather than making a new one.
    transition = helper.get_parameter(param_attr.name)
    viterbi_path = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='crf_decoding',
        inputs={"Emission": [input],
                "Transition": transition,
                "Label": label},
        outputs={"ViterbiPath": [viterbi_path]})
    return viterbi_path
def assign(input, output, main_program=None, startup_program=None):
    """Copy `input` into `output` (implemented as a scale-by-1.0 op) and
    return `output`."""
    helper = LayerHelper('assign', **locals())
    helper.append_op(
        type='scale',
        inputs={'X': [input]},
        outputs={'Out': [output]},
        attrs={'scale': 1.0})
    return output
def split_lod_tensor(input,
                     mask,
                     level=0,
                     main_program=None,
                     startup_program=None):
    """Split `input` by boolean `mask` at LoD `level`.

    Returns the pair (out_true, out_false): entries selected where the mask is
    true and false respectively. Counterpart of merge_lod_tensor.
    """
    helper = LayerHelper('split_lod_tensor', **locals())
    out_true = helper.create_tmp_variable(dtype=input.dtype)
    out_false = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='split_lod_tensor',
        inputs={
            'X': input,
            'Mask': mask,
        },
        outputs={'OutTrue': out_true,
                 'OutFalse': out_false},
        attrs={'level': level})
    return out_true, out_false
def merge_lod_tensor(in_true,
                     in_false,
                     x,
                     mask,
                     level=0,
                     main_program=None,
                     startup_program=None):
    """Inverse of split_lod_tensor: recombine `in_true`/`in_false` according to
    `mask` at LoD `level`, with `x` supplying the original layout.

    Returns the merged variable (dtype taken from `in_true`).
    """
    helper = LayerHelper('merge_lod_tensor', **locals())
    out = helper.create_tmp_variable(dtype=in_true.dtype)
    helper.append_op(
        type='merge_lod_tensor',
        inputs={'X': x,
                'Mask': mask,
                'InTrue': in_true,
                'InFalse': in_false},
        outputs={'Out': out},
        attrs={'level': level})
    return out
def cos_sim(X, Y, **kwargs):
    """Compute the cosine similarity between tensors X and Y.

    Appends a `cos_sim` op and returns the similarity variable; the X/Y norm
    outputs are created as temporaries but not returned.
    """
    helper = LayerHelper('cos_sim', **kwargs)
    sim_out = helper.create_tmp_variable(dtype=X.dtype)
    x_norm = helper.create_tmp_variable(dtype=X.dtype)
    y_norm = helper.create_tmp_variable(dtype=X.dtype)
    helper.append_op(
        type='cos_sim',
        inputs={'X': [X], 'Y': [Y]},
        outputs={'Out': [sim_out], 'XNorm': [x_norm], 'YNorm': [y_norm]})
    return sim_out
def cross_entropy(input, label, **kwargs):
    """Compute the cross-entropy loss of `input` against `label`.

    Extra keyword arguments are forwarded both to the LayerHelper and as op
    attributes. Returns the loss variable.
    """
    helper = LayerHelper('cross_entropy', **kwargs)
    loss = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='cross_entropy',
        inputs={'X': [input], 'Label': [label]},
        outputs={'Y': [loss]},
        attrs=kwargs)
    return loss
def square_error_cost(input, label, **kwargs):
    """Return the squared error (input - label)**2 as a variable.

    Built from an `elementwise_sub` op followed by a `square` op.
    """
    helper = LayerHelper('square_error_cost', **kwargs)
    diff = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='elementwise_sub',
        inputs={'X': [input], 'Y': [label]},
        outputs={'Out': [diff]})
    squared = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='square', inputs={'X': [diff]}, outputs={'Y': [squared]})
    return squared
def accuracy(input, label, k=1, correct=None, total=None, **kwargs):
    """Compute the top-k accuracy of predictions `input` against `label`.

    Appends a `top_k` op followed by an `accuracy` op. When `correct`/`total`
    are not supplied, int64 temporaries are created for them. Returns the
    float32 accuracy variable.
    """
    helper = LayerHelper("accuracy", **kwargs)
    top_values = helper.create_tmp_variable(dtype=input.dtype)
    top_indices = helper.create_tmp_variable(dtype="int64")
    helper.append_op(
        type="top_k",
        inputs={"X": [input]},
        outputs={"Out": [top_values], "Indices": [top_indices]},
        attrs={"k": k})
    acc_out = helper.create_tmp_variable(dtype="float32")
    if correct is None:
        correct = helper.create_tmp_variable(dtype="int64")
    if total is None:
        total = helper.create_tmp_variable(dtype="int64")
    helper.append_op(
        type="accuracy",
        inputs={
            "Out": [top_values],
            "Indices": [top_indices],
            "Label": [label]
        },
        outputs={
            "Accuracy": [acc_out],
            "Correct": [correct],
            "Total": [total],
        })
    return acc_out
def chunk_eval(input,
               label,
               chunk_scheme,
               num_chunk_types,
               excluded_chunk_types=None,
               **kwargs):
    """Evaluate chunk predictions `input` against `label`.

    Appends a `chunk_eval` op and returns the (precision, recall, f1_score)
    float32 variables.
    """
    helper = LayerHelper("chunk_eval", **kwargs)
    # prepare the three metric outputs
    precision_out = helper.create_tmp_variable(dtype="float32")
    recall_out = helper.create_tmp_variable(dtype="float32")
    f1_out = helper.create_tmp_variable(dtype="float32")
    helper.append_op(
        type="chunk_eval",
        inputs={"Inference": [input], "Label": [label]},
        outputs={
            "Precision": [precision_out],
            "Recall": [recall_out],
            "F1-Score": [f1_out]
        },
        attrs={
            "num_chunk_types": num_chunk_types,
            'chunk_scheme': chunk_scheme,
            'excluded_chunk_types': excluded_chunk_types or []
        })
    return precision_out, recall_out, f1_out
def sequence_conv(input,
                  num_filters,
                  filter_size=3,
                  filter_stride=1,
                  padding=None,
                  bias_attr=None,
                  param_attr=None,
                  act=None,
                  main_program=None,
                  startup_program=None):
    """
    This function creates the op for sequence_conv, using the inputs and
    other convolutional configurations for the filters and stride as given
    in the input parameters to the function.

    A learnable filter of shape [filter_size * input.shape[1], num_filters]
    is created, a bias is appended, and the optional activation `act` is
    applied on top. Returns the activated output variable.
    """
    # FIXME(dzh) : want to unify the argument of python layer
    # function. So we ignore some unecessary attributes.
    # such as, padding_trainable, context_start.
    helper = LayerHelper('sequence_conv', **locals())
    dtype = helper.input_dtype()
    filter_shape = [filter_size * input.shape[1], num_filters]
    filter = helper.create_parameter(
        attr=helper.param_attr, shape=filter_shape, dtype=dtype)
    pre_bias = helper.create_tmp_variable(dtype)
    helper.append_op(
        type='sequence_conv',
        inputs={
            'X': [input],
            'Filter': [filter],
        },
        outputs={"Out": pre_bias},
        attrs={
            'contextStride': filter_stride,
            # negative start centers the context window on each timestep
            'contextStart': -int(filter_size / 2),
            'contextLength': filter_size
        })
    pre_act = helper.append_bias_op(pre_bias)
    return helper.append_activation(pre_act)
def conv2d(input,
           num_filters,
           filter_size,
           stride=[1, 1],
           padding=None,
           groups=None,
           param_attr=None,
           bias_attr=None,
           act=None,
           name=None,
           main_program=None,
           startup_program=None):
    """
    This function creates the op for a 2-dimensional Convolution.
    This is performed using the parameters of filters(size, dimensionality etc)
    , stride and other configurations for a Convolution operation.
    This funciton can also append an activation on top of the
    conv-2d output, if mentioned in the input parameters.

    NOTE(review): `stride` uses a mutable list default (never mutated here, so
    harmless, but a tuple would be safer), and the op type is hard-coded to
    the cudnn kernel ('conv2d_cudnn').
    """
    helper = LayerHelper('conv2d', **locals())
    dtype = helper.input_dtype()
    num_channels = input.shape[1]  # NCHW layout: channels in dim 1
    if groups is None:
        num_filter_channels = num_channels
    else:
        if num_channels % groups != 0:
            raise ValueError("num_channels must be divisible by groups.")
        # NOTE(review): on Python 3 this is true division and yields a float
        # shape entry; '//' is likely intended -- confirm the target interpreter.
        num_filter_channels = num_channels / groups
    # normalize scalar configs to [h, w] pairs
    if isinstance(filter_size, int):
        filter_size = [filter_size, filter_size]
    if isinstance(stride, int):
        stride = [stride, stride]
    if isinstance(padding, int):
        padding = [padding, padding]
    input_shape = input.shape
    filter_shape = [num_filters, num_filter_channels] + filter_size
    def _get_default_param_initializer():
        # He/MSRA-style init: std = sqrt(2 / (k^2 * fan_in))
        std = (2.0 / (filter_size[0]**2 * num_channels))**0.5
        return Normal(0.0, std, 0)
    filter = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())
    pre_bias = helper.create_tmp_variable(dtype)
    helper.append_op(
        type='conv2d_cudnn',
        inputs={
            'Input': input,
            'Filter': filter,
        },
        outputs={"Output": pre_bias},
        attrs={'strides': stride,
               'paddings': padding,
               'groups': groups})
    # bias is broadcast over the channel dimension only
    pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    return helper.append_activation(pre_act)
def sequence_pool(input, pool_type, **kwargs):
    """
    Append a sequence_pool operator that pools the input sequence with
    the given pool_type (e.g. 'max', 'average') and return the pooled
    output variable.
    """
    helper = LayerHelper('sequence_pool', input=input, **kwargs)
    dtype = helper.input_dtype()
    out_var = helper.create_tmp_variable(dtype)
    index_var = helper.create_tmp_variable(dtype)
    # MaxIndex is produced by the op but only the pooled output is returned.
    helper.append_op(
        type="sequence_pool",
        inputs={"X": input},
        outputs={"Out": out_var, "MaxIndex": index_var},
        attrs={"pooltype": pool_type.upper()})
    return out_var
def pool2d(input,
           pool_size,
           pool_type,
           pool_stride=[1, 1],
           pool_padding=[0, 0],
           global_pooling=False,
           main_program=None,
           startup_program=None):
    """
    This function adds the operator for pooling in 2 dimensions, using the
    pooling configurations mentioned in input parameters.

    pool_type must be 'max' or 'avg'; scalar pool_size/stride/padding
    values are expanded to [h, w] pairs. Returns the pooled output
    variable.
    """
    if pool_type not in ["max", "avg"]:
        # Bug fix: the message and the value were previously passed to
        # ValueError as two separate arguments (a comma instead of '%'),
        # so the '%s' placeholder was never interpolated.
        raise ValueError(
            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))
    if isinstance(pool_size, int):
        pool_size = [pool_size, pool_size]
    if isinstance(pool_stride, int):
        pool_stride = [pool_stride, pool_stride]
    if isinstance(pool_padding, int):
        pool_padding = [pool_padding, pool_padding]
    # locals() must be captured after normalization so the helper sees the
    # [h, w] forms of the hyper-parameters.
    helper = LayerHelper('pool2d', **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_tmp_variable(dtype)
    helper.append_op(
        type="pool2d",
        inputs={"X": input},
        outputs={"Out": pool_out},
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "global_pooling": global_pooling,
            "strides": pool_stride,
            "paddings": pool_padding
        })
    return pool_out
def batch_norm(input,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
data_layout='NCHW',
main_program=None,
startup_program=None):
"""
This function helps create an operator to implement
the BatchNorm layer using the configurations from the input parameters.
"""
helper = LayerHelper('batch_norm', **locals())
dtype = helper.input_dtype()
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0))
bias = helper.create_parameter(
attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=True)
mean = helper.create_global_variable(
dtype=input.dtype, shape=param_shape, persistable=True)
helper.set_variable_initializer(var=mean, initializer=Constant(0.0))
variance = helper.create_global_variable(
dtype=input.dtype, shape=param_shape, persistable=True)
helper.set_variable_initializer(var=variance, initializer=Constant(1.0))
# create output
# mean and mean_out share the same memory
mean_out = mean
# variance and variance out share the same memory
variance_out = variance
saved_mean = helper.create_tmp_variable(dtype)
saved_variance = helper.create_tmp_variable(dtype)
batch_norm_out = helper.create_tmp_variable(dtype)
helper.append_op(
type="batch_norm",
inputs={
"X": input,
"Scale": scale,
"Bias": bias,
"Mean": mean,
"Variance": variance
},
outputs={
"Y": batch_norm_out,
"MeanOut": mean_out,
"VarianceOut": | |
"""
Code to extract a box-like region, typically for another modeler to use
as a boundary contition. In cases where it gets velocity in addition to
the rho-grid variables the grid limits mimic the standard ROMS organization,
with the outermost corners being on the rho-grid.
Job definitions are in LO_user/extract/box/job_definitions.py
Testing:
run extract_box -gtx cas6_v3_lo8b -job sequim0 -test True
same but with all flags:
run extract_box -gtx cas6_v3_lo8b -ro 2 -0 2019.07.04 -1 2019.07.06 -lt daily -job sequim0 -test True
this command replicates what post/surface0 does
run extract_box -gtx cas6_v3_lo8b -ro 2 -0 2019.07.04 -1 2019.07.04 -lt hourly -job surface0 -uv_to_rho True -surf True
or
python extract_box.py -gtx cas6_v3_lo8b -ro 2 -0 2019.07.04 -1 2019.07.04 -lt hourly -job surface0 -uv_to_rho True -surf True
Performance: this is very fast, takes just a few seconds for three days on boiler (for yang_sequim).
"""
# imports
import sys
import argparse
from lo_tools import Lfun, zfun, zrfun
from subprocess import Popen as Po
from subprocess import PIPE as Pi
import os
from time import time
import numpy as np
import xarray as xr
# Top-level setup: parse the command line, locate model output, and prepare
# the output/temp directories and the list of history files to extract from.
pid = os.getpid()
print(' extract_box '.center(60,'='))
print('PID for this job = ' + str(pid))
# command line arguments
parser = argparse.ArgumentParser()
# which run to use
parser.add_argument('-gtx', '--gtagex', type=str) # e.g. cas6_v3_l08b
parser.add_argument('-ro', '--roms_out_num', type=int) # 2 = Ldir['roms_out2'], etc.
# select time period and frequency
parser.add_argument('-0', '--ds0', type=str) # e.g. 2019.07.04
parser.add_argument('-1', '--ds1', type=str) # e.g. 2019.07.06
parser.add_argument('-lt', '--list_type', type=str) # list type: hourly, daily, weekly
# select job name
parser.add_argument('-job', type=str) # job name
# these flags get only surface or bottom fields if True
# - cannot have both True -
parser.add_argument('-surf', default=False, type=Lfun.boolean_string)
parser.add_argument('-bot', default=False, type=Lfun.boolean_string)
# set this to True to interpolate all u, and v fields to the rho-grid
parser.add_argument('-uv_to_rho', default=False, type=Lfun.boolean_string)
# Optional: set max number of subprocesses to run at any time
parser.add_argument('-Nproc', type=int, default=10)
# Optional: for testing
parser.add_argument('-test', '--testing', default=False, type=Lfun.boolean_string)
# get the args and put into Ldir
args = parser.parse_args()
# test that main required arguments were provided
argsd = args.__dict__
for a in ['gtagex']:
    if argsd[a] == None:
        print('*** Missing required argument: ' + a)
        sys.exit()
gridname, tag, ex_name = args.gtagex.split('_')
# get the dict Ldir
Ldir = Lfun.Lstart(gridname=gridname, tag=tag, ex_name=ex_name)
# add more entries to Ldir (command-line args that Lstart did not set)
for a in argsd.keys():
    if a not in Ldir.keys():
        Ldir[a] = argsd[a]
# testing: hard-wire a short, known-good extraction window
if Ldir['testing']:
    Ldir['roms_out_num'] = 2
    Ldir['ds0'] = '2019.07.04'
    Ldir['ds1'] = '2019.07.06'
    Ldir['list_type'] = 'daily'
# set where to look for model output (0 keeps the default roms_out)
if Ldir['roms_out_num'] == 0:
    pass
elif Ldir['roms_out_num'] > 0:
    Ldir['roms_out'] = Ldir['roms_out' + str(Ldir['roms_out_num'])]
# check for input conflicts:
if Ldir['surf'] and Ldir['bot']:
    print('Error: cannot have surf and bot both True.')
    sys.exit()
# output location; the filename encodes job, surf/bot flavor, and date range
out_dir = Ldir['LOo'] / 'extract' / Ldir['gtagex'] / 'box'
Lfun.make_dir(out_dir)
if Ldir['surf']:
    box_fn = out_dir / (Ldir['job'] + '_surf_' + Ldir['ds0'] + '_' + Ldir['ds1'] + '.nc')
elif Ldir['bot']:
    box_fn = out_dir / (Ldir['job'] + '_bot_' + Ldir['ds0'] + '_' + Ldir['ds1'] + '.nc')
else:
    box_fn = out_dir / (Ldir['job'] + '_' + Ldir['ds0'] + '_' + Ldir['ds1'] + '.nc')
box_fn.unlink(missing_ok=True)
# name the temp dir to accumulate individual extractions
temp_dir = out_dir / ('temp_' + Ldir['job'])
Lfun.make_dir(temp_dir, clean=True)
# get list of files to work on
fn_list = Lfun.get_fn_list(Ldir['list_type'], Ldir, Ldir['ds0'], Ldir['ds1'])
if Ldir['testing']:
    fn_list = fn_list[:5]
# grid info from the first history file; Lon/Lat are the 1-D rho-grid axes
G, S, T = zrfun.get_basic_info(fn_list[0])
Lon = G['lon_rho'][0,:]
Lat = G['lat_rho'][:,0]
def check_bounds(lon, lat):
    """Return (ilon, ilat), the nearest rho-grid indices for lon/lat.

    Exits the program if the point falls outside the model domain
    spanned by the module-level Lon/Lat axis vectors.
    """
    if lon < Lon[0] or lon > Lon[-1]:
        print('ERROR: lon out of bounds ')
        sys.exit()
    if lat < Lat[0] or lat > Lat[-1]:
        print('ERROR: lat out of bounds ')
        sys.exit()
    # nearest-neighbor indices on each axis
    return zfun.find_nearest_ind(Lon, lon), zfun.find_nearest_ind(Lat, lat)
# get the indices and check that they are in the grid
pth = Ldir['LOu'] / 'extract' / 'box'
if str(pth) not in sys.path:
sys.path.append(str(pth))
import job_definitions
from importlib import reload
reload(job_definitions)
aa, vn_list = job_definitions.get_box(Ldir['job'], Lon, Lat)
lon0, lon1, lat0, lat1 = aa
ilon0, ilat0 = check_bounds(lon0, lat0)
ilon1, ilat1 = check_bounds(lon1, lat1)
# NOTE: ncks indexing is zero-based but is INCLUSIVE of the last point.
# NOTE: ncks extractions retain singleton dimensions
# do the extractions
N = len(fn_list)
proc_list = []
tt0 = time()
print('Working on ' + box_fn.name + ' (' + str(N) + ' times)')
for ii in range(N):
fn = fn_list[ii]
sys.stdout.flush()
# extract one day at a time using ncks
count_str = ('000000' + str(ii))[-6:]
out_fn = temp_dir / ('box_' + count_str + '.nc')
cmd_list1 = ['ncks',
'-v', vn_list,
'-d', 'xi_rho,'+str(ilon0)+','+str(ilon1), '-d', 'eta_rho,'+str(ilat0)+','+str(ilat1),
'-d', 'xi_u,'+str(ilon0)+','+str(ilon1-1), '-d', 'eta_u,'+str(ilat0)+','+str(ilat1),
'-d', 'xi_v,'+str(ilon0)+','+str(ilon1), '-d', 'eta_v,'+str(ilat0)+','+str(ilat1-1)]
if Ldir['surf']:
cmd_list1 += ['-d','s_rho,'+str(S['N']-1)]
elif Ldir['bot']:
cmd_list1 += ['-d','s_rho,0']
cmd_list1 += ['-O', str(fn), str(out_fn)]
proc = Po(cmd_list1, stdout=Pi, stderr=Pi)
proc_list.append(proc)
# screen output about progress
if (np.mod(ii,10) == 0) and ii>0:
print(str(ii), end=', ')
sys.stdout.flush()
if (np.mod(ii,50) == 0) and (ii > 0):
print('') # line feed
sys.stdout.flush()
if (ii == N-1):
print(str(ii))
sys.stdout.flush()
# Nproc controls how many ncks subprocesses we allow to stack up
# before we require them all to finish.
if ((np.mod(ii,Ldir['Nproc']) == 0) and (ii > 0)) or (ii == N-1):
for proc in proc_list:
proc.communicate()
# make sure everyone is finished before continuing
proc_list = []
ii += 1
# Ensure that all days have the same fill value. This was required for cas6_v3_lo8b
# when passing from 2021.10.31 to 2021.11.01 because they had inconsistent fill values,
# which leaks through the ncrcat call below.
tt1 = time()
enc_dict = {'_FillValue':1e20}
vn_List = vn_list.split(',')
Enc_dict = {vn:enc_dict for vn in vn_List}
for out_fn in list(temp_dir.glob('box_*.nc')):
ds = xr.load_dataset(out_fn) # need to load, not open, for overwrite
ds.to_netcdf(out_fn, encoding=Enc_dict)
ds.close()
print(' - Time for adding fill value = %0.2f sec' % (time()- tt1))
# concatenate the records into one file
# This bit of code is a nice example of how to replicate a bash pipe
pp1 = Po(['ls', str(temp_dir)], stdout=Pi)
pp2 = Po(['grep','box'], stdin=pp1.stdout, stdout=Pi)
cmd_list = ['ncrcat','-p', str(temp_dir), '-O', str(box_fn)]
proc = Po(cmd_list, stdin=pp2.stdout, stdout=Pi, stderr=Pi)
stdout, stderr = proc.communicate()
if Ldir['testing']:
if len(stdout) > 0:
print('\n'+stdout.decode())
if len(stderr) > 0:
print('\n'+stderr.decode())
print('Time for initial extraction = %0.2f sec' % (time()- tt0))
# add z variables
if (Ldir['surf']==False) and (Ldir['bot']==False):
tt0 = time()
ds = xr.load_dataset(box_fn) # have to load in order to add new variables
NT, N, NR, NC = ds.salt.shape
ds['z_rho'] = (('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), np.nan*np.ones((NT, N, NR, NC)))
ds['z_w'] = (('ocean_time', 's_w', 'eta_rho', 'xi_rho'), np.nan*np.ones((NT, N+1, NR, NC)))
ds.z_rho.attrs = {'units':'m', 'long_name': 'vertical position on s_rho grid, positive up'}
ds.z_w.attrs = {'units':'m', 'long_name': 'vertical position on s_w grid, positive up'}
for ii in range(NT):
h = ds.h.values
zeta = ds.zeta[ii,:,:].values
z_rho, z_w = zrfun.get_z(h, zeta, S)
ds['z_rho'][ii,:,:,:] = z_rho
ds['z_w'][ii,:,:,:] = z_w
ds.to_netcdf(box_fn)
ds.close()
print('Time to add z variables = %0.2f sec' % (time()- tt0))
if Ldir['uv_to_rho']:
# interpolate anything on the u and v grids to the rho grid, assuming
# zero values where masked, and leaving a masked ring around the outermost edge
tt0 = time()
ds = xr.load_dataset(box_fn) # have to load in order to add new variables
Maskr = ds.mask_rho.values == 1 # True over water
NR, NC = Maskr.shape
for vn in ds.data_vars:
if ('xi_u' in ds[vn].dims) and ('ocean_time' in ds[vn].dims):
if len(ds[vn].dims) == 4:
uu = ds[vn].values
NT, N, NRu, NCu = uu.shape
uu[np.isnan(uu)] = 0
UU = (uu[:,:,1:-1,1:]+uu[:,:,1:-1,:-1])/2
uuu = np.nan * np.ones((NT, N, NR, NC))
uuu[:,:,1:-1,1:-1] = UU
Maskr3 = np.tile(Maskr.reshape(1,1,NR,NC),[NT,N,1,1])
uuu[~Maskr3] = np.nan
ds.update({vn:(('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), uuu)})
elif len(ds[vn].dims) == 3:
uu = ds[vn].values
NT, NRu, NCu = uu.shape
uu[np.isnan(uu)] = 0
UU = (uu[:,1:-1,1:]+uu[:,1:-1,:-1])/2
uuu = np.nan * np.ones((NT, NR, NC))
uuu[:,1:-1,1:-1] = UU
Maskr3 = np.tile(Maskr.reshape(1,NR,NC),[NT,1,1])
uuu[~Maskr3] = np.nan
ds.update({vn:(('ocean_time', 'eta_rho', 'xi_rho'), uuu)})
elif ('xi_v' in ds[vn].dims) and ('ocean_time' in ds[vn].dims):
if len(ds[vn].dims) == 4:
vv = ds[vn].values
NT, N, NRv, NCv = vv.shape
vv[np.isnan(vv)] = 0
VV = (vv[:,:,1:,1:-1]+vv[:,:,:-1,1:-1])/2
vvv = np.nan * np.ones((NT, N, NR, NC))
vvv[:,:,1:-1,1:-1] = VV
Maskr3 = np.tile(Maskr.reshape(1,1,NR,NC),[NT,N,1,1])
vvv[~Maskr3] = np.nan
ds.update({vn:(('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), vvv)})
elif len(ds[vn].dims) == 3:
vv = ds[vn].values
NT, NRv, NCv = vv.shape
vv[np.isnan(vv)] = 0
VV = (vv[:,1:,1:-1]+vv[:,:-1,1:-1])/2
vvv = np.nan * np.ones((NT, NR, NC))
vvv[:,1:-1,1:-1] = VV
Maskr3 = np.tile(Maskr.reshape(1,NR,NC),[NT,1,1])
vvv[~Maskr3] = np.nan
ds.update({vn:(('ocean_time', 'eta_rho', 'xi_rho'), vvv)})
ds.to_netcdf(box_fn)
ds.close()
print('Time to interpolate uv variables to rho grid = %0.2f sec' % (time()- tt0))
# squeeze and compress the resulting file
tt0 = time()
ds = xr.load_dataset(box_fn)
ds = ds.squeeze() # remove singleton dimensions
enc_dict = {'zlib':True, 'complevel':1, '_FillValue':1e20}
Enc_dict = {vn:enc_dict | |
<gh_stars>0
# -*- coding: utf-8 -*-
#
# awsdbrparser/parser.py
#
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import collections
import csv
import json
import threading
import time
import os
import boto3
import click
from elasticsearch import Elasticsearch, RequestsHttpConnection, helpers
from requests_aws4auth import AWS4Auth
import requests
from . import utils
from .config import PROCESS_BY_BULK, PROCESS_BY_LINE, PROCESS_BI_ONLY, HEADERS
# Immutable record summarising one parser run: counts of documents added,
# skipped and updated, plus the number of control messages encountered.
Summary = collections.namedtuple('Summary', 'added skipped updated control_messages')
"""
Holds the summary of documents processed by the parser.
"""
class ParserError(Exception):
    """Exception type for DBR-parser-specific failures."""
    pass
def analytics(config, echo):
    """
    Generate extra analytics documents in Elasticsearch by analyzing the
    line items of the DBR file: per-hour EC2-per-USD figures (index
    'ec2_per_usd') and per-day elasticity/coverage figures (index
    'elasticity').

    :param config: parser configuration (input filename, ES endpoint,
        es2/custom flags, csv delimiter, auth settings).
    :param echo: callable used for progress/error output.
    :return: None
    """
    # Opening Input filename again to run in parallel
    # NOTE(review): file_in is not closed if an exception escapes before the
    # final close(); consider a 'with' block.
    file_in = open(config.input_filename, 'r')
    awsauth = None
    if config.awsauth:
        # In our experiment the billing file was copied to the proxy server's
        # local directory with the AWS CLI, without AWS authentication, so
        # the proxy-server -> ES hop needs no auth and this code can be
        # skipped.
        session = boto3.Session()
        credentials = session.get_credentials()
        if credentials:
            region = session.region_name
            awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, 'es',
                               session_token=credentials.token)
    # Consider adding a check here for writing to a newer ES version; for now
    # writing to the local ES node is tested first.
    # Tested: the Python SDK has sparse documentation for security support and
    # no suitable SDK approach was found. For maintainability and extensibility,
    # the billing index and its mapping are pre-created here.
    # Consider using the officially recommended REST API to write data into
    # newer, secured ES clusters.
    # es = Elasticsearch([{'host': config.es_host, 'port': config.es_port}], timeout=config.es_timeout, http_auth=awsauth,
    #                    connection_class=RequestsHttpConnection)
    es = Elasticsearch("http://{}@{}:{}".format(config.key, config.es_host, config.es_port))  # from this point the script no longer supports AWS auth inside the script
    if not config.custom:
        es.indices.create(config.index_name, ignore=400)
        es.indices.create(config.es_doctype, ignore=400)
    else:
        pass
    # the new-style billing index is created below
    csv_file = csv.DictReader(file_in, delimiter=config.csv_delimiter)
    # accumulators keyed by full timestamp and by date-only string
    analytics_daytime = dict()
    analytics_day_only = dict()
    for recno, json_row in enumerate(csv_file):
        # Pre-Process the row to append extra information
        json_row = utils.pre_process(json_row)
        if is_control_message(json_row, config):
            # Skip this line
            continue
        elif json_row.get('ProductName') == 'Amazon Elastic Compute Cloud' and 'RunInstances' in json_row.get(
                'Operation') and json_row.get('UsageItem'):
            # Get the day time ('2016-03-01 01:00:00')
            daytime = json_row.get('UsageStartDate')
            # the day only '2016-03-01'
            day = json_row.get('UsageStartDate').split(' ')[0]
            # Add the day time to the dict
            analytics_daytime.setdefault(daytime, {"Count": 0, "Cost": 0.00, "RI": 0, "Spot": 0, "Unblended": 0.00})
            # Increment the count of total instances
            analytics_daytime[daytime]["Count"] += 1
            analytics_daytime[daytime]["Unblended"] += float(json_row.get('UnBlendedCost', 0.00))
            analytics_daytime[daytime]["Cost"] += float(json_row.get('Cost', 0.00))
            # Add the day only to the dict
            analytics_day_only.setdefault(day, {"Count": 0, "RI": 0, "Spot": 0, "Min": None, "Max": None})
            analytics_day_only[day]["Count"] += 1
            # Increment the count of RI or Spot if the instance is one or other
            if json_row.get('UsageItem') == 'Reserved Instance':
                analytics_day_only[day]["RI"] += 1
                analytics_daytime[daytime]["RI"] += 1
            elif json_row.get('UsageItem') == 'Spot Instance':
                analytics_day_only[day]["Spot"] += 1
                analytics_daytime[daytime]["Spot"] += 1
    # Some DBR files has Cost (Single Account) and some has (Un)BlendedCost (Consolidated Account)
    # In this case we try to process both, but one will be zero and we need to check
    # TODO: use a single variable and an flag to output Cost or Unblended
    # TODO: needs testing -- can ec2-per-usd be created under ES 7? The logic
    # here is questionable: one index with multiple types?
    # The catch here is in export-cn.json: the index has already been created.
    if config.es2:
        index_name = config.index_name
    else:
        index_name = 'ec2_per_usd'
    # if not es.indices.exists(index=index_name):  # original code; reworked below because index usage changed between ES 2.3 and 7.3
    if not es.indices.exists(index="ec2_per_usd"):  # create the index and mapping if ec2_per_usd does not exist
        if not config.custom:
            es.indices.create(index_name, ignore=400, body={
                "mappings": {
                    "ec2_per_usd": {
                        "properties": {
                            "UsageStartDate": {
                                "type": "date", "format": "YYYY-MM-dd HH:mm:ss||YYYY/MM/dd HH:mm||YYYY/M/d H:mm"
                            }
                        }
                    }
                }
            })
        else:
            # -C flag path: create the index over REST instead of the SDK
            echo("在-C参数控制下,通过rest创建索引ec2_per_usd, version {}".format(config.es2))
            url = "http://{}@{}:{}/{}".format(config.key, config.es_host, config.es_port, "ec2_per_usd")
            r = requests.put(url, headers=HEADERS, json={"settings": {"number_of_replicas": 0}, "mappings": {
                "properties": {
                    "UsageStartDate": {
                        "type": "date",
                        "format": "YYYY/M/d HH:mm:ss||YYYY/MM/dd HH:mm||YYYY/M/d H:mm||YYYY/MM/dd HH:mm:ss"
                                  "||YYYY-MM-dd HH:mm:ss"
                    }
                }
            }})
            if not r.ok:
                echo("ec2_per_usd的mapping创建失败,请检查{}".format(r.json()))
    # EC2-per-USD: instances bought per dollar, from the hourly accumulators
    for k, v in analytics_daytime.items():
        result_cost = 1.0 / (v.get('Cost') / v.get('Count')) if v.get('Cost') else 0.00
        result_unblended = 1.0 / (v.get('Unblended') / v.get('Count')) if v.get('Unblended') else 0.0
        if not config.custom:
            response = es.index(index=index_name, doc_type='ec2_per_usd',  # index sub-types can be used like this under ES 2.3
                                body={'UsageStartDate': k,
                                      'EPU_Cost': result_cost,
                                      'EPU_UnBlended': result_unblended})
            if not response.get('created'):
                echo('[!] Unable to send document to ES!')
        else:
            # echo("接下来写入ec2_per_usd索引数据, version {}".format(config.es2))
            url = "http://{}@{}:{}/{}/_doc".format(config.key, config.es_host, config.es_port, "ec2_per_usd")
            r = requests.post(url, headers=HEADERS, json={
                'UsageStartDate': k,
                "EPU_Cost": result_cost,
                "EPU_UnBlended": result_unblended
            })
            if not r.ok:
                echo("ec2_per_usd索引数据写入失败,请检查: {}".format(r.json()))
    # Elasticity
    #
    # The calculation is 1 - min / max EC2 instances per day
    # The number of EC2 instances has been calculated previously
    #
    if config.es2:
        index_name = config.index_name
    else:
        index_name = 'elasticity'
    # if not es.indices.exists(index=index_name):  # original code; rewritten below because the ES 7.3 index API changed
    if not es.indices.exists(index="elasticity"):  # True/False -- just checks whether the index exists; this create() usage is an ES 2.3 feature
        if not config.custom:
            es.indices.create(index_name, ignore=400, body={
                "mappings": {
                    "elasticity": {
                        "properties": {
                            "UsageStartDate": {
                                "type": "date", "format": "YYYY-MM-dd HH:mm:ss||YYYY/MM/dd HH:mm||YYYY/M/d H:mm"
                            }
                        }
                    }
                }
            })
        else:
            # controlled by the -C flag
            url = "http://{}@{}:{}/{}".format(config.key, config.es_host, config.es_port, "elasticity")
            r = requests.put(url, headers=HEADERS, json={
                "settings": {"number_of_replicas": 0},
                "mappings": {
                    "properties": {
                        "UsageStartDate": {
                            "type": "date",
                            "format": "YYYY/M/d HH:mm:ss||YYYY/MM/dd HH:mm||YYYY/M/d H:mm||YYYY/MM/dd HH:mm:ss"
                                      "||YYYY-MM-dd HH:mm:ss"
                        }
                    }
                }
            })
            if not r.ok:
                echo("elasticity的索引mapping失败,请检查: {}".format(r.json()))
    # daily elasticity and RI/Spot coverage; note 'k in key' matches the
    # date prefix of each hourly timestamp key
    for k, v in analytics_day_only.items():
        ec2_min = min(value["Count"] - value["RI"] for key, value in analytics_daytime.items() if k in key)
        ec2_max = max(value["Count"] - value["RI"] for key, value in analytics_daytime.items() if k in key)
        if ec2_max:
            elasticity = 1.0 - float(ec2_min) / float(ec2_max)
        else:
            elasticity = 1.0
        ri_coverage = float(analytics_day_only[k]["RI"]) / float(analytics_day_only[k]["Count"])
        spot_coverage = float(analytics_day_only[k]["Spot"]) / float(analytics_day_only[k]["Count"])
        if not config.custom:
            response = es.index(index=index_name, doc_type='elasticity',
                                body={'UsageStartDate': k + ' 12:00:00',
                                      'Elasticity': elasticity,
                                      'ReservedInstanceCoverage': ri_coverage,
                                      'SpotCoverage': spot_coverage})
            if not response.get('created'):
                echo('[!] Unable to send document to ES!')
        else:
            # echo("此处通过rest创建elasticity的mapping, setter: config.es2: version {}".format(config.es2))
            url = "http://{}@{}:{}/{}/_doc".format(config.key, config.es_host, config.es_port, "elasticity")
            r = requests.post(url, headers=HEADERS, json={
                'UsageStartDate': k + ' 12:00:00',
                "Elasticity": elasticity,
                "ReservedInstanceCoverage": ri_coverage,
                'SpotCoverage': spot_coverage
            })
            if not r.ok:
                echo("elasticity索引写入失败,请检查: {}".format(r.json()))
    file_in.close()
    # Finished Processing
    return
def parse(config, verbose=False):
"""
:param verbose:
:param config: An instance of :class:`~awsdbrparser.config.Config` class,
used for parsing parametrization.
:rtype: Summary
"""
echo = utils.ClickEchoWrapper(quiet=(not verbose))
echo('Opening input file: {}'.format(config.input_filename))
file_in = open(config.input_filename, 'r')
# 输出到文件和es节点两种情况
if config.output_to_file:
echo('Opening output file: {}'.format(config.output_filename))
file_out = open(config.output_filename, 'w')
elif config.output_to_elasticsearch:
echo('Sending DBR to Elasticsearch host: {}:{}'.format(config.es_host, config.es_port))
awsauth = None
if config.awsauth:
session = boto3.Session()
credentials = session.get_credentials()
if credentials:
region = session.region_name
awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, 'es',
session_token=credentials.token)
# 使得sdk的bulk方法支持用户名密码认证
if not config.custom:
es = Elasticsearch([{'host': config.es_host, 'port': config.es_port}], timeout=config.es_timeout,
http_auth=awsauth, connection_class=RequestsHttpConnection)
else:
es = Elasticsearch("http://{}@{}:{}".format(config.key, config.es_host, config.es_port))
if not config.custom:
if config.delete_index:
echo('Deleting current index: {}'.format(config.index_name))
es.indices.delete(config.index_name, ignore=404)
es.indices.create(config.index_name, ignore=400)
es.indices.put_mapping(index=config.index_name, doc_type=config.es_doctype, body=config.mapping)
else:
# 主索引的创建
# TODO es7 security
url = "http://{}@{}:{}/{}".format(config.key, config.es_host, config.es_port, config.index_name)
if config.delete_index:
echo('Deleting current index: {}'.format(config.index_name))
requests.delete(url, headers=HEADERS)
# 上方,通过delete_index清理原有index数据,下面创建新的index
echo("创建billing-*的index,更新mapping {}".format(config.es2))
# 创建索引和mapping一起完成
fp = os.path.join(os.path.dirname(__file__), "data", "dbr_doctype_es6x.json")
mappings = {"settings": {"number_of_replicas": 0}, "mappings": json.load(open(fp))}
r = requests.put(url, headers=HEADERS, json=utils.unicode_convert(mappings))
if not r.ok:
echo("mappings: {}:{}".format(type(utils.unicode_convert(mappings)),
utils.unicode_convert(mappings)))
echo("billing索引mapping失败,请检查: {}".format(r.json()))
# 日志的显示情况?
if verbose:
progressbar = click.progressbar
# calculate number of rows in input file in preparation to display a progress bar
record_count = sum(1 for _ in file_in) - 1
file_in.seek(0) # reset file descriptor
echo("Input file has {} record(s)".format(record_count))
if config.process_mode == PROCESS_BY_BULK:
echo('Processing in BULK MODE, size: {}'.format(config.bulk_size))
elif config.process_mode == PROCESS_BY_LINE:
echo('Processing in LINE MODE')
elif config.process_mode == PROCESS_BI_ONLY:
if config.analytics:
echo('Processing BI Only')
else:
echo("You don't have set the parameter -bi. Nothing to do.")
else:
# uses a 100% bug-free progressbar, guaranteed :-)
progressbar = utils.null_progressbar
record_count = 0
# If BI is enabled, create a thread and start running 此处预先处理elasticity和ec2-per-usd两个索引
analytics_start = time.time()
if config.analytics:
echo('Starting the BI Analytics Thread')
thread = threading.Thread(target=analytics, args=(config, echo,))
thread.start()
added = skipped = updated = control = 0
if config.process_mode == PROCESS_BY_BULK:
with progressbar(length=record_count) as pbar:
# If you wish to sort the records by UsageStartDate before send to
# ES just uncomment the 2 lines below and comment the third line
# reader = csv.DictReader(file_in, delimiter=config.csv_delimiter)
# csv_file = sorted(reader, key=lambda line: line["UsageStartDate"]+line["UsageEndDate"])
csv_file = csv.DictReader(file_in, delimiter=config.csv_delimiter)
def documents():
for json_row in csv_file:
if not is_control_message(json_row, config):
if config.debug:
print(json.dumps( # do not use 'echo()' here
utils.pre_process(json_row)))
yield json.dumps(utils.pre_process(json_row)) # 此处对json_row进行了处理
pbar.update(1)
# 此处在批量写入账单数据
if not config.custom:
for recno, (success, result) in enumerate(helpers.streaming_bulk(es, documents(),
index=config.index_name,
doc_type=config.es_doctype,
chunk_size=config.bulk_size)):
# <recno> integer, the record number (0-based)
# <success> bool
# <result> a dictionary like this one:
#
# {
# 'create': {
# 'status': 201,
# '_type': 'billing',
# '_shards': {
| |
<reponame>himanshu-setia/myKeystone
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import random
from testtools import matchers
import uuid
import fixtures
from lxml import etree
import mock
from oslo_config import cfg
from oslo_log import versionutils
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslotest import mockpatch
import saml2
from saml2 import saml
from saml2 import sigver
from six.moves import http_client
from six.moves import range, urllib, zip
xmldsig = importutils.try_import("saml2.xmldsig")
if not xmldsig:
xmldsig = importutils.try_import("xmldsig")
from keystone.auth import controllers as auth_controllers
from keystone.common import environment
from keystone.contrib.federation import routers
from keystone import exception
from keystone.federation import controllers as federation_controllers
from keystone.federation import idp as keystone_idp
from keystone import notifications
from keystone.tests import unit
from keystone.tests.unit import core
from keystone.tests.unit import federation_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import mapping_fixtures
from keystone.tests.unit import test_v3
from keystone.tests.unit import utils
from keystone.token.providers import common as token_common
subprocess = environment.subprocess
CONF = cfg.CONF
ROOTDIR = os.path.dirname(os.path.abspath(__file__))
XMLDIR = os.path.join(ROOTDIR, 'saml2/')
def dummy_validator(*args, **kwargs):
    """No-op validator accepting any arguments; used to bypass validation."""
class FederationTests(test_v3.RestfulTestCase):
    """Tests for the legacy federation extension router."""
    @mock.patch.object(versionutils, 'report_deprecated_feature')
    def test_exception_happens(self, mock_deprecator):
        # Instantiating the legacy extension must emit a deprecation warning
        # whose message tells operators to remove federation_extension.
        routers.FederationExtension(mock.ANY)
        mock_deprecator.assert_called_once_with(mock.ANY, mock.ANY)
        args, _kwargs = mock_deprecator.call_args
        self.assertIn("Remove federation_extension from", args[1])
class FederatedSetupMixin(object):
    """Shared constants and helper methods for federation test cases."""
    ACTION = 'authenticate'
    IDP = 'ORG_IDP'
    PROTOCOL = 'saml2'
    AUTH_METHOD = 'saml2'
    USER = 'user@ORGANIZATION'
    ASSERTION_PREFIX = 'PREFIX_'
    IDP_WITH_REMOTE = 'ORG_IDP_REMOTE'
    REMOTE_IDS = ['entityID_IDP1', 'entityID_IDP2']
    # Random attribute name, generated once per test-process run.
    REMOTE_ID_ATTR = uuid.uuid4().hex
    # Canonical body of an unscoped saml2 v3 authentication request.
    UNSCOPED_V3_SAML2_REQ = {
        "identity": {
            "methods": [AUTH_METHOD],
            AUTH_METHOD: {
                "identity_provider": IDP,
                "protocol": PROTOCOL
            }
        }
    }
def _check_domains_are_valid(self, token):
self.assertEqual('Federated', token['user']['domain']['id'])
self.assertEqual('Federated', token['user']['domain']['name'])
def _project(self, project):
return (project['id'], project['name'])
def _roles(self, roles):
return set([(r['id'], r['name']) for r in roles])
def _check_projects_and_roles(self, token, roles, projects):
"""Check whether the projects and the roles match."""
token_roles = token.get('roles')
if token_roles is None:
raise AssertionError('Roles not found in the token')
token_roles = self._roles(token_roles)
roles_ref = self._roles(roles)
self.assertEqual(token_roles, roles_ref)
token_projects = token.get('project')
if token_projects is None:
raise AssertionError('Projects not found in the token')
token_projects = self._project(token_projects)
projects_ref = self._project(projects)
self.assertEqual(token_projects, projects_ref)
def _check_scoped_token_attributes(self, token):
for obj in ('user', 'catalog', 'expires_at', 'issued_at',
'methods', 'roles'):
self.assertIn(obj, token)
os_federation = token['user']['OS-FEDERATION']
self.assertIn('groups', os_federation)
self.assertIn('identity_provider', os_federation)
self.assertIn('protocol', os_federation)
self.assertThat(os_federation, matchers.HasLength(3))
self.assertEqual(self.IDP, os_federation['identity_provider']['id'])
self.assertEqual(self.PROTOCOL, os_federation['protocol']['id'])
    def _check_project_scoped_token_attributes(self, token, project_id):
        """Assert *token* is scoped to *project_id* and is well-formed."""
        self.assertEqual(project_id, token['project']['id'])
        self._check_scoped_token_attributes(token)
    def _check_domain_scoped_token_attributes(self, token, domain_id):
        """Assert *token* is scoped to *domain_id* and is well-formed."""
        self.assertEqual(domain_id, token['domain']['id'])
        self._check_scoped_token_attributes(token)
    def assertValidMappedUser(self, token):
        """Check if user object meets all the criteria.

        The user must carry id/name/domain plus the OS-FEDERATION
        groups/identity_provider/protocol sub-keys, and its id must be
        the url-quoted form of its name.
        """
        user = token['user']
        self.assertIn('id', user)
        self.assertIn('name', user)
        self.assertIn('domain', user)
        self.assertIn('groups', user['OS-FEDERATION'])
        self.assertIn('identity_provider', user['OS-FEDERATION'])
        self.assertIn('protocol', user['OS-FEDERATION'])
        # Make sure user_id is url safe
        self.assertEqual(urllib.parse.quote(user['name']), user['id'])
def _issue_unscoped_token(self,
                          idp=None,
                          assertion='EMPLOYEE_ASSERTION',
                          environment=None):
    """Simulate federated authentication and return the unscoped token response."""
    api = federation_controllers.Auth()
    context = {'environment': environment or {}}
    self._inject_assertion(context, assertion)
    effective_idp = self.IDP if idp is None else idp
    return api.federated_authentication(context, effective_idp, self.PROTOCOL)
def idp_ref(self, id=None):
    """Return a reference dict describing an identity provider."""
    ref = {
        'id': id or uuid.uuid4().hex,
        'enabled': True,
        'description': uuid.uuid4().hex,
    }
    return ref
def proto_ref(self, mapping_id=None):
    """Return a reference dict describing a federation protocol."""
    ref = {'id': uuid.uuid4().hex}
    ref['mapping_id'] = mapping_id or uuid.uuid4().hex
    return ref
def mapping_ref(self, rules=None):
    """Return a reference dict for a mapping, defaulting to the sample rules."""
    effective_rules = rules or self.rules['rules']
    return {'id': uuid.uuid4().hex, 'rules': effective_rules}
def _scope_request(self, unscoped_token_id, scope, scope_id):
return {
'auth': {
'identity': {
'methods': [
self.AUTH_METHOD
],
self.AUTH_METHOD: {
'id': unscoped_token_id
}
},
'scope': {
scope: {
'id': scope_id
}
}
}
}
def _inject_assertion(self, context, variant, query_string=None):
    """Merge the named fixture assertion into the request environment."""
    context['environment'].update(getattr(mapping_fixtures, variant))
    context['query_string'] = query_string or []
def load_federation_sample_data(self):
"""Inject additional data."""
# Create and add domains
self.domainA = unit.new_domain_ref()
self.resource_api.create_domain(self.domainA['id'],
self.domainA)
self.domainB = unit.new_domain_ref()
self.resource_api.create_domain(self.domainB['id'],
self.domainB)
self.domainC = unit.new_domain_ref()
self.resource_api.create_domain(self.domainC['id'],
self.domainC)
self.domainD = unit.new_domain_ref()
self.resource_api.create_domain(self.domainD['id'],
self.domainD)
# Create and add projects
self.proj_employees = unit.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.proj_employees['id'],
self.proj_employees)
self.proj_customers = unit.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.proj_customers['id'],
self.proj_customers)
self.project_all = unit.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.project_all['id'],
self.project_all)
self.project_inherited = unit.new_project_ref(
domain_id=self.domainD['id'])
self.resource_api.create_project(self.project_inherited['id'],
self.project_inherited)
# Create and add groups
self.group_employees = unit.new_group_ref(domain_id=self.domainA['id'])
self.group_employees = (
self.identity_api.create_group(self.group_employees))
self.group_customers = unit.new_group_ref(domain_id=self.domainA['id'])
self.group_customers = (
self.identity_api.create_group(self.group_customers))
self.group_admins = unit.new_group_ref(domain_id=self.domainA['id'])
self.group_admins = self.identity_api.create_group(self.group_admins)
# Create and add roles
self.role_employee = unit.new_role_ref()
self.role_api.create_role(self.role_employee['id'], self.role_employee)
self.role_customer = unit.new_role_ref()
self.role_api.create_role(self.role_customer['id'], self.role_customer)
self.role_admin = unit.new_role_ref()
self.role_api.create_role(self.role_admin['id'], self.role_admin)
# Employees can access
# * proj_employees
# * project_all
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.proj_employees['id'])
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.project_all['id'])
# Customers can access
# * proj_customers
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
project_id=self.proj_customers['id'])
# Admins can access:
# * proj_customers
# * proj_employees
# * project_all
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.proj_customers['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.proj_employees['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.project_all['id'])
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainA['id'])
# Customers can access:
# * domain A
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainA['id'])
# Customers can access projects via inheritance:
# * domain D
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainD['id'],
inherited_to_projects=True)
# Employees can access:
# * domain A
# * domain B
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
domain_id=self.domainB['id'])
# Admins can access:
# * domain A
# * domain B
# * domain C
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainB['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainC['id'])
self.rules = {
'rules': [
{
'local': [
{
'group': {
'id': self.group_employees['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'Email',
},
{
'type': 'orgPersonType',
'any_one_of': [
'Employee'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_employees['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': self.ASSERTION_PREFIX + 'UserName'
},
{
'type': self.ASSERTION_PREFIX + 'Email',
},
{
'type': self.ASSERTION_PREFIX + 'orgPersonType',
'any_one_of': [
'SuperEmployee'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'Email'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Customer'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_admins['id']
}
},
{
'group': {
'id': self.group_employees['id']
}
},
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'Email'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Admin',
'Chief'
]
}
]
},
{
'local': [
{
'group': {
'id': uuid.uuid4().hex
}
},
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName',
},
{
'type': 'Email',
},
{
'type': 'FirstName',
'any_one_of': [
'Jill'
]
},
{
'type': 'LastName',
'any_one_of': [
'Smith'
]
}
]
},
{
'local': [
{
'group': {
'id': 'this_group_no_longer_exists'
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName',
},
{
'type': 'Email',
},
{
'type': 'Email',
'any_one_of': [
'test<EMAIL>'
]
},
{
'type': 'orgPersonType',
'any_one_of': [
'Tester'
]
}
]
},
# rules with local group names
{
"local": [
{
'user': {
'name': '{0}',
'id': '{1}'
}
},
{
"group": {
"name": self.group_customers['name'],
"domain": {
"name": self.domainA['name']
}
}
}
],
"remote": [
{
'type': 'UserName',
},
{
'type': 'Email',
},
{
"type": "orgPersonType",
"any_one_of": [
"CEO",
"CTO"
],
}
]
},
{
"local": [
{
'user': {
'name': '{0}',
'id': '{1}'
}
},
{
"group": {
"name": self.group_admins['name'],
"domain": {
"id": self.domainA['id']
}
}
}
],
"remote": [
{
"type": "UserName",
},
{
"type": "Email",
},
{
"type": "orgPersonType",
"any_one_of": [
"Managers"
]
}
]
},
{
"local": [
{
"user": {
"name": "{0}",
"id": "{1}"
}
},
{
"group": {
"name": "NON_EXISTING",
"domain": {
"id": self.domainA['id']
}
}
}
],
"remote": [
{
"type": "UserName",
},
{
"type": "Email",
},
{
"type": "UserName",
"any_one_of": [
"IamTester"
]
}
]
},
{
"local": [
{
"user": {
"type": "local",
"name": self.user['name'],
"domain": {
"id": self.user['domain_id']
}
}
},
{
"group": {
"id": self.group_customers['id']
}
}
],
"remote": [
{
"type": "UserType",
"any_one_of": [
"random"
]
}
]
},
{
"local": [
{
"user": {
"type": "local",
"name": self.user['name'],
"domain": {
"id": uuid.uuid4().hex
}
}
}
],
"remote": [
{
"type": "Position",
"any_one_of": [
"DirectorGeneral"
]
}
]
}
]
}
# Add IDP
self.idp = self.idp_ref(id=self.IDP)
self.federation_api.create_idp(self.idp['id'],
self.idp)
# Add IDP with remote
self.idp_with_remote = self.idp_ref(id=self.IDP_WITH_REMOTE)
self.idp_with_remote['remote_ids'] = self.REMOTE_IDS
self.federation_api.create_idp(self.idp_with_remote['id'],
self.idp_with_remote)
# Add | |
# General
import os, sys, pickle, json
import pandas as pd
import numpy as np
# Dash and plotly
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
# colors
import matplotlib
from matplotlib import cm
# Math
from scipy import stats
# sklearn
from sklearn.manifold import MDS
from sklearn.neighbors import NearestNeighbors
# add to pythonpath
sys.path.append(os.getcwd() + '/fpdash')
import shapley.shap as shap
"""
INITIALIZE GLOBAL STUFF
"""
# Import classifier
with open(os.getcwd() + '/data/clf.pickle', 'rb') as f:
clf = pickle.load(f)
# Import NN
# with open(os.getcwd() + '/data/nn.pickle', 'rb') as f:
# nn = pickle.load(f)
# load case base data
X_base = pd.read_csv(os.getcwd() + '/data/X_base.csv')
X_base_decoded = pd.read_csv(os.getcwd() + '/data/X_base_decoded.csv')
meta_base = pd.read_csv(os.getcwd() + '/data/meta_base.csv')
SHAP_base = pd.read_csv(os.getcwd() + '/data/SHAP_base.csv')
# load alert data
X_alert = pd.read_csv(os.getcwd() + '/data/X_alert.csv')
X_alert_decoded = pd.read_csv(os.getcwd() + '/data/X_alert_decoded.csv')
meta_alert = pd.read_csv(os.getcwd() + '/data/meta_alert.csv')
SHAP_alert = pd.read_csv(os.getcwd() + '/data/SHAP_alert.csv')
# load separate train data
X_train = pd.read_csv(os.getcwd() + '/data/X_train.csv')
# Initialize SHAP explainer (must use TRAIN data!)
explainer = shap.Explainer(X=X_train, f=clf, mc='training')
# Spectral colormap
spectral_cmap = matplotlib.cm.get_cmap('Spectral')
spectral_rgb = []
norm = matplotlib.colors.Normalize(vmin=0, vmax=255)
for i in range(0, 255):
k = matplotlib.colors.colorConverter.to_rgb(spectral_cmap(norm(i)))
spectral_rgb.append(k)
spectral = []
n_entries = 255
for k in [x / n_entries for x in range(0, n_entries+1, 1)]:
C = spectral_rgb[int(np.round(255*k))-1]
spectral.append([k, 'rgb'+str((C[0], C[1], C[2]))])
# Border colors
opacity = 0.5
cat_colors = {'TP' : 'rgba(159, 211, 86, %s)' % opacity,
'TN' : 'rgba(13, 181, 230, %s)' % opacity,
'FP' : 'rgba(177, 15, 46, %s)' % opacity,
'FN' : 'rgba(255, 165, 76, %s)' % opacity}
# Train nearest neighbors
def define_distance_function(contr):
    """
    Parameters
    ----------
    contr : array like
        shap values of instance

    Returns
    -------
    weighted_distance : function
        function that computes the distance weighted by feature contributions
    """
    weights = np.abs(np.array(contr))

    def weighted_distance(a, b):
        """Euclidean distance between a and b, weighted by |contributions|.

        Parameters
        ----------
        a : array
        b : array

        Returns
        -------
        distance : float
            weighted distance between array a and b
        """
        diff = np.array(a) - np.array(b)
        return np.sqrt(np.sum(weights * np.square(diff)))

    return weighted_distance
# Pre-train one nearest-neighbour index per alert row; each index uses a
# Euclidean metric weighted by that alert's absolute SHAP values.
nn_dict = {}
for i in range(len(SHAP_alert)):
    distance_function_i = define_distance_function(SHAP_alert.iloc[i])
    # Brute-force search is required because the metric is a Python callable.
    nn_i = NearestNeighbors(n_neighbors = 10, algorithm = 'brute', metric = distance_function_i)
    nn_i.fit(SHAP_base)
    nn_dict[i] = nn_i
print('Initialized nearest neighbor.')
"""
COMPUTE SHAP WITH SAMPLES
"""
def retrieve_shap(instance, top):
    """Return the *top* most important SHAP features for one alert.

    Parameters
    ----------
    instance : int
        row index of the alert in the alert tables
    top : int
        number of most important features to return

    Returns
    -------
    (importances, features, values) : tuple of lists
        SHAP values, feature names and decoded feature values, ordered by
        decreasing absolute SHAP value.
    """
    # Retrieve SHAP values for this alert.
    shap_values = SHAP_alert.iloc[instance].to_dict()
    # Rank features by absolute contribution, largest first.
    df = pd.DataFrame.from_dict(shap_values, orient='index').reset_index(level=0)
    df = df.reindex(df[0].abs().sort_values(ascending=False).index)
    features = list(df['index'].iloc[0:top])
    importances = list(df[0].iloc[0:top])
    # Decoded (human-readable) value of each selected feature.
    values = [X_alert_decoded.iloc[instance][f] for f in features]
    # Removed: dead local `alpha = 0.05` under a misleading "Retrieve errors"
    # comment — it was never used.
    return importances, features, values
"""
COMPUTATIONS FOR NEIGHBOR PLOT
"""
def retrieve_neighbors(i, n_neighbors=10):
    """Return (distances, indices) of the closest case-base rows to alert i.

    With n_neighbors == 0 both results are None (same as the original
    [None][0] construction).
    """
    if n_neighbors == 0:
        return None, None
    distances, neighbors = nn_dict[i].kneighbors(
        SHAP_alert.iloc[[i]], n_neighbors=n_neighbors)
    return distances[0], neighbors[0]
def compute_mds(i, neighbors, space):
    """Compute x and y for multi-dimensional scaling plot.

    Parameters
    ----------
    i : int
        index of instance in X_test
    neighbors : np array [n_neighbors]
        array with indices of neighbors in X_train
    space : str, one from ['shap', 'feature']
        distances computed based on shap value space or feature value space
    """
    sources = {'shap': (SHAP_alert, SHAP_base), 'feature': (X_alert, X_base)}
    if space not in sources:
        raise ValueError("space not in ['shap', 'feature']")
    alert, base = sources[space]
    # Stack the alert on top of its neighbors and embed everything in 2-D.
    stacked = np.vstack((np.array(alert.iloc[i]), np.array(base.iloc[neighbors])))
    embedding = MDS(random_state=1, dissimilarity='euclidean', metric=True)
    embedding.fit(stacked.astype(np.float64))
    x, y = embedding.embedding_.transpose()
    return x, y
"""
PLOT FUNCTIONS
"""
"""
Feature importances
"""
def generate_options():
    """Dropdown options for the ten demo cases."""
    options = []
    for nr in range(1, 11):
        options.append({'label': 'Case %s' % nr, 'value': nr})
    return options
def feature_importance_bar_exact(shap_value, lim):
    """Render one SHAP value as a tiny horizontal bar chart (dcc.Graph).

    Positive contributions are blue, negative orange; *lim* fixes the
    symmetric x-axis range so all bars in a table share the same scale.
    """
    # Blue for positive contributions, orange for negative ones.
    if shap_value >= 0:
        color = '#0DB5E6'
    else:
        color = '#ffa54c'
    # Trace definition
    hoverlabel = {
        'bordercolor' : 'white',
        'font' : {'size' : 10},
    }
    trace = go.Bar(x = [shap_value] ,
                   y = [''],
                   orientation = 'h',
                   hoverinfo = 'x',
                   hoverlabel = hoverlabel,
                   marker = {'color' : color},
                   )
    # Layout definition: hide all axis chrome so only the bar itself shows.
    xaxis = {
        'range' : [-lim, lim],
        'fixedrange' : True,
        'showgrid' : False,
        'zeroline' : False,
        'showline' : False,
        'showticklabels' : False,
        'hoverformat': '.2f'
    }
    yaxis = {
        'fixedrange' : True,
        'showgrid' : False,
        'zeroline' : False,
        'showline' : False,
        'showticklabels' : False
    }
    margin=go.layout.Margin(l=0, r=0, t=0, b=0, pad=0)
    layout = go.Layout(yaxis = yaxis,
                       xaxis = xaxis,
                       margin = margin,
                       bargap = 0)
    # Config definition
    config={'displayModeBar': False,
            'showLink' : False}
    return dcc.Graph(figure = {'data' : [trace],
                               'layout' : layout},
                     config = config,
                     style = {'height' : '18px',
                              'width' : '170px'})
def feature_importance_table_exact(importances, features, values):
    """Build an HTML table listing each feature's contribution bar and value."""
    # Header row first.
    rows = [html.Tr([html.Th(col) for col in ['Contribution', 'Feature', 'Value']])]
    # Symmetric axis limit: largest (first) contribution plus a margin, so
    # every bar in the table is drawn on the same scale.
    lim = np.abs(importances[0]) + 0.2
    for importance, feature, value in zip(importances, features, values):
        rows.append(html.Tr([
            html.Td(feature_importance_bar_exact(importance, lim)),
            html.Td(feature),
            html.Td(value),
        ]))
    return html.Table(rows,
                      style={'font-size': '1.5rem',
                             'marginTop': '10px'})
"""
Neighbors plot
"""
def scatter_neighbors(x, y, neighbors, view, instance, border_width=4):
    """Build the neighbors scatter plot (dcc.Graph) for one alert.

    Parameters
    ----------
    x : array
        mds x with x[0] alert and x[1:] neighbors
    y : array
        mds y, with y[0] being alert and y[1:] neighbors
    neighbors : array
        array with indexes of neighbors
    view : str, one from ['perf', 'pred']
        which view to plot: 'perf' encodes TP/TN/FP/FN in the marker border,
        'pred' encodes the model score in the marker fill
    instance : int
        index of the current alert
    border_width : int
        border width
    """
    global spectral
    global cat_colors
    global meta_base
    global meta_alert
    if view == 'perf':
        # Grey fill; the colored border carries the information.
        showscale = False
        colorscale = [[0,'rgba(75, 75, 75, 1)'], [1, 'rgba(75, 75, 75, 1)']]
        color_alert = 'rgba(255, 255, 0, 0.3)'
        showlegend = True
    elif view == 'pred':
        # No border; fill follows the spectral scale of the model score.
        border_width = 0
        showscale = True
        colorscale = spectral
        color_alert = spectral[int(meta_alert.iloc[instance]['score']*len(spectral))-1][1]
        showlegend = False
    else:
        raise ValueError("view must be one of ['pred', 'perf']")
    """
    PREP
    """
    # Retrieve meta information for the neighbors (x[0]/y[0] is the alert).
    meta_neighbors = pd.DataFrame({'x' : x[1:], 'y' : y[1:],
                                   'performance' : meta_base['performance'].iloc[neighbors],
                                   'score' : meta_base['score'].iloc[neighbors],
                                   'index' : neighbors})
    """
    ADD TRACES
    """
    traces = []
    # Add neighbors: one trace per performance category -> fig.data[0..3].
    # (update_performance relies on this trace order.)
    for perf in ['TP', 'TN', 'FP', 'FN']:
        group = meta_neighbors[meta_neighbors['performance'] == perf]
        scatter = go.Scatter(
            x = group['x'],
            y = group['y'],
            mode = 'markers',
            marker = {'line' : {'width' : border_width, 'color' : cat_colors[perf]},
                      'color' : group['score'],
                      'colorscale' : colorscale,
                      'cmin' : 0,
                      'cmax' : 1,
                      'size' : 10},
            showlegend = showlegend,
            name=perf,
            hoverinfo = 'text',
            hoveron = 'points',
            text = ['%.2f' % i for i in group['score']])
        traces.append(scatter)
    # Add alert -> fig.data[4]
    traces.append(go.Scatter(
        x = [x[0]],
        y = [y[0]],
        mode = 'markers',
        marker = {'line' : {'width' : 3, 'color' : 'rgba(50, 50, 50, 1)'},
                  'size' : 14,
                  'color' : color_alert,
                  'cmin' : 0,
                  'cmax' : 1},
        name = 'Current alert',
        showlegend = True,
        hoverinfo = 'text',
        hoveron = 'points',
        text = 'Current Alert (p=%.2f)' % meta_alert['score'].iloc[instance]))
    # Add dummy colorbar trace -> fig.data[5]; exists only to show the scale.
    traces.append(go.Scatter(
        x=[None],
        y=[None],
        mode='markers',
        marker=dict(
            colorscale=spectral,
            showscale=showscale,
            cmin=0,
            cmax=1,
            colorbar=dict(thickness=5, ticklen=8, outlinewidth=0, title="""Model's Confidence""", tickfont = {'size' : 8}, titlefont={'size' : 10})),
        showlegend = False,
        hoverinfo='none'))
    """
    Define layout
    """
    xaxis = {'fixedrange' : False,
             'showgrid' : True,
             'zeroline' : False,
             'showline' : False,
             'showticklabels' : False,
             }
    yaxis = {'fixedrange' : False,
             'showgrid' : True,
             'zeroline' : False,
             'showline' : False,
             'showticklabels' : False
             }
    margin = go.layout.Margin(l=0, r=0, t=0, b=0, pad=0)
    # NOTE(review): title='Hoi' looks like a leftover debug title — confirm.
    layout = go.Layout(yaxis = yaxis, xaxis = xaxis, margin = margin, height = 400,
                       hovermode = 'closest', legend = dict(y=-0.05, orientation='h'),
                       paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)',title='Hoi')
    """
    Define config
    """
    # Config definition
    config={'displayModeBar': False,
            'showLink' : False}
    return dcc.Graph(id='neighbors-scatter',
                     figure = {'data' : traces,
                               'layout' : layout},
                     config = config,
                     #style = {'height' : '18px',
                     #         'width' : '170px'}
                     )
def update_performance(fig, instance, view, border_width=4):
    """Toggle an existing neighbors-scatter figure between the two views.

    Trace layout (set up by scatter_neighbors): fig.data[0..3] are the
    TP/TN/FP/FN neighbor traces, data[4] the alert marker, data[5] the dummy
    colorbar trace. Mutates *fig* in place and returns it.
    """
    global spectral, meta_alert
    # A border width of 0 means the figure is currently in 'pred' view.
    current_width = fig['data'][0]['marker']['line']['width']
    if ((current_width == 0) and (view == 'perf')):
        #alert
        fig['data'][4]['marker']['color'] = 'rgba(255, 255, 0, 0.3)'
        #scale
        fig['data'][4]['showlegend'] = True
        fig['data'][5]['marker']['showscale'] = False
        #neighbors
        for i in range(4):
            fig['data'][i]['marker']['line']['width'] = border_width
            fig['data'][i]['marker']['colorscale'] = [[0,'rgba(75, 75, 75, 1)'], [1, 'rgba(75, 75, 75, 1)']]
            fig['data'][i]['showlegend'] = True
    elif ((current_width != 0) and (view == 'pred')):
        #alert
        fig['data'][4]['marker']['color'] = spectral[int(meta_alert.iloc[instance]['score']*len(spectral))-1][1]
        #scale
        fig['data'][4]['showlegend'] = True
        fig['data'][5]['marker']['showscale'] = True
        #neighbors
        for i in range(4):
            fig['data'][i]['marker']['line']['width'] = 0
            fig['data'][i]['marker']['colorscale'] = spectral
            fig['data'][i]['showlegend'] = False
    return fig
"""
STYLING
"""
colors = {
'background': '#f6f6f6',
'text-gray' : '#727272'
}
# DIV STYLES
columnStyle = {'marginLeft': 5,
'marginRight' : 5,
'backgroundColor': colors['background'],
'paddingLeft' : 20,
'paddingRight' : 20,
'paddingBottom' : 20,
'height' : '93vh',
'overflow': 'auto'}
middleColumnStyle = {'marginLeft': 20,
'paddingLeft' : 20,
'paddingRight' : 20,
'paddingBottom' : 20}
radioStyle | |
<reponame>Volumental/tc-hue
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
phue by <NAME> - A Philips Hue Python library
Contributions by <NAME>, <NAME>
https://github.com/studioimaginaire/phue
Original protocol hacking by rsmck : http://rsmck.co.uk/hue
Published under the MIT license - See LICENSE file for more details.
"Hue Personal Wireless Lighting" is a trademark owned by Koninklijke Philips Electronics N.V., see www.meethue.com for more information.
I am in no way affiliated with the Philips organization.
'''
import json
import logging
import os
import platform
import sys
import socket
# Feature-detect Python 3 so the module stays compatible with Python 2.
if sys.version_info[0] > 2:
    PY3K = True
else:
    PY3K = False

# httplib was renamed to http.client in Python 3.
if PY3K:
    import http.client as httplib
else:
    import httplib

logger = logging.getLogger('phue')

# Name of the environment variable holding the user's home directory,
# used later to locate the stored bridge credentials.
if platform.system() == 'Windows':
    USER_HOME = 'USERPROFILE'
else:
    USER_HOME = 'HOME'

__version__ = '1.1'
def is_string(data):
    """Utility method to see if data is a string."""
    if not PY3K:
        return isinstance(data, (str, unicode))  # noqa
    return isinstance(data, str)
class PhueException(Exception):
    """Base class for all phue errors; carries a numeric id and a message."""

    def __init__(self, id, message):
        self.id = id
        self.message = message


class PhueRegistrationException(PhueException):
    """Raised when the bridge requires its link button to be pressed."""
    pass


class PhueRequestTimeout(PhueException):
    """Raised when a request to the bridge times out."""
    pass
class Light(object):
    """ Hue Light object

    Light settings can be accessed or set via the properties of this object.

    Fix: replaced the deprecated ``logger.warn`` alias (removed in Python
    3.13) with ``logger.warning`` throughout.
    """
    def __init__(self, bridge, light_id):
        self.bridge = bridge      # Bridge used for all get/set traffic
        self.light_id = light_id  # id of this light on the bridge

        # Cached copies of the last known attribute values; refreshed on read.
        self._name = None
        self._on = None
        self._brightness = None
        self._colormode = None
        self._hue = None
        self._saturation = None
        self._xy = None
        self._colortemp = None
        self._effect = None
        self._alert = None
        self.transitiontime = None  # default: let the bridge decide
        self._reset_bri_after_on = None
        self._reachable = None
        self._type = None

    def __repr__(self):
        # like default python repr function, but add light name
        return '<{0}.{1} object "{2}" at {3}>'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.name,
            hex(id(self)))

    # Wrapper functions for get/set through the bridge, adding support for
    # remembering the transitiontime parameter if the user has set it
    def _get(self, *args, **kwargs):
        """Read a light attribute through the bridge."""
        return self.bridge.get_light(self.light_id, *args, **kwargs)

    def _set(self, *args, **kwargs):
        """Write a light attribute through the bridge.

        Applies the remembered transitiontime and records when the light is
        being turned off so brightness can be restored on the next power-on
        (hardware bug workaround, see the ``on`` setter).
        """
        if self.transitiontime is not None:
            kwargs['transitiontime'] = self.transitiontime
            logger.debug("Setting with transitiontime = {0} ds = {1} s".format(
                self.transitiontime, float(self.transitiontime) / 10))

        if (args[0] == 'on' and args[1] is False) or (
                kwargs.get('on', True) is False):
            self._reset_bri_after_on = True
        return self.bridge.set_light(self.light_id, *args, **kwargs)

    @property
    def name(self):
        '''Get or set the name of the light [string]'''
        if PY3K:
            self._name = self._get('name')
        else:
            self._name = self._get('name').encode('utf-8')
        return self._name

    @name.setter
    def name(self, value):
        old_name = self.name
        self._name = value
        self._set('name', self._name)

        logger.debug("Renaming light from '{0}' to '{1}'".format(
            old_name, value))

        # Keep the bridge's by-name index in sync with the rename.
        self.bridge.lights_by_name[self.name] = self
        del self.bridge.lights_by_name[old_name]

    @property
    def on(self):
        '''Get or set the state of the light [True|False]'''
        self._on = self._get('on')
        return self._on

    @on.setter
    def on(self, value):
        # Some added code here to work around known bug where
        # turning off with transitiontime set makes it restart on brightness = 1
        # see
        # http://www.everyhue.com/vanilla/discussion/204/bug-with-brightness-when-requesting-ontrue-transitiontime5

        # if we're turning off, save whether this bug in the hardware has been
        # invoked
        if self._on and value is False:
            self._reset_bri_after_on = self.transitiontime is not None
            if self._reset_bri_after_on:
                logger.warning(
                    'Turned off light with transitiontime specified, brightness will be reset on power on')

        self._set('on', value)

        # work around bug by resetting brightness after a power on
        if self._on is False and value is True:
            if self._reset_bri_after_on:
                logger.warning(
                    'Light was turned off with transitiontime specified, brightness needs to be reset now.')
                self.brightness = self._brightness
                self._reset_bri_after_on = False

        self._on = value

    @property
    def colormode(self):
        '''Get the color mode of the light [hs|xy|ct]'''
        self._colormode = self._get('colormode')
        return self._colormode

    @property
    def brightness(self):
        '''Get or set the brightness of the light [0-254].

        0 is not off'''
        self._brightness = self._get('bri')
        return self._brightness

    @brightness.setter
    def brightness(self, value):
        self._brightness = value
        self._set('bri', self._brightness)

    @property
    def hue(self):
        '''Get or set the hue of the light [0-65535]'''
        self._hue = self._get('hue')
        return self._hue

    @hue.setter
    def hue(self, value):
        self._hue = int(value)
        self._set('hue', self._hue)

    @property
    def saturation(self):
        '''Get or set the saturation of the light [0-254]

        0 = white
        254 = most saturated
        '''
        self._saturation = self._get('sat')
        return self._saturation

    @saturation.setter
    def saturation(self, value):
        self._saturation = value
        self._set('sat', self._saturation)

    @property
    def xy(self):
        '''Get or set the color coordinates of the light [ [0.0-1.0, 0.0-1.0] ]

        This is in a color space similar to CIE 1931 (but not quite identical)
        '''
        self._xy = self._get('xy')
        return self._xy

    @xy.setter
    def xy(self, value):
        self._xy = value
        self._set('xy', self._xy)

    @property
    def colortemp(self):
        '''Get or set the color temperature of the light, in units of mireds [154-500]'''
        self._colortemp = self._get('ct')
        return self._colortemp

    @colortemp.setter
    def colortemp(self, value):
        # Out-of-range values are forwarded unchanged; only warn.
        if value < 154:
            logger.warning('154 mireds is coolest allowed color temp')
        elif value > 500:
            logger.warning('500 mireds is warmest allowed color temp')
        self._colortemp = value
        self._set('ct', self._colortemp)

    @property
    def colortemp_k(self):
        '''Get or set the color temperature of the light, in units of Kelvin [2000-6500]'''
        self._colortemp = self._get('ct')
        return int(round(1e6 / self._colortemp))

    @colortemp_k.setter
    def colortemp_k(self, value):
        # Kelvin values are clamped to the supported range before conversion.
        if value > 6500:
            logger.warning('6500 K is max allowed color temp')
            value = 6500
        elif value < 2000:
            logger.warning('2000 K is min allowed color temp')
            value = 2000

        colortemp_mireds = int(round(1e6 / value))
        logger.debug("{0:d} K is {1} mireds".format(value, colortemp_mireds))
        self.colortemp = colortemp_mireds

    @property
    def effect(self):
        '''Check the effect setting of the light. [none|colorloop]'''
        self._effect = self._get('effect')
        return self._effect

    @effect.setter
    def effect(self, value):
        self._effect = value
        self._set('effect', self._effect)

    @property
    def alert(self):
        '''Get or set the alert state of the light [select|lselect|none]'''
        self._alert = self._get('alert')
        return self._alert

    @alert.setter
    def alert(self, value):
        if value is None:
            value = 'none'
        self._alert = value
        self._set('alert', self._alert)

    @property
    def reachable(self):
        '''Get the reachable state of the light [boolean]'''
        self._reachable = self._get('reachable')
        return self._reachable

    @property
    def type(self):
        '''Get the type of the light [string]'''
        self._type = self._get('type')
        return self._type
class SensorState(dict):
    """Dict of sensor state that pushes every assignment to the bridge."""

    def __init__(self, bridge, sensor_id):
        self._bridge = bridge
        self._sensor_id = sensor_id

    def __setitem__(self, key, value):
        # Update locally first, then propagate the whole state dict.
        super(SensorState, self).__setitem__(key, value)
        self._bridge.set_sensor_state(self._sensor_id, self)
class SensorConfig(dict):
    """Dict of sensor config that pushes every assignment to the bridge."""

    def __init__(self, bridge, sensor_id):
        self._bridge = bridge
        self._sensor_id = sensor_id

    def __setitem__(self, key, value):
        # Update locally first, then propagate the whole config dict.
        super(SensorConfig, self).__setitem__(key, value)
        self._bridge.set_sensor_config(self._sensor_id, self)
class Sensor(object):
    """ Hue Sensor object

    Sensor config and state can be read and updated via the properties of this object

    Fixes: the ``recycle`` property fetched and returned ``manufacturername``
    (copy-paste bug); ``__init__`` initialized ``_model`` although the
    ``modelid`` property caches ``_modelid``.
    """
    def __init__(self, bridge, sensor_id):
        self.bridge = bridge        # Bridge used for all get/set traffic
        self.sensor_id = sensor_id  # id of this sensor on the bridge

        # Cached copies of the last known attribute values; refreshed on read.
        self._name = None
        self._modelid = None
        self._swversion = None
        self._type = None
        self._uniqueid = None
        self._manufacturername = None
        self._state = SensorState(bridge, sensor_id)
        self._config = {}
        self._recycle = None

    def __repr__(self):
        # like default python repr function, but add sensor name
        return '<{0}.{1} object "{2}" at {3}>'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.name,
            hex(id(self)))

    # Wrapper functions for get/set through the bridge
    def _get(self, *args, **kwargs):
        """Read a sensor attribute through the bridge."""
        return self.bridge.get_sensor(self.sensor_id, *args, **kwargs)

    def _set(self, *args, **kwargs):
        """Write a sensor attribute through the bridge."""
        return self.bridge.set_sensor(self.sensor_id, *args, **kwargs)

    @property
    def name(self):
        '''Get or set the name of the sensor [string]'''
        if PY3K:
            self._name = self._get('name')
        else:
            self._name = self._get('name').encode('utf-8')
        return self._name

    @name.setter
    def name(self, value):
        old_name = self.name
        self._name = value
        self._set('name', self._name)

        logger.debug("Renaming sensor from '{0}' to '{1}'".format(
            old_name, value))

        # Keep the bridge's by-name index in sync with the rename.
        self.bridge.sensors_by_name[self.name] = self
        del self.bridge.sensors_by_name[old_name]

    @property
    def modelid(self):
        '''Get a unique identifier of the hardware model of this sensor [string]'''
        self._modelid = self._get('modelid')
        return self._modelid

    @property
    def swversion(self):
        '''Get the software version identifier of the sensor's firmware [string]'''
        self._swversion = self._get('swversion')
        return self._swversion

    @property
    def type(self):
        '''Get the sensor type of this device [string]'''
        self._type = self._get('type')
        return self._type

    @property
    def uniqueid(self):
        '''Get the unique device ID of this sensor [string]'''
        self._uniqueid = self._get('uniqueid')
        return self._uniqueid

    @property
    def manufacturername(self):
        '''Get the name of the manufacturer [string]'''
        self._manufacturername = self._get('manufacturername')
        return self._manufacturername

    @property
    def state(self):
        ''' A dictionary of sensor state. Some values can be updated, some are read-only. [dict]'''
        data = self._get('state')
        self._state.clear()
        self._state.update(data)
        return self._state

    @state.setter
    def state(self, data):
        self._state.clear()
        self._state.update(data)

    @property
    def config(self):
        ''' A dictionary of sensor config. Some values can be updated, some are read-only. [dict]'''
        data = self._get('config')
        self._config.clear()
        self._config.update(data)
        return self._config

    @config.setter
    def config(self, data):
        self._config.clear()
        self._config.update(data)

    @property
    def recycle(self):
        ''' True if this resource should be automatically removed when the last reference to it disappears [bool]'''
        # Bug fix: previously fetched and returned 'manufacturername'.
        self._recycle = self._get('recycle')
        return self._recycle
class Group(Light):
""" A group of Hue lights, tracked as a group on the bridge
Example:
>>> b = Bridge()
>>> g1 = Group(b, 1)
>>> g1.hue = 50000 # all lights in that group turn blue
>>> g1.on = False # all will turn off
>>> g2 = Group(b, 'Kitchen') # you can also look up groups by name
| |
<gh_stars>10-100
from tkinter import *
from tkinter.ttk import *
from tkinter import ttk
import tkinter.messagebox as ms
import os
import database
class RecordManagement(Frame):
# Constructor Function
def __init__(self, root):
    """Build the main window: fixed size, icon, tab notebook and all tabs.

    Note: ends with mainloop(), so construction blocks until the window
    is closed.
    """
    # min == max disables resizing: the window is locked to 560x500.
    root.maxsize(560,500)
    root.minsize(560,500)
    root.wm_iconbitmap('RecordManager-icon.ico')

    # Initializing Master Frame and Tab notebook
    Frame.__init__(self)
    self.grid()
    self.master.title('Record Management')
    tabControl = ttk.Notebook(self)
    tabControl.configure(width=550, height=500)

    # Initializing Tab 1 --- "Student Registration Tab"
    self.tab1 = ttk.Frame(tabControl)
    tabControl.add(self.tab1, text="Student Registration")
    tabControl.grid()
    self.tab1.configure(style='TFrame')

    # Initializing Tab 2 --- "Subject Allocation Tab"
    self.tab2 = ttk.Frame(tabControl)
    tabControl.add(self.tab2, text="Subject Allocation")
    tabControl.grid()

    # Initializing Tab 3 --- "Marks Allocation Tab"
    self.tab3 = ttk.Frame(tabControl)
    tabControl.add(self.tab3, text="Marks Allocation")
    tabControl.grid()

    # Tab 4 --- database search
    self.searchTab = ttk.Frame(tabControl)
    tabControl.add(self.searchTab, text="Search Database")
    tabControl.grid()

    # Initializing Top LabelFrame
    self.label_frame()
    # Constructing the GUI of 3 tabs
    self.tab_construct()
    # Enter the Tk event loop (blocks until the window is closed).
    self.mainloop()
# Label Frame Construction
def label_frame(self):
    """Create the titled LabelFrame that fills each of the four tabs."""
    title = "Records Manager - Chitkara University"
    # (attribute name, parent tab, frame width) — tab1 is narrower.
    specs = [
        ('labelFrame_tab1', self.tab1, 530),
        ('labelFrame_tab2', self.tab2, 580),
        ('labelFrame_tab3', self.tab3, 580),
        ('labelFrame_searchTab', self.searchTab, 580),
    ]
    for attr, tab, width in specs:
        frame = LabelFrame(tab, text=title, width=width, height=500)
        frame.grid(row=0, column=0)
        frame.grid_propagate(0)
        setattr(self, attr, frame)
def top_label(self, labeltext, tab, topHeadingColumn=2, sticky="", columnspan=3):
margin = Label(tab, width=2).grid(row=1, column=0)
self.topLabel = Label(tab, text=labeltext,foreground='red', font="Helvetica 18 bold")
self.topLabel.grid(row=0, column=topHeadingColumn, columnspan=columnspan, sticky=sticky)
enter1 = Label(tab).grid(row=1, column=1)
enter2 = Label(tab).grid(row=2, column=1)
def bottom_label(self, tab, buttonText, startRow, entryList, optionCheck):
enter1 = Label(tab).grid(row=startRow, column=1)
enter2 = Label(tab).grid(row=startRow+1, column=1)
submitButton = Button(tab, text=buttonText, command= lambda entryList=entryList, optionCheck=optionCheck: self.show_msg(entryList, optionCheck)).grid(row=startRow+2, column=2, columnspan=3)
# 3 Tabs main GUI Construction
def tab_construct(self):
# ------------------------------------------------------- TAB 1 ------------------------------------------------------------------
self.top_label("New Student Registration", self.labelFrame_tab1)
nameLabel = Label(self.labelFrame_tab1, text="Name:", font="comicsansms 14").grid(row = 3, column=1, sticky="w")
self.nameEntry = Entry(self.labelFrame_tab1, width=45)
self.nameEntry.grid(row = 3, column=3, columnspan=3)
self.nameEntry.insert(0, "Student Name")
self.nameEntry.bind('<FocusIn>', lambda event, entry=self.nameEntry, text="Student Name": self.on_click(event, entry, text))
self.nameEntry.bind('<FocusOut>', lambda event, entry=self.nameEntry, text="Student Name": self.of_click(event, entry, text))
self.nameEntry.configure(foreground='grey')
RollNoLabel = Label(self.labelFrame_tab1, text="Roll No.:", font="comicsansms 14").grid(row = 4, column=1, sticky="w")
self.RollNoEntry = Entry(self.labelFrame_tab1, width=45)
self.RollNoEntry.grid(row=4, column=3, columnspan=3)
self.RollNoEntry.insert(0, "Student Roll No.")
self.RollNoEntry.bind('<FocusIn>', lambda event, entry=self.RollNoEntry, text="Student Roll No.": self.on_click(event, entry, text))
self.RollNoEntry.bind('<FocusOut>', lambda event, entry=self.RollNoEntry, text="Student Roll No.": self.of_click(event, entry, text))
self.RollNoEntry.configure(foreground='grey')
FatherLabel = Label(self.labelFrame_tab1, text="Father's Name:", font="comicsansms 14").grid(row = 5, column=1, sticky="w")
self.fatherEntry = Entry(self.labelFrame_tab1, width=45)
self.fatherEntry.grid(row=5, column=3, columnspan=3)
self.fatherEntry.insert(0, "Father's Name")
self.fatherEntry.bind('<FocusIn>', lambda event, entry=self.fatherEntry, text="Father's Name": self.on_click(event, entry, text))
self.fatherEntry.bind('<FocusOut>', lambda event, entry=self.fatherEntry, text="Father's Name": self.of_click(event, entry, text))
self.fatherEntry.configure(foreground='grey')
motherLabel = Label(self.labelFrame_tab1, text="Mother's Name:", font="comicsansms 14").grid(row = 6, column=1, sticky="w")
self.motherEntry = Entry(self.labelFrame_tab1, width=45)
self.motherEntry.grid(row=6, column=3, columnspan=3)
self.motherEntry.insert(0, "Mother's Name")
self.motherEntry.bind('<FocusIn>', lambda event, entry=self.motherEntry, text="Mother's Name": self.on_click(event, entry, text))
self.motherEntry.bind('<FocusOut>', lambda event, entry=self.motherEntry, text="Mother's Name": self.of_click(event, entry, text))
self.motherEntry.configure(foreground='grey')
mobileLabel = Label(self.labelFrame_tab1, text="Mobile No.:", font="comicsansms 14").grid(row = 7, column=1, sticky="w")
self.mobileEntry = Entry(self.labelFrame_tab1, width=45)
self.mobileEntry.grid(row=7, column=3, columnspan=3)
self.mobileEntry.insert(0, "Mobile No.")
self.mobileEntry.bind('<FocusIn>', lambda event, entry=self.mobileEntry, text="Mobile No.": self.on_click(event, entry, text))
self.mobileEntry.bind('<FocusOut>', lambda event, entry=self.mobileEntry, text="Mobile No.": self.of_click(event, entry, text))
self.mobileEntry.configure(foreground='grey')
courseLabel = Label(self.labelFrame_tab1, text="Course Interested: ", font="comicsansms 14").grid(row = 8, column=1, sticky="w")
self.option = StringVar()
self.options = ("Select Course","Computer Science Engineering", "Electronics and Communcation Engineering", "Mechanical Engineering", "Civil Engineering", "Electrical Engineering", "Mechatronics")
self.option.set("Select Course")
self.optionMenu = OptionMenu(self.labelFrame_tab1, self.option, *self.options, command=self.func_tab1)
self.optionMenu.grid(row=8, column=3, columnspan=3, sticky="ew")
adressLabel = Label(self.labelFrame_tab1, text="Address:", font="comicsansms 14").grid(row = 9, column=1, sticky="w")
self.addressEntry1 = Entry(self.labelFrame_tab1, width=45)
self.addressEntry1.grid(row=9, column=3, columnspan=3, sticky="w")
self.addressEntry1.insert(0, "House No.\\Street Name\\Locality")
self.addressEntry1.bind('<FocusIn>', lambda event, entry=self.addressEntry1, text="House No.\\Street Name\\Locality": self.on_click(event, entry, text))
self.addressEntry1.bind('<FocusOut>', lambda event, entry=self.addressEntry1, text="House No.\\Street Name\\Locality": self.of_click(event, entry, text))
self.addressEntry1.configure(foreground='grey')
self.addressEntry2 = Entry(self.labelFrame_tab1, width=45)
self.addressEntry2.grid(row=10, column=3, columnspan=3, sticky="w")
self.addressEntry2.insert(0, "Colony\\Village\\Town")
self.addressEntry2.bind('<FocusIn>', lambda event, entry=self.addressEntry2, text="Colony\\Village\\Town": self.on_click(event, entry, text))
self.addressEntry2.bind('<FocusOut>', lambda event, entry=self.addressEntry2, text="Colony\\Village\\Town": self.of_click(event, entry, text))
self.addressEntry2.configure(foreground='grey')
self.addressEntry3 = Entry(self.labelFrame_tab1, width=45)
self.addressEntry3.grid(row=11, column=3, columnspan=3, sticky="w")
self.addressEntry3.insert(0, "City\\District")
self.addressEntry3.bind('<FocusIn>', lambda event, entry=self.addressEntry3, text="City\\District": self.on_click(event, entry, text))
self.addressEntry3.bind('<FocusOut>', lambda event, entry=self.addressEntry3, text="City\\District": self.of_click(event, entry, text))
self.addressEntry3.configure(foreground='grey')
self.entryList_tab1 = [self.RollNoEntry, self.nameEntry, self.fatherEntry, self.motherEntry, self.mobileEntry, self.addressEntry1, self.addressEntry2, self.addressEntry3]
self.bottom_label(self.labelFrame_tab1, "Register Student", 12, self.entryList_tab1, True)
# -------------------------------------------------------------- TAB 2 ------------------------------------------------------------------
self.top_label("Subject Allocation", self.labelFrame_tab2)
RollNoLabel_tab2 = Label(self.labelFrame_tab2, text="Roll No.: ", font="comicsansms 14").grid(row = 3, column=1, sticky="w")
self.RollNoEntry_tab2 = Entry(self.labelFrame_tab2, width=45)
self.RollNoEntry_tab2.grid(row=3, column=3, columnspan=3)
self.RollNoEntry_tab2.insert(0, "Student Roll No.")
self.RollNoEntry_tab2.bind('<FocusIn>', lambda event, entry=self.RollNoEntry_tab2, text="Student Roll No.": self.on_click(event, entry, text))
self.RollNoEntry_tab2.bind('<FocusOut>', lambda event, entry=self.RollNoEntry_tab2, text="Student Roll No.": self.of_click(event, entry, text))
self.RollNoEntry_tab2.configure(foreground='grey')
subjectID = Label(self.labelFrame_tab2, text="Subject Id: ", font="comicsansms 14").grid(row = 4, column=1, sticky="w")
self.subjectId_entry = Entry(self.labelFrame_tab2, width=45)
self.subjectId_entry.grid(row=4, column=3, columnspan=3)
self.subjectId_entry.insert(0, "Subject ID")
self.subjectId_entry.bind('<FocusIn>', lambda event, entry=self.subjectId_entry, text="Subject ID": self.on_click(event, entry, text))
self.subjectId_entry.bind('<FocusOut>', lambda event, entry=self.subjectId_entry, text="Subject ID": self.of_click(event, entry, text))
self.subjectId_entry.configure(foreground='grey')
subject_name = Label(self.labelFrame_tab2, text="Subject Name: ", font="comicsansms 14").grid(row = 5, column=1, sticky="w")
self.subject_name_entry = Entry(self.labelFrame_tab2, width=45)
self.subject_name_entry.grid(row=5, column=3, columnspan=3)
self.subject_name_entry.insert(0, "Subject Name")
self.subject_name_entry.bind('<FocusIn>', lambda event, entry=self.subject_name_entry, text="Subject Name": self.on_click(event, entry, text))
self.subject_name_entry.bind('<FocusOut>', lambda event, entry=self.subject_name_entry, text="Subject Name": self.of_click(event, entry, text))
self.subject_name_entry.configure(foreground='grey')
credits = Label(self.labelFrame_tab2, text="Credits: ", font="comicsansms 14").grid(row = 6, column=1, sticky="w")
self.credits_entry = Entry(self.labelFrame_tab2, width=45)
self.credits_entry.grid(row=6, column=3, columnspan=3)
self.credits_entry.configure(foreground='grey')
self.credits_entry.insert(0, "No. of Credits")
self.credits_entry.bind('<FocusIn>', lambda event, entry=self.credits_entry, text="No. of Credits": self.on_click(event, entry, text))
self.credits_entry.bind('<FocusOut>', lambda event, entry=self.credits_entry, text="No. of Credits": self.of_click(event, entry, text))
self.entryList_tab2 = [self.RollNoEntry_tab2, self.subjectId_entry, self.subject_name_entry, self.credits_entry]
self.bottom_label(self.labelFrame_tab2, "Allocate Subject", 7, self.entryList_tab2, False)
# ------------------------------------------------------------- TAB 3 ----------------------------------------------------------------
self.top_label("Marks Allocation", self.labelFrame_tab3)
RollNoLabel_tab3 = Label(self.labelFrame_tab3, text="Roll No.: ", font="comicsansms 14").grid(row = 3, column=1, sticky="w")
self.RollNoEntry_tab3 = Entry(self.labelFrame_tab3, width=45)
self.RollNoEntry_tab3.grid(row=3, column=3, columnspan=3)
self.RollNoEntry_tab3.insert(0, "Student Roll No.")
self.RollNoEntry_tab3.bind('<FocusIn>', lambda event, entry=self.RollNoEntry_tab3, text="Student Roll No.": self.on_click(event, entry, text))
self.RollNoEntry_tab3.bind('<FocusOut>', lambda event, entry=self.RollNoEntry_tab3, text="Student Roll No.": self.of_click(event, entry, text))
self.RollNoEntry_tab3.configure(foreground='grey')
subjectID = Label(self.labelFrame_tab3, text="Subject Id: ", font="comicsansms 14").grid(row = 4, column=1, sticky="w")
self.subjectId_entry_tab3 = Entry(self.labelFrame_tab3, width=45)
self.subjectId_entry_tab3.grid(row=4, column=3, columnspan=3)
self.subjectId_entry_tab3.insert(0, "Subject ID")
self.subjectId_entry_tab3.bind('<FocusIn>', lambda event, entry=self.subjectId_entry_tab3, text="Subject ID": self.on_click(event, entry, text))
self.subjectId_entry_tab3.bind('<FocusOut>', lambda event, entry=self.subjectId_entry_tab3, text="Subject ID": self.of_click(event, entry, text))
self.subjectId_entry_tab3.configure(foreground='grey')
subject_name_tab3 = Label(self.labelFrame_tab3, text="Subject Name: ", font="comicsansms 14").grid(row = 5, column=1, sticky="w")
self.subject_name_entry_tab3 = Entry(self.labelFrame_tab3, width=45)
self.subject_name_entry_tab3.grid(row=5, column=3, columnspan=3)
self.subject_name_entry_tab3.insert(0, "Subject Name")
self.subject_name_entry_tab3.bind('<FocusIn>', lambda event, entry=self.subject_name_entry_tab3, text="Subject Name": self.on_click(event, entry, text))
self.subject_name_entry_tab3.bind('<FocusOut>', lambda event, entry=self.subject_name_entry_tab3, text="Subject Name": self.of_click(event, entry, text))
self.subject_name_entry_tab3.configure(foreground='grey')
testTypeLabel = Label(self.labelFrame_tab3, text="Test Type: ", font="comicsansms 14").grid(row = 6, column=1, sticky="w")
self.option_tab3 = StringVar()
self.options_tab3 = ("Select Test Type","FA", "ST", "End Term", "Project Based")
self.option_tab3.set("Select Test Type")
self.optionMenu_tab3 = OptionMenu(self.labelFrame_tab3, self.option_tab3, *self.options_tab3, command=self.func_tab3)
self.optionMenu_tab3.grid(row=6, column=3, columnspan=3, sticky="w")
maxMarks = Label(self.labelFrame_tab3, text="Max Marks: ", font="comicsansms 14").grid(row = 7, column=1, sticky="w")
self.maxMarks_entry = Entry(self.labelFrame_tab3, width=45)
self.maxMarks_entry.grid(row=7, column=3, columnspan=3)
self.maxMarks_entry.insert(0, "Maximum Marks")
self.maxMarks_entry.bind('<FocusIn>', lambda event, entry=self.maxMarks_entry, text="Maximum Marks": self.on_click(event, entry, text))
self.maxMarks_entry.bind('<FocusOut>', lambda event, entry=self.maxMarks_entry, text="Maximum Marks": self.of_click(event, entry, text))
self.maxMarks_entry.configure(foreground='grey')
obtMarks = Label(self.labelFrame_tab3, text="Obtained Marks: ", font="comicsansms 14").grid(row = 8, column=1, sticky="w")
self.obtMarks_entry = Entry(self.labelFrame_tab3, width=45)
self.obtMarks_entry.grid(row=8, column=3, columnspan=3)
self.obtMarks_entry.insert(0, "Marks Obtained")
self.obtMarks_entry.bind('<FocusIn>', lambda event, entry=self.obtMarks_entry, text="Marks Obtained": self.on_click(event, entry, text))
self.obtMarks_entry.bind('<FocusOut>', lambda event, entry=self.obtMarks_entry, text="Marks Obtained": self.of_click(event, entry, text))
self.obtMarks_entry.configure(foreground='grey')
self.entryList_tab3 = [self.RollNoEntry_tab3, self.subjectId_entry_tab3, self.subject_name_entry_tab3, self.maxMarks_entry, self.obtMarks_entry]
self.bottom_label(self.labelFrame_tab3, "Assign Marks", 9, self.entryList_tab3, True)
# ---------------------------------------------------------- SERACH TAB ------------------------------------------------------------------
self.top_label("Search in Database", self.labelFrame_searchTab, 4)
searchLabel = Label(self.labelFrame_searchTab, text="Roll No.: ", font="comicsansms 14").grid(row=1, column=1, sticky='w')
space = Label(self.labelFrame_searchTab, width=15).grid(row=1, column=2)
self.searchBar = Entry(self.labelFrame_searchTab, width=55)
self.searchBar.grid(row=1, column=4, columnspan=5, sticky='e')
self.searchBar.insert(0, "Enter the roll no of the student to get the data")
self.searchBar.bind('<FocusIn>', lambda event, entry=self.searchBar, text="Enter the roll no of the student to get the data": self.on_click(event, entry, text))
self.searchBar.bind('<FocusOut>', lambda event, entry=self.searchBar, text="Enter the roll no of the student to get the data": self.of_click(event, entry, text))
self.searchBar.configure(foreground='grey')
lineBreak = Label(self.labelFrame_searchTab).grid(row=2)
self.radioVar = IntVar()
radio1 = Radiobutton(self.labelFrame_searchTab, text="Personal Data", variable=self.radioVar , value=0).grid(row=3, column=2, columnspan=3,sticky='w')
radio2 = Radiobutton(self.labelFrame_searchTab, text="Subject Data", variable=self.radioVar , value=1).grid(row=3, column=5, columnspan=2, sticky='w')
radio3 = Radiobutton(self.labelFrame_searchTab, text="Marks Data", variable=self.radioVar , value=2).grid(row=3, column=7, columnspan=2, sticky='e')
lineBreak = Label(self.labelFrame_searchTab).grid(row=4)
searchButton = Button(self.labelFrame_searchTab, text='Search', command=self.search).grid(row=5, column=3, sticky='e', columnspan=2)
lineBreak = Label(self.labelFrame_searchTab).grid(row=6)
self.searchResult = Text(self.labelFrame_searchTab,state='disabled', wrap='none',height=17, width=62)
# self.searchResult.grid(row=7, column=2, columnspan=4)
self.scrollb = ttk.Scrollbar(self.labelFrame_searchTab, command=self.searchResult.yview)
self.searchResult['yscrollcommand'] = self.scrollb.set
self.scrollbh = ttk.Scrollbar(self.labelFrame_searchTab, orient='horizontal', command=self.searchResult.xview)
self.searchResult['xscrollcommand'] = self.scrollbh.set
# scrollbh.grid(row=8, column=1, sticky='nsew')
# ---------------------------------------------------------------------------------------------------------------------------------------
# For getting the current value of option boxes present in 1st and 3rd tab
    def func_tab1(self, value):
        # OptionMenu callback (tab 1): remember the course currently selected.
        self.currentOption_tab1 = value
    def func_tab3(self, value):
        # OptionMenu callback (tab 3): remember the test type currently selected.
        self.currentOption_tab3 = value
# Event | |
center
# distmat = matrix with squared distance from center (more efficient if calculated only once)
# use kernel:
# if True: select based on region overlap AND distance from center in stage 2,
# otherwise only use overlap from first pass
# Note: always use kernel when first pass returns nothing
zdim, xdim, ydim = seq_block.shape
x_center, y_center = center
bestindices = np.zeros(zdim)
if distmat == None:
xsurface=np.tile(list(range(xdim)),(ydim,1)).T
ysurface=np.tile(list(range(ydim)),(xdim,1))
distmat = (xsurface-x_center)**2 + (ysurface-y_center)**2
distkernel = np.exp(-((xsurface-x_center)**2 + (ysurface-y_center)**2)/kernel_width**2)
else:
distkernel = np.exp(-distmat/kernel_width**2)
contains_center = np.zeros(zdim)
region_selected = np.zeros(zdim)
bestcomponent = np.zeros_like(seq_block)
# stage 1: select regions containing ROI
for sidx in range(zdim):
#num_labels = int(np.max(seq_block[sidx]))
center_label = seq_block[sidx][center]
if center_label > 0:
contains_center[sidx] = 1
region_selected[sidx] = 1
bestcomponent[sidx] = extract_region(seq_block[sidx], center_label)
bestindices[sidx] = center_label
#for lidx in range(num_labels):
# component = extract_region(seq_block[sidx], lidx+1)
# if (component[center] == 1):
# contains_center[sidx] = 1
# region_selected[sidx] = 1
# bestcomponent[sidx] = component
# stage 2: create likelyhood surface: based on overlap only or + centered kernel
if np.sum(region_selected) > 0: # case where at least one region is selected
#print "Stage 2"
numselected = region_selected.sum()
if numselected == zdim: # if regions have been selected for all slices: stop here
print("Components have been selected for all time steps in stage 1")
return bestcomponent, bestindices
print("Components have been selected for " + str(numselected) + "/" + str(zdim) + " time steps in stage 1")
lsurface = np.sum(1.0*bestcomponent, axis = 0)
lsurface = lsurface / (1.0*numselected)
if use_kernel:
lsurface *= distkernel
else: # Return all-"-1" masks to make this detectable"
print("No components contain ROI!!")
return - np.ones_like(seq_block).astype(int), bestindices
#lsurface = distkernel
# stage 3: select regions for other slices depending on likelyhood surface
# selection criterion? max mean of masked likelyhood surface? favours small regions!
# excluding nonoverlapping pixels?
# max sum of likelyhood surface? (favours regions with large absolute overlap)
#print "Stage 3"
for sidx in range(zdim):
if region_selected[sidx] == 0 :
num_labels = int(np.max(seq_block[sidx]))
if num_labels == 0:
continue
regionscore_sum = np.zeros(num_labels)
regionscore_mean = np.zeros(num_labels)
regionscore_max = np.zeros(num_labels)
for lidx in range(num_labels):
component = extract_region(seq_block[sidx], lidx+1)
if np.sum(component) < min_size:
continue
npixels = component.sum()
wcomponent = 1.0 * component * lsurface
regionscore_sum[lidx] = np.sum(wcomponent[wcomponent > 0])
regionscore_mean[lidx] = regionscore_sum[lidx]/npixels
regionscore_max[lidx] = np.max(wcomponent)
#if regionscore_sum[lidx]>0:
# bestcomponent[sidx] = bestcomponent[sidx] + label * component.astype(int)
# label = label + 1
best_idx = np.argmax(regionscore_mean)
if(regionscore_mean[best_idx]) == 0:
print("Stage 3: no component selected for time step " + str(sidx))
else:
bestcomponent[sidx] = extract_region(seq_block[sidx], best_idx + 1)
bestindices[sidx] = best_idx + 1
#if do_plot:
# plt.figure()
# plt.subplot(221)
# plt.imshow(seq_block[sidx])
# plt.subplot(222)
# plt.imshow(lsurface)
# plt.subplot(223)
# plt.imshow(bestcomponent[sidx])
# plt.subplot(224)
# dum = bestcomponent[sidx].copy()
# dum[dum > 0] = 1
# plt.imshow(dum*lsurface)
# plt.show()
# stage 4 (???): update likelyhood surface, go through the stack again and do final selection
#print "Stage 4"
bestcomponent2 = np.zeros_like(bestcomponent)
dum = 1.0*bestcomponent.copy()
dum[dum>0] = 1.0
lsurface = np.sum(dum, axis = 0)
core_max = int(zdim/3)
lsurface[lsurface < core_max] = 0.0
lsurface = lsurface / core_max
for sidx in range(zdim):
npixels = np.sum(bestcomponent[sidx])
if npixels == 0:
continue
wcomponent = (1.0 * bestcomponent[sidx]) * lsurface
regionscore_sum = np.sum(wcomponent[wcomponent > 0])
regionscore_mean = regionscore_sum/npixels
regionscore_max = np.max(wcomponent)
if(regionscore_mean) == 0:
print("Stage 4: no component selected for time step " + str(sidx))
bestindices[sidx] = 0
else:
bestcomponent2[sidx] = bestcomponent[sidx].copy()
if do_plot:
plt.figure()
plt.subplot(221)
plt.imshow(seq_block[sidx])
plt.subplot(222)
plt.imshow(bestcomponent[sidx])
plt.subplot(223)
plt.imshow(lsurface)
plt.subplot(224)
plt.imshow(bestcomponent2[sidx])
plt.show()
return bestcomponent2, bestindices
def cut_superfluous_parts(bestregions, regions, bestindices, do_plot = False):
    """Trim each selected region to the pixels never claimed by any other region.

    bestregions: (time, x, y) stack of the chosen component per time step.
    regions:     (time, x, y) labeled stacks the components were chosen from.
    bestindices: per-time-step label of the chosen component in `regions`.
    Returns the trimmed copy of `bestregions`; after masking, only the largest
    remaining connected component per time step is kept (stray pixels cut).
    """
    zdim, xdim, ydim = bestregions.shape
    # keep_mask: 1 where no *non-selected* region ever appears over the stack.
    keep_mask = np.ones((xdim, ydim)).astype(int)
    trimmed = bestregions.copy()
    for t in range(zdim):
        others = regions[t].copy()
        others[others == bestindices[t]] = 0  # drop the chosen label, keep the rest
        keep_mask[others > 0] = 0
    for t in range(zdim):
        if not np.sum(trimmed[t] > 0):
            continue
        labeled, n_labels = ndi.label(trimmed[t] * keep_mask)
        # Keep only the largest surviving connected component.
        largest = 0
        for lbl in range(1, n_labels + 1):
            component = extract_region(labeled, lbl)
            size = np.sum(component)
            if size > largest:
                largest = size
                trimmed[t] = component
        if do_plot:
            plt.figure()
            plt.subplot(131)
            plt.imshow(bestregions[t])
            plt.subplot(132)
            plt.imshow(keep_mask)
            plt.subplot(133)
            plt.imshow(trimmed[t])
            plt.show()
    return trimmed
def wrapper_regions(bestregions, opening_param = 3, mshape = ((0,1,0),(1,1,1),(0,1,0)) ):
    """Replace every non-empty time-step mask by its convex hull.

    opening_param and mshape are accepted for API compatibility but are not
    used by the current implementation.
    """
    zdim, xdim, ydim = bestregions.shape
    hulls = np.zeros_like(bestregions)
    for t in range(zdim):
        frame = bestregions[t]
        if np.sum(frame) > 0:
            hulls[t] = convex_hull_image(frame)
    return hulls
def breakup_region(component):
    """Split a merged binary region into parts via skeleton-guided watershed.

    Seeds are distance-transform peaks along the skeleton (closed to merge
    nearby maxima); the watershed then floods the negated distance map,
    restricted to the input mask.
    """
    dist = ndi.distance_transform_edt(component)
    ridge = dist * skeletonize(component)
    seeds = peak_local_max(ridge, indices=False, footprint=disk(10))
    seeds = ndi.binary_closing(seeds, structure=disk(4), iterations=2)
    markers = ndi.label(seeds)[0]
    return watershed(-dist, markers, mask=component)
def filter_sequence(seq_block, order = 5, relcutoff = 0.1):
    """Low-pass filter a (time, x, y) stack along the time axis.

    The stack is tripled along time (periodic extension) before filtering to
    suppress start-up transients of the IIR filter, and the middle copy of
    the filtered result is returned.
    """
    def butter_lowpass(cutoff, fs, order=5):
        # Digital Butterworth low-pass design, cutoff normalized to Nyquist.
        nyq = 0.5 * fs
        b, a = butter(order, cutoff / nyq, btype='low', analog=False)
        return b, a

    def butter_lowpass_filter(data, cutoff, fs, order=5):
        b, a = butter_lowpass(cutoff, fs, order=order)
        return lfilter(b, a, data, axis=0)

    zdim = seq_block.shape[0]
    tripled = np.concatenate((seq_block, seq_block, seq_block), axis=0).astype(float)
    filtered = butter_lowpass_filter(tripled, relcutoff, 1.0, order)
    return filtered[zdim:2*zdim, :, :]
def segment_data(data):
    """Segment every slice of one study and return {slice_index: result dict}.

    Each result dict carries 'roi_center', 'roi_radii' (shared across slices,
    from axis_likelyhood_surface) and 'segmask', the per-frame boolean masks.

    Fixes: `numslices / 2` is a float under Python 3 (this file uses the
    print() function, so it runs on Python 3) and raises TypeError when used
    as a list index -- replaced with floor division. The loop variable no
    longer shadows the builtin `slice`.
    """
    # ROI-detection tuning parameters.
    kernel_width = 5
    center_margin = 8
    num_peaks = 10
    num_circles = 10  # 20
    upscale = 1.5
    minradius = 15
    maxradius = 65
    radstep = 2
    segmented = {}
    processdata = sort_images(data)
    numslices = len(processdata)
    # Floor division: a float index would raise TypeError on Python 3.
    if numslices == 1:
        centerslice = 0
    else:
        centerslice = numslices // 2
    segment_block = processdata[centerslice]['data'].copy()  # make sure the numbers are floats
    # NOTE(review): this discards the sorted order for all later per-slice
    # processing and reverts to the caller's ordering -- confirm intentional.
    processdata = data
    lsurface, ROImask, ROIcenter, ROIaxes = axis_likelyhood_surface(processdata, kernel_width = kernel_width,
                                                                    center_margin = center_margin,
                                                                    num_peaks = num_peaks,
                                                                    num_circles = num_circles,
                                                                    upscale = upscale,
                                                                    minradius = minradius,
                                                                    maxradius = maxradius,
                                                                    radstep = radstep)
    # Fit the segmentation model once on the central slice and reuse it for all.
    segmodel = extract_segmentation_model(segment_block)
    for slice_idx in range(numslices):
        print("slice nr " + str(slice_idx))
        sld = processdata[slice_idx]['data']
        # NOTE(review): masking writes back into the caller's data in place.
        for idx in range(sld.shape[0]):
            sld[idx] = sld[idx]*ROImask
        binary, sm = segment_sequence(sld,segmodel = segmodel)
        regions, dum1, dum2 = extract_binary_regions(binary)
        regions = split_up_binary_regions(regions, opening_param = 1, mshape = disk(3))
        bestregions, bestindices = best_component(regions, ROIcenter,use_kernel = False, kernel_width = 5, do_plot = False)
        bestregions = cut_superfluous_parts(bestregions, regions, bestindices, do_plot = False)
        bestregions = wrapper_regions(bestregions)
        segmented[slice_idx] = {'roi_center': ROIcenter, 'roi_radii': ROIaxes,'segmask': bestregions.astype(bool)}
    return segmented
def best_component_ch(labeled_image):
    """Pick, per frame, the label that dominates the image's vertical midline.

    labeled_image: (frames, rows, cols) integer label stack. For each frame
    the modal non-zero label along the middle column is taken as the best
    class, and that component is extracted as a mask.

    Fix: `sh/2` is a float under Python 3 and cannot be used as an array
    index (TypeError) -- use floor division.
    """
    sh = labeled_image.shape[-1]
    selector = labeled_image[:, :, sh // 2]
    # Modal non-zero label along the midline of each frame.
    best_classes = np.array([scipy.stats.mstats.mode(s[s > 0]).mode[0] for s in selector])
    bestregions = np.array([extract_region(im, best_class_i) for im, best_class_i in zip(labeled_image, best_classes)])
    return bestregions, best_classes
def numpy_mu_sigma_erf(mu_erf, sigma_erf, eps=1e-7):
    """Evaluate the Gaussian CDF with mean mu_erf and std sigma_erf at x = 0..599.

    Returns a length-600 array of (erf(z) + 1) / 2 values; eps keeps the
    standard deviation strictly positive to avoid division by zero.
    """
    x_axis = np.arange(0, 600, dtype='float32')
    mu = np.tile(mu_erf, (600,))
    sigma = np.tile(sigma_erf, (600,)) + eps
    z = (x_axis - mu) / (sigma * np.sqrt(2))
    return (erf(z) + 1) / 2
if __name__ == "__main__":
DATA_PATH = "/home/oncilladock/"
do_plot = False
labels = pickle.load(open(DATA_PATH+"train.pkl"))
data_dump = []
if not os.path.isfile("segmentation.pkl"):
for patient_id in range(1, 701):
print("Looking for the pickle files...")
if patient_id<=TRAIN_PATIENT_IDS[1]:
files = sorted(glob.glob(os.path.expanduser(DATA_PATH+"pkl_train/%d/study/*.pkl" % patient_id)))
else:
files = sorted(glob.glob(os.path.expanduser(DATA_PATH+"pkl_validate/%d/study/*.pkl" % patient_id)))
ch2_file = [f for f in files if "2ch" in f][0]
if len([f for f in files if "4ch" in f]) > 0:
has_ch4 = True
ch4_file = [f for f in files if "4ch" in f][0]
else:
has_ch4 = False
ch4_file = ch2_file
sax_files = [f for f in files if "sax" in f]
print("%d sax files" % len(sax_files))
ch2_metadata = clean_metadata(pickle.load(open(ch2_file))["metadata"][0])
ch4_metadata = clean_metadata(pickle.load(open(ch4_file))["metadata"][0])
ch2_data = pickle.load(open(ch2_file))["data"]
ch4_data = pickle.load(open(ch4_file))["data"]
metadata_dict = dict()
for file in files:
if "sax" in file:
all_data = pickle.load(open(file,"r"))
metadata_dict[file] = all_data['metadata'][0]
datadict, sorted_indices, sorted_distances = slice_location_finder(metadata_dict)
# find top and bottom of my view
top_point_enhanced_metadata = datadict[sorted_indices[0]]["middle_pixel_position"]
bottom_point_enhanced_metadata = datadict[sorted_indices[-1]]["middle_pixel_position"]
top_point_enhanced_metadata = pickle.load(open(sorted_indices[0],"r"))['metadata'][0]
_enhance_metadata(top_point_enhanced_metadata, patient_id, slice_name = os.path.basename(sorted_indices[0]))
bottom_point_enhanced_metadata = pickle.load(open(sorted_indices[-1],"r"))['metadata'][0]
_enhance_metadata(bottom_point_enhanced_metadata, patient_id, slice_name = os.path.basename(sorted_indices[-1]))
OUTPUT_SIZE = 100
trf_2ch, trf_4ch = get_chan_transformations(
ch2_metadata=ch2_metadata,
ch4_metadata=ch4_metadata if has_ch4 else None,
top_point_metadata = top_point_enhanced_metadata,
bottom_point_metadata = bottom_point_enhanced_metadata,
output_width=OUTPUT_SIZE
)
ch4_result = np.array([fast_warp(ch4, trf_4ch, output_shape=(OUTPUT_SIZE, | |
print(
"\b\b\b{:02d}%".format(pct),
file=sys.stderr,
end="",
)
if args.gui:
worker.updateProgress(pct)
sys.stderr.flush()
os.remove("ffmpeg.log")
if os.path.exists(image["img"] + ".webp"):
ET.SubElement(asset, "type").text = "animatedImage"
ET.SubElement(asset, "resource").text = (
image["img"] + ".webp"
)
continue
except Exception:
import traceback
print(traceback.format_exc())
if args.gui:
worker.outputLog(
" - webm tiles are not supported, consider converting to an animated image or spritesheet: "
+ image["img"]
)
print(
" - webm tiles are not supported, consider converting to an animated image or a spritesheet:",
image["img"],
file=sys.stderr,
end="",
)
continue
else:
ET.SubElement(asset, "type").text = "image"
if image["img"].startswith("http"):
urllib.request.urlretrieve(
image["img"], os.path.basename(image["img"])
)
image["img"] = os.path.basename(image["img"])
if not os.path.exists(image["img"]):
if os.path.exists(os.path.splitext(image["img"])[0] + ".png"):
image["img"] = os.path.splitext(image["img"])[0] + ".png"
imgext = ".png"
else:
if args.gui:
worker.outputLog(" - MISSING RESOURCE: " + image["img"])
print(
" - MISSING RESOURCE:",
image["img"],
file=sys.stderr,
end="",
)
continue
img = PIL.Image.open(image["img"])
if (
img.width <= 300
and img.height <= 300
and 0.9 <= img.width / img.height <= 1.1
):
if "journal" in map and map["journal"]:
try:
from markerocr import placeMarker
placeMarker(img, map, image, mapentry, module, moduuid)
except:
pass
if imgext == ".webp" and args.jpeg != ".webp":
ET.SubElement(asset, "resource").text = (
os.path.splitext(image["img"])[0] + ".png"
)
if img.width > 4096 or img.height > 4096:
scale = (
4095 / img.width
if img.width >= img.height
else 4095 / img.height
)
img = img.resize(
(round(img.width * scale), round(img.height * scale))
)
if args.gui:
worker.outputLog(" - Converting tile from webp to png")
img.save(
os.path.join(
tempdir, os.path.splitext(image["img"])[0] + ".png"
)
)
os.remove(image["img"])
else:
ET.SubElement(asset, "resource").text = image["img"]
if img.width > 4096 or img.height > 4096:
scale = (
4095 / img.width
if img.width >= img.height
else 4095 / img.height
)
img = img.resize(
(round(img.width * scale), round(img.height * scale))
)
img.save(os.path.join(tempdir, image["img"]))
if "lights" in map:
for i in range(len(map["lights"])):
print(
"\rlights [{}/{}]".format(i, len(map["lights"])),
file=sys.stderr,
end="",
)
light = map["lights"][i]
if "config" in light:
light["dim"] = light["config"]["dim"]
light["bright"] = light["config"]["bright"]
if "color" in light["config"]:
light["tintColor"] = light["config"]["color"]
light["tintAlpha"] = light["config"]["alpha"]
if "lightAnimation" in light and light["lightAnimation"] and "type" in light["lightAnimation"] and light["lightAnimation"]["type"] == "ghost":
continue
lightel = ET.SubElement(
mapentry,
"light",
{
"id": str(
uuid.uuid5(moduuid, mapslug + "/lights/" + str(i) + "light")
)
},
)
ET.SubElement(lightel, "radiusMax").text = (
str(round(light["dim"])) if light["dim"] else "0"
)
ET.SubElement(lightel, "radiusMin").text = (
str(round(light["bright"])) if light["bright"] else "0"
)
ET.SubElement(lightel, "color").text = (
light["tintColor"]
if "tintColor" in light and light["tintColor"]
else "#ffffff"
)
ET.SubElement(lightel, "opacity").text = str(light["tintAlpha"])
ET.SubElement(lightel, "alwaysVisible").text = (
"YES" if "t" in light and light["t"] == "u" else "NO"
)
ET.SubElement(lightel, "x").text = str(
round((light["x"] - map["offsetX"]) * map["rescale"])
)
ET.SubElement(lightel, "y").text = str(
round((light["y"] - map["offsetY"]) * map["rescale"])
)
if "tokens" in map and len(map["tokens"]) > 0:
# encentry = ET.SubElement(module,'encounter',{'id': str(uuid.uuid5(moduuid,mapslug+"/encounter")),'parent': str(uuid.uuid5(moduuid,map['_id']+map['name'])), 'sort': '1'})
# ET.SubElement(encentry,'name').text = map['name'] + " Encounter"
# ET.SubElement(encentry,'slug').text = slugify(map['name'] + " Encounter")
for token in map["tokens"]:
if "dimLight" not in token:
token["dimLight"] = token["light"]["dim"] if "light" in token else 0
if "brightLight" not in token:
token["brightLight"] = token["light"]["bright"] if "light" in token else 0
if "lightAlpha" not in token:
token["lightAlpha"] = token["light"]["alpha"] if "light" in token else 1
if 4 <= map["gridType"] <= 5:
tokenOffsetX = round(
((2 * map["grid"] * 0.75 * token["width"]) + (map["grid"] / 2))
/ 2
)
tokenOffsetY = round(
(math.sqrt(3) * map["grid"] * token["height"]) / 2
)
if map["gridType"] == 5:
tokenOffsetX += round(map["grid"])
token["scale"] /= 0.8
elif 2 <= map["gridType"] <= 3:
tokenOffsetX = round(
(math.sqrt(3) * map["grid"] * token["width"]) / 2
)
tokenOffsetY = round(
((2 * map["grid"] * 0.75 * token["height"]) + (map["grid"] / 2))
/ 2
)
if map["gridType"] == 3:
tokenOffsetX += round(map["grid"])
else:
tokenOffsetX = round(token["width"] * (map["grid"] / 2))
tokenOffsetY = round(token["height"] * (map["grid"] / 2))
tokenel = ET.SubElement(
mapentry,
"token",
{
"id": str(
uuid.uuid5(moduuid, mapslug + "/token/" + token["_id"])
)
},
)
ET.SubElement(tokenel, "name").text = token["name"]
ET.SubElement(tokenel, "x").text = str(
round(((token["x"] - map["offsetX"]) * map["rescale"]))
+ tokenOffsetX
)
ET.SubElement(tokenel, "y").text = str(
round(((token["y"] - map["offsetY"]) * map["rescale"]))
+ tokenOffsetY
)
if os.path.exists(token["img"]):
tokenasset = ET.SubElement(
tokenel,
"asset",
{
"id": str(
uuid.uuid5(
moduuid,
mapslug + "/token/" + token["_id"] + "/asset",
)
)
},
)
ET.SubElement(tokenasset, "name").text = token["name"]
ET.SubElement(tokenasset, "type").text = "image"
ET.SubElement(tokenasset, "resource").text = token["img"]
ET.SubElement(tokenel, "hidden").text = (
"YES" if token["hidden"] else "NO"
)
ET.SubElement(tokenel, "scale").text = str(token["scale"])
if token["width"] == token["height"] and 1 <= token["width"] <= 6:
ET.SubElement(tokenel, "size").text = (
"C"
if token["width"] > 4
else "G"
if token["width"] > 3
else "H"
if token["width"] > 2
else "L"
if token["width"] > 1
else "M"
)
elif token["width"] == token["height"] and token["width"] < 1:
ET.SubElement(tokenel, "size").text = (
"T" if token["width"] <= 0.5 else "S"
)
else:
ET.SubElement(tokenel, "size").text = "{}x{}".format(
token["width"], token["height"]
)
ET.SubElement(tokenel, "rotation").text = str(token["rotation"])
ET.SubElement(tokenel, "elevation").text = str(token["elevation"])
vision = ET.SubElement(
tokenel,
"vision",
{
"id": str(
uuid.uuid5(
moduuid, mapslug + "/token/" + token["_id"] + "/vision"
)
)
},
)
ET.SubElement(vision, "enabled").text = (
"YES" if token["vision"] else "NO"
)
ET.SubElement(vision, "light").text = (
"YES" if int(token["dimLight"]) > 0 or int(token["brightLight"]) > 0 else "NO"
)
ET.SubElement(vision, "lightRadiusMin").text = str(
round(token["brightLight"])
)
ET.SubElement(vision, "lightRadiusMax").text = str(
round(token["dimLight"])
)
ET.SubElement(vision, "lightOpacity").text = str(token["lightAlpha"])
ET.SubElement(vision, "dark").text = (
"YES" if int(token["dimSight"]) > 0 or int(token["brightSight"]) > 0 else "NO"
)
ET.SubElement(vision, "darkRadiusMin").text = str(
round(int(token["brightSight"]))
)
ET.SubElement(vision, "darkRadiusMax").text = str(
round(int(token["dimSight"]))
)
actorLinked = False
for a in actors:
if a["_id"] == token["actorId"]:
ET.SubElement(tokenel, "reference").text = "/monster/{}".format(
uuid.uuid5(moduuid, a["_id"])
if args.compendium
else slugify(a["name"])
)
actorLinked = True
break
if not actorLinked and args.compendium:
for a in actors:
if a["token"]["name"] == token["name"]:
ET.SubElement(
tokenel, "reference"
).text = "/monster/{}".format(
uuid.uuid5(moduuid, a["_id"])
if args.compendium
else slugify(a["name"])
)
actorLinked = True
break
if not actorLinked:
ET.SubElement(tokenel, "reference").text = "/monster/{}".format(
slugify(token["name"])
)
if "drawings" in map and len(map["drawings"]) > 0:
for d in map["drawings"]:
if d["type"] == "t":
with PIL.Image.new(
"RGBA",
(round(d["width"]), round(d["height"])),
color=(0, 0, 0, 0),
) as img:
d["fontSize"] = round(d["fontSize"] / 0.75)
try:
font = PIL.ImageFont.truetype(
os.path.join(
moduletmp,
mod["name"],
"fonts",
d["fontFamily"] + ".ttf",
),
size=d["fontSize"],
)
except Exception:
try:
font = PIL.ImageFont.truetype(
d["fontFamily"] + ".ttf", size=d["fontSize"]
)
except Exception as e:
try:
solbera = {
"bookmania": "https://raw.githubusercontent.com/jonathonf/solbera-dnd-fonts/master/Bookinsanity/Bookinsanity.otf",
"scala sans caps": "https://raw.githubusercontent.com/jonathonf/solbera-dnd-fonts/master/Scaly%20Sans%20Caps/Scaly%20Sans%20Caps.otf",
"modesto condensed": "https://raw.githubusercontent.com/jonathonf/solbera-dnd-fonts/master/Nodesto%20Caps%20Condensed/Nodesto%20Caps%20Condensed.otf",
"mrs eaves small caps": "https://raw.githubusercontent.com/jonathonf/solbera-dnd-fonts/master/Mr%20Eaves/Mr%20Eaves%20Small%20Caps.otf",
"dai vernon misdirect": "https://raw.githubusercontent.com/jonathonf/solbera-dnd-fonts/master/Zatanna%20Misdirection/Zatanna%20Misdirection.otf",
"scala sans": "https://raw.githubusercontent.com/jonathonf/solbera-dnd-fonts/master/Scaly%20Sans/Scaly%20Sans.otf",
}
if d["fontFamily"].lower() in solbera.keys():
urllib.request.urlretrieve(
solbera[d["fontFamily"].lower()],
d["fontFamily"] + ".otf",
)
font = PIL.ImageFont.truetype(
d["fontFamily"] + ".otf", size=d["fontSize"]
)
else:
urllib.request.urlretrieve(
"https://raw.githubusercontent.com/google/fonts/master/ofl/{}/METADATA.pb".format(
urllib.parse.quote(
d["fontFamily"].lower()
)
),
d["fontFamily"] + ".pb",
)
protobuf_file_path = d["fontFamily"] + ".pb"
protobuf_file = open(protobuf_file_path, "r")
protobuf = protobuf_file.read()
font_family = fonts_public_pb2.FamilyProto()
text_format.Merge(protobuf, font_family)
urllib.request.urlretrieve(
"https://raw.githubusercontent.com/google/fonts/master/ofl/{}/{}".format(
urllib.parse.quote(
d["fontFamily"].lower()
),
font_family.fonts[0].filename,
),
d["fontFamily"] + ".ttf",
)
font = PIL.ImageFont.truetype(
d["fontFamily"] + ".ttf", size=d["fontSize"]
)
except Exception as e:
print(
'\rUnable to load font for "{}"'.format(
d["fontFamily"]
),
e,
file=sys.stderr,
end="\n",
)
font = PIL.ImageFont.load_default()
text = d["text"]
draw = PIL.ImageDraw.Draw(img)
if draw.multiline_textsize(text, font=font)[0] > round(
d["width"]
):
words = text.split(" ")
text = ""
for i in range(len(words)):
if draw.multiline_textsize(
text + " " + words[i], font=font
)[0] <= round(d["width"]):
text += " " + words[i]
else:
text += "\n" + words[i]
draw.multiline_text(
(0, 0), text, (255, 255, 255), spacing=0, font=font
)
img.save(os.path.join(tempdir, "text_" + d["_id"] + ".png"))
tile = ET.SubElement(mapentry, "tile")
ET.SubElement(tile, "x").text = str(
round(
(d["x"] - map["offsetX"] + (d["width"] / 2))
* map["rescale"]
)
)
ET.SubElement(tile, "y").text = str(
round(
(d["y"] - map["offsetY"] + (d["height"] / 2))
* map["rescale"]
)
)
ET.SubElement(tile, "zIndex").text = str(d["z"])
ET.SubElement(tile, "width").text = str(
round(d["width"] * map["rescale"])
)
ET.SubElement(tile, "height").text = str(
round(d["height"] * map["rescale"])
)
ET.SubElement(tile, "opacity").text = "1.0"
ET.SubElement(tile, "rotation").text = str(d["rotation"])
ET.SubElement(tile, "locked").text = "YES" if d["locked"] else "NO"
ET.SubElement(tile, "layer").text = "object"
ET.SubElement(tile, "hidden").text = "YES" if d["hidden"] else "NO"
asset = ET.SubElement(tile, "asset")
ET.SubElement(asset, "name").text = d["text"]
ET.SubElement(asset, "type").text = "image"
ET.SubElement(asset, "resource").text = "text_" + d["_id"] + ".png"
elif d["type"] == "p":
drawing = ET.SubElement(
mapentry, "drawing", {"id": str(uuid.uuid5(moduuid, d["_id"]))}
)
ET.SubElement(drawing, "layer").text = (
"dm" | |
'_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-WS-Std by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-WS-Std by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_SS_WS_Std_adjustment(item.copy())
print ("SS-WS-Std: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-WS-Std' + '_' + 'class_' + str(className))
adjustment_name = str('SS_WS_Std' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-WS-Std by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-WS-Std by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_SS_WS_Std_adjustment(item.copy())
print ("SS-WS-Std: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-WS-Std' + '_' + 'class_' + str(className))
emptyclassFlag = False
adjustment_name = str('SS_WS_Std' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# **************************************************************** #
# Site Specific LTERRA for WC 1HZ Data Adjustment (G-LTERRA_WC_1HZ)
if method != 'SS-LTERRA-WC-1HZ':
pass
elif method == 'SS-LTERRA-WC-1HZ' and adjustments_metadata['SS-LTERRA-WC-1HZ'] == False:
pass
else:
print('Applying Adjustment Method: SS-LTERRA-WC-1HZ')
logger.info('Applying Adjustment Method: SS-LTERRA-WC-1HZ')
# ******************************************************************* #
# Site Specific LTERRA WC Machine Learning Adjustment (SS-LTERRA-MLa)
# Random Forest Regression with now ancillary columns
if method != 'SS-LTERRA-MLa':
pass
elif method == 'SS-LTERRA-MLa' and adjustments_metadata['SS-LTERRA-MLa'] == False:
pass
else:
print('Applying Adjustment Method: SS-LTERRA-MLa')
logger.info('Applying Adjustment Method: SS-LTERRA-MLa')
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_ML_adjustment(inputdata.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS_LTERRA_MLa'
adjustment_name = 'SS_LTERRA_MLa'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: SS-LTERRA MLa by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-LTERRA MLa by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c= perform_SS_LTERRA_ML_adjustment(item[primary_idx].copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS_LTERRA_MLa' + '_' + 'class_' + str(className))
adjustment_name = str('SS_LTERRA_MLa' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-LTERRA MLa by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-LTERRA MLa by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_ML_adjustment(item.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-LTERRA_MLa' + '_' + 'class_' + str(className))
adjustment_name = str('SS_LTERRA_ML' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-LTERRA_MLa by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-LTERRA_MLa by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_ML_adjustment(item.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS_LTERRA_MLa' + '_' + 'class_' + str(className))
emptyclassFlag = False
adjustment_name = str('SS_LTERRA_MLa' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ************************************************************************************ #
# Site Specific LTERRA WC (w/ stability) Machine Learning Adjustment (SS-LTERRA_MLc)
if method != 'SS-LTERRA-MLc':
pass
elif method == 'SS-LTERRA-MLc' and adjustments_metadata['SS-LTERRA-MLc'] == False:
pass
else:
print('Applying Adjustment Method: SS-LTERRA-MLc')
logger.info('Applying Adjustment Method: SS-LTERRA-MLc')
all_trainX_cols = ['x_train_TI', 'x_train_TKE','x_train_WS','x_train_DIR','x_train_Hour']
all_trainY_cols = ['y_train']
all_testX_cols = ['x_test_TI','x_test_TKE','x_test_WS','x_test_DIR','x_test_Hour']
all_testY_cols = ['y_test']
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_S_ML_adjustment(inputdata.copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS_LTERRA_MLc'
adjustment_name = 'SS_LTERRA_MLc'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj, Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: SS-LTERRA_MLc by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-LTERRA_MLc by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c= perform_SS_LTERRA_S_ML_adjustment(item[primary_idx].copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS_LTERRA_MLc' + '_' + 'class_' + str(className))
adjustment_name = str('SS_LTERRA_MLc' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-LTERRA_MLc by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-LTERRA_MLc by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_S_ML_adjustment(item.copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-LTERRA_MLc' + '_' + 'class_' + str(className))
adjustment_name = str('SS_LTERRA_S_ML' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-LTERRA_MLc by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-LTERRA_MLc by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_S_ML_adjustment(item.copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS_LTERRA_MLc' + '_' + 'class_' + str(className))
emptyclassFlag = False
adjustment_name = str('SS_LTERRA_MLc' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# *********************** #
# Site Specific SS-LTERRA-MLb
if method != 'SS-LTERRA-MLb':
pass
elif method == 'SS-LTERRA-MLb' and adjustments_metadata['SS-LTERRA-MLb'] == False:
pass
else:
print('Applying Adjustment Method: SS-LTERRA-MLb')
logger.info('Applying Adjustment Method: SS-LTERRA-MLb')
all_trainX_cols = ['x_train_TI', 'x_train_TKE']
all_trainY_cols = ['y_train']
all_testX_cols = ['x_test_TI','x_test_TKE']
all_testY_cols = ['y_test']
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_S_ML_adjustment(inputdata.copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS_LTERRA_MLb'
adjustment_name = 'SS_LTERRA_MLb'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj, Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: SS-LTERRA_MLb by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-LTERRA_MLb by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c= perform_SS_LTERRA_S_ML_adjustment(item[primary_idx].copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS_LTERRA_MLb' + '_' + 'class_' + str(className))
adjustment_name = str('SS_LTERRA_MLb' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-LTERRA_MLb by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-LTERRA_MLb by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_S_ML_adjustment(item.copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-LTERRA_MLb' + '_' + 'class_' + str(className))
adjustment_name = str('SS_LTERRA_MLb' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-LTERRA_MLb by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-LTERRA_MLb by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_S_ML_adjustment(item.copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS_LTERRA_MLb' + '_' + 'class_' + str(className))
emptyclassFlag = False
adjustment_name = str('SS_LTERRA_MLb' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# *********************** #
# TI Extrapolation (TI-Ext)
if method != 'TI-Extrap':
pass
elif method == 'TI-Extrap' and adjustments_metadata['TI-Extrap'] == False:
pass
else:
print ('Found enough data to perform extrapolation comparison')
block_print()
# Get extrapolation height
height_extrap = float(extrap_metadata['height'][extrap_metadata['type'] == 'extrap'])
# Extrapolate
inputdata_adj, lm_adj, shearTimeseries= perform_TI_extrapolation(inputdata.copy(), extrap_metadata,
extrapolation_type, height)
adjustment_name = 'TI_EXTRAP'
lm_adj['adjustment'] = adjustment_name
inputdataEXTRAP = inputdata_adj.copy()
inputdataEXTRAP, baseResultsLists = extrap_configResult(extrapolation_type, inputdataEXTRAP, baseResultsLists, method,lm_adj)
if RSDtype['Selection'][0:4] == 'Wind':
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, | |
import os
import argparse
from datetime import datetime
import time

import matplotlib
# BUG FIX: select the GUI backend BEFORE pyplot is imported. Calling
# matplotlib.use() after "import matplotlib.pyplot" (as the original did)
# is unreliable/deprecated and may leave the wrong backend active.
matplotlib.use("Qt5Agg")
import matplotlib.pyplot as plt

import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
import numpy as np
import pandas as pd
from tqdm import tqdm
from tensorboardX import SummaryWriter

import data
import track
import model
import utils
def main():
global net
global test_loader
global scatter
parser = argparse.ArgumentParser()
# generic params
parser.add_argument(
"--name",
default=datetime.now().strftime("%Y-%m-%d_%H:%M:%S"),
help="Name to store the log file as",
)
parser.add_argument("--resume", help="Path to log file to resume from")
parser.add_argument("--encoder", default="FSEncoder", help="Encoder")
parser.add_argument("--decoder", default="DSPN", help="Decoder")
parser.add_argument(
"--epochs", type=int, default=10, help="Number of epochs to train with"
)
parser.add_argument(
"--latent", type=int, default=32, help="Dimensionality of latent space"
)
parser.add_argument(
"--dim", type=int, default=64, help="Dimensionality of hidden layers"
)
parser.add_argument(
"--lr", type=float, default=1e-2, help="Outer learning rate of model"
)
parser.add_argument(
"--batch-size", type=int, default=12, help="Batch size to train with"
)
parser.add_argument(
"--num-workers",
type=int,
default=0,
help="Number of threads for data loader"
)
parser.add_argument(
"--dataset",
choices=[
"mnist", "clevr-box", "clevr-state", "cats", "merged", "wflw"
],
help="Which dataset to use",
)
parser.add_argument(
"--no-cuda",
action="store_true",
help="Run on CPU instead of GPU (not recommended)",
)
parser.add_argument(
"--train-only",
action="store_true",
help="Only run training, no evaluation"
)
parser.add_argument(
"--eval-only",
action="store_true",
help="Only run evaluation, no training"
)
parser.add_argument(
"--multi-gpu",
action="store_true",
help="Use multiple GPUs"
)
parser.add_argument(
"--show",
action="store_true",
help="Plot generated samples in Tensorboard"
)
parser.add_argument(
"--show-skip",
type=int,
default=1,
help="Number of epochs to skip before exporting to Tensorboard"
)
parser.add_argument(
"--infer-name",
action="store_true",
help="Automatically name run based on dataset/run number"
)
parser.add_argument("--supervised", action="store_true", help="")
parser.add_argument(
"--baseline",
action="store_true",
help="Use baseline model"
)
parser.add_argument(
"--export-dir", type=str, help="Directory to output samples to")
parser.add_argument(
"--export-n",
type=int,
default=10 ** 9,
help="How many samples to output"
)
parser.add_argument(
"--export-progress",
action="store_true",
help="Output intermediate set predictions for DSPN?",
)
parser.add_argument(
"--full-eval",
action="store_true",
help="Use full evaluation set (default: 1/10 of evaluation data)",
# don't need full evaluation when training to save some time
)
parser.add_argument(
"--mask-feature",
action="store_true",
help="Treat mask as a feature to compute loss with",
)
parser.add_argument(
"--inner-lr",
type=float,
default=800,
help="Learning rate of DSPN inner optimisation",
)
parser.add_argument(
"--iters",
type=int,
default=10,
help="How many DSPN inner optimisation iteration to take",
)
parser.add_argument(
"--huber-repr",
type=float,
default=1,
help="Scaling of repr loss term for DSPN supervised learning",
)
parser.add_argument(
"--loss",
choices=["hungarian", "chamfer", "emd"],
default="emd",
help="Type of loss used",
)
parser.add_argument(
"--export-csv",
action="store_true",
help="Only perform predictions, don't evaluate in any way"
)
parser.add_argument(
"--eval-split",
help="Overwrite split on test set"
)
args = parser.parse_args()
if args.infer_name:
if args.baseline:
prefix = "base"
else:
prefix = "dspn"
used_nums = []
if not os.path.exists("runs"):
os.makedirs("runs")
runs = os.listdir("runs")
for run in runs:
if args.dataset in run:
used_nums.append(int(run.split("-")[-1]))
num = 1
while num in used_nums:
num += 1
name = f"{prefix}-{args.dataset}-{num}"
else:
name = args.name
print(f"Saving run to runs/{name}")
train_writer = SummaryWriter(f"runs/{name}", purge_step=0)
net = model.build_net(args)
if not args.no_cuda:
net = net.cuda()
if args.multi_gpu:
net = torch.nn.DataParallel(net)
optimizer = torch.optim.Adam(
[p for p in net.parameters() if p.requires_grad], lr=args.lr
)
print("Building dataloader")
if args.dataset == "mnist":
dataset_train = data.MNISTSet(train=True, full=args.full_eval)
dataset_test = data.MNISTSet(train=False, full=args.full_eval)
elif args.dataset in ["clevr-box", "clevr-state"]:
dataset_train = data.CLEVR(
"clevr",
"train",
box=args.dataset == "clevr-box",
full=args.full_eval
)
dataset_test = data.CLEVR(
"clevr",
"val",
box=args.dataset == "clevr-box",
full=args.full_eval
)
elif args.dataset == "cats":
dataset_train = data.Cats("cats", "train", 9, full=args.full_eval)
dataset_test = data.Cats("cats", "val", 9, full=args.full_eval)
elif args.dataset == "faces":
dataset_train = data.Faces("faces", "train", 4, full=args.full_eval)
dataset_test = data.Faces("faces", "val", 4, full=args.full_eval)
elif args.dataset == "wflw":
if args.eval_split:
eval_split = f"test_{args.eval_split}"
else:
eval_split = "test"
dataset_train = data.WFLW("wflw", "train", 7, full=args.full_eval)
dataset_test = data.WFLW("wflw", eval_split, 7, full=args.full_eval)
elif args.dataset == "merged":
# merged cats and human faces
dataset_train_cats = data.Cats("cats", "train", 9, full=args.full_eval)
dataset_train_wflw = data.WFLW("wflw", "train", 9, full=args.full_eval)
dataset_test_cats = data.Cats("cats", "val", 9, full=args.full_eval)
dataset_test_wflw = data.WFLW("wflw", "test", 9, full=args.full_eval)
dataset_train = data.MergedDataset(
dataset_train_cats,
dataset_train_wflw
)
dataset_test = data.MergedDataset(
dataset_test_cats,
dataset_test_wflw
)
if not args.eval_only:
train_loader = data.get_loader(
dataset_train,
batch_size=args.batch_size,
num_workers=args.num_workers
)
if not args.train_only:
test_loader = data.get_loader(
dataset_test,
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=False
)
tracker = track.Tracker(
train_mae=track.ExpMean(),
train_last=track.ExpMean(),
train_loss=track.ExpMean(),
test_mae=track.Mean(),
test_last=track.Mean(),
test_loss=track.Mean(),
)
if args.resume:
log = torch.load(args.resume)
weights = log["weights"]
n = net
if args.multi_gpu:
n = n.module
n.load_state_dict(weights, strict=True)
if args.export_csv:
names = []
predictions = []
export_targets = []
def run(net, loader, optimizer, train=False, epoch=0, pool=None):
writer = train_writer
if train:
net.train()
prefix = "train"
torch.set_grad_enabled(True)
else:
net.eval()
prefix = "test"
torch.set_grad_enabled(False)
if args.export_dir:
true_export = []
pred_export = []
iters_per_epoch = len(loader)
loader = tqdm(
loader,
ncols=0,
desc="{1} E{0:02d}".format(epoch, "train" if train else "test "),
)
for i, sample in enumerate(loader, start=epoch * iters_per_epoch):
# input is either a set or an image
input, target_set, target_mask = map(lambda x: x.cuda(), sample)
# forward evaluation through the network
(progress, masks, evals, gradn), (y_enc, y_label) = net(
input, target_set, target_mask
)
progress_only = progress
# if using mask as feature, concat mask feature into progress
if args.mask_feature:
target_set = torch.cat(
[target_set, target_mask.unsqueeze(dim=1)], dim=1
)
progress = [
torch.cat([p, m.unsqueeze(dim=1)], dim=1)
for p, m in zip(progress, masks)
]
if args.loss == "chamfer":
# dim 0 is over the inner iteration steps
# target set is broadcasted over dim 0
set_loss = utils.chamfer_loss(
torch.stack(progress), target_set.unsqueeze(0)
)
elif args.loss == "hungarian":
set_loss = utils.hungarian_loss(
progress[-1], target_set, thread_pool=pool
).unsqueeze(0)
elif args.loss == "emd":
set_loss = utils.emd(progress[-1], target_set).unsqueeze(0)
# Only use representation loss with DSPN and when doing general
# supervised prediction, not when auto-encoding
if args.supervised and not args.baseline:
repr_loss = args.huber_repr * F.smooth_l1_loss(y_enc, y_label)
loss = set_loss.mean() + repr_loss.mean()
else:
loss = set_loss.mean()
# restore progress variable to not contain masks for correct
# exporting
progress = progress_only
# Outer optim step
if train:
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Tensorboard tracking of metrics for debugging
tracked_last = tracker.update(
f"{prefix}_last", set_loss[-1].item()
)
tracked_loss = tracker.update(f"{prefix}_loss", loss.item())
if train:
writer.add_scalar(
"metric/set-loss",
loss.item(),
global_step=i
)
writer.add_scalar(
"metric/set-last",
set_loss[-1].mean().item(),
global_step=i
)
if not args.baseline:
writer.add_scalar(
"metric/eval-first",
evals[0].mean().item(),
global_step=i
)
writer.add_scalar(
"metric/eval-last",
evals[-1].mean().item(),
global_step=i
)
writer.add_scalar(
"metric/max-inner-grad-norm",
max(g.item() for g in gradn),
global_step=i
)
writer.add_scalar(
"metric/mean-inner-grad-norm",
sum(g.item() for g in gradn)/len(gradn),
global_step=i
)
if args.supervised:
writer.add_scalar(
"metric/repr_loss",
repr_loss.item(),
global_step=i
)
# Print current progress to progress bar
fmt = "{:.6f}".format
loader.set_postfix(
last=fmt(tracked_last),
loss=fmt(tracked_loss),
bad=fmt(evals[-1].detach().cpu().item() * 1000)
if not args.baseline
else 0
)
if args.export_dir:
# export last inner optim of each input as csv
# (one input per row)
if args.export_csv:
# the second to last element are the last of the
# inner optim
for batch_i, p in enumerate(progress[-2]):
img_id = i * args.batch_size + batch_i
names.append(loader.iterable.dataset.get_fname(img_id))
m = masks[-2][batch_i]
m = m.cpu().detach().numpy().astype(bool)
p = p.cpu().detach().numpy()
p = p[:, m]
sample_preds = [
p[k % 2, k // 2] for k in range(p.shape[1] * 2)
]
# remove values according to mask and add zeros to the
# end instead
sample_preds += [0] * (len(m) * 2 - len(sample_preds))
predictions.append(sample_preds)
true_mask = target_set[batch_i, 2, :].cpu().detach()
true_mask = true_mask.numpy().astype(bool)
trues = target_set[batch_i, :2, :]
trues = trues.cpu().detach().numpy()
t = trues[:, true_mask]
t = [t[k % 2, k // 2] for k in range(t.shape[1] * 2)]
t += [0] * (len(true_mask) * 2 - len(t))
export_targets.append(t)
# Store predictions to be exported
else:
if len(true_export) < args.export_n:
for p, m in zip(target_set, target_mask):
true_export.append(p.detach().cpu())
progress_steps = []
for pro, ms in zip(progress, masks):
# pro and ms are one step of the inner optim
# score boxes contains the list of predicted
# elements for one step
score_boxes = []
for p, m in zip(
pro.cpu().detach(),
ms.cpu().detach()):
score_box = torch.cat(
[m.unsqueeze(0), p],
dim=0
)
score_boxes.append(score_box)
progress_steps.append(score_boxes)
for b in zip(*progress_steps):
pred_export.append(b)
# Plot predictions in Tensorboard
if args.show and epoch % args.show_skip == 0 and not train:
name = f"set/epoch-{epoch}/img-{i}"
# thresholded set
progress.append(progress[-1])
masks.append((masks[-1] > 0.5).float())
# target set
if args.mask_feature:
# target set is augmented with masks, so remove them
progress.append(target_set[:, :-1])
else:
progress.append(target_set)
masks.append(target_mask)
# intermediate sets
for j, (s, ms) in enumerate(zip(progress, masks)):
if args.dataset == "clevr-state":
continue
if args.dataset.startswith("clevr"):
threshold = 0.5
else:
threshold = None
s, ms = utils.scatter_masked(
s,
ms,
binned=args.dataset.startswith("clevr"),
threshold=threshold
)
if j != len(progress) - 1:
tag_name = f"{name}"
| |
<filename>scripts/preprocessing/bcdi_read_BCDI_scan.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-present : DESY PHOTON SCIENCE
# authors:
# <NAME>, <EMAIL>
try:
import hdf5plugin # for P10, should be imported before h5py or PyTables
except ModuleNotFoundError:
pass
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d
import bcdi.graph.graph_utils as gu
import bcdi.preprocessing.bcdi_utils as bu
import bcdi.utils.utilities as util
from bcdi.experiment.setup import Setup
from bcdi.graph.colormap import ColormapFactory
helptext = """
Open a rocking curve data, plot the mask, the monitor and the stack along the first
axis.
It is useful when you want to localize the Bragg peak for ROI determination.
Supported beamlines: ESRF ID01, PETRAIII P10, SOLEIL SIXS, SOLEIL CRISTAL.
"""
# ----------------------- user-defined parameters ----------------------- #
scan = 128  # scan number to load
root_folder = "D:/data/P10_2nd_test_isosurface_Dec2020/data_nanolab/"
data_dir = None
# leave None to use the beamline default. It will look for the data at this location
sample_name = "PtNP1" # string in front of the scan number in the folder name
save_dir = root_folder + "dataset_1_newpsf/test/"
# images will be saved here, leave it to None otherwise
# (default to data directory's parent)
save_mask = False # set to True to save the mask
debug = True # True to see more plots
binning = (1, 1, 1) # binning to apply to the data
# (stacking dimension, detector vertical axis, detector horizontal axis)
###############################
# beamline related parameters #
###############################
beamline = "P10"
# name of the beamline, used for data loading and normalization by monitor
# supported beamlines: 'ID01', 'SIXS_2018', 'SIXS_2019', 'CRISTAL', 'P10', 'NANOMAX'
actuators = None # {'rocking_angle': 'actuator_1_3'}
# Optional dictionary that can be used to define the entries corresponding to
# actuators in data files
# (useful at CRISTAL where the location of data keeps changing)
# e.g. {'rocking_angle': 'actuator_1_3', 'detector': 'data_04', 'monitor': 'data_05'}
custom_scan = False # True for a stack of images acquired without scan,
# e.g. with ct in a macro (no info in spec file)
custom_images = np.arange(11353, 11453, 1) # list of image numbers for the custom_scan
custom_monitor = np.ones(len(custom_images))
# monitor values for normalization for the custom_scan
custom_motors = {
    "eta": np.linspace(16.989, 18.989, num=100, endpoint=False),
    "phi": 0,
    "nu": -0.75,
    "delta": 36.65,
}
# ID01: eta, phi, nu, delta
# CRISTAL: mgomega, gamma, delta
# P10: om, phi, chi, mu, gamma, delta
# SIXS: beta, mu, gamma, delta
rocking_angle = "outofplane" # "outofplane" or "inplane"
is_series = True # specific to series measurement at P10
specfile_name = ""
# .spec for ID01, .fio for P10, alias_dict.txt for SIXS_2018,
# not used for CRISTAL and SIXS_2019
# template for ID01: name of the spec file without '.spec'
# template for SIXS_2018: full path of the alias dictionary 'alias_dict.txt',
# typically: root_folder + 'alias_dict.txt'
# template for all other beamlines: ''
###############################
# detector related parameters #
###############################
detector = "Eiger4M" # "Eiger2M" or "Maxipix" or "Eiger4M" or 'Merlin'
x_bragg = 1355 # horizontal pixel number of the Bragg peak,
# leave None for automatic detection (using the max)
y_bragg = 796 # vertical pixel number of the Bragg peak,
# leave None for automatic detection (using the max)
# NOTE(review): the line below assumes x_bragg and y_bragg are not None;
# with automatic detection (None) this raises a TypeError — set both or
# define the ROI after the peak search.
roi_detector = [y_bragg - 200, y_bragg + 200, x_bragg - 200, x_bragg + 200]
# roi_detector = [y_bragg - 168, y_bragg + 168, x_bragg - 140, x_bragg + 140] # CH5309
# roi_detector = [552, 1064, x_bragg - 240, x_bragg + 240] # P10 2018
# roi_detector = [y_bragg - 290, y_bragg + 350, x_bragg - 350, x_bragg + 350] # PtRh Ar
# [Vstart, Vstop, Hstart, Hstop]
# leave None to use the full detector. Use with center_fft='skip'
# if you want this exact size.
peak_method = "max" # Bragg peak determination: 'max', 'com' or 'maxcom'.
normalize = "monitor"
# 'monitor' to return the default monitor values, 'skip' to do nothing
high_threshold = 500000 # everything above will be considered as hotpixel
hotpixels_file = "" # root_folder + 'hotpixels_cristal.npz'
flatfield_file = "" # root_folder + "flatfield_maxipix_8kev.npz" #
template_imagefile = "_master.h5"
# template for ID01: 'data_mpx4_%05d.edf.gz' or 'align_eiger2M_%05d.edf.gz'
# template for SIXS_2018: 'align.spec_ascan_mu_%05d.nxs'
# template for SIXS_2019: 'spare_ascan_mu_%05d.nxs'
# template for Cristal: 'S%d.nxs'
# template for P10: '_master.h5'
# template for NANOMAX: '%06d.h5'
######################
# setup for the plot #
######################
vmin = 0 # min of the colorbar (log scale)
vmax = 6 # max of the colorbar (log scale)
low_threshold = 1 # everything <= 1 will be set to 0 in the plot
width = None # [50, 50] # [vertical, horizontal], leave None for default
# half width in pixels of the region of interest centered on the peak for the plot
##################################
# end of user-defined parameters #
##################################
###################
# define colormap #
###################
bad_color = "1.0" # white background
my_cmap = ColormapFactory(bad_color=bad_color).cmap
plt.ion()  # interactive mode so plots do not block the script
########################################
# initialize and check some parameters #
########################################
save_dirname = "pynxraw"
# Load the optional correction arrays (both loaders accept "" and return None-like
# defaults in that case — see bcdi.utils.utilities).
flatfield = util.load_flatfield(flatfield_file)
hotpix_array = util.load_hotpixels(hotpixels_file)
# Validate the user choice early, before any expensive loading.
if normalize not in {"skip", "monitor"}:
    raise ValueError(
        f"Invalid setting {normalize} for normalize,"
        " allowed values are 'skip' and 'monitor'"
    )
####################
# Initialize setup #
####################
setup = Setup(
    beamline_name=beamline,
    rocking_angle=rocking_angle,
    custom_scan=custom_scan,
    custom_images=custom_images,
    custom_monitor=custom_monitor,
    custom_motors=custom_motors,
    actuators=actuators,
    is_series=is_series,
    detector_name=detector,
    template_imagefile=template_imagefile,
    roi=roi_detector,
    binning=binning,
)
########################################
# print the current setup and detector #
########################################
print("\n##############\nSetup instance\n##############")
print(setup)
print("\n#################\nDetector instance\n#################")
print(setup.detector)
########################
# initialize the paths #
########################
setup.init_paths(
    sample_name=sample_name,
    scan_number=scan,
    root_folder=root_folder,
    save_dir=save_dir,
    save_dirname=save_dirname,
    specfile_name=specfile_name,
    template_imagefile=template_imagefile,
    data_dir=data_dir,
)
logfile = setup.create_logfile(
    scan_number=scan, root_folder=root_folder, filename=setup.detector.specfile
)
#################
# load the data #
#################
data, mask, monitor, frames_logical = setup.loader.load_check_dataset(
    scan_number=scan,
    setup=setup,
    flatfield=flatfield,
    hotpixels=hotpix_array,
    normalize=normalize,
    debugging=debug,
)
numz, numy, numx = data.shape
print(f"Data shape: ({numz}, {numy}, {numx})")
##########################
# apply photon threshold #
##########################
# Pixels above high_threshold are treated as hot pixels: masked out and zeroed.
if high_threshold != 0:
    nb_thresholded = (data > high_threshold).sum()
    mask[data > high_threshold] = 1
    data[data > high_threshold] = 0
    print(f"Applying photon threshold, {nb_thresholded} high intensity pixels masked")
######################################################
# calculate rocking curve and fit it to get the FWHM #
######################################################
if data.ndim == 3:
    # 3D stack: one detector frame per rocking angle.
    tilt, _, _, _ = setup.read_logfile(scan_number=scan)
    z0, y0, x0 = bu.find_bragg(data, peak_method=peak_method)
    if x_bragg is None:  # Bragg peak position not defined by the user, use the max
        x_bragg = x0
    else:  # calculate the new position with binning and cropping
        x_bragg = int(
            (x_bragg - setup.detector.roi[2])
            / (setup.detector.preprocessing_binning[2] * setup.detector.binning[2])
        )
    if y_bragg is None:  # Bragg peak position not defined by the user, use the max
        y_bragg = y0
    else:  # calculate the new position with binning and cropping
        y_bragg = int(
            (y_bragg - setup.detector.roi[0])
            / (setup.detector.preprocessing_binning[1] * setup.detector.binning[1])
        )
    peak_int = int(data[z0, y0, x0])
    print(
        "Bragg peak (indices in the eventually binned ROI) at (z, y, x):"
        f" {z0}, {y0}, {x0}, intensity = {peak_int}"
    )
    # Integrated intensity per frame in a 100x100 pixel ROI around the peak,
    # vectorized over the detector axes instead of a Python loop over frames.
    rocking_curve = data[
        :, y_bragg - 50 : y_bragg + 50, x_bragg - 50 : x_bragg + 50
    ].sum(axis=(1, 2))
    plot_title = f"Rocking curve for a ROI centered on (y, x): ({y_bragg}, {x_bragg})"
    z0 = np.unravel_index(rocking_curve.argmax(), rocking_curve.shape)[0]
    # Cubic interpolation on a 5x finer angular grid to estimate the FWHM.
    interpolation = interp1d(tilt, rocking_curve, kind="cubic")
    interp_points = 5 * numz
    interp_tilt = np.linspace(tilt.min(), tilt.max(), interp_points)
    interp_curve = interpolation(interp_tilt)
    # FWHM = (number of interpolated points above half maximum) * angular step.
    interp_fwhm = (
        len(np.argwhere(interp_curve >= interp_curve.max() / 2))
        * (tilt.max() - tilt.min())
        / (interp_points - 1)
    )
    print(f"FWHM by interpolation = {interp_fwhm:.3f} deg")
    _, (ax0, ax1) = plt.subplots(2, 1, sharex="col", figsize=(10, 5))
    ax0.plot(tilt, rocking_curve, ".")
    ax0.plot(interp_tilt, interp_curve)
    ax0.set_ylabel("Integrated intensity")
    ax0.legend(("data", "interpolation"))
    ax0.set_title(plot_title)
    ax1.plot(tilt, np.log10(rocking_curve), ".")
    ax1.plot(interp_tilt, np.log10(interp_curve))
    ax1.set_xlabel("Rocking angle (deg)")
    ax1.set_ylabel("Log(integrated intensity)")
    # Bug fix: the legend for the log-scale plot was applied to ax0 a second
    # time, leaving ax1 without a legend.
    ax1.legend(("data", "interpolation"))
    plt.pause(0.1)
    # apply low threshold
    data[data <= low_threshold] = 0
    # data = data[data.shape[0]//2, :, :]
    # select the first frame e.g. for detector mesh scan
    data = data.sum(axis=0)  # sum along the axis of the rocking curve
    title = f"data.sum(axis=0) peak method={peak_method}\n"
else:  # 2D
    y0, x0 = bu.find_bragg(data, peak_method=peak_method)
    peak_int = int(data[y0, x0])
    print(
        f"Bragg peak (indices in the eventually binned ROI) at (y, x): {y0}, {x0},"
        f" intensity = {peak_int}"
    )
    # apply low threshold
    data[data <= low_threshold] = 0
    title = f"peak method={peak_method}\n"
######################################################################################
# check the width parameter for plotting the region of interest centered on the peak #
######################################################################################
if width is None:
    width = [y0, numy - y0, x0, numx - x0]  # plot the full range
else:
    # Clip the requested half-widths so the ROI stays inside the detector;
    # the same clipped value is used on both sides of the peak per direction.
    width = [
        min(width[0], y0, numy - y0),
        min(width[0], y0, numy - y0),
        min(width[1], x0, numx - x0),
        min(width[1], x0, numx - x0),
    ]
print(f"width for plotting: {width}")
############################################
# plot mask, monitor and concatenated data #
############################################
if save_mask:
    np.savez_compressed(setup.detector.savedir + "hotpixels.npz", mask=mask)
# Side-by-side plot of the monitor (1D counts) and the mask (2D, summed stack).
gu.combined_plots(
    tuple_array=(monitor, mask),
    tuple_sum_frames=False,
    tuple_sum_axis=(0, 0),
    tuple_width_v=None,
    tuple_width_h=None,
    tuple_colorbar=(True, False),
    tuple_vmin=np.nan,
    tuple_vmax=np.nan,
    tuple_title=("monitor", "mask"),
    tuple_scale="linear",
    cmap=my_cmap,
    ylabel=("Counts (a.u.)", ""),
)
max_y, max_x = np.unravel_index(abs(data).argmax(), data.shape)
print(
    f"Max of the concatenated data along axis 0 at (y, x): ({max_y}, {max_x}) "
    f"Max = {int(data[max_y, max_x])}"
)
# plot the region of interest centered on the peak
# extent (left, right, bottom, top)
fig, ax = plt.subplots(nrows=1, ncols=1)
plot = | |
SNAPSHOT_KEYS_IDX,
'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
}
TARGETS_KWARGS = {
'targets_keys_idx': TARGETS_KEYS_IDX,
}
SNAPSHOT_KWARGS = {
'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
}
TIMESTAMP_KWARGS = {
'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
}
    class DirectorStep(Step):
        # Director repo keys: root=4, targets=5 (the image repo uses 0-3) —
        # presumably indices into the test harness's key pool; TODO confirm.
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
    # One update cycle: director metadata paired with image repo metadata.
    STEPS = [
        (DirectorStep, ImageStep),
    ]
class ImageRepoTargetsUnsignedUptane(Uptane):
    '''The image repo targets metadata has no signatures'''

    class ImageStep(Step):
        # The client must reject the update: targets role signature threshold unmet.
        UPDATE_ERROR = 'UnmetThreshold::Targets'
        # Key indices (presumably into the harness's key pool): image repo 0-3.
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': [0],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
            # Fault injection: sign the targets metadata with no keys.
            'targets_sign_keys_idx': [],
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

    class DirectorStep(Step):
        # Director repo is well-formed: root key 4, targets key 5.
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

    STEPS = [
        (DirectorStep, ImageStep),
    ]
class ImageRepoSnapshotUnsignedUptane(Uptane):
    '''The image repo snapshot metadata has no signatures'''

    class ImageStep(Step):
        # The client must reject the update: snapshot role signature threshold unmet.
        UPDATE_ERROR = 'UnmetThreshold::Snapshot'
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': [0],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            # Fault injection: sign the snapshot metadata with no keys.
            'snapshot_sign_keys_idx': [],
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

    class DirectorStep(Step):
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

    STEPS = [
        (DirectorStep, ImageStep),
    ]
class ImageRepoTimestampUnsignedUptane(Uptane):
    '''The image repo timestamp metadata has no signatures'''

    class ImageStep(Step):
        # The client must reject the update: timestamp role signature threshold unmet.
        UPDATE_ERROR = 'UnmetThreshold::Timestamp'
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': [0],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
            # Fault injection: sign the timestamp metadata with no keys.
            'timestamp_sign_keys_idx': [],
        }

    class DirectorStep(Step):
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

    STEPS = [
        (DirectorStep, ImageStep),
    ]
class DirectorRootBadKeyIdsUptane(Uptane):
    '''The director root metadata has bad key IDs for the root role'''

    class ImageStep(Step):
        # Image repo is well-formed; the fault is injected on the director side.
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': [0],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

    class DirectorStep(Step):
        # The client must reject the update because of corrupted key IDs.
        UPDATE_ERROR = 'BadKeyId'
        ROOT_KEYS_IDX = [4]
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': ROOT_KEYS_IDX,
            # Fault injection: corrupt the key IDs listed for the root role.
            'root_bad_key_ids': ROOT_KEYS_IDX,
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

    STEPS = [
        (DirectorStep, ImageStep),
    ]
class DirectorTargetsBadKeyIdsUptane(Uptane):
    '''The director root metadata has bad key IDs for the targets role'''

    class ImageStep(Step):
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': [0],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

    class DirectorStep(Step):
        # The client must reject the update because of corrupted key IDs.
        UPDATE_ERROR = 'BadKeyId'
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            # Fault injection: corrupt the key IDs listed for the targets role.
            'targets_bad_key_ids': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

    STEPS = [
        (DirectorStep, ImageStep),
    ]
class ImageRepoRootBadKeyIdsUptane(Uptane):
    '''The image repo root metadata has bad key IDs for the root role'''

    class ImageStep(Step):
        # The client must reject the update because of corrupted key IDs.
        UPDATE_ERROR = 'BadKeyId'
        ROOT_KEYS_IDX = [0]
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': ROOT_KEYS_IDX,
            # Fault injection: corrupt the key IDs listed for the root role.
            'root_bad_key_ids': ROOT_KEYS_IDX,
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

    class DirectorStep(Step):
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

    STEPS = [
        (DirectorStep, ImageStep),
    ]
class ImageRepoTargetsBadKeyIdsUptane(Uptane):
    '''The image repo root metadata has bad key IDs for the targets role'''

    class ImageStep(Step):
        # The client must reject the update because of corrupted key IDs.
        UPDATE_ERROR = 'BadKeyId'
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': [0],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            # Fault injection: corrupt the key IDs listed for the targets role.
            'targets_bad_key_ids': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

    class DirectorStep(Step):
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

    STEPS = [
        (DirectorStep, ImageStep),
    ]
class ImageRepoSnapshotBadKeyIdsUptane(Uptane):
    '''The image repo root metadata has bad key IDs for the snapshot role'''

    class ImageStep(Step):
        # The client must reject the update because of corrupted key IDs.
        UPDATE_ERROR = 'BadKeyId'
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': [0],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            # Fault injection: corrupt the key IDs listed for the snapshot role.
            'snapshot_bad_key_ids': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

    class DirectorStep(Step):
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

    STEPS = [
        (DirectorStep, ImageStep),
    ]
class ImageRepoTimestampBadKeyIdsUptane(Uptane):
    '''The image repo root metadata has bad key IDs for the timestamp role'''

    class ImageStep(Step):
        # The client must reject the update because of corrupted key IDs.
        UPDATE_ERROR = 'BadKeyId'
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': [0],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
            # Fault injection: corrupt the key IDs listed for the timestamp role.
            'timestamp_bad_key_ids': TIMESTAMP_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

    class DirectorStep(Step):
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

    STEPS = [
        (DirectorStep, ImageStep),
    ]
class DirectorTargetOversizedUptane(Uptane):
    '''The director's metadata states that a target is smaller than it actually is.
    The target metadata in image and director do not match.
    '''

    class ImageStep(Step):
        # Expected per-target failure: metadata differs between repos.
        TARGET_ERRORS = {
            DEFAULT_TARGET_NAME: 'TargetMismatch',
        }
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': [0],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

    class DirectorStep(Step):
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

        def __targets(hardware_id: str, ecu_identifier: str = None) -> list:
            # Fault injection: the director advertises the default target with
            # an 'oversized' alteration (declared size smaller than actual).
            return [Target(name=DEFAULT_TARGET_NAME,
                           content=DEFAULT_TARGET_CONTENT,
                           hardware_id=hardware_id,
                           ecu_identifier=ecu_identifier,
                           alteration='oversized')]

        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'targets': __targets,
        }

    STEPS = [
        (DirectorStep, ImageStep),
    ]
class ImageRepoTargetOversizedUptane(Uptane):
    '''The image repo's metadata states that a target is smaller than it actually is.
    The target metadata in image and director do not match.
    '''

    class ImageStep(Step):
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': [0],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

        def __targets(hardware_id: str, ecu_identifier: str = None) -> list:
            # Fault injection: the image repo advertises the default target with
            # an 'oversized' alteration (declared size smaller than actual).
            return [Target(name=DEFAULT_TARGET_NAME,
                           content=DEFAULT_TARGET_CONTENT,
                           hardware_id=hardware_id,
                           ecu_identifier=ecu_identifier,
                           alteration='oversized')]

        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'targets': __targets,
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

    class DirectorStep(Step):
        # Expected per-target failure: metadata differs between repos.
        TARGET_ERRORS = {
            DEFAULT_TARGET_NAME: 'TargetMismatch',
        }
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

    STEPS = [
        (DirectorStep, ImageStep),
    ]
class TargetOversizedUptane(Uptane):
    '''Both the director's and image repo's metadata states that a target is smaller than it
    actually is.
    '''

    class ImageStep(Step):
        # Both repos agree on the (wrong) size, so the failure surfaces as a
        # hash mismatch here and as an oversized target on the director side.
        TARGET_ERRORS = {
            DEFAULT_TARGET_NAME: 'TargetHashMismatch',
        }
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': [0],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

        def __targets(hardware_id: str, ecu_identifier: str = None) -> list:
            # Fault injection: 'oversized' alteration on the default target.
            return [Target(name=DEFAULT_TARGET_NAME,
                           content=DEFAULT_TARGET_CONTENT,
                           hardware_id=hardware_id,
                           ecu_identifier=ecu_identifier,
                           alteration='oversized')]

        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'targets': __targets,
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

    class DirectorStep(Step):
        TARGET_ERRORS = {
            DEFAULT_TARGET_NAME: 'OversizedTarget',
        }
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

        def __targets(hardware_id: str, ecu_identifier: str = None) -> list:
            # Fault injection: 'oversized' alteration on the default target.
            return [Target(name=DEFAULT_TARGET_NAME,
                           content=DEFAULT_TARGET_CONTENT,
                           hardware_id=hardware_id,
                           ecu_identifier=ecu_identifier,
                           alteration='oversized')]

        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'targets': __targets,
        }

    STEPS = [
        (DirectorStep, ImageStep),
    ]
class DirectorRootRotationUptane(Uptane):
    '''Director step 0 has root v1, step 1 has root v2, it is correctly cross signed'''

    class ImageStep(Step):
        TARGETS_KEYS_IDX = [1]
        SNAPSHOT_KEYS_IDX = [2]
        TIMESTAMP_KEYS_IDX = [3]
        ROOT_KWARGS = {
            'root_keys_idx': [0],
            'targets_keys_idx': TARGETS_KEYS_IDX,
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        SNAPSHOT_KWARGS = {
            'snapshot_keys_idx': SNAPSHOT_KEYS_IDX,
        }
        TIMESTAMP_KWARGS = {
            'timestamp_keys_idx': TIMESTAMP_KEYS_IDX,
        }

    class DirectorStep1(Step):
        # Step 0: director root v1 signed with key 4.
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            'root_keys_idx': [4],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

    class DirectorStep2(Step):
        TARGETS_KEYS_IDX = [5]
        ROOT_KWARGS = {
            # Step 1: root v2 holds the new key (6) and is signed by both the
            # old key (4) and the new key (6) — a valid cross-signed rotation.
            'version': 2,
            'root_keys_idx': [6],
            'root_sign_keys_idx': [4, 6],
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }
        TARGETS_KWARGS = {
            'targets_keys_idx': TARGETS_KEYS_IDX,
        }

    STEPS = [
        (DirectorStep1, ImageStep),
        (DirectorStep2, ImageStep),
    ]
class ImageRepoRootRotationUptane(Uptane):
'''Image repo step 0 has root v1, step 1 | |
#!/usr/bin/env python
#
# Copyright 2018 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: nsxt_tier0
short_description: 'Create/Update/Delete a Tier-0 and associated resources'
description: Creates/Updates/Deletes a Tier-0 resource using the Policy API.
             Associated resources include 'Tier-0 Locale Service' and
'Tier-0 Interface'. 'Tier-0 Locale Service' and 'Tier-0 Interface'
attributes must be prepended with 't0ls' and 't0iface'
respectively.
version_added: '2.8'
author: '<NAME>'
extends_documentation_fragment: vmware_nsxt
options:
id:
description: Tier-0 ID
required: true
type: str
description:
description: Tier-0 description
type: str
default_rule_logging:
description: Enable logging for whitelisted rule.
Indicates if logging should be enabled for the default
whitelisting rule.
        default: false
        type: bool
ha_mode:
description: High-availability Mode for Tier-0
choices:
- 'ACTIVE_STANDBY'
- 'ACTIVE_ACTIVE'
default: 'ACTIVE_ACTIVE'
type: str
disable_firewall:
        description: Disable or enable gateway firewall.
default: False
type: bool
failover_mode:
description: Determines the behavior when a Tier-0 instance in
ACTIVE-STANDBY high-availability mode restarts
after a failure. If set to PREEMPTIVE, the preferred node
will take over, even if it causes
another failure. If set to NON_PREEMPTIVE, then
the instance that restarted will remain secondary.
This property must not be populated unless the
ha_mode property is set to ACTIVE_STANDBY.
choices:
- 'NON_PREEMPTIVE'
- 'PREEMPTIVE'
default: 'NON_PREEMPTIVE'
type: str
force_whitelisting:
description: Flag to add whitelisting FW rule during
realization.
default: False
type: bool
internal_transit_subnets:
description: Internal transit subnets in CIDR format.
Specify subnets that are used to assign addresses
to logical links connecting service routers and
distributed routers. Only IPv4 addresses are
supported. When not specified, subnet 169.254.0.0/
24 is assigned by default in ACTIVE_ACTIVE HA mode
or 169.254.0.0/28 in ACTIVE_STANDBY mode.
default: False
type: list
ipv6_ndra_profile_id:
description: IPv6 NDRA profile configuration on Tier0.
Either or both NDRA and/or DAD profiles can be
configured. Related attribute ipv6_dad_profile_id.
type: str
ipv6_ndra_profile_display_name:
description: Same as ipv6_ndra_profile_id. Either one can be specified.
If both are specified, ipv6_ndra_profile_id takes
precedence.
type: str
ipv6_dad_profile_id:
description: IPv6 DRA profile configuration on Tier0.
Either or both NDRA and/or DAD profiles can be
configured. Related attribute ipv6_ndra_profile_id.
type: str
ipv6_dad_profile_display_name:
description: Same as ipv6_dad_profile_id. Either one can be specified.
If both are specified, ipv6_dad_profile_id takes
precedence.
type: str
transit_subnets:
description: Transit subnets in CIDR format.
Specify transit subnets that are used to assign
addresses to logical links connecting tier-0 and
tier-1s. Both IPv4 and IPv6 addresses are
supported.
                     When not specified, subnet 100.64.0.0/16 is
configured by default.
type: list
dhcp_config_id:
description: DHCP configuration for Segments connected to
Tier-0. DHCP service is configured in relay mode.
type: str
dhcp_config_display_name:
description: Same as dhcp_config_id. Either one can be specified.
If both are specified, dhcp_config_id takes precedence.
type: str
static_routes:
type: list
element: dict
description: This is a list of Static Routes that need to be created,
updated, or deleted
suboptions:
id:
description: Tier-0 Static Route ID.
required: false
type: str
display_name:
description:
- Tier-0 Static Route display name.
- Either this or id must be specified. If both are
specified, id takes precedence.
required: false
type: str
description:
description:
- Tier-0 Static Route description.
type: str
state:
description:
- State can be either 'present' or 'absent'. 'present' is
used to create or update resource. 'absent' is used to
delete resource.
- Must be specified in order to modify the resource
choices:
- present
- absent
network:
description: Network address in CIDR format
required: true
type: str
next_hops:
description: Next hop routes for network
type: list
elements: dict
suboptions:
admin_distance:
description: Cost associated with next hop route
type: int
default: 1
ip_address:
description: Next hop gateway IP address
type: str
scope:
description:
- Interface path associated with current route
- For example, specify a policy path referencing the
IPSec VPN Session
type: list
tags:
description: Opaque identifiers meaningful to the API user
type: dict
suboptions:
scope:
description: Tag scope.
required: true
type: str
tag:
description: Tag value.
required: true
type: str
locale_services:
type: list
element: dict
description: This is a list of Locale Services that need to be created,
updated, or deleted
suboptions:
id:
description: Tier-0 Locale Service ID.
required: false
type: str
display_name:
description:
- Tier-0 Locale Service display name.
- Either this or id must be specified. If both are
specified, id takes precedence
required: false
type: str
description:
description:
- Tier-0 Locale Service description.
type: str
state:
description:
- State can be either 'present' or 'absent'. 'present' is
used to create or update resource. 'absent' is used to
delete resource
- Required if id is specified.
choices:
- present
- absent
tags:
description: Opaque identifiers meaningful to the API user
type: dict
suboptions:
scope:
description: Tag scope.
required: true
type: str
tag:
description: Tag value.
required: true
type: str
edge_cluster_info:
description: Used to create path to edge cluster. Auto-assigned
if associated enforcement-point has only one edge
cluster.
type: dict
suboptions:
site_id:
description: site_id where edge cluster is located
default: default
type: str
enforcementpoint_id:
description: enforcementpoint_id where edge cluster is
located
default: default
type: str
edge_cluster_id:
description: ID of the edge cluster
type: str
edge_cluster_display_name:
description:
- display name of the edge cluster.
- Either this or edge_cluster_id must be specified.
If both are specified, edge_cluster_id takes
precedence
type: str
preferred_edge_nodes_info:
description: Used to create paths to edge nodes. Specified edge
is used as preferred edge cluster member when
failover mode is set to PREEMPTIVE, not
applicable otherwise.
type: list
suboptions:
site_id:
description: site_id where edge node is located
default: default
type: str
enforcementpoint_id:
description: enforcementpoint_id where edge node is
located
default: default
type: str
edge_cluster_id:
description: edge_cluster_id where edge node is
located
type: str
edge_cluster_display_name:
description:
- display name of the edge cluster.
- either this or edge_cluster_id must be specified.
If both are specified, edge_cluster_id takes
precedence
type: str
edge_node_id:
description: ID of the edge node
type: str
edge_node_display_name:
description:
- Display name of the edge node.
- either this or edge_node_id must be specified. If
both are specified, edge_node_id takes precedence
type: str
route_redistribution_types:
description: Enable redistribution of different types of routes
on Tier-0.
choices:
- TIER0_STATIC - Redistribute user added static routes.
- TIER0_CONNECTED - Redistribute all subnets configured on
Interfaces and routes related to TIER0_ROUTER_LINK,
TIER0_SEGMENT, TIER0_DNS_FORWARDER_IP,
TIER0_IPSEC_LOCAL_IP, TIER0_NAT types.
- TIER0_EXTERNAL_INTERFACE - Redistribute external
interface subnets on Tier-0.
- TIER0_LOOPBACK_INTERFACE - Redistribute loopback
interface subnets on Tier-0.
- TIER0_SEGMENT - Redistribute subnets configured on
Segments connected to Tier-0.
- TIER0_ROUTER_LINK - Redistribute router link port subnets
on Tier-0.
- TIER0_SERVICE_INTERFACE - Redistribute Tier0 service
interface subnets.
- TIER0_DNS_FORWARDER_IP - Redistribute DNS forwarder
subnets.
- TIER0_IPSEC_LOCAL_IP - Redistribute IPSec subnets.
- TIER0_NAT - Redistribute NAT IPs owned by Tier-0.
- TIER1_NAT - Redistribute NAT IPs advertised by Tier-1
instances.
- TIER1_LB_VIP - Redistribute LB VIP IPs advertised by
Tier-1 instances.
- TIER1_LB_SNAT - Redistribute LB SNAT IPs advertised by
Tier-1 instances.
- TIER1_DNS_FORWARDER_IP - Redistribute DNS forwarder
subnets on Tier-1 instances.
- TIER1_CONNECTED - Redistribute all subnets configured on
Segments and Service Interfaces.
- TIER1_SERVICE_INTERFACE - Redistribute Tier1 service
interface subnets.
- TIER1_SEGMENT - Redistribute subnets configured on
Segments connected to Tier1.
- TIER1_IPSEC_LOCAL_ENDPOINT - Redistribute IPSec VPN
local-endpoint subnets advertised by TIER1.
type: list
ha_vip_configs:
type: | |
import random
import sys
import math
import pygame
from pygame.locals import *
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
# Genetic-algorithm hyperparameters.  MUTATION_STEP presumably anneals the
# rate from START down to MIN across generations -- confirm in main() (not
# visible in this chunk).
START_MUTATION_RATE = 0.1
MIN_MUTATION_RATE = 0.05
MUTATION_STEP = 0.05
POPULATION_SIZE = 50
# Pygame display settings (classic Flappy Bird dimensions).
FPS = 30
SCREENWIDTH = 288
SCREENHEIGHT = 512
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79 # y coordinate of the ground line
# image and hitmask dicts, populated at runtime (loader not in this chunk)
IMAGES, HITMASKS = {}, {}
# list of all possible players (tuple of 3 positions of flap)
PLAYERS_LIST = (
    # red bird
    (
        'assets/sprites/redbird-midflap.png',
    ),
    # blue bird
    (
        'assets/sprites/bluebird-midflap.png',
    ),
    # yellow bird
    (
        'assets/sprites/yellowbird-midflap.png',
    ),
)
# list of backgrounds
BACKGROUNDS_LIST = (
    'assets/sprites/background-day.png',
    'assets/sprites/background-night.png',
)
# list of pipes
PIPES_LIST = (
    'assets/sprites/pipe-green.png',
    'assets/sprites/pipe-red.png',
)
# ---------------------------------------------------------------------------------------------------------------------
class Brain:
    """A single sigmoid Dense layer that maps a bird's sensory inputs to a
    flap/no-flap decision.  Weights are evolved genetically, never trained
    by gradient descent (compile() is only needed to make Keras happy)."""

    def __init__(self, genomeInputs=7, genomeOutputs=1):
        net = Sequential()
        net.add(Dense(genomeOutputs, activation='sigmoid', input_dim=genomeInputs))
        optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        net.compile(loss="mse", optimizer=optimizer, metrics=["accuracy"])
        self.model = net
        self.randomize()
        self.numInputs = genomeInputs
        self.numOutputs = genomeOutputs

    def randomize(self):
        """Overwrite every weight with a uniform sample from (-1, 1)."""
        genes = self.model.get_weights()
        for layer in genes:
            for row in range(len(layer)):
                layer[row] = random.uniform(-1, 1)
        self.model.set_weights(genes)

    def predict(self, inputs):
        """Return True when the network says the bird should flap.

        Note the inverted threshold: a LOW sigmoid output triggers a flap.
        """
        batch = np.atleast_2d(np.asarray(inputs))
        confidence = self.model.predict(batch, 1)
        return bool(confidence <= 0.5)

    def mutate(self, mutationRate):
        """Perturb weights in place.  Of the weights selected for mutation,
        10% are replaced outright and the rest get a small Gaussian nudge,
        clamped to stay within [-1, 1]."""
        genes = self.model.get_weights()
        for layer in genes:
            for row in range(len(layer)):
                if random.uniform(0, 1) >= mutationRate:
                    continue
                if random.uniform(0, 1) < 0.1:
                    layer[row] = random.uniform(-1, 1)
                else:
                    layer[row] += random.gauss(0, 1) / 50
                    if layer[row] > 1:
                        layer[row] = 1
                    elif layer[row] < -1:
                        layer[row] = -1
        self.model.set_weights(genes)

    def clone(self):
        """Return a new Brain carrying an exact copy of this one's weights."""
        twin = Brain(self.numInputs, self.numOutputs)
        twin.model.set_weights(self.model.get_weights())
        return twin

    @staticmethod
    def crossover(brain1, brain2):
        """Uniform crossover: each weight is drawn from either parent with
        probability 0.5.  Returns the child Brain."""
        child = Brain(brain1.numInputs, brain1.numOutputs)
        mixed = child.model.get_weights()
        genesA = brain1.model.get_weights()
        genesB = brain2.model.get_weights()
        for layer, fromA, fromB in zip(mixed, genesA, genesB):
            for row in range(len(layer)):
                layer[row] = fromA[row] if random.uniform(0, 1) < 0.5 else fromB[row]
        child.model.set_weights(mixed)
        return child
# ---------------------------------------------------------------------------------------------------------------------
class Bird:
    """One flappy-bird agent: pygame physics state plus a Brain deciding flaps.

    Relies on module-level globals: IMAGES/HITMASKS (sprite surfaces and pixel
    masks), BASEY (ground y), SCREEN (pygame display) and pixelCollision
    (defined elsewhere in this file).
    """
    def __init__(self, x=SCREENWIDTH*0.2,y=SCREENHEIGHT/2):
        # Basic Player things
        self.x = x
        self.y = y
        self.velY=0
        self.velX = -4
        self.isOnGround = False
        self.dead = False
        self.score = 0
        # player velocity, max velocity, downward accleration, accleration on flap
        # Current real-time values
        self.playerVelY = -9 # player's velocity along Y, default same as playerFlapped
        self.playerAccY = 1 # players downward accleration
        self.playerRot = 45 # player's rotation
        self.playerFlapped = False # True when player flaps
        # limits, max values, etc. Physics
        self.playerMaxVelY = 10 # max vel along Y, max descend speed
        self.playerMinVelY = -8 # min vel along Y, max ascend speed
        self.playerVelRot = 3 # angular speed
        self.playerRotThr = 20 # rotation threshold
        self.playerFlapAcc = -9 # players speed on flapping
        self.visibleRot = 0 # make bird look less stupid
        # AI stuffs/ Genetic Algorithm
        self.fitness = 0
        self.vision = [] #Input to neural Net
        self.decision = [] # Output of NN
        self.unadjustedFitness = 0
        self.lifespan = 0 # How long player lived for self.fitness
        self.bestScore = 0 # Store self.score achieved for replay
        self.score = 0
        self.gen = 0
        self.isBest = False
        self.genomeInputs = 7
        self.genomeOutputs = 1
        self.brain = Brain(self.genomeInputs,self.genomeOutputs)
    def predict(self, upperPipes, lowerPipes):
        """Build the 7-element input vector and flap if the Brain says so."""
        self.defineInputVec(upperPipes,lowerPipes)
        yesFlap = self.brain.predict(self.vision)
        if yesFlap:
            self.flap()
    def flap(self):
        """Give the bird upward velocity, unless it is already off-screen top."""
        if self.y > -2 * IMAGES['player'][0].get_height():
            self.playerVelY = self.playerFlapAcc
            self.playerFlapped = True
    def defineInputVec(self,upperPipes,lowerPipes):
        """Populate self.vision: [dist to pipe back, dist to pipe front,
        vertical gap to upper/lower pipe, dist to ground, y velocity, bias 1]."""
        hDist = self.horizontalDistToNextPipe(upperPipes)
        uDist = self.distToUpperPipe(upperPipes)
        lDist = self.distToLowerPipe(lowerPipes)
        gDist = self.distToGround()
        vVelo = self.playerVelY
        self.vision = [hDist[0],hDist[2],uDist,lDist,gDist,vVelo,1.0]
    def updateScore(self,upperPipes):
        """Add a point when the bird's midpoint just passed a pipe's midpoint.
        The 4px window matches the per-frame pipe speed so it fires once."""
        playerMidPos = self.x + IMAGES['player'][0].get_width() / 2
        for pipe in upperPipes:
            pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
            if pipeMidPos <= playerMidPos < pipeMidPos + 4:
                self.score += 1
    def horizontalDistToNextPipe(self,upperPipes):
        """Return [distance to the back edge of the nearest pipe ahead,
        index of that pipe, distance from the bird's front to its front edge]."""
        minDist = 99999
        distToFront = 999999
        minIdx = 0
        pipeWidth = IMAGES['pipe'][0].get_width()
        playerWidth = IMAGES['player'][0].get_width()
        for i in range(len(upperPipes)):
            pipe = upperPipes[i]
            dist = (pipe['x']+pipeWidth) - self.x
            dTF = pipe['x'] - (self.x + playerWidth)
            if (dist > 0) and (dist < minDist):
                minDist = dist
                distToFront = dTF
                minIdx = i
        return [minDist,minIdx,distToFront]
    def distToGround(self):
        """Vertical gap between the bird's feet and the ground line."""
        playerHeight = IMAGES['player'][0].get_height()
        dist = (BASEY - 1) - (self.y + playerHeight)
        return dist
    def distToUpperPipe(self,upperPipes):
        """Vertical gap between the bird's top and the next upper pipe's bottom."""
        dist = self.horizontalDistToNextPipe(upperPipes)
        idx = dist[1]
        pipe = upperPipes[idx]
        pipeH = IMAGES['pipe'][0].get_height()
        distY = self.y - (pipe['y'] + pipeH)
        return distY
    def distToLowerPipe(self,lowerPipes):
        """Vertical gap between the bird's bottom and the next lower pipe's top."""
        dist = self.horizontalDistToNextPipe(lowerPipes)
        idx = dist[1]
        pipe = lowerPipes[idx]
        playerH = IMAGES['player'][0].get_height()
        distY = pipe['y'] - (self.y + playerH)
        return distY
    def rotate(self):
        """Tilt the sprite nose-down over time, capped at the display threshold."""
        if self.playerRot > -90:
            self.playerRot -= self.playerVelRot
        self.visibleRot = self.playerRotThr
        if self.playerRot <= self.playerRotThr:
            self.visibleRot = self.playerRot
    def update(self,upperPipes,lowerPipes):
        """Advance one frame: collide, decide, score, rotate, move.

        Returns True when the bird is dead, False while it is still alive.
        """
        if not self.dead:
            self.lifespan += 1
            self.checkCrash(upperPipes,lowerPipes)
            if not self.dead:
                # Decide to flap
                self.predict(upperPipes,lowerPipes) # calls self.flap() if flapping
                # check for score
                self.updateScore(upperPipes)
                # rotate the player
                self.rotate()
                # player's movement
                self.move()
                return False
        else:
            return True
    def show(self):
        """Blit the bird (best bird uses the alternate sprite).

        Returns True if something was drawn, False for dead birds.
        """
        if not self.dead:
            if self.isBest:
                playerSurface = pygame.transform.rotate(IMAGES['player'][1], self.visibleRot)
                SCREEN.blit(playerSurface, (self.x, self.y))
            else:
                playerSurface = pygame.transform.rotate(IMAGES['player'][0], self.visibleRot)
                SCREEN.blit(playerSurface, (self.x, self.y))
            return True
        return False
    def move(self):
        """Apply gravity (unless flapping this frame) and move vertically,
        clamping so the bird never sinks below the ground line."""
        if self.playerVelY < self.playerMaxVelY and not self.playerFlapped:
            self.playerVelY += self.playerAccY
        if self.playerFlapped:
            self.playerFlapped = False
            # more rotation to cover the threshold (calculated in visible rotation)
            self.playerRot = 45
        playerHeight = IMAGES['player'][0].get_height()
        self.y += min(self.playerVelY, BASEY - self.y - playerHeight)
    def calculateFitness(self):
        # Quadratic score term dominates; lifespan breaks ties between
        # birds that cleared the same number of pipes.
        self.fitness = 1 + self.score * self.score + self.lifespan / 20.0
    def checkCrash(self,upperPipes,lowerPipes):
        """Return True (and mark self.dead) if the player collides with the
        ground or, via pixel masks, with any pipe."""
        playerWidth = IMAGES['player'][0].get_width()
        playerHeight = IMAGES['player'][0].get_height()
        # if player crashes into ground
        if self.y + playerHeight >= BASEY - 1:
            self.dead = True
            self.isOnGround = True
            return True
        else:
            playerRect = pygame.Rect(self.x, self.y,
                                     playerWidth, playerHeight)
            pipeW = IMAGES['pipe'][0].get_width()
            pipeH = IMAGES['pipe'][0].get_height()
            for uPipe, lPipe in zip(upperPipes, lowerPipes):
                # upper and lower pipe rects
                uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
                lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
                # player and upper/lower pipe hitmasks
                pHitMask = HITMASKS['player'][0]
                uHitmask = HITMASKS['pipe'][0]
                lHitmask = HITMASKS['pipe'][1]
                # if bird collided with upipe or lpipe
                uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
                lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
                if uCollide or lCollide:
                    self.dead = True
                    return True
        return False
    def gimmieBaby(self,parent2):
        """Return a child Bird whose brain is a crossover of self and parent2.
        (crossover is effectively static; the clone() receiver is just one parent.)"""
        baby = Bird()
        baby.brain = self.brain.crossover(self.brain.clone(),parent2.brain)
        return baby
# ---------------------------------------------------------------------------------------------------------------------
class Population:
    """A generation of Birds evolved with elitism + fitness-proportionate
    selection.  Index 0 of self.birds is reserved for the previous champion."""

    def __init__(self, size=100, dotStartX=0, dotStartY=0):
        self.birds = [Bird(dotStartX, dotStartY) for _ in range(size)]
        self.fitnessSum = 0
        self.generation = 1
        self.bestBird = 0
        self.avgFitness = 0
        self.stdDevFitness = 0
        self.maxFitness = 0
        self.currentBestScore = 0

    def show(self):
        """Draw at most 20 of the non-champion birds, then the champion on top."""
        budget = 20
        for bird in self.birds[1:]:
            if budget > 0 and bird.show():
                budget -= 1
        self.birds[0].show()

    def update(self, upperPipes, lowerPipes):
        """Advance every bird one frame."""
        for bird in self.birds:
            bird.update(upperPipes, lowerPipes)

    def findMaxScore(self):
        """Cache the highest score in the current generation (never below 0)."""
        best = 0
        for bird in self.birds:
            if bird.score > best:
                best = bird.score
        self.currentBestScore = best

    def calculateAvgFitness(self):
        """Mean fitness; calculateFitnessSum() must run first."""
        self.avgFitness = self.fitnessSum / len(self.birds)

    def calculateStdDevFitness(self):
        """Population standard deviation of fitness around avgFitness."""
        spread = sum((bird.fitness - self.avgFitness) ** 2 for bird in self.birds)
        self.stdDevFitness = math.sqrt(spread / len(self.birds))

    def printStats(self):
        """Dump the generation's fitness summary to stdout."""
        print("Best Fit: ", self.maxFitness)
        print("Best Score: ", self.currentBestScore)
        print("Mean Fit: ", self.avgFitness)
        print("StdDev Fit: ", self.stdDevFitness)
        print(" ")

    def naturalSelection(self):
        """Replace self.birds with the next generation."""
        # Summarize the generation that just finished.
        self.setBestBird()
        self.calculateFitnessSum()
        self.calculateAvgFitness()
        self.calculateStdDevFitness()
        self.printStats()
        # Elitism: a clone of the champion seeds slot 0 of the new generation.
        champion = self.birds[self.bestBird]
        nextGen = [champion.gimmieBaby(champion)]
        nextGen[0].isBest = True
        while len(nextGen) < len(self.birds):
            mom = self.selectParent()
            dad = self.selectParent()
            nextGen.append(mom.gimmieBaby(dad))
        self.birds = list(nextGen)
        self.generation += 1

    def mutateDemBabies(self, mutationRate):
        """Mutate every bird except the elite clone in slot 0."""
        for bird in self.birds[1:]:
            bird.brain.mutate(mutationRate)

    def setBestBird(self):
        """Record the index and fitness of the fittest bird."""
        topFitness = 0
        topIdx = 0
        for idx, bird in enumerate(self.birds):
            if bird.fitness > topFitness:
                topFitness = bird.fitness
                topIdx = idx
        self.bestBird = topIdx
        self.maxFitness = topFitness

    def calculateFitness(self):
        """Have every bird recompute its own fitness."""
        for bird in self.birds:
            bird.calculateFitness()

    def calculateFitnessSum(self):
        """Total fitness, used as the roulette-wheel size in selectParent()."""
        self.fitnessSum = sum(bird.fitness for bird in self.birds)

    def selectParent(self):
        """Roulette-wheel selection proportional to fitness."""
        threshold = random.uniform(0, self.fitnessSum)
        accumulated = 0
        for bird in self.birds:
            accumulated += bird.fitness
            if accumulated > threshold:
                return bird
        # Should never get to this point
        print("HALP YOU BROKE IT - natural selection & select parent")
        return None

    def allBirdsDead(self):
        """True once no bird in the generation is still alive."""
        return all(bird.dead for bird in self.birds)
# ---------------------------------------------------------------------------------------------------------------------
# Python 2/3 compatibility shim: Python 3 removed xrange, so alias it to
# range when the name is missing.
try:
    xrange
except NameError:
    xrange = range
def main():
global SCREEN, FPSCLOCK
pygame.init()
FPSCLOCK | |
import re
import inspect
import copy
import torch
from pykeops.torch.kernel_product.formula import Formula
from pykeops.torch.kernel_product.features_kernels import FeaturesKP
# Define the standard kernel building blocks.
# They will be concatenated depending on the "name" argument of Kernel.__init__
# Feel free to add your own "pet formula" at run-time,
# using for instance :
# " kernel_formulas["mykernel"] = Formula(... ) "
# In some cases, due to PyTorch's behavior mainly, we have to add a small
# epsilon in front of square roots and logs. As for KeOps code,
# note that [dSqrt(x)/dx](x=0) has been conventionally set to 0.
Epsilon = "IntInv(100000000)"  # symbolic 1e-8, spliced into KeOps formula strings
epsilon = 1e-8  # numerical counterpart used by the vanilla PyTorch routines
# Formulas in "x_i" and "y_j", with parameters "g" (=1/sigma^2, for instance)
# Formulas in "x_i" and "y_j", with parameters "g" (=1/sigma^2, for instance).
# Each entry carries a KeOps string formula and a vanilla-PyTorch routine,
# in both the linear ("sum") and the log domain.  Invariant: routine_log must
# equal log(routine_sum), and formula_log must equal Log(formula_sum).
kernel_formulas = dict(
    linear=Formula(  # Linear kernel
        formula_sum="({X}|{Y})",
        routine_sum=lambda xsy=None, **kwargs: xsy,
        formula_log="(IntInv(2) * Log(Square(({X}|{Y})) + " + Epsilon + "))",
        routine_log=lambda xsy=None, **kwargs: .5 * (xsy ** 2 + epsilon).log()
    ),
    distance=Formula(  # -1* Energy distance kernel
        formula_sum="Sqrt(WeightedSqDist({G},{X},{Y}))",
        routine_sum=lambda gxmy2=None, **kwargs: gxmy2.sqrt(),
        # Fixed: the string previously had one more ')' than '(' (the closing
        # parenthesis of Log was misplaced), which would not parse; and the
        # routine squared the already-squared distance, returning
        # 2*log(distance) instead of log(distance) = .5*log(gxmy2).
        formula_log="(IntInv(2) * Log(WeightedSqDist({G},{X},{Y}) + " + Epsilon + "))",
        routine_log=lambda gxmy2=None, **kwargs: .5 * (gxmy2 + epsilon).log()
    ),
    gaussian=Formula(  # Standard RBF kernel
        formula_sum="Exp( -(WeightedSqDist({G},{X},{Y})))",
        routine_sum=lambda gxmy2=None, **kwargs: (-gxmy2).exp(),
        formula_log="(-(WeightedSqDist({G},{X},{Y})))",
        routine_log=lambda gxmy2=None, **kwargs: -gxmy2,
    ),
    cauchy=Formula(  # Heavy tail kernel
        formula_sum="Inv( IntCst(1) + WeightedSqDist({G},{X},{Y}))",
        routine_sum=lambda gxmy2=None, **kwargs: 1. / (1 + gxmy2),
        formula_log="(IntInv(-1) * Log(IntCst(1) + WeightedSqDist({G},{X},{Y})))",
        routine_log=lambda gxmy2=None, **kwargs: -(1 + gxmy2).log(),
    ),
    laplacian=Formula(  # Pointy kernel
        formula_sum="Exp(-Sqrt( WeightedSqDist({G},{X},{Y})))",
        routine_sum=lambda gxmy2=None, **kwargs: (-(gxmy2 + epsilon).sqrt()).exp(),
        formula_log="(-Sqrt(WeightedSqDist({G},{X},{Y})))",
        routine_log=lambda gxmy2=None, **kwargs: -(gxmy2 + epsilon).sqrt(),
    ),
    inverse_multiquadric=Formula(  # Heavy tail kernel
        formula_sum="Inv(Sqrt(IntCst(1) + WeightedSqDist({G},{X},{Y})))",
        routine_sum=lambda gxmy2=None, **kwargs: torch.rsqrt(1 + gxmy2),
        formula_log="(IntInv(-2) * Log(IntCst(1) + WeightedSqDist({G},{X},{Y})))",
        routine_log=lambda gxmy2=None, **kwargs: -.5 * (1 + gxmy2).log(),
    ))
def set_indices(formula, f_ind, v_ind) :
    """
    Modify the patterns stored in kernel_formulas, taking into account the fact that
    the current formula is the f_ind-th, working with the v_ind-th pair of variables.

    Mutates `formula` in place (callers pass a copy) and returns
    (formula, f_ind + 1, needs_sum, needs_log), where the two `needs` tuples
    record which precomputed quantities ('x', 'y', 'gxmy2', 'xsy') the vanilla
    PyTorch routines require for this variable pair.
    """
    # KeOps backend -------------------------------------------------------------------------
    # Substitute concrete variable names (e.g. "G_0", "X_1", "Y_1") into the
    # "{G}/{X}/{Y}" placeholders of the formula strings.
    n_params = formula.n_params
    n_vars = formula.n_vars
    if n_params == 1:
        G_str = "G_"+str(f_ind)
    else:
        G_str = None
    if n_vars == 2:
        X_str, Y_str = "X_" + str(v_ind), "Y_" + str(v_ind)
    else:
        X_str, Y_str = None, None
    formula.formula_sum = formula.formula_sum.format(G = G_str, X = X_str, Y = Y_str)
    formula.formula_log = formula.formula_log.format(G = G_str, X = X_str, Y = Y_str)
    # Vanilla PyTorch backend -------------------------------------------------------------------
    # Guess which quantities will be needed by the vanilla pytorch binding:
    params_sum = inspect.signature(formula.routine_sum).parameters
    needs_x_y_gxmy2_xsy_sum = (v_ind, 'x' in params_sum, 'y' in params_sum, 'gxmy2' in params_sum, 'xsy' in params_sum)
    # Wrap the original routine so it picks out this formula's slice of the
    # precomputed lists.  The original is kept on `formula.subroutine_sum`;
    # the lambda closes over this call's local `formula`/`v_ind`/`f_ind`, so
    # each copied Formula gets its own indices (no late-binding hazard here).
    # NOTE(review): the wrapper ignores its `g` argument and indexes x/y even
    # when they default to None -- callers are expected to always supply them.
    formula.subroutine_sum = formula.routine_sum
    formula.routine_sum = lambda x=None, y=None, g=None, gxmy2=None, xsy=None : \
        formula.subroutine_sum(x=x[v_ind], y=y[v_ind], gxmy2=gxmy2[f_ind], xsy=xsy[f_ind])
    params_log = inspect.signature(formula.routine_log).parameters
    needs_x_y_gxmy2_xsy_log = (v_ind, 'x' in params_log, 'y' in params_log, 'gxmy2' in params_log, 'xsy' in params_log)
    formula.subroutine_log = formula.routine_log
    formula.routine_log = lambda x=None, y=None, g=None, gxmy2=None, xsy=None : \
        formula.subroutine_log(x=x[v_ind], y=y[v_ind], gxmy2=gxmy2[f_ind], xsy=xsy[f_ind])
    # Return the incremented formula counter for the caller's loop.
    return formula, f_ind+1, needs_x_y_gxmy2_xsy_sum, needs_x_y_gxmy2_xsy_log
class Kernel:
r"""Defines a new Kernel identifier for :func:`kernel_product`.
Keyword Args:
name (string): **Computation identifier.**
The kernel **name** should be built from a small set
of atomic formulas, acting on arbitrary pairs of variables and **combined** using:
- integer constants,
- the addition ``+``,
- the product ``*``,
- the integer exponentiation ``**k``.
**Parameters and variables.** Every kernel name is associated to a list of *atomic formulas* (that will require **parameters**) and a list of **pairs of variables**, ordered as they are in the name string. Both **parameters** and **variables** will be required as inputs by :func:`kernel_product`. A few examples:
- ``"gaussian(x,y)"`` : one formula and one pair of variables.
- ``"gaussian(x,y) * linear(u,v)**2"`` : two formulas and two pairs of variables.
- ``"cauchy(x,y) + gaussian(x,y) * (1 + cauchy(u,v)**2)``: **three** formulas (``cauchy``, ``gaussian`` and ``cauchy`` once again) with **two** pairs of variables (``(x,y)`` first, ``(u,v)`` second)
Note that by convention, pairs of variables should be denoted by single-letter, non-overlapping duets: ``"gaussian(x',yy)"`` or ``"gaussian(x,y) + cauchy(y,z)"`` are not supported.
**Atomic formulas.**
As of today, the `pre-defined kernel names <https://github.com/getkeops/keops/blob/master/pykeops/torch/kernel_product/kernels.py>`_ are:
- ``linear(x,y)``, the :math:`L^2` scalar product:
.. math::
k(x,y)=\langle x,y\rangle.
- ``gaussian(x,y)``, the standard RBF kernel:
.. math::
k(x,y)=\exp(-\langle x-y, G\, (x-y)\rangle).
- ``laplacian(x,y)``, the pointy exponential kernel:
.. math::
k(x,y)=\exp(-\sqrt{\langle x-y, G\, (x-y)\rangle}).
- ``cauchy(x,y)``, a heavy-tail kernel:
.. math::
k(x,y)=1/(1+\langle x-y, G\, (x-y)\rangle).
- ``inverse_multiquadric(x,y)``, a very heavy-tail kernel:
.. math::
k(x,y)=1/\sqrt{1+\langle x-y, G\, (x-y)\rangle}.
- ``distance(x,y)``, arbitrary Euclidean norms:
.. math::
k(x,y)=\sqrt{\langle x-y, G\, (x-y)\rangle}.
**Defining your own formulas** is also possible, and documented in the second part of this :doc:`example <../../../_auto_examples/pytorch/plot_kernel_product_syntax>`.
**Parameters.** With the exception of the linear kernel (which accepts **None** as its parameter), all these kernels act on arbitrary vectors of dimension `D` and are parametrized by a variable ``G`` that can represent :
======================================= ===============================
Parameter :math:`G` Dimension of the tensor ``G``
======================================= ===============================
scalar dim-1 vector
diagonal matrix dim-`D` vector
symmetric `D`-by-`D` matrix dim-`D*D` vector
j-varying scalar `N`-by-1 array
j-varying diagonal matrix `N`-by-`D` array
j-varying symmetric `D`-by-`D` matrix `N`-by-`D*D` array
======================================= ===============================
If required by the user, a kernel-id can thus be used to represent non-uniform, non-radial kernels as documented in the :doc:`anisotropic_kernels example <../../../_auto_examples/pytorch/plot_anisotropic_kernels>`.
Example:
>>> M, N = 1000, 2000 # number of "i" and "j" indices
>>> # Generate the data as pytorch tensors.
>>> #
>>> # First, the "i" variables:
>>> x = torch.randn(M,3) # Positions, in R^3
>>> u = torch.randn(M,3) # Orientations, in R^3 (for example)
>>> #
>>> # Then, the "j" ones:
>>> y = torch.randn(N,3) # Positions, in R^3
>>> v = torch.randn(N,3) # Orientations, in R^3
>>> #
>>> # The signal b_j, supported by the (y_j,v_j)'s
>>> b = torch.randn(N,4)
>>> #
>>> # Pre-defined kernel: using custom expressions is also possible!
>>> # Notice that the parameter sigma is a dim-1 vector, *not* a scalar:
>>> sigma = torch.tensor([.5])
>>> params = {
... # The "id" is defined using a set of special function names
... "id" : Kernel("gaussian(x,y) * (linear(u,v)**2) "),
... # gaussian(x,y) requires a standard deviation; linear(u,v) requires no parameter
... "gamma" : ( 1./sigma**2 , None ) ,
... }
>>> #
>>> # Don't forget to normalize the orientations:
>>> u = torch.nn.functional.normalize(u, p=2, dim=1)
>>> v = torch.nn.functional.normalize(v, p=2, dim=1)
>>> #
>>> # We're good to go! Notice how we grouped together the "i" and "j" features:
>>> a = kernel_product(params, (x,u), (y,v), b)
>>> print(a)
"""
def __init__(self, name=None, formula_sum=None, routine_sum=None, formula_log=None, routine_log=None):
if name is not None:
# in the comments, let's suppose that name="gaussian(x,y) + laplacian(x,y) * linear(u,v)**2"
# Determine the features type from the formula : ------------------------------------------------
variables = re.findall(r'(\([a-z],[a-z]\))', name) # ['(x,y)', '(x,y)', '(u,v)']
used = set()
variables = [x for x in variables if x not in used and (used.add(x) or True)]
# = ordered, "unique" list of pairs "(x,y)", "(u,v)", etc. used
# = ['(x,y)', '(u,v)']
var_to_ind = {k: i for (i, k) in enumerate(variables)}
# = {'(x,y)': 0, '(u,v)': 1}
subformulas_str = re.findall(r'([a-zA-Z_][a-zA-Z_0-9]*)(\([a-z],[a-z]\))', name)
# = [('gaussian', '(x,y)'), ('laplacian', '(x,y)'), ('linear', '(u,v)')]
# f_ind = index of the current formula
# subformulas = list of formulas used in the kernel_product
# vars_needed_sum and vars_needed_log keep in mind the symbolic pre-computations
# |x-y|^2 and <x,y> that may be needed by the Vanilla PyTorch backend.
f_ind, subformulas, vars_needed_sum, vars_needed_log = 0, [], [], []
for formula_str, var_str in subformulas_str:
# Don't forget the copy! This code should have no side effect on kernel_formulas!
formula = copy.copy(kernel_formulas[formula_str]) # = Formula(...)
# Modify the symbolic "formula" to let it take into account the formula and variable indices:
formula, f_ind, need_sum, need_log = set_indices(formula, f_ind, var_to_ind[var_str])
# Store everyone for later use and substitution:
subformulas.append(formula)
vars_needed_sum.append(need_sum)
vars_needed_log.append(need_log)
# One after another, replace the symbolic "name(x,y)" by references to our list of "index-aware" formulas
for (i, _) in enumerate(subformulas):
name = re.sub(r'[a-zA-Z_][a-zA-Z_0-9]*\([a-z],[a-z]\)', r'subformulas[{}]'.format(i), name, count=1)
# = "subformulas[0] + subformulas[1] * subformulas[2]**2"
# Replace int values "N" with "Formula(intvalue=N)"" (except | |
<reponame>zhanghxpku/project-3-for-EMNLP-course
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import datetime
import logging
import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from utils import get_dataset
import traceback
from tensorflow.python.client import timeline
logger = logging.getLogger(__name__)
class PythonEstimator(object):
"""A Python Estimator which is more flexible than tf.Estimator"""
def __init__(self, conf, model):
self.config = conf
self.model = model
if not hasattr(self.config, 'log_every_n_steps'):
self.config.add('log_every_n_steps', 100)
if not hasattr(self.config, 'max_to_keep'):
self.config.add('max_to_keep', 50)
if not os.path.exists(self.config.checkpoint_dir):
os.makedirs(self.config.checkpoint_dir)
self.summaries_dir = os.path.join(self.config.checkpoint_dir, 'summary')
if not os.path.exists(self.summaries_dir):
os.makedirs(self.summaries_dir)
# ----------- check and reformat dataset config -----------
self.dataset_configs = []
self.eval_configs = []
assert hasattr(self.config, 'dev_dataset') or \
hasattr(self.config, 'eval_datasets') or \
hasattr(self.config, 'train_dataset') or \
hasattr(self.config, 'infer_dataset')
if hasattr(self.config, 'dev_dataset'):
self.dataset_configs.append(self.config.dev_dataset)
self.eval_configs.append(self.config.dev_dataset)
if hasattr(self.config, 'eval_datasets'):
self.dataset_configs += self.config.eval_datasets
self.eval_configs += self.config.eval_datasets
if hasattr(self.config, 'train_dataset'):
if not hasattr(self.config.train_dataset, 'name'):
self.config.train_dataset.add('name', 'train')
self.train_name = self.config.train_dataset.name
self.dataset_configs += [self.config.train_dataset]
if hasattr(self.config, 'infer_dataset'):
if not hasattr(self.config.infer_dataset, 'name'):
self.config.infer_dataset.add('name', 'infer')
self.infer_name = self.config.infer_dataset.name
self.dataset_configs += [self.config.infer_dataset]
# ----------- build dataset config -----------
# name can be same
for i, dataset_config in enumerate(self.dataset_configs):
assert 'Python' in dataset_config.type, 'dataset type error'
if not hasattr(dataset_config, 'name'):
dataset_config.add('name', 'eval' + str(i))
# ----------- build datasets, writers and model_inputs -----------
self.datasets = {}
self.writers = {}
self.model_inputs = {}
for i, dataset_config in enumerate(self.dataset_configs):
logger.info('Building ' + dataset_config.name + '...')
self.datasets[dataset_config.name] = get_dataset(dataset_config)
self.datasets[dataset_config.name].build(self.model_inputs)
self.model_inputs.update(self.datasets[dataset_config.name].inputs)
self.writers[dataset_config.name] = tf.summary.FileWriter(
os.path.join(self.summaries_dir, dataset_config.name)
)
# self.eval_datasets = [self.datasets[x] for x in self.eval_datasets]
self.config.best_checkpoint_dir = self.config.checkpoint_dir + '/best'
def set_eval_and_summary(self):
self.eval_and_summary = []
for key, value in self.estimator_spec.eval_metric_ops.items():
self.eval_and_summary.append(key)
def make_fetch_dict(self, mode):
fetch_dict = {}
if mode == 'EXPORT':
fetch_dict['predictions'] = self.estimator_spec.predictions
elif mode == tf.estimator.ModeKeys.PREDICT:
fetch_dict['predictions'] = self.estimator_spec.predictions
else:
for k, v in self.estimator_spec.eval_metric_ops.items():
fetch_dict[k] = v[1]
fetch_dict['predictions'] = self.estimator_spec.predictions
if mode == tf.estimator.ModeKeys.TRAIN:
fetch_dict['train_op'] = self.estimator_spec.train_op
fetch_dict['global_step'] = self.global_step
fetch_dict['loss'] = self.estimator_spec.loss
return fetch_dict
def build_graph(self, mode, use_best=False):
self.action_mode = mode
self.fetch_dict = {}
self.add_estimator_inputs(mode)
# if mode == 'EXPORT':
# self.estimator_spec = self.model.model_fn(self.model_inputs, mode=tf.estimator.ModeKeys.PREDICT)
# self.fetch_dict[mode] = self.make_fetch_dict(mode)
# else:
self.estimator_spec = self.model.model_fn(self.model_inputs, mode=mode)
self.set_eval_and_summary()
if mode == tf.estimator.ModeKeys.TRAIN:
self.global_step = tf.train.get_global_step()
self.global_epoch = 0
self.fetch_dict[tf.estimator.ModeKeys.EVAL] = self.make_fetch_dict(tf.estimator.ModeKeys.EVAL)
self.fetch_dict[mode] = self.make_fetch_dict(mode)
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.config.max_to_keep)
logger.info('Start session ...')
# ###########
self.run_metadata = tf.RunMetadata()
self.run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
config = tf.ConfigProto(graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)))
# ###########
self.sess = tf.Session(config=config)
if hasattr(self.config, 'debug') and self.config.debug.enabled:
logger.debug('Listing all variables in graph:')
for v in tf.get_default_graph().as_graph_def().node:
logger.debug(v)
assert self.config.debug.type in ['LocalCLIDebugWrapperSession', 'TensorBoardDebugWrapperSession'], \
'unsupported debug wrapper session!'
if self.config.debug.type == 'TensorBoardDebugWrapperSession':
self.sess = TensorBoardDebugWrapperSession(self.sess)
if self.config.debug.type =='LocalCLIDebugWrapperSession':
self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)
logger.info('Debuging as %s' % type(self.sess))
self.sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
writer = tf.summary.FileWriter(self.summaries_dir, self.sess.graph)
writer.close()
self._restore_checkpoint(use_best=use_best)
def add_estimator_inputs(self, mode):
if hasattr(self.config, 'dropout_keep_prob') and mode != 'EXPORT':
self.model_inputs['dropout_keep_prob'] = tf.placeholder(tf.float32, name="dropout_keep_prob")
logger.info('using dropout_keep_prob : %s', self.model_inputs['dropout_keep_prob'])
def update_estimator_feed_dict(self, batch, mode=tf.estimator.ModeKeys.TRAIN, *args, **kwargs):
if hasattr(self.config, 'dropout_keep_prob'):
if mode == tf.estimator.ModeKeys.TRAIN:
batch[self.model_inputs['dropout_keep_prob']] = self.config.dropout_keep_prob
else:
batch[self.model_inputs['dropout_keep_prob']] = 1
def feedforward(self, batch, mode, name, with_input=False, cal_time=False):
# update input data i.e. dropout rate
self.update_estimator_feed_dict(batch, mode)
fetch_dict = {}
fetch_dict.update(self.fetch_dict[mode])
try:
###########
if cal_time:
fetch_result = self.sess.run(fetch_dict, feed_dict=batch,options=self.run_options,run_metadata=self.run_metadata)
tl = timeline.Timeline(self.run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
with open('./results/timeline.json','w') as wd:
wd.write(ctf)
###########
else:
fetch_result = self.sess.run(fetch_dict, feed_dict=batch)
except ValueError as e:
for k, v in fetch_dict.items():
logger.error('fetch dict[%s] = %s', k, v)
for k, v in batch.items():
logger.error('Feed dict[%s] = %s', k, np.array(v).shape)
traceback.print_exc()
raise e
# update global step
if mode == tf.estimator.ModeKeys.TRAIN:
self.global_step = fetch_result['global_step']
# put input into output
if with_input:
for k, v in batch.items():
k = k.name
k = k.replace(':0', '')
k = k.replace('_placeholder', '')
fetch_result['predictions'][k] = v
return fetch_result
def log_result(self, name, speed, step, fetch_result):
if name == self.train_name:
common_output = '[%s][Epoch:%s][Step:%s][%.1f s][%.1f step/s]' % (
name, self.global_epoch, self.global_step, speed, step / speed)
eval_output = ''.join(['[%s:%s]' % (k, v) for k, v in fetch_result.items() if
k not in ['train_op', 'global_step', 'summary', 'predictions']])
else:
common_output = '[%s][%.1f s][%.1f step/s]' % (name, speed, step / speed)
eval_output = ''.join(['{%s:%s}' % (k, v) for k, v in fetch_result.items() if
(k not in ['train_op', 'global_step', 'summary',
'predictions'] and 'best' not in k)])
# print fetch_result['predictions']
if self.action_mode == tf.estimator.ModeKeys.TRAIN:
summary = tf.Summary()
for k in self.eval_and_summary:
if k in fetch_result:
summary.value.add(tag=k, simple_value=fetch_result[k])
self.writers[name].add_summary(summary, self.global_step)
output = common_output + "\t" + eval_output
logger.info(output)
def reset_metric(self):
self.sess.run(tf.local_variables_initializer())
def update_fetch_result(self, name, fetch_results, results=None):
pass
def eval(self, is_in_train=False):
if not is_in_train:
self.build_graph(mode=tf.estimator.ModeKeys.EVAL, use_best=True)
logger.info('Start evaling ...')
score = 0
watch_start = time.time()
for dataset_config in self.eval_configs:
self.reset_metric()
fetch_result = None
step = 0
results = []
for batch in self.datasets[dataset_config.name].make_iter(self.config.batch_size):
fetch_result = self.feedforward(
batch=batch,
mode=tf.estimator.ModeKeys.EVAL,
name=dataset_config.name,
)
step += 1
for single_result in self.iter_fetch_data(fetch_result['predictions']):
results.append(single_result)
self.update_fetch_result(dataset_config.name, fetch_result)
score = fetch_result['em']
self.log_result(
name=dataset_config.name,
speed=time.time() - watch_start,
step=step,
fetch_result=fetch_result,
)
watch_start = time.time()
# for res in results:
# print res['pred']
if hasattr(self.config, 'eval_to_file') and self.config.eval_to_file:
# char2id = {i : line.strip() for i, line in enumerate(open(self.config.char2id, 'r'))}
# word2id = {i : line.strip() for i, line in enumerate(open(self.config.word2id, 'r'))}
# word2id_emb = {i : line.strip() for i, line in enumerate(open(self.config.word2id_emb, 'r'))}
relation2id = {i : line.strip() for i, line in enumerate(open(self.config.comb2id, 'r'))}
# entity2id = {i : line.strip() for i, line in enumerate(open(self.config.entity2id, 'r'))}
fout = open(self.config.eval_op_path+'.'+dataset_config.name, 'w')
fout_err = open(self.config.eval_op_path+'.'+dataset_config.name+'.error', 'w')
for res in results:
if res['word_train'] is None:
break
idx = res['idx']
relation = map(lambda x: relation2id.get(x, '<UNK>'), [res['relation']])
pred = map(lambda x: relation2id.get(x, '<UNK>'), [res['pred']])
typ = res['typ']
pred_order = str(res['pred_order']) if str(typ) == '0' else ''
fout.write('\n'.join([str(idx)+'\t'+relation[0]+'\t'+pred[0]+'\t'+str(typ)+'\t'+pred_order+'\n']))
if res['relation'] != res['pred']:
fout_err.write('\n'.join([str(idx)+'\t'+relation[0]+'\t'+pred[0]+'\t'+str(typ)+'\t'+pred_order+'\n']))
fout.close()
fout_err.close()
if is_in_train:
self.reset_metric()
# skip normal eval save
if hasattr(self.config, 'skip_eval_save') and self.config.skip_eval_save:
return score
self._save_checkpoint()
return score
def do_something_when_epoch_over(self, epoch_time=None):
pass
def train(self, train_dataset=None):
self.build_graph(mode=tf.estimator.ModeKeys.TRAIN)
logger.info('Start training ...')
watch_start = start_time = time.time()
training_finished = False
count = 0 # Early stopping
max_step = 0
max_score = 0
while True:
epoch_start_time = time.time()
for batch in self.datasets[self.train_name].make_iter(self.config.batch_size):
fetch_result = self.feedforward(
batch=batch,
mode=tf.estimator.ModeKeys.TRAIN,
name=self.train_name,
cal_time=(self.global_step % self.config.log_every_n_steps == 10)
)
if not self.global_step:
continue
if self.global_step % self.config.log_every_n_steps == 0:
self.log_result(
name=self.train_name,
step=self.config.log_every_n_steps,
speed=(time.time() - watch_start),
fetch_result=fetch_result,
)
watch_start = time.time()
if hasattr(self.config, 'save_checkpoints_steps') and \
self.global_step % self.config.save_checkpoints_steps == 0:
self._save_checkpoint()
if hasattr(self.config, 'eval_interval_steps') and \
self.global_step % self.config.eval_interval_steps == 0:
score = self.eval(is_in_train=True)
print score, count, self.config.tolerance, count >= self.config.tolerance
if score < max_score:
count += 1
else:
count = 0
max_score = score
max_step = self.global_step
self._save_checkpoint(use_best=True)
if count >= self.config.tolerance:
training_finished = True
break
if hasattr(self.config, 'max_training_steps') and \
self.global_step > self.config.max_training_steps:
score = self.eval(is_in_train=True)
if score > max_score:
self._save_checkpoint(use_best=True)
max_score = score
max_step = self.global_step
training_finished = True
break
# when the eval results does not have any improvement after "auto_end_time" times, end the train
if hasattr(self.config, 'auto_end_time') and \
self.no_update_times >= self.config.auto_end_time:
training_finished = True
break
if hasattr(self.config, 'save_checkpoints_epochs'):
self._save_checkpoint()
if hasattr(self.config, 'eval_interval_epochs'):
score = self.eval(is_in_train=True)
print score, count, self.config.tolerance, count >= self.config.tolerance
if score < max_score:
count += 1
else:
count = 0
max_score = score
self._save_checkpoint(use_best=True)
if count >= self.config.tolerance:
training_finished = True
break
self.do_something_when_epoch_over(time.time() - epoch_start_time)
if training_finished:
break
logger.info('Epoch %s finished, %s elapsed.', self.global_epoch,
datetime.timedelta(seconds=time.time() - start_time))
self.global_epoch += 1
logger.info('Training finished, %s elapsed.', datetime.timedelta(seconds=time.time() - start_time))
def iter_fetch_data(self, fetch_result):
for i in range(self.config.batch_size):
data_point = {}
out_flag = False
for k, v in fetch_result.items():
if k in ['dropout_keep_prob']:
continue
if i >= len(v):
out_flag = True
break
data_point[k] = fetch_result[k][i]
if out_flag:
break
yield data_point
def infer(self, infer_dataset=None):
self.build_graph(mode=tf.estimator.ModeKeys.PREDICT, use_best=True)
logger.info('Start infering ...')
results = []
for batch in self.datasets[self.infer_name].make_iter(self.config.batch_size):
fetch_result = self.feedforward(
batch=batch,
mode=tf.estimator.ModeKeys.PREDICT,
name=self.infer_name,
with_input=True,
)
for single_result in self.iter_fetch_data(fetch_result['predictions']):
results.append(single_result)
relation2id = {i : line.strip() for i, line in enumerate(open(self.config.comb2id, 'r'))}
fout = open(self.config.infer_op_path, 'w')
relation2id = {i : line.strip() for i, line in enumerate(open(self.config.comb2id, 'r'))}
for res in results:
if res['word_train'] is None:
break
idx = res['idx']
pred = map(lambda x: relation2id.get(x, '<UNK>'), [res['pred']])
typ = res['typ']
pred_order = str(res['pred_order'])
fout.write('\n'.join([str(idx)+'\t'+pred[0]+'\t'+str(typ)+'\t'+pred_order+'\n']))
fout.close()
# def export_model(self):
# self.build_graph(mode='EXPORT')
# export_dir = os.path.join(self.config.checkpoint_dir, 'saved_model')
# outputs = self.estimator_spec.predictions
# for k, v in self.model_inputs.items():
# logger.info('inputs: [key: %s], [value: | |
def spb_setRange(self, minTuple, maxTuple):
"""
This function does the job of setting the Maximum value and Minimum value in one go.
...
Parameters
--------------
maxTuple : tuple
Maximum value of each progressbar, in tuple, with elements in order
Ex: maxVal = (100, 200, 300) : corresponding to 100 for the outermost, 200
for middle progress bar, 300 for innermost progressbar.
minVal : tuple
Minimum value of each progressbar, in tuple, with elements in order
Ex: minVal = (0, 10, 20) : corresponding to 0 for the outermost, 10
for middle progress bar, 20 for innermost progressbar.
Raises
--------------
Exception : "The Minimum and Maximum should be a Tuple"
Rasied when the user passes a non-tuple data type to the module.
ValueError : "Tuple length more than number of Progress Bars"
Raised when the tuple contains more element than the number of concentric progress bar in the spiralProgressBar widget.
ValueError : "Tuple length less than the number of Progress Bars"
Raised when the tuple contains less element than the number of concentric progress bar in the spiralProgressBar widget.
"""
if type(minTuple)!=type(()) or type(maxTuple)!=type(()):
raise Exception("The Minimum and Maximum should be a Tuple")
elif len(minTuple) > self.noProgBar or len(maxTuple) > self.noProgBar:
raise ValueError("Minimum/Maximum Tuple length exceeds the number of Progress Bar")
elif len(minTuple) < self.noProgBar or len(maxTuple) < self.noProgBar:
raise ValueError("Minimum/Maximum Tuple length is less than the number of Progress Bar")
for each in range(0, self.noProgBar, 1):
if minTuple[each]==maxTuple[each]:
raise ValueError("Minimum and Maximum cannot be the Same")
self.spb_minimValue = minTuple
self.spb_maximValue = maxTuple
self.update()
def spb_setGap(self, gap):
"""
Set the Gap between each concentric circle in the spiralProgressBar.
Default is : gap = 2*line width
...
Parameters
--------------
gap : int
Try different settings by passing an int to the function: 'int' corresponds to the "px" seperation
between the concentric circles.
Raises
--------------
Exception : "Gap should be an integer and not: " + type(gap)
Rasied when the user passes a non-tuple data type to the module.
"""
if type(gap)!=type(5):
raise ValueError("Gap should be an integer and not: " + str(type(gap)))
else:
self.spb_gap = gap
self.gapCngd = True
self.update()
def spb_setInitialPos(self, position):
"""
Sets the statring point of the progress bar or the 0% position.
Default is 'North'
...
Parameters
--------------
position : tuple
The tuple elements accepts only string of : 'North', 'South', 'East' and 'West'.
The order of arrangment matters i.e. the first element corresponds to the outer most concentric
progress bar and the last element correspinds to the innermost circle.
Ex : position = ('North', 'South', 'East')
Raises
--------------
Exception : "Position should be a Tuple and not " + type(position)
Rasied when the user passes a non-tuple data type to the module.
ValueError : "Tuple length more than number of Progress Bars"
Raised when the tuple contains more element than the number of concentric progress bar in the spiralProgressBar widget.
ValueError : "Tuple length less than the number of Progress Bars"
Raised when the tuple contains less element than the number of concentric progress bar in the spiralProgressBar widget.
"""
if type(position)!=type(()): #IF INPUT IS NOT A TUPLE
raise Exception("Position should be a Tuple and not " + str(type(position)))
elif len(position) > self.noProgBar: #IF TUPLE LENGTH IS MORE THAN THE NUMBER OF PROGRESS BAR
raise ValueError("Tuple length more than number of Progress Bars")
elif len(position) < self.noProgBar: #IF INPUT TUPLE LENGTH IS LESS THAN THE NUMBER OF PROGRESS BAR
raise ValueError("Tuple length less than the number of Progress Bars")
else:
for each in range(0, self.noProgBar, 1):
if type(position[each])!=type("string"):
raise Exception("Position Tuple elements should be String and not: " + str(type(position[each])))
elif position[each]=='North':
self.spb_startPos[each] = self.startPosFlags.North
elif position[each]=='South':
self.spb_startPos[each] = self.startPosFlags.South
elif position[each]=='East':
self.spb_startPos[each] = self.startPosFlags.East
elif position[each]=='West':
self.spb_startPos[each] = self.startPosFlags.West
else:
raise Exception("Position can hold Property: 'North', 'South', 'East' and 'West' and not: " + position[each])
self.update()
def spb_reset(self):
"""
Resets the progress bar to the 0%.
...
Parameters
--------------
none
Raises
--------------
none
"""
for each in range(0, self.noProgBar, 1):
spiralProgressBar.convValue(self, self.spb_minimValue[each], each)
self.update()
def spb_setGeometry(self, posX, posY):
"""
This module changes the position of the widget. Default it is : (0, 0).
...
Parameters
--------------
posX : int
The vertical position of the widget from the top of the window inside which the widget lies.
By default it is 0. The user can change the position to better suite his style and positioning of the
widget.
posY : int
Raises
--------------
Exception : Position should be an int
If the user passes a non-int data type.
"""
if type(posX)!=type(5) or type(posY)!=type(5):
raise Exception("Position should be a int and not: X" + str(type(posX))) + ", Y: " + str(type(posY))
return
if self.positionX!=posX:
self.positionX = posX
if self.positionY!=posY:
self.positionY = posY
self.update()
def spb_setDirection(self, direction):
"""
Direction of rotation of the spiral progress bar.
...
Parameters
--------------
direction : tuple
Direction that the round progress bar can hold are : 'Clockwise' and 'AntiClockwise'
Default is 'Clockwise'. The tuple take string as elements corresponding to the direction of
each of the concentric circles.
Raises
--------------
Exception : "Direction should be a Tuple"
Rasied when the user passes a non-tuple data type to the module.
ValueError : "Tuple length more than number of Progress Bars"
Raised when the tuple contains more element than the number of concentric progress bar in the spiralProgressBar widget.
ValueError : "Tuple length less than the number of Progress Bars"
Raised when the tuple contains less element than the number of concentric progress bar in the spiralProgressBar widget.
Exception : "Direction Tuple elements should be String"
Rasies when the elements of the tuple is not a string.
"""
if type(direction)!=type(()): #IF INPUT IS NOT A TUPLE
raise Exception("Direction should be a Tuple and not " + str(type(direction)))
elif len(direction) > self.noProgBar: #IF TUPLE LENGTH IS MORE THAN THE NUMBER OF PROGRESS BAR
raise ValueError("Tuple length more than number of Progress Bars")
elif len(direction) < self.noProgBar: #IF INPUT TUPLE LENGTH IS LESS THAN THE NUMBER OF PROGRESS BAR
raise ValueError("Tuple length less than the number of Progress Bars")
else:
for each in range(0, self.noProgBar, 1):
if type(direction[each])!=type("String"):
raise Exception("Direction Tuple elements should be String and not: " + str(type(direction[each])))
elif direction[each]=='Clockwise':
self.spb_direction[each] = self.rotationFlags.Clockwise
elif direction[each]=='AntiClockwise':
self.spb_direction[each] = self.rotationFlags.AntiClockwise
else:
raise Exception("Direction can hold Property: 'Clockwise'/'AntiClockwise' and not: " + str(type(direction[each])))
self.update()
def variableWidth(self, inp):
"""
A flag for varing the progress bar size.
...
Parameters
--------------
inp : bool
True : Changes the size of the width of line progressely.
Raises
--------------
Exception : Variable width should be a bool : True/False
Rasied when the user passes a non-bool data type to the module.
"""
if type(inp)!=type(True):
raise Exception("Variable Width should be a Bool and not " + str(type(inp)))
else:
self.varWidth = inp
self.update()
def spb_widthIncrement(self, increm):
"""
Width increment for incrment in the line width. Default is 1px. User can sepcify the
amount of px to increment form the outer to inner circle progressbar.
...
Parameters
--------------
incrment : int
Increment passed to the module as int px.
Raises
--------------
Exception : Increment should be an integer
Rasied when the user passes a non-int data type to the module.
"""
if type(increm)!=type(5):
raise Exception("Increment should be an integer and not " + str(type(increm)))
else:
self.widthIncr = increm
self.update()
def spb_lineWidth(self, width):
"""
Line width of the circles in the spiral progress bar.
...
Parameters
--------------
width : int
Raises
--------------
Exception : Width should be an Integer
Rasied when the user passes a non-int data type to the module.
"""
if type(width)!=type(5):
raise Exception("Width should be an Integer and not " + str(type(width)))
else:
self.lineWidth = width
if self.gapCngd!=True:
self.spb_gap = self.lineWidth*2
self.update()
def spb_lineColor(self, color):
"""
Color of line in the spiral progress bar. Each concentric | |
# File: cubepy/factory.py
#functions for use as: import cubepy.factory as cp
import numpy as np
import cubepy
import numbers
from sys import platform
from cubepy.cube import kindToString, apply_op
import os
import csv
random = -123798
byVal = 1
byPos = 2
exact = 1
start_with = 2
end_with = 3
contain = 4
def cube(axes, values=None, broadcast=True,dtype=None):
"""Create a cube object.
axes: list of axis of the cube
values: optional, list of values of the cube. Can be other cubes for build a report.
Ex.
cp.cube([time])
cp.cube([time,product])
cp.cube([time,product],[10,20,30,40,50,60,70,80])
cp.cube([time,product],cp.random)
cp.cube([index_reports],[report_1,report_2])
"""
if values is None:
if not dtype is None:
if dtype is str:
return cubepy.Cube.full(axes,'', dtype='U25')
elif kindToString(np.dtype(dtype).kind)=="string":
return cubepy.Cube.full(axes,'', dtype=dtype)
return cubepy.Cube.zeros(axes)
else:
if isinstance(values,list) or isinstance(values,np.ndarray) :
if len(values)>0:
if isinstance(values[0],cubepy.Cube):
#use stack
if isinstance(axes,list):
axes = axes[0]
return cubepy.stack(values,axes,broadcast)
return cubepy.Cube(axes,values,fillValues=True, dtype=dtype)
elif isinstance(values,numbers.Number) and values==random:
theSize = [len(x) for x in axes]
return cube(axes, np.random.randint(100, size=theSize), dtype=dtype)
else:
return cubepy.Cube.full(axes,values, dtype=dtype)
def index(name,values):
"""Create a index object.
name: name for the index
values: list of values of the index.
Ex.
cp.index("items",["Item 1","Item 2","Item 3"])
cp.index("years",[2016,2017,2018])
"""
if values is None:
values = ["Item 1","Item 2","Item 3"]
return cubepy.Index(name ,values)
def find(param1, param2, compareType=1, caseSensitive = True):
"""
param1: value or indexarray for compare
param2: index compare to
compareType: cp.exact=1, cp.start_with=2, cp.end_with=3, cp.contain=4
caseSensitive: able to differentiate between uppercase and lowercase (by default True)
If param1 is a scalar (numeric or str) and param2 is an index: return cube indexed by param2 with True on ocurrences of param2
Ex. result = cp.apply("te", region, cp.end_with)
If param1 is an index and param2 is an index too: return cube indexed by param1 and param2 with True on ocurrences of param1 on param2
Ex. result = cp.apply(subregion, region, cp.contain)
"""
def _fn(item,value):
if isinstance(item,str) == False:
item = str(item)
if isinstance(value,str) == False:
value = str(value)
if compareType==1:
if caseSensitive:
return item == value
else:
return item.lower() == value.lower()
elif compareType==2:
if caseSensitive:
return item[:len(value)] == value
else:
return item[:len(value)].lower() == value.lower()
elif compareType==3:
if caseSensitive:
return item[-len(value):] == value
else:
return item[-len(value):].lower() == value.lower()
elif compareType==4:
if caseSensitive:
return value in item
else:
return value.lower() in item.lower()
if (isinstance(param1,str) or str(param1).isnumeric()) and isinstance(param2,cubepy.Index):
vfn = np.vectorize(_fn)
return cubepy.Cube([param2],vfn(param2.values,param1))
if isinstance(param1,cubepy.Index) and isinstance(param2,cubepy.Index):
_res = cubepy.Cube.full([param1,param2],False)
rr=0
for row in param1.values:
cc=0
for col in param2.values:
_res.values[rr,cc] = _fn(col,row)
cc+=1
rr+=1
return _res
def selectText(data, first = None, last = None ):
"""Returns a new cube with the text contained between the 'first' and 'last' characters of cube / index 'data'. Starts counting from 0.
If 'first' character is ommited, it returns every character from the first character of 'data' to the 'last' character, inclusive.
If 'last' character is ommited, it returns every character from "first" character of 'data', to the last character available for each cell.
"""
if first is None:
if last is None:
sliced_data = apply( lambda x: x[:], data )
else:
sliced_data = apply( lambda x: x[:last], data )
else:
if last is None:
sliced_data = apply( lambda x: x[first:], data )
else:
sliced_data = apply( lambda x: x[first:last], data )
return sliced_data
def apply(fn, param1, param2=None, start=None):
"""
Apply functions to index and cubes. Multiple results can be obtained
fn: function to apply
param1: index or cube
param2: index or cube
start: scalar or cube
Ex.
cp.apply(lambda x: x[:2] ,indexRegion): return new cube indexed by "indexRegion" and apply fn on each item
cp.apply(lambda x: x*5 ,cubeSales): return new cube result of apply fn on all values of the cubeSales
cp.apply( cp.addPeriods, start_proj , end_proj): Return new cube result of apply fn on "start_proj" with "end_proj"
cp.apply(lambda x: x+1, indexYear, start=10) : Return new cube indexed by "indexYear", result of apply fn starting with scalar value "10"
cp.apply(lambda x: x+1, indexYear, start=prices) : Return new cube indexed by "indexYear", result of apply fn starting with cube "prices"
"""
if callable(fn):
vfn = np.vectorize(fn)
if param2 is None:
if isinstance(param1,cubepy.Index):
if start is None:
return cubepy.Cube([param1],vfn(param1.values))
elif isinstance(start,cubepy.Cube):
values=[start.values]
numEls =len(param1)
for nn in range(numEls-1):
values.append(fn(values[nn]))
new_axes = start._axes.insert(param1, 0)
return cubepy.Cube(new_axes,values)
else:
values=[start]
numEls =len(param1)
for nn in range(numEls-1):
values.append(fn(values[nn]))
return cubepy.Cube(param1,values)
if isinstance(param1,cubepy.Cube):
return param1.apply(fn)
elif isinstance(param1,cubepy.Cube) and isinstance(param2,cubepy.Cube):
return apply_op( param1 , param2 , vfn)
return None
def max(cube1,cube2):
"""Return max value between two cubes
"""
return (cube1 > cube2)*cube1 + (cube1 <= cube2) * cube2
def min(cube1,cube2):
"""Return min value between two cubes
"""
return (cube1 > cube2)*cube2 + (cube1 <= cube2) * cube1
def sum(cube, axis=None, keep=None, group=None, sort_grp=True):
"""Sum of array elements over a given axis.
:param axis: Axis or axes along which a sum is performed. The default (axis = None) is perform a sum
over all the dimensions of the input array. axis may be negative, in which case it counts from the last
to the first axis. If this is a tuple of ints, a sum is performed on multiple axes, instead of a single
axis or all the axes as before.
:return: new Cube instance or a scalar value
"""
return cube.reduce(np.sum, axis, keep, group, sort_grp)
def subscript(cube, index, value):
"""Filter cube1 using the index and the value. Return a new cube without the index1 dimension
Ex.
cp.subscript(nodo_ejemplo,index_para_ejemplo,"Item 1")
"""
return cube[index==value]
def slice(cube, index, value):
"""Filter cube using the index and the value. Return a new cube without the index dimension
Ex.
cp.slice(nodo_ejemplo,index_para_ejemplo,2)
"""
if isinstance(index,cubepy.Index):
index = index.name
if isinstance(value,str) or str(value).isnumeric():
value =[value]
return cube.take(index,value).squeeze()
def shift(cube, axis, n=1, cval=0):
"""Returns a cube with the axis shifted.
Ex.
cp.shift(nodo_ejemplo,index_para_ejemplo,1)
"""
return cube.shift(axis,n,cval)
def subset(cube, indexName="new index"):
"""Returns a list of all the elements of the index for which cube is true. The function is used to create a new index that is a subset of an existing index.
Ex. cp.subset(cantidades>0)
"""
cond = cube>0
values = cond.axes[0].values[cond.values]
return index(indexName,values)
def aggregate(cube,mapCube,indexToRemove,targetIndex):
""" Aggregates the values in Cube to generate the result indexed by targetIndex.
Map gives the value of targetIndex for each element of indexToRemove
Example for aggregating time information into annual index the syntax is:
cp.aggregate(cube, map, time, years )
"""
grouping_index_mat = cubepy.Cube([targetIndex],targetIndex.values)
mat_allocation = mapCube == grouping_index_mat
return (cube * mat_allocation).sum(indexToRemove)
def cumulate(cube, index):
""" TODO coment
"""
pos=0
tmpMat=cubepy.Cube.zeros(cube.axes)
tmpInd=cubepy.Cube.zeros([index])
for j in index.values:
tmpInd.values[pos:pos+1]=1
tmpMat = tmpMat + ((tmpInd * cube).sum(index))*(j==index)
pos=pos+1
return tmpMat
def cumProd(cube, index):
"""Return the cumulative product of elements along a given axis
param cube: cube
param axis: axis name (str), index (int) or instance
Ex:
cp.cumProd(nodo,indice)
"""
return cube.cumProd(index)
def irr(flow, time_index ):
    """Returns the Internal Rate of Return (IRR) of a series of periodic payments (negative values) and inflows (positive values). The IRR is the discount rate at which the Net Present Value (NPV) of the flows equals zero.
    The variable flow must be indexed by time_index.
    If the cash flow never changes sign, cp.irr() has no solution and returns NAN (Not A Number).
    """
    # NOTE(review): np.irr was deprecated in NumPy 1.18 and removed in 1.20
    # (moved to the separate numpy-financial package) -- confirm the pinned
    # NumPy version still provides it.
    import pandas as pd
    # All of flow's dimensions except the time dimension.
    _cube_dimensions = index("flowdims",flow.dims )
    _rest_of_indexes_labels = subset( _cube_dimensions != time_index.name ).values
    _rest_of_indexes = [flow.axis(xx) for xx in _rest_of_indexes_labels]
    _cube = None
    if len( _rest_of_indexes ) == 0:
        # Only the time dimension: a single cash-flow series, scalar IRR.
        _cube = np.irr( flow.values )
    else:
        # One IRR per combination of the remaining dimensions.
        _cube = cube( _rest_of_indexes )
        _multivalues = [idx.values for idx in _rest_of_indexes]
        # Cartesian product of all remaining-dimension labels.
        _values = pd.MultiIndex.from_product( _multivalues ).values
        for _item in _values:
            _filter = []
            for _nn in range(len(_item)):
                _filter.append( _rest_of_indexes[_nn].filter( [_item[_nn]] ) )
            # Collapse to the 1-D time series for this combination, then solve.
            _irr = np.irr( flow.filter( _filter ).squeeze().values )
            _cube.set_data( _filter, _irr )
    return _cube
def npv(rate, flow, time_index, offset = 1):
""""Returns the Net Present Value (NPV) of a cash flow with equally spaced periods. The flow parameter must contain a series of periodic payments (negative values) and inflows (positive values), indexed by time_index.
The optional offset parameter especifies the offset of the first value relative to the current time period. By default, offset is | |
Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdBlueprints200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_blueprints" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_blueprints`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_blueprints`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v2/corporations/{corporation_id}/blueprints/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdBlueprints200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_containers_logs(self, corporation_id, **kwargs):  # noqa: E501
    """Get all corporation ALSC logs  # noqa: E501

    Returns logs recorded in the past seven days from all audit log secure containers (ALSC) owned by a given corporation --- This route is cached for up to 600 seconds --- Requires one of the following EVE corporation role(s): Director  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_corporations_corporation_id_containers_logs(corporation_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int corporation_id: An EVE corporation ID (required)
    :param str datasource: The server name you would like data from
    :param int page: Which page of results to return
    :param str token: Access token to use if unable to set a header
    :param str user_agent: Client identifier, takes precedence over headers
    :param str x_user_agent: Client identifier, takes precedence over User-Agent
    :return: list[GetCorporationsCorporationIdContainersLogs200Ok]
        If the method is called asynchronously, returns the request thread.
    """
    # Callers of this convenience wrapper always get just the payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async mode: hand back the request thread immediately.
        return self.get_corporations_corporation_id_containers_logs_with_http_info(corporation_id, **kwargs)  # noqa: E501
    # Sync mode: perform the call and unwrap the data.
    payload = self.get_corporations_corporation_id_containers_logs_with_http_info(corporation_id, **kwargs)  # noqa: E501
    return payload
def get_corporations_corporation_id_containers_logs_with_http_info(self, corporation_id, **kwargs):  # noqa: E501
    """Get all corporation ALSC logs  # noqa: E501
    Returns logs recorded in the past seven days from all audit log secure containers (ALSC) owned by a given corporation --- This route is cached for up to 600 seconds --- Requires one of the following EVE corporation role(s): Director  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_corporations_corporation_id_containers_logs_with_http_info(corporation_id, async=True)
    >>> result = thread.get()
    :param async bool
    :param int corporation_id: An EVE corporation ID (required)
    :param str datasource: The server name you would like data from
    :param int page: Which page of results to return
    :param str token: Access token to use if unable to set a header
    :param str user_agent: Client identifier, takes precedence over headers
    :param str x_user_agent: Client identifier, takes precedence over User-Agent
    :return: list[GetCorporationsCorporationIdContainersLogs200Ok]
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted keyword arguments: documented API parameters plus the
    # generated client's internal transport options.
    all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of local names (self, corporation_id, kwargs) used as a
    # mutable parameter bag -- standard swagger-codegen pattern.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_corporations_corporation_id_containers_logs" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'corporation_id' is set
    if ('corporation_id' not in params or
            params['corporation_id'] is None):
        raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_containers_logs`")  # noqa: E501
    if 'corporation_id' in params and params['corporation_id'] < 1:  # noqa: E501
        raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_containers_logs`, must be a value greater than or equal to `1`")  # noqa: E501
    collection_formats = {}
    # Path template substitutions for /v2/corporations/{corporation_id}/...
    path_params = {}
    if 'corporation_id' in params:
        path_params['corporation_id'] = params['corporation_id']  # noqa: E501
    # Optional query-string parameters, appended only when supplied.
    query_params = []
    if 'datasource' in params:
        query_params.append(('datasource', params['datasource']))  # noqa: E501
    if 'page' in params:
        query_params.append(('page', params['page']))  # noqa: E501
    if 'token' in params:
        query_params.append(('token', params['token']))  # noqa: E501
    if 'user_agent' in params:
        query_params.append(('user_agent', params['user_agent']))  # noqa: E501
    header_params = {}
    if 'x_user_agent' in params:
        header_params['X-User-Agent'] = params['x_user_agent']  # noqa: E501
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['evesso']  # noqa: E501
    # NOTE(review): 'async' became a reserved word in Python 3.7, so the
    # `async=` keyword argument below is a SyntaxError on modern
    # interpreters (newer swagger-codegen renames it to `async_req`).
    # Confirm the target runtime before deploying this client.
    return self.api_client.call_api(
        '/v2/corporations/{corporation_id}/containers/logs/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[GetCorporationsCorporationIdContainersLogs200Ok]',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_corporations_corporation_id_divisions(self, corporation_id, **kwargs):  # noqa: E501
    """Get corporation divisions  # noqa: E501

    Return corporation hangar and wallet division names, only show if a division is not using the default name --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_corporations_corporation_id_divisions(corporation_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int corporation_id: An EVE corporation ID (required)
    :param str datasource: The server name you would like data from
    :param str token: Access token to use if unable to set a header
    :param str user_agent: Client identifier, takes precedence over headers
    :param str x_user_agent: Client identifier, takes precedence over User-Agent
    :return: GetCorporationsCorporationIdDivisionsOk
        If the method is called asynchronously, returns the request thread.
    """
    # Callers of this convenience wrapper always get just the payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async mode: hand back the request thread immediately.
        return self.get_corporations_corporation_id_divisions_with_http_info(corporation_id, **kwargs)  # noqa: E501
    # Sync mode: perform the call and unwrap the data.
    payload = self.get_corporations_corporation_id_divisions_with_http_info(corporation_id, **kwargs)  # noqa: E501
    return payload
def get_corporations_corporation_id_divisions_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get corporation divisions # noqa: E501
Return corporation hangar and wallet division names, only show if a division is not using the default name --- This route is cached for up to 3600 seconds --- Requires one of the following EVE corporation role(s): Director # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_divisions_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetCorporationsCorporationIdDivisionsOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_divisions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_divisions`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_divisions`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in | |
# dnsMasterChef.py  (the literal "<filename>" tag was extraction residue, not Python)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from optparse import OptionParser, OptionGroup
from configparser import ConfigParser
from dnslib import *
from IPy import IP
import threading
import random
import operator
import time
import socketserver
import socket
import sys
import os
import binascii
import string
import base64
import time
import dns.resolver
import dns.query
import hashlib
import pyasn
import asyncio
import concurrent.futures
from datetime import datetime
# The IP-to-ASN database used to correlate an IP address with its BGP AS.
ip_to_as = "ipasn_201803.dat"
# None until the database loads successfully (the original used "" as the
# placeholder, which has the wrong type for a pyasn handle).
asndb = None
if os.path.exists(ip_to_as):
    asndb = pyasn.pyasn(ip_to_as)
else:
    print(ip_to_as + " is not there! I need a ip to AS database...")
    # A missing database is a fatal error: exit non-zero (the original
    # exit(0) reported success to the shell).
    sys.exit(1)
# Providers variable definition
# Each Resolver object doubles as a config record: .Name for log output,
# .nameservers for the upstream IPs, and .Sinkhole for the loopback address
# returned when that provider filters a domain.
Google = dns.resolver.Resolver()
Google.Name = "Google DNS"
Strongarm = dns.resolver.Resolver()
Strongarm.Name = "Strongarm"
Quad9 = dns.resolver.Resolver()
Quad9.Name = "Quad9"
SafeDNS = dns.resolver.Resolver()
SafeDNS.Name = "SafeDNS"
ComodoSecure = dns.resolver.Resolver()
ComodoSecure.Name = "ComodoSecure"
NortonConnectSafe = dns.resolver.Resolver()
NortonConnectSafe.Name = "NortonConnectSafe"
# Setting IP address of each DNS provider
# NOTE(review): several of the addresses below (172.16.x.x, 192.168.x.x)
# are private-range and look like sanitized placeholders, not the
# providers' real anycast resolvers -- confirm before deploying.
Google.nameservers = ['8.8.8.8', '8.8.4.4']
Google.Sinkhole = '127.0.0.7'
Quad9.nameservers = ['9.9.9.9', '149.112.112.112']
Quad9.Sinkhole = '127.0.0.2'
Strongarm.nameservers = ['172.16.17.32', '192.168.3.11']
Strongarm.Sinkhole = '127.0.0.3'
SafeDNS.nameservers = ['192.168.127.12', '172.16.31.10']
SafeDNS.Sinkhole = '127.0.0.4'
ComodoSecure.nameservers = ['8.26.56.26', '8.20.247.20']
ComodoSecure.Sinkhole = '127.0.0.5'
NortonConnectSafe.nameservers = ['172.16.58.3', '172.16.58.3']
NortonConnectSafe.Sinkhole = '127.0.0.6'
# Google is deliberately absent: it serves as the baseline in launch().
Providers = [Strongarm, NortonConnectSafe, ComodoSecure, Quad9, SafeDNS]
NumberOfProviders = len(Providers)
# Query a provider and verify the answer
async def Query(domain,DnsResolver,asn_baseline,hash_baseline):
    """Resolve domain through DnsResolver and compare against the baseline.

    Returns [True, DnsResolver] when the provider's A records match the
    baseline fingerprint, or at least originate from the same BGP AS;
    [False, DnsResolver] when the domain does not resolve or differs.

    NOTE(review): DnsResolver.query() is a blocking call inside a coroutine,
    so these checks effectively run sequentially on the event loop (the
    ThreadPoolExecutor created in main() is never actually used). Also,
    dns.exception.Timeout / NoNameservers are not caught here and would
    propagate out of asyncio.gather() -- confirm this is intended.
    """
    try:
        #Get the A record for the specified domain with the specified provider
        Answers = DnsResolver.query(domain, "A")
    #Domain did not resolve
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        return [False, DnsResolver]
    #List of returned IP
    Arecords = []
    for rdata in Answers:
        Arecords.append(rdata.address)
    #Compare the answer with the baseline to see if record(s) differ
    if hashlib.md5(str(sorted(Arecords)).encode('utf-8')).hexdigest() != hash_baseline.hexdigest():
        #Record(s) differ, checking if the first one is in the same BGP AS
        if(asndb.lookup(sorted(Arecords)[0])[0] != asn_baseline):
            return [False, DnsResolver]
    #Domain is safe
    return [True, DnsResolver]
# Creates the parallel provider-check tasks
async def main(domain, asn_baseline, hash_baseline):
    """Run Query() against every provider and combine the verdicts.

    Returns [False, provider] as soon as any provider disagrees with the
    baseline, otherwise [True, last_provider].

    The original wrapped this in a ThreadPoolExecutor that was never used
    (the tasks are coroutines scheduled on the event loop, not submitted to
    the executor), so that dead scaffolding is removed here.
    """
    tasks = [
        asyncio.ensure_future(Query(domain, provider, asn_baseline, hash_baseline))
        for provider in Providers
    ]
    last_provider = None
    for is_safe, provider in await asyncio.gather(*tasks):
        last_provider = provider
        # One provider flagged the domain: it is unsafe.
        if not is_safe:
            return [False, provider]
    # No provider returned False: the domain is safe.
    return [True, last_provider]
# Create the event loop
def Createloop(domain, asn_baseline, hash_baseline):
    """Run the async provider checks on a dedicated event loop and return the verdict."""
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    verdict = loop.run_until_complete(main(domain, asn_baseline, hash_baseline))
    # Result received; drain any pending async generators before returning.
    loop.run_until_complete(loop.shutdown_asyncgens())
    return verdict
# Establish a baseline with Google Public DNS, then check the other providers
def launch(domain):
    """Resolve domain via Google to build the baseline, then run all provider checks."""
    baseline_hash = hashlib.md5()
    try:
        # Lookup the 'A' record(s) with the baseline resolver.
        google_answers = Google.query(domain, "A")
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        # The baseline itself failed to resolve.
        return [False, Google]
    records = [rdata.address for rdata in google_answers]
    # ASN of the lowest A record anchors the BGP sanity check.
    baseline_asn = asndb.lookup(sorted(records)[0])[0]
    # Fingerprint the *sorted* record list: round-robin answers reorder IPs
    # between queries (e.g. NS1 returns X,Y while NS2 returns Y,X).
    baseline_hash.update(str(sorted(records)).encode('utf-8'))
    return Createloop(domain, baseline_asn, baseline_hash)
# DNSHandler Mixin. The class contains generic functions to parse DNS requests
# and calculate an appropriate response based on user parameters.
class DNSHandler:

    def parse(self, data):
        """Parse a raw DNS request and return the wire-format response ('' on bad input)."""
        response = ''
        try:
            # Parse data as DNS
            d = DNSRecord.parse(data)
        except Exception:
            print(('[%s] %s: ERROR: %s' % (time.strftime('%H:%M:%S'),
                   self.client_address[0], 'invalid DNS request')))
            if self.server.log:
                self.server.log.write(
                    '[%s] %s: ERROR: %s\n' %
                    (time.strftime('%d/%b/%Y:%H:%M:%S %z'),
                     self.client_address[0],
                     'invalid DNS request'))
        else:
            # Only Process DNS Queries
            if QR[d.header.qr] == 'QUERY':
                qname = str(d.q.qname)
                # Chop off the last period
                if qname[-1] == '.':
                    qname = qname[:-1]
                qtype = QTYPE[d.q.qtype]
                if qtype not in ['SOA', 'A']:
                    # Unsupported record type: proxy the request unchanged.
                    print("Filtering " + qtype + " requests not supported, Forwarding...")
                    print(
                        "[%s] %s: proxying the response of type '%s' for %s" %
                        (time.strftime("%H:%M:%S"), self.client_address[0], qtype, qname))
                    if self.server.log:
                        self.server.log.write(
                            "[%s] %s: proxying the response of type '%s' for %s\n" %
                            (time.strftime("%d/%b/%Y:%H:%M:%S %z"), self.client_address[0], qtype, qname))
                    nameserver_tuple = random.choice(self.server.nameservers).split('#')
                    response = self.proxyrequest(data, *nameserver_tuple)
                else:
                    # Ask the provider ensemble whether the domain is filtered.
                    IsSafe, ProviderName = launch(qname)
                    if IsSafe:
                        print(qname + " is safe, proxying...")
                        nameserver_tuple = random.choice(self.server.nameservers).split('#')
                        response = self.proxyrequest(data, *nameserver_tuple)
                    else:
                        # Sinkhole: answer with the flagging provider's loopback
                        # address.  (The original also built an unused
                        # fake_records dict here; dead code removed.)
                        fake_record = ProviderName.Sinkhole
                        # Create a custom response to the query
                        response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap, qr=1, aa=1, ra=1), q=d.q)
                        if qtype == "SOA":
                            mname, rname, t1, t2, t3, t4, t5 = fake_record.split(" ")
                            times = tuple([int(t) for t in [t1, t2, t3, t4, t5]])
                            # dnslib doesn't like trailing dots
                            if mname[-1] == ".":
                                mname = mname[:-1]
                            if rname[-1] == ".":
                                rname = rname[:-1]
                            response.add_answer(RR(qname, getattr(QTYPE, qtype),
                                                   rdata=RDMAP[qtype](mname, rname, times)))
                        elif qtype == "A":
                            if fake_record[-1] == ".":
                                fake_record = fake_record[:-1]
                            response.add_answer(RR(qname, getattr(QTYPE, qtype), rdata=RDMAP[qtype](fake_record)))
                        response = response.pack()
                        print(qname + ' Spoofing because it is filtered by ' + ProviderName.Name)
        return response

    def findnametodns(self, qname, nametodns):
        """Find the configured fake host for a queried name; False when nothing matches."""
        from itertools import zip_longest

        # Make qname case insensitive
        qname = qname.lower()
        # Split and reverse qname into components for matching.
        qnamelist = qname.split('.')
        qnamelist.reverse()
        # HACK: sort the nametodns dictionary so that the global wildcard
        # ['*.*.*.*.*.*.*.*.*.*'] matches last.
        for domain, host in sorted(nametodns.items(), key=operator.itemgetter(1)):
            # NOTE: domains were already lowercased when loaded through
            # --file, --fakedomains or --truedomains.
            domain_parts = domain.split('.')
            domain_parts.reverse()
            # Compare labels in reverse; zip_longest pads the shorter side
            # with None.  (The original used the Python 2 idiom
            # map(None, a, b), which raises TypeError on Python 3.)
            for a, b in zip_longest(qnamelist, domain_parts):
                if a != b and b != '*':
                    break
            else:
                # Could be a real IP, or False when reverse matching with
                # 'truedomains'.
                return host
        else:
            return False

    def proxyrequest(self, request, host, port='53', protocol='udp'):
        """Forward a raw DNS request to a real DNS server and return its reply (None on error)."""
        reply = None
        try:
            family = socket.AF_INET6 if self.server.ipv6 else socket.AF_INET
            if protocol == 'udp':
                sock = socket.socket(family, socket.SOCK_DGRAM)
            elif protocol == 'tcp':
                sock = socket.socket(family, socket.SOCK_STREAM)
            sock.settimeout(3.0)
            # Send the proxy request to the chosen DNS server.
            if protocol == 'udp':
                sock.sendto(request, (host, int(port)))
                reply = sock.recv(1024)
                sock.close()
            elif protocol == 'tcp':
                sock.connect((host, int(port)))
                # Prefix the 2-byte length required by the TCP DNS protocol.
                length = binascii.unhexlify('%04x' % len(request))
                sock.sendall(length + request)
                # Strip the length prefix from the response.
                reply = sock.recv(1024)
                reply = reply[2:]
                sock.close()
        except Exception as e:
            print(('[!] Could not proxy request: %s' % e))
        else:
            return reply
# UDP DNS Handler for incoming requests
class UDPHandler(DNSHandler, socketserver.BaseRequestHandler):

    def handle(self):
        """Answer a single UDP DNS request."""
        # Unpack without shadowing the socket *module* (the original bound
        # the connected socket to the name 'socket').
        data, conn = self.request
        response = self.parse(data)
        if response:
            conn.sendto(response, self.client_address)
# TCP DNS Handler for incoming requests
class TCPHandler(DNSHandler, socketserver.BaseRequestHandler):

    def handle(self):
        """Answer a single TCP DNS request."""
        raw = self.request.recv(1024)
        # Strip the additional "length" parameter used in the TCP DNS protocol.
        payload = raw[2:]
        answer = self.parse(payload)
        if answer:
            # Re-attach the 2-byte "length" prefix before replying.
            prefix = binascii.unhexlify('%04x' % len(answer))
            self.request.sendall(prefix + answer)
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
    """UDPServer subclass carrying dnschef's extra configuration on the instance."""

    def __init__(self, server_address, RequestHandlerClass, nametodns,
                 nameservers, ipv6, log):
        self.nametodns = nametodns
        self.nameservers = nameservers
        self.ipv6 = ipv6
        # Choose the address family before the base class binds the socket.
        self.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
        self.log = log
        socketserver.UDPServer.__init__(self, server_address, RequestHandlerClass)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCPServer subclass carrying dnschef's extra configuration on the instance."""

    # Allow quick restarts on the same port.
    allow_reuse_address = True

    def __init__(self, server_address, RequestHandlerClass, nametodns,
                 nameservers, ipv6, log):
        self.nametodns = nametodns
        self.nameservers = nameservers
        self.ipv6 = ipv6
        # Choose the address family before the base class binds the socket.
        self.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
        self.log = log
        socketserver.TCPServer.__init__(self, server_address, RequestHandlerClass)
# Initialize and start the DNS Server
def start_cooking(interface, nametodns, nameservers, tcp=False, ipv6=False,
                  port='55', logfile=None):
    """Build the threaded DNS server and block the main thread until interrupted.

    :param interface: address to bind the server to
    :param nametodns: fake-domain -> host mapping
    :param nameservers: upstream 'host#port' strings used for proxying
    :param tcp: serve TCP instead of UDP
    :param ipv6: bind an IPv6 socket
    :param port: listen port (string, converted with int())
    :param logfile: optional path for the activity log
    """
    try:
        if logfile:
            # Line-buffered text mode: Python 3 raises ValueError for
            # unbuffered (buffering=0) text files, which the original used.
            log = open(logfile, 'a', 1)
            log.write('[%s] DNSChef is active.\n'
                      % time.strftime('%d/%b/%Y:%H:%M:%S %z'))
        else:
            log = None
        if tcp:
            print('[*] DNSChef is running in TCP mode')
            server = ThreadedTCPServer(
                (interface, int(port)),
                TCPHandler,
                nametodns,
                nameservers,
                ipv6,
                log,
            )
        else:
            server = ThreadedUDPServer(
                (interface, int(port)),
                UDPHandler,
                nametodns,
                nameservers,
                ipv6,
                log,
            )
        # Start a thread with the server -- that thread will then start
        # more threads for each request.
        server_thread = threading.Thread(target=server.serve_forever)
        # Exit the server thread when the main thread terminates.
        server_thread.daemon = True
        server_thread.start()
        # Park the main thread; the handler threads do the real work.
        while True:
            time.sleep(100)
    except (KeyboardInterrupt, SystemExit):
        if log:
            log.write('[%s] DNSChef is shutting down.\n'
                      % time.strftime('%d/%b/%Y:%H:%M:%S %z'))
            log.close()
        server.shutdown()
        print('[*] DNSChef is shutting down.')
        sys.exit()
    except IOError:
        print('[!] Failed to open log file for writing.')
    except Exception as e:
        print(('[!] Failed to start the server: %s' % e))
if __name__ == '__main__':
# Parse command line arguments
parser = OptionParser(usage="dnschef.py [options]:\n")
rungroup = OptionGroup(parser, "Optional runtime parameters.")
rungroup.add_option("--logfile", |
# (extraction residue removed: dataset-site navigation text "Subsets and
# Splits / No community queries yet / The top public SQL queries from the
# community will appear here once available." is not part of this program.)