text stringlengths 957 885k |
|---|
<gh_stars>10-100
"""Collection of functions that get data from a device using Restconf"""
from json.decoder import JSONDecodeError
import requests
import json
import warnings
import ipaddress
import device_call_backup as InCaseRestDoesntWork
warnings.filterwarnings('ignore', message='Unverified HTTPS request')
headers = {"Content-Type": 'application/yang-data+json', 'Accept': 'application/yang-data+json'}
def _check_api_error(response) -> bool:
is_error = False
try:
if list(response.keys())[0] == 'errors':
is_error = True
except IndexError:
pass
return is_error
def get_vrfs(ip, port, username, password) -> list:
    """Collect the device's configured VRF definitions.

    Queries the Cisco-IOS-XE-native VRF container; returns the
    'definition' list, or an empty list on any request/decode failure.
    """
    uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-native:native/vrf"
    try:
        reply = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        payload = json.loads(reply.text)
        if _check_api_error(payload):
            raise AttributeError
        return payload.get('Cisco-IOS-XE-native:vrf', {}).get('definition', {})
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL, AttributeError):
        return []
def get_poe(ip, port, username, password) -> list:
    """Collect PoE port operational data.

    Returns the 'poe-port' list from the poe-oper model, or an empty
    list on any request/decode failure.
    """
    uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-poe-oper:poe-oper-data"
    try:
        reply = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        payload = json.loads(reply.text)
        if _check_api_error(payload):
            raise AttributeError
        return payload.get('Cisco-IOS-XE-poe-oper:poe-oper-data', {}).get('poe-port', {})
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL, AttributeError):
        return []
def get_sfps(ip, port, username, password) -> list:
    """Collect the device's transceiver (SFP) inventory.

    Returns the 'transceiver' list from the transceiver-oper model, or
    an empty list on any request/decode failure.
    """
    uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-transceiver-oper:transceiver-oper-data"
    try:
        reply = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        payload = json.loads(reply.text)
        if _check_api_error(payload):
            raise AttributeError
        return payload.get('Cisco-IOS-XE-transceiver-oper:transceiver-oper-data', {}).get('transceiver', {})
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL, AttributeError):
        return []
def get_arps(ip, port, username, password) -> list:
    """Collects arp for the matching

    Flattens ARP entries across every VRF into one list; falls back to
    the CLI-based collector when the RESTCONF call fails.
    """
    entries = []
    try:
        uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-arp-oper:arp-data/arp-vrf"
        response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        # strict=False tolerates control characters embedded in the reply body
        arp_entries = json.loads(response.text, strict=False)
        check_error = _check_api_error(arp_entries)
        if check_error:
            raise AttributeError
        try:
            for i in arp_entries.get('Cisco-IOS-XE-arp-oper:arp-vrf'):
                for entry in i.get('arp-oper'):
                    # Replace the structured interface field with a flat placeholder
                    entry.pop('interface')
                    entry['vrf'] = i.get('vrf')
                    entry['interface'] = 'n/a'
                    # NOTE(review): strip('T00') removes leading/trailing 'T'/'0'
                    # characters, not the literal suffix "T00" — looks like an
                    # attempt to trim the ISO timestamp; confirm intended output.
                    entry['time'] = entry.get('time').split('.')[0].strip('T00')
                    entries.append(entry)
        except (TypeError, AttributeError):
            pass
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL, AttributeError):
        # REST path failed entirely — fall back to the CLI scraper
        entries = InCaseRestDoesntWork.get_arp(username, password, ip)
    return entries
def get_ip_sla(ip, port, username, password) -> list:
    """Collects ip sla statuses.

    Queries Cisco-IOS-XE-ip-sla-oper:ip-sla-stats and returns the
    'sla-oper-entry' list, or an empty list on any failure.
    """
    sla_stats = []
    try:
        # BUGFIX: the port was hard-coded to 443, silently ignoring the
        # caller-supplied `port` — use it like every other collector here.
        uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-ip-sla-oper:ip-sla-stats"
        response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        slas = json.loads(response.text)
        check_error = _check_api_error(slas)
        if check_error:
            raise AttributeError
        sla_stats = slas.get('Cisco-IOS-XE-ip-sla-oper:ip-sla-stats', {}).get('sla-oper-entry', {})
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL, AttributeError):
        pass
    return sla_stats
def get_interfaces(ip, port, username, password) -> dict:
    """Gets real time interface statistics using IOS-XE\n
    Cisco-IOS-XE-interfaces-oper:interfaces and live arp data via Cisco-IOS-XE-arp-oper:arp-data/arp-vrf

    Returns {interface-name: {'interface', 'data', 'qos'}} where 'data'
    is the oper record normalized by convert_to_mbps and 'qos' comes
    from collect_qos_stats ([[ ]] placeholder when the ARP probe fails).
    """
    data = {}
    interface_data = {}
    try:
        uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-interfaces-oper:interfaces"
        response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        interface_data = json.loads(response.text).get('Cisco-IOS-XE-interfaces-oper:interfaces').get('interface')
        check_error = _check_api_error(interface_data)
        if check_error:
            raise AttributeError
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError):
        pass
    if interface_data:
        try:
            # NOTE(review): this ARP response is never read — the request only
            # serves to trigger the except branch (QoS-less fallback) when the
            # device rejects the call; confirm before removing.
            uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-arp-oper:arp-data/arp-vrf"
            response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
            for interface in interface_data:
                #Collect inter qos statistics. Commence policy breakdown
                qos_stats = collect_qos_stats(interface, ip, port, username, password)
                convert_bandwidth = convert_to_mbps(interface)
                data[interface.get('name')] = {'interface': interface.get('name'), 'data': convert_bandwidth, 'qos': qos_stats}
        except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL):
            # Fallback: keep bandwidth data but report empty QoS per interface
            for interface in interface_data:
                convert_bandwidth = convert_to_mbps(interface)
                data[interface.get('name')] = {'interface': interface.get('name'), 'data': convert_bandwidth, 'qos': [[]]}
    return data
def collect_qos_stats(interface, ip, port, username, password) -> list:
    """Collect interface service policies, breaks down policy.

    For each diffserv policy attached to the oper-data interface record,
    fetches the matching policy-map config and joins it with the live
    classifier statistics. Returns a list of
    {'interface_policy', 'allocation', 'direction', 'queues'} dicts
    (empty when no usable policy/classifier data is found).
    """
    qos = []
    # The following code will compare two sets of data. Interface queue stats and service policy config. Unfortunently we cant get this data as one
    for policy in interface.get('diffserv-info', {}):
        try:
            #Get qos policy map details using rest and a name filter in out url path
            uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-native:native/policy/policy-map={policy.get('policy-name')}"
            response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
            check_error = _check_api_error(json.loads(response.text))
            if check_error:
                raise AttributeError
            #Get policy detials. Type, Allocation etc.
            allocation = _get_qos_bandwidth(json.loads(response.text))
            if not policy.get('diffserv-target-classifier-stats'):
                # No live stats for this policy — nothing to report
                qos = []
            elif isinstance(policy.get('diffserv-target-classifier-stats'), list):
                #Use list comp to get out queue details, also map other details
                if not allocation:
                    qos = []
                elif len(allocation) == 1:
                    # Single parent queue — one summary entry
                    qos = [{'interface_policy': policy.get('policy-name', {}), 'allocation': allocation[0].get('allocation', {}),
                            'direction': policy.get('direction', {}).split('-')[1], 'queues': _map_queues(allocation[0], policy)}]
                else:
                    # Multiple parent queues — one summary entry per queue
                    qos = [
                        {'interface_policy': policy.get('policy-name', {}), 'allocation': i.get('allocation', {}),
                         'direction': policy.get('direction', {}).split('-')[1], 'queues': _map_queues(i, policy)}
                        for i in allocation
                        ]
        except AttributeError:
            pass
    return qos
def _get_qos_bandwidth(policy) -> list:
    """Break down each child policy.

    Walks every class in the policy-map and pairs the class name with the
    (allocation, type) result of _allocation_type. Returns a list of
    {'queue', 'allocation', 'type'} dicts.
    """
    parent_queues = []
    #Get parent policy actions and action type. ie.e bandwdith, service-policy, fair-queue etc.
    for queue in policy.get('Cisco-IOS-XE-policy:policy-map', {}).get('class', {}):
        try:
            if isinstance(queue.get('action-list', {}), list):
                allocation = [_allocation_type(action) for action in queue.get('action-list', {})]
                # Single action: keep it unless it carried no usable data at all
                if len(allocation) == 1 and str(allocation) != '[(\'---\', \'---\')]':
                    parent_queues.append({'queue': queue.get('name'), 'allocation': allocation[0][0], 'type': allocation[0][1]})
                elif len(allocation) == 2:
                    # NOTE(review): 'type' here is the whole (allocation, type)
                    # tuple of the second action, not just the type string.
                    # _map_queues tests membership with `in`, so a tuple still
                    # matches — confirm before "fixing" this.
                    parent_queues.append({'queue': queue.get('name'), 'allocation': allocation[0][0], 'type': allocation[1]})
        except IndexError:
            pass
    return parent_queues
def _allocation_type(action) -> tuple:
"""Get details of child policy"""
allocation = '---'
action_type = '---'
if action.get("action-type",{}) == 'shape':
if 'bit-rate' in action.get('shape',{}).get('average',{}):
allocation = str(round(int(action.get("shape",{}).get("average",{}).get("bit-rate",{})) / 1e+6)) + " Mbps"
elif 'percent' in action.get('shape',{}).get('average'):
allocation = str(action.get("shape",{}).get("average",{}).get("percent",{})) + "%"
elif action.get("action-type",{}) == 'bandwidth':
if 'kilo-bits' in action.get('bandwidth', {}):
allocation = str(round(int(action.get("bandwidth",{}).get("kilo-bits",{})) * 1000 / 1e+6)) + " Mbps"
elif 'percent' in action.get('bandwidth', {}):
allocation = str(action.get("bandwidth",{}).get("percent",{})) + '%'
elif action.get("action-type",{}) == 'priority':
if 'kilo-bits' in action.get('priority', {}):
allocation = str(round(int(action.get("priority",{}).get("kilo-bits",{})) * 1000 / 1e+6)) + " Mbps"
elif 'percent' in action.get('priority', {}):
allocation = str(action.get("priority",{}).get("percent",{})) + '%'
elif action.get("action-type",{}) == 'set':
if 'dscp-val' in action.get('set', {}).get('dscp', {}):
allocation = action.get('set', {}).get('dscp', {}).get('dscp-val')
if action.get("action-type",{}) == 'service-policy':
action_type = 'service-policy'
elif action.get("action-type",{}) == 'random-detect':
action_type = 'fair-queue'
elif action.get("action-type",{}) == 'fair-queue':
action_type = 'fair-queue'
return allocation, action_type
def _map_queues(i, policy) -> list:
    """Map live classifier statistics onto the parent/child queue layout.

    i -- one {'queue', 'allocation', 'type'} record from _get_qos_bandwidth
    policy -- the matching diffserv entry from the interface oper data
    Returns a list of per-queue stat dicts (child queues with counters,
    parent queues as name-only markers).
    """
    queues = []
    # Check if policy type is service policy. When then can get our queue detiials
    if 'service-policy' in i.get('type'):
        for queue in policy.get('diffserv-target-classifier-stats', {}):
            #Parent path provided allows use to check if the queue is a child queue. 1st path part is Parent Policy, second is a paren queue, anything after is child
            if len(queue.get('parent-path').split()) != 2:
                queues.append({'queue-name': queue.get('classifier-entry-name'), 'parent': " ".join(queue.get('parent-path').split(" ")[0:2]),
                'rate': queue.get('classifier-entry-stats').get('classified-rate'), 'bytes': queue.get('classifier-entry-stats').get('classified-bytes'),
                'pkts': queue.get('classifier-entry-stats').get('classified-pkts'), 'drops': queue.get('queuing-stats').get('drop-bytes'),
                'tail-drops': queue.get('queuing-stats').get('wred-stats').get('tail-drop-bytes')})
            elif len(queue.get('parent-path').split()) == 2 and queue.get('classifier-entry-name') == i.get('queue'):
                queues.append({'queue-name': f'Parent Queue: {queue.get("classifier-entry-name")}'})
    elif '---' in i.get('type'):
        # This maps if the queue is not service policy. A single queue with no child
        queues = [
            {'queue-name': f'Parent Queue: {queue.get("classifier-entry-name")}'}
            for queue in policy.get('diffserv-target-classifier-stats', {})
            if len(queue.get('parent-path').split()) == 2 and queue.get('classifier-entry-name') == i.get('queue')
            ]
    return queues
def convert_to_mbps(interface) -> dict:
    """Normalize an interface oper record in place.

    Converts tx/rx Kbps counters to Mbps and collapses oper-status to
    'up'/'down'. Returns the same (mutated) dict.
    """
    stats = interface['statistics']
    stats['tx-kbps'] = int(stats['tx-kbps']) / 1000
    stats['rx-kbps'] = int(stats['rx-kbps']) / 1000
    is_ready = interface['oper-status'] == 'if-oper-state-ready'
    interface['oper-status'] = 'up' if is_ready else 'down'
    return interface
def get_cpu_usages(ip, port, username, password) -> tuple:
    """Gets real time CPU statistics using restconf/data/Cisco-IOS-XE-process-cpu-oper:cpu-usage.

    Returns (cpu_stats, memory_stats). Each side independently falls
    back to an 'Err' placeholder structure when its query fails.
    """
    cpu_stats = {}
    memory_stats = {}
    try:
        uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-process-cpu-oper:cpu-usage/cpu-utilization"
        response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        cpu_stats = json.loads(response.text)
        check_error = _check_api_error(cpu_stats)
        if check_error:
            raise AttributeError
    except Exception:
        # Placeholder mirrors the YANG structure so callers can render 'Err'
        cpu_stats = {'Cisco-IOS-XE-process-cpu-oper:cpu-utilization': {'cpu-usage-processes': {'cpu-usage-process': []},'five-seconds': 'Err', 'one-minute': 'Err', 'five-minutes': 'Err'}}
    try:
        uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-platform-software-oper:cisco-platform-software/control-processes/control-process"
        response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        memory_stats = json.loads(response.text)
        # BUGFIX: previously re-validated cpu_stats (already checked above)
        # instead of the freshly fetched memory payload.
        check_error = _check_api_error(memory_stats)
        if check_error:
            raise AttributeError
        memory_stats = memory_stats.get('Cisco-IOS-XE-platform-software-oper:control-process')[0].get('memory-stats', {})
    except Exception:
        memory_stats = {'memory-status': 'Err'}
    return cpu_stats, memory_stats
def get_hardware_status(ip, port, username, password) -> dict:
    """Gets CPU memory statuses IOS-XE

    Cisco-IOS-XE-platform-software-oper:cisco-platform-software/control-processes/control-process
    Returns the contents of the single top-level YANG container, or {} on
    API error / empty payload.
    """
    ###### Future Use
    data = {}
    uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-platform-software-oper:cisco-platform-software/control-processes/control-process"
    try:
        reply = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        status = json.loads(reply.text)
        if _check_api_error(status):
            raise AttributeError
        # Unwrap the single top-level container regardless of its key name
        root_key = list(status)[0]
        data = status[root_key]
    except AttributeError:
        pass
    return data
def get_envirmoment(ip, port, username, password) -> dict:
    """Gets real time environment statistics (sensors) via
    restconf/data/Cisco-IOS-XE-environment-oper:environment-sensors.

    Returns the 'environment-sensor' payload, or {} on failure.
    """
    env_data = {}
    uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-environment-oper:environment-sensors"
    try:
        reply = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        sensors = json.loads(reply.text)
        if _check_api_error(sensors):
            raise AttributeError
        env_data = sensors.get('Cisco-IOS-XE-environment-oper:environment-sensors', {}).get('environment-sensor')
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError):
        pass
    return env_data
def get_prefix_list(ip, port, username, password) -> list:
    """Gets prefix-lists from the device.

    Tries the ASR-style path first; a 204 (empty) reply means the
    platform exposes the CSR-style container instead. Returns the
    'prefixes' payload or a single placeholder entry on failure.
    """
    prefix_data = [{'name': 'No Prefix-lists Found'}]
    asr_uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-native:native/ip/prefix-list"
    csr_uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-native:native/ip/prefix-lists"
    try:
        response = requests.get(asr_uri, headers=headers, verify=False, auth=(username, password))
        if response.status_code == 204:
            # ASR path empty — retry with the CSR container and its key
            response = requests.get(csr_uri, headers=headers, verify=False, auth=(username, password))
            top_key = 'Cisco-IOS-XE-native:prefix-lists'
        else:
            top_key = 'Cisco-IOS-XE-native:prefix-list'
        decoded = json.loads(response.text)
        if _check_api_error(decoded):
            raise AttributeError
        prefix_data = decoded.get(top_key, {}).get('prefixes')
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError, TypeError):
        pass
    return prefix_data
def get_route_maps(ip, port, username, password) -> list:
    """Gets route-maps from the device.

    Returns the decoded route-map list, or a single placeholder entry
    when the query fails or no route-maps are configured.
    """
    route_map_data = [{'name': 'No Route-maps Found'}]
    uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-native:native/route-map"
    try:
        reply = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        decoded = json.loads(reply.text)
        found = decoded.get('Cisco-IOS-XE-native:route-map', {})
        # An empty container counts as "none found" — keep the placeholder
        if _check_api_error(decoded) or not len(found):
            raise AttributeError
        route_map_data = found
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError):
        pass
    return route_map_data
def get_components(ip, port, username, password) -> dict:
    """Gets device components via /restconf/data/openconfig-platform:components.

    Returns the raw decoded payload (no error-envelope check), or {} on
    any request/decode failure.
    """
    uri = f"https://{ip}:{port}/restconf/data/openconfig-platform:components"
    try:
        reply = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        return json.loads(reply.text)
    except (JSONDecodeError, requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError):
        return {}
def get_ospf(ip, port, username, password) -> tuple:
    """Gets device ospf operational data.

    Returns (ospf_neighbors, ospf_interfaces, topology, ospf_proccess).
    topology[0] is a dict keyed by neighbor router-ids; subsequent
    elements are this device's instance router-ids as strings.
    """
    ospf_neighbors = []
    ospf_interfaces = []
    topology = [{}]
    ospf_proccess = []
    try:
        # First confirm OSPF is configured at all (native config model)
        uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-native:native/router/Cisco-IOS-XE-ospf:router-ospf"
        response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        ospf = json.loads(response.text)
        check_error = _check_api_error(ospf)
        if check_error:
            raise AttributeError
        ospf_proccess = ospf.get('Cisco-IOS-XE-ospf:router-ospf', {}).get('ospf', {}).get('process-id')
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError, TypeError):
        pass
    if ospf_proccess:
        try:
            # Operational data: per-instance areas, interfaces, neighbors
            uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-ospf-oper:ospf-oper-data/ospf-state/ospf-instance"
            response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
            ospf = json.loads(response.text)
            check_error = _check_api_error(ospf)
            if check_error:
                raise AttributeError
            for instance in ospf.get('Cisco-IOS-XE-ospf-oper:ospf-instance', {}):
                try:
                    # Only valid dotted-quad router-ids make it into topology
                    topology.append(str(ipaddress.IPv4Address(instance.get('router-id', {}))))
                except ValueError:
                    pass
                for area in instance.get('ospf-area', {}):
                    if isinstance(area.get('ospf-interface', {}), list):
                        for interface in area.get('ospf-interface', {}):
                            interface['area'] = area.get('area-id', {})
                            # NOTE(review): 'neighbor-state' keeps only the last
                            # neighbor seen on this interface — confirm intended.
                            for neighbor in interface.get('ospf-neighbor', {}):
                                interface['neighbor-state'] = neighbor
                                neighbor['area'] = area.get('area-id', {})
                                ospf_neighbors.append(neighbor)
                            ospf_interfaces.append(interface)
            # Seed topology[0] with every neighbor id for later mapping
            for i in ospf_interfaces:
                topology[0][i.get('neighbor-state').get('neighbor-id')] = None
        except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError, TypeError):
            pass
    return ospf_neighbors, ospf_interfaces, topology, ospf_proccess
def get_bridge(ip, port, username, password) -> list:
    """Collect the device MAC address table (matm-oper model).

    Flattens every 'matm-mac-entry' across all tables; falls back to the
    CLI-based collector when the RESTCONF call fails or returns 404.
    """
    mac_table = []
    uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-matm-oper:matm-oper-data"
    try:
        reply = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        bridge_data = json.loads(reply.text)
        if _check_api_error(bridge_data) or reply.status_code == 404:
            raise AttributeError
        for table in bridge_data['Cisco-IOS-XE-matm-oper:matm-oper-data']['matm-table']:
            table_entries = table.get('matm-mac-entry', {})
            if table_entries:
                mac_table.extend(table_entries)
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError):
        # REST path failed — fall back to the CLI scraper
        mac_table = InCaseRestDoesntWork.get_mac_table(username, password, ip)
    return mac_table
def get_span_tree(ip, port, username, password) -> tuple:
    """Collect spanning-tree operational data.

    Returns (span_data, span_global_data): the per-instance 'stp-detail'
    list and the 'stp-global' container; both empty on failure.
    """
    span_data = []
    span_global_data = []
    base = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-spanning-tree-oper:stp-details"
    try:
        detail_reply = requests.get(base, headers=headers, verify=False, auth=(username, password))
        detail_json = json.loads(detail_reply.text)
        if _check_api_error(detail_json):
            raise AttributeError
        span_data = detail_json.get('Cisco-IOS-XE-spanning-tree-oper:stp-details', {}).get('stp-detail', {})
        global_reply = requests.get(f"{base}/stp-global", headers=headers, verify=False, auth=(username, password))
        global_json = json.loads(global_reply.text)
        if _check_api_error(global_json):
            raise AttributeError
        span_global_data = global_json.get('Cisco-IOS-XE-spanning-tree-oper:stp-global', {})
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError):
        pass
    return span_data, span_global_data
def get_dp_neighbors(ip, port, username, password) -> tuple:
    """Gets device components restconf/data/Cisco-IOS-XE-cdp-oper:cdp-neighbor-details.

    Collects CDP and LLDP neighbor tables independently (one failing does
    not block the other) and returns them as (cdp_list, lldp_list).
    """
    dp_neighbors = {'cdp': [], 'lldp': []}
    try:
        uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-cdp-oper:cdp-neighbor-details"
        response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        cdp_data = json.loads(response.text)
        check_error = _check_api_error(cdp_data)
        if check_error:
            raise AttributeError
        dp_neighbors['cdp'] = [neighbor for neighbor in cdp_data.get('Cisco-IOS-XE-cdp-oper:cdp-neighbor-details', {}).get('cdp-neighbor-detail', {})]
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError):
        pass
    try:
        uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-lldp-oper:lldp-entries"
        response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        lldp_data = json.loads(response.text)
        check_error = _check_api_error(lldp_data)
        if check_error:
            raise AttributeError
        dp_neighbors['lldp'] = [neighbor for neighbor in lldp_data.get('Cisco-IOS-XE-lldp-oper:lldp-entries', {}).get('lldp-entry', {})]
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError):
        pass
    return dp_neighbors['cdp'], dp_neighbors['lldp']
def get_vlans(ip, port, username, password) -> list:
    """Collect the VLAN table with member interfaces condensed to a string.

    Returns a list of {'id', 'name', 'status', 'interfaces'} dicts;
    'interfaces' is a comma-separated string, or [] when the VLAN has no
    member interfaces. Empty list on failure.
    """
    vlan_data = []
    uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-vlan-oper:vlans"
    try:
        reply = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        vlans = json.loads(reply.text)
        if _check_api_error(vlans):
            raise AttributeError
        for vlan in vlans.get('Cisco-IOS-XE-vlan-oper:vlans', {}).get('vlan', {}):
            try:
                members = vlan.get('vlan-interfaces')
                if members:
                    joined = ", ".join(member.get('interface') for member in members)
                else:
                    joined = []
                vlan_data.append({"id": vlan.get('id'), "name": vlan.get('name'), "status": vlan.get('status'), "interfaces": joined})
            except TypeError:
                # Malformed member entry — skip this VLAN
                pass
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError):
        pass
    return vlan_data
def get_switch(ip, port, username, password) -> tuple:
    """Collect switchport configuration merged with live interface stats.

    Returns (trunk, access): lists of mapped switchport records produced
    by map_switchports, partitioned by configured mode. Both empty on
    failure.
    """
    data = {}
    trunk = []
    access = []
    try:
        interfaces_configs = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-native:native/interface"
        interface_status = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-interfaces-oper:interfaces"
        config_response = requests.get(interfaces_configs, headers=headers, verify=False, auth=(username, password))
        config_json = json.loads(config_response.text)
        check_error = _check_api_error(config_json)
        if check_error:
            raise AttributeError
        stats_response = requests.get(interface_status, headers=headers, verify=False, auth=(username, password))
        interface_stats = json.loads(stats_response.text)
        check_error = _check_api_error(interface_stats)
        if check_error:
            raise AttributeError
        # Config is keyed by interface type (e.g. 'GigabitEthernet'), each
        # holding a list of per-port configs.
        for interface, v in config_json['Cisco-IOS-XE-native:interface'].items():
            if isinstance(v, list):
                mapped = [map_switchports(config, interface, interface_stats) for config in v]
                data[interface] = list(mapped)
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL, KeyError, AttributeError):
        pass
    if data:
        for v in data.values():
            for i in v:
                # BUGFIX: removed a stray no-op expression statement (`i[0]`)
                # that previously sat before this trunk append.
                if i[0].get('mode') == 'trunk':
                    trunk.append(i[0])
                elif i[0].get('mode') == 'access':
                    access.append(i[0])
    return trunk, access
def map_switchports(config, interface, interfaces_statuses) -> list:
    """Merge one port's switchport config with its live statistics.

    config -- one entry from the native interface config list
    interface -- interface type prefix (e.g. 'GigabitEthernet')
    interfaces_statuses -- decoded Cisco-IOS-XE-interfaces-oper payload
    Returns a single-element list holding the mapped record (mode
    'access', 'trunk', or None for non-switchport interfaces).
    """
    complete_interface = f"{interface}{config.get('name')}"
    interface_mode = False
    data = []
    # Locate this port's operational record by exact name match.
    # NOTE(review): `statistics` is None when no record matches — the
    # subscripting below would then raise TypeError; confirm upstream
    # guarantees every configured port has an oper record.
    statistics = next((interface for interface in interfaces_statuses['Cisco-IOS-XE-interfaces-oper:interfaces']['interface'] if interface['name'] == complete_interface), None)
    if config.get('switchport', {}).get('Cisco-IOS-XE-switch:mode', {}):
        # Mode is the single key of the Cisco-IOS-XE-switch:mode container
        interface_mode = list(config.get('switchport', {}).get('Cisco-IOS-XE-switch:mode', {}).keys())[0]
        if interface_mode == 'access':
            access_vlan = config.get('switchport').get('Cisco-IOS-XE-switch:access').get('vlan').get('vlan')
            data.append({'mode': 'access','interface': complete_interface, 'vlan': access_vlan, 'status': statistics['oper-status'],
            'mbpsOut': int(statistics['statistics']['tx-kbps'])/1000, 'mbpsIn':int(statistics['statistics']['rx-kbps'])/1000})
        elif interface_mode == 'trunk':
            # Allowed VLANs can live under 'vlans' or 'add' depending on how
            # the range was configured; default to 'all' when neither exists.
            if config.get("switchport").get("Cisco-IOS-XE-switch:trunk", {}).get("allowed", {}).get("vlan", {}).get("vlans", {}):
                trunked_vlans = config.get("switchport", {}).get("Cisco-IOS-XE-switch:trunk", {}).get("allowed", {}).get("vlan", {}).get("vlans", {})
                native = config.get("switchport", {}).get("Cisco-IOS-XE-switch:trunk", {}).get("native", {}).get("vlan", {})
            elif config.get("switchport").get("Cisco-IOS-XE-switch:trunk", {}).get("allowed", {}).get("vlan", {}).get("add", {}):
                trunked_vlans = config.get('switchport', {}).get('Cisco-IOS-XE-switch:trunk', {}).get('allowed', {}).get('vlan').get('add')
                native = config.get("switchport").get("Cisco-IOS-XE-switch:trunk", {}).get("native", {}).get("vlan", {})
            elif config.get("switchport").get("Cisco-IOS-XE-switch:trunk", {}).get("allowed", {}).get("vlan", {}).get('vlans', {}):
                trunked_vlans = config.get('switchport', {}).get('Cisco-IOS-XE-switch:trunk', {}).get('allowed', {}).get('vlan').get('vlans', {})
                native = config.get("switchport", {}).get("Cisco-IOS-XE-switch:trunk", {}).get("native", {}).get("vlan", {})
            else:
                trunked_vlans = 'all'
                native = config.get("switchport").get("Cisco-IOS-XE-switch:trunk", {}).get("native", {}).get("vlan", {})
            data.append({'mode': 'trunk', 'interface': complete_interface, 'vlans': trunked_vlans, 'native': native, 'status': statistics['oper-status'], 'speed': statistics['speed'],
            'mbpsOut': int(statistics['statistics']['tx-kbps'])/1000, 'mbpsIn': int(statistics['statistics']['rx-kbps'])/1000})
    else:
        # Not a switchport (routed/unconfigured) — stats only
        data.append({'mode': None, 'interface': complete_interface, 'status': statistics['oper-status'],
        'mbpsOut': int(statistics['statistics']['tx-kbps'])/1000, 'mbpsIn': int(statistics['statistics']['rx-kbps'])/1000})
    return data
def get_bgp_status(ip, port, username, password) -> tuple:
    """Gets BGP neighbor statuses IOS-XE\n
    Cisco-IOS-XE-bgp-oper:bgp-state-data/address-families/address-family

    Returns (bgp_neighbors, bgp_details, bgp_topology):
      bgp_neighbors -- per-neighbor session/state summaries
      bgp_details   -- flat positional list of address-family counters
      bgp_topology  -- {neighbor-id: remote-as} mapping
    """
    bgp_neighbors = []
    bgp_details = []
    bgp_topology = {}
    try:
        uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-bgp-oper:bgp-state-data/address-families/address-family"
        response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        bgp_data = json.loads(response.text)
        check_error = _check_api_error(bgp_data)
        if check_error:
            raise AttributeError
        # Unwrap the single top-level YANG key without hard-coding it
        get_keys = dict.fromkeys(bgp_data)
        parent_key = list(get_keys.keys())[0]
        if isinstance (bgp_data[parent_key], list):
            for i in bgp_data[parent_key]:
                # Positional list — consumers index into this by position
                bgp_details.append(i.get('local-as'))
                bgp_details.append(i.get('vrf-name'))
                bgp_details.append(i.get('router-id'))
                bgp_details.append(i.get('bgp-table-version'))
                bgp_details.append(i.get('routing-table-version'))
                bgp_details.append(i.get('prefixes').get('total-entries'))
                bgp_details.append(i.get('prefixes').get('memory-usage'))
                bgp_details.append(i.get('vrf-name'))
                bgp_details.append(i.get('path').get('total-entries'))
                bgp_details.append(i.get('path').get('memory-usage'))
                bgp_details.append(i.get('as-path').get('total-entries'))
                bgp_details.append(i.get('as-path').get('memory-usage'))
                bgp_details.append(i.get('route-map').get('total-entries'))
                bgp_details.append(i.get('route-map').get('memory-usage'))
                bgp_details.append(i.get('filter-list').get('total-entries'))
                bgp_details.append(i.get('filter-list').get('memory-usage'))
                bgp_details.append(i.get('activities').get('prefixes'))
                bgp_details.append(i.get('activities').get('paths'))
                bgp_details.append(i.get('activities').get('scan-interval'))
                bgp_details.append(i.get('total-memory'))
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL, KeyError, AttributeError) as e:
        pass
    try:
        uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-bgp-oper:bgp-state-data/neighbors"
        response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
        bgp_data = json.loads(response.text)
        check_error = _check_api_error(bgp_data)
        if check_error:
            raise AttributeError
        for i in bgp_data.get('Cisco-IOS-XE-bgp-oper:neighbors', {}).get('neighbor', {}):
            bgp_topology[i.get('neighbor-id', {})] = i.get('as')
            bgp_neighbors.append({ 'remote-as': i.get('as'),
            'vrf': i.get('vrf-name'),
            'neighbor-id': i.get('neighbor-id', {}),
            'localIp': i.get('transport').get('local-host'),
            'remote-ip': i.get('transport').get('foreign-host', {}),
            'local-port': i.get('transport').get('local-port'),
            'remote-port': i.get('transport').get('foreign-port'),
            'last-reset': i.get('connection').get('last-reset'),
            'state': i.get('connection').get('state'),
            'prefixes-sent': i.get('prefix-activity').get('sent').get('current-prefixes'),
            'received-prefixes': i.get('prefix-activity').get('received').get('current-prefixes'),
            'installed-prefixes': i.get('installed-prefixes', {})})
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL, KeyError, AttributeError) as e:
        pass
    return bgp_neighbors, bgp_details, bgp_topology
def get_dmvpn_ints(ip, port, username, password) -> tuple:
    """Collect DMVPN tunnel interface state and configuration.

    Returns (config_table, interf_op_table, tunnels, hubs):
      config_table    -- reserved, currently always empty
      interf_op_table -- operational records for Tunnel* interfaces
      tunnels         -- parsed tunnel configuration summaries
      hubs            -- NHRP hub mappings from the last tunnel parsed
    """
    config_table = []
    interf_op_table = []
    tunnels = []
    hubs = []
    try:
        response = requests.get(f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-interfaces-oper:interfaces", headers=headers, verify=False, auth=(username, password))
        interface_data = json.loads(response.text)
        if not _check_api_error(interface_data):
            interf_op_table = [interface for interface in interface_data.get('Cisco-IOS-XE-interfaces-oper:interfaces').get('interface') if 'Tunnel' in interface.get('name', {})]
        response = requests.get(f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-native:native/interface/Tunnel", headers=headers, verify=False, auth=(username, password))
        config_data = json.loads(response.text)
        if not _check_api_error(config_data):
            for details in config_data.values():
                # A single tunnel comes back as a dict, multiple tunnels as a
                # list — normalize so both cases share one parsing loop.
                # BUGFIX: the dict branch previously referenced an undefined
                # name ('detail'), raising NameError at runtime (NameError is
                # not in the except list below, so it propagated). It also
                # wrote a misspelled 'netwrok-id' key; both are fixed by
                # reusing the list-branch parsing for the dict case.
                detail_list = [details] if isinstance(details, dict) else details
                for detail in detail_list:
                    tunnels.append({
                        'name': f'Tunnel{detail.get("name", {})}',
                        'mss': detail.get('ip',{}).get("tcp", {}).get('adjust-mss', {}),
                        'mtu': detail.get('ip',{}).get("mtu", {}),
                        'source': detail.get('Cisco-IOS-XE-tunnel:tunnel', {}).get('source', {}),
                        'mode': f"{list(detail.get('Cisco-IOS-XE-tunnel:tunnel', {}).get('mode', {}).keys())[0]} {list(list(detail.get('Cisco-IOS-XE-tunnel:tunnel', {}).get('mode', {}).values())[0].keys())[0]}",
                        'protection': detail.get('Cisco-IOS-XE-tunnel:tunnel', {}).get('protection', {}).get('Cisco-IOS-XE-crypto:ipsec', {}).get('profile', {}),
                        'authentication': detail.get('ip',{}).get('Cisco-IOS-XE-nhrp:nhrp', {}).get('authentication', {}),
                        'holdtime': detail.get('ip',{}).get('Cisco-IOS-XE-nhrp:nhrp', {}).get('holdtime', {}),
                        'network-id': detail.get('ip',{}).get('Cisco-IOS-XE-nhrp:nhrp', {}).get('network-id', {})})
                    # Only the last tunnel's hub mapping is kept (matches the
                    # original list-branch behavior).
                    hubs = _map_dmvpn_hubs(detail.get('ip',{}).get('Cisco-IOS-XE-nhrp:nhrp', {}).get('map', {}))
    except (JSONDecodeError, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL,UnboundLocalError, AttributeError, IndexError):
        pass
    return config_table, interf_op_table, tunnels, hubs
def _map_dmvpn_hubs(hub_details) -> list:
hubs = []
for i in hub_details.get('dest-ipv4', {}):
if isinstance(i.get('nbma-ipv4', {}), list):
hubNbma = ", ".join([hub.get('nbma-ipv4', {}) for hub in i.get('nbma-ipv4', {})])
else:
hubNbma = i.get('nbma-ipv4', {})
hubs.append({'tunnel': i.get('dest-ipv4', {}), 'hubNbma': hubNbma})
return hubs
|
<reponame>tethys-platform/tethys<gh_stars>1-10
# Copyright 2020 Konstruktor, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from unittest import mock
import pytest
from tethys.core.exceptions import TethysSessionClosed, TethysRONotFound
from tethys.core.networks.network_zero import ZeroNetwork
from tethys.core.nodes.node_zero import ZeroNode
from tethys.core.nodes.operators.operator_base import OperatorBase
from tethys.core.pipes import ZeroPipe
from tethys.core.pipes.filters.filter_function import FNFilter
from tethys.core.sessions.sess_zero import ZeroSession
from tethys.core.streams.stream_zero import ZeroStream
from tethys.core.transports.connectors.connector_base import ConnectorBase
from tethys.core.transports.transport_zero import ZeroTransport
def load(*_, **__):
    # Stand-in for ZeroStream.load that always reports "not found",
    # forcing pipe.get_stream() to take the create-new-stream path in tests.
    raise TethysRONotFound()
class MockOperator(OperatorBase):
    """Minimal operator whose process step is a no-op, for node fixtures."""
    def process(self, *args, **kwargs):
        pass
class MockNode(ZeroNode):
    """Node pre-wired with a MockOperator so tests need no real operator."""
    def __init__(self, **kwargs):
        operator = MockOperator()
        super().__init__(operator, **kwargs)
class MockConnector(ConnectorBase):
    """Connector whose connect() is a no-op (no real channel is opened)."""
    def connect(self, channel_id: str, *args, **kwargs):
        pass
class MockSession(ZeroSession):
    """Concrete ZeroSession used as a lightweight test double."""
    pass
class MockNetwork(ZeroNetwork):
    """Concrete ZeroNetwork used as a lightweight test double."""
    pass
class TestZeroPipe:
    """Unit tests for ZeroPipe: transport-factory wiring, packet filtering,
    stream resolution (get_stream), and pull/push behaviour.

    NOTE(review): several tests below assign ZeroStream.load / ZeroStream.save
    directly without restoring them, so they leak into later tests — consider
    pytest's monkeypatch fixture.
    """

    # set_transport_factory
    def test_set_transport_factory_and_create_pipe(self):
        # A factory registered on the class is used for new pipes and is
        # invoked exactly once with the pipe itself.
        node_a, node_b = MockNode(), MockNode()
        transport = ZeroTransport(MockConnector())
        transport_factory = mock.MagicMock(side_effect=lambda *_, **__: transport)
        ZeroPipe.set_transport_factory(transport_factory)
        pipe = ZeroPipe(node_a, node_b)
        assert ZeroPipe._transport_factory == transport_factory
        assert pipe.transport == transport
        ZeroPipe._transport_factory.assert_called_once_with(pipe)

    def test_set_transport_factory_as_transport(self):
        # Passing a transport instance (not a callable) wraps it in a factory
        # that always returns that instance.
        node_a, node_b = MockNode(), MockNode()
        pipe = ZeroPipe(node_a, node_b)
        transport = ZeroTransport(MockConnector())
        pipe.set_transport_factory(transport)
        assert pipe._transport_factory() == transport

    def test_transport_factory_context(self):
        # The context manager swaps the factory for the duration of the
        # with-block only, restoring the previous one afterwards.
        node_a, node_b = MockNode(), MockNode()
        pipe = ZeroPipe(node_a, node_b)
        transport = mock.MagicMock()
        prev_method = pipe._transport_factory
        with pipe.transport_factory_context(transport):
            assert pipe._transport_factory == transport
        assert pipe._transport_factory == prev_method

    # filter_data_packet
    def test_filter_data_packet_empty(self):
        # With no filters configured every packet passes.
        node_a, node_b = MockNode(), MockNode()
        session_mock = mock.MagicMock()
        pipe = ZeroPipe(node_a, node_b, filters=[])
        assert pipe.filter_data_packet(..., session_mock)

    def test_filter_data_packet_true(self):
        node_a, node_b = MockNode(), MockNode()
        session_mock = mock.MagicMock()

        def f1(data_packet, *_, **__):
            return data_packet

        def f2(data_packet, *_, **__):
            return data_packet / 2

        # For packet 1, f1 -> 1 and f2 -> 0.5; presumably the combined score
        # clears filters_threshold=0.5 (combination semantics live in ZeroPipe).
        pipe = ZeroPipe(
            node_a, node_b, filters=[FNFilter(f1), FNFilter(f2)], filters_threshold=0.5
        )
        assert pipe.filter_data_packet(1, session_mock) is True

    def test_filter_data_packet_false(self):
        node_a, node_b = MockNode(), MockNode()
        session_mock = mock.MagicMock()

        def f1(data_packet, *_, **__):
            return data_packet

        def f2(data_packet, *_, **__):
            return data_packet / 2

        # Threshold 1.1 is set above what these filters can produce for
        # packet 2, so the packet must be rejected.
        pipe = ZeroPipe(
            node_a, node_b, filters=[FNFilter(f1), FNFilter(f2)], filters_threshold=1.1
        )
        assert pipe.filter_data_packet(2, session_mock) is False

    # get_stream
    def test_get_stream_exists(self):
        node_a, node_b = MockNode(), MockNode()
        session_mock = mock.MagicMock(spec=ZeroSession)
        session_mock.id = "1"
        session_mock.closed = False
        session_mock.closing_mode = None
        stream_mock = mock.MagicMock()
        # NOTE(review): stream_cls_mock is never used below — the patch()
        # context manager installs plain lambdas instead.
        stream_cls_mock = mock.MagicMock(side_effect=lambda *_, **__: stream_mock)
        stream_cls_mock.load = mock.MagicMock(side_effect=lambda *_, **__: stream_mock)
        pipe = ZeroPipe(node_a, node_b)

        @contextmanager
        def patch():
            # Temporarily make every ZeroStream lookup/creation yield the
            # mock, restoring the real methods on exit.
            old_load = ZeroStream.load
            old_new = ZeroStream.__new__
            try:
                ZeroStream.load = lambda *_, **__: stream_mock
                ZeroStream.__new__ = lambda *_, **__: stream_mock
                yield ZeroStream
            finally:
                ZeroStream.load = old_load
                ZeroStream.__new__ = old_new

        with patch():
            assert pipe.get_stream(session_mock) == stream_mock

    def test_get_stream_new_with_transport(self):
        # When no stream exists (load raises TethysRONotFound), a new stream
        # is created, saved once, and carries the pipe's explicit transport.
        node_a, node_b = MockNode(), MockNode()
        session_mock = MockSession(MockNetwork())
        session_mock._id = "1"
        session_mock.closed = False
        session_mock.closing_mode = None
        transport_mock = ZeroTransport(MockConnector())
        pipe = ZeroPipe(node_a, node_b, transport=transport_mock)
        # NOTE(review): these patches are never restored.
        ZeroStream.load = load
        ZeroStream.save = mock.MagicMock()
        stream = pipe.get_stream(session_mock)
        ZeroStream.save.assert_called_once_with(save_dependency=False)
        assert stream.transport == transport_mock

    def test_get_stream_new_without_transport(self):
        # Without an explicit transport, the class-level factory supplies one
        # for the newly created stream.
        node_a, node_b = MockNode(), MockNode()
        session_mock = MockSession(MockNetwork())
        session_mock._id = "1"
        session_mock.closed = False
        session_mock.closing_mode = None
        transport_mock = ZeroTransport(MockConnector())

        def transport_factory(_):
            return transport_mock

        transport_factory_mock = mock.MagicMock(side_effect=transport_factory)
        ZeroPipe.set_transport_factory(transport_factory_mock)
        pipe = ZeroPipe(node_a, node_b)
        # NOTE(review): these patches are never restored.
        ZeroStream.load = load
        ZeroStream.save = mock.MagicMock()
        stream = pipe.get_stream(session_mock)
        ZeroStream.save.assert_called_once_with(save_dependency=False)
        ZeroPipe._transport_factory.assert_called_once_with(pipe)
        assert stream.transport == transport_mock

    def test_get_stream_new_when_sess_closed(self):
        # Creating a stream against a closed session must fail.
        node_a, node_b = MockNode(), MockNode()
        session_mock = MockSession(MockNetwork())
        session_mock._id = "1"
        session_mock.closed = True
        session_mock.closing_mode = None
        pipe = ZeroPipe(node_a, node_b)
        with pytest.raises(TethysSessionClosed):
            pipe.get_stream(session_mock)

    def test_get_stream_new_when_sess_hard_closing(self):
        # A session in HARD_CLOSING_MODE is treated like a closed one.
        node_a, node_b = MockNode(), MockNode()
        session_mock = MockSession(MockNetwork())
        session_mock._id = "1"
        session_mock.closed = False
        session_mock.closing_mode = ZeroSession.HARD_CLOSING_MODE
        pipe = ZeroPipe(node_a, node_b)
        with pytest.raises(TethysSessionClosed):
            pipe.get_stream(session_mock)

    # pull
    def test_pull(self):
        # pull() yields values (not keys) read from the stream and forwards
        # keyword arguments to stream.read.
        node_a, node_b = MockNode(), MockNode()
        session_mock = MockSession(MockNetwork())
        session_mock._id = "1"
        session_mock.closed = False
        session_mock.closing_mode = ZeroSession.HARD_CLOSING_MODE
        stream_mock = mock.MagicMock()
        stream_mock.read = mock.MagicMock(
            side_effect=lambda *_, **__: iter([("key", "value")])
        )

        def get_stream(_):
            return stream_mock

        pipe = ZeroPipe(node_a, node_b)
        pipe.get_stream = mock.MagicMock(side_effect=get_stream)
        assert next(pipe.pull(session_mock, test_kw=1)) == "value"
        pipe.get_stream.assert_called_once_with(session_mock)
        stream_mock.read.assert_called_once_with(test_kw=1)

    # push
    def test_push(self):
        # A single packet with no filters is written once with many=False.
        node_a, node_b = MockNode(), MockNode()
        session_mock = MockSession(MockNetwork())
        session_mock._id = "1"
        stream_mock = mock.MagicMock()

        def get_stream(_):
            return stream_mock

        pipe = ZeroPipe(node_a, node_b)
        pipe.get_stream = mock.MagicMock(side_effect=get_stream)
        res = pipe.push(..., session_mock, test_kw=1)
        assert res is True
        pipe.get_stream.assert_called_once_with(session_mock)
        stream_mock.write.assert_called_once_with(..., many=False, test_kw=1)

    def test_push_filter_return_false(self):
        # A filter scoring 0 rejects the packet: nothing is written and the
        # stream is never even resolved.
        node_a, node_b = MockNode(), MockNode()
        session_mock = MockSession(MockNetwork())
        session_mock._id = "1"
        stream_mock = mock.MagicMock()

        def get_stream(_):
            return stream_mock

        def lambda_null(*_, **__):
            return 0

        pipe = ZeroPipe(node_a, node_b, filters=[FNFilter(lambda_null)])
        pipe.get_stream = mock.MagicMock(side_effect=get_stream)
        res = pipe.push(..., session_mock, test_kw=1)
        assert res is False
        pipe.get_stream.assert_not_called()
        stream_mock.write.assert_not_called()

    def test_push_many(self):
        # many=True pushes the whole list in one write call.
        node_a, node_b = MockNode(), MockNode()
        session_mock = MockSession(MockNetwork())
        session_mock._id = "1"
        stream_mock = mock.MagicMock()

        def get_stream(_):
            return stream_mock

        pipe = ZeroPipe(node_a, node_b)
        pipe.get_stream = mock.MagicMock(side_effect=get_stream)
        res = pipe.push([...], session_mock, many=True, test_kw=1)
        assert res is True
        pipe.get_stream.assert_called_once_with(session_mock)
        stream_mock.write.assert_called_once_with([...], many=True, test_kw=1)

    def test_push_many_return_piece_of_data(self):
        # With threshold 2, only items scoring >= 2 (i.e. 2, 3, 4) survive
        # the identity filter and get written.
        node_a, node_b = MockNode(), MockNode()
        session_mock = MockSession(MockNetwork())
        session_mock._id = "1"
        stream_mock = mock.MagicMock()

        def get_stream(_):
            return stream_mock

        def lambda_dummy(x, *_, **__):
            return x

        pipe = ZeroPipe(
            node_a, node_b, filters=[FNFilter(lambda_dummy)], filters_threshold=2
        )
        pipe.get_stream = mock.MagicMock(side_effect=get_stream)
        res = pipe.push(list(range(5)), session_mock, many=True, test_kw=1)
        assert res is True
        pipe.get_stream.assert_called_once_with(session_mock)
        stream_mock.write.assert_called_once_with(
            list(range(2, 5)), many=True, test_kw=1
        )

    def test_push_many_return_empty(self):
        # If the filter drops every item, push reports failure and writes nothing.
        node_a, node_b = MockNode(), MockNode()
        session_mock = MockSession(MockNetwork())
        session_mock._id = "1"
        stream_mock = mock.MagicMock()

        def get_stream(_):
            return stream_mock

        def lambda_null(*_, **__):
            return 0

        pipe = ZeroPipe(node_a, node_b, filters=[FNFilter(lambda_null)])
        pipe.get_stream = mock.MagicMock(side_effect=get_stream)
        res = pipe.push(list(range(5)), session_mock, many=True, test_kw=1)
        assert res is False
        pipe.get_stream.assert_not_called()
        stream_mock.write.assert_not_called()
|
<reponame>Ahuge/sept_qt
import os
from Qt import QtGui, QtWidgets, QtCore
from sept import errors
from .input_widget import TemplateInputWidget
class FileTemplateInputWidget(TemplateInputWidget):
    """
    FileTemplateInputWidget extends the TemplateInputWidget in allowing users
    to interactively create `sept.Template` objects by allowing you to
    load and save your template strings to disk.
    All that TemplateInputWidget needs is a valid `sept.PathTemplateParser`
    instance, however for styling in your GUI application, you can
    optionally pass an `error_colour` that will drive the highlighting
    colour when errors occur.
    *Automatically Load From Disk*
    You may want to automatically populate your FileTemplateInputWidget with
    data that persists on disk.
    This can be done by passing the path to your serialized `sept.Template`
    file into the `disk_path` parameter.
    *Interactivity Timeouts*
    You may also wish to define how long the timeout occurs between the last
    character input and our error highlighting occurs.
    By default this is set at 1250ms but you can override this by passing a
    value to `timeout` during instantiation.
    *Error handling*
    When using this in a larger GUI application, you may want to have a
    centralized place for displaying errors, if that is the case, you can
    send errors to the `recieve_error` slot.
    However, to ensure it visualizes correctly, you will want to ensure your
    error class has "location" and "length" attributes on it that can be
    used to display the highlighting.
    """
    LOAD_TEXT = "Load SEPT template"
    SAVE_TEXT = "Save SEPT template"

    def __init__(
        self, parser, error_colour=None, timeout=None, disk_path=None, parent=None
    ):
        super(FileTemplateInputWidget, self).__init__(
            parser=parser, error_colour=error_colour, timeout=timeout, parent=parent
        )
        self._load_from_disk_button = None
        self._save_to_disk_button = None
        # Remembered path used to seed the file dialogs and for auto-load.
        self._disk_path = disk_path
        if disk_path and os.path.exists(disk_path):
            self.load_path(disk_path)

    def load_path(self, path):
        """
        load_path will attempt to read and validate your `sept.Template` from
        the filepath passed in.
        If any errors occur while validating, a popup will handle any
        exceptions from validating the template.
        :param str path: Path to a file on disk containing the template_str.
        """
        try:
            template_str = self._read_from_path(path)
        except errors.SeptError as err:
            import traceback

            message = (
                "Error loading template data from {path}\n"
                "Error was: {error}\n"
                "{long_error}".format(
                    path=path, error=str(err), long_error=traceback.format_exc()
                )
            )
            self._display_error(
                message=message, title="Error loading template data from disk!"
            )
            return
        else:  # No errors
            # _read_from_path returns None for a missing file; don't clear
            # the input box by calling setText(None) in that case.
            if template_str is not None:
                self.setText(template_str)

    def _build_input_widget(self):
        """Wrap the base input widget with load ("...") and "Save..." buttons."""
        widget = QtWidgets.QWidget(self)
        widget.setLayout(QtWidgets.QHBoxLayout())
        widget.layout().setSpacing(0)
        widget.layout().setContentsMargins(0, 0, 0, 0)
        line_edit_widget = super(FileTemplateInputWidget, self)._build_input_widget()
        self._load_from_disk_button = QtWidgets.QPushButton("...")
        self._load_from_disk_button.setMaximumWidth(24)
        self._load_from_disk_button.clicked.connect(
            self._handle_load_disk_button_clicked
        )
        self._save_to_disk_button = QtWidgets.QPushButton("Save...")
        self._save_to_disk_button.setMaximumWidth(56)
        self._save_to_disk_button.clicked.connect(self._handle_save_disk_button_clicked)
        widget.layout().addWidget(line_edit_widget)
        widget.layout().addWidget(self._load_from_disk_button)
        widget.layout().addWidget(self._save_to_disk_button)
        return widget

    def _display_error(self, message, title="Error!"):
        """Show a modal critical message box."""
        QtWidgets.QMessageBox.critical(self, title, message)

    def _display_information(self, message, title="Information!"):
        """Show a modal informational message box."""
        QtWidgets.QMessageBox.information(self, title, message)

    def _read_from_path(self, path):
        """Read a template string from disk and validate it with the parser.

        Returns None if `path` does not exist.  Any `sept.errors.SeptError`
        raised during validation propagates to the caller.
        """
        if not os.path.exists(path):
            return None
        with open(path, "r") as fh:
            data = fh.read()
        # Raises errors.SeptError for an invalid template; load_path handles it.
        self.parser.validate_template(data)
        return data

    def _get_folder_path(self):
        """Return the best starting location for a file dialog.

        Prefers the remembered disk path when it still exists, then its
        parent directory, and finally the current working directory.
        """
        if self._disk_path is None:
            return os.getcwd()
        if os.path.isfile(self._disk_path) or os.path.isdir(self._disk_path):
            # isfile/isdir imply existence, so the path is usable as-is.
            return self._disk_path
        parent = os.path.dirname(self._disk_path)
        if os.path.exists(parent):
            # The remembered file is gone; fall back to its directory.
            return parent
        return os.getcwd()

    @QtCore.Slot()
    def _handle_save_disk_button_clicked(self):
        """Prompt for a save location and write the current template to it."""
        if not self.template:
            self._display_information(
                message="Input text box does not contain a valid template. Please fix any errors or type one.",
                title="No valid template",
            )
            return
        path = self._get_folder_path()
        new_path, _ = QtWidgets.QFileDialog.getSaveFileName(self, self.SAVE_TEXT, path)
        if not new_path:
            # User cancelled the dialog; don't try to open an empty path.
            return
        with open(new_path, "w") as fh:
            fh.write(self.template._template_str)
        self._display_information(
            message="Template saved successfully to {}".format(new_path),
            title="Success!",
        )

    @QtCore.Slot()
    def _handle_load_disk_button_clicked(self):
        """Prompt for a template file and load it into the input box."""
        path = self._get_folder_path()
        new_path, _ = QtWidgets.QFileDialog.getOpenFileName(self, self.LOAD_TEXT, path)
        if not new_path:
            # User cancelled the dialog; keep the current template and path.
            return
        self._disk_path = new_path
        self.load_path(self._disk_path)
|
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Michaelpeng <<EMAIL>>
# Date: October 20, 2011
"""
This is the scons_gen_rule module which inherits the SconsTarget
and generates related gen rule rules.
"""
import os
import blade
import build_rules
import console
from blade_util import var_to_list
from blade_util import location_re
from target import Target
class GenRuleTarget(Target):
    """A scons gen rule target subclass.
    This class is derived from Target.

    A gen_rule runs an arbitrary shell command to produce its declared
    output files, supporting scons, ninja and make backends.
    """
    def __init__(self,
                 name,
                 srcs,
                 deps,
                 outs,
                 cmd,
                 blade,
                 kwargs):
        """Init method.
        Init the gen rule target.
        """
        srcs = var_to_list(srcs)
        deps = var_to_list(deps)
        outs = var_to_list(outs)
        Target.__init__(self,
                        name,
                        'gen_rule',
                        srcs,
                        deps,
                        None,
                        blade,
                        kwargs)
        self.data['outs'] = outs
        self.data['locations'] = []
        # Replace each location reference in the command with '%s' and record
        # the referenced (target, label) pairs for later path substitution.
        self.data['cmd'] = location_re.sub(self._process_location_reference, cmd)

    def _srcs_list(self, path, srcs):
        """Returns srcs list. """
        # Quoted, comma-joined build-dir-relative paths for rule emission.
        return ','.join(['"%s"' % os.path.join(self.build_path, path, src)
                         for src in srcs])

    def _process_location_reference(self, m):
        """Process target location reference in the command. """
        # Record the referenced target; the '%s' placeholder is filled in
        # with the resolved path when the backend command is generated.
        key, type = self._add_location_reference_target(m)
        self.data['locations'].append((key, type))
        return '%s'

    def _generate_header_files(self):
        """Whether this target generates header files during building."""
        # Be conservative: Assume gen_rule always generates header files.
        return True

    def _allow_duplicate_source(self):
        # The same source may feed multiple gen_rules.
        return True

    def scons_rules(self):
        """scons_rules.
        Description
        -----------
        It outputs the scons rules according to user options.
        """
        self._clone_env()
        env_name = self._env_name()
        var_name = self._var_name()
        targets = self.blade.get_build_targets()
        srcs_str = ''
        if self.srcs:
            srcs_str = self._srcs_list(self.path, self.srcs)
        elif self.expanded_deps:
            srcs_str = ''
        else:
            # NOTE(review): with neither srcs nor deps, 'time_value' is used
            # as the source — presumably to force the command to rerun each
            # build; confirm against the blade scons environment setup.
            srcs_str = 'time_value'
        # Translate gen_rule placeholders into scons builder variables.
        cmd = self.data['cmd']
        cmd = cmd.replace('$SRCS', '$SOURCES')
        cmd = cmd.replace('$OUTS', '$TARGETS')
        cmd = cmd.replace('$FIRST_SRC', '$SOURCE')
        cmd = cmd.replace('$FIRST_OUT', '$TARGET')
        cmd = cmd.replace('$BUILD_DIR', self.build_path)
        locations = self.data['locations']
        if locations:
            # Resolve each recorded location reference to the scons variable
            # of the referenced target; the command becomes a %-format string
            # evaluated at scons time.
            target_vars = []
            for key, type in locations:
                target_var = targets[key]._get_target_var(type)
                if not target_var:
                    console.error_exit('%s: Invalid location reference %s %s' %
                                       (self.fullname, ':'.join(key), type))
                target_vars.append(target_var)
            cmd = '"%s" %% (%s)' % (cmd, ','.join(['str(%s[0])' % v for v in target_vars]))
        else:
            cmd = '"%s"' % cmd
        # The trailing '@ls $TARGETS' makes the build fail fast if the
        # command did not actually produce its declared outputs.
        self._write_rule('%s = %s.Command([%s], [%s], '
                         '[%s, "@ls $TARGETS > /dev/null"])' % (
                             var_name,
                             env_name,
                             self._srcs_list(self.path, self.data['outs']),
                             srcs_str,
                             cmd))
        # Expose each output under its positional index so other targets can
        # location-reference it.
        for i in range(len(self.data['outs'])):
            self._add_target_var('%s' % i, '%s[%s]' % (var_name, i))
        # TODO(phongchen): add Target.get_all_vars
        dep_var_list = []
        dep_skip_list = ['system_library', 'prebuilt_cc_library']
        for i in self.expanded_deps:
            dep = targets[i]
            if dep.type in dep_skip_list:
                continue
            if dep.type == 'swig_library':
                # swig_library exposes separate python and java artifacts.
                dep_var_name = dep._var_name('dynamic_py')
                dep_var_list.append(dep_var_name)
                dep_var_name = dep._var_name('dynamic_java')
                dep_var_list.append(dep_var_name)
            else:
                dep_var_list += dep._get_target_vars()
        for dep_var_name in dep_var_list:
            self._write_rule('%s.Depends(%s, %s)' % (env_name,
                                                     var_name,
                                                     dep_var_name))

    def ninja_command(self):
        """Translate the gen_rule command into ninja variable syntax and
        substitute resolved location-reference paths."""
        cmd = self.data['cmd']
        cmd = cmd.replace('$SRCS', '${in}')
        cmd = cmd.replace('$OUTS', '${out}')
        cmd = cmd.replace('$FIRST_SRC', '${_in_1}')
        cmd = cmd.replace('$FIRST_OUT', '${_out_1}')
        cmd = cmd.replace('$BUILD_DIR', self.build_path)
        locations = self.data['locations']
        if locations:
            targets = self.blade.get_build_targets()
            locations_paths = []
            for key, label in locations:
                path = targets[key]._get_target_file(label)
                if not path:
                    console.error_exit('%s: Invalid location reference %s %s' %
                                       (self.fullname, ':'.join(key), label))
                locations_paths.append(path)
            # Fill the '%s' placeholders created by _process_location_reference.
            cmd = cmd % tuple(locations_paths)
        return cmd

    def implicit_dependencies(self):
        """Collect the output files of all expanded deps as implicit inputs."""
        targets = self.blade.get_build_targets()
        implicit_deps = []
        for dep in self.expanded_deps:
            implicit_deps += targets[dep]._get_target_files()
        return implicit_deps

    def ninja_rules(self):
        """Emit a per-target ninja rule plus the build statement for it."""
        rule = '%s__rule__' % self._regular_variable_name(
            self._source_file_path(self.name))
        cmd = self.ninja_command()
        description = '%sCOMMAND //%s%s' % (
            console.colors('dimpurple'), self.fullname, console.colors('end'))
        # 'ls ${out}' verifies the declared outputs were actually produced.
        self._write_rule('''rule %s
  command = %s && cd %s && ls ${out} > /dev/null
  description = %s
''' % (rule, cmd, self.blade.get_root_dir(), description))
        outputs = [self._target_file_path(o) for o in self.data['outs']]
        inputs = [self._source_file_path(s) for s in self.srcs]
        vars = {}
        # Only bind the first-input/first-output variables when referenced.
        if '${_in_1}' in cmd:
            vars['_in_1'] = inputs[0]
        if '${_out_1}' in cmd:
            vars['_out_1'] = outputs[0]
        self.ninja_build(outputs, rule, inputs=inputs,
                         implicit_deps=self.implicit_dependencies(),
                         variables=vars)
        for i, out in enumerate(outputs):
            self._add_target_file(str(i), out)

    def make_command(self):
        """Return the command with location references resolved (make backend
        keeps the original $SRCS/$OUTS placeholders untranslated)."""
        cmd = self.data['cmd']
        locations = self.data['locations']
        if locations:
            targets = self.blade.get_build_targets()
            locations_paths = []
            for key, label in locations:
                path = targets[key]._get_target_file(label)
                if not path:
                    console.error_exit('%s: Invalid location reference %s %s' %
                                       (self.fullname, ':'.join(key), label))
                locations_paths.append(path)
            cmd = cmd % tuple(locations_paths)
        return cmd

    def make_rules(self):
        """make_rules.
        It outputs the make rules according to user options.
        """
        rule = '%s' % self._regular_variable_name(
            self._source_file_path(self.name))
        cmd = self.make_command()
        outputs = [self._target_file_path(o) for o in self.data['outs']]
        inputs = [self._source_file_path(s) for s in self.srcs]
        vars = {}
        self.make_build(outputs, rule, inputs=inputs,
                        implicit_deps=self.implicit_dependencies(),
                        variables=vars)
        for i, out in enumerate(outputs):
            self._add_target_file(str(i), out)
def gen_rule(name,
             srcs=[],
             deps=[],
             outs=[],
             cmd='',
             **kwargs):
    """Define a gen_rule target in a BUILD file.

    :param name: target name.
    :param srcs: input source files, string or list.
    :param deps: targets this rule depends on.
    :param outs: files the command is declared to produce.
    :param cmd: shell command; may use $SRCS/$OUTS/$FIRST_SRC/$FIRST_OUT/
        $BUILD_DIR placeholders and //target:label location references.
    """
    # NOTE(review): mutable default arguments ([]) are shared across calls;
    # this is safe only if var_to_list() copies its input — confirm.
    gen_rule_target = GenRuleTarget(name,
                                    srcs,
                                    deps,
                                    outs,
                                    cmd,
                                    blade.blade,
                                    kwargs)
    blade.blade.register_target(gen_rule_target)


# Expose gen_rule as a function callable from BUILD files.
build_rules.register_function(gen_rule)
|
<reponame>healthdesk-hackathon/backend<filename>patient/migrations/0001_initial.py
# Generated by Django 3.0.5 on 2020-04-12 13:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import uuid
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (3.0.5); avoid hand-editing
    # field definitions once this migration has been applied anywhere.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Patient: core record, UUID primary key plus an external identifier.
        migrations.CreateModel(
            name='Patient',
            fields=[
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('identifier', models.CharField(max_length=50)),
                ('id_type', models.CharField(max_length=50)),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='patient_creator', to=settings.AUTH_USER_MODEL)),
                ('modifier', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='patient_modifier', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created'],
            },
        ),
        # Phone: many-to-one contact numbers for a patient.
        migrations.CreateModel(
            name='Phone',
            fields=[
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('phone_number', models.CharField(max_length=50)),
                ('phone_type', models.CharField(max_length=10)),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='phone_creator', to=settings.AUTH_USER_MODEL)),
                ('modifier', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='phone_modifier', to=settings.AUTH_USER_MODEL)),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phones', to='patient.Patient')),
            ],
            options={
                'abstract': False,
            },
        ),
        # PersonalData: demographic details, many-to-one with Patient.
        migrations.CreateModel(
            name='PersonalData',
            fields=[
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female'), ('O', 'Other/Prefer not to disclose')], max_length=1)),
                ('date_of_birth', models.DateField()),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='personaldata_creator', to=settings.AUTH_USER_MODEL)),
                ('modifier', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='personaldata_modifier', to=settings.AUTH_USER_MODEL)),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='personal_data', to='patient.Patient')),
            ],
            options={
                'abstract': False,
            },
        ),
        # PatientPhoto: one-to-one identification photo.
        migrations.CreateModel(
            name='PatientPhoto',
            fields=[
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('photo', models.ImageField(upload_to='patient_photos')),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='patientphoto_creator', to=settings.AUTH_USER_MODEL)),
                ('modifier', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='patientphoto_modifier', to=settings.AUTH_USER_MODEL)),
                ('patient', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='patient_photo', to='patient.Patient')),
            ],
            options={
                'abstract': False,
            },
        ),
        # NextOfKinContact: emergency contacts, many-to-one with Patient.
        migrations.CreateModel(
            name='NextOfKinContact',
            fields=[
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('title', models.CharField(max_length=20)),
                ('relationship', models.CharField(choices=[('WIFE', 'Wife'), ('HUSBAND', 'Husband'), ('CHILD', 'Child'), ('PARENT', 'Parent'), ('LEGAL GUARDIAN', 'Legal Guardian'), ('OTHER', 'Other')], max_length=20)),
                ('other_relationship', models.CharField(blank=True, max_length=20, null=True)),
                ('phone_number', models.CharField(max_length=50)),
                ('notes', models.TextField()),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='nextofkincontact_creator', to=settings.AUTH_USER_MODEL)),
                ('modifier', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='nextofkincontact_modifier', to=settings.AUTH_USER_MODEL)),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='next_of_kin_contacts', to='patient.Patient')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
<filename>myenv/lib/python2.7/site-packages/promise/promise.py<gh_stars>0
import functools
from threading import Event, RLock
from .compat import Future, iscoroutine, ensure_future, iterate_promise # type: ignore
from typing import Callable, Optional, Iterator, Any, Dict, Tuple, Union # flake8: noqa
class CountdownLatch(object):
    """A thread-safe counter that only counts down and never goes below zero."""
    __slots__ = ('_lock', 'count')

    def __init__(self, count):
        # type: (CountdownLatch, int) -> None
        """Create a latch starting at *count* (must be non-negative)."""
        assert count >= 0, "count needs to be greater or equals to 0. Got: %s" % count
        self._lock = RLock()
        self.count = count

    def dec(self):
        # type: (CountdownLatch) -> int
        """Decrement the counter by one and return the remaining count."""
        with self._lock:
            remaining = self.count
            assert remaining > 0, "count needs to be greater or equals to 0. Got: %s" % self.count
            remaining -= 1
            self.count = remaining
            # Computing the return value while still holding the lock
            # guarantees it reflects this thread's decrement and no other's.
            return remaining
class Promise(object):
"""
This is the Promise class that complies
Promises/A+ specification and test suite:
http://promises-aplus.github.io/promises-spec/
"""
__slots__ = ('state', 'value', 'reason', '_cb_lock', '_callbacks', '_errbacks', '_event', '_future')
# These are the potential states of a promise
PENDING = -1
REJECTED = 0
FULFILLED = 1
    def __init__(self, fn=None):
        # type: (Promise, Callable) -> None
        """
        Initialize the Promise into a pending state.

        :param fn: optional resolver taking (fulfill, reject), the
            Promises/A+ executor pattern; run immediately if given.
        """
        self.state = self.PENDING  # type: int
        self.value = None  # type: Any
        self.reason = None  # type: Optional[Exception]
        # Guards state transitions and the callback/errback lists.
        self._cb_lock = RLock()
        self._callbacks = []  # type: List[Callable]
        self._errbacks = []  # type: List[Callable]
        # Set once the promise settles; used by wait()/get().
        self._event = Event()
        self._future = None  # type: Optional[Future]
        if fn:
            self.do_resolve(fn)
    def __iter__(self):
        # type: (Promise) -> Iterator
        """Support 'yield from promise' (and 'await promise' via the alias)."""
        return iterate_promise(self)

    __await__ = __iter__
    @property
    def future(self):
        # type: (Promise) -> Future
        """Lazily create a Future that mirrors this promise's outcome."""
        if not self._future:
            self._future = Future()
            # Bridge promise settlement into the future exactly once.
            self.add_callback(self._future.set_result)
            self.add_errback(self._future.set_exception)
        return self._future
    def do_resolve(self, fn):
        """Run resolver fn(fulfill, reject), rejecting on any exception."""
        try:
            fn(self.fulfill, self.reject)
        except Exception as e:
            self.reject(e)
    @classmethod
    def fulfilled(cls, x):
        # type: (Any) -> Promise
        """Return a new promise already fulfilled with *x*."""
        p = cls()
        p.fulfill(x)
        return p
    @classmethod
    def rejected(cls, reason):
        # type: (Any) -> Promise
        """Return a new promise already rejected with *reason*."""
        p = cls()
        p.reject(reason)
        return p
    def fulfill(self, x):
        # type: (Promise, Any) -> None
        """
        Fulfill the promise with a given value.
        """
        if self is x:
            # Per spec 2.3.1, resolving a promise with itself is an error.
            raise TypeError("Cannot resolve promise with itself.")
        elif is_thenable(x):
            try:
                # Adopt the thenable's eventual state instead of storing it
                # as a plain value.
                self.promisify(x).done(self.fulfill, self.reject)
            except Exception as e:
                self.reject(e)
        else:
            self._fulfill(x)

    # Alias so that Promise.resolve(x) behaves like Promise.fulfilled(x).
    resolve = fulfilled
    def _fulfill(self, value):
        # type: (Promise, Any) -> None
        # Transition PENDING -> FULFILLED and flush queued callbacks.
        with self._cb_lock:
            if self.state != self.PENDING:
                # Already settled; A+ requires settling to be idempotent.
                return
            self.value = value
            self.state = self.FULFILLED
            callbacks = self._callbacks
            # We will never call these callbacks again, so allow
            # them to be garbage collected. This is important since
            # they probably include closures which are binding variables
            # that might otherwise be garbage collected.
            #
            # Prevent future appending
            self._callbacks = None
            # Notify all waiting
            self._event.set()
            for callback in callbacks:
                try:
                    callback(value)
                except Exception:
                    # Ignore errors in callbacks
                    pass
    def reject(self, reason):
        # type: (Promise, Exception) -> None
        """
        Reject this promise for a given reason.
        """
        assert isinstance(reason, Exception), ("The reject function needs to be called with an Exception. "
                                               "Got %s" % reason)
        # Transition PENDING -> REJECTED and flush queued errbacks.
        with self._cb_lock:
            if self.state != self.PENDING:
                # Already settled; A+ requires settling to be idempotent.
                return
            self.reason = reason
            self.state = self.REJECTED
            errbacks = self._errbacks
            # We will never call these errbacks again, so allow
            # them to be garbage collected. This is important since
            # they probably include closures which are binding variables
            # that might otherwise be garbage collected.
            #
            # Prevent future appending
            self._errbacks = None
            # Notify all waiting
            self._event.set()
            for errback in errbacks:
                try:
                    errback(reason)
                except Exception:
                    # Ignore errors in errback
                    pass
@property
def is_pending(self):
# type: (Promise) -> bool
"""Indicate whether the Promise is still pending. Could be wrong the moment the function returns."""
return self.state == self.PENDING
@property
def is_fulfilled(self):
# type: (Promise) -> bool
"""Indicate whether the Promise has been fulfilled. Could be wrong the moment the function returns."""
return self.state == self.FULFILLED
@property
def is_rejected(self):
# type: (Promise) -> bool
"""Indicate whether the Promise has been rejected. Could be wrong the moment the function returns."""
return self.state == self.REJECTED
def get(self, timeout=None):
# type: (Promise, int) -> Any
"""Get the value of the promise, waiting if necessary."""
self.wait(timeout)
if self.state == self.PENDING:
raise ValueError("Value not available, promise is still pending")
elif self.state == self.FULFILLED:
return self.value
raise self.reason
    def wait(self, timeout=None):
        # type: (Promise, int) -> None
        """
        An implementation of the wait method which doesn't involve
        polling but instead utilizes a "real" synchronization
        scheme.
        """
        # The event is set by _fulfill/reject, so this returns immediately
        # if the promise has already settled.
        self._event.wait(timeout)
def add_callback(self, f):
# type: (Promise, Callable) -> None
"""
Add a callback for when this promis is fulfilled. Note that
if you intend to use the value of the promise somehow in
the callback, it is more convenient to use the 'then' method.
"""
assert callable(f), "A function needs to be passed into add_callback. Got: %s" % f
with self._cb_lock:
if self.state == self.PENDING:
self._callbacks.append(f)
return
# This is a correct performance optimization in case of concurrency.
# State can never change once it is not PENDING anymore and is thus safe to read
# without acquiring the lock.
if self.state == self.FULFILLED:
f(self.value)
def add_errback(self, f):
# type: (Promise, Callable) -> None
"""
Add a callback for when this promis is rejected. Note that
if you intend to use the rejection reason of the promise
somehow in the callback, it is more convenient to use
the 'then' method.
"""
assert callable(f), "A function needs to be passed into add_errback. Got: %s" % f
with self._cb_lock:
if self.state == self.PENDING:
self._errbacks.append(f)
return
# This is a correct performance optimization in case of concurrency.
# State can never change once it is not PENDING anymore and is thus safe to read
# without acquiring the lock.
if self.state == self.REJECTED:
f(self.reason)
def catch(self, on_rejection):
# type: (Promise, Callable) -> Promise
"""
This method returns a Promise and deals with rejected cases only.
It behaves the same as calling Promise.then(None, on_rejection).
"""
return self.then(None, on_rejection)
def done(self, success=None, failure=None):
# type: (Promise, Callable, Callable) -> None
"""
This method takes two optional arguments. The first argument
is used if the "self promise" is fulfilled and the other is
used if the "self promise" is rejected. In contrast to then,
the return value of these callback is ignored and nothing is
returned.
"""
with self._cb_lock:
if success is not None:
self.add_callback(success)
if failure is not None:
self.add_errback(failure)
def done_all(self, handlers=None):
# type: (Promise, List[Callable]) -> List[Promise]
"""
:type handlers: list[(Any) -> object] | list[((Any) -> object, (Any) -> object)]
"""
if not handlers:
return []
for handler in handlers:
if isinstance(handler, tuple):
s, f = handler
self.done(s, f)
elif isinstance(handler, dict):
s = handler.get('success')
f = handler.get('failure')
self.done(s, f)
else:
self.done(success=handler)
    def then(self, success=None, failure=None):
        # type: (Promise, Callable, Callable) -> Promise
        """
        This method takes two optional arguments. The first argument
        is used if the "self promise" is fulfilled and the other is
        used if the "self promise" is rejected. In either case, this
        method returns another promise that effectively represents
        the result of either the first or the second argument (in the
        case that the "self promise" is fulfilled or rejected,
        respectively).
        Each argument can be either:
        * None - Meaning no action is taken
        * A function - which will be called with either the value
          of the "self promise" or the reason for rejection of
          the "self promise". The function may return:
          * A value - which will be used to fulfill the promise
            returned by this method.
          * A promise - which, when fulfilled or rejected, will
            cascade its value or reason to the promise returned
            by this method.
        * A value - which will be assigned as either the value
          or the reason for the promise returned by this method
          when the "self promise" is either fulfilled or rejected,
          respectively.
        :type success: (Any) -> object
        :type failure: (Any) -> object
        :rtype : Promise
        """
        ret = self.__class__()
        def call_and_fulfill(v):
            """
            A callback to be invoked if the "self promise"
            is fulfilled.  Handler exceptions reject the chained promise.
            """
            try:
                if callable(success):
                    ret.fulfill(success(v))
                else:
                    # No handler: pass the value through unchanged.
                    ret.fulfill(v)
            except Exception as e:
                ret.reject(e)
        def call_and_reject(r):
            """
            A callback to be invoked if the "self promise"
            is rejected.  A handled rejection FULFILLS the chained promise.
            """
            try:
                if callable(failure):
                    ret.fulfill(failure(r))
                else:
                    # No handler: cascade the rejection.
                    ret.reject(r)
            except Exception as e:
                ret.reject(e)
        self.done(call_and_fulfill, call_and_reject)
        return ret
def then_all(self, handlers=None):
# type: (Promise, List[Callable]) -> List[Promise]
"""
Utility function which calls 'then' for each handler provided. Handler can either
be a function in which case it is used as success handler, or a tuple containing
the success and the failure handler, where each of them could be None.
:type handlers: list[(Any) -> object] | list[((Any) -> object, (Any) -> object)]
:param handlers
:rtype : list[Promise]
"""
if not handlers:
return []
promises = [] # type: List[Promise]
for handler in handlers:
if isinstance(handler, tuple):
s, f = handler
promises.append(self.then(s, f))
elif isinstance(handler, dict):
s = handler.get('success')
f = handler.get('failure')
promises.append(self.then(s, f))
else:
promises.append(self.then(success=handler))
return promises
    @classmethod
    def all(cls, values_or_promises):
        # type: (Iterable[Promise, Any]) -> Promise
        """
        A special function that takes a bunch of promises
        and turns them into a promise for a vector of values.
        In other words, this turns an list of promises for values
        into a promise for a list of values.

        Plain values are wrapped via ``cls.resolve``, thenables via
        ``cls.promisify``.  Requires a sized iterable (``len`` is used).
        """
        _len = len(values_or_promises)
        if _len == 0:
            # Nothing to wait for; fulfill immediately with the empty input.
            return cls.fulfilled(values_or_promises)
        promises = (cls.promisify(v_or_p) if is_thenable(v_or_p) else cls.resolve(v_or_p) for
                    v_or_p in values_or_promises)  # type: Iterator[Promise]
        all_promise = cls()  # type: Promise
        # CountdownLatch.dec() presumably returns the remaining count
        # atomically, so only the last completion fulfills the aggregate —
        # TODO confirm against the CountdownLatch definition.
        counter = CountdownLatch(_len)
        values = [None] * _len  # type: List[Any]
        def handle_success(original_position, value):
            # type: (int, Any) -> None
            # Preserve input order regardless of completion order.
            values[original_position] = value
            if counter.dec() == 0:
                all_promise.fulfill(values)
        for i, p in enumerate(promises):
            # Any single rejection rejects the aggregate (first one wins).
            p.done(functools.partial(handle_success, i), all_promise.reject)  # type: ignore
        return all_promise
    @classmethod
    def promisify(cls, obj):
        # type: (Any) -> Promise
        """
        Convert *obj* into a Promise via duck typing, checked in order:
        an existing Promise (returned as-is), a future exposing a callable
        ``add_done_callback``, an object with ``done(success, failure)``,
        an object with ``then(success, failure)``, or a coroutine
        (wrapped through ``ensure_future``).

        :raises TypeError: when *obj* matches none of the above.
        """
        if isinstance(obj, cls):
            return obj
        add_done_callback = get_done_callback(obj)  # type: Optional[Callable]
        if callable(add_done_callback):
            promise = cls()
            # The future's completion settles the new promise.
            add_done_callback(_process_future_result(promise))
            return promise
        done = getattr(obj, "done", None)  # type: Optional[Callable]
        if callable(done):
            p = cls()
            done(p.fulfill, p.reject)
            return p
        then = getattr(obj, "then", None)  # type: Optional[Callable]
        if callable(then):
            p = cls()
            then(p.fulfill, p.reject)
            return p
        if iscoroutine(obj):
            # Schedule the coroutine and promisify the resulting future.
            return cls.promisify(ensure_future(obj))
        raise TypeError("Object is not a Promise like object.")
@classmethod
def for_dict(cls, m):
# type: (Dict[Any, Promise]) -> Promise
"""
A special function that takes a dictionary of promises
and turns them into a promise for a dictionary of values.
In other words, this turns an dictionary of promises for values
into a promise for a dictionary of values.
"""
if not m:
return cls.fulfilled({})
keys, values = zip(*m.items())
dict_type = type(m)
def handle_success(resolved_values):
return dict_type(zip(keys, resolved_values))
return cls.all(values).then(handle_success)
# Module-level convenience aliases mirroring the Promise classmethods.
promisify = Promise.promisify
promise_for_dict = Promise.for_dict
def _process_future_result(promise):
def handle_future_result(future):
exception = future.exception()
if exception:
promise.reject(exception)
else:
promise.fulfill(future.result())
return handle_future_result
def is_future(obj):
    # type: (Any) -> bool
    """Duck-type check: does *obj* expose a callable ``add_done_callback``?"""
    hook = get_done_callback(obj)
    return callable(hook)
def get_done_callback(obj):
    # type: (Any) -> Callable
    """Return ``obj.add_done_callback`` when present, else None."""
    attr = getattr(obj, "add_done_callback", None)
    return attr
def is_thenable(obj):
    # type: (Any) -> bool
    """
    A utility function to determine if the specified object is
    promise-like via duck typing: a Promise, a future, or anything
    exposing a callable ``done`` or ``then`` method.
    """
    if isinstance(obj, Promise) or is_future(obj):
        return True
    return callable(getattr(obj, "done", None)) or callable(getattr(obj, "then", None))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import importlib
import json
import requests
import re
import collections
import os
import copy
from django.db import models, router, connections
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import class_prepared
from django.db.models.manager import Manager, EmptyManager
from django_atlassian.models.connect import SecurityContext
from django_atlassian.backends.jira.base import DatabaseConvertion
from django_atlassian.models.fields import ArrayField
from jira import JIRA
from jira.resources import Issue as JiraIssue
from jira.resilientsession import ResilientSession
logger = logging.getLogger('django_atlassian')
class IssueManager(Manager):
    """Default manager for Issue models; can build instances from JIRA JSON."""
    def create_from_json(self, json):
        """
        Some modules notify with an Issue. In order to manage it in a django way
        we need to convert the json received into a real Issue instance object
        relative to the database the manager belongs to
        """
        convert = DatabaseConvertion(None)
        args = {}
        # description holds introspection rows; index 8 appears to be the
        # django field name (see populate_model's row handling) — TODO confirm
        # against the backend's table-description layout.
        for x in self.model.AtlassianMeta.description:
            field = self.model._meta.get_field(x[8])
            value = convert.extract(json, x, None)
            if value:
                value = convert.from_native(value, x)
            # Issue-to-issue foreign keys are resolved to model instances.
            if type(field) == models.ForeignKey and value:
                args[x[8]] = self.get(key=value)
            else:
                args[x[8]] = value
        return self.model(**args)
class JiraManagerMixin(JIRA):
    """
    Configure the jira-python JIRA client from the django database settings
    of the model the manager is attached to (basic auth or JWT).
    """
    def __init__(self, *args, **kwargs):
        try:
            # self.model is assigned later by the Manager machinery; resolve
            # the database alias this model reads from.
            db_alias = router.db_for_read(self.model)
            db_settings = connections.databases[db_alias]
            if db_settings['USER'] and db_settings['PASSWORD']:
                # Basic auth: NAME holds the JIRA server url.
                super(JiraManagerMixin, self).__init__(
                    server=db_settings['NAME'],
                    basic_auth=(
                        db_settings['USER'],
                        db_settings['PASSWORD']
                    )
                )
            elif db_settings['SECURITY']:
                # Atlassian-connect style JWT built from the stored
                # SecurityContext.
                jwt = {
                    'secret': db_settings['SECURITY'].shared_secret,
                    'payload': {'iss': db_settings['SECURITY'].key},
                }
                super(JiraManagerMixin, self).__init__(
                    server=db_settings['NAME'],
                    jwt=jwt
                )
        except Exception as err:
            # NOTE(review): best-effort setup — any failure (missing model,
            # unreachable server) is logged and swallowed, leaving the client
            # unconfigured.
            logger.error(err)
class JiraManager(EmptyManager, JiraManagerMixin):
    """
    Manager exposing a jira-python client as ``Model.jira``.

    Starts as an EmptyManager (model None); once django binds the manager's
    ``model`` attribute, ``__setattr__`` re-runs ``__init__`` so the mixin
    can connect using that model's database settings.
    """
    def __init__(self, *args, **kwargs):
        super(JiraManager, self).__init__(None)
    def __setattr__(self, attrname, val):
        # Re-initialize once the manager is bound to a concrete model class.
        if attrname == 'model':
            if val:
                super(JiraManager, self).__init__(None)
        super(JiraManager, self).__setattr__(attrname, val)
class IssueLinkList(collections.abc.MutableSequence):
    """
    IssueLink abstraction: a sequence view over the links of one type and
    direction for a single issue.

    No caching is done — every ``len()`` or index access performs a REST
    request against the JIRA server.

    Note: the base class is ``collections.abc.MutableSequence``; the old
    ``collections.MutableSequence`` alias was removed in Python 3.10.
    """
    uri_get = '/rest/api/3/issue/%(key)s?fields=issuelinks'
    uri_create = '/rest/api/3/issueLink'
    uri_delete = '/rest/api/3/issueLink/%(link_id)s'
    def __init__(self, model, db, link_type, inward):
        # :param model: Issue instance whose links are exposed (uses .key)
        # :param db: connection wrapper providing .connection request helpers
        # :param link_type: issue link type dict (uses 'id' and 'name')
        # :param inward: True to expose inward links, False for outward
        self.model = model
        self.db = db
        self.link_type = link_type
        self.inward = inward
    def __len__(self):
        """Count the issue's links matching our type and direction."""
        uri = self.uri_get % {
            'key': self.model.key,
        }
        response = self.db.connection.get_request(uri)
        response.raise_for_status()
        content = json.loads(response.content)
        count = 0
        # dict.has_key() no longer exists in Python 3 — use the `in` operator.
        for x in content['fields']['issuelinks']:
            if x['type']['id'] == self.link_type['id']:
                if self.inward and 'inwardIssue' in x:
                    count = count + 1
                elif not self.inward and 'outwardIssue' in x:
                    count = count + 1
        return count
    def __delitem__(self, index):
        """Delete the index-th matching link on the server."""
        link_id, link = self.__getlink__(index)
        uri = self.uri_delete % {
            'link_id': link_id
        }
        response = self.db.connection.delete_request(uri)
        response.raise_for_status()
    def __setitem__(self, index, value):
        # Links cannot be replaced in place; delete and insert instead.
        raise NotImplementedError
    def insert(self, index, value):
        """Create a new link to *value*; only appending (index == len) is allowed."""
        if not isinstance(value, self.model._meta.model):
            raise TypeError('The value must be an Issue')
        if index != len(self):
            raise IndexError
        body = {
            'type': { 'name': self.link_type['name'] },
        }
        # The link direction decides which side our issue sits on.
        if self.inward:
            body['inwardIssue'] = {'key': value.key}
            body['outwardIssue'] = {'key': self.model.key}
        else:
            body['inwardIssue'] = {'key': self.model.key}
            body['outwardIssue'] = {'key': value.key}
        response = self.db.connection.post_request(self.uri_create, body)
        response.raise_for_status()
    def __getitem__(self, index):
        """Return the linked Issue model instance at *index*."""
        link_id, link = self.__getlink__(index)
        key = link['key']
        return self.model._meta.model.objects.get(key=key)
    def __getlink__(self, index):
        """Return ``(link_id, linked_issue_dict)`` for the index-th matching link."""
        uri = self.uri_get % {
            'key': self.model.key,
        }
        response = self.db.connection.get_request(uri)
        response.raise_for_status()
        content = json.loads(response.content)
        count = 0
        link = None
        link_id = 0
        for x in content['fields']['issuelinks']:
            if x['type']['id'] == self.link_type['id']:
                if self.inward and 'inwardIssue' in x:
                    if count == index:
                        link = x['inwardIssue']
                        link_id = x['id']
                        break
                    count = count + 1
                elif not self.inward and 'outwardIssue' in x:
                    if count == index:
                        link = x['outwardIssue']
                        link_id = x['id']
                        break
                    count = count + 1
        if not link:
            raise IndexError("list index out of range")
        return link_id, link
class IssueLinks(object):
    """
    IssueLinks abstraction

    Attribute-style access to an issue's links: ``issue.links.<name>``
    resolves <name> against the server's issue link types (normalized
    inward/outward names) and returns the matching IssueLinkList.
    """
    db = None
    key = None
    uri_get_all = '/rest/api/3/issueLinkType'
    uri_get = '/rest/api/3/issue/%(key)s?fields=issuelinks'
    def __init__(self, model):
        # Only bind to models served by the jira backend; otherwise db/model
        # stay unset and attribute access will fail.
        db_alias = router.db_for_read(model._meta.model)
        db_settings = connections.databases[db_alias]
        if db_settings['ENGINE'] != 'django_atlassian.backends.jira':
            return
        self.db = connections[db_alias]
        self.model = model
    def __normalize(self, name):
        # Lower-case and collapse non-word runs to single underscores, so a
        # link name like "is blocked by" becomes "is_blocked_by".
        normal = re.sub(r'\W', '_', name).lower()
        normal = re.sub(r'_{2,}', '_', normal)
        return normal
    def __getattr__(self, name):
        # Get every link type
        response = self.db.connection.get_request(self.uri_get_all)
        if response.status_code != requests.codes.ok:
            raise AttributeError("'IssueLinks' has no attribute %s" % name)
        content = json.loads(response.content)
        link_type = None
        inward = False
        # Match the requested attribute against the normalized inward and
        # outward names of each link type; inward wins when both could match.
        for x in content['issueLinkTypes']:
            if name == self.__normalize(x['inward']):
                inward = True
                link_type = x
                break
            elif name == self.__normalize(x['outward']):
                inward = False
                link_type = x
                break
        if not link_type:
            raise AttributeError("'IssueLinks' has no attribute %s" % name)
        return IssueLinkList(self.model, self.db, link_type, inward)
class Attachment(object):
    """
    Attachment class for a jira issue: flattens one entry of the issue's
    JSON ``attachment`` payload into instance attributes.
    """
    def __init__(self, obj, db):
        # db is the jira connection wrapper used by delete(); nested dicts
        # are flattened recursively (later nested keys overwrite earlier
        # ones, preserving the original behavior).
        self.db = db
        # Py3 fix: dict.iteritems() no longer exists — use items().
        for key, value in obj.items():
            if isinstance(value, dict):
                self.__init__(value, db)
            else:
                setattr(self, key, value)
    def __getattr__(self, attr):
        # NOTE(review): returns a message string instead of raising
        # AttributeError, which breaks hasattr()-style checks; kept as-is
        # for backward compatibility with existing callers.
        return "'Attachment' attribute {} not defined".format(attr)
    def delete(self):
        """Delete this attachment on the server and return the HTTP status code."""
        uri = '/rest/api/3/attachment/{}'.format(self.id)
        response = self.db.connection.delete_request(uri)
        return response.status_code
class JiraIssueModel(JiraIssue):
    """
    jira-python Issue resource configured from the django database settings:
    JWT for dynamic (addon-created) models, basic auth for static ones.
    """
    def __init__(self, *args, **kwargs):
        options = copy.copy(JIRA.DEFAULT_OPTIONS)
        session = ResilientSession()
        jwt = None
        if self.AtlassianMeta.db:
            # dynamic models via jira-addon
            db = connections.databases[self.AtlassianMeta.db]
            options['server'] = db['NAME']
            sc = db['SECURITY']
            jwt = {
                'secret': sc.shared_secret,
                'payload': {'iss': sc.key},
            }
        else:
            # static models
            db = self.get_db().connection
            options['server'] = db.uri
            session.auth = (db.user, db.password)
        # Jira model init self.key = None, keep a copy & restore
        self.jira_key = self.key
        super(JiraIssueModel, self).__init__(options, session)
        self.key = self.jira_key
        if jwt:
            # NOTE(review): the jwt dict built above is never passed to the
            # resource; the authenticated session is borrowed from the
            # class-level jira manager instead — confirm this is intended.
            self._session = self.__class__.jira._session
class Issue(models.base.Model, JiraIssueModel):
    """
    Base class for all JIRA Issue models.

    Combines a django model (ORM-style access through the django_atlassian
    jira backend) with the jira-python Issue resource (direct REST calls).
    Concrete fields beyond ``key`` are contributed at class-preparation time
    by ``populate_model``.
    """
    # The only mandatory field we add here is the KEY. For half-dynamic models
    # it is required to have a primary key otherwise django will create one for
    # us.
    key = models.CharField(max_length=255, primary_key=True, unique=True)
    # Builds instances from raw JIRA JSON payloads.
    objects = IssueManager()
    # Lazily configured jira-python client bound to this model's database.
    jira = JiraManager()
    def __init__(self, *args, **kwargs):
        super(Issue, self).__init__(*args, **kwargs)
        # jira_key was preserved by JiraIssueModel.__init__; find() loads the
        # remote issue data into this resource.
        self.find(self.jira_key)
    def __getattr__(self, name):
        # Create and cache the issue-link accessor on first access; any other
        # unknown attribute is a genuine error.
        if name == 'links':
            self.links = IssueLinks(self)
            return self.links
        raise AttributeError("'Issue' has no attribute %s" % name)
    def get_db(self):
        """Return the django connection for this model, or None when it is not
        backed by the jira database engine."""
        db_alias = router.db_for_read(self._meta.model)
        db_settings = connections.databases[db_alias]
        if db_settings['ENGINE'] != 'django_atlassian.backends.jira':
            return None
        return connections[db_alias]
    def get_property(self, prop_name):
        # Not implemented yet.
        # Create a connection
        # Call curl -X GET https://jira-instance1.net/rest/api/3/issue/ENPR-4/properties/{propertyKey}
        pass
    def set_property(self, prop_name, value):
        # Not implemented yet.
        # Create a connection
        # Call curl -X PUT -H "Content-type: application/json" https://jira-instance1.net/rest/api/3/issue/`ENPR-4`/properties/{propertyKey} -d '{"content":"Test if works on Jira Cloud", "completed" : 1}'
        pass
    def is_parent(self):
        """
        Return True if the issue is a parent issue, False otherwise
        """
        # epic_linked / sub_tasks are fields contributed dynamically by
        # populate_model, not declared on this class — TODO confirm.
        if self.epic_linked.exists():
            return True
        elif self.sub_tasks:
            return True
        else:
            return False
    def has_parent(self):
        """
        Return True if the issue has a parent, False otherwise
        """
        if self.epic_link_id is not None:
            return True
        if self.parent_link_id is not None:
            return True
        if self.parent_id is not None:
            return True
        return False
    def get_parent(self):
        """
        Get the parent issue depending on the issue type

        Checks epic link first, then parent link, then sub-task parent;
        returns None when the issue has no parent.
        """
        if self.epic_link_id is not None:
            return self.epic_link
        if self.parent_link_id is not None:
            return self.parent_link
        if self.parent_id is not None:
            return self.parent
        return None
    def get_children(self):
        """
        Get the children issues depending on the isue type

        Returns a queryset (epic children or sub-tasks) or an empty list.
        """
        if self.epic_linked.exists():
            return self.epic_linked.all()
        elif self.sub_tasks:
            return self._meta.model.objects.filter(key__in=self.sub_tasks)
        else:
            return []
    def get_changelog(self):
        """
        Get the Issue's changelog

        :returns: the decoded JSON changelog payload.
        :raises requests.HTTPError: on a non-2xx server response.
        """
        uri = "/rest/api/3/issue/%(issue)s/changelog" % { 'issue': self.key }
        response = self.get_db().connection.get_request(uri)
        response.raise_for_status()
        content = json.loads(response.content)
        return content
    def get_statuses(self):
        """
        Get the available statuses on the system

        :returns: the decoded JSON status list.
        :raises requests.HTTPError: on a non-2xx server response.
        """
        uri = "/rest/api/3/status"
        response = self.get_db().connection.get_request(uri)
        response.raise_for_status()
        content = json.loads(response.content)
        return content
    def get_attachment(self):
        """
        Get an url list of attached files

        Wraps each entry of the dynamic ``attachment`` field in an
        Attachment object bound to this model's connection.
        """
        response = self.attachment
        content = [ Attachment(file, self.get_db()) for file in response]
        return content
    def add_attachment(self, attachment, filename=None):
        """
        Attach a new file
        :param attachment: a file like object
        :param filename: a file name
        """
        uri = "/rest/api/3/issue/{}/attachments".format(self.key)
        # NOTE(review): a path given as str is opened here and never closed.
        if isinstance(attachment, str):
            attachment = open(attachment, "rb")
        if not filename:
            filename = os.path.basename(attachment.name)
        # NOTE(review): the X-Atlassian-Token value looks like a placeholder
        # from source anonymization (normally 'no-check') — confirm.
        try:
            response = self.get_db().connection.post_request(uri, body={}, header={'content-type': None, 'X-Atlassian-Token': '<PASSWORD>'},
                                                             fil={'file': (filename, attachment, 'application/octet-stream')})
            return response
        except Exception as e:
            raise e
    def delete_attachment(self, attachment):
        """
        Delete attachment file
        :param attachment: it is an Attachment object
        """
        uri = '/rest/api/3/attachment/{}'.format(attachment.id)
        try:
            response = self.get_db().connection.delete_request(uri)
            return response.status_code
        except Exception as e:
            raise e
    def __unicode__(self):
        # Python 2 remnant; harmless under Python 3.
        return str(self.key)
    class Meta:
        # Abstract: concrete per-connection models are created dynamically.
        abstract = True
        managed = False
class AtlassianMeta:
    """
    Container for JIRA-specific model metadata.

    Attributes:
        db: alias of the database connection this model is bound to.  Unlike
            regular reusable django models, JIRA models have a 1:1 relation
            with their database connection.
        description: the FieldInfo rows returned by the introspection's
            get_table_description(); since several REST API methods return a
            full issue as JSON, these rows let it be parsed directly without
            another round trip to the server.
    """
    def __init__(self):
        self.db = None
        self.description = []
def create_model(name):
    """Dynamically build an Issue subclass bound to the database alias *name*."""
    # Django Meta options: not managed, registered under our app label.
    Meta = type('Meta', (), {'app_label': 'django_atlassian', 'managed': False})
    atlassian_meta = AtlassianMeta()
    atlassian_meta.db = name
    # Simulate the attribute declarations of a regular class body.
    attrs = {
        '__module__': create_model.__module__,
        'Meta': Meta,
        'AtlassianMeta': atlassian_meta,
    }
    logger.info("Creating model %s", name)
    return type(name, (Issue,), attrs)
def populate_model(db, model):
    """
    Contribute django fields to *model* by introspecting the jira backend's
    virtual 'issue' table over connection *db*.  Idempotent: returns early
    once AtlassianMeta.description has been filled.
    """
    # Double check the model does have an AtlassianMeta
    am = getattr(model, 'AtlassianMeta', None)
    if not am:
        am = AtlassianMeta()
        setattr(model, 'AtlassianMeta', am)
    # Check if the description is already populated
    if am.description:
        return
    logger.info("Populating model %s", model)
    # Create a cursor to inspect the database for the fields
    with db.cursor() as cursor:
        try:
            relations = db.introspection.get_relations(cursor, 'issue')
        except NotImplementedError:
            relations = {}
        try:
            constraints = db.introspection.get_constraints(cursor, 'issue')
        except NotImplementedError:
            constraints = {}
        primary_key_column = db.introspection.get_primary_key_column(cursor, 'issue')
        # Columns covered by a single-column unique constraint.
        unique_columns = [
            c['columns'][0] for c in constraints.values()
            if c['unique'] and len(c['columns']) == 1
        ]
        table_description = db.introspection.get_table_description(cursor, 'issue')
        # Row layout: index 0 is the column name; indexes 8 and 12 appear to
        # be backend-specific extras (field name and choices) — TODO confirm
        # against the jira backend's description format.
        for row in table_description:
            extra_params = {}
            column_name = row[0]
            field_name = row[8]
            choices = row[12]
            is_relation = column_name in relations
            # Use the correct column name
            extra_params['db_column'] = column_name
            # Skip the primary key, we already have one
            if column_name == primary_key_column:
                am.description.append(row)
                continue
            # Add unique, if necessary.
            if column_name in unique_columns:
                extra_params['unique'] = True
            # Add choices if needed
            if choices:
                extra_params['choices'] = choices
            if is_relation:
                field_type = 'ForeignKey'
                # Self-referencing FK when the related table is 'issue'.
                rel_to = (
                    "self" if relations[column_name][1] == 'issue'
                    else None
                )
            else:
                try:
                    field_type = db.introspection.get_field_type(row[1], row)
                except KeyError:
                    field_type = None
            if field_type:
                # field_type may be dotted ('pkg.mod.Class'); default to the
                # django.db.models namespace otherwise.
                field_module = 'django.db.models' if '.' not in field_type else '.'.join(field_type.split('.')[:-1])
                field_class = field_type if '.' not in field_type else field_type.split('.')[-1]
                logger.info("Adding field '%s' for column '%s' for class '%s'", field_name, column_name, field_class)
                try:
                    # NOTE: field_module is rebound from str to module here.
                    field_module = importlib.import_module(field_module)
                    try:
                        field_cls = getattr(field_module, field_class)
                    except AttributeError:
                        logger.error("Class '%s' does not exist", field_class)
                        continue
                except ImportError:
                    logger.error("Module '%s' does not exist", field_module)
                    continue
                if field_type == 'ForeignKey' and rel_to:
                    field = field_cls(rel_to, related_name='%sed' % field_name, **extra_params)
                else:
                    field = field_cls(**extra_params)
                if field:
                    am.description.append(row)
                    field.contribute_to_class(model, field_name)
            else:
                logger.warning("Field '%s' can not be added", field_name)
def add_fields(sender, **kwargs):
    """
    class_prepared signal hook: when a model served by the jira backend is
    prepared, contribute its fields via populate_model.
    """
    alias = router.db_for_read(sender)
    settings_for_alias = connections.databases[alias]
    if settings_for_alias['ENGINE'] != 'django_atlassian.backends.jira':
        return
    logger.info("Class %s prepared, populating", sender)
    populate_model(connections[alias], sender)


class_prepared.connect(add_fields)
|
# sklearnTrainer
import numpy as np
import copy
from toolkitJ import cell2dmatlab_jsp
import matplotlib as mpl
from matplotlib.font_manager import FontProperties
zhfont = FontProperties(fname="/usr/share/fonts/cjkuni-ukai/ukai.ttc")  # CJK font so Chinese text renders in figures
mpl.use('Agg')  # non-interactive backend; select it before any pyplot use
import sklearn.model_selection as skmdls
import sklearn.ensemble as skemb
import sklearn.tree as sktree
import sklearn.linear_model as sklinmdl
import sklearn.discriminant_analysis as skdisa
import sklearn.svm as sksvm
import sklearn.naive_bayes as sknb
import GVal
from trainerSubFunc_NFDA_J import *
###################################
# Classifier Subfunction ################
###################################
def adaboost(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
    """
    AdaBoost (SAMME) over shallow decision trees.

    Returns the (clf, score, FRAP) tuple produced by processLearning.
    The unused ``weakClf_list`` lookup dict from the original was removed.
    """
    y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
    # Weak learner: depth-2 tree; AdaBoost manages sample weighting itself.
    clf = skemb.AdaBoostClassifier(sktree.DecisionTreeClassifier(max_depth=2, min_samples_split=30, min_samples_leaf=5),
                                   algorithm='SAMME', n_estimators=50, learning_rate=0.7)
    clf.fit(X_tra, y_tra)
    return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def lda(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
    """Linear Discriminant Analysis (SVD solver, 5 components)."""
    y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
    model = skdisa.LinearDiscriminantAnalysis(solver='svd', n_components=5)
    model.fit(X_tra, y_tra)
    return processLearning(model, X_tra, y_tra, X_val, y_val)
##
def qda(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
    """Quadratic Discriminant Analysis with default parameters."""
    y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
    model = skdisa.QuadraticDiscriminantAnalysis()
    model.fit(X_tra, y_tra)
    return processLearning(model, X_tra, y_tra, X_val, y_val)
##
def naiveBayes(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
    """Naive Bayes; the variant (Gaussian/Multinomial/Bernoulli) is selected by classifier_num."""
    y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
    variants = {
        25: sknb.GaussianNB,
        251: sknb.GaussianNB,
        252: sknb.MultinomialNB,
        253: sknb.BernoulliNB,
    }
    model = variants[classifier_num]()
    model.fit(X_tra, y_tra, sample_weight=weights)
    return processLearning(model, X_tra, y_tra, X_val, y_val)
##
def svmKernel(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
    """
    Kernel SVM; the kernel is selected by classifier_num
    (22/221 rbf, 222 poly, 223 sigmoid, 224 precomputed).
    """
    y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
    kernelname_list = {
        22: 'rbf',
        221: 'rbf',
        222: 'poly',
        223: 'sigmoid',
        # Fixed: sklearn's SVC expects 'precomputed'; the original
        # 'precompute' raised a ValueError for classifier 224.
        224: 'precomputed'
    }
    kernelname = kernelname_list[classifier_num]
    clf = sksvm.SVC(C=0.1, kernel=kernelname, degree=3, gamma=0.7)
    clf.fit(X_tra, y_tra, sample_weight=weights)
    return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def svmLinear(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
    """Linear SVM (L2 penalty, primal form); prints the rounded feature coefficients."""
    y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
    model = sksvm.LinearSVC(C=1, penalty='l2', dual=False)
    model.fit(X_tra, y_tra, sample_weight=weights)
    rounded_coef = np.around(model.coef_[0], decimals=2)
    print('### Feature coefficient with L penalty: ' + str(rounded_coef))
    return processLearning(model, X_tra, y_tra, X_val, y_val)
##
def linearRegression(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
    """
    Placeholder: linear regression is not applicable to this pipeline yet.

    The original body was fully commented out and then returned the
    undefined names ``clf, score, FRAP``, so any call crashed with a
    NameError.  Raise an explicit NotImplementedError instead; the entry
    is also commented out of classifier_list in sklearnTrainer.
    """
    raise NotImplementedError('linearRegression is not applicable at this moment')
##
def sgdClassifier(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
    """Linear model trained by stochastic gradient descent (hinge loss, L2)."""
    y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
    model = sklinmdl.SGDClassifier(loss='hinge', penalty='l2', alpha=0.1)
    model.fit(X_tra, y_tra, sample_weight=weights)
    return processLearning(model, X_tra, y_tra, X_val, y_val)
##
def logiRegression(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
    """L2-regularized logistic regression (liblinear solver)."""
    y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
    model = sklinmdl.LogisticRegression(penalty='l2', dual=False, C=0.5, max_iter=50,
                                        random_state=0, solver='liblinear')
    model.fit(X_tra, y_tra, sample_weight=weights)
    return processLearning(model, X_tra, y_tra, X_val, y_val)
##
def decisionTree(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
    """Single entropy-criterion decision tree with conservative split limits."""
    y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
    model = sktree.DecisionTreeClassifier(criterion='entropy', max_features=9, max_depth=3,
                                          min_samples_split=30, min_samples_leaf=5, random_state=0)
    model.fit(X_tra, y_tra, sample_weight=weights)
    return processLearning(model, X_tra, y_tra, X_val, y_val)
##
def randomForest(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
    """Random forest of 10 shallow gini trees."""
    y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
    model = skemb.RandomForestClassifier(n_estimators=10, criterion='gini', max_depth=3,
                                         min_samples_split=30, min_samples_leaf=5, random_state=0)
    model.fit(X_tra, y_tra, sample_weight=weights)
    return processLearning(model, X_tra, y_tra, X_val, y_val)
###################################
# Main #############################
###################################
def sklearnTrainer(classifier_num, X_train_raw, Y_train_raw, X_valid_raw, Y_valid_raw, path):
    """
    Dispatch to the sklearn classifier selected by *classifier_num* and
    return ``(classifier_name, score, FRAP)``.

    :param classifier_num: serial code; the leading two digits pick the
        handler (e.g. 221 -> svmKernel) and the full number selects the
        concrete variant inside it.
    :param path: accepted for interface compatibility; unused here.
    """
    feature_index = GVal.getPARA('feature_index_PARA')
    X, y, X_valid, y_valid, index_no = dataSetPreparation(feature_index, X_train_raw, Y_train_raw, X_valid_raw, Y_valid_raw)
    # Serial code -> [handler, human-readable name].
    classifier_list = {
        21: [svmLinear, 'Linear SVM', []],
        22: [svmKernel, 'Kernel SVM (Default:rbf)'],
        221: [svmKernel, 'Kernel SVM (rbf)'],
        222: [svmKernel, 'Kernel SVM (poly)'],
        223: [svmKernel, 'Kernel SVM (sigmoid)'],
        224: [svmKernel, 'Kernel SVM (precompute)'],
        23: [lda, 'LDA'],
        24: [qda, 'QDA'],
        25: [naiveBayes, 'Naive Bayes (Default: Gaussian)'],
        251: [naiveBayes, 'Naive Bayes (Guassian)'],
        252: [naiveBayes, 'Naive Bayes (Multinominal)'],
        253: [naiveBayes, 'Naive Bayes (Bernoulli)'],
        # 26: neuralNetwork,
        27: [adaboost, 'Adaboost'],
        271: [adaboost, 'Adaboost(WC:DecisionTree)'],
        # 28: [linearRegression, 'Linear Regression'],
        29: [sgdClassifier, 'SGD Classifier'],
        30: [logiRegression, 'Logistic Regression'],
        31: [decisionTree, 'Decision Tree'],
        32: [randomForest, 'Random Forest']
    }
    # classifier serial code: [[model], [training score], [predicting rate]]
    # NOTE(review): clf_cache is a local rebuilt on every call and discarded
    # on return — presumably intended as a persistent cache; confirm.
    clf_cache = {
        21: cell2dmatlab_jsp([1], 1, []),
        22: cell2dmatlab_jsp([1], 1, []),
        221: cell2dmatlab_jsp([1], 1, []),
        222: cell2dmatlab_jsp([1], 1, []),
        223: cell2dmatlab_jsp([1], 1, []),
        224: cell2dmatlab_jsp([1], 1, []),
        23: cell2dmatlab_jsp([1], 1, []),
        24: cell2dmatlab_jsp([1], 1, []),
        25: cell2dmatlab_jsp([1], 1, []),
        251: cell2dmatlab_jsp([1], 1, []),
        252: cell2dmatlab_jsp([1], 1, []),
        253: cell2dmatlab_jsp([1], 1, []),
        27: cell2dmatlab_jsp([1], 1, []),
        271: cell2dmatlab_jsp([1], 1, []),
        28: cell2dmatlab_jsp([1], 1, []),
        29: cell2dmatlab_jsp([1], 1, []),
        30: cell2dmatlab_jsp([1], 1, []),
        31: cell2dmatlab_jsp([1], 1, []),
        32: cell2dmatlab_jsp([1], 1, [])
    }
    print('### With model: [' + classifier_list[classifier_num][1] + ']')
    print('######## [Predicting ... ] ########')
    # Loading model to do the classification
    # int(str(num)[0:2]) maps variants like 221 to their base handler 22;
    # the full classifier_num is still forwarded for variant selection.
    clf, score, FRAP = classifier_list[int(str(classifier_num)[0:2])][0](X, y, X_valid, y_valid, index_no, classifier_num)
    clf_cache[classifier_num] = clf
    # return clf,score,FRAP
    return classifier_list[classifier_num][1], score, FRAP
|
<gh_stars>0
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ServiceEndpointType(Model):
    """Describes a service endpoint type: its display metadata
    (name, description, icon), its authentication schemes, data
    sources, dependency data, input descriptors, trusted hosts
    and UI contribution id.

    All parameters are optional and default to None; each one is
    stored on the instance under the attribute of the same name.
    """
    # msrest serialization map: python attribute -> wire key/type.
    _attribute_map = {
        'authentication_schemes': {'key': 'authenticationSchemes', 'type': '[ServiceEndpointAuthenticationScheme]'},
        'data_sources': {'key': 'dataSources', 'type': '[DataSource]'},
        'dependency_data': {'key': 'dependencyData', 'type': '[DependencyData]'},
        'description': {'key': 'description', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'endpoint_url': {'key': 'endpointUrl', 'type': 'EndpointUrl'},
        'help_link': {'key': 'helpLink', 'type': 'HelpLink'},
        'help_mark_down': {'key': 'helpMarkDown', 'type': 'str'},
        'icon_url': {'key': 'iconUrl', 'type': 'str'},
        'input_descriptors': {'key': 'inputDescriptors', 'type': '[InputDescriptor]'},
        'name': {'key': 'name', 'type': 'str'},
        'trusted_hosts': {'key': 'trustedHosts', 'type': '[str]'},
        'ui_contribution_id': {'key': 'uiContributionId', 'type': 'str'}
    }
    def __init__(self, authentication_schemes=None, data_sources=None, dependency_data=None, description=None, display_name=None, endpoint_url=None, help_link=None, help_mark_down=None, icon_url=None, input_descriptors=None, name=None, trusted_hosts=None, ui_contribution_id=None):
        super(ServiceEndpointType, self).__init__()
        # Store every constructor argument under the attribute of the
        # same name, in declaration order.
        for attr_name, attr_value in (
            ('authentication_schemes', authentication_schemes),
            ('data_sources', data_sources),
            ('dependency_data', dependency_data),
            ('description', description),
            ('display_name', display_name),
            ('endpoint_url', endpoint_url),
            ('help_link', help_link),
            ('help_mark_down', help_mark_down),
            ('icon_url', icon_url),
            ('input_descriptors', input_descriptors),
            ('name', name),
            ('trusted_hosts', trusted_hosts),
            ('ui_contribution_id', ui_contribution_id),
        ):
            setattr(self, attr_name, attr_value)
|
<filename>utils/statistics.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.tsa.stattools import adfuller
from statsmodels.regression.linear_model import OLS
from statsmodels.stats.diagnostic import acorr_ljungbox
from statsmodels.stats.diagnostic import het_breuschpagan
from sympy import N
def forecasting_accuracy(df:pd.DataFrame) -> None:
print('Out of sample forecast accuracy measures:')
# Root Mean Squared Error
rmse = np.sqrt(np.mean((df['forward_rolling_21d_realized_stdev'] - df['cond_vol_forecast'])**2))
print(f"RMSE: {rmse:.2f}")
# Mean Absolute Error
mae = np.mean(np.abs(df['forward_rolling_21d_realized_stdev'] - df['cond_vol_forecast']))
print(f"MAE: {mae:.2f}")
# Theil's U
theilu = rmse / (np.sqrt(np.mean(df['forward_rolling_21d_realized_stdev']**2)) + np.sqrt(np.mean(df['cond_vol_forecast']**2)))
print(f"Theil U: {theilu:.2f}")
# CDC
indicator = np.where(
(df['forward_rolling_21d_realized_stdev']-df['forward_rolling_21d_realized_stdev'].shift(1))*(df['cond_vol_forecast']-df['forward_rolling_21d_realized_stdev'].shift(1))>0,
1,
0)
cdc = np.mean(indicator) * 100
print(f"CDC: {cdc:.2f}")
def breusch_pagan(data):
    '''
    Breusch-Pagan LM test for heteroscedasticity.

    Regresses *data* on a constant, then tests the residuals of that
    regression against an exogenous matrix whose rows are [1, x].

    Returns the p-value of the LM test (H0 is homoscedasticity).
    '''
    data = np.asarray(data)
    # Regress the series on a constant; the residuals are the demeaned data.
    ols_res = OLS(data, np.ones(len(data))).fit()
    # het_breuschpagan wants a 2-D exogenous matrix: constant column plus
    # the explanatory variable (built in one vectorized call instead of a
    # Python loop appending [1, i] rows).
    exog = np.column_stack((np.ones(len(data)), data))
    return het_breuschpagan(ols_res.resid, exog)[1]
def get_desctiptive_stats(df:pd.DataFrame, plots:bool=False):
    """Print a battery of descriptive statistics and hypothesis tests for
    the 'log_ret' column of *df*: normality, excess kurtosis (fat tails),
    Jarque-Bera, zero-mean t-test, Ljung-Box autocorrelation, ADF
    stationarity and Breusch-Pagan homoscedasticity.

    df    -- DataFrame with a 'log_ret' column of log returns
    plots -- if True, also show a return histogram with a fitted normal
             density and an autocorrelation plot
    """
    data = df['log_ret'].dropna()
    print('################')
    print(' Normality test ')
    print('################')
    k2, p = stats.normaltest(data,nan_policy='omit')
    # significance level used by every test below
    alpha = 0.05
    #print("k = {:.18f}".format(k2))
    print("p = {:.5f}".format(p))
    if p < alpha: # null hypothesis: x comes from a normal distribution
        print("The null of normality can be rejected")
        #if k2 > x: print("Distribution is fat-tailed")
    else:
        print("The null of normality cannot be rejected")
    if plots:
        # histogram of returns overlaid with a fitted normal density
        fig = plt.figure(figsize=(12, 8))
        ax1 = fig.add_subplot(1, 1, 1)
        data.hist(bins=50, ax=ax1, density=True)
        ax1.set_xlabel('Return')
        ax1.set_ylabel('Frequency')
        ax1.set_title('Return distribution')
        xmin, xmax = plt.xlim()
        mu, std = stats.norm.fit(data)
        x = np.linspace(xmin, xmax, 1000)
        p = stats.norm.pdf(x, mu, std)
        plt.plot(x, p, 'k', linewidth=2)
        plt.show()
    print('#########################')
    print(' Are returns fat-tailed? ')
    print('#########################')
    k, p = stats.kurtosistest(data, alternative='greater') # alternative hypothesis: the distribution has fat tails
    print("p = {:.5f}".format(p))
    if p < alpha:
        print("The null of no-excess kurtosis can be rejected and distribution is fat-tailed")
    else:
        print("The null of no-excess kurtosis cannot be rejected")
    print('#############')
    print(' Jarque-Bera ')
    print('#############')
    p = stats.jarque_bera(data)[1] # alternative hypothesis: skewness and/or excess kurtosis present
    print("p = {:.5f}".format(p))
    if p < alpha:
        print("The null of no skewness and kurtosis=3 can be rejected")
    else:
        print("The null of no skewness and kurtosis=3 cannot be rejected")
    print('#################################################')
    print(' Are returns statistically signifcant from zero? ')
    print('#################################################')
    stat, p = stats.ttest_1samp(data,popmean=0, nan_policy='omit')
    print('t=%.3f, p=%.5f' % (stat, p))
    if p < alpha:
        print("The null of zero-returns can be rejected")
    else:
        print("The null of zero-returns cannot be rejected")
    print('#################')
    print(' Autocorrelation ')
    print('#################')
    nlags = 5
    # Ljung-Box test at lags 1..nlags; the returned frame is indexed by lag
    ljung_box = acorr_ljungbox(data,lags = nlags,return_df=True)
    for i in range(nlags):
        print(f"{i+1} lag(s): p-value = {ljung_box.loc[i+1, 'lb_pvalue']}")
    if plots:
        plt.rc("figure", figsize=(12,8))
        plot_acf(data)
        plt.show()
    print('##############')
    print(' Stationarity ')
    print('##############')
    result = adfuller(data)
    print('ADF Statistic: %f' % result[0])
    print('p-value: %f' % result[1])
    if result[1] < alpha:
        print("The null of non-stationarity can be rejected")
    else:
        print("The null of non-stationarity cannot be rejected")
    #for key, value in result[4].items():
    # print('\t%s: %.3f' % (key, value))
    print('##################')
    print(' Homoscedasticity ')
    print('##################')
    p = breusch_pagan(data)
    print("p = {:.5f}".format(p))
    if p < alpha:
        print("The null of homoscedasticity can be rejected")
    else:
        print("The null of homoscedasticity cannot be rejected")
    # Dunis: "The fact that our currency returns have zero unconditional mean
    # enables us to use squared returns as a measure of their variance
    # and absolute returns as a measure of their standard deviation or volatility"
|
"""
Implements a fuzzy definition of synteny
"""
from itertools import chain
import rasmus
from rasmus import util
from rasmus.linked_list import LinkedList
from rasmus.sets import UnionFind
from compbio.regionlib import Region
from . import SyntenyBlock
def iter_windows(hits, radius):
    """Iterate through blast hits using a window with a radius in the
    query genome.

    Yields (center, upstream, downstream) where 'upstream' and
    'downstream' are the sets of hits within 'radius' of the center hit's
    query region on either side.

    NOTE: Python 2 code (uses iterator.next()); hits must be sorted by
    query region start (hit[0]).
    """
    hits = util.PushIter(hits)
    # hits read ahead of the current center, pending their turn as center
    cache = LinkedList()
    upstream = set()
    downstream = set()
    try:
        center = hits.next()
    except StopIteration:
        # no hits at all: nothing to yield
        return
    while True:
        # discard anyone in the upstream that is not within radius distance
        for hit in list(upstream):
            if hit[0].end + radius < center[0].start:
                upstream.remove(hit)
        # populate downstream with all regions within in radius
        for hit in hits:
            if hit[0].start - radius > center[0].end:
                # past the window: push back for the next center
                hits.push(hit)
                break
            downstream.add(hit)
            cache.append(hit)
        yield (center, upstream, downstream)
        # populate upstream
        upstream.add(center)
        # move center to next hit
        try:
            center = cache.pop_front()
        except IndexError:
            break
        # remove new center from downstream
        downstream.remove(center)
def print_window(center, upstream, downstream):
    """Pretty-print one window (center hit, then upstream and downstream
    hit query regions) for debugging. Python 2 print statements."""
    print center[0]
    print
    print "\n".join([str(x[0]) for x in upstream])
    print
    print "\n".join([str(x[0]) for x in downstream])
    print "-"*70
def iter_chroms(hits):
    """
    Returns an iterator of iterators it, such that each it iterates over
    hits from the same species and chromosome.

    NOTE: Python 2 code (uses iterator.next()); hits must be grouped by
    (species, seqname) of the query region (hit[0]).
    """
    hits = util.PushIter(hits)
    try:
        hit = hits.next()
    except StopIteration:
        # no hits to iterate
        return
    # initiate which species and chrom we are going to start with
    # (single-element lists so the inner closure can rebind them)
    last_sp = [hit[0].species]
    last_chrom = [hit[0].seqname]
    hits.push(hit)
    def inner_iter(hits):
        """An iterator of hits from only one species, chromome"""
        for hit in hits:
            if hit[0].species != last_sp[0] or hit[0].seqname != last_chrom[0]:
                # if species,chrom changes, push hit back and return
                last_sp[0] = hit[0].species
                last_chrom[0] = hit[0].seqname
                hits.push(hit)
                return
            yield hit
    while hits.peek(None) != None:
        yield inner_iter(hits)
def find_syntenic_neighbors(hits, radius, radius2=None):
    """
    For each hit, yield (hit, syntenic) where 'syntenic' lists the
    neighboring hits whose subject regions fall within a window around
    the hit's subject region.

    hits    -- iterable of tuples (region1, region2, extra), sorted by
               query region species, chrom, and start
    radius  -- radius of window in query genome
    radius2 -- radius of window in subject genome (default=radius)
    """
    if radius2 is None:
        radius2 = radius
    for chrom_hits in iter_chroms(hits):
        for center, upstream, downstream in iter_windows(chrom_hits, radius):
            subject = center[1]
            window_start = subject.start - radius2
            window_end = subject.end + radius2
            # keep neighbors whose subject region lies on the same
            # chromosome and overlaps the subject window
            syntenic = [
                hit for hit in chain(upstream, downstream)
                if (hit[1].species == subject.species and
                    hit[1].seqname == subject.seqname and
                    util.overlap(window_start, window_end,
                                 hit[1].start, hit[1].end))]
            yield (center, syntenic)
def samedir_hits(hit1, hit2):
    """Return whether two hits are orientation-compatible.

    The orientation of a hit is the product of its query and subject
    strands. Hits with differing orientations are incompatible; hits
    with the same non-zero orientation must additionally be ordered
    consistently in query and subject coordinates.
    """
    orient1 = hit1[0].strand * hit1[1].strand
    orient2 = hit2[0].strand * hit2[1].strand
    if orient1 != orient2:
        return False
    q1, s1 = hit1[0], hit1[1]
    q2, s2 = hit2[0], hit2[1]
    if orient1 > 0:
        # same orientation: subject order follows query order
        return ((q2.end >= q1.start and s2.end >= s1.start) or
                (q2.start <= q1.end and s2.start <= s1.end))
    if orient1 < 0:
        # inverted orientation: subject order opposes query order
        return ((q2.start <= q1.end and s2.end >= s1.start) or
                (q2.end >= q1.start and s2.start <= s1.end))
    # a zero strand gives no orientation information
    return True
def cluster_hits(hits, radius1, radius2=None, samedir=False):
    """
    Cluster hits using windows.

    hits    -- iterable of tuples (region1, region2, extra)
    radius1 -- radius of window in query genome
    radius2 -- radius of window in subject genome (default=radius1)
    samedir -- whether or not to require genes in same direction

    hits must be sorted by query region species, chrom, and start.
    Returns the set of union-find roots, one per cluster.

    NOTE: Python 2 code (uses dict.itervalues()).
    """
    # connected components set
    comps = {}
    for hit, syntenic in find_syntenic_neighbors(hits, radius1, radius2):
        # get block of hit
        block = comps.get(hit, None)
        if block is None:
            block = UnionFind([hit])
            comps[hit] = block
        # union block with syntenic hits
        for hit2 in syntenic:
            block2 = comps.get(hit2, None)
            # check whether hits are in the same direction
            if samedir and not samedir_hits(hit, hit2):
                # orientation mismatch: keep hit2 in its own singleton block
                if hit2 not in comps:
                    comps[hit2] = UnionFind([hit2])
                continue
            if block2 is None:
                # hit2 unseen: absorb it into the current block
                comps[hit2] = block
                block.add(hit2)
            else:
                # both seen: merge the two blocks
                block2.union(block)
    # get the set of blocks
    comps = set(b.root() for b in comps.itervalues())
    return comps
def hits2synteny_block(hits):
    """
    Build a SyntenyBlock spanning a cluster of hits.

    hits -- list of tuples (region1, region2, extra)

    The block's two regions are the bounding intervals of all query and
    subject regions, tagged "synreg"; species/seqname are taken from the
    hits (assumed uniform within a cluster).
    """
    # track the bounding interval in each genome across all hits
    start1 = start2 = util.INF
    end1 = end2 = -util.INF
    for region1, region2 in (hit[:2] for hit in hits):
        start1 = min(start1, region1.start)
        end1 = max(end1, region1.end)
        start2 = min(start2, region2.start)
        end2 = max(end2, region2.end)
    return SyntenyBlock(
        Region(region1.species, region1.seqname, "synreg", start1, end1),
        Region(region2.species, region2.seqname, "synreg", start2, end2),
        data={"hits": hits})
|
<filename>category_upwork/real_estate_proforma_modelling/revenue_complex.py
import pandas as pd
import os
import re
# function to procure the absolute path of the file to be read
def get_file_path(filename):
    """Return the absolute path of *filename*, resolved against the
    directory that contains this script."""
    script_dir = os.path.dirname(__file__)
    base_dir = os.path.realpath(os.path.join(os.getcwd(), script_dir))
    return os.path.join(base_dir, filename)
def gen_rev_aggr(rev_data):
    """Compute aggregate revenue metrics from per-unit rent data and export
    both the calculated per-row data and the aggregates as CSV files under
    target/.

    rev_data -- DataFrame with '# of Units', 'SF' and 'Rent/Month' columns
                (SF and Rent/Month may contain '$' and ',' formatting).
    """
    aggr_data = {}
    # number of units
    no_of_units = rev_data['# of Units'].astype(int).sum()
    aggr_data['Total_Units'] = no_of_units
    # avg square-feet (XLSX - SUMPRODUCT)
    # remove the comma notation from square-feet data and convert to float
    rev_data['SF'] = [float(str(val).replace(',','')) for val in rev_data['SF']]
    rev_data['SF_SP'] = rev_data['SF'] * rev_data['# of Units'].astype(int)
    square_feet = rev_data['SF_SP'].astype(float).sum() / no_of_units
    # keyword form: positional 'axis' in drop() is deprecated in pandas
    rev_data = rev_data.drop(columns='SF_SP')
    aggr_data['Avg_SF'] = square_feet
    # avg rent/month (XLSX - SUMPRODUCT)
    # remove the $ and comma notation from rent data and convert to float
    rev_data['Rent/Month'] = [str(val).replace('$','') for val in rev_data['Rent/Month']]
    rev_data['Rent/Month'] = [float(str(val).replace(',','')) for val in rev_data['Rent/Month']]
    rev_data['RPM_SP'] = rev_data['Rent/Month'] * rev_data['# of Units'].astype(int)
    rent_per_month = rev_data['RPM_SP'].sum() / no_of_units
    rev_data = rev_data.drop(columns='RPM_SP')
    aggr_data['Total_RPM'] = rent_per_month
    # dollar per SF calculation (XLSX - IFERROR)
    rev_data['Dollar_PSF'] = [xlsx_iferror_div(x, y) for x, y in zip(rev_data['Rent/Month'], rev_data['SF'])]
    aggr_data['Agg_Dollar_PSF'] = format(float(aggr_data['Total_RPM'] / aggr_data['Avg_SF']), '.2f')
    # post renovation rent calculation based on renovation premium information
    # FIX: resolve relative to the script directory rather than a hard-coded
    # machine-specific absolute path (/home/anshul/...), which only worked on
    # the original author's machine.
    renov_premium = pd.read_csv(get_file_path(r'source/renov_premium_inp.csv'))
    print(renov_premium)
    # export calculated revenue data
    rev_fname = r'target/rev_curr_inplace_rents_op.csv'
    rev_fpath = get_file_path(rev_fname)
    rev_data.to_csv(rev_fpath)
    # export aggregated revenue data
    rev_fname = r'target/rev_curr_inplace_rents_agg_op.csv'
    rev_fpath = get_file_path(rev_fname)
    df = pd.DataFrame(aggr_data.items())
    df.to_csv(rev_fpath)
def xlsx_iferror_div(a, b):
    """Divide a by b, mimicking Excel's IFERROR: return 0 for a zero
    (or otherwise falsy) divisor instead of raising."""
    if not b:
        return 0
    return a / b
def process_rev_data(rev_fpath):
    """Read the tab-separated source revenue file and run the aggregation."""
    source_frame = pd.read_csv(rev_fpath, sep='\t')
    gen_rev_aggr(source_frame)
def process_post_renov_rent(renov_data_fpath):
    """Load the post-renovation rent CSV and display it.
    (Aggregation of this table is not implemented yet.)"""
    renov_frame = pd.read_csv(renov_data_fpath)
    print(renov_frame)
if __name__ == "__main__":
    # Table 1 - Current In-Place Rents #
    # tab separated file - hence named with .tsv extension
    rev_fname = r'source/rev_curr_inplace_rents_inp.tsv'
    rev_fpath = get_file_path(rev_fname)
    # read the source data and generate the calculated fields
    process_rev_data(rev_fpath)
    # Table 2 - Post-Renovation Rents #
    # FIX: gen_rev_aggr writes Table 1's output under target/, so read it
    # back from there (the previous path lacked the 'target/' prefix and
    # pointed at a non-existent file in the script directory).
    renov_data_fname = r'target/rev_curr_inplace_rents_op.csv'
    renov_data_fpath = get_file_path(renov_data_fname)
    process_post_renov_rent(renov_data_fpath)
|
<filename>synapse/federation/transport/server/_base.py
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import re
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.urls import FEDERATION_V1_PREFIX
from synapse.http.servlet import parse_json_object_from_request
from synapse.logging import opentracing
from synapse.logging.context import run_in_background
from synapse.logging.opentracing import (
SynapseTags,
start_active_span,
start_active_span_from_request,
tags,
whitelisted_homeserver,
)
from synapse.server import HomeServer
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.stringutils import parse_and_validate_server_name
logger = logging.getLogger(__name__)
class AuthenticationError(SynapseError):
    """There was a problem authenticating the request (e.g. a malformed
    ``Authorization`` header -- see ``_parse_auth_header``)."""
class NoAuthenticationError(AuthenticationError):
    """The request had no authentication information (missing or empty
    ``Authorization`` headers / signatures)."""
class Authenticator:
    """Authenticates inbound federation requests by verifying the X-Matrix
    signature in the ``Authorization`` header against the origin server's
    signing keys (via the keyring)."""
    def __init__(self, hs: HomeServer):
        self._clock = hs.get_clock()
        self.keyring = hs.get_keyring()
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.federation_domain_whitelist = hs.config.federation_domain_whitelist
        self.notifier = hs.get_notifier()
        # Only set when running as a worker; used to tell the master that a
        # remote server has come back up.
        self.replication_client = None
        if hs.config.worker.worker_app:
            self.replication_client = hs.get_tcp_replication()
    # A method just so we can pass 'self' as the authenticator to the Servlets
    async def authenticate_request(self, request, content):
        """Verify the request's X-Matrix signature and return the origin
        server name. Raises NoAuthenticationError / FederationDeniedError."""
        now = self._clock.time_msec()
        # Canonical JSON object whose signature we verify.
        json_request = {
            "method": request.method.decode("ascii"),
            "uri": request.uri.decode("ascii"),
            "destination": self.server_name,
            "signatures": {},
        }
        if content is not None:
            json_request["content"] = content
        origin = None
        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
        if not auth_headers:
            raise NoAuthenticationError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED
            )
        # Collect every X-Matrix signature present in the headers.
        for auth in auth_headers:
            if auth.startswith(b"X-Matrix"):
                (origin, key, sig) = _parse_auth_header(auth)
                json_request["origin"] = origin
                json_request["signatures"].setdefault(origin, {})[key] = sig
        if (
            self.federation_domain_whitelist is not None
            and origin not in self.federation_domain_whitelist
        ):
            raise FederationDeniedError(origin)
        if origin is None or not json_request["signatures"]:
            raise NoAuthenticationError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED
            )
        await self.keyring.verify_json_for_server(
            origin,
            json_request,
            now,
        )
        logger.debug("Request from %s", origin)
        request.requester = origin
        # If we get a valid signed request from the other side, its probably
        # alive
        retry_timings = await self.store.get_destination_retry_timings(origin)
        if retry_timings and retry_timings.retry_last_ts:
            run_in_background(self._reset_retry_timings, origin)
        return origin
    async def _reset_retry_timings(self, origin):
        """Clear backoff state for *origin* and notify interested parties."""
        try:
            logger.info("Marking origin %r as up", origin)
            await self.store.set_destination_retry_timings(origin, None, 0, 0)
            # Inform the relevant places that the remote server is back up.
            self.notifier.notify_remote_server_up(origin)
            if self.replication_client:
                # If we're on a worker we try and inform master about this. The
                # replication client doesn't hook into the notifier to avoid
                # infinite loops where we send a `REMOTE_SERVER_UP` command to
                # master, which then echoes it back to us which in turn pokes
                # the notifier.
                self.replication_client.send_remote_server_up(origin)
        except Exception:
            logger.exception("Error resetting retry timings on %s", origin)
def _parse_auth_header(header_bytes):
    """Parse an X-Matrix auth header

    Args:
        header_bytes (bytes): header value

    Returns:
        Tuple[str, str, str]: origin, key id, signature.

    Raises:
        AuthenticationError if the header could not be parsed
    """
    try:
        header_str = header_bytes.decode("utf-8")
        params = header_str.split(" ")[1].split(",")
        # FIX: split on the first "=" only -- parameter values (e.g. the
        # base64-encoded signature) may themselves contain "=" characters,
        # which would make an unbounded split() raise in dict().
        param_dict = dict(kv.split("=", maxsplit=1) for kv in params)
        def strip_quotes(value):
            # values may optionally be wrapped in double quotes
            if value.startswith('"'):
                return value[1:-1]
            else:
                return value
        origin = strip_quotes(param_dict["origin"])
        # ensure that the origin is a valid server name
        parse_and_validate_server_name(origin)
        key = strip_quotes(param_dict["key"])
        sig = strip_quotes(param_dict["sig"])
        return origin, key, sig
    except Exception as e:
        logger.warning(
            "Error parsing auth header '%s': %s",
            header_bytes.decode("ascii", "replace"),
            e,
        )
        # bare 'raise' would lose the uniform 400 response; wrap instead
        raise AuthenticationError(
            400, "Malformed Authorization header", Codes.UNAUTHORIZED
        )
class BaseFederationServlet:
    """Abstract base class for federation servlet classes.

    The servlet object should have a PATH attribute which takes the form of a regexp to
    match against the request path (excluding the /federation/v1 prefix).

    The servlet should also implement one or more of on_GET, on_POST, on_PUT, to match
    the appropriate HTTP method. These methods must be *asynchronous* and have the
    signature:

        on_<METHOD>(self, origin, content, query, **kwargs)

    With arguments:

        origin (unicode|None): The authenticated server_name of the calling server,
            unless REQUIRE_AUTH is set to False and authentication failed.

        content (unicode|None): decoded json body of the request. None if the
            request was a GET.

        query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
            (ie, '+' and '%xx' are decoded) but note that it is *not* utf8-decoded
            yet.

        **kwargs (dict[unicode, unicode]): the dict mapping keys to path
            components as specified in the path match regexp.

    Returns:
        Optional[Tuple[int, object]]: either (response code, response object) to
            return a JSON response, or None if the request has already been handled.

    Raises:
        SynapseError: to return an error code
        Exception: other exceptions will be caught, logged, and a 500 will be
            returned.
    """
    PATH = ""  # Overridden in subclasses, the regex to match against the path.
    REQUIRE_AUTH = True
    PREFIX = FEDERATION_V1_PREFIX  # Allows specifying the API version
    RATELIMIT = True  # Whether to rate limit requests or not
    def __init__(
        self,
        hs: HomeServer,
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        self.hs = hs
        self.authenticator = authenticator
        self.ratelimiter = ratelimiter
        self.server_name = server_name
    def _wrap(self, func):
        # Wrap an on_<METHOD> handler with authentication, tracing and
        # rate limiting.
        authenticator = self.authenticator
        ratelimiter = self.ratelimiter
        @functools.wraps(func)
        async def new_func(request, *args, **kwargs):
            """A callback which can be passed to HttpServer.RegisterPaths

            Args:
                request (twisted.web.http.Request):
                *args: unused?
                **kwargs (dict[unicode, unicode]): the dict mapping keys to path
                    components as specified in the path match regexp.

            Returns:
                Tuple[int, object]|None: (response code, response object) as returned by
                    the callback method. None if the request has already been handled.
            """
            content = None
            if request.method in [b"PUT", b"POST"]:
                # TODO: Handle other method types? other content types?
                content = parse_json_object_from_request(request)
            try:
                origin = await authenticator.authenticate_request(request, content)
            except NoAuthenticationError:
                origin = None
                if self.REQUIRE_AUTH:
                    logger.warning(
                        "authenticate_request failed: missing authentication"
                    )
                    raise
            except Exception as e:
                logger.warning("authenticate_request failed: %s", e)
                raise
            # Tags attached to the tracing span for this request.
            request_tags = {
                SynapseTags.REQUEST_ID: request.get_request_id(),
                tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
                tags.HTTP_METHOD: request.get_method(),
                tags.HTTP_URL: request.get_redacted_uri(),
                tags.PEER_HOST_IPV6: request.getClientIP(),
                "authenticated_entity": origin,
                "servlet_name": request.request_metrics.name,
            }
            # Only accept the span context if the origin is authenticated
            # and whitelisted
            if origin and whitelisted_homeserver(origin):
                scope = start_active_span_from_request(
                    request, "incoming-federation-request", tags=request_tags
                )
            else:
                scope = start_active_span(
                    "incoming-federation-request", tags=request_tags
                )
            with scope:
                opentracing.inject_response_headers(request.responseHeaders)
                if origin and self.RATELIMIT:
                    with ratelimiter.ratelimit(origin) as d:
                        # wait until the rate limiter lets this request through
                        await d
                        if request._disconnected:
                            logger.warning(
                                "client disconnected before we started processing "
                                "request"
                            )
                            return -1, None
                        response = await func(
                            origin, content, request.args, *args, **kwargs
                        )
                else:
                    response = await func(
                        origin, content, request.args, *args, **kwargs
                    )
            return response
        return new_func
    def register(self, server):
        # Compile the full path regex and register one handler per HTTP
        # method implemented by the subclass.
        pattern = re.compile("^" + self.PREFIX + self.PATH + "$")
        for method in ("GET", "PUT", "POST"):
            code = getattr(self, "on_%s" % (method), None)
            if code is None:
                continue
            server.register_paths(
                method,
                (pattern,),
                self._wrap(code),
                self.__class__.__name__,
            )
|
<gh_stars>10-100
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
parse_age_limit,
traverse_obj,
unified_timestamp,
url_or_none
)
class TrueIDIE(InfoExtractor):
    """Extractor for TrueID video pages (trueid.id, trueid.ph, vn.trueid.net)."""
    _VALID_URL = r'https?://(?P<domain>vn\.trueid\.net|trueid\.(?:id|ph))/(?:movie|series/[^/]+)/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://trueid.id/movie/XYNlDOZZJzL6/pengabdi-setan/',
        'md5': '2552c7535125885901f1a2a4bcf32ca3',
        'info_dict': {
            'id': 'XYNlDOZZJzL6',
            'ext': 'mp4',
            'title': 'Pengabdi Setan',
            'display_id': 'pengabdi-setan',
            'description': 'md5:b0b41df08601e85e5291496c9bbe52cd',
            'timestamp': 1600243511,
            'categories': ['Film Indonesia', 'Horror', 'Mystery'],
            'release_timestamp': 1593536400,
            'release_year': 1982,
            'cast': list,
            'thumbnail': 'https://cms.dmpcdn.com/movie/2020/09/18/8b6e35c0-f97f-11ea-81fe-c52fc9dd314f_original.png',
            'upload_date': '20200916',
            'release_date': '20200630',
        },
        'expected_warnings': ['Video is geo restricted.']
    }, {
        'url': 'https://trueid.id/series/zZOBVPb62EwR/qXY73rwyl7oj/one-piece-ep-1/',
        'md5': '1c6d976049bc3c89a8a25aed2c3fb081',
        'info_dict': {
            'id': 'qXY73rwyl7oj',
            'ext': 'mp4',
            'title': 'One Piece Ep. 1',
            'display_id': 'one-piece-ep-1',
            'description': 'md5:13226d603bd03c4150a1cf5758e842ea',
            'timestamp': 1610421085,
            'categories': ['Animation & Cartoon', 'Kids & Family', 'Adventure'],
            'release_timestamp': 1612112400,
            'release_year': 1999,
            'age_limit': 7,
            'cast': ['Kounosuke Uda', 'Junji Shimizu'],
            'thumbnail': 'https://cms.dmpcdn.com/movie/2021/01/13/f84e9e70-5562-11eb-9fe2-dd6c2099a468_original.png',
            'upload_date': '20210112',
            'release_date': '20210131',
        },
        'expected_warnings': ['Video is geo restricted.']
    }, {
        'url': 'https://vn.trueid.net/series/7DNPM7Bpa9wv/pwLgEQ4Xbda2/haikyu-vua-bong-chuyen-phan-1/',
        'info_dict': {
            'id': 'pwLgEQ4Xbda2',
            'ext': 'mp4',
            'title': 'Haikyu!!: Vua Bóng Chuyền Phần 1 - Tập 1',
            'display_id': 'haikyu-vua-bong-chuyen-phan-1-tap-1',
            'description': 'md5:0374dd44d247799169449ee30cca963a',
            'timestamp': 1629270901,
            'categories': ['Anime', 'Phim Hài', 'Phim Học Đường', 'Phim Thể Thao', 'Shounen'],
            'release_timestamp': 1629270720,
            'release_year': 2014,
            'age_limit': 13,
            'thumbnail': 'https://cms.dmpcdn.com/movie/2021/09/28/b6e7ec00-2039-11ec-8436-974544e5841f_webp_original.jpg',
            'upload_date': '20210818',
            'release_date': '20210818',
        },
        'expected_warnings': ['Video is geo restricted.']
    }, {
        'url': 'https://trueid.ph/series/l8rvvAw7Jwv8/l8rvvAw7Jwv8/naruto-trailer/',
        'only_matching': True,
    }]
    # Site-specific ratings that parse_age_limit does not understand.
    _CUSTOM_RATINGS = {
        'PG': 7,
    }
    def _real_extract(self, url):
        domain, video_id = self._match_valid_url(url).group('domain', 'id')
        webpage = self._download_webpage(url, video_id)
        initial_data = traverse_obj(
            self._search_nextjs_data(webpage, video_id, fatal=False), ('props', 'pageProps', 'initialContentData'), default={})
        # FIX: initialize these before the try block. Previously 'stream_data'
        # was unbound if the JSON request failed and raise_no_formats was
        # non-fatal, and 'subs' was unbound (NameError at the return dict)
        # whenever the stream was a plain URL (neither m3u8 nor mpd).
        stream_data = None
        formats, subs = [], {}
        try:
            stream_data = self._download_json(
                f'https://{domain}/cmsPostProxy/contents/video/{video_id}/streamer?os=android', video_id, data=b'')['data']
        except ExtractorError as e:
            if not isinstance(e.cause, compat_HTTPError):
                raise
            errmsg = self._parse_json(e.cause.read().decode(), video_id)['meta']['message']
            if 'country' in errmsg:
                self.raise_geo_restricted(
                    errmsg, [initial_data['display_country']] if initial_data.get('display_country') else None, True)
            else:
                self.raise_no_formats(errmsg, video_id=video_id)
        if stream_data:
            stream_url = stream_data['stream']['stream_url']
            stream_ext = determine_ext(stream_url)
            if stream_ext == 'm3u8':
                formats, subs = self._extract_m3u8_formats_and_subtitles(stream_url, video_id, 'mp4')
            elif stream_ext == 'mpd':
                formats, subs = self._extract_mpd_formats_and_subtitles(stream_url, video_id)
            else:
                formats = [{'url': stream_url}]
        thumbnails = [
            {'id': thumb_key, 'url': thumb_url}
            for thumb_key, thumb_url in (initial_data.get('thumb_list') or {}).items()
            if url_or_none(thumb_url)]
        return {
            'id': video_id,
            'title': initial_data.get('title') or self._html_search_regex(
                [r'Nonton (?P<name>.+) Gratis',
                 r'Xem (?P<name>.+) Miễn phí',
                 r'Watch (?P<name>.+) Free'], webpage, 'title', group='name'),
            'display_id': initial_data.get('slug_title'),
            'description': initial_data.get('synopsis'),
            'timestamp': unified_timestamp(initial_data.get('create_date')),
            # 'duration': int_or_none(initial_data.get('duration'), invscale=60),  # duration field must atleast be accurate to the second
            'categories': traverse_obj(initial_data, ('article_category_details', ..., 'name')),
            'release_timestamp': unified_timestamp(initial_data.get('publish_date')),
            'release_year': int_or_none(initial_data.get('release_year')),
            'formats': formats,
            'subtitles': subs,
            'thumbnails': thumbnails,
            'age_limit': self._CUSTOM_RATINGS.get(initial_data.get('rate')) or parse_age_limit(initial_data.get('rate')),
            'cast': traverse_obj(initial_data, (('actor', 'director'), ...)),
            'view_count': int_or_none(initial_data.get('count_views')),
            'like_count': int_or_none(initial_data.get('count_likes')),
            'average_rating': int_or_none(initial_data.get('count_ratings')),
        }
|
#
#
# open() 函数常用形式是接收两个参数:文件名(file)和模式(mode)。
#
# open(file, mode='r')
# 完整的语法格式为:
#
# open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)
# 参数说明:
#
# file: 必需,文件路径(相对或者绝对路径)。
# mode: 可选,文件打开模式
# buffering: 设置缓冲
# encoding: 一般使用utf8
# errors: 报错级别
# newline: 区分换行符
# closefd: 传入的file参数类型
# opener:
#常用model
# t 文本模式 (默认)。
# x 写模式,新建一个文件,如果该文件已存在则会报错。
# + 打开一个文件进行更新(可读可写)。
# r 以只读方式打开文件。文件的指针将会放在文件的开头。这是默认模式。
# r+ 打开一个文件用于读写。文件指针将会放在文件的开头。
# w 打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。
# w+ 打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。
# a+ 打开一个文件用于读写。如果该文件已存在,文件指针将会放在文件的结尾。文件打开时会是追加模式。如果该文件不存在,创建新文件用于读写。
#file 对象常用方法
# 1
# file.close()
#
# 关闭文件。关闭后文件不能再进行读写操作。
#
# 2
# file.flush()
#
# 刷新文件内部缓冲,直接把内部缓冲区的数据立刻写入文件, 而不是被动的等待输出缓冲区写入。
#
# 3
# file.fileno()
#
# 返回一个整型的文件描述符(file descriptor FD 整型), 可以用在如os模块的read方法等一些底层操作上。
#
# 6
# file.read([size])
#
# 从文件读取指定的字节数,如果未给定或为负则读取所有。
#
# 7
# file.readline([size])
#
# 读取整行,包括 "\n" 字符。
#
# 8
# file.readlines([sizeint])
#
# 读取所有行并返回列表,若给定sizeint>0,返回总和大约为sizeint字节的行, 实际读取值可能比 sizeint 较大, 因为需要填充缓冲区。
#
# 9
# file.seek(offset[, whence])
#
# 设置文件当前位置
#
# 10
# file.tell()
#
# 返回文件当前位置。
#
# 12
# file.write(str)
#
# 将字符串写入文件,返回的是写入的字符长度。
#
# 13
# file.writelines(sequence)
#
# 向文件写入一个序列字符串列表,如果需要换行则要自己加入每行的换行符。
filePth = "./3.txt"
# FIX: the original opened the file and never closed it, so the handle
# leaked and the write relied on interpreter shutdown to be flushed.
# A context manager guarantees flush + close. Also use write() for a
# single string -- writelines() expects an iterable of lines.
with open(filePth, "a+", encoding="utf-8") as f:
    f.write("qwe\n")
<filename>kolibri/core/tasks/test/taskrunner/test_worker.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
import time
import pytest
from mock import patch
from kolibri.core.tasks.job import Job
from kolibri.core.tasks.job import State
from kolibri.core.tasks.test.base import connection
from kolibri.core.tasks.worker import Worker
# Queue name shared by every test in this module.
QUEUE = "pytest"
# Deliberately non-ASCII (Arabic) message, used to exercise unicode
# handling in task failure reporting.
error_text = "كوليبري is not a function"
def error_func():
    """
    Raise a TypeError whose message contains unicode text.

    Defined at module level (rather than inline in a test) so the Job
    constructor can be given its importable module path.
    """
    exc = TypeError(error_text)
    raise exc
@pytest.fixture
def worker():
    """Yield a Worker bound to a fresh connection, with storage cleared
    both before the test runs and after it finishes."""
    with connection() as conn:
        w = Worker(conn, regular_workers=1, high_workers=1)
        w.storage.clear(force=True)
        yield w
        w.storage.clear(force=True)
        w.shutdown()
@pytest.mark.django_db
class TestWorker:
    """End-to-end tests driving jobs through a real Worker and its storage."""

    def test_enqueue_job_runs_job(self, worker):
        """A queued job is eventually executed to completion."""
        # The builtin `id` serves as a trivial task that always succeeds.
        job = Job(id, 9)
        worker.storage.enqueue_job(job, QUEUE)
        # Poll storage until the worker reports the job finished.
        while job.state != State.COMPLETED:
            job = worker.storage.get_job(job.job_id)
            time.sleep(0.5)
        try:
            # Get the future, or pass if it has already been cleaned up.
            future = worker.future_job_mapping[job.job_id]
            future.result()
        except KeyError:
            pass
        assert job.state == State.COMPLETED

    def test_can_handle_unicode_exceptions(self, worker):
        """Failure info must survive as plain text even when it contains unicode."""
        # Make sure task exception info is not an object, but is either a string or None.
        # See Storage.mark_job_as_failed in kolibri.core.tasks.storage for more details on why we do this.
        # create a job that triggers an exception
        job = Job("kolibri.core.tasks.test.taskrunner.test_worker.error_func")
        job_id = worker.storage.enqueue_job(job, QUEUE)
        # Wait until the worker has at least started (de-queued) the job.
        while job.state == State.QUEUED:
            job = worker.storage.get_job(job.job_id)
            time.sleep(0.5)
        returned_job = worker.storage.get_job(job_id)
        assert returned_job.state == "FAILED"
        assert returned_job.exception == "TypeError"
        assert error_text in returned_job.traceback

    def test_enqueue_job_writes_to_storage_on_success(self, worker):
        """Completing a job must call Storage.complete_job exactly once, with the job's id."""
        with patch.object(
            worker.storage, "complete_job", wraps=worker.storage.complete_job
        ) as spy:
            # this job should never fail.
            job = Job(id, 9)
            worker.storage.enqueue_job(job, QUEUE)
            while job.state == State.QUEUED:
                job = worker.storage.get_job(job.job_id)
                time.sleep(0.5)
            try:
                # Get the future, or pass if it has already been cleaned up.
                future = worker.future_job_mapping[job.job_id]
                future.result()
            except KeyError:
                pass
            # verify that we sent a message through our backend
            assert spy.call_count == 1
            call_args = spy.call_args
            job_id = call_args[0][0]
            # verify that we're setting the correct job_id
            assert job_id == job.job_id

    def test_regular_tasks_wait_when_regular_workers_busy(self, worker):
        """A REGULAR-priority job is not scheduled while the lone regular worker is busy."""
        # We have one task running right now (fake entry in the mapping).
        worker.future_job_mapping = {"job_id": "future"}
        job = Job(id, 10)
        worker.storage.enqueue_job(job, QUEUE, "REGULAR")
        job = worker.get_next_job()
        worker.future_job_mapping.clear()
        # Worker must not get this job since our regular worker is busy.
        assert job is None

    def test_high_tasks_dont_wait_when_regular_workers_busy(self, worker):
        """A HIGH-priority job is scheduled even though the regular worker is busy."""
        # We have one task running right now (fake entry in the mapping).
        worker.future_job_mapping = {"job_id": "future"}
        job = Job(id, 10)
        worker.storage.enqueue_job(job, QUEUE, "HIGH")
        job = worker.get_next_job()
        worker.future_job_mapping.clear()
        # Worker must get this job since its a 'high' priority job.
        assert isinstance(job, Job) is True
|
# oslo_config/tests/test_fixture.py
#
# Copyright 2013 Mirantis, Inc.
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base
from oslo_config import cfg
from oslo_config import fixture as config
# Module-level handle to the global configuration object shared by all tests.
conf = cfg.CONF
class ConfigTestCase(base.BaseTestCase):
    """Exercises the oslo.config test fixture: value overrides, cleanup,
    and (CLI) option registration, all against the global `conf` object."""

    def setUp(self):
        super(ConfigTestCase, self).setUp()
        fixture = self.useFixture(config.Config(conf))
        self.config_fixture = fixture
        self.config = fixture.config
        fixture.register_opt(
            cfg.StrOpt('testing_option', default='initial_value'))

    def test_overridden_value(self):
        # The default is visible before any override.
        self.assertEqual(conf.get('testing_option'), 'initial_value')
        self.config(testing_option='changed_value')
        # Global conf and the fixture's conf agree on the overridden value.
        self.assertEqual(conf.get('testing_option'),
                         self.config_fixture.conf.get('testing_option'))

    def test_cleanup(self):
        self.config(testing_option='changed_value')
        self.assertEqual(self.config_fixture.conf.get('testing_option'),
                         'changed_value')
        # reset() discards overrides, restoring the registered default.
        self.config_fixture.conf.reset()
        self.assertEqual(conf.get('testing_option'), 'initial_value')

    def test_register_option(self):
        new_opt = cfg.StrOpt('new_test_opt', default='initial_value')
        self.config_fixture.register_opt(new_opt)
        self.assertEqual(conf.get('new_test_opt'), new_opt.default)

    def test_register_options(self):
        opts = [cfg.StrOpt('first_test_opt', default='initial_value_1'),
                cfg.StrOpt('second_test_opt', default='initial_value_2')]
        self.config_fixture.register_opts(opts)
        self.assertEqual(conf.get('first_test_opt'), opts[0].default)
        self.assertEqual(conf.get('second_test_opt'), opts[1].default)

    def test_cleanup_unregister_option(self):
        new_opt = cfg.StrOpt('new_test_opt', default='initial_value')
        self.config_fixture.register_opt(new_opt)
        self.assertEqual(conf.get('new_test_opt'), new_opt.default)
        # cleanUp() must unregister the option entirely, not just reset it.
        self.config_fixture.cleanUp()
        self.assertRaises(cfg.NoSuchOptError, conf.get, 'new_test_opt')

    def test_register_cli_option(self):
        new_opt = cfg.StrOpt('new_test_opt', default='initial_value')
        self.config_fixture.register_cli_opt(new_opt)
        self.assertEqual(conf.get('new_test_opt'), new_opt.default)

    def test_register_cli_options(self):
        opts = [cfg.StrOpt('first_test_opt', default='initial_value_1'),
                cfg.StrOpt('second_test_opt', default='initial_value_2')]
        self.config_fixture.register_cli_opts(opts)
        self.assertEqual(conf.get('first_test_opt'), opts[0].default)
        self.assertEqual(conf.get('second_test_opt'), opts[1].default)

    def test_cleanup_unregister_cli_option(self):
        new_opt = cfg.StrOpt('new_test_opt', default='initial_value')
        self.config_fixture.register_cli_opt(new_opt)
        self.assertEqual(conf.get('new_test_opt'), new_opt.default)
        self.config_fixture.cleanUp()
        self.assertRaises(cfg.NoSuchOptError, conf.get, 'new_test_opt')
|
import re
from basic import *
# Characters that can start (or extend) a multi-character operator such as
# '&&', '||', '<=', '>=', '==', '!=' or ':' — consumed greedily by make_operator().
SPECIAL_CHARACTERS = '&|<>=:#'
class Lexer:
    """Hand-written tokenizer: turns a source string into a list of Token
    objects, resolving variable names against a chain of Context objects.

    All public methods follow the (value, error) return convention: exactly
    one of the pair is non-None.
    """

    def __init__(self, text, context):
        self.text = text
        self.context = context  # innermost Context used for variable lookup
        self.pos = -1           # advance() below moves this to 0
        self.cur_char = None
        self.advance()

    def advance(self):
        """Advances the pointer one step"""
        self.pos += 1
        # if position is valid; cur_char becomes None past end of text
        self.cur_char = self.text[self.pos] if self.pos < len(self.text) else None

    def make_tokens(self):
        """Makes tokens from stored text.

        Returns (tokens, None) on success, or (None, error) on the first
        lexing failure.
        """
        tokens = []
        parenthesis_balance = 0
        # handles case where text is empty
        if len(self.text.strip()) == 0:
            return None, IllegalCharError('Unexpected end of statement')
        # while there is still more text to parse
        while self.cur_char is not None:
            # if current character is a digit
            if self.cur_char.isdigit():
                number, error = self.make_number()
                if error is not None:
                    return None, error
                tokens.append(number)
                continue
            # if current character is in the alphabet (part of variable name)
            if self.cur_char.isalpha():
                var, error = self.make_variable()
                if error is not None:
                    return None, error
                tokens.append(var)
                continue
            # if current character could be the start of a complex operator
            if self.cur_char in SPECIAL_CHARACTERS:
                operator, error = self.make_operator()
                if error is not None:
                    return None, error
                tokens.append(operator)
                continue
            # if character is start of a char variable (single-quoted literal)
            if self.cur_char == '\'':
                self.advance()
                # NOTE(review): char is None here when the opening quote ends
                # the text; that falls through to the "no ending quote" error.
                char = self.cur_char
                # if current char is another quote, there is nothing in the middle
                # thus make an empty character
                if char == '\'':
                    char = ''
                    tokens.append(Token(TT_CHAR, char))
                    self.advance()
                    continue
                if char == '\\':
                    # otherwise if there is an escape character, read the next one instead
                    self.advance()
                    if self.cur_char == 'n':
                        char = '\n'
                    else:
                        char = self.cur_char
                self.advance()
                # if no ending quote found
                if self.cur_char != '\'':
                    return None, IllegalArgumentError('Illegal character ' + str(self.cur_char))
                # append char
                tokens.append(Token(TT_CHAR, char))
                self.advance()
                continue
            if self.cur_char == '+':
                tokens.append(Token(TT_ADD))
            elif self.cur_char == '-':
                # if current character is first character or last token
                # was not int or float, '-' is a unary minus, not subtraction
                if len(tokens) == 0 or (tokens[-1].type != TT_INT and tokens[-1].type != TT_FLOAT):
                    tokens.append(Token(TT_UNARY_MINUS))
                else:
                    tokens.append(Token(TT_SUBTRACT))
            elif self.cur_char == '*':
                tokens.append(Token(TT_MULTIPLY))
            elif self.cur_char == '/':
                tokens.append(Token(TT_DIVIDE))
            elif self.cur_char == '%':
                tokens.append(Token(TT_MODULO))
            elif self.cur_char == '(':
                tokens.append(Token(TT_LPAREN))
                parenthesis_balance += 1
            elif self.cur_char == ')':
                tokens.append(Token(TT_RPAREN))
                parenthesis_balance -= 1
                # checks parenthesis balance: a ')' with no open '(' is fatal
                if parenthesis_balance < 0:
                    return None, IllegalCharError('Unbalanced parenthesis')
            elif self.cur_char == '!':
                tokens.append(Token(TT_NOT))
            elif self.cur_char.isspace():
                # pass if space
                pass
            else:
                # invalid character
                return None, IllegalCharError('\'' + self.cur_char + '\'')
            self.advance()
        # unclosed '(' detected only at end of input
        if parenthesis_balance != 0:
            return None, IllegalCharError('Unbalanced parenthesis')
        return tokens, None

    def make_number(self):
        """
        Parses a number starting from current value pointed to.
        Number could be INT or FLOAT.

        Returns (Token, None) or (None, error) for a second decimal point.
        """
        num = 0  # stores the number, accumulated digit by digit
        is_float = False
        decimal_digits = 0
        # while there is more text to parse and the current char is number or decimal
        while self.cur_char is not None and (self.cur_char.isdigit() or self.cur_char == '.'):
            # if current char is decimal point
            if self.cur_char == '.':
                # if there was already a decimal point
                if is_float:
                    return None, IllegalCharError('\'.\'')
                decimal_digits = 1
                is_float = True
                self.advance()
                continue  # skip to next iteration
            # if is decimal, add to decimal places instead
            if is_float:
                num += int(self.cur_char) / (10 ** decimal_digits)
                decimal_digits += 1
            else:
                # else add normally (shift accumulated value one decimal place)
                num *= 10
                num += int(self.cur_char)
            self.advance()
        # assign type to token
        return Token((TT_FLOAT if is_float else TT_INT), num), None

    def make_variable(self):
        """
        Parses a variable or language constant from current value pointed to.
        Variables are taken from self.context.

        Returns (value, None) or (None, error) when the name is unknown.
        """
        name = ''
        # while character exists and is still alphanumeric
        # NOTE(review): digits are not accepted here, so names like `x2`
        # stop at the digit — confirm this is intended.
        while self.cur_char is not None and (self.cur_char.isalpha() or self.cur_char == '_'):
            name += self.cur_char
            self.advance()
        # if name is a language constant
        if name in CONSTANTS:
            return CONSTANTS[name], None
        # if variable exists
        # look for it in context, walking outward through parent scopes
        cur_context = self.context
        while cur_context:
            # if variable exists in this context, return it
            if name in cur_context.variable_cache:
                return cur_context.variable_cache[name], None
            cur_context = cur_context.parent
        # otherwise variable was not found
        return None, RuntimeError('Variable ' + name + ' not found')

    def make_operator(self):
        """Parses a complex operator from the current value pointed to.

        Greedily consumes SPECIAL_CHARACTERS, then maps the collected string
        to an operator token; returns (None, error) for unknown sequences.
        """
        operator = ''
        while self.cur_char is not None and self.cur_char in SPECIAL_CHARACTERS:
            operator += self.cur_char
            self.advance()
        if operator == '&&':
            return Token(TT_AND), None
        if operator == '||':
            return Token(TT_OR), None
        if operator == '!':
            return Token(TT_NOT), None
        if operator == '>':
            return Token(TT_GREATER), None
        if operator == '<':
            return Token(TT_LESS), None
        if operator == '>=':
            return Token(TT_GREATER_EQUALS), None
        if operator == '<=':
            return Token(TT_LESS_EQUALS), None
        if operator == '==':
            return Token(TT_EQUALS), None
        if operator == '!=':
            return Token(TT_NOT_EQUALS), None
        if operator == ':':
            return Token(TT_ARRAY_ACCESS), None
        return None, RuntimeError('Operator ' + operator + ' not found')
|
# Blending/comment_params.py
def lgbm_get_params():
    """Return the fixed list of 12 tuned LightGBM hyper-parameter sets
    used by the blending ensemble. Values come from a prior tuning run."""
    return [
        {'num_iterations': 503, 'num_leaves': 375,
         'learning_rate': 0.03836392757670029, 'max_depth': 63,
         'lambda_l1': 22.399701123004604, 'lambda_l2': 23.699724703370013,
         'colsample_bynode': 0.9110384820808469,
         'colsample_bytree': 0.9501873812869108,
         'bagging_fraction': 0.9596589254449273, 'bagging_freq': 2,
         'max_bin': 2932, 'min_data_in_leaf': 1328},
        {'num_iterations': 649, 'num_leaves': 70,
         'learning_rate': 0.03641938649135499, 'max_depth': 48,
         'lambda_l1': 5.364427515350181, 'lambda_l2': 35.168999246659034,
         'colsample_bynode': 0.4800511610351818,
         'colsample_bytree': 0.8612169247403304,
         'bagging_fraction': 0.4372602355567855, 'bagging_freq': 6,
         'max_bin': 2743, 'min_data_in_leaf': 1038},
        {'num_iterations': 592, 'num_leaves': 3699,
         'learning_rate': 0.044455157068508964, 'max_depth': 20,
         'lambda_l1': 35.93536492691437, 'lambda_l2': 4.693821646007298,
         'colsample_bynode': 0.5023480222471233,
         'colsample_bytree': 0.4111703612029055,
         'bagging_fraction': 0.9656709780441118, 'bagging_freq': 8,
         'max_bin': 3848, 'min_data_in_leaf': 1565},
        {'num_iterations': 899, 'num_leaves': 506,
         'learning_rate': 0.028864334583348805, 'max_depth': 18,
         'lambda_l1': 49.18173778418377, 'lambda_l2': 17.957576284918726,
         'colsample_bynode': 0.49835869428173585,
         'colsample_bytree': 0.6416774216548786,
         'bagging_fraction': 0.6868836858528534, 'bagging_freq': 2,
         'max_bin': 2227, 'min_data_in_leaf': 1696},
        {'num_iterations': 540, 'num_leaves': 1537,
         'learning_rate': 0.025879523488669638, 'max_depth': 30,
         'lambda_l1': 4.707282653530028, 'lambda_l2': 34.606775928305126,
         'colsample_bynode': 0.9775852980503293,
         'colsample_bytree': 0.4475769902531044,
         'bagging_fraction': 0.33059359051543374, 'bagging_freq': 7,
         'max_bin': 2034, 'min_data_in_leaf': 1597},
        {'num_iterations': 999, 'num_leaves': 4095,
         'learning_rate': 0.01, 'max_depth': 54,
         'lambda_l1': 21.046193321419064, 'lambda_l2': 27.819861130490782,
         'colsample_bynode': 0.7038963804420113,
         'colsample_bytree': 0.4,
         'bagging_fraction': 0.7543840264573496, 'bagging_freq': 10,
         'max_bin': 4474, 'min_data_in_leaf': 1258},
        {'num_iterations': 159, 'num_leaves': 4091,
         'learning_rate': 0.07794072936665912, 'max_depth': 34,
         'lambda_l1': 19.296982857404167, 'lambda_l2': 28.651557192388893,
         'colsample_bynode': 0.44379380592503936,
         'colsample_bytree': 0.8220348950720999,
         'bagging_fraction': 0.8526631673159721, 'bagging_freq': 2,
         'max_bin': 2674, 'min_data_in_leaf': 781},
        {'num_iterations': 541, 'num_leaves': 933,
         'learning_rate': 0.03828634413994907, 'max_depth': 36,
         'lambda_l1': 26.4765937140306, 'lambda_l2': 41.113231886770855,
         'colsample_bynode': 0.9560255089415624,
         'colsample_bytree': 0.977168022485364,
         'bagging_fraction': 0.37546927644973427, 'bagging_freq': 2,
         'max_bin': 3921, 'min_data_in_leaf': 996},
        {'num_iterations': 353, 'num_leaves': 3274,
         'learning_rate': 0.06656112499571927, 'max_depth': 31,
         'lambda_l1': 24.14505590136565, 'lambda_l2': 36.11271699540278,
         'colsample_bynode': 0.7099421636523457,
         'colsample_bytree': 0.4,
         'bagging_fraction': 0.5040322098870347, 'bagging_freq': 5,
         'max_bin': 3694, 'min_data_in_leaf': 1010},
        {'num_iterations': 238, 'num_leaves': 818,
         'learning_rate': 0.1037741228302401, 'max_depth': 46,
         'lambda_l1': 40.402007888863416, 'lambda_l2': 6.328091554998547,
         'colsample_bynode': 0.8204791211960999,
         'colsample_bytree': 0.44074459547022976,
         'bagging_fraction': 0.8882992620572462, 'bagging_freq': 10,
         'max_bin': 1719, 'min_data_in_leaf': 1002},
        {'num_iterations': 294, 'num_leaves': 4066,
         'learning_rate': 0.05549085401009731, 'max_depth': 26,
         'lambda_l1': 20.866735289940944, 'lambda_l2': 18.990142241588075,
         'colsample_bynode': 0.40669390037717434,
         'colsample_bytree': 0.5600243385646619,
         'bagging_fraction': 0.46849103038895057, 'bagging_freq': 10,
         'max_bin': 3943, 'min_data_in_leaf': 1475},
        {'num_iterations': 309, 'num_leaves': 1855,
         'learning_rate': 0.08529479079275457, 'max_depth': 64,
         'lambda_l1': 44.87421002364315, 'lambda_l2': 30.268987553919825,
         'colsample_bynode': 0.8779495780555145,
         'colsample_bytree': 0.8595621790791601,
         'bagging_fraction': 0.5022315499418838, 'bagging_freq': 8,
         'max_bin': 527, 'min_data_in_leaf': 1737},
    ]
def xgb_get_params():
    """Return XGBoost hyper-parameter sets; currently an empty placeholder."""
    return []
|
from collections import Counter, OrderedDict
import torch
import torch.utils.data as data_utils
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.clip_grad import clip_grad_norm_
from torch.utils.data import DataLoader
from allennlp.modules import FeedForward
from allennlp.nn.activations import Activation
import logging
from datetime import datetime
from tqdm import tqdm
import numpy as np
import joblib
import math
import os
from models.model import Model, TUNE, PREDICT
from layers.attention import Attention
from layers.recurrent import RNN
from modules.relation_extractor import RelationExtractor
from modules.hierarchical_document_classifier import HierarchicalClassifierMulti
from layers.utils import set_model_device, set_tensor_device
from models.dataset_symptoms import DatasetSymptoms
from config.constants_pulmonary import INFILTRATES, EXTRAPARENCHYMAL
from config.constants import ENTITIES, RELATIONS, DOC_LABELS
from layers.utils import get_loss, aggregate, PRFAggregator
from layers.plotting import PlotLoss
from scoring.scorer_symptoms import ScorerSymptoms
def tensor_dict_collect(X):
    """Collate a sequence of identically-keyed tensor dicts into a single
    OrderedDict whose values are stacked along a new leading (batch) dim.
    """
    collected = OrderedDict()
    first = True
    for sample in X:
        if first:
            # Key set (and order) is fixed by the first element; later
            # elements introducing new keys would raise KeyError below.
            for key in sample:
                collected[key] = []
            first = False
        for key, value in sample.items():
            collected[key].append(value)
    stacked = OrderedDict()
    for key, values in collected.items():
        stacked[key] = torch.stack(values, dim=0)
    return stacked
class ModelSymptoms(Model):
    """Span + relation extraction model for symptom annotation.

    Pipeline: an optional RNN encoder over the token embeddings, followed by
    a RelationExtractor head that scores spans and role (relation) pairs.
    fit/predict override the Model base class to drive training/inference
    with PyTorch DataLoaders.
    """

    def __init__(self, \
        hyperparams,
        dataset_params,
        dataloader_params,
        optimizer_params,
        num_workers,
        num_epochs,
        dataset_class = DatasetSymptoms,
        scorer_class = ScorerSymptoms
        ):
        # Delegate common bookkeeping (params, dataset/scorer classes) to Model.
        super().__init__( \
            hyperparams = hyperparams,
            dataset_params = dataset_params,
            dataloader_params = dataloader_params,
            optimizer_params = optimizer_params,
            num_workers = num_workers,
            num_epochs = num_epochs,
            dataset_class = dataset_class,
            scorer_class = scorer_class
            )
        self.use_rnn = self.hyperparams['use_rnn']
        # How the per-component losses are reduced to a scalar (see loss()).
        self.loss_reduction = self.hyperparams['loss_reduction']
        # Optional recurrent encoder applied before relation extraction.
        if self.use_rnn:
            self.rnn = RNN(**self.hyperparams['rnn'])
        self.relation_extractor = RelationExtractor(**self.hyperparams['relation_extractor'])
        self.get_summary()

    def forward(self, seq_tensor, seq_mask, span_indices, span_mask, verbose=False):
        """Score spans and role pairs for a batch; returns an OrderedDict."""
        # seq_tensor (batch_size, sentence_length, embed_dim)
        # seq_mask (batch_size, sentence_length)
        # span_indices (span_count, 2)
        # span_mask (batch_size, span_count)
        output = OrderedDict()
        # recurrent layer (re-encodes the sequence in place)
        if self.use_rnn:
            seq_tensor = self.rnn(seq_tensor, seq_mask)
        # relation extraction: scores all spans, then roles over the top spans
        span_scores, top_role_scores, top_span_mask, top_indices = \
            self.relation_extractor( \
                seq_tensor = seq_tensor,
                span_indices = span_indices,
                span_mask = span_mask,
                seq_mask = seq_mask,
                verbose = verbose)
        output["span_scores"] = span_scores
        output["top_role_scores"] = top_role_scores
        output["top_span_mask"] = top_span_mask
        output["top_indices"] = top_indices
        return output

    # OVERRIDE
    def fit(self, X, y, device=None, path=None, shuffle=True):
        """Train for num_epochs over (X, y); returns True when finished."""
        logging.info('')
        logging.info('='*72)
        logging.info("Fit")
        logging.info('='*72)
        # Get/set device
        set_model_device(self, device)
        # Configure training mode
        self.train()
        # Set number of cores
        torch.set_num_threads(self.num_workers)
        # Create data set
        dataset = self.dataset_class(X, y=y, **self.dataset_params, device=device)
        # Create data loader
        dataloader = DataLoader(dataset, shuffle=shuffle, **self.dataloader_params)
        # Create optimizer
        optimizer = optim.Adam(self.parameters(), **self.optimizer_params)
        # Create loss plotter
        plotter = PlotLoss(path=path)
        # Create prf aggregator (precision/recall/F1 counts across batches)
        prf_agg = PRFAggregator()
        # Loop on epochs
        pbar = tqdm(total=self.num_epochs)
        for j in range(self.num_epochs):
            loss_epoch = 0
            losses_epoch = OrderedDict()
            prf = []  # NOTE(review): appears unused within this loop — confirm
            # Loop on mini-batches
            for i, (indices, seq_tensor, seq_mask, span_indices, span_mask, y_true) in enumerate(dataloader):
                verbose = False #(i == 0) and (j == 0)
                # Reset gradients
                self.zero_grad()
                y_pred = self( \
                    seq_tensor = seq_tensor,
                    seq_mask = seq_mask,
                    span_indices = span_indices,
                    span_mask = span_mask,
                    verbose = verbose)
                loss, loss_dict = self.loss(y_true, y_pred)
                plotter.update_batch(loss, loss_dict)
                prf_agg.update_counts(self.perf_counts(y_true, y_pred))
                # Backprop loss
                loss.backward()
                loss_epoch += loss.item()
                # Accumulate per-component losses across the epoch
                for k, v in loss_dict.items():
                    if i == 0:
                        losses_epoch[k] = v.item()
                    else:
                        losses_epoch[k] += v.item()
                # Clip gradient norm before the optimizer step
                clip_grad_norm_(self.parameters(), self.hyperparams['grad_max_norm'])
                # Update
                optimizer.step()
            plotter.update_epoch(loss_epoch, losses_epoch)
            # Build the progress-bar status line: total + per-component losses + PRF
            msg = []
            msg.append('epoch={}'.format(j))
            msg.append('{}={:.1e}'.format('Total', loss_epoch))
            for k, ls in losses_epoch.items():
                msg.append('{}={:.1e}'.format(k, ls))
            msg.append(prf_agg.prf())
            prf_agg.reset()
            msg = ", ".join(msg)
            pbar.set_description(desc=msg)
            pbar.update()
        print()
        pbar.close()
        return True

    # OVERRIDE
    def predict(self, X, device=None, path=None):
        """Run inference over X; returns the postprocessed predictions list."""
        logging.info('')
        logging.info('='*72)
        logging.info("Predict")
        logging.info('='*72)
        # Do not shuffle
        shuffle = False
        # Get/set device
        set_model_device(self, device)
        # Configure training mode (eval disables dropout/batch-norm updates)
        self.eval()
        # Set number of cores
        torch.set_num_threads(self.num_workers)
        # Create data set (no labels at prediction time)
        dataset = self.dataset_class(X, **self.dataset_params, device=device)
        # Create data loader
        dataloader = DataLoader(dataset, shuffle=False, **self.dataloader_params)
        # NOTE(review): len(dataloader) is already the number of batches;
        # dividing by batch_size again likely understates the tqdm total — confirm.
        pbar = tqdm(total=int(len(dataloader)/dataloader.batch_size))
        y = []
        for i, (indices, seq_tensor, seq_mask, span_indices, span_mask) in enumerate(dataloader):
            verbose = False
            # Push data through model
            out = self( \
                seq_tensor = seq_tensor,
                seq_mask = seq_mask,
                span_indices = span_indices,
                span_mask = span_mask,
                verbose = verbose)
            # Convert raw scores back into dataset-level predictions
            y_batch = dataset.postprocess_y( \
                indices = indices,
                span_scores = out["span_scores"],
                span_mask = span_mask,
                role_scores = out["top_role_scores"],
                role_span_mask = out["top_span_mask"],
                role_indices = out["top_indices"],
                )
            y.extend(y_batch)
            pbar.update()
        pbar.close()
        return y

    def loss(self, y_true, y_pred, span_map=None):
        """Combine span and role losses into one scalar.

        Returns (scalar_loss, OrderedDict of component losses).
        span_map is currently unused.
        """
        span_loss, role_loss = self.relation_extractor.loss( \
            span_labels = y_true['span_labels'],
            span_scores = y_pred['span_scores'],
            span_mask = y_true['span_mask'],
            role_labels = y_true['role_labels'],
            top_role_scores = y_pred['top_role_scores'],
            top_span_mask = y_pred['top_span_mask'],
            top_indices = y_pred['top_indices'],
            )
        loss_dict = OrderedDict()
        loss_dict["span_loss"] = span_loss
        loss_dict["role_loss"] = role_loss
        # Reduce the stacked component losses per self.loss_reduction
        loss = torch.stack([v for k, v in loss_dict.items()])
        loss = aggregate(loss, self.loss_reduction)
        return (loss, loss_dict)

    def perf_counts(self, y_true, y_pred):
        """Return span/role TP-FP-FN style counts for the PRF aggregator."""
        span_counts, role_counts = self.relation_extractor.perf_counts( \
            span_labels = y_true["span_labels"],
            span_scores = y_pred["span_scores"],
            span_mask = y_true["span_mask"],
            role_labels = y_true["role_labels"],
            top_role_scores = y_pred["top_role_scores"],
            top_span_mask = y_pred["top_span_mask"],
            top_indices = y_pred["top_indices"])
        d = OrderedDict()
        d["span"] = span_counts
        d["role"] = role_counts
        return d

    # Commented-out legacy method kept as-is for reference.
    '''
    def prf(self, y_true, y_pred):
        span_prf, role_prf = self.relation_extractor.prf( \
            span_labels = y_true["span_labels"],
            span_scores = y_pred["span_scores"],
            span_mask = y_true["span_mask"],
            role_labels = y_true["role_labels"],
            top_role_scores = y_pred["top_role_scores"],
            top_span_mask = y_pred["top_span_mask"],
            top_indices = y_pred["top_indices"])
        prf_dict = OrderedDict()
        prf_dict["span"] = span_prf
        prf_dict["role"] = role_prf
        return prf_dict
    '''
|
"""
Plots the Wasserstein-2 distance as a function of translational distance.
"""
import pysdot as ot
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
xbnds = [0.0,1.0] # minimum and maximum x values
circle_radius = 0.1
# y-range is a band two circle-radii above and below the vertical midpoint
ybnds = [0.5-2*circle_radius,0.5+2*circle_radius] # minimum and maximum y values
# Grid resolution: 150 cells in x, proportionally fewer in y so cells stay square
Ns = [150,int(150 * (ybnds[1]-ybnds[0])/(xbnds[1]-xbnds[0]))]
bbox = ot.BoundingBox(xbnds[0],xbnds[1],ybnds[0],ybnds[1])
grid = ot.RegularGrid(bbox, Ns[0], Ns[1])
def CreateCircleDensity(loc, radius):
    """ Returns a normalized density on the global grid that is uniform
    inside a circle and zero outside.

    ARGUMENTS:
        loc (np.array) : Center point of the circle.
        radius (float) : Radius of the circle. Grid cells whose centroids
            fall inside this radius get nonzero density.
    """
    dens = np.zeros(Ns)
    for ix in range(Ns[0]):
        for iy in range(Ns[1]):
            centroid = grid.Center(ix, iy)
            dens[ix, iy] = 1.0 if la.norm(centroid - loc) < radius else 0.0
    # Normalize by the circle's area so the density integrates to ~1
    return dens / (np.pi * radius**2)
class InitialState:
    """ Builds the initial density and an optimal quantization of that
    density for a circle at position loc with the given radius. An optional
    penalty switches the quantization to an unbalanced formulation.
    """

    def __init__(self, loc, radius, num_points, dens_vals0, ot_class, penalty=None):
        self.center = loc
        self.radius = radius
        self.dens_vals = dens_vals0
        self.dist = ot.DiscretizedDistribution(grid, self.dens_vals)
        # Equal probability per point, summing to the total density mass.
        total_mass = np.sum(grid.dx*grid.dy*dens_vals0)
        self.probs = np.full(num_points, total_mass/num_points)
        # Solve for a (balanced) centroidal optimal quantization of the circle.
        opts = {'Lloyd Steps':200, 'Lloyd Tol':1e-5, 'GTol Abs':1e-9, 'Max Steps': 300}
        if penalty is not None:
            opts['Penalty'] = penalty
        seeds = self._GenerateSeedPts(num_points)
        self.diag = ot_class.BuildCentroidal(self.dist, seeds, self.probs, opts)
        self.pts = self.diag.Centroids(self.dist)

    def _GenerateSeedPts(self, num_points):
        """ Rejection-samples random points that lie in the support (nonzero
        region) of the density — a good way to seed the centroidal solver.

        ARGUMENTS:
            num_points (int) : Number of points to generate.
        RETURNS:
            np.array : a 2xN array of points
        """
        scale = np.array([xbnds[1]-xbnds[0], ybnds[1]-ybnds[0]])
        offset = np.array([xbnds[0], ybnds[0]])
        pts = np.zeros((2, num_points))
        for col in range(num_points):
            while True:
                # Draw a uniform random point in the domain; keep it only
                # if it lands inside the circle.
                candidate = offset + scale * np.random.rand(2)
                if la.norm(candidate - self.center) < self.radius:
                    break
            pts[:, col] = candidate
        return pts
num_pts = 15
loc0 = np.array([circle_radius+0.05, 0.5])  # initial circle center
dens_vals0 = CreateCircleDensity(loc0, circle_radius)
# Translation distances to sweep; the circle slides right until it is
# mostly outside the domain.
dxs = np.linspace(loc0[0],1+0.9*circle_radius,50)
# Color scale for imshow; vmin below zero just darkens the background.
vmin = -10
vmax = np.max(dens_vals0)
# Per-metric accumulators, one value per translation distance
w2s = []
l2s = []
qrs = []
ghks = []
#######################
# Balanced
initial_state = InitialState(loc0, circle_radius, num_pts, dens_vals0, ot.SemidiscreteOT)
prices = initial_state.diag.Prices()
plt.figure(figsize=(10,10*(Ns[1]/Ns[0])))
#ot.PlotDiagram(initial_state.diag, distribution=initial_state.dist)
plt.imshow(dens_vals0.T, extent=[xbnds[0],xbnds[1],ybnds[0],ybnds[1]], cmap='Greys_r', vmin=vmin,vmax=vmax)
seed_pts = initial_state.diag.Centroids(initial_state.dist)
plt.plot(seed_pts[0,:],seed_pts[1,:],'.r', markersize=14)
plt.axis('off')
plt.savefig("InitialDensity.png", bbox_inches='tight')
plt.close()
# NOTE(review): show() after close() has nothing left to display.
plt.show()
# NOTE(review): quit() aborts the script here — every computation and plot
# below this line is unreachable. Looks like debugging leftover; confirm
# and remove to actually run the sweep.
quit()
i = 0
for dx in dxs:
    loc = np.array([dx, 0.5])
    dens_vals = CreateCircleDensity(loc, circle_radius)
    plt.figure(figsize=(10,10*(Ns[1]/Ns[0])))
    plt.imshow(dens_vals.T, cmap='Greys_r', vmin=vmin,vmax=vmax)
    plt.axis('off')
    plt.savefig("Density{:02d}.png".format(i), bbox_inches='tight')
    plt.close()
    # Scaled squared pixel difference as the L2 baseline metric
    l2s.append(0.01*np.sum((dens_vals-dens_vals0)**2)/(np.prod(Ns)))
    # Renormalize so both densities carry equal total mass (balanced OT requirement)
    dens_vals *= (np.sum(dens_vals0)/np.sum(dens_vals))
    dist = ot.DiscretizedDistribution(grid, dens_vals)
    # Compute the transport between the initial quantization points and the
    # translated density; warm-start from the previous prices.
    solver = ot.SemidiscreteOT(dist, initial_state.pts, initial_state.probs)
    num_pts = initial_state.pts.shape[1]
    opts = {'Max Steps':500, 'GTol Abs':1e-6, 'FTol Abs':0.0, 'Accept Ratio':0.01, 'Shrink Ratio':0.01}
    prices, obj = solver.Solve(prices, opts)
    w2s.append(-2*obj)
    i+=1
    print('Wasserstein Distance: ', -obj)
#######################
# QR Unbalanced
penalty = 0.75
initial_state = InitialState(loc0, circle_radius, num_pts, dens_vals0, ot.SemidiscreteQR, penalty)
prices = initial_state.diag.Prices()
for dx in dxs:
    loc = np.array([dx, 0.5])
    dens_vals = CreateCircleDensity(loc, circle_radius)
    dist = ot.DiscretizedDistribution(grid, dens_vals)
    # Unbalanced solver: no mass renormalization needed, unlike the balanced case.
    solver = ot.SemidiscreteQR(dist, initial_state.pts, initial_state.probs,penalty)
    num_pts = initial_state.pts.shape[1]
    opts = {'Max Steps':500, 'GTol Abs':1e-6, 'FTol Abs':0.0, 'Accept Ratio':0.01, 'Shrink Ratio':0.01}
    prices, obj = solver.Solve(prices, opts)
    qrs.append(-2*obj)
    print('Wasserstein Distance: ', -obj)
#######################
# GHK Unbalanced (same sweep, Gaussian-Hellinger-Kantorovich style solver)
initial_state = InitialState(loc0, circle_radius, num_pts, dens_vals0, ot.SemidiscreteGHK,penalty)
prices = initial_state.diag.Prices()
for dx in dxs:
    loc = np.array([dx, 0.5])
    dens_vals = CreateCircleDensity(loc, circle_radius)
    dist = ot.DiscretizedDistribution(grid, dens_vals)
    solver = ot.SemidiscreteGHK(dist, initial_state.pts, initial_state.probs, penalty)
    num_pts = initial_state.pts.shape[1]
    opts = {'Max Steps':500, 'GTol Abs':1e-6, 'FTol Abs':0.0, 'Accept Ratio':0.01, 'Shrink Ratio':0.01}
    prices, obj = solver.Solve(prices, opts)
    ghks.append(-2*obj)
    print('Wasserstein Distance: ', -obj)
# Summary plot: every metric over the full translation sweep
plt.figure()
plt.plot(dxs,w2s, linewidth=2, label='W2')
plt.plot(dxs,qrs, linewidth=2, label='QR')
plt.plot(dxs,ghks, linewidth=2, label='GHK')
plt.plot(dxs, l2s, linewidth=2, label='L2')
plt.ylabel('Distance', fontsize=14)
plt.xlabel('Translation',fontsize=14)
plt.legend()
plt.savefig('MetricComparison.png')
plt.show()
# Per-frame plots (for animation): a dashed vertical cursor marks the
# current translation distance in each saved frame.
for i in range(len(dxs)):
    # Frame comparing W2 against the L2 baseline only
    plt.figure()
    plt.plot(dxs,w2s, linewidth=2, label='W2')
    plt.plot(dxs, l2s, linewidth=2, label='L2')
    ymin,ymax = plt.ylim()
    plt.plot([dxs[i],dxs[i]],[ymin,ymax], '--k')
    plt.ylim(ymin,ymax)
    plt.xlim(dxs[0],1.0-circle_radius)
    plt.ylabel('Distance', fontsize=14)
    plt.xlabel('Translation',fontsize=14)
    plt.legend(loc='upper left')
    plt.savefig('L2Comparison_{:02d}.png'.format(i),bbox_inches='tight')
    plt.close()
    # Frame with all four metrics
    plt.figure()
    plt.plot(dxs,w2s, linewidth=2, label='W2')
    plt.plot(dxs, l2s, linewidth=2, label='L2')
    plt.plot(dxs,qrs, linewidth=2, label='QR')
    plt.plot(dxs,ghks, linewidth=2, label='GHK')
    ymin,ymax = plt.ylim()
    plt.plot([dxs[i],dxs[i]],[ymin,ymax], '--k')
    plt.ylim(ymin,ymax)
    plt.ylabel('Distance', fontsize=14)
    plt.xlabel('Translation',fontsize=14)
    plt.legend(loc='upper left')
    plt.savefig('MetricComparison_{:02d}.png'.format(i),bbox_inches='tight')
    plt.close()
|
import pandas as pd
from loguru import logger
import cv2
import numpy as np
from cv.image_processing import image2tiles, get_labels_tiles, predictions2image, tiles2images
from cv.tf_utils import train_model
from segmentation.pixel_tile_segmentation_model import get_model_definition
def get_params():
    """Return the configuration dict for tile-segmentation training.

    Keys: model_folder / model_file (checkpoint locations), input_folder,
    output_folder, epochs, and tile height/width ('h', 'w').
    """
    # Removed unused, machine-specific `path` local ('/home/sebastian/projects/').
    model_folder = "./tmp/"
    model_file = f"{model_folder}/model"
    input_folder = "./test_data/"
    output_folder = './output/'  # was an f-string with no placeholders
    params = {
        'model_folder': model_folder,
        'model_file': model_file,
        'input_folder': input_folder,
        'output_folder': output_folder,
        'epochs': 10,
        'h': 200,
        'w': 200,
    }
    return params
def get_image(image, h, w):
    """Cut `image` into h-by-w tiles and stack them along a new batch axis."""
    assert image is not None
    tiles = image2tiles(image, h, w)
    batched = [np.expand_dims(tile, axis=0) for tile in tiles]
    return np.concatenate(batched, axis=0)
def get_label(labels, h, w):
    """Cut a label image into h-by-w tiles and convert them to per-tile labels."""
    assert labels is not None
    tiles = image2tiles(labels, h, w)
    return get_labels_tiles(tiles)
def get_prediction(model, x, im_shape, h, w):
    """Run the model on tile batch `x`, round the scores to hard labels,
    and reassemble the tiles into a full image of shape `im_shape`."""
    rounded = model.predict(x).round()
    return predictions2image(rounded, im_shape, h, w)
def train_pixel_tile_seg_model(params):
    """Train the tile-level classifier, then the pixel-level segmentation
    model, and write predictions for every test image to ``output/``.

    :param params: configuration dict (see :func:`get_params`); consumes the
        tile size ``h``/``w`` plus the settings used by ``train_model``.
    """
    h = params['h']
    w = params['w']
    # Dataset layout: <path>/sets.csv names each image and its split.
    path = "/home/sebastian/vaihingen"
    sets = f"{path}/sets.csv"
    sets = pd.read_csv(sets, sep=',')
    sets['image_file'] = path + '/images/' + sets.image.map(str)
    sets['label_file'] = path + '/labels/' + sets.image.map(str)
    train_data_1 = sets.query("set == 'training_1'")
    train_data_tile = sets.query("set == 'training_2'").head(20)
    test_data_tile = sets.query("set == 'training_2'").head(3)
    test_data = sets.query("set == 'test'")
    logger.info("Creating models")
    features_model, tile_model, pixel_model = get_model_definition(
        img_height=h, img_width=w, in_channels=3, out_channels=5)
    # ---- Stage 1: train the tile classifier --------------------------------
    image_file_train_tile = train_data_tile.image_file
    label_file_train_tile = train_data_tile.label_file
    image_file_test_tile = test_data_tile.image_file
    label_file_test_tile = test_data_tile.label_file
    x_train_tile = [cv2.imread(f) for f in image_file_train_tile]
    y_train_tile = [cv2.imread(f) for f in label_file_train_tile]
    x_test_tile = [cv2.imread(f) for f in image_file_test_tile]
    y_test_tile = [cv2.imread(f) for f in label_file_test_tile]
    x_train_tile = [get_image(f, h, w) for f in x_train_tile]
    y_train_tile = [get_label(f, h, w) for f in y_train_tile]
    x_test_tile = [get_image(f, h, w) for f in x_test_tile]
    y_test_tile = [get_label(f, h, w) for f in y_test_tile]
    x_train_tile = np.concatenate(x_train_tile, axis=0)
    y_train_tile = np.concatenate(y_train_tile, axis=0)
    x_test_tile = np.concatenate(x_test_tile, axis=0)
    y_test_tile = np.concatenate(y_test_tile, axis=0)
    # A tile is "positive" when any of its pixels carries a label.
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin ``float`` keeps the original float64 dtype.
    y_train_tile = y_train_tile.any(axis=(1, 2)).astype(float)
    y_test_tile = y_test_tile.any(axis=(1, 2)).astype(float)
    logger.info("Training tile model")
    tile_model = train_model(x_train_tile, y_train_tile, x_test_tile,
                             y_test_tile, tile_model, params, logger)
    # Freeze the shared feature extractor before training the pixel head.
    features_model.trainable = False
    # ---- Stage 2: train the pixel segmentation model -----------------------
    image_file_train_1 = train_data_1.image_file
    label_file_train_1 = train_data_1.label_file
    image_file_test = test_data.image_file
    label_file_test = test_data.label_file
    x_train_1 = [cv2.imread(f) for f in image_file_train_1]
    y_train_1 = [cv2.imread(f) for f in label_file_train_1]
    x_test = [cv2.imread(f) for f in image_file_test]
    y_test = [cv2.imread(f) for f in label_file_test]
    x_train_1 = [get_image(f, h, w) for f in x_train_1]
    y_train_1 = [get_label(f, h, w) for f in y_train_1]
    x_test = [get_image(f, h, w) for f in x_test]
    y_test = [get_label(f, h, w) for f in y_test]
    x_train_1 = np.concatenate(x_train_1, axis=0)
    y_train_1 = np.concatenate(y_train_1, axis=0)
    x_test = np.concatenate(x_test, axis=0)
    y_test = np.concatenate(y_test, axis=0)
    logger.info("Training pixel model")
    pixel_model = train_model(x_train_1, y_train_1, x_test, y_test,
                              pixel_model, params, logger)
    # ---- Stage 3: run inference on the test images -------------------------
    for file_name in image_file_test:
        logger.info(f"Getting inference for [{file_name}]")
        x = cv2.imread(file_name)
        im_shape = x.shape
        x = get_image(x, h, w)
        y_pred = get_prediction(pixel_model, x, im_shape, h, w)
        f_name = file_name.split('/')[-1].split('.')[0]
        f_name = f"output/{f_name}_pred.png"
        cv2.imwrite(f_name, y_pred)
if __name__ == "__main__":
    # Script entry point: train both models using the default parameter set.
    train_pixel_tile_seg_model(get_params())
|
from web3 import Web3
from alastria_identity.types import (
Transaction,
NetworkDid,
Entity)
from alastria_identity.services import IdentityConfigBuilder, ContractsService, IDENTITY_MANAGER_ADDRESS
class IdentityManagerService:
    """Builds unsigned :class:`Transaction` objects for the Alastria
    identity-manager contract.

    Every public method ABI-encodes one contract call (several are wrapped
    in a ``delegateCall`` via :meth:`delegated`) and returns a
    ``Transaction`` addressed to ``IDENTITY_MANAGER_ADDRESS``. Nothing is
    signed or sent here.
    """

    def __init__(self, endpoint: Web3):
        # Web3 endpoint the contract wrappers are bound to.
        self.endpoint = endpoint

    def _manager(self):
        """Return the identity-manager contract bound to ``self.endpoint``."""
        return ContractsService.AlastriaIdentityManager(self.endpoint)

    def _build_transaction(self, data) -> Transaction:
        """Wrap ABI-encoded call *data* in a Transaction to the manager."""
        return Transaction(
            to=Web3.toChecksumAddress(IDENTITY_MANAGER_ADDRESS),
            data=data)

    def prepare_alastria_id(self, sign_address: str) -> Transaction:
        """Delegated ``prepareAlastriaID`` call for *sign_address*."""
        data = self.delegated(self._manager().encodeABI(
            fn_name="prepareAlastriaID",
            args=[sign_address]))
        return self._build_transaction(data)

    def create_alastria_identity(self, public_key: str) -> Transaction:
        """``createAlastriaIdentity`` call embedding an ``addKey`` payload
        (encoded against the public-key registry) for *public_key*."""
        public_key_data = ContractsService.AlastriaPublicKeyRegistry(
            self.endpoint).encodeABI(
                fn_name="addKey",
                args=[public_key])
        data = self._manager().encodeABI(
            fn_name="createAlastriaIdentity",
            args=[public_key_data])
        return self._build_transaction(data)

    # NOTE(review): the method name keeps the historical misspelling
    # ("idendity") because external callers depend on it.
    def add_idendity_issuer(self, did_issuer: str, level: int) -> Transaction:
        """Delegated ``addIdentityIssuer`` for *did_issuer* at eIDAS *level*."""
        issuer_address = NetworkDid.from_did(did_issuer).proxy_address
        data = self.delegated(self._manager().encodeABI(
            fn_name='addIdentityIssuer',
            args=[issuer_address, level]))
        return self._build_transaction(data)

    def update_identity_issuer_eidas_level(self, did_issuer: str, level: int) -> Transaction:
        """Delegated ``updateIdentityIssuerEidasLevel`` for *did_issuer*."""
        issuer_address = NetworkDid.from_did(did_issuer).proxy_address
        data = self.delegated(self._manager().encodeABI(
            fn_name='updateIdentityIssuerEidasLevel',
            args=[issuer_address, level]))
        return self._build_transaction(data)

    def delete_identity_issuer(self, did_issuer: str) -> Transaction:
        """Delegated ``deleteIdentityIssuer`` for *did_issuer*."""
        issuer_address = NetworkDid.from_did(did_issuer).proxy_address
        data = self.delegated(self._manager().encodeABI(
            fn_name='deleteIdentityIssuer',
            args=[issuer_address]))
        return self._build_transaction(data)

    def get_eidas_level(self, did_issuer: str) -> Transaction:
        """Read-only ``getEidasLevel`` call for *did_issuer* (not delegated)."""
        issuer_address = NetworkDid.from_did(did_issuer).proxy_address
        data = self._manager().encodeABI(
            fn_name='getEidasLevel',
            args=[issuer_address])
        return self._build_transaction(data)

    def add_identity_service_provider(self, did_service_provider: str) -> Transaction:
        """Delegated ``addIdentityServiceProvider`` for the provider DID."""
        provider_address = NetworkDid.from_did(
            did_service_provider).proxy_address
        data = self.delegated(self._manager().encodeABI(
            fn_name='addIdentityServiceProvider',
            args=[provider_address]))
        return self._build_transaction(data)

    def delete_identity_service_provider(self, did_service_provider: str) -> Transaction:
        """Delegated ``deleteIdentityServiceProvider`` for the provider DID."""
        provider_address = NetworkDid.from_did(
            did_service_provider).proxy_address
        data = self.delegated(self._manager().encodeABI(
            fn_name='deleteIdentityServiceProvider',
            args=[provider_address]))
        return self._build_transaction(data)

    def is_identity_service_provider(self, did_service_provider: str) -> Transaction:
        """Read-only ``isIdentityServiceProvider`` check (not delegated)."""
        provider_address = NetworkDid.from_did(
            did_service_provider).proxy_address
        data = self._manager().encodeABI(
            fn_name='isIdentityServiceProvider',
            args=[provider_address])
        return self._build_transaction(data)

    def is_identity_issuer(self, did_issuer: str) -> Transaction:
        """Read-only ``isIdentityIssuer`` check (not delegated)."""
        issuer_address = NetworkDid.from_did(did_issuer).proxy_address
        data = self._manager().encodeABI(
            fn_name='isIdentityIssuer',
            args=[issuer_address])
        return self._build_transaction(data)

    def add_entity(self, entity: Entity) -> Transaction:
        """Delegated ``addEntity`` with the full entity record."""
        entity_address = NetworkDid.from_did(entity.did_entity).proxy_address
        data = self.delegated(self._manager().encodeABI(
            fn_name='addEntity',
            args=[entity_address,
                  entity.name,
                  entity.cif,
                  entity.url_logo,
                  entity.url_create_aid,
                  entity.url_aoa,
                  entity.status]))
        return self._build_transaction(data)

    # The set_entity_* methods below only pass the changed field; the contract
    # presumably resolves the entity from the delegated caller. (The original
    # code computed the entity address here and never used it — removed.)
    def set_entity_name(self, entity: Entity) -> Transaction:
        """Delegated ``setNameEntity`` with ``entity.name``."""
        data = self.delegated(self._manager().encodeABI(
            fn_name='setNameEntity',
            args=[entity.name]))
        return self._build_transaction(data)

    def set_entity_cif(self, entity: Entity) -> Transaction:
        """Delegated ``setCifEntity`` with ``entity.cif``."""
        data = self.delegated(self._manager().encodeABI(
            fn_name='setCifEntity',
            args=[entity.cif]))
        return self._build_transaction(data)

    def set_entity_url_logo(self, entity: Entity) -> Transaction:
        """Delegated ``setUrlLogo`` with ``entity.url_logo``."""
        data = self.delegated(self._manager().encodeABI(
            fn_name='setUrlLogo',
            args=[entity.url_logo]))
        return self._build_transaction(data)

    def set_entity_url_create_aid(self, entity: Entity) -> Transaction:
        """Delegated ``setUrlCreateAID`` with ``entity.url_create_aid``."""
        data = self.delegated(self._manager().encodeABI(
            fn_name='setUrlCreateAID',
            args=[entity.url_create_aid]))
        return self._build_transaction(data)

    def set_entity_url_aoa_aid(self, entity: Entity) -> Transaction:
        """Delegated ``setUrlAOA`` with ``entity.url_aoa``."""
        data = self.delegated(self._manager().encodeABI(
            fn_name='setUrlAOA',
            args=[entity.url_aoa]))
        return self._build_transaction(data)

    def get_entity(self, entity: Entity) -> Transaction:
        """Read-only ``getEntity`` lookup by the entity's proxy address."""
        entity_address = NetworkDid.from_did(entity.did_entity).proxy_address
        data = self._manager().encodeABI(
            fn_name='getEntity',
            args=[entity_address])
        return self._build_transaction(data)

    def get_entities_list(self) -> Transaction:
        """Read-only ``entitiesList`` call."""
        data = self._manager().encodeABI(
            fn_name='entitiesList',
            args=[])
        return self._build_transaction(data)

    def get_identity_key(self, address: str) -> Transaction:
        """Read-only ``identityKeys`` lookup for *address*."""
        data = self._manager().encodeABI(
            fn_name='identityKeys',
            args=[address])
        return self._build_transaction(data)

    def delegated(self, delegated_data) -> str:
        """ABI-encode a ``delegateCall`` wrapping *delegated_data*."""
        return self._manager().encodeABI(
            fn_name='delegateCall',
            args=[Web3.toChecksumAddress(
                IDENTITY_MANAGER_ADDRESS), 0, delegated_data]
        )
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import enum
from . import poptorch_core # type: ignore
class MeanReductionStrategy(enum.IntEnum):
    """Specify when to divide by a mean reduction factor when
    ``accumulationAndReplicationReductionType`` is set to
    ``ReductionType.Mean``.

    - ``Running``: Keeps the reduction buffer as the current mean. This is
      preferred for numerical stability as the buffer value is never larger
      than the magnitude of the largest micro batch gradient.
    - ``Post``: Divides by the accumulationFactor and replicatedGraphCount
      after all of the gradients have been reduced. In some cases this can be
      faster than using Running, however is prone to overflow.
    - ``PostAndLoss`` (deprecated): Divides by the replicatedGraphCount before
      the backwards pass, performs the gradient reduction across micro
      batches, and then divides by the accumulationFactor. This is to support
      legacy behaviour and is deprecated.
    """
    Running = 0
    Post = 1
    PostAndLoss = 2
class DataLoaderMode(enum.IntEnum):
    """
    - ``Sync``: Access data synchronously.
    - ``Async``: Uses an :py:class:`~poptorch.AsynchronousDataAccessor`
      to access the dataset.
    - ``AsyncRebatched``: For iterable datasets by default PyTorch will round
      down the number of elements to a multiple of the combined batch size in
      each worker. When the number of workers is high and/or the batch size
      large this might lead to a significant part of the dataset being
      discarded. In this mode, the combined batch size used by the PyTorch
      workers will be set to 1, and the batched tensor will instead be
      constructed in the :py:class:`~poptorch.AsynchronousDataAccessor`.
      This mode is identical to Async for map-style datasets.
    """
    Sync = 0
    Async = 1
    AsyncRebatched = 2
class SharingStrategy(enum.IntEnum):
    """Strategy to use to pass objects when spawning new processes.

    - ``SharedMemory``: Fast but limited availability.
    - ``FileSystem``: Slower but larger than memory.
    """
    SharedMemory = 0
    FileSystem = 1
class AnchorMode(enum.IntEnum):
    """
    - ``All``: Return a result for each batch.
    - ``Sum``: Return the sum of all the batches.
    - ``Final``: Return the last batch.
    - ``EveryN``: Return every N batches. N is passed in as
      `anchor_return_period`.
    - ``Default``: "All" for inference, "Final" for training.
    """
    # NOTE: the numeric values deliberately do not follow the docstring
    # order above; do not renumber.
    Final = 0
    EveryN = 1
    All = 2
    Sum = 3
    Default = 4
class ConnectionType(enum.IntEnum):
    """
    - ``Always``: Attach to the IPU from the start (Default).
    - ``OnDemand``: Wait until the compilation is complete and the executable
      is ready to be run to attach to the IPU.
    - ``Never``: Never try to attach to an IPU. (Useful for offline
      compilation, but trying to run an executable will raise an exception).
    """
    Always = 0
    OnDemand = 1
    Never = 2
class HalfFloatCastingBehavior(enum.IntEnum):
    """
    - ``FloatDowncastToHalf``: Any op with operands (inputs) which are a
      mix of float32 and float16 (half) will cast all operands to half.
    - ``HalfUpcastToFloat``: Implicit casting will follow PyTorch's rules,
      promoting float16 (half) inputs to float32 if another input is
      float32.
    """
    FloatDowncastToHalf = 0
    HalfUpcastToFloat = 1
class ReductionType(enum.IntEnum):
    """
    - ``Sum``: Calculate the sum of all values.
    - ``Mean``: Calculate the mean of all values.
    - ``NoReduction``: Do not reduce.
    """
    Sum = 0
    Mean = 1
    NoReduction = 2
class SyncPattern(enum.IntEnum):
    """
    - ``Full``: Require all IPUs to synchronise on every communication between
      IPUs or between IPUs and host.
    - ``SinglePipeline``: Allow IPUs to synchronise with the host
      independently, without having to synchronise with each other. This
      permits any one IPU to perform host IO while other IPUs are processing
      data.
    - ``ReplicaAndLadder``: Allow an IPU group to communicate with the host
      without requiring synchronisation between groups. This permits multiple
      IPU groups to alternate between performing host IO and computation.
    """
    Full = 0
    SinglePipeline = 1
    ReplicaAndLadder = 2
class MatMulSerializationMode(enum.Enum):
    """Which dimension of the matrix multiplication to use for the
    serialization."""
    # String values are the user-facing option names; keep them stable.
    InputChannels = "input_channels"
    ReducingDim = "reducing_dim"
    OutputChannels = "output_channels"
    Disabled = "none"
class Liveness(enum.IntEnum):
    """When using phased execution:

    - ``AlwaysLive``: The tensors always stay on the IPU between the phases.
    - ``OffChipAfterFwd``: The tensors are sent off the chip at the end of
      the forward pass and before the beginning of the backward pass.
    - ``OffChipAfterFwdNoOverlap``: Same as `OffChipAfterFwd`, except there is
      no overlapping of load and store operations between phases. This makes
      it a more memory-efficient mode at the cost of delayed computation.
    - ``OffChipAfterEachPhase``: The tensors are sent off the chip at the end
      of each phase.
    """
    AlwaysLive = 0
    OffChipAfterFwd = 1
    OffChipAfterFwdNoOverlap = 2
    OffChipAfterEachPhase = 3
class OverlapMode(enum.Enum):
    """
    - ``NoOverlap``: The host will copy the tensor to the IPU only when
      required: this minimises on-chip memory use at the cost of performance.
    - ``OverlapAccumulationLoop``: The host will preload values for the next
      gradient accumulation iteration onto an IO tile.
    - ``OverlapDeviceIterationLoop``: The host will preload values not just
      for the next gradient accumulation iteration, but the next device
      iteration, onto an IO tile. This may require more IO tiles than the
      previous setting but offers greater performance.
    """
    NoOverlap = "no_overlap"
    OverlapAccumulationLoop = "overlap_accumulation_loop"
    OverlapDeviceIterationLoop = "overlap_device_iteration_loop"
class AutoStage(enum.IntEnum):
    """Defines how the stages are automatically assigned to blocks when the
    user didn't explicitly provide stages to the ``IExecutionStrategy``'s
    constructor.

    - ``SameAsIpu``: The stage id will be set to the selected ipu number.
    - ``AutoIncrement``: The stage id for new blocks is automatically
      incremented.

    Examples:

    >>> # Block "0"
    >>> with poptorch.Block(ipu_id=0):
    ...     layer()
    >>> # Block "1"
    >>> with poptorch.Block(ipu_id=1):
    ...     layer()
    >>> # Block "2"
    >>> with poptorch.Block(ipu_id=0):
    ...     layer()

    By default, the following execution strategy is used:

    >>> strategy = poptorch.PipelinedExecution(poptorch.AutoStage.SameAsIpu)
    >>> opts.setExecutionStrategy(strategy)

    which would translate to ``stage_id = ipu_id``:

    - Block "0" ipu=0 stage=0
    - Block "1" ipu=1 stage=1
    - Block "2" ipu=0 stage=0

    Now if instead you use:

    >>> strategy = poptorch.PipelinedExecution(poptorch.AutoStage.AutoIncrement)
    >>> opts.setExecutionStrategy(strategy)

    The last block would be in its own stage rather than sharing one with
    Block "0":

    - Block "0" ipu=0 stage=0
    - Block "1" ipu=1 stage=1
    - Block "2" ipu=0 stage=2
    """
    SameAsIpu = 0
    AutoIncrement = 1
class MultiConvPlanType(enum.IntEnum):
    """Selects the execution strategy for a ``poptorch.MultiConv``.

    - ``Parallel``: Execute multiple convolutions in parallel (Default).
    - ``Serial``: Execute each convolution independently. This is
      equivalent to using the independent convolution API.
    """
    Parallel = 0
    Serial = 1
class Compiler(enum.IntEnum):
    """Compiler to use to create the poplar binary.

    - ``PopART``: Normal PopART backend.
    - ``MLIR``: Use the new MLIR backend.
    """
    # Values are taken directly from the native poptorch_core.TracingMode
    # enum so Python and C++ stay in sync.
    PopART = poptorch_core.TracingMode.PopART
    MLIR = poptorch_core.TracingMode.MLIR
|
import torch
import torch.nn as nn
from Utility.TDL import insert_tdl
from Utility.DenseLayer import Dense
import math
class NARXCell(torch.nn.Module):
    """Single-step NARX (Nonlinear AutoRegressive with eXogenous input) cell.

    Maintains input and output tap-delay lines (TDLs) and maps the
    concatenated delays through a stack of dense layers to predict the next
    output.
    """
    __constants__ = ['input_delay_size',
                     'output_delay_size',
                     'hidden_size',
                     'input_size',
                     'output_size',
                     'zero_input_delay',
                     'activation_type',
                     'layers']

    def __init__(self, input_delay_size, output_delay_size, hidden_size, input_size, output_size,
                 zero_input_delay=False, activation_type='tanh', layers=1):
        """
        NARX Cell.

        :param input_delay_size: size of input tap-delay
        :param output_delay_size: size of output tap-delay
        :param hidden_size: number of neurons in hidden layer
        :param input_size: number of inputs
        :param output_size: number of outputs
        :param zero_input_delay: if True, the un-delayed input u(t) is fed to
            the network alongside the delayed inputs
        :param activation_type: hidden layer activation type (ie 'tanh', 'sigmoid', 'relu', and 'linear')
        :param layers: number of hidden layers (Default 1)
        """
        super(NARXCell, self).__init__()
        # NARX architecture
        self.input_delay_size = input_delay_size
        self.output_delay_size = output_delay_size
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.output_size = output_size
        self.zero_input_delay = zero_input_delay
        self.activation_type = activation_type
        self.layers = layers
        # One extra input column when the un-delayed input u(t) is used.
        # FIX: the original tested ``is False``, which misbehaves for falsy
        # non-bool values such as 0; plain truthiness matches the documented
        # boolean contract.
        # NOTE(review): __pred concatenates input_size columns when
        # zero_input_delay is set; presumably this should add
        # ``self.input_size`` rather than 1 — confirm for input_size > 1.
        self.add_input_delay = 1 if self.zero_input_delay else 0
        # Initialize hidden layers
        self.hidden_layers = nn.ModuleList()
        combine_input_size = (self.input_size * self.input_delay_size +
                              self.output_size * self.output_delay_size +
                              self.add_input_delay)
        for i in range(layers):
            # FIX: ``i is 0`` compared identity with an int literal (relies
            # on CPython interning, SyntaxWarning on modern Python); use ==.
            if i == 0:
                # First layer consumes the concatenated tap-delays.
                self.hidden_layers.append(Dense(combine_input_size, self.hidden_size, self.activation_type))
            else:
                self.hidden_layers.append(Dense(self.hidden_size, self.hidden_size, self.activation_type))
        # Initialize the output layer
        self.output_layer = Dense(self.hidden_size, self.output_size, 'linear')

    def forward(self, input, itdl, otdl, output=None):
        # type: (Tensor, Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor]
        """
        Forward pass through NARX cell.

        Type of forward pass depends on if an output (y(t)) is provided. If
        output is None then predict the next output (y_hat(t)) and insert that
        output prediction into the output TDL (otdl) this is common for
        multi-step prediction. Otherwise, insert the output (y(t)) into the
        output TDL (otdl) this is referred to as loading.

        :param input: input (u(t))
        :param output: output (y(t))
        :param itdl: input TDL
        :param otdl: output TDL
        :return: outputs (y_hat(t)), input TDL, output TDL
        """
        if output is None:
            outputs, itdl, otdl = self.__pred_and_update(input, itdl, otdl)
        else:
            outputs, itdl, otdl = self.__loading(input, output, itdl, otdl)
        return outputs, itdl, otdl

    def __loading(self, input, output, itdl, otdl):
        # type: (Tensor, Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor]
        """
        Predict 1-step ahead for the NARX cell. The measurement (y(t)) is
        inserted into the output TDL (otdl) every time this method is called,
        which we refer to as the loading phase. Loading is the process of
        inserting inputs and outputs measurements into their corresponding
        TDL before performing a multi-step prediction.

        :param input: input (u(t))
        :param output: output (y(t))
        :param itdl: input TDL
        :param otdl: output TDL
        :return: outputs (y_hat(t)), input TDL, output TDL
        """
        # Predict y_hat(t)
        a2 = self.__pred(input, itdl, otdl)
        # Insert input (u(t)) into tap-delay
        itdl = insert_tdl(itdl, input.view(-1, self.input_size, 1), shift=self.input_size, dim=1)
        # Update output tap-delay with the measurement y(t), not the prediction
        otdl = insert_tdl(otdl, output.view(-1, self.output_size, 1), shift=self.output_size, dim=1)
        return a2.view(-1, 1, self.output_size), itdl, otdl

    def __pred_and_update(self, input, itdl, otdl):
        # type: (Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor]
        """
        Predict 1-step ahead and update TDL for the NARX cell. The prediction
        (y_hat(t)) is inserted into the output TDL (otdl) every time this
        method is called. This method should be called multiple times for
        multi-step prediction.

        :param input: input (u(t))
        :param itdl: input TDL
        :param otdl: output TDL
        :return: outputs (y_hat(t)), input TDL, output TDL
        """
        # Predict y_hat(t)
        a2 = self.__pred(input, itdl, otdl)
        # Insert input (u(t)) into tap-delay
        itdl = insert_tdl(itdl, input.view(-1, self.input_size, 1), shift=self.input_size, dim=1)
        # Update output (y_hat(t)) tap-delay with the prediction itself
        otdl = insert_tdl(otdl, a2, shift=self.output_size, dim=1)
        return a2.view(-1, 1, self.output_size), itdl, otdl

    def __pred(self, input, itdl, otdl):
        # type: (Tensor, Tensor, Tensor) -> Tensor
        """
        Predict 1-step ahead for the NARX cell.

        :param input: input (u(t))
        :param itdl: input TDL
        :param otdl: output TDL
        :return: outputs (y_hat(t))
        """
        # Optionally append the un-delayed input u(t) to the input tap-delay.
        if self.zero_input_delay:
            delay = torch.cat([itdl, input.view(-1, self.input_size, 1)], dim=1)
        else:
            delay = itdl
        hidden = torch.cat([delay, otdl], dim=1)
        # Loop through all hidden layers
        for i in range(self.layers):
            hidden = self.hidden_layers[i](hidden)
        # Output layer
        a2 = self.output_layer(hidden)
        return a2

    def simulate(self, input, itdl, otdl, variance):
        # type: (Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor, Tensor]
        """
        Simulate the cell with additive Gaussian noise on the prediction.

        :param input: input (u(t))
        :param itdl: input TDL
        :param otdl: output TDL
        :param variance: noise variance
        :return: outputs (y_hat(t)), input TDL, output TDL
        """
        # Predict y_hat(t)
        a2 = self.__pred(input, itdl, otdl)
        # Add zero-mean Gaussian noise with the requested variance
        a2 = torch.randn_like(a2) * math.sqrt(variance) + a2
        # Insert input (u(t)) into tap-delay
        itdl = insert_tdl(itdl, input.view(-1, self.input_size, 1), shift=self.input_size, dim=1)
        # Update output (y_hat(t)) tap-delay
        otdl = insert_tdl(otdl, a2, shift=self.output_size, dim=1)
        return a2.view(-1, 1, self.output_size), itdl, otdl
class NARX(nn.Module):
    """NARX model: wraps a :class:`NARXCell` and unrolls it over time."""

    def __init__(self, input_delay_size, output_delay_size, hidden_size, input_size, output_size,
                 zero_input_delay=False, activation_type='tanh', layers=1):
        """
        NARX Model.

        :param input_delay_size: size of input TDL
        :param output_delay_size: size of output TDL
        :param hidden_size: number of neurons in hidden layer
        :param input_size: number of inputs
        :param output_size: number of outputs
        :param zero_input_delay: no input delay
        :param activation_type: hidden layer activation type (ie 'tanh', 'sigmoid', 'relu', and 'linear')
        :param layers: number of layers (Default 1)
        """
        super(NARX, self).__init__()
        self.input_delay_size = input_delay_size
        self.output_delay_size = output_delay_size
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.output_size = output_size
        self.layers = layers
        self.cell = NARXCell(input_delay_size, output_delay_size, hidden_size, input_size,
                             output_size, zero_input_delay, activation_type, layers)

    def loading(self, inputs, outputs, itdl, otdl):
        # type: (Tensor, Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor]
        """Load measured inputs and outputs into the TDLs, step by step.

        Runs from time 0 to time step 't', feeding each measured output into
        the cell so the tap-delays reflect real data before prediction.

        :param inputs: inputs [u(0) ..... u(t)]
        :param outputs: outputs [y(0) ..... y(t)]
        :param itdl: input TDL
        :param otdl: output TDL
        :return: output [y_hat(0) .... y_hat(t)], input TDL, output TDL
        """
        predictions = torch.zeros_like(inputs)
        for step, (u, y) in enumerate(zip(inputs.split(1, 1), outputs.split(1, 1))):
            pred, itdl, otdl = self.cell(u, itdl, otdl, y)
            predictions[:, step:step + 1] = pred
        return predictions, itdl, otdl

    def forward(self, inputs, itdl, otdl):
        # type: (Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor]
        """Predict k steps ahead, feeding each prediction back into the TDL.

        Runs from time step 't' to 't+H' with H being the maximum prediction
        horizon.

        :param inputs: inputs [u(t+1) ..... u(t+H)]
        :param itdl: input TDL
        :param otdl: output TDL
        :return: output [y_hat(t+1) .... y_hat(t+H)], input TDL, output TDL
        """
        predictions = torch.zeros_like(inputs)
        for step, u in enumerate(inputs.split(1, 1)):
            pred, itdl, otdl = self.cell(u, itdl, otdl)
            predictions[:, step:step + 1] = pred
        return predictions, itdl, otdl

    def simulate(self, inputs, itdl, otdl, variance):
        # type: (Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor, Tensor]
        """Simulate k steps ahead with noise of the given variance.

        Runs from time step 't' to 't+H' with H being the maximum prediction
        horizon.

        :param inputs: inputs [u(t+1) ..... u(t+H)]
        :param itdl: input TDL
        :param otdl: output TDL
        :param variance: variance in error
        :return: output [y_hat(t+1) .... y_hat(t+H)], input TDL, output TDL
        """
        predictions = torch.zeros_like(inputs)
        for step, u in enumerate(inputs.split(1, 1)):
            pred, itdl, otdl = self.cell.simulate(u, itdl, otdl, variance)
            predictions[:, step:step + 1] = pred
        return predictions, itdl, otdl
|
from __future__ import unicode_literals, division, absolute_import
from urlparse import urlparse
import logging
from requests import RequestException
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
log = logging.getLogger('sickbeard')
class Sickbeard(object):
    # Flexget input plugin that lists the shows monitored by a Sickbeard
    # instance (Python 2 era code: note the `urlparse` import at file top).

    # JSON schema validating the plugin configuration.
    schema = {
        'type': 'object',
        'properties': {
            'base_url': {'type': 'string'},
            'port': {'type': 'number', 'default': 80},
            'api_key': {'type': 'string'},
            'include_ended': {'type': 'boolean', 'default': True},
            'only_monitored': {'type': 'boolean', 'default': False},
            'include_data': {'type': 'boolean', 'default': False}
        },
        'required': ['api_key', 'base_url'],
        'additionalProperties': False
    }

    def quality_requirement_builder(self, quality_list):
        """
        Translates sickbeards' qualities into format used by Flexget
        """
        # Mapping from Sickbeard quality identifiers to Flexget quality
        # requirement strings. NOTE(review): any Sickbeard quality missing
        # from this table raises KeyError — confirm the set is complete.
        sb_to_fg = {'sdtv': 'sdtv',
                    'sddvd': 'dvdrip',
                    'hdtv': '720p hdtv',
                    'rawhdtv': '1080p hdtv',
                    'fullhdtv': '1080p hdtv',
                    'hdwebdl': '720p webdl',
                    'fullhdwebdl': '1080p webdl',
                    'hdbluray': '720p bluray',
                    'fullhdbluray': '1080p bluray',
                    'unknown': 'any'}
        return [sb_to_fg[quality] for quality in quality_list]

    def on_task_input(self, task, config):
        """
        This plugin returns ALL of the shows monitored by Sickbeard.
        This includes both ongoing and ended.

        Syntax:

        sickbeard:
          base_url=<value>
          port=<value>
          api_key=<value>

        Options base_url and api_key are required.
        Use with input plugin like discover and/or configure_series.

        Example:

        download-tv-task:
          configure_series:
            settings:
              quality:
                - 720p
            from:
              sickbeard:
                base_url: http://localhost
                port: 8531
                api_key: MYAPIKEY1123
          discover:
            what:
              - emit_series: yes
            from:
              torrentz: any
          download:
            /download/tv

        Note that when using the configure_series plugin with Sickbeard
        you are basically synced to it, so removing a show in Sickbeard will
        remove it in flexget as well, which could be positive or negative,
        depending on your usage.
        """
        # Build the Sickbeard `shows` API URL from the configured base URL
        # (scheme/host/path) plus the separately configured port.
        parsedurl = urlparse(config.get('base_url'))
        url = '%s://%s:%s%s/api/%s/?cmd=shows' % (parsedurl.scheme, parsedurl.netloc,
                                                  config.get('port'), parsedurl.path, config.get('api_key'))
        try:
            json = task.requests.get(url).json()
        except RequestException as e:
            raise plugin.PluginError('Unable to connect to Sickbeard at %s://%s:%s%s. Error: %s'
                                     % (parsedurl.scheme, parsedurl.netloc, config.get('port'), parsedurl.path, e))
        entries = []
        # One Flexget entry per show returned by the API.
        for id, show in json['data'].items():
            fg_qualities = ''  # Initializes the quality parameter
            # Honour the only_monitored / include_ended filters.
            if show['paused'] and config.get('only_monitored'):
                continue
            if show['status'] == 'Ended' and not config.get('include_ended'):
                continue
            if config.get('include_data'):
                # Fetch per-show details to translate its quality settings.
                show_url = '%s:%s/api/%s/?cmd=show&tvdbid=%s' % (config['base_url'], config['port'],
                                                                 config['api_key'], show['tvdbid'])
                show_json = task.requests.get(show_url).json()
                fg_qualities = self.quality_requirement_builder(show_json['data']['quality_details']['initial'])
            entry = Entry(title=show['show_name'],
                          url='',
                          series_name=show['show_name'],
                          tvdb_id=show.get('tvdbid'),
                          tvrage_id=show.get('tvrage_id'))
            # Attach quality requirements for configure_series: a list for
            # multiple qualities, a single string otherwise.
            if len(fg_qualities) > 1:
                entry['configure_series_qualities'] = fg_qualities
            elif len(fg_qualities) == 1:
                entry['configure_series_quality'] = fg_qualities[0]
            else:
                entry['configure_series_quality'] = fg_qualities
            if entry.isvalid():
                entries.append(entry)
            else:
                log.error('Invalid entry created? %s' % entry)
                continue
            # Test mode logging
            if task.options.test:
                log.info("Test mode. Entry includes:")
                for key, value in entry.items():
                    log.info('    {}: {}'.format(key.capitalize(), value))
        return entries
@event('plugin.register')
def register_plugin():
    # Register the input plugin class under the name 'sickbeard'.
    plugin.register(Sickbeard, 'sickbeard', api_ver=2)
|
<reponame>tomdoherty/salt<filename>tests/pytests/unit/beacons/test_telegram_bot_msg.py
# Python libs
import datetime
import logging
import time
import pytest
# Salt libs
from salt.beacons import telegram_bot_msg
# Salt testing libs
from tests.support.mock import MagicMock, patch
# Third-party libs
try:
import telegram
HAS_TELEGRAM = True
except ImportError:
HAS_TELEGRAM = False
log = logging.getLogger(__name__)
pytestmark = [
pytest.mark.skipif(HAS_TELEGRAM is False, reason="telegram is not available"),
]
@pytest.fixture
def configure_loader_modules():
    # The beacon module needs no extra loader globals for these tests.
    return {telegram_bot_msg: {}}
def test_validate_empty_config():
    """A non-list configuration must be rejected."""
    # (Removed unused *args/**kwargs — pytest calls test functions with no
    # positional arguments.)
    ret = telegram_bot_msg.validate(None)
    assert ret == (False, "Configuration for telegram_bot_msg beacon must be a list.")
def test_validate_missing_accept_from_config():
    """Configuration lacking 'accept_from' must be rejected."""
    ret = telegram_bot_msg.validate([{"token": "bcd"}])
    assert ret == (
        False,
        "Not all required configuration for telegram_bot_msg are set.",
    )
def test_validate_missing_token_config():
    """Configuration lacking 'token' must be rejected."""
    ret = telegram_bot_msg.validate([{"accept_from": []}])
    assert ret == (
        False,
        "Not all required configuration for telegram_bot_msg are set.",
    )
def test_validate_config_not_list_in_accept_from():
    """'accept_from' must be a list, not a dict."""
    ret = telegram_bot_msg.validate([{"token": "bcd", "accept_from": {"nodict": "1"}}])
    assert ret == (
        False,
        "Configuration for telegram_bot_msg, "
        "accept_from must be a list of "
        "usernames.",
    )
def test_validate_valid_config():
    """A config with both token and an accept_from list is accepted."""
    ret = telegram_bot_msg.validate([{"token": "bcd", "accept_from": ["username"]}])
    assert ret == (True, "Valid beacon configuration.")
def test_call_no_updates():
    """beacon() returns an empty list when the Telegram API reports no updates."""
    with patch("salt.beacons.telegram_bot_msg.telegram") as telegram_api:
        token = "abc"
        config = [{"token": token, "accept_from": ["tester"]}]
        # Mock out the Bot so no real network call is made.
        inst = MagicMock(name="telegram.Bot()")
        telegram_api.Bot = MagicMock(name="telegram", return_value=inst)
        inst.get_updates.return_value = []
        ret = telegram_bot_msg.validate(config)
        assert ret == (True, "Valid beacon configuration.")
        ret = telegram_bot_msg.beacon(config)
        # The beacon must construct the Bot with the configured token exactly once.
        telegram_api.Bot.assert_called_once_with(token)
        assert ret == []
def test_call_telegram_return_no_updates_for_user():
    """beacon() drops updates whose author is not in the accept_from whitelist."""
    with patch("salt.beacons.telegram_bot_msg.telegram") as telegram_api:
        token = "abc"
        username = "tester"
        config = [{"token": token, "accept_from": [username]}]
        inst = MagicMock(name="telegram.Bot()")
        telegram_api.Bot = MagicMock(name="telegram", return_value=inst)
        log.debug("telegram %s", telegram)
        # The update is authored by a user NOT present in accept_from,
        # so the beacon must filter it out. Note: the real (unpatched)
        # telegram library is used here to build genuine message objects.
        username = "different_user"
        user = telegram.user.User(id=1, first_name="", username=username, is_bot=True)
        chat = telegram.chat.Chat(1, "private", username=username)
        date = time.mktime(datetime.datetime(2016, 12, 18, 0, 0).timetuple())
        message = telegram.message.Message(
            message_id=1, from_user=user, date=date, chat=chat
        )
        update = telegram.update.Update(update_id=1, message=message)
        inst.get_updates.return_value = [update]
        ret = telegram_bot_msg.validate(config)
        assert ret == (True, "Valid beacon configuration.")
        ret = telegram_bot_msg.beacon(config)
        telegram_api.Bot.assert_called_once_with(token)
        # Update author is not whitelisted -> no beacon events.
        assert ret == []
def test_call_telegram_returning_updates():
    """beacon() emits the message dict when the update author is whitelisted."""
    with patch("salt.beacons.telegram_bot_msg.telegram") as telegram_api:
        token = "abc"
        username = "tester"
        config = [{"token": token, "accept_from": [username]}]
        inst = MagicMock(name="telegram.Bot()")
        telegram_api.Bot = MagicMock(name="telegram", return_value=inst)
        # Build a genuine update authored by the whitelisted user.
        user = telegram.User(id=1, first_name="", username=username, is_bot=True)
        chat = telegram.Chat(1, "private", username=username)
        date = time.mktime(datetime.datetime(2016, 12, 18, 0, 0).timetuple())
        message = telegram.Message(message_id=1, from_user=user, date=date, chat=chat)
        update = telegram.update.Update(update_id=1, message=message)
        inst.get_updates.return_value = [update]
        ret = telegram_bot_msg.validate(config)
        assert ret == (True, "Valid beacon configuration.")
        ret = telegram_bot_msg.beacon(config)
        telegram_api.Bot.assert_called_once_with(token)
        assert ret
        # The emitted event must carry the serialized message.
        assert ret[0]["msgs"][0] == message.to_dict()
|
import numpy as np
import cv2
import os
#from data_process import get_frames_from_video
def rgb2gray(img):
    """Convert an RGB image (H x W x 3) to grayscale using ITU-R BT.601 luma weights.

    Returns a float array of shape (H, W); integer inputs are promoted by the
    weight multiplication.
    """
    luma_weights = (0.299, 0.587, 0.114)
    weighted = (img[..., channel] * w for channel, w in enumerate(luma_weights))
    gray = next(weighted)
    for component in weighted:
        gray = gray + component
    return gray
def visualize(flow, name='flow', show=True):
    """Render a dense flow field (H x W x 2) as an HSV color-wheel image.

    Hue encodes flow direction, value encodes min-max-normalized magnitude,
    saturation is fixed at full. Returns the BGR visualization and the mean
    raw flow magnitude scaled by 100.
    """
    h, w, c = flow.shape
    hsv = np.zeros((h,w,3), dtype=np.uint8)
    hsv[...,1] = 255  # full saturation everywhere
    mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
    hsv[...,0] = ang*180/np.pi/2  # radians -> OpenCV hue range [0, 180)
    hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
    bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
    if show:
        cv2.imshow(name, bgr)  # side effect: opens/updates a GUI window
    return bgr, np.mean(mag)*100
def calOptflow(prvs_frame, next_frame, vis=False, window_size=15):
    """Compute dense Farneback optical flow from prvs_frame to next_frame.

    Args:
        prvs_frame: earlier RGB frame (H x W x 3).
        next_frame: later RGB frame (H x W x 3).
        vis: when True, render debug windows and print motion statistics.
        window_size: averaging window size for the Farneback algorithm.

    Returns:
        Flow field of shape (H, W, 2) holding per-pixel (dx, dy) displacements.
    """
    next_frame_gray = rgb2gray(next_frame).astype(np.float32)
    prvs_frame_gray = rgb2gray(prvs_frame).astype(np.float32)
    flow = cv2.calcOpticalFlowFarneback(prvs_frame_gray, next_frame_gray, None, 0.5, 3, window_size, 3, 5, 1.2, 1)
    if vis:
        bgr, ratio1 = visualize(flow, name='flow')
        # Count pixels with no detected motion (pure black in the visualization).
        I = np.mean(bgr, axis=-1)
        total = I.size
        no_move = int(np.count_nonzero(I == 0))
        # Warp the previous frame (channel-reversed for display) by the flow and
        # measure the residual flow against the real next frame as a sanity check.
        prvs_frame = prvs_frame[:, :, ::-1]
        next_frame_fake = FlowShift(prvs_frame, flow)
        _, ratio2 = visualize(calOptflow(next_frame, next_frame_fake), name='differ')
        print(no_move, total, f"{no_move/total * 100:.2f}%, ratio: [{ratio1:.2f}] | [{ratio2:.2f}]")
        # Overlay the flow visualization on the warped frame where motion exists.
        merge = next_frame_fake*.5 + bgr*.5
        merge[I < 1] = next_frame_fake[I < 1]
        cv2.imshow('new_frames', np.uint8(merge))
        cv2.waitKey(30)  # pump the GUI event loop; the key result was never used
    return flow
def FlowSplit(flow):
    """Split a fractional flow field into its four integer-corner flows.

    For each axis the flow component is bracketed by its floor and ceil;
    the four (x, y) corner combinations are returned together with bilinear
    weights (the distance of each component to the *opposite* corner).

    Args:
        flow: (H, W, 2) array of fractional flow vectors.

    Returns:
        flows: 2x2 nested list, flows[i][j] of shape (H, W, 2) pairing
            the i-th x-corner with the j-th y-corner (0 = floor, 1 = ceil).
        weights: array of shape (2, 2, H, W, 1) with the matching weights.
    """
    # corners[axis] == [floor(component), ceil(component)]
    corners = [
        [np.floor(flow[:, :, axis]), np.ceil(flow[:, :, axis])]
        for axis in range(2)
    ]
    # dist[axis][c]: distance to the opposite corner (weight for corner c).
    dist = [
        [np.abs(flow[:, :, axis] - corners[axis][1 - c]) for c in range(2)]
        for axis in range(2)
    ]
    weights = [[dist[0][i] * dist[1][j] for j in range(2)] for i in range(2)]
    flows = [
        [np.stack((corners[0][i], corners[1][j]), axis=-1) for j in range(2)]
        for i in range(2)
    ]
    return flows, np.expand_dims(np.array(weights), axis=-1)
def FlowShift(image, flow, weight=1):
    """Backward-warp `image` by `flow` with bilinear sampling.

    Each output pixel samples the input at its own position minus the flow
    vector. NOTE(review): the `weight` parameter is currently unused —
    confirm no caller relies on it before removing.
    """
    h, w, c = flow.shape
    # Absolute (x, y) sampling coordinates for cv2.remap.
    y_coords, x_coords = np.mgrid[0:h, 0:w]
    coords = np.float32(np.dstack([x_coords, y_coords]))
    pixel_map = coords - flow
    new_img = cv2.remap(image, pixel_map, None, cv2.INTER_LINEAR)
    return new_img
if __name__ == '__main__':
    # Demo: step through extracted video frames and display the optical flow
    # between each consecutive pair. (A large body of commented-out experiment
    # code — FlowSplit sanity checks, h5py/npz flow dumps, a VideoCapture
    # comparison loop — was removed for readability.)
    import h5py  # kept for parity with the original environment; unused below
    root_dir = r'F:\datasets\MegVSR\train_png\84.mkv_down4x.mp4_frames'
    files = [os.path.join(root_dir, name) for name in os.listdir(root_dir)]
    # Frames on disk are BGR; reverse the channel axis to get RGB for calOptflow.
    prvs_img = cv2.imread(files[0])[:, :, ::-1]
    cv2.imshow('new_frames', prvs_img[:, :, ::-1])
    cv2.imshow('flow', np.zeros_like(prvs_img))
    cv2.waitKey(1000)
    for file in files[1:]:
        next_img = cv2.imread(file)[:, :, ::-1]
        # vis=True renders the flow windows and prints motion statistics.
        calOptflow(prvs_img, next_img, True)
        prvs_img = next_img
|
<filename>cogs/mute.py
import discord
from discord.ext import commands
import aiosqlite
import asyncio
from datetime import datetime, timedelta
from utils.ids import GuildNames, GuildIDs, TGRoleIDs, BGRoleIDs, AdminVars
from utils.time import convert_time
import utils.check
class Mute(commands.Cog):
    """
    Contains the custom mute system for both of our servers.

    Mute state is persisted in the `muted` table of ./db/database.db so it
    survives restarts; the muted role is mirrored on both the TG and BG guilds.
    """

    def __init__(self, bot):
        self.bot = bot

    async def add_mute(self, member: discord.Member):
        """
        Adds the mute entry in the database,
        and tries to add the role in both servers.
        """
        # Checks if the user is already flagged as muted in the database;
        # if not, goes ahead and adds the mute.
        # No reason to have someone in there multiple times.
        async with aiosqlite.connect("./db/database.db") as db:
            matching_user = await db.execute_fetchall(
                """SELECT * FROM muted WHERE user_id = :user_id""",
                {"user_id": member.id},
            )
            if len(matching_user) == 0:
                await db.execute(
                    """INSERT INTO muted VALUES (:user_id, :muted)""",
                    {"user_id": member.id, "muted": True},
                )
                await db.commit()
        # First we add the mute on the TG server, or try to.
        # Role/member lookups can fail if the member left the guild.
        try:
            tg_guild = self.bot.get_guild(GuildIDs.TRAINING_GROUNDS)
            tg_role = discord.utils.get(tg_guild.roles, id=TGRoleIDs.MUTED_ROLE)
            tg_member = tg_guild.get_member(member.id)
            await tg_member.add_roles(tg_role)
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.mute")
            logger.warning(
                f"Tried to add muted role in {GuildNames.TG} server but it failed: {exc}"
            )
        # Then we add the mute on the BG server, or try to.
        try:
            bg_guild = self.bot.get_guild(GuildIDs.BATTLEGROUNDS)
            bg_role = discord.utils.get(bg_guild.roles, id=BGRoleIDs.MUTED_ROLE)
            bg_member = bg_guild.get_member(member.id)
            await bg_member.add_roles(bg_role)
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.mute")
            logger.warning(
                f"Tried to add muted role in {GuildNames.BG} server but it failed: {exc}"
            )

    async def remove_mute(self, member: discord.Member):
        """
        Basically reverses the add_mute function.
        Removes the muted entry from the database
        and tries to remove the role in both servers.
        """
        async with aiosqlite.connect("./db/database.db") as db:
            await db.execute(
                """DELETE FROM muted WHERE user_id = :user_id""",
                {"user_id": member.id},
            )
            await db.commit()
        try:
            tg_guild = self.bot.get_guild(GuildIDs.TRAINING_GROUNDS)
            tg_role = discord.utils.get(tg_guild.roles, id=TGRoleIDs.MUTED_ROLE)
            tg_member = tg_guild.get_member(member.id)
            await tg_member.remove_roles(tg_role)
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.mute")
            logger.warning(
                f"Tried to remove muted role in {GuildNames.TG} server but it failed: {exc}"
            )
        try:
            bg_guild = self.bot.get_guild(GuildIDs.BATTLEGROUNDS)
            bg_role = discord.utils.get(bg_guild.roles, id=BGRoleIDs.MUTED_ROLE)
            bg_member = bg_guild.get_member(member.id)
            await bg_member.remove_roles(bg_role)
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.mute")
            logger.warning(
                f"Tried to remove muted role in {GuildNames.BG} server but it failed: {exc}"
            )

    async def add_timeout(self, member: discord.Member, time: datetime):
        """
        Tries to add the timeout on both servers.
        """
        try:
            tg_guild = self.bot.get_guild(GuildIDs.TRAINING_GROUNDS)
            tg_member = tg_guild.get_member(member.id)
            await tg_member.edit(timed_out_until=time)
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.mute")
            logger.warning(
                f"Tried to add timeout in {GuildNames.TG} server but it failed: {exc}"
            )
        try:
            bg_guild = self.bot.get_guild(GuildIDs.BATTLEGROUNDS)
            bg_member = bg_guild.get_member(member.id)
            await bg_member.edit(timed_out_until=time)
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.mute")
            logger.warning(
                f"Tried to add timeout in {GuildNames.BG} server but it failed: {exc}"
            )

    async def remove_timeout(self, member: discord.Member):
        """
        Tries to remove the timeout on both servers.
        """
        try:
            tg_guild = self.bot.get_guild(GuildIDs.TRAINING_GROUNDS)
            tg_member = tg_guild.get_member(member.id)
            # Setting it to None will remove the timeout.
            await tg_member.edit(timed_out_until=None)
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.mute")
            logger.warning(
                f"Tried to remove timeout in {GuildNames.TG} server but it failed: {exc}"
            )
        try:
            bg_guild = self.bot.get_guild(GuildIDs.BATTLEGROUNDS)
            bg_member = bg_guild.get_member(member.id)
            await bg_member.edit(timed_out_until=None)
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.mute")
            logger.warning(
                f"Tried to remove timeout in {GuildNames.BG} server but it failed: {exc}"
            )

    @commands.command()
    @utils.check.is_moderator()
    async def mute(self, ctx, member: discord.Member, *, reason):
        """
        Mutes a member in both servers indefinitely and DMs them the reason for it.
        """
        async with aiosqlite.connect("./db/database.db") as db:
            matching_user = await db.execute_fetchall(
                """SELECT * FROM muted WHERE user_id = :user_id""",
                {"user_id": member.id},
            )
        # We check again if the user is muted here because I don't want the user to get DM'd again if he already is muted.
        # Didn't wanna put a separate DM function as well because the DMs change depending on what command calls it.
        if len(matching_user) == 0:
            await self.add_mute(member)
            await ctx.send(f"{member.mention} was muted!")
            try:
                await member.send(
                    f"You have been muted in the {ctx.guild.name} Server for the following reason: \n"
                    f"```{reason}```\n"
                    f"If you would like to discuss your punishment, please contact {AdminVars.GROUNDS_GENERALS}."
                )
            except discord.HTTPException as exc:
                logger = self.bot.get_logger("bot.mute")
                logger.warning(
                    f"Tried to message mute reason to {str(member)}, but it failed: {exc}"
                )
        else:
            await ctx.send("This user was already muted!")

    @commands.command()
    @utils.check.is_moderator()
    async def unmute(self, ctx, member: discord.Member):
        """
        Unmutes a member in both servers and notifies them via DM.
        """
        async with aiosqlite.connect("./db/database.db") as db:
            matching_user = await db.execute_fetchall(
                """SELECT * FROM muted WHERE user_id = :user_id""",
                {"user_id": member.id},
            )
        if len(matching_user) != 0:
            await self.remove_mute(member)
            await ctx.send(f"{member.mention} was unmuted!")
            try:
                await member.send(
                    f"You have been unmuted in the {ctx.guild.name} Server! Don't break the rules again"
                )
            except discord.HTTPException as exc:
                logger = self.bot.get_logger("bot.mute")
                logger.warning(
                    f"Tried to message unmute message to {str(member)}, but it failed: {exc}"
                )
        else:
            await ctx.send("This user was not muted!")

    @commands.command()
    @utils.check.is_moderator()
    async def tempmute(self, ctx, member: discord.Member, mute_time, *, reason):
        """
        Mutes a member in both servers, waits the specified time and unmutes them again.
        """
        # Converts the input into the seconds, and also a human-readable string.
        seconds, time_muted = convert_time(mute_time)
        # Just checking the duration is not at a crazy high/low value.
        if seconds < 30:
            await ctx.send("Duration is too short! Minimum duration is 30 seconds.")
            return
        # NOTE(review): 86401 allows one second over 24h — presumably 86400 was
        # intended to match the "1 day" message; confirm before changing.
        if seconds > 86401:
            await ctx.send("Duration is too long! Maximum duration is 1 day.")
            return
        # Now this is basically just "%mute, wait specified time, %unmute" but automated into one command.
        async with aiosqlite.connect("./db/database.db") as db:
            matching_user = await db.execute_fetchall(
                """SELECT * FROM muted WHERE user_id = :user_id""",
                {"user_id": member.id},
            )
        # The mute block from %mute, with the inclusion of time_muted.
        if len(matching_user) == 0:
            await self.add_mute(member)
            await ctx.send(f"{member.mention} was muted for *{time_muted}*!")
            try:
                await member.send(
                    f"You have been muted in the {ctx.guild.name} Server for ***{time_muted}*** for the following reason: \n"
                    f"```{reason}```\n"
                    f"If you would like to discuss your punishment, please contact {AdminVars.GROUNDS_GENERALS}."
                )
            except discord.HTTPException as exc:
                logger = self.bot.get_logger("bot.mute")
                logger.warning(
                    f"Tried to message temp mute reason to {str(member)}, but it failed: {exc}"
                )
        else:
            await ctx.send("This user is already muted!")
            return
        # Waits the specified time.
        await asyncio.sleep(seconds)
        # Need to refresh the contents of the database.
        async with aiosqlite.connect("./db/database.db") as db:
            matching_user = await db.execute_fetchall(
                """SELECT * FROM muted WHERE user_id = :user_id""",
                {"user_id": member.id},
            )
        # The unmute block from %unmute;
        # no need for another unmute confirmation if the user was unmuted before manually.
        if len(matching_user) != 0:
            await self.remove_mute(member)
            await ctx.send(f"{member.mention} was automatically unmuted!")
            try:
                await member.send(
                    f"You have been automatically unmuted in the {ctx.guild.name} Server! Don't break the rules again"
                )
            except discord.HTTPException as exc:
                logger = self.bot.get_logger("bot.mute")
                logger.warning(
                    f"Tried to message temp unmute message to {str(member)}, but it failed: {exc}"
                )

    @commands.command()
    @utils.check.is_moderator()
    async def timeout(self, ctx, member: discord.Member, mute_time, *, reason):
        """
        Times out a member with the built in timeout function.
        Specify a time and a reason.
        The reason will get DM'd to the member.
        """
        # Converts the time again; we don't need the readable-time string though.
        seconds, _ = convert_time(mute_time)
        # NOTE(review): 2419199 is one second short of 28 days (2419200),
        # which is Discord's maximum timeout length.
        if seconds > 2419199:
            await ctx.send(
                "The maximum allowed time for a timeout is just under 28 days."
            )
            return
        # Gets the time for the timeout; needs to be a dt object.
        timeout_dt = discord.utils.utcnow() + timedelta(seconds=seconds)
        # Timezone-aware dt object for sending out.
        aware_dt = discord.utils.format_dt(timeout_dt, style="f")
        if member.is_timed_out():
            # If the member is already on timeout, we modify the message sent.
            if member.timed_out_until < timeout_dt:
                message = (
                    f"The timeout of {member.mention} got prolonged until {aware_dt}."
                )
            else:
                message = (
                    f"The timeout of {member.mention} got shortened until {aware_dt}."
                )
        else:
            message = f"{member.mention} is on timeout until {aware_dt}."
        await self.add_timeout(member, timeout_dt)
        try:
            await member.send(
                f"You are on timeout in the {ctx.guild.name} Server until {aware_dt} for the following reason: \n"
                f"```{reason}```\n"
                f"If you would like to discuss your punishment, please contact {AdminVars.GROUNDS_GENERALS}."
            )
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.mute")
            logger.warning(
                f"Tried to message timeout message to {str(member)}, but it failed: {exc}"
            )
        await ctx.send(message)

    @commands.command(aliases=["untimeout"])
    @utils.check.is_moderator()
    async def removetimeout(self, ctx, member: discord.Member):
        """
        Removes a timeout from a member and notifies the member.
        """
        # We check first if the member is on timeout.
        if not member.is_timed_out():
            await ctx.send(f"{member.mention} is not on timeout!")
            return
        await self.remove_timeout(member)
        try:
            await member.send(
                f"Your timeout has been manually removed in the {ctx.guild.name} Server! Don't break the rules again"
            )
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.mute")
            logger.warning(
                f"Tried to message remove timeout message to {str(member)}, but it failed: {exc}"
            )
        await ctx.send(f"Removed the timeout of {member.mention}")

    # Error handling for the mute commands.

    @mute.error
    async def mute_error(self, ctx, error):
        """Maps %mute argument/permission errors to user-facing messages."""
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send("You need to specify a reason for the mute!")
        elif isinstance(error, commands.MemberNotFound):
            await ctx.send("You need to mention a member!")
        elif isinstance(error, commands.MissingPermissions):
            await ctx.send("Nice try, but you don't have the permissions to do that!")
        else:
            raise error

    @unmute.error
    async def unmute_error(self, ctx, error):
        """Maps %unmute argument/permission errors to user-facing messages."""
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send("You need to mention a member!")
        elif isinstance(error, commands.MemberNotFound):
            await ctx.send("You need to mention a member!")
        elif isinstance(error, commands.MissingPermissions):
            await ctx.send("Nice try, but you don't have the permissions to do that!")
        else:
            raise error

    @tempmute.error
    async def tempmute_error(self, ctx, error):
        """Maps %tempmute argument/permission/time-format errors to user-facing messages."""
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(
                "You need to mention a member, an amount of time, and a reason!"
            )
        elif isinstance(error, commands.MemberNotFound):
            await ctx.send("You need to mention a member!")
        elif isinstance(error, commands.MissingPermissions):
            await ctx.send("Nice try, but you don't have the permissions to do that!")
        elif isinstance(error, commands.CommandInvokeError):
            await ctx.send(
                "Invalid time format! Please use a number followed by d/h/m/s for days/hours/minutes/seconds."
            )
        else:
            raise error

    @timeout.error
    async def timeout_error(self, ctx, error):
        """Maps %timeout argument/permission/time-format errors to user-facing messages."""
        if isinstance(error, commands.MissingPermissions):
            await ctx.send("Nice try, but you don't have the permissions to do that!")
        elif isinstance(error, commands.MissingRequiredArgument):
            await ctx.send("Please specify a member, a timeout length and a reason!")
        elif isinstance(error, commands.MemberNotFound):
            await ctx.send("You need to mention a member!")
        elif isinstance(error, commands.CommandInvokeError):
            await ctx.send(
                "Something went wrong! Either you used an invalid time format or I don't have the required permissons! "
                "Try using a number followed by d/h/m/s for days/hours/minutes/seconds."
            )
        else:
            raise error

    @removetimeout.error
    async def removetimeout_error(self, ctx, error):
        """Maps %removetimeout argument/permission errors to user-facing messages."""
        if isinstance(error, commands.MissingPermissions):
            await ctx.send("Nice try, but you don't have the permissions to do that!")
        elif isinstance(error, commands.MissingRequiredArgument):
            await ctx.send("You need to mention a member!")
        elif isinstance(error, commands.MemberNotFound):
            await ctx.send("You need to mention a member!")
        else:
            raise error
async def setup(bot):
    """Extension entry point: attach the Mute cog to the bot."""
    await bot.add_cog(Mute(bot))
    print("Mute cog loaded")
|
import json
import re
import scrapy
from locations.items import GeojsonPointItem
class IHGHotels(scrapy.Spider):
    """Scrapy spider for IHG-family hotel brands.

    Each brand's detail page uses slightly different markup, so `parse`
    dispatches to a brand-specific parser based on the URL path segment.
    All parsers yield GeojsonPointItem records.
    """
    name = "ihg_hotels"
    item_attributes = { 'brand': "IHG Hotels" }
    # allowed_domains = ["ihg.com"] # the Kimpton hotels each have their own domains
    download_delay = 0.5
    start_urls = (
        'https://www.ihg.com/holidayinn/destinations/us/en/explore',
        'https://www.ihg.com/armyhotels/hotels/us/en/installations',
    )

    def parse_hotel(self, response):
        """Parse a standard IHG hotel detail page using its microdata spans."""
        if 'hoteldetail' not in response.url:
            # got redirected back to search page
            return
        street_address = " ".join(response.xpath('//span[@itemprop="streetAddress"]/p/text()').extract())
        if not street_address:
            street_address = response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first()
        city = response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first()
        state = response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first()
        properties = {
            # ref: destination/hotel-code segments of the URL joined by "_"
            'ref': "_".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),
            'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
            # \u00a0 = non-breaking space, common in these address fields
            'addr_full': street_address.replace(u'\u00a0', ' ').strip(', ') if street_address else None,
            'city': city.replace(u'\u00a0', ' ').strip(', ') if city else None,
            'state': state.replace(u'\u00a0', ' ').strip(', ') if state else None,
            'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
            'country': response.xpath('//span[@itemprop="addressCountry"]/text()').extract_first(),
            'phone': (response.xpath('//span[@itemprop="telephone"]/text()').extract_first() or '').strip('| '),
            'lat': float(response.xpath('//meta[@property="place:location:latitude"]/@content').extract_first()),
            'lon': float(response.xpath('//meta[@property="place:location:longitude"]/@content').extract_first()),
            'website': response.url,
        }
        yield GeojsonPointItem(**properties)

    def parse_kimpton(self, response):
        """Parse a Kimpton landing page and follow the hotel's own website."""
        url = response.xpath('//a[contains(text(), "VISIT HOTEL WEBSITE")]/@href').extract_first()
        # ref and coordinates come from the IHG page; the rest from the hotel site.
        properties = {
            'ref': "_".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),
            'lat': float(response.xpath('//meta[@property="place:location:latitude"]/@content').extract_first()),
            'lon': float(response.xpath('//meta[@property="place:location:longitude"]/@content').extract_first()),
        }
        if not url:  # "opening soon" hotels just have teaser pages
            return
        url = url.split('?')[0]  # remove querystring
        yield scrapy.Request(url, callback=self.parse_kimpton_data, meta={"properties": properties})

    def parse_kimpton_data(self, response):
        """Fill in Kimpton hotel details from JSON-LD, falling back to microdata."""
        properties = response.meta["properties"]
        script = response.xpath('//script[@type="application/ld+json"]/text()').extract_first()
        if script:
            data = json.loads(script)
        else:
            data = {}
        if 'name' in data:
            # Preferred path: structured JSON-LD data.
            properties.update({
                'name': data["name"],
                'addr_full': data["address"]["streetAddress"],
                'city': data["address"]["addressLocality"],
                'state': data["address"].get("addressRegion"),
                'postcode': data["address"]["postalCode"],
                'country': data["address"].get("addressCountry"),
                'phone': data.get("telephone"),
                'website': data["url"]
            })
        else:
            # Fallback: same microdata scraping as parse_hotel.
            street_address = " ".join(response.xpath('//span[@itemprop="streetAddress"]/p/text()').extract())
            if not street_address:
                street_address = response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first()
            city = response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first()
            state = response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first()
            properties.update({
                'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
                'addr_full': street_address.replace(u'\u00a0', ' ').strip(', ') if street_address else None,
                'city': city.replace(u'\u00a0', ' ').strip(', ') if city else None,
                'state': state.replace(u'\u00a0', ' ') if state else None,
                'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
                'country': response.xpath('//span[@itemprop="addressCountry"]/text()').extract_first(),
                'phone': (response.xpath('//span[@itemprop="telephone"]/text()').extract_first() or '').strip('| '),
                'website': response.url,
            })
        yield GeojsonPointItem(**properties)

    def parse_regent(self, response):
        """Parse a Regent hotel page from its JSON-LD block."""
        data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first())
        properties = {
            'ref': "_".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),
            'name': data["name"],
            'addr_full': data["address"]["streetAddress"],
            'city': data["address"]["addressLocality"],
            'state': data["address"].get("addressRegion"),
            'postcode': data["address"]["postalCode"],
            'country': data["address"]["addressCountry"],
            'phone': data["telephone"],
            'lat': float(data["geo"]["latitude"]),
            'lon': float(data["geo"]["longitude"]),
            'website': response.url,
        }
        yield GeojsonPointItem(**properties)

    def parse_crowne_plaza(self, response):
        """Parse a Crowne Plaza page, splitting the pipe-delimited address line."""
        address = response.xpath('//a[@class="hotel-home"]/text()').extract_first().strip()
        address_parts = address.split('|')
        # NOTE(review): assumes international addresses always have exactly 4
        # pipe-separated parts and US ones 5 — verify against live pages.
        if len(address_parts) == 4:  # international addresses
            addr_city, postcode, country, _ = address_parts
            state = ''
        else:  # us addresses
            addr_city, state, postcode, country, _ = address_parts
        # The street and city share one comma-separated field; the city is the
        # last comma-delimited token.
        street_address = ",".join(addr_city.split(',')[0:-1])
        city = addr_city.split(',')[-1]
        properties = {
            'ref': "_".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),
            'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
            'addr_full': street_address.strip(),
            'city': city.strip(),
            'state': state.strip(),
            'postcode': postcode.strip(),
            'country': country.strip(),
            'phone': response.xpath('//div[@class="new-hinfo-address"]/p/a[2]/text()').extract_first(),
            'lat': float(response.xpath('//meta[@property="place:location:latitude"]/@content').extract_first()),
            'lon': float(response.xpath('//meta[@property="place:location:longitude"]/@content').extract_first()),
            'website': response.url,
        }
        yield GeojsonPointItem(**properties)

    def parse_candlewood_staybridge(self, response):
        """Parse Candlewood Suites / Staybridge Suites pages.

        Their addressRegion span packs "STATE POSTCODE COUNTRY" into one string,
        which is unpacked with a regex here.
        """
        if 'hoteldetail' not in response.url:
            # got redirected back to search page
            return
        street_address = " ".join(response.xpath('//span[@itemprop="streetAddress"]/p/text()').extract())
        if not street_address:
            street_address = response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first()
        region = response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first().replace(u'\u00a0',' ')
        match = re.search(r'([a-z]+)\s(\d+)\s(.*)', region, re.IGNORECASE)
        if match:
            state, postcode, country = match.groups()
        else:
            # No "STATE ZIP" prefix: the whole region string is the country.
            state, postcode, country = None, None, region.strip()
        properties = {
            'ref': "_".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),
            'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
            'addr_full': street_address.replace(u'\u00a0', ' ').strip(', '),
            'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first().replace(u'\u00a0', ' ').strip(', '),
            'state': state,
            'postcode': postcode,
            'country': country,
            'phone': response.xpath('//div[@class="booking"]/a/text()').extract_first(),
            'lat': float(response.xpath('//meta[@property="place:location:latitude"]/@content').extract_first()),
            'lon': float(response.xpath('//meta[@property="place:location:longitude"]/@content').extract_first()),
            'website': response.url,
        }
        yield GeojsonPointItem(**properties)

    def parse_army_hotel(self, response):
        """Parse an Army Hotels page from its business:contact_data meta tags."""
        properties = {
            'ref': "_".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),
            'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
            'addr_full': response.xpath('//meta[@property="business:contact_data:street_address"]/@content').extract_first(),
            'city': response.xpath('//meta[@property="business:contact_data:locality"]/@content').extract_first(),
            'state': response.xpath('//meta[@property="business:contact_data:region"]/@content').extract_first(),
            'postcode': response.xpath('//meta[@property="business:contact_data:postal_code"]/@content').extract_first(),
            'country': response.xpath('//meta[@property="business:contact_data:country_name"]/@content').extract_first(),
            'phone': (response.xpath('//span[@title="Hotel Front Desk:"]/span/text()').extract_first() or "").strip(),
            'lat': float(response.xpath('//meta[@property="place:location:latitude"]/@content').extract_first()),
            'lon': float(response.xpath('//meta[@property="place:location:longitude"]/@content').extract_first()),
            'website': response.url,
        }
        yield GeojsonPointItem(**properties)

    def parse(self, response):
        """Walk listing pages; dispatch hotel detail URLs to per-brand parsers."""
        # Brand key = first path segment after ihg.com in the hotel URL.
        hotel_parsers = {
            'holidayinn': self.parse_hotel,
            'crowneplaza': self.parse_crowne_plaza,
            'holidayinnexpress': self.parse_hotel,
            'hotelindigo': self.parse_hotel,
            'candlewood': self.parse_candlewood_staybridge,
            'staybridge': self.parse_candlewood_staybridge,
            'holidayinnresorts': self.parse_hotel,
            'intercontinental': self.parse_hotel,
            'regent': self.parse_regent,
            'hotels': self.parse_hotel,  # vocos
            'kimptonhotels': self.parse_kimpton,
            'holidayinnclubvacations': self.parse_hotel,
            'evenhotels': self.parse_hotel,
            'avidhotels': self.parse_hotel,
            'hualuxe': self.parse_hotel,
            'armyhotels': self.parse_army_hotel
        }
        hotel_urls = response.xpath('//div[@class="hotelList"]//div[contains(@class, "hotelItem")]//a[contains(@class, "hotel-name")]/@href').extract()
        if 'armyhotels' in response.url:
            hotel_urls = response.xpath('//div[@id="hotelListWrap"]//a/@href').extract()
        if hotel_urls:
            for url in hotel_urls:
                # NOTE(review): an unknown brand segment would raise KeyError
                # on the dict lookup below — presumably acceptable as a loud
                # failure, but confirm.
                hotel_type = re.search(r'ihg.com/(.*?)/', response.urljoin(url), re.IGNORECASE).group(1)
                yield scrapy.Request(response.urljoin(url),
                                     callback=hotel_parsers[hotel_type])
        else:
            # Not a hotel list: recurse into destination listing links.
            urls = response.xpath('//li[@class="listingItem"]/a/@href').extract()
            for url in urls:
                yield scrapy.Request(response.urljoin(url))
|
"""
↓ Инициализация данных ↓
"""
from PyQt5 import QtWidgets, QtCore, QtGui
from GUI.GUI_windows_source import Collection
from scripts.utils import get_collection_data, mod_name_wrap, get_info_from_stack, get_total_value, \
file_name_fix, open_file_for_resuming, find_last_file, get_collection_description, get_collection_mod_list, collection_settings_update
from scripts.stylesheets import mod_name_style, file_name_style, complete_translation_style, \
incomplete_translation_style, create_row_separator
from scripts.messeges import call_error_message, call_accept_message
from scripts.pictures import get_thumbnail
import json
from functools import partial
class CollectionWindow(QtWidgets.QDialog, Collection.Ui_Dialog):
    def __init__(self, parent):
        """Build the collection dialog: frameless, modal, with styled widgets."""
        super().__init__(parent)
        self.setupUi(self)
        self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.FramelessWindowHint)
        self.setModal(True)
        self.parent = parent
        self.oldPos = self.pos()  # last cursor position, used for window dragging
        self.init_handlers()
        self.message = None
        # Split caption text; the first element is reused as a button caption.
        self.string = self.StringsList.text().split('.')
        self.collection = get_collection_data()
        self.set_collection_name()
        self.gridLayout.setSpacing(15)
        self.buttons = {}
        self.row_index = 0  # current row in the mod grid
        self.OptionsListComboBox.view().parentWidget().setStyleSheet("background: #05B8CC;")
        # Thumbnail border styles keyed by translation-progress state.
        self.borders = {
            'blue': 'border: 3px solid #05B8CC;',
            'green': 'border: 3px solid #5abe41;',
            'gray': 'border: 3px solid gray'
        }
        self.paint_elements()
def init_handlers(self):
self.ExitButton.clicked.connect(self.close)
self.OptionsListComboBox.activated[str].connect(lambda: self.paint_elements())
self.ReferenceButton.clicked.connect(lambda: self.parent.reference_window('QLabel_3_Collection'))
self.WindowMoveButton.installEventFilter(self)
def eventFilter(self, source, event):
"""
Данная функция предназначена для отслеживания позиции окна
и его перемещения кликом по шапке
"""
if source == self.WindowMoveButton:
if event.type() == QtCore.QEvent.MouseButtonPress:
self.oldPos = event.pos()
elif event.type() == QtCore.QEvent.MouseMove and self.oldPos is not None:
self.move(self.pos() - self.oldPos + event.pos())
return True
elif event.type() == QtCore.QEvent.MouseButtonRelease:
self.oldPos = None
return super().eventFilter(source, event)
"""
↓ Рендер ↓
"""
def set_collection_name(self):
with open('Properties.json', 'r', encoding='utf-8') as properties:
properties = json.load(properties)
self.NewNameText.setText(properties["collection_name"])
self.NewNameText.setAlignment(QtCore.Qt.AlignCenter)
self.CollectionNameLabel.setText(properties["collection_name"])
def clean(self, grid):
self.ContinueButton.setText(self.string[0])
self.ContinueButton.disconnect()
self.ContinueButton.clicked.connect(self.close)
self.ContinueButton.clicked.connect(self.continue_last_translation)
for i in reversed(range(grid.count())):
grid.itemAt(i).widget().setParent(None)
def print_mod_name(self, grid, files, value):
thumbnail = QtWidgets.QLabel()
pixmap = QtGui.QPixmap(get_thumbnail(files[0].hash_key))
mod_name = QtWidgets.QPushButton(mod_name_wrap(files[0].mod_name, 35))
pixmap = pixmap.scaled(160, 100, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
thumbnail.setPixmap(pixmap)
if value == 100:
thumbnail.setStyleSheet(self.borders['green'])
elif value < 100:
thumbnail.setStyleSheet(self.borders['blue'])
mod_name_style(mod_name)
grid.addWidget(thumbnail, self.row_index + 1, 0)
grid.addWidget(mod_name, self.row_index + 1, 1, 1, 4)
def print_mod_id(self, grid, mod_id, value):
self.buttons[mod_id] = QtWidgets.QPushButton(mod_id)
status = QtWidgets.QProgressBar()
file_name_style(self.buttons[mod_id])
status.setValue(value)
if status.value() != 100:
incomplete_translation_style(status)
else:
complete_translation_style(status)
grid.addWidget(self.buttons[mod_id], self.row_index + 1, 6)
grid.addWidget(status, self.row_index + 1, 7)
self.row_index += 1
def files_not_found(self, grid):
"""
Если, вдруг, файлов для текущей опции в списке не окажется,
вместо них будет добавлена заглушка
"""
button = QtWidgets.QPushButton(f"{'—' * 8}")
status = QtWidgets.QProgressBar()
file_name_style(button)
incomplete_translation_style(status)
status.setValue(0)
status.setFormat("—— ")
grid.addWidget(button, self.row_index + 1, 6)
grid.addWidget(status, self.row_index + 1, 7)
self.row_index += 1
def print_files_names(self, grid, files, option):
files_list = [file for file in files if option in file.type]
if files_list:
for file in files_list:
button = f'{file.mod_id}-{file.original_file_name}'
self.buttons[button] = QtWidgets.QPushButton(file_name_fix(file.original_file_name, option))
message = ('start_translation', file, file.original_file_name)
self.buttons[button].clicked.connect(partial(call_accept_message,
self, message,
partial(self.start_localisation, file)))
status = QtWidgets.QProgressBar()
file_name_style(self.buttons[button])
status.setValue(file.tr_status)
if status.value() != 100:
incomplete_translation_style(status)
else:
complete_translation_style(status)
grid.addWidget(self.buttons[button], self.row_index + 1, 6)
grid.addWidget(status, self.row_index + 1, 7)
self.row_index += 1
else:
self.files_not_found(grid)
def print_rename_collection(self, grid):
name = self.CollectionNameLabel.text()
mod_list = get_collection_mod_list(self.collection.items(), self.string[3])
self.ContinueButton.setText(self.string[1])
self.ModDescriptionText.setText(get_collection_description(name, self.string[2], self.string[3]))
self.ModDescriptionText.setAlignment(QtCore.Qt.AlignCenter)
self.ModListLabel.setText(mod_list)
self.ContinueButton.disconnect()
self.ContinueButton.clicked.connect(
lambda: collection_settings_update((self.NewNameText.toPlainText(),
f'{self.ModDescriptionText.toPlainText()}\n{mod_list}',
self.CollectionNameLabel.text())
)
)
grid.addWidget(self.NewNameText, 1, 0)
grid.addWidget(self.ModDescriptionText, 2, 0)
grid.addWidget(self.ModListLabel, 3, 0)
def paint_elements(self):
grid = self.gridLayout
options = self.OptionsListComboBox
self.clean(grid)
self.set_collection_name()
for mod_id, files in self.collection.items():
value = get_total_value(files)
self.print_mod_name(grid, files, value)
separator = create_row_separator()
if options.currentText() in options.itemText(0):
self.print_mod_id(grid, mod_id, value)
grid.addWidget(separator, self.row_index + 1, 6)
elif options.currentText() in options.itemText(1):
self.print_files_names(grid, files, 'localisation')
grid.addWidget(separator, self.row_index + 1, 6)
elif options.currentText() in options.itemText(2):
self.print_files_names(grid, files, 'name_lists')
grid.addWidget(separator, self.row_index + 1, 6)
self.row_index += 1
if options.currentText() in options.itemText(3):
self.clean(grid)
self.print_rename_collection(grid)
"""
↓ Работа с локализациями ↓
"""
def start_localisation(self, file):
self.parent.orig_text = open_file_for_resuming(file.source_file_path)
self.parent.machine_text = open_file_for_resuming(file.machine_file_path)
self.parent.user_text = open_file_for_resuming(file.user_input_file_path)
self.parent.pointer = file.pointer_pos
self.parent.check_new_line_symbol_string(True)
self.parent.pointer_max_value = len(self.parent.orig_text)
self.parent.file = file
self.parent.init_helpers(True)
self.parent.progressbar_set_maximum(len(self.parent.orig_text))
self.parent.set_lines()
self.parent.ModIDLine.setText(file.mod_id)
self.parent.mod_type_pixmap(file.mod_id)
self.parent.ModNameLine.setText(file.mod_name)
self.parent.FileNameLine.setText(file.original_file_name)
self.findChild(QtWidgets.QDialog).close()
self.close()
def continue_last_translation(self):
last_file: list = get_info_from_stack()
if last_file:
file = find_last_file(self.collection, last_file)
message = ('continue_last_translation', file, file.original_file_name)
call_accept_message(self, message, lambda: self.start_localisation(file))
else:
message = 'all_is_complete'
call_error_message(self, message)
|
# repo: raccoongang/openprocurement.tender.competitivedialogue
from openprocurement.api.validation import (
validate_data, validate_json_data
)
from openprocurement.api.utils import (
apply_data_patch, update_logging_context, error_handler, raise_operation_error
)
from openprocurement.tender.competitivedialogue.models import STAGE2_STATUS
from openprocurement.tender.competitivedialogue.utils import (
prepare_shortlistedFirms, prepare_author, prepare_bid_identifier
)
def validate_patch_tender_stage2_data(request):
    """Validate a PATCH of a stage-2 tender.

    Draft tenders may only be switched to 'active.tendering' or the
    stage-2 pseudo-status; for non-draft tenders the items' CPV group
    and the enquiryPeriod are locked, and a transition from the stage-2
    status to 'active.tendering' triggers full model validation.
    """
    data = validate_json_data(request)
    if request.context.status == 'draft':
        # A draft tender can only move into one of these statuses.
        default_statuses = ['active.tendering', STAGE2_STATUS]
        if data.get('status') not in default_statuses:
            raise_operation_error(request, 'Can\'t update tender in current ({0}) status'.format(data['status']))
        # Only the status change is taken from the payload for drafts.
        request.validated['data'] = {'status': data.get('status')}
        request.context.status = data.get('status')
        return
    if data:
        if 'items' in data:
            # All items (existing and patched) must share one CPV group
            # (the first three characters of the classification id).
            items = request.context.items
            cpv_group_lists = [i.classification.id[:3] for i in items]
            for item in data['items']:
                if 'classification' in item and 'id' in item['classification']:
                    cpv_group_lists.append(item['classification']['id'][:3])
            if len(set(cpv_group_lists)) != 1:
                request.errors.add('body', 'item', 'Can\'t change classification')
                request.errors.status = 403
                raise error_handler(request.errors)
        if 'enquiryPeriod' in data:
            # Any effective change to the enquiry period is forbidden.
            if apply_data_patch(request.context.enquiryPeriod.serialize(), data['enquiryPeriod']):
                request.errors.add('body', 'item', 'Can\'t change enquiryPeriod')
                request.errors.status = 403
                raise error_handler(request.errors)
    if request.context.status == STAGE2_STATUS and data.get('status') == 'active.tendering':
        data = validate_data(request, type(request.tender), True, data)
        if data:  # if no error then add status to validate data
            request.context.status = 'active.tendering'
            data['status'] = 'active.tendering'
    else:
        data = validate_data(request, type(request.tender), True, data)
    return data
def get_item_by_id(tender, id):
    """Return the item of *tender* whose 'id' equals *id*, or None when absent."""
    return next((item for item in tender['items'] if item['id'] == id), None)
def validate_author(request, shortlistedFirms, obj):
    """ Compare author key and key from shortlistedFirms """
    operation = 'create' if request.method == 'POST' else 'patch'
    error_message = 'Author can\'t {} {}'.format(operation, obj.__class__.__name__.lower())
    firms_keys = prepare_shortlistedFirms(shortlistedFirms)
    author_key = prepare_author(obj)
    if obj.get('questionOf') == 'item':  # question can create on item
        # Item-level questions: map the item suffix to its related lot,
        # or drop the suffix entirely when the tender has no lots.
        if shortlistedFirms[0].get('lots'):
            item_id = author_key.split('_')[-1]
            item = get_item_by_id(request.validated['tender'], item_id)
            author_key = author_key.replace(author_key.split('_')[-1], item['relatedLot'])
        else:
            author_key = '_'.join(author_key.split('_')[:-1])
    # The author is legal when its key occurs inside any firm key.
    if not any(author_key in firm for firm in firms_keys):
        request.errors.add('body', 'author', error_message)
        request.errors.status = 403
        raise error_handler(request.errors)
    return True
def validate_complaint_data_stage2(request):
    """Validate a POST-ed complaint on a stage-2 tender.

    Checks broker accreditation, validates the complaint body, then
    verifies the author against the tender's short-listed firms.
    """
    def _forbid(field, message):
        # Register a 403 error and abort the request.
        request.errors.add('procurementMethodType', field, message)
        request.errors.status = 403
        raise error_handler(request.errors)

    if not request.check_accreditation(request.tender.edit_accreditation):
        _forbid('accreditation', 'Broker Accreditation level does not permit complaint creation')
    if request.tender.get('mode', None) is None and request.check_accreditation('t'):
        _forbid('mode', 'Broker Accreditation level does not permit complaint creation')
    update_logging_context(request, {'complaint_id': '__new__'})
    data = validate_data(request, type(request.tender).complaints.model_class)
    if not data:
        return data
    author_ok = validate_author(request, request.tender['shortlistedFirms'], request.validated['complaint'])
    return data if author_ok else None
def validate_patch_complaint_data_stage2(request):
    """Validate a PATCH of a stage-2 complaint and check its author."""
    complaint_model = type(request.tender).complaints.model_class
    data = validate_data(request, complaint_model, True)
    if not data:
        return data
    author_ok = validate_author(request, request.tender['shortlistedFirms'], request.validated['complaint'])
    return data if author_ok else None
def validate_post_question_data_stage2(request):
    """Validate a POST-ed question on a stage-2 tender.

    Checks broker accreditation, validates the question body, then
    verifies the author against the tender's short-listed firms.
    """
    def _forbid(field, message):
        # Register a 403 error and abort the request.
        request.errors.add('procurementMethodType', field, message)
        request.errors.status = 403
        raise error_handler(request.errors)

    if not request.check_accreditation(request.tender.edit_accreditation):
        _forbid('accreditation', 'Broker Accreditation level does not permit question creation')
    if request.tender.get('mode', None) is None and request.check_accreditation('t'):
        _forbid('mode', 'Broker Accreditation level does not permit question creation')
    update_logging_context(request, {'question_id': '__new__'})
    question_model = type(request.tender).questions.model_class
    data = validate_data(request, question_model)
    if not data:
        return data
    author_ok = validate_author(request, request.tender['shortlistedFirms'], request.validated['question'])
    return data if author_ok else None
# tender
def validate_credentials_generation(request):
    """Forbid credential generation unless the tender is in 'draft.stage2'."""
    status = request.validated['tender'].status
    if status != "draft.stage2":
        raise_operation_error(request, 'Can\'t generate credentials in current ({}) contract status'.format(status))
def validate_tender_update(request):
    """Allow the tender owner to change status only to a whitelisted value."""
    tender = request.context
    data = request.validated['data']
    # Owner may keep the current status or move to one of the two
    # stage-transition statuses; anything else is rejected.
    allowed_statuses = ('active.pre-qualification.stand-still', 'active.stage2.waiting', tender.status)
    if request.authenticated_role == 'tender_owner' and 'status' in data and \
            data['status'] not in allowed_statuses:
        raise_operation_error(request, 'Can\'t update tender status')
# bid
def validate_bid_status_update_not_to_pending_or_draft(request):
    """Non-administrators may only move a bid to 'pending' or 'draft'."""
    if request.authenticated_role == 'Administrator':
        return
    target_status = request.validated['data'].get("status", request.context.status)
    if target_status in ('pending', 'draft'):
        return
    request.errors.add('body', 'bid', 'Can\'t update bid to ({}) status'.format(target_status))
    request.errors.status = 403
    raise error_handler(request.errors)
def validate_firm_to_create_bid(request):
    """Reject a bid whose identifiers fall outside the short-listed firms."""
    tender = request.validated['tender']
    bid = request.validated['bid']
    allowed_keys = prepare_shortlistedFirms(tender.shortlistedFirms)
    bid_keys = prepare_bid_identifier(bid)
    # Every bid key must belong to a short-listed firm.
    if not (bid_keys <= allowed_keys):
        raise_operation_error(request, 'Firm can\'t create bid')
# lot
def validate_lot_operation_for_stage2(request):
    """Lots cannot be managed on a stage-2 tender: always raise."""
    verb = {"POST": "create", "PATCH": "update", "DELETE": "delete"}.get(request.method)
    raise_operation_error(request, 'Can\'t {} lot for tender stage2'.format(verb))
|
from mrcnn import visualize
import os
import sys
import time
import numpy as np
import imgaug # https://github.com/aleju/imgaug (pip3 install imgaug)
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
from mrcnn import visualize
import zipfile
import urllib.request
import shutil
import matplotlib.pyplot as plt
### Root directory of the project
ROOT_DIR = os.path.abspath("../")
CURRENT_DIR = os.path.abspath("./")
HOME_DIR = os.path.expanduser('~')
OUTPUT_DIR = './'
### Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
### Path to trained weights file
CUSTOM_MODEL_PATH = os.path.join('../../../dataset_2021/Deetas/output_Mask_RCNN/logs/deetas20211204T1611/mask_rcnn_deetas_0119.h5')
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
####### custom
ANNOTATION_PATH = '../../../dataset_2021/Deetas/data_22_01_14/json_raw/N-E-H-017.json'
IMAGE_DIR = '../../../../../../mnt/4T_01/Deetas/image'
NUM_CLASSE = 16
SAVE_DIR = '../../../../test_maeng'
CLASSES_NAMES = '../../../dataset_2021/Deetas/class_list/deetas_16.names'
# CLASSES_NAMES = '../../../dataset_2021/dataset_2021/Deetas/class_list/deetas_14.names'
### Directory to save logs and model checkpoints, if not provided
### through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join('../../../dataset_2021/Deetas/output_Mask_RCNN/logs')
DEFAULT_DATASET_YEAR = ""
########################################################################################################################
# Configurations
########################################################################################################################
class Deetas_Config(Config):
    """Configuration for training on MS COCO.
    Derives from the base Config class and overrides values specific
    to the COCO dataset.
    """
    ### Give the configuration a recognizable name
    NAME = "deetas"
    ### We use a GPU with 12GB memory, which can fit two images.
    ### Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 2
    ### Uncomment to train on 8 GPUs (default is 1)
    GPU_COUNT = 1
    ### Number of classes (including background)
    NUM_CLASSES = NUM_CLASSE  # taken from the module-level NUM_CLASSE constant (currently 16)
########################################################################################################################
# Dataset
########################################################################################################################
class Deetas_Dataset(utils.Dataset):
    """COCO-format dataset wrapper for the Deetas annotations.

    Classes AND images are registered under the source name "coco" so
    that load_mask() / map_source_class_id() (which look up "coco.<id>")
    resolve correctly.
    """

    def load_deetas(self, dataset_dir, subset, class_ids=None,
                    class_map=None, return_coco=False):
        """Load the Deetas annotation file (COCO format).

        dataset_dir: unused — paths come from the module-level
            ANNOTATION_PATH / IMAGE_DIR constants (kept for interface
            compatibility with the original coco.py signature).
        subset: unused (kept for interface compatibility).
        class_ids: if provided, only images containing these classes load.
        class_map: TODO: not implemented.
        return_coco: if True, return the pycocotools COCO object.
        """
        annotation_path = ANNOTATION_PATH
        json_deetas = COCO(annotation_path)
        image_dir = IMAGE_DIR
        print(image_dir)
        ### Load all classes or a subset?
        if not class_ids:
            ### All classes
            class_ids = sorted(json_deetas.getCatIds())
        ### All images or a subset?
        if class_ids:
            image_ids = []
            for id in class_ids:
                image_ids.extend(list(json_deetas.getImgIds(catIds=[id])))
            ### Remove duplicates
            image_ids = list(set(image_ids))
        else:
            ### All images
            image_ids = list(json_deetas.imgs.keys())
        ### Add classes
        for i in class_ids:
            self.add_class("coco", i, json_deetas.loadCats(i)[0]["name"])
        ### Add images.
        # FIX: images were registered with source "json_deetas" while the
        # classes use "coco" and load_mask() checks for "coco" — every
        # image therefore fell through to the parent's empty mask.
        for i in image_ids:
            self.add_image(
                "coco", image_id=i,
                path=os.path.join(image_dir, json_deetas.imgs[i]['file_name']),
                width=json_deetas.imgs[i]["width"],
                height=json_deetas.imgs[i]["height"],
                annotations=json_deetas.loadAnns(json_deetas.getAnnIds(
                    imgIds=[i], catIds=class_ids, iscrowd=None)))
        if return_coco:
            return json_deetas

    def load_mask(self, image_id):
        """Load instance masks for the given image.

        Returns:
            masks: bool array of shape [height, width, instance count],
                one mask per instance.
            class_ids: 1D int32 array of class IDs (negative for crowds).
        """
        ### If not a COCO image, delegate to parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "coco":
            return super(Deetas_Dataset, self).load_mask(image_id)
        instance_masks = []
        class_ids = []
        annotations = self.image_info[image_id]["annotations"]
        ### Build mask of shape [height, width, instance_count] and list
        ### of class IDs that correspond to each channel of the mask.
        for annotation in annotations:
            class_id = self.map_source_class_id(
                "coco.{}".format(annotation['category_id']))
            if class_id:
                m = self.annToMask(annotation, image_info["height"],
                                   image_info["width"])
                ### Some objects are so small that they're less than 1 pixel area
                ### and end up rounded out. Skip those objects.
                if m.max() < 1:
                    continue
                ### Is it a crowd? If so, use a negative class ID.
                if annotation['iscrowd']:
                    class_id *= -1
                    ### For crowd masks, annToMask() sometimes returns a mask
                    ### smaller than the given dimensions. If so, resize it.
                    if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
                        m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
                instance_masks.append(m)
                class_ids.append(class_id)
        ### Pack instance masks into an array
        if class_ids:
            # FIX: np.bool was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin bool is the documented replacement.
            mask = np.stack(instance_masks, axis=2).astype(bool)
            class_ids = np.array(class_ids, dtype=np.int32)
            return mask, class_ids
        else:
            ### Call super class to return an empty mask
            return super(Deetas_Dataset, self).load_mask(image_id)

    ### The following two functions are from pycocotools with a few changes.
    def annToRLE(self, ann, height, width):
        """
        Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: run-length encoding usable by maskUtils.decode
        """
        segm = ann['segmentation']
        if isinstance(segm, list):
            ### polygon -- a single object might consist of multiple parts
            ### we merge all parts into one mask rle code
            rles = maskUtils.frPyObjects(segm, height, width)
            rle = maskUtils.merge(rles)
        elif isinstance(segm['counts'], list):
            ### uncompressed RLE
            rle = maskUtils.frPyObjects(segm, height, width)
        else:
            ### rle
            rle = ann['segmentation']
        return rle

    def annToMask(self, ann, height, width):
        """
        Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
        :return: binary mask (numpy 2D array)
        """
        rle = self.annToRLE(ann, height, width)
        m = maskUtils.decode(rle)
        return m
########################################################################################################################
# COCO Evaluation
########################################################################################################################
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
    """Arrange detection results to match COCO specs in
    http://cocodataset.org/#format

    dataset: Dataset providing get_source_class_id() for id mapping.
    image_ids: COCO image ids the detections belong to — NOTE: every id
        is paired with ALL detections, so callers are expected to pass a
        one-element slice per detected image.
    rois: [N, (y1, x1, y2, x2)] detection boxes.
    class_ids, scores: per-detection class ids and confidences.
    masks: [H, W, N] binary masks.
    Returns a list of per-detection dicts in COCO result format.
    """
    # If no results, return an empty list
    if rois is None:
        return []
    results = []
    for image_id in image_ids:
        # Loop through detections
        for i in range(rois.shape[0]):
            class_id = class_ids[i]
            score = scores[i]
            # COCO bboxes are [x, y, width, height], rounded to 1 decimal.
            bbox = np.around(rois[i], 1)
            mask = masks[:, :, i]
            result = {
                "image_id": image_id,
                "category_id": dataset.get_source_class_id(class_id, "coco"),
                "bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
                "score": score,
                # RLE-encode the mask (pycocotools requires Fortran order).
                "segmentation": maskUtils.encode(np.asfortranarray(mask))
            }
            results.append(result)
    return results
def generate_mask(model, dataset_class, deetas_data, eval_type="bbox", limit=0, image_ids=None):
    """Run detection over dataset images and save visualised masks.

    model: a MaskRCNN model in inference mode.
    dataset_class: a prepared Deetas_Dataset with the images to process.
    deetas_data: COCO object from load_deetas (kept for the evaluation
        code path that is currently disabled).
    eval_type: "bbox" or "segm" (unused while evaluation is disabled).
    limit: if non-zero, process at most this many images.
        FIX: the original unconditionally overwrote this parameter with
        a hard-coded 100, so the caller's --limit value was ignored.
    image_ids: optional explicit image ids; defaults to every dataset image.
    """
    # Pick COCO images from the dataset.
    idx_image_ndarray = image_ids or dataset_class.image_ids
    # Limit to a subset (the slice alone enforces the limit; the original
    # additionally re-checked it with a break inside the loop).
    if limit:
        idx_image_ndarray = idx_image_ndarray[:limit]
    # Class-id -> name list, loaded once up front (the original re-read
    # the names file on every image of the loop).
    with open(CLASSES_NAMES, 'r') as names_file:
        class_names = [line.strip('\n') for line in names_file]
    t_prediction = 0
    t_start = time.time()
    print("idx_image_ndarray", len(idx_image_ndarray))
    for idx_for, image_id in enumerate(idx_image_ndarray):
        print("idx_for :", idx_for)
        ### Load image
        image = dataset_class.load_image(image_id)
        ### Run detection (timed separately from I/O and plotting)
        t = time.time()
        detections = model.detect([image], verbose=0)[0]
        t_prediction += (time.time() - t)
        ### Draw the detections and save a figure named after the image id.
        # NOTE(review): figures are never closed; over many images this
        # may grow memory — confirm whether plt.close() is safe here.
        visualize.display_instances(image, detections["rois"], detections["masks"],
                                    detections["class_ids"], class_names)
        plt.savefig("{}/{}.png".format(SAVE_DIR, dataset_class.image_info[image_id]["id"]))
    # Guard the average against an empty image list (ZeroDivisionError).
    if len(idx_image_ndarray):
        print("Prediction time: {}. Average {}/image".format(
            t_prediction, t_prediction / len(idx_image_ndarray)))
    print("Total time: ", time.time() - t_start)
########################################################################################################################
# Training
########################################################################################################################
if __name__ == '__main__':
    import argparse
    ### Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN on MS COCO.')
    parser.add_argument("command",
                        default='generate_mask',
                        metavar="<command>",
                        help="only generate_mask")
    parser.add_argument('--dataset', required=False,
                        metavar="/path/to/coco/",
                        help='Directory of the any dataset')
    parser.add_argument('--model', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--limit', required=False,
                        default=100,
                        metavar="<image count>",
                        help='Images to use for evaluation (default=500)')
    args = parser.parse_args()
    print("Command: ", args.command)
    print("Model: ", args.model)
    print("Dataset: ", args.dataset)
    print("Logs: ", args.logs)
    ### Configurations
    # Inference-time overrides: one image per batch, keep all detections.
    class InferenceConfig(Deetas_Config):
        # Set batch size to 1 since we'll be running inference on
        # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        DETECTION_MIN_CONFIDENCE = 0
    config = InferenceConfig()
    config.display()
    ### Create model
    model = modellib.MaskRCNN(mode="inference", config=config,
                              model_dir=args.logs)
    # Select weights file to load based on the --model keyword/path.
    if args.model.lower() == "coco":
        model_path = COCO_MODEL_PATH
    elif args.model.lower() == "custom":
        model_path = CUSTOM_MODEL_PATH
    elif args.model.lower() == "last":
        # Find last trained weights
        model_path = model.find_last()
    elif args.model.lower() == "imagenet":
        # Start from ImageNet trained weights
        model_path = model.get_imagenet_weights()
    else:
        # Otherwise treat the value as a direct path to a weights file.
        model_path = args.model
    ### Load weights
    print("Loading weights ", model_path)
    model.load_weights(model_path, by_name=True)
    ### Train or evaluate
    if args.command == "generate_mask":
        ### Validation dataset
        input_dataset = Deetas_Dataset()
        deetas_data = input_dataset.load_deetas(args.dataset, "test", return_coco=True)
        input_dataset.prepare()
        print("Running COCO evaluation on {} images.".format(args.limit))
        generate_mask(model, input_dataset, deetas_data, "bbox", limit=int(args.limit))
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'evaluate'".format(args.command))
|
# repo: Ajuajmal/art-fest-event-manager-sattva
from django.shortcuts import render, redirect, get_object_or_404
from django.http import JsonResponse
from django.template.loader import render_to_string
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.contrib.auth.models import User
from django.contrib import messages
from django.conf import settings
from django.views import generic
from django.utils import timezone
from django.views.generic import ListView, CreateView, UpdateView
from django.urls import reverse_lazy
from .forms import ParticipantForm
from .models import Participant,Event
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
def homeviews(request):
    """Render the static landing page."""
    template = 'home.html'
    return render(request, template)
class ParticipantCreateView(SuccessMessageMixin, LoginRequiredMixin, CreateView):
    """Login-protected form view that registers a new event participant.

    On success it redirects back to the registration page and shows a
    message naming the event and participant.
    """
    model = Participant
    form_class = ParticipantForm
    template_name = 'participant_form.html'
    success_url = reverse_lazy('newregistration')
    success_message = "%(event)s registration for %(name)s was successfully Processed"
    def form_valid(self, form):
        # Stamp the participant with the registering user's branch before saving.
        form.instance.branch = self.request.user.profile.branch
        return super(ParticipantCreateView, self).form_valid(form)
@login_required
@transaction.atomic
def newreg(request):
    """Handle the participant registration form for the logged-in user.

    Fixes: the original looked up ``get_object_or_404(User, username=username)``
    where ``username`` was never defined, raising NameError on every
    request — the registering user is simply ``request.user``. It also
    clobbered ``user`` with the saved Participant, and the success
    message contained a typo ("Registraion").
    """
    user = request.user
    if request.method == 'POST':
        form = ParticipantForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Your Registration Processed Successfully')
            return redirect('newregistration')
        else:
            messages.error(request, 'Please correct the error below.')
    else:
        form = ParticipantForm()
    return render(request, 'participant_form.html', {'form': form, 'user': user})
def participant_list(request):
    """List the deletable participants belonging to the user's branch."""
    queryset = (Participant.objects
                .filter(branch=request.user.profile.branch)
                .filter(deletable=True))
    return render(request, 'participant_list.html', {'participants': queryset})
def save_participant_form(request, form, template_name):
    """Validate *form* on POST and return JSON with re-rendered fragments.

    The response always contains the rendered form HTML; on a valid POST
    it also carries the refreshed participant list fragment.
    """
    payload = dict()
    if request.method == 'POST':
        if form.is_valid():
            form.save()
            payload['form_is_valid'] = True
            payload['html_participant_list'] = render_to_string(
                'partial_participant_list.html',
                {'participants': Participant.objects.all()})
        else:
            payload['form_is_valid'] = False
    payload['html_form'] = render_to_string(template_name, {'form': form}, request=request)
    return JsonResponse(payload)
def participant_update(request, pk):
    """Edit an existing participant through the shared AJAX form handler."""
    participant = get_object_or_404(Participant, pk=pk)
    # Bind POST data when present; an unbound form pre-filled from the
    # instance is shown otherwise.
    bound_data = request.POST if request.method == 'POST' else None
    form = ParticipantForm(bound_data, instance=participant)
    return save_participant_form(request, form, 'participant_update.html')
def load_events(request):
    """Return the event <option> list for the chosen category (AJAX dropdown)."""
    category = request.GET.get('category')
    # Only venue-1 events are offered, ordered alphabetically.
    events = Event.objects.filter(category_id=category).filter(venue=1).order_by('name')
    return render(request, 'event_dropdown_list_options.html', {'events': events})
def participant_delete(request, pk):
    """Delete a participant (POST) or return the confirmation form (GET) as JSON.

    Fix: removed the leftover debug ``print(data)`` calls that dumped the
    full rendered HTML to stdout on every request.
    """
    participant = get_object_or_404(Participant, pk=pk)
    data = dict()
    if request.method == 'POST':
        participant.delete()
        data['form_is_valid'] = True
        # NOTE(review): this path re-renders 'participant_list.html' while
        # the update path uses 'partial_participant_list.html' — confirm
        # which fragment the front-end expects.
        participants = Participant.objects.all()
        data['html_participant_list'] = render_to_string('participant_list.html', {
            'participants': participants
        })
    else:
        context = {'participant': participant}
        data['html_form'] = render_to_string('partial_participant_delete.html', context, request=request)
    return JsonResponse(data)
|
"""GitHub Module"""
import json
import requests
from django.contrib.auth.models import User
#from readux import __version__
__version__ = "2.0.0"
class GithubApiException(Exception):
    """Base exception for errors raised by the GitHub API wrapper."""
    # Idiom fix: a class body with a docstring needs no redundant `pass`.
class GithubAccountNotFound(GithubApiException):
    """Raised when a user has no linked GitHub social-auth account."""
    # Idiom fix: a class body with a docstring needs no redundant `pass`.
class GithubApi(object):
    """
    Partial GitHub API access.

    Does **NOT** implement the full API, only those portions currently
    needed for readux export functionality.
    """
    #: base URL of the GitHub REST API
    url = 'https://api.github.com'

    def __init__(self, token):
        """Create an API client that authenticates with *token*."""
        # initialize a request session that will pass the oauth token
        # as an authorization header
        self.session = requests.Session()
        self.session.headers.update({
            'Authorization': 'token %s' % token,
            'Accept': 'application/vnd.github.v3+json',
            # BUGFIX: the second placeholder previously repeated the Readux
            # version; it is meant to advertise the python-requests version.
            'User-Agent': 'Readux %s / python-requests %s' % (__version__, requests.__version__),
        })

    @staticmethod
    def github_account(user):
        '''Static method to find a user's GitHub account (current or
        linked account via python-social-auth); raises
        :class:`GithubAccountNotFound` if none is found.'''
        account = user.socialaccount_set.filter(provider='github').first()
        if account is None:
            raise GithubAccountNotFound
        return account

    @staticmethod
    def github_token(user):
        '''Retrieve a GitHub user account's token'''
        # TODO: handle no github account, check for appropriate scope
        # NOTE(review): socialtoken_set.first() may be None for an account
        # without a stored token, which would raise AttributeError here.
        social_account = GithubApi.github_account(user)
        return social_account.socialtoken_set.first().token

    @staticmethod
    def github_username(user):
        '''Retrieve the GitHub username for a linked GitHub social auth
        account'''
        return GithubApi.github_account(user).extra_data['login']

    @classmethod
    def connect_as_user(cls, user):
        '''Initialize a new GithubApi connection for the specified user.
        '''
        return cls(cls.github_token(user))

    def oauth_scopes(self, test=False):
        """Get a list of scopes available for the current oauth token

        :param test: Flag for if code is being executed for testing, defaults to False
        :type test: bool, optional
        :return: List of OAuth scopes, or None on a non-200 response
        :rtype: list
        """
        # TODO: httpretty does not like the HEAD method, so this is a quick
        # workaround for testing. It might be fine to just use GET.
        if test:
            response = self.session.get('%s/user' % self.url)
        else:
            response = self.session.head('%s/user' % self.url)  # pragma: no cover
        if response.status_code == requests.codes.ok:
            return response.headers['x-oauth-scopes'].split(', ')
        return None

    def create_repo(self, name, user, description=None, homepage=None):
        """Create a new user repository with the specified name.

        :param name: Repo name
        :type name: str
        :param user: Django user whose GitHub token should authorize the call
        :param description: Repo description, defaults to None
        :type description: str, optional
        :param homepage: URL for site, defaults to None
        :type homepage: str, optional
        :return: True if the POST succeeds.
        :rtype: bool
        """
        # Refresh the auth header in case a different user acts through
        # this client instance.
        self.session.headers['Authorization'] = f'token {self.github_token(user)}'
        repo_data = {'name': name}
        if description:
            repo_data['description'] = description
        if homepage:
            repo_data['homepage'] = homepage
        # other options we might care about: homepage url, private/public,
        # has_issues, has_wiki, has_downloads, license_template
        response = self.session.post(
            '{u}/user/repos'.format(u=self.url),
            data=json.dumps(repo_data)
        )
        return response.status_code == requests.codes.created

    def list_repos(self, user, per_page=3):
        """Get a list of repositories owned by a person

        :param user: GitHub username
        :type user: str
        :param per_page: page size requested per API call; kept at the
            historical default of 3 for backward compatibility
        :type per_page: int, optional
        :return: List of person's repositories.
        :rtype: list
        """
        # could get current user repos at: /user/repos
        # but that includes org repos user has access to
        # could specify type, but default of owner is probably fine for now
        repos = []
        page_url = '%s/users/%s/repos?per_page=%s' % (self.url, user, per_page)
        while page_url:
            response = self.session.get(page_url)
            repos += response.json()
            # Follow RFC 5988 pagination; requests already parses the Link
            # header into response.links, matching the style used by
            # list_user_repos (replaces the hand-rolled header parsing).
            next_link = response.links.get('next')
            page_url = next_link['url'] if next_link else None
        return repos

    def list_user_repos(self):
        """Get a list of the current person's repositories

        :return: List of person's repositories, or None on a non-200 response
        :rtype: list
        """
        response = self.session.get('%s/user/repos' % self.url)
        if response.status_code == requests.codes.ok:
            repos = response.json()
            # repository list is paged; if there is a rel=next link,
            # there is more content
            while response.links.get('next', None):
                # for now, assuming that if the first response is ok
                # subsequent ones will be too
                response = self.session.get(response.links['next']['url'])
                repos.extend(response.json())
            return repos
        return None

    def create_pull_request(self, repo, title, head, base, text=None):
        """Create a new pull request.
        https://developer.github.com/v3/pulls/#create-a-pull-request

        :param repo: repository where the pull request will be created,
            in owner/repo format
        :param title: title of the pull request
        :param head: name of the branch with the changes to be pulled in
        :param base: name of the branch where changes should will be
            pulled into (e.g., master)
        :param text: optional text body content for the pull request
        :raises GithubApiException: when the API does not return 201 Created
        """
        # pull request url is /repos/:owner/:repo/pulls
        data = {'title': title, 'head': head, 'base': base}
        if text is not None:
            data['body'] = text
        response = self.session.post('%s/repos/%s/pulls' % (self.url, repo),
                                     data=json.dumps(data))
        if response.status_code == requests.codes.created:
            return response.json()
        error_message = 'Error creating pull request'
        try:
            error_message += ': %s' % response.json()['message']
        except Exception:
            pass
        raise GithubApiException(error_message)
|
import torch as th
from components.action_selectors import EpsilonGreedyAttackerActionSelector, EpsilonGreedyIdentifierActionSelector
from module.agents.rnn_agent import RNNIdentifierAgent, RNNAttackerAgent
class SeparateMAC:
    """Multi-agent controller wrapping two independent recurrent agents —
    an *attacker* and an *identifier* — each with its own hidden state and
    epsilon-greedy action selector.
    """
    def __init__(self, scheme, groups, args):
        # `groups` is accepted for interface compatibility with other MACs
        # but is unused here.
        self.n_peers = args.n_peers
        self.args = args
        self.scheme = scheme
        self.attacker = None
        self.identifier = None
        self.attacker_hidden_states = None
        self.identifier_hidden_states = None
        self._build_agents()
        self.attacker_action_selector = EpsilonGreedyAttackerActionSelector(args)
        self.identifier_action_selector = EpsilonGreedyIdentifierActionSelector(args)
    def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False):
        """Run one forward step and epsilon-greedily pick actions for both agents."""
        # Only select actions for the selected batch elements in bs
        attacker_outputs, identifier_outputs = self.forward(ep_batch, t_ep, test_mode)
        attacker_chosen_actions = self.attacker_action_selector.select_action(attacker_outputs[bs], t_env,
                                                                              test_mode=test_mode)
        identifier_chosen_actions = self.identifier_action_selector.select_action(identifier_outputs[bs], t_env,
                                                                                  test_mode=test_mode)
        return attacker_chosen_actions, identifier_chosen_actions
    def forward(self, ep_batch, t, test_mode):
        """Advance both RNN agents one step; in training mode, mix each
        output distribution with a uniform one (an "epsilon floor").
        """
        attacker_input, identifier_input = self._build_inputs(ep_batch, t)
        attacker_outs, self.attacker_hidden_states = self.attacker(attacker_input, self.attacker_hidden_states)
        identifier_outs, self.identifier_hidden_states = self.identifier(identifier_input, self.identifier_hidden_states)
        # process the attacker
        if not test_mode:
            # Epsilon floor for attacker
            exploring_attacker_outs = []
            for out in attacker_outs[:-1]:
                epsilon_action_num = out.size(-1)
                out = out.view(-1, epsilon_action_num)
                # Blend the agent distribution with uniform exploration mass.
                exploring_out = ((1 - self.attacker_action_selector.epsilon) * out
                                 + th.ones_like(out) * self.attacker_action_selector.epsilon/epsilon_action_num)
                total_msgs_num = self.args.num_malicious*self.args.max_message_num_per_round
                exploring_out = exploring_out.view(ep_batch.batch_size, total_msgs_num, -1)
                exploring_attacker_outs.append(exploring_out)
            cert_outs = []
            for idx in range(self.n_peers):  # attacker_outs[-1]: ([bs, max_msg_num, 2])*n_peers
                cert_out = attacker_outs[-1][idx]
                epsilon_action_num = 2
                cert_out = cert_out.view(-1, epsilon_action_num)
                exploring_cert_out = ((1 - self.attacker_action_selector.epsilon) * cert_out
                                      + th.ones_like(cert_out) * self.attacker_action_selector.epsilon / epsilon_action_num)
                total_msgs_num = self.args.num_malicious * self.args.max_message_num_per_round
                exploring_cert_out = exploring_cert_out.view(ep_batch.batch_size, total_msgs_num, -1)
                cert_outs.append(exploring_cert_out)
            exploring_attacker_outs.append(cert_outs)
            # epsilon floor for identifier
            epsilon_action_num = 2
            exploring_identifier_outs = ((1 - self.identifier_action_selector.epsilon) * identifier_outs
                                         + th.ones_like(identifier_outs) * self.identifier_action_selector.epsilon / epsilon_action_num)
            return exploring_attacker_outs, exploring_identifier_outs
        else:
            return attacker_outs, identifier_outs
    def init_hidden(self, batch_size):
        """Reset both agents' recurrent hidden states for a new batch."""
        self.attacker_hidden_states = self.attacker.init_hidden().expand(batch_size, -1)  # bav
        self.identifier_hidden_states = self.identifier.init_hidden().expand(batch_size, -1)
    def attacker_parameters(self):
        # Parameters of the attacker network only (for its optimiser).
        return list(self.attacker.parameters())
    def identifier_parameters(self):
        # Parameters of the identifier network only (for its optimiser).
        return list(self.identifier.parameters())
    def parameters(self):
        # Combined parameter list of both agents.
        return list(self.attacker.parameters()) + list(self.identifier.parameters())
    def load_state(self, other_mac):
        """Copy both agents' weights from another SeparateMAC (e.g. a target MAC)."""
        self.attacker.load_state_dict(other_mac.attacker.state_dict())
        self.identifier.load_state_dict(other_mac.identifier.state_dict())
    def cuda(self):
        """Move both agents to the GPU."""
        self.attacker.cuda()
        self.identifier.cuda()
    def save_models(self, path):
        """Save both agents' state dicts under *path*."""
        th.save(self.attacker.state_dict(), "{}/attacker.th".format(path))
        th.save(self.identifier.state_dict(), "{}/identifier.th".format(path))
    def load_models(self, path):
        """Load both agents' state dicts from *path* (always onto CPU)."""
        self.attacker.load_state_dict(th.load("{}/attacker.th".format(path), map_location=lambda storage, loc: storage))
        self.identifier.load_state_dict(th.load("{}/identifier.th".format(path), map_location=lambda storage, loc: storage))
    def _build_agents(self):
        # Instantiate the two RNN agents from the scheme-derived shapes.
        input_shape_attacker, input_shape_identifier = self._get_input_shape(self.scheme)
        output_shape_attacker, output_shape_identifier = self._get_output_shape(self.scheme)
        self.attacker = RNNAttackerAgent(input_shape_attacker, output_shape_attacker, self.args)
        self.identifier = RNNIdentifierAgent(input_shape_identifier, output_shape_identifier, self.args)
    def _build_inputs(self, batch, t):
        # Assumes homogenous agents with flat observations.
        # Other MACs might want to e.g. delegate building inputs to each agent
        attacker_inputs = [batch["attacker_obs"][:, t]]
        attacker_inputs = th.cat(attacker_inputs, dim=1)
        identifier_inputs = [batch["identifier_obs"][:, t]]
        identifier_inputs = th.cat(identifier_inputs, dim=1)
        return attacker_inputs, identifier_inputs
    def _get_input_shape(self, scheme):
        # Input sizes come straight from the observation vector shapes.
        attacker_input_shape = scheme["attacker_obs"]["vshape"]
        identifier_input_shape = scheme["identifier_obs"]["vshape"]
        return attacker_input_shape, identifier_input_shape
    def _get_output_shape(self, scheme):
        num_msg_type = 10  # no client type, 9 is no-op
        # Total per-message action space aggregated over message fields.
        msg_action_space = num_msg_type + self.args.num_malicious + \
                           self.args.max_seq_num + self.args.max_view_num + \
                           self.args.total_client_vals + self.args.n_peers + self.args.n_peers * 2
        attacker_output_shape = msg_action_space * self.args.max_message_num_per_round * self.args.num_malicious
        identifier_output_shape = self.n_peers
        return attacker_output_shape, identifier_output_shape
|
<filename>xlremed/finetune.py
import argparse
from .model import XLMForMTBFineTuning
from .dataset import EHealthKD
from .framework import Framework
from torch.optim import SGD, Adam
import torch
# Fix all RNG seeds up front so training runs are reproducible.
torch.manual_seed(0)
import numpy as np
np.random.seed(0)
import random
random.seed(0)
def parse():
    """Parse the fine-tuning command-line arguments.

    :return: the populated argparse namespace
    """
    parser = argparse.ArgumentParser(description='Fine-tuning script')
    parser.add_argument('--pretrained_model', type=str, default='xlm-roberta-base',
                        help='The transformer pretrained model')
    parser.add_argument('--force_preprocess', action='store_true', default=False,
                        help='Force the data preprocessing step.')
    parser.add_argument('--max_seq_length', type=int, default=128,
                        help='Maximum sequence length.')
    parser.add_argument('--batch_size', type=int, default=64,
                        help='Training batch size.')
    parser.add_argument('--dev_batch_size', type=int, default=4,
                        help='Validation batch size.')
    parser.add_argument('--epochs', type=int, default=100,
                        help='Number of training epochs.')
    parser.add_argument('--optimizer', type=str, default='Adam',
                        help='The optimizer, SGD or Adam.')
    parser.add_argument('--lr', type=float, default=1e-5,
                        help='Learning rate.')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='Momentum')
    parser.add_argument('--nesterov', action='store_true', default=False,
                        help='Use nesterov momentum')
    # BUGFIX: help text previously said 'Momentum' (copy-paste error).
    parser.add_argument('--grad_clip', type=float, default=None,
                        help='Gradient clipping threshold.')
    parser.add_argument('--l2', type=float, default=0.01,
                        help='L2 normalization')
    parser.add_argument('--mlm_probability', type=float, default=.15,
                        help='Masked Language Model masking probability.')
    parser.add_argument('--linear_scheduler', action='store_true', default=False,
                        help='Whether to use linear scheduler.')
    parser.add_argument('--warmup_steps', type=int, default=0,
                        help='Number of warmups of the linear scheduler.')
    parser.add_argument('--mtb_probability', type=float, default=.7,
                        help='Matching The Blanks masking probability.')
    parser.add_argument('--lambd', type=float, default=.7,
                        help='Interpolation weight for the MTB loss.')
    parser.add_argument('--half', action='store_true', default=False,
                        help='Use of half precision training.')
    parser.add_argument('--grad_acc', type=int, default=1,
                        help='Number of steps for gradient accumulation')
    parser.add_argument('--patience', type=int, default=3,
                        help='Number of extra iterations for the early stopping.')
    # BUGFIX: fixed 'Aceptable' typo.
    parser.add_argument('--delta', type=float, default=0.0,
                        help='Acceptable loss difference between iterations.')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='Debug flag. Enable when you are testing changes.')
    parser.add_argument('--ensemble_data', action='store_true', default=False,
                        help='Add extra data on training.')
    parser.add_argument('--recover_training', action='store_true', default=False,
                        help='Continues the previous training.')
    parser.add_argument('--device', type=str, default="cuda",
                        help='Which device to use.')
    args = parser.parse_args()
    return args
def main(opt):
    """Run MTB fine-tuning on the eHealth-KD corpus and save the artefacts.

    :param opt: parsed argparse namespace from :func:`parse`
    """
    dataset = EHealthKD('data/ehealthkd-2020/', opt.pretrained_model, add_ensemble_data=opt.ensemble_data)
    # Inject model hyper-parameters that are fixed for this entry point.
    opt.model = 'MTBFineTune'
    opt.vocab_size = len(dataset.tokenizer)
    opt.dropout_p = .2
    opt.n_rel = dataset.get_n_rel()
    framework = Framework(**vars(opt))
    framework.fit(dataset, batch_size=opt.batch_size, patience=opt.patience, delta=opt.delta)
    # Persist model weights and tokenizer side by side.
    checkpoint_dir = f'checkpoints/{opt.pretrained_model}'
    framework.save_model(checkpoint_dir)
    dataset.save_tokenizer(checkpoint_dir)
if __name__ == "__main__":
    # Script entry point: parse CLI flags, then launch fine-tuning.
    opt = parse()
    main(opt)
|
"""
Copyright 2017-2018 lvaleriu (https://github.com/lvaleriu/)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import json
import os
import warnings
import numpy as np
from PIL import Image
from .generator import Generator
from ..utils.image import read_image_bgr
def load_hierarchy(metadata_dir, version='v4'):
    """Load the Open Images class-hierarchy JSON for a dataset release.

    :param metadata_dir: directory containing the metadata files
    :param version: one of 'challenge2018', 'v4' or 'v3'
    :return: parsed hierarchy dict
    :raises NotImplementedError: for unknown versions. (Previously an unknown
        version left ``hierarchy = None`` and crashed later in
        ``os.path.join`` with an opaque TypeError.)
    """
    if version == 'challenge2018':
        hierarchy = 'bbox_labels_500_hierarchy.json'
    elif version in ('v4', 'v3'):
        hierarchy = 'bbox_labels_600_hierarchy.json'
    else:
        raise NotImplementedError('unknown hierarchy for version {}'.format(version))
    hierarchy_json = os.path.join(metadata_dir, hierarchy)
    with open(hierarchy_json) as f:
        return json.loads(f.read())
def load_hierarchy_children(hierarchy):
    """Flatten a hierarchy node into a pre-order list of label names.

    :param hierarchy: dict with 'LabelName' and optional 'Subcategory' list
    :return: list of label names, the node itself first
    """
    labels = [hierarchy['LabelName']]
    for child in hierarchy.get('Subcategory', []):
        labels.extend(load_hierarchy_children(child))
    return labels
def find_hierarchy_parent(hierarchy, parent_cls):
    """Depth-first search for the node whose 'LabelName' equals *parent_cls*.

    :param hierarchy: hierarchy dict (with optional 'Subcategory' children)
    :param parent_cls: raw class code to locate
    :return: the matching subtree dict, or None if absent
    """
    if hierarchy['LabelName'] == parent_cls:
        return hierarchy
    for child in hierarchy.get('Subcategory', []):
        found = find_hierarchy_parent(child, parent_cls)
        if found is not None:
            return found
    return None
def get_labels(metadata_dir, version='v4'):
    """Build the label mappings for an Open Images release.

    :param metadata_dir: directory containing the metadata csv files
    :param version: 'v4', 'challenge2018', or an older release (e.g. 'v3')
    :return: tuple (id_to_labels, cls_index) where id_to_labels maps a
        contiguous integer id to a human-readable description and cls_index
        maps the raw class code (e.g. '/m/011k07') to that integer id
    """
    if version == 'v4' or version == 'challenge2018':
        csv_file = 'class-descriptions-boxable.csv' if version == 'v4' else 'challenge-2018-class-descriptions-500.csv'
        boxable_classes_descriptions = os.path.join(metadata_dir, csv_file)
        id_to_labels = {}
        cls_index = {}
        i = 0
        with open(boxable_classes_descriptions) as f:
            for row in csv.reader(f):
                # make sure the csv row is not empty (usually the last one)
                if len(row):
                    label = row[0]
                    # strip quote characters from the description
                    description = row[1].replace("\"", "").replace("'", "").replace('`', '')
                    id_to_labels[i] = description
                    cls_index[label] = i
                    i += 1
    else:
        trainable_classes_path = os.path.join(metadata_dir, 'classes-bbox-trainable.txt')
        description_path = os.path.join(metadata_dir, 'class-descriptions.csv')
        description_table = {}
        with open(description_path) as f:
            for row in csv.reader(f):
                # make sure the csv row is not empty (usually the last one)
                if len(row):
                    description_table[row[0]] = row[1].replace("\"", "").replace("'", "").replace('`', '')
        # BUGFIX: the file was previously opened in binary mode and its
        # contents split on a str, which raises TypeError on Python 3;
        # splitlines() also drops the trailing empty entry a final newline
        # used to produce (which then triggered a KeyError below).
        with open(trainable_classes_path) as f:
            trainable_classes = f.read().splitlines()
        id_to_labels = dict([(i, description_table[c]) for i, c in enumerate(trainable_classes)])
        cls_index = dict([(c, i) for i, c in enumerate(trainable_classes)])
    return id_to_labels, cls_index
def generate_images_annotations_json(main_dir, metadata_dir, subset, cls_index, version='v4'):
    """Parse the Open Images bounding-box csv into a per-image annotation dict.

    :param main_dir: dataset root (images live under main_dir/images/...)
    :param metadata_dir: directory with the annotation csv files
    :param subset: 'train', 'validation' or 'test'
    :param cls_index: mapping from raw class code to contiguous integer id
    :param version: dataset release ('v4', 'challenge2018' or older)
    :return: dict image_id -> {'w': width, 'h': height, 'boxes': [ann, ...]}
        where each ann holds the class id and normalised box corners in [0, 1]
    """
    validation_image_ids = {}
    # Pick the csv file for this release; challenge2018 additionally loads
    # the list of image ids reserved for its validation split.
    if version == 'v4':
        annotations_path = os.path.join(metadata_dir, subset, '{}-annotations-bbox.csv'.format(subset))
    elif version == 'challenge2018':
        validation_image_ids_path = os.path.join(metadata_dir, 'challenge-2018-image-ids-valset-od.csv')
        with open(validation_image_ids_path, 'r') as csv_file:
            reader = csv.DictReader(csv_file, fieldnames=['ImageID'])
            next(reader)  # skip the header row
            for line, row in enumerate(reader):
                image_id = row['ImageID']
                validation_image_ids[image_id] = True
        annotations_path = os.path.join(metadata_dir, 'challenge-2018-train-annotations-bbox.csv')
    else:
        annotations_path = os.path.join(metadata_dir, subset, 'annotations-human-bbox.csv')
    fieldnames = ['ImageID', 'Source', 'LabelName', 'Confidence',
                  'XMin', 'XMax', 'YMin', 'YMax',
                  'IsOccluded', 'IsTruncated', 'IsGroupOf', 'IsDepiction', 'IsInside']
    id_annotations = dict()
    with open(annotations_path, 'r') as csv_file:
        reader = csv.DictReader(csv_file, fieldnames=fieldnames)
        next(reader)  # skip the header row
        images_sizes = {}
        for line, row in enumerate(reader):
            frame = row['ImageID']
            # For challenge2018 the train/validation split is defined by the
            # id list loaded above rather than by separate annotation files.
            if version == 'challenge2018':
                if subset == 'train':
                    if frame in validation_image_ids:
                        continue
                elif subset == 'validation':
                    if frame not in validation_image_ids:
                        continue
                else:
                    raise NotImplementedError('This generator handles only the train and validation subsets')
            class_name = row['LabelName']
            # Skip classes absent from the (possibly filtered) label set.
            if class_name not in cls_index:
                continue
            cls_id = cls_index[class_name]
            if version == 'challenge2018':
                # We recommend participants to use the provided subset of the training set as a validation set.
                # This is preferable over using the V4 val/test sets, as the training set is more densely annotated.
                img_path = os.path.join(main_dir, 'images', 'train', frame + '.jpg')
            else:
                img_path = os.path.join(main_dir, 'images', subset, frame + '.jpg')
            # Cache image sizes so each image file is opened at most once.
            if frame in images_sizes:
                width, height = images_sizes[frame]
            else:
                try:
                    with Image.open(img_path) as img:
                        width, height = img.width, img.height
                        images_sizes[frame] = (width, height)
                except Exception as ex:
                    # A missing/corrupt image is fatal for challenge2018;
                    # otherwise the row is silently skipped.
                    if version == 'challenge2018':
                        raise ex
                    continue
            # Normalised corner coordinates in [0, 1].
            x1 = float(row['XMin'])
            x2 = float(row['XMax'])
            y1 = float(row['YMin'])
            y2 = float(row['YMax'])
            x1_int = int(round(x1 * width))
            x2_int = int(round(x2 * width))
            y1_int = int(round(y1 * height))
            y2_int = int(round(y2 * height))
            # Check that the bounding box is valid.
            if x2 <= x1:
                raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
            if y2 <= y1:
                raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
            # Drop boxes that collapse to zero pixels after rounding.
            if y2_int == y1_int:
                warnings.warn('filtering line {}: rounding y2 ({}) and y1 ({}) makes them equal'.format(line, y2, y1))
                continue
            if x2_int == x1_int:
                warnings.warn('filtering line {}: rounding x2 ({}) and x1 ({}) makes them equal'.format(line, x2, x1))
                continue
            img_id = row['ImageID']
            annotation = {'cls_id': cls_id, 'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2}
            if img_id in id_annotations:
                annotations = id_annotations[img_id]
                annotations['boxes'].append(annotation)
            else:
                id_annotations[img_id] = {'w': width, 'h': height, 'boxes': [annotation]}
    return id_annotations
class OpenImagesGenerator(Generator):
    """Batch generator for the Open Images dataset (v3 / v4 / challenge2018)."""

    def __init__(
        self, main_dir, subset, version='v4',
        labels_filter=None, annotation_cache_dir='.',
        parent_label=None,
        **kwargs
    ):
        """Build (or load from cache) the per-image annotations for *subset*.

        :param main_dir: dataset root directory
        :param subset: 'train', 'validation' or 'test'
        :param version: dataset release ('challenge2018', 'v4' or 'v3')
        :param labels_filter: optional list of label names to keep
        :param annotation_cache_dir: directory holding the parsed-annotation json cache
        :param parent_label: optional hierarchy parent whose children to keep
        """
        if version == 'challenge2018':
            metadata = 'challenge2018'
        elif version == 'v4':
            metadata = '2018_04'
        elif version == 'v3':
            metadata = '2017_11'
        else:
            raise NotImplementedError('There is currently no implementation for versions older than v3')
        # challenge2018 stores all images under the train split.
        if version == 'challenge2018':
            self.base_dir = os.path.join(main_dir, 'images', 'train')
        else:
            self.base_dir = os.path.join(main_dir, 'images', subset)
        metadata_dir = os.path.join(main_dir, metadata)
        annotation_cache_json = os.path.join(annotation_cache_dir, subset + '.json')
        self.hierarchy = load_hierarchy(metadata_dir, version=version)
        id_to_labels, cls_index = get_labels(metadata_dir, version=version)
        if os.path.exists(annotation_cache_json):
            with open(annotation_cache_json, 'r') as f:
                self.annotations = json.loads(f.read())
        else:
            self.annotations = generate_images_annotations_json(main_dir, metadata_dir, subset, cls_index, version=version)
            # BUGFIX: write the cache through a context manager so the file
            # handle is flushed and closed deterministically (it previously
            # leaked via json.dump(..., open(..., "w"))).
            with open(annotation_cache_json, 'w') as f:
                json.dump(self.annotations, f)
        if labels_filter is not None or parent_label is not None:
            self.id_to_labels, self.annotations = self.__filter_data(id_to_labels, cls_index, labels_filter, parent_label)
        else:
            self.id_to_labels = id_to_labels
        # Contiguous image index -> image id key.
        self.id_to_image_id = dict([(i, k) for i, k in enumerate(self.annotations)])
        super(OpenImagesGenerator, self).__init__(**kwargs)

    def __filter_data(self, id_to_labels, cls_index, labels_filter=None, parent_label=None):
        """
        If you want to work with a subset of the labels just set a list with trainable labels
        :param labels_filter: Ex: labels_filter = ['Helmet', 'Hat', 'Analog television']
        :param parent_label: If parent_label is set this will bring you the parent label
        but also its children in the semantic hierarchy as defined in OID, ex: Animal
        hierarchical tree
        :return: (remapped id_to_labels, filtered annotations)
        """
        children_id_to_labels = {}
        if parent_label is None:
            # there is/are no other sublabel(s) other than the labels itself
            for label in labels_filter:
                for i, lb in id_to_labels.items():
                    if lb == label:
                        children_id_to_labels[i] = label
                        break
        else:
            # Resolve the parent label's raw class code, then collect the
            # whole subtree below it in the semantic hierarchy.
            parent_cls = None
            for i, lb in id_to_labels.items():
                if lb == parent_label:
                    parent_id = i
                    for c, index in cls_index.items():
                        if index == parent_id:
                            parent_cls = c
                    break
            if parent_cls is None:
                raise Exception('Couldnt find label {}'.format(parent_label))
            parent_tree = find_hierarchy_parent(self.hierarchy, parent_cls)
            if parent_tree is None:
                raise Exception('Couldnt find parent {} in the semantic hierarchical tree'.format(parent_label))
            children = load_hierarchy_children(parent_tree)
            for cls in children:
                index = cls_index[cls]
                label = id_to_labels[index]
                children_id_to_labels[index] = label
        # Remap the surviving class ids onto a contiguous range.
        id_map = dict([(ind, i) for i, ind in enumerate(children_id_to_labels.keys())])
        filtered_annotations = {}
        for k in self.annotations:
            img_ann = self.annotations[k]
            filtered_boxes = []
            for ann in img_ann['boxes']:
                cls_id = ann['cls_id']
                if cls_id in children_id_to_labels:
                    ann['cls_id'] = id_map[cls_id]
                    filtered_boxes.append(ann)
            # Keep only images that still have at least one box.
            if len(filtered_boxes) > 0:
                filtered_annotations[k] = {'w': img_ann['w'], 'h': img_ann['h'], 'boxes': filtered_boxes}
        children_id_to_labels = dict([(id_map[i], l) for (i, l) in children_id_to_labels.items()])
        return children_id_to_labels, filtered_annotations

    def size(self):
        """Number of images with annotations."""
        return len(self.annotations)

    def num_classes(self):
        """Number of (possibly filtered) classes."""
        return len(self.id_to_labels)

    def has_label(self, label):
        """ Return True if label is a known label.
        """
        return label in self.id_to_labels

    def has_name(self, name):
        """ Returns True if name is a known class.
        """
        raise NotImplementedError()

    def name_to_label(self, name):
        """Name lookup is not supported for Open Images."""
        raise NotImplementedError()

    def label_to_name(self, label):
        """Map an integer class id to its human-readable description."""
        return self.id_to_labels[label]

    def image_aspect_ratio(self, image_index):
        """Return width/height for the image at *image_index*."""
        img_annotations = self.annotations[self.id_to_image_id[image_index]]
        height, width = img_annotations['h'], img_annotations['w']
        return float(width) / float(height)

    def image_path(self, image_index):
        """Absolute path of the .jpg for the image at *image_index*."""
        path = os.path.join(self.base_dir, self.id_to_image_id[image_index] + '.jpg')
        return path

    def load_image(self, image_index):
        """Load the image as a BGR numpy array."""
        return read_image_bgr(self.image_path(image_index))

    def load_annotations(self, image_index):
        """Return {'labels': (n,), 'bboxes': (n, 4)} in absolute pixel coords."""
        image_annotations = self.annotations[self.id_to_image_id[image_index]]
        labels = image_annotations['boxes']
        height, width = image_annotations['h'], image_annotations['w']
        annotations = {'labels': np.empty((len(labels),)), 'bboxes': np.empty((len(labels), 4))}
        for idx, ann in enumerate(labels):
            cls_id = ann['cls_id']
            # De-normalise the stored [0, 1] corners back to pixels.
            x1 = ann['x1'] * width
            x2 = ann['x2'] * width
            y1 = ann['y1'] * height
            y2 = ann['y2'] * height
            annotations['bboxes'][idx, 0] = x1
            annotations['bboxes'][idx, 1] = y1
            annotations['bboxes'][idx, 2] = x2
            annotations['bboxes'][idx, 3] = y2
            annotations['labels'][idx] = cls_id
        return annotations
|
<filename>scripts/qgps_2d_j1_j2_10_by_10_32_continue.py
import numpy as np
import netket as nk
import sys
import shutil
from shutil import move
import mpi4py.MPI as mpi
import symmetries
import os
# Restart script: continues a QGPS optimisation of the 2D J1-J2 model on a
# 10x10 lattice (bond dimension N=32), resuming from a previous run's files.
N = 32    # QGPS bond dimension
L = 10    # linear lattice size (10x10)
mode = 1  # 0: sum-symmetrised, 1: product-symmetrised, 2: no symmetries
J2 = 0.0  # next-nearest-neighbour coupling
rank = mpi.COMM_WORLD.Get_rank()
initial_folder = "/home/mmm0475/Scratch/J1_J2_2D_10_by_10_J2_0.0_prod_sym_N_32_323408"
# Rank 0 copies the previous run's files into the working directory under an
# OLD_ prefix, then restores the epsilon tensors from them.
if rank == 0:
    for item in os.listdir(initial_folder):
        s = os.path.join(initial_folder, item)
        d = os.path.join("", "OLD_"+item)
        if not os.path.isdir(s):
            shutil.copy2(s, d)
    shutil.copyfile("OLD_epsilon.npy", "epsilon.npy")
    shutil.copyfile("OLD_epsilon_old.npy", "epsilon_old.npy")
mpi.COMM_WORLD.barrier()
opt_process = np.genfromtxt("OLD_out.txt")
g = nk.graph.Hypercube(length=L, n_dim=2, pbc=True)
# Spin based Hilbert Space
hi = nk.hilbert.Spin(s=0.5, total_sz=0.0, N=g.n_nodes)
ha = nk.custom.J1J2(g, J2=J2, msr=True)
transl = nk.custom.get_symms_square_lattice(L)
if mode == 0:
    ma = nk.machine.QGPSSumSym(hi, n_bond=N, automorphisms=transl, spin_flip_sym=True, dtype=complex)
elif mode == 1:
    ma = nk.machine.QGPSProdSym(hi, n_bond=N, automorphisms=transl, spin_flip_sym=True, dtype=complex)
elif mode == 2:
    ma = nk.machine.QGPSProdSym(hi, n_bond=N, automorphisms=None, spin_flip_sym=False, dtype=complex)
ma._exp_kern_representation = False
# Smaller init noise for larger systems / bond dimensions.
if L > 8 and N > 10:
    ma.init_random_parameters(sigma=0.01, start_from_uniform=False)
else:
    ma.init_random_parameters(sigma=0.02, start_from_uniform=False)
# Optimizer
op = nk.optimizer.Sgd(ma, learning_rate=0.02)
# Sampler
sa = nk.sampler.MetropolisExchange(machine=ma,graph=g,d_max=2*L,n_chains=1)
sa.reset(True)
# Stochastic Reconfiguration
sr = nk.optimizer.SR(ma)
samples = 5100
# Create the optimization driver
gs = nk.custom.SweepOpt(hamiltonian=ha, sampler=sa, optimizer=op, n_samples=samples, sr=sr, n_discard=20, max_opt=6400, check_improvement=False, reset_bias=False)
# Overwrite the random init with the parameters saved by the previous run.
eps = np.load("OLD_epsilon_old.npy")
ma._epsilon = eps.copy()
ma._opt_params = ma._epsilon[ma._der_ids >= 0].copy()
ma.reset()
best_epsilon = ma._epsilon.copy()
np.save("epsilon.npy", ma._epsilon)
np.save("epsilon_old.npy", ma._epsilon)
# Best energy upper bound (mean + error bar) observed in the previous run.
best_en_upper_bound = min(opt_process[:,0] + opt_process[:,2])
if rank == 0:
    with open("out.txt", "w") as fl:
        fl.write("")
est = nk.variational.estimate_expectations(ha, sa, 5000//mpi.COMM_WORLD.size, n_discard=200)
if rank == 0:
    print("Init calc:", est.mean, flush=True)
count = 0
total_count = 0
# Replay the previous optimisation log so the sample-size schedule and the
# iteration counters end up in the state the old run stopped in.
for i in range(opt_process.shape[0]-2):
    if mpi.COMM_WORLD.Get_rank() == 0:
        with open("out.txt", "a") as fl:
            fl.write("{} {} {}\n".format(opt_process[i,0], opt_process[i,1], opt_process[i,2]))
    if i == 1959:
        # NOTE(review): iteration 1959 resets the best-energy tracking —
        # presumably a deliberate schedule change; confirm with the author.
        best_en_upper_bound = None
    count += 1
    total_count += 1
    if count == 40:
        count = 0
        samples += 100
        gs.n_samples = samples
# Continue optimising for the remaining iterations.
for it in gs.iter(2000 - total_count,1):
    if mpi.COMM_WORLD.Get_rank() == 0:
        if it >= 1:
            move("epsilon.npy", "epsilon_old.npy")
        np.save("epsilon.npy", ma._epsilon)
        print(it+total_count,gs.energy, flush=True)
        with open("out.txt", "a") as fl:
            fl.write("{} {} {}\n".format(np.real(gs.energy.mean), np.imag(gs.energy.mean), gs.energy.error_of_mean))
        # Track the parameters with the lowest (mean + error bar) energy.
        if best_en_upper_bound is None:
            best_en_upper_bound = gs.energy.mean.real + gs.energy.error_of_mean
        else:
            if (gs.energy.mean.real + gs.energy.error_of_mean) < best_en_upper_bound:
                best_epsilon = ma._epsilon.copy()
                best_en_upper_bound = gs.energy.mean.real + gs.energy.error_of_mean
                np.save("best_epsilon.npy", best_epsilon)
        if it+total_count == 1959:
            best_en_upper_bound = None
    count += 1
    if count == 40:
        count = 0
        samples += 100
        gs.n_samples = samples
# Broadcast the best parameters from rank 0 and run a final, larger evaluation.
mpi.COMM_WORLD.Bcast(best_epsilon, root=0)
mpi.COMM_WORLD.barrier()
ma._epsilon = best_epsilon
ma._opt_params = ma._epsilon[ma._der_ids >= 0].copy()
ma.reset()
est = nk.variational.estimate_expectations(ha, sa, 100000//mpi.COMM_WORLD.size, n_discard=200)
if mpi.COMM_WORLD.Get_rank() == 0:
    with open("result.txt", "a") as fl:
        fl.write("{} {} {} {} {}\n".format(L, N, np.real(est.mean), np.imag(est.mean), est.error_of_mean))
|
<filename>scripts/show_darknet_loss.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import argparse
class Batch:
    """One parsed darknet training iteration (loss snapshot)."""

    def __init__(self, iteration, total_loss, avg_loss):
        self.iteration = iteration
        self.total_loss = total_loss
        self.avg_loss = avg_loss

    def __str__(self):
        parts = [
            'iteration = {}'.format(self.iteration),
            'total loss = {}'.format(self.total_loss),
            'avg_loss = {}'.format(self.avg_loss),
        ]
        return ', '.join(parts)
def main():
    """Parse darknet stdout, report low-loss weight candidates and plot the loss."""
    parser = argparse.ArgumentParser(
        description='Parse DarkNet stdout, plot the loss and indicate weights file '
        'with lowest avg precision and total precision.'
    )
    parser.add_argument('input', help='Input text file containing darknet stdout.')
    parser.add_argument(
        '--weights-step',
        '-w',
        metavar='N',
        # BUGFIX: without type=int a CLI-supplied value stayed a str and the
        # modulo against v.iteration below raised TypeError.
        type=int,
        default=100,
        help='Multiple of iterations a new weights file is saved. '
        'This is used to point to the most interesting weights file.',
    )
    parser.add_argument(
        '--backend',
        '-b',
        default='mpl',
        help='Set the rendering engine of the plot to "mpl" or "ply".',
    )
    args = parser.parse_args()
    with open(args.input) as f:
        text = f.read()
    # clean up output by removing 'Saving weights to .*\.weights'
    text = re.sub(r'Saving weights to .*\.weights\n', '', text)
    # parse text file
    values = []
    lines = text.split('\n')
    for line in lines:
        if line.endswith('images'):
            elements = line.split()
            iteration = int(elements[0].strip(':'))
            total_loss = float(elements[1].strip(','))
            avg_loss = float(elements[2])
            values.append(Batch(iteration, total_loss, avg_loss))
    # find optimal weight file candidates
    subsampled_values = [v for v in values if v.iteration % args.weights_step == 0]
    sorted_subsampled_values = sorted(subsampled_values, key=lambda v: v.avg_loss)
    # BUGFIX: guard against fewer than 10 candidates (previously IndexError).
    for i in range(min(10, len(sorted_subsampled_values))):
        print('Candidate', i + 1, '=', sorted_subsampled_values[i])
    # plot loss
    total_losses = [v.total_loss for v in values]
    avg_losses = [v.avg_loss for v in values]
    iterations = [v.iteration for v in values]
    plot(avg_losses, iterations, total_losses, backend=args.backend)
def plot(avg_losses, iterations, total_losses, backend='mpl'):
    """Dispatch the loss plot to the requested rendering backend.

    An empty/None backend falls back to matplotlib; unknown backends are
    silently ignored.
    """
    if backend == 'ply':
        plot_ply(avg_losses, iterations, total_losses)
    elif not backend or backend == 'mpl':
        plot_mpl(avg_losses, iterations, total_losses)
def plot_mpl(avg_losses, iterations, total_losses):
    """Render the training-loss curves with matplotlib and show the window."""
    import matplotlib.pyplot as plt
    plt.figure(figsize=(10, 8))
    plt.plot(iterations, total_losses, label='total loss', linewidth=1)
    plt.plot(iterations, avg_losses, label='avg loss', linewidth=1)
    plt.gcf().suptitle('Training loss', weight='bold')
    axes = plt.gca()
    axes.set_ylabel('Loss')
    axes.set_xlabel('Iteration')
    # Clamp the view: losses above 10 are early-training noise.
    axes.set_ylim([0, 10])
    axes.set_xlim(left=0)
    plt.show()
def plot_ply(avg_losses, iterations, total_losses):
    """Render the training-loss curves with plotly (offline HTML output)."""
    import plotly.offline as po
    import plotly.graph_objs as go
    total_trace = go.Scatter(x=iterations, y=total_losses, mode='lines')
    avg_trace = go.Scatter(x=iterations, y=avg_losses, mode='lines')
    total_trace.name = 'total loss'
    avg_trace.name = 'average loss'
    layout = go.Layout(
        title='Training loss',
        xaxis=dict(title='Iteration'),
        yaxis=dict(title='Loss', range=[0, 10]),
    )
    figure = go.Figure(data=[total_trace, avg_trace], layout=layout)
    po.plot(figure)
if __name__ == '__main__':
    # Script entry point.
    main()
|
<filename>appengine_utilities/rotmodel.py
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import time
from google.appengine.api import datastore
from google.appengine.ext import db
# settings
try:
import settings
except:
import settings_default as settings
class ROTModel(db.Model):
    """
    ROTModel ("retry on timeout") overrides db.Model methods, retrying each
    datastore call every time a db.Timeout is raised, sleeping
    count * RETRY_INTERVAL seconds between attempts.  After RETRY_ATTEMPTS
    consecutive timeouts the methods re-raise db.Timeout.
    Methods superclassed from db.Model are:
        get(cls, keys)
        get_by_id(cls, ids, parent)
        get_by_key_name(cls, key_names, parent)
        get_or_insert(cls, key_name, kwargs)
        put(self)
    """
    # BUG FIX (all methods below): the original clauses read
    # "except db.Timeout():", which *instantiates* the exception and uses
    # the instance as the filter — such a clause never matches the raised
    # db.Timeout class, so no retry ever happened.  The class itself must
    # be named in the except clause.

    @classmethod
    def get(cls, keys):
        """Fetch entities by key, retrying on datastore timeouts."""
        count = 0
        while count < settings.rotmodel["RETRY_ATTEMPTS"]:
            try:
                return db.Model.get(keys)
            except db.Timeout:
                count += 1
                time.sleep(count * settings.rotmodel["RETRY_INTERVAL"])
        else:
            # retries exhausted
            raise db.Timeout()

    @classmethod
    def get_by_id(cls, ids, parent=None):
        """Fetch entities by numeric id, retrying on datastore timeouts."""
        count = 0
        while count < settings.rotmodel["RETRY_ATTEMPTS"]:
            try:
                return db.Model.get_by_id(ids, parent)
            except db.Timeout:
                count += 1
                time.sleep(count * settings.rotmodel["RETRY_INTERVAL"])
        else:
            raise db.Timeout()

    @classmethod
    def get_by_key_name(cls, key_names, parent=None):
        """Fetch entities by key name, retrying on datastore timeouts."""
        if isinstance(parent, db.Model):
            parent = parent.key()
        key_names, multiple = datastore.NormalizeAndTypeCheck(key_names, basestring)
        keys = [datastore.Key.from_path(cls.kind(), name, parent=parent)
                for name in key_names]
        count = 0
        if multiple:
            while count < settings.rotmodel["RETRY_ATTEMPTS"]:
                try:
                    return db.get(keys)
                except db.Timeout:
                    count += 1
                    time.sleep(count * settings.rotmodel["RETRY_INTERVAL"])
        else:
            while count < settings.rotmodel["RETRY_ATTEMPTS"]:
                try:
                    return db.get(*keys)
                except db.Timeout:
                    count += 1
                    time.sleep(count * settings.rotmodel["RETRY_INTERVAL"])
        # Consistency fix: the original silently returned None after
        # exhausting retries; raise like the other methods do.
        raise db.Timeout()

    @classmethod
    def get_or_insert(cls, key_name, **kwargs):
        """Transactionally fetch the entity, creating it when missing."""
        def txn():
            entity = cls.get_by_key_name(key_name, parent=kwargs.get('parent'))
            if entity is None:
                entity = cls(key_name=key_name, **kwargs)
                entity.put()
            return entity
        return db.run_in_transaction(txn)

    def put(self):
        """Store the entity, retrying on datastore timeouts."""
        count = 0
        while count < settings.rotmodel["RETRY_ATTEMPTS"]:
            try:
                return db.Model.put(self)
            except db.Timeout:
                count += 1
                time.sleep(count * settings.rotmodel["RETRY_INTERVAL"])
        else:
            raise db.Timeout()

    def delete(self):
        """Delete the entity, retrying on datastore timeouts."""
        count = 0
        while count < settings.rotmodel["RETRY_ATTEMPTS"]:
            try:
                return db.Model.delete(self)
            except db.Timeout:
                count += 1
                time.sleep(count * settings.rotmodel["RETRY_INTERVAL"])
        else:
            raise db.Timeout()
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
from tools.translate import _
# Creates forecasts records for products from selected Product Category for selected 'Warehouse - Period'
# Object added by contributor in ver 1.1
class stock_sale_forecast_createlines(osv.osv_memory):
    """Wizard that bulk-creates stock.sale.forecast records: one per product
    of the selected category, for the selected warehouse and period."""
    _name = "stock.sale.forecast.createlines"
    _description = "stock.sale.forecast.createlines"
    _columns = {
        'company_id': fields.many2one('res.company', 'Company', required=True, select=1),
        'warehouse_id': fields.many2one('stock.warehouse' , 'Warehouse', required=True, \
                            help='Warehouse which forecasts will concern. '\
                               'If during stock planning you will need sales forecast for all warehouses choose any warehouse now.'),
        'period_id': fields.many2one('stock.period', 'Period', required=True, help='Period which forecasts will concern.'),
        'product_categ_id': fields.many2one('product.category' , 'Product Category', required=True, \
                            help ='Product Category of products which created forecasts will concern.'),
        'copy_forecast': fields.boolean('Copy Last Forecast', help="Copy quantities from last Stock and Sale Forecast."),
    }
    _defaults = {
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.sale.forecast.createlines', context=c),
    }

    def create_forecast(self, cr, uid, ids, context=None):
        """Create one forecast line per product in the chosen category (and
        its child categories) that does not already have a forecast for this
        user/period/warehouse, then return an act_window listing the newly
        created stock.sale.forecast records.

        Raises osv.except_osv when the category resolves to no products.
        """
        product_obj = self.pool.get('product.product')
        forecast_obj = self.pool.get('stock.sale.forecast')
        mod_obj = self.pool.get('ir.model.data')
        prod_categ_obj = self.pool.get('product.category')
        template_obj = self.pool.get('product.template')
        forecast_lines = []
        for f in self.browse(cr, uid, ids, context=context):
            # Expand category -> all child categories -> templates -> products.
            categ_ids =  f.product_categ_id.id and [f.product_categ_id.id] or []
            prod_categ_ids = prod_categ_obj.search(cr, uid, [('parent_id','child_of', categ_ids)])
            templates_ids = template_obj.search(cr, uid, [('categ_id','in',prod_categ_ids)])
            products_ids = product_obj.search(cr, uid, [('product_tmpl_id','in',templates_ids)])
            if len(products_ids) == 0:
                raise osv.except_osv(_('Error !'), _('No products in selected category !'))
            copy = f.copy_forecast
            for p in product_obj.browse(cr, uid, products_ids,{}):
                # Skip products that already have a forecast for this
                # user/period/warehouse combination.
                if len(forecast_obj.search(cr, uid, [('product_id','=',p.id) , \
                                                     ('period_id','=',f.period_id.id), \
                                                     ('user_id','=',uid), \
                                                     ('warehouse_id','=',f.warehouse_id.id)]))== 0:
                    forecast_qty = 0.0
                    prod_uom = False
                    if copy:
                        # Seed quantity and UoM from this user's most recent
                        # earlier forecast for the same product + warehouse.
                        cr.execute("SELECT period.date_stop, forecast.product_qty, forecast.product_uom \
                                    FROM stock_sale_forecast AS forecast \
                                    LEFT JOIN stock_period AS period \
                                    ON forecast.period_id = period.id \
                                    WHERE (forecast.user_id = %s OR forecast.create_uid = %s OR forecast.write_uid = %s) \
                                    AND forecast.warehouse_id = %s AND forecast.product_id = %s \
                                    AND period.date_stop < %s \
                                    ORDER BY period.date_stop DESC",
                                   (uid, uid, uid, f.warehouse_id.id, p.id, f.period_id.date_stop) )
                        ret = cr.fetchone()
                        if ret:
                            forecast_qty = ret[1]
                            prod_uom = ret[2]
                    # Fall back to the product's default UoM when no
                    # previous forecast was found (or copy is off).
                    prod_uom = prod_uom or p.uom_id.id
                    prod_uos_categ = False
                    if p.uos_id:
                        prod_uos_categ = p.uos_id.category_id.id
                    forecast_lines.append(forecast_obj.create(cr, uid, {
                        'company_id': f.warehouse_id.company_id.id,
                        'period_id': f.period_id.id,
                        'warehouse_id': f.warehouse_id.id,
                        'product_id': p.id,
                        'product_qty': forecast_qty,
                        'product_amt': 0.0,
                        'product_uom': prod_uom,
                        'active_uom': prod_uom,
                        'product_uom_categ': p.uom_id.category_id.id,
                        'product_uos_categ': prod_uos_categ,
                    }))
        # Open the list of just-created forecasts.
        return {
            'domain': "[('id','in', ["+','.join(map(str, forecast_lines))+"])]",
            'view_type': 'form',
            "view_mode": 'tree,form',
            'res_model': 'stock.sale.forecast',
            'type': 'ir.actions.act_window',
        }
# Instantiating the wizard class registers it with the ORM — presumably the
# standard pre-v7 OpenERP osv registration pattern; confirm against the module.
stock_sale_forecast_createlines()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# pyramco
# version 0.9.2
# a complete wrapper class for RAMCO API calls
# documentation on the RAMCO API at: https://api.ramcoams.com/api/v2/ramco_api_v2_doc.pdf
# set your RAMCO api key in a separate file 'config.py' as 'ramco_api_key'
# requires Python 3.6+ and the 'requests' module
# imports
import requests
import config
# the base RAMCO API url is always the same
ramco_api_url = 'https://api.ramcoams.com/api/v2/'
## response code/error handling
# definitions
code_200 = {'DescriptionShort':'OK', 'DescriptionVerbose':'The request was successfully processed and data is included in the response'}
code_204 = {'DescriptionShort':'OK: No Data','DescriptionVerbose':'The request was successfully processed but no data is included in the response. This is typical of UpdateEntity requests.'}
code_206 = {'DescriptionShort':'OK: Partial Data','DescriptionVerbose':'The request was successfully processed and partial data is included in the response. This is the expected response when the dataset that Ramco needs to return to the user is too large. A StreamToken will be returned to allow the user to fetch the remaining data.'}
code_400 = {'DescriptionShort':'Bad Request','DescriptionVerbose':'The request was not understood. See the response text for more information.'}
code_401 = {'DescriptionShort':'Unauthorized','DescriptionVerbose':'The request was understood but it will not be fulfilled due to a lack of user permissions. See the response text for more information.'}
code_404 = {'DescriptionShort':'Not Found','DescriptionVerbose':'The request is understood but no matching data is found to return.'}
code_422 = {'DescriptionShort':'Invalid User','DescriptionVerbose':'No user with provided username/password combination. This error is specific to the AuthenticateUser request.'}
code_500 = {'DescriptionShort':'Server Error','DescriptionVerbose':'Something is not working correctly server-side. This is not an issue that can be resolved by modifying query syntax.'}
code_unknown = {'ResponseCode':999,'DescriptionShort':'Unknown Internal/pyramco Error','DescriptionVerbose':'No code or response returned from RAMCO. Verify you are on Python version 3.6+. Check your connections and settings. This error originates in your code or pyramco itself.'}
# handler function - adds additional context to errors and consolidates streamtoken requests into single reply
# ALL pyramco responses will contain a 'ResponseCode' property as above, even if no connection is made
def handler(reply):
    """Normalize a RAMCO API reply dict.

    200/206 replies are returned as a *list* of pages: stream tokens are
    followed until exhausted and each page appended.  204 is returned as-is.
    Known error codes are annotated with the documentation text defined
    above; anything else maps to ``code_unknown``.
    """
    if reply['ResponseCode'] == 200 or reply['ResponseCode'] == 206:
        # fetch all streamtoken pages and combine them into one list
        # NOTE(review): resume_streamtoken itself routes through handler(),
        # so multi-page replies may not expose 'StreamToken'/'Data' at the
        # top level here — verify the streaming path against the API.
        combined_reply = [reply]
        while 'StreamToken' in reply:
            reply = resume_streamtoken(reply['StreamToken'])
            combined_reply.append(reply['Data'])
        return combined_reply
    elif reply['ResponseCode'] == 204:
        # success with no payload: return unmodified
        return reply
    elif reply['ResponseCode'] == 400:
        return {**reply, **code_400}
    elif reply['ResponseCode'] == 401:
        # BUG FIX: code_401 was defined but never used — 401 replies fell
        # through to the generic "unknown error" branch.
        return {**reply, **code_401}
    elif reply['ResponseCode'] == 404:
        return {**reply, **code_404}
    elif reply['ResponseCode'] == 422:
        return {**reply, **code_422}
    elif reply['ResponseCode'] == 500:
        return {**reply, **code_500}
    else:
        # unknown/unreachable code
        return code_unknown
# pyramco wrapper operations
## metadata operations
### get_entity_types
def get_entity_types():
    """Fetch all entity types defined in the system (no arguments)."""
    request = dict(key=config.ramco_api_key, Operation='GetEntityTypes')
    return handler(requests.post(ramco_api_url, request).json())
### get_entity_metadata
def get_entity_metadata(entity):
    """Return all metadata for *entity* (a valid entity name, e.g. 'Contact')."""
    request = dict(
        key=config.ramco_api_key,
        Operation='GetEntityMetadata',
        Entity=entity,
    )
    return handler(requests.post(ramco_api_url, request).json())
### get_option_set
def get_option_set(entity, attribute):
    """Return value/label pairs for the OptionSet of *entity*.*attribute*."""
    request = dict(
        key=config.ramco_api_key,
        Operation='GetOptionSet',
        Entity=entity,
        Attribute=attribute,
    )
    return handler(requests.post(ramco_api_url, request).json())
### clear_cache
def clear_cache():
    """Clear the server-side metadata cache (no arguments)."""
    request = dict(key=config.ramco_api_key, Operation='ClearCache')
    return handler(requests.post(ramco_api_url, request).json())
## data querying operations
### get_entity
def get_entity(entity, guid, *attributes):
    """Return the named *attributes* of the record of *entity* matching *guid*."""
    request = dict(
        key=config.ramco_api_key,
        Operation='GetEntity',
        Entity=entity,
        GUID=guid,
        Attributes=attributes,
    )
    return handler(requests.post(ramco_api_url, request).json())
### get_entities
def get_entities(entity, *attributes, filters='', string_delimiter='', max_results=''):
    """Query *entity* records for *attributes*, optionally filtered.

    *filters*, *string_delimiter* and *max_results* are passed through to the
    API as Filter / StringDelimiter / MaxResults.
    """
    request = dict(
        key=config.ramco_api_key,
        Operation='GetEntities',
        Entity=entity,
        Filter=filters,
        Attributes=attributes,
        StringDelimiter=string_delimiter,
        MaxResults=max_results,
    )
    return handler(requests.post(ramco_api_url, request).json())
### resume_streamtoken
def resume_streamtoken(streamtoken):
    """Resume the GetEntities request that produced *streamtoken*."""
    request = dict(
        key=config.ramco_api_key,
        Operation='GetEntities',
        StreamToken=streamtoken,
    )
    return handler(requests.post(ramco_api_url, request).json())
### validate_user
def validate_user(username, password):
    """Validate a username/password pair.

    For a valid combination the reply carries that Contact's GUID; an
    invalid combination yields a 422 error reply.
    """
    payload = {
        'key': config.ramco_api_key,
        'Operation':'ValidateUser',
        # BUG FIX: the key was 'cobalt_username ' (trailing space), so the
        # API never received the username parameter under its proper name.
        'cobalt_username': username,
        'cobalt_password': password
        }
    reply = handler(requests.post(ramco_api_url,payload).json())
    return(reply)
## data transformation operations
### update_entity
def update_entity(entity, guid, *attributes, string_delimiter=''):
    """Update the *entity* record matching *guid* with attribute=value pairs."""
    request = dict(
        key=config.ramco_api_key,
        Operation='UpdateEntity',
        Entity=entity,
        Guid=guid,
        AttributeValues=attributes,
        StringDelimiter=string_delimiter,
    )
    return handler(requests.post(ramco_api_url, request).json())
### create_entity
def create_entity(entity, *attributes, string_delimiter=''):
    """Create a new *entity* record from attribute=value pairs."""
    request = dict(
        key=config.ramco_api_key,
        Operation='CreateEntity',
        Entity=entity,
        AttributeValues=attributes,
        StringDelimiter=string_delimiter,
    )
    return handler(requests.post(ramco_api_url, request).json())
### delete_entity
def delete_entity(entity, guid):
    """Delete the *entity* record matching *guid*."""
    request = dict(
        key=config.ramco_api_key,
        Operation='DeleteEntity',
        Entity=entity,
        GUID=guid,
    )
    return handler(requests.post(ramco_api_url, request).json())
# end pyramco wrapper operations
|
<gh_stars>10-100
#!/usr/bin/env python
# encoding: utf-8
"""
script to install all the necessary things
for working on a linux machine with nothing
Installing minimum dependencies
"""
import sys
import os
import logging
import subprocess
import xml.etree.ElementTree as ElementTree
import xml.dom.minidom as minidom
import socket
import time
import pwd
###---------------------------------------------------##
# Configuration Section, will be modified by script #
###---------------------------------------------------##
# apt packages required on every node (master and workers)
node_apt_packages = [
    'emacs',
    'git',
    'g++',
    'make',
    'python-numpy',
    'libprotobuf-dev',
    'libcurl4-openssl-dev']
# master only packages
master_apt_packages = [
    'protobuf-compiler']
# List of r packages to be installed in master
master_r_packages = [
    'r-base-dev',
    'r-base',
    'r-cran-statmod',
    'r-cran-RCurl',
    'r-cran-rjson'
]
# download link of hadoop.
hadoop_url = 'http://apache.claz.org/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz'
# directory name the tarball above unpacks to (used by install_hadoop)
hadoop_dir = 'hadoop-2.8.0'
# customized installation script.
# See optional installation scripts for options.
def custom_master_install():
    """Hook for extra master-only setup.

    Intentionally a no-op; enable install_spark() and/or install_r() here
    when the cluster needs them.
    """
    pass
# customized installation script for all nodes.
def custom_all_nodes_install():
    """Per-node setup hook: currently installs g++-4.9 on every node."""
    install_gcc()
###---------------------------------------------------##
#   Automatically set by script                        #
###---------------------------------------------------##
# user the bootstrap drops privileges to (see __main__ block)
USER_NAME = 'ubuntu'
# setup variables
# empty MASTER means "this host is the master" (resolved in main())
MASTER = os.getenv('MY_MASTER_DNS', '')
# node type the type of current node
NODE_TYPE = os.getenv('MY_NODE_TYPE', 'm3.xlarge')
# memory (MB) and cores advertised to YARN
NODE_VMEM = int(os.getenv('MY_NODE_VMEM', str(1024*15)))
NODE_VCPU = int(os.getenv('MY_NODE_VCPU', '4'))
AWS_ID = os.getenv('AWS_ACCESS_KEY_ID', 'undefined')
AWS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', 'undefined')
JAVA_HOME = os.getenv('JAVA_HOME')
HADOOP_HOME = os.getenv('HADOOP_HOME')
# candidate EBS/ephemeral device names: xvdb..xvdk
DISK_LIST = [('xvd' + chr(ord('b') + i)) for i in range(10)]
# mutable copy of the environment, extended as components get installed
ENVIRON = os.environ.copy()
###--------------------------------##
#  Optional installation scripts.  #
###--------------------------------##
def install_r():
    """Install the master R packages from the CRAN Ubuntu (trusty) repo."""
    if master_r_packages:
        # trust the CRAN signing key, then register the repo
        sudo("apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E084DAB9")
        sudo("echo deb https://cran.r-project.org/bin/linux/ubuntu trusty/ >>/etc/apt/sources.list")
        sudo('apt-get -y update')
        sudo('apt-get -y install %s' % (' '.join(master_r_packages)))
def install_spark():
    """Download Spark 2.1.1 (hadoop2.7 build) and add it to PATH via .bashrc."""
    run('wget https://www.apache.org/dist/spark/spark-2.1.1/spark-2.1.1-bin-hadoop2.7.tgz')
    run('tar xf spark-2.1.1-bin-hadoop2.7.tgz')
    run('rm -rf spark-2.1.1-bin-hadoop2.7.tgz')
    with open('.bashrc', 'a') as fo:
        fo.write('\nexport PATH=${PATH}:spark-2.1.1-bin-hadoop2.7\n')
def install_xgboost():
    """Clone and build xgboost with S3 support enabled (USE_S3=1)."""
    run('git clone --recursive https://github.com/dmlc/xgboost')
    run('cd xgboost; cp make/config.mk .; echo USE_S3=1 >> config.mk; make -j4')
### Script section ###
def run(cmd):
    """Run *cmd* through a shell with ENVIRON, logging the command and its
    stdout/stderr.  Never raises: failures are logged and swallowed so the
    bootstrap keeps going.  (Python 2 script — note the bare print.)"""
    try:
        print cmd
        logging.info(cmd)
        proc = subprocess.Popen(cmd, shell=True, env = ENVIRON,
                                stdout=subprocess.PIPE, stderr = subprocess.PIPE)
        out, err = proc.communicate()
        # poll() after communicate() returns the final exit code
        retcode = proc.poll()
        if retcode != 0:
            logging.error('Command %s returns %d' % (cmd,retcode))
            logging.error(out)
            logging.error(err)
        else:
            print out
    except Exception as e:
        # deliberate best-effort: log and continue
        print(str(e))
        logging.error('Exception running: %s' % cmd)
        logging.error(str(e))
        pass
def sudo(cmd):
    """Run *cmd* via run() with sudo prepended."""
    elevated = 'sudo %s' % cmd
    run(elevated)
### Installation helpers ###
def install_packages(pkgs):
    """apt-get update, then install every package named in *pkgs*."""
    sudo('apt-get -y update')
    pkg_list = ' '.join(pkgs)
    sudo('apt-get -y install %s' % pkg_list)
# install g++4.9, needed for regex match.
def install_gcc():
    """Install g++-4.9 from the ubuntu-toolchain-r PPA."""
    for command in (
        'add-apt-repository -y ppa:ubuntu-toolchain-r/test',
        'apt-get -y update',
        'apt-get -y install g++-4.9',
    ):
        sudo(command)
def install_java():
    """
    install java and setup environment variables
    Returns environment variables that needs to be exported
    """
    # Oracle JDK 8u131; the Cookie header accepts the Oracle license
    # non-interactively so wget can fetch the tarball.
    if not os.path.exists('jdk1.8.0_131'):
        run('wget --no-check-certificate --no-cookies'\
                ' --header \"Cookie: oraclelicense=accept-securebackup-cookie\"'\
                ' http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz')
        run('tar xf jdk-8u131-linux-x64.tar.gz')
        run('rm -f jdk-8u131-linux-x64.tar.gz')
    global JAVA_HOME
    # only default JAVA_HOME when the environment did not already set it
    if JAVA_HOME is None:
        JAVA_HOME = os.path.abspath('jdk1.8.0_131')
    return [('JAVA_HOME', JAVA_HOME)]
def install_hadoop(is_master):
    """Ensure Hadoop 2.8.0 is present, write its site configuration files,
    and return the environment variable (name, value) pairs to export.

    NOTE: the nested helpers run immediately via the trailing
    ``return run_install()``.
    """
    def update_site(fname, rmap):
        """
        update the site script

        Merges the name->value pairs of rmap into the <configuration>
        document at fname (created when missing) and writes it back
        pretty-printed.
        """
        try:
            tree = ElementTree.parse(fname)
            root = tree.getroot()
        except Exception:
            # missing or unparsable file: start from an empty <configuration>
            cfg = ElementTree.Element("configuration")
            tree = ElementTree.ElementTree(cfg)
            root = tree.getroot()
        rset = set()
        # first pass: overwrite values of properties already in the file
        for prop in root.getiterator('property'):
            prop = dict((p.tag, p) for p in prop)
            name = prop['name'].text.strip()
            if name in rmap:
                prop['value'].text = str(rmap[name])
                rset.add(name)
        # second pass: append properties that were not present yet
        for name, text in rmap.iteritems():
            if name in rset:
                continue
            prop = ElementTree.SubElement(root, 'property')
            ElementTree.SubElement(prop, 'name').text = name
            ElementTree.SubElement(prop, 'value').text = str(text)
        rough_string = ElementTree.tostring(root, 'utf-8')
        reparsed = minidom.parseString(rough_string)
        pretty = reparsed.toprettyxml(indent='\t')
        fo = open(fname, 'w')
        fo.write(pretty)
        fo.close()
    def setup_hadoop_site(master, hadoop_dir, hdfs_dir, vcpu, vmem):
        """
        setup hadoop side given the parameters
        Parameters
        ----------
        master: the dns to master uri
        hadoop_dir: the directory to store temp files
        hdfs_dir: the directories for hdfs
        vcpu: the number of cpus current machine have
        vmem: the memory(MB) current machine have
        """
        # RAM held back for OS/daemons, scaled by total machine memory
        if vmem < 4 * 1024:
            reserved_ram = 256
        elif vmem < 8 * 1024:
            reserved_ram = 1 * 1024
        elif vmem < 24 * 1024 :
            reserved_ram = 2 * 1024
        elif vmem < 48 * 1024:
            reserved_ram = 2 * 1024
        elif vmem < 64 * 1024:
            reserved_ram = 6 * 1024
        else:
            reserved_ram = 8 * 1024
        # MB per YARN container (Python 2 integer division)
        ram_per_container = (vmem - reserved_ram) / vcpu
        # reserve two cores on the master for its own daemons
        # (is_master is read from the enclosing install_hadoop scope)
        if is_master:
            vcpu = vcpu - 2
        tmp_dir = hadoop_dir[0]
        core_site = {
            'fs.defaultFS': 'hdfs://%s:9000/' % master,
            'fs.s3n.impl': 'org.apache.hadoop.fs.s3native.NativeS3FileSystem',
            'hadoop.tmp.dir': tmp_dir
        }
        # wire S3 credentials in only when actually provided
        if AWS_ID != 'undefined':
            core_site['fs.s3n.awsAccessKeyId'] = AWS_ID
            core_site['fs.s3n.awsSecretAccessKey'] = AWS_KEY
        update_site('%s/etc/hadoop/core-site.xml' % HADOOP_HOME, core_site)
        hdfs_site = {
            'dfs.data.dir': ','.join(['%s/data' % d for d in hdfs_dir]),
            'dfs.permissions': 'false',
            'dfs.replication': '1'
        }
        update_site('%s/etc/hadoop/hdfs-site.xml' % HADOOP_HOME, hdfs_site)
        yarn_site = {
            'yarn.resourcemanager.resource-tracker.address': '%s:8025' % master,
            'yarn.resourcemanager.scheduler.address': '%s:8030' % master,
            'yarn.resourcemanager.address': '%s:8032' % master,
            'yarn.scheduler.minimum-allocation-mb': 512,
            'yarn.scheduler.maximum-allocation-mb': 640000,
            'yarn.scheduler.minimum-allocation-vcores': 1,
            'yarn.scheduler.maximum-allocation-vcores': 32,
            'yarn.nodemanager.resource.memory-mb': vcpu * ram_per_container,
            'yarn.nodemanager.resource.cpu-vcores': vcpu,
            'yarn.log-aggregation-enable': 'true',
            'yarn.nodemanager.vmem-check-enabled': 'false',
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.nodemanager.aux-services.mapreduce.shuffle.class': 'org.apache.hadoop.mapred.ShuffleHandler',
            'yarn.nodemanager.remote-app-log-dir': os.path.join(tmp_dir, 'logs'),
            'yarn.nodemanager.log-dirs': os.path.join(tmp_dir, 'userlogs'),
            'yarn.nodemanager.local-dirs': ','.join(['%s/yarn/nm-local-dir' % d for d in hadoop_dir])
        }
        update_site('%s/etc/hadoop/yarn-site.xml' % HADOOP_HOME, yarn_site)
        mapred_site = {
            'mapreduce.application.classpath' : ':'.join(['$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*',
                                                          '$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*',
                                                          '$HADOOP_MAPRED_HOME/share/hadoop/tools/lib/*']),
            'yarn.app.mapreduce.am.resource.mb': 2 * ram_per_container,
            'yarn.app.mapreduce.am.command-opts': '-Xmx%dm' % int(0.8 * 2 * ram_per_container),
            'mapreduce.framework.name': 'yarn',
            'mapreduce.map.cpu.vcores': 1,
            'mapreduce.map.memory.mb': ram_per_container,
            'mapreduce.map.java.opts': '-Xmx%dm' % int(0.8 * ram_per_container),
            'mapreduce.reduce.cpu.vcores': 1,
            'mapreduce.reduce.memory.mb': 2 * ram_per_container,
            'mapreduce.reduce.java.opts': '-Xmx%dm' % int(0.8 * ram_per_container)
        }
        update_site('%s/etc/hadoop/mapred-site.xml' % HADOOP_HOME, mapred_site)
        capacity_site = {
            'yarn.scheduler.capacity.resource-calculator': 'org.apache.hadoop.yarn.util.resource.DominantResourceCalculator'
        }
        update_site('%s/etc/hadoop/capacity-scheduler.xml' % HADOOP_HOME, capacity_site)
        # hadoop-env.sh is rewritten wholesale (not merged like the XML files)
        fo = open('%s/etc/hadoop/hadoop-env.sh' % HADOOP_HOME, 'w')
        fo.write('export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$HADOOP_PREFIX/share/hadoop/tools/lib/*\n')
        fo.write('export HADOOP_LOG_DIR=%s/log\n' % tmp_dir)
        fo.write('export YARN_LOG_DIR=%s/log\n' % tmp_dir)
        fo.write('export JAVA_HOME=\"%s\"\n' % JAVA_HOME)
        fo.close()
        # NOTE(review): only the master is written to the slaves file —
        # presumably workers register themselves via their own bootstrap.
        fo = open('%s/etc/hadoop/slaves' % HADOOP_HOME, 'w')
        fo.write(master + '\n')
        fo.close()
    def run_install():
        """Download/unpack Hadoop if needed, default HADOOP_HOME, write the
        site files, and return the env var pairs to export."""
        if not os.path.exists('hadoop-2.8.0'):
            run('wget %s' % hadoop_url)
            run('tar xf hadoop-2.8.0.tar.gz')
            run('rm -f hadoop-2.8.0.tar.gz')
        global HADOOP_HOME
        if HADOOP_HOME is None:
            HADOOP_HOME = os.path.abspath('hadoop-2.8.0')
        env = [('HADOOP_HOME', HADOOP_HOME)]
        env += [('HADOOP_PREFIX', HADOOP_HOME)]
        env += [('HADOOP_MAPRED_HOME', HADOOP_HOME)]
        env += [('HADOOP_COMMON_HOME', HADOOP_HOME)]
        env += [('HADOOP_HDFS_HOME', HADOOP_HOME)]
        env += [('YARN_HOME', HADOOP_HOME)]
        env += [('YARN_CONF_DIR', '%s/etc/hadoop' % HADOOP_HOME)]
        env += [('HADOOP_CONF_DIR', '%s/etc/hadoop' % HADOOP_HOME)]
        # only devices that actually exist on this instance
        disks = ['/disk/%s' % d for d in DISK_LIST if os.path.exists('/dev/%s' % d)]
        setup_hadoop_site(MASTER,
                          ['%s/hadoop' % d for d in disks],
                          ['%s/hadoop/dfs' % d for d in disks],
                          NODE_VCPU, NODE_VMEM)
        return env
    # executed immediately: install_hadoop() both installs and returns env
    return run_install()
def regsshkey(fname):
    """Refresh ~/.ssh/known_hosts for every host listed in *fname*.

    Removes any stale key first, then re-scans the host; 'localhost' and
    '0.0.0.0' are always included.
    """
    # use a context manager so the file handle is closed deterministically
    with open(fname) as fo:
        hosts = fo.readlines()
    for dns in (hosts + ['localhost', '0.0.0.0']):
        host = dns.strip()
        try:
            run('ssh-keygen -R %s' % host)
        # best-effort removal; a missing entry is fine.  The original used a
        # bare "except:", which would also swallow KeyboardInterrupt/SystemExit.
        except Exception:
            pass
        run('ssh-keyscan %s >> ~/.ssh/known_hosts' % host)
# main script to install all dependencies
def install_main(is_master):
    """Install packages, Java and Hadoop on this node, persist the resulting
    environment to ~/.hadoop_env (sourced from .bashrc), and set up
    passwordless ssh between the cluster hosts."""
    if is_master:
        install_packages(master_apt_packages + node_apt_packages)
    else:
        install_packages(node_apt_packages)
    env = []
    env += install_java()
    env += install_hadoop(is_master)
    path = ['$HADOOP_HOME/bin', '$HADOOP_HOME/sbin', '$JAVA_HOME/bin']
    env += [('LD_LIBRARY_PATH', '$HADOOP_HOME/native/lib')]
    env += [('LD_LIBRARY_PATH', '${LD_LIBRARY_PATH}:$HADOOP_HDFS_HOME/lib/native:$JAVA_HOME/jre/lib/amd64/server')]
    env += [('LD_LIBRARY_PATH', '${LD_LIBRARY_PATH}:/usr/local/lib')]
    # BUG FIX: JVM heap options take a single dash; '--Xmx128m' is not a
    # valid option and would make the libhdfs-embedded JVM fail to start.
    env += [('LIBHDFS_OPTS', '-Xmx128m')]
    env += [('MY_MASTER_DNS', MASTER)]
    env += [('MY_NODE_TYPE', NODE_TYPE)]
    env += [('MY_NODE_VMEM', str(NODE_VMEM))]
    env += [('MY_NODE_VCPU', str(NODE_VCPU))]
    if AWS_ID != 'undefined':
        env += [('AWS_ACCESS_KEY_ID', AWS_ID)]
    if AWS_KEY != 'undefined':
        env += [('AWS_SECRET_ACCESS_KEY', AWS_KEY)]
    # setup environments: persist to ~/.hadoop_env and mirror into ENVIRON
    # so child processes started by run() see the same values
    fo = open('.hadoop_env', 'w')
    for k, v in env:
        fo.write('export %s=%s\n' % (k,v))
        ENVIRON[k] = v
    fo.write('export PATH=$PATH:%s\n' % (':'.join(path)))
    fo.write('export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib\n')
    fo.close()
    # only append the source line to .bashrc once (close the handle properly)
    with open('.bashrc') as fi:
        for l in fi:
            if l.find('.hadoop_env') != -1:
                return
    run('echo source ~/.hadoop_env >> ~/.bashrc')
    # allow ssh, if they already share the key.
    key_setup = """
    [ -f ~/.ssh/id_rsa ] ||
        (ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
         cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
    """
    run(key_setup)
    regsshkey('%s/etc/hadoop/slaves' % HADOOP_HOME)
    # end of installation.
# end of installation.
# Make startup script for building
def make_startup_script(is_master):
    """Write and immediately execute ./startup.sh: format/mount the local
    disks, lay out the Hadoop directories, and start the HDFS/YARN daemons
    (full start-all on the master, nodemanager only on workers)."""
    assert JAVA_HOME is not None
    assert HADOOP_HOME is not None
    assert NODE_VCPU is not None
    assert NODE_VMEM is not None
    disks = []
    cmds = []
    if is_master:
        # stop any previously running cluster before re-formatting
        cmds.append('$HADOOP_HOME/sbin/stop-all.sh')
    # format and mount every present local device as /disk/<dev>
    for d in DISK_LIST:
        if os.path.exists('/dev/%s' % d):
            cmds.append('sudo umount /dev/%s' % d)
            cmds.append('sudo mkfs -t ext4 /dev/%s' % d)
            cmds.append('sudo mkdir -p /disk/%s' % d)
            cmds.append('sudo mount /dev/%s /disk/%s' % (d, d))
            disks.append('/disk/%s' % d)
    # recreate the per-disk hadoop/dfs directory layout from scratch
    for d in disks:
        cmds.append('sudo mkdir -p %s/hadoop' %d)
        cmds.append('sudo chown ubuntu:ubuntu %s/hadoop' % d)
        cmds.append('sudo mkdir -p %s/tmp' %d)
        cmds.append('sudo chown ubuntu:ubuntu %s/tmp' % d)
        cmds.append('rm -rf %s/hadoop/dfs' % d)
        cmds.append('mkdir %s/hadoop/dfs' % d)
        cmds.append('mkdir %s/hadoop/dfs/name' % d)
        cmds.append('mkdir %s/hadoop/dfs/data' % d)
    # run command
    if is_master:
        cmds.append('$HADOOP_HOME/bin/hadoop namenode -format')
        cmds.append('$HADOOP_HOME/sbin/start-all.sh')
    else:
        cmds.append('export HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec &&'\
                    ' $HADOOP_HOME/sbin/yarn-daemon.sh --config $HADOOP_HOME/etc/hadoop start nodemanager')
    with open('startup.sh', 'w') as fo:
        fo.write('#!/bin/bash\n')
        fo.write('set -v\n')
        fo.write('\n'.join(cmds))
    run('chmod +x startup.sh')
    run('./startup.sh')
def main():
    """Bootstrap entry point: decide master/worker role, install all
    dependencies, then generate and run the startup script."""
    global MASTER
    logging.basicConfig(filename = 'bootstrap.log', level = logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s')
    # no MY_MASTER_DNS given: this very host acts as the master
    if MASTER == '':
        is_master = True
        MASTER = socket.getfqdn()
        logging.info('assuming master is myself as %s' % MASTER)
    else:
        is_master = socket.getfqdn() == MASTER
    tstart = time.time()
    install_main(is_master)
    tmid = time.time()
    logging.info('installation finishes in %g secs' % (tmid - tstart))
    make_startup_script(is_master)
    # re-export globals (possibly defaulted during install) to children
    ENVIRON['HADOOP_HOME'] = HADOOP_HOME
    ENVIRON['JAVA_HOME'] = JAVA_HOME
    tend = time.time()
    if is_master:
        custom_master_install()
    custom_all_nodes_install()
    logging.info('boostrap finishes in %g secs' % (tend - tmid))
    logging.info('all finishes in %g secs' % (tend - tstart))
if __name__ == '__main__':
    # Drop privileges to USER_NAME ('ubuntu') before bootstrapping.
    pw_record = pwd.getpwnam(USER_NAME)
    user_name = pw_record.pw_name  # NOTE(review): unused below
    user_home_dir = pw_record.pw_dir
    user_uid = pw_record.pw_uid
    user_gid = pw_record.pw_gid
    env = os.environ.copy()  # NOTE(review): unused; ENVIRON is used instead
    cwd = user_home_dir  # NOTE(review): unused
    ENVIRON['HOME'] = user_home_dir
    # setgid must precede setuid: after setuid we no longer have the
    # privilege required to change the group
    os.setgid(user_gid)
    os.setuid(user_uid)
    os.chdir(user_home_dir)
    main()
|
<filename>elmo/api/client.py<gh_stars>0
from threading import Lock
from contextlib import contextmanager
from functools import lru_cache
from requests import Session
from requests.exceptions import HTTPError
from .router import Router
from .decorators import require_session, require_lock
from .exceptions import (
QueryNotValid,
CredentialError,
CodeError,
InvalidSector,
InvalidInput,
LockError,
)
from .. import query as q
class ElmoClient(object):
"""ElmoClient class provides all the functionalities to connect
to an Elmo system. During the authentication a short-lived token is stored
in the instance and is used to arm/disarm the system.
Usage:
# Authenticate to the system (read-only mode)
c = ElmoClient()
c.auth("username", "password")
# Obtain a lock to do actions on the system (write mode)
with c.lock("alarm_code"):
c.arm() # Arms all alarms
c.disarm() # Disarm all alarms
"""
def __init__(self, base_url=None, domain=None, session_id=None):
self._router = Router(base_url)
self._domain = domain
self._session = Session()
self._session_id = session_id
self._session_expire = 0
self._lock = Lock()
self._strings = None
# TODO: this item doesn't belong to the client. Split the stateful
# implementation from the client, so that it can stay stateless.
self._latestEntryId = {
q.SECTORS: 0,
q.INPUTS: 0,
}
    def auth(self, username, password):
        """Authenticate the client and retrieves the access token. This method uses
        the Authentication API.
        Args:
            username: the Username used for the authentication.
            password: the Password used for the authentication.
        Raises:
            CredentialError: if the API answers 403 (wrong username/password).
            HTTPError: if there is an error raised by the API (not 2xx response).
        Returns:
            The access token retrieved from the API. The token is also cached in
            the `ElmoClient` instance.
        """
        try:
            payload = {"username": username, "password": password}
            if self._domain is not None:
                payload["domain"] = self._domain
            response = self._session.get(self._router.auth, params=payload)
            response.raise_for_status()
        except HTTPError as err:
            # 403: Incorrect username or password
            if err.response.status_code == 403:
                raise CredentialError
            raise err
        # Store the session_id
        data = response.json()
        self._session_id = data["SessionId"]
        # Register the redirect URL and try the authentication again:
        # the backend may point the client at a different base URL, in which
        # case the router is re-targeted and the login repeated once.
        if data["Redirect"]:
            self._router._base_url = data["RedirectTo"]
            redirect = self._session.get(self._router.auth, params=payload)
            redirect.raise_for_status()
            data = redirect.json()
            self._session_id = data["SessionId"]
        return self._session_id
@require_session
def poll(self):
"""Use a long-polling API to identify when something changes in the
system. Calling this method blocks the thread for 15 seconds, waiting
for a backend response that happens only when the alarm system status
changes. Don't call this method from your main thread otherwise the
application hangs.
When the API returns that something is changed, you must call the
`client.check()` to update your identifiers. Missing this step means
that the next time you will call `client.poll()` the API returns immediately
with an old result.
Raises:
HTTPError: if there is an error raised by the API (not 2xx response).
Returns:
A dictionary that includes what items have been changed. The following
structure means that `areas` are not changed, while inputs are:
{
"areas": False,
"inputs": True,
}
"""
payload = {
"sessionId": self._session_id,
"Areas": self._latestEntryId[q.SECTORS],
"Inputs": self._latestEntryId[q.INPUTS],
"CanElevate": "1",
"ConnectionStatus": "1",
}
response = self._session.post(self._router.update, data=payload)
response.raise_for_status()
state = response.json()
return {
"has_changes": state["HasChanges"],
"areas": state["Areas"],
"inputs": state["Inputs"],
}
@contextmanager
@require_session
def lock(self, code):
    """Context manager to obtain a system lock. The alerting system allows
    only one user at a time and obtaining the lock is mandatory. When the
    context manager is closed, the lock is automatically released.

    Args:
        code: the alarm code used to obtain the lock.
    Raises:
        CodeError: if used `code` is not valid.
        LockError: if the server is refusing to assign the lock. It could mean
            that an unexpected issue happened, or that another application is
            holding the lock. It's possible to retry the operation.
        HTTPError: if there is an error raised by the API (not 2xx response).
    Returns:
        A client instance with an acquired lock.
    """
    # Bug fix: the payload previously contained a scrubbed "<PASSWORD>"
    # placeholder, which is not defined anywhere and would crash at runtime;
    # the `code` argument is the value the API expects here.
    payload = {"userId": 1, "password": code, "sessionId": self._session_id}
    response = self._session.post(self._router.lock, data=payload)
    try:
        response.raise_for_status()
    except HTTPError as err:
        # 403: Not possible to obtain the lock, probably because of a race
        # condition with another application holding it.
        if err.response.status_code == 403:
            raise LockError
        raise err
    # A wrong code returns 200 with a fail state
    body = response.json()
    if not body[0]["Successful"]:
        raise CodeError
    self._lock.acquire()
    try:
        yield self
    finally:
        # Always release the remote and local lock, even if the caller's
        # with-block raises.
        self.unlock()
@require_session
@require_lock
def unlock(self):
    """Release the system lock so that other threads (or this instance) can
    acquire the lock again. This method requires a valid session ID and if
    called when a Lock() is not acquired it bails out.

    If there is a server error or if the call fails, the lock is not released
    so the current thread can do further work before letting another thread
    gain the lock.

    Raises:
        HTTPError: if there is an error raised by the API (not 2xx response).
    Returns:
        A boolean if the lock has been released correctly.
    """
    response = self._session.post(
        self._router.unlock, data={"sessionId": self._session_id}
    )
    response.raise_for_status()
    # Only a successful API call reaches this point, so it is safe to drop
    # the local lock; on failure the owner keeps it and may retry.
    self._lock.release()
    return True
@require_session
@require_lock
def arm(self, sectors=None):
    """Arm system alarms without any activation delay. This API works only
    if a system lock has been obtained, otherwise the action ends with a failure.

    It is possible to enable ALL sectors, or provide a list of sectors such as:
        client.arm()        # Arms all sectors
        client.arm([3, 4])  # Arms only sectors 3 and 4

    Args:
        sectors: (optional) list of sectors that must be armed. If the variable is
            empty, ALL is assumed and the entire system is armed. If multiple items
            are in the list, multiple requests are sent to arm given sectors.
    Raises:
        InvalidSector: if any of the given sectors doesn't exist.
        HTTPError: if there is an error raised by the API (not 2xx response).
    Returns:
        A boolean if the system has been armed correctly.
    """
    payloads = []
    sectors = sectors or []
    if sectors:
        # Arm only selected sectors: one payload per sector (the API uses
        # ElementsClass 9 together with an individual sector index here).
        for sector in sectors:
            payloads.append(
                {
                    "CommandType": 1,
                    "ElementsClass": 9,
                    "ElementsIndexes": sector,
                    "sessionId": self._session_id,
                }
            )
    else:
        # Arm ALL sectors with a single request (ElementsClass/Index 1).
        payloads = [
            {
                "CommandType": 1,
                "ElementsClass": 1,
                "ElementsIndexes": 1,
                "sessionId": self._session_id,
            }
        ]
    # Arming multiple sectors requires multiple requests
    errors = []
    for payload in payloads:
        response = self._session.post(self._router.send_command, data=payload)
        response.raise_for_status()
        # A not existing sector returns 200 with a fail state
        body = response.json()
        if not body[0]["Successful"]:
            errors.append(payload["ElementsIndexes"])
    # Raise an exception if errors are detected
    if errors:
        invalid_sectors = ",".join(str(x) for x in errors)
        raise InvalidSector(
            "Selected sectors don't exist: {}".format(invalid_sectors)
        )
    return True
@require_session
@require_lock
def disarm(self, sectors=None):
    """Disarm system alarms. This API works only if a system lock has been
    obtained, otherwise the action ends with a failure.

    It is possible to disable ALL sectors, or provide a list of sectors such as:
        client.disarm()     # Disarms all sectors
        client.disarm([3])  # Disarms only sector 3

    Args:
        sectors: (optional) list of sectors that must be disarmed. If the variable is
            empty, ALL is assumed and the entire system is disarmed. If multiple items
            are in the list, multiple requests are sent to disarm given sectors.
    Raises:
        InvalidSector: if any of the given sectors doesn't exist.
        HTTPError: if there is an error raised by the API (not 2xx response).
    Returns:
        A boolean if the system has been disarmed correctly.
    """
    payloads = []
    sectors = sectors or []
    if sectors:
        # Disarm only selected sectors: one payload per sector (the API uses
        # ElementsClass 9 together with an individual sector index here).
        for sector in sectors:
            payloads.append(
                {
                    "CommandType": 2,
                    "ElementsClass": 9,
                    "ElementsIndexes": sector,
                    "sessionId": self._session_id,
                }
            )
    else:
        # Disarm ALL sectors with a single request (ElementsClass/Index 1).
        payloads = [
            {
                "CommandType": 2,
                "ElementsClass": 1,
                "ElementsIndexes": 1,
                "sessionId": self._session_id,
            }
        ]
    # Disarming multiple sectors requires multiple requests
    errors = []
    for payload in payloads:
        response = self._session.post(self._router.send_command, data=payload)
        response.raise_for_status()
        # A not existing sector returns 200 with a fail state
        body = response.json()
        if not body[0]["Successful"]:
            errors.append(payload["ElementsIndexes"])
    # Raise an exception if errors are detected
    if errors:
        invalid_sectors = ",".join(str(x) for x in errors)
        raise InvalidSector(
            "Selected sectors don't exist: {}".format(invalid_sectors)
        )
    return True
@require_session
@require_lock
def exclude(self, inputs):
    """Exclude passed inputs: they are not alarmed when you arm the area
    they belong to.

    This API provides the same effects as turning them from "idle" to
    "bypassed" on the E-Connect web UI. It works only if a system lock has
    been obtained, otherwise the action ends with a failure.

    It is possible to provide a list of inputs such as:
        client.exclude([3])     # Excludes only input 3
        client.exclude([3, 5])  # Excludes input 3 and 5

    Args:
        inputs: list of inputs that must be excluded. If multiple items
            are in the list, multiple requests are sent to exclude given inputs.
    Raises:
        InvalidInput: if any of the given inputs doesn't exist.
        HTTPError: if there is an error raised by the API (not 2xx response).
    Returns:
        A boolean if the input has been excluded correctly.
    """
    # One request per input: the endpoint targets a single element index.
    failed = []
    for element in inputs:
        command = {
            "CommandType": 2,
            "ElementsClass": 10,
            "ElementsIndexes": element,
            "sessionId": self._session_id,
        }
        response = self._session.post(self._router.send_command, data=command)
        response.raise_for_status()
        # A not existing input returns 200 with a fail state
        if not response.json()[0]["Successful"]:
            failed.append(element)
    if failed:
        invalid_inputs = ",".join(str(x) for x in failed)
        raise InvalidInput("Selected inputs don't exist: {}".format(invalid_inputs))
    return True
@require_session
@require_lock
def include(self, inputs):
    """Include system inputs: they are alarmed when you arm the area they
    belong to.

    This API provides the same effects as turning them from "bypassed" to
    "idle" on the E-Connect web UI. It works only if a system lock has been
    obtained, otherwise the action ends with a failure.

    It is possible to provide a list of inputs such as:
        client.include([3])     # Includes only input 3
        client.include([3, 5])  # Includes input 3 and 5

    Args:
        inputs: list of inputs that must be included. If multiple items
            are in the list, multiple requests are sent to include given inputs.
    Raises:
        InvalidInput: if any of the given inputs doesn't exist.
        HTTPError: if there is an error raised by the API (not 2xx response).
    Returns:
        A boolean if the input has been included correctly.
    """
    # One request per input: the endpoint targets a single element index.
    failed = []
    for element in inputs:
        command = {
            "CommandType": 1,
            "ElementsClass": 10,
            "ElementsIndexes": element,
            "sessionId": self._session_id,
        }
        response = self._session.post(self._router.send_command, data=command)
        response.raise_for_status()
        # A not existing input returns 200 with a fail state
        if not response.json()[0]["Successful"]:
            failed.append(element)
    if failed:
        invalid_inputs = ",".join(str(x) for x in failed)
        raise InvalidInput("Selected inputs don't exist: {}".format(invalid_inputs))
    return True
@lru_cache(maxsize=1)
@require_session
def _get_descriptions(self):
    """Retrieve Sectors and Inputs names to map `Class` and `Index` into a
    human readable description. This method calls the E-Connect API, but the
    result is cached for the entire `ElmoClient` life-cycle.

    Raises:
        HTTPError: if there is an error raised by the API (not 2xx response).
    Returns:
        A dictionary having `Class` as key, and a dictionary of strings
        (`Index`) as a value, to map sectors and inputs names.
    """
    # NOTE(review): `lru_cache` on an instance method keys the cache on
    # `self` and keeps the instance alive for the cache's lifetime (ruff
    # B019); with maxsize=1 this is bounded, but a per-instance cache would
    # be cleaner — confirm before changing.
    payload = {"sessionId": self._session_id}
    response = self._session.post(self._router.descriptions, data=payload)
    response.raise_for_status()
    # Transform the flat list of items into {Class: {Index: Description}}.
    descriptions = {}
    for item in response.json():
        classes = descriptions.get(item["Class"], {})
        classes[item["Index"]] = item["Description"]
        descriptions[item["Class"]] = classes
    return descriptions
@require_session
def query(self, query):
    """Query an Elmo System to retrieve registered entries. Items are grouped
    by "Active" status. It's possible to query different part of the system
    using the `elmo.query` module:

        from elmo import query
        sectors_armed, sectors_disarmed = client.query(query.SECTORS)
        inputs_alerted, inputs_wait = client.query(query.INPUTS)

    Raises:
        QueryNotValid: if the query is not recognized.
        HTTPError: if there is an error raised by the API (not 2xx response).
    Returns:
        A tuple containing two list `(active, not_active)`. Every item is an entry
        (sector or input) represented by a `dict` with the following fields: `id`,
        `index`, `element`, `name`.
    """
    # Query detection: pick the endpoint and the status field used to split
    # entries into active / not active.
    if query == q.SECTORS:
        endpoint = self._router.sectors
        status = "Active"
    elif query == q.INPUTS:
        status = "Alarm"
        endpoint = self._router.inputs
    else:
        # Bail-out if the query is not recognized
        raise QueryNotValid()
    response = self._session.post(endpoint, data={"sessionId": self._session_id})
    response.raise_for_status()
    # Retrieve cached descriptions (one API call per client life-cycle)
    descriptions = self._get_descriptions()
    # Filter only entries that are used
    active = []
    not_active = []
    entries = response.json()
    # The last entry ID is used in `self.poll()` long-polling API.
    # NOTE(review): assumes the API always returns at least one entry; an
    # empty response would raise IndexError here — confirm.
    self._latestEntryId[query] = entries[-1]["Id"]
    # Massage data
    for entry in entries:
        if entry["InUse"]:
            item = {
                "id": entry["Id"],
                "index": entry["Index"],
                "element": entry["Element"],
                "name": descriptions[query][entry["Index"]],
            }
            if entry[status]:
                active.append(item)
            else:
                not_active.append(item)
    return active, not_active
@require_session
def check(self):
    """Check the Elmo System to get the status of armed or disarmed sectors
    or inputs that are in alerted state or that are waiting. This method checks:
        * If any sector is in alerted state
        * If the alarm for each sector is armed or disarmed
        * If the alarm for each input is in alerted state or not

    This method is considered a shortcut that calls `client.query()` with
    `SECTORS` and `INPUTS` queries.

    Raises:
        HTTPError: if there is an error raised by the API (not 2xx response).
    Returns:
        A `dict` object that includes all the above information. The `dict` is in
        the following format:
            {
                "sectors_armed": [{"id": 0, "name": "Entryway", "element": 1, "index": 0}, ...],
                "sectors_disarmed": [{"id": 1, "name": "Kitchen", "element": 2, "index": 1}, ...],
                "inputs_alerted": [{"id": 0, "name": "Door", "element": 3, "index": 0}, ...],
                "inputs_wait": [{"id": 1, "name": "Window", "element": 4, "index": 1}, ...],
            }
    """
    # Two queries, same order as the returned keys: sectors first, inputs second.
    status = {}
    status["sectors_armed"], status["sectors_disarmed"] = self.query(q.SECTORS)
    status["inputs_alerted"], status["inputs_wait"] = self.query(q.INPUTS)
    return status
|
from app import db
from models.issn import ISSNMetaData, ISSNToISSNL
from models.journal import Journal
from models.usage import DOICount, OpenAccess
class MergeIssn:
    """Merge one ISSN-L record into another.

    Deletes the journal/metadata/usage rows keyed by `issn_from`, then
    re-maps its ISSNs and title onto the surviving `issn_to` record.
    """

    def __init__(self, issn_from, issn_to):
        self.issn_from = issn_from  # ISSN-L being merged away (deleted)
        self.issn_to = issn_to      # surviving ISSN-L
        self.old_issns = []         # issn.org ISSNs captured before deletion
        self.old_title = None       # journal title captured before deletion

    def merge_issn(self):
        """Run the full merge. Order matters: old rows must be captured and
        deleted before they are re-mapped onto the new ISSN-L."""
        self.delete_old_journal()
        self.delete_old_issn_metadata()
        self.delete_old_issn_to_issnl()
        self.delete_old_doi_data()
        self.delete_old_open_access_data()
        self.map_issns_to_new_issn_l()
        self.add_issn_to_new_issn_org_issns()
        self.set_other_title()

    def delete_old_journal(self):
        """Delete the Journal row for `issn_from`, remembering its title."""
        j = db.session.query(Journal).filter_by(issn_l=self.issn_from).one()
        # Safety check: refuse to delete a journal that still has pricing data.
        if j.subscription_prices or j.apc_prices:
            raise Exception(
                "subscription or apc price exists for journal to be deleted."
            )
        self.old_title = j.title
        db.session.delete(j)
        db.session.commit()
        print("journal entry for issn {} deleted".format(self.issn_from))

    def delete_old_issn_metadata(self):
        """Delete the ISSNMetaData row for `issn_from`, remembering its
        issn.org ISSNs for later re-mapping."""
        i = db.session.query(ISSNMetaData).filter_by(issn_l=self.issn_from).one()
        self.old_issns = i.issn_org_issns
        db.session.delete(i)
        db.session.commit()
        print("issn metadata for issn {} deleted".format(self.issn_from))

    def delete_old_issn_to_issnl(self):
        """Delete every ISSN -> ISSN-L mapping row pointing at `issn_from`."""
        issns_to_remove = (
            db.session.query(ISSNToISSNL).filter_by(issn_l=self.issn_from).all()
        )
        for issn in issns_to_remove:
            db.session.delete(issn)
            # Commit (and report) row by row.
            db.session.commit()
            print(
                "issn to issnl mapping data for issn {} and issn_l {} deleted".format(
                    issn.issn, issn.issn_l
                )
            )

    def delete_old_doi_data(self):
        """Delete the DOICount row for `issn_from`, if one exists."""
        dois = db.session.query(DOICount).filter_by(issn_l=self.issn_from).one_or_none()
        if dois:
            db.session.delete(dois)
            db.session.commit()

    def delete_old_open_access_data(self):
        """Delete all OpenAccess rows for `issn_from`."""
        open_access = (
            db.session.query(OpenAccess).filter_by(issn_l=self.issn_from).all()
        )
        for o in open_access:
            db.session.delete(o)
            db.session.commit()

    def map_issns_to_new_issn_l(self):
        """Re-point the captured issn.org ISSNs at the surviving ISSN-L."""
        for old_issn in self.old_issns:
            issn_to_issnl = ISSNToISSNL(issn_l=self.issn_to, issn=old_issn)
            db.session.add(issn_to_issnl)
            db.session.commit()
        print(
            "issn {} mapped to {} in issn to issnl table".format(
                self.issn_from, self.issn_to
            )
        )

    def add_issn_to_new_issn_org_issns(self):
        """Append the captured ISSNs to the surviving metadata record and
        record `issn_from` in its previous_issn_ls history."""
        metadata = db.session.query(ISSNMetaData).filter_by(issn_l=self.issn_to).one()
        metadata.issn_org_issns = metadata.issn_org_issns + self.old_issns
        metadata.previous_issn_ls = (
            metadata.previous_issn_ls + [self.issn_from]
            if metadata.previous_issn_ls
            else [self.issn_from]
        )
        db.session.commit()
        print(
            "issn {} added to issn_org column for {} issn_l metadata record".format(
                self.issn_from, self.issn_to
            )
        )

    def set_other_title(self):
        """If the deleted journal's title differs from the surviving one,
        store it in the surviving journal's other_titles list."""
        j = db.session.query(Journal).filter_by(issn_l=self.issn_to).one()
        # Case-insensitive comparison to avoid storing trivial duplicates.
        if j.title.lower() != self.old_title.lower():
            j.other_titles = (
                j.other_titles + [self.old_title]
                if j.other_titles
                else [self.old_title]
            )
            db.session.commit()
            print(
                "set other title {} on journal with title {}".format(
                    self.old_title, j.title
                )
            )
|
# -*- coding: utf-8 -*-
"""
Created on Feb 09, 2018
@author: Tyranic-Moron
"""
from twisted.plugin import IPlugin
from pymoronbot.moduleinterface import IModule
from pymoronbot.modules.commandinterface import BotCommand, admin
from zope.interface import implementer
import re
from collections import OrderedDict
from pymoronbot.response import IRCResponse, ResponseType
@implementer(IPlugin, IModule)
class Admin(BotCommand):
    """Bot command that manages the list of users with admin permissions
    (subcommands: add, del, list)."""

    def triggers(self):
        # Command word that invokes this module.
        return ['admin']

    @admin("Only my admins may add new admins!")
    def _add(self, message):
        """add <nick/full hostmask> - adds the specified user to the bot admins list.
        You can list multiple users to add them all at once.
        Nick alone will be converted to a glob hostmask, eg: *!user@host"""
        if len(message.ParameterList) < 2:
            return IRCResponse(ResponseType.Say,
                               u"You didn't give me a user to add!",
                               message.ReplyTo)
        for admin in message.ParameterList[1:]:
            # If the name matches a nick currently in the channel, expand it
            # to a glob hostmask (*!user@host) before storing it.
            if message.ReplyTo in self.bot.channels:
                if admin in self.bot.channels[message.ReplyTo].Users:
                    user = self.bot.channels[message.ReplyTo].Users[admin]
                    admin = u'*!{}@{}'.format(user.User, user.Hostmask)
            admins = self.bot.config.getWithDefault('admins', [])
            admins.append(admin)
            self.bot.config['admins'] = admins
        # Persist the updated admin list.
        self.bot.config.writeConfig()
        return IRCResponse(ResponseType.Say,
                           u"Added specified users as bot admins!",
                           message.ReplyTo)

    @admin("Only my admins may remove admins!")
    def _del(self, message):
        """del <full hostmask> - removes the specified user from the bot admins list.
        You can list multiple users to remove them all at once."""
        if len(message.ParameterList) < 2:
            return IRCResponse(ResponseType.Say,
                               u"You didn't give me a user to remove!",
                               message.ReplyTo)
        deleted = []  # hostmasks actually removed
        skipped = []  # hostmasks not present in the admin list
        admins = self.bot.config.getWithDefault('admins', [])
        for admin in message.ParameterList[1:]:
            # Same nick -> glob hostmask expansion as in _add.
            if message.ReplyTo in self.bot.channels:
                if admin in self.bot.channels[message.ReplyTo].Users:
                    user = self.bot.channels[message.ReplyTo].Users[admin]
                    admin = u'*!{}@{}'.format(user.User, user.Hostmask)
            if admin not in admins:
                skipped.append(admin)
                continue
            admins.remove(admin)
            deleted.append(admin)
        self.bot.config['admins'] = admins
        # Persist the updated admin list.
        self.bot.config.writeConfig()
        return IRCResponse(ResponseType.Say,
                           u"Removed '{}' as admin(s), {} skipped"
                           .format(u', '.join(deleted), len(skipped)),
                           message.ReplyTo)

    def _list(self, message):
        """list - lists all admins"""
        owners = self.bot.config.getWithDefault('owners', [])
        admins = self.bot.config.getWithDefault('admins', [])
        return IRCResponse(ResponseType.Say,
                           u"Owners: {} | Admins: {}".format(u', '.join(owners),
                                                             u', '.join(admins)),
                           message.ReplyTo)

    # Dispatch table mapping subcommand word -> handler method.
    subCommands = OrderedDict([
        (u'add', _add),
        (u'del', _del),
        (u'list', _list)])

    def help(self, query):
        """
        @type query: list[str]
        @rtype str
        """
        if len(query) > 1:
            subCommand = query[1].lower()
            if subCommand in self.subCommands:
                # Collapse the handler docstring's whitespace into one line.
                return u'{1}admin {0}'.format(re.sub(r"\s+", u" ", self.subCommands[subCommand].__doc__),
                                              self.bot.commandChar)
            else:
                return self._unrecognizedSubcommand(subCommand)
        else:
            return self._helpText()

    def _helpText(self):
        # Generic usage string listing all available subcommands.
        return u"{1}admin ({0}) - manages users with bot admin permissions. " \
               u"Use '{1}help admin <subcommand> for subcommand help.".format(u'/'.join(self.subCommands.keys()),
                                                                              self.bot.commandChar)

    def _unrecognizedSubcommand(self, subCommand):
        return u"unrecognized subcommand '{}', " \
               u"available subcommands for admin are: {}".format(subCommand, u', '.join(self.subCommands.keys()))

    def execute(self, message):
        # Route the first parameter to the matching subcommand handler.
        if len(message.ParameterList) > 0:
            subCommand = message.ParameterList[0].lower()
            if subCommand not in self.subCommands:
                return IRCResponse(ResponseType.Say,
                                   self._unrecognizedSubcommand(subCommand),
                                   message.ReplyTo)
            return self.subCommands[subCommand](self, message)
        else:
            return IRCResponse(ResponseType.Say,
                               self._helpText(),
                               message.ReplyTo)
# NOTE(review): module-level instance; presumably discovered by
# twisted.plugin scanning for IPlugin providers — confirm.
adminCommand = Admin()
|
<gh_stars>0
# A request to get a total count on refdata for a search query
search_request1 = {
    "id": "xyz",
    "method": "KBaseSearchEngine.search_objects",
    "version": "1.1",
    "params": [{
        "access_filter": {
            "with_private": 0,
            "with_public": 1
        },
        "match_filter": {
            "exclude_subobjects": 1,
            "full_text_in_all": "coli",
            "source_tags": ["refdata"],
            "source_tags_blacklist": 0
        },
        "pagination": {
            "count": 0,
            "start": 0
        },
        "post_processing": {
            "ids_only": 1,
            "include_highlight": 1,
            "skip_data": 1,
            "skip_info": 1,
            "skip_keys": 1
        }
    }]
}

# Response paired with search_request1: an empty result page that carries
# only the total refdata hit count.
search_response1 = {
    "id": "xyz",
    "jsonrpc": "2.0",
    "result": [{
        "pagination": {
            "start": 0,
            "count": 0
        },
        "sorting_rules": [{
            "property": "timestamp",
            "is_object_property": 0,
            "ascending": 1
        }],
        "objects": [],
        "total": 15039,
        "search_time": 1848
    }]
}

# Method call to get type counts
search_request2 = {
    "version": "1.1",
    "method": "KBaseSearchEngine.search_types",
    "id": "6959719268936883",
    "params": [{
        "access_filter": {
            "with_private": 1,
            "with_public": 1
        },
        "match_filter": {
            "exclude_subobjects": 1,
            "full_text_in_all": "coli",
            "source_tags": ["refdata", "noindex"],
            "source_tags_blacklist": 1
        }
    }]
}

# Response paired with search_request2: hit counts grouped by object type.
search_response2 = {
    "id": "6959719268936883",
    "jsonrpc": "2.0",
    "result": [{
        "type_to_count": {
            "Narrative": 13,
            "Taxon": 3318,
            "Tree": 3,
            "Genome": 3174,
            "Workspace": 1
        },
        "search_time": 1506
    }]
}

# Paged object search (20 results per page) with narrative info requested.
search_request3 = {
    "id": "xyz",
    "method": "KBaseSearchEngine.search_objects",
    "params": [{
        "access_filter": {
            "with_private": 1,
            "with_public": 1
        },
        "match_filter": {
            "exclude_subobjects": 1,
            "full_text_in_all": "coli",
            "source_tags": ["refdata", "noindex"],
            "source_tags_blacklist": 1
        },
        "pagination": {
            "count": 20,
            "start": 0
        },
        "post_processing": {
            "add_narrative_info": 1,
            "ids_only": 0,
            "include_highlight": 1,
            "skip_data": 0,
            "skip_info": 0,
            "skip_keys": 0
        },
        "sorting_rules": [{
            "ascending": 0,
            "is_object_property": 0,
            "property": "access_group_id"
        }, {
            "ascending": 1,
            "is_object_property": 0,
            "property": "type"
        }]
    }],
    "version": "1.1"
}

# Response paired with search_request3.
search_response3 = {
    "id": "xyz",
    "jsonrpc": "2.0",
    "result": [{
        "pagination": {"start": 0, "count": 20},
        "sorting_rules": [{
            "property": "access_group_id",
            "is_object_property": 0,
            "ascending": 0
        }, {
            "property": "type",
            "is_object_property": 0,
            "ascending": 1
        }],
        "objects": [],
        "objects_info": [],
        "total": 6509,
        "search_time": 1918,
        "access_group_narrative_info": {},
    }]
}

# Genome features search to get the total count
search_request4 = {
    "id": "17499051636214047",
    "method": "KBaseSearchEngine.search_objects",
    "params": [{
        "access_filter": {
            "with_private": 1,
            "with_public": 1
        },
        "match_filter": {
            "exclude_subobjects": 0,
            "full_text_in_all": "coli",
            "source_tags": [],
            "source_tags_blacklist": 0
        },
        "object_types": ["GenomeFeature"],
        "pagination": {
            "count": 0,
            "start": 0
        },
        "post_processing": {
            "add_access_group_info": 0,
            "ids_only": 1,
            "include_highlight": 0,
            "skip_data": 1,
            "skip_info": 1,
            "skip_keys": 1
        }
    }],
    "version": "1.1"
}

# Response paired with search_request4: total count only, no objects.
search_response4 = {
    "jsonrpc": "2.0",
    "id": "17499051636214047",
    "result": [{
        "pagination": {
            "start": 0,
            "count": 0
        },
        "sorting_rules": [{
            "property": "timestamp",
            "is_object_property": 0,
            "ascending": 1
        }],
        "objects": [],
        "total": 94222799,
        "search_time": 355
    }]
}

# Genome features search fetching the first page of 20 results.
search_request5 = {
    "id": "2328138435664152",
    "method": "KBaseSearchEngine.search_objects",
    "params": [{
        "access_filter": {
            "with_private": 1,
            "with_public": 1
        },
        "match_filter": {
            "exclude_subobjects": 0,
            "full_text_in_all": "coli",
            "source_tags": [],
            "source_tags_blacklist": 0
        },
        "object_types": ["GenomeFeature"],
        "pagination": {
            "count": 20,
            "start": 0
        },
        "post_processing": {
            "add_access_group_info": 1,
            "ids_only": 0,
            "include_highlight": 1,
            "skip_data": 0,
            "skip_info": 0,
            "skip_keys": 0
        },
        "sorting_rules": [{
            "ascending": 1,
            "is_object_property": 1,
            "property": "genome_scientific_name"
        }, {
            "ascending": 1,
            "is_object_property": 0,
            "property": "guid"
        }, {
            "ascending": 1,
            "is_object_property": 1,
            "property": "feature_type"
        }, {
            "ascending": 1,
            "is_object_property": 1,
            "property": "id"
        }]
    }],
    "version": "1.1"
}

# Response paired with search_request5: one full GenomeFeature hit with
# parent data, key_props, highlights and workspace/object info.
search_response5 = {
    "jsonrpc": "2.0",
    "id": "2328138435664152",
    "result": [{
        "pagination": {
            "start": 0,
            "count": 20
        },
        "sorting_rules": [{
            "property": "genome_scientific_name",
            "is_object_property": 1,
            "ascending": 1
        }, {
            "property": "guid",
            "is_object_property": 0,
            "ascending": 1
        }, {
            "property": "feature_type",
            "is_object_property": 1,
            "ascending": 1
        }, {
            "property": "id",
            "is_object_property": 1,
            "ascending": 1
        }],
        "objects": [{
            "guid": "WS:4258/13216/1:feature/kb|g.2231.peg.5834",
            "parent_guid": "WS:4258/13216/1",
            "object_name": "kb|g.2231",
            "timestamp": 1453530416321,
            "type": "GenomeFeature",
            "type_ver": 1,
            "creator": "kbasetest",
            "mod": "KBase Search",
            "parent_data": {
                "domain": "Bacteria",
                "scientific_name": "'Nostoc azollae' 0708",
                "taxonomy": "Bacteria; Cyanobacteria; Nostocales; Nostocaceae; Trichormus; 'Nostoc azollae' 0708"
            },
            "data": {
                "aliases": ["Aazo_0443", "Aazo_0443"],
                "function": "Ribosomal protein S12p Asp88 (E. coli) methylthiotransferase (EC 2.8.4.4)",
                "id": "kb|g.2231.peg.5834",
                "location": [
                    ["kb|g.2231.c.0", 456225, "-", 1254]
                ],
                "protein_translation": "MLGLLVEAGYGVDTNDELADYVIVNTCSFIEAAREESVKTLVELAEANKKVVITGCLAQHFQEQLLEELPEALAVIGTGDYHKIVNVIERVEQGERVKQITPQPTYIADETTPRYRTTTEGVAYLRVAEGCDYRCAFCIIPHLRGNQRSRTIESIVAEAKQLASQGVKEIILISQITTNYGLDIYGKPKLAELLCALGKVDVPWIRMHYAYPTGLTPDVIAAIQEIPNVLPYLDLPLQHSHPEILRAMNRPWQGRVNDTIIESIKTALPSAVLRTTFIVGFPGETQEHFEHLLEFTERHEFDHVGVFTFSPEEGTPAYNLLNQLPQELMVERRDQLMALQQPISLLKNQQEVDKIVDVLIEQENPESGELIGRSGRFSPEVDGQVYVKGDAGLGTIVPVKIHSADAYDLYGQIIMSN",  # noqa
                "type": "CDS"
            },
            "key_props": {
                "feature_type": "CDS",
                "strand": "-",
                "aliases": "Aazo_0443, Aazo_0443",
                "genome_domain": "Bacteria",
                "stop": "456225",
                "ontology_terms": "",
                "function": "Ribosomal protein S12p Asp88 (E. coli) methylthiotransferase (EC 2.8.4.4)",
                "start": "454972",
                "genome_scientific_name": "'Nostoc azollae' 0708",
                "contig_id": "kb|g.2231.c.0",
                "id": "kb|g.2231.peg.5834",
                "genome_taxonomy": "Bacteria; Cyanobacteria; Nostocales; Nostocaceae; Trichormus; 'Nostoc azollae' 0708"
            },
            "highlight": {
                "function": ["Ribosomal protein S12p Asp88 (E. <em>coli</em>) methylthiotransferase (EC 2.8.4.4)"]
            }
        }],
        "total": 28138389,
        "search_time": 3183,
        "access_groups_info": {
            "4258": [4258, "KBasePublicGenomesV5", "kbasetest", "2017-02-02T06:31:09+0000", 36987, "n", "r", "unlocked", {}]
        },
        "objects_info": {
            "4258/25234/1": [
                25234,
                "kb|g.28454",
                "KBaseGenomes.Genome-8.0",
                "2016-01-23T10:20:08+0000",
                1,
                "kbasetest",
                4258,
                "KBasePublicGenomesV5",
                "f0f46269f7491407f8377cf5969c0cd9",
                6317976,
                {
                    "GC content": "39.3381390370841",
                    "Genetic code": "11",
                    "Taxonomy": "Bacteria; Firmicutes; Clostridia; Clostridiales; Eubacteriaceae; Acetobacterium; Acetobacterium woodii DSM 1030",  # noqa
                    "Source ID": "931626.3",
                    "Size": "4044777",
                    "Number features": "3832",
                    "Number contigs": "1",
                    "Domain": "Bacteria",
                    "Source": "KBase Central Store",
                    "Name": "Acetobacterium woodii DSM 1030",
                    "MD5": "31c52c7713b9ba638580e9d1812558e5"
                }
            ]
        }
    }]
}

# Basic ecoli search example with all metadata
search_request6 = {
    "params": [{
        "match_filter": {
            "full_text_in_all": "coli",
            "exclude_subobjects": 1,
            "source_tags": ["refdata", "noindex"],
            "source_tags_blacklist": 1
        },
        "pagination": {
            "start": 0,
            "count": 20
        },
        "post_processing": {
            "ids_only": 0,
            "skip_info": 0,
            "skip_keys": 0,
            "skip_data": 0,
            "include_highlight": 1,
            "add_narrative_info": 1
        },
        "access_filter": {
            "with_private": 1,
            "with_public": 1
        },
        "sorting_rules": [{
            "is_object_property": 0,
            "property": "access_group_id",
            "ascending": 0
        }, {
            "is_object_property": 0,
            "property": "type",
            "ascending": 1
        }]
    }],
    "method": "KBaseSearchEngine.search_objects",
    "version": "1.1",
    "id": "4564119057768642"
}
|
from django.shortcuts import render
from django.http import HttpResponse
from django.http import JsonResponse
from django.urls import reverse
from django.conf import settings
from django.db.models import Q
import json
from rest_framework import generics
from rest_framework.renderers import JSONRenderer
from .serializers import jobSerialize
from . import models, permissions
from . import serializers
from user.views import CustomUser
from datetime import date
from django.contrib.sessions.models import Session
import io
import base64
from PIL import Image, ImageDraw
from pathlib import Path
import uuid
import operator
from django.contrib.auth import authenticate
from rest_framework.authtoken.models import Token
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from user.models import Rating
from user.serializers import RatingSerializer
from django.utils.dateparse import parse_date
#GET: /job/view
@api_view(["POST"])
@permission_classes([IsAuthenticated])
def jobView(request):
    """Return a single job's details plus the average rating of the
    business that posted it.

    Expects POST data: `session_token`, `job_id`.
    Returns: 200 with the serialized job fields and a computed `rating`.
    """
    req_dict = request.data
    total_rating = 0
    return_data = {}
    #try:
    # NOTE(review): the try/except around this body is commented out, so any
    # lookup failure (bad token / missing job) propagates as a 500 instead of
    # the 400 in the commented handler below — confirm whether intentional.
    token = Token.objects.get(key=req_dict['session_token'])
    job_id = req_dict['job_id']
    serialized_qs = serializers.jobSerialize(models.job.objects.get(id=job_id))
    # add rating to the return object
    ratings = Rating.objects.filter(being_rated=serialized_qs.data['business_id'])
    for rating in ratings:
        serialized_rating = RatingSerializer(rating)
        total_rating += int(serialized_rating.data['rating'])
    if total_rating > 0:
        # Average over all ratings received by the business.
        total_rating = total_rating/len(ratings)
    return_data = {
        "id": serialized_qs.data['id'],
        "project_name": serialized_qs.data['project_name'],
        "date_created": serialized_qs.data['date_created'],
        "date_start": serialized_qs.data['date_start'],
        "date_end": serialized_qs.data['date_end'],
        "description": serialized_qs.data['description'],
        "category": serialized_qs.data['category'],
        "jobType": serialized_qs.data['jobType'],
        "premium": serialized_qs.data['premium'],
        "business_id": serialized_qs.data['business_id'],
        "business_name": serialized_qs.data['business_name'],
        "location": serialized_qs.data['location'],
        "current_bid": serialized_qs.data['current_bid'],
        "bidder": serialized_qs.data['bidder'],
        "project_photos": serialized_qs.data['project_photos'],
        "rating": total_rating
    }
    return JsonResponse(return_data, safe=False, status=200)
    #except:
        #return JsonResponse({'error':'lookup failed','status':'failure'}, status=400)
#POST: /job/register
@api_view(["POST"])
@permission_classes([IsAuthenticated])
def jobRegister(request):
try:
req_dict = request.data
token = Token.objects.get(key=req_dict['session_token'])
user = CustomUser.objects.get(id=token.user_id)
if(req_dict['project_title']!=""):
test = models.job.objects.create(project_name=req_dict['project_title'],
date_start=req_dict['date_start'],
date_end=req_dict['date_end'],
description=req_dict['project_description'],
category=req_dict['project_category'],
jobType=req_dict['project_type'],
premium=(True if req_dict['project_premium']=="T" else False),
business=user,
location=req_dict['project_location'],
current_bid=0)
for photo in req_dict['project_photos']:
models.project_photos.objects.create(project=test,image=photo['image'],title=photo['title'])
return JsonResponse({"status":"success"}, status=200)
except:
return JsonResponse({'error':'failed to register','status':'failure'}, status=400)
return JsonResponse({'error':'failed to register','status':'failure'}, status=400, safe=False)
#POST: /job/bid
@api_view(["POST"])
@permission_classes([IsAuthenticated])
def jobBid(request):
try:
req_dict = request.data
token = Token.objects.get(key=req_dict['session_token'])
user = CustomUser.objects.get(id=token.user_id)
model = models.job.objects.get(id=req_dict['job_id'])
#check bid amount is bigger than current top bid amount
currBid = int(req_dict['current_bid'])
if model.current_bid != '':
if currBid < int(model.current_bid):
return JsonResponse({'error': 'current bid is less than top bidder', 'status': 'failure'}, status=400)
serialized_qs = serializers.jobSerialize(model, data={'bidder': user.id, 'current_bid': req_dict['current_bid']}, partial=True)
if serialized_qs.is_valid():
serialized_qs.save()
return JsonResponse({"status": "success"}, status=200)
except:
return JsonResponse({'error':'failed job bidding','status':'failure'}, status=400)
#POST: /Job/Photo/upload
@api_view(["POST"])
@permission_classes([IsAuthenticated])
def jobPhotoUpload(request):
try:
req_dict = request.data
token = Token.objects.get(key=req_dict['session_token'])
user = CustomUser.objects.get(id=token.user_id)
tempname = str(uuid.uuid4())
f = open('media/images/'+tempname+'.jpeg', 'wb')
f.write(base64.b64decode(req_dict['image_byte_array']) )
f.close()
return HttpResponse(settings.MEDIA_URL+'images/'+tempname+'.jpeg', content_type="application/json")
except:
return JsonResponse({'error':'failed photo upload','status':'failure'}, status=400)
return JsonResponse({'error':'failed photo upload','status':'failure'}, status=400)
class resultPage(object):
    """Simple value object describing one page of search results.

    Attributes:
        amount: number of results on this page.
        total: total number of matching results.
        results: the serialized results for this page.
    """

    def __init__(self, amount, total, results):
        # Plain attribute assignment; no validation is performed.
        self.amount, self.total, self.results = amount, total, results
# NOTE(review): module-level mutable list shared across requests; it is not
# referenced in the visible code — confirm it is still needed before use.
sorted_hash = []
#GET: /job/search
@api_view(["POST"])
def jobSearch(request):
req_dict = request.data
# try:
terms = req_dict['search_terms'].split()
category = req_dict['category_1']
jobType = req_dict['category_2']
status = req_dict['job_status']
order = req_dict['order_by']
searchLocal = req_dict['location']
date_start = req_dict['date_start']
date_start_p = parse_date(date_start)
date_end = req_dict['date_end']
date_end_p = parse_date(date_end)
minPrice = int(req_dict['min_price'])
maxPrice =int(req_dict['max_price'])
pageAmount = int(req_dict['page_amount'])
pageNumber =int(req_dict['page_number'])
result_hash = {}
job_list = models.job.objects.all()
#search job, description and business for search terms, if none add all to results
if(len(terms)>0):
for term in terms:
queryset = job_list.filter(
Q(project_name__icontains=term) |
Q(description__icontains=term) |
Q(business__business_name__icontains=term))
for q in queryset:
currBid = int(q.current_bid)
if(minPrice<=currBid and maxPrice>=currBid):
#order by relevance
if(order == "Relevance"):
if q in result_hash.keys():
result_hash[q] = result_hash[q]+1
else:
result_hash[q] = 1
#order by bid
if(order == "Lowest Bid" or order == "Highest Bid"):
if q not in result_hash.keys():
result_hash[q] = currBid
#order by date
if(order == "Oldest" or order == "Newest"):
if q not in result_hash.keys():
result_hash[q] = q.date_created
if(order == "Alphabetic"):
if q not in result_hash.keys():
result_hash[q] = q.project_name[0]
else:
for q in job_list:
currBid = int(q.current_bid)
if(minPrice<=currBid and maxPrice>=currBid):
#order by relevance
if(order == "Relevance"):
result_hash[q] = 1
#order by bid
if(order == "Lowest Bid" or order == "Highest Bid"):
if q not in result_hash.keys():
result_hash[q] = currBid
#order by date
if(order == "Oldest" or order == "Newest"):
if q not in result_hash.keys():
result_hash[q] = q.date_created
if(order == "Alphabetic"):
if q not in result_hash.keys():
result_hash[q] = q.project_name[0]
#If location is set, filter out all jobs not in region
if (searchLocal!=""):
for q in list(result_hash.keys()):
if q.location!=searchLocal:
del result_hash[q]
#If category is set, filter out all jobs not in region
if (category!=""):
for q in list(result_hash.keys()):
if q.category!=category:
del result_hash[q]
#If type is set, filter out all jobs not in region
if (jobType!=""):
for q in list(result_hash.keys()):
if q.jobType!=jobType:
del result_hash[q]
#If status is set, filter out all jobs not in region
if (status!=""):
for q in list(result_hash.keys()):
if q.premium=="F" and status == "Premium":
del result_hash[q]
if q.premium=="T" and status == "Standard":
del result_hash[q]
#order dict depending on choice
if(order=="Alphabetic"):
sorted_hash=sorted(result_hash.items(), key=operator.itemgetter(1))
elif(order == "Relevance" or order == "Highest Bid" or order == "Newest"):
sorted_hash = sorted(result_hash.items(), key=operator.itemgetter(1), reverse=True)
else:
sorted_hash = sorted(result_hash.items(), key=operator.itemgetter(1))
# #Display Results according to page parameters
temp_list = []
if(len(sorted_hash)<=pageAmount):
if(pageNumber==0):
for i in sorted_hash:
temp_list.append(i[0])
else:
return HttpResponse("")
else:
temp_list = []
for i in range(pageAmount*pageNumber, (pageAmount*pageNumber)+pageAmount):
temp_list.append(sorted_hash[i][0])
pageResponse = resultPage(len(temp_list),len(sorted_hash),temp_list)
serialized_qs = serializers.resultSerializer(pageResponse)
return HttpResponse(JSONRenderer().render(serialized_qs.data), content_type='application/json')
# except:
# return JsonResponse({'error':'search failed','status':'failure'}, status=400)
|
<gh_stars>0
import pandas as pd
import logging
from constants import INPUT_PATH, PARTIDO_MUNZONA, FORMAT_FILE, STATES, CANDIDATO_MUNZONA
from helpers import flat_lists
def factory_partido(
    ano_eleicao,
    descricao_ue,
    sigla_uf,
    nome_partido,
    numero_partido,
    sigla_partido,
    nome_legenda,
    composicao_legenda,
    tipo_agremiacao,
    total_votos,
    total_legenda
):
    """Build a plain record (dict) with a party's vote totals for one state."""
    return dict(
        ano_eleicao=ano_eleicao,
        descricao_ue=descricao_ue,
        sigla_uf=sigla_uf,
        nome_partido=nome_partido,
        numero_partido=numero_partido,
        sigla_partido=sigla_partido,
        nome_legenda=nome_legenda,
        composicao_legenda=composicao_legenda,
        tipo_agremiacao=tipo_agremiacao,
        total_votos=total_votos,
        total_legenda=total_legenda,
    )
def factory_deputado(
    ano_eleicao,
    nome,
    nome_urna,
    numero_urna,
    descricao_ue,
    sigla_uf,
    tipo_agremiacao,
    nome_partido,
    numero_partido,
    sigla_partido,
    nome_legenda,
    composicao_legenda,
    descricao_totalizacao_turno,
    total_votos,
    descricao_detalhe_situacao_candidatura,
    descricao_situacao_candidatura
):
    """Build a plain record (dict) describing one deputy candidate's result."""
    return dict(
        ano_eleicao=ano_eleicao,
        nome=nome,
        nome_urna=nome_urna,
        numero_urna=numero_urna,
        descricao_ue=descricao_ue,
        sigla_uf=sigla_uf,
        tipo_agremiacao=tipo_agremiacao,
        nome_partido=nome_partido,
        numero_partido=numero_partido,
        sigla_partido=sigla_partido,
        nome_legenda=nome_legenda,
        composicao_legenda=composicao_legenda,
        descricao_totalizacao_turno=descricao_totalizacao_turno,
        total_votos=total_votos,
        descricao_detalhe_situacao_candidatura=descricao_detalhe_situacao_candidatura,
        descricao_situacao_candidatura=descricao_situacao_candidatura,
    )
def generate_party_data():
    """Build one record per (state, party) with summed vote/legenda totals.

    Only federal-deputy rows (codigo_cargo == 6) are considered.
    """
    df = pd.read_csv(f'{INPUT_PATH}/{PARTIDO_MUNZONA}{FORMAT_FILE}')
    processed = []
    for state in STATES:
        state_df = df[(df['sigla_uf'] == state) & (df['codigo_cargo'] == 6)]
        logging.info(f'Processando dados para o {state}')
        # Per-party totals over all municipal zones of this state.
        votes_by_party = state_df[['nome_partido', 'total_votos']].groupby(by='nome_partido').sum()
        legenda_by_party = state_df[['nome_partido', 'voto_em_legenda']].groupby(by='nome_partido').sum()
        records = dict()
        for row in state_df.itertuples():
            if row.nome_partido in records:
                continue  # one record per party is enough
            if row.codigo_cargo != 6:
                # not a federal-deputy row (defensive; state_df is pre-filtered)
                continue
            records[row.nome_partido] = factory_partido(
                ano_eleicao=row.ano_eleicao,
                nome_partido=row.nome_partido,
                numero_partido=row.numero_partido,
                sigla_partido=row.sigla_partido,
                nome_legenda=row.nome_legenda,
                composicao_legenda=row.composicao_legenda,
                descricao_ue=row.descricao_ue,
                sigla_uf=row.sigla_uf,
                tipo_agremiacao=row.tipo_agremiacao,
                total_votos=votes_by_party.loc[row.nome_partido].values.item(),
                total_legenda=legenda_by_party.loc[row.nome_partido].values.item(),
            )
        processed.append(list(records.values()))
    return flat_lists(lists=processed)
def generate_candidate_data():
    """Build one record per (state, candidate urn name) with summed votes.

    Votes are aggregated over municipal zones by numero_sequencial; only
    federal-deputy rows (codigo_cargo == 6) are kept.
    """
    df = pd.read_csv(f'{INPUT_PATH}/{CANDIDATO_MUNZONA}{FORMAT_FILE}')
    votes_by_candidate = df[['numero_sequencial', 'total_votos']].groupby(by='numero_sequencial').sum()
    processed = []
    for state in STATES:
        state_df = df[df['sigla_uf'] == state]
        logging.info(f'Processando dados para o {state}')
        records = dict()
        for row in state_df.itertuples():
            if row.nome_urna in records:
                continue  # one record per candidate is enough
            if row.codigo_cargo != 6:
                # not a federal-deputy row
                continue
            records[row.nome_urna] = factory_deputado(
                ano_eleicao=row.ano_eleicao,
                nome=row.nome,
                nome_urna=row.nome_urna,
                numero_urna=row.numero_urna,
                descricao_ue=row.descricao_ue,
                sigla_uf=row.sigla_uf,
                tipo_agremiacao=row.tipo_agremiacao,
                nome_partido=row.nome_partido,
                numero_partido=row.numero_partido,
                sigla_partido=row.sigla_partido,
                nome_legenda=row.nome_legenda,
                composicao_legenda=row.composicao_legenda,
                descricao_totalizacao_turno=row.descricao_totalizacao_turno,
                descricao_detalhe_situacao_candidatura=row.descricao_detalhe_situacao_candidatura,
                descricao_situacao_candidatura=row.descricao_situacao_candidatura,
                total_votos=votes_by_candidate.loc[row.numero_sequencial].values.item()
            )
        processed.append(list(records.values()))
    return flat_lists(lists=processed)
|
<reponame>justin8/convert_videos
from dataclasses import dataclass
import os
import shutil
import tempfile
import logging
import traceback
from enum import Enum, auto
from stringcase import titlecase, lowercase
from video_utils import Video
from .ffmpeg_converter import FFmpegConverter
from .settings import AudioSettings, VideoSettings
from .colour import colour
log = logging.getLogger()
class Status(Enum):
    """Outcome of processing one video file."""
    # Successful conversion
    CONVERTED = auto()
    # The file would be converted, if we weren't running in dry-run mode
    WOULD_CONVERT = auto()
    # If a file is already converted with renaming
    # (e.g. appending the output codec) and this is that converted file
    ALREADY_PROCESSED = auto()
    # Force can override already in desired format, but not if both original and converted files already exist
    FORCE_CONVERTED = auto()
    # The file is already using the target format, can be overridden with --force
    IN_DESIRED_FORMAT = auto()
    FAILED = auto()

    def __str__(self):
        # e.g. IN_DESIRED_FORMAT -> "In Desired Format"
        return titlecase(lowercase(self.name))

    def colour(self):
        """Return str(self) coloured for terminal output (red only for FAILED)."""
        # `colour` below resolves to the module-level import, not this method:
        # class attributes are not part of a method body's name resolution.
        c = "green"
        if self == Status.FAILED:
            c = "red"
        return colour(c, str(self))
@dataclass
class VideoProcessor:
    """Convert a single Video to the configured codec/container.

    Runs FFmpegConverter into a temporary file, then moves the result next
    to the original (renamed with the codec suffix) or, with in_place,
    over the original file.
    """

    video: Video
    video_settings: VideoSettings
    audio_settings: AudioSettings
    container: str  # target container extension, e.g. "mkv"
    force: bool = False  # convert even if already in the desired codec
    dry_run: bool = False  # only log what would happen; no file changes
    in_place: bool = False  # replace the original instead of keeping both
    extra_ffmpeg_input_args: str = ""
    extra_ffmpeg_output_args: str = ""
    temp_directory: str = None  # None -> system default temp dir

    def _create_temp_file(self):
        """Create an empty temp file with the target suffix; return its path."""
        return tempfile.mkstemp(dir=self.temp_directory, suffix=f".{self.container}")[1]

    def __str__(self):
        return f"Video: {self.video.full_path}, format: {self.video.codec.pretty_name}, quality: {self.video.quality}"

    def process(self):
        """Convert the video; return a Status describing the outcome."""
        if self.video.codec == self.video_settings.codec:
            log.debug(f"'{self.video.name}' is already in the desired format")
            if not self.force:
                return Status.IN_DESIRED_FORMAT
            log.debug("Forcing conversion anyway (--force is enabled)")
        if self.already_processed():
            return Status.ALREADY_PROCESSED
        try:
            self.temp_file = self._create_temp_file()
            converter = FFmpegConverter(
                source_file_path=self.video.full_path,
                destination_file_path=self.temp_file,
                extra_ffmpeg_input_args=self.extra_ffmpeg_input_args,
                extra_ffmpeg_output_args=self.extra_ffmpeg_output_args,
                video_settings=self.video_settings,
                audio_settings=self.audio_settings,
                dry_run=self.dry_run,
            )
            converter.process()
            self._move_output_video()
            if self.dry_run:
                return Status.WOULD_CONVERT
            return Status.CONVERTED
        except Exception as e:
            # Any failure is reported but never propagated to the caller.
            log.error(colour("red", f"Failed to convert {self.video.full_path}. Exception:"))
            log.error(e)
            traceback.print_exc()
            return Status.FAILED

    def already_processed(self):
        """Return True when this file (or its renamed sibling) was converted before."""
        renamed_path = self.renamed_path()
        if os.path.exists(renamed_path):
            log.debug(f"File '{self.video.name}' appears to have already been converted to {renamed_path} exists. Skipping")
            return True
        split_filename = os.path.splitext(self.video.name)
        codec_name = self.video_settings.codec.pretty_name
        # This file itself carries the " - <codec>" suffix of a prior run.
        if split_filename[0].endswith(codec_name):
            log.debug(f"File '{self.video.name}' already matches the renaming format. Skipping")
            return True
        return False

    def renamed_path(self):
        """Output path: '<original stem> - <codec>.<container>'."""
        split_filename = os.path.splitext(self.video.full_path)
        codec_name = self.video_settings.codec.pretty_name
        return f"{split_filename[0]} - {codec_name}.{self.container}"

    def in_place_file_path(self):
        """Output path for in_place mode: original stem + target container."""
        split_filename = os.path.splitext(self.video.full_path)
        return f"{split_filename[0]}.{self.container}"

    def _move_output_video(self):
        """Move the converted temp file into place; honours dry_run/in_place."""
        log.debug("Moving file from temporary storage back to original folder")
        if not self.dry_run:
            shutil.move(self.temp_file, self.renamed_path())
        if self.in_place:
            if self.dry_run:
                log.info(colour("blue", f"DRY-RUN: Would replace original file {self.video.full_path}"))
                return
            print(f"Replacing original file {self.video.full_path}")
            # Remove the original first, then move the renamed file over it.
            os.remove(self.video.full_path)
            shutil.move(self.renamed_path(), self.in_place_file_path())
|
from larlib import *
from meshpy.tet import MeshInfo, build, Options
# LAR model with non-contractible faces
# ------------------------------------------------------------------------------
# Vertices (x, y, z) of the test complex: an outer unit block stack with an
# inner 0.5-sized square column running through it.
V = [[0.25, 0.25, 0.0], [0.25, 0.75, 0.0], [0.75, 0.75, 0.0], [0.75, 0.25, 0.0], [1.0,
0.0, 0.0], [0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.25, 0.25, 1.0], [0.25,
0.25, 2.0], [0.25, 0.75, 2.0], [0.25, 0.75, 1.0], [0.25, 0.75, -1.0], [0.25, 0.25,
-1.0], [0.75, 0.75, -1.0], [0.75, 0.25, -1.0], [0.75, 0.25, 1.0], [0.75, 0.75, 1.0],
[1.0, 0.0, 1.0], [0.0, 0.0, 1.0], [1.0, 1.0, 1.0], [0.0, 1.0, 1.0], [0.75, 0.75, 2.0],
[0.75, 0.25, 2.0]]
# Cells, faces and edges given as vertex-index tuples (LAR representation).
CV = [(0,1,2,3,4,5,6,7,8,11,16,17,18,19,20,21),
(0,1,2,3,8,11,16,17),
(0,1,2,3,12,13,14,15),
(8,9,10,11,16,17,22,23)]
FV = [(2,3,16,17),(6,7,20,21),(12,13,14,15),(0,1,8,11),(1,2,11,17),(0,1,12,13),
(4,6,18,20),(5,7,19,21),(0,3,13,15),(0,3,8,16),(0,1,2,3),
(10,11,17,22),(2,3,14,15),(8,9,16,23),(8,11,16,17),
(1,2,12,14),(16,17,22,23),(4,5,18,19),(8,9,10,11),(
9,10,22,23),(0,1,2,3,4,5,6,7),(8, 11,16,17,18,19,20,21)]
EV =[(3,15),(7,21),(10,11),(4,18),(12,13),(5,19),(8,9),(18,19),(22,23),(0,3),(1,11),
(16,17),(0,8),(6,7),(20,21),(3,16),(10,22),(18,20),(19,21),(1,2),(12,14),(4,5),(
8,11),(13,15),(16,23),(14,15),(11,17),(17,22),(2,14),(2,17),(0,1),(9,10),(8,16),
(4,6),(1,12),(5,7),(0,13),( 9,23),(6,20),(2,3)]
VV = AA(LIST)(range(len(V)))
# Visual check of the numbered model, then of its boundary.
hpc = STRUCT(MKPOLS((V,EV)))
VIEW(larModelNumbering(1,1,1)(V,[VV,EV,FV,CV],hpc,0.6))
BF = boundaryCells(CV,FV)
VIEW(EXPLODE(1.2,1.2,1.2)(MKTRIANGLES((V,[FV[f] for f in BF],EV))))
VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((V,FV+EV+VV))))
VIEW(EXPLODE(1.2,1.2,1.2)(AA(SKEL_1)(MKPOLS((V,CV)))))
# Signed 3-boundary of the full chain [1,1,1,1] (all four cells selected).
V,BF,BE = larBoundary3(V,CV,FV,EV)([1,1,1,1])
VIEW(STRUCT(MKTRIANGLES((V,BF,BE),color=True)))
VIEW(EXPLODE(1.2,1.2,1.2)(MKTRIANGLES((V,BF,BE),color=True)))
# Correction of non-signed boundary op for general (non contractible) LAR cells
# ------------------------------------------------------------------------------
# Rebuild the complex from a 2x1x1 cuboid grid plus a scaled/translated copy,
# partition space and simplify, to obtain a general (non-convex) model.
V,[VV,EV,FV,CV] = larCuboids([2,1,1],True)
mod1 = Struct([(V,FV,EV),t(.25,.25,0),s(.25,.5,2),(V,FV,EV)])
V,FV,EV = struct2lar(mod1)
W,FW,EW = V,FV,EV
quadArray = [[W[v] for v in face] for face in FW]
parts = boxBuckets3d(containmentBoxes(quadArray))
Z,FZ,EZ = spacePartition(W,FW,EW, parts)
Z,FZ,EZ = larSimplify((Z,FZ,EZ),radius=0.0001)
V,FV,EV = Z,FZ,EZ
# Hand-built cell-by-face incidence for the partitioned model ...
CF = AA(sorted)([[20,12,21,5,19,6],
[27,1,5,28,13,23],
[12,14,25,17,10,4],
[1,7,17,24,11,18],
[30,29,26,16,8,22,10,11,4,18,24,25],
[2,3,8,9,0,15]])
CV = [list(set(CAT([FV[f] for f in faces]))) for faces in CF]
# ... immediately overwritten by an explicit (checked by hand) cell list.
CV = [[10,11,12,13,18,19,20,21],
[18,19,20,21,22,23,25,26],
[0,1,4,5,10,13,18,21],
[2,3,4,5,18,21,25,26],
[0,1,2,3,4,5,6,7,8,9,10,13,16,17,18,21,24,25,26,27],
[6,8,14,15,16,24,28,29]]
VV = AA(LIST)(range(len(V)))
hpc = STRUCT(MKPOLS((V,EV)))
VIEW(larModelNumbering(1,1,1)(V,[VV,EV,FV,CV],hpc,0.6))
V,BF,BE = larBoundary3(V,CV,FV,EV)([1,1,1,1])
VIEW(STRUCT(MKTRIANGLES((V,BF,BE),color=True)))
VIEW(EXPLODE(1.2,1.2,1.2)(MKTRIANGLES((V,BF,BE),color=True)))
VIEW(EXPLODE(1.2,1.2,1.2)(MKTRIANGLES((V,[FV[f] for f in BF],EV))))
# Tetrahedralization
# ------------------------------------------------------------------------------
CF = crossRelation(len(V),CV,FV)
FE = crossRelation(len(V),FV,EV) # correct (for general LAR cells)
# Per-face boundary cycles; only even-indexed cycles kept (outer boundaries).
cycles = []
for faceEdges in FE:
    vcycles,_ = makeCycles((V,[EV[e] for e in faceEdges]))
    cycles += [[vcycle for k,vcycle in enumerate(vcycles) if k%2==0]]
# Computed cycles replaced by a hand-curated list (duplicates removed).
cycles = [[[16, 17, 2, 3]], # removed dups ...
[[20, 21, 7, 6]],
[[14, 15, 13, 12]],
[[8, 11, 1, 0]],
[[11, 17, 2, 1]],
[[12, 13, 0, 1]],
[[18, 20, 6, 4]],
[[19, 21, 7, 5]],
[[13, 15, 3, 0]],
[[8, 16, 3, 0]],
[[17, 22, 10, 11]],
[[14, 15, 3, 2]],
[[16, 23, 9, 8]],
[[12, 14, 2, 1]],
[[22, 23, 16, 17]],
[[18, 19, 5, 4]],
[[10, 11, 8, 9]],
[[22, 23, 9, 10]],
[[6, 7, 5, 4], [2, 3, 0, 1]],
[[20, 21, 19, 18], [16, 17, 11, 8]]]
CF = [[1, 7, 17, 6, 0, 4, 9, 3, 20,21], # plus 20,21
[14, 0, 4, 10, 9, 3], # no 20,21
[2, 12, 15, 10, 8, 5], # no 20
[16, 11, 19, 18, 14, 13]] # no 21
VIEW(EXPLODE(1.2,1.2,1.2)(AA(POLYLINE)([[V[v] for v in cycle]+[V[cycle[0]]]
for cycle in CAT(cycles)])))
def faces(tet):
    """Return the four oriented triangular faces of tetrahedron (v1,v2,v3,v4)."""
    a, b, c, d = tet
    return [(b, c, d), (c, a, d), (a, b, d), (b, a, c)]
def edges(tria):
    """Return the three edges of triangle (v1,v2,v3), each sorted by vertex id."""
    a, b, c = tria
    pairs = [(a, b), (a, c), (b, c)]
    return AA(sorted)(pairs)
def brep2lar(model,cycles,holes):
    """Tetrahedralize a boundary representation using meshpy/TetGen.

    model: (V,FV,EV) LAR model; cycles: facet polygons (vertex cycles) fed to
    set_facets_ex; holes: interior hole points (currently unused -- the
    set_holes call is commented out).

    Returns (W, CW, FW, EW): mesh points, tetrahedra, deduplicated faces and
    deduplicated edges.
    """
    V,FV,EV = model
    # NOTE(review): other call sites pass crossRelation(len(V),FV,EV); FE is
    # never used below, so the discrepancy is harmless here -- confirm intent.
    FE = crossRelation(V,FV,EV)
    mesh_info = MeshInfo()
    mesh_info.set_points(V)
    mesh_info.set_facets_ex(cycles)
    #mesh_info.set_holes(holes)
    # "pqYY": piecewise-linear complex, quality mesh, preserve input facets.
    mesh = build(mesh_info,options=Options("pqYY"))
    W = [v for h,v in enumerate(mesh.points)]
    CW = [tet for k,tet in enumerate(mesh.elements)]
    def simplify(fun):
        # Deduplicate the cells produced by fun, keyed on sorted vertices.
        def simplify0(simplices):
            cellDict = defaultdict(tuple)
            for cell in CAT(AA(fun)(simplices)):
                cellDict[tuple(sorted(cell))] = tuple(cell)
            return cellDict.values()
        return simplify0
    FW = sorted(simplify(faces)(CW))
    EW = sorted(simplify(edges)(FW))
    return W,CW,FW,EW
# NOTE(review): this is a Python 2 exploratory script (print statements below);
# several bare expressions are echoed values pasted from an interactive session.
holes = [[0.5,0.5,0.5]]
W,CW,FW,EW = brep2lar((V,FV,EV),cycles,holes)
VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((W,FW))))
# Boundary faces of the tetrahedralization: rows of BF summing to +/-1.
BF = signedSimplicialBoundary(CW,FW)
bf = (BF * mat(len(CW)*[1]).T).tolist()
bfs = [k for k,face in enumerate(CAT(bf)) if ABS(face)==1]
VIEW(EXPLODE(2,2,2)(MKPOLS((W,[FW[f] for f in bfs]))))
# Tetrahedralization
# ------------------------------------------------------------------------------
frame = boundary(CV,FV)
CVW = crossRelation(len(V),CV,CW) # tetrahedra by LAR cell # maps cells to tetrahedra
CVW[0] = list(set(CVW[0]).difference(CVW[1])) # removed double tetrahedra
tetraBreps = [[faces(CW[t]) for t in c] for c in CVW]
[Volume([W,tetra]) for tetra in CAT(tetraBreps)] # test of tetrahedra coherent orientation
for c in range(len(CV)):
    VIEW(STRUCT(MKPOLS((V,CAT(tetraBreps[c]))))) # 2-skeleton of each cell
# Boundary triangles of each cell, computed from its tetrahedra.
BCt = []
for c in range(len(CV)):
    A = [CW[t] for t in CVW[c]]
    B = CAT(tetraBreps[c])
    cellBoundaryChain = boundary(A,B)*mat(len(A)*[[1]])
    bc = [k for k,val in enumerate (cellBoundaryChain) if val==1]
    print "c,bc =",c,bc
    BCt += [bc]
    VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((V,[B[t] for t in bc])))) # 2-boundary of each cell
# Triangles of FW contained in each LAR face of FV.
FVt = [[t for t in FW if set(t).issubset(f)] for f in FV]
FVt
# Echoed value of FVt from the interactive session (kept for reference):
[[(16, 2, 3), (16, 17, 2)],
[(6, 7, 21), (21, 20, 6)],
[(12, 15, 13), (15, 12, 14)],
[(0, 1, 11), (11, 8, 0)],
[(1, 17, 2), (1, 17, 11)],
[(0, 1, 12), (0, 12, 13)],
[(4, 20, 18), (20, 4, 6)],
[(5, 21, 7), (21, 5, 19)],
[(15, 0, 13), (15, 3, 0)],
[(0, 16, 3), (16, 0, 8)],
[(0, 1, 2), (3, 2, 0)],
[(10, 17, 11), (17, 10, 22)],
[(2, 15, 14), (15, 2, 3)],
[(8, 23, 9), (23, 8, 16)],
[(17, 8, 11), (17, 8, 16)],
[(1, 14, 12), (14, 1, 2)],
[(17, 22, 16), (23, 16, 22)],
[(4, 19, 5), (19, 4, 18)],
[(8, 10, 11), (9, 10, 8)],
[(10, 9, 22), (23, 22, 9)],
[(0, 1, 2), (0, 5, 7), (0, 7, 1), (1, 6, 2), (3, 2, 0), (3, 2, 4), (4, 0, 3), (4, 2, 6), (4, 5, 0), (6, 1, 7)],
[(8, 21, 19), (11, 20, 21), (16, 18, 20), (17, 8, 11), (17, 8, 16), (17, 16, 20), (17, 20, 11), (18, 8, 19), (18, 16, 8), (21, 8, 11)]]
# Signed 2-boundary operator relating LAR faces to triangle edges.
ET = crossRelation(len(W),EV,FW)
EVW = extendEV(EV,ET,FW)
triaModel, larModel = (FW,EVW), (FV,EV)
FT = crossRelation(len(V),FV,FW)
boundary2op = larSignedBoundary(larModel,triaModel,FT)
boundary2op.todense()
m,n,p = AA(len)([CV,FV,EV])
# Signed edge indices on the boundary of each face.
absFE = []
for f,face in enumerate(FV):
    faceVect = zeros((n,1))
    faceVect[f] = [1]
    edgeVect = CAT((boundary2op * faceVect).tolist())
    absFE += [[int(value)*e for e,value in enumerate(edgeVect) if value!=0]]
absFE
# Echoed value of absFE from the interactive session (kept for reference):
[[11, 15, -29, 39],
[1, 13, -14, -38],
[-4, 20, -23, 25],
[10, -12, -22, 30],
[-10, -19, -26, -29],
[4, 30, 34, -36],
[-3, -17, 33, 38],
[-1, 5, 18, -35],
[0, -9, 23, 36],
[-9, 12, -15, 32],
[9, 19, 30, -39],
[-2, 16, -26, -27],
[0, -25, -28, 39],
[-6, 24, 32, -37],
[11, 22, 26, 32],
[19, -20, 28, -34],
[8, 11, -24, 27],
[3, -5, 7, -21],
[2, 6, -22, 31],
[-8, -16, -31, 37],
[9, -13, 19, 21, 30, -33, 35, -39],
[-7, 11, 14, 17, -18, 22, 26, 32]]
# Signed triangle indices on the boundary of each cell.
absCt = []
for c,cell in enumerate(CVW):
    cellVect = zeros((len(CW),1))
    for T in CVW[c]: cellVect[T] = [1]
    trianglesVect = CAT((BF * cellVect).tolist())
    absCt += [[int(value)*t for t,value in enumerate(trianglesVect) if abs(value)==1]]
for k in range(len(absCt)):
    VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((V,[FW[abs(t)] for t in absCt[k]]))))
CF = crossRelation(len(V), CV,FV)
# Faces on the boundary of cell 1 (selector chain [0,1,0,0]).
[k for k,val in enumerate(CAT((boundary(CV,FV)*(mat([[0,1,0,0]])).T).tolist())) if val!=0]
# Dump the non-zero entries of the (face x cell) boundary matrix.
for f in range(frame.shape[0]):
    row = frame[f]
    for c in range(frame.shape[1]):
        if frame[f,c] != 0:
            print f,c, frame[f,c]
|
import sys
import unittest
from pysgrs.tests.test_cipher import TestStreamCipher
from pysgrs import alphabets
from pysgrs import ciphers
class TestIdentityStreamCipher(TestStreamCipher, unittest.TestCase):
    """Rotation by 0 is the identity: ciphertexts equal the shared plaintexts."""
    cipher = ciphers.RotationCipher(offset=0)
    ciphertexts = TestStreamCipher.plaintexts
class TestRotationStreamCipher(TestStreamCipher, unittest.TestCase):
    """Rotation cipher with offset +7; fixtures pair with the shared plaintexts."""
    cipher = ciphers.RotationCipher(offset=7)
    # NOTE(review): "<KEY>" entries look like redacted fixtures -- regenerate
    # from the shared plaintexts before relying on these cases.
    ciphertexts = [
        "HIJKLMNOPQRSTUVWXYZABCDEFG",
        "GFEDCBAZYXWVUTSRQPONMLKJIH",
        "AOLXBPJRIYVDUMVEQBTWZVCLYAOLSHGFKVN",
        "DHSAGIHKUFTWOMVYXBPJRQPNZCLE",
        "QPCLKMVEUFTWONYHIZXBPJRDHSAG",
        "NSPIQVJRZXBPGUFTWOAVCLEKDHYM",
        "ZW<KEY>",
        "OVDCLEPUNSFXBPJRKHMAGLIYHZQBTW",
        "<KEY>",
        "<KEY>",
        "<KEY>",
        "Spcl hz pm fvb dlyl av kpl avtvyyvd. Slhyu hz pm fvb dlyl av spcl mvylcly.",
        "Il dov fvb hyl huk zhf doha fvb mlls, iljhbzl aovzl dov tpuk kvu’a thaaly huk aovzl dov thaaly kvu’a tpuk.",
        "Pm fvb jhuuva kv nylha aopunz, kv zthss aopunz pu h nylha dhf.",
        "Dpzl tlu zwlhr iljhbzl aolf ohcl zvtlaopun av zhf; mvvsz iljhbzl aolf ohcl av zhf zvtlaopun.",
    ]
class TestNegativeRotationStreamCipher(TestStreamCipher, unittest.TestCase):
    """Rotation cipher with a negative offset (-7); only the alphabet case."""
    cipher = ciphers.RotationCipher(offset=-7)
    ciphertexts = [
        "TUVWXYZABCDEFGHIJKLMNOPQRS"
    ]
class TestCaesarStreamCipher(TestStreamCipher, unittest.TestCase):
    """Default Caesar cipher; fixtures pair with the shared plaintexts."""
    cipher = ciphers.CaesarCipher()
    # NOTE(review): "<KEY>" entries look like redacted fixtures -- regenerate
    # from the shared plaintexts before relying on these cases.
    ciphertexts = [
        "DEFGHIJKLMNOPQRSTUVWXYZABC",
        "CBAZYXWVUTSRQPONMLKJIHGFED",
        "WKHTXLFNEURZQIRAMXPSVRYHUWKHODCBGRJ",
        "ZDOWCEDGQBPSKIRUTXLFNMLJVYHA",
        "MLYHGIRAQBPSKJUDEVTXLFNZDOWC",
        "JOLEMRFNVTXLCQBPSKWRYHAGZDUI",
        "VSKLQARIEODFNTXDUWCMXGJHPBYRZ",
        "KR<KEY>LFNGDIWCHEUDVMXPS",
        "<KEY>",
        "M<KEY>BELJVSKLQARITXDUWC",
        "SDFNPBERAZLWKILYHGRCHQOLTXRUMXJV",
        "Olyh dv li brx zhuh wr glh wrpruurz. Ohduq dv li brx zhuh wr olyh iruhyhu.",
        "Eh zkr brx duh dqg vdb zkdw brx ihho, ehfdxvh wkrvh zkr plqg grq’w pdwwhu dqg wkrvh zkr pdwwhu grq’w plqg.",
        "Li brx fdqqrw gr juhdw wklqjv, gr vpdoo wklqjv lq d juhdw zdb.",
        "Zlvh phq vshdn ehfdxvh wkhb kdyh vrphwklqj wr vdb; irrov ehfdxvh wkhb kdyh wr vdb vrphwklqj.",
        "Jdjd Jrxjrx Jrxjrx Gdgd",
        "Txdwuh mrxuqdxa krvwlohv vrqw soxv d fudlqguh txh plooh edlrqqhwwhv.", # This one is a bit ironic!
    ]
class TestReversedStreamCipher(TestStreamCipher, unittest.TestCase):
    """Alphabet-reversal (Atbash-style) cipher; only the alphabet case."""
    cipher = ciphers.ReversedCipher()
    ciphertexts = [
        "ZYXWVUTSRQPONMLKJIHGFEDCBA"
    ]
class TestAlphabetStreamCipher(TestStreamCipher, unittest.TestCase):
    """Alphabet cipher whose index mapping reproduces a Caesar shift of 3,
    so it must produce exactly the Caesar fixtures."""
    cipher = ciphers.AlphabetCipher(
        alphabet=alphabets.StringAlphabet("ABCDEFGHIJKLMNOPQRSTUVWXYZ",
                                          indices="DEFGHIJKLMNOPQRSTUVWXYZABC")
    )
    ciphertexts = TestCaesarStreamCipher.ciphertexts
class TestPermutationIdentityStreamCipher(TestStreamCipher, unittest.TestCase):
    """Default permutation is the identity: ciphertexts equal the plaintexts."""
    cipher = ciphers.PermutationCipher()
    ciphertexts = TestStreamCipher.plaintexts
class TestPermutationStreamCipher(TestStreamCipher, unittest.TestCase):
    """Explicit 26-element permutation; only the alphabet case is pinned."""
    cipher = ciphers.PermutationCipher(
        [
            10, 24, 8, 18, 15, 13, 1, 25, 9,
            22, 20, 6, 2, 0, 5, 3, 12, 21,
            19, 14, 16, 11, 7, 4, 23, 17
        ]
    )
    ciphertexts = [
        "KYISPNBZJWUGCAFDMVTOQLHEXR"
    ]
class TestPermutationStreamCipherRandom(TestStreamCipher, unittest.TestCase):
    """Random permutation: no fixed ciphertexts can be asserted."""
    cipher = ciphers.PermutationCipher(auto=True)
    ciphertexts = []
class TestPermutationStreamCipherIdentity(TestStreamCipher, unittest.TestCase):
    """Duplicate of the identity-permutation case (same fixtures as above)."""
    cipher = ciphers.PermutationCipher()
    ciphertexts = TestStreamCipher.plaintexts
class TestAffineStreamCipher(TestStreamCipher, unittest.TestCase):
    """Default affine cipher; only the alphabet case is pinned."""
    cipher = ciphers.AffineCipher()
    ciphertexts = [
        "INSXCHMRWBGLQVAFKPUZEJOTYD"
    ]
def main():
    """Run the test suite.

    unittest.main() raises SystemExit itself with the appropriate status,
    so the old trailing sys.exit(0) was unreachable dead code (and, had it
    ever run, would have masked a failing exit status); it was removed.
    """
    unittest.main()
# Script entry point: run the cipher test suite.
if __name__ == "__main__":
    main()
|
import numpy as np
import tensorflow as tf
from scipy.stats import randint
from sklearn.model_selection import GridSearchCV
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.models import Model
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from mnistData import MNIST
np.random.seed(0)
tf.random.set_seed(0)
def build_model(
    filters_1: int,
    kernel_size_1: int,
    filters_2: int,
    kernel_size_2: int,
    filters_3: int,
    kernel_size_3: int
) -> Model:
    """Build and compile a 3-stage CNN classifier for 28x28x1 MNIST images.

    Each stage is two same-padded Conv2D+ReLU layers followed by 2x2 max
    pooling; the head is Dense(128)+ReLU feeding a 10-way softmax. The model
    is compiled with categorical cross-entropy, Adam and accuracy.
    """
    input_img = Input(shape=(28, 28, 1))
    x = input_img
    conv_stages = (
        (filters_1, kernel_size_1),
        (filters_2, kernel_size_2),
        (filters_3, kernel_size_3),
    )
    for stage_filters, stage_kernel in conv_stages:
        for _ in range(2):
            x = Conv2D(filters=stage_filters, kernel_size=stage_kernel, padding="same")(x)
            x = Activation("relu")(x)
        x = MaxPool2D()(x)
    x = Flatten()(x)
    x = Dense(units=128)(x)
    x = Activation("relu")(x)
    x = Dense(units=10)(x)
    y_pred = Activation("softmax")(x)
    model = Model(
        inputs=[input_img],
        outputs=[y_pred]
    )
    model.compile(
        loss="categorical_crossentropy",
        optimizer="Adam",
        metrics=["accuracy"]
    )
    return model
if __name__ == "__main__":
    # Grid-search CNN hyper-parameters on MNIST with 3-fold cross-validation.
    data = MNIST(with_normalization=True)
    x_train, y_train = data.get_train_set()
    # Two options per parameter except the last kernel -> 2**5 = 32 configs.
    param_grid = {
        "filters_1": [16, 32],
        "kernel_size_1": [3, 5],
        "filters_2": [32, 64],
        "kernel_size_2": [3, 5],
        "filters_3": [64, 128],
        "kernel_size_3": [5],
    }
    keras_clf = KerasClassifier(
        build_fn=build_model,
        epochs=3,
        batch_size=128,
        verbose=1
    )
    # n_jobs=1: Keras models are not safely trained in parallel processes here.
    grid_cv = GridSearchCV(
        estimator=keras_clf,
        param_grid=param_grid,
        n_jobs=1,
        verbose=0,
        cv=3
    )
    grid_result = grid_cv.fit(x_train, y_train)
    print(f"Best: {grid_result.best_score_} using {grid_result.best_params_}")
    # Report mean/std accuracy for every evaluated configuration.
    means = grid_result.cv_results_["mean_test_score"]
    stds = grid_result.cv_results_["std_test_score"]
    params = grid_result.cv_results_["params"]
    for mean, std, param in zip(means, stds, params):
        print(f"Acc: {mean} (+/- {std * 2}) with: {param}")
|
from datetime import datetime
from decimal import Decimal
from typing import Optional
from django.conf import settings
from authentication.models import Dealer
from cashback.models import Cashback
from .exceptions import (DealerDoesNotExist, OrderCodeAlreadyExists,
OrderDoesNotExist, StatusNotAllowed)
from .models import Order
class OrderService:
    """Business rules for creating, updating, listing and deleting Orders."""

    @classmethod
    def create_order(
        cls: 'OrderService',
        code: str,
        amount: Decimal,
        date: datetime,
        cpf: str,
    ) -> Order:
        """Create an Order for the dealer identified by *cpf*.

        Raises OrderCodeAlreadyExists / DealerDoesNotExist via
        get_dealer_or_raise_exception. Whitelisted CPFs are auto-approved;
        the order's cashback is created/updated afterwards.
        """
        dealer: Dealer = cls.get_dealer_or_raise_exception(code=code, cpf=cpf)
        status = Order.Status.IN_VALIDATION
        if cpf in settings.APPROVED_ALLOWED_DEALERS:
            # Whitelisted dealers skip manual validation.
            status = Order.Status.APPROVED
        order = Order.objects.create(
            code=code, amount=amount, date=date, dealer=dealer, status=status
        )
        cls._create_or_update_cashback_by_order(order)
        return order

    @classmethod
    def update_order(cls: 'OrderService', order_id: str, **kwargs) -> Order:
        """Update matching fields of an IN_VALIDATION order and refresh cashback.

        Raises OrderDoesNotExist when the id is unknown and StatusNotAllowed
        when the order is no longer IN_VALIDATION.
        """
        cpf: str = kwargs.get('cpf')
        code: str = kwargs.get('code')
        order = Order.objects.filter(pk=order_id).first()
        if not order:
            raise OrderDoesNotExist()
        if order.status != Order.Status.IN_VALIDATION:
            # Approved/rejected orders are immutable.
            raise StatusNotAllowed()
        if cpf:
            dealer: Dealer = cls.get_dealer_or_raise_exception(code=code, cpf=cpf)
            order.dealer = dealer
        # Copy over every kwarg that names an existing Order attribute.
        for attr, value in kwargs.items():
            if hasattr(order, attr):
                setattr(order, attr, value)
        order.save()
        cls._create_or_update_cashback_by_order(order)
        return order

    @staticmethod
    def list_orders(user, limit: int = 10, offset: int = 0):
        """Return one page (offset/limit slice) of the user's orders."""
        return list(Order.objects.filter(dealer=user)[offset:offset + limit])

    @staticmethod
    def delete_order(order_id: str) -> None:
        """Delete an order; only allowed while it is still IN_VALIDATION."""
        order: Order = Order.objects.filter(pk=order_id).first()
        if not order:
            raise OrderDoesNotExist()
        if order.status != Order.Status.IN_VALIDATION:
            raise StatusNotAllowed()
        order.delete()

    @staticmethod
    def get_dealer_or_raise_exception(cpf: str, code: Optional[str] = None) -> Dealer:
        """Return the Dealer for *cpf*, optionally enforcing code uniqueness.

        Raises OrderCodeAlreadyExists when *code* is already taken and
        DealerDoesNotExist when no dealer matches *cpf*.
        (Return annotation corrected from None to Dealer.)
        """
        if code:
            if Order.objects.filter(code=code).exists():
                raise OrderCodeAlreadyExists()
        dealer: Dealer = Dealer.objects.filter(cpf=cpf).first()
        if not dealer:
            raise DealerDoesNotExist()
        return dealer

    @staticmethod
    def _create_or_update_cashback_by_order(order: Order) -> None:
        """
        Creates cashback based on order instance and apply the level rules
        """
        first_level_cashback_percent = Decimal(settings.FIRST_LEVEL_CASHBACK_PERCENT)
        second_level_cashback_percent = Decimal(settings.SECOND_LEVEL_CASHBACK_PERCENT)
        third_level_cashback_percent = Decimal(settings.THIRD_LEVEL_CASHBACK_PERCENT)
        # Percentage tier chosen by amount thresholds configured in settings.
        if order.amount <= settings.FIRST_LEVEL_CASHBACK_TARGET:
            amount = order.amount * first_level_cashback_percent
            percentage = first_level_cashback_percent
        elif order.amount <= settings.SECOND_LEVEL_CASHBACK_TARGET:
            amount = order.amount * second_level_cashback_percent
            percentage = second_level_cashback_percent
        else:
            amount = order.amount * third_level_cashback_percent
            percentage = third_level_cashback_percent
        # Upsert: one Cashback row per order.
        cashback: Cashback = Cashback.objects.filter(order=order).first()
        if cashback:
            cashback.amount = amount
            cashback.percentage = percentage
            cashback.save()
        else:
            Cashback.objects.create(order=order, amount=amount, percentage=percentage)
|
<filename>pyclustering/nnet/tests/som_templates.py
"""!
@brief Templates for tests of Self-Organization Map (SOM).
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
import pickle
import matplotlib
matplotlib.use('Agg')
from pyclustering.nnet.som import som, type_conn, som_parameters
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES
class SomTestTemplates:
    """Reusable assertion templates shared by the SOM unit tests."""

    @staticmethod
    def templateTestAwardNeurons(file, rows, cols, time, expected_result, autostop, ccore_flag, parameters = None, **kwargs):
        """Train a SOM of every connection type and verify the award distribution.

        file: sample path; rows/cols: map size; time: epochs;
        expected_result: expected sorted per-neuron award counts;
        autostop: early-stop flag; ccore_flag: use the C core;
        store_load (kwarg): round-trip the network through pickle first.
        """
        store_load = kwargs.get('store_load', False)
        types = [type_conn.func_neighbor, type_conn.grid_eight, type_conn.grid_four, type_conn.honeycomb]
        sample = read_sample(file)
        if parameters is None:
            parameters = som_parameters()
        for structure in types:
            network = som(rows, cols, structure, parameters, ccore=ccore_flag)
            if store_load:
                dump_network = pickle.dumps(network)
                network = pickle.loads(dump_network)
            network.train(sample, time, autostop)
            winners = network.get_winner_number()
            assert winners == len(expected_result)
            if sorted(network.awards) != expected_result:
                # Visual aid before the failing assertion below.
                network.show_network(awards=True)
            assert sorted(network.awards) == expected_result
            # Every sample point must be captured by exactly one neuron.
            total_capture_points = 0
            for points in network.capture_objects:
                total_capture_points += len(points)
            assert total_capture_points == sum(expected_result)
            del network

    @staticmethod
    def templateTestWinners(ccore_flag):
        """Check a 5x5 map distributes all 60 sample points across winners."""
        types = [type_conn.func_neighbor, type_conn.grid_eight, type_conn.grid_four, type_conn.honeycomb]
        sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
        for stucture in types:
            network = som(5, 5, stucture, ccore=ccore_flag)
            network.train(sample, 100)
            assert sum(network.awards) == 60
            points = list()
            for i in range(network.size):
                if network.awards[i] > 0:
                    points += network.capture_objects[i]
            assert len(points) == len(sample)
            # Captured indices must be exactly 0..len(sample)-1, each once.
            points = sorted(points)
            for i in range(len(points)):
                assert points[i] == i

    @staticmethod
    def templateTestSimulate(connections, ccore_flag, **kwargs):
        """Verify simulate() maps the two sample clusters to the two neurons."""
        store_load = kwargs.get('store_load', False)
        sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
        network = som(1, 2, connections, ccore=ccore_flag)
        network.train(sample, 100)
        if store_load:
            dump_network = pickle.dumps(network)
            network = pickle.loads(dump_network)
        # Neuron labelling is arbitrary; infer it from the first point.
        expected_winners = [0, 1]
        for i in range(len(sample)):
            index_winner = network.simulate(sample[i])
            if (i == 0) and (index_winner == 1):
                expected_winners = [1, 0]
            if i < 5:
                assert expected_winners[0] == index_winner
            else:
                assert expected_winners[1] == index_winner

    @staticmethod
    def random_state(rows, cols, connections, random_state, ccore_flag):
        """Check that a fixed random_state makes training fully reproducible."""
        sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
        params = som_parameters()
        params.random_state = random_state
        # Bug fix: `params` was built but never passed to som(), so the
        # random_state had no effect and this test only compared two default
        # runs. The positional signature matches templateTestAwardNeurons.
        network_1 = som(rows, cols, connections, params, ccore=ccore_flag)
        steps_1 = network_1.train(sample, 100, True)
        network_2 = som(rows, cols, connections, params, ccore=ccore_flag)
        steps_2 = network_2.train(sample, 100, True)
        assert steps_1 == steps_2
        assert network_1.weights == network_2.weights
        assert network_1.capture_objects == network_2.capture_objects
        assert network_1.awards == network_2.awards
|
"""
1-lead ECG monitor FarosTM 180 from Bittium is a one channel ECG monitor with
sampling frequency up to 1000 Hz and a 3D acceleration sampling up to 100Hz.
"""
import json
import os
import random
import numpy as np
import pandas as pd
import pyedflib as edf
class FarosReader:
    """
    Read, timeshift and write data generated by Bittium Faros devices.

    Attributes
    ----------
    start_time : pandas.Timestamp
        Start time of all measurements.
    sample_freqs : dict
        Sampling frequencies of all signals in Hz.
    units : dict
        Units of all signals.
    ECG : pandas.Series
        ECG signal, indexed by timestamp.
    ACC : pandas.DataFrame
        Three ACC axes, indexed by timestamp.
    Marker : pandas.Series
        Markers, indexed by timestamp.
    HRV : pandas.Series
        HRV signal, indexed by timestamp.
    data : DataFrame
        Contain all signals (ECG, ACC, Marker, HRV) indexed by timestamp.
        Since the signals have different sampling frequencies, many values will be NaN.
    """

    def __init__(self, path):
        """
        Read a Faros-generated EDF file or a directory created by a FarosReader.

        Parameters
        ----------
        path : str
            Can either be a Faros-generated EDF file or a directory created by
            the FarosReader.write() method.
        """
        self.start_time = None
        self.sample_freqs = None
        self.units = None
        # Raw pyedflib signal headers; only available when reading from EDF,
        # and required again by _write_to_edf().
        self._edf_metadata = None
        self.ECG = None
        self.ACC = None
        self.Marker = None
        self.HRV = None
        self.data = None
        if os.path.isfile(path):
            self._read_from_edf_file(path)
        if os.path.isdir(path):
            self._read_from_directory(path)

    def _read_from_edf_file(self, path):
        """Populate the signal attributes from a Faros EDF file.

        Assumes the fixed Faros channel layout: 0 = ECG, 1-3 = ACC X/Y/Z,
        4 = Marker, 5 = HRV.
        """
        reader = edf.EdfReader(path)
        self.start_time = pd.Timestamp(reader.getStartdatetime())
        self.sample_freqs = {
            'ECG': reader.getSampleFrequency(0),
            'ACC': reader.getSampleFrequency(1),
            'Marker': reader.getSampleFrequency(4),
            'HRV': reader.getSampleFrequency(5)
        }
        self.units = {
            'ECG': reader.getSignalHeader(0)['dimension'],
            'ACC': reader.getSignalHeader(1)['dimension'],
            'HRV': reader.getSignalHeader(5)['dimension'],
        }
        self._edf_metadata = reader.getSignalHeaders()
        self._n_samples = reader.getNSamples()
        self._n_datarecords = reader.datarecords_in_file
        ecg = reader.readSignal(0)
        self.ECG = pd.Series(ecg, name='ECG',
                             index=pd.date_range(start=self.start_time,
                                                 periods=len(ecg),
                                                 freq=f"{1/self.sample_freqs['ECG']}S")
                             )
        acc = np.array([reader.readSignal(i) for i in range(1, 4)]).T
        self.ACC = pd.DataFrame(acc, columns=['X', 'Y', 'Z'],
                                index=pd.date_range(start=self.start_time,
                                                    periods=len(acc),
                                                    freq=f"{1/self.sample_freqs['ACC']}S")
                                )
        marker = reader.readSignal(4)
        self.Marker = pd.Series(marker, name='Marker',
                                index=pd.date_range(start=self.start_time,
                                                    periods=len(marker),
                                                    freq=f"{1/self.sample_freqs['Marker']}S")
                                )
        hrv = reader.readSignal(5)
        self.HRV = pd.Series(
            hrv, name='HRV',
            index=pd.date_range(start=self.start_time,
                                periods=len(hrv),
                                freq=f"{1/self.sample_freqs['HRV']}S")
        )
        reader.close()

    def _read_from_directory(self, path):
        """Populate the signal attributes from a directory written by write()."""
        with open(os.path.join(path, 'meta.json'), 'r') as meta_file:
            meta = json.load(meta_file)
        self.start_time = pd.Timestamp(meta['start_time'])
        self.sample_freqs = meta['sample_freqs']
        self.units = meta['units']
        self.ECG = self._read_dir_csv(os.path.join(path, 'ECG.csv'), self.start_time, self.sample_freqs['ECG'])
        self.ACC = self._read_dir_csv(os.path.join(path, 'ACC.csv'), self.start_time, self.sample_freqs['ACC'])
        self.Marker = self._read_dir_csv(os.path.join(path, 'Marker.csv'), self.start_time, self.sample_freqs['Marker'])
        self.HRV = self._read_dir_csv(os.path.join(path, 'HRV.csv'), self.start_time, self.sample_freqs['HRV'])

    def _read_dir_csv(self, path, start_time, sample_freq):
        """Read one signal CSV and rebuild its DatetimeIndex from the metadata."""
        dataframe = pd.read_csv(path)
        idx = pd.date_range(start=start_time, periods=len(dataframe), freq=f"{1/sample_freq}S")
        dataframe.index = idx
        # squeeze() turns single-column frames (ECG, Marker, HRV) into Series.
        return dataframe.squeeze()

    def join_dataframes(self):
        """
        Join the individual signal dataframes by timestamp.

        The resulting dataframe is saved in the attribute reader.data.
        """
        # get index for joined dataframe
        joined_idx = pd.concat(map(pd.Series, [self.ECG.index, self.ACC.index, self.Marker.index, self.HRV.index]))
        joined_idx = pd.Index(joined_idx.drop_duplicates().sort_values())
        # create joined dataframe
        col_names = ['ECG', 'ACC_X', 'ACC_Y', 'ACC_Z', 'ACC_mag', 'Marker', 'HRV']
        joined_df = pd.DataFrame(index=joined_idx, columns=col_names)
        # set non-nan values of joined dataframe
        joined_df.loc[self.ECG.index, 'ECG'] = self.ECG
        joined_df.loc[self.ACC.index, 'ACC_X'] = self.ACC['X']
        joined_df.loc[self.ACC.index, 'ACC_Y'] = self.ACC['Y']
        joined_df.loc[self.ACC.index, 'ACC_Z'] = self.ACC['Z']
        joined_df.loc[self.Marker.index, 'Marker'] = self.Marker
        joined_df.loc[self.HRV.index, 'HRV'] = self.HRV
        self.data = joined_df

    def write(self, path, file_format='directory'):
        """
        Write the data either to an EDF file or to several files into a new directory.

        Because of the `special structure of EDF files <https://www.edfplus.info/specs/edf.html>`_
        writing to EDF is only possible for readers that have been created from an EDF file and without any changes to the ACC, ECG, Marker, HRV and sample_freqs attributes.
        Because we want you to be able to modify the signals, you can write the data back to a directory of individual files.
        Writing to a directory is the preferred method and works in all cases.

        Parameters
        ----------
        path : str
            Name of the file or directory to write the data to.
        file_format: {'directory', 'edf'}, default 'directory'
            Format of the written data.
        """
        if file_format == 'directory':
            self._write_to_directory(path)
        if file_format == 'edf':
            self._write_to_edf(path)

    def _write_to_edf(self, path):
        """Write the (unmodified) signals back to an EDF file, record by record."""
        if self._edf_metadata is None:
            raise Exception("There is no EDF metadata in this reader, most likely because it was initialized from a directory. Writing to EDF file not possible.")
        writer = edf.EdfWriter(path, 6, 0)
        writer.setStartdatetime(self.start_time.to_pydatetime())
        writer.setSignalHeaders(self._edf_metadata)
        ecg_freq = int(self.sample_freqs['ECG'])
        acc_freq = int(self.sample_freqs['ACC'])
        marker_freq = int(self.sample_freqs['Marker'])
        hrv_freq = int(self.sample_freqs['HRV'])
        # One EDF data record holds one second of every channel.
        n_records = int(len(self.ECG) / ecg_freq)
        for i in range(n_records):
            writer.writePhysicalSamples(self.ECG.values[ecg_freq * i: ecg_freq * (i + 1)])
            writer.writePhysicalSamples(self.ACC['X'].values[acc_freq * i: acc_freq * (i + 1)])
            writer.writePhysicalSamples(self.ACC['Y'].values[acc_freq * i: acc_freq * (i + 1)])
            writer.writePhysicalSamples(self.ACC['Z'].values[acc_freq * i: acc_freq * (i + 1)])
            writer.writePhysicalSamples(self.Marker.values[marker_freq * i: marker_freq * (i + 1)])
            writer.writePhysicalSamples(self.HRV.values[hrv_freq * i: hrv_freq * (i + 1)])
        writer.close()

    def _write_to_directory(self, path):
        """Write meta.json plus one CSV per signal into `path` (created if needed)."""
        if not os.path.isdir(path):
            os.mkdir(path)
        meta = {
            'start_time': str(self.start_time),
            'sample_freqs': self.sample_freqs,
            'units': self.units
        }
        with open(os.path.join(path, 'meta.json'), 'w') as meta_file:
            json.dump(meta, meta_file)
        # NOTE(review): `line_terminator` was renamed to `lineterminator` in
        # pandas 1.5 and removed in 2.0 -- update when the pandas pin allows.
        self.ECG.to_csv(os.path.join(path, 'ECG.csv'), index=None, line_terminator='\n')
        self.ACC.to_csv(os.path.join(path, 'ACC.csv'), index=None, line_terminator='\n')
        self.Marker.to_csv(os.path.join(path, 'Marker.csv'), index=None, line_terminator='\n')
        self.HRV.to_csv(os.path.join(path, 'HRV.csv'), index=None, line_terminator='\n')

    def timeshift(self, shift='random'):
        """
        Timeshift the data by shifting all time related values (i.e. start_time
        and data.index).

        Parameters
        ----------
        shift : None/'random', pd.Timestamp or pd.Timedelta
            If shift is not specified, shifts the data by a random time interval
            between one month and two years to the past.
            If shift is a timdelta, shifts the data by that timedelta.
            If shift is a timestamp, shifts the data such that the earliest entry
            has that timestamp. The remaining values will mantain the same
            time difference to the first entry.
        """
        if shift == 'random':
            one_month = pd.Timedelta('30 days').value
            two_years = pd.Timedelta('730 days').value
            # random.uniform yields nanoseconds; negated => shift to the past.
            random_timedelta = - pd.Timedelta(random.uniform(one_month, two_years)).round('s')
            self.timeshift(random_timedelta)
            # Bug fix: without this return the method fell through and kept
            # comparing the string 'random' against the isinstance checks below.
            return
        dfs_to_shift = [self.ECG, self.ACC, self.Marker, self.HRV]
        if self.data is not None:
            dfs_to_shift.append(self.data)
        if isinstance(shift, pd.Timestamp):
            self.start_time = shift
            for dataframe in dfs_to_shift:
                timedeltas = dataframe.index - dataframe.index.min()
                dataframe.index = shift + timedeltas
        if isinstance(shift, pd.Timedelta):
            # Bug fix: start_time must move together with the data indices
            # (the docstring promises start_time is shifted as well).
            if self.start_time is not None:
                self.start_time = self.start_time + shift
            for dataframe in dfs_to_shift:
                dataframe.index += shift
|
"""
Ingestor and egestor for VOC formats.
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/index.html
"""
import os
import xml.etree.ElementTree as ET
from pathlib import Path
from workers.lib.messenger import message
from .abstract import Ingestor
from .validation_schemas import get_blank_image_detection_schema, get_blank_detection_schema
class VocCityIngestor(Ingestor):
    """Ingest a VOC-style "city" dataset (vehicle + passenger annotations)
    into the intermediate image/detection schema."""

    # Progress / id counters; reset at the start of every ingest() call.
    iii = 0
    detection_counter = 0
    # Default sub-directory layout relative to the dataset root.
    folder_names = {"images": "JPEGImages", "annotations": "Annotations", "sets": "ImageSets/Main"}
    chosen_set = "trainval.txt"

    def validate(self, root, folder_names, chosen_set="trainval.txt"):
        """Check that all expected sub-directories and the image-set file exist.

        Returns (True, None) on success, (False, reason) otherwise.
        """
        self.chosen_set = chosen_set
        if folder_names is None:
            folder_names = self.folder_names
        for subdir in folder_names.values():
            if not os.path.isdir(os.path.join(root, subdir)):
                return False, f"Expected subdirectory {subdir}"
        if not os.path.isfile(os.path.join(root, os.path.join(folder_names["sets"], chosen_set))):
            return False, f"Expected {chosen_set} to exist within {os.path.join(root, folder_names['sets'])}"
        return True, None

    def ingest(self, path, folder_names=None):
        """Return one image-detection record per image id in the chosen set."""
        self.iii = 0
        self.detection_counter = 0
        if folder_names is None:
            folder_names = self.folder_names
        image_names = self._get_image_ids(path, folder_names)
        return [self._get_image_detection(path, image_name, folder_names) for image_name in image_names]

    def _get_image_ids(self, root, folder_names):
        """Read image ids from the chosen image-set file.

        If the file is empty it is (re)populated from the image directory.
        """
        if folder_names is None:
            folder_names = self.folder_names
        path = os.path.join(root, folder_names["sets"], self.chosen_set)
        with open(path, "r+") as f:
            lines = f.readlines()
            if len(lines) == 0:
                # Bug fix: the image directory must be resolved relative to the
                # dataset root (the original listed a CWD-relative path), and
                # each id needs its own line (the original wrote all names
                # concatenated on one line, which a later read could not split).
                images_dir = os.path.join(root, folder_names["images"])
                fnames = [Path(file).stem for file in os.listdir(images_dir)]
                f.writelines(f"{name}\n" for name in fnames)
            else:
                fnames = [x.replace("\n", "") for x in lines]
        return fnames

    def _get_image_detection(self, root, image_id, folder_names):
        """Build one image + detections record from an annotation XML file."""
        if self.iii % 100 == 0:
            message(f"Processed {self.iii} xmls")
        self.iii += 1
        image_path = os.path.join(root, folder_names["images"], f"{image_id}.jpg")
        if not os.path.isfile(image_path):
            raise Exception(f"Expected {image_path} to exist.")
        annotation_path = os.path.join(root, folder_names["annotations"], f"{image_id}.xml")
        if not os.path.isfile(annotation_path):
            raise Exception(f"Expected annotation file {annotation_path} to exist.")
        tree = ET.parse(annotation_path)
        xml_root = tree.getroot()
        # Segmentation support is currently disabled for this format, so the
        # segmented path is always None (the original guarded it behind a
        # hard-coded `segmented = False`).
        segmented_path = None
        # NOTE(review): width/height are read from direct children of the XML
        # root, not from a <size> child as in stock VOC -- assumed intentional
        # for this custom format; confirm against the annotation files.
        image_width = int(xml_root.find("width").text)
        image_height = int(xml_root.find("height").text)
        single_img_detection = get_blank_image_detection_schema()
        single_img_detection["image"]["id"] = image_id
        single_img_detection["image"]["dataset_id"] = None
        single_img_detection["image"]["path"] = image_path
        single_img_detection["image"]["segmented_path"] = segmented_path
        single_img_detection["image"]["width"] = image_width
        single_img_detection["image"]["height"] = image_height
        single_img_detection["image"]["file_name"] = f"{image_id}.jpg"
        detections = [self._get_detection(node, image_id, False, image_width, image_height)
                      for node in xml_root.findall("vehicle")]
        detections.extend(self._get_detection(node, image_id, True, image_width, image_height)
                          for node in xml_root.findall("passengers"))
        single_img_detection["detections"] = detections
        return single_img_detection

    def _get_detection(self, node, img_id, passenger, width, height):
        """Convert one <vehicle>/<passengers> node into a detection dict,
        clamping the bounding box to the image bounds."""
        curr_detection = get_blank_detection_schema()
        bndbox = node.find("bndbox")
        curr_detection["id"] = self.detection_counter
        self.detection_counter += 1
        curr_detection["image_id"] = str(img_id)
        # Passenger nodes are always labelled "person"; vehicles carry a <type>.
        curr_detection["label"] = "person" if passenger else node.find("type").text
        curr_detection["segmentation"] = None
        curr_detection["top"] = max(float(bndbox.find("ymin").text), 0)
        curr_detection["left"] = max(float(bndbox.find("xmin").text), 0)
        curr_detection["right"] = min(float(bndbox.find("xmax").text), width)
        curr_detection["bottom"] = min(float(bndbox.find("ymax").text), height)
        curr_detection["iscrowd"] = False
        curr_detection["isbbox"] = True
        curr_detection["keypoints"] = []
        return curr_detection
|
"""
A definition of a decorator that adds noise to input values.
"""
from .idata_decorator import IDataDecorator
from .funcs.noise import select_noise
def calc_var_indices(input_vars, affected_vars):
    """Return the position of each element of `affected_vars` in `input_vars`.

    Returns None when `affected_vars` is None (noise disabled for the group).
    Raises ValueError if a variable is not present in `input_vars`.
    """
    if affected_vars is None:
        return None
    indices = []
    for name in affected_vars:
        indices.append(input_vars.index(name))
    return indices
class DataNoise(IDataDecorator):
    """A decorator around `IDataGenerator` that adds noise to inputs.

    This decorator modifies input variables that decorated object produces by
    adding random noise to them. Random value is sampled once per slice and
    applied to all variables specified at affected_vars_*.
    The noise is added multiplicatively to inputs.

    Parameters
    ----------
    dgen : IDataGenerator
        `IDataGenerator` to be decorated.
    noise : { None, 'uniform', 'gaussian', 'discrete', 'debug' }
        Type of noise to be added.
    noise_kwargs : dict or None, optional
        Noise parameters. C.f. `select_noise`.
    affected_vars_slice : list of str or None, optional
        List of slice level variables that will be affected by noise.
    affected_vars_png2d : list of str or None, optional
        List of 2D prong level variables that will be affected by noise.
    affected_vars_png3d : list of str or None, optional
        List of 3D prong level variables that will be affected by noise.

    See Also
    --------
    vlne.data.data_generator.funcs.noise.select_noise
    """

    def __init__(
        self, dgen,
        noise = None,
        noise_kwargs = None,
        affected_vars_slice = None,
        affected_vars_png2d = None,
        affected_vars_png3d = None,
    ):
        super(DataNoise, self).__init__(dgen)
        if noise_kwargs is None:
            noise_kwargs = {}
        # Noise generator object; its .get(batch_size) samples one value
        # per batch element.
        self._noise = select_noise(noise, **noise_kwargs)
        self._vars_slice = affected_vars_slice
        self._vars_png3d = affected_vars_png3d
        self._vars_png2d = affected_vars_png2d
        # Pre-computed positions of the affected variables inside the
        # generator's input variable lists (None when a group is unaffected).
        self._vars_idx_slice = calc_var_indices(
            self.vars_input_slice, affected_vars_slice
        )
        self._vars_idx_png2d = calc_var_indices(
            self.vars_input_png2d, affected_vars_png2d
        )
        self._vars_idx_png3d = calc_var_indices(
            self.vars_input_png3d, affected_vars_png3d
        )

    @staticmethod
    def _apply_noise(input_values, var_idx, noise):
        """
        Apply noise to vars specified by `var_idx` in `input_values` inplace
        """
        if input_values.size == 0:
            return
        # Reshape noise from (batch,) to (batch, 1, ..., 1) so that the same
        # per-batch-element value broadcasts over all inner dimensions.
        broadcasted_shape = noise.shape + (1,) * (input_values.ndim - 1)
        broadcasted_noise = noise.reshape(broadcasted_shape)
        # Multiplicative noise: x -> x * (1 + noise).
        input_values[..., var_idx] *= (1 + broadcasted_noise)

    def _get_noise(self, inputs):
        # Sample one noise value per batch element.
        # NOTE(review): assumes every array in `inputs` shares the same
        # leading (batch) dimension -- only the first is consulted.
        batch_sizes = [ x.shape[0] for x in inputs.values() ]
        batch_size = batch_sizes[0]
        return self._noise.get(batch_size)

    def __getitem__(self, index):
        # Fetch the underlying batch, then perturb the affected input groups
        # in place with a single noise sample shared across all groups.
        batch_data = self._dgen[index]
        inputs = batch_data[0]
        noise = self._get_noise(inputs)  # .ravel() left from development
        if self._vars_slice is not None:
            DataNoise._apply_noise(
                inputs['input_slice'], self._vars_idx_slice, noise
            )
        if self._vars_png2d is not None:
            DataNoise._apply_noise(
                inputs['input_png2d'], self._vars_idx_png2d, noise
            )
        if self._vars_png3d is not None:
            DataNoise._apply_noise(
                inputs['input_png3d'], self._vars_idx_png3d, noise
            )
        return batch_data
|
<reponame>slemasne/lusid-sdk-python-preview
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3725
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class ComplianceRun(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    # Attribute name -> OpenAPI type.
    openapi_types = {
        'run_id': 'str',
        'as_at': 'datetime'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'run_id': 'runId',
        'as_at': 'asAt'
    }

    # Attribute name -> 'required' / 'optional'.
    required_map = {
        'run_id': 'required',
        'as_at': 'required'
    }

    def __init__(self, run_id=None, as_at=None, local_vars_configuration=None):  # noqa: E501
        """ComplianceRun - a model defined in OpenAPI

        :param run_id: The unique identifier of a compliance run (required)
        :type run_id: str
        :param as_at: The date at time at which the compliance run was run (required)
        :type as_at: datetime
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration
        self._run_id = None
        self._as_at = None
        self.discriminator = None
        # Assign through the property setters so required-field validation runs.
        self.run_id = run_id
        self.as_at = as_at

    @property
    def run_id(self):
        """Gets the run_id of this ComplianceRun.  # noqa: E501

        The unique identifier of a compliance run  # noqa: E501

        :return: The run_id of this ComplianceRun.  # noqa: E501
        :rtype: str
        """
        return self._run_id

    @run_id.setter
    def run_id(self, run_id):
        """Sets the run_id of this ComplianceRun.

        The unique identifier of a compliance run  # noqa: E501

        :param run_id: The run_id of this ComplianceRun.  # noqa: E501
        :type run_id: str
        """
        if self.local_vars_configuration.client_side_validation and run_id is None:  # noqa: E501
            raise ValueError("Invalid value for `run_id`, must not be `None`")  # noqa: E501
        self._run_id = run_id

    @property
    def as_at(self):
        """Gets the as_at of this ComplianceRun.  # noqa: E501

        The date at time at which the compliance run was run  # noqa: E501

        :return: The as_at of this ComplianceRun.  # noqa: E501
        :rtype: datetime
        """
        return self._as_at

    @as_at.setter
    def as_at(self, as_at):
        """Sets the as_at of this ComplianceRun.

        The date at time at which the compliance run was run  # noqa: E501

        :param as_at: The as_at of this ComplianceRun.  # noqa: E501
        :type as_at: datetime
        """
        if self.local_vars_configuration.client_side_validation and as_at is None:  # noqa: E501
            raise ValueError("Invalid value for `as_at`, must not be `None`")  # noqa: E501
        self._as_at = as_at

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}

        def convert(x):
            # Recursively serialise nested generated models; pass-through
            # everything else unchanged.
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # When serialising, emit the JSON key instead of the python name.
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ComplianceRun):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ComplianceRun):
            return True
        return self.to_dict() != other.to_dict()
|
<filename>cpg-core/src/main/python/CPGPython/__init__.py
#
# Copyright (c) 2021, Fraunhofer AISEC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $$$$$$\ $$$$$$$\ $$$$$$\
# $$ __$$\ $$ __$$\ $$ __$$\
# $$ / \__|$$ | $$ |$$ / \__|
# $$ | $$$$$$$ |$$ |$$$$\
# $$ | $$ ____/ $$ |\_$$ |
# $$ | $$\ $$ | $$ | $$ |
# \$$$$$ |$$ | \$$$$$ |
# \______/ \__| \______/
#
from ._code_extractor import CodeExtractor
from de.fraunhofer.aisec.cpg.graph import NodeBuilder
import ast
class PythonASTToCPG(ast.NodeVisitor):
    """Walk a parsed Python `ast` tree and build the corresponding CPG
    translation unit via the CPG `NodeBuilder` API."""

    def __init__(self, fname, frontend, code):
        """Parse `code` (from file `fname`) and prepare the translation unit.

        Parameters: fname - source file path used for naming and locations;
        frontend - the CPG language frontend (provides scope manager and log);
        code - the source text to parse.
        """
        self.sourcecode = CodeExtractor(fname)
        self.tud = NodeBuilder.newTranslationUnitDeclaration(fname, code)
        self.tud.setName(fname)
        self.fname = fname
        self.frontend = frontend
        self.scopemanager = frontend.getScopeManager()
        self.scopemanager.resetToGlobal(self.tud)
        self.logger = self.frontend.log
        self.rootNode = ast.parse(code, filename=fname, type_comments=True)

    # import methods from other files
    from ._expressions import handle_expression
    from ._misc import add_loc_info
    from ._misc import get_src_code
    from ._misc import is_declaration
    from ._misc import is_declared_reference
    from ._misc import is_field_declaration
    from ._misc import is_member_expression
    from ._misc import is_statement
    from ._misc import is_variable_declaration
    from ._misc import log_with_loc
    from ._statements import handle_argument
    from ._statements import handle_for
    from ._statements import handle_statement
    from ._statements import make_compound_statement

    def execute(self):
        """Convert the parsed module body into CPG declarations/statements.

        Raises RuntimeError if the parse root is not an `ast.Module`.
        """
        if isinstance(self.rootNode, ast.Module):
            self.log_with_loc("Handling tree root: %s" %
                              (ast.dump(self.rootNode)))
            # Module(stmt* body, type_ignore* type_ignores)
            # TODO how to name the namespace?
            # TODO improve readability
            # Namespace name = file basename without its extension.
            nsd_name = ".".join(self.fname.split("/")[-1].split(".")[:-1])
            nsd = NodeBuilder.newNamespaceDeclaration(nsd_name, "")
            self.tud.addDeclaration(nsd)
            self.scopemanager.enterScope(nsd)
            for stmt in self.rootNode.body:
                self.log_with_loc("Handling statement %s" % (ast.dump(stmt)))
                r = self.handle_statement(stmt)
                self.log_with_loc("Handling statement result is: %s" % (r))
                if self.is_declaration(r):
                    nsd.addDeclaration(r)
                elif self.is_statement(r):
                    nsd.addStatement(r)
                else:
                    self.log_with_loc("Don't know what to do with this: %s" %
                                      (r), loglevel="ERROR")
            self.scopemanager.leaveScope(nsd)
            self.scopemanager.addDeclaration(nsd)
        else:
            # Bug fix: this call used `level=` while every other call in the
            # class uses `loglevel=`; also fixed the "recieved" typo.
            self.log_with_loc("Expected an ast.Module node but received %s." %
                              (type(self.rootNode)), loglevel="ERROR")
            raise RuntimeError
|
from mlpractice.stats.stats_utils import _update_stats, print_stats
from mlpractice.utils import ExceptionInterception
try:
from mlpractice_solutions.mlpractice_solutions\
.linear_classifier_solution import linear_softmax
except ImportError:
linear_softmax = None
import torch
import numpy as np
def test_all(linear_softmax=linear_softmax):
    """Run the full linear_softmax check suite, then record the result.

    Executes the interface, public, normalization and randomized tests in
    order; on success prints a confirmation and updates the practice stats.
    """
    test_interface(linear_softmax)
    test_public(linear_softmax)
    test_normalization(linear_softmax)
    test_random(linear_softmax, 100)
    print('All tests passed!')
    _update_stats('linear_classifier', 'linear_softmax')
    print_stats('linear_classifier')
def test_interface(linear_softmax=linear_softmax):
    """Check return types and gradient shapes of linear_softmax on two cases."""
    with ExceptionInterception():
        xs_a = np.array([[1, 2, 3],
                         [4, 5, 6],
                         [7, 8, 9]])
        w_a = np.array([[1, 2, 3],
                        [4, 5, 6],
                        [7, 8, 9]])
        targets_a = np.array([0, 1, 2])
        loss_a, grad_a = linear_softmax(xs_a, w_a, targets_a)

        xs_b = np.array([[1],
                         [2],
                         [3]])
        w_b = np.array([[1, 2, 3]])
        targets_b = np.array([0])
        loss_b, grad_b = linear_softmax(xs_b, w_b, targets_b)

        # Same checks for both cases, in the original order.
        for loss, grad, weights in ((loss_a, grad_a, w_a),
                                    (loss_b, grad_b, w_b)):
            assert isinstance(loss, float), \
                "linear_softmax must return a float and an ndarray"
            assert isinstance(grad, np.ndarray), \
                "linear_softmax must return a float and an ndarray"
            assert grad.shape == weights.shape, \
                "The output gradient shape must match the W shape"
def test_public(linear_softmax=linear_softmax):
    """Compare linear_softmax against torch CrossEntropyLoss on a fixed case."""
    with ExceptionInterception():
        xs = np.array([[1, 2, 3],
                       [4, 5, 6],
                       [7, 8, 9]])
        weights = np.array([[1, 2, 3],
                            [4, 5, 6],
                            [7, 8, 9]])
        targets = np.array([0, 1, 2])
        loss, gradient = linear_softmax(xs, weights, targets)

        # Reference value and gradient via torch autograd.
        xs_t = torch.from_numpy(xs).float()
        w_t = torch.from_numpy(weights).float()
        w_t.requires_grad = True
        logits = xs_t @ w_t
        criterion = torch.nn.CrossEntropyLoss(reduction='sum')
        reference = criterion(logits, torch.from_numpy(targets).long())
        reference.backward()

        assert abs(loss - reference) < 1e-6
        assert np.abs(gradient - w_t.grad.numpy()).max() < 1e-6
def test_normalization(linear_softmax=linear_softmax):
    """Check numerical stability on a huge logit (requires max-subtraction)."""
    with ExceptionInterception():
        xs = np.array([[0, 0, 10000]])
        weights = np.array([[1, 0, 0],
                            [0, 1, 0],
                            [0, 0, 1]])
        targets = np.array([2])
        loss, gradient = linear_softmax(xs, weights, targets)

        # Reference value and gradient via torch autograd.
        xs_t = torch.from_numpy(xs).float()
        w_t = torch.from_numpy(weights).float()
        w_t.requires_grad = True
        logits = xs_t @ w_t
        criterion = torch.nn.CrossEntropyLoss(reduction='sum')
        reference = criterion(logits, torch.from_numpy(targets).long())
        reference.backward()

        assert abs(loss - reference) < 1e-6
        assert np.abs(gradient - w_t.grad.numpy()).max() < 1e-6
def test_random(linear_softmax=linear_softmax, iterations=1):
    """Fuzz linear_softmax against torch on seeded random 3x3 problems."""
    with ExceptionInterception():
        np.random.seed(42)
        for _ in range(iterations):
            xs = np.random.rand(3, 3)
            weights = np.random.rand(3, 3)
            targets = np.random.randint(0, 3, size=3)
            loss, gradient = linear_softmax(xs, weights, targets)

            # Reference value and gradient via torch autograd.
            xs_t = torch.from_numpy(xs).float()
            w_t = torch.from_numpy(weights).float()
            w_t.requires_grad = True
            logits = xs_t @ w_t
            criterion = torch.nn.CrossEntropyLoss(reduction='sum')
            reference = criterion(logits, torch.from_numpy(targets).long())
            reference.backward()

            assert abs(loss - reference) < 1e-6
            assert np.abs(gradient - w_t.grad.numpy()).max() < 1e-6
|
import torch
import torch.nn as nn
class STLocalizedConv(nn.Module):
    """Spatial-temporal localized convolution combining a gated temporal
    window (k_t) with multi-order graph convolution (k_s) over predefined,
    dynamic and static hidden graphs."""

    def __init__(self, hidden_dim, pre_defined_graph=None, use_pre=None, dy_graph=None, sta_graph=None, **model_args):
        super().__init__()
        # gated temporal conv
        self.k_s = model_args['k_s']  # spatial (graph) order
        self.k_t = model_args['k_t']  # temporal kernel size
        self.hidden_dim = hidden_dim
        # graph conv
        self.pre_defined_graph = pre_defined_graph
        self.use_predefined_graph = use_pre
        self.use_dynamic_hidden_graph = dy_graph
        self.use_static__hidden_graph = sta_graph
        self.support_len = len(self.pre_defined_graph) + int(dy_graph) + int(sta_graph)
        # Number of stacked graph matrices feeding gcn_updt (the +1 is X_0).
        # TODO: the 3 refers to the number of multi-modality graphs; this
        # needs a code refactor.
        self.num_matric = (int(use_pre) * len(self.pre_defined_graph) + len(self.pre_defined_graph) * int(dy_graph) + int(sta_graph)) * self.k_s + 1
        self.dropout = nn.Dropout(model_args['dropout'])
        # Pre-expand the predefined graphs into their ST-localized form once.
        self.pre_defined_graph = self.get_graph(self.pre_defined_graph)
        self.fc_list_updt = nn.Linear(self.k_t * hidden_dim, self.k_t * hidden_dim, bias=False)
        self.gcn_updt = nn.Linear(self.hidden_dim*self.num_matric, self.hidden_dim)
        # others
        self.bn = nn.BatchNorm2d(self.hidden_dim)
        self.activation = nn.ReLU()

    def gconv(self, support, X_k, X_0):
        """Graph convolution: concatenate X_0 with every graph-propagated
        feature map, then project back to hidden_dim."""
        out = [X_0]
        for graph in support:
            if len(graph.shape) == 2:  # static or predefined graph
                pass
            else:
                # Dynamic graphs carry a batch dim; add a broadcast axis.
                graph = graph.unsqueeze(1)
            H_k = torch.matmul(graph, X_k)
            out.append(H_k)
        out = torch.cat(out, dim=-1)
        out = self.gcn_updt(out)
        out = self.dropout(out)
        return out

    def get_graph(self, support):
        # Only used in static including static hidden graph and predefined graph, but not used for dynamic graph.
        graph_ordered = []
        # Zero the diagonal so a node's own (current) value is excluded.
        mask = 1 - torch.eye(support[0].shape[0]).to(support[0].device)
        for graph in support:
            k_1_order = graph  # 1 order
            graph_ordered.append(k_1_order * mask)
            # Higher orders via repeated multiplication,
            # e.g., order = 3, k=[2, 3]; order = 2, k=[2]
            for k in range(2, self.k_s+1):
                k_1_order = torch.matmul(graph, k_1_order)
                graph_ordered.append(k_1_order * mask)
        # get st localed graph: repeat each adjacency across the k_t temporal
        # steps so one matmul covers the whole temporal window.
        st_local_graph = []
        for graph in graph_ordered:
            graph = graph.unsqueeze(-2).expand(-1, self.k_t, -1)
            graph = graph.reshape(graph.shape[0], graph.shape[1] * graph.shape[2])
            st_local_graph.append(graph)  # [num_nodes, kernel_size x num_nodes]
        return st_local_graph  # [order, num_nodes, kernel_size x num_nodes]

    def forward(self, X, dynamic_graph, static_graph):
        # X: [bs, seq, nodes, feat]
        # Sliding temporal windows of size k_t, stride 1.
        X = X.unfold(1, self.k_t, 1).permute(0, 1, 2, 4, 3)  # [bs, seq, num_nodes, ks, num_feat]
        batch_size, seq_len, num_nodes, kernel_size, num_feat = X.shape  # seq_len is changing
        # support: assemble the list of graphs actually in use
        support = []
        # predefined graph
        if self.use_predefined_graph:
            support = support + self.pre_defined_graph
        # dynamic graph
        if self.use_dynamic_hidden_graph:
            # k-order is calculated in the dynamic_graph_constructor component
            support = support + dynamic_graph
        # predefined graphs and static hidden graphs
        if self.use_static__hidden_graph:
            support = support + self.get_graph(static_graph)
        # parallelize: fold the temporal window into the feature dim
        X = X.reshape(batch_size, seq_len, num_nodes, kernel_size * num_feat)
        out = self.fc_list_updt(X)  # batch_size, seq_len, num_nodes, kernel_size * hidden_dim
        out = self.activation(out)
        out = out.view(batch_size, seq_len, num_nodes, kernel_size, num_feat)
        # X_0: temporal average over the window.  TODO: test last
        X_0 = torch.mean(out, dim=-2)
        X_k = out.transpose(-3, -2).reshape(batch_size, seq_len, kernel_size*num_nodes, num_feat)  # batch_size, seq_len, kernel_size x num_nodes, hidden_dim
        hidden = self.gconv(support, X_k, X_0)  # Nx3N 3NxD -> NxD: batch_size, seq_len, num_nodes, hidden_dim
        return hidden
|
<reponame>tuandnvn/ecat_learning
# from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import time
import datetime
import numpy as np
import tensorflow as tf
#from tf.nn import rnn, rnn_cell
import codecs
import collections
import random
from collections import Counter
import argparse
import xml.etree.ElementTree as ET
import glob
import nltk
from nltk.stem.porter import PorterStemmer
import sys
import pickle
import os
import shutil
import os.path
from sklearn.metrics import confusion_matrix
def print_and_log(log_str):
    """Echo *log_str* to stdout and record it at INFO level."""
    for emit in (print, logging.info):
        emit(log_str)
# Keys used in each per-session dict built by read_project_data().
SESSION_NAME = "session_name"
SESSION_DATA = "session_data"
SESSION_EVENTS = "session_events"

# Shared stemmer used to normalise event verbs.
ps = PorterStemmer()

# Label vocabularies: string label -> integer id.
role_to_id = {'None': 0, 'Subject': 1, 'Object': 2, 'Theme': 3}
event_to_id = {'None': 0, 'push': 1, 'pull': 2, 'roll': 3, 'slide': 4}
prep_to_id = {'None': 0, 'Past': 1, 'From': 2, 'To': 3}

# Inverse lookups: integer id -> string label.
# Bug fix: dict.iteritems() exists only on Python 2; dict.items() works on
# both Python 2 and 3, so the module no longer crashes on import under Py3.
id_to_role = {value: key for key, value in role_to_id.items()}
id_to_event = {value: key for key, value in event_to_id.items()}
id_to_prep = {value: key for key, value in prep_to_id.items()}
def from_str_labels_to_id_labels(rig_role, glyph_role_1, glyph_role_2, event, prep):
    """Map the five string labels of an event to their integer ids.

    Uses the module-level role_to_id / event_to_id / prep_to_id tables;
    raises KeyError for an unknown label.
    """
    return (
        role_to_id[rig_role],
        role_to_id[glyph_role_1],
        role_to_id[glyph_role_2],
        event_to_id[event],
        prep_to_id[prep],
    )
def from_id_labels_to_str_labels(rig_role, glyph_role_1, glyph_role_2, event, prep):
    """Map the five integer ids of an event back to their string labels.

    Inverse of from_str_labels_to_id_labels; raises KeyError for an
    unknown id.
    """
    return (
        id_to_role[rig_role],
        id_to_role[glyph_role_1],
        id_to_role[glyph_role_2],
        id_to_event[event],
        id_to_prep[prep],
    )
# Accumulators filled in by read_project_data(): project name -> sessions,
# and the per-frame feature vector length (set from the first frame seen).
project_data = {}
data_length = None
# For each data sample, we have to learn an output of 5 values
# (role_to_id, role_to_id, role_to_id, event_to_id, prep_to_id)
label_classes = [role_to_id, role_to_id, role_to_id, event_to_id, prep_to_id]
num_labels = len(label_classes)
def read_project_data():
    """Parse every data/*.txt annotation file into the module-level
    ``project_data`` dict (project name -> list of session dicts).

    Each session dict holds the session name, one flat float vector per
    frame (translated so every component is relative to the first value
    of the first frame), and the per-window event label tuples.
    Side effect: sets the global ``data_length`` from the first frame seen.

    FIXES vs. the original: Python-2-only ``xrange`` replaced by ``range``
    and ``== None`` replaced by ``is None``; behaviour is unchanged.
    """
    global data_length
    for file_name in glob.glob('data/*.txt'):
        # Strip the directory prefix and the '.txt' suffix to get the
        # project name (POSIX-style paths, as in the original).
        project_name = file_name[file_name.rfind('/') + 1:]
        project_name = project_name[:len(project_name) - 4]
        project_data[project_name] = []
        tree = ET.parse(file_name)
        doc = tree.getroot()
        for session_element in doc.findall('session'):
            session_data = {}
            session_name = session_element.attrib['name']
            print(session_name)
            session_data[SESSION_NAME] = session_name
            session_data[SESSION_DATA] = []
            session_data[SESSION_EVENTS] = []
            for frame_element in session_element.findall('data/frame'):
                point_data = []
                for object_point_element in frame_element.findall('o'):
                    for s in object_point_element.text.split(','):
                        point_data.append(float(s))
                if data_length is None:
                    data_length = len(point_data)
                session_data[SESSION_DATA].append(point_data)
            # Move all points to the same coordinate frame: subtract the
            # first value of the first frame from every component.
            origin = session_data[SESSION_DATA][0][0]
            session_data[SESSION_DATA] = [
                [frame[t] - origin for t in range(data_length)]
                for frame in session_data[SESSION_DATA]
            ]
            for event_element in session_element.findall('events/event'):
                event_str = {}
                event_str['start'] = event_element.attrib['start']
                event_str['end'] = event_element.attrib['end']
                rig_role, glyph_role_1, glyph_role_2, event, prep = event_element.text.split(',')
                # Stem the verb so inflected forms collapse to one event id.
                event = ps.stem(event)
                event_str['label'] = from_str_labels_to_id_labels(
                    rig_role, glyph_role_1, glyph_role_2, event, prep)
                session_data[SESSION_EVENTS].append(event_str)
            project_data[project_name].append(session_data)
def generate_data(data, config):
    """Generate a training set and a testing set of session data.

    For every project named in ``config.train_project_names`` the project's
    sessions are shuffled (random.sample) and sliced according to
    ``config.session_training_percentage`` / ``config.session_testing_percentage``
    (each a (start, end) pair of fractions in [0, 1]).

    Returns (training_data, testing_data), two flat lists of session dicts.

    BUG FIX: the original body read the module-level ``project_data``
    instead of its ``data`` argument. The only caller passes
    ``project_data``, so using the parameter is behaviour-compatible
    while making the function usable on any dataset.
    """
    training_data = []
    testing_data = []
    # Flatten the data (collapse the project/session hierarchy into a list
    # of session_data dicts).
    for name in config.train_project_names:
        sessions = random.sample(data[name], len(data[name]))
        print(len(sessions))
        lo, hi = config.session_training_percentage
        training_data += sessions[int(lo * len(sessions)):int(hi * len(sessions))]
        lo, hi = config.session_testing_percentage
        testing_data += sessions[int(lo * len(sessions)):int(hi * len(sessions))]
    return (training_data, testing_data)
def check_validity_label(labels):
    """Return True when the 5-tuple (rig_role, glyph1, glyph2, event, prep)
    of integer label ids is internally consistent.

    Rules (ids per role_to_id / event_to_id / prep_to_id):
      * event is None (0)  -> every other label must also be 0;
      * no two objects may play the same non-None role;
      * a Theme role (3) requires a preposition, and vice versa.

    FIX: Python-2-only ``xrange`` replaced by ``range``; logic unchanged.
    """
    # Event is None -> all other values must be None.
    if labels[3] == 0:
        return all(labels[i] == 0 for i in range(5))
    # If two objects play the same (non-None) role, the tuple is invalid.
    for i in range(3):
        for j in range(i + 1, 3):
            if labels[i] == labels[j] and labels[i] != 0:
                return False
    # If there is a Theme, there needs to be a Preposition and vice versa.
    has_a_theme = any(labels[i] == 3 for i in range(3))
    if has_a_theme != (labels[4] != 0):
        return False
    return True
'''A function to generate pairs of batch data (x, y).
Inputs
data: a list of session_data
data_point_size: vector feature size (63)
num_steps: a fixed number of frames per event window (this should be the
original num_steps - 1 when data-point differences are used instead of
raw data points)
hop_step: a fixed frame offset between two consecutive event windows
Outputs
Yields batch_size data samples at a time, each a chain of num_steps points:
x: [batch_size, num_steps, data_point_size]
y: [batch_size, num_labels]
'''
def gothrough(data, data_point_size, batch_size, num_steps, hop_step):
    """Yield (x, y) batches built from the sessions in *data*.

    x: [batch_size, num_steps, data_point_size] float32 frames;
    y: [batch_size, num_labels] int32 label ids. Sessions whose window
    count does not match their event count are reported and skipped.
    """
    samples = 0 # Number of samples of interpolating
    #counters = [Counter() for _ in xrange(num_labels)]
    sample_counter = 0
    # First pass: count usable samples so the numpy buffers below can be
    # allocated in one shot.
    for session_data in data:
        # This should be the correct number of sample for each session
        # But it could be different with the number of events in the session
        # There is some difference in the way events in session is created
        # For example, when create and annotate a session having frame from 0 to 79
        # I actually create events [0,20] to [60,80] so the right hand side brace should be
        # [0,20) -> Excluding last frame
        correct_no_samples = ( len(session_data[SESSION_DATA]) - num_steps ) // hop_step + 1
        # print ('session name = %s' % session_data[SESSION_NAME])
        # print ('len %d ' % len(session_data[SESSION_DATA]))
        # print ('correct %d ' % correct_no_samples)
        if correct_no_samples != len(session_data[SESSION_EVENTS]):
            # A step to find session that has problem to fix
            print (session_data[SESSION_NAME])
            print ("correct_no_samples " + str(correct_no_samples))
            print ("session_data_events " + str(len(session_data[SESSION_EVENTS])))
            print ("=========================PROBLEMATIC========================")
        else:
            samples += len(session_data[SESSION_EVENTS])
    print('Total number of samples' + str(samples))
    # Flat frame buffer (samples * num_steps rows) plus one label row per sample.
    interpolated_data = np.zeros([samples * num_steps, data_point_size], dtype=np.float32)
    interpolated_lbls = np.zeros([samples, num_labels], dtype=np.int32)
    # Second pass: copy each hop_step-strided window of frames and its labels.
    for session_data in data:
        session_data_vals = session_data[SESSION_DATA]
        session_data_events = session_data[SESSION_EVENTS]
        correct_no_samples = ( len(session_data_vals) - num_steps ) // hop_step + 1
        if correct_no_samples == len(session_data_events):
            for i in range(len(session_data_events)):
                for j in range(num_steps):
                    interpolated_data[( ( sample_counter + i ) * num_steps + j)] =\
                        session_data_vals[i * hop_step + j]
                event_labels = session_data[SESSION_EVENTS][i]['label']
                # for i, event_label in enumerate(event_labels):
                #     counters[i][event_label] += 1
                interpolated_lbls[sample_counter + i] = list(event_labels)
            sample_counter += len(session_data_events)
    # Number of epoch, each epoch has a batch_size of data
    epoch_size = samples // batch_size
    # Divide the first dimension from samples * num_steps -> (samples, num_steps)
    rearranged_data = interpolated_data.reshape((samples, num_steps, data_point_size))
    # Divide first dimenstion from samples -> epoch_size * batch_size (remove remaining)
    rearranged_data = rearranged_data[:epoch_size * batch_size].\
        reshape((epoch_size, batch_size, num_steps, data_point_size))
    rearranged_lbls = interpolated_lbls[:epoch_size * batch_size].\
        reshape((epoch_size, batch_size, num_labels))
    for i in range(epoch_size):
        x = rearranged_data[i, :, :, :]
        y = rearranged_lbls[i, :, :]
        yield (x, y)
class LSTM_CRF(object):
    """A model to recognize events recorded in 3d motions.

    One independent multi-layer LSTM per output label; the last hidden
    state of each LSTM feeds its own softmax output layer, and the five
    per-label losses are summed into a single cost. NOTE(review): despite
    the class name, no CRF transition factors are implemented here.
    """
    def __init__(self, is_training, config):
        # NOTE(review): the whole graph is pinned to '/gpu:3' -- confirm
        # the target machine actually has at least 4 GPUs.
        with tf.device('/gpu:3'):
            self.batch_size = batch_size = config.batch_size
            self.num_steps = num_steps = config.num_steps
            self.n_input = n_input = config.n_input
            self.label_classes = label_classes = config.label_classes
            self.n_labels = len(self.label_classes)
            self.hop_step = config.hop_step
            size = config.hidden_size
            # Input data and labels should be set as placeholders
            self._input_data = tf.placeholder(tf.float32, [batch_size, num_steps, n_input])
            self._targets = tf.placeholder(tf.int32, [batch_size, self.n_labels])
            # self.n_labels cells for self.n_labels outputs
            lstm_cells = [tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias = 0.0, state_is_tuple=True)\
                for _ in xrange(self.n_labels)]
            # DropoutWrapper is a decorator that adds Dropout functionality
            if is_training and config.keep_prob < 1:
                lstm_cells = [tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=config.keep_prob)\
                    for lstm_cell in lstm_cells]
            cells = [tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers, state_is_tuple=True)\
                for lstm_cell in lstm_cells]
            # Initial states of the cells
            # cell.state_size = config.num_layers * 2 * size
            # Size = self.n_labels x ( batch_size x cell.state_size )
            self._initial_state = [cell.zero_state(batch_size, tf.float32) for cell in cells]
            # Transformation of input to a list of num_steps data points
            inputs = tf.transpose(self._input_data, [1, 0, 2]) #(num_steps, batch_size, n_input)
            inputs = tf.reshape(inputs, [-1, n_input]) # (num_steps * batch_size, n_input)
            # Shared linear projection from raw features into the LSTM size.
            with tf.variable_scope("hidden"):
                weight = tf.get_variable("weight", [n_input, size])
                bias = tf.get_variable("bias", [size])
                inputs = tf.matmul(inputs, weight) + bias
            inputs = tf.split(0, num_steps, inputs) # num_steps * ( batch_size, size )
            outputs_and_states = []
            # A list of n_labels values
            # Each value is (output, state)
            # output is of size: num_steps * ( batch_size, size )
            # state is of size: ( batch_size, cell.state_size )
            # outputs_and_states = [tf.nn.rnn(cells[i], inputs, initial_state = self._initial_state[i])\
            #                       for i in xrange(self.n_labels)]
            for i in xrange(self.n_labels):
                with tf.variable_scope("lstm" + str(i)):
                    output_and_state = tf.nn.rnn(cells[i], inputs, initial_state = self._initial_state[i])
                    outputs_and_states.append(output_and_state)
            # n_labels x ( batch_size, size ) -- last-step output only
            outputs = [output_and_state[0][-1]\
                for output_and_state in outputs_and_states]
            # n_labels x ( batch_size, cell.state_size )
            self._final_state = [output_and_state[1]\
                for output_and_state in outputs_and_states]
            cost = 0
            # self.n_labels x batch_size
            max_logits = []
            # self.n_labels x ( batch_size, n_classes )
            logits = []
            role_scope = None
            for i in xrange(self.n_labels):
                label_class = label_classes[i]
                n_classes = len(label_class)
                with tf.variable_scope("output" + str(i)):
                    weight = tf.get_variable("weight", [size, n_classes])
                    bias = tf.get_variable("bias", [n_classes])
                    # ( batch_size, n_classes )
                    logit = tf.matmul(outputs[i], weight) + bias
                    # batch_size
                    max_logit = tf.argmax(logit, 1)
                max_logits.append(max_logit)
                # logits
                logits.append(logit)
            # Exposed for inspection in run_epoch (third label head's logits).
            self._debug = logits[2]
            # batch_size
            loss = tf.nn.seq2seq.sequence_loss_by_example(
                [logits[i] for i in xrange(self.n_labels)],
                [self._targets[:,i] for i in xrange(self.n_labels)],
                [tf.ones([batch_size]) for i in xrange(self.n_labels)])
            # losses = [tf.nn.seq2seq.sequence_loss_by_example(
            #     [logits[i]],
            #     [self._targets[:,i]],
            #     [tf.ones([batch_size])]) for i in xrange(self.n_labels)]
            # self.n_labels
            # self._cost = cost = [tf.reduce_sum(loss) / batch_size for loss in losses]
            self._cost = cost = tf.reduce_sum(loss) / batch_size
            if is_training:
                self._lr = tf.Variable(0.0, trainable=False)
                tvars = tf.trainable_variables()
                self._train_op = []
                # for i in xrange(self.n_labels):
                #     grads, _ = tf.clip_by_global_norm(tf.gradients(cost[i], tvars),
                #                                       config.max_grad_norm)
                #     optimizer = tf.train.GradientDescentOptimizer(self.lr)
                #     self._train_op.append(optimizer.apply_gradients(zip(grads, tvars)))
                # Clip the global gradient norm before applying SGD updates.
                grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                    config.max_grad_norm)
                optimizer = tf.train.GradientDescentOptimizer(self.lr)
                self._train_op = optimizer.apply_gradients(zip(grads, tvars))
            else:
                correct_preds = [tf.equal(tf.cast(max_logits[i], tf.int32), self._targets[:,i]) \
                    for i in xrange(self.n_labels)]
                # self._test_op = max_logits
                # Return number of correct predictions as well as predictions
                self._test_op = ([tf.cast(max_logits[i], tf.int32) for i in xrange(self.n_labels)],
                    [tf.reduce_sum (tf.cast(correct_pred, tf.float32)) / batch_size \
                    for correct_pred in correct_preds])
            self._saver = saver = tf.train.Saver()
    def assign_lr(self, session, lr_value):
        # Learning rate lives in the graph so it can be decayed between epochs.
        session.run(tf.assign(self.lr, lr_value))
    @property
    def debug(self):
        return self._debug
    @property
    def saver(self):
        return self._saver
    @property
    def input_data(self):
        return self._input_data
    @property
    def targets(self):
        return self._targets
    @property
    def initial_state(self):
        return self._initial_state
    @property
    def cost(self):
        return self._cost
    @property
    def final_state(self):
        return self._final_state
    @property
    def lr(self):
        return self._lr
    @property
    def train_op(self):
        return self._train_op
    @property
    def test_op(self):
        return self._test_op
def run_epoch(session, m, data, eval_op, verbose=False, is_training=True):
    """Runs the model on the given data.

    Feeds every (x, y) batch from gothrough() through the graph, carrying
    the LSTM states across batches. In training mode eval_op is the train
    op; in test mode eval_op is (predictions, per-label accuracies) and
    extra statistics (validity counts, confusion matrices, exact-match
    accuracy) are accumulated and printed.
    Returns the perplexity exp(total cost / iterations).
    """
    start_time = time.time()
    # costs = np.zeros(len(m.label_classes))
    costs = 0
    evals = np.zeros(len(m.label_classes))
    cost_iters = 0
    eval_iters = 0
    state = [session.run(s) for s in m.initial_state]
    # For each label class, create a confusion matrix
    confusion_matrixs = [np.zeros((len(label_classes[i]), len(label_classes[i])), dtype=np.int)
        for i in xrange(len(m.label_classes))]
    total_correct_pred = 0
    # Counts of structurally valid/invalid predicted tuples (check_validity_label).
    valid_labels = {True: 0, False: 0}
    for step, (x, y) in enumerate( gothrough(data, m.n_input, m.batch_size, m.num_steps, m.hop_step) ):
        feed_dict = {}
        feed_dict[m.input_data] = x
        feed_dict[m.targets] = y
        # Carry each label-LSTM's state over from the previous batch.
        for i in xrange(len(m.initial_state)):
            feed_dict[m.initial_state[i]] = state[i]
        debug_val, cost, state, eval_val = session.run([m.debug, m.cost, m.final_state, eval_op], feed_dict)
        if not is_training:
            # Unpack the predictions and cost values
            y_pred, eval_val = eval_val
        costs += cost
        cost_iters += 1
        eval_iters += 1
        if not is_training:
            # print('-----------')
            # print(y)
            # print('===========')
            # print(eval_val)
            evals += eval_val
            # A sample counts as fully correct only when all labels match.
            correct_pred = np.sum(np.all([np.equal(y_pred[i], y[:,i]) \
                for i in xrange(len(m.label_classes))], axis = 0))
            total_correct_pred += correct_pred
            # self.n_label x m.batch_size
            y_pred_array = np.array(y_pred)
            for i in xrange(m.batch_size):
                valid = check_validity_label( y_pred_array[:,i] )
                valid_labels[valid] += 1
                if verbose and not valid:
                    print(from_id_labels_to_str_labels(*y_pred_array[:,i]))
            epoch_confusion_matrixs = [confusion_matrix(y[:,i], y_pred[i], label_classes[i].values())
                for i in xrange(len(m.label_classes))]
            for i in xrange(len(m.label_classes)):
                confusion_matrixs[i] += epoch_confusion_matrixs[i]
        # if verbose and step % 30 == 0:
        #     print('---')
        #     print("cost_iters %d, eval_iters %d, Step %d" % (cost_iters, eval_iters, step))
        #     print("Rig: cost %.3f, costs %.3f, perplexity: %.3f" %
        #           (cost[0], costs[0], np.exp(costs[0] / cost_iters)))
        #     print("Glyph 1: cost %.3f, costs %.3f, perplexity: %.3f" %
        #           (cost[1], costs[1], np.exp(costs[1] / cost_iters)))
        #     print("Glyph 2: cost %.3f, costs %.3f, perplexity: %.3f" %
        #           (cost[2], costs[2], np.exp(costs[2] / cost_iters)))
        #     print("Event: cost %.3f, costs %.3f, perplexity: %.3f" %
        #           (cost[3], costs[3], np.exp(costs[3] / cost_iters)))
        #     print("Preposition: cost %.3f, costs %.3f, perplexity: %.3f" %
        #           (cost[4], costs[4], np.exp(costs[4] / cost_iters)))
        if verbose and step % 30 == 0 and step > 0:
            print(debug_val)
            print_and_log("cost %.3f, costs %.3f, iters %d, Step %d, perplexity: %.3f" %
                (cost, costs, cost_iters, step, np.exp(costs / cost_iters)))
    if not is_training:
        print_and_log("Number of valid/Number of invalid = %d/%d" %
            (valid_labels[True], valid_labels[False]))
        print_and_log("Number of correct predictions = %d, Percentage = %.3f" %
            (total_correct_pred, total_correct_pred/ (eval_iters * m.batch_size) ))
        print_and_log("Rig accuracy = %.5f" % (evals[0] / eval_iters))
        if verbose:
            print_and_log("-- Confusion matrix --")
            print_and_log(confusion_matrixs[0])
        print_and_log("Glyph 1 accuracy = %.5f" % (evals[1] / eval_iters))
        if verbose:
            print_and_log("-- Confusion matrix --")
            print_and_log(confusion_matrixs[1])
        print_and_log("Glyph 2 accuracy = %.5f" % (evals[2] / eval_iters))
        if verbose:
            print_and_log("-- Confusion matrix --")
            print_and_log(confusion_matrixs[2])
        print_and_log("Event accuracy = %.5f" % (evals[3] / eval_iters))
        if verbose:
            print_and_log("-- Confusion matrix --")
            print_and_log(confusion_matrixs[3])
        print_and_log("Preposition accuracy = %.5f" % (evals[4] / eval_iters))
        if verbose:
            print_and_log("-- Confusion matrix --")
            print_and_log(confusion_matrixs[4])
    return np.exp(costs / cost_iters)
# Train
# Statistics
# Counter({1: 1888, 0: 835}) 72 %
# Counter({2: 1182, 3: 759, 1: 418, 0: 364}) 42%
# Counter({3: 1175, 2: 693, 0: 469, 1: 386}) 38%
# Counter({3: 1164, 2: 603, 1: 516, 4: 409, 0: 31}) 43%
# Counter({2: 838, 0: 783, 1: 681, 3: 421}) 30 %
'''Train on a subset of sessions for each project'''
'''Training_percentages = Percentage of training sessions/ Total # of sessions'''
class Simple_Train_Test_Config(object):
    """Use every project for both training and testing; within each
    project the (shuffled) sessions are split 60% train / 40% test."""
    def __init__(self, project_data):
        names = project_data.keys()
        self.train_project_names = names
        self.test_project_names = names
        # (start, end) fractions of each project's shuffled session list.
        self.session_training_percentage = (0, 0.6)
        self.session_testing_percentage = (0.6, 1)
'''Only train on a subset of projects'''
'''For each training project, train on all sessions'''
'''Training_percentages = 1'''
class Partial_Train_Test_Config(object):
    # Using a subset of projects for training; the remaining projects are
    # held out entirely for testing.
    train_project_names = ['pullpast', 'pullfrom', 'pushfrom', 'pushto',
                           'rollpast', 'rollto', 'selfrollpast', 'selfrollto',
                           'selfslidefrom']
    test_project_names = ['pullto', 'pushpast', 'rollfrom', 'selfrollfrom',
                          'selfslidepast', 'selfslideto']
    # All sessions of each selected project are used on both sides.
    session_training_percentage = (0, 1)
    session_testing_percentage = (0, 1)
# Run-mode flags; default mode is to train and test at the same time.
# Overridden from sys.argv in the __main__ block below.
TRAIN = 'TRAIN'
TEST = 'TEST'
mode = TRAIN
if __name__ == '__main__':
    # ========================================================================
    # ========================================================================
    # ===========================SETUP TRAIN TEST=============================
    # NOTE(review): the parser is built but parse_args() is never called;
    # the mode and paths below are read straight from sys.argv instead.
    parser = argparse.ArgumentParser(description = 'Train and test using 3d data from ECAT')
    parser.add_argument('--sum', dest='accumulate', action='store_const',
                        const=sum, default=max,
                        help='sum the integers (default: find the max)')
    # argv[1] selects the run mode ('train' or 'test'); anything else keeps
    # the default TRAIN mode.
    if len(sys.argv) > 1:
        train_test_config = sys.argv[1]
        if train_test_config == 'train':
            mode = TRAIN
        if train_test_config == 'test' :
            mode = TEST
    if mode == TRAIN:
        # argv[2] (optional) is the output/log directory; otherwise a
        # timestamped folder under logs/ is created.
        if len(sys.argv) > 2:
            log_dir = sys.argv[2]
        else:
            current_time = datetime.datetime.now()
            time_str = '%s_%s_%s_%s_%s_%s' % (current_time.year, current_time.month, current_time.day,
                current_time.hour, current_time.minute, current_time.second)
            log_dir = 'logs/run_' + time_str
        print('Train and output into directory ' + log_dir)
        os.makedirs(log_dir)
        logging.basicConfig(filename = log_dir + '/logs.log',level=logging.DEBUG)
        # Copy the current executed py file to log (To make sure we can replicate the experiment with the same code)
        shutil.copy(os.path.realpath(__file__), log_dir)
    if mode == TEST:
        # In test mode argv[2] is required: path of the checkpoint to load.
        if len(sys.argv) > 2:
            model_path = sys.argv[2]
            print('Test using model ' + model_path)
        else:
            sys.exit("learning.py test model_path")
    # ========================================================================
    # ========================================================================
    # =============================READING INPUT =============================
    # Cached train/test split; delete this file to force a re-read of data/.
    SIMPLE_SPLIT = 'simple_train_test.pkl'
    if os.path.isfile(SIMPLE_SPLIT) :
        # Load the file
        logging.info("Load file into training and testing data sets " + SIMPLE_SPLIT)
        with open(SIMPLE_SPLIT, 'rb') as f:
            t = pickle.load(f)
            train = t['train']
            test = t['test']
            # The pickle path skips read_project_data(), so restore the
            # known per-frame feature size here.
            data_length = 63
    else:
        logging.info("Read training and testing data sets from data directory ")
        read_project_data()
        print("data_length " + str(data_length))
        train, test = generate_data(project_data, Simple_Train_Test_Config(project_data))
        with open(SIMPLE_SPLIT, 'wb') as f:
            pickle.dump({'train': train,
                         'test': test},
                        f, pickle.HIGHEST_PROTOCOL)
            print_and_log('----Done saving training and testing data---')
    print_and_log('Train size ' + str(len(train)))
    print_and_log('Test size ' + str(len(test)))
    class SmallConfig(object):
        """Small config: hyper-parameters for training and evaluation."""
        init_scale = 0.1 # Uniform init range for the model weights
        learning_rate = 1 # Set this value higher without norm clipping
                          # might make the cost explodes
        max_grad_norm = 5 # The maximum permissible norm of the gradient
        num_layers = 1 # Number of LSTM layers
        num_steps = 20 # Divide the data into num_steps segment
        hidden_size = 200 # the number of LSTM units
        max_epoch = 10 # The number of epochs trained with the initial learning rate
        max_max_epoch = 250 # Number of running epochs
        keep_prob = 0.6 # Drop out keep probability, = 1.0 no dropout
        lr_decay = 0.980 # Learning rate decay
        batch_size = 40 # We could actually still use batch_size for convenient
        n_input = data_length # Number of float values for each frame
        label_classes = label_classes # Number of classes, for each output label
        hop_step = 5 # Hopping between two samples
        test_epoch = 20 # Test after these many epochs
    config = SmallConfig()
    # Intermediate-test config: identical sizes but no dropout.
    intermediate_config = SmallConfig()
    intermediate_config.keep_prob = 1
    # Final-eval config: no dropout and batch size 1.
    eval_config = SmallConfig()
    eval_config.keep_prob = 1
    eval_config.batch_size = 1
    logging.info("Train Configuration")
    for attr in dir(config):
        # Not default properties
        if attr[:2] != '__':
            log_str = "%s = %s" % (attr, getattr(config, attr))
            logging.info(log_str)
    logging.info("Evaluation Configuration")
    for attr in dir(eval_config):
        # Not default properties
        if attr[:2] != '__':
            log_str = "%s = %s" % (attr, getattr(eval_config, attr))
            logging.info(log_str)
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        # Three views over the SAME variables (reuse=True): the training
        # model, an intermediate test model (no dropout) and the final
        # eval model (batch size 1).
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = LSTM_CRF(is_training=True, config=config)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            m_intermediate_test = LSTM_CRF(is_training=False, config=intermediate_config)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mtest = LSTM_CRF(is_training=False, config=eval_config)
        if mode == TRAIN:
            tf.initialize_all_variables().run()
            # print('Go through train data')
            # for step, (x, y) in enumerate( gothrough(train, 63, 10, 19, 5) ):
            #     pass
            # print('Go through test data')
            # for step, (x, y) in enumerate( gothrough(test, 63, 10, 19, 5) ):
            #     pass
            random.seed()
            random.shuffle(train)
            print_and_log('---------------BASELINE-------------')
            test_perplexity = run_epoch(session, m_intermediate_test, test, m_intermediate_test.test_op,
                                        is_training=False,
                                        verbose=False)
            print_and_log("Test Perplexity on Test: %s" % str(test_perplexity))
            print_and_log('----------------TRAIN---------------')
            for i in range(config.max_max_epoch):
                print_and_log('-------------------------------')
                start_time = time.time()
                # Exponential decay kicks in after the first max_epoch epochs.
                lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)
                print_and_log("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session, m, train, m.train_op,
                                             verbose=True)
                print_and_log("Epoch: %d Train Perplexity: %s" % (i + 1, str(train_perplexity)))
                print_and_log("Time %.3f" % (time.time() - start_time) )
                print_and_log('-------------------------------')
                if i % config.test_epoch == 0:
                    print_and_log('----------Intermediate test -----------')
                    # Run test on train
                    print_and_log('Run model on train data')
                    test_perplexity = run_epoch(session, m_intermediate_test, train, m_intermediate_test.test_op,
                                                is_training=False, verbose = False)
                    print_and_log('Run model on test data')
                    test_perplexity = run_epoch(session, m_intermediate_test, test, m_intermediate_test.test_op,
                                                is_training=False, verbose = False)
                    # Save the variables to disk.
                    model_path = m.saver.save(session, log_dir + "/model.ckpt")
                    print_and_log("Model saved in file: %s" % model_path)
        if mode == TEST:
            m.saver.restore(session, model_path)
            print_and_log("Restore model saved in file: %s" % model_path)
            print_and_log('--------------TEST--------------')
            # Run test on train
            print_and_log('Run model on train data')
            test_perplexity = run_epoch(session, mtest, train, mtest.test_op,
                                        is_training=False, verbose=True)
            print_and_log('Run model on test data')
            test_perplexity = run_epoch(session, mtest, test, mtest.test_op,
                                        is_training=False, verbose=True)
|
<reponame>openprocurement/market.prozorro.ua
from django_filters import rest_framework as filters
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import status, viewsets
from rest_framework.filters import OrderingFilter
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from criteria.permissions import IsAdminOrReadOnlyPermission
from profiles import models as profile_models
from profiles import serializers as profile_serializers
class ProfileFilter(filters.FilterSet):
    """Query-string filters for Profile list endpoints."""
    # Case-insensitive substring matching on classification fields.
    classification_id = filters.CharFilter(lookup_expr='icontains')
    classification_description = filters.CharFilter(lookup_expr='icontains')
    # NOTE(review): 'autor' (sic) is kept as-is -- it is the public query
    # parameter name and presumably mirrors a model field; confirm before
    # renaming.
    autor = filters.CharFilter(lookup_expr='icontains')
    criteria_requirementGroups_requirements_relatedCriteria_id = filters.UUIDFilter(  # noqa
        field_name='criteria_requirementGroups_requirements_relatedCriteria_id',  # noqa
        method='filter_related_criteria'
    )

    class Meta:
        model = profile_models.Profile
        fields = (
            'classification_id', 'classification_description', 'autor',
            'criteria_requirementGroups_requirements_relatedCriteria_id',
            'status'
        )

    def filter_related_criteria(self, queryset, name, value):
        # Traverse criteria -> requirement groups -> requirements -> related
        # criteria; distinct() removes duplicate rows produced by the joins.
        return queryset.filter(
            criteria__requirement_groups__requirements__related_criteria__id=value  # noqa
        ).distinct()
class ProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for profiles.

    Mutating operations (update/destroy) are guarded by the owner/token
    pair issued at creation time; destroy is a soft delete that flips
    the profile status to 'hidden'.
    """
    queryset = profile_models.Profile.objects.all()
    permission_classes = (IsAuthenticated, IsAdminOrReadOnlyPermission)
    serializer_class = profile_serializers.ProfileCreateSerializer
    filter_backends = (DjangoFilterBackend, OrderingFilter)
    filterset_class = ProfileFilter
    ordering_fields = '__all__'

    def get_serializer_class(self):
        # Reads and create use the full serializer; edits use the
        # restricted edit serializer.
        if self.action in ('list', 'retrieve', 'create'):
            return profile_serializers.ProfileCreateSerializer
        return profile_serializers.ProfileEditSerializer

    def _access_error(self, request):
        """Validate the 'access' block of *request* against the addressed
        profile.

        Returns a 400 Response when the access data is missing or wrong,
        or None when the caller may proceed. The missing-access check
        deliberately runs before the object lookup, preserving the
        original behaviour (a request without access data gets 400 even
        for an unknown profile). Extracted here because update() and
        destroy() previously carried two identical copies of this logic.
        """
        access_data = request.data.get('access')
        if access_data is None:
            return Response(
                {'detail': 'Missing access data'},
                status=status.HTTP_400_BAD_REQUEST
            )
        instance = self.get_object()
        if access_data.get('owner') != instance.author or \
                access_data.get('token') != instance.access_token.hex:
            return Response(
                {'detail': 'Wrong access data'},
                status=status.HTTP_400_BAD_REQUEST
            )
        return None

    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        instance = serializer.save()
        serializer = self.get_serializer(instance=instance)
        # The owner/token pair is returned once, at creation time; clients
        # must present it on subsequent update/destroy calls.
        return Response(
            {
                'access': {
                    'token': instance.access_token.hex,
                    'owner': instance.author,
                },
                'data': serializer.data
            },
            status=status.HTTP_201_CREATED
        )

    def update(self, request, *args, **kwargs):
        error = self._access_error(request)
        if error is not None:
            return error
        instance = self.get_object()
        serializer = self.get_serializer(
            instance, data=request.data['data'], partial=True
        )
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        return Response(serializer.data)

    def destroy(self, request, *args, **kwargs):
        error = self._access_error(request)
        if error is not None:
            return error
        # Soft delete: hide the profile instead of removing the row.
        instance = self.get_object()
        instance.status = 'hidden'
        instance.save()
        serializer = profile_serializers.ProfileCreateSerializer(instance)
        return Response(serializer.data, status=status.HTTP_200_OK)
|
<reponame>sissaschool/elementpath
#
# Copyright (c), 2018-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author <NAME> <<EMAIL>>
#
import re
import math
from calendar import isleap, leapdays
from decimal import Decimal
from typing import Optional, Union
###
# Data validation helpers

# A single whitespace char, excluding NBSP (\xa0) from the match.
NORMALIZE_PATTERN = re.compile(r'[^\S\xa0]')
WHITESPACES_PATTERN = re.compile(r'[^\S\xa0]+')  # runs of whitespace; \xa0 (ASCII 160, non-breaking space) never matches
# XML NCName: no leading digit, \w plus dot/hyphen and combining/extender chars.
NCNAME_PATTERN = re.compile(r'^[^\d\W][\w.\-\u00B7\u0300-\u036F\u203F\u2040]*$')
# Optionally prefixed 'prefix:local' qualified name; both parts NCName-shaped.
QNAME_PATTERN = re.compile(
    r'^(?:(?P<prefix>[^\d\W][\w\-.\u00B7\u0300-\u036F\u0387\u06DD\u06DE\u203F\u2040]*):)?'
    r'(?P<local>[^\d\W][\w\-.\u00B7\u0300-\u036F\u0387\u06DD\u06DE\u203F\u2040]*)$',
)
# Extended QName: additionally allows the 'Q{namespace}local' literal form.
EQNAME_PATTERN = re.compile(
    r'^(?:Q{(?P<namespace>[^}]+)}|'
    r'(?P<prefix>[^\d\W][\w\-.\u00B7\u0300-\u036F\u0387\u06DD\u06DE\u203F\u2040]*):)?'
    r'(?P<local>[^\d\W][\w\-.\u00B7\u0300-\u036F\u0387\u06DD\u06DE\u203F\u2040]*)$',
)
# A '%' not followed by two hex digits, i.e. an invalid URI escape.
WRONG_ESCAPE_PATTERN = re.compile(r'%(?![a-fA-F\d]{2})')
# Any XML end-of-line form: CRLF, lone CR or lone LF.
XML_NEWLINES_PATTERN = re.compile('\r\n|\r|\n')
def collapse_white_spaces(s: str) -> str:
    """Collapse runs of whitespace (NBSP excluded) to single spaces and
    strip leading/trailing spaces."""
    collapsed = WHITESPACES_PATTERN.sub(' ', s)
    return collapsed.strip(' ')
def is_idrefs(value: Optional[str]) -> bool:
    """Return True when *value* is a string whose whitespace-separated
    tokens are all NCNames (vacuously True for an empty string)."""
    if not isinstance(value, str):
        return False
    return all(NCNAME_PATTERN.match(token) is not None
               for token in value.split())
###
# Sequence type checking

# One optional space on either side of sequence-type punctuation.
SEQUENCE_TYPE_PATTERN = re.compile(r'\s?([()?*+,])\s?')
def normalize_sequence_type(sequence_type: str) -> str:
    """Normalize whitespace in an XPath sequence-type string: collapse
    inner whitespace, remove spaces around punctuation, then restore the
    canonical ', ' and ') as ' spacing."""
    result = WHITESPACES_PATTERN.sub(' ', sequence_type).strip()
    result = SEQUENCE_TYPE_PATTERN.sub(r'\1', result)
    result = result.replace(',', ', ')
    return result.replace(')as', ') as')
###
# Date/Time helpers

# Days per month, 1-indexed (index 0 unused); second table is for leap years.
MONTH_DAYS = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
MONTH_DAYS_LEAP = [0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def adjust_day(year: int, month: int, day: int):
    """Clamp *day* to the last valid day of the given (year, month)."""
    if month in {4, 6, 9, 11}:
        return min(day, 30)
    if month in {1, 3, 5, 7, 8, 10, 12}:
        return day
    # February: clamp to 29 in leap years, 28 otherwise.
    return min(day, 29) if isleap(year) else min(day, 28)
def days_from_common_era(year: int) -> int:
    """
    Returns the number of days from 0001-01-01 to the provided year. For a
    common era year the days are counted until the last day of December,
    for a BCE year (year <= 0) the days are counted down from the end to
    the 1st of January.
    """
    if year > 0:
        # Proleptic Gregorian rule: leap every 4 years, except centuries
        # not divisible by 400.
        return 365 * year + year // 4 - year // 100 + year // 400
    if year >= -1:
        # Year 0 (1 BCE) and year -1 (2 BCE) are treated as 366-day years.
        return 366 * year
    bce_years = -year - 1
    return -(366 + 365 * bce_years + bce_years // 4 - bce_years // 100 + bce_years // 400)
# Day counts of the fixed Gregorian cycles, precomputed for date arithmetic.
DAYS_IN_4Y = days_from_common_era(4)
DAYS_IN_100Y = days_from_common_era(100)
DAYS_IN_400Y = days_from_common_era(400)
def months2days(year: int, month: int, months_delta: int) -> int:
    """
    Converts a delta of months to a delta of days, counting from the 1st day of the month,
    relative to the year and the month passed as arguments.

    :param year: the reference start year, a negative or zero value means a BCE year \
    (0 is 1 BCE, -1 is 2 BCE, -2 is 3 BCE, etc).
    :param month: the starting month (1-12).
    :param months_delta: the number of months, if negative count backwards.
    """
    if not months_delta:
        return 0

    total_months = month - 1 + months_delta
    target_year = year + total_months // 12
    target_month = total_months % 12 + 1

    # Whole-year day count; the leapdays() window shifts by one year when
    # the start month is past February (the start year's Feb 29 is not crossed).
    if month <= 2:
        y_days = 365 * (target_year - year) + leapdays(year, target_year)
    else:
        y_days = 365 * (target_year - year) + leapdays(year + 1, target_year + 1)

    months_days = MONTH_DAYS_LEAP if isleap(target_year) else MONTH_DAYS
    if target_month >= month:
        # FIX: the original returned `y_days + m_days if y_days >= 0 else
        # y_days + m_days` (and the symmetric form below) -- both branches
        # were identical, so the dead conditionals are removed.
        return y_days + sum(months_days[m] for m in range(month, target_month))
    return y_days - sum(months_days[m] for m in range(target_month, month))
def round_number(value: Union[float, int, Decimal]) -> Union[float, int, Decimal]:
    """Round *value* to the nearest integer with ties going toward
    positive infinity (half-up for positives, half-down for negatives),
    preserving the input type. NaN and infinities pass through unchanged."""
    if math.isnan(value) or math.isinf(value):
        return value
    as_decimal = Decimal(value)
    rounding = 'ROUND_HALF_UP' if as_decimal > 0 else 'ROUND_HALF_DOWN'
    return type(value)(as_decimal.quantize(Decimal('1'), rounding=rounding))
def normalized_seconds(seconds: Decimal) -> str:
    # Decimal.normalize() does not always remove the exponent (e.g. it can
    # yield Decimal('1E+1')), so format at fixed precision and trim the
    # trailing zeros and dot instead.
    text = '{:.6f}'.format(seconds)
    return text.rstrip('0').rstrip('.')
def is_xml_codepoint(cp: int) -> bool:
    """Return True when *cp* is in one of the XML character ranges
    (tab/LF/CR plus the three valid Unicode blocks)."""
    if cp in (0x9, 0xA, 0xD):
        return True
    return (0x20 <= cp <= 0xD7FF
            or 0xE000 <= cp <= 0xFFFD
            or 0x10000 <= cp <= 0x10FFFF)
def ordinal(n: int) -> str:
    """Return the English ordinal string for *n* (1 -> '1st', 22 -> '22nd').

    BUG FIX: teens must be checked on the last two digits, not on the
    whole number -- the original `n in {11, 12, 13}` produced '111st'
    for 111; `n % 100` yields the correct '111th'.
    """
    if n % 100 in {11, 12, 13}:
        return '%dth' % n
    least_significant_digit = n % 10
    if least_significant_digit == 1:
        return '%dst' % n
    elif least_significant_digit == 2:
        return '%dnd' % n
    elif least_significant_digit == 3:
        return '%drd' % n
    else:
        return '%dth' % n
|
<filename>codes/SRN/utils/util.py
import os
import math
from datetime import datetime
import numpy as np
import cv2
from torchvision.utils import make_grid
import random
import torch
import logging
import torch.nn.parallel as P
import math
import torch.nn as nn
####################
# miscellaneous
####################
def get_timestamp():
    """Return the current local time formatted as 'yymmdd-HHMMSS'."""
    now = datetime.now()
    return now.strftime('%y%m%d-%H%M%S')
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of a separate ``os.path.exists`` check,
    which closes the race where another process creates the directory
    between the check and the ``makedirs`` call.
    """
    os.makedirs(path, exist_ok=True)
def mkdirs(paths):
    """Create a single directory or each directory in an iterable of paths."""
    if isinstance(paths, str):
        paths = [paths]
    for each in paths:
        mkdir(each)
def mkdir_and_rename(path):
    """Ensure *path* exists as a fresh, empty directory.

    If *path* already exists it is first renamed to
    '<path>_archived_<timestamp>' (announced on stdout and on the 'base'
    logger) before the new directory is created.
    """
    if os.path.exists(path):
        archived = '{}_archived_{}'.format(path, get_timestamp())
        print('Path already exists. Rename it to [{:s}]'.format(archived))
        logging.getLogger('base').info(
            'Path already exists. Rename it to [{:s}]'.format(archived))
        os.rename(path, archived)
    os.makedirs(path)
def set_random_seed(seed):
    """Seed python's, numpy's and torch's (CPU and all CUDA devices) RNGs."""
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False):
    """Attach a file handler (and optionally a console handler) to a logger.

    The log file is written to '<root>/<phase>_<timestamp>.log' in overwrite
    mode; when *screen* is true, records are mirrored to the console as well.
    """
    target = logging.getLogger(logger_name)
    target.setLevel(level)
    log_formatter = logging.Formatter(
        '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s',
        datefmt='%y-%m-%d %H:%M:%S')
    log_path = os.path.join(root, phase + '_{}.log'.format(get_timestamp()))
    file_handler = logging.FileHandler(log_path, mode='w')
    file_handler.setFormatter(log_formatter)
    target.addHandler(file_handler)
    if screen:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(log_formatter)
        target.addHandler(console_handler)
def setup_old_logger(logger_name, root, phase, level=logging.INFO, screen=False):
    """Set up a logger (legacy duplicate of ``setup_logger``).

    Adds a file handler writing '<root>/<phase>_<timestamp>.log' and, when
    *screen* is true, a console handler.
    """
    formatter = logging.Formatter(
        '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s',
        datefmt='%y-%m-%d %H:%M:%S')
    lgr = logging.getLogger(logger_name)
    lgr.setLevel(level)
    fh = logging.FileHandler(
        os.path.join(root, phase + '_{}.log'.format(get_timestamp())), mode='w')
    fh.setFormatter(formatter)
    lgr.addHandler(fh)
    if not screen:
        return
    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    lgr.addHandler(sh)
####################
# image convert
####################
def forward_chop(img, scale, model, shave=20, min_size=160000):
    """Run *model* over four overlapping quadrants of *img* and stitch the
    upscaled outputs back into one tensor.

    Quadrants overlap by *shave* pixels to hide seam artifacts.  Small inputs
    (h * w < 4 * min_size) are batched through the model directly; larger
    ones recurse into each quadrant.  *scale* is the spatial upscaling factor
    of the model output.

    NOTE(review): assumes img is a 4D (B, C, H, W) tensor — confirm callers.
    """
    # scale = 1 if self.input_large else self.scale[self.idx_scale]
    n_GPUs = min(1, 4)  # effectively always 1
    # height, width
    h, w = img.size()[-2:]
    # Overlapping input quadrants: each covers half the image plus the shave
    # margin on its inner edge.
    top = slice(0, h // 2 + shave)
    bottom = slice(h - h // 2 - shave, h)
    left = slice(0, w // 2 + shave)
    right = slice(w - w // 2 - shave, w)
    x_chops = [torch.cat([
        img[..., top, left],
        img[..., top, right],
        img[..., bottom, left],
        img[..., bottom, right]])]
    y_chops = []
    if h * w < 4 * min_size:
        # Small enough: push the 4 quadrants through the model in batches.
        for i in range(0, 4, n_GPUs):
            x = [x_chop[i:(i + n_GPUs)] for x_chop in x_chops]
            y = P.data_parallel(model, *x, range(n_GPUs))
            if not isinstance(y, list): y = [y]
            if not y_chops:
                y_chops = [[c for c in _y.chunk(n_GPUs, dim=0)] for _y in y]
            else:
                for y_chop, _y in zip(y_chops, y):
                    y_chop.extend(_y.chunk(n_GPUs, dim=0))
    else:
        # Too large: recurse on each quadrant individually.
        for p in zip(*x_chops):
            p = map(lambda x:x.reshape([1]+list(x.size())), p)
            y = forward_chop(*p, scale, model, shave=shave, min_size=min_size)
            if not isinstance(y, list): y = [y]
            if not y_chops:
                y_chops = [[_y] for _y in y]
            else:
                for y_chop, _y in zip(y_chops, y): y_chop.append(_y)
    # Output geometry: scaled size, rounded and padded up to an even number.
    h *= scale
    w *= scale
    h, w = round(h), round(w)
    if h % 2 != 0: h += 1
    if w % 2 != 0: w += 1
    # Non-overlapping output quadrants; the *_r slices index the same regions
    # from the end of each chop, skipping the shave margin.
    top = slice(0, h // 2)
    bottom = slice(h - h // 2, h)
    bottom_r = slice(h // 2 - h, None)
    left = slice(0, w // 2)
    right = slice(w - w // 2, w)
    right_r = slice(w // 2 - w, None)
    # batch size, number of color channels
    b, c = y_chops[0][0].size()[:-2]
    y = [y_chop[0].new(b, c, h, w) for y_chop in y_chops]
    for y_chop, _y in zip(y_chops, y):
        _y[..., top, left] = y_chop[0][..., top, left]
        _y[..., top, right] = y_chop[1][..., top, right_r]
        _y[..., bottom, left] = y_chop[2][..., bottom_r, left]
        _y[..., bottom, right] = y_chop[3][..., bottom_r, right_r]
    if len(y) == 1: y = y[0]
    return y
def b_split(batch, mask):
    """Partition *batch* along dim 0 by the 0/1 flags in *mask*.

    Rows whose flag is 0 go to the first return value (fake), flag 1 to the
    second (real).  A side that receives no rows is returned as an empty
    list, mirroring the original behaviour.
    """
    fake_rows, real_rows = [], []
    for idx, flag in enumerate(mask):
        row = torch.unsqueeze(batch[idx], dim=0)
        bucket = int(flag)
        if bucket == 0:
            fake_rows.append(row)
        elif bucket == 1:
            real_rows.append(row)
    fake_out = torch.cat(fake_rows) if fake_rows else fake_rows
    real_out = torch.cat(real_rows) if real_rows else real_rows
    return fake_out, real_out
def b_merge(real_data, fake_data, mask):
    """Inverse of ``b_split``: re-interleave compacted real/fake rows.

    For each position i, mask[i] == 0 takes the next unused row of
    *fake_data* and mask[i] == 1 the next unused row of *real_data*.

    Separate per-source cursors are kept (the intent of the commented-out
    ``m``/``n`` counters in the original): the original indexed both sources
    with the batch position ``i``, which reads the wrong rows — or raises
    IndexError — whenever the mask mixes 0s and 1s, because ``b_split``
    returns compacted tensors shorter than the mask.
    """
    merged = []
    fake_idx, real_idx = 0, 0
    for flag in mask:
        j = int(flag)
        if j == 0:
            merged.append(torch.unsqueeze(fake_data[fake_idx], dim=0))
            fake_idx += 1
        elif j == 1:
            merged.append(torch.unsqueeze(real_data[real_idx], dim=0))
            real_idx += 1
    return torch.cat(merged)
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image Numpy array
    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    '''
    low, high = min_max
    tensor = tensor.squeeze().float().cpu().clamp_(low, high)
    tensor = (tensor - low) / (high - low)  # rescale to [0, 1]
    n_dim = tensor.dim()
    if n_dim == 4:
        # Tile the batch into a square-ish grid, then reorder RGB -> BGR and
        # move channels last.
        grid = make_grid(tensor, nrow=int(math.sqrt(len(tensor))), normalize=False)
        img_np = np.transpose(grid.numpy()[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 3:
        img_np = np.transpose(tensor.numpy()[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 2:
        img_np = tensor.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
    if out_type == np.uint8:
        # Important: numpy's astype truncates rather than rounds, so round
        # explicitly first (unlike matlab).
        img_np = (img_np * 255.0).round()
    return img_np.astype(out_type)
def save_img(img, img_path, mode='RGB'):
    """Write a numpy image array to *img_path* via OpenCV.

    NOTE(review): *mode* is accepted but never used — cv2.imwrite always
    interprets 3-channel input as BGR; confirm callers pass BGR data.
    """
    cv2.imwrite(img_path, img)
def dwt_init(x):
    """Single-level 2D Haar DWT of a (B, C, H, W) tensor.

    Returns ``(LL, cat(HL, LH, HH))`` at half the spatial resolution.

    The two row-slice expressions had been corrupted into IPv6-looking text
    ('fc00:...' artifacts) and did not parse; they are restored to the
    standard even/odd row split used by the Haar transform.
    """
    x01 = x[:, :, 0::2, :] / 2  # even rows
    x02 = x[:, :, 1::2, :] / 2  # odd rows
    x1 = x01[:, :, :, 0::2]  # even rows, even cols
    x2 = x02[:, :, :, 0::2]  # odd rows, even cols
    x3 = x01[:, :, :, 1::2]  # even rows, odd cols
    x4 = x02[:, :, :, 1::2]  # odd rows, odd cols
    x_LL = x1 + x2 + x3 + x4
    x_HL = -x1 - x2 + x3 + x4
    x_LH = -x1 + x2 - x3 + x4
    x_HH = x1 - x2 - x3 + x4
    return x_LL, torch.cat((x_HL, x_LH, x_HH), 1)
class DWT(nn.Module):
    """Parameter-free module wrapping ``dwt_init`` as a forward pass."""

    def __init__(self):
        super().__init__()
        # No learnable parameters; flag kept from the original implementation.
        self.requires_grad = False

    def forward(self, x):
        return dwt_init(x)
####################
# metric
####################
def calculate_psnr(img1, img2):
    """PSNR in dB between two images with values in range [0, 255].

    Identical images yield +inf.
    """
    diff = img1.astype(np.float64) - img2.astype(np.float64)
    mse = np.mean(diff ** 2)
    if mse == 0:
        return float('inf')
    return 20 * math.log10(255.0 / math.sqrt(mse))
def ssim(img1, img2):
    """Mean of the SSIM map between two images with range [0, 255].

    Uses the reference 11x11 Gaussian window (sigma 1.5) and keeps only the
    'valid' filter region by cropping a 5-pixel border, matching MATLAB's
    implementation.
    """
    c1 = (0.01 * 255) ** 2
    c2 = (0.03 * 255) ** 2
    a = img1.astype(np.float64)
    b = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    def blur(img):
        # Gaussian-filter then drop the half-window border (valid region).
        return cv2.filter2D(img, -1, window)[5:-5, 5:-5]

    mu1 = blur(a)
    mu2 = blur(b)
    mu1_sq = mu1 ** 2
    mu2_sq = mu2 ** 2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = blur(a ** 2) - mu1_sq
    sigma2_sq = blur(b ** 2) - mu2_sq
    sigma12 = blur(a * b) - mu1_mu2
    ssim_map = ((2 * mu1_mu2 + c1) * (2 * sigma12 + c2)) / (
        (mu1_sq + mu2_sq + c1) * (sigma1_sq + sigma2_sq + c2))
    return ssim_map.mean()
def calculate_ssim(img1, img2):
    '''calculate SSIM
    the same outputs as MATLAB's
    img1, img2: [0, 255]

    Grayscale (2D) input is scored directly.  For 3-channel input the
    original looped ``for i in range(3): ssims.append(ssim(img1, img2))`` —
    three identical full-image calls averaged together.  Since
    cv2.filter2D filters each channel independently and ssim() already
    averages its map over all channels, one call returns the same value,
    so the loop is collapsed (3x less work).  Single-channel 3D input is
    squeezed to 2D first.  A 3D image with any other channel count
    returns None, as before.
    '''
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    if img1.ndim == 2:
        return ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            return ssim(img1, img2)
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
    else:
        raise ValueError('Wrong input image dimensions.')
if __name__ == '__main__':
    from PIL import Image
    import numpy as np
    import os
    from data.util import bgr2ycbcr

    # Standalone evaluation: compare super-resolved outputs against HR ground
    # truth and report average PSNR/SSIM, both on RGB and on the Y channel.
    sr_path = '/media/4T/Dizzy/BasicSR-master/results/Test/DeviceVal20_2xd/'
    hr_path = '/media/4T/Dizzy/SR_classical_DataSet/RealWorldDataSet/Device_degration_Data/City100_iPhoneX/HR_val/'
    psnrtotal, ssimtotal = 0, 0
    psnr_ytotal, ssim_ytotoal = 0, 0
    idx = 0
    crop_border = 4
    for name in os.listdir(hr_path):
        # SR results use lowercase '.png'; HR ground truth uses '.PNG'.
        name = name.split('.')[0]
        sr_img_np = np.array(Image.open(sr_path+name+'.png'))/255
        hr_img_np = np.array(Image.open(hr_path+name+'.PNG'))/255
        # Crop borders before scoring, as is conventional for SR metrics.
        # NOTE(review): assumes 3-dimensional (H, W, C) images — confirm data.
        sr_img_np = sr_img_np[crop_border:-crop_border, crop_border:-crop_border, :]
        hr_img_np = hr_img_np[crop_border:-crop_border, crop_border:-crop_border, :]
        psnr = calculate_psnr(hr_img_np*255, sr_img_np*255)
        ssim_ = calculate_ssim(hr_img_np*255, sr_img_np*255)
        psnrtotal += psnr
        ssimtotal += ssim_
        # Luma-only metrics, as commonly reported in SR papers.
        sr_img_np_y = bgr2ycbcr(sr_img_np, only_y=True)
        hr_img_np_y = bgr2ycbcr(hr_img_np, only_y=True)
        psnr = calculate_psnr(sr_img_np_y*255, hr_img_np_y*255)
        ssim_ = calculate_ssim(sr_img_np_y*255, hr_img_np_y*255)
        psnr_ytotal += psnr
        ssim_ytotoal += ssim_
        idx += 1
    print('PSNR: ',psnrtotal/idx,'SSIM: ', ssimtotal/idx)
    print('PSNR_y: ',psnr_ytotal/idx,'SSIM_y: ', ssim_ytotoal/idx)
|
import json
import logging
from django.conf import settings
from django.http import (
HttpResponseBadRequest,
HttpResponseNotFound, HttpResponseForbidden
)
from django.utils import timezone
from rest_framework import permissions, status
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework.views import APIView
from api.models import Case
from api.utils import get_case_for_user
# Module-level logger for the case API views.
LOGGER = logging.getLogger(__name__)
class CaseView(APIView):
    """CRUD endpoints for Case records, whose payloads are encrypted at rest.

    All endpoints require an authenticated user; cases are always looked up
    scoped to the requesting user's id via ``get_case_for_user``.
    """

    permission_classes = (permissions.IsAuthenticated,)

    def encrypt_data(self, data):
        """JSON-encode *data* and encrypt it.

        Returns:
            (key_id, ciphertext) as produced by settings.ENCRYPTOR.

        The error is logged and then re-raised: the original swallowed the
        exception and implicitly returned None, so every caller failed later
        with an opaque tuple-unpacking TypeError instead of the real cause.
        """
        try:
            data_bin = json.dumps(data).encode("ascii")
            (data_key_id, data_enc) = settings.ENCRYPTOR.encrypt(data_bin)
            return (data_key_id, data_enc)
        except Exception as ex:
            LOGGER.error("ERROR! %s", ex)
            raise

    def _serialize_case(self, case):
        """Decrypt one case row into the JSON response shape (shared by the
        single-case and list branches of GET)."""
        data_dec = settings.ENCRYPTOR.decrypt(case.key_id, case.data)
        return {
            "id": case.id,
            "type": case.type,
            "status": case.status,
            "modified": case.modified,
            "personId": case.user_id,
            "archive": case.archive,
            "pdf_types": case.pdf_types,
            "description": case.description,
            "packageNumber": case.package_number,
            "packageUrl": case.package_url,
            "data": json.loads(data_dec),
        }

    def get(self, request, pk=None, format=None):
        """Return one decrypted case (pk given) or all of the user's cases.

        ?includeArchives=true additionally includes archived cases in the
        list response.
        """
        uid = request.user.id
        include_archives = request.query_params.get("includeArchives") == "true"
        if pk:
            # NOTE(review): presumes get_case_for_user raises or never
            # returns None for a valid pk — confirm, otherwise this would
            # fail on .key_id.
            responseData = self._serialize_case(get_case_for_user(pk, uid))
        else:
            cases = get_case_for_user(pk, uid, include_archives)
            responseData = [self._serialize_case(case) for case in cases]
        return Response(responseData)

    def post(self, request: Request):
        """Create a new case in 'Draft' status with an encrypted payload.

        Returns the new primary key as {"case_id": ...}.
        """
        uid = request.user.id
        if not uid:
            return HttpResponseForbidden("Missing user ID")
        body = request.data
        if not body:
            return HttpResponseBadRequest("Missing request body")
        (data_key_id, data_enc) = self.encrypt_data(body["data"])
        description = body.get("description") or ""
        db_app = Case(
            type=body.get("type"),
            description=description,
            status="Draft",
            modified=timezone.now(),
            data=data_enc,
            key_id=data_key_id,
            user_id=uid)
        db_app.save()
        return Response({"case_id": db_app.pk})

    def put(self, request, pk, format=None):
        """Update one case (by pk) or bulk-update status/archive (?id=...).

        The single-case path re-encrypts body["data"]; the bulk path only
        touches status and the archive flag, silently skipping ids the user
        does not own.
        """
        uid = request.user.id
        body = request.data
        if not body:
            return HttpResponseBadRequest("Missing request body")
        case_ids = request.query_params.getlist("id")
        if not case_ids:
            case = get_case_for_user(pk, uid)
            if not case:
                return HttpResponseNotFound("No record found")
            (data_key_id, data_enc) = self.encrypt_data(body["data"])
            case.modified = timezone.now()
            case.type = body.get("type")
            case.status = body.get("status")
            # Keep the existing description unless a new one was supplied.
            case.description = body.get("description") or case.description
            case.data = data_enc
            case.key_id = data_key_id
            case.save()
        else:
            for caseId in case_ids:
                case = get_case_for_user(caseId, uid)
                if not case:
                    continue
                case.modified = timezone.now()
                if body.get("status"):
                    case.status = body.get("status")
                case.archive = body.get("archive")
                case.save()
        return Response("success")

    def delete(self, request, pk, format=None):
        """Delete one case (by pk) or several (?id=...).

        Cases that were already submitted (have a package number or URL) are
        never deleted: the single-case path rejects the request, the bulk
        path skips them.
        """
        uid = request.user.id
        case_ids = request.query_params.getlist("id")
        if not case_ids:
            case = get_case_for_user(pk, uid)
            if not case or pk == 0:
                return HttpResponseNotFound("No record found")
            if case.package_number or case.package_url:
                return HttpResponseBadRequest("Not able to delete submitted application")
            case.delete()
        else:
            for caseId in case_ids:
                case = get_case_for_user(caseId, uid)
                if not case:
                    continue
                if case.package_number or case.package_url:
                    continue
                case.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
|
### FIFA world cup
# Tournament fixture data, keyed by edition:
#   '2018'     - FIFA World Cup 2018
#   '2019'     - FIFA Women's World Cup 2019
#   'CL20'     - UEFA Champions League 2019/20 (4-letter club codes)
#   'UEFA2020' - UEFA Euro 2020
# Each edition maps:
#   'teams'   -> entrant list (for 'UEFA2020' the list also repeats names in
#                what appears to be match order — TODO confirm against users)
#   'teamsGS' -> group-stage pairings in schedule order
#   'teams16'/'teamsQF'/'teamsSF'/'teamsFF' -> knockout-round pairings
team_dict = {'2018' : {'teams' : ["URUGUAY", "RUSSIA", "SAUDI ARABIA", "EGYPT",
"SPAIN", "PORTUGAL", "IRAN", "MOROCCO",
"FRANCE", "DENMARK", "PERU", "AUSTRALIA",
"CROATIA", "ARGENTINA", "NIGERIA", "ICELAND",
"BRAZIL", "SWITZERLAND", "SERBIA", "COSTA RICA",
"SWEDEN", "MEXICO", "KOREA", "GERMANY",
"BELGIUM", "ENGLAND", "TUNISIA", "PANAMA",
"COLOMBIA", "JAPAN", "SENEGAL", "POLAND"],
'teamsGS':[['RUSSIA','SAUDI ARABIA'],
['EGYPT','URUGUAY'],
['MOROCCO','IRAN'],
['PORTUGAL','SPAIN'],
['FRANCE','AUSTRALIA'],
['ARGENTINA','ICELAND'],
['PERU','DENMARK'],
['CROATIA','NIGERIA'],
['COSTA RICA','SERBIA'],
['GERMANY','MEXICO'],
['BRAZIL','SWITZERLAND'],
['SWEDEN','KOREA'],
['BELGIUM','PANAMA'],
['TUNISIA','ENGLAND'],
['COLOMBIA','JAPAN'],
['POLAND','SENEGAL'],
['RUSSIA','EGYPT'],
['PORTUGAL','MOROCCO'],
['URUGUAY','SAUDI ARABIA'],
['IRAN','SPAIN'],
['DENMARK','AUSTRALIA'],
['FRANCE','PERU'],
['ARGENTINA','CROATIA'],
['BRAZIL','COSTA RICA'],
['NIGERIA','ICELAND'],
['SERBIA','SWITZERLAND'],
['BELGIUM','TUNISIA'],
['KOREA','MEXICO'],
['GERMANY','SWEDEN'],
['ENGLAND','PANAMA'],
['JAPAN','SENEGAL'],
['POLAND','COLOMBIA'],
['URUGUAY','RUSSIA'],
['SAUDI ARABIA','EGYPT'],
['SPAIN','MOROCCO'],
['IRAN','PORTUGAL'],
['AUSTRALIA', 'PERU'],
['DENMARK', 'FRANCE'],
['NIGERIA', 'ARGENTINA'],
['ICELAND', 'CROATIA'],
['KOREA', 'GERMANY'],
['MEXICO', 'SWEDEN'],
['SERBIA', 'BRAZIL'],
['SWITZERLAND', 'COSTA RICA'],
['JAPAN', 'POLAND'],
['SENEGAL', 'COLOMBIA'],
['PANAMA', 'TUNISIA'],
['ENGLAND', 'BELGIUM']],
'teams16':[['SPAIN','RUSSIA'],
['CROATIA','DENMARK'],
['SWEDEN','SWITZERLAND'],
['COLOMBIA','ENGLAND'],
['URUGUAY','PORTUGAL'],
['FRANCE','ARGENTINA'],
['BRAZIL','MEXICO'],
['BELGIUM','JAPAN']],
'teamsQF':[['RUSSIA','CROATIA'],
['SWEDEN','ENGLAND'],
['URUGUAY','FRANCE'],
['BRAZIL','BELGIUM']],
'teamsSF':[['FRANCE','BELGIUM'],
['CROATIA','ENGLAND']],
'teamsFF':[['BELGIUM','ENGLAND'],
['FRANCE','CROATIA']]},
'2019' : {'teams' : ["FRANCE", "NORWAY", "NIGERIA", "KOREA",
"GERMANY", "SPAIN", "CHINA", "SOUTH AFRICA",
"ITALY", "AUSTRALIA", "BRAZIL", "JAMAICA",
"ENGLAND", "JAPAN", "ARGENTINA", "SCOTLAND",
"NETHERLANDS", "CANADA", "CAMEROON", "NEW ZEALAND",
"USA", "SWEDEN", "CHILE", "THAILAND"],
'teamsGS':[['FRANCE','KOREA'],
['GERMANY','CHINA'],
['SPAIN','SOUTH AFRICA'],
['NORWAY','NIGERIA'],
['AUSTRALIA','ITALY'],
['BRAZIL','JAMAICA'],
['ENGLAND','SCOTLAND'],
['ARGENTINA','JAPAN'],
['CANADA','CAMEROON'],
['NEW ZEALAND','NETHERLANDS'],
['CHILE','SWEDEN'],
['USA','THAILAND'],
['NIGERIA','KOREA'],
['GERMANY','SPAIN'],
['FRANCE','NORWAY'],
['AUSTRALIA','BRAZIL'],
['SOUTH AFRICA','CHINA'],
['JAPAN','SCOTLAND'],
['JAMAICA','ITALY'],
['ENGLAND','ARGENTINA'],
['NETHERLANDS','CAMEROON'],
['CANADA','NEW ZEALAND'],
['SWEDEN','THAILAND'],
['USA','CHILE'],
['CHINA','SPAIN'],
['SOUTH AFRICA','GERMANY'],
['NIGERIA','FRANCE'],
['KOREA','NORWAY'],
['JAMAICA','AUSTRALIA'],
['ITALY','BRAZIL'],
['JAPAN','ENGLAND'],
['SCOTLAND','ARGENTINA'],
['CAMEROON','NEW ZEALAND'],
['NETHERLANDS','CANADA'],
['SWEDEN','USA'],
['THAILAND','CHILE']],
'teams16':[['GERMANY','NIGERIA'],
['SWEDEN','CANADA'],
['ITALY','CHINA'],
['NETHERLANDS','JAPAN'],
['NORWAY','AUSTRALIA'],
['ENGLAND','CAMEROON'],
['FRANCE','BRAZIL'],
['SPAIN','USA']],
'teamsQF':[['GERMANY','SWEDEN'],
['ITALY','NETHERLANDS'],
['NORWAY','ENGLAND'],
['FRANCE','USA']],
'teamsSF':[['ENGLAND','USA'],
['SWEDEN','NETHERLANDS']],
'teamsFF':[['ENGLAND','SWEDEN'],
['USA','NETHERLANDS']]},
'CL20' : {'teams' : ["PARI", "REAL", "CLUB", "GALA",
"BAYE", "TOTT", "OLYM", "CRVE",
"MANC", "ATAL", "SHAK", "DINA",
"JUVE", "ATLE", "LEVE", "LOKO",
"LIVE", "NAPO", "SALZ", "GENK",
"BARC", "DORT", "INTE", "SLAV",
"LEIP", "LYON", "BENF", "ZENI",
"VALE", "CHEL", "AJAX", "LOSC"],
'teamsGS':[['INTE','SLAV'],
['LYON','ZENI'],
['NAPO','LIVE'],
['SALZ','GENK'],
['DORT','BARC'],
['BENF','LEIP'],
['CHEL','VALE'],
['AJAX','LOSC'],
['CLUB','GALA'],
['OLYM','TOTT'],
['PARI','REAL'],
['BAYE','CRVE'],
['DINA','ATAL'],
['SHAK','MANC'],
['LEVE','LOKO'],
['ATLE','JUVE'],
['REAL','CLUB'],
['ATAL','SHAK'],
['GALA','PARI'],
['TOTT','BAYE'],
['CRVE','OLYM'],
['MANC','DINA'],
['JUVE','LEVE'],
['LOKO','ATLE'],
['GENK','NAPO'],
['SLAV','DORT'],
['LIVE','SALZ'],
['BARC','INTE'],
['LEIP','LYON'],
['ZENI','BENF'],
['LOSC','CHEL'],
['VALE','AJAX'],
['SHAK','DINA'],
['ATLE','LEVE'],
['CLUB','PARI'],
['GALA','REAL'],
['OLYM','BAYE'],
['TOTT','CRVE'],
['MANC','ATAL'],
['JUVE','LOKO'],
['LEIP','ZENI'],
['AJAX','CHEL'],
['SALZ','NAPO'],
['GENK','LIVE'],
['INTE','DORT'],
['SLAV','BARC'],
['BENF','LYON'],
['LOSC','VALE'],
['BARC','SLAV'],
['ZENI','LEIP'],
['NAPO','SALZ'],
['LIVE','GENK'],
['DORT','INTE'],
['LYON','BENF'],
['CHEL','AJAX'],
['VALE','LOSC'],
['BAYE','OLYM'],
['LOKO','JUVE'],
['PARI','CLUB'],
['REAL','GALA'],
['CRVE','TOTT'],
['DINA','SHAK'],
['ATAL','MANC'],
['LEVE','ATLE'],
['GALA','CLUB'],
['LOKO','LEVE'],
['REAL','PARI'],
['CRVE','BAYE'],
['TOTT','OLYM'],
['ATAL','DINA'],
['MANC','SHAK'],
['JUVE','ATLE'],
['ZENI','LYON'],
['VALE','CHEL'],
['LIVE','NAPO'],
['GENK','SALZ'],
['BARC','DORT'],
['SLAV','INTE'],
['LEIP','BENF'],
['LOSC','AJAX'],
['NAPO','GENK'],
['SALZ','LIVE'],
['DORT','SLAV'],
['INTE','BARC'],
['LYON','LEIP'],
['BENF','ZENI'],
['CHEL','LOSC'],
['AJAX','VALE'],
['DINA','MANC'],
['SHAK','ATAL'],
['PARI','GALA'],
['CLUB','REAL'],
['BAYE','TOTT'],
['OLYM','CRVE'],
['LEVE','JUVE'],
['ATLE','LOKO']],
# CL20 'teams16' lists both legs of each round-of-16 tie (home/away swapped).
'teams16':[['ATLE','LIVE'],
['DORT','PARI'],
['ATAL','VALE'],
['TOTT','LEIP'],
['CHEL','BAYE'],
['NAPO','BARC'],
['LYON','JUVE'],
['REAL','MANC'],
['VALE','ATAL'],
['LEIP','TOTT'],
['LIVE','ATLE'],
['PARI','DORT'],
['JUVE','LYON'],
['MANC','REAL'],
['BAYE','CHEL'],
['BARC','NAPO']],
'teamsQF':[['ATAL','PARI'],
['LEIP','ATLE'],
['BARC','BAYE'],
['MANC','LYON']],
'teamsSF':[['LEIP','PARI'],
['LYON','BAYE']],
'teamsFF':[['PARI','BAYE']]},
'UEFA2020':{'teams':['TURKEY',
'ITALY',
'WALES',
'SWITZERLAND',
'DENMARK',
'FINLAND',
'BELGIUM',
'RUSSIA',
'AUSTRIA',
'N MACEDONIA',
'NETHERLANDS',
'UKRAINE',
'ENGLAND',
'CROATIA',
'SCOTLAND',
'CZECH REP',
'POLAND',
'SLOVAKIA',
'SPAIN',
'SWEDEN',
'HUNGARY',
'PORTUGAL',
'FRANCE',
'GERMANY',
'TURKEY',
'WALES',
'ITALY',
'SWITZERLAND',
'FINLAND',
'RUSSIA',
'DENMARK',
'BELGIUM',
'UKRAINE',
'N MACEDONIA',
'NETHERLANDS',
'AUSTRIA',
'CROATIA',
'CZECH REP',
'ENGLAND',
'SCOTLAND',
'SWEDEN',
'SLOVAKIA',
'SPAIN',
'POLAND',
'HUNGARY',
'FRANCE',
'PORTUGAL',
'GERMANY',
'ITALY',
'WALES',
'SWITZERLAND',
'TURKEY',
'FINLAND',
'BELGIUM',
'RUSSIA',
'DENMARK',
'UKRAINE',
'AUSTRIA',
'N MACEDONIA',
'NETHERLANDS',
'CZECH REP',
'ENGLAND',
'CROATIA',
'SCOTLAND',
'SWEDEN',
'POLAND',
'SLOVAKIA',
'SPAIN',
'GERMANY',
'HUNGARY',
'PORTUGAL',
'FRANCE',
'BELGIUM',
'PORTUGAL',
'ITALY',
'AUSTRIA',
'FRANCE',
'SWITZERLAND',
'CROATIA',
'SPAIN',
'SWEDEN',
'UKRAINE',
'ENGLAND',
'GERMANY',
'NETHERLANDS',
'CZECH REP',
'WALES',
'DENMARK',
'BELGIUM',
'ITALY',
'SWITZERLAND',
'SPAIN',
'UKRAINE',
'ENGLAND',
'CZECH REP',
'DENMARK',
'ITALY',
'SPAIN',
'ENGLAND',
'DENMARK'],
'teamsGS':[['TURKEY', 'ITALY'],
['WALES', 'SWITZERLAND'],
['DENMARK', 'FINLAND'],
['BELGIUM', 'RUSSIA'],
['AUSTRIA', 'N MACEDONIA'],
['NETHERLANDS', 'UKRAINE'],
['ENGLAND', 'CROATIA'],
['SCOTLAND', 'CZECH REP'],
['POLAND', 'SLOVAKIA'],
['SPAIN', 'SWEDEN'],
['HUNGARY', 'PORTUGAL'],
['FRANCE', 'GERMANY'],
['TURKEY', 'WALES'],
['ITALY', 'SWITZERLAND'],
['FINLAND', 'RUSSIA'],
['DENMARK', 'BELGIUM'],
['UKRAINE', 'N MACEDONIA'],
['NETHERLANDS', 'AUSTRIA'],
['CROATIA', 'CZECH REP'],
['ENGLAND', 'SCOTLAND'],
['SWEDEN', 'SLOVAKIA'],
['SPAIN', 'POLAND'],
['HUNGARY', 'FRANCE'],
['PORTUGAL', 'GERMANY'],
['ITALY', 'WALES'],
['SWITZERLAND', 'TURKEY'],
['FINLAND', 'BELGIUM'],
['RUSSIA', 'DENMARK'],
['UKRAINE', 'AUSTRIA'],
['N MACEDONIA', 'NETHERLANDS'],
['CZECH REP', 'ENGLAND'],
['CROATIA', 'SCOTLAND'],
['SWEDEN', 'POLAND'],
['SLOVAKIA', 'SPAIN'],
['GERMANY', 'HUNGARY'],
['PORTUGAL', 'FRANCE']],
'teams16':[['BELGIUM', 'PORTUGAL'],
['ITALY', 'AUSTRIA'],
['FRANCE', 'SWITZERLAND'],
['CROATIA', 'SPAIN'],
['SWEDEN', 'UKRAINE'],
['ENGLAND', 'GERMANY'],
['NETHERLANDS', 'CZECH REP'],
['WALES', 'DENMARK']],
'teamsQF':[['BELGIUM', 'ITALY'],
['SWITZERLAND', 'SPAIN'],
['UKRAINE', 'ENGLAND'],
['CZECH REP', 'DENMARK']],
'teamsSF':[['ITALY', 'SPAIN'], ['ENGLAND', 'DENMARK']],
'teamsFF':[['ITALY', 'ENGLAND']]
}
}
### END ###
|
<reponame>hustwei/chromite
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Stage a custom image on a Moblab device or in Google Storage."""
from __future__ import print_function
import os
import re
import shutil
from chromite.cbuildbot import commands
from chromite.cli import command
from chromite.cli import flash
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import dev_server_wrapper
from chromite.lib import gs
from chromite.lib import osutils
from chromite.lib import remote_access
# Devserver-served directory on the Moblab device, and the scratch area
# below it where images are first copied before staging.
MOBLAB_STATIC_DIR = '/mnt/moblab/static'
MOBLAB_TMP_DIR = os.path.join(MOBLAB_STATIC_DIR, 'tmp')
# Location of test artifacts inside the board sysroot.
BOARD_BUILD_DIR = 'usr/local/build'
# Devserver "stage" endpoint; invoked from the Moblab itself (port 8080 may
# not be reachable from the developer workstation).
DEVSERVER_STAGE_URL = ('http://%(moblab)s:8080/stage?local_path=%(staged_dir)s'
                       '&artifacts=full_payload,stateful,test_suites,'
                       'control_files,autotest_packages,'
                       'autotest_server_package')
# Name under which a custom image is staged: "<board>-custom/<build>".
CUSTOM_BUILD_NAME = '%(board)s-custom/%(build)s'
class CustomImageStagingException(Exception):
  """Thrown when there is an error staging a custom image."""
def GSURLRegexHelper(gsurl):
  """Match *gsurl* against the expected Google Storage image URL layout.

  Args:
    gsurl: Google Storage URL to match.

  Returns:
    A regex match object exposing the named groups 'board', 'type' and
    'build_name', or None if the URL does not fit the pattern.
  """
  pattern = (r'gs://.*/(trybot-)?(?P<board>[\w-]+)-(?P<type>\w+)/'
             r'(?P<build_name>R\d+-[\d.ab-]+)')
  return re.match(pattern, gsurl)
@command.CommandDecorator('stage')
class StageCommand(command.CliCommand):
  """Remotely stages an image onto a MobLab device or into Google Storage.

  The image to be staged may be a local custom image built in the chroot or an
  official image in Google Storage. The test binaries will always come from the
  local build root regardless of the image source.

  This script generates/copies the update payloads and test binaries required.
  It then stages them on the Moblab's devserver or copies them into the
  specified Google Storage Bucket.

  The image name to then use for testing is outputted at the end of this
  script.
  """

  EPILOG = """
To stage a local image path onto a moblab device:
cros stage /path/to/board/build/chromiumos-test-image.bin <moblab>
To stage an official image with custom test binaries onto a moblab device:
cros stage <gs_image_dir> <moblab>
To stage a local image path into a Google Storage Bucket:
cros stage /path/to/board/build/chromiumos-test-image.bin <gs_base_path>
--boto_file=<boto_file_path>
NOTES:
* The autotest bits used to test this image will be the latest in your
build sysroot! I.E. if you emerge new autotest changes after producing the
image you wish to stage, there is a chance that the changes will not match.
* The custom image will only stay on the Moblab device for 24 hours at which
point it will be wiped.
"""

  @classmethod
  def AddParser(cls, parser):
    """Add parser arguments."""
    super(StageCommand, cls).AddParser(parser)
    parser.add_argument(
        'image', nargs='?', default='latest', help='Path to image we want to '
        'stage. If a local path, it should be in the format of '
        '/.../.../board/build/<image>.bin . If a Google Storage path it should'
        'be in the format of '
        'gs://<bucket-name>/<board>-<builder type>/<build name>')
    parser.add_argument(
        'remote', help='MobLab device that has password-less SSH set up via '
        'the chroot already. Or Google Storage Bucket in the form of '
        'gs://<bucket-name>/')
    parser.add_argument(
        '--board', dest='board', default=None,
        help='The board name, defaults to value extracted from image path.')
    parser.add_argument(
        '--staged_image_name', dest='staged_image_name', default=None,
        help='Name for the staged image. Default: <board>-custom/<build>')
    parser.add_argument(
        '--boto_file', dest='boto_file', default=None,
        help='Path to boto file to use when uploading to Google Storage. If '
        'none the default chroot boto file is used.')

  def __init__(self, options):
    """Initializes cros stage."""
    super(StageCommand, self).__init__(options)
    self.board = self.options.board
    self.staged_image_name = self.options.staged_image_name
    # Determine if we are staging a local custom image or an official image.
    if self.options.image.startswith('gs://'):
      self._remote_image = True
      if not self.staged_image_name:
        self.staged_image_name = self._GenerateImageNameFromGSUrl(
            self.options.image)
    else:
      self._remote_image = False
      if not self.staged_image_name:
        self.staged_image_name = self._GenerateImageNameFromLocalPath(
            self.options.image)
    # The name-generation helpers may have derived the board from the image
    # path/URL; by this point one of --board or the image must supply it.
    if not self.board:
      raise CustomImageStagingException('Please specify the "board" argument')
    self.stage_directory = os.path.join(MOBLAB_TMP_DIR, self.staged_image_name)
    # Determine if the staging destination is a Moblab or Google Storage.
    if self.options.remote.startswith('gs://'):
      self._remote_is_moblab = False
    else:
      self._remote_is_moblab = True

  def _GenerateImageNameFromLocalPath(self, image):
    """Generate the name as which |image| will be staged onto Moblab.

    If the board name has not been specified, set the board name based on
    the image path.

    Args:
      image: Path to image we want to stage. It should be in the format of
        /.../.../board/build/<image>.bin

    Returns:
      Name the image will be staged as.

    Raises:
      CustomImageStagingException: If the image name supplied is not valid.
    """
    realpath = osutils.ExpandPath(image)
    if not realpath.endswith('.bin'):
      raise CustomImageStagingException(
          'Image path: %s does not end in .bin !' % realpath)
    build_name = os.path.basename(os.path.dirname(realpath))
    # Custom builds are name with the suffix of '-a1' but the build itself
    # is missing this suffix in its filesystem. Therefore lets rename the build
    # name to match the name inside the build.
    if build_name.endswith('-a1'):
      build_name = build_name[:-len('-a1')]
    if not self.board:
      self.board = os.path.basename(os.path.dirname(os.path.dirname(realpath)))
    return CUSTOM_BUILD_NAME % dict(board=self.board, build=build_name)

  def _GenerateImageNameFromGSUrl(self, image):
    """Generate the name as which |image| will be staged onto Moblab.

    If the board name has not been specified, set the board name based on
    the image path.

    Args:
      image: GS Url to the image we want to stage. It should be in the format
        gs://<bucket-name>/<board>-<builder type>/<build name>

    Returns:
      Name the image will be staged as.

    Raises:
      CustomImageStagingException: If the image name supplied is not valid.
    """
    match = GSURLRegexHelper(image)
    if not match:
      raise CustomImageStagingException(
          'Image URL: %s is improperly defined!' % image)
    if not self.board:
      self.board = match.group('board')
    return CUSTOM_BUILD_NAME % dict(board=self.board,
                                    build=match.group('build_name'))

  def _DownloadPayloads(self, tempdir):
    """Download from GS the update payloads we require.

    Args:
      tempdir: Temporary Directory to store the downloaded payloads.
    """
    gs_context = gs.GSContext(boto_file=self.options.boto_file)
    gs_context.Copy(os.path.join(self.options.image, 'stateful.tgz'), tempdir)
    gs_context.Copy(os.path.join(self.options.image, '*_full*'), tempdir)

  def _GeneratePayloads(self, tempdir):
    """Generate the update payloads we require.

    Args:
      tempdir: Temporary Directory to store the generated payloads.
    """
    dev_server_wrapper.GetUpdatePayloadsFromLocalPath(
        self.options.image, tempdir, static_dir=flash.DEVSERVER_STATIC_DIR)
    rootfs_payload = os.path.join(tempdir, dev_server_wrapper.ROOTFS_FILENAME)
    # Devservers will look for a file named *_full_*.
    shutil.move(rootfs_payload, os.path.join(tempdir, 'update_full_dev.bin'))

  def _GenerateTestBits(self, tempdir):
    """Generate and transfer to the Moblab the test bits we require.

    Args:
      tempdir: Temporary Directory to store the generated test artifacts.
    """
    build_root = cros_build_lib.GetSysroot(board=self.board)
    cwd = os.path.join(build_root, BOARD_BUILD_DIR)
    tarball_funcs = [commands.BuildAutotestControlFilesTarball,
                     commands.BuildAutotestPackagesTarball,
                     commands.BuildAutotestTestSuitesTarball,
                     commands.BuildAutotestServerPackageTarball]
    for tarball_func in tarball_funcs:
      tarball_func(build_root, cwd, tempdir)

  def _StageOnMoblab(self, tempdir):
    """Stage the generated payloads and test bits on a moblab device.

    Args:
      tempdir: Temporary Directory that contains the generated payloads and
        test bits.
    """
    with remote_access.ChromiumOSDeviceHandler(self.options.remote) as device:
      device.RunCommand(['mkdir', '-p', self.stage_directory])
      for f in os.listdir(tempdir):
        device.CopyToDevice(os.path.join(tempdir, f), self.stage_directory)
      # Devserver runs as the moblab user and must own the staged files.
      device.RunCommand(['chown', '-R', 'moblab:moblab',
                         MOBLAB_TMP_DIR])
      # Delete this image from the Devserver in case it was previously staged.
      device.RunCommand(['rm', '-rf', os.path.join(MOBLAB_STATIC_DIR,
                                                   self.staged_image_name)])
      stage_url = DEVSERVER_STAGE_URL % dict(moblab=self.options.remote,
                                             staged_dir=self.stage_directory)
      # Stage the image from the moblab, as port 8080 might not be reachable
      # from the developer's system.
      res = device.RunCommand(['curl', '--fail',
                               cros_build_lib.ShellQuote(stage_url)],
                              error_code_ok=True)
      if res.returncode == 0:
        logging.info('\n\nStaging Completed!')
        logging.info('Image is staged on Moblab as %s',
                     self.staged_image_name)
      else:
        logging.info('Staging failed. Error Message: %s', res.error)
      # Remove the scratch copy regardless of success.
      device.RunCommand(['rm', '-rf', self.stage_directory])

  def _StageOnGS(self, tempdir):
    """Stage the generated payloads and test bits into a Google Storage bucket.

    Args:
      tempdir: Temporary Directory that contains the generated payloads and
        test bits.
    """
    gs_context = gs.GSContext(boto_file=self.options.boto_file)
    for f in os.listdir(tempdir):
      gs_context.CopyInto(os.path.join(tempdir, f), os.path.join(
          self.options.remote, self.staged_image_name))
    logging.info('\n\nStaging Completed!')
    logging.info('Image is staged in Google Storage as %s',
                 self.staged_image_name)

  def Run(self):
    """Perform the cros stage command."""
    logging.info('Attempting to stage: %s as Image: %s at Location: %s',
                 self.options.image, self.staged_image_name,
                 self.options.remote)
    osutils.SafeMakedirsNonRoot(flash.DEVSERVER_STATIC_DIR)
    with osutils.TempDir() as tempdir:
      # Official GS images are downloaded; local images need payloads built.
      if self._remote_image:
        self._DownloadPayloads(tempdir)
      else:
        self._GeneratePayloads(tempdir)
      self._GenerateTestBits(tempdir)
      if self._remote_is_moblab:
        self._StageOnMoblab(tempdir)
      else:
        self._StageOnGS(tempdir)
|
<filename>supporting_scripts/tacoxDNA/src/libs/cadnano_utils.py<gh_stars>0
'''
Created on Nov 11, 2018
@author: lorenzo
'''
import numpy as np
from tacoxDNA.src.libs import base
from tacoxDNA.src.libs import utils
import math
# Unit labels used when interpreting strand-length / rotation arguments.
BP = "bp"
DEGREES = "degrees"
class StrandGenerator (object):
    """Factory for oxDNA Strand objects: linear/circular, ss/dsDNA, random walks."""

    def generate(self, bp, sequence=None, start_pos=np.array([0, 0, 0]), direction=np.array([0, 0, 1]), perp=None, rot=0., double=True, circular=False, DELTA_LK=0, BP_PER_TURN=10.34, ds_start=None, ds_end=None, force_helicity=False):
        """
        Generate a strand of DNA.
            - linear, circular (circular)
            - ssDNA, dsDNA (double)
            - Combination of ss/dsDNA (ds_start, ds_end)
            Note: Relevant argument(s) in parentheses.

        Arguments:
        bp --- Integer number of bp/nt (required)
        sequence --- Array of integers or string. Should be same length as bp (default None)
            Default (None) generates a random sequence.
            Ex: [0,1,2,3,0]
            Ex: "AGCTA"
            See dictionary base.base_to_number for int/char conversion {0:'A'}
        start_pos --- Location to begin building the strand (default np.array([0, 0, 0]))
        direction --- a3 vector, orientation of the base (default np.array([0, 0, 1]))
        perp --- Sets a1 vector, the orientation of the backbone. (default None)
            Must be perpendicular to direction (as a1 must be perpendicular to a3)
            If perp is None or False, perp is set to a random orthogonal angle
        rot --- Rotation of first bp (default 0.)
        double --- Generate dsDNA (default True)
        circular --- Generate closed circular DNA (default False)
            Limitations...
            For ssDNA (double=False): bp >= 4
            For dsDNA (double=True) : bp >= 30
            Will throw warnings. Allowed, but use at your own risk.
        DELTA_LK --- Integer change in linking number from Lk0 (default 0)
            Only valid if circular==True
        BP_PER_TURN --- Base pairs per complete 2*pi helix turn. (default 10.34)
            Only valid if circular==True
        ds_start --- Index (from 0) to begin double stranded region (default None)
        ds_end --- Index (from 0) to end double stranded region (default None)
            Default is None, which is entirely dsDNA; sets ds_start = 0, ds_end=bp
            Ex: ds_start=0, ds_end=10 will create a double stranded region on bases
                range(0,10): [0,1,2,3,4,5,6,7,8,9]
            Note: To generate a nicked circular dsDNA, manually change state with
                  {Strand}.make_noncircular()
        force_helicity --- Force generation of helical strands. Use helicity by default
            for bp > 30. Warns from 18 to 29. Will crash oxDNA below 18. (default False)
            Note: Minimum circular duplex is 18. Shorter circular strands disobey FENE.
            For shorter strands, circular ssDNA is generated in a circle instead of having
            imposed helicity.

        Returns the generated Strand, or a (strand, complement) pair when double=True.

        Examples:
        Generate ssDNA:
            generate(bp=4,sequence=[0,1,2,3],double=False,circular=False)
        Generate circular dsDNA with +2 Linking number:
            generate(bp=45,double=True,circular=True,DELTA_LK=2)
        Generate a circular ssDNA (45nt) with ssDNA (25nt) annealed to indices 0 to 24:
            generate(bp=45,double=True,circular=True,ds_start=0,ds_end=25)
        """
        # we need numpy array for these
        start_pos = np.array(start_pos, dtype=float)
        direction = np.array(direction, dtype=float)
        if isinstance(sequence, list):
            sequence = np.array(sequence)

        # Loads of input checking...
        if isinstance(sequence, str):
            try:
                sequence = [base.base_to_number[x] for x in sequence]
            except KeyError:
                base.Logger.die("Key Error: sequence is invalid")
        # Fix: `sequence == None` broadcasts elementwise once `sequence` is a
        # numpy array, making this `if` raise ValueError; identity test is safe.
        if sequence is None:
            sequence = np.random.randint(0, 4, bp)
        elif len(sequence) != bp:
            n = bp - len(sequence)
            sequence = np.append(sequence, np.random.randint(0, 4, n))
            base.Logger.log("sequence is too short, adding %d random bases" % n, base.Logger.WARNING)

        if circular == True and bp < 30:
            # 30 is about the cut off for circular dsDNA. Anything shorter will probably clash.
            # oxDNA can relax down to 18.
            # 4 is about the cut off for circular ssDNA. Use dsDNA cutoff for safety.
            base.Logger.log("sequence is too short! Proceed at your own risk", base.Logger.WARNING)

        option_use_helicity = True
        if circular == True and bp < 30 and double == False:
            base.Logger.log("sequence is too short! Generating ssDNA without imposed helicity", base.Logger.WARNING)
            # Do not impose helicity to generate shorter circular ssDNA
            if not force_helicity:
                option_use_helicity = False

        if ds_start is None:
            ds_start = 0
        if ds_end is None:
            ds_end = bp
        if ds_start > ds_end:
            # Fix: the message used to read "ds_end > ds_start", the inverse
            # of the condition actually being checked.
            base.Logger.die("ds_start > ds_end")
        if ds_end > bp:
            base.Logger.die("ds_end > bp")

        # we need to find a vector orthogonal to direction
        dir_norm = np.sqrt(np.dot(direction, direction))
        if dir_norm < 1e-10:
            base.Logger.log("direction must be a valid vector, defaulting to (0, 0, 1)", base.Logger.WARNING)
            direction = np.array([0, 0, 1])
        else:
            direction /= dir_norm

        if perp is None or perp is False:
            v1 = np.random.random_sample(3)
            v1 -= direction * (np.dot(direction, v1))
            v1 /= np.sqrt(sum(v1 * v1))
        else:
            v1 = perp

        # Setup initial parameters
        ns1 = base.Strand()
        # and we need to generate a rotational matrix
        R0 = utils.get_rotation_matrix(direction, rot)
        # R = get_rotation_matrix(direction, np.deg2rad(35.9))
        R = utils.get_rotation_matrix(direction, [1, BP])
        a1 = v1
        a1 = np.dot (R0, a1)
        rb = np.array(start_pos)
        a3 = direction

        # Circular strands require a continuous deformation of the ideal helical pitch
        if circular == True:
            # Unit vector orthogonal to plane of torus
            # Note: Plane of torus defined by v1,direction
            torus_perp = np.cross(v1, direction)
            # Angle between base pairs along torus
            angle = 2. * np.pi / float(bp)
            # Radius of torus
            radius = base.FENE_R0_OXDNA / math.sqrt(2. * (1. - math.cos(angle)))

        if circular == True and option_use_helicity:
            # Draw backbone in a helical spiral around a torus
            # Draw bases pointing to center of torus
            for i in range(bp):
                # Torus plane defined by direction and v1
                v_torus = v1 * base.BASE_BASE * math.cos(i * angle) + \
                            direction * base.BASE_BASE * math.sin(i * angle)
                rb += v_torus
                # a3 is tangent to the torus
                a3 = v_torus / np.linalg.norm(v_torus)
                R = utils.get_rotation_matrix(a3, [i * (round(bp / BP_PER_TURN) + DELTA_LK) / float(bp) * 360, DEGREES])
                # a1 is orthogonal to a3 and the torus normal
                a1 = np.cross (a3, torus_perp)
                # Apply the rotation matrix
                a1 = np.dot(R, a1)
                ns1.add_nucleotide(base.Nucleotide(rb - base.CM_CENTER_DS * a1, a1, a3, sequence[i]))
            ns1.make_circular(check_join_len=True)
        elif circular == True and not option_use_helicity:
            # Fix: `xrange` is Python 2 only; `range` behaves identically here.
            for i in range(bp):
                rbx = math.cos (i * angle) * radius + 0.34 * math.cos(i * angle)
                rby = math.sin (i * angle) * radius + 0.34 * math.sin(i * angle)
                rbz = 0.
                rb = np.array([rbx, rby, rbz])
                a1x = math.cos (i * angle)
                a1y = math.sin (i * angle)
                a1z = 0.
                a1 = np.array([a1x, a1y, a1z])
                ns1.add_nucleotide(base.Nucleotide(rb, a1, np.array([0, 0, 1]), sequence[i]))
            ns1.make_circular(check_join_len=True)
        else:
            # Add nt in canonical double helix
            for i in range(bp):
                ns1.add_nucleotide(base.Nucleotide(rb - base.CM_CENTER_DS * a1, a1, a3, sequence[i]))
                if i != bp - 1:
                    a1 = np.dot(R, a1)
                    rb += a3 * base.BASE_BASE

        # Fill in complement strand
        if double == True:
            ns2 = base.Strand()
            for i in reversed(range(ds_start, ds_end)):
                # Note that the complement strand is built in reverse order
                nt = ns1._nucleotides[i]
                a1 = -nt._a1
                a3 = -nt._a3
                nt2_cm_pos = -(base.FENE_EPS + 2 * base.POS_BACK) * a1 + nt.cm_pos
                ns2.add_nucleotide(base.Nucleotide(nt2_cm_pos, a1, a3, 3 - sequence[i]))
            if ds_start == 0 and ds_end == bp and circular == True:
                ns2.make_circular(check_join_len=True)
            return ns1, ns2
        else:
            return ns1

    def generate_or_sq(self, bp, sequence=None, start_pos=np.array([0., 0., 0.]), direction=np.array([0., 0., 1.]), perp=None, double=True, rot=0., angle=np.pi / 180 * 33.75, length_change=0, region_begin=0, region_end=0):
        """
        Generate a duplex where the twist between consecutive base pairs can
        vary per step (`angle` may be a scalar or an array of length bp - 1),
        and where selected regions may be compressed/stretched via
        `length_change` together with `region_begin`/`region_end` index arrays.
        Returns (strand, complement) when double=True, else the single strand.
        """
        if length_change and len(region_begin) != len(region_end):
            if (len(region_end) + 1) == len(region_begin):
                base.Logger.log("the lengths of begin (%d) and end (%d) arrays are mismatched; I will try to proceed by using the number of basepairs as the last element of the end array" % (len(region_begin), len(region_end)), base.Logger.WARNING)
                region_end.append(bp + 1)
            else:
                base.Logger.die("the lengths of begin (%d) and end (%d) arrays are unrecoverably mismatched" % (len(region_begin), len(region_end)))

        # we need numpy array for these
        start_pos = np.array(start_pos, dtype=float)
        direction = np.array(direction, dtype=float)
        # Fix: `sequence == None` misbehaves on numpy arrays (see generate()).
        if sequence is None:
            sequence = np.random.randint(0, 4, bp)
        elif len(sequence) != bp:
            n = bp - len(sequence)
            sequence += np.random.randint(0, 4, n)
            base.Logger.log("sequence is too short, adding %d random bases" % n, base.Logger.WARNING)

        # angle should be an array, with a length 1 less than the # of base pairs
        if not isinstance(angle, np.ndarray):
            angle = np.ones(bp) * angle
        elif len(angle) != bp - 1:
            # NOTE(review): this only logs CRITICAL and then proceeds; it looks
            # like it should abort — confirm intended behaviour before changing.
            base.Logger.log("generate_or_sq: incorrect angle array length, should be 1 less than number of base pairs", base.Logger.CRITICAL)

        # create the sequence of the second strand as made of complementary bases
        sequence2 = [3 - s for s in sequence]
        sequence2.reverse()

        # we need to find a vector orthogonal to direction
        dir_norm = np.sqrt(np.dot(direction, direction))
        if dir_norm < 1e-10:
            base.Logger.log("direction must be a valid vector, defaulting to (0, 0, 1)", base.Logger.WARNING)
            direction = np.array([0, 0, 1])
        else:
            direction /= dir_norm

        if perp is None:
            v1 = np.random.random_sample(3)
            v1 -= direction * (np.dot(direction, v1))
            v1 /= np.sqrt(sum(v1 * v1))
        else:
            v1 = perp

        # and we need to generate a rotational matrix
        R0 = utils.get_rotation_matrix(direction, rot)

        ns1 = base.Strand()
        a1 = v1
        a1 = np.dot (R0, a1)
        rb = np.array(start_pos)
        a3 = direction
        Rs = []
        for i in range(bp):
            ns1.add_nucleotide(base.Nucleotide(rb - base.CM_CENTER_DS * a1, a1, a3, sequence[i]))
            if i != bp - 1:
                R = utils.get_rotation_matrix(direction, angle[i])
                Rs.append(R)
                a1 = np.dot(R, a1)
                rb += a3 * base.BASE_BASE
                if length_change:
                    for j in range(len(length_change)):
                        if i >= region_begin[j] and i < region_end[j]:
                            if length_change[j]:
                                rb += a3 * base.BASE_BASE * (-(float(length_change[j]) / (region_end[j] - region_begin[j])))

        if double == True:
            a1 = -a1
            a3 = -direction
            ns2 = base.Strand()
            for i in range(bp):
                ns2.add_nucleotide(base.Nucleotide(rb - base.CM_CENTER_DS * a1, a1, a3, sequence2[i]))
                if i != bp - 1:
                    # we loop over the rotation matrices in the reverse order, and use the transpose of each matrix
                    a1 = np.dot(Rs.pop().transpose(), a1)
                    rb += a3 * base.BASE_BASE
                    if length_change:
                        for j in range(len(length_change)):
                            if bp - 2 - i >= region_begin[j] and bp - 2 - i < region_end[j]:
                                if length_change[j]:
                                    rb += a3 * base.BASE_BASE * (-(float(length_change[j]) / (region_end[j] - region_begin[j])))
            return ns1, ns2
        else: return ns1

    def generate_double_offset(self, seqA, seqB, offset, start_pos=np.array([0, 0, 0]), direction=np.array([0, 0, 1]), perp=None, rot=0):
        """
        Generate two complementary strands where strand B anneals starting
        `offset` bases into strand A. Sequences may be strings or int lists.
        NOTE(review): the `perp` and `rot` parameters are accepted but not
        forwarded (they never were) — kept for interface compatibility.
        """
        if isinstance (seqA, str):
            seqa = [base.base_to_number[x] for x in seqA]
        else:
            seqa = seqA
        if isinstance (seqB, str):
            seqb = [base.base_to_number[x] for x in seqB]
        else:
            seqb = seqB

        bp = max (len(seqa), len(seqb) + offset)
        # Fix: the old positional call `self.generate(bp, None, start_pos,
        # direction, False, True, 0.)` dated from a signature where `double`
        # preceded `rot`; against the current signature it set rot=True and
        # double=0., so no complement strand was generated and the unpacking
        # below failed. Pass the intended values by keyword.
        s1, s2 = self.generate(bp, None, start_pos, direction, perp=False, rot=0., double=True)
        s1 = s1.get_slice (0, len(seqa))
        if len(seqb) + offset > len(seqa):
            s2 = s2.get_slice (0, len(seqb))  # starts from opposite end
        else:
            s2 = s2.get_slice (bp - offset - len(seqb), len(seqb))

        s1.set_sequence (seqa)
        s2.set_sequence (seqb)

        return s1, s2

    def generate_rw (self, sequence, start_pos=np.array([0., 0., 0.])):
        """
        Generate ssDNA as a random walk (high-energy configurations are possible):
            generate(bp=45,double=False,circular=False,random_walk=True)
        """
        # random walk generator
        base.Logger.log("Generating strand as a random walk. Remember to equilibrate the configuration with MC", base.Logger.WARNING)
        d = np.array ([0.7525, 0., 0.])
        pos = start_pos
        rw = []
        rw.append(pos)
        for _ in sequence[1:]:
            overlap = True
            while overlap:
                R = utils.get_random_rotation_matrix()
                trypos = pos + np.dot (R, d)
                overlap = False
                for r in rw:
                    dd = trypos - r
                    if np.dot (dd, dd) < 0.40 * 0.40:
                        overlap = True
                        break
            # Fix: only accept the trial position once it passes the overlap
            # check; previously `pos` advanced even on rejected trials, so the
            # walk could drift through (and re-seed retries from) overlapping
            # positions.
            pos = trypos
            rw.append (pos)

        # we get the a1 vectors in a smart way
        a1s = []
        d = rw[1] - rw[0]
        a1s.append (d / np.sqrt (np.dot(d, d)))

        for i in range (1, len(rw) - 1):
            # point a1 away from the average of the two neighbours
            d = (rw[i + 1] + rw[i - 1]) * 0.5
            d = rw[i] - d
            a1s.append (d / np.sqrt(np.dot (d, d)))

        d = rw[len(rw) - 1] - rw[len(rw) - 2]
        a1s.append (d / np.sqrt (np.dot(d, d)))

        s = base.Strand()
        for i, r in enumerate(rw):
            a1, _, a3 = utils.get_orthonormalized_base (a1s[i], utils.get_random_vector(), utils.get_random_vector())
            # we use abs since POS_BACK is negative
            cm = r + a1s[i] * abs(base.POS_BACK)
            s.add_nucleotide (base.Nucleotide (cm, a1, a3, sequence[i]))
        return s
class vhelix_vbase_to_nucleotide(object):
    """Maps cadnano (virtual helix, virtual base) squares to (strand index, nucleotide indices)."""
    # at the moment squares with skips in have entries in the dicts but with the nucleotide list empty (rather than having no entry) - I'm not sure whether or not this is desirable. It's probably ok

    def __init__(self):
        self._scaf = {}  # (vh, vb) -> (strand index, [nucleotide indices]) for the scaffold
        self._stap = {}  # same mapping for staple strands
        self.nuc_count = 0  # record the nucleotide count, updated only after a whole strand is added
        self.strand_count = 0

    def add_scaf(self, vh, vb, strand, nuc):
        self._scaf[(vh, vb)] = (strand, nuc)

    def add_stap(self, vh, vb, strand, nuc):
        self._stap[(vh, vb)] = (strand, nuc)

    # these methods use a reference vhvb2n object to make the final vhvb2n object
    def add_scaf_strand(self, add_strand, reference, continue_join = False):
        """Copy scaffold squares of strand `add_strand` from `reference`,
        re-basing nucleotide indices onto this object's running count.
        Returns 1 if nothing was copied, 0 otherwise."""
        count = 0
        size = len(self._scaf)
        # Fix: dict.iteritems() is Python 2 only; items() works on both 2 and 3.
        for (vh, vb), [strand_ind, nuc] in reference._scaf.items():
            if strand_ind == add_strand:
                self.add_scaf(vh, vb, self.strand_count, [x + self.nuc_count for x in nuc])
                count += len(nuc)
        self.nuc_count += count
        if len(self._scaf) == size:
            return 1
        else:
            if continue_join == False:
                self.strand_count += 1
            return 0

    def add_stap_strand(self, add_strand, reference, continue_join = False):
        """Staple-side counterpart of add_scaf_strand (same return convention)."""
        count = 0
        size = len(self._stap)
        # Fix: dict.iteritems() is Python 2 only; items() works on both 2 and 3.
        for (vh, vb), [strand_ind, nuc] in reference._stap.items():
            if strand_ind == add_strand:
                self.add_stap(vh, vb, self.strand_count, [x + self.nuc_count for x in nuc])
                count += len(nuc)
        self.nuc_count += count
        if len(self._stap) == size:
            return 1
        else:
            if continue_join == False:
                self.strand_count += 1
            return 0

    def add_strand(self, add_strand, reference, continue_join = False):
        """Returns 1 only when the strand was found on neither side.
        Note the deliberate short-circuit: if the scaffold side copied
        something (returns 0), the staple side is not visited."""
        if self.add_scaf_strand(add_strand, reference, continue_join) and self.add_stap_strand(add_strand, reference, continue_join):
            return 1
        else:
            return 0
|
<reponame>lshtm-gis/WHO_PHSM_Cleaning
import pandas as pd
import re
import os
import logging
import uuid
import random
def generate_blank_record():
    """
    Generate a blank record with the correct WHO PHSM keys.

    Other objects requiring the same selection of keys descend from here.

    Returns
    -------
    A blank record with keys in WHO PHSM column format.
    type
        dict.
    """
    # Key order matters downstream (it mirrors the WHO PHSM column order),
    # so the tuple below preserves the canonical sequence exactly.
    keys = (
        "processed", "uuid", "who_id", "who_id_original", "dataset",
        "prop_id", "keep", "duplicate_record_id", "who_region",
        "country_territory_area", "iso", "iso_3166_1_numeric",
        "admin_level", "area_covered", "prov_category", "prov_subcategory",
        "prov_measure", "who_code", "original_who_code", "who_category",
        "who_subcategory", "who_measure", "comments", "date_start",
        "measure_stage", "prev_measure_number", "following_measure_number",
        "date_end", "reason_ended", "targeted", "enforcement",
        "non_compliance_penalty", "value_usd", "percent_interest",
        "date_entry", "link", "link_live", "link_eng", "source",
        "source_type", "alt_link", "alt_link_live", "alt_link_eng",
        "source_alt", "queries_comments", "date_processed", "flag",
        "old_targeted",
    )
    record = {key: None for key in keys}
    # Every record carries a fresh universally-unique identifier.
    record["uuid"] = str(uuid.uuid4())
    return record
def new_id(dataset: str, length: int = 6, existing_ids: list = None):
    """
    Function to create a unique id given a list of existing ids.

    DEPRECATED?

    Parameters
    ----------
    dataset : str
        Dataset to which ids will be added.
    length : int
        Length of new ID number.
    existing_ids : list
        Vector of existing IDs (default None, treated as no existing IDs).

    Returns
    -------
    New ID number.
    type
        str.
    """
    # Fix: avoid a mutable default argument ([None] behaved like "no
    # existing IDs" anyway, since generated IDs are never None).
    if existing_ids is None:
        existing_ids = []
    candidate = create_id(dataset, length)
    while candidate in existing_ids:
        # Fix: the retry used to call create_id(dataset) without `length`,
        # silently reverting to the default length of 6.
        candidate = create_id(dataset, length)
    return candidate
def create_id(dataset: str, length: int = 6):
    """
    Create a random id of characters and numbers.

    DEPRECATED?

    Parameters
    ----------
    dataset : str
        Dataset to which ids will be added.
    length : int
        Length of new ID number.

    Returns
    -------
    New ID number.
    type
        str.
    """
    alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-'
    suffix = ''.join(random.choice(alphabet) for _ in range(length))
    return dataset + '_' + suffix
def apply_key_map(new_record: dict, old_record: dict, key_ref: dict):
    """
    Apply key mapping between two records based on a key reference.

    Example:
        Given `key_ref`: `{'column1':'column2'}`.
        Extracts values from `old_record['column1']` to `new_record['column2']`.

    Parameters
    ----------
    new_record : dict
        Record with WHO PHSM keys.
    old_record : dict
        Record with provider keys.
    key_ref : dict
        Reference for mapping keys between records.

    Returns
    -------
    Record with new values applied to specified keys.
    type
        dict.
    """
    for mapping in key_ref:
        # Best-effort: a single bad mapping entry is reported and skipped
        # rather than aborting the whole record.
        try:
            new_record = key_map(new_record,
                                 old_record,
                                 mapping['new_key'],
                                 mapping['old_key'])
        except Exception as e:
            print(e)
    return new_record
def key_map(new_record: dict, old_record: dict, new_key: str, old_key: str):
    """
    Implements key mapping from `new_record` to `old_record`.

    For more information see `apply_key_map`.

    Parameters
    ----------
    new_record : dict
        Record with WHO PHSM keys.
    old_record : dict
        Record with provider keys.
    new_key : str
        Key in `old_record`.
    old_key : str
        Corresponding key in `new_record`.

    Returns
    -------
    type
        Record with information mapped from `new_key` to `old_key`.
    """
    # A null/NaN target key means "no mapping defined" — leave the record as-is.
    if pd.isnull(new_key):
        return new_record
    new_record[new_key] = old_record[old_key]
    return new_record
def parse_date(record: dict):
    """Function to parse record date format.

    Currently relying on parsing behaviour of pandas.to_datetime.

    NOTE: This is vulnerable to USA format dates parsed as EU dates

    DEPRECATED?

    Parameters
    ----------
    record : dict
        Dataset record.

    Returns
    -------
    type
        Dataset record.
    """
    for field in ('date_start', 'date_end', 'date_entry'):
        record[field] = pd.to_datetime(record[field])
    return record
def get_min_id(fn: str, id_column: str = 'who_id'):
    """
    Function to open a file and extract the maximum numeric.

    This will be the new min id to be incremented for the ID field.

    Future: should be replaced by a set difference of existing IDs and an
    arbitrary ID sequence.

    Example:
        Extracts numeric value of ID `ACAPS_1234` -> `1234`.

    Parameters
    ----------
    fn : str
        Filename to reference dataset.
    id_column : str
        ID column name in reference dataset.

    Returns
    -------
    Maximum numeric ID value.
    type
        int.
    """
    data = pd.read_csv(fn, encoding='latin1', low_memory=False)
    # First run of digits in each non-null ID, e.g. "ACAPS_1234" -> 1234.
    numeric_ids = [int(re.findall(r'\d+', value)[0])
                   for value in data[id_column]
                   if not pd.isna(value)]
    return max(numeric_ids)
def assign_id(records: pd.DataFrame, min_id: int = 1):
    """
    Function to assign a unique ID to each record.

    IDs are assigned in the format `DATASET_NUMBER`. i.e. `ACAPS_1234`.

    Parameters
    ----------
    records : pandas.DataFrame
        Dataframe of records which will have ID numbers added.
    min_id : int
        Number to begin incrementing IDs from.

    Returns
    -------
    type
        Dataframe with IDs added.
    """
    # Ensure that no IDs are duplicated by incrementing by 1.
    start = min_id + 1
    datasets = records['dataset']
    numbers = range(start, start + len(datasets))
    records['who_id'] = [f'{ds}_{num}' for ds, num in zip(datasets, numbers)]
    return records
def assign_who_country_name(record: dict, country_ref: pd.DataFrame, missing_value: str='unknown'):
    """
    Function to assign country names by ISO code.

    Also adds: `who_region`, `country_territory_area`, `iso_3166_1_numeric`.

    WHO recognizes standard country names which are transformed from ISOs defined on provider country names.

    Parameters
    ----------
    record : dict
        Input record.
    country_ref : pd.DataFrame
        Dataframe of country name mappings.
    missing_value : str
        Value to add if name mapping fails - defaults to "unknown".
        This value is recognized by output checks.

    Returns
    -------
    type
        Record with country name mapping applied.
    """
    subset = country_ref.loc[country_ref['iso'] == record['iso'], :]
    # Exactly one reference row must match; anything else marks the record
    # as unmapped via the sentinel `missing_value`.
    try:
        assert len(subset.iloc[:, 1]) == 1
    except Exception:
        record['who_region'] = missing_value
        record['country_territory_area'] = missing_value
        record['iso_3166_1_numeric'] = missing_value
        return record

    record['who_region'] = str(subset['who_region'].iloc[0])
    record['country_territory_area'] = str(subset['country_territory_area'].iloc[0])
    record['iso_3166_1_numeric'] = int(subset['iso_3166_1_numeric'].iloc[0])
    return record
def assign_who_coding(record: dict, who_coding: pd.DataFrame, missing_value: str = 'unknown'):
    """
    Assign WHO coding to a record.

    Adds: `who_code`, `who_measure`, `who_subcategory`, `who_category`.
    Optionally adds: `targeted`, `non_compliance`, `enforcement`.

    Transforms provider coding of interventions to WHO PHSM coding.

    Parameters
    ----------
    record : dict
        Input record.
    who_coding : pd.DataFrame
        Dataframe of WHO PHSM intervention mappings.
    missing_value : str
        Value to add if name mapping fails - defaults to "unknown".
        This value is recognized by output checks.

    Returns
    -------
    type
        Record with WHO PHSM code mapping applied.
    """
    # Match on all three provider fields at once (None normalised to '').
    mask = (
        (who_coding['prov_measure'] == none_to_empty_str(record['prov_measure']))
        & (who_coding['prov_subcategory'] == none_to_empty_str(record['prov_subcategory']))
        & (who_coding['prov_category'] == none_to_empty_str(record['prov_category']))
    )
    coding = who_coding.loc[mask, :]

    required = ('who_code', 'who_measure', 'who_subcategory', 'who_category')
    # Exactly one coding row must match; otherwise flag all required fields.
    try:
        assert len(coding.iloc[:, 1]) == 1
    except Exception:
        for field in required:
            record[field] = missing_value
        return record

    for field in required:
        record[field] = coding[field].iloc[0]

    # Optional fields (missing for most records): copy only non-empty values.
    # WARNING: these could overwrite existing targeted / non_compliance /
    # enforcement values already on the record.
    optional = (('who_targeted', 'targeted'),
                ('non_compliance', 'non_compliance_penalty'),
                ('who_enforcement', 'enforcement'))
    for source_col, target_key in optional:
        try:
            value = coding[source_col].iloc[0]
            if value == '':
                raise ValueError
            record[target_key] = value
        except Exception:
            pass

    return record
def none_to_empty_str(s):
    """
    Convert None values to an empty string.

    Useful for changing None values for smooth mapping of who coding.

    Parameters
    ----------
    s : type
        String to be converted.

    Returns
    -------
    type
        Output string: '' when the input is None/NaN, else the original value.
    """
    return '' if pd.isna(s) else s
def replace_conditional(record: dict, field: str, value: str, replacement: str):
    """
    Function to conditionally replace a value in a field.

    Parameters
    ----------
    record : dict
        Input record.
    field : str
        Key of field to be conditionally altered.
    value : str
        Value to identify and replace.
    replacement : str
        Value to insert on replacement.

    Returns
    -------
    type
        Record with `record[field]` set to `replacement` when it equalled
        `value`; otherwise the record is returned unchanged.
    """
    current = record[field]
    if current == value:
        record[field] = replacement
    return record
def replace_sensitive_regions(record):
    """
    Replace a selection of commonly occurring admin level issues.

    WHO recognizes certain administrative definitions that differ from ISO conventions.

    Future: Move specific region definitions to `config` directory.

    Parameters
    ----------
    record : type
        Input record.

    Returns
    -------
    type
        Record with sensitive regions changed.
    """
    # (provider region name, WHO-recognised parent country) in application order.
    shifts = (
        ('Kosovo', 'Serbia'),
        ('Hong Kong', 'China'),
        ('Taiwan', 'China'),
        ('Macau', 'China'),
        ('Macao', 'China'),
        ('Guadeloupe', 'France'),
        ('Palestine', 'Israel'),
        ('West Bank and Gaza', 'Israel'),
    )
    for original_name, new_name in shifts:
        record = shift_sensitive_region(record, original_name, new_name)
    return record
def shift_sensitive_region(record: dict, original_name: str, new_name: str):
    """
    Function to demote sensitive country names to `area_covered` from `country_territory_area`.

    Parameters
    ----------
    record : dict
        Input record.
    original_name : str
        Original country name from provider dataset.
    new_name : str
        New WHO-recognised country name.

    Returns
    -------
    type
        Record with sensitive countries changed.
    """
    if record['country_territory_area'] != original_name:
        return record
    # Demote the sensitive name to the sub-national field and substitute
    # the WHO-recognised parent country.
    record['area_covered'] = record['country_territory_area']
    record['country_territory_area'] = new_name
    return record
def add_admin_level(record: dict):
    """
    Set admin_level values to "national" or "other".

    If `area_covered` is blank: "national", else: "other".
    An already-populated `admin_level` is left untouched.

    Parameters
    ----------
    record : dict
        Input record.

    Returns
    -------
    type
        Record with `admin_level` added.
    """
    if pd.isna(record['admin_level']):
        record['admin_level'] = 'national' if pd.isna(record['area_covered']) else 'other'
    return record
def remove_tags(record: dict, keys: list = None):
    """
    Remove HTML tags from defined columns.

    Some datasets (CDC_ITF) provide comments that are enclosed in
    HTML tags for display on the web.

    Identifies content inside of HTML tags and returns content only.

    Example:
        "<p>Content</p>" -> "Content"

    Parameters
    ----------
    record : dict
        Input record.
    keys : list
        List of which keys HTML tag replacement should be applied to
        (default None, meaning ['comments']).

    Returns
    -------
    type
        Record with HTML tags replaced in the defined tags.
    """
    # Fix: avoid a mutable default argument.
    if keys is None:
        keys = ('comments',)
    exp = re.compile(r'<[^>]+>')
    for key in keys:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Missing keys (KeyError) and non-string values
        # such as None/NaN (TypeError) are nulled out, as before.
        try:
            record[key] = exp.sub('', record[key])
        except (KeyError, TypeError):
            record[key] = None
    return record
def replace_country(record: dict, country_name: str, area_name: str):
    """
    Replace country name with an `area_covered` name.

    Promote a string in `area_covered` to `country_territory_area`.

    Applies to records where a WHO recognised country is defined as an
    administrative region of a different country.

    Parameters
    ----------
    record : dict
        Input record.
    country_name : str
        Country name to be matched.
    area_name : str
        Area name to be matched.

    Returns
    -------
    type
        Record with country `area_covered` promotion applied.
    """
    matches = (record['country_territory_area'] == country_name
               and record['area_covered'] == area_name)
    if matches:
        record['country_territory_area'] = area_name
        record['area_covered'] = None
    return record
|
<gh_stars>100-1000
#
# filename
# mldb.ai inc, 2015
# this file is part of mldb. copyright 2015 mldb.ai inc. all rights reserved.
#
# This test is for issues MLDB-779 AND MLDB-780
# We do the training pipelines twice and each cls has a different
# failure point
#
from mldb import mldb
import datetime, random
# Build a small random sparse dataset: 25 binary features plus a LABEL
# column; every third row is labelled "true".
dataset_config = {
    'type' : 'sparse.mutable',
    'id' : 'toy'
}

dataset = mldb.create_dataset(dataset_config)
mldb.log("data loader created dataset")

now = datetime.datetime.now()

for i in range(200):
    label = i % 3 == 0
    feats = []
    for x in range(25):
        rnd = random.random()
        # Higher-index features fire more often; rows with label=true get an
        # extra 40% chance, which makes the classes separable.
        if rnd < x/25. or (label is True and rnd < 0.4):
            feats.append(["feat%d" % x, 1, now])
        #else:
        #    feats.append(["feat%d" % x, 0, now])

    feats.append(["LABEL", "true" if label else "false", now])
    dataset.record_row("example-%d" % i, feats)

mldb.log("Committing dataset")
dataset.commit()

# Train, score and explain with each classifier type; each classifier used to
# hit a distinct failure point (MLDB-779 / MLDB-780, see header comment).
for cls in ["bdt", "glz", "bs"]:
    ############
    ### train a cls
    mldb.delete("/v1/procedures/tng_classif")
    rez = mldb.put("/v1/procedures/tng_classif", {
        "type": "classifier.train",
        "params": {
            "trainingData": {
                "where": "rowHash() % 3 != 1",
                "select":
                    "{* EXCLUDING(LABEL)} as features, LABEL = 'true' as label",
                "from" : { "id": "toy" }
            },
            "configuration": {
                "glz": {
                    "type": "glz",
                    "verbosity": 3,
                    "normalize": False,
                    "link_function": 'linear',
                    "regularization": 'none'
                },
                "bs": {
                    "type": "boosted_stumps",
                    "min_iter": 10,
                    "max_iter": 200,
                    "verbosity": 3
                },
                "bdt": {
                    "type": "boosting",
                    "min_iter": 10,
                    "max_iter": 200,
                    "weak_learner": {
                        "type": "decision_tree",
                        "max_depth": 1
                    }
                }
            },
            "algorithm": cls,
            "modelFileUrl": "file://models/tng.cls"
        }
    })
    mldb.log(rez.json())

    rez = mldb.put("/v1/procedures/tng_classif/runs/1")
    mldb.log(rez.json())

    # this is where the bs fails
    rez = mldb.get("/v1/procedures/tng_classif/runs/1/details")
    mldb.log(rez.json())

    ##########
    ## now test it
    mldb.delete("/v1/functions/tng_scorer")
    rez = mldb.put("/v1/functions/tng_scorer", {
        "type": "classifier",
        "params": {"modelFileUrl": "file://models/tng.cls"}
    })
    # Fix: `mldb.log(rez.json)` logged the bound method object instead of the
    # response body; call it like every other log site does.
    mldb.log(rez.json())

    mldb.delete("/v1/procedures/tng_score_proc")
    mldb.delete("/v1/datasets/toy_cls_baseline_scorer_rez")
    rez = mldb.put("/v1/procedures/tng_score_proc", {
        "type": "classifier.test",
        "params": {
            "testingData": {
                "select" :
                    "{*} as features, LABEL = 'true' as label, tng_scorer({{* EXCLUDING(LABEL)} as features})[score] as score",
                "from": {"id": "toy" },
                "where" : "rowHash() % 3 = 1"
            },
            "outputDataset": {
                "id":"toy_cls_baseline_scorer_rez",
                "type": "sparse.mutable"
            }
        }
    })
    mldb.log(rez.json())

    rez = mldb.post("/v1/procedures/tng_score_proc/runs")
    mldb.log(rez.json())

    ######
    # create explain function
    mldb.delete("/v1/functions/tng_explain")
    rez = mldb.put("/v1/functions/tng_explain", {
        "type": "classifier.explain",
        "params": {
            "modelFileUrl": "file://models/tng.cls"
        }
    })
    mldb.log(rez.json())

    # this currently fails with the glz with a "Feature_Set::operator []:
    # feature not found\
    rez = mldb.get(
        "/v1/query",
        q="select tng_explain({{* EXCLUDING(LABEL)} as features, 1 as label})[explanation], "
          "* from toy where rowHash() % 3 = 1",
        format="sparse")
    mldb.log(rez.json())

# NOTE(review): `request` is not defined in this file; presumably injected by
# the MLDB test harness at runtime — confirm before running standalone.
request.set_return('success')
|
<gh_stars>10-100
# Copyright 2014 OpenCore LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import sh
import sys
import time
from string import Template
class MongoInitializer(object):
def __init__(self, system):
"""
Create a new initializer
Param user The user login for the git repo
"""
self.template_dir = None
self.template_repo = None
self.fabric = None
self.container_data_dir = MongoConfig.data_directory
self.container_log_dir = MongoConfig.log_directory
def new_host_name(self, instance_id):
"""
Generate a new hostname
"""
return 'mongo' + str(instance_id)
def _execute_service(self, containers, entry_point, fabric, cmd):
"""
Start the service on the containers.
"""
all_output = {}
for c in containers:
if c.args:
args = c.args
else:
args = 'notrust'
output = fabric.cmd([c], '/service/sbin/startnode %s %s' % (cmd, args))
all_output = dict(all_output.items() + output.items())
# Now wait a couple seconds to make sure
# everything has started.
time.sleep(2)
return all_output
def start_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "start")
def restart_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "restart")
def stop_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "stop")
def _generate_config_dir(self, uuid):
"""
Generate a new configuration.
"""
return 'mongo_' + str(uuid)
def get_public_ports(self, num_instances):
"""
Ports to expose to the outside world.
"""
return []
def get_internal_ports(self, num_instances):
"""
Ports needed for communication within the network.
This is usually used for internal IPC.
"""
return []
def get_working_ports(self, num_instances):
"""
Ports necessary to get things working.
"""
return [MongoConfig.MONGO_PORT]
def get_total_instances(self, num_instances, layers):
instances = []
for i in range(num_instances):
instances.append('mongodb')
return instances
def generate(self, num):
"""
Generate a new configuration
Param num Number of instances that need to be configured
Param image Image type of the instances
"""
return MongoConfig(num)
def _generate_mongo_config(self, host_dir, config, arg):
"""
Generate the MongoDB configuration file.
"""
if arg == "trust":
conf_file = "trusted.conf"
else:
conf_file = "mongodb.conf"
in_file = open(self.template_dir + '/%s.template' % conf_file, 'r')
out_file = open(host_dir + '/%s' % conf_file, 'w+')
changes = { "MONGO_LOG":config.log_directory,
"MONGO_DATA":config.data_directory }
for line in in_file:
s = Template(line).substitute(changes)
out_file.write(s)
out_file.close()
in_file.close()
def apply(self, config, containers):
    """
    Apply the configuration to the instances.

    config:     MongoConfig for this deployment (supplies the uuid and
                the container-side configuration directory)
    containers: container descriptors; containers[0] hosts the MongoDB
                instance whose addresses are exported

    Returns (config_dirs, entry_point): the generated files to
    transfer per container, and the service entry metadata
    (addresses, instance list, credentials).
    """
    entry_point = {'type': 'mongodb'}
    config_dirs = []

    # Keep track of the MongoDB IP address.
    entry_point['mongo'] = containers[0]['data_ip']
    entry_point['ip'] = containers[0]['manage_ip']

    # Stage the generated configuration in a per-deployment tmp dir.
    new_config_dir = "/tmp/" + self._generate_config_dir(config.uuid)
    try:
        sh.mkdir('-p', new_config_dir)
    except Exception:
        # Best effort, as before -- but no longer a bare "except:",
        # which also swallowed KeyboardInterrupt/SystemExit.
        sys.stderr.write('could not create config dir ' + new_config_dir + '\n')

    # This file records all instances so that we can
    # generate the hosts file.
    entry_point['instances'] = []
    for server in containers:
        entry_point['instances'].append([server['data_ip'], server['host_name']])

    if 'storage' not in containers[0]:
        # This is being called as a storage service.
        # The client service doesn't do anything right now.
        if 'args' in containers[0] and containers[0]['args']:
            self._generate_mongo_config(new_config_dir, config, containers[0]['args'])
        else:
            self._generate_mongo_config(new_config_dir, config, 'notrust')

        # Expose the login info.
        output = self.fabric.cmd_raw(key=containers[0]['container'].privatekey,
                                     ip=entry_point['mongo'],
                                     cmd='/service/sbin/startnode login',
                                     user=self.fabric.docker_user)
        logging.warning(str(output))
        login_info = json.loads(str(output))
        entry_point['mongo_user'] = login_info['user']
        entry_point['mongo_pass'] = login_info['pass']

    # Transfer the configuration to every container.
    for c in containers:
        config_files = new_config_dir + '/*'
        config_dirs.append([c['container'],
                            config_files,
                            config.config_directory])

    return config_dirs, entry_point
class MongoConfig(object):
    """Configuration values for a MongoDB service deployment."""

    # Container-side paths and the standard MongoDB client port.
    log_directory = '/service/logs/'
    config_directory = '/service/conf/mongodb/'
    data_directory = '/service/data/'
    MONGO_PORT = '27017'

    def __init__(self, num):
        """Record the instance count and snapshot the class defaults."""
        self.num = num
        self.mongo_port = MongoConfig.MONGO_PORT
        for attr in ('config_directory', 'log_directory', 'data_directory'):
            setattr(self, attr, getattr(MongoConfig, attr))
|
<reponame>pcdshub/whatrecord
import dataclasses
import importlib
import inspect
import logging
import pkgutil
import re
import sys
import typing
from pathlib import Path
from types import ModuleType
from typing import Dict, List, Optional, Union
import apischema
import pytest
from .. import (access_security, asyn, autosave, cache, common, ioc_finder,
motor, shell, snl)
from ..common import FullLoadContext, LoadContext
MODULE_PATH = Path(__file__).parent
logger = logging.getLogger(__name__)
# Dataclasses excluded from discovery entirely (never parametrized).
SKIP_CLASSES = (
    cache.Cached,
    ioc_finder.IocScriptStaticInfoList,
    ioc_finder.IocScriptStaticList,
)

# Classes that are serialized but whose deserialized value is not
# compared back against the original instance.
SKIP_DESERIALIZATION = {
    # These take too long to round-trip and verify somehow:
    shell.LoadedIoc,
    shell.ShellState,
    autosave.AutosaveState,
    asyn.AsynState,
    motor.MotorState,
    access_security.AccessSecurityState,
}
def find_whatrecord_submodules() -> Dict[str, ModuleType]:
    """Find all whatrecord submodules, as a dictionary of name to module."""
    package_root = str(MODULE_PATH.parent)
    discovered = {}
    for info in pkgutil.walk_packages(path=[package_root], prefix="whatrecord."):
        name = info.name
        if name.endswith("__main__"):
            continue
        try:
            module = sys.modules[name]
        except KeyError:
            # Submodules may not yet be imported; do that here.
            try:
                module = importlib.import_module(name, package="whatrecord")
            except Exception:
                logger.exception("Failed to import %s", name)
                continue
        discovered[name] = module
    return discovered
def find_all_dataclasses() -> List[type]:
    """Find all dataclasses in whatrecord and return them as a sorted list."""

    def should_include(obj):
        # Only concrete dataclass types, minus the explicit skip list.
        return (
            inspect.isclass(obj)
            and dataclasses.is_dataclass(obj)
            and obj not in SKIP_CLASSES
        )

    def sort_key(cls):
        # Deterministic, human-readable ordering for parametrized IDs.
        return (cls.__module__, cls.__name__)

    found = {
        obj
        for module in find_whatrecord_submodules().values()
        for _, obj in inspect.getmembers(module, predicate=should_include)
    }
    # sorted() already returns a list; the old list(sorted(...)) wrapper
    # was redundant.
    return sorted(found, key=sort_key)
# Map every discovered dataclass name to its class, for test parametrization.
dataclass_name_to_class = {cls.__name__: cls for cls in find_all_dataclasses()}
# Decorator: run the decorated test once per discovered dataclass.
all_dataclasses = pytest.mark.parametrize(
    "cls", [pytest.param(cls, id=name) for name, cls in dataclass_name_to_class.items()]
)
# Representative constructor values keyed by field type annotation; used by
# try_to_instantiate() to fill fields that have no default.
init_args_by_type = {
    Optional[List[LoadContext]]: None,
    Optional[common.IocMetadata]: None,
    Optional[str]: None,
    Path: Path(),
    Union[int, str]: "abc",
    Union[shell.IocLoadFailure, str]: "use_cache",
    Union[str, List[str]]: ["a", "b", "c"],
    bool: True,
    bytes: b"testing",
    FullLoadContext: [LoadContext("test", 1)],
    LoadContext: LoadContext("test", 1),
    dict: {},
    float: 10,
    int: 10,
    list: [],
    re.Pattern: re.compile("abc"),
    str: "testing",
    typing.Any: 123,
    typing.Tuple[str, str]: ("a", "b"),
    Optional[int]: 11,
    Optional[common.RecordDefinitionAndInstance]: None,
    Optional[common.RecordInstance]: None,
    Optional[common.RecordType]: None,
    Union[snl.Declarator, snl.Variable]: snl.Variable(context=[], name="test"),
    snl.OptionalExpression: None,
}
def try_to_instantiate(cls):
    """
    Build an instance of dataclass *cls*, supplying a representative
    value (from init_args_by_type) for every field without a default.
    """
    type_hints = apischema.utils.get_type_hints(cls)
    kwargs = {}
    for field in dataclasses.fields(cls):
        has_default = (
            field.default is not dataclasses.MISSING
            or field.default_factory is not dataclasses.MISSING
        )
        if has_default:
            continue
        field_type = type_hints[field.name]
        if field_type in init_args_by_type:
            value = init_args_by_type[field_type]
        elif str(field_type) in init_args_by_type:
            value = init_args_by_type[str(field_type)]
        elif dataclasses.is_dataclass(field_type):
            # Nested dataclass: build it recursively.
            value = try_to_instantiate(field_type)
        else:
            # Fall back to the generic origin (e.g. list for List[X]).
            origin = apischema.typing.get_origin(field_type)
            try:
                value = init_args_by_type[origin]
            except KeyError:
                raise ValueError(f"Missing in dict: {field_type} ({origin})")
        kwargs[field.name] = value
    return cls(**kwargs)
@all_dataclasses
def test_serialize(cls):
    """Round-trip *cls* through apischema serialize/deserialize."""
    obj = try_to_instantiate(cls)
    payload = apischema.serialize(obj)
    print(cls)
    print("Serialized:")
    print(payload)
    roundtripped = apischema.deserialize(cls, payload)
    print("Deserialized:")
    print(roundtripped)
    # Some classes are too expensive to verify for equality; skip those.
    if cls not in SKIP_DESERIALIZATION:
        assert roundtripped == obj
|
<gh_stars>100-1000
# Ant-FS
#
# Copyright (c) 2012, <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import array
import datetime
import logging
import threading
import queue
from ant.easy.channel import Channel
from ant.easy.node import Node, Message
import ant.fs.command
from ant.fs.beacon import Beacon
from ant.fs.command import (
LinkCommand,
DownloadRequest,
DownloadResponse,
AuthenticateCommand,
AuthenticateResponse,
DisconnectCommand,
UploadRequest,
UploadResponse,
UploadDataCommand,
UploadDataResponse,
EraseRequestCommand,
EraseResponse,
)
from ant.fs.commandpipe import CreateFile, Response, Time, TimeResponse
from ant.fs.file import Directory
from ant.fs.commons import crc
_logger = logging.getLogger("ant.fs.manager")
class AntFSException(Exception):
    """Base class for ANT-FS protocol errors."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
        self._error = error
        self._errno = errno

    def get_error(self):
        """Return the error text, prefixed with the numeric code if present."""
        if self._errno is None:
            return self._error
        return "{0}: {1}".format(self._errno, self._error)
class AntFSDownloadException(AntFSException):
    """Raised when a download request or transfer fails."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
class AntFSUploadException(AntFSException):
    """Raised when an upload request or transfer fails."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
class AntFSEraseException(AntFSException):
    """Raised when an erase request fails."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
class AntFSAuthenticationException(AntFSException):
    """Raised when passkey or pairing authentication is rejected."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
class AntFSCreateFileException(AntFSException):
    """Raised when a file cannot be created on the client device."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
class AntFSTimeException(AntFSException):
    """Raised when setting the client device time fails."""

    def __init__(self, error, errno=None):
        super().__init__(error, errno)
class Application:
_serial_number = 1337
_frequency = 19 # 0 to 124, x - 2400 (in MHz)
def __init__(self):
self._queue = queue.Queue()
self._beacons = queue.Queue()
self._node = Node()
try:
NETWORK_KEY = [<KEY>
self._node.set_network_key(0x00, NETWORK_KEY)
print("Request basic information...")
m = self._node.request_message(Message.ID.RESPONSE_CAPABILITIES)
print(" Capabilities: ", m[2])
# m = self._node.request_message(Message.ID.RESPONSE_ANT_VERSION)
# print " ANT version: ", struct.unpack("<10sx", m[2])[0]
# m = self._node.request_message(Message.ID.RESPONSE_SERIAL_NUMBER)
# print " Serial number:", struct.unpack("<I", m[2])[0]
print("Starting system...")
# NETWORK_KEY= [<KEY>]
# self._node.set_network_key(0x00, NETWORK_KEY)
print("Key done...")
self._channel = self._node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
self._channel.on_broadcast_data = self._on_data
self._channel.on_burst_data = self._on_data
self.setup_channel(self._channel)
self._worker_thread = threading.Thread(target=self._worker, name="ant.fs")
self._worker_thread.start()
except Exception as e:
self.stop()
raise e
def _worker(self):
self._node.start()
def _main(self):
try:
_logger.debug("Link level")
beacon = self._get_beacon()
if self.on_link(beacon):
for i in range(0, 5):
beacon = self._get_beacon()
if (
beacon.get_client_device_state()
== Beacon.ClientDeviceState.AUTHENTICATION
):
_logger.debug("Auth layer")
if self.on_authentication(beacon):
_logger.debug("Authenticated")
beacon = self._get_beacon()
self.on_transport(beacon)
self.disconnect()
break
finally:
_logger.debug("Run 5")
self.stop()
def _on_beacon(self, data):
b = Beacon.parse(data)
self._beacons.put(b)
def _on_command(self, data):
c = ant.fs.command.parse(data)
self._queue.put(c)
def _on_data(self, data):
# print "_on_data", data, len(data)
if data[0] == 0x43:
self._on_beacon(data[:8])
if len(data[8:]) > 0:
self._on_command(data[8:])
elif data[0] == 0x44:
self._on_command(data)
def _get_beacon(self):
b = self._beacons.get()
self._beacons.task_done()
return b
def _get_command(self, timeout=15.0):
_logger.debug("Get command, t%d, s%d", timeout, self._queue.qsize())
c = self._queue.get(True, timeout)
self._queue.task_done()
return c
def _send_command(self, c):
data = c.get()
if len(data) == 8:
self._channel.send_acknowledged_data(data)
else:
self._channel.send_burst_transfer(data)
# Application actions are defined from here
# =======================================================================
# These should be overloaded:
def setup_channel(self, channel):
pass
def on_link(self, beacon):
pass
def on_authentication(self, beacon):
pass
def on_transport(self, beacon):
pass
# Shouldn't have to touch these:
def start(self):
self._main()
def stop(self):
self._node.stop()
def _send_commandpipe(self, data):
# print "send commandpipe", data
self.upload(0xFFFE, data)
def _get_commandpipe(self):
# print "get commandpipe"
return ant.fs.commandpipe.parse(self.download(0xFFFE))
def create(self, typ, data, callback=None):
# print "create", typ
request = CreateFile(len(data), 0x80, [typ, 0x00, 0x00], [0x00, 0xFF, 0xFF])
self._send_commandpipe(request.get())
result = self._get_commandpipe()
# result._debug()
if result.get_response() != Response.Response.OK:
raise AntFSCreateFileException(
"Could not create file", result.get_response()
)
# print "create result", result, result.get_index(), result.get_data_type(), result.get_identifier()
# d = self.download_directory()
# Inform the application that the upload request was successfully created
if callback is not None:
callback(0)
self.upload(result.get_index(), data, callback)
return result.get_index()
def upload(self, index, data, callback=None):
# print "upload", index, len(data)
iteration = 0
while True:
# Request Upload
# Continue using Last Data Offset (special MAX_ULONG value)
request_offset = 0 if iteration == 0 else 0xFFFFFFFF
self._send_command(UploadRequest(index, len(data), request_offset))
upload_response = self._get_command()
# upload_response._debug()
if upload_response._get_argument("response") != UploadResponse.Response.OK:
raise AntFSUploadException(
"Upload request failed", upload_response._get_argument("response")
)
# Upload data
offset = upload_response._get_argument("last_data_offset")
max_block = upload_response._get_argument("maximum_block_size")
# print " uploading", offset, "to", offset + max_block
data_packet = data[offset : offset + max_block]
crc_seed = upload_response._get_argument("crc")
crc_val = crc(data_packet, upload_response._get_argument("crc"))
# Pad with 0 to even 8 bytes
missing_bytes = 8 - (len(data_packet) % 8)
if missing_bytes != 8:
data_packet.extend(array.array("B", [0] * missing_bytes))
# print " adding", str(missing_bytes), "padding"
# print " packet", len(data_packet)
# print " crc ", crc_val, "from seed", crc_seed
self._send_command(
UploadDataCommand(crc_seed, offset, data_packet, crc_val)
)
upload_data_response = self._get_command()
# upload_data_response._debug()
if (
upload_data_response._get_argument("response")
!= UploadDataResponse.Response.OK
):
raise AntFSUploadException(
"Upload data failed", upload_data_response._get_argument("response")
)
if callback is not None and len(data) != 0:
callback((offset + len(data_packet)) / len(data))
if offset + len(data_packet) >= len(data):
# print " done"
break
# print " one more"
iteration += 1
def download(self, index, callback=None):
offset = 0
initial = True
crc = 0
data = array.array("B")
while True:
_logger.debug("Download %d, o%d, c%d", index, offset, crc)
self._send_command(DownloadRequest(index, offset, True, crc))
_logger.debug("Wait for response...")
try:
response = self._get_command()
if response._get_argument("response") == DownloadResponse.Response.OK:
remaining = response._get_argument("remaining")
offset = response._get_argument("offset")
total = offset + remaining
data[offset:total] = response._get_argument("data")[:remaining]
# print "rem", remaining, "offset", offset, "total", total, "size", response._get_argument("size")
# TODO: check CRC
if callback is not None and response._get_argument("size") != 0:
callback(total / response._get_argument("size"))
if total == response._get_argument("size"):
return data
crc = response._get_argument("crc")
offset = total
else:
raise AntFSDownloadException(
"Download request failed: ", response._get_argument("response")
)
except queue.Empty:
_logger.debug("Download %d timeout", index)
# print "recover from download failure"
def download_directory(self, callback=None):
data = self.download(0, callback)
return Directory.parse(data)
def set_time(self, time=datetime.datetime.utcnow()):
"""
:param time: datetime in UTC, or None to set to current time
"""
utc_tai_diff_seconds = 35
offset = time - datetime.datetime(1989, 12, 31, 0, 0, 0)
t = Time(int(offset.total_seconds()) + utc_tai_diff_seconds, 0xFFFFFFFF, 0)
self._send_commandpipe(t.get())
result = self._get_commandpipe()
if result.get_response() != TimeResponse.Response.OK:
raise AntFSTimeException("Failed to set time", result.get_response())
def erase(self, index):
self._send_command(EraseRequestCommand(index))
response = self._get_command()
if (
response._get_argument("response")
!= EraseResponse.Response.ERASE_SUCCESSFUL
):
raise AntFSDownloadException(
"Erase request failed: ", response._get_argument("response")
)
def link(self):
self._channel.request_message(Message.ID.RESPONSE_CHANNEL_ID)
self._send_command(LinkCommand(self._frequency, 4, self._serial_number))
# New period, search timeout
self._channel.set_period(4096)
self._channel.set_search_timeout(10)
self._channel.set_rf_freq(self._frequency)
def authentication_serial(self):
self._send_command(
AuthenticateCommand(AuthenticateCommand.Request.SERIAL, self._serial_number)
)
response = self._get_command()
return (response.get_serial(), response.get_data_string())
def authentication_passkey(self, passkey):
self._send_command(
AuthenticateCommand(
AuthenticateCommand.Request.PASSKEY_EXCHANGE,
self._serial_number,
passkey,
)
)
response = self._get_command()
if response._get_argument("type") == AuthenticateResponse.Response.ACCEPT:
return response.get_data_array()
else:
raise AntFSAuthenticationException(
"Passkey authentication failed", response._get_argument("type")
)
def authentication_pair(self, friendly_name):
data = array.array("B", map(ord, list(friendly_name)))
self._send_command(
AuthenticateCommand(
AuthenticateCommand.Request.PAIRING, self._serial_number, data
)
)
response = self._get_command(30)
if response._get_argument("type") == AuthenticateResponse.Response.ACCEPT:
return response.get_data_array()
else:
raise AntFSAuthenticationException(
"Pair authentication failed", response._get_argument("type")
)
def disconnect(self):
d = DisconnectCommand(DisconnectCommand.Type.RETURN_LINK, 0, 0)
self._send_command(d)
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.12.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x02\xf5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\xca\x00\x00\x00\x4f\x08\x06\x00\x00\x00\xaa\x18\x5a\xc3\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x12\x74\x00\x00\x12\x74\x01\xde\x66\
\x1f\x78\x00\x00\x02\x8a\x49\x44\x41\x54\x78\x5e\xed\xda\xd1\x6d\
\xdb\x30\x10\x80\x61\xa7\x23\x65\x81\x2c\xd2\xf1\xba\x48\x17\xe8\
\x2c\xda\x20\x05\x01\x0b\x11\x8c\xd8\x3e\x91\x47\x8a\x72\xbe\xef\
\x25\x7e\x32\x62\xe0\x7e\x9e\x08\xfb\x6d\x59\x96\xcf\x0b\xf0\xd0\
\xaf\xeb\x5f\xe0\x01\xa1\x40\x80\x50\x20\x40\x28\x10\x20\x14\x08\
\x10\x0a\x04\x08\x05\x02\x84\x02\x01\x42\x81\x00\xa1\x40\xc0\x34\
\x3f\x61\x79\xff\xfb\xfb\xfa\x0a\xbe\xfc\xfb\xf8\x73\x7d\x75\xac\
\xc3\x42\x11\x06\x35\x8e\x0a\x67\x68\x28\xe2\x20\xd3\xc8\x68\x86\
\x84\x22\x10\x7a\x1a\x11\x4c\xf7\xcb\xbc\x48\xe8\x6d\xc4\x8c\x75\
\xdb\x28\x7b\xff\xf9\xc8\xa9\x20\x3a\x9e\xe9\xb5\x5d\xba\x84\x12\
\x1d\xe8\x96\x0f\x25\x1a\xee\xe9\x11\x4b\x7a\x28\xcf\x06\xb8\xc7\
\x87\x10\x0d\xb7\xb2\xe7\x2c\xf5\x8e\x72\x44\x24\x45\xaf\xf7\xe5\
\xbc\xb2\x0f\xcf\xb4\x8d\xf2\xe8\x1f\x1b\x39\xc8\xb6\x0b\x5b\x59\
\xb3\x97\xb2\x51\x66\x89\xa4\xb0\x5d\xd8\xca\x3a\x38\x9b\x43\x99\
\x29\x92\x95\x58\xd8\xca\x88\xa5\x29\x94\x19\x23\x59\x89\x85\xad\
\xd6\x58\xba\x7c\xe1\x38\xcb\x90\x8a\x85\x2c\xd5\xa1\xdc\x2b\x74\
\xb6\xe1\x14\x0b\xab\x96\xad\x92\xba\x51\x66\x1d\x4a\xb1\xd0\xaa\
\x2a\x94\x8c\xcb\x11\x1c\xa1\x76\x76\xd3\x36\xca\xec\xa7\xb6\xad\
\x42\x8b\xdd\xa1\xd8\x26\x9c\x5d\xcd\x0c\xa7\x6c\x94\xb3\x9c\xd6\
\xb6\x0a\xb5\x52\x2f\xf3\x70\x16\x7b\xb7\xca\xae\x50\xbe\x7b\xf3\
\xb3\x9d\xd2\xb6\x0a\x35\x6c\x14\x08\x10\x0a\x04\xfc\xc8\x50\x3c\
\x7e\x51\xec\xb9\xa7\x84\x43\x79\x85\xfb\x09\xd4\xf2\xe8\x05\x01\
\x42\x81\x00\xa1\x40\x80\x50\x20\x40\x28\x10\x20\x14\x08\x10\x0a\
\x04\x08\x05\x02\x9a\x42\xa9\xf9\x5d\x3f\x9c\x51\x38\x94\x57\xfa\
\x16\x5e\xe0\x14\x7b\x66\xda\xa3\x17\x04\x08\x05\x02\x9a\x43\x39\
\xdb\x63\x8c\xc7\x2e\x6a\xec\x0a\xc5\xaf\x85\x79\x15\x7b\x67\x39\
\xe5\xd1\xeb\x2c\xa7\xb4\x6d\x42\xad\xdd\xa1\xd8\x2a\xfc\x44\x69\
\x97\xf9\xd9\x4f\x6b\xdb\x84\x55\xcd\x61\x5f\x15\x8a\xad\xc2\x59\
\xd5\xce\x6e\xda\x46\x29\x66\x3d\xb5\x6d\x13\x5a\x55\x87\x72\xaf\
\xcc\xd9\x86\x52\x24\xac\x5a\x9e\x84\x52\x37\xca\x6a\x96\xe1\x14\
\x09\x59\x9a\x42\x79\x54\xe8\xd1\x43\x2a\x12\xb6\x5a\xef\xd5\xcd\
\x1b\x65\xc6\x58\x44\xc2\x56\x6b\x24\x45\xca\xa3\xd7\x4c\xb1\x88\
\x84\xad\x8c\x48\x8a\x2e\x77\x94\x5b\x65\x78\x47\x0c\xb0\x48\xe8\
\xe5\x6d\x59\x96\xcf\xeb\xeb\x14\xcf\x86\x35\xab\xf0\x2d\x81\x70\
\x2b\x7b\xce\xd2\x43\x29\xa2\x83\xdb\xf2\x61\xc4\xc1\x3d\x3d\x0e\
\xe3\x2e\xa1\x14\x7b\x07\x39\xf2\xe1\xc4\xc1\x33\x3d\x22\x29\xba\
\x85\xb2\x32\xdc\x8c\xd2\x2b\x92\xa2\x7b\x28\x85\x58\xe8\xa9\x67\
\x20\xab\x21\xa1\xac\x04\x43\xa6\x11\x81\xac\x86\x86\xb2\x25\x1a\
\x6a\x8c\x8c\x63\xeb\xb0\x50\x6e\x09\x87\xef\x1c\x15\xc6\xad\x69\
\x42\x81\x99\x0d\xf9\x66\x1e\xce\x4e\x28\x10\x20\x14\x08\x10\x0a\
\x04\x08\x05\x02\x84\x02\x01\x42\x81\x00\xa1\xc0\x53\x97\xcb\x7f\
\x68\xd3\x02\xb5\x11\xd4\xce\x3d\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x02\xf4\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\xca\x00\x00\x00\x4f\x08\x06\x00\x00\x00\xaa\x18\x5a\xc3\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x12\x74\x00\x00\x12\x74\x01\xde\x66\
\x1f\x78\x00\x00\x02\x89\x49\x44\x41\x54\x78\x5e\xed\xda\xc1\x6d\
\x1b\x31\x10\x40\x51\x39\x69\x27\xc7\xf4\x5f\x41\x8e\x2e\x44\x0d\
\x04\x0e\x16\x10\x61\xc1\x70\xa4\x21\x97\x43\xee\x8a\xef\x5d\xac\
\x93\x45\x03\xf3\x35\x4b\xc8\x6f\xd7\xeb\xf5\xe3\x02\x3c\xf4\xe3\
\xf6\x13\x78\x40\x28\x10\x20\x14\x08\x10\x0a\x04\x08\x05\x02\x84\
\x02\x01\x42\x81\x00\xa1\x40\x80\x50\x20\x40\x28\x10\x70\x98\x7f\
\x61\xf9\xfb\xeb\xf7\xed\x15\x7c\xfa\xf9\xfe\xe7\xf6\x6a\xae\x69\
\xa1\x08\x83\x16\xb3\xc2\x19\x1a\x8a\x38\xe8\x69\x64\x34\x43\x42\
\x11\x08\x99\x46\x04\x93\x7e\x99\x17\x09\xd9\x46\xcc\x58\xda\x46\
\x11\x08\xf7\x22\x9f\xfa\x3d\x66\x26\x6b\xbb\xa4\x84\x22\x12\x36\
\x7b\x86\x76\xcf\x0c\x65\xc4\xd2\x3d\x14\x91\xac\x2d\x63\x48\x5b\
\x66\xaa\xf7\x39\xba\xde\x51\x44\xb2\xb6\xac\xc7\x9e\x96\xdf\xdb\
\x7b\x16\xbb\x6d\x14\x91\xac\x2b\x2b\x90\xef\xd4\xce\x59\xaf\xb3\
\x75\xd9\x28\x22\x59\xd7\xc8\x48\x36\xb5\xef\xd7\x6b\x36\x77\x87\
\x22\x92\x75\x8d\x8e\xa4\x98\x11\xcb\xae\x50\x44\xb2\xae\x59\x91\
\x14\xa3\x63\x49\xff\xc2\x91\xd7\x33\x3b\x92\x62\xe4\x39\x9a\x43\
\xb1\x4d\xd6\x74\x94\x48\x8a\x9a\xf3\xec\x99\x59\x1b\x85\xb0\xa3\
\x45\x52\x8c\x38\x57\x53\x28\xb6\x09\x67\xd5\x3a\xbb\x36\x0a\x21\
\x47\xdd\x26\x45\xf6\xf9\xaa\x43\xb1\x4d\x38\xbb\x96\x19\xb6\x51\
\x78\xea\xe8\xdb\xa4\xc8\x3c\xa7\x50\x58\x52\xed\x56\xa9\x0a\xc5\
\x63\xd7\x7a\xce\xb2\x4d\x8a\xac\xf3\xda\x28\x10\x20\x14\x08\x10\
\x0a\x2f\x27\xfa\xf8\x55\x73\x95\x08\x87\xe2\x7e\xb2\x9e\xb3\xdd\
\x4f\x32\xd9\x28\x10\x20\x14\x08\x10\x0a\x04\x08\x05\x02\x84\x02\
\x01\x42\x81\x00\xa1\x40\x80\x50\x20\x40\x28\xfc\x97\x2f\x99\x3f\
\x85\x43\xf1\x2d\x2d\x67\x11\x0d\xbc\x66\xa6\x6d\x14\x08\x10\x0a\
\x04\x08\x85\x87\xce\x76\x4f\xc9\x3a\x6f\x55\x28\xee\x29\xbc\x8a\
\xda\x59\xb6\x51\x78\xea\x2c\x5b\x25\xf3\x9c\xd5\xa1\xd8\x2a\xac\
\xc8\x46\x21\xe4\xe8\x5b\xa5\xe6\x7c\x2d\x1f\xf6\x4d\xa1\xd8\x2a\
\x9c\x55\xeb\xec\xda\x28\x84\x1d\x75\xab\x8c\x38\x57\x73\x28\xb6\
\xca\x9a\x8e\x16\x4b\xf6\x23\x57\x61\xa3\x50\xed\x28\xb1\x8c\x3c\
\xc7\xae\x50\x6c\x95\x75\xcd\x8e\xa5\xf6\xfd\xf7\xce\xea\xee\x8d\
\x22\x96\x75\xcd\x8a\x65\x74\x24\x9b\x2e\x8f\x5e\x62\x59\xd7\xe8\
\x58\x66\x44\xb2\x71\x47\x61\xb7\x6d\x78\xb3\x83\x19\xf1\x1e\x8f\
\xbc\x5d\xaf\xd7\x8f\xdb\xeb\x2e\x66\xfe\x31\xcc\x97\xf1\x74\xd1\
\x32\x53\xbd\xcf\xd1\x3d\x94\x8d\x58\xd8\xec\x19\xd6\x3d\x33\x94\
\x11\x6b\x4a\x28\x1b\xb1\x70\x2f\x32\xbc\x3d\x66\x26\x23\x92\x4d\
\x5a\x28\x85\x60\x18\x25\x2b\x92\x4d\x7a\x28\x1b\xb1\x90\x29\x33\
\x90\x62\x48\x28\x85\x60\xe8\x69\x44\x20\xc5\xd0\x50\xee\x89\x86\
\x16\x23\xe3\xb8\x37\x2d\x94\xaf\x84\xc3\x77\x66\x85\xf1\xd5\x61\
\x42\x81\x23\xf3\xcd\x3c\x04\x08\x05\x02\x84\x02\x01\x42\x81\x00\
\xa1\x40\x80\x50\x20\x40\x28\x10\x20\x14\x78\xea\x72\xf9\x07\x87\
\x67\x04\x58\x9e\x4e\x25\x3a\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x03\x82\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x73\x00\x00\x00\x78\x08\x06\x00\x00\x00\xc1\x84\xcd\x25\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x12\x74\x00\x00\x12\x74\x01\xde\x66\
\x1f\x78\x00\x00\x03\x17\x49\x44\x41\x54\x78\x5e\xed\x9d\x5b\x6e\
\x23\x31\x0c\x04\x9d\x9c\xc0\xf7\x3f\xa5\x6f\xb0\x59\x21\x26\xe0\
\x1d\x38\x1b\xbd\x48\x36\x5b\x5d\x80\x11\xff\x0d\xc9\x52\x4b\x13\
\x27\x03\x7f\x3c\x1e\x8f\x3f\x37\x41\xc1\xe7\xf3\xa7\x20\x40\x32\
\x89\x90\x4c\x22\xca\x9f\x99\xf7\xfb\xfd\xf9\xae\x8f\xbf\xfd\x3e\
\xdf\xf1\xa1\x64\x12\x21\x99\x44\x48\x26\x11\x92\x49\x84\x64\x12\
\x21\x99\x44\x48\x26\x11\x92\x49\x84\x64\x12\x21\x99\x44\x48\x26\
\x11\x92\x49\x84\x64\x12\x21\x99\x44\x48\x26\x11\x92\x49\x84\x64\
\x12\x21\x99\x44\x48\x26\x11\x92\x49\x04\xb4\xcc\xf6\xcf\x5a\xf6\
\xf2\xe6\xf5\x5a\x11\xd7\xf3\x00\x4e\x26\xca\x40\x2b\x4a\x85\x92\
\x89\x38\xbc\x4a\x52\x21\x64\x56\x18\x58\x05\xa1\xa9\x32\x2b\xad\
\xfa\x06\x7a\xbd\x69\x32\x2b\x49\xbc\x82\x2a\x35\x5c\x26\xfa\xea\
\x1e\x01\xad\x8f\x50\x99\x2c\x12\x5f\x41\xea\x29\x4c\xe6\x6c\xd3\
\xed\x41\x9f\x88\x87\x7d\x56\xae\x83\x22\x34\x44\xe6\x4c\xb3\x51\
\x12\xaf\xcc\x5e\x17\x41\xa8\xbb\xcc\xd1\x26\xb3\x24\x5e\x99\xa9\
\x23\x5b\xa8\xab\xcc\x19\x91\x68\x54\x12\xea\x26\x93\x41\xa4\x31\
\x9a\xd2\x2c\xa1\x2e\x32\x47\x9a\x19\x1d\x54\x26\xe8\x42\xb7\xcb\
\x1c\x15\x59\x0d\x64\xa1\xee\x37\x40\x3f\x51\x51\xa4\x81\x5a\xfb\
\x56\x99\xbd\x2b\xb1\xb2\x48\xa3\xb7\x87\xc8\x74\x6e\x93\x79\x92\
\x48\x03\x4d\x68\xe8\x36\xcb\x24\xd2\x40\xea\x69\x8b\xcc\xe8\x83\
\xbe\x22\x11\x33\x0a\x4b\x26\x63\x2a\x0d\x94\xde\x96\x65\xf6\xac\
\x38\x66\x91\x46\x4f\x8f\xde\xe9\x0c\x3d\x33\x85\x2f\x4b\x32\x95\
\xca\x7f\xc9\x4e\xa7\x92\x49\x84\xab\xcc\x93\x52\x69\x64\xf6\x3c\
\x2d\x33\xe2\x56\x9b\x15\xaf\xd9\xb9\x25\xf3\xc4\x54\x1a\x59\xbd\
\xeb\xcc\x24\x62\x4a\xa6\xb6\xd8\x75\x3c\x66\xa8\x64\x3a\x91\xb1\
\xd5\xba\xc8\x3c\xf9\xbc\xcc\x44\xc9\x24\x42\x32\x89\x90\x4c\x22\
\xa6\xbe\xd7\xe4\xb7\x3b\x31\x9d\x99\xdf\x44\xcf\x49\xc9\x24\x42\
\x32\x89\x90\x4c\x22\x24\x93\x08\xc9\x24\x42\x32\x89\x90\x4c\x22\
\x24\x93\x08\xc9\x24\x42\x32\x89\x70\x91\xa9\x3f\x5e\xe7\xcc\x60\
\x4a\xa6\x3e\x7b\x5d\xc7\x63\x86\xda\x66\x89\x70\x93\x79\xf2\x56\
\x9b\xd5\xfb\xb4\x4c\x6d\xb5\xf3\x78\xcd\xce\x75\x9b\x3d\x31\x9d\
\x99\x3d\xeb\xcc\x24\x62\x49\x66\xcf\x76\x71\x52\x3a\x7b\x7a\xf5\
\x3c\x9e\x94\x4c\x22\x96\x65\x2a\x9d\xdf\x64\xa7\xb2\x11\x96\xcc\
\x93\xb6\xdb\x2c\xb6\xc8\xf4\x5e\x71\xe8\x20\xa4\xb2\x11\x7a\x66\
\x32\xa6\x13\xa9\xa7\x6d\x32\x7b\x57\x1e\x93\xd0\xde\x5e\xa2\x76\
\xae\xad\xc9\x3c\x49\x28\x62\x0f\xa1\xdb\xec\x2b\x95\x85\x8e\xd4\
\x1e\x79\x3f\xb1\x5d\xe6\x48\xf1\x15\x85\xa2\x8a\x6c\xb8\x24\x73\
\x54\x68\x15\xa9\xc8\x22\x1b\x6e\xdb\xec\x68\x33\xc8\x42\x47\x17\
\x5c\x86\xc8\x86\xeb\x99\xc9\x20\x74\xb4\xa6\x2c\x91\x8d\xa9\x47\
\xfa\x46\x99\x91\x94\x39\x94\x46\xc5\x9a\x43\x64\x36\x66\x53\x17\
\x3d\xa0\x2a\x75\xbe\x23\x4c\x66\x63\x76\x50\x86\xd7\xc0\x50\xeb\
\x1a\x25\x54\x66\x63\x75\x70\xc6\xea\x00\x51\xea\xd8\x49\xb8\x4c\
\x63\xd7\x30\x33\x41\x12\xd9\x48\xfb\x04\x08\x6d\x10\x23\xb4\xda\
\x11\xeb\x4f\x93\xd9\x40\x1d\xca\xff\x40\xae\x37\x55\xa6\x51\x41\
\x68\x85\x85\x97\x76\x66\xfe\x04\xda\x59\x5a\x61\xa1\x19\x70\x32\
\x8d\x6c\xa9\x95\x24\x1a\xb0\x32\xaf\x78\xcb\xad\x28\xef\x4a\x19\
\x99\xaf\x30\xfe\x8e\xb8\x83\x92\x32\xc5\x7b\x20\xee\x66\xc5\x1e\
\x24\x93\x08\xc9\x24\x42\x32\x89\x90\x4c\x22\x24\x93\x08\xc9\x24\
\x42\x32\x89\x90\x4c\x22\x24\x93\x08\xc9\x24\x42\x32\x89\x90\x4c\
\x22\x24\x93\x08\xc9\x24\x42\x32\x69\xb8\xdd\xbe\x00\xab\x57\x40\
\x46\x57\x6b\xee\xf0\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x03\x8a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x73\x00\x00\x00\x78\x08\x06\x00\x00\x00\xc1\x84\xcd\x25\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x12\x74\x00\x00\x12\x74\x01\xde\x66\
\x1f\x78\x00\x00\x03\x1f\x49\x44\x41\x54\x78\x5e\xed\xdd\x4b\x6e\
\x14\x41\x10\x84\xe1\x01\xae\xc3\x92\xfb\x9f\x80\x25\x07\x99\x0b\
\x20\xa3\x92\x27\xa5\xa6\x65\xcb\xf5\xca\xcc\xc8\xa8\xf8\x25\x84\
\x57\xb8\xab\xbe\xce\xea\x31\x0c\x9a\x6f\xcf\xe7\xf3\xed\xa1\x28\
\xfa\xfe\xfa\x5d\x11\x24\x4c\xa2\x84\x49\x54\xf9\x67\xe6\xdf\x9f\
\xbf\x5e\x5f\xf5\xf5\xe3\xcf\xef\xd7\x57\x7c\x69\x32\x89\x12\x26\
\x51\xc2\x24\x4a\x98\x44\x09\x93\x28\x61\x12\x25\x4c\xa2\x84\x49\
\x94\x30\x89\x12\x26\x51\xc2\x24\x4a\x98\x44\x09\x93\x28\x61\x12\
\x25\x4c\xa2\x84\x49\x94\x30\x89\x12\x26\x51\xc2\x24\x4a\x98\x44\
\x41\xbf\xa1\xeb\xfa\x66\xad\xcf\xde\x88\xb5\xeb\x0d\x5d\xf7\x3f\
\xa7\xe2\x1b\xbf\xe0\x26\xb3\x6d\xaa\xfd\xca\x0c\xe1\x1a\x46\x83\
\xc2\x44\xdc\xbc\x4a\xa8\x10\x98\x15\x36\xac\x02\x68\x2a\x66\xa5\
\xbb\xbe\x85\x7e\xbd\x69\x98\x95\x10\xef\xa1\xa2\x86\x63\xa2\xdf\
\xdd\x23\xa1\xad\x23\x14\x93\x05\xf1\x1a\xd2\x9a\xc2\x30\x67\x17\
\xdd\x7e\xde\x8b\xf8\x99\x6f\xe5\xfb\xa0\x80\x86\x60\xce\x2c\x36\
\x0a\xf1\xde\xec\xf7\x45\x00\x75\xc7\x1c\x5d\x64\x16\xe2\xbd\x99\
\xeb\xc8\x06\x75\xc5\x9c\x81\x44\xab\x12\xa8\x1b\x26\x03\xa4\x35\
\x3a\xa5\x59\xa0\x2e\x98\x23\x8b\x19\xdd\xa8\xcc\xd0\x41\xb7\x63\
\x8e\x42\x56\x0b\x19\xd4\xfd\x05\xd0\x67\x55\x84\xb4\x50\xaf\x7d\
\x2b\x66\xef\x9d\x58\x19\xd2\xea\x5d\x43\xe4\x74\x6e\xc3\x3c\x09\
\xd2\x42\x03\x0d\x3d\x66\x99\x20\x2d\xa4\x35\x6d\xc1\x8c\x7e\xd0\
\x57\x2c\x62\x8f\xc2\x26\x93\x71\x2a\x2d\x94\xb5\x2d\x63\xf6\xdc\
\x71\xcc\x90\x56\xcf\x1a\xbd\xa7\x33\xf4\x99\xa9\x7c\x5b\xc2\xd4\
\x54\xfe\x5f\xf6\x74\x6a\x32\x89\x72\xc5\x3c\x69\x2a\xad\xcc\x35\
\x4f\x63\x46\xbc\xd4\x66\xcd\x6b\xef\xdc\x26\xf3\xc4\xa9\xb4\xb2\
\xd6\xae\x67\x26\x51\x53\x98\x3a\x62\xd7\xf3\xd8\x43\x4d\xa6\x53\
\x19\x47\xad\x0b\xe6\xc9\xcf\xcb\xcc\x34\x99\x44\x09\x93\x28\x61\
\x12\x35\xf5\xdf\xe0\xbf\x7a\x25\xa6\x67\xe6\x7b\xd1\xfb\xa4\xc9\
\x24\x4a\x98\x44\x09\x93\x28\x61\x12\x25\x4c\xa2\x84\x49\x94\x30\
\x89\x12\x26\x51\xc2\x24\x4a\x98\x44\xb9\x60\xea\x1f\xaf\x73\xf6\
\x60\x0a\x53\x7f\xf7\xba\x9e\xc7\x1e\xea\x98\x25\xca\x0d\xf3\xe4\
\xa3\x36\x6b\xed\xd3\x98\x3a\x6a\xe7\xf3\xda\x3b\xd7\x63\xf6\xc4\
\xe9\xcc\x5c\xb3\x9e\x99\x44\x2d\x61\xf6\x1c\x17\x27\x4d\x67\xcf\
\x5a\x3d\x1f\x4f\x9a\x4c\xa2\x96\x31\x35\x9d\xef\x65\x4f\x65\x2b\
\x6c\x32\x4f\x3a\x6e\xb3\xda\x82\xe9\x7d\xc7\xa1\x87\x30\x95\xad\
\xd0\x67\x26\xe3\x74\x22\xad\x69\x1b\x66\xef\x9d\xc7\x04\xda\xbb\
\x96\xa8\x93\x6b\xeb\x64\x9e\x04\x8a\xb8\x86\xd0\x63\xf6\x5a\x65\
\xd0\x91\x6b\x8f\x7c\x3d\xb1\x1d\x73\xe4\xe2\x2b\x82\xa2\x42\xb6\
\xdc\x3e\x72\x71\x14\x2a\x7a\xe1\x33\x21\x43\xb6\x5c\x3f\x3f\x93\
\x05\xb4\xca\x3a\x5c\x9f\x99\xa3\x8b\x1a\xdd\xb4\x88\x2a\xdd\x90\
\x21\x9f\x6c\x3b\x83\x94\x3d\xa5\x15\xaf\x39\xec\x63\x8a\x67\xa7\
\x2e\x7a\x83\xaa\x5c\xe7\x47\x85\x61\xb6\x66\x37\xca\xf2\xda\x30\
\xd4\xeb\x1a\x2d\x14\xb3\xb5\xba\x71\xd6\xea\x06\xa2\x5c\xc7\xce\
\xc2\x31\xad\x5d\x9b\x99\x19\x12\x64\x2b\x0d\xb3\x55\x15\x14\x0d\
\xd1\x4a\xc5\xb4\x2a\xa1\xa2\x42\xb6\x20\x30\x5b\xe8\xa0\xc8\x88\
\x16\x0c\xa6\x85\x86\x5a\x01\xd1\x82\xc3\xb4\xb2\x51\x2b\x21\x5a\
\xb0\x98\xf7\xbc\x71\x2b\xe2\xdd\x2b\x83\x79\x6d\x17\x2c\x03\xe0\
\xb5\x92\x98\xea\xe3\xd2\xde\x69\xa0\xf6\x27\x4c\xa2\x84\x49\x94\
\x30\x89\x12\x26\x51\xc2\x24\x4a\x98\x44\x09\x93\x28\x61\x12\x25\
\x4c\xa2\x84\x49\x94\x30\x89\x12\x26\x51\xc2\x24\x4a\x98\x44\x09\
\x93\xa6\xc7\xe3\x1f\x1c\xf0\x5f\xbd\xd9\xfb\xc9\x32\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x05\
\x00\x57\x6d\xc2\
\x00\x50\
\x00\x6f\x00\x77\x00\x65\x00\x72\
\x00\x02\
\x00\x00\x04\xd6\
\x00\x48\
\x00\x56\
\x00\x08\
\x0b\x41\x5e\xe7\
\x00\x48\
\x00\x56\x00\x4f\x00\x4e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x03\xa9\xcc\x27\
\x00\x48\
\x00\x56\x00\x4f\x00\x46\x00\x46\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x0d\xc5\xf9\x87\
\x00\x50\
\x00\x6f\x00\x77\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x01\x4d\x95\x67\
\x00\x50\
\x00\x6f\x00\x77\x00\x65\x00\x72\x00\x50\x00\x72\x00\x65\x00\x73\x00\x73\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\
\x00\x00\x00\x10\x00\x02\x00\x00\x00\x02\x00\x00\x00\x05\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x03\
\x00\x00\x00\x60\x00\x00\x00\x00\x00\x01\x00\x00\x09\x77\
\x00\x00\x00\x48\x00\x00\x00\x00\x00\x01\x00\x00\x05\xf1\
\x00\x00\x00\x30\x00\x00\x00\x00\x00\x01\x00\x00\x02\xf9\
\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x10\x00\x02\x00\x00\x00\x02\x00\x00\x00\x05\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x60\x00\x00\x00\x00\x00\x01\x00\x00\x09\x77\
\x00\x00\x01\x69\xe4\x6a\x14\x77\
\x00\x00\x00\x48\x00\x00\x00\x00\x00\x01\x00\x00\x05\xf1\
\x00\x00\x01\x69\xe4\x6a\x52\x8e\
\x00\x00\x00\x30\x00\x00\x00\x00\x00\x01\x00\x00\x02\xf9\
\x00\x00\x01\x69\xe9\x2f\xca\x25\
\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x69\xe9\x2d\x9b\xba\
"
# Qt changed the resource-data layout in 5.8 (rcc format version 2);
# select the struct table that matches the running Qt version.
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    """Register the embedded resource data with Qt's resource system."""
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the resource data previously registered by qInitResources."""
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
# Register the resources as an import-time side effect (pyrcc convention).
qInitResources()
|
<gh_stars>10-100
"""
HRF Functions
=============
Various Hemodynamic Response Functions (HRFs) implemented by NiPy
Copyright (c) 2006-2017, NIPY Developers
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the NIPY Developers nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# Public API: the canonical HRF models and their derivatives defined below.
__all__ = [
    "spm_hrf",
    "glover_hrf",
    "spm_time_derivative",
    "glover_time_derivative",
    "spm_dispersion_derivative",
]
from scipy.stats import gamma
import numpy as np
def _gamma_difference_hrf(
tr,
oversampling=16,
time_length=32,
onset=0.0,
delay=6,
undershoot=16.0,
dispersion=1.0,
u_dispersion=1.0,
ratio=0.167,
):
"""Compute an hrf as the difference of two gamma functions
Parameters
----------
tr: float, scan repeat time, in seconds
oversampling: int, temporal oversampling factor, optional
time_length: int, hrf kernel length, in seconds
onset: float, onset of the hrf
Returns
-------
hrf: array of shape(length / tr * oversampling, float),
hrf sampling on the oversampled time grid
"""
dt = tr / oversampling
time_stamps = np.linspace(0, time_length, int(time_length / dt))
time_stamps -= onset / dt
hrf = gamma.pdf(
time_stamps, delay / dispersion, dt / dispersion
) - ratio * gamma.pdf(time_stamps, undershoot / u_dispersion, dt / u_dispersion)
hrf /= hrf.sum()
return hrf
def spm_hrf(tr, oversampling=16, time_length=32.0, onset=0.0):
    """Implementation of the SPM hrf model.

    Args:
        tr: float, scan repeat time, in seconds
        oversampling: int, temporal oversampling factor, optional
        time_length: float, hrf kernel length, in seconds
        onset: float, onset of the response

    Returns:
        hrf: array of shape(length / tr * oversampling, float),
            hrf sampling on the oversampled time grid
    """
    # The SPM model is exactly the default gamma-difference parameterisation.
    return _gamma_difference_hrf(
        tr, oversampling=oversampling, time_length=time_length, onset=onset
    )
def glover_hrf(tr, oversampling=16, time_length=32, onset=0.0):
    """Implementation of the Glover hrf model.

    Args:
        tr: float, scan repeat time, in seconds
        oversampling: int, temporal oversampling factor, optional
        time_length: int, hrf kernel length, in seconds
        onset: float, onset of the response

    Returns:
        hrf: array of shape(length / tr * oversampling, float),
            hrf sampling on the oversampled time grid
    """
    # Glover's fit differs from SPM only in the gamma-lobe parameters.
    glover_params = dict(
        delay=6,
        undershoot=12.0,
        dispersion=0.9,
        u_dispersion=0.9,
        ratio=0.35,
    )
    return _gamma_difference_hrf(tr, oversampling, time_length, onset,
                                 **glover_params)
def spm_time_derivative(tr, oversampling=16, time_length=32.0, onset=0.0):
    """Implementation of the SPM time derivative hrf (dhrf) model.

    Args:
        tr: float, scan repeat time, in seconds
        oversampling: int, temporal oversampling factor, optional
        time_length: float, hrf kernel length, in seconds
        onset: float, onset of the response

    Returns:
        dhrf: array of shape(length / tr * oversampling, float),
            dhrf sampling on the oversampled time grid
    """
    # Finite difference of the SPM HRF with respect to onset.
    delta = 0.1  # onset shift, in seconds
    shifted = spm_hrf(tr, oversampling, time_length, onset + delta)
    reference = spm_hrf(tr, oversampling, time_length, onset)
    return 1.0 / delta * (shifted - reference)
def glover_time_derivative(tr, oversampling=16, time_length=32.0, onset=0.0):
    """Implementation of the Glover time derivative hrf (dhrf) model.

    Args:
        tr: float, scan repeat time, in seconds
        oversampling: int, temporal oversampling factor, optional
        time_length: float, hrf kernel length, in seconds
        onset: float, onset of the response

    Returns:
        dhrf: array of shape(length / tr * oversampling, float),
            dhrf sampling on the oversampled time grid
    """
    # Finite difference of the Glover HRF with respect to onset.
    delta = 0.1  # onset shift, in seconds
    shifted = glover_hrf(tr, oversampling, time_length, onset + delta)
    reference = glover_hrf(tr, oversampling, time_length, onset)
    return 1.0 / delta * (shifted - reference)
def spm_dispersion_derivative(tr, oversampling=16, time_length=32.0, onset=0.0):
    """Implementation of the SPM dispersion derivative hrf model.

    Args:
        tr: float, scan repeat time, in seconds
        oversampling: int, temporal oversampling factor, optional
        time_length: float, hrf kernel length, in seconds
        onset: float, onset of the response

    Returns:
        dhrf: array of shape(length / tr * oversampling, float),
            dhrf sampling on the oversampled time grid
    """
    # Finite difference of the SPM HRF with respect to its dispersion.
    delta = 0.01  # dispersion perturbation
    widened = _gamma_difference_hrf(
        tr, oversampling, time_length, onset, dispersion=1.0 + delta
    )
    reference = spm_hrf(tr, oversampling, time_length, onset)
    return 1.0 / delta * (widened - reference)
|
import json
from flask import url_for
from urllib.parse import urlparse
from datetime import datetime
from dateutil import parser, tz
from urllib.parse import urlencode
from dataservice.extensions import db
from dataservice.api.study.models import Study
from dataservice.api.participant.models import Participant
from dataservice.api.diagnosis.models import Diagnosis
from dataservice.api.sequencing_center.models import SequencingCenter
from dataservice.api.biospecimen.models import (
Biospecimen,
BiospecimenDiagnosis
)
from tests.utils import FlaskTestCase
DIAGNOSES_URL = 'api.diagnoses'
DIAGNOSES_LIST_URL = 'api.diagnoses_list'
class DiagnosisTest(FlaskTestCase):
    """
    Test diagnosis api
    """

    def test_post(self):
        """
        Test create a new diagnosis
        """
        # Seed the db with one participant/diagnosis; keep a reference to it
        # (the original rebound ``kwargs`` here, shadowing the seed data).
        seed = self._create_save_to_db()
        # Create diagnosis data
        kwargs = {
            'external_id': 'd1',
            'source_text_diagnosis': 'flu',
            'age_at_event_days': 365,
            'diagnosis_category': 'Cancer',
            'source_text_tumor_location': 'Brain',
            'mondo_id_diagnosis': 'DOID:8469',
            'icd_id_diagnosis': 'J10.01',
            'uberon_id_tumor_location': 'UBERON:0000955',
            'spatial_descriptor': 'left side',
            'participant_id': seed.get('participant_id')
        }
        # Send post request (original comment said "get")
        response = self.client.post(url_for(DIAGNOSES_LIST_URL),
                                    data=json.dumps(kwargs),
                                    headers=self._api_headers())
        # Check response status code
        self.assertEqual(response.status_code, 201)
        # Check response content against the persisted row
        response = json.loads(response.data.decode('utf-8'))
        diagnosis = response['results']
        dg = Diagnosis.query.get(diagnosis.get('kf_id'))
        for k, v in kwargs.items():
            if k == 'participant_id':
                continue
            self.assertEqual(diagnosis[k], getattr(dg, k))
        # Seed diagnosis plus the newly posted one
        self.assertEqual(2, Diagnosis.query.count())

    def test_post_multiple(self):
        """
        Test creating a second diagnosis for an existing participant
        """
        # Create a diagnosis with participant
        d1 = self._create_save_to_db()
        # Create another diagnosis for the same participant
        d2 = {
            'external_id': 'd2',
            'source_text_diagnosis': 'cold',
            'diagnosis_category': 'Cancer',
            'source_text_tumor_location': 'Brain',
            'mondo_id_diagnosis': 'DOID:8469',
            'icd_id_diagnosis': 'J10.01',
            'uberon_id_tumor_location': 'UBERON:0000955',
            'spatial_descriptor': 'left side',
            'participant_id': d1['participant_id']
        }
        # Send post request
        response = self.client.post(url_for(DIAGNOSES_LIST_URL),
                                    headers=self._api_headers(),
                                    data=json.dumps(d2))
        # Check status code
        self.assertEqual(response.status_code, 201)
        # Check database: two diagnoses, both attached to the one participant
        c = Diagnosis.query.count()
        self.assertEqual(c, 2)
        pd = Participant.query.all()[0].diagnoses
        self.assertEqual(len(pd), 2)

    def test_get(self):
        """
        Test retrieving a single diagnosis by kf_id
        """
        # Create and save diagnosis to db
        kwargs = self._create_save_to_db()
        # Send get request
        response = self.client.get(url_for(DIAGNOSES_URL,
                                           kf_id=kwargs['kf_id']),
                                   headers=self._api_headers())
        # Check response status code
        self.assertEqual(response.status_code, 200)
        # Check response content
        response = json.loads(response.data.decode('utf-8'))
        diagnosis = response['results']
        participant_link = response['_links']['participant']
        participant_id = urlparse(participant_link).path.split('/')[-1]
        for k, v in kwargs.items():
            if k == 'participant_id':
                self.assertEqual(participant_id,
                                 kwargs['participant_id'])
            else:
                # BUG FIX: the original asserted diagnosis[k] == diagnosis[k]
                # (always true); compare the response to the saved value.
                self.assertEqual(diagnosis[k], v)

    def test_get_all(self):
        """
        Test retrieving all diagnoses
        """
        self._create_save_to_db()
        response = self.client.get(url_for(DIAGNOSES_LIST_URL),
                                   headers=self._api_headers())
        self.assertEqual(response.status_code, 200)
        response = json.loads(response.data.decode("utf-8"))
        content = response.get('results')
        self.assertEqual(len(content), 1)

    def test_patch(self):
        """
        Test updating an existing diagnosis
        """
        kwargs = self._create_save_to_db()
        kf_id = kwargs.get('kf_id')
        # Update existing diagnosis
        body = {
            'source_text_diagnosis': 'hangry',
            'diagnosis_category': 'Structural Birth Defect',
            'participant_id': kwargs['participant_id']
        }
        response = self.client.patch(url_for(DIAGNOSES_URL,
                                             kf_id=kf_id),
                                     headers=self._api_headers(),
                                     data=json.dumps(body))
        # Status code
        self.assertEqual(response.status_code, 200)
        # Message
        resp = json.loads(response.data.decode("utf-8"))
        self.assertIn('diagnosis', resp['_status']['message'])
        self.assertIn('updated', resp['_status']['message'])
        # Content - check only patched fields are updated
        diagnosis = resp['results']
        dg = Diagnosis.query.get(kf_id)
        for k, v in body.items():
            self.assertEqual(v, getattr(dg, k))
        # Content - Check remaining fields are unchanged
        unchanged_keys = (set(diagnosis.keys()) -
                          set(body.keys()))
        for k in unchanged_keys:
            val = getattr(dg, k)
            if isinstance(val, datetime):
                # Response datetimes are serialized strings; normalize the db
                # value to UTC before comparing string forms.
                d = val.replace(tzinfo=tz.tzutc())
                self.assertEqual(str(parser.parse(diagnosis[k])), str(d))
            else:
                self.assertEqual(diagnosis[k], val)
        self.assertEqual(1, Diagnosis.query.count())

    def test_delete(self):
        """
        Test delete an existing diagnosis
        """
        kwargs = self._create_save_to_db()
        # Send delete request (original comment said "get")
        response = self.client.delete(url_for(DIAGNOSES_URL,
                                              kf_id=kwargs['kf_id']),
                                      headers=self._api_headers())
        # Check status code
        self.assertEqual(response.status_code, 200)
        # Check response body
        response = json.loads(response.data.decode("utf-8"))
        # Check database
        d = Diagnosis.query.first()
        self.assertIs(d, None)

    def test_filters(self):
        """
        Test get and filter diagnoses by biospecimen_id or study id
        """
        self._create_all_entities()
        assert 8 == Diagnosis.query.count()
        assert 8 == Biospecimen.query.count()
        assert 5 == BiospecimenDiagnosis.query.count()

        # Create query - Participant p0, Biospecimen b0 has 3 diagnoses
        bs = Biospecimen.query.filter_by(
            external_sample_id='study0-p0-b0').first()
        s = Study.query.filter_by(external_id='s0').first()
        bds = BiospecimenDiagnosis.query.filter_by(
            biospecimen_id=bs.kf_id).count()
        assert bds == 3
        # Send get request
        filter_params = {'biospecimen_id': bs.kf_id,
                         'study_id': s.kf_id}
        qs = urlencode(filter_params)
        endpoint = '{}?{}'.format('/diagnoses', qs)
        response = self.client.get(endpoint, headers=self._api_headers())
        # Check response status code
        self.assertEqual(response.status_code, 200)
        # Check response content
        response = json.loads(response.data.decode('utf-8'))
        assert 3 == response['total']
        assert 3 == len(response['results'])
        diagnoses = response['results']
        for d in diagnoses:
            assert d['external_id'] in {'study0-p0-d0',
                                        'study0-p0-d1',
                                        'study0-p0-d2'}

        # Create query - Participant p0, Biospecimen b1 has 1 diagnosis
        bs = Biospecimen.query.filter_by(
            external_sample_id='study0-p0-b1').first()
        bds = BiospecimenDiagnosis.query.filter_by(
            biospecimen_id=bs.kf_id).count()
        assert bds == 1
        # Send get request
        filter_params = {'biospecimen_id': bs.kf_id}
        qs = urlencode(filter_params)
        endpoint = '{}?{}'.format('/diagnoses', qs)
        response = self.client.get(endpoint, headers=self._api_headers())
        # Check response status code
        self.assertEqual(response.status_code, 200)
        # Check response content
        response = json.loads(response.data.decode('utf-8'))
        assert 1 == response['total']
        assert 1 == len(response['results'])
        diagnoses = response['results']
        for d in diagnoses:
            assert d['external_id'] in {'study0-p0-d2'}

        # Create query - Same as first query but wrong study, yields 0 results
        bs = Biospecimen.query.filter_by(
            external_sample_id='study0-p0-b0').first()
        s = Study.query.filter_by(external_id='s1').first()
        bds = BiospecimenDiagnosis.query.filter_by(
            biospecimen_id=bs.kf_id).count()
        assert bds == 3
        # Send get request
        filter_params = {'biospecimen_id': bs.kf_id,
                         'study_id': s.kf_id}
        qs = urlencode(filter_params)
        endpoint = '{}?{}'.format('/diagnoses', qs)
        response = self.client.get(endpoint, headers=self._api_headers())
        # Check response status code
        self.assertEqual(response.status_code, 200)
        # Check response content
        response = json.loads(response.data.decode('utf-8'))
        assert 0 == response['total']
        assert 0 == len(response['results'])

    def _create_save_to_db(self):
        """
        Create and save diagnosis

        Requires creating a participant
        Create a diagnosis and add it to participant as kwarg
        Save participant

        Returns a dict of the diagnosis' attributes plus its kf_id and the
        owning participant's kf_id.
        """
        # Create study
        study = Study(external_id='phs001')
        # Create diagnosis
        kwargs = {
            'external_id': 'd1',
            'source_text_diagnosis': 'flu',
            'diagnosis_category': 'Cancer',
            'source_text_tumor_location': 'Brain',
            'age_at_event_days': 365,
            'mondo_id_diagnosis': 'DOID:8469',
            'icd_id_diagnosis': 'J10.01',
            'uberon_id_tumor_location': 'UBERON:0000955',
            'spatial_descriptor': 'left side'
        }
        d = Diagnosis(**kwargs)
        # Create and save participant with diagnosis
        participant_id = 'Test subject 0'
        p = Participant(external_id=participant_id, diagnoses=[d],
                        is_proband=True, study=study)
        db.session.add(p)
        db.session.commit()
        # Create sequencing center
        s = SequencingCenter(name='washu')
        db.session.add(s)
        db.session.commit()
        # Create biospecimen (the original re-added the already persisted
        # sequencing center to the session here; that no-op was removed)
        b = Biospecimen(analyte_type='DNA',
                        sequencing_center_id=s.kf_id,
                        participant=p)
        db.session.add(b)
        db.session.commit()
        kwargs['participant_id'] = p.kf_id
        kwargs['kf_id'] = d.kf_id
        return kwargs

    def _create_all_entities(self):
        """
        Create 2 studies with same content
        Content: 3 participants, 4 biospecimens, 4 diagnoses
        """
        # Create entities
        sc = SequencingCenter.query.filter_by(name='sc').first()
        if not sc:
            sc = SequencingCenter(name='sc')
        studies = []
        # Two studies
        for j in range(2):
            s = Study(external_id='s{}'.format(j))
            p0 = Participant(external_id='study{}-p0'.format(j))
            p1 = Participant(external_id='study{}-p1'.format(j))
            p2 = Participant(external_id='study{}-p2'.format(j))
            # Participant 0
            # Has 2 Biospecimens
            for i in range(2):
                b = Biospecimen(
                    external_sample_id='study{}-p0-b{}'.format(j, i),
                    analyte_type='DNA',
                    sequencing_center=sc)
                # Biospecimen b0 has 2 diagnoses
                if i == 0:
                    for k in range(2):
                        d = Diagnosis(
                            external_id='study{}-p0-d{}'.format(j, k))
                        p0.diagnoses.append(d)
                # Biospecimen b1 has 1 diagnosis
                else:
                    # BUG FIX: was 'study{}-p0-d{}'.format(j, k + 1), relying
                    # on k leaking out of the loop above; name d2 explicitly.
                    d = Diagnosis(
                        external_id='study{}-p0-d2'.format(j))
                    p0.diagnoses.append(d)
                p0.biospecimens.append(b)
            # Participant 1
            # Has 1 biospecimen, 1 diagnosis
            b = Biospecimen(external_sample_id='study{}-p1-b0'.format(j),
                            analyte_type='DNA',
                            sequencing_center=sc)
            d = Diagnosis(external_id='study{}-p1-d0'.format(j))
            p1.biospecimens.append(b)
            p1.diagnoses.append(d)
            # Participant 2
            # Has 1 biospecimen
            b = Biospecimen(external_sample_id='study{}-p2-b0'.format(j),
                            analyte_type='DNA',
                            sequencing_center=sc)
            p2.biospecimens.append(b)
            s.participants.extend([p0, p1, p2])
            studies.append(s)
        db.session.add_all(studies)
        db.session.commit()

        # Create links between bios and diags
        bs_dgs = []
        # Participant 0
        p0 = studies[0].participants[0]
        # b0-d0
        bs_dgs.append(
            BiospecimenDiagnosis(biospecimen_id=p0.biospecimens[0].kf_id,
                                 diagnosis_id=p0.diagnoses[0].kf_id))
        # b0-d1
        bs_dgs.append(
            BiospecimenDiagnosis(biospecimen_id=p0.biospecimens[0].kf_id,
                                 diagnosis_id=p0.diagnoses[1].kf_id))
        # b1-d2
        bs_dgs.append(
            BiospecimenDiagnosis(biospecimen_id=p0.biospecimens[1].kf_id,
                                 diagnosis_id=p0.diagnoses[2].kf_id))
        # b0-d2
        bs_dgs.append(
            BiospecimenDiagnosis(biospecimen_id=p0.biospecimens[0].kf_id,
                                 diagnosis_id=p0.diagnoses[2].kf_id))
        # Participant 1
        p1 = studies[0].participants[1]
        # b0-d0
        bs_dgs.append(
            BiospecimenDiagnosis(biospecimen_id=p1.biospecimens[0].kf_id,
                                 diagnosis_id=p1.diagnoses[0].kf_id))
        db.session.add_all(bs_dgs)
        db.session.commit()
|
""" Module with physical constants for use with ipython, profile
"physics".
Definition of Fundamental Physical Constants, CODATA Recommended Values
Source: P. J. Mohr and B. N. Taylor,
CODATA Recommended Values of the Fundamental
Physical Constants, 1998
Website: physics.nist.gov/constants
"""
# License: BSD-like
# Copyright: <NAME> (<EMAIL>)
# inspired by maxima's physconst.mac by <NAME>
#from math import * # math MUST be imported BEFORE PhysicalQInteractive
from IPython.extensions.PhysicalQInteractive import PhysicalQuantityInteractive
# Math constants:
# Pi mathematical constants
pi = 3.141592653589793238462643383279502884197169399375105820974944592
# Universal Constants
#-------------------------------------------------------------------------
c = PhysicalQuantityInteractive(299792458 , 'm/s')
# BUG FIX: the original assigned c.__doc__ twice with identical text; the
# redundant second assignment (a dead store) was removed.
c.__doc__ = """speed of light in vacuum"""
u_0 = PhysicalQuantityInteractive(4*pi*1E-7 , 'N/(A**2)')
u_0.__doc__ = """magnetic constant"""
# mu_0 is an alternate spelling of u_0 (same value, no docstring of its own)
mu_0 = PhysicalQuantityInteractive(4*pi*1E-7 , 'N/(A**2)')
epsilon_0 = PhysicalQuantityInteractive(8.854187817E-12 , 'F/m')
epsilon_0.__doc__ = """electric constant """
Z_0 = PhysicalQuantityInteractive(376.730313461 , 'ohm')
Z_0.__doc__ = """characteristic impedance of vacuum """
G = PhysicalQuantityInteractive(6.673E-11 , 'm**3/(kg*s**2)')
G.__doc__ = """Newtonian constant of gravitation """
h = PhysicalQuantityInteractive(6.62606876E-34 , 'J*s')
h.__doc__ = """Planck constant """
h_eV = PhysicalQuantityInteractive(4.13566727E-15 , 'eV*s')
h_eV.__doc__ = """Planck constant in eVs """
h_bar = PhysicalQuantityInteractive(1.054571596E-34 , 'J*s')
h_bar.__doc__ = """Hbar"""
h_bar_eV = PhysicalQuantityInteractive(6.58211889E-16 , 'eV*s')
h_bar_eV.__doc__ = """Hbar in eV"""
P_m = PhysicalQuantityInteractive(2.1767E-8 , 'kg')
P_m.__doc__ = """Planck mass"""
P_l = PhysicalQuantityInteractive(1.6160E-35 , 'm')
P_l.__doc__ = """Planck length """
P_t = PhysicalQuantityInteractive(5.3906E-44 , 's')
P_t.__doc__ = """Planck time """
# Electromagnetic Constants
#------------------------------------------------------------------------
_e = PhysicalQuantityInteractive(1.602176462E-19 , 'C')
_e.__doc__ = """elementary charge"""
# q is a convenience alias for the elementary charge
q = _e
capitalphi_0 = PhysicalQuantityInteractive(2.067833636E-15 , 'Wb')
capitalphi_0.__doc__ = """magnetic flux quantum """
# mfq_0 duplicates capitalphi_0 (magnetic flux quantum, no docstring)
mfq_0 = PhysicalQuantityInteractive(2.067833636E-15 , 'Wb')
G_0 = PhysicalQuantityInteractive(7.748091696E-5 , 'S')
G_0.__doc__ = """conductance quantum """
K_J = PhysicalQuantityInteractive(483597.898E9 , 'Hz/V')
K_J.__doc__ = """Josephson constant"""
R_K = PhysicalQuantityInteractive(25812.807572 , 'ohm')
R_K.__doc__ = """von Klitzing constant"""
u_B = PhysicalQuantityInteractive(927.400899E-26 , 'J/T')
u_B.__doc__ = """Bohr magneton"""
ueVT_B = PhysicalQuantityInteractive(5.788381749E-5 , 'eV/T')
ueVT_B.__doc__ = """Bohr magneton in eV T-1"""
u_N = PhysicalQuantityInteractive(5.05078317E-27 , 'J/T')
u_N.__doc__ = """nuclear magneton """
ueVT_N = PhysicalQuantityInteractive(3.152451238E-8 , 'eV/T')
ueVT_N.__doc__ = """nuclear magneton in eV T-1 """
# Atomic and Nuclear Constants
# General
#-------------------------------------------------------------------------
# fine-structure constant (dimensionless)
alpha = 7.297352533E-3
Ry = PhysicalQuantityInteractive(10973731.568549 , '1/m')
Ry.__doc__ = """Rydberg constant """
# Ry_INF duplicates Ry (Rydberg constant R_infinity, no docstring)
Ry_INF = PhysicalQuantityInteractive(10973731.568549 , '1/m')
a_0 = PhysicalQuantityInteractive(0.5291772083E-10 , 'm')
a_0.__doc__ = """Bohr radius """
E_h = PhysicalQuantityInteractive(4.35974381E-18 , 'J')
E_h.__doc__ = """Hartree energy """
Eev_h = PhysicalQuantityInteractive(27.2113834 , 'eV')
Eev_h.__doc__ = """Hartree energy in eV """
qcir2 = PhysicalQuantityInteractive(3.636947516E-4 , 'm**2/s')
qcir2.__doc__ = """quantum of circulation h/(2me) """
qcir = PhysicalQuantityInteractive(7.273895032E-4 , 'm**2/s')
qcir.__doc__ = """quantum of circulation h/(me) """
# Electroweak
#-------------------------------------------------------------------------
Fcc = PhysicalQuantityInteractive(1.16639E-5 , '1/GeV**2')
Fcc.__doc__ = """Fermi coupling constant """
# weak mixing angle W (on-shell scheme), dimensionless
wma_W = 0.2224
# Electron, e-
#-------------------------------------------------------------------------
m_e = PhysicalQuantityInteractive(9.10938188E-31 , 'kg')
m_e.__doc__ = """electron mass """
m_e_u = PhysicalQuantityInteractive(5.485799110E-4 , 'amu')
m_e_u.__doc__ = """electron mass (electron relative atomic mass times amu)"""
me_J = PhysicalQuantityInteractive(8.18710414E-14 , 'J')
me_J.__doc__ = """electron mass - energy equivalent """
me_MeV = PhysicalQuantityInteractive(0.510998902 , 'MeV')
me_MeV.__doc__ = """electron mass - energy equivalent in MeV"""
# The following plain floats are dimensionless mass ratios:
# electron-muon mass ratio
memu = 4.83633210E-3
# electron-tau mass ratio
metau = 2.87555E-4
# electron-proton mass ratio
memp = 5.446170232E-4
# electron-neutron mass ratio
memn = 5.438673462E-4
# electron-deuteron mass ratio
memd = 2.7244371170E-4
# electron to alpha particle mass ratio
memalpha = 1.3709335611E-4
echargeemass = PhysicalQuantityInteractive(-1.758820174E11 , 'C/kg')
echargeemass.__doc__ = """electron charge to mass quotient """
Molar_e = PhysicalQuantityInteractive(5.485799110E-7 , 'kg/mol')
Molar_e.__doc__ = """electron molar mass """
lambdaC = PhysicalQuantityInteractive(2.426310215E-12 , 'm')
lambdaC.__doc__ = """Compton wavelength """
r_e = PhysicalQuantityInteractive(2.817940285E-15 , 'm')
r_e.__doc__ = """classical electron radius """
sigma_e = PhysicalQuantityInteractive(0.665245854E-28 , 'm**2')
sigma_e.__doc__ = """Thomson cross section """
u_e = PhysicalQuantityInteractive(-928.476362E-26 , 'J/T')
u_e.__doc__ = """electron magnetic moment """
# electron magnetic moment to Bohr magneton ratio
ueuB = -1.0011596521869
# electron magnetic moment to nuclear magneton ratio
ueuN = -1838.2819660
# electron magnetic moment anomaly |ue|/uB - 1
a_e = 1.1596521869E-3
# electron g-factor
g_e = -2.0023193043737
# electron-muon magnetic moment ratio
ueuu = 206.7669720
# electron-proton magnetic moment ratio
ueup = -658.2106875
# electron to shielded proton magnetic moment ratio (H2O, sphere, 25 C)
ueusp = -658.2275954
# electron-neutron magnetic moment ratio
ueun = 960.92050
# electron-deuteron magnetic moment ratio
ueud = -2143.923498
# electron to shielded helione magnetic moment ratio (gas, sphere, 25 C)
ueush = 864.058255
gamma_e = PhysicalQuantityInteractive(1.760859794E11 , '1/(s*T)')
gamma_e.__doc__ = """electron gyromagnetic ratio """
# Muon, u-
#-------------------------------------------------------------------------
# NOTE(review): m_u, muc2_J and muc2_MeV defined in this section are later
# overwritten by the PHYSICO-CHEMICAL section (atomic mass constant values).
m_u = PhysicalQuantityInteractive(1.88353109E-28 , 'kg')
m_u.__doc__ = """muon mass """
mu_u = PhysicalQuantityInteractive(0.1134289168 , 'amu')
mu_u.__doc__ = """muon mass in muon relative atomic mass times amu """
muc2_J = PhysicalQuantityInteractive(1.69283332E-11 , 'J')
muc2_J.__doc__ = """energy equivalent """
muc2_MeV = PhysicalQuantityInteractive(105.6583568 , 'MeV')
muc2_MeV.__doc__ = """energy equivalent in MeV """
# muon-electron mass ratio
mume = 206.7682657
# muon-tau mass ratio
mum = 5.94572E-2
# muon-proton mass ratio
mump = 0.1126095173
# muon-neutron mass ratio
mumn = 0.1124545079
Molar_u = PhysicalQuantityInteractive(0.1134289168E-3 , 'kg/mol')
Molar_u.__doc__ = """muon molar mass """
lambda_C_u = PhysicalQuantityInteractive(11.73444197E-15 , 'm')
lambda_C_u.__doc__ = """muon Compton wavelength """
uu = PhysicalQuantityInteractive(-4.49044813E-26 , 'J/T')
uu.__doc__ = """muon magnetic moment """
# ratio of muon magnetic moment to Bohr magneton ratio
uuuB = -4.84197085E-3
# ratio of muon magnetic moment to nuclear magneton ratio
uuuN = -8.89059770
# muon magnetic moment anomaly |uu|/(e /2mu) - 1
a_u = 1.16591602E-3
# muon g-factor -2(1 + au)
g_u = -2.0023318320
# muon-proton magnetic moment ratio
uuup = -3.18334539
# Tau, tau-
#-------------------------------------------------------------------------
m_tau = PhysicalQuantityInteractive(3.16788E-27 , 'kg')
m_tau.__doc__ = """tau mass """
mu_tau = PhysicalQuantityInteractive(1.90774 , 'amu')
mu_tau.__doc__ = """tau mass (tau relative atomic mass times amu) """
mtauc2_J = PhysicalQuantityInteractive(2.84715E-10 , 'J')
mtauc2_J.__doc__ = """tau mass energy equivalent """
mtauc2_MeV = PhysicalQuantityInteractive(1777.05 , 'MeV')
mtauc2_MeV.__doc__ = """tau mass energy equivalent in MeV """
# tau-electron mass ratio
mtaume = 3477.60
# tau-muon mass ratio
mtaumu = 16.8188
# tau-proton mass ratio
mtaump = 1.89396
# tau-neutron mass ratio
mtaumn = 1.89135
Molar_tau = PhysicalQuantityInteractive(1.90774E-3 , 'kg/mol')
Molar_tau.__doc__ = """tau molar mass """
lambda_C_tau = PhysicalQuantityInteractive(0.69770E-15 , 'm')
lambda_C_tau.__doc__ = """tau Compton wavelength """
# Proton, p
#-------------------------------------------------------------------------
m_p = PhysicalQuantityInteractive(1.67262158E-27 , 'kg')
m_p.__doc__ = """proton mass """
mu_p = PhysicalQuantityInteractive(1.00727646688 , 'amu')
mu_p.__doc__ = """proton mass (proton relative atomic mass times amu) """
mpc2_J = PhysicalQuantityInteractive(1.50327731E-10 , 'J')
mpc2_J.__doc__ = """energy equivalent """
mpc2_MeV = PhysicalQuantityInteractive(938.271998 , 'MeV')
mpc2_MeV.__doc__ = """energy equivalent in MeV """
# proton-electron mass ratio
mpme = 1836.1526675
# proton-muon mass ratio
mpmu = 8.88024408
# proton-tau mass ratio
mpmtau = 0.527994
# proton-neutron mass ratio
mpmn = 0.99862347855
emp = PhysicalQuantityInteractive(9.57883408E7 , 'C/kg')
emp.__doc__ = """proton charge to mass quotient """
Molar_p = PhysicalQuantityInteractive(1.00727646688E-3 , 'kg/mol')
Molar_p.__doc__ = """proton molar mass """
lambda_C_p = PhysicalQuantityInteractive(1.321409847E-15 , 'm')
lambda_C_p.__doc__ = """proton Compton wavelength h/mpc """
up = PhysicalQuantityInteractive(1.410606633E-26 , 'J/T')
up.__doc__ = """proton magnetic moment """
# proton magnetic moment to Bohr magneton ratio
upuB = 1.521032203E-3
# proton magnetic moment to nuclear magneton ratio
upuN = 2.792847337
# proton g-factor 2up/uN
g_p = 5.585694675
# proton-neutron magnetic moment ratio
upun = -1.45989805
usp = PhysicalQuantityInteractive(1.410570399E-26 , 'J/T')
usp.__doc__ = """shielded proton magnetic moment (H2O, sphere, 25 C)"""
# shielded proton magnetic moment to Bohr magneton ratio
uspuB = 1.520993132E-3
# shielded proton magnetic moment to nuclear magneton ratio
uspuN = 2.792775597
# proton magnetic shielding correction 1 - u p/up (H2O, sphere, 25 C)
spc = 25.687E-6
gamma_p = PhysicalQuantityInteractive(2.67522212E8 , '1/(s*T)')
gamma_p.__doc__ = """proton gyromagnetic ratio """
gamma_sp = PhysicalQuantityInteractive(2.67515341E8 , '1/(s*T)')
gamma_sp.__doc__ = """shielded proton gyromagnetic ratio (H2O, sphere, 25 C)"""
# Neutron, n
#-------------------------------------------------------------------------
m_n = PhysicalQuantityInteractive(1.67492716E-27 , 'kg')
m_n.__doc__ = """neutron mass """
mu_n = PhysicalQuantityInteractive(1.00866491578 , 'amu')
mu_n.__doc__ = """neutron mass (neutron relative atomic mass times amu) """
mnc2_J = PhysicalQuantityInteractive(1.50534946E-10 , 'J')
mnc2_J.__doc__ = """neutron mass energy equivalent """
mnc2_MeV = PhysicalQuantityInteractive(939.565330 , 'MeV')
mnc2_MeV.__doc__ = """neutron mass energy equivalent in MeV """
# neutron-electron mass ratio
mnme = 1838.6836550
# neutron-muon mass ratio
mnmu = 8.89248478
# neutron-tau mass ratio
mnm = 0.528722
# neutron-proton mass ratio
mnmp = 1.00137841887
Molar_n = PhysicalQuantityInteractive(1.00866491578E-3 , 'kg/mol')
Molar_n.__doc__ = """neutron molar mass """
lambda_C_n = PhysicalQuantityInteractive(1.319590898E-15 , 'm')
lambda_C_n.__doc__ = """neutron Compton wavelength"""
un = PhysicalQuantityInteractive(-0.96623640E-26 , 'J/T')
un.__doc__ = """neutron magnetic moment """
# neutron magnetic moment to Bohr magneton ratio
unuB = -1.04187563E-3
# neutron magnetic moment to nuclear magneton ratio
unuN = -1.91304272
# neutron g-factor
g_n = -3.82608545
# neutron-electron magnetic moment ratio
unue = 1.04066882E-3
# neutron-proton magnetic moment ratio
unup = -0.68497934
# neutron to shielded proton magnetic moment ratio (H2O, sphere, 25 C)
unusp = -0.68499694
gamma_n = PhysicalQuantityInteractive(1.83247188E8 , '1/(s*T)')
gamma_n.__doc__ = """neutron gyromagnetic ratio """
# Deuteron, d
#-------------------------------------------------------------------------
m_d = PhysicalQuantityInteractive(3.34358309E-27 , 'kg')
m_d.__doc__ = """deuteron mass """
mu_d = PhysicalQuantityInteractive(2.01355321271 , 'amu')
mu_d.__doc__ = """deuteron mass (deuteron relative atomic mass times amu) """
mdc2_J = PhysicalQuantityInteractive(3.00506262E-10 , 'J')
mdc2_J.__doc__ = """deuteron mass energy equivalent """
mdc2_eV = PhysicalQuantityInteractive(1875.612762 , 'MeV')
mdc2_eV.__doc__ = """deuteron mass energy equivalent in MeV """
# deuteron-electron mass ratio
mdme = 3670.4829550
# deuteron-proton mass ratio
mdmp = 1.99900750083
Molar_d = PhysicalQuantityInteractive(2.01355321271E-3 , 'kg/mol')
Molar_d.__doc__ = """deuteron molar mass """
ud = PhysicalQuantityInteractive(0.433073457E-26 , 'J/T')
ud.__doc__ = """deuteron magnetic moment """
# deuteron magnetic moment to Bohr magneton ratio
uduB = 0.4669754556E-3
# deuteron magnetic moment to nuclear magneton ratio
uduN = 0.8574382284
# deuteron-electron magnetic moment ratio
udue = -4.664345537E-4
# deuteron-proton magnetic moment ratio
udup = 0.3070122083
# deuteron-neutron magnetic moment ratio
udun = -0.44820652
# Helion, h
#-------------------------------------------------------------------------
m_h = PhysicalQuantityInteractive(5.00641174E-27 , 'kg')
m_h.__doc__ = """helion mass """
mu_h = PhysicalQuantityInteractive(3.01493223469 , 'amu')
mu_h.__doc__ = """helion mass (helion relative atomic mass times amu) """
mhc2_J = PhysicalQuantityInteractive(4.49953848E-10 , 'J')
mhc2_J.__doc__ = """helion mass energy equivalent """
mhc2_MeV = PhysicalQuantityInteractive(2808.39132 , 'MeV')
mhc2_MeV.__doc__ = """helion mass energy equivalent in MeV """
# helion-electron mass ratio
mhme = 5495.885238
# helion-proton mass ratio
mhmp = 2.99315265850
Molar_h = PhysicalQuantityInteractive(3.01493223469E-3 , 'kg/mol')
Molar_h.__doc__ = """helion molar mass """
ush = PhysicalQuantityInteractive(-1.074552967E-26 , 'J/T')
ush.__doc__ = """shielded helion magnetic moment (gas, sphere, 25 C)"""
# shielded helion magnetic moment to Bohr magneton ratio
ushuB = -1.158671474E-3
# shielded helion magnetic moment to nuclear magneton ratio
ushuN = -2.127497718
# shielded helion to proton magnetic moment ratio (gas, sphere, 25 C)
ushup = -0.761766563
# shielded helion to shielded proton magnetic moment ratio (gas/H2O, spheres, 25 C)
ushusp = -0.7617861313
gamma_h = PhysicalQuantityInteractive(2.037894764E8 , '1/(s*T)')
gamma_h.__doc__ = """shielded helion gyromagnetic (gas, sphere, 25 C) """
# Alpha particle,
#-------------------------------------------------------------------------
m_alpha = PhysicalQuantityInteractive(6.64465598E-27 , 'kg')
m_alpha.__doc__ = """alpha particle mass """
mu_alpha = PhysicalQuantityInteractive(4.0015061747 , 'amu')
mu_alpha.__doc__ = """alpha particle mass (alpha particle relative atomic mass times amu) """
malphac2_J = PhysicalQuantityInteractive(5.97191897E-10 , 'J')
malphac2_J.__doc__ = """alpha particle mass energy equivalent """
malphac2_MeV = PhysicalQuantityInteractive(3727.37904 , 'MeV')
malphac2_MeV.__doc__ = """alpha particle mass energy equivalent in MeV """
# alpha particle to electron mass ratio
malphame = 7294.299508
# alpha particle to proton mass ratio
malphamp = 3.9725996846
Molar_alpha = PhysicalQuantityInteractive(4.0015061747E-3 , 'kg/mol')
Molar_alpha.__doc__ = """alpha particle molar mass"""
# PHYSICO-CHEMICAL
#-------------------------------------------------------------------------
N_A = PhysicalQuantityInteractive(6.02214199E23 , '1/mol')
N_A.__doc__ = """Avogadro constant """
# L is the alternate (IUPAC) symbol for the Avogadro constant
L = PhysicalQuantityInteractive(6.02214199E23 , '1/mol')
# NOTE(review): this m_u (atomic mass constant) silently overwrites the muon
# mass m_u defined earlier in this module; muc2_J/muc2_MeV below likewise
# overwrite the muon energy equivalents. Importers see these later values.
m_u = PhysicalQuantityInteractive(1.66053873E-27 , 'kg')
m_u.__doc__ = """atomic mass constant mu = 112m(12C) = 1 u = 10E-3 kg mol-1/NA"""
# atomic mass constant mu = 112m(12C) = 1 u = 10E-3 kg mol-1/NA
amu = m_u
muc2_J = PhysicalQuantityInteractive(1.49241778E-10 , 'J')
muc2_J.__doc__ = """energy equivalent of the atomic mass constant"""
muc2_MeV = PhysicalQuantityInteractive(931.494013 , 'MeV')
muc2_MeV.__doc__ = """energy equivalent of the atomic mass constant in MeV """
F = PhysicalQuantityInteractive(96485.3415 , 'C/mol')
F.__doc__ = """Faraday constant"""
N_Ah = PhysicalQuantityInteractive(3.990312689E-10 , 'J*s/mol')
N_Ah.__doc__ = """molar Planck constant """
R = PhysicalQuantityInteractive(8.314472 , 'J/(mol*K)')
R.__doc__ = """molar gas constant """
k_J = PhysicalQuantityInteractive(1.3806503E-23 , 'J/K')
k_J.__doc__ = """Boltzmann constant """
k_eV = PhysicalQuantityInteractive(8.617342E-5 , 'eV/K')
k_eV.__doc__ = """Boltzmann constant in eV """
n_0 = PhysicalQuantityInteractive(2.6867775E25 , '1/m**3')
n_0.__doc__ = """Loschmidt constant NA/Vm """
Vm_1 = PhysicalQuantityInteractive(22.413996E-3 , 'm**3/mol')
Vm_1.__doc__ = """molar volume of ideal gas RT/p T = 273.15 K, p = 101.325 kPa """
Vm_2 = PhysicalQuantityInteractive(22.710981E-3 , 'm**3/mol')
Vm_2.__doc__ = """molar volume of ideal gas RT/p T = 273.15 K, p = 100 kPa """
# Sackur-Tetrode constant (absolute entropy constant) 52 + ln_(2 mukT1/h2)3/2kT1/p0
# T1 = 1 K, p0 = 100 kPa
S_0R_1 = -1.1517048
# T1 = 1 K, p0 = 101.325 kPa
S_0R_2 = -1.1648678
sigma = PhysicalQuantityInteractive(5.670400E-8 , 'W/(m**2*K**4)')
sigma.__doc__ = """Stefan-Boltzmann constant """
c_1 = PhysicalQuantityInteractive(3.74177107E-16 , 'W*m**2')
c_1.__doc__ = """first radiation constant"""
c_1L = PhysicalQuantityInteractive(1.191042722E-16 , 'W*m**2/sr')
c_1L.__doc__ = """first radiation constant for spectral radiance"""
c_2 = PhysicalQuantityInteractive(1.4387752E-2 , 'm*K')
c_2.__doc__ = """second radiation constant"""
b = PhysicalQuantityInteractive(2.8977686E-3 , 'm*K')
b.__doc__ = """Wien displacement law constant b = maxT = c2/4.965 114231... """
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
    """Type-stub client for AWS CloudWatch Logs.

    Every method body is ``pass``: this class exists only so that IDEs and
    type checkers can resolve method names and signatures — at runtime the
    real implementation is provided by botocore's ``BaseClient`` dispatch.
    Parameter names mirror the CloudWatch Logs API request fields
    (camelCase, per the service's wire format).
    """
    def associate_kms_key(self, logGroupName: str, kmsKeyId: str):
        pass
    def can_paginate(self, operation_name: str = None):
        pass
    def cancel_export_task(self, taskId: str):
        pass
    def create_export_task(self, logGroupName: str, fromTime: int, to: int, destination: str, taskName: str = None, logStreamNamePrefix: str = None, destinationPrefix: str = None) -> Dict:
        pass
    def create_log_group(self, logGroupName: str, kmsKeyId: str = None, tags: Dict = None):
        pass
    def create_log_stream(self, logGroupName: str, logStreamName: str):
        pass
    def delete_destination(self, destinationName: str):
        pass
    def delete_log_group(self, logGroupName: str):
        pass
    def delete_log_stream(self, logGroupName: str, logStreamName: str):
        pass
    def delete_metric_filter(self, logGroupName: str, filterName: str):
        pass
    def delete_resource_policy(self, policyName: str = None):
        pass
    def delete_retention_policy(self, logGroupName: str):
        pass
    def delete_subscription_filter(self, logGroupName: str, filterName: str):
        pass
    def describe_destinations(self, DestinationNamePrefix: str = None, nextToken: str = None, limit: int = None) -> Dict:
        pass
    def describe_export_tasks(self, taskId: str = None, statusCode: str = None, nextToken: str = None, limit: int = None) -> Dict:
        pass
    def describe_log_groups(self, logGroupNamePrefix: str = None, nextToken: str = None, limit: int = None) -> Dict:
        pass
    def describe_log_streams(self, logGroupName: str, logStreamNamePrefix: str = None, orderBy: str = None, descending: bool = None, nextToken: str = None, limit: int = None) -> Dict:
        pass
    def describe_metric_filters(self, logGroupName: str = None, filterNamePrefix: str = None, nextToken: str = None, limit: int = None, metricName: str = None, metricNamespace: str = None) -> Dict:
        pass
    def describe_queries(self, logGroupName: str = None, status: str = None, maxResults: int = None, nextToken: str = None) -> Dict:
        pass
    def describe_resource_policies(self, nextToken: str = None, limit: int = None) -> Dict:
        pass
    def describe_subscription_filters(self, logGroupName: str, filterNamePrefix: str = None, nextToken: str = None, limit: int = None) -> Dict:
        pass
    def disassociate_kms_key(self, logGroupName: str):
        pass
    def filter_log_events(self, logGroupName: str, logStreamNames: List = None, logStreamNamePrefix: str = None, startTime: int = None, endTime: int = None, filterPattern: str = None, nextToken: str = None, limit: int = None, interleaved: bool = None) -> Dict:
        pass
    def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
        pass
    def get_log_events(self, logGroupName: str, logStreamName: str, startTime: int = None, endTime: int = None, nextToken: str = None, limit: int = None, startFromHead: bool = None) -> Dict:
        pass
    def get_log_group_fields(self, logGroupName: str, time: int = None) -> Dict:
        pass
    def get_log_record(self, logRecordPointer: str) -> Dict:
        pass
    def get_paginator(self, operation_name: str = None) -> Paginator:
        pass
    def get_query_results(self, queryId: str) -> Dict:
        pass
    def get_waiter(self, waiter_name: str = None) -> Waiter:
        pass
    def list_tags_log_group(self, logGroupName: str) -> Dict:
        pass
    def put_destination(self, destinationName: str, targetArn: str, roleArn: str) -> Dict:
        pass
    def put_destination_policy(self, destinationName: str, accessPolicy: str):
        pass
    def put_log_events(self, logGroupName: str, logStreamName: str, logEvents: List, sequenceToken: str = None) -> Dict:
        pass
    def put_metric_filter(self, logGroupName: str, filterName: str, filterPattern: str, metricTransformations: List):
        pass
    def put_resource_policy(self, policyName: str = None, policyDocument: str = None) -> Dict:
        pass
    def put_retention_policy(self, logGroupName: str, retentionInDays: int):
        pass
    def put_subscription_filter(self, logGroupName: str, filterName: str, filterPattern: str, destinationArn: str, roleArn: str = None, distribution: str = None):
        pass
    def start_query(self, logGroupName: str, startTime: int, endTime: int, queryString: str, limit: int = None) -> Dict:
        pass
    def stop_query(self, queryId: str) -> Dict:
        pass
    def tag_log_group(self, logGroupName: str, tags: Dict):
        pass
    def test_metric_filter(self, filterPattern: str, logEventMessages: List) -> Dict:
        pass
    def untag_log_group(self, logGroupName: str, tags: List):
        pass
|
<filename>src/data/make_dataset.py
# -*- coding: utf-8 -*-
"""
DEPRECATED
"""
import os
import json
from glob import glob
import codecs
import click
import logging
from pathlib import Path
import pdftotext
import docx2txt
from striprtf.striprtf import rtf_to_text
from natasha import (
MorphVocab,
NewsEmbedding,
NewsNERTagger,
NamesExtractor,
DatesExtractor
)
import ner
# Default CLI argument values (Windows development paths; overridable on the
# command line via the click arguments on main()).
INPUT_DIRPATH = r'D:\develop\hr-scoring\data\raw\test-data'
OUTPUT_FILEPATH = r'D:\develop\hr-scoring\data\interim\pool-test.json'
# Natasha NLP pipeline components, constructed once at import time and shared
# by prepare_ner(); model loading here is expensive, so keep it module-level.
morph_vocab = MorphVocab()
emb = NewsEmbedding()
ner_tagger = NewsNERTagger(emb)
names_extractor = NamesExtractor(morph_vocab)
dates_extractor = DatesExtractor(morph_vocab)
def pdf2text(pdf_path):
    """Extract the text of a PDF file; pages are joined with blank lines."""
    with open(pdf_path, "rb") as handle:
        pages = pdftotext.PDF(handle)
        extracted = "\n\n".join(pages)
    return extracted
def rtf2text(fpath):
    """Read a cp1251-encoded RTF file and return its plain-text content."""
    with codecs.open(fpath, 'r', encoding='cp1251') as source:
        raw_rtf = source.read()
    return rtf_to_text(raw_rtf)
def make_resume_info_dict(fpath, text=None):
    """Build the per-resume record that is serialized into the output JSON.

    Args:
        fpath: path of the resume file (absolutized for 'filepath',
            basename kept for 'filename').
        text: extracted resume text, or None when extraction was skipped
            (unsupported file type).

    Returns:
        dict with keys 'filepath', 'filename', 'skip', 'ner', 'text'.
        'skip' is True exactly when no text was supplied; 'ner' starts empty
        and is filled in later by prepare_ner().
    """
    # `is not None` (identity test) instead of `!= None`; evaluate once.
    has_text = text is not None
    resume_info = {
        'filepath': os.path.abspath(fpath),
        'filename': os.path.basename(fpath),
        'skip': not has_text,
        'ner': [],
        'text': text if has_text else ''
    }
    return resume_info
def prepare_ner(text):
    """Split *text* into blocks and run entity extraction on each one.

    Returns a list with one entry per block that produced at least one
    entity; blocks without entities are dropped.
    """
    block_info = []
    for block_text in ner.text_to_blocks(text):
        entities = ner.extract_entities(block_text, dates_extractor, ner_tagger, names_extractor)
        if entities:
            block_info.append(entities)
    return block_info
# @profile
@click.command()
@click.argument('input_dirpath', default=INPUT_DIRPATH)
@click.argument('output_filepath', type=click.Path(), default=OUTPUT_FILEPATH)
def main(input_dirpath, output_filepath):
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).
    """
    logger = logging.getLogger(__name__)
    logger.info('Extract Text data set from raw data')
    logger.info(f'Input DIR: {input_dirpath}')
    logger.info(f'Output FILE: "{output_filepath}')
    # Recursively collect every file under the input directory.
    fpaths = []
    for dirpath, _dirnames, _filenames in os.walk(input_dirpath):
        fpaths.extend(glob(os.path.join(dirpath, '*.*')))
    logger.info(f'Find {len(fpaths)} files')
    # Partition by extension; anything unrecognized is recorded but skipped.
    pdf_fpaths = [fpath for fpath in fpaths if fpath.endswith(".pdf")]
    docx_fpaths = [fpath for fpath in fpaths if fpath.endswith(".docx")]
    rtf_fpaths = [fpath for fpath in fpaths if fpath.endswith(".rtf")]
    recognized = set(pdf_fpaths + docx_fpaths + rtf_fpaths)
    skip_paths = [fpath for fpath in fpaths if fpath not in recognized]
    logger.info(f'Find PDFs:{len(pdf_fpaths)} | DOCXs:{len(docx_fpaths)} | RTFs:{len(rtf_fpaths)} | SKIP: {len(skip_paths)}')
    # Extract raw text per format, keeping (path, text) pairs in this order:
    # PDFs first, then DOCX, then RTF.
    fpath_txts = [(fpath, pdf2text(fpath)) for fpath in pdf_fpaths]
    fpath_txts += [(fpath, docx2txt.process(fpath)) for fpath in docx_fpaths]
    fpath_txts += [(fpath, rtf2text(fpath)) for fpath in rtf_fpaths]
    processed_data = []
    for fpath, txt in fpath_txts:
        resume_info = make_resume_info_dict(fpath, txt)
        resume_info['ner'] = prepare_ner(resume_info['text'])
        processed_data.append(resume_info)
    logger.info(f'Processed {len(processed_data)}')
    # Unrecognized files still get a record, flagged skip=True.
    for fpath in skip_paths:
        processed_data.append(make_resume_info_dict(fpath, None))
    with open(output_filepath, 'w', encoding='utf8') as fd:
        json.dump(processed_data, fd, indent=4, ensure_ascii=False)
    logger.info(f'Write to JSON {len(processed_data)}')
if __name__ == '__main__':
    # Configure root logging before running the click-decorated entry point
    # (main() parses its arguments from sys.argv).
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    main()
|
<reponame>aasquier/AI_Tower_Defense
import pygame
from projectile.projectile import Projectile, DamageType
from animations.animation import Animation
from constants.animationConstants import *
from projectile.iceBeam import IceBeam
# from .igloo import Igloo
# Tower base class
class Tower:
    """Base class for all defensive towers.

    Owns its map position, health, attack cooldown bookkeeping, the
    projectiles currently in flight ('magazine'), and the impact animations
    still being rendered. Subclasses customize behavior primarily by
    overriding loadProjectile().
    """
    def __init__(self, position):
        # position is an (x, y) pair in map pixel coordinates.
        self.name = "<NAME>"
        self.cost = 200
        self.position = position
        self.x = position[0] # Position on map
        self.y = position[1]
        self.attackRadius = 192 # Distance it can attack enemies from, two grid squares
        self.closeEnemies = []
        self.startingHealth = 5
        self.health = self.startingHealth
        self.weaknesses = [DamageType.melee, DamageType.fakeNews] # All towers are weak to the punches
        self.attackCooldownTime = 0 # Timestamp showing when tower can attack again
        self.damageDealt = 0
        self.healthBarWidth = 50
        self.healthBarHeight = 10
        self.healthBarYOffset = 10 # Larger numbers will move the health bar closer to the enemies head
        self.attackAnimationStopTime = 0
        self.projectileColor = (155, 155, 155)
        self.width = 64 # Width of animation images
        self.height = 64 # Height of animation images
        self.image = None # Current image being displayed
        self.projectilesFired = [] # projectile magazine
        self.animations = [] # animations to render
        # for the GAs
        self.indexForRecordTable = 0
        # for deep learning
        self.damageDealtOnTurn = 0
        self.damageTakenOnTurn = 0
    # launches a tower attacking round
    def attack(self, enemies, ticks):
        '''
        Looks for enemies within it's attack radius
        Will find the closest one and attack it
        '''
        self.closeEnemies = enemies
        #Check if the tower is ready to attack again
        # ticks = pygame.time.get_ticks()
        if ticks >= self.attackCooldownTime:
            attackableEnemies = []
            i = 0
            # TODO this is where we would need to be selective about what we add to the attack queue
            for enemy in enemies:
                # Squared euclidean distance from tower to enemy.
                dist = (enemy.x - self.x) ** 2 + (enemy.y - self.y) ** 2
                #Use radius squared to avoid taking square roots of distance
                if dist <= self.attackRadius ** 2:
                    attackableEnemies.append((i, dist))
                i += 1
            if len(attackableEnemies) > 0:
                # target the closest enemy and load projectile into the magazine
                closestEnemyIndex = (min(attackableEnemies, key = lambda enemy: enemy[1]))[0]
                projectileToFire = self.loadProjectile(enemies[closestEnemyIndex])
                projectileToFire.enemies = enemies
                # Cooldown is driven by the projectile's own reload time.
                self.attackCooldownTime = ticks + projectileToFire.reloadTime
                targetAcquired = projectileToFire.fire(ticks)
                if targetAcquired:
                    projectileToFire.attackAnimationStopTime = ticks + projectileToFire.attackAnimationDuration
                    projectileToFire.color = self.projectileColor
                    self.projectilesFired.append(projectileToFire)
        return enemies
    ''' Draws a health box above each tower '''
    def drawHealthBox(self, win, centerX, centerY):
        if self.health > 0:
            healthBarX = self.x - (self.healthBarWidth / 2)
            healthBarY = self.y - self.height + self.healthBarYOffset
            if self.health == self.startingHealth:
                # NOTE(review): at full health the red rect fully covers the
                # green one drawn just before it — confirm the intended order.
                pygame.draw.rect(win, HEALTH_GREEN, (healthBarX, healthBarY, self.healthBarWidth, self.healthBarHeight)) #Outline of health bar
                pygame.draw.rect(win, HEALTH_RED, (healthBarX, healthBarY, self.healthBarWidth, self.healthBarHeight)) #Inside of health bar
            else:
                # Green background, red overlay proportional to remaining health.
                pygame.draw.rect(win, HEALTH_GREEN, (healthBarX, healthBarY, self.healthBarWidth, self.healthBarHeight)) #Outline health bar
                pygame.draw.rect(win, HEALTH_RED, (healthBarX, healthBarY, (self.healthBarWidth / self.startingHealth) * self.health, self.healthBarHeight))
    # draw the tower and any of its projectiles/animations
    def draw(self, win, ticks, visualMode):
        i = 0
        # cycle through the projectiles in our magazine
        while i < len(self.projectilesFired):
            # check and make sure animation time hasn't lapsed
            if self.projectilesFired[i].attackAnimationStopTime < ticks:
                del self.projectilesFired[i]
                continue
            # TODO I think we may want to think about this. It currently is saying that a projectile has hit it's target
            if self.projectilesFired[i].draw(win, ticks, visualMode) == True:
                initialDamage = self.projectilesFired[i].damage
                self.damageDealt += initialDamage
                # deep Q
                self.damageDealtOnTurn += initialDamage
                if type(self.projectilesFired[i]) == IceBeam:
                    self.damageDealtOnTurn += 2
                if visualMode:
                    # replace the projectile with its final animation in the same position
                    self.addAnimationToQueue(self.projectilesFired[i], ticks)
                # NOTE(review): after this del, i += 1 below skips the element
                # that shifted into slot i — confirm whether a `continue` was
                # intended here as in the expiry branch above.
                del self.projectilesFired[i]
            i += 1
        if visualMode:
            ''' Render the tower to the map '''
            centerX = self.x - (self.width / 2)
            centerY = self.y - (self.height / 2)
            # cycle through our animations for drawing, i.e. explosions
            # for j in range(len(self.animations)):
            j = 0
            while j < len(self.animations):
                self.animations[j].draw(win)
                # remove any animations that have exceeded their durations
                # NOTE(review): as above, deleting at j and then incrementing
                # skips the next animation for one frame — verify intent.
                if self.animations[j].attackAnimationStopTime < ticks:
                    del self.animations[j]
                j += 1
                # continue
            # draw health bar and render sprite
            self.drawHealthBox(win, centerX, centerY)
            win.blit(self.image, (self.x - (self.width / 2), self.y - (self.height / 2)))
    # this is called when an enemy has hit a tower to reduce the towers health
    # (damageType and ticks are unused here; kept for subclass overrides)
    def hit(self, damage, damageType, ticks):
        self.health = self.health - damage
        self.damageTakenOnTurn += damage
    # parent stub for loading projectiles; subclasses override to fire
    # specialized projectile types
    def loadProjectile(self, enemy):
        return Projectile((self.x, self.y), enemy, self.closeEnemies)
    # adds an animation for a projectile that has reached its target to the queue
    def addAnimationToQueue(self, projectile, ticks):
        animation = projectile.finalAnimation(projectile.enemyStartingPosition)
        animation.attackAnimationStopTime = ticks + animation.attackAnimationDuration
        self.animations.append(animation)
|
<gh_stars>0
import logging
import os
import pandas as pd
import numpy as np
import itertools as it
import xgboost as xgb
class XGB(object):
    """XGBoost-based subtype classifier for CRISPR repeat sequences.

    Wraps a pre-trained xgboost model: repeats are featurized as canonical
    k-mer counts and classified into subtypes, and the predictions are
    written back into the ``crisprs_all.tab`` results table.
    """
    def __init__(self, obj):
        # Keep a handle on the master object and mirror all of its attributes
        # onto self (e.g. threads, xgb, typedict, kmer, out, redo, crisprs,
        # pred_prob are read later — they must exist on *obj*).
        self.master = obj
        for key, val in vars(obj).items():
            setattr(self, key, val)
        # Translation table for DNA reverse-complementing.
        base_for = "ACGT"
        base_rev = "TGCA"
        self.comp_tab = str.maketrans(base_for, base_rev)
    def load_xgb_model(self):
        """Load the booster from self.xgb and the label mapping from self.typedict."""
        logging.debug('Loading xgboost model')
        bst = xgb.Booster({'nthread':self.threads})
        bst.load_model(self.xgb)
        self.bst = bst
        # Load label dict here:
        # typedict lines look like "<name>:<index>"; map index -> name.
        with open(self.typedict, 'r') as f:
            rs = (ll.rstrip().split(':') for ll in f)
            self.label_dict = {r[1]:r[0] for r in rs}
    def generate_canonical_kmer(self):
        """Build the sorted list of canonical k-mers (kmer <= its revcomp)."""
        logging.debug('Generating canonical {}mers'.format(self.kmer))
        letters = ['A','C','G','T']
        all_kmer = [''.join(k) for k in it.product(letters, repeat=self.kmer)]
        all_kmer_rev = [x.translate(self.comp_tab)[::-1] for x in all_kmer]
        # Keep a k-mer's reverse complement only when the forward form is not
        # lexicographically smaller, i.e. one representative per rc-pair.
        can_kmer = list(it.compress(all_kmer_rev, [not kf < kr for kf,kr in zip(all_kmer, all_kmer_rev)]))
        can_kmer.sort()
        self.can_kmer = can_kmer
    def count_kmer(self, seq):
        """Count canonical k-mer occurrences in *seq*.

        Each window is collapsed onto the lexicographically smaller of
        (forward, reverse-complement) before counting.
        """
        kmer_d = {}
        for i in range(len(seq) - self.kmer + 1):
            kmer_for = seq[i:(i+self.kmer)]
            kmer_rev = kmer_for.translate(self.comp_tab)[::-1]
            if kmer_for < kmer_rev:
                kmer = kmer_for
            else:
                kmer = kmer_rev
            if kmer in kmer_d:
                kmer_d[kmer] += 1
            else:
                kmer_d[kmer] = 1
        return kmer_d
    def xgb_run(self):
        """Predict subtypes for all repeats and annotate crisprs_all.tab.

        Reads the table written by the CRISPR detection step, predicts a
        subtype per array, and rewrites the table with Prediction / Subtype /
        Subtype_probability / Trusted columns.
        """
        if not self.redo:
            # Get repeats
            self.repeats = [x.cons for x in self.crisprs]
            # Load crispr table
            df = pd.read_csv(self.out+'crisprs_all.tab', sep='\t')
            # Check
            # NOTE(review): self.any_crispr is only assigned when the table is
            # non-empty here; on the redo path it is presumably mirrored from
            # the master object in __init__ — confirm.
            if len(df) > 0:
                self.any_crispr = True
            else:
                logging.info('No CRISPRs found.')
                os.remove(self.out+'crisprs_all.tab')
        # Predict
        if self.any_crispr:
            self.predict_repeats()
            # Add to file
            df['Prediction'] = self.z_type
            df['Subtype'] = self.z_type
            df['Subtype_probability'] = self.z_max
            # Low-confidence calls keep their Subtype but are flagged Unknown.
            df.loc[df.Subtype_probability < self.pred_prob, 'Prediction'] = 'Unknown'
            df['Subtype_probability'] = df['Subtype_probability'].round(3)
            # We trust arrays with a known (predictable) repeat sequence
            df.loc[df.Subtype_probability >= 0.9, 'Trusted'] = True
            df.to_csv(self.out+'crisprs_all.tab', sep='\t', index=False)
    def predict_repeats(self):
        """Run the booster over k-mer features of self.repeats."""
        logging.info('Predicting subtype of CRISPR repeats')
        # Prepare
        self.load_xgb_model()
        self.generate_canonical_kmer()
        self.repeats = [x.upper() for x in self.repeats]
        # Count kmers (first index is a to ensure all kmers are in the df)
        z_df = pd.DataFrame([dict(zip(self.can_kmer, np.zeros(len(self.can_kmer))))] + [self.count_kmer(x) for x in self.repeats]).fillna(0)
        z_df = z_df.reindex(sorted(z_df.columns), axis=1)
        # Predict
        self.z_pred = self.bst.predict(xgb.DMatrix(z_df), ntree_limit=int(self.bst.attr('best_iteration')))
        # Get type and max probability
        # The [1:] slices drop the all-zeros dummy row prepended above.
        self.z_best = [x.argmax() for x in self.z_pred][1:len(self.z_pred)]
        self.z_max = [x.max() for x in self.z_pred][1:len(self.z_pred)]
        # Convert to type string
        self.z_type = [self.label_dict[str(x)] for x in self.z_best]
    def print_xgb(self):
        """Print repeat, predicted subtype, and probability, tab-separated."""
        for i in range(len(self.repeats)):
            print('{}\t{}\t{}'.format(self.repeats[i],
                                      self.z_type[i],
                                      self.z_max[i]))
|
<filename>arelle/WatchRss.py
'''
Created on Oct 17, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
import os, sys, traceback, re
from arelle import (ModelXbrl, XmlUtil, ModelVersReport, XbrlConst, ModelDocument,
ValidateXbrl, ValidateFiling, ValidateVersReport, ValidateFormula)
from arelle.FileSource import openFileSource
from arelle.ModelValue import (qname, QName)
from arelle.PluginManager import pluginClassMethods
from arelle.UrlUtil import parseRfcDatetime
import datetime
def initializeWatcher(modelXbrl):
    """Create and return a WatchRss watcher bound to *modelXbrl*."""
    watcher = WatchRss(modelXbrl)
    return watcher
class ValidationException(Exception):
    """Raised when validation of an XBRL document fails.

    Attributes:
        message: human-readable description of the failure.
        severity: severity string (e.g. an error level).
        code: message code identifying the failed rule.
        messageLog: list of log entries accumulated for this exception.
    """
    def __init__(self, message, severity, code):
        # Initialize the Exception base class so str(exc), exc.args and
        # pickling behave normally (the original skipped this).
        super().__init__(message)
        self.message = message
        self.severity = severity
        self.code = code
        self.messageLog = []
    def __repr__(self):
        return "{0}({1})={2}".format(self.code,self.severity,self.message)
class WatchRss:
    """Background watcher for an SEC RSS feed of XBRL filings.

    Runs a daemon thread that periodically reloads the RSS feed, loads each
    new filing, optionally validates it (disclosure system / XBRL / calc /
    formula rules), matches fact text, and raises e-mail alerts. State is
    shared with the UI through the rssModelXbrl's modelManager.
    """
    def __init__(self, rssModelXbrl):
        self.rssModelXbrl = rssModelXbrl
        self.cntlr = rssModelXbrl.modelManager.cntlr
        self.thread = None
        self.stopRequested = False
        # Register self on the model so callers can reach the watcher.
        rssModelXbrl.watchRss = self
        # cache modelManager options which dialog overrides
        self.priorValidateCalcLB = self.priorFormulaRunIDs = None
    def start(self):
        """Start the watch thread (no-op if one is already running).

        Temporarily overrides the modelManager's calc-linkbase and formula
        options from the rssWatchOptions dialog settings; the prior values
        are saved and restored when the watch loop stops.
        """
        import threading
        if not self.thread or not self.thread.is_alive():
            self.stopRequested = False
            self.priorValidateCalcLB = self.priorFormulaRunIDs = None
            rssWatchOptions = self.rssModelXbrl.modelManager.rssWatchOptions
            if rssWatchOptions.get("validateCalcLinkbase") != self.rssModelXbrl.modelManager.validateCalcLB:
                self.priorValidateCalcLB = self.rssModelXbrl.modelManager.validateCalcLB
                self.rssModelXbrl.modelManager.validateCalcLB = rssWatchOptions.get("validateCalcLinkbase")
            if (rssWatchOptions.get("validateFormulaAssertions") in (False,True) and
                self.rssModelXbrl.modelManager.formulaOptions is not None):
                self.priorFormulaRunIDs = self.rssModelXbrl.modelManager.formulaOptions.runIDs
                # Empty runIDs runs all formulas; the fake ID matches none.
                self.rssModelXbrl.modelManager.formulaOptions.runIDs = "" if rssWatchOptions.get("validateFormulaAssertions") else "**FakeIdToBlockFormulas**"
            self.thread = threading.Thread(target=lambda: self.watchCycle())
            self.thread.daemon = True
            self.thread.start()
        return
    # load
    # validate
    def stop(self):
        """Request the watch loop to stop at its next check point."""
        if self.thread and self.thread.is_alive():
            self.stopRequested = True
    def watchCycle(self):
        """Main loop of the watch thread.

        Each iteration: reload the RSS feed, process new items in pubDate
        order (load, validate, match text, alert, record results), then
        either exit (if stop was requested) or sleep 10 minutes.
        """
        logFile = self.rssModelXbrl.modelManager.rssWatchOptions.get("logFileUri")
        if logFile:
            self.cntlr.startLogging(logFileName=logFile,
                                    logFileMode = "a",
                                    logFormat="[%(messageCode)s] %(message)s - %(file)s",
                                    logLevel="DEBUG")
        while not self.stopRequested:
            rssWatchOptions = self.rssModelXbrl.modelManager.rssWatchOptions
            # check rss expiration
            rssHeaders = self.cntlr.webCache.getheaders(self.rssModelXbrl.modelManager.rssWatchOptions.get("feedSourceUri"))
            expires = parseRfcDatetime(rssHeaders.get("expires"))
            # NOTE(review): expiration check is disabled — reloadNow is
            # hard-coded True and `expires` is unused; confirm intended.
            reloadNow = True # texpires and expires > datetime.datetime.now()
            # reload rss feed
            self.rssModelXbrl.reload('checking RSS items', reloadCache=reloadNow)
            if self.stopRequested: break
            # setup validator
            postLoadActions = []
            if (rssWatchOptions.get("validateDisclosureSystemRules") or
                rssWatchOptions.get("validateXbrlRules") or
                rssWatchOptions.get("validateFormulaAssertions")):
                self.instValidator = ValidateXbrl.ValidateXbrl(self.rssModelXbrl)
                postLoadActions.append(_("validating"))
                if (rssWatchOptions.get("validateFormulaAssertions")):
                    postLoadActions.append(_("running formulas"))
            else:
                self.instValidator = None
            matchTextExpr = rssWatchOptions.get("matchTextExpr")
            if matchTextExpr:
                matchPattern = re.compile(matchTextExpr)
                postLoadActions.append(_("matching text"))
            else:
                matchPattern= None
            postLoadAction = ', '.join(postLoadActions)
            # anything to check new filings for
            if (rssWatchOptions.get("validateDisclosureSystemRules") or
                rssWatchOptions.get("validateXbrlRules") or
                rssWatchOptions.get("validateCalcLinkbase") or
                rssWatchOptions.get("validateFormulaAssertions") or
                rssWatchOptions.get("alertMatchedFactText") or
                any(pluginXbrlMethod(rssWatchOptions)
                    for pluginXbrlMethod in pluginClassMethods("RssWatch.HasWatchAction"))
                ):
                # form keys in ascending order of pubdate
                pubDateRssItems = []
                for rssItem in self.rssModelXbrl.modelDocument.rssItems:
                    pubDateRssItems.append((rssItem.pubDate,rssItem.objectId()))
                for pubDate, rssItemObjectId in sorted(pubDateRssItems):
                    rssItem = self.rssModelXbrl.modelObject(rssItemObjectId)
                    # update ui thread via modelManager (running in background here)
                    self.rssModelXbrl.modelManager.viewModelObject(self.rssModelXbrl, rssItem.objectId())
                    if self.stopRequested:
                        break
                    # skip items already processed on a prior cycle
                    latestPubDate = XmlUtil.datetimeValue(rssWatchOptions.get("latestPubDate"))
                    if (latestPubDate and
                        rssItem.pubDate < latestPubDate):
                        continue
                    try:
                        # try zipped URL if possible, else expanded instance document
                        modelXbrl = ModelXbrl.load(self.rssModelXbrl.modelManager,
                                                   openFileSource(rssItem.zippedUrl, self.cntlr),
                                                   postLoadAction)
                        if self.stopRequested:
                            modelXbrl.close()
                            break
                        emailAlert = False
                        emailMsgs = []
                        if modelXbrl.modelDocument is None:
                            modelXbrl.error("arelle.rssWatch",
                                            _("RSS item %(company)s %(form)s document not loaded: %(date)s"),
                                            modelXbrl=modelXbrl, company=rssItem.companyName,
                                            form=rssItem.formType, date=rssItem.filingDate)
                            rssItem.status = "not loadable"
                        else:
                            for pluginXbrlMethod in pluginClassMethods("RssItem.Xbrl.Loaded"):
                                pluginXbrlMethod(modelXbrl, rssWatchOptions, rssItem)
                            # validate schema, linkbase, or instance
                            if self.stopRequested:
                                modelXbrl.close()
                                break
                            if self.instValidator:
                                self.instValidator.validate(modelXbrl, modelXbrl.modelManager.formulaOptions.typedParameters(modelXbrl.prefixedNamespaces))
                                # NOTE(review): option key "alertValiditionError" is
                                # likely a misspelling of "alertValidationError" —
                                # confirm against the options dialog before changing.
                                if modelXbrl.errors and rssWatchOptions.get("alertValiditionError"):
                                    emailAlert = True
                            for pluginXbrlMethod in pluginClassMethods("RssWatch.DoWatchAction"):
                                pluginXbrlMethod(modelXbrl, rssWatchOptions, rssItem)
                            # check match expression
                            if matchPattern:
                                for fact in modelXbrl.factsInInstance:
                                    v = fact.value
                                    if v is not None:
                                        m = matchPattern.search(v)
                                        if m:
                                            fr, to = m.span()
                                            msg = _("Fact Variable {0}\n   context {1}\n   matched text: {2}").format(
                                                    fact.qname, fact.contextID, v[max(0,fr-20):to+20])
                                            modelXbrl.info("arelle.rssInfo",
                                                           msg,
                                                           modelXbrl=modelXbrl) # msg as code passes it through to the status
                                            if rssWatchOptions.get("alertMatchedFactText"):
                                                emailAlert = True
                                                emailMsgs.append(msg)
                            if (rssWatchOptions.get("formulaFileUri") and rssWatchOptions.get("validateFormulaAssertions") and
                                self.instValidator):
                                # attach formulas
                                ModelDocument.load(modelXbrl, rssWatchOptions["formulaFileUri"])
                                ValidateFormula.validate(self.instValidator)
                            rssItem.setResults(modelXbrl)
                        modelXbrl.close()
                        del modelXbrl # completely dereference
                        self.rssModelXbrl.modelManager.viewModelObject(self.rssModelXbrl, rssItem.objectId())
                        if rssItem.assertionUnsuccessful and rssWatchOptions.get("alertAssertionUnsuccessful"):
                            emailAlert = True
                        if logFile:
                            self.cntlr.logHandler.flush() # write entries out
                        msg = _("Filing CIK {0}\n "
                                 "company {1}\n "
                                 "published {2}\n "
                                 "form type {3}\n "
                                 "filing date {4}\n "
                                 "period {5}\n "
                                 "year end {6}\n "
                                 "results: {7}").format(
                                 rssItem.cikNumber,
                                 rssItem.companyName,
                                 rssItem.pubDate,
                                 rssItem.formType,
                                 rssItem.filingDate,
                                 rssItem.period,
                                 rssItem.fiscalYearEnd,
                                 rssItem.status)
                        self.rssModelXbrl.info("arelle:rssWatch", msg, modelXbrl=self.rssModelXbrl)
                        smtpEmailSettings = rssWatchOptions.get("smtpEmailSettings")
                        emailAddress = rssWatchOptions.get("emailAddress")
                        if emailAlert and emailAddress and smtpEmailSettings and len(smtpEmailSettings) == 4:
                            smtpAddr, smtpPort, smtpUser, smtpPassword = smtpEmailSettings
                            portNum = int(smtpPort) if smtpPort else 0
                            self.rssModelXbrl.modelManager.showStatus(_("sending e-mail alert"))
                            import smtplib
                            from email.mime.text import MIMEText
                            emailMsg = MIMEText(msg + "\n" + "\n".join(emailMsgs))
                            emailMsg["Subject"] = _("Arelle RSS Watch alert on {0}").format(rssItem.companyName)
                            emailMsg["From"] = emailAddress
                            emailMsg["To"] = emailAddress
                            # Low port numbers use plain SMTP; otherwise SMTP over SSL.
                            if portNum < 125:
                                smtp = smtplib.SMTP(smtpAddr, portNum)
                            else:
                                smtp = smtplib.SMTP_SSL(smtpAddr, portNum)
                            if smtpUser or smtpPassword:
                                smtp.login(smtpUser, smtpPassword)
                            smtp.sendmail(emailAddress, [emailAddress], emailMsg.as_string())
                            smtp.quit()
                        self.rssModelXbrl.modelManager.showStatus(_("RSS item {0}, {1} completed, status {2}").format(rssItem.companyName, rssItem.formType, rssItem.status), 3500)
                        # persist progress so reprocessing resumes after this item
                        self.rssModelXbrl.modelManager.cntlr.rssWatchUpdateOption(rssItem.pubDate.strftime('%Y-%m-%dT%H:%M:%S'))
                    except Exception as err:
                        self.rssModelXbrl.error("arelle.rssError",
                                                _("RSS item %(company)s, %(form)s, %(date)s, exception: %(error)s"),
                                                modelXbrl=self.rssModelXbrl, company=rssItem.companyName,
                                                form=rssItem.formType, date=rssItem.filingDate, error=err,
                                                exc_info=True)
                    if self.stopRequested: break
            if self.stopRequested:
                self.cntlr.showStatus(_("RSS watch, stop requested"), 10000)
                # reset prior options for calc and formula running
                if self.priorValidateCalcLB is not None:
                    self.rssModelXbrl.modelManager.validateCalcLB = self.priorValidateCalcLB
                if self.priorFormulaRunIDs is not None:
                    self.rssModelXbrl.modelManager.formulaOptions.runIDs = self.priorFormulaRunIDs
            else:
                import time
                time.sleep(600)
        if logFile:
            self.cntlr.logHandler.close()
        self.thread = None # close thread
        self.stopRequested = False
|
<gh_stars>1-10
import json
import logging
import math
import socketserver
import struct
import threading
import lz4.frame
from landia.config import ServerConfig
from landia.common import StateDecoder, StateEncoder
from landia import gamectx
from .clock import clock
class UDPHandler(socketserver.BaseRequestHandler):
    """Handles one client datagram: decode request, apply events, reply with a snapshot.

    Request wire format: lz4-compressed UTF-8 JSON. Response: lz4-compressed
    JSON split into chunks, each prefixed with a struct-packed
    (chunk_index, chunk_count) header.
    """
    def handle(self):
        config: ServerConfig = self.server.config

        # Process Request data
        # self.request is (data, socket) for UDP servers.
        request_st = self.request[0]
        request_st = lz4.frame.decompress(request_st)
        request_st = request_st.decode('utf-8').strip()
        try:
            request_data = json.loads(request_st, cls=StateDecoder)
        except Exception as e:
            # Dump the undecodable payload for debugging, then fail loudly.
            print(request_st)
            raise e
        request_info = request_data['info']
        request_message = request_info['message']
        client = gamectx.get_remote_client(request_info['client_id'])
        player = gamectx.get_player(
            client,
            player_type=request_info['player_type'],
            is_human=request_info['is_human'],
            name=request_info['name'])
        snapshots_received = request_info['snapshots_received']

        # simulate missing parts
        skip_remove = False  # random.random() < 0.01

        # Reconnect?
        # An empty ack list means the client lost state; force a full snapshot.
        if len(snapshots_received) == 0:
            client.last_snapshot_time_ms = 0

        # Drop acknowledged snapshots from the unconfirmed set.
        for t in snapshots_received:
            if t in client.unconfirmed_messages:
                if skip_remove:
                    print("Skipping remove confirmation")
                    continue
                else:
                    client.unconfirmed_messages.remove(t)

        # Load events from client
        all_events_data = []
        for event_dict in request_data['items']:
            all_events_data.extend(event_dict)

        if len(all_events_data) > 0:
            gamectx.event_manager.load_snapshot(all_events_data)

        # Too many unacked deltas: reset and send a fresh full snapshot.
        if len(client.unconfirmed_messages) >= config.max_unconfirmed_messages_before_new_snapshot:
            client.last_snapshot_time_ms = 0
            client.unconfirmed_messages = set()

        snapshot_timestamp, snapshot = gamectx.create_snapshot_for_client(client)

        client.unconfirmed_messages.add(snapshot_timestamp)

        # Build response data
        response_data = {}
        response_data['info'] = {
            'server_tick': clock.get_ticks(),
            'server_time': clock.get_game_time(),
            'message': "UPDATE",
            'client_id': client.get_id(),
            'player_id': player.get_id(),
            'snapshot_timestamp': snapshot_timestamp}
        response_data['snapshot'] = snapshot

        # Convert response to json then compress and send in chunks
        response_data_st = json.dumps(response_data, cls=StateEncoder)
        response_data_st = bytes(response_data_st, 'utf-8')
        response_data_st = lz4.frame.compress(response_data_st)

        chunk_size = config.outgoing_chunk_size
        chunks = math.ceil(len(response_data_st)/chunk_size)
        socket = self.request[1]
        for i in range(chunks+1):  # TODO: +1 ??? why
            # Header carries 1-based chunk index and total chunk count.
            header = struct.pack('ll', i+1, chunks)
            data_chunk = header + response_data_st[i*chunk_size:(i+1)*chunk_size]
            # current_thread = threading.current_thread()
            # Simulate packet loss
            # if random.random() < 0.01:
            #     print("random skip chunk")
            #     continue
            socket.sendto(data_chunk, self.client_address)

        client.last_snapshot_time_ms = snapshot_timestamp
# TODO: Have a thread snapshot at regular intervals
class GameUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
    """Threaded UDP game server: each datagram is handled on its own thread.

    Args:
        conn: (host, port) bind address passed to the UDPServer base.
        config: server configuration object, exposed to handlers via
            ``self.server.config``.
        handler: request handler class (defaults to UDPHandler).
    """
    def __init__(self, conn, config, handler = UDPHandler):
        # super() follows the MRO: ThreadingMixIn defines no __init__, so
        # this initializes socketserver.UDPServer with the bind address
        # and handler class — equivalent to the explicit base call, but
        # cooperative-inheritance friendly.
        super().__init__(conn, handler)
        self.config = config
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Imports
########################################
import sys, os

# Update this to point to the directory where you copied the SciAnalysis base code
#SciAnalysis_PATH='/home/kyager/current/code/SciAnalysis/main/'
SciAnalysis_PATH = '/home/xf11bm/software/SciAnalysis/'
# Make SciAnalysis importable; an explicit membership test is clearer than
# relying on the side effect of an `x in y or y.append(x)` expression.
if SciAnalysis_PATH not in sys.path:
    sys.path.append(SciAnalysis_PATH)

# For remote execution of scripts
#import matplotlib as mpl
#mpl.use('Agg')
import glob
from SciAnalysis import tools
from SciAnalysis.XSAnalysis.Data import *
from SciAnalysis.XSAnalysis import Protocols
# Experimental parameters
########################################

mask_dir = SciAnalysis_PATH + '/SciAnalysis/XSAnalysis/masks/'

# Toggle between the SAXS (True) and WAXS (False) detector geometry.
if True:
    # SAXS detector on CMS
    calibration = Calibration(wavelength_A=0.9184) # 13.5 keV
    calibration.set_image_size(487, height=619) # Pilatus300k
    calibration.set_pixel_size(pixel_size_um=172.0)
    calibration.set_beam_position(402.0, 443.0)
    calibration.set_distance(5.038)

    # Combine the detector's fixed gap mask with the current beamline mask.
    mask = Mask(mask_dir+'Pilatus300k_main_gaps-mask.png')
    mask.load('./Pilatus300k_current-mask.png')

else:
    # WAXS detector on CMS
    from SciAnalysis.XSAnalysis.DataRQconv import *
    calibration = CalibrationRQconv(wavelength_A=0.9184) # 13.5 keV
    calibration.set_image_size(1042) # psccd Photonic Sciences CCD
    calibration.set_pixel_size(pixel_size_um=101.7)
    calibration.set_distance(0.232) # Bigger number moves theory rings outwards (larger spacing)
    calibration.set_beam_position(22.0, 1042-22.0)
    calibration.set_angles(det_orient=45, det_tilt=-21, det_phi=0, incident_angle=0., sample_normal=0.)
    print('ratio Dw = {:.3f}'.format(calibration.get_ratioDw()))

    mask = Mask(mask_dir+'psccd_generic-mask.png')

# Files to analyze
########################################

#root_dir = '/GPFS/xf11bm/Pilatus300/'
#root_dir = '/GPFS/xf11bm/Pilatus300/2016-3/CFN_aligned-BCP/'
#source_dir = os.path.join(root_dir, '')
source_dir = '../'

#output_dir = os.path.join(source_dir, 'analysis/')
output_dir = './'

infiles = glob.glob(os.path.join(source_dir, '*.tiff'))
#infiles = glob.glob(os.path.join(source_dir, 'Ag*.tiff'))
#infiles = glob.glob(os.path.join(source_dir, 'AgBH_5m_th0.000_10.00s_20323_saxs.tiff'))

infiles.sort()

# Analysis to perform
########################################

load_args = { 'calibration' : calibration,
             'mask' : mask,
             }
run_args = { 'verbosity' : 3,
            }

process = Protocols.ProcessorXS(load_args=load_args, run_args=run_args)

# Enabled protocols run on every input image; commented ones are kept as a
# menu of alternatives typically used at the beamline.
protocols = [
    #Protocols.calibration_check(show=False, AgBH=True, q0=0.010, num_rings=4, ztrim=[0.05, 0.05], ) ,
    Protocols.circular_average(ylog=True, plot_range=[0, 0.12, None, None]) ,
    #Protocols.sector_average(angle=0, dangle=20, ylog=True, plot_range=[0, 0.3, None, None], show_region=True) ,
    #Protocols.linecut_angle(q0=0.094, dq=0.015, show_region=False) ,
    #Protocols.q_image(blur=1.0, bins_relative=0.5, plot_range=[-0.1, 3.0, 0, 3.0], _xticks=[0, 1.0, 2.0, 3.0], ztrim=[0.2, 0.01]) ,
    #Protocols.qr_image(blur=None, bins_relative=0.8, plot_range=[-0.1, 3.0, 0, 3.0], _xticks=[0, 1.0, 2.0, 3.0], ztrim=[0.38, 0.002], dezing_fill=True) ,
    #Protocols.q_phi_image(bins_relative=0.25, plot_range=[0, 3.0, 0, +90]) ,
    Protocols.thumbnails(crop=None, resize=1.0, blur=None, cmap=cmap_vge, ztrim=[0.0, 0.01]) ,
    ]

# Run
########################################
# force=False skips files whose analysis output already exists.
process.run(infiles, protocols, output_dir=output_dir, force=False)

# Loop
########################################
# This is typically only used at the beamline (it loops forever, watching for new files).
process.monitor_loop(source_dir=source_dir, pattern='*.tiff', protocols=protocols, output_dir=output_dir, force=False)
|
import os
import numpy as np
import torch
from .cityscapes.data_loader import load_partition_data_cityscapes
from .coco.segmentation.data_loader import load_partition_data_coco_segmentation
from .pascal_voc_augmented.data_loader import load_partition_data_pascal_voc
import logging
def load(args):
    """Entry point: load the segmentation dataset selected by ``args.dataset``."""
    return load_synthetic_data(args)
def combine_batches(batches):
    """Merge a list of ``(x, y)`` mini-batches into one full batch.

    Returns a single-element list ``[(full_x, full_y)]`` so callers can keep
    iterating over it like a data loader.
    """
    # Seed with empty tensors so an empty `batches` still yields valid output;
    # torch.cat skips legacy-empty (shape (0,)) tensors regardless of dims.
    xs = [torch.from_numpy(np.asarray([])).float()]
    ys = [torch.from_numpy(np.asarray([])).long()]
    for xb, yb in batches:
        xs.append(xb)
        ys.append(yb)
    return [(torch.cat(xs, 0), torch.cat(ys, 0))]
def load_synthetic_data(args):
    """Load a federated segmentation dataset (cityscapes / coco / pascal_voc).

    Returns ``(dataset, class_num)`` where ``dataset`` is the list
    [train_data_num, test_data_num, train_data_global, test_data_global,
     train_data_local_num_dict, train_data_local_dict, test_data_local_dict,
     class_num].

    ``args`` fields used: dataset, data_cache_dir, client_num_in_total,
    training_type, batch_size. A non-positive ``args.batch_size`` enables
    full-batch training; the original value is restored before returning.
    """
    dataset_name = str(args.dataset).lower()
    # check if the centralized training is enabled (single client, not cross-silo)
    centralized = True if (args.client_num_in_total == 1 and args.training_type != "cross_silo") else False
    # check if the full-batch training is enabled; load with a temporary batch
    # size and merge the batches afterwards
    args_batch_size = args.batch_size
    if args.batch_size <= 0:
        full_batch = True
        args.batch_size = 128  # temporary batch size
    else:
        full_batch = False
    if dataset_name == "cityscapes":
        # load cityscapes dataset
        (
            train_data_num,
            test_data_num,
            train_data_global,
            test_data_global,
            data_local_num_dict,
            train_data_local_dict,
            test_data_local_dict,
            class_num,
        ) = load_partition_data_cityscapes(
            dataset=dataset_name,
            data_dir=args.data_cache_dir,
            partition_method=None,
            partition_alpha=None,
            client_number=args.client_num_in_total,
            batch_size=args.batch_size,
        )
    elif dataset_name in ["coco_segmentation", "coco"]:
        # load coco dataset
        (
            train_data_num,
            test_data_num,
            train_data_global,
            test_data_global,
            data_local_num_dict,
            train_data_local_dict,
            test_data_local_dict,
            class_num,
        ) = load_partition_data_coco_segmentation(
            dataset=dataset_name,
            data_dir=args.data_cache_dir,
            partition_method=None,
            partition_alpha=None,
            client_number=args.client_num_in_total,
            batch_size=args.batch_size,
        )
    elif dataset_name in ["pascal_voc", "pascal_voc_augmented"]:
        # load pascal voc dataset
        (
            train_data_num,
            test_data_num,
            train_data_global,
            test_data_global,
            data_local_num_dict,
            train_data_local_dict,
            test_data_local_dict,
            class_num,
        ) = load_partition_data_pascal_voc(
            dataset=dataset_name,
            data_dir=args.data_cache_dir,
            partition_method=None,
            partition_alpha=None,
            client_number=args.client_num_in_total,
            batch_size=args.batch_size,
        )
    else:
        raise ValueError("dataset %s is not supported" % dataset_name)
    # BUG FIX: the per-client sample counts come back as data_local_num_dict,
    # but the code below (and the returned dataset) referenced
    # train_data_local_num_dict, which was undefined in the non-centralized
    # path (NameError) and self-referential in the centralized one.
    train_data_local_num_dict = data_local_num_dict
    if centralized:
        # Collapse every client's data onto a single client id 0.
        train_data_local_num_dict = {
            0: sum(user_train_data_num for user_train_data_num in train_data_local_num_dict.values())
        }
        train_data_local_dict = {
            0: [batch for cid in sorted(train_data_local_dict.keys()) for batch in train_data_local_dict[cid]]
        }
        test_data_local_dict = {
            0: [batch for cid in sorted(test_data_local_dict.keys()) for batch in test_data_local_dict[cid]]
        }
        args.client_num_in_total = 1
    if full_batch:
        train_data_global = combine_batches(train_data_global)
        test_data_global = combine_batches(test_data_global)
        train_data_local_dict = {
            cid: combine_batches(train_data_local_dict[cid]) for cid in train_data_local_dict.keys()
        }
        test_data_local_dict = {cid: combine_batches(test_data_local_dict[cid]) for cid in test_data_local_dict.keys()}
        # restore the user-requested batch size
        args.batch_size = args_batch_size
    dataset = [
        train_data_num,
        test_data_num,
        train_data_global,
        test_data_global,
        train_data_local_num_dict,
        train_data_local_dict,
        test_data_local_dict,
        class_num,
    ]
    return dataset, class_num
|
from tkinter import *
from tkinter.messagebox import *
import os
import shutil
import sys
# Detect the host platform; pick the path separator and the directory the
# script was launched from (used as a working base).
system = sys.platform
base_dir = sys.path[0]
if system.startswith('win'):
    dir_char = '\\'
else:
    dir_char = '/'
base_dir += dir_char
def remove(path):
    """Delete *path* (file or directory tree); silently ignore missing paths."""
    if not os.path.exists(path):
        return
    deleter = shutil.rmtree if os.path.isdir(path) else os.remove
    deleter(path)
def setup_xdg_open():
    """Install xdg-utils via yum; return the shell exit status (0 on success)."""
    return os.system('yum install xdg-utils')
def OpenPic():
    """Open pic.png with the platform's default image viewer."""
    if dir_char != '/':
        # Windows: let the shell dispatch to the associated application.
        os.system('pic.png')
        return
    if system == 'darwin':
        os.system('open pic.png')
        return
    # Linux: use xdg-open, installing xdg-utils on first failure.
    if os.system('xdg-open pic.png'):
        print('No xdg-open! Auto setuping...')
        if setup_xdg_open():
            exit('install xdg-open failed')
        os.system('xdg-open pic.png')
def verify_click():
    """Render the dot source typed in the UI and display the image.

    Writes the text widget content to pic.dot wrapped in a graph/digraph
    block, runs Graphviz ``dot`` to produce pic.png (offering to install
    Graphviz when the command is missing), opens the image, then optionally
    deletes the intermediate files.
    """
    with open('pic.dot', 'w') as f:
        string = dg.get()
        f_content = content.get(0.0, END)
        f.write('%s G{\n' % string)
        if string == 'digraph':
            # Directed graphs use '->' edges; rewrite the undirected syntax.
            f_content = f_content.replace('--', '->')
        f.write(f_content)
        f.write('}')
    status = os.system('dot pic.dot -T png -o pic.png')
    if status:
        if dir_char == '/':
            # BUG FIX: askquestion() returns the strings 'yes'/'no', both
            # truthy, so `if askquestion(...)` always took the install branch
            # even when the user declined.
            if askquestion('需要安装Graphviz环境', '是否安装?') == 'yes':
                if system == 'darwin':
                    os.system('brew install graphviz')
                else:
                    def install():
                        # BUG FIX: sudo only reads the piped password with -S;
                        # without it the echo was silently discarded.
                        PWD = pwd.get()
                        os.system('echo %s | sudo -S apt-get install graphviz graphviz-doc' % PWD)
                    # Small modal dialog asking for the sudo password.
                    get_pass = Toplevel()
                    get_pass.title('需要sudo权限')
                    label = Label(get_pass, text='密码:')
                    label.grid(column=0, row=0)
                    pwd = Entry(get_pass, show='*')
                    pwd.grid(column=1, row=0)
                    btn = Button(get_pass, text='确认', command=install)
                    btn.grid(row=1, columnspan=2)
                    get_pass.mainloop()
                os.system('dot pic.dot -T png -o pic.png')
            else:
                exit(0)
        else:
            showerror('没有Graphviz环境', '进入"http://www.graphviz.org/download/"安装Graphviz')
            exit(0)
    OpenPic()
    ask = askokcancel('图片已生成', '是否保存')
    if not ask:
        remove('pic.dot')
        remove('pic.png')
def cancel_click():
    """Clear the whole text widget (handler for the '清空' button)."""
    content.delete(0.0, END)
def text_change(event):
    """On the first edit (BackSpace), wipe the placeholder text once.

    Subsequent edits leave the user's text alone.
    """
    global flag
    # BUG FIX: `flag` was never initialized at module level, so the first
    # BackSpace raised NameError inside the Tk callback and the placeholder
    # was never cleared. Default to True (placeholder still present) when the
    # global does not exist yet.
    if globals().get('flag', True):
        cancel_click()
    flag = True
# Build the main window: a text area for the graph source, two radio buttons
# selecting undirected vs directed output, and confirm/clear buttons.
win = Tk()
win.title('Graphviz画图器')
content = Text(win, width=40, height=30, font="Helvetica 14 bold", bd=10, )
content.grid(row=0, columnspan=2)
# First BackSpace clears the placeholder inserted below.
content.bind('<BackSpace>', text_change)
content.insert(END, '输入图:\nx--y\n...')
# Holds 'graph' or 'digraph'; read by verify_click().
dg = StringVar()
# NOTE(review): the documented Radiobutton option is `variable=`; `var=`
# relies on Tk accepting abbreviated option names -- confirm it works on the
# targeted Tk versions.
digraph = Radiobutton(win, text='有向图', var=dg, value='digraph')
graph = Radiobutton(win, text='无向图', var=dg, value='graph')
digraph.grid(column=0, row=1)
graph.grid(column=1, row=1)
verify = Button(win, text='确认', command=verify_click)
cancel = Button(win, text='清空', command=cancel_click)
verify.grid(column=0, row=2)
cancel.grid(column=1, row=2)
def main():
    """Run the Tk event loop until the window is closed."""
    win.mainloop()
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
#
# Copyright (C) 2021 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
import common, board_cfg_lib
# VM type labels grouped by launch mode, as used in ACRN scenario files.
PRE_LAUNCHED_VMS_TYPE = ["SAFETY_VM", "PRE_RT_VM", "PRE_STD_VM"]
POST_LAUNCHED_VMS_TYPE = ["POST_STD_VM", "POST_RT_VM", "KATA_VM"]
SOS_VM_TYPE = ["SOS_VM"]
def parse_hv_console(scenario_etree):
    """Extract the bare tty name from the scenario's SERIAL_CONSOLE item.

    The console item may take 3 forms:
    1. BDF:(00:18.2) seri:/dev/ttyS2
    2. /dev/ttyS2
    3. ttyS2
    Returns e.g. 'ttyS2', or '' when the setting is absent.
    """
    ttys = common.get_node("//SERIAL_CONSOLE/text()", scenario_etree)
    # Cleaned up: the original `if not ttys or ttys == None` was redundant
    # (`not ttys` already covers None), and the compound condition below
    # relied on and/or precedence; behavior is unchanged.
    if not ttys:
        return ''
    if 'BDF' in ttys or '/dev' in ttys:
        # Both long forms carry the tty as the third '/'-separated field.
        return ttys.split('/')[2]
    return ttys
def get_native_ttys():
    """Parse the <TTYS_INFO> section of the board file.

    Returns a dict mapping tty name -> {'type': str, 'base': str, 'irq': int}.
    """
    native_ttys = {}
    ttys_lines = board_cfg_lib.get_info(common.BOARD_INFO_FILE, "<TTYS_INFO>", "</TTYS_INFO>")
    if not ttys_lines:
        return native_ttys
    # Expected line formats:
    #   seri:/dev/ttySx type:mmio base:0x91526000 irq:4 [bdf:"00:18.0"]
    #   seri:/dev/ttySy type:portio base:0x2f8 irq:5
    for line in ttys_lines:
        name = line.split('/')[2].split()[0]
        fields = line.split()
        native_ttys[name] = {
            'type': fields[1].split(':')[1].strip(),
            'base': fields[2].split(':')[1].strip(),
            'irq': int(fields[3].split(':')[1].strip()),
        }
    return native_ttys
def get_shmem_regions(etree):
    """Collect the ivshmem regions configured per VM id.

    Returns {vm_id: {shm_name: {'id': str, 'size': str}}}; empty when
    IVSHMEM is disabled.
    """
    if common.get_node("//IVSHMEM_ENABLED/text()", etree) == 'n':
        return {}
    # <IVSHMEM_REGION> text format: "shm_name, shm_size, vmid0:vmid1:..."
    # example: hv:/shm_region_0, 2, 0:2
    shmem_regions = {}
    for idx, region in enumerate(etree.xpath("//IVSHMEM_REGION")):
        text = region.text
        if text is None:
            continue
        parts = text.split(',')
        shm_name = parts[0].strip()
        shm_size = parts[1].strip()
        for vm_id in [vm_id.strip() for vm_id in parts[2].split(':')]:
            shmem_regions.setdefault(vm_id, {})[shm_name] = {'id': str(idx), 'size': shm_size}
    return shmem_regions
def is_pre_launched_vm(vm_type):
    """Return True when *vm_type* denotes a pre-launched VM."""
    return vm_type in PRE_LAUNCHED_VMS_TYPE
def is_post_launched_vm(vm_type):
    """Return True when *vm_type* denotes a post-launched VM."""
    return vm_type in POST_LAUNCHED_VMS_TYPE
def is_sos_vm(vm_type):
    """Return True when *vm_type* denotes the service OS VM."""
    return vm_type in SOS_VM_TYPE
<filename>src/data/arpa/arpa_quality_raw_funcs.py
import logging
import os
import pandas as pd
from sodapy import Socrata
from pathlib import Path
from dotenv import load_dotenv
from src.config import PROJECT_DIR, ARPA_DATA_DIR, ARPA_REG_DATA_ID, ARPA_MEASURES_DATA_ID, PROC_DATA_DIR, ARPA_STATIONS
class ArpaConnect:
    """
    Thin connector to the Socrata API for recent ARPA air-quality open data.

    Connection parameters are read from the project's private .env file.
    """
    def __init__(self):
        self.params_dict = {}
        self._init_connection()

    def _init_connection(self):
        # Credentials live in .env at the project root (never committed).
        load_dotenv(Path(PROJECT_DIR) / '.env')
        logging.info("Connecting with Socrata backend for recent data")
        self.params_dict = {
            'domain': os.environ.get('ARPA_WEB_DOMAIN'),
            'app_token': os.environ.get('ARPA_APP_TOKEN'),
            'username': os.environ.get('ARPA_USER_NAME'),
            'password': os.environ.get('ARPA_PWD'),
        }
        self.connector = Socrata(**self.params_dict)
        logging.info("Backend connected")

    def get_df(self, dataset_identifier, **kwargs):
        """Fetch a Socrata dataset and return it as a pandas DataFrame."""
        suffix = "with kwargs " + str(kwargs) if len(kwargs) > 0 else ""
        logging.info("Download from Socrata dataset {d} {kw}".format(d=dataset_identifier, kw=suffix))
        records = self.connector.get(dataset_identifier, **kwargs)
        return pd.DataFrame.from_records(records)
def get_city_sensor_ids(arpa: ArpaConnect, city: str=None, prov: str=None) -> pd.DataFrame:
    """Get dataframe with sensor id, type and location.

    Only active sensors (no ``datastop``) are returned, optionally filtered
    by city and/or province.
    """
    # NOTE(review): SoQL normally quotes string literals with single quotes;
    # the double-quoted comparisons below reproduce the original behavior --
    # confirm against the Socrata endpoint before changing.
    conditions = ["datastop IS NULL"]
    if city is not None:
        conditions.append('comune = "{}"'.format(city))
    if prov is not None:
        conditions.append('provincia = "{}"'.format(prov))
    return arpa.get_df(ARPA_REG_DATA_ID,
                       where=" and ".join(conditions),
                       order="idsensore")
def get_current_sensor_data(arpa: ArpaConnect, id_data: pd.DataFrame, dataset_identifier: str = None) -> pd.DataFrame:
    """Get dataframe with all sensor data for the sensors listed in id_data."""
    if dataset_identifier is None:
        dataset_identifier = ARPA_MEASURES_DATA_ID
    # Build "idsensore IN (id1, id2, ...)" from the id column.
    ids_csv = str(id_data['idsensore'].to_list())[1:-1]
    return arpa.get_df(dataset_identifier=dataset_identifier,
                       where="idsensore IN (" + ids_csv + ")",
                       limit=6000000)
def get_historical_sensor_data(id_data: pd.DataFrame) -> pd.DataFrame:
    """Load all previous-year data (zipped CSVs) for the selected sensors."""
    read_opts = {
        'compression': 'zip',
        'parse_dates': ['Data'],
        'dtype': {'IdSensore': str, 'valore': float},
    }
    id_cols = id_data.loc[:, ["idsensore", "nometiposensore", "idstazione"]]
    frames = []
    for fname in [f for f in os.listdir(ARPA_DATA_DIR) if f.endswith('.zip')]:
        logging.info("Loading sensor data {}".format(fname))
        yearly_df = pd.read_csv(os.path.join(ARPA_DATA_DIR, fname), **read_opts)
        yearly_df.columns = [c.lower() for c in yearly_df.columns]
        # Keep only the requested sensors, drop the -9999 NA sentinel rows
        # and the operator column.
        merged = pd.merge(yearly_df, id_cols, on='idsensore', how='inner')
        merged = merged.loc[merged['valore'] != -9999]
        merged = merged.drop(columns=['idoperatore'])
        frames.append(merged)
    return pd.concat(frames)
def load_historical_data(id_data: pd.DataFrame, build_historical: bool = False) -> pd.DataFrame:
    """Return historical data, rebuilt from CSV archives or read from pickle."""
    if build_historical:
        logging.info("Create historical arpa dataframe from zipped csv")
        return get_historical_sensor_data(id_data=id_data)
    pickle_path = os.path.join(ARPA_DATA_DIR, 'history_df.pkl')
    logging.info("Create historical arpa dataframe from builded pickle")
    return pd.read_pickle(pickle_path)
def clean_current_sensor_df(sensor_df: pd.DataFrame, id_data: pd.DataFrame) -> pd.DataFrame:
    """Handle NA values, column types, and merge sensor id info.

    Drops the '-9999' NA sentinel rows, converts `valore` to float and
    `data` to datetime, removes the operator column, then inner-merges the
    registry columns (type, station) on `idsensore`.
    """
    # BUG FIX: take an explicit copy -- the original assigned columns on a
    # boolean-filtered view, triggering pandas' SettingWithCopyWarning and,
    # under copy-on-write semantics, silently failing to modify the data.
    cleaned_sensor_df = sensor_df.loc[sensor_df['valore'] != '-9999'].copy()
    cleaned_sensor_df['valore'] = cleaned_sensor_df['valore'].astype(float)
    cleaned_sensor_df['data'] = pd.to_datetime(cleaned_sensor_df.data)
    cleaned_sensor_df = cleaned_sensor_df.drop(columns='idoperatore')
    id_data = id_data.loc[:, ["idsensore", "nometiposensore", "idstazione"]]
    merged_sensor_df = pd.merge(cleaned_sensor_df, id_data, on=['idsensore'])
    return merged_sensor_df
def get_all_sensor_data(arpa: ArpaConnect, station: str = None, build_historical: bool = False) -> pd.DataFrame:
    """Combine current (API) and historical (archive) data for one station."""
    id_data = get_city_sensor_ids(arpa=arpa, city=station)
    current_raw = get_current_sensor_data(arpa=arpa, id_data=id_data)
    if len(current_raw) == 0:
        raise RuntimeError("no current data available for selected sensors")
    current_df = clean_current_sensor_df(sensor_df=current_raw, id_data=id_data)
    history_df = load_historical_data(id_data=id_data, build_historical=build_historical)
    return pd.concat([history_df, current_df])
def save_all_sensor_data(all_sensor_df, specific_file: str = None):
    """Pickle the combined sensor dataframe under PROC_DATA_DIR."""
    target = 'arpa_data.pkl' if specific_file is None else specific_file
    path_to_output = os.path.join(PROC_DATA_DIR, target)
    logging.info("saving arpa dataframe as pickle in {f}".format(f=target))
    all_sensor_df.to_pickle(path_to_output)
if __name__ == '__main__':
    # Rebuild the historical pickle (history_df.pkl) from the yearly zipped
    # CSV archives for every configured station.
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    arpa = ArpaConnect()
    logging.info("building ARPA {}".format("history_df.pkl"))
    hist_arpa_list = []
    for station in ARPA_STATIONS:
        id_data = get_city_sensor_ids(arpa=arpa, city=station)
        stat_hist_arpa_df = get_historical_sensor_data(id_data=id_data)
        hist_arpa_list.append(stat_hist_arpa_df)
    hist_arpa_df = pd.concat(hist_arpa_list)
    out_hist_path = os.path.join(ARPA_DATA_DIR, 'history_df.pkl')
    logging.info("saving to {}".format(out_hist_path))
    hist_arpa_df.to_pickle(out_hist_path)
|
<filename>config.py<gh_stars>1-10
import configparser
import asyncio
from os.path import exists
# configparser.ConfigParser: the in-memory configuration (None until
# setup_config() has run successfully).
config = None
# asyncio.Task: pending save; used to coalesce several writes into one.
save_task = None
# str: path of the config file loaded by setup_config().
file_name = ""
# params: str cfg_file_name
# return boolean
def setup_config(cfg_file_name):
    """Load configuration from *cfg_file_name*.

    Falls back to '<name>.default' when the file does not exist yet; in
    that case the loaded defaults are written out to the regular cfg file.
    Returns True on success, False when the chosen file is empty/unreadable.
    """
    global file_name
    file_name = cfg_file_name
    # str use_file_name
    use_file_name = file_name
    # declare config for use within the function
    global config
    config = configparser.ConfigParser()
    # if our config file doesn't already exist, load the default
    # bool use_default
    use_default = not exists(cfg_file_name)
    if use_default:
        use_file_name = cfg_file_name + ".default"
    config.read(use_file_name)
    # BUG FIX: `config.sections` without parentheses is a bound method and
    # never equals [], so the empty-file error path was unreachable.
    if config.sections() == []:
        print("{} is empty! Cannot load!".format(use_file_name))
        return False
    print("{} successfully loaded".format(use_file_name))
    if use_default:
        # BUG FIX: this write-out was originally placed after both returns
        # (dead code); the defaults were never persisted to the regular file.
        try_save()
    return True
# params: str section, str key
# return str
def get_config(section, key):
    """Return the raw string value stored at [section] key.

    Raises Exception when setup_config() has not been run yet.
    """
    if config is not None:
        return config[section][key]
    # BUG FIX: raise with a message instead of a silent, bare Exception().
    raise Exception("config not loaded -- call setup_config() first")
# alias for readability's sake
get_config_string = get_config
# params: str section, str key
# return float
def get_config_float(section, key):
    """Return the value at [section] key converted to float."""
    return float(get_config(section, key))
# params: str section, str key
# return int
def get_config_int(section, key):
    """Return the value at [section] key converted to int."""
    return int(get_config(section, key))
# params: str section, str key
# return boolean
def get_config_bool(section, key):
    """Return the value at [section] key via ConfigParser's boolean parsing."""
    return config.getboolean(section, key)
# params: str section, str key
# return List[str]
def get_config_list(section, key):
    """Return the comma-separated value at [section] key as a list of str."""
    # str raw_str
    raw_str = get_config(section, key)
    return raw_str.split(',')
# alias for readability's sake
get_config_str_list = get_config_list
# params: str section, str key
# return List[int]
def get_config_int_list(section, key):
    """Return the comma-separated value as a list of ints."""
    return list(map(int, get_config_list(section, key)))
# params: str section, str key
# return List[float]
def get_config_float_list(section, key):
    """Return the comma-separated value as a list of floats."""
    return list(map(float, get_config_list(section, key)))
# params: str section, str key
# return List[boolean]
def get_config_bool_list(section, key):
    """Return the comma-separated value as a list of bools."""
    return list(map(convert_to_bool, get_config_list(section, key)))
# params: str section, str key, ??? value
# return boolean
def config_list_add(section, key, value):
    """Append *value* to the comma-separated list at [section] key.

    Returns True when added, False when the value is already an element.
    """
    # str value_str
    value_str = str(value)
    # str list_str
    list_str = get_config(section, key)
    if len(list_str) == 0:
        # nothing's in there, so the list becomes the one new element
        set_config(section, key, value_str)
        return True
    # BUG FIX: membership was tested with substring find() (so "1" matched
    # inside "12"); compare against the actual elements instead.
    if value_str not in list_str.split(','):
        # BUG FIX: the original wrote back only value_str here, discarding
        # every element already in the list; persist the full joined list.
        list_str = list_str + ',' + value_str
        set_config(section, key, list_str)
        return True
    print("Attention: Value \"{}\" is already in [{}] {}.".format(value_str, section, key))
    return False
# params: str section, str key, ??? value
# return boolean
def config_list_remove(section, key, value):
    """Remove *value* from the comma-separated list at [section] key.

    Returns True when removed, False when the list is empty or does not
    contain the value.
    """
    # str value_str
    value_str = str(value)
    # str list_str
    list_str = get_config(section, key)
    if len(list_str) == 0:
        print("Attention: Cannot remove value \"{}\" from empty list [{}] {}.".format(value_str, section, key))
        return False
    # BUG FIX: the original used substring find/replace, which (a) matched
    # partial elements (removing "1" corrupted "12") and (b) mis-handled the
    # final element due to an off-by-one on the end-of-string check, leaving
    # the stored list unchanged. Operate on the split element list instead.
    elements = list_str.split(',')
    if value_str not in elements:
        print("Attention: Cannot remove value \"{}\" from [{}] {} because the list doesn't contain it.".format(value_str, section, key))
        return False
    elements.remove(value_str)
    set_config(section, key, ','.join(elements))
    return True
# params: str value
# return boolean
# copy of ConfigParser._convert_to_boolean()
def convert_to_bool(value):
    """Map a boolean-ish string ('yes'/'no', 'on'/'off', ...) to bool.

    Looks up ConfigParser.BOOLEAN_STATES through the `config` instance;
    raises ValueError for unrecognized strings.
    """
    if value.lower() not in config.BOOLEAN_STATES:
        raise ValueError('Not a boolean: %s' % value)
    return config.BOOLEAN_STATES[value.lower()]
# params: str section, str key, ??? value
def set_config(section, key, value):
    """Set [section] key to str(value) and schedule an asynchronous save."""
    # stupid Python global nonsense...
    global config
    config[section][key] = str(value)
    try_save()
def try_save():
    """Schedule save_config() once, coalescing bursts of set_config calls."""
    # allow multiple config sets to be bundled into one save
    global save_task
    if save_task is None:
        save_task = asyncio.ensure_future(save_config())
async def save_config():
    """Write the in-memory config back to file_name (scheduled by try_save)."""
    # clear the task first so the next set_config schedules a fresh save
    global save_task
    save_task = None
    global file_name
    # BUG FIX: open() never returns None (it raises on failure), so the
    # original "is missing" branch was unreachable and the file handle was
    # never closed. Use a context manager and report real I/O errors.
    try:
        with open(file_name, "w") as config_file:
            config.write(config_file)
        print("{} successfully saved".format(file_name))
    except OSError:
        print("{} is missing! Cannot save!".format(file_name))
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QMenuBar, QMainWindow, QMenu, QAction
from PyQt5.QtGui import QFont, QIcon
from PyQt5.QtCore import QCoreApplication
from bin.ui_view.utils import load_animation
from bin.ui_view.utils.about import AboutUI
from lib import settings
from bin.ui_view.utils.skincolordialog import SkinColorDialogUI
from lib.communicate import communicate
_translate = QCoreApplication.translate
"""
选项 OptionMenu
程序设置
生成服务端
皮肤
分隔符
退出
查看 ViewMenu
工具扩展
工具导航
状态栏
帮助 HelpMenu
关于
"""
class OptionMenu(object):
    def __init__(self, menubar: QMenuBar, main_window: QMainWindow):
        """
        Options menu: program settings / make server / skin / exit.
        :param menubar: parent menu bar
        :param main_window: main window (closed by the exit action)
        """
        self.menubar = menubar
        self.main_window = main_window
        # Add a new QMenu (the parent menu) to the menu bar.
        self.option = QMenu(self.menubar)
        # Program settings action.
        self.setting = QAction(QIcon(settings.MENUBAR_UI["setting"]), '&Setting', self.menubar)
        # Make-server (client builder) action.
        self.make_server = QAction(QIcon(settings.MENUBAR_UI["make_server"]), '&Make Server', self.menubar)
        # Skin settings action.
        self.skin = QAction(QIcon(settings.MENUBAR_UI["skin"]), '&Skin Settings', self.menubar)
        # Separator.
        # NOTE(review): addSeparator() runs while the menu is still empty, so
        # the separator ends up before the actions added later in setup_ui(),
        # not between "skin" and "exit" -- confirm intent.
        self.option.addSeparator()
        # Exit action.
        self.exit = QAction(QIcon(settings.MENUBAR_UI["exit"]), '&Exit', self.menubar)
        # Skin color dialog instance.
        self.skin_color_dialog = SkinColorDialogUI()
    def setup_ui(self) -> None:
        """Register actions, shortcuts and signal handlers on the menu."""
        self.setting.setShortcut('Ctrl+Alt+S')
        self.setting.setObjectName("setting")
        self.option.addAction(self.setting)
        self.setting.triggered.connect(self.setting_receive)
        self.make_server.setShortcut('Ctrl+N')
        self.make_server.setObjectName("make_server")
        self.option.addAction(self.make_server)
        self.make_server.triggered.connect(self.make_server_receive)
        # NOTE(review): 'Ctrl+N' is also assigned to make_server above; the
        # duplicate shortcut makes the binding ambiguous -- confirm.
        self.skin.setShortcut('Ctrl+N')
        self.skin.setObjectName("skin")
        self.option.addAction(self.skin)
        self.skin.triggered.connect(self.skin_receive)
        self.exit.setShortcut('Ctrl+Q')
        self.exit.setObjectName("exit")
        self.option.addAction(self.exit)
        # self.exit.triggered.connect(QCoreApplication.quit)  # quit the app directly
        self.exit.triggered.connect(self.exit_receive)
        self.menubar.addAction(self.option.menuAction())
    # noinspection PyArgumentList
    def retranslate_ui(self) -> None:
        """Set the user-visible (translated) titles of the menu entries."""
        self.option.setTitle(_translate("MenubarUI", "选项"))
        self.setting.setText(_translate("MenubarUI", "程序设置"))
        self.make_server.setText(_translate("MenubarUI", "创建客户端"))
        self.skin.setText(_translate("MenubarUI", "皮肤调节"))
        self.exit.setText(_translate("MenubarUI", "退出程序"))
    def setting_receive(self) -> None:
        """
        Program settings handler (placeholder: only logs for now).
        :return:
        """
        print("setting_receive")
    def make_server_receive(self) -> None:
        """
        Make-server handler (placeholder: only logs for now).
        :return:
        """
        print("make_server_receive")
    # @staticmethod
    def skin_receive(self) -> None:
        """
        Skin-adjust handler: opens the skin color dialog.
        :return:
        """
        # self.skin_color_dialog.start()
        # communicate.skin_color_clicked.emit()
        self.skin_color_dialog.setup_ui()
    def exit_receive(self) -> None:
        """
        Exit handler: close the main window.
        :return:
        """
        self.main_window.close()
class ViewMenu(object):
    def __init__(self, menubar: QMenuBar):
        """
        View menu: checkable toggles for statusbar, toolbar, group info and
        the tools-extension panel.
        :param menubar: parent menu bar
        """
        self.menubar = menubar
        # Parent QMenu attached to the menu bar.
        self.view = QMenu(self.menubar)
        # Checkable toggle actions.
        self.statusbar = QAction(QIcon(""), '&Statusbar', self.menubar)
        self.toolbar = QAction(QIcon(""), '&Toolbar', self.menubar)
        self.group_info = QAction(QIcon(""), '&Group Info', self.menubar)
        self.tools_extension = QAction(QIcon(""), '&Tools Extension', self.menubar)
    def setup_ui(self) -> None:
        """Register the toggle actions and restore their persisted state."""
        # Same registration for every toggle; order fixes the menu layout.
        for action, obj_name, handler in (
            (self.statusbar, "statusbar", self.statusbar_receive),
            (self.toolbar, "toolbar", self.toolbar_receive),
            (self.group_info, "group_info", self.group_info_receive),
            (self.tools_extension, "tools_extension", self.tools_extension_receive),
        ):
            action.setObjectName(obj_name)
            action.setCheckable(True)
            action.triggered.connect(handler)
            self.view.addAction(action)
        self.menubar.addAction(self.view.menuAction())
        # Restore the visibility flags persisted in settings.
        if settings.TOOLS_EXTENSION_SHOW:
            self.tools_extension.setChecked(True)
        if settings.TOOLBAR_SHOW:
            self.toolbar.setChecked(True)
        if settings.STATUSBAR_SHOW:
            self.statusbar.setChecked(True)
        if settings.GROUP_TREE_SHOW:
            self.group_info.setChecked(True)
    # noinspection PyArgumentList
    def retranslate_ui(self) -> None:
        """Set the user-visible (translated) titles of the menu entries."""
        self.view.setTitle(_translate("MenubarUI", "查看"))
        self.tools_extension.setText(_translate("MenubarUI", "工具扩展"))
        self.toolbar.setText(_translate("MenubarUI", "工具导航"))
        self.statusbar.setText(_translate("MenubarUI", "状态栏"))
        self.group_info.setText(_translate("MenubarUI", "分组信息"))
    def tools_extension_receive(self) -> None:
        """Broadcast the tools-extension visibility matching the check state."""
        communicate.tools_extension_show.emit(self.tools_extension.isChecked())
    def group_info_receive(self) -> None:
        """Broadcast the group-tree visibility matching the check state."""
        communicate.group_tree_show.emit(self.group_info.isChecked())
    def toolbar_receive(self) -> None:
        """Broadcast the toolbar visibility matching the check state."""
        communicate.toolbar_show.emit(self.toolbar.isChecked())
    def statusbar_receive(self) -> None:
        """Broadcast the statusbar visibility matching the check state."""
        communicate.statusbar_show.emit(self.statusbar.isChecked())
class HelpMenu(object):
    def __init__(self, menubar: QMenuBar, main_window: QMainWindow):
        """
        Help menu: the About entry.
        :param menubar: parent menu bar
        :param main_window: main window (kept for parity with the other menus)
        """
        self.menubar = menubar
        self.main_window = main_window
        # Parent QMenu attached to the menu bar.
        self.help = QMenu(self.menubar)
        # About action.
        # BUG FIX: the action carried the copy-pasted label '&Tools
        # Extension'; label it '&About' (retranslate_ui overrides the visible
        # text anyway).
        self.about = QAction(QIcon(settings.MENUBAR_UI["about"]), '&About', self.menubar)
        self.about_ui = AboutUI()
    def setup_ui(self) -> None:
        """Register the About action and prepare the About dialog."""
        # BUG FIX: objectName was the copy-pasted "tools_extension", which
        # collided with ViewMenu's action of the same name.
        self.about.setObjectName("about")
        self.help.addAction(self.about)
        self.about.triggered.connect(self.about_receive)
        self.menubar.addAction(self.help.menuAction())
        self.about_ui.setup_ui()
        self.about_ui.retranslate_ui()
    # noinspection PyArgumentList
    def retranslate_ui(self) -> None:
        """Set the user-visible (translated) titles of the menu entries."""
        self.help.setTitle(_translate("MenubarUI", "帮助"))
        self.about.setText(_translate("MenubarUI", "关于"))
    def about_receive(self) -> None:
        """Show the About dialog."""
        self.about_ui.show()
class MenubarUI(object):
    def __init__(self, main_window: QMainWindow):
        """
        Menu bar container: builds the Option/View/Help menus.
        :param main_window: window that receives the menu bar
        """
        self.main_window = main_window
        self.menubar = QMenuBar(main_window)
        self.menu_list = []
        self.option_menu = OptionMenu(self.menubar, self.main_window)
        self.view_menu = ViewMenu(self.menubar)
        self.help_menu = HelpMenu(self.menubar, self.main_window)
    def setup_ui(self) -> None:
        """Configure the bar, attach it to the window and build the menus."""
        font = QFont()
        font.setPointSize(10)
        self.menubar.setFont(font)
        # self.menubar.setGeometry(QRect(0, 0, 800, 25))
        self.menubar.setFixedHeight(30)
        self.menubar.setObjectName("menubar")
        self.main_window.setMenuBar(self.menubar)
        self.load_ui()
        self.show_ui()
        if settings.LOAD_EFFECT_ON:
            load_animation.load_animation(self.menubar)
    # noinspection PyArgumentList
    def retranslate_ui(self) -> None:
        """Set the (translated) window title of the menu bar."""
        self.menubar.setWindowTitle(_translate("MenubarUI", "菜单栏"))
    def load_ui(self) -> None:
        """
        Collect the sub-menu objects in display order.
        :return:
        """
        self.menu_list.extend((self.option_menu, self.view_menu, self.help_menu))
    def show_ui(self) -> None:
        """
        Build and translate every registered sub-menu.
        :return:
        """
        for menu in self.menu_list:
            menu.setup_ui()
            menu.retranslate_ui()
class MenubarConnect(object):
    def __init__(self, menubar_ui: MenubarUI):
        """
        Keeps the View-menu check marks in sync with external signals.
        :param menubar_ui: the menu bar whose actions are updated
        """
        self.menubar_ui = menubar_ui
    def setup_ui(self) -> None:
        self.communicate_connect()
    def communicate_connect(self) -> None:
        # External state changes -> menu check marks.
        communicate.toolbar_checked.connect(self.toolbar_checked)
        communicate.tools_extension_checked.connect(self.tools_extension_checked)
        communicate.statusbar_checked.connect(self.statusbar_checked)
        communicate.group_tree_checked.connect(self.group_tree_checked)
    def group_tree_checked(self, flag: bool) -> None:
        """Mirror *flag* onto the group-info menu check mark."""
        self.menubar_ui.view_menu.group_info.setChecked(flag)
    def toolbar_checked(self, flag: bool) -> None:
        """Mirror *flag* onto the toolbar menu check mark."""
        self.menubar_ui.view_menu.toolbar.setChecked(flag)
    def tools_extension_checked(self, flag: bool) -> None:
        """Mirror *flag* onto the tools-extension menu check mark."""
        self.menubar_ui.view_menu.tools_extension.setChecked(flag)
    def statusbar_checked(self, flag: bool) -> None:
        """Mirror *flag* onto the statusbar menu check mark."""
        self.menubar_ui.view_menu.statusbar.setChecked(flag)
    def retranslate_ui(self) -> None:
        pass
|
########## 1.10.1 Classification ##########
# DecisionTreeClassifier performs multi-class classification on a dataset.
# Like other classifiers it takes two arrays: X, sparse or dense, of shape
# (n_samples, n_features) with the training samples, and Y of integer
# values, shape (n_samples,), with the class labels:
from sklearn import tree
X = [[0,0], [1,1]]
Y = [0,1]
clf = tree.DecisionTreeClassifier()
print(clf.fit(X,Y))
# After fitting, the model can predict the class of samples:
clf.predict([[2.,2.]])
# If several classes tie at the highest probability, the classifier
# predicts the class with the lowest index among them.
# Instead of a single class, the per-class probability can be predicted --
# the fraction of training samples of that class in the reached leaf:
clf.predict_proba([[2.,2.]])
# DecisionTreeClassifier supports binary ([-1, 1]) and multi-class
# ([0, ..., K-1]) labels. Using the Iris dataset we can build a tree:
from sklearn.datasets import load_iris
from sklearn import tree
iris = load_iris()
X, y = iris.data, iris.target
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X, y)
# Once trained, the tree can be plotted with plot_tree:
tree.plot_tree(clf)
# The tree can also be exported in Graphviz format with export_graphviz
# (conda install python-graphviz, or pip install graphviz plus the binaries
# from the Graphviz homepage). The result is rendered to iris.pdf:
import graphviz
dot_data = tree.export_graphviz(clf, out_file=None)
# BUG FIX: the Source() argument was the typo `fot_data`, raising NameError.
graph = graphviz.Source(dot_data)
graph.render('iris')
# export_graphviz supports aesthetic options, including coloring nodes by
# class (or value for regression) and explicit feature/class names. Jupyter
# notebooks also render these graphs inline automatically:
dot_data = tree.export_graphviz(clf, out_file=None, feature_names=iris.feature_names, class_names=iris.target_names, filled=True, rounded=True, special_characters=True)
graph = graphviz.Source(dot_data)
graph
# Alternatively the tree can be exported in textual form with export_text;
# this needs no external libraries and is more compact:
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_text
iris = load_iris()
decision_tree = DecisionTreeClassifier(random_state=0, max_depth=2)
decision_tree = decision_tree.fit(iris.data, iris.target)
r = export_text(decision_tree, feature_names=iris['feature_names'])
print(r)
## Examples
## https://scikit-learn.org/stable/auto_examples/tree/plot_iris_dtc.html#sphx-glr-auto-examples-tree-plot-iris-dtc-py
## https://scikit-learn.org/stable/auto_examples/tree/plot_unveil_tree_structure.html#sphx-glr-auto-examples-tree-plot-unveil-tree-structure-py
<reponame>LeoRya/py-orbit
import sys
import math
import orbit_mpi
from orbit_mpi import mpi_comm
from orbit_mpi import mpi_datatype
from orbit_mpi import mpi_op
from spacecharge import Grid2D
from orbit_utils import Function
from orbit_utils import SplineCH
from orbit_utils import GaussLegendreIntegrator
from orbit.utils.fitting import PolynomialFit
from orbit_utils import Polynomial
class SuperFish_3D_RF_FieldReader:
    """
    This class reads the SuperFish file with the 3D axial symmetric RF field.
    It uses z and r as variables. The file include Ez, Er, and H.
    The file will work in parallel environment.
    """
    def __init__(self):
        # self.data_arr is a flat data array with tuples [z,r,Ez,Er,E,B]
        self.data_arr = []
        # Grid bounds along z and r, in [cm] as stored in the SuperFish file.
        self.Zmin = 0.
        self.Zmax = 0.
        self.Rmin = 0.
        self.Rmax = 0.
        # Number of grid steps (number of grid points is steps + 1).
        self.zSteps = 0
        self.rSteps = 0
    def readFile(self,file_name):
        """
        Parses the SuperFish text file on MPI rank 0 and broadcasts the
        header values and every data row to all other ranks.
        Exits the process (sys.exit(1)) if the table size does not match
        the declared grid dimensions.
        """
        # Reset state so the reader can be reused for several files.
        self.data_arr = []
        self.Zmin = 0.
        self.Zmax = 0.
        self.Rmin = 0.
        self.Rmax = 0.
        self.zSteps = 0
        self.rSteps = 0
        rank = orbit_mpi.MPI_Comm_rank(mpi_comm.MPI_COMM_WORLD)
        main_rank = 0
        # Only rank 0 touches the file; everything is broadcast afterwards.
        if(rank == 0):
            fl_in = open(file_name,"r")
            start_data = 0
            for ln in fl_in:
                res = ln.split()
                if(start_data == 0):
                    # Still in the header: pick up grid bounds and sizes.
                    if(ln.find("(Zmin,Rmin)") >= 0):
                        # Value looks like "(z,r)": strip parentheses, split on the comma.
                        zr_min_max = res[2][1:len(res[2])-1].split(",")
                        self.Zmin = float(zr_min_max[0])
                        self.Rmin = float(zr_min_max[1])
                    if(ln.find("(Zmax,Rmax)") >= 0):
                        zr_min_max = res[2][1:len(res[2])-1].split(",")
                        self.Zmax = float(zr_min_max[0])
                        self.Rmax = float(zr_min_max[1])
                    if(len(res) > 4 and res[0] == "Z" and res[1] == "and" and res[2] == "R"):
                        self.zSteps = int(res[4])
                        self.rSteps = int(res[5])
                    # The units line "(cm) (cm) (MV/m) ..." marks the start of the data table.
                    if(len(res) == 6 and res[0] == '(cm)' and res[1] == '(cm)' and res[2] == '(MV/m)'):
                        start_data = 1
                else:
                    # Data section: each valid row has exactly six numeric fields.
                    if(len(res) == 6):
                        arr = []
                        for st in res:
                            arr.append(float(st))
                        self.data_arr.append(arr)
                    else:
                        # First non-data row terminates the table.
                        break
            fl_in.close()
        #------end of rank 0 actions
        # Broadcast the row count and scalar header values to every rank.
        n = len(self.data_arr)
        n = orbit_mpi.MPI_Bcast(n,mpi_datatype.MPI_INT,main_rank,mpi_comm.MPI_COMM_WORLD)
        self.zSteps = orbit_mpi.MPI_Bcast(self.zSteps,mpi_datatype.MPI_INT,main_rank,mpi_comm.MPI_COMM_WORLD)
        self.rSteps = orbit_mpi.MPI_Bcast(self.rSteps,mpi_datatype.MPI_INT,main_rank,mpi_comm.MPI_COMM_WORLD)
        self.Zmin = orbit_mpi.MPI_Bcast(self.Zmin,mpi_datatype.MPI_DOUBLE,main_rank,mpi_comm.MPI_COMM_WORLD)
        self.Zmax = orbit_mpi.MPI_Bcast(self.Zmax,mpi_datatype.MPI_DOUBLE,main_rank,mpi_comm.MPI_COMM_WORLD)
        self.Rmin = orbit_mpi.MPI_Bcast(self.Rmin,mpi_datatype.MPI_DOUBLE,main_rank,mpi_comm.MPI_COMM_WORLD)
        self.Rmax = orbit_mpi.MPI_Bcast(self.Rmax,mpi_datatype.MPI_DOUBLE,main_rank,mpi_comm.MPI_COMM_WORLD)
        # Sanity check: the table must hold one row per (z,r) grid point.
        if((self.zSteps+1)*(self.rSteps+1) != n):
            if(rank == 0):
                print "====================================================="
                print "SuperFish_3D_RF_FiledReader:"
                print "The file=",file_name," does not have a correct format!"
                print "Stop."
            sys.exit(1)
        # Broadcast the data rows one by one so all ranks share the table.
        for i in range(n):
            arr = [0.]*6
            if(rank == 0):
                arr = self.data_arr[i]
            arr = orbit_mpi.MPI_Bcast(arr,mpi_datatype.MPI_DOUBLE,main_rank,mpi_comm.MPI_COMM_WORLD)
            if(rank != 0):
                self.data_arr.append(arr)
    def getDataArray(self):
        """
        A convinience method. It returns the raw array
        with records with tuples [z,r,Ez,Er,E,B]
        """
        return self.data_arr
    def getNumberStepsZ(self):
        """
        Returns the number of steps in Z-axis. The number of grid points is self.zSteps+1.
        """
        return self.zSteps
    def getNumberStepsR(self):
        """
        Returns the number of steps along the radius. The number of grid points is self.rSteps+1.
        """
        return self.rSteps
    def makeGrid2DFileds_EzErH(self):
        """
        It fills out the Grid2D instances with the electric and magnetic filed components -
        Ez, Er, H.
        The Ez and Er are in MV in SuperFish file, and H in [A/m].
        In the Grid2D Ez and Er will be placed in [V/m].
        """
        #The Z and R in the self.data_arr are in [cm], so to switch to [m] we use 0.01
        Zmin = 0.01*self.Zmin
        Zmax = 0.01*self.Zmax
        Rmin = 0.01*self.Rmin
        Rmax = 0.01*self.Rmax
        grid2D_Ez = Grid2D(self.zSteps+1,self.rSteps+1,Zmin,Zmax,Rmin,Rmax)
        grid2D_Er = Grid2D(self.zSteps+1,self.rSteps+1,Zmin,Zmax,Rmin,Rmax)
        grid2D_H = Grid2D(self.zSteps+1,self.rSteps+1,Zmin,Zmax,Rmin,Rmax)
        for iz in range(self.zSteps+1):
            for ir in range(self.rSteps+1):
                # Flat index into data_arr. NOTE(review): this assumes the file
                # lists all z values for each r in turn — confirm against the
                # SuperFish table layout.
                i = (self.zSteps+1)*ir + iz
                [z,r,Ez,Er,E,H] = self.data_arr[i]
                grid2D_Ez.setValue(Ez*1.0e+6,iz,ir)  # MV/m -> V/m
                grid2D_Er.setValue(Er*1.0e+6,iz,ir)  # MV/m -> V/m
                grid2D_H.setValue(H,iz,ir)
        return (grid2D_Ez,grid2D_Er,grid2D_H)
    def getAxisEz(self, zSimmetric = -1):
        """
        Returns the Spline with Ez(z) on the axis of the RF.
        If zSimmetric > 0 the table has only half of the table,
        and the Function should be added points for (-Zmax) to (Zmin - step).
        The returned spline is normalized by the peak |Ez| on the axis.
        """
        stepZ = (self.Zmax - self.Zmin)/self.zSteps
        # Find the peak |Ez| along the axis (first zSteps+1 rows are r = Rmin).
        Ez_max = 0.
        for iz in range(self.zSteps+1):
            [z,r,Ez,Er,E,B] = self.data_arr[iz]
            Ez_abs = math.fabs(Ez)
            if(Ez_max < Ez_abs):
                Ez_max = Ez_abs
        #The z in the self.data_arr is in [cm], so to switch to [m] we use 0.01
        f = Function()
        if(zSimmetric > 0):
            # Mirror the half-table to negative z.
            # NOTE(review): these mirrored points are appended with decreasing
            # abscissa — confirm Function.add accepts (or re-sorts) unsorted x.
            for iz in range(1,self.zSteps+1):
                [z,r,Ez,Er,E,B] = self.data_arr[iz]
                z = self.Zmin + stepZ*iz
                f.add(-z*0.01,Ez/Ez_max)
        for iz in range(self.zSteps+1):
            [z,r,Ez,Er,E,B] = self.data_arr[iz]
            z = self.Zmin + stepZ*iz
            f.add(z*0.01,Ez/Ez_max)
        spline = SplineCH()
        spline.compile(f)
        return spline
|
<filename>Resources/Programs/colorChart.py
# Imports #
import tkinter as tk
# Main #
def main():
    """Create the root window, populate it with the color chart, and run Tk."""
    root = tk.Tk()
    root.title("Named Color Chart")
    c_ColorChart(root, l_Colors)
    root.mainloop()
#-----------Basement------------
# Classes #
class c_ColorChart(tk.Frame):
    """A frame showing one label per named color, filling columns top-to-bottom."""
    v_Rows = 36      # highest row index used; each column holds v_Rows + 1 labels
    v_FontSize = 6   # point size of the label font
    def __init__(self, tk_Root, l_Colors):
        tk.Frame.__init__(self, tk_Root)
        per_column = self.v_Rows + 1  # rows 0..v_Rows inclusive
        for index, color in enumerate(l_Colors):
            label = tk.Label(self, text=color, bg=color,
                             font=("Times", self.v_FontSize, "bold"))
            # divmod-style placement reproduces the original row/column walk.
            label.grid(row=index % per_column, column=index // per_column,
                       sticky="ew")
        self.pack(expand=1, fill="both")
# Tk-recognized color names, ordered roughly by hue (reds through whites/grays).
l_Colors = ["MAROON","DARKRED","BROWN","FIREBRICK","CRIMSON","RED","TOMATO",
"CORAL","INDIANRED","LIGHTCORAL","DARKSALMON","SALMON","LIGHTSALMON","ORANGERED",
"DARKORANGE","ORANGE","GOLD","DARKGOLDENROD","GOLDENROD","PALEGOLDENROD",
"DARKKHAKI","KHAKI","OLIVE","YELLOW","YELLOWGREEN","DARKOLIVEGREEN","OLIVEDRAB",
"LAWNGREEN","CHARTREUSE","GREENYELLOW","DARKGREEN","GREEN","FORESTGREEN","LIME",
"LIMEGREEN","LIGHTGREEN","PALEGREEN","DARKSEAGREEN","MEDIUMSPRINGGREEN",
"SPRINGGREEN","SEAGREEN","MEDIUMAQUAMARINE","MEDIUMSEAGREEN","LIGHTSEAGREEN",
"DARKSLATEGRAY","TEAL","DARKCYAN","AQUA","CYAN","LIGHTCYAN","DARKTURQUOISE",
"TURQUOISE","MEDIUMTURQUOISE","PALETURQUOISE","AQUAMARINE","POWDERBLUE","CADETBLUE",
"STEELBLUE","CORNFLOWERBLUE","DEEPSKYBLUE","DODGERBLUE","LIGHTBLUE","SKYBLUE",
"LIGHTSKYBLUE","MIDNIGHTBLUE","NAVY","DARKBLUE","MEDIUMBLUE","BLUE","ROYALBLUE",
"BLUEVIOLET","INDIGO","DARKSLATEBLUE","SLATEBLUE","MEDIUMSLATEBLUE","MEDIUMPURPLE",
"DARKMAGENTA","DARKVIOLET","DARKORCHID","MEDIUMORCHID","PURPLE","THISTLE","PLUM",
"VIOLET","MAGENTA","ORCHID","MEDIUMVIOLETRED","PALEVIOLETRED","DEEPPINK","HOTPINK",
"LIGHTPINK","PINK","ANTIQUEWHITE","BEIGE","BISQUE","BLANCHEDALMOND","WHEAT","CORNSILK",
"LEMONCHIFFON","LIGHTGOLDENRODYELLOW","LIGHTYELLOW","SADDLEBROWN","SIENNA","CHOCOLATE",
"PERU","SANDYBROWN","BURLYWOOD","TAN","ROSYBROWN","MOCCASIN","NAVAJOWHITE","PEACHPUFF",
"MISTYROSE","LAVENDERBLUSH","LINEN","OLDLACE","PAPAYAWHIP","SEASHELL","MINTCREAM",
"SLATEGRAY","LIGHTSLATEGRAY","LIGHTSTEELBLUE","LAVENDER","FLORALWHITE","ALICEBLUE",
"GHOSTWHITE","HONEYDEW","IVORY","AZURE","SNOW","BLACK","DIMGRAY","GRAY","DARKGRAY",
"SILVER","LIGHTGRAY","GAINSBORO","WHITESMOKE","WHITE"]
# Main Loop #
if __name__ == '__main__':
    main()  # fix: removed stray trailing "|" that made this line a syntax error
import time
def definir_prog():
    """Interactively build one workout programme and append it to the file."""
    print('\n'+8*'=--='+'\n')
    programacao = []
    exercicios = []
    cond1 = True
    while cond1 == True:
        programaNome = str(input('Programa: '))
        cond2 = True
        while cond2 == True:
            print(32*'-'+'\n')
            exercicioNome = str(input('Exercicio: '))
            print('Tempo de exercicio')
            m = int(input('Minutos: '))
            s = int(input('Segundos: '))
            # The exercise list alternates name, [minutes, seconds].
            exercicios.append(exercicioNome)
            exercicios.append([m, s])
            print(32*'-'+'\n')
            validacao = False
            # escolha() asks whether to add another exercise (True = continue).
            cond2 = escolha(validacao)
        cond1 = False
        programacao.append(programaNome)
        programacao.append(exercicios)
    for i in range(0, len(programacao)): # Debug: show what will be written to the file
        print(programacao[i])
    gravar(programacao)
def iniciar_prog():
    """Start a saved programme by loading it from the programme file."""
    abrir_arq()
def cronometro():
    """Count a stopwatch up one second at a time until the stop time is reached."""
    for m in range(0,60):
        for s in range(0,60):
            time.sleep(1)
            print('Relógio {}:{}'.format(m, s))# Debug
            lista = []
            lista.append(m)
            lista.append(s)
            print(lista)
            if lista == [0, 5]:# stop time; should come from the saved programme file
                break
            if lista == [0, 5]:# repeated so the outer loop breaks as well
                break
def gravar(programacao):
    """Append one programme (its repr, one per line) to programacao.txt.

    Fixes: uses a `with` block so the file is closed even on error, and
    avoids shadowing the `file` builtin.
    """
    with open('programacao.txt', 'a') as arquivo:
        arquivo.write(str(programacao) + '\n')
def abrir_arq():
    """Read and display the saved programmes; returns the list of file lines.

    Fixes: uses a `with` block, avoids shadowing the `file` builtin, and
    returns the lines (backward-compatible: previous callers ignored the
    implicit None return).
    """
    with open('programacao.txt', 'r') as arquivo:
        linhas = arquivo.readlines()
    print(linhas)
    return linhas
def textoMenu():
    """Display the main menu and return the raw option string the user typed."""
    prompt = (32*'=' + '\n' +
              '1- Definir programação\n'
              '2- Iniciar programação\n'
              '3- Cronometro\n'
              '-> ')
    return input(prompt)
def menu():
    """Main menu loop: dispatch the chosen option, re-prompting forever.

    Bug fixed: in the original, the 'invalid option' else-branch was attached
    only to the last `if opcao == '3'`, so after handling option '1' or '2'
    the freshly read option could wrongly trigger the invalid-option message.
    An if/elif/else chain evaluates the option exactly once per iteration.
    (The original loop condition compared strings to ints and so never
    terminated; that always-loop behaviour is kept deliberately.)
    """
    opcao = textoMenu()
    while True:
        if opcao == '1':
            definir_prog()
            print()
        elif opcao == '2':
            iniciar_prog()
            print()
        elif opcao == '3':
            cronometro()
            print()
        else:
            print(15*'='+'\nOpção inválida!\n'+15*'=')
            print('\nInsira uma opção do Menu abaixo:\n')
        opcao = textoMenu()
# Binary (yes/no) prompt: returns True to keep adding exercises, False to stop.
def escolha(validacao):
    """Ask the user whether to add more exercises; loop until a valid answer.

    Fixes: the original shadowed the function name with a local variable and
    raised UnboundLocalError when called with validacao=True (``cond`` was
    returned before ever being assigned). ``cond`` now defaults to False.
    """
    cond = False
    while validacao != True:
        print('Deseja adicionar mais exercicios?')
        opcao = int(input('1 - Sim\n'
                          '2 - Não\n'))
        if opcao != 1 and opcao != 2:
            print('Opção inválida!\n' + 32 * '-')
        else:
            cond = (opcao == 1)
            validacao = True
    return cond
# Main Program
menu()  # fix: removed stray trailing "|" that made this line a syntax error
import turtle
import random
import time
# Draw the cherry tree's trunk and branches (recursive).
def tree(branch, t):
    """Recursively draw a branch of length `branch` using turtle `t`."""
    time.sleep(0.0008)
    if branch > 3:
        # Colour and pen size depend on branch thickness: thin branches are
        # drawn as blossom (snow / lightcoral), thick ones as wood (sienna).
        if 8 <= branch <= 12:
            if random.randint(0, 2) == 0:
                t.color('snow')
            else:
                t.color('lightcoral')
            t.pensize(branch / 3)
        elif branch < 8:
            if random.randint(0, 1) == 0:
                t.color('snow')
            else:
                t.color('lightcoral')
            t.pensize(branch / 2)
        else:
            t.color('sienna')
            t.pensize(branch / 10)
        t.forward(branch)
        # Random fork: turn right, draw one sub-branch, swing left, draw the other.
        a = 1.5 * random.random()
        t.right(20 * a)
        b = 1.5 * random.random()
        tree(branch - 10 * b, t)
        t.left(40 * a)
        tree(branch - 10 * b, t)
        t.right(20 * a)
        # Walk back to the branch origin without drawing.
        t.up()
        t.backward(branch)
        t.down()
# Falling petals.
def petal(m, t):
    """Scatter `m` small petal dots at random offsets around the tree."""
    for i in range(m):
        a = 200 - 380 * random.random()
        b = 10 - 20 * random.random()
        t.up()
        t.forward(b)
        t.left(90)
        t.forward(a)
        t.down()
        t.color('lightcoral')
        t.circle(1)
        # Retrace the moves so each petal's offset is measured from the same spot.
        t.up()
        t.backward(a)
        t.right(90)
        t.backward(b)
def write(t):
    """Write the (intentionally Chinese) greeting poem beneath the tree."""
    t.up()
    t.goto(0, -110)
    t.pencolor('black')
    t.write("Ivy J.\n\n暖春三月,樱花纷纷落落,\n花瓣唱着歌,飘向你心窝。\n愿它的香气能令你的心情快乐,\n愿你拥有樱花般灿烂的生活!^_^",
            font=('华文楷体', 16, 'italic'))
"""
.Turtle:注意字母的大写,用于生成一个 turtle 对象
.mainloop:保持画布窗口不消失,用在最后
.mode:"logo",初始位置指向北(上);"standard",初始位置指向东方(右)
.fillcolor:设置要填充的颜色
.color(p1, p2):p1 画笔整体颜色; p2 由画笔画出的图形的填充颜色
turtle.backward(distance):沿当前反方向,画笔绘制distance距离
turtle.forward(distance):沿当前方向,画笔绘制distance距离
turtle.right(degree):顺时针移动degree度
turtle.left(degree):逆时针移动degree度
.seth/setheading(angle):设置当前朝向为angle角度,若模式为“logo”,则顺时针旋转;若模式为“standard”,则逆时针旋转
.heading:返回当前放置的角度
.pu/penup/up:抬笔
.pd/pendown/down:落笔
.goto/setposition/setpos:移动到相对于画布中心点的坐标位置(x,y),画布是一个以初始位置为原点的坐标系
.setx/sety:保持一个坐标不变,移到到另一个坐标,移动的距离是相对于原点来计算的
.xcor/ycor:返回当前箭头所处位置的橫纵坐标
.home:让画笔回到初始位置(原点),同时绘制
.reset:抹去之前所有的痕迹,重新绘画,恢复箭头的初始状态
.clear:抹去之前所有的痕迹,但是保持箭头的初始状态
.circle:一个输入参数时画圆,两个时画弧长,三个参数时画多边形
.pensize:设置画笔大小
.speed:设置画笔移动速度,0为最快速度
.undo:撤销上一次操作
.write:绘制文本
.getscreen:获取画布对象,对画布进行操作
"""
try:
    myWin = turtle.Screen()
    myWin.title("樱花 ^_^")
    myWin.tracer(5, 2)
    # Hide the turtle cursor.
    turtle.hideturtle()
    turtle.setx(-120)
    turtle.left(90)
    # Erase previous traces but keep the cursor's current state.
    turtle.clear()
    turtle.up()
    turtle.backward(150)
    turtle.down()
    turtle.color('sienna')
    # Draw the cherry tree's trunk and branches.
    tree(60, turtle)
    # Falling petals.
    petal(210, turtle)
    # Write the greeting text.
    write(turtle)
    turtle.done()
except (turtle.Terminator, BaseException):
    # NOTE(review): BaseException makes the Terminator entry redundant and
    # silently swallows everything, including KeyboardInterrupt — confirm
    # this catch-all is intentional before narrowing it.
    pass
|
from datetime import datetime, timedelta
from unittest.mock import Mock
import pytest
import sqlalchemy as sa
from h_matchers import Any
from h.models.document import ConcurrentUpdateError, create_or_update_document_uri
from h.models.document._document import Document
from h.models.document._uri import DocumentURI
class TestDocumentURI:
    """Unit tests for the DocumentURI model."""
    def test_it_normalizes_the_uri(self):
        document_uri = DocumentURI(uri="http://example.com/")
        # The normalized form uses the "httpx" scheme and drops the trailing slash.
        assert document_uri.uri_normalized == "httpx://example.com"
    def test_type_defaults_to_empty_string(self, db_session, document_uri, factories):
        document_uri = factories.DocumentURI(type=None)
        db_session.flush()
        # A server default replaces None with "" at flush time.
        assert not document_uri.type
    def test_you_cannot_set_type_to_null(self, db_session, document_uri):
        # Nulling `type` on an already-persisted row violates NOT NULL.
        document_uri.type = None
        with pytest.raises(sa.exc.IntegrityError):
            db_session.flush()
    def test_content_type_defaults_to_empty_string(self, db_session, factories):
        document_uri = factories.DocumentURI(content_type=None)
        db_session.flush()
        assert not document_uri.content_type
    def test_you_cannot_set_content_type_to_null(self, db_session, document_uri):
        document_uri.content_type = None
        with pytest.raises(sa.exc.IntegrityError):
            db_session.flush()
    def test_you_cannot_add_duplicate_document_uris(self, db_session):
        # You can't add DocumentURI's with the same claimant, uri, type and
        # content_type, even if they have different documents.
        attrs = {
            "claimant": "http://www.example.com",
            "uri": "http://www.example.com",
            "type": "foo",
            "content_type": "bar",
        }
        db_session.add(DocumentURI(**attrs, document=Document()))
        db_session.add(DocumentURI(**attrs, document=Document()))
        with pytest.raises(sa.exc.IntegrityError):
            db_session.commit()
    def test_repr(self):
        uri = DocumentURI(id=1234)
        repr_string = repr(uri)
        # The repr should name the class and include the primary key.
        assert "DocumentURI" in repr_string
        assert "1234" in repr_string
    @pytest.fixture
    def document_uri(self, db_session, factories):
        # A persisted DocumentURI built with the test factories.
        document_uri = factories.DocumentURI()
        db_session.flush()
        return document_uri
@pytest.mark.usefixtures("log")
class TestCreateOrUpdateDocumentURI:
    """Tests for the create_or_update_document_uri() helper."""
    def test_it_updates_the_existing_DocumentURI_if_there_is_one(
        self, db_session, doc_uri_attrs
    ):
        original_attrs = doc_uri_attrs
        updated_attrs = dict(
            original_attrs, created=datetime.now(), updated=datetime.now()
        )
        document_uri = DocumentURI(**original_attrs)
        db_session.add(document_uri)
        create_or_update_document_uri(session=db_session, **updated_attrs)
        # `created` must be preserved on update; only `updated` moves forward.
        assert document_uri.created == original_attrs["created"]
        assert document_uri.updated == updated_attrs["updated"]
        assert (
            len(db_session.query(DocumentURI).all()) == 1
        ), "It shouldn't have added any new objects to the db"
    def test_it_creates_a_new_DocumentURI_if_there_is_no_existing_one(
        self, db_session, doc_uri_attrs
    ):
        original_attrs = doc_uri_attrs
        updated_attrs = dict(
            original_attrs, created=datetime.now(), updated=datetime.now()
        )
        # Add one non-matching DocumentURI to the database.
        db_session.add(DocumentURI(**dict(original_attrs, content_type="different")))
        create_or_update_document_uri(session=db_session, **updated_attrs)
        # The newest row (by `created`) should carry the new attributes.
        document_uri = (
            db_session.query(DocumentURI).order_by(DocumentURI.created.desc()).first()
        )
        assert document_uri == Any.object.with_attrs(updated_attrs)
    def test_it_skips_denormalizing_http_uris_to_document(
        self, db_session, doc_uri_attrs
    ):
        # A document that already has an http(s) web_uri keeps it unchanged.
        doc_uri_attrs["document"] = document = Document(
            web_uri="http://example.com/first_uri.html"
        )
        db_session.add(document)
        create_or_update_document_uri(session=db_session, **doc_uri_attrs)
        document_ = db_session.query(Document).get(document.id)
        assert document_.web_uri == "http://example.com/first_uri.html"
    def test_it_logs_a_warning_if_document_ids_differ(
        self, log, mock_db_session, factories, doc_uri_attrs
    ):
        # Ensure the document we use, and that returned by filter first are
        # different
        mock_db_session.query.return_value.filter.return_value.first.return_value = (
            factories.DocumentURI()
        )
        different_document = factories.Document()
        create_or_update_document_uri(
            session=mock_db_session, **dict(doc_uri_attrs, document=different_document)
        )
        assert log.warning.call_count == 1
    def test_raises_retryable_error_when_flush_fails(
        self, db_session, monkeypatch, doc_uri_attrs
    ):
        # Simulate a concurrent-write IntegrityError at flush time.
        def err():
            raise sa.exc.IntegrityError(None, None, None)
        monkeypatch.setattr(db_session, "flush", err)
        with pytest.raises(ConcurrentUpdateError):
            with db_session.no_autoflush:  # prevent premature IntegrityError
                create_or_update_document_uri(session=db_session, **doc_uri_attrs)
    @pytest.fixture
    def doc_uri_attrs(self):
        # Baseline attribute set; timestamps are a day old so the tests can
        # tell whether an update touched them.
        return {
            "claimant": "http://example.com/example_claimant.html",
            "uri": "http://example.com/example_uri.html",
            "type": "self-claim",
            "content_type": "",
            "document": Document(),
            "created": datetime.now() - timedelta(days=1),
            "updated": datetime.now() - timedelta(days=1),
        }
    @pytest.fixture()
    def mock_db_session(self, db_session):
        # A Mock with the db_session API, for call-inspection tests.
        return Mock(spec=db_session)
    @pytest.fixture
    def log(self, patch):
        # The module logger, patched so warnings can be asserted on.
        return patch("h.models.document._uri.log")
|
<gh_stars>1-10
#-------------------------------------------------
# ras2raw.py
#
# Copyright (c) 2018, Data PlatForm Center, NIMS
#
# This software is released under the MIT License.
#-------------------------------------------------
# coding: utf-8
#__author__ = "nagao"
__package__ = "M-DaC_XRD/Rigaku_XRD_tools"
__version__ = "1.0.0"
import argparse
import os.path
import csv
import pandas as pd
from dateutil.parser import parse
import xml.dom.minidom
import re
import xml.etree.ElementTree as ET
import codecs
# Command-line interface: input RAS file, template, output file and options.
parser = argparse.ArgumentParser()
parser.add_argument("file_path")
parser.add_argument("--encoding", default="utf_8")
parser.add_argument("template_file")
parser.add_argument("out_file")
parser.add_argument("--stdout", help="show meta information", action="store_true")
options = parser.parse_args()
readfile = options.file_path
encoding_option = options.encoding
templatefile = options.template_file
outputfile = options.out_file
print_option = options.stdout
channel = 0
# Load the metadata template and collect the keys it declares.
template = ET.parse(templatefile)
columns=[]
metas = template.findall('meta')
for meta in metas:
    columns.append(meta.attrib["key"])
# Output DOM: a <metadata> root that will receive one <meta> node per key.
dom = xml.dom.minidom.Document()
metadata = dom.createElement('metadata')
dom.appendChild(metadata)
count = 0
wide = 1
maxcolumn = 1
# One-row frame holding the header value for each template key that was found.
df = pd.DataFrame(index=['value'])
# Scan the RAS header; stop at RAS_HEADER_END.
with open(readfile, 'r', encoding=encoding_option) as f:
    for line in f:
        line = line.strip()
        line = line[1:]  # drop the leading marker character
        if line == 'RAS_HEADER_END':
            break
        elif not(line == 'RAS_DATA_START' or line == 'RAS_HEADER_START'):
            # Header lines look like: KEY "value" - split once on the first space.
            lines = line.split(" ", 1)
            key = lines[0]
            tempvalue = lines[1]
            value = tempvalue[1:-1]  # strip the surrounding quotes
            # Keep only the keys the template declares.
            temp = template.find('meta[@key="{value}"]'.format(value=key))
            if temp != None:
                df[key] = value
# Emit one <meta> element per template key, in template order.
for meta in metas:
    key = meta.attrib["key"]
    # Use the harvested header value if present, otherwise an empty string.
    if key in df.columns:
        value = df.loc['value', key]
    else:
        value = ""
    subnode = dom.createElement('meta')
    subnode.appendChild(dom.createTextNode(value))
    subnode_attr = dom.createAttribute('key')
    subnode_attr.value = key
    subnode.setAttributeNode(subnode_attr)
    metadata.appendChild(subnode)
    # Copy the value type from the template; default to "String".
    subnode_attr = dom.createAttribute('type')
    typename = template.find('meta[@key="{value}"]'.format(value=key))
    if typename.get("type") != None:
        subnode_attr.value = typename.get("type")
    else:
        subnode_attr.value = "String"
    subnode.setAttributeNode(subnode_attr)
    metadata.appendChild(subnode)
    # NOTE(review): `channel` is initialised to 0 and never reassigned in this
    # script, so this branch looks unreachable — confirm before removing.
    if channel != 0:
        subnode_attr = dom.createAttribute('column')
        subnode_attr.value = channel
        subnode.setAttributeNode(subnode_attr)
        metadata.appendChild(subnode)
# Trailing bookkeeping elements: column info plus tool/template identification.
subnode = dom.createElement('column_num')
subnode.appendChild(dom.createTextNode(str(maxcolumn)))
metadata.appendChild(subnode)
column_name = template.find('column_name').text
subnode = dom.createElement('column_name')
subnode.appendChild(dom.createTextNode(column_name))
metadata.appendChild(subnode)
tool_package = __package__
subnode = dom.createElement('tool_package')
subnode.appendChild(dom.createTextNode(tool_package))
metadata.appendChild(subnode)
tool_filename = os.path.basename(__file__)
subnode = dom.createElement('tool_filename')
subnode.appendChild(dom.createTextNode(tool_filename))
metadata.appendChild(subnode)
tool_version = __version__
subnode = dom.createElement('tool_version')
subnode.appendChild(dom.createTextNode(tool_version))
metadata.appendChild(subnode)
template_package = template.getroot().attrib['package']
subnode = dom.createElement('template_package')
subnode.appendChild(dom.createTextNode(template_package))
metadata.appendChild(subnode)
template_filename = os.path.basename(templatefile)
subnode = dom.createElement('template_filename')
subnode.appendChild(dom.createTextNode(template_filename))
metadata.appendChild(subnode)
template_version = template.getroot().attrib['version']
subnode = dom.createElement('template_version')
subnode.appendChild(dom.createTextNode(template_version))
metadata.appendChild(subnode)
# Optionally echo the generated XML, then write it out as UTF-8.
# Fixes: removed the stray trailing "|" after dom.unlink() (syntax error),
# used a `with` block so the output file is closed even on error, and
# stopped shadowing the `file` builtin.
if print_option:
    print(dom.toprettyxml())
with codecs.open(outputfile, 'wb', encoding='utf-8') as out:
    dom.writexml(out, '', '\t', '\n', encoding='utf-8')
dom.unlink()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.