input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
_config.items():
if k.startswith('vsd'):
ctxt[k.replace('-', '_')] = v
for rid in relation_ids('vsd-rest-api'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
vsd_ip = rdata.get('vsd-ip-address')
if cmp_release >= 'kilo':
cms_id_value = rdata.get('nuage-cms-id')
log('relation data:cms_id required for'
' nuage plugin: {}'.format(cms_id_value))
if cms_id_value is not None:
ctxt['vsd_cms_id'] = cms_id_value
log('relation data:vsd-ip-address: {}'.format(vsd_ip))
if vsd_ip is not None:
ctxt['vsd_server'] = '{}:8443'.format(vsd_ip)
if 'vsd_server' not in ctxt:
ctxt['vsd_server'] = '1.1.1.1:8443'
ctxt['verbose'] = config('verbose')
ctxt['debug'] = config('debug')
ctxt['neutron_bind_port'] = \
determine_api_port(api_port('neutron-server'),
singlenode_mode=True)
ctxt['quota_security_group'] = config('quota-security-group')
ctxt['quota_security_group_rule'] = \
config('quota-security-group-rule')
ctxt['quota_network'] = config('quota-network')
ctxt['quota_subnet'] = config('quota-subnet')
ctxt['quota_port'] = config('quota-port')
ctxt['quota_vip'] = config('quota-vip')
ctxt['quota_pool'] = config('quota-pool')
ctxt['quota_member'] = config('quota-member')
ctxt['quota_health_monitors'] = config('quota-health-monitors')
ctxt['quota_router'] = config('quota-router')
ctxt['quota_floatingip'] = config('quota-floatingip')
n_api_settings = self.get_neutron_api_rel_settings()
if n_api_settings:
ctxt.update(n_api_settings)
flat_providers = config('flat-network-providers')
if flat_providers:
ctxt['network_providers'] = ','.join(flat_providers.split())
vlan_ranges = config('vlan-ranges')
if vlan_ranges:
ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())
vni_ranges = config('vni-ranges')
if vni_ranges:
ctxt['vni_ranges'] = ','.join(vni_ranges.split())
enable_dns_extension_driver = False
dns_domain = get_dns_domain()
if dns_domain:
enable_dns_extension_driver = True
ctxt['dns_domain'] = dns_domain
if cmp_release >= 'mitaka':
for rid in relation_ids('external-dns'):
if related_units(rid):
enable_dns_extension_driver = True
# AZAwareWeightScheduler inherits from WeightScheduler and is
# available as of mitaka
ctxt['network_scheduler_driver'] = (
'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler'
)
ctxt['dhcp_load_type'] = config('dhcp-load-type')
extension_drivers = []
if config('enable-ml2-port-security'):
extension_drivers.append(EXTENSION_DRIVER_PORT_SECURITY)
if enable_dns_extension_driver:
if cmp_release < 'queens':
extension_drivers.append(EXTENSION_DRIVER_DNS)
else:
extension_drivers.append(EXTENSION_DRIVER_DNS_DOMAIN_PORTS)
if is_qos_requested_and_valid():
extension_drivers.append(EXTENSION_DRIVER_QOS)
if extension_drivers:
ctxt['extension_drivers'] = ','.join(extension_drivers)
ctxt['enable_sriov'] = config('enable-sriov')
if cmp_release >= 'mitaka':
if config('global-physnet-mtu'):
ctxt['global_physnet_mtu'] = config('global-physnet-mtu')
if config('path-mtu'):
ctxt['path_mtu'] = config('path-mtu')
else:
ctxt['path_mtu'] = config('global-physnet-mtu')
physical_network_mtus = config('physical-network-mtus')
if physical_network_mtus:
ctxt['physical_network_mtus'] = ','.join(
physical_network_mtus.split())
if 'kilo' <= cmp_release <= 'mitaka':
pci_vendor_devs = config('supported-pci-vendor-devs')
if pci_vendor_devs:
ctxt['supported_pci_vendor_devs'] = \
','.join(pci_vendor_devs.split())
ctxt['mechanism_drivers'] = get_ml2_mechanism_drivers()
n_load_balancer_settings = NeutronLoadBalancerContext()()
if n_load_balancer_settings:
ctxt.update(n_load_balancer_settings)
if config('neutron-plugin') in ['ovs', 'ml2', 'Calico']:
ctxt['service_plugins'] = []
service_plugins = {
'icehouse': [
('neutron.services.l3_router.l3_router_plugin.'
'L3RouterPlugin'),
'neutron.services.firewall.fwaas_plugin.FirewallPlugin',
'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
'neutron.services.vpn.plugin.VPNDriverPlugin',
('neutron.services.metering.metering_plugin.'
'MeteringPlugin')],
'juno': [
('neutron.services.l3_router.l3_router_plugin.'
'L3RouterPlugin'),
'neutron.services.firewall.fwaas_plugin.FirewallPlugin',
'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
'neutron.services.vpn.plugin.VPNDriverPlugin',
('neutron.services.metering.metering_plugin.'
'MeteringPlugin')],
'kilo': ['router', 'firewall', 'lbaas', 'vpnaas', 'metering'],
'liberty': ['router', 'firewall', 'lbaas', 'vpnaas',
'metering'],
'mitaka': ['router', 'firewall', 'lbaas', 'vpnaas',
'metering'],
'newton': ['router', 'firewall', 'vpnaas', 'metering',
('neutron_lbaas.services.loadbalancer.plugin.'
'LoadBalancerPluginv2')],
'ocata': ['router', 'firewall', 'vpnaas', 'metering',
('neutron_lbaas.services.loadbalancer.plugin.'
'LoadBalancerPluginv2'), 'segments',
('neutron_dynamic_routing.'
'services.bgp.bgp_plugin.BgpPlugin')],
'pike': ['router', 'firewall', 'metering', 'segments',
('neutron_lbaas.services.loadbalancer.plugin.'
'LoadBalancerPluginv2'),
('neutron_dynamic_routing.'
'services.bgp.bgp_plugin.BgpPlugin')],
'queens': ['router', 'firewall', 'metering', 'segments',
('neutron_lbaas.services.loadbalancer.plugin.'
'LoadBalancerPluginv2'),
('neutron_dynamic_routing.'
'services.bgp.bgp_plugin.BgpPlugin')],
'rocky': ['router', 'firewall', 'metering', 'segments',
('neutron_dynamic_routing.'
'services.bgp.bgp_plugin.BgpPlugin')],
'stein': ['router', 'firewall_v2', 'metering', 'segments',
('neutron_dynamic_routing.'
'services.bgp.bgp_plugin.BgpPlugin')],
'train': ['router', 'firewall_v2', 'metering', 'segments',
('neutron_dynamic_routing.'
'services.bgp.bgp_plugin.BgpPlugin')],
# TODO: FWaaS was deprecated at Ussuri and will be removed
# during the W cycle
}
if cmp_release >= 'rocky' and cmp_release < 'train':
if ctxt.get('load_balancer_name', None):
# TODO(fnordahl): Remove when ``neutron_lbaas`` is retired
service_plugins[release].append('lbaasv2-proxy')
else:
# TODO(fnordahl): Remove fall-back in next charm release
service_plugins[release].append('lbaasv2')
# TODO: FWaaS was deprecated at Ussuri and will be removed
# during the W cycle
if cmp_release >= 'stein':
ctxt['firewall_v2'] = True
ctxt['service_plugins'] = service_plugins.get(
release, service_plugins['stein'])
if is_nsg_logging_enabled() or is_nfg_logging_enabled():
ctxt['service_plugins'].append('log')
if is_port_forwarding_enabled():
ctxt['service_plugins'].append('port_forwarding')
if is_qos_requested_and_valid():
ctxt['service_plugins'].append('qos')
if is_vlan_trunking_requested_and_valid():
ctxt['service_plugins'].append('trunk')
ctxt['service_plugins'] = ','.join(ctxt['service_plugins'])
return ctxt
class HAProxyContext(context.HAProxyContext):
    interfaces = ['ceph']

    def __call__(self):
        """Extend the charmhelpers HAProxyContext with the port mapping
        specific to this charm.

        Also used to extend the nova.conf context with the correct
        api_listening_ports.
        """
        from neutron_api_utils import api_port

        ctxt = super(HAProxyContext, self).__call__()

        # Port apache fronts the neutron-server API on.
        backend_port = determine_apache_port(api_port('neutron-server'),
                                             singlenode_mode=True)

        ctxt['neutron_bind_port'] = determine_api_port(
            api_port('neutron-server'),
            singlenode_mode=True,
        )
        # for haproxy.conf
        ctxt['service_ports'] = {
            'neutron-server': [api_port('neutron-server'), backend_port],
        }
        return ctxt
class EtcdContext(context.OSContextGenerator):
    interfaces = ['etcd-proxy']

    def __call__(self):
        """Collect the etcd cluster connection string for Calico.

        :returns: {'cluster': <connection string>}; the string is empty
                  when the plugin is not Calico or no related unit has
                  published a cluster string yet.
        """
        ctxt = {'cluster': ''}
        if not config('neutron-plugin') == 'Calico':
            return ctxt

        for rid in relation_ids('etcd-proxy'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                cluster_string = rdata.get('cluster')
                if cluster_string:
                    # Return the first cluster string found.  Previously the
                    # outer loop kept iterating, so a later relation with no
                    # 'cluster' key could clobber an already-found value.
                    ctxt['cluster'] = cluster_string
                    return ctxt
        return ctxt
class NeutronApiSDNContext(context.SubordinateConfigContext):
    """Context for SDN plugin subordinates related over the
    neutron-plugin-api-subordinate interface."""

    interfaces = ['neutron-plugin-api-subordinate']

    def __init__(self, config_file='/etc/neutron/neutron.conf'):
        """Initialize context for plugin subordinates.

        :param config_file: Which config file we accept custom sections for
        :type config_file: str
        """
        super(NeutronApiSDNContext, self).__init__(
            interface='neutron-plugin-api-subordinate',
            service='neutron-api',
            config_file=config_file)
        # NOTE: The defaults dict serve a dual purpose.
        # 1. Only the keys listed here are picked up from the relation.
        # 2. Any keys listed here with a value will be used as a default
        #    if not specified on the relation.
        #
        # Any empty values will not be returned on this context to allow
        # values to be passed on from other contexts.
        self.defaults = {
            'core-plugin': {
                'templ_key': 'core_plugin',
                'value': 'neutron.plugins.ml2.plugin.Ml2Plugin',
            },
            'neutron-plugin-config': {
                'templ_key': 'neutron_plugin_config',
                'value': '/etc/neutron/plugins/ml2/ml2_conf.ini',
            },
            'service-plugins': {
                'templ_key': 'service_plugins',
                'value': '',
            },
            'restart-trigger': {
                'templ_key': 'restart_trigger',
                'value': '',
            },
            'quota-driver': {
                'templ_key': 'quota_driver',
                'value': '',
            },
            'api-extensions-path': {
                'templ_key': 'api_extensions_path',
                'value': '',
            },
            'extension-drivers': {
                'templ_key': 'extension_drivers',
                'value': '',
            },
            'mechanism-drivers': {
                'templ_key': 'mechanism_drivers',
                'value': '',
            },
            'tenant-network-types': {
                'templ_key': 'tenant_network_types',
                'value': '',
            },
            'neutron-security-groups': {
                'templ_key': 'neutron_security_groups',
                'value': '',
            },
        }

    def is_default(self, templ_key):
        """Check whether value associated with specified key is the default.

        :param templ_key: Key to look up
        :type templ_key: str
        :returns: True if default, False if not, None if key does not exist.
        :rtype: Option[bool, NoneValue]
        """
        ctxt = self.__call__()
        # for/else: the else branch runs only when no interface key maps to
        # templ_key, i.e. the key is unknown.
        for interface_key in self.defaults:
            if self.defaults[interface_key]['templ_key'] == templ_key:
                break
        else:
            return None
        return ctxt.get(templ_key) == self.defaults[interface_key]['value']

    def is_allowed(self, templ_key):
        """Check whether specified key is allowed on the relation.

        :param templ_key: Key to lookup
        :type templ_key: str
        :returns: True or False
        :rtype: bool
        """
        for interface_key in self.defaults:
            if self.defaults[interface_key]['templ_key'] == templ_key:
                return True
        return False

    def __call__(self):
        """Build the context from relation data.

        NOTE(review): the method returns after processing the first unit
        that advertises a 'neutron-plugin' key - presumably only a single
        SDN subordinate is expected at a time; confirm before relying on
        multi-unit behaviour.
        """
        ctxt = super(NeutronApiSDNContext, self).__call__()
        for rid in relation_ids('neutron-plugin-api-subordinate'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                plugin = rdata.get('neutron-plugin')
                if not plugin:
                    continue
                ctxt['neutron_plugin'] = plugin
                for key in self.defaults.keys():
                    remote_value = rdata.get(key)
                    ctxt_key = self.defaults[key]['templ_key']
                    if remote_value:
                        # Relation-provided value wins over the default.
                        ctxt[ctxt_key] = remote_value
                    elif self.defaults[key]['value']:
                        ctxt[ctxt_key] = self.defaults[key]['value']
                    else:
                        # Do not set empty values
                        pass
                return ctxt
        # Return empty dict when there are no related units, this will flag
        # the context as incomplete and will allow end user messaging of
        # missing relations
        return {}
class NeutronApiSDNConfigFileContext(context.OSContextGenerator):
    interfaces = ['neutron-plugin-api-subordinate']

    def __call__(self):
        """Determine which plugin configuration file should be rendered.

        The first related unit decides; when it does not advertise a
        'neutron-plugin-config' value, the stock ML2 config file is used.
        """
        for rid in relation_ids('neutron-plugin-api-subordinate'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                plugin_conf = rdata.get('neutron-plugin-config')
                return {
                    'config': plugin_conf or
                              '/etc/neutron/plugins/ml2/ml2_conf.ini',
                }
        # Return empty dict when there are no related units, this will flag
        # the context as incomplete and will allow end user messaging of
        # missing relations
        return {}
class NeutronApiApiPasteContext(context.OSContextGenerator):
    interfaces = ['neutron-plugin-api-subordinate']

    def __validate_middleware(self, middleware):
        '''
        Accepts a list of dicts of the following format:
        {
            'type': 'middleware_type',
            'name': 'middleware_name',
            'config': {
                option_1: value_1,
                # ...
                option_n: value_n
            }
        }
        This validator was meant to be minimalistic - PasteDeploy's
        validator will take care of the rest while our purpose here
        is mainly config rendering - not imposing additional validation
        logic which does not belong here.

        :raises ValueError: when an entry is malformed
        '''
        # types taken from PasteDeploy's wsgi loader
        VALID_TYPES = ['filter', 'filter-app',
                       'app', 'application',
                       'composite', 'composit', 'pipeline']

        def types_valid(t, n, c):
            # BUG FIX: this used to evaluate ``type(c is dict)`` which is
            # always truthy (the type of a bool), so a non-dict 'config'
            # slipped through validation unnoticed.
            return all((isinstance(t, str),
                        isinstance(n, str),
                        isinstance(c, dict)))

        def mtype_valid(t):
            return t in VALID_TYPES

        for m in middleware:
            t, n, c = [m.get(v) for v in ['type', 'name', 'config']]
            # note that dict has to be non-empty
            if not types_valid(t, n, c):
                raise ValueError('Extra middleware key type(s) are'
                                 ' invalid: {}'.format(repr(m)))
            if not mtype_valid(t):
                raise ValueError('Extra middleware type key is not'
                                 ' a valid PasteDeploy middleware '
                                 'type {}'.format(repr(t)))
            if not c:
                raise ValueError('Extra middleware config dictionary'
                                 ' is empty')

    def __process_unit(self, rid, unit):
        """Extract and parse 'extra_middleware' from a single unit.

        :returns: list of middleware dicts (empty when none specified)
        :raises ValueError: when the relation data cannot be parsed
        """
        rdata = relation_get(rid=rid, unit=unit)
        # update extra middleware for all possible plugins
        rdata_middleware = rdata.get('extra_middleware')
        if rdata_middleware:
            try:
                middleware = ast.literal_eval(rdata_middleware)
            except Exception:
                import traceback
                log(traceback.format_exc())
                raise ValueError('Invalid extra middleware data'
                                 ' - check the subordinate charm')
            if middleware:
                return middleware
            else:
                # BUG FIX: the original message concatenated to
                # 'notpopulated' / 'middlewarespecified' (missing spaces).
                log('extra_middleware specified but not'
                    ' populated by unit {}, '
                    'relation: {}, value: {}'.format(
                        unit, rid, repr(middleware)))
                raise ValueError('Invalid extra middleware'
                                 ' specified by a subordinate')
        # no extra middleware
        return list()

    def __call__(self):
        extra_middleware = []
        for rid in relation_ids('neutron-plugin-api-subordinate'):
            for unit in related_units(rid):
                extra_middleware.extend(self.__process_unit(rid, unit))
        self.__validate_middleware(extra_middleware)
        return {'extra_middleware': extra_middleware}\
            if extra_middleware else {}
class NeutronLoadBalancerContext(context.OSContextGenerator):
interfaces = ['neutron-load-balancer']
def __call__(self):
ctxt = {}
for rid in relation_ids('neutron-load-balancer'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
try:
ctxt['load_balancer_name'] = json.loads(
| |
<reponame>diCagri/content
import demistomock as demisto
from CommonServerPython import *
import urllib3
import traceback
from typing import Any, Dict, Optional, Union
import ntpath
from dateparser import parse
# Disable insecure warnings
urllib3.disable_warnings()
""" CONSTANTS """
VERSION = 24
MAX_RESULTS = 100
""" CLIENT CLASS """
class Client(BaseClient):
"""Client class to interact with the service API
This Client implements API calls, and does not contain any Demisto logic.
Should only do requests and return data.
It inherits from BaseClient defined in CommonServer Python.
Most calls use _http_request() that handles proxy, SSL verification, etc.
For this HelloWorld implementation, no special attributes defined
"""
def __init__(
self,
base_url,
project_name,
params,
verify=True,
proxy=False,
ok_codes=tuple(),
headers=None,
auth=None,
):
self.project_name = project_name
self.params = params
super().__init__(base_url, verify, proxy, ok_codes, headers, auth)
def get_project_list(self):
return self._http_request(
method="GET", url_suffix="/projects", params=self.params
)
def get_webhooks_list(self, project_name: str):
if project_name:
project_name_to_pass = project_name
else:
project_name_to_pass = self.project_name
return self._http_request(
method="GET",
url_suffix=f"/project/{project_name_to_pass}/webhooks",
params=self.params,
)
def get_jobs_list(
self,
id_list: list,
group_path: str,
job_filter: str,
job_exec_filter: str,
group_path_exact: str,
scheduled_filter: str,
server_node_uuid_filter: str,
project_name: str,
):
"""
This function returns a list of all existing projects.
:param id_list: list of Job IDs to include
:param group_path: include all jobs within that group path. if not specified, default is: "*".
:param job_filter: specify a filter for a job Name, apply to any job name that contains this value
:param job_exec_filter: specify an exact job name to match
:param group_path_exact: specify an exact group path to match. if not specified, default is: "*".
:param scheduled_filter: return only scheduled or only not scheduled jobs. can either be "true" or "false
:param server_node_uuid_filter: return all jobs related to a selected server UUID".
:param project_name: A project name to list its jobs
:return: api response.
"""
request_params: Dict[str, Any] = {}
if id_list:
request_params["idlist"] = ",".join(id_list)
if group_path:
request_params["groupPath"] = group_path
if job_filter:
request_params["jobFilter"] = job_filter
if job_exec_filter:
request_params["jobExactFilter"] = job_exec_filter
if group_path_exact:
request_params["groupPathExact"] = group_path_exact
if scheduled_filter:
request_params["scheduledFilter"] = scheduled_filter
if server_node_uuid_filter:
request_params["serverNodeUUIDFilter"] = server_node_uuid_filter
project_name_to_pass = project_name if project_name else self.project_name
request_params.update(self.params)
return self._http_request(
method="GET",
url_suffix=f"/project/{project_name_to_pass}/jobs",
params=request_params,
)
def execute_job(
self,
job_id: str,
arg_string: str,
log_level: str,
as_user: str,
node_filter: str,
run_at_time: str,
options: dict,
run_at_time_raw: str,
):
"""
This function runs an existing job
:param arg_string: execution arguments for the selected job: -opt1 value1 -opt2 value2
:param job_id: id of the job you want to execute
:param log_level: specifying the loglevel to use: 'DEBUG','VERBOSE','INFO','WARN','ERROR'
:param as_user: identifying the user who ran the job
:param node_filter: can be a node filter string
:param run_at_time: select a time to run the job. can be either in: 1 hour, 1 week, 1 day.
:param options: add options for running a job
:param run_at_time_raw: select a time to run the job in iso 8061 time as string
:return: api response
"""
request_body: Dict[str, Any] = {}
if arg_string:
request_body["argString"] = arg_string
if log_level:
request_body["loglevel"] = log_level
if as_user:
request_body["asUser"] = as_user
if node_filter:
request_body["filter"] = node_filter
if options:
request_body["options"] = options
if run_at_time:
request_body["runAtTime"] = run_at_time
elif run_at_time_raw:
request_body["runAtTime"] = run_at_time_raw
return self._http_request(
method="POST",
url_suffix=f"/job/{job_id}/executions",
params=self.params,
data=str(request_body),
)
def retry_job(
self,
job_id: str,
arg_string: str,
log_level: str,
as_user: str,
failed_nodes: str,
execution_id: str,
options: dict,
):
"""
This function retry running a failed execution.
:param arg_string: execution arguments for the selected job: -opt1 value1 -opt2 value2
:param job_id: id of the job you want to execute
:param log_level: specifying the log level to use: 'DEBUG','VERBOSE','INFO','WARN','ERROR'
:param as_user: identifying the user who ran the job
:param failed_nodes: can either ben true or false. true for run all nodes and false for running only failed nodes
:param execution_id: for specified what execution to rerun
:param options: add options for running a job
:return: api response
"""
request_body: Dict[str, Any] = {}
if arg_string:
request_body["argString"] = arg_string
if log_level:
request_body["loglevel"] = log_level
if as_user:
request_body["asUser"] = as_user
if failed_nodes:
request_body["failedNodes"] = failed_nodes
if options:
request_body["options"] = options
return self._http_request(
method="POST",
url_suffix=f"/job/{job_id}/retry/{execution_id}",
params=self.params,
data=str(request_body),
)
def job_execution_query(
self,
status_filter: str,
aborted_by_filter: str,
user_filter: str,
recent_filter: str,
older_filter: str,
begin: str,
end: str,
adhoc: str,
job_id_list_filter: list,
exclude_job_id_list_filter: list,
job_list_filter: list,
exclude_job_list_filter: list,
group_path: str,
group_path_exact: str,
exclude_group_path: str,
exclude_group_path_exact: str,
job_filter: str,
exclude_job_filter: str,
job_exact_filter: str,
exclude_job_exact_filter: str,
execution_type_filter: str,
max_results: Optional[int],
offset: Optional[int],
project_name: str,
):
"""
This function returns previous and active executions
:param status_filter: execution status, can be either: "running", succeeded", "failed" or "aborted"
:param aborted_by_filter: Username who aborted an execution
:param user_filter: Username who started the execution
:param recent_filter: for specify when the execution has occur. the format is 'XY' when 'X' is a number and 'Y'
can be: h - hour, d - day, w - week, m - month, y - year
:param older_filter: return executions that completed before the specified relative period of time. works with
the same format as 'recent_filter'
:param begin: Specify exact date for earliest execution completion time
:param end: Specify exact date for latest execution completion time
:param adhoc: can be true or false. true for include Adhoc executions
:param job_id_list_filter: specify a Job IDs to filter by
:param exclude_job_id_list_filter: specify a Job IDs to exclude
:param job_list_filter: specify a full job group/name to include.
:param exclude_job_list_filter: specify a full Job group/name to exclude
:param group_path: specify a group or partial group to include all jobs within that group path.
:param group_path_exact: like 'group_path' but you need to specify an exact group path to match
:param exclude_group_path specify a group or partial group path to exclude all jobs within that group path
:param exclude_group_path_exact: specify a group or partial group path to exclude jobs within that group path
:param job_filter: provide here a job name to query
:param exclude_job_filter: provide here a job name to exclude
:param job_exact_filter: provide here an exact job name to match
:param exclude_job_exact_filter: specify an exact job name to exclude
:param execution_type_filter: specify the execution type, can be: 'scheduled', 'user' or 'user-scheduled'
:param max_results: maximum number of results to get from the api
:param offset: offset for first result to include
:param project_name: the project name that you want to get its execution
:return: api response
"""
request_params: Dict[str, Any] = {}
if status_filter:
request_params["statusFilter"] = status_filter
if aborted_by_filter:
request_params["abortedbyFilter"] = aborted_by_filter
if user_filter:
request_params["userFilter"] = user_filter
if recent_filter:
request_params["recentFilter"] = recent_filter
if older_filter:
request_params["olderFilter"] = older_filter
if begin:
request_params["begin"] = begin
if end:
request_params["end"] = end
if adhoc:
request_params["adhoc"] = adhoc
if job_id_list_filter:
request_params["jobIdListFilter"] = job_id_list_filter
if exclude_job_id_list_filter:
request_params["excludeJobIdListFilter"] = exclude_job_id_list_filter
if job_list_filter:
request_params["jobListFilter"] = job_list_filter
if exclude_job_list_filter:
request_params["excludeJobListFilter"] = exclude_job_list_filter
if group_path:
request_params["groupPath"] = group_path
if group_path_exact:
request_params["groupPathExact"] = group_path_exact
if exclude_group_path:
request_params["excludeGroupPath"] = exclude_group_path
if exclude_group_path_exact:
request_params["excludeGroupPathExact"] = exclude_group_path_exact
if job_filter:
request_params["jobFilter"] = job_filter
if exclude_job_filter:
request_params["excludeJobFilter"] = exclude_job_filter
if job_exact_filter:
request_params["jobExactFilter"] = job_exact_filter
if exclude_job_exact_filter:
request_params["excludeJobExactFilter"] = exclude_job_exact_filter
if execution_type_filter:
request_params["executionTypeFilter"] = execution_type_filter
if max_results:
request_params["max"] = max_results
if offset:
request_params["offset"] = offset
project_name_to_pass = project_name if project_name else self.project_name
request_params["max"] = max_results if max_results else MAX_RESULTS
request_params.update(self.params)
return self._http_request(
method="POST",
url_suffix=f"/project/{project_name_to_pass}/executions",
params=request_params,
)
def job_execution_output(self, execution_id: int):
"""
This function gets metadata regarding workflow state
:param execution_id: id to execute.
:return: api response
"""
return self._http_request(
method="GET",
url_suffix=f"/execution/{execution_id}/output/state",
params=self.params,
)
def job_execution_abort(self, execution_id: int):
"""
This function aborts live executions
:param execution_id: id to abort execution
:return: api response
"""
return self._http_request(
method="GET",
url_suffix=f"/execution/{execution_id}/abort",
params=self.params,
)
def adhoc_run(
self,
project_name: str,
exec_command: str,
node_thread_count: str,
node_keepgoing: str,
as_user: str,
node_filter: str,
):
"""
This function executes shell commands in nodes.
:param project_name: project to run the command on
:param exec_command: the shell command that you want to run
:param node_thread_count: threadcount to use
:param node_keepgoing: 'true' for continue executing on other nodes after a failure. 'false' otherwise
:param as_user: specifies a username identifying the user who ran the command
:param node_filter: node filter to add
:return: api response
"""
request_params: Dict[str, Any] = {}
if exec_command:
request_params["exec"] = | |
'finished':
return finished
elif mode == 'canceled':
return canceled
    def _prepare_order_line_procurement(self, cr, uid, order, line, move_id, date_planned, context=None):
        """Build the field values used to create the procurement.order
        that will supply this sales order line.

        :param order: sale.order browse record the line belongs to
        :param line: sale.order.line browse record being procured
        :param move_id: ID of the associated stock.move, or False when the
                        line is a service (no stock move)
        :param date_planned: planned date string for the procurement
        :return: dict of procurement.order field values
        """
        return {
            'name': line.name,
            'origin': order.name,
            'date_planned': date_planned,
            'product_id': line.product_id.id,
            'product_qty': line.product_uom_qty,
            'product_uom': line.product_uom.id,
            # Fall back to the UoM quantity/unit when no unit-of-sale is set.
            'product_uos_qty': (line.product_uos and line.product_uos_qty)
                               or line.product_uom_qty,
            'product_uos': (line.product_uos and line.product_uos.id)
                           or line.product_uom.id,
            # Source the goods from the shop warehouse's stock location.
            'location_id': order.shop_id.warehouse_id.lot_stock_id.id,
            'procure_method': line.type,
            'move_id': move_id,
            'company_id': order.company_id.id,
            'note': line.name,
        }
    def _prepare_order_line_move(self, cr, uid, order, line, picking_id, date_planned, context=None):
        """Build the field values used to create the stock.move that
        delivers this sales order line.

        :param order: sale.order browse record the line belongs to
        :param line: sale.order.line browse record to ship
        :param picking_id: ID of the stock.picking the move is attached to
        :param date_planned: planned date string for the move
        :return: dict of stock.move field values
        """
        # Move goods from the warehouse stock location to its output location.
        location_id = order.shop_id.warehouse_id.lot_stock_id.id
        output_id = order.shop_id.warehouse_id.lot_output_id.id
        return {
            'name': line.name,
            'picking_id': picking_id,
            'product_id': line.product_id.id,
            'date': date_planned,
            'date_expected': date_planned,
            'product_qty': line.product_uom_qty,
            'product_uom': line.product_uom.id,
            # Fall back to the UoM quantity/unit when no unit-of-sale is set.
            'product_uos_qty': (line.product_uos and line.product_uos_qty) or line.product_uom_qty,
            'product_uos': (line.product_uos and line.product_uos.id)
                           or line.product_uom.id,
            'product_packaging': line.product_packaging.id,
            # Prefer the line-level allotment partner over the order's
            # shipping address.
            'partner_id': line.address_allotment_id.id or order.partner_shipping_id.id,
            'location_id': location_id,
            'location_dest_id': output_id,
            'sale_line_id': line.id,
            'tracking_id': False,
            'state': 'draft',
            #'state': 'waiting',
            'company_id': order.company_id.id,
            'price_unit': line.product_id.standard_price or 0.0
        }
    def _prepare_order_picking(self, cr, uid, order, context=None):
        """Build the field values used to create the outgoing stock.picking
        wrapping this order's stock moves.

        :param order: sale.order browse record being shipped
        :return: dict of stock.picking field values
        """
        # Name the picking from the outgoing-picking sequence.
        pick_name = self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.out')
        return {
            'name': pick_name,
            'origin': order.name,
            'date': order.date_order,
            'type': 'out',
            'state': 'auto',
            'move_type': order.picking_policy,
            'sale_id': order.id,
            'partner_id': order.partner_shipping_id.id,
            'note': order.note,
            # Mark for invoicing only when the order invoices on delivery.
            'invoice_state': (order.order_policy=='picking' and '2binvoiced') or 'none',
            'company_id': order.company_id.id,
        }
    def ship_recreate(self, cr, uid, order, line, move_id, proc_id):
        # FIXME: deals with potentially cancelled shipments, seems broken (specially if shipment has production lot)
        """
        Define ship_recreate for process after shipping exception: copy the
        quantities of cancelled moves for this line back onto the freshly
        created move/procurement.

        :param order: sales order to which the order lines belong
        :param line: sales order line records to procure
        :param move_id: the ID of stock move
        :param proc_id: the ID of procurement
        """
        move_obj = self.pool.get('stock.move')
        if order.state == 'shipping_except':
            for pick in order.picking_ids:
                for move in pick.move_lines:
                    if move.state == 'cancel':
                        # Look up the cancelled moves of this line in this picking.
                        mov_ids = move_obj.search(cr, uid, [('state', '=', 'cancel'),('sale_line_id', '=', line.id),('picking_id', '=', pick.id)])
                        if mov_ids:
                            for mov in move_obj.browse(cr, uid, mov_ids):
                                # FIXME: the following seems broken: what if move_id doesn't exist? What if there are several mov_ids? Shouldn't that be a sum?
                                move_obj.write(cr, uid, [move_id], {'product_qty': mov.product_qty, 'product_uos_qty': mov.product_uos_qty})
                                self.pool.get('procurement.order').write(cr, uid, [proc_id], {'product_qty': mov.product_qty, 'product_uos_qty': mov.product_uos_qty})
        return True
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
date_planned = datetime.strptime(start_date, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(days=line.delay or 0.0)
date_planned = (date_planned - timedelta(days=order.company_id.security_lead)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return date_planned
    def _create_pickings_and_procurements(self, cr, uid, order, order_lines, picking_id=False, context=None):
        """Create the required procurements to supply sales order lines, also connecting
        the procurements to appropriate stock moves in order to bring the goods to the
        sales order's requested location.

        If ``picking_id`` is provided, the stock moves will be added to it, otherwise
        a standard outgoing picking will be created to wrap the stock moves, as returned
        by :meth:`~._prepare_order_picking`.

        Modules that wish to customize the procurements or partition the stock moves over
        multiple stock pickings may override this method and call ``super()`` with
        different subsets of ``order_lines`` and/or preset ``picking_id`` values.

        :param browse_record order: sales order to which the order lines belong
        :param list(browse_record) order_lines: sales order line records to procure
        :param int picking_id: optional ID of a stock picking to which the created stock moves
                               will be added. A new picking will be created if ommitted.
        :return: True
        """
        move_obj = self.pool.get('stock.move')
        picking_obj = self.pool.get('stock.picking')
        procurement_obj = self.pool.get('procurement.order')
        proc_ids = []

        for line in order_lines:
            if line.state == 'done':
                continue

            date_planned = self._get_date_planned(cr, uid, order, line, order.date_order, context=context)

            if line.product_id:
                if line.product_id.type in ('product', 'consu'):
                    # Stockable/consumable products need a stock move; create
                    # the wrapping picking lazily on the first such line.
                    if not picking_id:
                        picking_id = picking_obj.create(cr, uid, self._prepare_order_picking(cr, uid, order, context=context))
                    move_id = move_obj.create(cr, uid, self._prepare_order_line_move(cr, uid, order, line, picking_id, date_planned, context=context))
                else:
                    # a service has no stock move
                    move_id = False

                proc_id = procurement_obj.create(cr, uid, self._prepare_order_line_procurement(cr, uid, order, line, move_id, date_planned, context=context))
                proc_ids.append(proc_id)
                line.write({'procurement_id': proc_id})
                self.ship_recreate(cr, uid, order, line, move_id, proc_id)

        # Push the picking and all procurements through the workflow engine.
        wf_service = netsvc.LocalService("workflow")
        if picking_id:
            wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
        for proc_id in proc_ids:
            wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_confirm', cr)

        val = {}
        if order.state == 'shipping_except':
            # Leaving a shipping exception: return to progress and clear the
            # shipped flag; manual-policy orders with uninvoiced lines go
            # back to 'manual' instead.
            val['state'] = 'progress'
            val['shipped'] = False

            if (order.order_policy == 'manual'):
                for line in order.order_line:
                    if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
                        val['state'] = 'manual'
                        break
        order.write(val)
        return True
def action_ship_create(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
self._create_pickings_and_procurements(cr, uid, order, order.order_line, None, context=context)
return True
    def action_ship_end(self, cr, uid, ids, context=None):
        """Workflow action run when shipping completes: flag each order as
        shipped, resolve shipping exceptions, and mark exception lines done.
        """
        for order in self.browse(cr, uid, ids, context=context):
            val = {'shipped': True}
            if order.state == 'shipping_except':
                # Leaving a shipping exception: back to progress, or to
                # 'manual' when uninvoiced lines remain under manual policy.
                val['state'] = 'progress'
                if (order.order_policy == 'manual'):
                    for line in order.order_line:
                        if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
                            val['state'] = 'manual'
                            break
            for line in order.order_line:
                # NOTE(review): towrite is reset per line, so write() is
                # issued once per exception line rather than batched -
                # presumably intentional, but confirm before changing.
                towrite = []
                if line.state == 'exception':
                    towrite.append(line.id)
                if towrite:
                    self.pool.get('sale.order.line').write(cr, uid, towrite, {'state': 'done'}, context=context)
            # Return value of write() is unused here.
            res = self.write(cr, uid, [order.id], val)
        return True
def has_stockable_products(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
class sale_order_line(osv.osv):
def _number_packages(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
try:
res[line.id] = int((line.product_uom_qty+line.product_packaging.qty-0.0001) / line.product_packaging.qty)
except:
res[line.id] = 1
return res
_inherit = 'sale.order.line'
_columns = {
'delay': fields.float('Delivery Lead Time', required=True, help="Number of days between the order confirmation and the shipping of the products to the customer", readonly=True, states={'draft': [('readonly', False)]}),
'procurement_id': fields.many2one('procurement.order', 'Procurement'),
'property_ids': fields.many2many('mrp.property', 'sale_order_line_property_rel', 'order_id', 'property_id', 'Properties', readonly=True, states={'draft': [('readonly', False)]}),
'product_packaging': fields.many2one('product.packaging', 'Packaging'),
'move_ids': fields.one2many('stock.move', 'sale_line_id', 'Inventory Moves', readonly=True),
'number_packages': fields.function(_number_packages, type='integer', string='Number Packages'),
}
_defaults = {
'delay': 0.0,
'product_packaging': False,
}
def _get_line_qty(self, cr, uid, line, context=None):
if line.procurement_id and not (line.order_id.invoice_quantity=='order'):
return self.pool.get('procurement.order').quantity_get(cr, uid,
line.procurement_id.id, context=context)
else:
return super(sale_order_line, self)._get_line_qty(cr, uid, line, context=context)
def _get_line_uom(self, cr, uid, line, context=None):
if line.procurement_id and not (line.order_id.invoice_quantity=='order'):
return self.pool.get('procurement.order').uom_get(cr, uid,
line.procurement_id.id, context=context)
else:
return super(sale_order_line, self)._get_line_uom(cr, uid, line, context=context)
def button_cancel(self, cr, uid, ids, context=None):
res = super(sale_order_line, self).button_cancel(cr, uid, ids, context=context)
for line in self.browse(cr, uid, ids, context=context):
for move_line in line.move_ids:
if move_line.state != 'cancel':
raise osv.except_osv(
_('Cannot cancel sales order line!'),
_('You must first cancel stock moves attached to this sales order line.'))
return res
def copy_data(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'move_ids': []})
return super(sale_order_line, self).copy_data(cr, uid, id, default, context=context)
def product_packaging_change(self, cr, uid, ids, pricelist, product, qty=0, uom=False,
partner_id=False, packaging=False, flag=False, context=None):
if not product:
return {'value': {'product_packaging': False}}
product_obj = self.pool.get('product.product')
product_uom_obj = self.pool.get('product.uom')
pack_obj = self.pool.get('product.packaging')
warning = {}
result = {}
warning_msgs = ''
if flag:
res = self.product_id_change(cr, uid, ids, pricelist=pricelist,
product=product, qty=qty, uom=uom, partner_id=partner_id,
packaging=packaging, flag=False, context=context)
warning_msgs = res.get('warning') and res['warning']['message']
products = product_obj.browse(cr, uid, product, context=context)
if not products.packaging:
packaging = result['product_packaging'] = False
elif not packaging and products.packaging and not flag:
packaging = products.packaging[0].id
result['product_packaging'] = packaging
if packaging:
default_uom = products.uom_id and products.uom_id.id
pack = pack_obj.browse(cr, uid, packaging, context=context)
q = product_uom_obj._compute_qty(cr, uid, uom, pack.qty, default_uom)
# qty = qty - qty % q + q
if qty and (q and not (qty % q) == 0):
ean = pack.ean or _('(n/a)')
qty_pack = pack.qty
type_ul = pack.ul
if not warning_msgs:
warn_msg = _("You selected a quantity of %d Units.\n"
"But it's not compatible with the selected packaging.\n"
"Here is a proposition of quantities according to the packaging:\n"
"EAN: %s Quantity: %s Type of ul: %s") % \
(qty, ean, qty_pack, type_ul.name)
warning_msgs += _("Picking Information ! : ") + warn_msg + "\n\n"
warning = {
'title': _('Configuration Error!'),
'message': warning_msgs
}
result['product_uom_qty'] = qty
return {'value': result, 'warning': warning}
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
context = context or {}
product_uom_obj = self.pool.get('product.uom')
partner_obj = self.pool.get('res.partner')
product_obj = self.pool.get('product.product')
warning = {}
res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty=qty,
uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if not product:
res['value'].update({'product_packaging': False})
return res
#update of result obtained in super function
res_packing = self.product_packaging_change(cr, uid, ids, pricelist, product, qty, uom, partner_id, packaging, context=context)
res['value'].update(res_packing.get('value', {}))
warning_msgs = res_packing.get('warning') and res_packing['warning']['message'] or ''
product_obj = product_obj.browse(cr, uid, product, context=context)
res['value']['delay'] = (product_obj.sale_delay or 0.0)
res['value']['type'] = product_obj.procure_method
#check if product is available, and if not: raise | |
[
{"range": {"@timestamp": {"gte": qgte, "lte": qlte, "format": qtformat}}}]
cquery.query.filtered.filter.bool.must_not = []
cquery.size = qsize
cquery.aggs["34"].date_histogram.field = "@timestamp"
cquery.aggs["34"].date_histogram.interval = qinterval
cquery.aggs["34"].date_histogram.time_zone = "Europe/Helsinki"
cquery.aggs["34"].date_histogram.min_doc_count = qmin_doc_count
cquery.aggs["34"].date_histogram.extended_bounds.min = qgte
cquery.aggs["34"].date_histogram.extended_bounds.max = qlte
#DFS FS metrics
cquery.aggs["34"].aggs["1"].avg.field = "BlocksTotal"
cquery.aggs["34"].aggs["2"].avg.field = "MissingBlocks"
cquery.aggs["34"].aggs["3"].avg.field = "MissingReplOneBlocks"
cquery.aggs["34"].aggs["4"].avg.field = "ExpiredHeartbeats"
cquery.aggs["34"].aggs["5"].avg.field = "TransactionsSinceLastCheckpoint"
cquery.aggs["34"].aggs["6"].avg.field = "TransactionsSinceLastLogRoll"
cquery.aggs["34"].aggs["7"].avg.field = "LastWrittenTransactionId"
cquery.aggs["34"].aggs["8"].avg.field = "LastCheckpointTime"
cquery.aggs["34"].aggs["9"].avg.field = "UnderReplicatedBlocks"
cquery.aggs["34"].aggs["10"].avg.field = "CorruptBlocks"
cquery.aggs["34"].aggs["11"].avg.field = "CapacityTotal"
cquery.aggs["34"].aggs["12"].avg.field = "CapacityTotalGB"
cquery.aggs["34"].aggs["13"].avg.field = "CapacityUsed"
#cquery.aggs["34"].aggs["14"].avg.field = "CapacityTotalGB" ####
#cquery.aggs["34"].aggs["15"].avg.field = "CapacityUsed"
cquery.aggs["34"].aggs["16"].avg.field = "CapacityUsedGB"
cquery.aggs["34"].aggs["17"].avg.field = "CapacityRemaining"
cquery.aggs["34"].aggs["18"].avg.field = "CapacityRemainingGB"
cquery.aggs["34"].aggs["19"].avg.field = "CapacityUsedNonDFS"
cquery.aggs["34"].aggs["20"].avg.field = "TotalLoad"
cquery.aggs["34"].aggs["21"].avg.field = "SnapshottableDirectories"
cquery.aggs["34"].aggs["22"].avg.field = "Snapshots"
cquery.aggs["34"].aggs["23"].avg.field = "FilesTotal"
cquery.aggs["34"].aggs["24"].avg.field = "PendingReplicationBlocks"
cquery.aggs["34"].aggs["25"].avg.field = "ScheduledReplicationBlocks"
cquery.aggs["34"].aggs["26"].avg.field = "PendingDeletionBlocks"
cquery.aggs["34"].aggs["27"].avg.field = "ExcessBlocks"
cquery.aggs["34"].aggs["28"].avg.field = "PostponedMisreplicatedBlocks"
cquery.aggs["34"].aggs["29"].avg.field = "PendingDataNodeMessageCount"
cquery.aggs["34"].aggs["30"].avg.field = "MillisSinceLastLoadedEdits"
cquery.aggs["34"].aggs["31"].avg.field = "BlockCapacity"
cquery.aggs["34"].aggs["32"].avg.field = "StaleDataNodes"
cquery.aggs["34"].aggs["33"].avg.field = "TotalFiles"
cqueryd = cquery.to_dict()
return cqueryd
def jvmNNquery(self, qstring, qgte, qlte, qsize, qinterval, wildCard=True, qtformat="epoch_millis",
qmin_doc_count=1):
cquery = Dict()
cquery.query.filtered.query.query_string.query = qstring
cquery.query.filtered.query.query_string.analyze_wildcard = wildCard
cquery.query.filtered.filter.bool.must = [
{"range": {"@timestamp": {"gte": qgte, "lte": qlte, "format": qtformat}}}]
cquery.query.filtered.filter.bool.must_not = []
cquery.size = qsize
cquery.aggs["13"].date_histogram.field = "@timestamp"
cquery.aggs["13"].date_histogram.interval = qinterval
cquery.aggs["13"].date_histogram.time_zone = "Europe/Helsinki"
cquery.aggs["13"].date_histogram.min_doc_count = qmin_doc_count
cquery.aggs["13"].date_histogram.extended_bounds.min = qgte
cquery.aggs["13"].date_histogram.extended_bounds.max = qlte
#NN JVM Metrics
cquery.aggs["13"].aggs["1"].avg.field = "MemNonHeapUsedM"
cquery.aggs["13"].aggs["2"].avg.field = "MemNonHeapCommittedM"
cquery.aggs["13"].aggs["3"].avg.field = "MemHeapUsedM"
cquery.aggs["13"].aggs["4"].avg.field = "MemHeapCommittedM"
cquery.aggs["13"].aggs["5"].avg.field = "MemHeapMaxM"
cquery.aggs["13"].aggs["6"].avg.field = "MemMaxM"
cquery.aggs["13"].aggs["7"].avg.field = "GcCountParNew"
cquery.aggs["13"].aggs["8"].avg.field = "GcTimeMillisParNew"
cquery.aggs["13"].aggs["9"].avg.field = "GcCountConcurrentMarkSweep"
cquery.aggs["13"].aggs["10"].avg.field = "GcTimeMillisConcurrentMarkSweep"
cquery.aggs["13"].aggs["11"].avg.field = "GcCount"
cquery.aggs["13"].aggs["12"].avg.field = "GcTimeMillis"
cquery.aggs["13"].aggs["14"].avg.field = "GcNumWarnThresholdExceeded"
cquery.aggs["13"].aggs["15"].avg.field = "GcNumInfoThresholdExceeded"
cquery.aggs["13"].aggs["16"].avg.field = "GcTotalExtraSleepTime"
cquery.aggs["13"].aggs["17"].avg.field = "ThreadsNew"
cquery.aggs["13"].aggs["18"].avg.field = "ThreadsRunnable"
cquery.aggs["13"].aggs["19"].avg.field = "ThreadsBlocked"
cquery.aggs["13"].aggs["20"].avg.field = "ThreadsWaiting"
cquery.aggs["13"].aggs["21"].avg.field = "ThreadsTimedWaiting"
cquery.aggs["13"].aggs["22"].avg.field = "ThreadsTerminated"
cquery.aggs["13"].aggs["23"].avg.field = "LogError"
cquery.aggs["13"].aggs["24"].avg.field = "LogFatal"
cquery.aggs["13"].aggs["25"].avg.field = "LogWarn"
cquery.aggs["13"].aggs["26"].avg.field = "LogInfo"
cqueryd = cquery.to_dict()
return cqueryd
def jvmMRQuery(self, qstring, qgte, qlte, qsize, qinterval, wildCard=True, qtformat="epoch_millis",
qmin_doc_count=1):
cquery = Dict()
cquery.query.filtered.query.query_string.query = qstring
cquery.query.filtered.query.query_string.analyze_wildcard = wildCard
cquery.query.filtered.filter.bool.must = [
{"range": {"@timestamp": {"gte": qgte, "lte": qlte, "format": qtformat}}}]
cquery.query.filtered.filter.bool.must_not = []
cquery.size = qsize
cquery.aggs["13"].date_histogram.field = "@timestamp"
cquery.aggs["13"].date_histogram.interval = qinterval
cquery.aggs["13"].date_histogram.time_zone = "Europe/Helsinki"
cquery.aggs["13"].date_histogram.min_doc_count = qmin_doc_count
cquery.aggs["13"].date_histogram.extended_bounds.min = qgte
cquery.aggs["13"].date_histogram.extended_bounds.max = qlte
# NN JVM Metrics
cquery.aggs["13"].aggs["1"].avg.field = "MemNonHeapUsedM"
cquery.aggs["13"].aggs["2"].avg.field = "MemNonHeapCommittedM"
cquery.aggs["13"].aggs["3"].avg.field = "MemHeapUsedM"
cquery.aggs["13"].aggs["4"].avg.field = "MemHeapCommittedM"
cquery.aggs["13"].aggs["5"].avg.field = "MemHeapMaxM"
cquery.aggs["13"].aggs["6"].avg.field = "MemMaxM"
cquery.aggs["13"].aggs["7"].avg.field = "GcCountParNew"
cquery.aggs["13"].aggs["8"].avg.field = "GcTimeMillisParNew"
cquery.aggs["13"].aggs["9"].avg.field = "GcCountConcurrentMarkSweep"
cquery.aggs["13"].aggs["10"].avg.field = "GcTimeMillisConcurrentMarkSweep"
cquery.aggs["13"].aggs["11"].avg.field = "GcCount"
cquery.aggs["13"].aggs["12"].avg.field = "GcTimeMillis"
cquery.aggs["13"].aggs["17"].avg.field = "ThreadsNew"
cquery.aggs["13"].aggs["18"].avg.field = "ThreadsRunnable"
cquery.aggs["13"].aggs["19"].avg.field = "ThreadsBlocked"
cquery.aggs["13"].aggs["20"].avg.field = "ThreadsWaiting"
cquery.aggs["13"].aggs["21"].avg.field = "ThreadsTimedWaiting"
cquery.aggs["13"].aggs["22"].avg.field = "ThreadsTerminated"
cquery.aggs["13"].aggs["23"].avg.field = "LogError"
cquery.aggs["13"].aggs["24"].avg.field = "LogFatal"
cquery.aggs["13"].aggs["25"].avg.field = "LogWarn"
cquery.aggs["13"].aggs["26"].avg.field = "LogInfo"
cqueryd = cquery.to_dict()
return cqueryd
def resourceQueueQuery(self, qstring, qgte, qlte, qsize, qinterval, wildCard=True, qtformat="epoch_millis",
qmin_doc_count=1):
cquery = Dict()
cquery.query.filtered.query.query_string.query = qstring
cquery.query.filtered.query.query_string.analyze_wildcard = wildCard
cquery.query.filtered.filter.bool.must = [
{"range": {"@timestamp": {"gte": qgte, "lte": qlte, "format": qtformat}}}]
cquery.query.filtered.filter.bool.must_not = []
cquery.size = qsize
cquery.aggs["23"].date_histogram.field = "@timestamp"
cquery.aggs["23"].date_histogram.interval = qinterval
cquery.aggs["23"].date_histogram.time_zone = "Europe/Helsinki"
cquery.aggs["23"].date_histogram.min_doc_count = qmin_doc_count
cquery.aggs["23"].date_histogram.extended_bounds.min = qgte
cquery.aggs["23"].date_histogram.extended_bounds.max = qlte
# Resource Manager Queue Metrics
cquery.aggs["23"].aggs["1"].avg.field = "running_0"
cquery.aggs["23"].aggs["2"].avg.field = "running_60"
cquery.aggs["23"].aggs["3"].avg.field = "running_300"
cquery.aggs["23"].aggs["4"].avg.field = "running_1440"
cquery.aggs["23"].aggs["5"].avg.field = "AppsSubmitted"
cquery.aggs["23"].aggs["6"].avg.field = "AppsPending"
cquery.aggs["23"].aggs["7"].avg.field = "AppsCompleted"
cquery.aggs["23"].aggs["8"].avg.field = "AllocatedMB"
cquery.aggs["23"].aggs["9"].avg.field = "AllocatedVCores"
cquery.aggs["23"].aggs["10"].avg.field = "AllocatedContainers"
cquery.aggs["23"].aggs["11"].avg.field = "AggregateContainersAllocated"
cquery.aggs["23"].aggs["12"].avg.field = "AggregateContainersReleased"
cquery.aggs["23"].aggs["13"].avg.field = "AvailableMB"
cquery.aggs["23"].aggs["14"].avg.field = "AvailableVCores"
cquery.aggs["23"].aggs["15"].avg.field = "PendingVCores"
cquery.aggs["23"].aggs["16"].avg.field = "PendingContainers"
cquery.aggs["23"].aggs["17"].avg.field = "ReservedMB"
cquery.aggs["23"].aggs["18"].avg.field = "ReservedContainers"
cquery.aggs["23"].aggs["19"].avg.field = "ActiveUsers"
cquery.aggs["23"].aggs["20"].avg.field = "ActiveApplications"
cquery.aggs["23"].aggs["21"].avg.field = "AppAttemptFirstContainerAllocationDelayNumOps"
cquery.aggs["23"].aggs["22"].avg.field = "AppAttemptFirstContainerAllocationDelayAvgTime"
cqueryd = cquery.to_dict()
return cqueryd
def clusterMetricsQuery(self, qstring, qgte, qlte, qsize, qinterval, wildCard=True, qtformat="epoch_millis",
qmin_doc_count=1):
cquery = Dict()
cquery.query.filtered.query.query_string.query = qstring
cquery.query.filtered.query.query_string.analyze_wildcard = wildCard
cquery.query.filtered.filter.bool.must = [
{"range": {"@timestamp": {"gte": qgte, "lte": qlte, "format": qtformat}}}]
cquery.query.filtered.filter.bool.must_not = []
cquery.size = qsize
cquery.aggs["2"].date_histogram.field = "@timestamp"
cquery.aggs["2"].date_histogram.interval = qinterval
cquery.aggs["2"].date_histogram.time_zone = "Europe/Helsinki"
cquery.aggs["2"].date_histogram.min_doc_count = qmin_doc_count
cquery.aggs["2"].date_histogram.extended_bounds.min = qgte
cquery.aggs["2"].date_histogram.extended_bounds.max = qlte
# Cluster Metrics
cquery.aggs["2"].aggs["1"].avg.field = "NumActiveNMs"
cquery.aggs["2"].aggs["3"].avg.field = "NumDecommissionedNMs"
cquery.aggs["2"].aggs["4"].avg.field = "NumLostNMs"
cquery.aggs["2"].aggs["5"].avg.field = "NumUnhealthyNMs"
cquery.aggs["2"].aggs["6"].avg.field = "AMLaunchDelayNumOps"
cquery.aggs["2"].aggs["7"].avg.field = "AMLaunchDelayAvgTime"
cquery.aggs["2"].aggs["8"].avg.field = "AMRegisterDelayNumOps"
cquery.aggs["2"].aggs["9"].avg.field = "AMRegisterDelayAvgTime"
cquery.aggs["2"].aggs["10"].avg.field = "NumRebootedNMs"
cqueryd = cquery.to_dict()
return cqueryd
def datanodeMetricsQuery(self, qstring, qgte, qlte, qsize, qinterval, wildCard=True, qtformat="epoch_millis",
qmin_doc_count=1):
cquery = Dict()
cquery.query.filtered.query.query_string.query = qstring
cquery.query.filtered.query.query_string.analyze_wildcard = wildCard
cquery.query.filtered.filter.bool.must = [
{"range": {"@timestamp": {"gte": qgte, "lte": qlte, "format": qtformat}}}]
cquery.query.filtered.filter.bool.must_not = []
cquery.size = qsize
cquery.aggs["12"].date_histogram.field = "@timestamp"
cquery.aggs["12"].date_histogram.interval = qinterval
cquery.aggs["12"].date_histogram.time_zone = "Europe/Helsinki"
cquery.aggs["12"].date_histogram.min_doc_count = qmin_doc_count
cquery.aggs["12"].date_histogram.extended_bounds.min = qgte
cquery.aggs["12"].date_histogram.extended_bounds.max = qlte
# DataNode Metrics
cquery.aggs["12"].aggs["1"].avg.field = "BytesWritten"
cquery.aggs["12"].aggs["2"].avg.field = "TotalWriteTime"
cquery.aggs["12"].aggs["3"].avg.field = "BytesRead"
cquery.aggs["12"].aggs["4"].avg.field = "TotalReadTime"
cquery.aggs["12"].aggs["5"].avg.field = "BlocksWritten"
cquery.aggs["12"].aggs["6"].avg.field = "BlocksRead"
cquery.aggs["12"].aggs["7"].avg.field = "BlocksReplicated"
cquery.aggs["12"].aggs["8"].avg.field = "BlocksRemoved"
cquery.aggs["12"].aggs["9"].avg.field = "BlocksVerified"
cquery.aggs["12"].aggs["10"].avg.field = "BlockVerificationFailures"
cquery.aggs["12"].aggs["11"].avg.field = "BlocksCached"
cquery.aggs["12"].aggs["13"].avg.field = "BlocksUncached"
cquery.aggs["12"].aggs["14"].avg.field = "ReadsFromLocalClient"
cquery.aggs["12"].aggs["15"].avg.field = "ReadsFromRemoteClient"
cquery.aggs["12"].aggs["16"].avg.field = "WritesFromLocalClient"
cquery.aggs["12"].aggs["17"].avg.field = "WritesFromRemoteClient"
cquery.aggs["12"].aggs["18"].avg.field = "BlocksGetLocalPathInfo"
cquery.aggs["12"].aggs["19"].avg.field = "RemoteBytesRead"
cquery.aggs["12"].aggs["20"].avg.field = "RemoteBytesWritten"
cquery.aggs["12"].aggs["21"].avg.field = "RamDiskBlocksWrite"
cquery.aggs["12"].aggs["22"].avg.field = "RamDiskBlocksWriteFallback"
cquery.aggs["12"].aggs["23"].avg.field = "RamDiskBytesWrite"
cquery.aggs["12"].aggs["24"].avg.field = "RamDiskBlocksReadHits"
cquery.aggs["12"].aggs["25"].avg.field = "RamDiskBlocksEvicted"
cquery.aggs["12"].aggs["27"].avg.field = "RamDiskBlocksEvictedWithoutRead"
cquery.aggs["12"].aggs["28"].avg.field = "RamDiskBlocksEvictionWindowMsNumOps"
cquery.aggs["12"].aggs["29"].avg.field = "RamDiskBlocksEvictionWindowMsAvgTime"
cquery.aggs["12"].aggs["30"].avg.field = "RamDiskBlocksLazyPersisted"
cquery.aggs["12"].aggs["31"].avg.field = "RamDiskBlocksDeletedBeforeLazyPersisted"
cquery.aggs["12"].aggs["32"].avg.field = "RamDiskBytesLazyPersisted"
cquery.aggs["12"].aggs["33"].avg.field = "RamDiskBlocksLazyPersistWindowMsNumOps"
cquery.aggs["12"].aggs["34"].avg.field = "RamDiskBlocksLazyPersistWindowMsAvgTime"
cquery.aggs["12"].aggs["35"].avg.field = "FsyncCount"
cquery.aggs["12"].aggs["36"].avg.field = "VolumeFailures"
cquery.aggs["12"].aggs["37"].avg.field = "DatanodeNetworkErrors"
cquery.aggs["12"].aggs["38"].avg.field = "ReadBlockOpNumOps"
cquery.aggs["12"].aggs["39"].avg.field = "ReadBlockOpAvgTime"
cquery.aggs["12"].aggs["40"].avg.field = "CopyBlockOpNumOps"
cquery.aggs["12"].aggs["41"].avg.field = "CopyBlockOpAvgTime"
cquery.aggs["12"].aggs["42"].avg.field = "ReplaceBlockOpNumOps"
cquery.aggs["12"].aggs["43"].avg.field = "ReplaceBlockOpAvgTime"
cquery.aggs["12"].aggs["44"].avg.field = "HeartbeatsNumOps"
cquery.aggs["12"].aggs["45"].avg.field = "HeartbeatsAvgTime"
cquery.aggs["12"].aggs["46"].avg.field = "BlockReportsNumOps"
cquery.aggs["12"].aggs["47"].avg.field = "BlockReportsAvgTime"
cquery.aggs["12"].aggs["48"].avg.field = "IncrementalBlockReportsNumOps"
cquery.aggs["12"].aggs["49"].avg.field = "IncrementalBlockReportsAvgTime"
cquery.aggs["12"].aggs["50"].avg.field = "CacheReportsNumOps"
cquery.aggs["12"].aggs["51"].avg.field = "CacheReportsAvgTime"
cquery.aggs["12"].aggs["52"].avg.field = "PacketAckRoundTripTimeNanosNumOps"
cquery.aggs["12"].aggs["53"].avg.field = "FlushNanosNumOps"
cquery.aggs["12"].aggs["54"].avg.field = "FlushNanosAvgTime"
cquery.aggs["12"].aggs["55"].avg.field = "FsyncNanosNumOps"
cquery.aggs["12"].aggs["56"].avg.field = "FsyncNanosAvgTime"
cquery.aggs["12"].aggs["57"].avg.field = "SendDataPacketBlockedOnNetworkNanosNumOps"
cquery.aggs["12"].aggs["58"].avg.field = "SendDataPacketBlockedOnNetworkNanosAvgTime"
cquery.aggs["12"].aggs["59"].avg.field = "SendDataPacketTransferNanosNumOps"
cquery.aggs["12"].aggs["60"].avg.field = "SendDataPacketTransferNanosAvgTime"
cquery.aggs["12"].aggs["61"].avg.field = "WriteBlockOpNumOps"
cquery.aggs["12"].aggs["62"].avg.field = "WriteBlockOpAvgTime"
cquery.aggs["12"].aggs["63"].avg.field = "BlockChecksumOpNumOps"
cquery.aggs["12"].aggs["64"].avg.field = "BlockChecksumOpAvgTime"
cqueryd = cquery.to_dict()
return cqueryd
def fsopDurationsQuery(self, qstring, qgte, qlte, qsize, qinterval, wildCard=True, qtformat="epoch_millis",
qmin_doc_count=1):
cquery = Dict()
cquery.query.filtered.query.query_string.query = qstring
cquery.query.filtered.query.query_string.analyze_wildcard = wildCard
cquery.query.filtered.filter.bool.must = [
{"range": {"@timestamp": {"gte": qgte, "lte": qlte, "format": qtformat}}}]
cquery.query.filtered.filter.bool.must_not = []
cquery.size = qsize
cquery.aggs["2"].date_histogram.field = "@timestamp"
cquery.aggs["2"].date_histogram.interval = qinterval
cquery.aggs["2"].date_histogram.time_zone = "Europe/Helsinki"
cquery.aggs["2"].date_histogram.min_doc_count = qmin_doc_count
cquery.aggs["2"].date_histogram.extended_bounds.min = qgte
cquery.aggs["2"].date_histogram.extended_bounds.max = qlte
# FSOpDuration metrics
cquery.aggs["2"].aggs["1"].avg.field = "ContinuousSchedulingRunNumOps"
cquery.aggs["2"].aggs["3"].avg.field = "ContinuousSchedulingRunAvgTime"
cquery.aggs["2"].aggs["4"].avg.field = "ContinuousSchedulingRunStdevTime"
cquery.aggs["2"].aggs["5"].avg.field = "ContinuousSchedulingRunIMinTime"
cquery.aggs["2"].aggs["6"].avg.field = "ContinuousSchedulingRunIMaxTime"
cquery.aggs["2"].aggs["7"].avg.field = "ContinuousSchedulingRunMinTime"
cquery.aggs["2"].aggs["8"].avg.field = "ContinuousSchedulingRunMaxTime"
cquery.aggs["2"].aggs["9"].avg.field = "ContinuousSchedulingRunINumOps"
cquery.aggs["2"].aggs["10"].avg.field = "NodeUpdateCallNumOps"
cquery.aggs["2"].aggs["11"].avg.field = "NodeUpdateCallAvgTime"
cquery.aggs["2"].aggs["12"].avg.field = "NodeUpdateCallStdevTime"
cquery.aggs["2"].aggs["13"].avg.field = "NodeUpdateCallMinTime"
cquery.aggs["2"].aggs["14"].avg.field = "NodeUpdateCallIMinTime"
cquery.aggs["2"].aggs["15"].avg.field = "NodeUpdateCallMaxTime"
cquery.aggs["2"].aggs["16"].avg.field = "NodeUpdateCallINumOps"
cquery.aggs["2"].aggs["17"].avg.field = "UpdateThreadRunNumOps"
cquery.aggs["2"].aggs["18"].avg.field = "UpdateThreadRunAvgTime"
cquery.aggs["2"].aggs["19"].avg.field = "UpdateThreadRunStdevTime"
cquery.aggs["2"].aggs["20"].avg.field = "UpdateThreadRunIMinTime"
cquery.aggs["2"].aggs["21"].avg.field = "UpdateThreadRunMinTime"
cquery.aggs["2"].aggs["22"].avg.field = "UpdateThreadRunMaxTime"
cquery.aggs["2"].aggs["23"].avg.field = "UpdateThreadRunINumOps"
cquery.aggs["2"].aggs["24"].avg.field = "UpdateCallNumOps"
cquery.aggs["2"].aggs["25"].avg.field = "UpdateCallAvgTime"
cquery.aggs["2"].aggs["26"].avg.field = "UpdateCallStdevTime"
cquery.aggs["2"].aggs["27"].avg.field = "UpdateCallIMinTime"
cquery.aggs["2"].aggs["28"].avg.field = "UpdateCallMinTime"
cquery.aggs["2"].aggs["29"].avg.field = "UpdateCallMaxTime"
cquery.aggs["2"].aggs["30"].avg.field = "UpdateCallINumOps"
cquery.aggs["2"].aggs["31"].avg.field = "PreemptCallNumOps"
cquery.aggs["2"].aggs["32"].avg.field = "PreemptCallAvgTime"
cquery.aggs["2"].aggs["33"].avg.field = "PreemptCallStdevTime"
cquery.aggs["2"].aggs["34"].avg.field = "PreemptCallINumOps"
cqueryd = cquery.to_dict()
return cqueryd
def shuffleQuery(self, qstring, qgte, qlte, qsize, qinterval, wildCard=True, qtformat="epoch_millis",
qmin_doc_count=1):
cquery = Dict()
cquery.query.filtered.query.query_string.query = qstring
cquery.query.filtered.query.query_string.analyze_wildcard = wildCard
cquery.query.filtered.filter.bool.must = [
{"range": {"@timestamp": {"gte": qgte, "lte": qlte, "format": qtformat}}}]
cquery.query.filtered.filter.bool.must_not = []
cquery.size = qsize
cquery.aggs["2"].date_histogram.field = "@timestamp"
cquery.aggs["2"].date_histogram.interval = qinterval
cquery.aggs["2"].date_histogram.time_zone = "Europe/Helsinki"
cquery.aggs["2"].date_histogram.min_doc_count = qmin_doc_count
cquery.aggs["2"].date_histogram.extended_bounds.min = qgte
cquery.aggs["2"].date_histogram.extended_bounds.max = qlte
#Shuffle metrics
cquery.aggs["2"].aggs["1"].avg.field = "ShuffleConnections"
cquery.aggs["2"].aggs["3"].avg.field = "ShuffleOutputBytes"
cquery.aggs["2"].aggs["4"].avg.field = "ShuffleOutputsFailed"
cquery.aggs["2"].aggs["5"].avg.field = "ShuffleOutputsOK"
cqueryd = cquery.to_dict()
return cqueryd
def queryByProcess(self, qstring, qgte, qlte, qsize, qinterval, wildCard=True, qtformat="epoch_millis",
qmin_doc_count=1):
cquery = Dict()
cquery.query.filtered.query.query_string.query = qstring
cquery.query.filtered.query.query_string.analyze_wildcard = wildCard
cquery.query.filtered.filter.bool.must = [
{"range": {"@timestamp": {"gte": qgte, "lte": qlte, "format": qtformat}}}]
cquery.query.filtered.filter.bool.must_not = []
cquery.size = qsize
cquery.aggs["2"].date_histogram.field = "@timestamp"
cquery.aggs["2"].date_histogram.interval = qinterval
cquery.aggs["2"].date_histogram.time_zone = "Europe/Helsinki"
cquery.aggs["2"].date_histogram.min_doc_count = qmin_doc_count
cquery.aggs["2"].date_histogram.extended_bounds.min = qgte
cquery.aggs["2"].date_histogram.extended_bounds.max = qlte
cquery.fields = ["*", "_source"]
cquery.script_fields = {}
cquery.fielddata_fields = ["@timestamp"]
cquery.sort = [{"@timestamp": {"order": "desc", "unmapped_type": "boolean"}}]
cqueryd = cquery.to_dict()
return cqueryd
def stormQuery(self, qstring, qgte, qlte, qsize, qinterval, bolts, spouts, wildCard=True, qtformat="epoch_millis",
qmin_doc_count=1):
cquery = Dict()
cquery.query.filtered.query.query_string.query = qstring
cquery.query.filtered.query.query_string.analyze_wildcard = wildCard
cquery.query.filtered.filter.bool.must = [
{"range": {"@timestamp": {"gte": qgte, "lte": qlte, "format": qtformat}}}]
cquery.query.filtered.filter.bool.must_not = []
cquery.size = qsize
cquery.aggs["1"].date_histogram.field = "@timestamp"
cquery.aggs["1"].date_histogram.interval = qinterval
cquery.aggs["1"].date_histogram.time_zone = "Europe/Helsinki"
cquery.aggs["1"].date_histogram.min_doc_count = qmin_doc_count
cquery.aggs["1"].date_histogram.extended_bounds.min = qgte
cquery.aggs["1"].date_histogram.extended_bounds.max = qlte
# Storm metrics
cquery.aggs["1"].aggs["2"].avg.field = "executorsTotal"
cquery.aggs["1"].aggs["3"].avg.field = "msgTimeout"
cquery.aggs["1"].aggs["4"].avg.field = "tasksTotal"
cquery.aggs["1"].aggs["5"].avg.field = "workersTotal"
cquery.aggs["1"].aggs["6"].avg.field = "topologyStats_10m_acked"
cquery.aggs["1"].aggs["7"].avg.field = "topologyStats_10m_completeLatency"
cquery.aggs["1"].aggs["8"].avg.field = "topologyStats_10m_emitted"
cquery.aggs["1"].aggs["9"].avg.field = "topologyStats_10m_failed"
cquery.aggs["1"].aggs["10"].avg.field = "topologyStats_10m_transferred"
cquery.aggs["1"].aggs["11"].avg.field = "topologyStats_10m_window"
cquery.aggs["1"].aggs["12"].avg.field = "topologyStats_1d_acked"
cquery.aggs["1"].aggs["13"].avg.field = "topologyStats_1d_completeLatency"
cquery.aggs["1"].aggs["14"].avg.field | |
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
    def buildAttributes(self, node, attrs, already_processed):
        # No attributes of its own; delegate attribute parsing to the base class.
        super(VoidPickUpResponse, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        # Parse the single <PickUpVoided> boolean child, then let the base
        # class handle any inherited children (fromsubclass_=True).
        if nodeName_ == 'PickUpVoided':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'PickUpVoided')
            ival_ = self.gds_validate_boolean(ival_, node, 'PickUpVoided')
            self.PickUpVoided = ival_
            self.PickUpVoided_nsprefix_ = child_.prefix
        super(VoidPickUpResponse, self).buildChildren(child_, node, nodeName_, True)
# end class VoidPickUpResponse
class ValidatePickUpRequest(Request):
"""ValidatePickUpRequest"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = Request
    def __init__(self, BillingAccountNumber=None, PartnerID=None, PickupInstruction=None, Address=None, ShipmentSummary=None, NotificationEmails=None, gds_collector_=None, **kwargs_):
        """Initialize the request; each keyword maps to one child element.

        The *_nsprefix_ attributes hold the XML namespace prefix captured
        for the matching child (None until assigned during parsing).
        """
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        super(ValidatePickUpRequest, self).__init__( **kwargs_)
        self.BillingAccountNumber = BillingAccountNumber
        self.BillingAccountNumber_nsprefix_ = None
        self.PartnerID = PartnerID
        self.PartnerID_nsprefix_ = None
        self.PickupInstruction = PickupInstruction
        self.PickupInstruction_nsprefix_ = None
        self.Address = Address
        self.Address_nsprefix_ = None
        self.ShipmentSummary = ShipmentSummary
        self.ShipmentSummary_nsprefix_ = None
        self.NotificationEmails = NotificationEmails
        self.NotificationEmails_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass override when one exists
        # (module-level hook, then the class-level `subclass` slot),
        # otherwise this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ValidatePickUpRequest)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ValidatePickUpRequest.subclass:
            return ValidatePickUpRequest.subclass(*args_, **kwargs_)
        else:
            return ValidatePickUpRequest(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Plain generated accessor pairs, one get/set per child element.
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_BillingAccountNumber(self):
        return self.BillingAccountNumber
    def set_BillingAccountNumber(self, BillingAccountNumber):
        self.BillingAccountNumber = BillingAccountNumber
    def get_PartnerID(self):
        return self.PartnerID
    def set_PartnerID(self, PartnerID):
        self.PartnerID = PartnerID
    def get_PickupInstruction(self):
        return self.PickupInstruction
    def set_PickupInstruction(self, PickupInstruction):
        self.PickupInstruction = PickupInstruction
    def get_Address(self):
        return self.Address
    def set_Address(self, Address):
        self.Address = Address
    def get_ShipmentSummary(self):
        return self.ShipmentSummary
    def set_ShipmentSummary(self, ShipmentSummary):
        self.ShipmentSummary = ShipmentSummary
    def get_NotificationEmails(self):
        return self.NotificationEmails
    def set_NotificationEmails(self, NotificationEmails):
        self.NotificationEmails = NotificationEmails
def hasContent_(self):
if (
self.BillingAccountNumber is not None or
self.PartnerID is not None or
self.PickupInstruction is not None or
self.Address is not None or
self.ShipmentSummary is not None or
self.NotificationEmails is not None or
super(ValidatePickUpRequest, self).hasContent_()
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ValidatePickUpRequest', pretty_print=True):
        """Write this element (tag, attributes, children) as XML to outfile.

        level controls indentation; an empty-content element is emitted as
        a self-closing tag.
        """
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ValidatePickUpRequest')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'ValidatePickUpRequest':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ValidatePickUpRequest')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ValidatePickUpRequest', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ValidatePickUpRequest'):
        """Emit XML attributes; this type defines none, so defer to the superclass."""
        super(ValidatePickUpRequest, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ValidatePickUpRequest')
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ValidatePickUpRequest', fromsubclass_=False, pretty_print=True):
        """Emit child elements in schema order; simple fields are written
        inline, complex children delegate to their own export()."""
        super(ValidatePickUpRequest, self).exportChildren(outfile, level, namespaceprefix_, namespacedef_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.BillingAccountNumber is not None:
            namespaceprefix_ = self.BillingAccountNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.BillingAccountNumber_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBillingAccountNumber>%s</%sBillingAccountNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.BillingAccountNumber), input_name='BillingAccountNumber')), namespaceprefix_ , eol_))
        if self.PartnerID is not None:
            namespaceprefix_ = self.PartnerID_nsprefix_ + ':' if (UseCapturedNS_ and self.PartnerID_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sPartnerID>%s</%sPartnerID>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.PartnerID), input_name='PartnerID')), namespaceprefix_ , eol_))
        if self.PickupInstruction is not None:
            namespaceprefix_ = self.PickupInstruction_nsprefix_ + ':' if (UseCapturedNS_ and self.PickupInstruction_nsprefix_) else ''
            self.PickupInstruction.export(outfile, level, namespaceprefix_, namespacedef_='', name_='PickupInstruction', pretty_print=pretty_print)
        if self.Address is not None:
            namespaceprefix_ = self.Address_nsprefix_ + ':' if (UseCapturedNS_ and self.Address_nsprefix_) else ''
            self.Address.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Address', pretty_print=pretty_print)
        if self.ShipmentSummary is not None:
            namespaceprefix_ = self.ShipmentSummary_nsprefix_ + ':' if (UseCapturedNS_ and self.ShipmentSummary_nsprefix_) else ''
            self.ShipmentSummary.export(outfile, level, namespaceprefix_, namespacedef_='', name_='ShipmentSummary', pretty_print=pretty_print)
        if self.NotificationEmails is not None:
            namespaceprefix_ = self.NotificationEmails_nsprefix_ + ':' if (UseCapturedNS_ and self.NotificationEmails_nsprefix_) else ''
            self.NotificationEmails.export(outfile, level, namespaceprefix_, namespacedef_='', name_='NotificationEmails', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree node; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """No attributes on this type; defer to the superclass."""
        super(ValidatePickUpRequest, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Parse one child node into the matching member; unmatched names
        fall through to the superclass handler at the end."""
        if nodeName_ == 'BillingAccountNumber':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'BillingAccountNumber')
            value_ = self.gds_validate_string(value_, node, 'BillingAccountNumber')
            self.BillingAccountNumber = value_
            self.BillingAccountNumber_nsprefix_ = child_.prefix
        elif nodeName_ == 'PartnerID':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'PartnerID')
            value_ = self.gds_validate_string(value_, node, 'PartnerID')
            self.PartnerID = value_
            self.PartnerID_nsprefix_ = child_.prefix
        elif nodeName_ == 'PickupInstruction':
            obj_ = PickupInstruction.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.PickupInstruction = obj_
            obj_.original_tagname_ = 'PickupInstruction'
        elif nodeName_ == 'Address':
            obj_ = Address.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.Address = obj_
            obj_.original_tagname_ = 'Address'
        elif nodeName_ == 'ShipmentSummary':
            obj_ = ShipmentSummary.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.ShipmentSummary = obj_
            obj_.original_tagname_ = 'ShipmentSummary'
        elif nodeName_ == 'NotificationEmails':
            obj_ = NotificationEmails.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.NotificationEmails = obj_
            obj_.original_tagname_ = 'NotificationEmails'
        # Generated code always forwards to the superclass (fromsubclass=True).
        super(ValidatePickUpRequest, self).buildChildren(child_, node, nodeName_, True)
# end class ValidatePickUpRequest
class ValidatePickUpResponse(Response):
    """ValidatePickUpResponse"""
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = Response
    def __init__(self, IsBulkdRequired=None, CutOffTime=None, CutOffWindow=None, BulkMaxWeight=None, BulkMaxPackages=None, gds_collector_=None, **kwargs_):
        """Initialize all members to the given values (default None).

        NOTE(review): "IsBulkdRequired" spelling presumably mirrors the
        service schema element name; do not rename without checking the WSDL.
        """
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        super(ValidatePickUpResponse, self).__init__( **kwargs_)
        self.IsBulkdRequired = IsBulkdRequired
        self.IsBulkdRequired_nsprefix_ = None
        self.CutOffTime = CutOffTime
        self.CutOffTime_nsprefix_ = None
        self.CutOffWindow = CutOffWindow
        self.CutOffWindow_nsprefix_ = None
        self.BulkMaxWeight = BulkMaxWeight
        self.BulkMaxWeight_nsprefix_ = None
        self.BulkMaxPackages = BulkMaxPackages
        self.BulkMaxPackages_nsprefix_ = None
    def factory(*args_, **kwargs_):
        """Instantiate ValidatePickUpResponse honoring generateDS override hooks."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ValidatePickUpResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ValidatePickUpResponse.subclass:
            return ValidatePickUpResponse.subclass(*args_, **kwargs_)
        else:
            return ValidatePickUpResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    # generateDS-style accessors; the attributes are also directly
    # readable/writable, these exist for API compatibility.
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_IsBulkdRequired(self):
        return self.IsBulkdRequired
    def set_IsBulkdRequired(self, IsBulkdRequired):
        self.IsBulkdRequired = IsBulkdRequired
    def get_CutOffTime(self):
        return self.CutOffTime
    def set_CutOffTime(self, CutOffTime):
        self.CutOffTime = CutOffTime
    def get_CutOffWindow(self):
        return self.CutOffWindow
    def set_CutOffWindow(self, CutOffWindow):
        self.CutOffWindow = CutOffWindow
    def get_BulkMaxWeight(self):
        return self.BulkMaxWeight
    def set_BulkMaxWeight(self, BulkMaxWeight):
        self.BulkMaxWeight = BulkMaxWeight
    def get_BulkMaxPackages(self):
        return self.BulkMaxPackages
    def set_BulkMaxPackages(self, BulkMaxPackages):
        self.BulkMaxPackages = BulkMaxPackages
def hasContent_(self):
if (
self.IsBulkdRequired is not None or
self.CutOffTime is not None or
self.CutOffWindow is not None or
self.BulkMaxWeight is not None or
self.BulkMaxPackages is not None or
super(ValidatePickUpResponse, self).hasContent_()
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ValidatePickUpResponse', pretty_print=True):
        """Write this element (tag, attributes, children) as XML to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ValidatePickUpResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'ValidatePickUpResponse':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ValidatePickUpResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ValidatePickUpResponse', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ValidatePickUpResponse'):
        """Emit XML attributes; this type defines none, so defer to the superclass."""
        super(ValidatePickUpResponse, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ValidatePickUpResponse')
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ValidatePickUpResponse', fromsubclass_=False, pretty_print=True):
        """Emit child elements in schema order with type-specific formatting
        (boolean / string / integer / decimal)."""
        super(ValidatePickUpResponse, self).exportChildren(outfile, level, namespaceprefix_, namespacedef_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.IsBulkdRequired is not None:
            namespaceprefix_ = self.IsBulkdRequired_nsprefix_ + ':' if (UseCapturedNS_ and self.IsBulkdRequired_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sIsBulkdRequired>%s</%sIsBulkdRequired>%s' % (namespaceprefix_ , self.gds_format_boolean(self.IsBulkdRequired, input_name='IsBulkdRequired'), namespaceprefix_ , eol_))
        if self.CutOffTime is not None:
            namespaceprefix_ = self.CutOffTime_nsprefix_ + ':' if (UseCapturedNS_ and self.CutOffTime_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCutOffTime>%s</%sCutOffTime>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.CutOffTime), input_name='CutOffTime')), namespaceprefix_ , eol_))
        if self.CutOffWindow is not None:
            namespaceprefix_ = self.CutOffWindow_nsprefix_ + ':' if (UseCapturedNS_ and self.CutOffWindow_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCutOffWindow>%s</%sCutOffWindow>%s' % (namespaceprefix_ , self.gds_format_integer(self.CutOffWindow, input_name='CutOffWindow'), namespaceprefix_ , eol_))
        if self.BulkMaxWeight is not None:
            namespaceprefix_ = self.BulkMaxWeight_nsprefix_ + ':' if (UseCapturedNS_ and self.BulkMaxWeight_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBulkMaxWeight>%s</%sBulkMaxWeight>%s' % (namespaceprefix_ , self.gds_format_decimal(self.BulkMaxWeight, input_name='BulkMaxWeight'), namespaceprefix_ , eol_))
        if self.BulkMaxPackages is not None:
            namespaceprefix_ = self.BulkMaxPackages_nsprefix_ + ':' if (UseCapturedNS_ and self.BulkMaxPackages_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBulkMaxPackages>%s</%sBulkMaxPackages>%s' % (namespaceprefix_ , self.gds_format_integer(self.BulkMaxPackages, input_name='BulkMaxPackages'), namespaceprefix_ , eol_))
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree node; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """No attributes on this type; defer to the superclass."""
        super(ValidatePickUpResponse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'IsBulkdRequired':
sval_ = child_.text
ival_ = self.gds_parse_boolean(sval_, node, 'IsBulkdRequired')
ival_ = self.gds_validate_boolean(ival_, node, 'IsBulkdRequired')
self.IsBulkdRequired = ival_
self.IsBulkdRequired_nsprefix_ = child_.prefix
| |
<gh_stars>0
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import random
from datetime import datetime
import sys
import argparse
import torch
import os
from inspect import currentframe, getframeinfo
GEOSCORER_DIR = os.path.dirname(os.path.realpath(__file__))
CRAFTASSIST_DIR = os.path.join(GEOSCORER_DIR, "../")
sys.path.append(CRAFTASSIST_DIR)
from shapes import get_bounds
def pretty_log(log_string):
    """Print log_string prefixed with a timestamp and the caller's file:line, then flush."""
    caller = currentframe().f_back
    fname = getframeinfo(caller).filename.split("/")[-1]
    stamp = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
    print("{} {}:{} {}".format(stamp, fname, caller.f_lineno, log_string))
    sys.stdout.flush()
## Train Fxns ##
def get_base_train_parser():
    """Build the argparse parser with the flags shared by geoscorer training scripts."""
    # (flag, type, default, help) in the original registration order;
    # type=None is argparse's default (leave the string untouched).
    specs = [
        ("--cuda", int, 1, "0 for cpu"),
        ("--batchsize", int, 64, "batchsize"),
        ("--dataset", None, "shapes", "shapes/segments/both"),
        ("--epochsize", int, 1000, "number of examples in an epoch"),
        ("--nepoch", int, 1000, "number of epochs"),
        ("--context_sidelength", int, 32, "size of cube"),
        ("--hidden_dim", int, 64, "size of hidden dim"),
        ("--num_layers", int, 3, "num layers"),
        ("--blockid_embedding_dim", int, 8, "size of blockid embedding"),
        ("--num_words", int, 256, "number of words for the blockid embeds"),
        ("--lr", float, 0.1, "step size for net"),
        ("--optim", str, "adagrad", "optim type to use (adagrad|sgd|adam)"),
        ("--momentum", float, 0.0, "momentum"),
        ("--checkpoint", None, "", "where to save model"),
        ("--num_workers", int, 4, "number of dataloader workers"),
    ]
    parser = argparse.ArgumentParser()
    for flag, argtype, default, desc in specs:
        parser.add_argument(flag, type=argtype, default=default, help=desc)
    return parser
def add_dataset_flags(parser):
    """Register dataset-configuration flags on an existing parser."""
    # NOTE(review): type=bool on a CLI flag treats any non-empty string as
    # True ("--useid False" still yields True); kept as-is for compatibility.
    specs = [
        ("--dataset_ratios", str, "shape:1.0", "comma separated name:prob"),
        ("--useid", bool, False, "use blockid"),
        ("--fixed_cube_size", int, None, "fixed_cube_size"),
        ("--fixed_center", bool, False, "fixed_center"),
        ("--min_seg_size", int, 6, "min seg size for seg data type"),
        ("--use_saved_data", bool, False, "use preparsed data for this min_seg_size"),
    ]
    for flag, argtype, default, desc in specs:
        parser.add_argument(flag, type=argtype, default=default, help=desc)
def add_directional_flags(parser):
    """Register direction/viewer-related model flags on an existing parser."""
    # NOTE(review): type=bool parses any non-empty string as True; kept
    # as-is for compatibility with existing launch scripts.
    specs = [
        ("--spatial_embedding_dim", int, 8, "size of spatial emb"),
        ("--output_embedding_dim", int, 8, "size of output emb"),
        ("--seg_direction_net", bool, False, "use segdirnet module"),
        ("--seg_use_viewer_pos", bool, False, "use viewer pos in seg"),
        ("--seg_use_viewer_look", bool, False, "use viewer look in seg"),
        ("--seg_use_direction", bool, False, "use direction in seg"),
        ("--num_seg_dir_layers", int, 3, "num segdir net layers"),
        ("--cont_use_direction", bool, False, "use direction in context"),
        (
            "--cont_use_xyz_from_viewer_look",
            bool,
            False,
            "use xyz position relative to viewer look in context emb",
        ),
    ]
    for flag, argtype, default, desc in specs:
        parser.add_argument(flag, type=argtype, default=default, help=desc)
def get_dataloader(dataset, opts, collate_fxn):
    """Wrap dataset in a shuffling DataLoader configured from opts.

    opts must provide "batchsize" and "num_workers"; collate_fxn is passed
    straight through to the DataLoader.
    """
    def seed_worker(wid):
        # Give each worker its own numpy RNG stream derived from torch's seed.
        np.random.seed(torch.initial_seed() % (2 ** 32))
    loader_kwargs = dict(
        batch_size=opts["batchsize"],
        num_workers=opts["num_workers"],
        shuffle=True,
        pin_memory=True,
        drop_last=True,
        worker_init_fn=seed_worker,
        collate_fn=collate_fxn,
    )
    return torch.utils.data.DataLoader(dataset, **loader_kwargs)
def to_cuda(list_modules):
    # Move every module in list_modules onto the default CUDA device, in place.
    for m in list_modules:
        m.cuda()
def multitensor_collate_fxn(x):
    """
    Takes a list of BATCHSIZE lists of tensors of length D.
    Returns a list of length D of batched tensors.
    """
    # zip(*x) regroups the i-th tensor of every example; stacking adds the
    # leading batch dimension (equivalent to cat of unsqueeze(0)s).
    return [torch.stack(list(group)) for group in zip(*x)]
## 3D Utils ##
def get_side_lengths(bounds):
    """
    Bounds is [min_x, max_x, min_y, max_y, min_z, max_z] (inclusive).
    Returns the list of the three side lengths.
    """
    mins = bounds[0::2]
    maxs = bounds[1::2]
    return [hi - lo + 1 for lo, hi in zip(mins, maxs)]
def coord_to_index(coord, sl):
    """
    Takes a 3D coordinate in a cube and the cube side length.
    Returns the x-major index in the flattened 3D array.
    """
    return (coord[0] * sl + coord[1]) * sl + coord[2]
def index_to_coord(index, sl):
    """
    Inverse of coord_to_index: flattened index + side length -> [x, y, z].
    """
    plane, z = divmod(index, sl)
    x, y = divmod(plane, sl)
    return [x, y, z]
def shift_subsegment_corner(S):
    """
    Takes a segment described as a list of ((x, y, z), (block_id, ?)) tuples.
    Returns the segment shifted so its min corner is the origin, plus the
    shift vector that was applied.
    """
    bounds = get_bounds(S)
    shift_zero_vec = [-bounds[0], -bounds[2], -bounds[4]]
    shifted = [
        (tuple(c + d for c, d in zip(coords, shift_zero_vec)), meta)
        for coords, meta in S
    ]
    return shifted, shift_zero_vec
def subset_and_scale_3d(init_array, mins, maxs, scale=1):
    # Crop init_array to the box [mins, maxs) on each axis and multiply by scale.
    return scale * init_array[mins[0] : maxs[0], mins[1] : maxs[1], mins[2] : maxs[2]]
def combine_seg_context(seg, context, seg_shift, seg_mult=1, seg_sl=8, context_sl=32):
    """Return a copy of context with seg added in at offset seg_shift.

    Args:
        seg: seg_sl x seg_sl x seg_sl tensor.
        context: context_sl x context_sl x context_sl tensor (left unmodified).
        seg_shift: 3 offsets of the segment corner inside the context box.
        seg_mult: scale applied to the segment before adding.
        seg_sl, context_sl: side lengths; previously hard-coded to 8 and 32,
            now parameters with the old values as defaults.

    Any part of the segment that would fall outside the context box is
    clipped before the addition.
    """
    completed_context = context.clone()
    # Region of the context to write: sometimes the segment falls partly
    # outside the context bounding box, so clamp to context_sl.
    c_mins = [int(ss) for ss in seg_shift]
    c_maxs = [int(min(ss + seg_sl, context_sl)) for ss in seg_shift]
    # Matching region of the segment: drop whatever extends past the edge.
    s_maxs = [int(seg_sl - max(0, (ss + seg_sl) - context_sl)) for ss in seg_shift]
    seg_to_add = seg_mult * seg[: s_maxs[0], : s_maxs[1], : s_maxs[2]]
    completed_context[c_mins[0] : c_maxs[0], c_mins[1] : c_maxs[1], c_mins[2] : c_maxs[2]] += (
        seg_to_add
    )
    return completed_context
def get_vector(start, end):
    # Displacement from start to end (elementwise; works for tensors/arrays).
    return end - start
def get_random_viewer_info(sl):
    """Return random (viewer_pos, viewer_look) tensors in [0, sl-1]^3,
    guaranteed to be distinct points.

    If the two random triples coincide, viewer_look's x coordinate is
    nudged by one while staying inside the cube.
    """
    viewer_pos = torch.tensor(random_int_triple(0, sl - 1))
    viewer_look = torch.tensor(random_int_triple(0, sl - 1))
    if viewer_pos.eq(viewer_look).sum() == viewer_pos.size(0):
        # BUGFIX: the original compared against sl + 1, which is always
        # true for an in-range coordinate, so the increment branch could
        # push viewer_look[0] to sl — outside the cube.
        if viewer_look[0] < sl - 1:
            viewer_look[0] += 1
        else:
            viewer_look[0] -= 1
    return viewer_pos, viewer_look
def b_greater_than_a(a, b):
    """Sign of (b - a): 1 if b > a, -1 if b < a, 0 when equal."""
    return (b > a) - (b < a)
def shift_block(b, s):
    """Translate block b = ((x, y, z), meta) by the offset triple s."""
    return (tuple(c + d for c, d in zip(b[0], s)), b[1])
def rotate_block(b, c, r):
    """Rotate the block b around point c by 90*r degrees in the xz plane.

    r should be 1 or -1.
    """
    # TODO add a reflection
    center = np.array(c)
    rel = np.add(b[0], -center)
    x, z = rel[0], rel[2]
    # 90-degree rotation in the xz plane, direction chosen by the sign of r.
    rel[0], rel[2] = (z, -x) if r == -1 else (-z, x)
    return (tuple(rel + center), b[1])
def random_int_triple(minval, maxval):
    """Three independent uniform ints in [minval, maxval], as a list."""
    return [random.randint(minval, maxval) for _ in range(3)]
def check_inrange(x, minval, maxval):
    """Inclusive check: every component of x lies in [minval, maxval]."""
    return all(minval <= v <= maxval for v in x)
def normalize(batched_vector):
    """Row-wise L2-normalize a batch of vectors; zero rows stay zero."""
    v = batched_vector.double()
    lengths = torch.norm(v, dim=1, keepdim=True)
    # Divide zero-length rows by 1 instead of 0.
    safe = lengths + lengths.eq(0).double()
    return v / safe
def get_rotation_matrix(viewer_pos, viewer_look):
    """Build N 2x2 rotation matrices from the xy part of viewer_pos->viewer_look.

    The matrices rotate xy coordinates into the viewer-look frame; cos(theta)
    comes from the normalized look direction's y component and sin(theta) is
    recovered via sqrt(1 - cos^2) with its sign taken from the x component.
    """
    # VP, VL: N x 3, VP_to_VL: N x 3
    vp_to_vl = get_vector(viewer_pos, viewer_look)[:, :2]
    nlook_vec = normalize(vp_to_vl)
    nly = nlook_vec[:, 1]
    # Nlx necessary to correct for the range of acrcos
    nlx = nlook_vec[:, 0]
    # Collapse nlx to its sign (+1 / -1, and -1 for exactly 0).
    nlx = nlx.gt(0).double() - nlx.lt(0).double() - nlx.eq(0).double()
    # Take care of nans created by raising 0 to a power
    # and then masking the sin theta to 0 as intended
    base = 1 - nly * nly
    nan_mask = torch.isnan(torch.pow(base, 0.5)).double()
    base = base + nan_mask
    sin_theta = nlx * nan_mask.eq(0).double() * torch.pow(base, 0.5)
    nly = nly.unsqueeze(1)
    sin_theta = sin_theta.unsqueeze(1)
    # Rows of each 2x2 matrix: [cos, sin] and [-sin, cos].
    rm_pt1 = torch.cat([nly, sin_theta], 1).unsqueeze(1)
    rm_pt2 = torch.cat([-sin_theta, nly], 1).unsqueeze(1)
    rm = torch.cat([rm_pt1, rm_pt2], 1)
    return rm
def rotate_x_y(coord, rotation_matrix):
    """Apply a 2x2 rotation matrix to a single (x, y) coordinate tensor."""
    return (coord.unsqueeze(0) @ rotation_matrix).squeeze(0)
def float_equals(a, b, epsilon):
    """True when a and b differ by less than epsilon."""
    return abs(a - b) < epsilon
def get_argmax_list(vals, epsilon, minlist=False, maxlen=None):
    """Collect (index, value) pairs of the (near-)extremal entries of vals.

    Values within epsilon of the current best are appended as ties; a
    strictly better value resets the list. minlist=True searches for
    minima instead; maxlen caps how many ties are kept.
    """
    mult = -1 if minlist else 1
    max_ind = []
    for i, v in enumerate(vals):
        # Ties (within epsilon of the current best) are appended first;
        # only a strictly better value replaces the list.
        if not max_ind or float_equals(max_ind[0][1], v, epsilon):
            if maxlen and len(max_ind) == maxlen:
                continue
            max_ind.append((i, v))
        elif mult * (v - max_ind[0][1]) > 0:
            max_ind = [(i, v)]
    return max_ind
def get_firstmax(vals, epsilon, minlist=False):
    # First (index, value) pair among the epsilon-tied extrema of vals.
    return get_argmax_list(vals, epsilon, minlist, 1)[0]
# N -> batch size in training
# D -> num target coord per element
# Viewer pos, viewer_look are N x 3 tensors
# Batched target coords is a N x D x 3 tensor
# Output is a N x D x 3 tensor
def get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords):
# First verify the sizing and unsqueeze if necessary
btc_sizes = batched_target_coords.size()
vp_sizes = viewer_pos.size()
vl_sizes = viewer_look.size()
if len(btc_sizes) > 3 or len(vp_sizes) > 2 or len(vl_sizes) > 2:
raise Exception("One input has too many dimensions")
if btc_sizes[-1] != 3 or vp_sizes[-1] != 3 or vl_sizes[-1] != 3:
raise Exception("The last dimension of all inputs should be size 3")
if len(btc_sizes) < 3:
for i in range(3 - len(btc_sizes)):
batched_target_coords = batched_target_coords.unsqueeze(0)
if len(vp_sizes) == 1:
viewer_pos = viewer_pos.unsqueeze(0)
if len(vl_sizes) == 1:
viewer_look = viewer_look.unsqueeze(0)
n = batched_target_coords.size()[0]
d = batched_target_coords.size()[1]
# Handle xy and z separately
# XY = N X D x 2
xy = batched_target_coords[:, :, 0:2].double()
# Z = N x D x 1
z = batched_target_coords[:, :, 2].unsqueeze(2).double()
## XY
# Shift such that viewer pos is the origin
# VPXY, VLXY: N x 2
vpxy = viewer_pos.double()[:, 0:2]
vlxy = viewer_look.double()[:, 0:2]
vpxy_to_vlxy = vlxy - vpxy
# VPXY to XY: N x D x 2
vpxy_to_xy | |
#!/usr/bin/env python
from binascii import hexlify
from fcntl import ioctl
from socket import *
from struct import pack, unpack
import six
from pcappy_port.constants import *
from pcappy_port.functions import *
# wrap libpcap use ctypes
# fork of pcappy_port, fix bugs and make it work for python3
__author__ = '<NAME>'
__copyright__ = 'Copyright 2012, PcapPy Project'
__credits__ = ['<NAME>']
__license__ = 'GPL'
__version__ = '0.3'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
__all__ = [
'PcapPyLive',
'PcapPyOffline',
'PcapPyDead',
'open_offline',
'open_dead',
'open_live',
'findalldevs',
'PcapPyException'
]
def _inet_ntoa(ip):
    # 32-bit IPv4 value -> dotted-quad string.
    # NOTE(review): htonl + '!L' pack applies two byte-order conversions;
    # presumably the value arrives in host order from the ctypes structs.
    return inet_ntop(AF_INET, pack(b'!L', htonl(ip)))
def _inet6_ntoa(ip):
    # 16-byte packed IPv6 address -> presentation string.
    return inet_ntop(AF_INET6, ip)
def _inet_atoi(ip):
    # Dotted-quad string -> 32-bit integer (inverse of _inet_ntoa).
    return htonl(unpack(b'!L', inet_aton(ip))[0])
class PcapPyException(Exception):
    """Raised for any libpcap-level failure reported by this wrapper."""
    pass
class PcapPyInterface(object):
    """One network interface as reported by pcap_findalldevs.

    Wraps a single pcap_if_t entry: name, description, flags, and a list of
    per-address dicts (addr / netmask / broadaddr / dstaddr).
    """
    def __init__(self, pa):
        # pa: dereferenced pcap_if_t ctypes structure.
        self._addresses = []
        self._name = pa.name
        self._description = pa.description or ''
        self._flags = pa.flags
        # Walk the linked list of pcap_addr structures.
        topaddr = pa.addresses
        while topaddr:
            topaddr = topaddr.contents
            self.addresses.append(
                dict(
                    addr=self._parseaddrs(topaddr.addr),
                    netmask=self._parseaddrs(topaddr.netmask),
                    broadaddr=self._parseaddrs(topaddr.broadaddr),
                    dstaddr=self._parseaddrs(topaddr.dstaddr)
                )
            )
            topaddr = topaddr.next
    @property
    def addresses(self):
        return self._addresses
    @property
    def name(self):
        return self._name
    @property
    def description(self):
        return self._description
    @property
    def flags(self):
        return self._flags
    def _parsemac(self, mac):
        """Format raw MAC bytes as a colon-separated hex bytestring."""
        if six.PY2:
            return b':'.join([hexlify(i).zfill(2) for i in mac])
        else:
            return b':'.join([hexlify(bytes([i])).zfill(2) for i in mac])
    def _parseaddrs(self, sa):
        """Convert a sockaddr union pointer into a dict keyed by family.

        Handles AF_LINK, AF_PACKET, AF_INET and AF_INET6; anything else
        falls through to a generic family/data dict.
        """
        if not sa:
            return
        sa = sa.contents
        if sa.sa.sa_family == AF_LINK:
            return dict(
                sdl_len=sa.sdl.sdl_len,
                sdl_family=sa.sdl.sdl_family,
                sdl_index=sa.sdl.sdl_index,
                sdl_type=sa.sdl.sdl_type,
                sdl_nlen=sa.sdl.sdl_nlen,
                sdl_alen=sa.sdl.sdl_alen,
                sdl_slen=sa.sdl.sdl_slen,
                sdl_data=self._parsemac(
                    string_at(byref(sa.sdl.sdl_data, sa.sdl.sdl_nlen), sa.sdl.sdl_alen))
            )
        elif sa.sa.sa_family == AF_PACKET:
            return dict(
                sll_family=sa.sll.sll_family,
                sll_protocol=sa.sll.sll_protocol,
                sll_ifindex=sa.sll.sll_ifindex,
                sll_hatype=sa.sll.sll_hatype,
                sll_pkttype=sa.sll.sll_pkttype,
                sll_halen=sa.sll.sll_halen,
                sll_data=self._parsemac(string_at(byref(sa.sll.sll_data), sa.sll.sll_halen))
            )
        elif sa.sa.sa_family == AF_INET:
            # Darwin's sockaddr_in carries an extra sin_len field.
            if platform == 'darwin':
                return dict(
                    len=sa.sin.sin_len,
                    family=sa.sin.sin_family,
                    port=sa.sin.sin_port,
                    address=_inet_ntoa(sa.sin.sin_addr)
                )
            else:
                return dict(
                    family=sa.sin.sin_family,
                    port=sa.sin.sin_port,
                    address=_inet_ntoa(sa.sin.sin_addr)
                )
        elif sa.sa.sa_family == AF_INET6:
            if platform == 'darwin':
                return dict(
                    len=sa.sin6.sin6_len,
                    port=sa.sin6.sin6_port,
                    family=sa.sin6.sin6_family,
                    flowinfo=sa.sin6.sin6_flowinfo,
                    address=_inet6_ntoa(string_at(sa.sin6.sin6_addr, 16)),
                    scope_id=sa.sin6.sin6_scope_id
                )
            else:
                return dict(
                    port=sa.sin6.sin6_port,
                    family=sa.sin6.sin6_family,
                    flowinfo=sa.sin6.sin6_flowinfo,
                    address=_inet6_ntoa(string_at(sa.sin6.sin6_addr, 16)),
                    scope_id=sa.sin6.sin6_scope_id
                )
        # Fallback for unrecognized address families.
        # NOTE(review): the non-darwin branch also reads sa.sa.sa_len —
        # confirm the ctypes sockaddr definition provides it off-darwin.
        if platform == 'darwin':
            return dict(
                len=sa.sa.sa_len,
                family=sa.sa.sa_family,
                data=string_at(sa.sa.sa_data, sa.sa.sa_len)
            )
        else:
            return dict(
                family=sa.sa.sa_family,
                data=string_at(sa.sa.sa_data, sa.sa.sa_len)
            )
class PcapPyDumper(object):
    """Savefile writer wrapping pcap_dump_open / pcap_ng_dump_open.

    Created via PcapPyBase.dump_open; write() appends packets, close()
    flushes and releases the dumper handle.
    """
    def __init__(self, pcap, filename, ng=False):
        # pcap: the owning capture object; ng selects pcapng output.
        self._pcap = pcap
        self.filename = filename
        self._pd = None
        self._ng = ng
        if self._ng:
            self._pd = pcap_ng_dump_open(self._pcap._p, self.filename)
        else:
            self._pd = pcap_dump_open(self._pcap._p, self.filename)
        if not self._pd:
            raise PcapPyException(self._pcap.err)
    def close(self):
        """Flush pending output and close the dumper; safe to call twice."""
        if not self.closed:
            self.flush()
            if self._ng:
                pcap_ng_dump_close(self._pd)
            else:
                pcap_dump_close(self._pd)
            self._pd = None
    def tell(self):
        """Byte offset in the savefile (pcap_dump_ftell)."""
        if self.closed:
            raise ValueError('I/O operation on closed file')
        r = pcap_dump_ftell(self._pd)
        if r == -1:
            raise PcapPyException(self._pcap.err)
        return r
    ftell = tell
    def flush(self):
        """Flush buffered packets to disk (pcap_dump_flush)."""
        if self.closed:
            raise ValueError('I/O operation on closed file')
        r = pcap_dump_flush(self._pd)
        if r == -1:
            raise PcapPyException(self._pcap.err)
    def write(self, pkt_hdr, pkt_data):
        """Append one packet; pkt_hdr is a dict with bytes keys
        b'ts' (b'tv_sec'/b'tv_usec'), b'caplen' and b'len'."""
        if self.closed:
            raise ValueError('I/O operation on closed file')
        ph = pcap_pkthdr(
            ts=timeval(
                tv_sec=pkt_hdr[b'ts'][b'tv_sec'],
                tv_usec=pkt_hdr[b'ts'][b'tv_usec']
            ),
            caplen=pkt_hdr[b'caplen'],
            len=pkt_hdr[b'len']
        )
        if self._ng:
            pcap_ng_dump(self._pd, pointer(ph), pkt_data)
        else:
            pcap_dump(self._pd, pointer(ph), pkt_data)
    dump = ng_dump = write
    # def fileno(self):
    #     return self.file.fileno()
    #
    # @property
    # def file(self):
    #     if self.closed:
    #         raise ValueError('I/O operation on closed file')
    #     f = pcap_dump_file(self._pd)
    #     if not f:
    #         raise PcapPyException(self._pcap.err)
    #     return PyFile_FromFile(f, self.filename, 'wb', None)
    @property
    def closed(self):
        # True once close() has released the underlying dumper.
        return self._pd is None
    def __del__(self):
        self.close()
class PcapPyBpfProgram(object):
    """Compiled BPF filter program.

    Compile either against an open capture handle (pass pcap=...) or
    standalone (pass snaplen=... and linktype=... for pcap_compile_nopcap).
    """
    def __init__(self, expr, opt, nm, **kwargs):
        self._expression = expr
        self._optimize = opt
        self._netmask = nm
        self._bpf = bpf_program()
        if 'pcap' in kwargs:
            if pcap_compile(kwargs['pcap']._handle, pointer(self._bpf), expr, opt,
                            _inet_atoi(nm)) == -1:
                raise PcapPyException(kwargs['pcap'].err)
        elif pcap_compile_nopcap(kwargs['snaplen'], kwargs['linktype'], pointer(self._bpf), expr,
                                 opt, _inet_atoi(nm)) == -1:
            # BUGFIX: there is no 'pcap' key in this branch, so the original
            # raise via kwargs['pcap'].err produced a KeyError instead of a
            # PcapPyException. pcap_compile_nopcap has no handle to query
            # for an error string, so report the failing expression.
            raise PcapPyException('pcap_compile_nopcap failed for filter %r' % (expr,))
    @property
    def expression(self):
        # Filter expression the program was compiled from.
        return self._expression
    @property
    def optimize(self):
        return self._optimize
    @property
    def netmask(self):
        return self._netmask
    mask = netmask
    def __del__(self):
        if self._bpf:
            try:
                # todo: exception
                pcap_freecode(pointer(self._bpf))
            except Exception:
                pass
def open_live(device, snaplen=64, promisc=1, to_ms=1000):
    """Open a live capture on device (see PcapPyLive)."""
    return PcapPyLive(device, snaplen, promisc, to_ms)
def open_dead(linktype=LINKTYPE_ETHERNET, snaplen=64):
    """Open a dead handle for filter compilation / dumping (see PcapPyDead)."""
    return PcapPyDead(linktype, snaplen)
def open_offline(file):
    """Open a savefile for reading (see PcapPyOffline)."""
    return PcapPyOffline(file)
def _findalldevs(devs):
    """Walk the pcap_if_t linked list into PcapPyInterface objects and
    free the native list before returning."""
    top = devs
    devices = []
    while top:
        top = top.contents
        devices.append(PcapPyInterface(top))
        top = top.next
    pcap_freealldevs(devs)
    return devices
def findalldevs():
    """Return the list of capture-capable interfaces (pcap_findalldevs)."""
    errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
    devs = pcap_if_t_ptr()
    if pcap_findalldevs(pointer(devs), c_char_p((addressof(errbuf)))) == -1:
        raise PcapPyException(errbuf.raw)
    return _findalldevs(devs)
def findalldevs_ex(source, username=b'', password=b''):
    """List interfaces from a (possibly remote) source, with optional
    password authentication (pcap_findalldevs_ex)."""
    errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
    ra = pcap_rmtauth()
    # Use password auth only when both credentials are supplied.
    ra.type = RPCAP_RMTAUTH_PWD if username and password else RPCAP_RMTAUTH_NULL
    ra.username = username
    ra.password = password
    devs = pcap_if_t_ptr()
    if pcap_findalldevs_ex(source, pointer(ra), pointer(devs), c_char_p((addressof(errbuf)))) == -1:
        raise PcapPyException(errbuf.raw)
    return _findalldevs(devs)
def lookupdev():
    """Return the name of the default capture device (pcap_lookupdev)."""
    errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
    r = pcap_lookupdev(c_char_p((addressof(errbuf))))
    if not r:
        raise PcapPyException(errbuf.raw)
    return r
# Thin pass-throughs to the libpcap DLT name/value conversion helpers.
def datalink_val_to_name(val):
    return pcap_datalink_val_to_name(val)
def datalink_name_to_val(name):
    return pcap_datalink_name_to_val(name)
def datalink_val_to_description(val):
    return pcap_datalink_val_to_description(val)
def lookupnet(device):
    """Return (network, netmask) of device as dotted-quad strings
    (pcap_lookupnet)."""
    errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
    netp = c_uint32()
    maskp = c_uint32()
    r = pcap_lookupnet(
        device,
        pointer(netp),
        pointer(maskp),
        c_char_p(addressof(errbuf))
    )
    if r == -1:
        raise PcapPyException(errbuf.raw)
    return _inet_ntoa(netp.value), _inet_ntoa(maskp.value)
# Thin pass-throughs to libpcap's status/errno string helpers.
def statustostr(status):
    return pcap_statustostr(status)
def strerror(status):
    return pcap_strerror(status)
def compile_nopcap(snaplen=64, linktype=LINKTYPE_ETHERNET, expr=b'', optimize=1, mask='0.0.0.0'):
    """Compile a BPF filter without an open capture handle.

    BUGFIX: the original dropped the compiled program (implicitly returning
    None), which also made PcapPyBase.compile_nopcap return None.
    """
    return PcapPyBpfProgram(expr, optimize, mask, linktype=linktype, snaplen=snaplen)
class PcapPyBase(object):
    """Shared behavior for pcap handles.

    Not instantiable directly; use PcapPyLive, PcapPyDead, or PcapPyOffline.
    Subclasses set _is_base = False and populate self._handle.
    """
    _is_base = True
    def __init__(self):
        if self._is_base:
            raise Exception('Cannot initialize base class. '
                            'Use PcapPyLive, PcapPyDead, or PcapPyOffline instead.')
        self._handle = None
        self._filter = None
    @classmethod
    def findalldevs(cls):
        return findalldevs()
    @classmethod
    def findalldevs_ex(cls, source, username=b'', password=b''):
        return findalldevs_ex(source, username, password)
    def geterr(self):
        return self.err
    @classmethod
    def lookupdev(cls):
        """Name of the default capture device.

        BUGFIX: the original returned the module-level function object
        itself (``return lookupdev``) instead of calling it.
        """
        return lookupdev()
    def list_datalinks(self):
        """Return the list of DLT values supported by this handle."""
        dlt_buf = c_int_p()
        r = pcap_list_datalinks(self._handle, pointer(dlt_buf))
        if r == -1:
            raise PcapPyException(self.err)
        # Copy the native int array before freeing it.
        dlt_buf_a = cast(dlt_buf, POINTER(c_int * r)).contents
        links = [dlt_buf_a[i] for i in range(0, r)]
        pcap_free_datalinks(dlt_buf)
        return links
    @classmethod
    def datalink_val_to_name(cls, val):
        return datalink_val_to_name(val)
    @classmethod
    def datalink_name_to_val(cls, name):
        return datalink_name_to_val(name)
    @classmethod
    def datalink_val_to_description(cls, val):
        return datalink_val_to_description(val)
    @classmethod
    def lookupnet(cls, device):
        return lookupnet(device)
    def compile(self, expr, optimize=1, mask='0.0.0.0'):
        """Compile a BPF expression against this handle."""
        return PcapPyBpfProgram(expr, optimize, mask, pcap=self)
    def dump_open(self, filename):
        """Open a savefile writer bound to this handle."""
        return PcapPyDumper(self, filename)
    @classmethod
    def compile_nopcap(cls, snaplen=64, linktype=LINKTYPE_ETHERNET, expr=b'', optimize=1,
                       mask='0.0.0.0'):
        return compile_nopcap(snaplen, linktype, expr, optimize, mask)
    @classmethod
    def statustostr(cls, status):
        return statustostr(status)
    @classmethod
    def strerror(cls, status):
        return strerror(status)
    @property
    def is_swapped(self):
        # True when the savefile byte order differs from the host's.
        return pcap_is_swapped(self._handle) == 1
    @property
    def minor_version(self):
        return pcap_minor_version(self._handle)
    @property
    def major_version(self):
        return pcap_major_version(self._handle)
    @property
    def lib_version(self):
        return pcap_lib_version(self._handle)
    @property
    def err(self):
        # Last libpcap error string for this handle.
        return pcap_geterr(self._handle)
    @property
    def datalink_ext(self):
        r = pcap_datalink_ext(self._handle)
        if r == -1:
            raise PcapPyException(self.err)
        return r
    @property
    def datalink(self):
        r = pcap_datalink(self._handle)
        if r == -1:
            raise PcapPyException(self.err)
        return r
    @datalink.setter
    def datalink(self, value):
        if pcap_set_datalink(self._handle, value) < 0:
            raise PcapPyException(self.err)
    @property
    def snapshot(self):
        return pcap_snapshot(self._handle)
    @snapshot.setter
    def snapshot(self, value):
        if pcap_set_snaplen(self._handle, value) < 0:
            raise PcapPyException(self.err)
    snaplen = snapshot
    def __del__(self):
        if self._handle:
            # todo: del exception
            try:
                pcap_close(self._handle)
            except Exception:
                pass
class PcapPyDead(PcapPyBase):
    """A 'dead' pcap handle (pcap_open_dead): not bound to any device.

    Useful for compiling BPF filters and creating dump files offline.
    """
    _is_base = False
    def __init__(self, linktype=LINKTYPE_ETHERNET, snaplen=64):
        super(PcapPyDead, self).__init__()
        # NOTE(review): pcap_open_dead takes no error buffer, so on failure
        # errbuf.raw will always be empty here — kept as-is to preserve
        # behavior; confirm whether a fixed message would be more useful.
        errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
        self._handle = pcap_open_dead(linktype, snaplen)
        if not self._handle:
            raise PcapPyException(errbuf.raw)
class PcapPyAlive(PcapPyBase):
    """Base for handles that can actually deliver packets (live or offline).

    Adds the packet-retrieval loop/dispatch/next APIs, the BPF filter
    setter, capture statistics, and capture-direction control.
    """
    def __init__(self):
        super(PcapPyAlive, self).__init__()
        # Capture direction; mirrors the last value accepted by the
        # ``direction`` setter (libpcap has no getter for it).
        self._direction = 0
    def _parse_entry(self, ph, pd):
        """Convert a (pcap_pkthdr, data pointer) pair into Python objects.

        Returns a (header_dict, packet_bytes) tuple. On macOS the header
        struct carries an extra ``comments`` field, hence the split.
        """
        if platform == 'darwin':
            return dict(
                caplen=ph.caplen,
                len=ph.len,
                ts=dict(tv_usec=ph.ts.tv_usec, tv_sec=ph.ts.tv_sec),
                comments=string_at(ph.comments)
            ), string_at(pd, ph.caplen)
        return dict(
            caplen=ph.caplen,
            len=ph.len,
            ts=dict(tv_usec=ph.ts.tv_usec, tv_sec=ph.ts.tv_sec)
        ), string_at(pd, ph.caplen)
    def _setup_handler(self, looper, cnt, callback, user):
        """Run *looper* (pcap_loop/pcap_dispatch) with a Python callback.

        The user object is boxed in a py_object pointer so the C callback
        can hand it back to the Python-level *callback* unchanged.
        """
        def _loop_callback(user, ph, pd):
            ph, pd = self._parse_entry(ph.contents, pd)
            callback(user.contents.value, ph, pd)
        r = looper(self._handle, cnt, pcap_handler(_loop_callback), pointer(py_object(user)))
        if r == -1:
            raise PcapPyException(self.err)
        return r
    def loop(self, cnt, callback, user):
        """Process up to *cnt* packets via pcap_loop (blocks until done)."""
        return self._setup_handler(pcap_loop, cnt, callback, user)
    def dispatch(self, cnt, callback, user):
        """Process one batch of packets via pcap_dispatch."""
        return self._setup_handler(pcap_dispatch, cnt, callback, user)
    def breakloop(self):
        """Ask a running loop()/dispatch() to terminate early."""
        pcap_breakloop(self._handle)
    def next_ex(self):
        """Read the next packet via pcap_next_ex.

        Returns a (header, data) tuple, or None on timeout (0) and on
        end-of-savefile (-2); raises PcapPyException on error (-1).
        """
        ph = pcap_pkthdr_ptr()
        pd = c_ubyte_p()
        r = pcap_next_ex(self._handle, pointer(ph), pointer(pd))
        if r in [0, -2]:
            return None
        elif r == -1:
            raise PcapPyException(self.err)
        return self._parse_entry(ph.contents, pd)
    def next(self):
        """Read the next packet via pcap_next; None when none is available."""
        ph = pcap_pkthdr()
        pd = pcap_next(self._handle, pointer(ph))
        if not pd:
            return None
        return self._parse_entry(ph, pd)
    @property
    def nonblock(self):
        """Whether the handle is in non-blocking mode (pcap_getnonblock)."""
        errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
        r = pcap_getnonblock(self._handle, c_char_p((addressof(errbuf))))
        if r == -1:
            raise PcapPyException(errbuf.raw)
        return r
    @nonblock.setter
    def nonblock(self, value):
        errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
        r = pcap_setnonblock(self._handle, value, c_char_p((addressof(errbuf))))
        if r < 0:
            raise PcapPyException(errbuf.raw)
    @property
    def stats(self):
        """Capture statistics as a dict; Windows ('nt') adds bs_capt."""
        ps = pcap_stat()
        if pcap_stats(self._handle, pointer(ps)):
            raise PcapPyException(self.err)
        if platform == 'nt':
            return dict(
                ps_recv=ps.ps_recv,
                ps_drop=ps.ps_drop,
                ps_ifdrop=ps.ps_ifdrop,
                bs_capt=ps.bs_capt
            )
        return dict(
            ps_recv=ps.ps_recv,
            ps_drop=ps.ps_drop,
            ps_ifdrop=ps.ps_ifdrop
        )
    @property
    def filter(self):
        """The currently installed PcapPyBpfProgram (or None)."""
        return self._filter
    @filter.setter
    def filter(self, value):
        # Accept a text expression, a bytes expression, or an already
        # compiled program object; compile on the fly for the first two.
        if isinstance(value, six.text_type):
            self._filter = self.compile(value.encode())
        elif isinstance(value, six.binary_type):
            self._filter = self.compile(value)
        else:
            self._filter = value
        if pcap_setfilter(self._handle, pointer(self._filter._bpf)) < 0:
            raise PcapPyException(self.err)
    @property
    def selectable_fd(self):
        """File descriptor usable with select()/poll(), where supported."""
        return pcap_get_selectable_fd(self._handle)
    @property
    def can_set_rfmon(self):
        """True if the device can be put into monitor (rfmon) mode."""
        return pcap_can_set_rfmon(self._handle) == 1
    @property
    def direction(self):
        """Last capture direction set (PCAP_D_INOUT/PCAP_D_IN/PCAP_D_OUT)."""
        return self._direction
    @direction.setter
    def direction(self, value):
        if value not in [PCAP_D_INOUT, PCAP_D_IN, PCAP_D_OUT]:
            raise ValueError(
                'Must be either PCAP_D_INOUT (%s), PCAP_D_IN (%s), or PCAP_D_OUT (%s)' %
                (
                    PCAP_D_INOUT,
                    PCAP_D_IN,
                    PCAP_D_OUT
                )
            )
        if pcap_setdirection(self._handle, value) < 0:
            raise PcapPyException(self.err)
        self._direction = value
    @property
    def fileno(self):
        """The handle's underlying file descriptor (pcap_fileno)."""
        return pcap_fileno(self._handle)
class PcapPyOffline(PcapPyAlive):
    """Read packets from a saved capture file via pcap_open_offline."""
    _is_base = False
    def __init__(self, filename):
        super(PcapPyOffline, self).__init__()
        errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
        # Opening from an already-open Python file object is unsupported;
        # the pcap_fopen_offline path below relied on the Python 2-only
        # PyFile_AsFile API and was disabled.
        # if isinstance(file_, file):
        #     self._p = pcap_fopen_offline(PyFile_AsFile(file_), c_char_p((addressof(errbuf))))
        # else:
        self._handle = pcap_open_offline(filename, c_char_p((addressof(errbuf))))
        self.filename = filename
        if not self._handle:
            raise PcapPyException(errbuf.raw)
    # @property
    # def file(self):
    #     f = pcap_file(self._p)
# | |
# enemigo.py
import pygame
import random
import math
# Seed the PRNG with the millisecond tick counter so each run differs.
random.seed(pygame.time.get_ticks())
# Logical playfield size in pixels.
ALTO=1000
ANCHO=1000
# Frame counts per sheet row, for sheets cut with recortarRept.
limites=[10, 8, 11, 10, 8, 6, 9, 4, 12, 8, 8, 10, 9, 4, 7, 5, 2, 8, 9, 9, 9]
# 44.1 kHz, signed 16-bit, stereo, 2048-byte buffer.
pygame.mixer.init(44100, -16, 2, 2048)
# Sound effects used by the enemy sprites (loaded at import time).
punchE2=pygame.mixer.Sound('punchEnemy.ogg')
stepE=pygame.mixer.Sound('pasosJugador.ogg')
ouch=pygame.mixer.Sound('gag.ogg')
blast=pygame.mixer.Sound('blast.ogg')
bite=pygame.mixer.Sound('bite.ogg')
cry=pygame.mixer.Sound('cry.ogg')
ouch.set_volume(0.6)
stepE.set_volume(0.05)
blast.set_volume(0.3)
# Dedicated mixer channels so key effects do not cut each other off.
channel3 = pygame.mixer.Channel(2)
channel4 = pygame.mixer.Channel(3)
channel6 = pygame.mixer.Channel(5)
# Current display resolution; pygame.display must be initialised by now.
screensize = pygame.display.Info()
RESOLUTION = [screensize.current_w, screensize.current_h]
# Margin (px) kept between sprites and the screen edge.
bglimit = 10
#Funciones
def recortarRept(max_x, max_y, archivo, vector):
    """Slice a sprite sheet into a ragged 2-D list of frames.

    max_x, max_y: grid dimensions of the sheet (columns, rows).
    archivo:      path of the image file to load.
    vector:       vector[i] is how many frames of row i are kept
                  (presumably the module-level ``limites`` — confirm).

    Returns a list of rows, each a list of pygame Surfaces.
    """
    imagen = pygame.image.load(archivo)
    info = imagen.get_rect()
    an_imagen = info[2]
    al_imagen = info[3]
    # Bug fix: Python 3 "/" yields floats, which Surface.subsurface
    # rejects; this code was written for Python 2's integer division.
    an_image_corte = an_imagen // max_x
    al_image_corte = al_imagen // max_y
    mapa = []
    for i in range(max_y):
        mapis = []
        for j in range(vector[i]):
            cuadro = imagen.subsurface(j*an_image_corte, i*al_image_corte,
                                       an_image_corte, al_image_corte)
            mapis.append(cuadro)
        mapa.append(mapis)
    return mapa
def recortarEne1(archivo):
    """Cut the type-1 enemy sprite sheet into idle and attack frames.

    Returns (idleR, idleL, attack1R, attack1L): for each animation the
    raw frame faces left and the horizontally flipped copy faces right.
    """
    fondo = pygame.image.load(archivo)
    idleR = []
    idleL = []
    attack1R = []
    attack1L = []
    # Source rectangles (x, y, w, h) on the sheet.
    idle = [[248, 187, 57,75], [305, 187, 57,75], [362, 187, 57,75]]
    #walkRight=[[11, 193, 59, 59] , [172, 196, 51, 55], [249, 193, 59, 59], [323, 200, 56, 53], [402, 197, 56, 55],[16, 281, 51, 55], [91, 281, 51, 55]]
    attack1 = [[4,183,50,75], [67,183,77,75], [154,183,84,75]]
    #attack2=[[242, 108, 63, 59], [313, 95, 73, 73], [397, 98, 54, 74]]
    # Idle R-L: uniform 125x125 frames.
    for x in range(3):
        cuadro = fondo.subsurface(idle[x])
        cuadro = pygame.transform.scale(cuadro, (125, 125))
        cuadro2 = pygame.transform.flip(cuadro, True, False)
        cuadro2 = pygame.transform.scale(cuadro2, (125, 125))
        idleR.append(cuadro2)
        idleL.append(cuadro)
    # Attack frames are scaled individually because their widths differ.
    cuadro0 = fondo.subsurface(attack1[0])
    cuadro0 = pygame.transform.scale(cuadro0, (125, 125))
    cuadro1 = fondo.subsurface(attack1[1])
    cuadro1 = pygame.transform.scale(cuadro1, (192, 125))
    cuadro2 = fondo.subsurface(attack1[2])
    # Bug fix: the original scaled cuadro1 here, so the third attack
    # frame silently duplicated the second instead of using its own art.
    cuadro2 = pygame.transform.scale(cuadro2, (210, 125))
    attack1L.append(cuadro0)
    attack1L.append(cuadro1)
    attack1L.append(cuadro2)
    attack1R.append(pygame.transform.flip(cuadro0, True, False))
    attack1R.append(pygame.transform.flip(cuadro1, True, False))
    attack1R.append(pygame.transform.flip(cuadro2, True, False))
    return idleR, idleL, attack1R, attack1L
def recortarEne2(archivo):
    """Cut the type-2 enemy sheet into idle/walk/attack/die frame lists.

    Every frame is scaled to 100x125. For each animation the unflipped
    frame goes into the right-facing list and its horizontally flipped
    copy into the left-facing list. Returns
    (idleR, idleL, walkR, walkL, attack1R, attack1L, dieR, dieL).
    """
    fondo = pygame.image.load(archivo)
    def _serie(rects):
        # Cut, scale, and mirror each (x, y, w, h) rectangle of the sheet.
        derecha, izquierda = [], []
        for rect in rects:
            frame = pygame.transform.scale(fondo.subsurface(rect), (100, 125))
            espejo = pygame.transform.scale(
                pygame.transform.flip(frame, True, False), (100, 125))
            derecha.append(frame)
            izquierda.append(espejo)
        return derecha, izquierda
    idleR, idleL = _serie([[1, 11, 31, 67], [55, 11, 31, 67], [111, 11, 31, 67]])
    walkR, walkL = _serie([[183, 11, 38, 67], [251, 11, 31, 67], [310, 11, 31, 67],
                           [364, 11, 37, 67], [428, 11, 30, 67], [485, 11, 30, 67]])
    attack1R, attack1L = _serie([[0, 101, 35, 67], [49, 101, 55, 67]])
    dieR, dieL = _serie([[262, 111, 55, 57], [328, 111, 67, 57], [404, 11, 74, 57]])
    return idleR, idleL, walkR, walkL, attack1R, attack1L, dieR, dieL
def recortarBala(archivo):
    """Return a one-frame list holding the 15x15 bullet sprite from *archivo*."""
    hoja = pygame.image.load(archivo)
    return [hoja.subsurface(410, 35, 15, 15)]
def recortarReptV2(archivo):
    """Cut the large reptile sheet into idle/walk/attack/die frame lists.

    Frames keep their native sheet size (no rescaling). For each
    animation the raw frame faces right and the horizontally flipped
    copy faces left. Returns
    (idleR, idleL, walkR, walkL, attack1R, attack1L, dieR, dieL).
    """
    fondo = pygame.image.load(archivo)
    def _serie(rects):
        # Cut each (x, y, w, h) rectangle and produce its mirrored twin.
        derecha, izquierda = [], []
        for rect in rects:
            frame = fondo.subsurface(rect)
            derecha.append(frame)
            izquierda.append(pygame.transform.flip(frame, True, False))
        return derecha, izquierda
    idleR, idleL = _serie([[5, 51, 228, 174], [280, 51, 228, 174],
                           [552, 51, 228, 174], [825, 51, 228, 174],
                           [1091, 51, 228, 174]])
    walkR, walkL = _serie([[0, 321, 228, 174], [294, 321, 228, 174],
                           [554, 321, 228, 174], [816, 321, 228, 174],
                           [1092, 321, 228, 174], [1362, 321, 228, 174]])
    attack1R, attack1L = _serie([[0, 605, 228, 180], [282, 554, 228, 227],
                                 [535, 554, 228, 227], [820, 554, 228, 227],
                                 [1108, 605, 192, 180]])
    dieR, dieL = _serie([[262, 111, 55, 57], [328, 111, 67, 57], [404, 11, 74, 57]])
    return idleR, idleL, walkR, walkL, attack1R, attack1L, dieR, dieL
# Shared bullet sprite frames, cut once at import time.
matrizBala=recortarBala('lasers.png')
# Classes
class Enemigo1(pygame.sprite.Sprite):
    """Stationary ranged enemy.

    Frame matrix rows: 0/1 idle R/L, 2/3 attack R/L. A countdown
    (``shoottimer``) periodically switches the sprite into its shooting
    animation.
    """
    def __init__(self, matriz):
        pygame.sprite.Sprite.__init__(self)
        self.f=matriz
        self.image=self.f[0][0]
        self.rect=self.image.get_rect()
        self.indice=1
        self.rect.x=50
        self.rect.y=500
        self.accion=0
        self.dir = 'L'
        self._health = 100
        # Frames left until the next shot; re-randomised after each shot.
        self.shoottimer = 50
        # True while the shooting animation is active.
        self.shoot = False
    def getHealth(self):
        """Return current hit points."""
        return self._health
    def die(self):
        """Play the explosion sound on this enemy's dedicated channel."""
        #ouch.play()
        channel4.play(blast)
    def update(self):
        """Advance one animation frame and fire when the timer expires."""
        #Idle R
        self.shoottimer -= 1
        if self.shoottimer < 0:
            self.shoot = True
            self.shoottimer = random.randrange(20,50)
        # NOTE(review): self.shoot is never reset to False in this class,
        # so once it fires the sprite stays in the attack row forever —
        # presumably the main loop clears it; verify against the caller.
        if self.shoot:
            self.accion=2
        else:
            self.accion=0
        if self.accion==0:
            self.image = self.f[self.accion][self.indice]
            self.indice += 1
            if self.indice > 2:
                self.indice=0
        #Idle L
        # NOTE(review): accion is only set to 0 or 2 above, so the
        # accion==1 and accion==3 branches look unreachable unless accion
        # is assigned externally — confirm before removing.
        if self.accion==1:
            self.image = self.f[self.accion][self.indice]
            self.indice += 1
            if self.indice > 2:
                self.indice=0
        #1
        #Attack R
        if self.accion==2:
            if self.indice <=2:
                self.image = self.f[self.accion][self.indice]
                if self.indice==1:
                    # NOTE(review): this Bala is bound to a local and never
                    # added to a sprite group, so it is discarded here —
                    # presumably the visible bullet is spawned elsewhere.
                    shoot=Bala(matrizBala)
                self.indice += 1
            # Normally 7 frames (translated from the original note)
            if self.indice > 2:
                self.indice=0
        #Attack L
        if self.accion==3:
            if self.indice <=2:
                self.image = self.f[self.accion][self.indice]
                if self.indice==0:
                    # Horizontal offsets compensate for differing frame widths.
                    self.rect.x+=85
                self.indice += 1
                if self.indice==1:
                    self.rect.x-=67
                if self.indice==2:
                    self.rect.x-=18
            # Normally 7 frames (translated from the original note)
            if self.indice > 2:
                self.indice=0
class Enemigo2(pygame.sprite.Sprite):
    """Walking melee enemy with a simple chase-and-attack AI.

    Animation rows in the frame matrix ``self.f``: 0/1 idle R/L,
    2/3 walk R/L, 4/5 attack R/L, 6/7 die R/L. ``accion`` selects the
    row and ``indice`` the current frame within it.
    """
    def __init__(self, matriz):
        pygame.sprite.Sprite.__init__(self)
        self.f = matriz
        self.image = self.f[0][0]
        self.rect = self.image.get_rect()
        self.indice = 0
        self.rect.x = 900
        self.rect.y = 500
        self.accion = 0
        self.dir = 'R'
        self._health = 100
        self.finished = False          # current animation cycle completed
        self.canDie = False            # death animation done; sprite removable
        self.prevkey = None            # last action key fed to move()
        self.vel_y = 0
        self.vel_x = 0
        self.vel_x_value = 10          # horizontal speed (px/frame)
        self.vel_y_value = 6           # vertical speed (px/frame)
        self.moverange = 50            # attack/approach distance (px)
        self.movetime = random.randrange(0, 100)   # frames until next AI decision
    def getHealth(self):
        """Return current hit points."""
        return self._health
    def getSlope(self, posJugador):
        """Return [m, b] of the line from this enemy to *posJugador*,
        or False when the segment is vertical (undefined slope)."""
        point1 = [self.rect.x, self.rect.y]
        if self.rect.x == posJugador[0]:
            return False
        m = float(posJugador[1] - point1[1]) / (posJugador[0] - point1[0])
        b = posJugador[1] - m * posJugador[0]
        return [m, b]
    def isAttacking(self):
        """True while the last issued action was an attack."""
        return self.prevkey in ['AL', 'AR']
    def AImove(self, jugador1, jugador2=None, noplayers=1):
        """Decide this frame's action: idle, chase, or attack the nearest player."""
        if self.accion not in [6, 7]:
            self.movetime -= 1
            # Pause briefly between decisions, then idle and re-arm the timer.
            if self.movetime <= -50:
                self.movetime = random.randrange(0, 50)
                self.move('I')
            if self.movetime <= 0:
                # Target the closer of the (up to) two players.
                if noplayers == 1:
                    selectplayer = jugador1
                else:
                    distanceplayer1 = math.fabs(jugador1.rect.x - self.rect.x) + math.fabs(jugador1.rect.y - self.rect.y)
                    distanceplayer2 = math.fabs(jugador2.rect.x - self.rect.x) + math.fabs(jugador2.rect.y - self.rect.y)
                    if distanceplayer1 > distanceplayer2:
                        selectplayer = jugador2
                    else:
                        selectplayer = jugador1
                # In attack range: strike toward the player.
                if math.fabs(selectplayer.rect.x - self.rect.x) <= self.moverange and math.fabs(selectplayer.rect.y - self.rect.y) <= self.moverange / 4:
                    if selectplayer.rect.x - self.rect.x > 0:
                        self.move('AR')
                    else:
                        self.move('AL')
                else:
                    # Randomly try a vertical (movedir=1) or horizontal
                    # (movedir=0) approach step.
                    movedir = random.randrange(0, 2)
                    discardedy = False
                    if movedir:
                        if selectplayer.rect.y - self.rect.y > self.moverange / 4:
                            self.vel_y = self.vel_y_value
                            if selectplayer.rect.x - self.rect.x > 0:
                                self.move('R')
                            else:
                                self.move('L')
                        elif selectplayer.rect.y - self.rect.y < -self.moverange / 4:
                            self.vel_y = -self.vel_y_value
                            if selectplayer.rect.x - self.rect.x > 0:
                                self.move('R')
                            else:
                                self.move('L')
                        else:
                            discardedy = True
                    # Bug fix: this was an "elif" chained to "if movedir:",
                    # so when the vertical move was discarded (discardedy)
                    # the horizontal fallback could never run.
                    if discardedy or movedir == 0:
                        if selectplayer.rect.x - self.rect.x > self.moverange:
                            self.vel_x = self.vel_x_value
                            if selectplayer.rect.x - self.rect.x > 0:
                                self.move('R')
                            else:
                                self.move('L')
                        elif selectplayer.rect.x - self.rect.x < -self.moverange:
                            self.vel_x = -self.vel_x_value
                            if selectplayer.rect.x - self.rect.x > 0:
                                self.move('R')
                            else:
                                self.move('L')
            random.seed(pygame.time.get_ticks())
    def die(self):
        """Switch to the death animation matching the facing direction.

        Bug fix: the original also compared the bound method ``self.move``
        against strings ('R', 'AR', ...) — always False — so only
        ``self.dir`` ever decided the branch. The dead comparisons are
        removed without changing behavior.
        """
        if self.accion not in [6, 7]:
            if self.dir == 'R':
                self.accion = 6
                self.finished = False
            elif self.dir == 'L':
                self.accion = 7
                self.finished = False
    def move(self, key):
        """Set the action for *key* ('R', 'L', 'AR', 'AL', 'I').

        A new action is only accepted once a running attack animation has
        finished; non-attack animations can be interrupted at any time.
        """
        if (self.finished and self.prevkey in ['AL', 'AR']) or self.prevkey not in ['AL', 'AR']:
            self.finished = False
            if key == 'R':
                self.accion = 2
            elif key == 'L':
                self.accion = 3
            elif key == 'AR':
                self.accion = 4
            elif key == 'AL':
                self.accion = 5
            elif key == 'I':
                self.accion = 0
            self.prevkey = key
            self.indice = 0
    def update(self):
        """Advance one animation frame, emit sounds, and apply velocity."""
        # Idle R (row 0): 3 frames, stationary.
        if self.accion == 0:
            self.image = self.f[self.accion][self.indice]
            self.indice += 1
            if self.indice > 2:
                self.indice = 0
            self.vel_x = 0
            self.vel_y = 0
        # Idle L (row 1): 3 frames, stationary.
        if self.accion == 1:
            self.image = self.f[self.accion][self.indice]
            self.indice += 1
            if self.indice > 2:
                self.finished = True
                self.indice = 0
            self.vel_x = 0
            self.vel_y = 0
        # Walk R (row 2): 6 frames with footstep sounds on frames 0 and 3.
        if self.accion == 2:
            if self.indice <= 5:
                self.image = self.f[self.accion][self.indice]
                if self.indice == 0:
                    stepE.play()
                if self.indice == 3:
                    stepE.play()
                self.indice += 1
            if self.indice > 5:
                self.finished = True
                self.indice = 0
        # Walk L (row 3): mirror of Walk R.
        if self.accion == 3:
            if self.indice <= 5:
                self.image = self.f[self.accion][self.indice]
                if self.indice == 0:
                    stepE.play()
                if self.indice == 3:
                    stepE.play()
                self.indice += 1
            if self.indice > 5:
                self.finished = True
                self.indice = 0
        # Attack R (row 4): 2 frames; punch sound on the hit frame.
        if self.accion == 4:
            if self.indice <= 1:
                self.image = self.f[self.accion][self.indice]
                if self.indice == 1:
                    punchE2.play()
                self.indice += 1
            if self.indice > 1:
                self.finished = True
                self.indice = 0
            self.vel_x = 0
            self.vel_y = 0
        # Attack L (row 5): mirror of Attack R.
        if self.accion == 5:
            if self.indice <= 1:
                self.image = self.f[self.accion][self.indice]
                if self.indice == 1:
                    punchE2.play()
                self.indice += 1
            if self.indice > 1:
                self.finished = True
                self.indice = 0
            self.vel_x = 0
            self.vel_y = 0
        # Die R (row 6). NOTE(review): uses "< 2" where Die L uses "<= 2",
        # so the right-facing death shows one fewer frame — looks like an
        # off-by-one in the original; preserved as-is.
        if self.accion == 6:
            if self.indice < 2:
                if self.indice == 0:
                    channel3.play(ouch)
                self.image = self.f[self.accion][self.indice]
                self.indice += 1
            if self.indice >= 2:
                self.indice = 0
                self.finished = True
            self.vel_x = 0
            self.vel_y = 0
        # Die L (row 7).
        if self.accion == 7:
            if self.indice <= 2:
                if self.indice == 0:
                    channel3.play(ouch)
                self.image = self.f[self.accion][self.indice]
                self.indice += 1
            if self.indice >= 2:
                self.indice = 0
                self.finished = True
        # Once a death animation has completed, flag the sprite for removal.
        if self.accion in [6, 7] and self.finished:
            self.canDie = True
            self.vel_x = 0
            self.vel_y = 0
        self.rect.y += self.vel_y
        self.rect.x += self.vel_x
        # Screen clamping was intentionally disabled in the original:
        #if self.rect.x + self.rect.width > RESOLUTION[0] - bglimit:
        #    self.rect.x = RESOLUTION[0] - bglimit - self.rect.width
        #elif self.rect.x < bglimit:
        #    self.rect.x = bglimit
class Bala (pygame.sprite.Sprite):
def __init__(self, matriz):
pygame.sprite.Sprite.__init__(self)
self.f=matriz
self.image=self.f[0]
| |
or not (=0).
-'ocg' specifies wether a strict or relaxed occupancy grid is used in
the method itself. Use "str" for strict occgrid, "rlx" for relaxed.
-'taulow' is the threshold for the values in the relaxed occgrid. If a
value is >taulow it is used in the relaxed occgrid and ignored
otherwise. This exists to limit the error that can happen due to the
usage of a relaxed occupancy grid.
-the debug parameter is just there to enable easier debugging, e.g.
by printing certain statements at a time.
Uses a ball kernel for the neighborhood intersection.
Returns the mean curvature for each voxel.
"""
if debug==1:
starttime = time.time()
if type(inp)==str:
#load pointcloud
if ocg=="str":
x, y, z = np.loadtxt(inp,skiprows=1, unpack=True,
usecols=(0,1,2))
else:
x, y, z, vals = np.loadtxt(inp,skiprows=1, unpack=True,
usecols=(0,1,2,3))
elif isinstance(inp,(list,np.ndarray)):
z = inp[:,0]
y = inp[:,1]
x = inp[:,2]
if ocg=="rlx":
vals = inp[:,3]
else:
raise NameError('Input can be an already loaded pointcloud that \
consists of the three coordinates x y z or a string \
that leads to the file that is in x y z format with \
no header. ')
if debug==1:
print("Initialised the input point cloud.\n"
+ "Current Runtime: " + str(time.time() - starttime))
print("Number of Points in the pointcloud: " + str(np.shape(x)[0]))
if ocg=="str":
OGD = og.constructoccgrid_pointcloud([z, y, x], rho)
else:
OGD = og.constructoccgrid_pointcloud([z, y, x, vals], rho, ocg=ocg,
taulow=taulow)
OGB = og.constructoccgrid_ball(kr)
if debug==1:
print("Got all the Occupancy Grids.\n"
+ "Current Runtime: " + str(time.time() - starttime))
if fft==0:
Vb = nd.filters.convolve(OGD, OGB, mode='constant', cval=0.0)
if debug==1:
print("Calculated Vb without FFT.\n"
+ "Current Runtime: " + str(time.time() - starttime))
else:
Vb = sg.fftconvolve(OGD, OGB, mode='same')
if debug==1:
print("Calculated Vb with FFT.\n"
+ "Current Runtime: " + str(time.time() - starttime))
mc = (4/(np.pi*np.abs(kr)**4)) * (( (2*np.pi/3) * np.abs(kr)**3 - Vb))
if debug==1:
print("Done.\n"
+ "Current Runtime: " + str(time.time() - starttime))
if debug==0:
return mc
else:
print("Debug: Returning OGD, OGN, Vb, mc. Good luck!")
return OGD, OGB, Vb, mc
def cemean_simple_ocg(inp, kr=6, fft=1, debug=0):
    """Simple integral-invariant mean-curvature estimate on an occupancy grid.

    Input:
     -'inp' is a strict or relaxed occupancy grid.
     -'kr' is the kernel radius of the ball neighborhood.
     -'fft' selects the convolution: 1 -> FFT (scipy.signal), 0 -> direct.
     -'debug' prints timing info and makes the function return the
      intermediates as well.
    Returns the mean curvature per voxel (or (inp, OGB, Vb, mc) in
    debug mode).
    """
    if debug == 1:
        starttime = time.time()
        print("Initialised the input point cloud.\n"
              + "Current Runtime: " + str(time.time() - starttime))
    kernel = og.constructoccgrid_ball(kr)
    if debug == 1:
        print("Got all the Occupancy Grids.\n"
              + "Current Runtime: " + str(time.time() - starttime))
    if fft == 0:
        volume = nd.filters.convolve(inp, kernel, mode='constant', cval=0.0)
        if debug == 1:
            print("Calculated Vb without FFT.\n"
                  + "Current Runtime: " + str(time.time() - starttime))
    else:
        volume = sg.fftconvolve(inp, kernel, mode='same')
        if debug == 1:
            print("Calculated Vb with FFT.\n"
                  + "Current Runtime: " + str(time.time() - starttime))
    # Mean curvature from the volume descriptor (Pottmann-style estimate).
    mc = (4 / (np.pi * np.abs(kr) ** 4)) * (((2 * np.pi / 3) * np.abs(kr) ** 3 - volume))
    if debug == 1:
        print("Done.\n"
              + "Current Runtime: " + str(time.time() - starttime))
    if debug == 0:
        return mc
    print("Debug: Returning OGD, OGN, Vb, mc. Good luck!")
    return inp, kernel, volume, mc
def cemean_principalcurv(kappa1, kappa2, debug=0):
    """Mean curvature as the average of the two principal curvatures.

    Input:
     -'kappa1', 'kappa2': matrices (or scalars) of principal curvatures.
     -'debug' prints a timing line when set to 1.
    Returns (kappa1 + kappa2) / 2 elementwise.
    """
    if debug == 1:
        starttime = time.time()
    mean_curv = 0.5 * (kappa1 + kappa2)
    if debug == 1:
        print("Success!.\n"
              + "Current Runtime: " + str(time.time() - starttime))
    return mean_curv
def cemeanpca_pointcloud(inp, rho, kr=3, order=2, cm=1, ocg="str", taulow=0,
                         debug=0):
    """Mean curvature of a point cloud via PCA and integral invariants.

    Thin wrapper: computes the principal curvatures with
    cepca_pointcloud and averages them.

    Input (all forwarded to cepca_pointcloud):
     -'inp': point cloud (array-like) or path to an x y z text file.
     -'rho': occupancy-grid resolution (rho+1 cells per axis).
     -'kr': ball-kernel radius.
     -'order': eigenvalue-ordering strategy (0 none, 1 by magnitude,
      2 via the Pottmann07 error estimates, 3 central-difference mode).
     -'cm': convolution mode (1 FFT with zero padding; <1 selects the
      direct convolution's padding: 0 zero, 0.25 reflect, 0.50 nearest,
      0.75 mirror, 0.95 wrap).
     -'ocg'/'taulow': occupancy-grid kind ("str"/"rlx") and relaxed-grid
      threshold.
     -'debug' prints a timing line.
    Returns (mean_curvature, pd1, pd2, surface_normal) matrices.
    """
    if debug == 1:
        starttime = time.time()
    kappa1, kappa2, pd1, pd2, sn = cepca_pointcloud(inp, rho, kr=kr,
                                                    order=order, cm=cm,
                                                    ocg=ocg, taulow=taulow)
    mean_curv = 0.5 * (kappa1 + kappa2)
    if debug == 1:
        print("Success!.\n"
              + "Current Runtime: " + str(time.time() - starttime))
    return mean_curv, pd1, pd2, sn
def cemeanpca_ocg(inp, kr=3, order=2, cm=1, debug=0):
"""
Calculates the mean curvature of strict occupancy grid using pca & integral
invariants. Returns mean curvature, both principal directions and the
surface normal.
Input:
-'inp' is a strict or relaxed occipancy grid
-'kr' is the kernel radius.
-'cm' stands for convolution mode.If ==1, the fft with zero-padding is
used. If cm<1, then the discrete convolution with a certain kind of
padding is used. If cm==0, zero-padding, if cm==0.25, 'reflect',
if cm==0.50, 'nearest', if cm==0.75, 'mirror', if cm==0.95, 'wrap'.
-'order' is the parameter that specifies the order of the eigenvalues.
If order==0, then the order is not changed at all.
If order==1, then the eigenvalues and eigenvectors are ordered
according to the values of the eigenvalues. I.e. the biggest
eigenvalue is first, 2nd biggest is 2nd, etc etc.
If order==2, then we use the error approximations of Pottmann07 to
estimate which eigenvalues are the "first" and "second" eigenvalues
that are needed to calculate the principal curvatures.
If order ==3 and cm==1.5 then the reflect padding is used in the
central difference computation. If order==3,cm==1, then zero padding.
In all other cases for order==3, the same padding as in the
convolution in the separate cm modes is used.
-the debug parameter is just there to enable easier debugging, e.g.
by printing certain statements at a time.
Uses a ball | |
# tests/optimize/test_optimize.py (from albi3ro/pennylane)
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane` optimizers.
"""
# pylint: disable=redefined-outer-name
import itertools as it
import numpy as onp
import pytest
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import (
GradientDescentOptimizer,
MomentumOptimizer,
NesterovMomentumOptimizer,
AdagradOptimizer,
RMSPropOptimizer,
AdamOptimizer,
RotoselectOptimizer,
RotosolveOptimizer,
)
# Sample points used across the optimizer tests.
x_vals = np.linspace(-10, 10, 16, endpoint=False)
# Hyperparameters for optimizers
stepsize = 0.1
gamma = 0.5
delta = 0.8
# function arguments in various formats
mixed_list = [(0.2, 0.3), np.array([0.4, 0.2, 0.4]), 0.1]
mixed_tuple = (np.array([0.2, 0.3]), [0.4, 0.2, 0.4], 0.1)
flat_list = [0.2, 0.3, 0.1, 0.4, -0.1]
multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])
multid_list = [[0.1, 0.2], [-0.1, -0.4]]
# functions and their gradients
fnames = ["test_function_1", "test_function_2", "test_function_3"]
# Univariate test functions with their analytic gradients (as 1-tuples,
# matching the shape qml.grad returns).
univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x ** 2]
grad_uni_fns = [lambda x: (np.cos(x),), lambda x: (np.exp(x / 10.0) / 10.0,), lambda x: (2 * x,)]
# Multivariate (flat-vector) test functions and gradients.
multivariate_funcs = [
    lambda x: np.sin(x[0]) + np.cos(x[1]),
    lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
    lambda x: np.sum([x_ ** 2 for x_ in x]),
]
grad_multi_funcs = [
    lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
    lambda x: (
        np.array(
            [np.exp(x[0] / 3) / 3 * np.tanh(x[1]), np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2)]
        ),
    ),
    lambda x: (np.array([2 * x_ for x_ in x]),),
]
# Multivariate, multidimensional (matrix-argument) functions and gradients.
mvar_mdim_funcs = [
    lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
    lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
    lambda x: np.sum([x_[0] ** 2 for x_ in x]),
]
grad_mvar_mdim_funcs = [
    lambda x: (np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])], [-np.sin(x[1, 0]), 1.0]]),),
    lambda x: (
        np.array(
            [
                [
                    np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
                    np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1]) ** 2),
                ],
                [0.0, 0.0],
            ]
        ),
    ),
    lambda x: (np.array([[2 * x_[0], 0.0] for x_ in x]),),
]
@qml.qnode(qml.device("default.qubit", wires=1))
def quant_fun(*variables):
    """Single-qubit circuit whose parameters arrive as mixed containers."""
    qml.RX(variables[0][1], wires=[0])
    qml.RY(variables[1][2], wires=[0])
    qml.RY(variables[2], wires=[0])
    return qml.expval(qml.PauliZ(0))
@qml.qnode(qml.device("default.qubit", wires=1))
def quant_fun_flat(var):
    """Single-qubit circuit taking one flat parameter sequence."""
    qml.RX(var[0], wires=[0])
    qml.RY(var[1], wires=[0])
    qml.RY(var[2], wires=[0])
    qml.RX(var[3], wires=[0])
    return qml.expval(qml.PauliZ(0))
@qml.qnode(qml.device("default.qubit", wires=1))
def quant_fun_mdarr(var):
    """Single-qubit circuit indexing into a 2-D array parameter."""
    qml.RX(var[0, 1], wires=[0])
    qml.RY(var[1, 0], wires=[0])
    qml.RY(var[1, 1], wires=[0])
    return qml.expval(qml.PauliZ(0))
@qml.qnode(qml.device("default.qubit", wires=1))
def quant_fun_mdlist(var):
    """Single-qubit circuit indexing into a nested-list parameter."""
    qml.RX(var[0][1], wires=[0])
    qml.RY(var[1][0], wires=[0])
    qml.RY(var[1][1], wires=[0])
    return qml.expval(qml.PauliZ(0))
@pytest.fixture(scope="function")
def bunch():
    """Provide a fresh set of optimizer instances for every test."""
    class A:
        # One of each optimizer under test, built from the module-level
        # hyperparameters (stepsize, gamma, delta).
        sgd_opt = GradientDescentOptimizer(stepsize)
        mom_opt = MomentumOptimizer(stepsize, momentum=gamma)
        nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)
        adag_opt = AdagradOptimizer(stepsize)
        rms_opt = RMSPropOptimizer(stepsize, decay=gamma)
        adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)
        rotoselect_opt = RotoselectOptimizer()
    return A()
class TestOptimizer:
"""Basic optimizer tests."""
    def test_mixed_inputs_for_hybrid_optimization(self, bunch, tol):
        """Tests that gradient descent optimizer treats parameters of mixed types the same
        for hybrid optimization tasks."""
        def hybrid_fun(variables):
            # Quantum expectation plus a classical term of the same parameters.
            return quant_fun(*variables) + variables[0][1]
        hybrid_list = bunch.sgd_opt.step(hybrid_fun, mixed_list)
        hybrid_tuple = bunch.sgd_opt.step(hybrid_fun, mixed_tuple)
        # List- and tuple-packaged parameters must produce identical updates.
        assert hybrid_list[0] == pytest.approx(hybrid_tuple[0], abs=tol)
        assert hybrid_list[1] == pytest.approx(hybrid_tuple[1], abs=tol)
        assert hybrid_list[2] == pytest.approx(hybrid_tuple[2], abs=tol)
    def test_mixed_inputs_for_classical_optimization(self, bunch, tol):
        """Tests that gradient descent optimizer treats parameters of mixed types the same
        for purely classical optimization tasks."""
        def class_fun(var):
            # Purely classical cost over the mixed-container parameters.
            return var[0][1] * 2.0 + var[1][2] + var[2]
        class_list = bunch.sgd_opt.step(class_fun, mixed_list)
        class_tuple = bunch.sgd_opt.step(class_fun, mixed_tuple)
        # List- and tuple-packaged parameters must produce identical updates.
        assert class_list[0] == pytest.approx(class_tuple[0], abs=tol)
        assert class_list[1] == pytest.approx(class_tuple[1], abs=tol)
        assert class_list[2] == pytest.approx(class_tuple[2], abs=tol)
    def test_mixed_inputs_for_quantum_optimization(self, bunch, tol):
        """Tests that gradient descent optimizer treats parameters of mixed types the same
        for purely quantum optimization tasks."""
        # The qnode itself is the cost; parameters are passed unpacked.
        quant_list = bunch.sgd_opt.step(quant_fun, *mixed_list)
        quant_tuple = bunch.sgd_opt.step(quant_fun, *mixed_tuple)
        assert quant_list[0] == pytest.approx(quant_tuple[0], abs=tol)
        assert quant_list[1] == pytest.approx(quant_tuple[1], abs=tol)
        assert quant_list[2] == pytest.approx(quant_tuple[2], abs=tol)
    def test_array_and_list_return_same_update(self, bunch, tol):
        """Tests that gradient descent optimizer has the same output for
        lists and arrays."""
        def hybrid_fun_mdarr(var):
            return quant_fun_mdarr(var) + var[0, 0]
        def hybrid_fun_mdlist(var):
            return quant_fun_mdlist(var) + var[0][0]
        # Same data as a 2-D array and as a nested list must update equally.
        array = bunch.sgd_opt.step(hybrid_fun_mdarr, multid_array)
        ls = bunch.sgd_opt.step(hybrid_fun_mdlist, multid_list)
        assert array == pytest.approx(np.asarray(ls), abs=tol)
def test_step_and_cost_autograd_sgd_mixed_list(self, bunch):
"""Test that the correct cost is returned via the step_and_cost method for the
gradient-descent optimizer"""
_, res = bunch.sgd_opt.step_and_cost(quant_fun, *mixed_list)
expected = quant_fun(*mixed_list)
assert np.all(res == expected)
def test_step_and_cost_autograd_sgd_multid_array(self, bunch):
"""Test that the correct cost is returned via the step_and_cost method for the
gradient-descent optimizer"""
_, res = bunch.sgd_opt.step_and_cost(quant_fun_mdarr, multid_array)
expected = quant_fun_mdarr(multid_array)
assert np.all(res == expected)
def test_step_and_cost_autograd_nesterov_mixed_list(self, bunch):
"""Test that the correct cost is returned via the step_and_cost method for the
Nesterov momentum optimizer"""
_, res = bunch.nesmom_opt.step_and_cost(quant_fun, *mixed_list)
expected = quant_fun(*mixed_list)
assert np.all(res == expected)
def test_step_and_cost_autograd_nesterov_multid_array(self, bunch):
"""Test that the correct cost is returned via the step_and_cost method for the
Nesterov momentum optimizer"""
_, res = bunch.nesmom_opt.step_and_cost(quant_fun_mdarr, multid_array)
expected = quant_fun_mdarr(multid_array)
assert np.all(res == expected)
    @pytest.mark.parametrize("params", [[1.7, 2.2], [-1.42, 0.1], [0.05, -0.8]])
    def test_step_and_cost_autograd_rotoselect(self, bunch, params):
        """Test that the correct cost is returned via the step_and_cost method for the
        Rotoselect momentum optimizer.

        Builds a two-qubit ansatz whose single-qubit rotations have
        selectable generators, and checks that the cost reported by
        ``step_and_cost`` equals the cost evaluated at the input parameters.
        """
        generators = [qml.RY, qml.RX]
        possible_generators = [qml.RX, qml.RY, qml.RZ]
        # Restrict the optimizer's generator search space.
        bunch.rotoselect_opt.possible_generators = possible_generators
        dev = qml.device("default.qubit", shots=None, wires=2)
        def ansatz(params, generators):
            # One selectable rotation per wire, then an entangling CNOT.
            generators[0](params[0], wires=0)
            generators[1](params[1], wires=1)
            qml.CNOT(wires=[0, 1])
        @qml.qnode(dev)
        def circuit_1(params, generators=None):  # generators will be passed as a keyword arg
            ansatz(params, generators)
            return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
        @qml.qnode(dev)
        def circuit_2(params, generators=None):  # generators will be passed as a keyword arg
            ansatz(params, generators)
            return qml.expval(qml.PauliX(0))
        def cost_fn(params, generators):
            # Weighted combination of three expectation values.
            Z_1, Y_2 = circuit_1(params, generators=generators)
            X_1 = circuit_2(params, generators=generators)
            return 0.5 * Y_2 + 0.8 * Z_1 - 0.2 * X_1
        _, _, res = bunch.rotoselect_opt.step_and_cost(cost_fn, params, generators)
        expected = cost_fn(params, generators)
        assert np.all(res == expected)
@pytest.mark.parametrize("func, f_grad", list(zip(univariate_funcs, grad_uni_fns)))
@pytest.mark.parametrize("var", [0, -3, 42])
def test_step_and_cost_supplied_grad(self, bunch, func, var, f_grad):
"""Test that returned cost is correct if gradient function is supplied"""
_, res = bunch.sgd_opt.step_and_cost(func, var, grad_fn=f_grad)
expected = func(var)
assert np.all(res == expected)
@pytest.mark.parametrize("x_start", x_vals)
def test_gradient_descent_optimizer_univar(self, x_start, bunch, tol):
"""Tests that basic stochastic gradient descent takes gradient-descent steps correctly
for univariate functions."""
# TODO parametrize this for also
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
x_new = bunch.sgd_opt.step(f, x_start)
x_correct = x_start - gradf(x_start)[0] * stepsize
assert x_new == pytest.approx(x_correct, abs=tol)
def test_gradient_descent_optimizer_multivar(self, bunch, tol):
"""Tests that basic stochastic gradient descent takes gradient-descent steps correctly
for multivariate functions."""
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
x_vec = x_vals[jdx : jdx + 2]
x_new = bunch.sgd_opt.step(f, x_vec)
x_correct = x_vec - gradf(x_vec)[0] * stepsize
assert x_new == pytest.approx(x_correct, abs=tol)
def test_gradient_descent_optimizer_multivar_multidim(self, bunch, tol):
"""Tests that basic stochastic gradient descent takes gradient-descent steps correctly
for multivariate functions and with higher dimensional inputs."""
for gradf, f, name in zip(grad_mvar_mdim_funcs, mvar_mdim_funcs, fnames):
for jdx in range(len(x_vals[:-3])):
x_vec = x_vals[jdx : jdx + 4]
x_vec_multidim = np.reshape(x_vec, (2, 2))
x_new = bunch.sgd_opt.step(f, x_vec_multidim)
x_correct = x_vec_multidim - gradf(x_vec_multidim)[0] * stepsize
x_new_flat = x_new.flatten()
x_correct_flat = x_correct.flatten()
assert x_new_flat == pytest.approx(x_correct_flat, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_gradient_descent_optimizer_usergrad(self, x_start, bunch, tol):
"""Tests that basic stochastic gradient descent takes gradient-descent steps correctly
using user-provided gradients."""
for gradf, f, name in zip(grad_uni_fns[::-1], univariate_funcs, fnames):
x_new = bunch.sgd_opt.step(f, x_start, grad_fn=gradf)
x_correct = x_start - gradf(x_start)[0] * stepsize
assert x_new == pytest.approx(x_correct, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_momentum_optimizer_univar(self, x_start, bunch, tol):
"""Tests that momentum optimizer takes one and two steps correctly
for univariate functions."""
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
bunch.mom_opt.reset()
x_onestep = bunch.mom_opt.step(f, x_start)
x_onestep_target = x_start - gradf(x_start)[0] * stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.mom_opt.step(f, x_onestep)
momentum_term = gamma * gradf(x_start)[0]
x_twosteps_target = x_onestep - (gradf(x_onestep)[0] + momentum_term) * stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
def test_momentum_optimizer_multivar(self, bunch, tol):
"""Tests that momentum optimizer takes one and two steps correctly
for multivariate functions."""
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
bunch.mom_opt.reset()
x_vec = x_vals[jdx : jdx + 2]
x_onestep = bunch.mom_opt.step(f, x_vec)
x_onestep_target = x_vec - gradf(x_vec)[0] * stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.mom_opt.step(f, x_onestep)
momentum_term = gamma * gradf(x_vec)[0]
x_twosteps_target = x_onestep - (gradf(x_onestep)[0] + momentum_term) * stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_nesterovmomentum_optimizer_univar(self, x_start, bunch, tol):
"""Tests that nesterov momentum optimizer takes one and | |
<filename>mcmc/util_cupy.py
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 13 14:17:09 2018
@author: puat133
"""
# import math
import h5py
import scipy.io as sio
import numpy as np
import scipy.linalg as sla
import numba as nb
import cupy as cp
import time
import gc
import mcmc.image_cupy as im
import h5py
from skimage.transform import radon
from cupy.prof import TimeRangeDecorator as cupy_profile
from numba import cuda
SQRT2 = cp.float32(1.41421356)
PI = cp.float32(cp.pi)
TPBn = 8#4*32
TPB = (TPBn,TPBn)
import mcmc.util as u_nb
from numba import cuda
from math import sin,cos,sqrt,pi
from cmath import exp
from mcmc.extra_linalg import solve_triangular
from cupy.linalg import qr
import cupyx as cpx
#@cupy_profile()
def construct_w_Half(n):
    """Draw a length-``n`` complex float32 white-noise half-spectrum.

    The zeroth coefficient is forced real (doubled real part before the
    global 1/sqrt(2) normalisation), matching the Hermitian-symmetry
    convention used throughout this module.
    """
    noise = cp.random.randn(n, dtype=cp.float32) + 1j * cp.random.randn(n, dtype=cp.float32)
    noise[0] = 2 * noise[0].real
    return noise / SQRT2
#@cupy_profile()
def inner(u, v):
    """Inner product of two vectors (thin wrapper around ``cp.inner``)."""
    return cp.inner(u, v)
# @nb.vectorize([nb.complex128(nb.int64,nb.float64)],cache=CACHE,nopython=True)
#@cupy_profile()
def eigenFunction1D(i, t):
    """Laplacian eigenfunction exp(2*pi*1j*i*t) in one dimension.

    i -- integer frequency index
    t -- time (float)
    """
    phase = 2 * PI * 1j * i * t
    return cp.exp(phase)
#@cupy_profile()
def matMulti(A, D):
    """Right-multiply ``A`` by the diagonal matrix ``D``, i.e. ``A @ D``."""
    return A @ D
def matMulti_sparse(A,D):
    """
    Matrix multiplication A@D where A is a Hermitian matrix and D is a
    sparse diagonal matrix.

    Only the diagonal of ``D`` is extracted; the product is computed
    column-wise on the GPU by the ``_matMulti_sparse`` kernel, so the full
    sparse matrix product is never formed.
    """
    # Output buffer with the same shape/dtype as A.
    C = cp.zeros_like(A,dtype=A.dtype)
    diag_D = D.diagonal()
    # Blocks-per-grid rounded up so every element of A is covered.
    bpg=((A.shape[0]+TPBn-1)//TPBn,(A.shape[1]+TPBn-1)//TPBn)
    _matMulti_sparse[bpg,TPB](A,diag_D,C)
    # Equivalent (slow) CPU formulation kept for reference:
    # for i in range(A.shape[0]):
    #     for j in range(A.shape[1]):
    #         C[i,j] = A[i,j]*diag_D[j]
    return C
@cuda.jit()
def _matMulti_sparse(A,diag_D,C):
    # CUDA kernel: C[i, j] = A[i, j] * diag_D[j] (column j scaled by the
    # j-th diagonal entry).  The bounds guard protects against the launch
    # grid being rounded up past C's shape.
    i,j = cuda.grid(2)
    if i < C.shape[0] and j < C.shape[1]:
        C[i,j] = A[i,j]*diag_D[j]
def slogdet(L):
    """Log absolute determinant of a (possibly complex) matrix ``L``.

    CuPy's ``cp.linalg.slogdet`` does not reliably handle complex input, so
    we use log|det L| = 0.5 * log det(L^H L): the Gram matrix L^H L is
    Hermitian positive (semi-)definite, so its eigenvalues are real and
    ``eigvalsh`` applies.

    Returns:
        tuple: ``(log_abs_det, 0)`` — the second element mimics the
        two-value shape of ``cp.linalg.slogdet``.
    """
    # NOTE: a leftover `cp.linalg.slogdet(L)` call whose result was discarded
    # has been removed — it only wasted an O(n^3) factorisation.
    gram = L.conj().T @ L
    # Re-symmetrise to kill round-off asymmetry before eigvalsh.
    gram = 0.5 * (gram + gram.conj().T)
    res = 0.5 * cp.sum(cp.log(cp.linalg.eigvalsh(gram)))
    del gram
    cp._default_memory_pool.free_all_blocks()
    return res, 0
# @nb.vectorize([nb.float64(nb.float64)],cache=CACHE,nopython=True)
#@cupy_profile()
def kappaFun(ut):
    """Pointwise kappa(u) = exp(-u), evaluated in the time domain."""
    return cp.exp(-ut)
# @nb.vectorize([nb.float64(nb.float64)],cache=CACHE,nopython=True)
# def kappa_pow_min_nu(ut):
# # res = cp.zeros(ut.shape[0],dtype=cp.float64)
# # for i in nb.prange(ut.shape[0]):
# # res[i] = math.exp(1.5*ut[i])
# # return res
# # return kappaFun(ut)**(-1.5)
# xp = cp.get_array_module(ut)
# return xp.exp(1.5*ut)
# # @nb.vectorize([nb.float64(nb.float64)],cache=CACHE,nopython=True)
# def kappa_pow_half(ut):
# # res = cp.zeros(ut.shape[0],dtype=cp.float64)
# # for i in nb.prange(ut.shape[0]):
# # res[i] = math.exp(-0.5*ut[i])
# # return res
# xp = cp.get_array_module(ut)
# return xp.exp(-0.5*ut)
# # return cp.sqrt(kappaFun(ut))
#@cupy_profile()
def norm2(u):
    """Squared Euclidean norm of a (possibly complex) vector."""
    nrm = cp.linalg.norm(u)
    return nrm ** 2
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
    """Render a single-line terminal progress bar; call once per iteration.

    Args:
        iteration: current iteration (int, 1-based)
        total: total number of iterations (int)
        prefix: string printed before the bar
        suffix: string printed after the percentage
        decimals: decimal places shown in the percentage
        length: bar width in characters
        fill: character used for the completed portion
    """
    percent = "{0:.{1}f}".format(100 * (iteration / float(total)), decimals)
    filled = length * iteration // total
    bar = fill * filled + '-' * (length - filled)
    # '\r' keeps rewriting the same terminal line.
    print('\r{} |{}| {}% {}'.format(prefix, bar, percent, suffix), end='\r')
    if iteration == total:
        # Move to a fresh line once the bar is complete.
        print()
#@cupy_profile()
def sigmasLancos(n):
    """Lanczos sigma factors sinc(k/(n+1)) for k = 1..n (Gibbs damping)."""
    ratio = cp.arange(1, n + 1) / (n + 1)
    x = PI * ratio
    return cp.sin(x) / x
def updateWelford(existingAggregate, newValue):
    """One step of Welford's online mean/variance algorithm.

    ``existingAggregate`` is ``(count, mean, M2)``; returns the updated
    triple after folding ``newValue`` into the running statistics.
    """
    count, mean, M2 = existingAggregate
    count = count + 1
    pre_delta = newValue - mean
    mean = mean + pre_delta / count
    post_delta = newValue - mean
    return (count, mean, M2 + pre_delta * post_delta)
# # retrieve the mean, variance and sample variance from an aggregate
def finalizeWelford(existingAggregate):
    """Convert a Welford aggregate into (mean, variance, sampleVariance).

    ``variance`` divides M2 by ``count``; ``sampleVariance`` by
    ``count - 1`` (so it raises ZeroDivisionError when count < 2, exactly
    as before).
    """
    count, mean, M2 = existingAggregate
    return (mean, M2 / count, M2 / (count - 1))
#@cupy_profile()
def extend(uSymmetric, num):
    """Zero-pad a symmetric spectrum (half-length n) to half-length ``num``.

    The input is centred inside a length ``2*num-1`` complex64 vector.  If
    ``num`` does not exceed the current half-length the input is returned
    unchanged.
    """
    n = (uSymmetric.shape[0] + 1) // 2
    if num <= n:
        return uSymmetric
    padded = cp.zeros(2 * num - 1, dtype=cp.complex64)
    padded[(num - 1) - (n - 1):(num - 1) + n] = uSymmetric
    return padded
#@cupy_profile()
def symmetrize(w_half):
    """Extend a half-spectrum to its full Hermitian-symmetric spectrum."""
    negative_side = w_half[:0:-1].conj()
    return cp.concatenate((negative_side, w_half))
# def construct_w_Half_2D(n):
# wHalf = construct_w_Half(2*n*n-2*n+1)
# return fromUHalfToUHalf2D(wHalf,n)/SQRT2
#@cupy_profile()
def construct_w_Half_2D_ravelled(n):
    """Ravelled half-plane white noise for a (2n-1)x(2n-1) 2-D spectrum."""
    return construct_w_Half(2 * n * n - 2 * n + 1) / SQRT2
# def fromUHalfToUHalf2D(uHalf,n):
# xp = cp.get_array_module(uHalf)
# uHalf = xp.concatenate((uHalf[n-1:0:-1].conj(),uHalf))
# return uHalf.reshape((n,2*n-1)).T
# def fromUHalf2DToUHalf(uHalf2D,n):
# uHalf = uHalf2D.T.ravel()
# return uHalf[n-1:]
#@cupy_profile()
def construct_w_2D_ravelled(n):
    """Full symmetric ravelled 2-D noise built from its half-plane part."""
    half = construct_w_Half_2D_ravelled(n)
    return cp.concatenate((half[:0:-1].conj(), half))
#@cupy_profile()
def symmetrize_2D(uHalf2D):
    """Mirror a 2-D half-spectrum into its full Hermitian-symmetric form."""
    # Conjugate-reflect everything except the zero-frequency column.
    tail = uHalf2D[:, 1:]
    mirrored = tail[::-1, :][:, ::-1].conj()
    return cp.hstack((mirrored, uHalf2D))
# def from_u_2D_ravel_to_u_2D(u,n):
# return u.reshape(2*n-1,2*n-1)
#@cupy_profile()
def from_u_2D_ravel_to_uHalf_2D(u,n):
    """Reshape a ravelled full 2-D spectrum to (2n-1, 2n-1) — using the
    package-wide memory order ``im.ORDER`` — and return the right half
    (columns n-1 onward, i.e. the non-negative frequencies)."""
    return u.reshape(2*n-1,2*n-1,order=im.ORDER)[:,n-1:]
#@cupy_profile()
def extend2D(uIn,num):
    """Zero-pad a 2-D spectrum (half or full) to half-length ``num``.

    A non-square input is treated as a half-spectrum of shape (2n-1, n);
    a square input as a full spectrum of shape (2n-1, 2n-1).  In both
    cases the data is centred in the padded complex64 array, and the input
    is returned unchanged when ``num`` does not exceed the current n.
    """
    if uIn.shape[1] != uIn.shape[0]: #uHalfCase
        n = uIn.shape[1]
        if num> n:
            z = cp.zeros((2*num-1,num),dtype=cp.complex64)
            # Centre rows; the half-spectrum columns stay left-aligned.
            z[(num-1)-(n-1):(num-1)+n,:n] = uIn
            return z
        else:
            return uIn
    else:
        n = (uIn.shape[0]+1)//2
        if num> n:
            z = cp.zeros((2*num-1,2*num-1),dtype=cp.complex64)
            # Centre both axes.
            z[(num-1)-(n-1):(num-1)+n,(num-1)-(n-1):(num-1)+n] = uIn
            return z
        else:
            return uIn
#@cupy_profile()
def kappa_pow_min_nu(u):
    """kappa(u)^(-nu) for d = 2, alpha = 2 (so nu = 1): equals exp(u)."""
    return cp.exp(u)
#@cupy_profile()
def kappa_pow_d_per_2(u):
    """kappa(u)^(d/2) for d = 2 (so d/2 = 1): equals exp(-u) = kappa(u)."""
    return cp.exp(-u)
#@cupy_profile()
def rfft2(z,n):
    """Orthonormal 2-D real FFT of ``z`` with the zero frequency centred
    along axis 0, cropped to the (2n-1) x n low-frequency half-spectrum."""
    # xp = cp.get_array_module(z)
    m = z.shape[0]
    zrfft = cp.fft.fftshift(cp.fft.rfft2(z,norm="ortho"),axes=0)
    # Keep 2n-1 centred rows and the first n (non-negative-frequency) cols.
    return zrfft[m//2 -(n-1):m//2 +n,:n]
#@cupy_profile()
def irfft2(uHalf2D,num):
    """Inverse orthonormal 2-D real FFT of a half-spectrum.

    The half-spectrum is first zero-padded to half-length ``num`` (see
    ``extend2D``), un-shifted along axis 0, then inverse-transformed to a
    real (2*num-1) x (2*num-1) image.
    """
    # xp = cp.get_array_module(uHalf2D)
    uHalfExtended = extend2D(uHalf2D,num)
    uh = cp.fft.ifftshift(uHalfExtended,axes=0)
    uh = cp.fft.irfft2(uh,s=(2*num-1,2*num-1),norm="ortho")
    return uh
#@cupy_profile()
def constructU(uHalf2D,index):
    """Build the dense U matrix by symmetrizing and padding ``uHalf2D`` to
    half-length 2n-1, then gathering entries with the precomputed
    fancy-index pair ``index`` (see ``createUindex``)."""
    n = uHalf2D.shape[1]
    res = extend2D(symmetrize_2D(uHalf2D),2*n-1)[index]
    return res
def constructU_cuda(uHalf2D):
    """Build the dense U matrix from a half-spectrum on the GPU via the
    ``_construct_U`` kernel (avoids the fancy-index tables)."""
    n = uHalf2D.shape[1]
    innerlength = 2*n-1
    length = innerlength**2
    U = cp.zeros((length,length),dtype=cp.complex64)
    # Blocks-per-grid rounded up to cover every element of U.
    bpg = ((U.shape[0]+TPBn-1)//TPBn,(U.shape[1]+TPBn-1)//TPBn)
    _construct_U[bpg,TPB](symmetrize_2D(uHalf2D),n,innerlength,U)
    return U
def constructU_from_uSym2D_cuda(uSym2D):
    """Build the dense U matrix from an already-symmetrized full spectrum
    on the GPU via the ``_construct_U`` kernel."""
    innerlength = uSym2D.shape[0]
    n = (innerlength+1)//2
    length = innerlength**2
    U = cp.zeros((length,length),dtype=cp.complex64)
    # Blocks-per-grid rounded up to cover every element of U.
    bpg = ((U.shape[0]+TPBn-1)//TPBn,(U.shape[1]+TPBn-1)//TPBn)
    _construct_U[bpg,TPB](uSym2D,n,innerlength,U)
    return U
#@cupy_profile()
def constructMatexplicit(uHalf2D,fun,num,index):
    """Apply ``fun`` pointwise in the spatial domain, transform back, and
    assemble the corresponding dense U matrix via ``constructU``."""
    spatial = fun(irfft2(uHalf2D, num))
    spectrum = rfft2(spatial, uHalf2D.shape[1])
    return constructU(spectrum, index)
def constructMatexplicit_cuda(uHalf2D,fun,num):
    """GPU-kernel variant of ``constructMatexplicit`` (no index tables)."""
    spatial = fun(irfft2(uHalf2D, num))
    spectrum = rfft2(spatial, uHalf2D.shape[1])
    return constructU_cuda(spectrum)
#@cupy_profile()
def constructLexplicit(uHalf2D,D,num,sqrtBeta,index):
    """Assemble L = (U_{kappa^-nu} D - U_{kappa^{d/2}}) / sqrtBeta."""
    Ku_min_nu = constructMatexplicit(uHalf2D, kappa_pow_min_nu, num, index)
    Ku_d_half = constructMatexplicit(uHalf2D, kappa_pow_d_per_2, num, index)
    return (matMulti(Ku_min_nu, D) - Ku_d_half) / sqrtBeta
#@cupy_profile()
# def createUindex(n):
# innerlength = (2*n-1)
# length = innerlength**2
# shape = (length,length)
# iX = cp.zeros(shape,dtype=cp.int32)#*(innerlength-1)
# iY = cp.zeros(shape,dtype=cp.int32)#*(innerlength-1)
# for i in range(innerlength):
# for j in range(innerlength):
# # if cp.abs(j-i)<n:
# # iX[i*innerlength:(i+1)*innerlength,j*innerlength:(j+1)*innerlength] = (j-i)+(innerlength-1)
# iX[i*innerlength:(i+1)*innerlength,j*innerlength:(j+1)*innerlength] = (i-j)+(innerlength-1)
# for k in range(innerlength):
# for l in range(innerlength):
# iShift = i*innerlength
# jShift = j*innerlength
# iY[k+iShift,l+jShift] = (l-k)+(innerlength-1)
# # iY[k+iShift,l+jShift] = (k-l)+(innerlength-1)
# return (iY,iX)
def createUindex(n):
    """Precompute the fancy-index pair ``(iY, iX)`` used to gather U from a
    centred symmetric 2-D spectrum of shape (2n-1, 2n-1).

    For row m = i*innerlength + k and column q = j*innerlength + l of U,
    the gathered spectrum position is
    ((i - j) + (innerlength - 1), (k - l) + (innerlength - 1)).

    Returns:
        tuple: ``(iY, iX)`` — row-index and column-index matrices.

    Fixes: the original referenced an undefined name ``jShifth`` (NameError
    on first call), and used int8 indices, which overflow once the index
    value 2*(innerlength-1) = 4n-4 exceeds 127 (i.e. n > 32) — int16 is
    safe for any realistic n.
    """
    innerlength = (2*n-1)
    length = innerlength**2
    shape = (length,length)
    iX = cp.zeros(shape,dtype=cp.int16)
    iY = cp.zeros(shape,dtype=cp.int16)
    for i in range(innerlength):
        for j in range(innerlength):
            iShift = i*innerlength
            jShift = j*innerlength
            # Whole (i, j) block reads spectrum row (i-j), shifted so that
            # zero maps to the centre index innerlength-1.
            iY[iShift:iShift+innerlength,jShift:jShift+innerlength] = (i-j)+(innerlength-1)
            for k in range(innerlength):
                for l in range(innerlength):
                    iX[k+iShift,l+jShift] = (k-l)+(innerlength-1)
    return (iY,iX)  # iY is the row index, iX is the column index
#@cupy_profile()
def eigenFunction2D(tx,ty,kx,ky):
    """
    Two-dimensional Laplacian eigenfunction exp(2*pi*1j*(kx*tx + ky*ty)).

    (kx, ky) -- integer frequency indices
    (tx, ty) -- spatial coordinates

    NOTE: the numpy/cupy FFT convention puts the 2*PI factor in the
    exponent for fft2, which is why it appears explicitly here —
    beware of this!
    """
    return cp.exp(1j*(2*PI)*(kx*tx+ky*ty)) #<-- why the eigen function has to be in this form?
#@cupy_profile()
def constructH(tx,ty,ix,iy):
    """
    Build the forward matrix H row by row.

    (ix, iy) are the ravelled Fourier-index meshgrids and (tx, ty) the
    ravelled spatial meshgrids; H[i, j] is the 2-D eigenfunction with
    frequency (ix[j], iy[j]) evaluated at the point (tx[i], ty[i]).
    """
    H = cp.empty((tx.shape[0],ix.shape[0]),dtype=cp.complex64)
    for i in range(tx.shape[0]):
        # H[i,:] = eigenFunction2D(tx[-i],ty[-i],ix,iy)
        H[i,:] = eigenFunction2D(tx[i],ty[i],ix,iy)
    return H
@cuda.jit()
def _construct_H(tx,ty,ix,iy,H):
    """
    CUDA kernel: H[i, j] = exp(2*pi*1j*(ix[j]*tx[i] + iy[j]*ty[i])).

    (ix, iy) are the ravelled Fourier-index meshgrids; (tx, ty) the
    ravelled spatial meshgrids.
    """
    i,j = cuda.grid(2)
    # Bounds guard: the launch grid is rounded up to a multiple of the block
    # size, so threads can fall outside H — without this check they would
    # write out of bounds (every other kernel in this module guards too).
    if i < H.shape[0] and j < H.shape[1]:
        H[i,j] = exp(1j*2*pi*(ix[j]*tx[i]+iy[j]*ty[i]))
@cuda.jit()
def _construct_U(uSym2D,N,innerlength,U):
    '''CUDA kernel for constructing the dense U matrix from the symmetric
    spectrum uSym2D of shape (innerlength, innerlength), innerlength = 2N-1.

    Element (m, n) of U belongs to block (i, j) = (m // innerlength,
    n // innerlength) with in-block offsets (k, l); it reads the spectrum
    at centred offsets (k-l, i-j) when both fall within (-(N-1), N-1].
    '''
    m,n = cuda.grid(2)
    if m < U.shape[0] and n < U.shape[1]:
        i = m//innerlength
        j = n//innerlength
        k = m%innerlength
        l = n%innerlength
        delta_IJ = (i-j)
        delta_KL = (k-l)
        # Entries outside the band stay at their zero initialisation.
        if -(N-1)<=delta_IJ < N:
            if -(N-1)<=delta_KL < N:
                U[m,n] = uSym2D[delta_KL+(N-1),delta_IJ+(N-1)] #<-- this is correct already!!
                # U[m,n] = uSym2D[delta_IJ+(N-1),delta_KL+(N-1)] #<-- this is correct already!!
@cuda.jit()
def _calculate_H_Tomography(r,theta,ix,iy,H):
    """
    CUDA kernel building the tomography forward matrix H.

    Row m is a projection line (offset r[m], angle taken from theta in
    reverse order, rotated by pi/2); column n is the 2-D Fourier index
    (ix[n], iy[n]).  Each entry is the line integral of the Fourier basis
    function along the chord of the radius-1/2 disk: a phase factor times
    sin(2*pi*k_u*l)/(pi*k_u), where k_u is the frequency component along
    the line direction and l the half chord length.
    """
    m,n = cuda.grid(2)
    if m < H.shape[0] and n < H.shape[1]:
        # theta_m = theta[m]
        theta_m = theta[-(m+1)]+0.5*pi
        sTheta = sin(theta_m)
        cTheta = cos(theta_m)
        r_m = r[m]
        kx = ix[n]
        ky = iy[n]
        # Frequency components along (k_tilde_u) and across (k_tilde_v)
        # the projection line.
        k_tilde_u = kx*cTheta+ky*sTheta
        k_tilde_v = -kx*sTheta+ky*cTheta
        l = sqrt(0.25-r_m*r_m)
        # FIX: the guard must be on k_tilde_u (the divisor below), not
        # k_tilde_v — the else branch 2*l is exactly the k_tilde_u -> 0
        # limit of sin(2*pi*k_tilde_u*l)/(pi*k_tilde_u).  The old guard
        # could divide by zero (k_tilde_u == 0, k_tilde_v != 0).
        if k_tilde_u*k_tilde_u > 0.0:
            H[m,n] = exp(1j*pi*((kx+ky)-2*k_tilde_v*r_m))*(sin(2*pi*k_tilde_u*l))/(pi*k_tilde_u)
        else:
            H[m,n] = exp(1j*pi*((kx+ky)-2*k_tilde_v*r_m))*(2*l)
# # ASELI untuk 180 derajat sesuai buku | |
single value,
the ``predicate`` attached to this node.
"""
return (self.predicate,)
def __repr__(self):
return "{}({})".format(type(self).__name__, self.predicate)
def __eq__(self, other):
if not isinstance(other, PostFilterNode):
return NotImplemented
return self is other or self.predicate == other.predicate
def _to_filter(self, post=False):
"""Helper to convert to low-level filter.
Args:
post (bool): Indicates if this is a post-filter node.
Returns:
Tuple[Callable[[Any], bool], None]: If this is a post-filter, this
returns the stored ``predicate``, otherwise it returns
:data:`None`.
"""
if post:
return self.predicate
else:
return None
class _BooleanClauses:
    """This type will be used for symbolically performing boolean operations.

    Internally, the state will track a symbolic expression like::

        A or (B and C) or (A and D)

    as a list of the ``OR`` components::

        [A, B and C, A and D]

    When ``combine_or=False``, it will track ``AND`` statements as a list,
    making the final simplified form of our example::

        [[A], [B, C], [A, D]]

    Via :meth:`add_node`, we will ensure that new nodes will be correctly
    combined (via ``AND`` or ``OR``) with the current expression.

    Args:
        name (str): The name of the class that is tracking a
            boolean expression.
        combine_or (bool): Indicates if new nodes will be combined
            with the current boolean expression via ``AND`` or ``OR``.
    """
    __slots__ = ("name", "combine_or", "or_parts")
    def __init__(self, name, combine_or):
        self.name = name
        self.combine_or = combine_or
        if combine_or:
            # For ``OR()`` the parts are just nodes.
            self.or_parts = []
        else:
            # For ``AND()`` the parts are "segments", i.e. node lists.
            self.or_parts = [[]]
    def add_node(self, node):
        """Update the current boolean expression.

        This uses the distributive law for sets to combine as follows:

        - ``(A or B or C or ...) or D`` -> ``A or B or C or ... or D``
        - ``(A or B or C or ...) and D`` ->
          ``(A and D) or (B and D) or (C and D) or ...``

        Args:
            node (Node): A node to add to the list of clauses.

        Raises:
            TypeError: If ``node`` is not a :class:`.Node`.
        """
        if not isinstance(node, Node):
            raise TypeError(
                "{}() expects Node instances as arguments; "
                "received a non-Node instance {!r}".format(self.name, node)
            )
        if self.combine_or:
            if isinstance(node, DisjunctionNode):
                # [S1 or ... or Sn] or [A1 or ... or Am]
                #   -> S1 or ... Sn or A1 or ... or Am
                self.or_parts.extend(node._nodes)
            else:
                # [S1 or ... or Sn] or [A1]
                #   -> S1 or ... or Sn or A1
                self.or_parts.append(node)
        else:
            if isinstance(node, DisjunctionNode):
                # [S1 or ... or Sn] and [A1 or ... or Am]
                #   -> [S1 and A1] or ... or [Sn and A1] or
                #      ... or [Sn and Am] or ... or [Sn and Am]
                new_segments = []
                for segment in self.or_parts:
                    # ``segment`` represents ``Si``
                    for sub_node in node:
                        # ``sub_node`` represents ``Aj``
                        new_segment = segment + [sub_node]
                        new_segments.append(new_segment)
                # Replace wholesale (in place, preserving list identity).
                self.or_parts[:] = new_segments
            elif isinstance(node, ConjunctionNode):
                # [S1 or ... or Sn] and [A1 and ... and Am]
                #   -> [S1 and A1 and ... and Am] or ... or
                #      [Sn and A1 and ... and Am]
                for segment in self.or_parts:
                    # ``segment`` represents ``Si``
                    segment.extend(node._nodes)
            else:
                # [S1 or ... or Sn] and [A1]
                #   -> [S1 and A1] or ... or [Sn and A1]
                for segment in self.or_parts:
                    segment.append(node)
class ConjunctionNode(Node):
    """Tree node representing a boolean ``AND`` operator on multiple nodes.

    .. warning::

        The constructor for this type may not always return a
        :class:`ConjunctionNode`. For example:

        * If the passed in ``nodes`` has only one entry, that single node
          will be returned by the constructor
        * If the resulting boolean expression has an ``OR`` in it, then a
          :class:`DisjunctionNode` will be returned; e.g.
          ``AND(OR(A, B), C)`` becomes ``OR(AND(A, C), AND(B, C))``

    Args:
        nodes (Tuple[Node, ...]): A list of nodes to be joined.

    Raises:
        TypeError: If ``nodes`` is empty.
        RuntimeError: If the ``nodes`` combine to an "empty" boolean
            expression.
    """
    __slots__ = ("_nodes",)
    def __new__(cls, *nodes):
        if not nodes:
            raise TypeError("ConjunctionNode() requires at least one node.")
        elif len(nodes) == 1:
            return nodes[0]
        clauses = _BooleanClauses("ConjunctionNode", combine_or=False)
        for node in nodes:
            clauses.add_node(node)
        if not clauses.or_parts:
            # NOTE: The original implementation returned a ``FalseNode``
            #       here but as far as I can tell this code is unreachable.
            raise RuntimeError("Invalid boolean expression")
        if len(clauses.or_parts) > 1:
            # Distribution produced an OR of ANDs.
            return DisjunctionNode(
                *[ConjunctionNode(*segment) for segment in clauses.or_parts]
            )
        instance = super(ConjunctionNode, cls).__new__(cls)
        instance._nodes = clauses.or_parts[0]
        return instance
    def __getnewargs__(self):
        """Private API used to specify ``__new__`` arguments when unpickling.

        .. note::

            This method only applies if the ``pickle`` protocol is 2 or
            greater.

        Returns:
            Tuple[Node, ...]: The list of stored nodes, converted to a
            :class:`tuple`.
        """
        return tuple(self._nodes)
    def __iter__(self):
        return iter(self._nodes)
    def __repr__(self):
        all_nodes = ", ".join(map(str, self._nodes))
        return "AND({})".format(all_nodes)
    def __eq__(self, other):
        if not isinstance(other, ConjunctionNode):
            return NotImplemented
        return self._nodes == other._nodes
    def _to_filter(self, post=False):
        """Helper to convert to low-level filter.

        Args:
            post (bool): Indicates if this is a post-filter node.

        Returns:
            Optional[Node]: The single or composite filter corresponding to
                the pre- or post-filter nodes stored. May return :data:`None`.
        """
        filters = []
        for node in self._nodes:
            if isinstance(node, PostFilterNode) == post:
                as_filter = node._to_filter(post=post)
                if as_filter:
                    filters.append(as_filter)
        if not filters:
            return None
        if len(filters) == 1:
            return filters[0]
        if post:
            # Renamed loop variable (was ``filter``) to avoid shadowing the
            # builtin of the same name.
            def composite_and_predicate(entity_pb):
                return all(sub_filter(entity_pb) for sub_filter in filters)
            return composite_and_predicate
        return _datastore_query.make_composite_and_filter(filters)
    def _post_filters(self):
        """Helper to extract post-filter nodes, if any.

        Filters all of the stored nodes that are :class:`PostFilterNode`.

        Returns:
            Optional[Node]: One of the following:

            * :data:`None` if there are no post-filter nodes in this ``AND()``
              clause
            * The single node if there is exactly one post-filter node, e.g.
              if the only node in ``AND(A, B, ...)`` that is a post-filter
              node is ``B``
            * The current node if every stored node is a post-filter node,
              e.g. if all nodes ``A, B, ...`` in ``AND(A, B, ...)`` are
              post-filter nodes
            * A **new** :class:`ConjunctionNode` containing the post-filter
              nodes, e.g. if only ``A, C`` are post-filter nodes in
              ``AND(A, B, C)``, then the returned node is ``AND(A, C)``
        """
        post_filters = [
            node for node in self._nodes if isinstance(node, PostFilterNode)
        ]
        if not post_filters:
            return None
        if len(post_filters) == 1:
            return post_filters[0]
        if post_filters == self._nodes:
            return self
        return ConjunctionNode(*post_filters)
    def resolve(self, bindings, used):
        """Return a node with parameters replaced by the selected values.

        Args:
            bindings (dict): A mapping of parameter bindings.
            used (Dict[Union[str, int], bool]): A mapping of already used
                parameters. This will be modified for each parameter found
                in ``bindings``.

        Returns:
            Node: The current node, if all nodes are already resolved.
            Otherwise returns a modified :class:`ConjunctionNode` with
            each individual node resolved.
        """
        resolved_nodes = [node.resolve(bindings, used) for node in self._nodes]
        if resolved_nodes == self._nodes:
            return self
        return ConjunctionNode(*resolved_nodes)
class DisjunctionNode(Node):
"""Tree node representing a boolean ``OR`` operator on multiple nodes.
.. warning::
This constructor may not always return a :class:`DisjunctionNode`.
If the passed in ``nodes`` has only one entry, that single node
will be returned by the constructor.
Args:
nodes (Tuple[Node, ...]): A list of nodes to be joined.
Raises:
TypeError: If ``nodes`` is empty.
"""
_multiquery = True
__slots__ = ("_nodes",)
def __new__(cls, *nodes):
if not nodes:
raise TypeError("DisjunctionNode() requires at least one node")
elif len(nodes) == 1:
return nodes[0]
instance = super(DisjunctionNode, cls).__new__(cls)
instance._nodes = []
clauses = _BooleanClauses("DisjunctionNode", combine_or=True)
for node in nodes:
clauses.add_node(node)
instance._nodes[:] = clauses.or_parts
return instance
def __getnewargs__(self):
"""Private API used to specify ``__new__`` arguments when unpickling.
.. note::
This method only applies if the ``pickle`` protocol is 2 or
greater.
Returns:
Tuple[Node, ...]: The list of stored nodes, converted to a
:class:`tuple`.
"""
return tuple(self._nodes)
def __iter__(self):
return iter(self._nodes)
def __repr__(self):
all_nodes = ", ".join(map(str, self._nodes))
return "OR({})".format(all_nodes)
def __eq__(self, other):
if not isinstance(other, DisjunctionNode):
return NotImplemented
return self._nodes == other._nodes
def resolve(self, bindings, used):
"""Return a node with parameters replaced by the selected values.
Args:
bindings (dict): A mapping of parameter bindings.
used (Dict[Union[str, int], bool]): | |
<filename>wp.py<gh_stars>0
import sqlite3
import sys
import json
import natto
import math
import numpy
import unicodedata
import string
import re
class Document():
    """Abstract base class for a document stored in a Collection."""
    def id(self):
        """Return this document's id (unique within its Collection)."""
        raise NotImplementedError()
    def text(self):
        """Return this document's full text."""
        raise NotImplementedError()
class Collection():
    """Abstract base class for a collection of documents."""
    def get_document_by_id(self, id):
        """Return the Document with the given id.

        Returns:
            Document: The Document for the given id.
        """
        raise NotImplementedError()
    def num_documents(self):
        """Return how many documents the collection holds.

        Returns:
            int: The number of documents in the collection.
        """
        raise NotImplementedError()
    def get_all_documents(self):
        """Iterate over every document in the collection.

        Returns:
            Iterable[Document]: All the documents in the collection.
        """
        raise NotImplementedError()
class WikipediaArticle(Document):
    """A Wikipedia article.

    Attributes:
        title (str): The title; unique, so it doubles as the id, and always
            under 256 bytes.
        _text (str): Plain-text article body.
        opening_text (str): First paragraph of the body.
        auxiliary_text (List[str]): Auxiliary text, usually from the infobox.
        categories (List[str]): Category names.
        headings (List[str]): Headings (the table of contents).
        wiki_text (str): The MediaWiki markup source.
        popularity_score (float): Popularity score; bigger is more popular.
        num_incoming_links (int): Count of intra-Wikipedia links pointing here.
    """
    def __init__(self, collection, title, text, opening_text, auxiliary_text, categories, headings, wiki_text, popularity_score, num_incoming_links):
        # NOTE(review): ``collection`` is accepted but never stored —
        # preserved as-is; confirm whether callers rely on that.
        self.title = title
        self._text = text
        self.opening_text = opening_text
        self.auxiliary_text = auxiliary_text  # list of strings
        self.categories = categories
        self.headings = headings
        self.wiki_text = wiki_text
        self.popularity_score = popularity_score
        self.num_incoming_links = num_incoming_links
    def id(self):
        """Return the article's id — its title (override of Document.id)."""
        return self.title
    def text(self):
        """Return the plain-text body (override of Document.text)."""
        return self._text
class FilterWords():
    """Predicates over MeCab feature lists used to select index terms.

    The methods take no instance state, so they are declared
    ``@staticmethod`` (the originals lacked the decorator, which made them
    unusable on instances — calling them on an instance would mis-bind the
    feature list to ``self``).  Class-level calls keep working unchanged.
    """
    @staticmethod
    def shouldBeIncluded(feature):
        """Return True for content words worth indexing.

        Accepts nouns (verbal, general, adjectival-stem, proper, numeric)
        and independent adjectives/verbs; ``feature`` is the comma-split
        MeCab feature list.
        """
        if feature[0] == '名詞':
            return feature[1] in ('サ変接続', '一般', '形容動詞語幹', '固有名詞', '数')
        if feature[0] in ('形容詞', '動詞'):
            return feature[1] == '自立'
        return False
    @staticmethod
    def excludeParticles(features):
        """Return True unless the feature marks a particle (助詞)."""
        return features[0] != '助詞'
class AnalyseQuery():
    """Turns a raw query string into index terms and character n-grams."""
    def extractWords(self, query, func=FilterWords.shouldBeIncluded):
        """Morphologically analyse ``query`` with MeCab, keeping the lemma
        of every node accepted by ``func`` (default: content words)."""
        parser = natto.MeCab()
        terms = []
        for node in parser.parse(query, as_nodes=True):
            if not node.is_nor():
                continue
            features = node.feature.split(',')
            # if features[0] != '助詞':
            if func(features):
                # Prefer the lemma (7th feature) when the full 9-field
                # feature set is present; otherwise fall back to the surface.
                terms.append(features[6] if len(features) == 9 else node.surface)
        return terms
    def divide_ngrams(self, query):
        """Normalise ``query`` and split it into character bigrams.

        Whitespace, ASCII alphanumerics, punctuation and 年/月/日 are
        stripped first.  NOTE(review): the window range runs to len(s), so
        the final element is a single trailing character — confirm whether
        that unigram tail is intentional.
        """
        n = 2
        removal_table = str.maketrans("", "", string.punctuation + "「」、。・『』《》")
        cleaned = unicodedata.normalize("NFKC", query.strip().replace(" ", ""))
        cleaned = cleaned.translate(removal_table)
        cleaned = re.sub(r'[a-zA-Z0-9¥"¥.¥,¥@]+', '', cleaned, flags=re.IGNORECASE)
        cleaned = re.sub(r'[!"“#$%&()\*\+\-\.,\/:;<=>?@\[\\\]^_`{|}~]', '', cleaned, flags=re.IGNORECASE)
        cleaned = re.sub(r'[\n|\r|\t|年|月|日]', '', cleaned, flags=re.IGNORECASE)
        return [cleaned[i:i + n] for i in range(0, len(cleaned))]
class Index():
    def __init__(self, filename, collection):
        """Open the SQLite index database at ``filename``.

        ``collection`` supplies corpus metadata (e.g. ``num_documents`` for
        IDF weighting in the ranking methods).
        """
        self.db = sqlite3.connect(filename)
        self.collection = collection
def search(self, terms):
c = self.db.cursor()
# search process
print("extractWords Done")
# titles which apeare len(query) times are the rets
titles = []
flag = True
for term in terms:
cands = c.execute("SELECT document_id FROM postings WHERE term=?", (term,)).fetchall()
if cands == None: # TODO: len(cands) == 0
continue
"""
for cand in cands:
if cand[0] in dict:
dict[cand[0]] += 1
else:
dict[cand[0]] = 1
if dict[cand[0]] == len(terms):
titles.append(cand[0])
"""
temptitles = set(map(lambda c:c[0], cands))
if flag:
titles = temptitles
flag = False
else:
titles = titles & temptitles
print("all terms searched")
return titles
def sortSearch(self, terms):
c = self.db.cursor()
documentVectors = {}
defaultVector = []
length = {}
for n, term in enumerate(terms):
cands = c.execute("SELECT document_id, times FROM postings WHERE term=?", (term,)).fetchall()
if cands == None or len(cands) == 0:
defaultVector.append(0)
continue
# non-zero div is ensured
defaultVector.append(math.log(self.collection.num_documents() / len(cands)))
for cand in cands:
if cand[0] in length:
pass
else:
# length[cand[0]] = len(self.collection.find_article_by_title(cand[0]).text)
length[cand[0]] = 1
if cand[0] in documentVectors:
pass
else:
documentVectors[cand[0]] = [0 for i in range(len(terms))]
documentVectors[cand[0]][n] = (1 + math.log(cand[1] / length[cand[0]])) * math.log(self.collection.num_documents() / len(cands))
return self.returnBestTitleWithSorting(documentVectors, defaultVector)
    def sortSearchWithNgram(self, terms, ngrams):
        """Rank documents by tf-idf like sortSearch(), plus an n-gram bonus.

        Every document vector gets one extra slot (index len(terms)); titles
        that also match all `ngrams` (via ngrams_search) receive a bonus
        weight there, so exact character-sequence matches are boosted.

        Args:
            terms: morphological index terms.
            ngrams: character n-grams of the query (see divide_ngrams).

        Returns:
            The best-matching document id.
        """
        c = self.db.cursor()
        # Titles whose postings contain every query n-gram.
        ngrams_list = self.ngrams_search(ngrams)
        documentVectors = {}
        defaultVector = []
        length = {}
        for n, term in enumerate(terms):
            cands = c.execute("SELECT document_id, times FROM postings WHERE term=?", (term,)).fetchall()
            if cands == None or len(cands) == 0:
                defaultVector.append(0)
                continue
            # non-zero div is ensured
            defaultVector.append(math.log(self.collection.num_documents() / len(cands)))
            for cand in cands:
                if cand[0] in length:
                    pass
                else:
                    # length[cand[0]] = len(self.collection.find_article_by_title(cand[0]).text)
                    length[cand[0]] = 1
                if cand[0] in documentVectors:
                    pass
                else:
                    # len(terms) + 1: last slot reserved for the n-gram bonus.
                    documentVectors[cand[0]] = [0 for i in range(len(terms) + 1)]
                documentVectors[cand[0]][n] = (1 + math.log(cand[1] / length[cand[0]])) * math.log(self.collection.num_documents() / len(cands))
        # Query-side bonus weight: twice the mean idf so far.
        defaultVector.append(numpy.sum(defaultVector) * 2 / len(terms))
        for title in documentVectors.keys():
            if title in ngrams_list:
                # NOTE(review): this sum is taken AFTER the bonus element was
                # appended above, so the document-side bonus is slightly larger
                # than the query-side one — confirm this asymmetry is intended.
                documentVectors[title][len(terms)] = numpy.sum(defaultVector) * 2 / len(terms)
        return self.returnBestTitleWithSorting(documentVectors, defaultVector)
def sortSearchFromTwoVectors(self, vectors1, vectors2, defaultVector1, defaultVector2):
defaultVector = numpy.add(defaultVector1, numpy.multiply(defaultVector2, 0.1))
vectors = {}
for title in vectors1.keys():
if title in vectors2:
vectors[title] = numpy.add(vectors1[title], numpy.multiply(vectors2[title], 0.1))
else:
vectors[title] = vectors1[title]
return self.returnBestTitleWithSorting(vectors, defaultVector)
def returnBestTitle(self, documentVectors, defaultVector):
defaultVector[len(defaultVector) - 1] *= 10
max_cos = -1
best_title = ''
for title, documentVector in documentVectors.items():
documentVector[len(documentVector) - 1] *= 10
cos = numpy.dot(documentVector, defaultVector) / (numpy.linalg.norm(documentVector) * numpy.linalg.norm(defaultVector))
if max_cos < cos:
max_cos = cos
best_title = title
return best_title
def returnBestTitleWithSorting(self, documentVectors, defaultVector):
defaultVector[len(defaultVector) - 1] *= 10
cos_title = []
for title, documentVector in documentVectors.items():
documentVector[len(documentVector) - 1] *= 10
cos = numpy.dot(documentVector, defaultVector) / (numpy.linalg.norm(documentVector) * numpy.linalg.norm(defaultVector))
cos_title.append((cos, title))
cos_title = sorted(cos_title, reverse=True)
print(cos_title[0][1])
print(cos_title[1][1])
print(cos_title[2][1])
print(cos_title[3][1])
print(cos_title[4][1])
print()
return cos_title[0][1]
def sortSearchReturnVectors(self, terms):
c = self.db.cursor()
documentVectors = {}
defaultVector = []
length = {}
for n, term in enumerate(terms):
cands = c.execute("SELECT document_id, times FROM postings WHERE term=?", (term,)).fetchall()
if cands == None or len(cands) == 0:
defaultVector.append(0)
continue
# non-zero div is ensured
defaultVector.append(math.log(self.collection.num_documents() / len(cands)))
for cand in cands:
if cand[0] in length:
pass
else:
# length[cand[0]] = len(self.collection.find_article_by_title(cand[0]).text)
length[cand[0]] = 1
if cand[0] in documentVectors:
pass
else:
documentVectors[cand[0]] = [0 for i in range(len(terms))]
documentVectors[cand[0]][n] = (1 + math.log(cand[1] / length[cand[0]])) * math.log(self.collection.num_documents() / len(cands))
returnVectors = {}
for title, documentVector in documentVectors.items():
returnVectors[title] = documentVector
return (returnVectors, defaultVector)
def ngrams_search(self, ngrams):
c = self.db.cursor()
is_first = True
for term in ngrams:
cands = c.execute("SELECT document_id FROM postings WHERE term=?", (term,)).fetchall()
if len(cands) == 0: continue
temptitles = set(map(lambda c:c[0], cands))
if is_first:
titles = temptitles
is_first = False
else:
titles = titles & temptitles
return titles
def sortSearchReturnTable(self, terms):
c = self.db.cursor()
documentVectors = {}
defaultVector = []
for n, term in enumerate(terms):
cands = c.execute("SELECT document_id FROM postings WHERE term=?", (term,)).fetchall()
if cands == None or len(cands) == 0:
defaultVector.append(0)
continue
# non-zero div is ensured
termPoint = (1 + math.log(len(cands)) * math.log(self.collection.num_documents() / len(cands)))
defaultVector.append(termPoint)
for cand in cands:
if cand[0] in documentVectors:
documentVectors[cand[0]][n] = termPoint
else:
documentVectors[cand[0]] = [0 for i in range(len(terms))]
documentVectors[cand[0]][n] = termPoint
max_cos = -1
best_title = ''
table = {}
for title, documentVector in documentVectors.items():
cos = numpy.dot(documentVector, defaultVector) / (numpy.linalg.norm(documentVector) * numpy.linalg.norm(defaultVector))
table[title] = cos
return table
def returnBestFromTable(self, table):
max_title = ''
max_val = -1
for title in table.keys():
if max_val < table[title]:
max_val = table[title]
max_title = title
return max_title
def mergeTable(self, table1, table2):
# table1 < tabl2 is pereferable
returnTable = {}
for title in table1.keys():
if title in table2:
returnTable[title] = table1[title] + table2[title] * 0.5
else:
returnTable[title] = table1[title]
return returnTable
def generate(self):
# indexing process
c = self.db.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS postings (
term TEXT NOT NULL,
document_id TEXT NOT NULL,
times INTEGER
);""")
parser = natto.MeCab()
articles = self.collection.get_all_documents()
count = 0
for article in articles:
count += 1
if count % 100 == 0:
print(count)
dict = {}
for node in parser.parse(article.text(), as_nodes=True):
if node.is_nor():
features = node.feature.split(',')
term | |
tf.nn.dropout(A1, dropout_rates[dropout_layers.index(1)])
print("applied dropout on layer 1 with rate of", dropout_rates[dropout_layers.index(1)])
equations["Z1"] = Z1
equations["A1"] = A1
Z_final = tf.add(tf.matmul(parameters['W' + str(2)], equations["A" + str(1)]), parameters['b' + str(2)])
#Z_final = tf.nn.relu(Z_final)
return Z_final
else:
return
def compute_cost(z3, Y):
    """
    Computes the cost
    Arguments:
    z3 -- output of forward propagation (output of the last LINEAR unit), of shape (output of before last layer, number of examples)
    Y -- "true" labels vector placeholder, same shape as z3
    Returns:
    cost - Tensor of the cost function
    """
    predictions = z3
    # Two-output networks are scored on the elementwise product of their two
    # output rows (a single derived prediction), not on the rows themselves.
    # NOTE(review): relies on a static (graph-time) shape — confirm shape[0]
    # is always known here.
    if predictions.shape[0] == 2:
        var0 = tf.slice(predictions, [0, 0], [1, predictions.shape[1]])
        var1 = tf.slice(predictions, [1, 0], [1, predictions.shape[1]])
        predictions = var0 * var1
    # Single mean-squared-error call; the original duplicated it in both
    # branches and printed the slice tensors for debugging.
    return tf.keras.losses.mse(Y, predictions)
def compute_error(Z3_train, Z3_test, Y_train, Y_test):
    """
    Computes the error or accuracy inverse
    Arguments:
    Z3_train -- output of forward propagation on training set (output of the last LINEAR unit), of shape (1, number of examples)
    Z3_test -- output of forward propagation on test set (output of the last LINEAR unit), of shape (1, number of examples)
    Y_train -- "true" labels of training set vector placeholder, same shape as Z3_train
    Y_test -- "true" labels of test set vector placeholder, same shape as Z3_test
    Returns:
    error_train - Tensor of error of training set
    error_test - Tensor of error of test set
    """
    def _relative_error(pred, truth):
        # Mean |pred - truth| / |truth| across all examples.
        return tf.reduce_mean(tf.math.divide(tf.abs(pred - truth), tf.abs(truth)))

    def _collapse(z):
        # Two-row outputs reduce to one prediction row: the elementwise
        # product of the two rows (matches compute_cost).
        row0 = tf.slice(z, [0, 0], [1, z.shape[1]])
        row1 = tf.slice(z, [1, 0], [1, z.shape[1]])
        return row0 * row1

    if Z3_train.shape[0] == 2:
        error_train = _relative_error(_collapse(Z3_train), Y_train)
        error_test = _relative_error(_collapse(Z3_test), Y_test)
    else:
        error_train = _relative_error(Z3_train, Y_train)
        error_test = _relative_error(Z3_test, Y_test)
    return error_train, error_test
def model(X_train, Y_train, X_test, Y_test, W, activations, learning_rate = 0.0001,
          num_epochs = 1500, print_cost = True, print_errors = True, show_plots = True, gpu = False, b1 = 0.9, b2 = 0.999, dropout_layers = [], dropout_rates= [], get_errors = False):
    """
    Implements a "len(activations)"-layered tensorflow neural network
    Arguments:
    X_train -- training set, of shape
    Y_train -- test set, of shape
    X_test -- training set, of shape
    Y_test -- test set, of shape
    W -- list where each value is number of neurons and index of that value indicates of which hidden layer but last value refers to output of output layer
    activations -- list of activations for each hidden layer, where avlue of index refers to which hidden layer
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    print_cost -- True to print the cost every 100 epochs
    print_errors -- True to print the errors of Train and Test set every 100 epochs
    show_plots -- True (default), shows the plots of cost and test/train error over iterations
    gpu -- True to enable usage of gpu processing
    b1 -- Momentum variable beta1 for adam optimizer
    b2 -- RMS prop variable beta2 for adam optimizer
    dropout_layers -- list whose elements indicate the number of the layer on which dropout should be applied. Example [1,3,4]
    dropout_rates -- list whose elements define the rate of the dropout that is applied to the layers of same index value in dropout_layers. Example [0.3, 0.5, 0.7]]
    get_errors -- False; if True, returns (parameters, err, err_te) instead of parameters alone
    Returns:
    parameters -- parameters learnt by the model. They can then be used with the predict function.
    """
    # NOTE(review): dropout_layers/dropout_rates use mutable [] defaults;
    # they appear to be read-only here, but confirm they are never mutated.
    tf.compat.v1.disable_eager_execution()
    tf.compat.v1.reset_default_graph() # to be able to rerun the model without overwriting tf variables
    tf.compat.v1.set_random_seed(1) # to keep consistent results
    seed = 0 # set initial seed value
    (n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set)
    n_y = Y_train.shape[0] # n_y : output size
    (n_x_te, m_te) = X_test.shape # (n_x: input size, m : number of examples in the train set)
    n_y_te = Y_test.shape[0] # n_y : output size
    costs = [] # To keep track of the cost
    errors_train = [] # To keep track of training errors
    errors_test = [] # To keep track of test errors
    dev = '/cpu:0' # Default device to use for processeing: CPU
    if gpu == True:
        dev = '/gpu:0'
    # Graph construction always happens on the CPU; only the training loop
    # below honours the requested device.
    # Run the following on CPU
    with tf.device('/cpu:0'):
        # Create Placeholders of shape (n_x, n_y)
        X, Y = create_placeholders(n_x, n_y, m = m)
        # Create Placeholders for test set of shape (n_x, n_y)
        X_te, Y_te = create_placeholders(n_x_te, n_y_te, m = m_te)
        # Initialize parameters
        parameters = initialize_parameters(W, n_x)
        # Forward propagation: Build the forward propagation in the tensorflow graph
        Z3 = forward_propagation(X, parameters, activations, dropout_layers, dropout_rates)
        # Forward propagation for test set: Build the forward propagation in the tensorflow graph
        # (no dropout arguments: evaluation uses the full network)
        Z3_te = forward_propagation(X_te, parameters, activations)
        # Cost function: Add cost function to tensorflow graph
        cost = compute_cost(Z3, Y)
        # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
        optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate = learning_rate, beta1 = b1, beta2 = b2).minimize(cost)
        # Calculate error
        error, error_te = compute_error(Z3, Z3_te, Y, Y_te)
        # Initialize all the variables
        init = tf.compat.v1.global_variables_initializer()
    start = time.time()
    # Start the session to compute the tensorflow graph
    with tf.compat.v1.Session() as sess:
        # Run the initialization
        sess.run(init)
        # Choose which processor to run the following code on
        with tf.device(dev):
            # Do the training loop
            for epoch in range(num_epochs):
                seed = seed + 1
                # Run the session to execute the "optimizer" and the "cost", the feedict should contain a minibatch or fullbatch for (X,Y).
                _ , cost_iter, err, err_te = sess.run([optimizer, cost, error, error_te], feed_dict={X: X_train, Y: Y_train, X_te: X_test, Y_te: Y_test})
                # Print the cost every 100 epochs
                if print_cost == True and epoch % 100 == 0:
                    print ("Cost after epoch %i: %f" % (epoch, cost_iter))
                if print_cost == True and epoch % 100 == 0:
                    costs.append(cost_iter)
                # Calculate and print the training error for train and test every 100 epochs
                if print_errors == True and epoch % 100 == 0:
                    errors_train.append(err)
                    errors_test.append(err_te)
                    print("Training error: ", err)
                    print("Test error: ",err_te)
            end = time.time()
            print("Device Driver execution runtime: ", (end-start))
            # plot the cost
            # NOTE(review): figures are always built; show_plots only gates
            # plt.show(), so state accumulates when plotting is disabled.
            plt.plot(np.squeeze(costs))
            plt.ylabel('cost')
            plt.xlabel('iterations (per 100)')
            plt.title("Learning rate =" + str(learning_rate))
            if show_plots == True:
                plt.show()
            # plot the errors
            plt.plot(np.squeeze(errors_train))
            plt.plot(np.squeeze(errors_test))
            plt.legend(['Training errors','Test errors'])
            plt.ylabel('errors')
            plt.xlabel('iterations (per 100)')
            plt.title("Learning rate =" + str(learning_rate))
            if show_plots == True:
                plt.show()
            # lets save the parameters in a variable
            parameters = sess.run(parameters)
            print ("Parameters have been trained!")
            # Calculate the correct predictions
            # err/err_te hold the error values from the final epoch above.
            print("Train Accuracy:", 1- err)
            print("Test Accuracy:", 1 - err_te)
            print("Train Error:", err)
            print("Test Error:", err_te)
            if get_errors == True:
                return parameters, err, err_te
            else:
                return parameters
def modelW(X_train, Y_train, X_test, Y_test, W, activations, learning_rate = 0.0001,
num_epochs = 1500, print_cost = True, print_errors = True, gpu = False):
"""
Implements a "len(activations)"-layered tensorflow neural network
Arguments:
X_train -- training set, of shape
Y_train -- test set, of shape
X_test -- training set, of shape
Y_test -- test set, of shape
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
print_cost -- True to print the cost every 100 epochs
print_errors -- True to print the errors of Train and Test set every 100 epochs
gpu -- True to enable usage of gpu processing
b1 -- Momentum variable beta1
b2 -- RMS prop variable beta2
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
tf.compat.v1.disable_eager_execution()
tf.compat.v1.reset_default_graph() # to be able to rerun the model without overwriting | |
# <gh_stars>0
from __future__ import division, print_function
#import matplotlib.pyplot as plt
import numpy as np
from numpy import log10
from sys import stderr, stdout, exit
from dispersion import DR_Solve, init, DR_point, PlotDR2d
from scipy.interpolate import InterpolatedUnivariateSpline as US
def banner():
    """Print the HYDROS start-up banner to stdout."""
    rule = '_' * 80 + '\n'
    for text in (rule, ' ' * 30 + 'THIS IS HYDROS v1.0.0', rule):
        print(text)
# This function allows the roots to be found ranging across XARRAY
def HYDROS(
        params,
        start,
        scan1,
        scan2,
        log=1,
        method='default',
        scan_options=[
            1000.,
            1],
        outfilename='output.log',
        scanfilename='scan.log',
        scan_type='1d',
        normalization=1):
    """Driver for a HYDROS dispersion-relation scan.

    Follows one root of the dispersion relation across the requested 1d/2d
    scan (or, for scan_type == 'DR-map', dumps a 2d DR map and returns),
    writing progress to `outfilename` and one result row per scan point to
    `scanfilename`.

    Returns:
        (XARRAY, OMEGA, GAMMA, BY, DN, X2ARRAY), truncated at the point
        where the root was lost; None in 'DR-map' mode.

    NOTE: sets the module-global `outfile` used by the helper routines.
    """
    banner()
    global outfile
    check_variables(params, scan1, scan2)
    outfile = open(outfilename, 'w')
    scanfile = init_resultfile(scanfilename)
    if scan_type == 'DR-map':
        w, DR2d = PlotDR2d(
            params, np=(
                800, 800), cntr=[
                1.5, -1.5], raxis=3., iaxis=3., outfilename='output.log')
        write_2d_scan('DR2d.log', w.real, w.imag, DR2d)
        return
    scanvar, precis, scan_range, log, scan_list = scan1
    scansize = precis
    scanvar2, precis2, scan_range2, log2, scan_list2 = scan2
    if scan_type == '2d':
        scansize *= precis2
    else:
        if scan_list2 == []:
            scanvar2, precis2, scan_range2, log2, scan_list2 = [
                None, None, None, None, []]
    # initialize dispersion relation solver
    params, start, scan_range, scan_range2 = renormalize(
        normalization, scanvar, scanvar2, params, start, scan_range, scan_range2, scan_type)
    init(params, outfile)
    # set up scan arrays
    XARRAY, X2ARRAY = setup_xarray(
        scan_list, scan_list2, scan_range, scan_range2, log, log2, precis, precis2, scan_type)
    OMEGA = np.zeros(scansize)
    GAMMA = np.zeros(scansize)
    # Bug fix: np.complex was a deprecated alias removed in NumPy 1.24; the
    # builtin `complex` is the documented replacement (dtype complex128).
    BY = np.zeros(scansize, complex)
    DN = np.zeros(scansize, complex)
    i = 0
    while i < len(XARRAY):
        outfile.flush()
        update_output('\r Computing index %d/%d.' % (i + 1, len(XARRAY)))
        print(
            "\n=====================================================================",
            file=outfile)
        print(
            "\n kperp= %7.3g, kpar= %7.3g, k= %7.3g, theta= %7.3g, beta= %7.3g, tau= %7.3g, Tpar/Tperp= %7.3g, " %
            (params[1],
             params[2],
             params[10],
             params[9],
             params[3],
             params[4],
             params[5]),
            file=outfile)
        if X2ARRAY is not None:
            print("\nComputing index no. %d, %s = %7.3g, %s = %7.3g" %
                  (i + 1, scanvar, XARRAY[i], scanvar2, X2ARRAY[i]), file=outfile)
        else:
            print("\nComputing index no. %d, %s = %7.3g" %
                  (i + 1, scanvar, XARRAY[i]), file=outfile)
        # At the start of each new 2d row, restart from the root found at the
        # same position of the previous row.
        if scan_type == '2d' and i % precis == 0 and i > 0:
            start = [OMEGA[i - precis], GAMMA[i - precis]]
            shift = precis - 1
        else:
            shift = 0
        scan = call_DR_get_root(
            i,
            params,
            start,
            scanvar,
            scanvar2,
            method,
            scan_options,
            XARRAY,
            OMEGA,
            GAMMA,
            BY,
            DN,
            X2ARRAY,
            shift)
        omega, gamma, By, dn, err = scan
        if err == 1:  # no error
            OMEGA[i] = omega
            GAMMA[i] = gamma
            BY[i] = By
            DN[i] = dn
        else:
            update_output(
                '\r Lost root on index %d/%d, aborting scan.' %
                (i + 1, len(XARRAY)), True)
            print(
                "Error: Lost root, ending scan at scanvar=",
                XARRAY[i],
                file=outfile)
            if scan_type == '2d':
                return XARRAY[:i], OMEGA[:i], GAMMA[:i], BY[:i], DN[:i], X2ARRAY[:i]
            else:
                return XARRAY[:i], OMEGA[:i], GAMMA[:i], BY[:i], DN[:i], None
        if X2ARRAY is not None:
            b = X2ARRAY[i]
        else:
            b = 0.
        a, b, c, d = renormalize_output(
            normalization, scanvar, scanvar2, params, XARRAY[i], b, OMEGA[i], GAMMA[i])
        write_result_to_scanfile(scanfile, a, b, c, d, BY[i], DN[i])
        i += 1
    update_output(
        '\r Scan complete. Logfile: \'%s\', Scan output: \'%s\'.' %
        (outfilename, scanfilename), True)
    scanfile.close()
def check_variables(params, scan1, scan2):
    """Validate that the requested scan variable matches the wavevector mode.

    wavevector_mode 1 specifies the wavevector as (kperp, kpar), so scanning
    k/theta is rejected; mode 2 specifies (k, theta), so scanning kpar/kperp
    is rejected. Exits the process with a message on an invalid combination.

    Args:
        params: parameter list; params[0] is the wavevector mode.
        scan1: (scanvar, precis, scan_range, log, scan_list) scan spec.
        scan2: secondary scan spec (currently not validated).

    Note: the original also unpacked kperp/kpar/theta/k and the full scan2
    tuple into unused locals; that dead code has been removed.
    """
    scanvar = scan1[0]
    wavevector_mode = params[0]
    if wavevector_mode == 1 and scanvar in ['k', 'theta']:
        exit('Error: scanning over k or theta in wavevector_mode = 1 makes no sense!')
    if wavevector_mode == 2 and scanvar in ['kpar', 'kperp']:
        exit(
            'Error: scanning over kpar or kperp in wavevector_mode = 2 makes no sense!')
def get_var_from_scan_range(
        var,
        value,
        scanvar,
        scanvar2,
        scan_range,
        scan_range2,
        scan_type):
    """Return the value the parameter named `var` starts the scan at.

    If `var` is the first scan variable (or, for 2d scans, the second), its
    effective starting value is the beginning of the corresponding scan
    range; otherwise the current `value` is returned unchanged.

    Bug fix: the original assigned the range start to the *local* `var` and
    then returned `value` unconditionally, so the scan start was never picked
    up (and the 2d branch read scan_range instead of scan_range2).
    """
    if scanvar == var:
        return scan_range[0]
    if scan_type == '2d' and scanvar2 == var:
        return scan_range2[0]
    return value
# reset some parameters to match internal normalization of HYDROS
# (k_norm=k*rho_i||, omega_norm=omega/kpar/v_Ti||)
def renormalize(
        normalization,
        scanvar,
        scanvar2,
        params,
        start,
        scan_range,
        scan_range2,
        scan_type):
    """Convert user-supplied quantities to the code-internal normalization
    (k_norm = k * rho_i||, omega_norm = omega / kpar / v_Ti||).

    For normalization == 1 (d_i units) all wavenumbers are multiplied by
    sqrt(beta) and the start frequency is divided by kpar; for any other
    normalization the inputs pass through unchanged.

    WARNING: mutates `params` in place (entries 1, 2 and 10 are rescaled)
    and returns rescaled versions of `start`, `scan_range`, `scan_range2`.
    """
    wavevector_mode = params[0]
    kperp = params[1]
    kpar = params[2]
    theta = params[9]
    k = params[10]
    if normalization == 1:
        if scanvar == 'beta' or scanvar2 == 'beta':
            exit('Error: d_i normalization presently not possible for beta scans.')
        beta = params[3]
        # we need kpar to change the normalization of the start frequency
        # if scans over k and/or theta are done, have to overwrite the above
        # values
        if wavevector_mode == 1:
            kpar = get_var_from_scan_range(
                'kpar',
                kpar,
                scanvar,
                scanvar2,
                scan_range,
                scan_range2,
                scan_type)
            kperp = get_var_from_scan_range(
                'kperp',
                kperp,
                scanvar,
                scanvar2,
                scan_range,
                scan_range2,
                scan_type)
        if wavevector_mode == 2:
            k = get_var_from_scan_range(
                'k',
                k,
                scanvar,
                scanvar2,
                scan_range,
                scan_range2,
                scan_type)
            theta = get_var_from_scan_range(
                'theta',
                theta,
                scanvar,
                scanvar2,
                scan_range,
                scan_range2,
                scan_type)
        if wavevector_mode == 1 and (kperp < 0. or kpar < 0):
            exit('Need positive kpar and kperp for wavevector_mode=1!')
        if wavevector_mode == 2:
            if (k < 0. or theta < 0):
                exit('Need positive k and theta for wavevector_mode=2!')
            elif theta > 0. and k > 0:
                # derive kpar from (k, theta); theta given in degrees
                kpar = k * np.cos(theta * np.pi / 180)
        # k is now given in terms of d_i units, convert it to code internal
        # rho_i|| units
        kpar *= np.sqrt(beta)
        # change start frequency normalization
        start = list(np.array(start, dtype=float) / kpar)
        # now redefine scan ranges to code internal units
        if scanvar in ['kpar', 'kperp', 'k']:
            scan_range = tuple(
                np.array(
                    scan_range,
                    dtype=float) *
                np.sqrt(beta))
        if scanvar2 in ['kpar', 'kperp', 'k']:
            scan_range2 = tuple(
                np.array(
                    scan_range2,
                    dtype=float) *
                np.sqrt(beta))
        # finally, adapt global parameters
        params[1] *= np.sqrt(beta)
        params[2] *= np.sqrt(beta)
        params[10] *= np.sqrt(beta)
    return params, start, scan_range, scan_range2
# switch from internal normalization to output normalization
def renormalize_output(normalization, scanvar, scanvar2, params, a, b, c, d):
    """Convert one scan result from internal units back to output units.

    a, b are the scan-variable values; c, d are omega and -gamma in internal
    normalization (omega / kpar / v_Ti||). For normalization == 1 (d_i
    units), frequencies are multiplied back by kpar and wavenumber scan
    values divided by sqrt(beta).

    Returns:
        The renormalized (a, b, c, d).
    """
    wavevector_mode = params[0]
    beta = params[3]
    theta = params[9]
    kpar = params[2]
    # Bug fix: k must default to the global params value. The original left
    # it unbound unless a k-scan was active, so the mode-2 condition below
    # raised NameError whenever theta > 0 and k was not a scan variable.
    k = params[10]
    if scanvar == 'kpar':
        kpar = a
    if scanvar2 == 'kpar':
        kpar = b
    if scanvar == 'theta':
        theta = a
    if scanvar2 == 'theta':
        theta = b
    if scanvar == 'k':
        k = a
    if scanvar2 == 'k':
        k = b
    if wavevector_mode == 2 and theta > 0 and k > 0:
        # derive kpar from (k, theta); theta given in degrees
        kpar = k * np.cos(theta * np.pi / 180)
    if normalization == 1:
        c *= kpar
        d *= kpar
        if scanvar in ['kpar', 'kperp', 'k']:
            a /= np.sqrt(beta)
        if scanvar2 in ['kpar', 'kperp', 'k']:
            b /= np.sqrt(beta)
    return a, b, c, d
def init_resultfile(resultfilepath):
    """Open the scan result file for writing and emit its column header.

    Returns the open file object; the caller is responsible for closing it.
    """
    columns = ('var1', 'var2', 'omega', '-gamma',
               'Re(dBy/dBz)', 'Im(dBy/dBz)', 'Re(dn/dBz)', 'Im(dn/dBz)')
    resultfile = open(resultfilepath, 'w')
    resultfile.write('#%15s %16s %16s %16s %16s %16s %16s %16s\n' % columns)
    return resultfile
def write_result_to_scanfile(file, a, b, c, d, e, f):
    """Append one scan result row: four reals (a-d) followed by the real and
    imaginary parts of the two complex amplitudes e and f."""
    fields = (a, b, c, d, e.real, e.imag, f.real, f.imag)
    file.write('%16.8e %16.8e %16.8e %16.8e %16.8e %16.8e %16.8e %16.8e\n' % fields)
def write_2d_scan(file, w, g, DR):
    """Dump a 2d dispersion-relation map to `file`: one (omega, gamma, DR)
    row per grid point, flattened in C order."""
    omegas, gammas, values = w.flatten(), g.flatten(), DR.flatten()
    with open(file, 'w') as out:
        out.write('#%15s %16s %16s\n' % ('omega', 'gamma', 'DR'))
        for omega, gamma, value in zip(omegas, gammas, values):
            out.write('%16.8e %16.8e %16.8e\n' % (omega, gamma, value))
def setup_xarray(
        scan_list,
        scan_list2,
        scan_range,
        scan_range2,
        log,
        log2,
        precis,
        precis2,
        scan_type):
    """Build the scan coordinate arrays.

    For '1d' scans: either interpolate a user-provided scan_list to `precis`
    points (optionally deriving a coupled second variable from scan_list2),
    or generate a log-/linearly-spaced range. For '2d' scans the first
    variable is tiled and the second repeated so the flat arrays enumerate
    the full grid row by row.

    Returns:
        (XARRAY, X2ARRAY); X2ARRAY is None for plain 1d scans.

    NOTE(review): for scan_type '1d' with scan_list == [] but
    scan_list2 != [], no branch assigns XARRAY and the final return raises
    NameError — confirm this input combination is rejected upstream.
    """
    if scan_type == '1d':
        # for scanning a predefined list of numbers
        if scan_list != []:
            # interpolate to prescribed length
            l1_spl = US(range(len(scan_list)), scan_list)
            XARRAY = l1_spl(np.linspace(0, len(scan_list) - 1, precis))
            X2ARRAY = None
            # for having a second simultaneously varying variable
            if scan_list2 != []:
                # interpolate to prescribed length
                l2_spl = US(scan_list, scan_list2)
                X2ARRAY = l2_spl(XARRAY)
        # log- or linearly spaced 1d scan
        if (scan_list == [] and scan_list2 == []):
            X2ARRAY = None  # np.empty(1,dtype=np.float)
            if log:
                XARRAY = np.logspace(
                    np.log10(
                        scan_range[0]), np.log10(
                        scan_range[1]), precis)
            else:
                XARRAY = np.linspace(scan_range[0], scan_range[1], precis)
    elif scan_type == '2d':
        # for scanning a predefined list of numbers
        if scan_list != []:
            # interpolate to prescribed length
            l1_spl = US(range(len(scan_list)), scan_list)
            XARRAY = np.tile(
                l1_spl(
                    np.linspace(
                        0,
                        len(scan_list) - 1,
                        precis)),
                precis).flatten()
            # for having a second simultaneously varying variable
            if scan_list2 != []:
                # interpolate to prescribed length
                l2_spl = US(scan_list, scan_list2)
                X2ARRAY = np.repeat(l2_spl(XARRAY), precis)
            else:
                exit('Error: need to specify two scan_lists for 2d scan!')
        # log- or linearly spaced 2d scan
        if (scan_list == [] and scan_list2 == []):
            if log:
                XARRAY = np.tile(
                    np.logspace(
                        np.log10(
                            scan_range[0]), np.log10(
                            scan_range[1]), precis), precis2).flatten()
            else:
                XARRAY = np.tile(
                    np.linspace(
                        scan_range[0],
                        scan_range[1],
                        precis),
                    precis2).flatten()
            if log2:
                X2ARRAY = np.repeat(
                    np.logspace(
                        np.log10(
                            scan_range2[0]), np.log10(
                            scan_range2[1]), precis2), precis).flatten()
            else:
                X2ARRAY = np.repeat(
                    np.linspace(
                        scan_range2[0],
                        scan_range2[1],
                        precis2),
                    precis).flatten()
    return XARRAY, X2ARRAY
def update_output(string, end=False):
stdout.write(string.ljust(80))
| |
# <filename>msatestgroup.py
#!/usr/local/bin/python3
#
# Do MSA tests
from testgroup import Testgroup, TestError
import time
class MSATestgroup(Testgroup):
"""
tests for MSA
"""
    def __init__(self, product, config="config.txt", debug=False):
        """Set up the MSA test group: load the per-product config values the
        individual msaNNN tests rely on."""
        super().__init__('MSA', product, config=config, debug=debug)
        self.msgtime = self.pconfig.getint('msgtime',fallback=2) # how many mins to wait for a message to arrive
        self.user = self.pconfig.get('submituser')    # submission auth username
        self.passwd = self.pconfig.get('submitpass')  # submission auth password
        self.messages = {}  # per-test message store — presumably keyed by test; confirm against base class usage
        self.standalone = self.pconfig.getboolean('standalone')  # standalone MSA product (gates msa003/msa004)
        self.downgrade = self.pconfig.getboolean('downgrade')    # product downgrades EAI mail — not used in the visible tests
        self.nosubeai = self.pconfig.getboolean('nosubeai')      # if set, msa006 submits without the EAI flag
# specific tests
# return tuple of True (Pass) / False (Fail) / None (Pending),
# and comment
    def msa001(self):
        """
        test that SMTPUTF8 in banner

        Probes the submission endpoints in order (plain submit port,
        STARTTLS, implicit-TLS submits) and fails as soon as any reachable
        endpoint lacks the SMTPUTF8 EHLO capability; passes when at least
        one endpoint advertises it.
        """
        atleastone = False
        cmnt = ""
        print("connect to", self.sserver)
        # port 587 banner
        if self.subconnect(port=self.submitport):
            print("try plain submit")
            if not self.ssock.has_extn('SMTPUTF8'):
                return ("Fail", f"Missing in submit, EHLO response was {self.ssock.ehlo_resp.decode()}")
            atleastone = True
            print("try STARTTLS submit")
            # subconnect() appears to replace self.ssock, so the capability
            # is re-checked on the fresh post-STARTTLS session
            if self.subconnect(starttls=True):
                if not self.ssock.has_extn('SMTPUTF8'):
                    return ("Fail", f"Missing after STARTLS, EHLO response was {self.ssock.ehlo_resp.decode()}")
            else:
                cmnt = "\nDoes not do STARTTLS"
        else:
            cmnt = f"\nDoes not do port {self.submitport} SUBMIT"
        print("try submits")
        if self.submits and self.subconnect(smtps=True):
            if not self.ssock.has_extn('SMTPUTF8'):
                return ("Fail", f"Missing SMTPUTF8 in submits, EHLO response was {self.ssock.ehlo_resp.decode()}")
            atleastone = True
        else:
            cmnt += "\nDoes not do SUBMITS"
        if atleastone:
            return ("Pass", "Capability is present"+cmnt)
        else:
            return('Fail', "No submission server found")
def msa002(self):
"""
test that 8BITMIME in banner
"""
atleastone = False
cmnt = ""
print("connect to", self.sserver)
# port 587 banner
print("try plain submit")
if self.subconnect(port=self.submitport):
if not self.ssock.has_extn('8BITMIME'):
return ("Fail", f"Missing in submit, EHLO response was {self.ssock.ehlo_resp.decode()}")
atleastone = True
print("try STARTTLS submit")
if self.subconnect(starttls=True):
if not self.ssock.has_extn('8BITMIME'):
return ("Fail", f"Missing after STARTLS, EHLO response was {self.ssock.ehlo_resp.decode()}")
else:
cmnt = "\nDoes not do STARTTLS"
else:
cmnt = f"\nDoes not do port {self.submitport} SUBMIT"
print("try submits")
if self.submits and self.subconnect(smtps=True):
if not self.ssock.has_extn('8BITMIME'):
return ("Fail", f"Missing in submits, EHLO response was {self.ssock.ehlo_resp.decode()}")
atleastone = True
else:
cmnt += "\nDoes not do SUBMITS"
if atleastone:
return ("Pass", "Capability is present"+cmnt)
else:
return('Fail', "No submission server found")
def msa003(self):
if not self.standalone:
return ("NA", "Test not applicable")
return None
def msa004(self):
if not self.standalone:
return ("NA", "Test not applicable")
return None
def msa005(self):
"""
test that it sends UTF-8 reverse path
"""
fromaddr = self.pconfig.get('fromaddr')
toaddr = self.pconfig.get('toaddr')
if not fromaddr:
return("NA", "No UTF-8 sending address")
assert fromaddr and toaddr
pmsg = self.domsg('plain', From=fromaddr, To=toaddr, getrmt=True)
if not pmsg:
return ('Fail', f"Cannot send test message {self.submiterror}")
(dhdrs, lhdrs, body) = pmsg
if 'eai-from' in dhdrs:
if dhdrs['eai-from'][0] == fromaddr:
return('Pass', f"Envelope from is {fromaddr}")
else:
return('Fail', f"Envelope from is {dhdrs['eai-from']}")
else:
return('Fail', f"Envelope from is missing, {dhdrs['return-path']}")
def msa006(self):
"""
test that it sends UTF-8 recipient
"""
fromaddr = self.pconfig.get('fromaddr') or self.pconfig.get('asciifrom')
toaddr = self.pconfig.get('toaddr')
pmsg = self.domsg('plain', From=fromaddr, To=toaddr, getrmt=True, eaiflag=not self.nosubeai)
if not pmsg:
return ('Fail', f"Cannot send test message {self.submiterror}")
(dhdrs, lhdrs, body) = pmsg
if 'eai-rcpt' in dhdrs:
if dhdrs['eai-rcpt'][0] == toaddr:
return('Pass', f"Envelope to is {toaddr}")
else:
return('Fail', f"Envelope to is {dhdrs['eai-rcpt'][0]}")
else:
return('Fail', f"Envelope to is missing, {dhdrs['delivered-to']}")
    def msa007(self):
        """
        test that it sends UTF-8 From: header
        """
        fromaddr = self.pconfig.get( 'fromaddr')
        toaddr = self.pconfig.get( 'toaddr')
        if not fromaddr:
            return("NA", "No UTF-8 sending address")
        pmsg = self.domsg('plain', From=fromaddr, To=toaddr, getrmt=True)
        if not pmsg:
            return ('Fail', f"Cannot send test message {self.submiterror}")
        (dhdrs, lhdrs, body) = pmsg
        if 'from' in dhdrs:
            msgfrom = dhdrs['from'][0]
            if fromaddr in msgfrom:
                return ('Pass', f"UTF-8 From header: {msgfrom}")
            else:
                return ('Fail', f"No UTF-8 From header: {msgfrom}")
        # NOTE(review): when the delivered message has no From header at all,
        # control falls through and returns None ("Pending" per the class
        # convention) — confirm an explicit Fail isn't intended here.
    def msa008(self):
        """
        test that it sends UTF-8 To: header
        """
        fromaddr = self.pconfig.get( 'fromaddr') or self.pconfig.get('asciifrom')
        toaddr = self.pconfig.get( 'toaddr')
        pmsg = self.domsg('plain', From=fromaddr, To=toaddr, getrmt=True)
        if not pmsg:
            return ('Fail', f"Cannot send test message {self.submiterror}")
        (dhdrs, lhdrs, body) = pmsg
        if 'to' in dhdrs:
            msgto = dhdrs['to'][0]
            if toaddr in msgto:
                return ('Pass', f"UTF-8 To header: {msgto}")
            else:
                return ('Fail', f"No UTF-8 To header: {msgto}")
        # NOTE(review): like msa007, a delivered message without a To header
        # falls through to an implicit None ("Pending") — confirm intent.
def msa009(self):
"""
test that it sends UTF-8 Subject: header
"""
fromaddr = self.pconfig.get( 'fromaddr') or self.pconfig.get('asciifrom')
toaddr = self.pconfig.get( 'toaddr')
pmsg = self.domsg('eaisubj', From=fromaddr, To=toaddr, getrmt=True)
if not pmsg:
return ('Fail', f"Cannot send test message {self.submiterror}")
(dhdrs, lhdrs, body) = pmsg
subject = dhdrs['subject'][0]
if '中国' in subject:
return ('Pass', "Message has unencoded UTF-8 subject")
return ('Fail', "Message subject was "+subject)
    def msa010(self):
        """
        test that it sends ASCII messages as not EAI
        """
        fromaddr = self.pconfig.get( 'asciifrom')
        toaddr = self.pconfig.get( 'asciito')
        if not fromaddr:
            return('NA', 'No ASCII test address available')
        pmsg = self.domsg('plain', From=fromaddr, To=toaddr, eaiflag=False, getrmt=True)
        if not pmsg:
            return ('Fail', f"Cannot send test message {self.submiterror}")
        (dhdrs, lhdrs, body) = pmsg
        # find relay received line
        r = None
        for rx in dhdrs['received']:
            if 'mail1.iecc.com' in rx:
                r = rx
                break
        if not r:
            # NOTE(review): the only 3-tuple result in this class (extra
            # received-headers element) — confirm the caller tolerates it.
            return ('NA', 'Cannot find received header', dhdrs['received'])
        # The relay records an EAI hop as "with UTF8..." in its Received
        # header; its absence means the message went out as plain ASCII.
        if 'with UTF8' not in r:
            return ('Pass', 'Message sent as ASCII '+r)
        return ('Fail', 'Message not sent as ASCII '+r)
def msa011(self):
"""
test that EAI messages to ASCII host fail
or are downgraded
"""
fromaddr = self.pconfig.get( 'fromaddr') or self.pconfig.get('asciifrom')
toaddr = self.pconfig.get( 'noeaito')
self.submiterror = None
pmsg = self.domsg('eaisubj', From=fromaddr, To=toaddr, getrmt=True)
if pmsg:
(dhdrs, lhdrs, body) = pmsg
if self.iseai(dhdrs['return-path'][0]) or self.iseai(dhdrs['delivered-to'][0]) or self.iseai(dhdrs['subject'][0]):
return ('Fail', f"Message sent anyway\nreturn path {dhdrs['return-path'][0]}\n" \
f"recipient {dhdrs['delivered-to'][0]}\nsubject {dhdrs['subject'][0]}")
return('NA', "Message downgraded\nreturn path {dhdrs['return-path'][0]}\n" \
f"recipient {dhdrs['delivered-to'][0]}\nsubject {dhdrs['subject'][0]}")
elif self.submiterror:
return ('NA', f"Cannot send test message {self.submiterror}")
# see if we have a bounce locally
pmsg = self.getmori(maxcheck=1)
if pmsg:
(dhdrs, lhdrs, body) = self.parsemsg(pmsg)
# see if there is a Diagnostic
dl = tuple(l for l in body if 'Diagnostic' in l)
if dl:
bm = dl[0]
else:
bm = dhdrs['subject'][0]
return ('Pass', "Test message not received, likely bounce "+bm)
return ('Pass', "Test message not received")
    def msa012(self):
        """
        See if EAI mail AA@UU from is downgraded
        """
        fromaddr = self.pconfig.get( 'dgfrom') # downgradable from address
        toaddr = self.pconfig.get( 'noeaito')
        if not fromaddr:
            return('NA', "No downgradable address available")
        self.submiterror = None
        pmsg = self.domsg('plain', From=fromaddr, To=toaddr, getrmt=True)
        if pmsg:
            (dhdrs, lhdrs, body) = pmsg
            # The 'eai-from' header records an EAI envelope sender; its
            # presence means the address was NOT downgraded.
            if 'eai-from' in dhdrs:
                return('Fail', f"Envelope from is {dhdrs['eai-from'][0]}")
            return ('Pass', "Message sent with ASCII return address")
        elif self.submiterror:
            return ('NA', f"Cannot send test message {self.submiterror}")
        # see if we have a bounce
        pmsg = self.getmori(prefix='dg', maxcheck=1)
        if pmsg:
            (dhdrs, lhdrs, body) = self.parsemsg(pmsg)
            # see if there is a Diagnostic line explaining the bounce
            dl = tuple(l for l in body if 'Diagnostic' in l)
            if dl:
                bm = dl[0]
            else:
                bm = dhdrs['subject'][0]
            return ('Pass', "Test message not received, likely bounce "+bm)
        return ('Pass', "Test message not received")
    def msa013(self):
        """
        See if EAI rcpt to AA@UU is downgraded
        """
        fromaddr = self.pconfig.get( 'asciifrom')
        # NOTE(review): both toaddr and atoaddr read 'adgto', yet the first
        # comment says "downgradable" (which elsewhere is 'dgto') -- confirm
        # whether toaddr should come from 'dgto' instead.
        toaddr = self.pconfig.get( 'adgto') # downgradable to address in envelope
        atoaddr = self.pconfig.get( 'adgto') # downgraded to address in To header
        if not fromaddr:
            return('NA', 'No ASCII test address available')
        self.submiterror = None
        pmsg = self.domsg('plaindg', From=fromaddr, To=toaddr, bFrom=fromaddr, bTo=atoaddr, getrmt=True)
        if pmsg:
            (dhdrs, lhdrs, body) = pmsg
            # The 'eai-rcpt' header records an EAI envelope recipient; its
            # presence means the recipient was NOT downgraded.
            if 'eai-rcpt' in dhdrs:
                return('Fail', f"Envelope recipient is {dhdrs['eai-rcpt'][0]}")
            return ('Pass', f"Message sent with ASCII recipient address {dhdrs['delivered-to'][0]}")
        elif self.submiterror:
            return ('NA', f"Cannot send test message {self.submiterror}")
        # see if we have a bounce
        pmsg = self.getmori(prefix='dg', maxcheck=1)
        if pmsg:
            (dhdrs, lhdrs, body) = self.parsemsg(pmsg)
            return ('Pass', "Test message not received, likely bounce "+dhdrs['subject'][0])
        return ('Pass', "Test message not received")
    # for MSAs that do downgrades
    # msa014 downgrade From
def msa014(self):
"""
See if From header address is downgraded
"""
if not self.downgrade:
return ("NA", "Test not applicable")
fromaddr = self.pconfig.get( 'fromaddr')
toaddr = self.pconfig.get( 'dgto') # downgradable to address
atoaddr = self.pconfig.get( 'adgto') # downgraded to address
if not fromaddr:
return('NA', 'No ASCII test address available')
self.submiterror = None
pmsg = self.domsg('plaindgcc', From=fromaddr, To=toaddr, bFrom=fromaddr, bTo=atoaddr, getrmt=True)
if pmsg:
(dhdrs, lhdrs, body) = self.parsemsg(pmsg)
if 'eai-from' in dhdrs:
return('Fail', f"Envelope from is {dhdrs['eai-from'][0]}")
return ('Pass', "Message sent with ASCII sender address")
elif self.submiterror:
return ('NA', f"Cannot | |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import argparse
import json
import os
import random
from datetime import datetime
from pprint import pprint
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, BatchSampler
from pretrained_models import *
from tensorboardX import SummaryWriter
#from torch.utils.tensorboard import SummaryWriter
from experiments.exp_def import TaskDefs
from mt_dnn.inference import eval_model, extract_encoding
from data_utils.log_wrapper import create_logger
from data_utils.task_def import EncoderModelType
from data_utils.utils import set_environment
from mt_dnn.batcher import SingleTaskDataset, MultiTaskDataset, Collater, MultiTaskBatchSampler, DistMultiTaskBatchSampler, DistSingleTaskBatchSampler
from mt_dnn.batcher import DistTaskDataset
from mt_dnn.model import MTDNNModel
def model_config(parser):
    """Register model-architecture command-line options on *parser*.

    Covers the SAN answer module, encoder selection, BERT pre-training
    options, sample binning, and distributed-training settings.
    Returns the same parser to allow chained configuration calls.
    """
    parser.add_argument('--update_bert_opt', default=0, type=int, help='是否更新固定预训练的bert模型参数,大于0表示固定')
    parser.add_argument('--multi_gpu_on', action='store_true',help='默认False,是否使用多GPU')
    parser.add_argument('--mem_cum_type', type=str, default='simple',
                        help='bilinear/simple/default')
    parser.add_argument('--answer_num_turn', type=int, default=5,help='论文中的超参数K,K步推理')
    parser.add_argument('--answer_mem_drop_p', type=float, default=0.1)
    parser.add_argument('--answer_att_hidden_size', type=int, default=128)
    parser.add_argument('--answer_att_type', type=str, default='bilinear', help='bilinear/simple/default')
    parser.add_argument('--answer_rnn_type', type=str, default='gru', help='SAN逐步推理模块使用的结构是,rnn/gru/lstm')
    parser.add_argument('--answer_sum_att_type', type=str, default='bilinear', help='bilinear/simple/default')
    parser.add_argument('--answer_merge_opt', type=int, default=1)
    parser.add_argument('--answer_mem_type', type=int, default=1)
    parser.add_argument('--max_answer_len', type=int, default=10)
    parser.add_argument('--answer_dropout_p', type=float, default=0.1)
    parser.add_argument('--answer_weight_norm_on', action='store_true')
    parser.add_argument('--dump_state_on', action='store_true')
    parser.add_argument('--answer_opt', type=int, default=1, help='可选0,1,代表是否使用SANClassifier分类头还是普通的线性分类头,1表示使用SANClassifier, 0是普通线性映射')
    parser.add_argument('--pooler_actf', type=str, default='tanh',
                        help='tanh/relu/gelu, 构建输出头的时的激活函数的选择')
    parser.add_argument('--mtl_opt', type=int, default=0)
    parser.add_argument('--ratio', type=float, default=0)
    parser.add_argument('--mix_opt', type=int, default=0)
    parser.add_argument('--max_seq_len', type=int, default=512)
    parser.add_argument('--init_ratio', type=float, default=1)
    parser.add_argument('--encoder_type', type=int, default=EncoderModelType.BERT)
    parser.add_argument('--num_hidden_layers', type=int, default=-1, help='-1表示不修改模型的隐藏层参数,使用默认值,否则修改')
    # BERT pre-training
    parser.add_argument('--bert_model_type', type=str, default='bert-base-uncased',help='使用的预训练模型')
    parser.add_argument('--do_lower_case', action='store_true',help='是否小写')
    parser.add_argument('--masked_lm_prob', type=float, default=0.15)
    parser.add_argument('--short_seq_prob', type=float, default=0.2)
    parser.add_argument('--max_predictions_per_seq', type=int, default=128)
    # bin samples
    parser.add_argument('--bin_on', action='store_true')
    parser.add_argument('--bin_size', type=int, default=64)
    # BUG FIX: this ratio is fractional (default 0.5) but was declared
    # type=int, so passing --bin_grow_ratio 0.5 on the command line failed.
    parser.add_argument('--bin_grow_ratio', type=float, default=0.5)
    # dist training
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--world_size", type=int, default=1, help="For distributed training: world size")
    parser.add_argument("--master_addr", type=str, default="localhost")
    parser.add_argument("--master_port", type=str, default="6600")
    parser.add_argument("--backend", type=str, default="nccl")
    return parser
def data_config(parser):
    """Register dataset, logging and IO command-line options.

    Returns the same parser so configuration calls can be chained.
    """
    add = parser.add_argument
    add('--log_file', default='mt-dnn-train.log', help='path for log file.')
    add('--tensorboard', action='store_true')
    add('--tensorboard_logdir', default='tensorboard_logdir')
    add("--init_checkpoint", default='mt_dnn_models/bert_model_base_uncased.pt', type=str, help='使用哪个模型初始模型参数,请注意,选择正确的中英文模型')
    add('--data_dir', default='data/canonical_data/bert_uncased_lower', help='tokenize后的数据的地址')
    add('--data_sort_on', action='store_true')
    add('--name', default='farmer')
    add('--task_def', type=str, default="experiments/glue/glue_task_def.yml", help="使用的task任务定义的文件,默认是glue的task进行训练")
    add('--train_datasets', default='mnli', help='训练的多个任务的数据集,用逗号,分隔,如果多个数据集存在')
    add('--test_datasets', default='mnli_matched,mnli_mismatched', help='测试的多个任务的数据集,用逗号,分隔,如果多个数据集存在,根据任务名前缀自动匹配,例如mnli的前半部分mnli_')
    add('--glue_format_on', action='store_true')
    add('--mkd-opt', type=int, default=0,
        help=">0表示开启知识蒸馏, requires 'softlabel' column in input data")
    add('--do_padding', action='store_true')
    return parser
def _str2bool(value):
    """Parse a command-line boolean: true/1/yes/y/t (case-insensitive) -> True."""
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('true', '1', 'yes', 'y', 't')

def train_config(parser):
    """Register optimization / training-loop command-line options.

    Covers optimizer selection, LR scheduling, checkpoint resume, fp16
    (Apex AMP) and adversarial (SMART) training.  Returns the same parser
    to allow chained configuration calls.
    """
    # BUG FIX: type=bool treats ANY non-empty string (including "False")
    # as True; parse the flag value explicitly instead.
    parser.add_argument('--cuda', type=_str2bool, default=torch.cuda.is_available(),
                        help='是否使用GPU')
    parser.add_argument('--log_per_updates', type=int, default=500)
    parser.add_argument('--save_per_updates', type=int, default=10000,help='结合save_per_updates_on一起使用,表示每多少step,进行模型评估和保存')
    parser.add_argument('--save_per_updates_on', action='store_true',help='每一步都保存模型,保存频繁,每步都评估 ')
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--batch_size', type=int, default=8, help='训练的batch_size')
    parser.add_argument('--batch_size_eval', type=int, default=8)
    parser.add_argument('--optimizer', default='adamax',
                        help='supported optimizer: adamax, sgd, adadelta, adam, 使用的优化器')
    parser.add_argument('--grad_clipping', type=float, default=0)
    parser.add_argument('--global_grad_clipping', type=float, default=1.0)
    parser.add_argument('--weight_decay', type=float, default=0)
    parser.add_argument('--learning_rate', type=float, default=5e-5)
    parser.add_argument('--momentum', type=float, default=0)
    parser.add_argument('--warmup', type=float, default=0.1)
    parser.add_argument('--warmup_schedule', type=str, default='warmup_linear')
    parser.add_argument('--adam_eps', type=float, default=1e-6)
    parser.add_argument('--vb_dropout', action='store_false')
    parser.add_argument('--dropout_p', type=float, default=0.1,help='构建输出头时Pooler的dropout设置')
    parser.add_argument('--dropout_w', type=float, default=0.000)
    parser.add_argument('--bert_dropout_p', type=float, default=0.1)
    # loading
    parser.add_argument("--model_ckpt", default='checkpoints/model_0.pt', type=str, help='继续训练模型时的已存在模型')
    parser.add_argument("--resume", action='store_true',help='继续训练模型,结合参数--model_ckpt一起使用')
    # scheduler
    parser.add_argument('--have_lr_scheduler', dest='have_lr_scheduler', action='store_false')
    parser.add_argument('--multi_step_lr', type=str, default='10,20,30')
    #parser.add_argument('--feature_based_on', action='store_true')
    parser.add_argument('--lr_gamma', type=float, default=0.5)
    parser.add_argument('--scheduler_type', type=str, default='ms', help='ms/rop/exp')
    parser.add_argument('--output_dir', default='checkpoint')
    parser.add_argument('--seed', type=int, default=2018,
                        help='random seed for data shuffling, embedding init, etc.')
    parser.add_argument('--grad_accumulation_step', type=int, default=1)
    #fp 16
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    # adv training
    parser.add_argument('--adv_train', action='store_true')
    # the current release only includes smart perturbation
    parser.add_argument('--adv_opt', default=0, type=int)
    parser.add_argument('--adv_norm_level', default=0, type=int)
    parser.add_argument('--adv_p_norm', default='inf', type=str)
    parser.add_argument('--adv_alpha', default=1, type=float)
    parser.add_argument('--adv_k', default=1, type=int)
    parser.add_argument('--adv_step_size', default=1e-5, type=float)
    parser.add_argument('--adv_noise_var', default=1e-5, type=float)
    parser.add_argument('--adv_epsilon', default=1e-6, type=float)
    parser.add_argument('--encode_mode', action='store_true', help='只把测试数据用模型编码一下,然后保存到checkpoint目录,没啥用')
    parser.add_argument('--debug', action='store_true', help="print debug info")
    return parser
# Build the full argument parser from the three config sections and parse
# the command line at import time.
parser = argparse.ArgumentParser()
parser = data_config(parser)
parser = model_config(parser)
parser = train_config(parser)
args = parser.parse_args()
output_dir = args.output_dir
data_dir = args.data_dir
# Comma-separated dataset names -> lists.
args.train_datasets = args.train_datasets.split(',')
args.test_datasets = args.test_datasets.split(',')
os.makedirs(output_dir, exist_ok=True)
output_dir = os.path.abspath(output_dir)
# presumably seeds the RNGs / configures CUDA -- see data_utils.utils.set_environment
set_environment(args.seed, args.cuda)
log_path = args.log_file
logger = create_logger(__name__, to_disk=True, log_file=log_path)
# Task definitions (losses, label vocabularies, task types) from the YAML file.
task_defs = TaskDefs(args.task_def)
encoder_type = args.encoder_type
def dump(path, data):
    """Serialize *data* as JSON into the file at *path*."""
    with open(path, 'w') as handle:
        handle.write(json.dumps(data))
def evaluation(model, datasets, data_list, task_defs, output_dir='checkpoints', epoch=0, n_updates=-1, with_label=False, tensorboard=None, glue_format_on=False, test_on=False, device=None, logger=None):
    """Evaluate `model` on every dataset and dump per-dataset score files.

    datasets  -- dataset names; the prefix before '_' selects the task def
    data_list -- DataLoaders aligned with `datasets` (entries may be None)
    n_updates -- if > 0 report progress by update count, else by epoch
    test_on   -- label output as "Test" instead of "Dev"

    Writes one JSON results file per dataset (rank 0 / single process only)
    and, when glue_format_on, an additional GLUE-format TSV.
    """
    # eval on rank 1
    print_message(logger, "开始评估")
    test_prefix = "Test" if test_on else "Dev"
    if n_updates > 0:
        updates_str = "updates"
    else:
        updates_str = "epoch"
    updates = model.updates if n_updates > 0 else epoch
    for idx, dataset in enumerate(datasets):
        prefix = dataset.split('_')[0]
        task_def = task_defs.get_task_def(prefix)
        label_dict = task_def.label_vocab
        test_data = data_list[idx]
        if test_data is not None:
            with torch.no_grad():
                test_metrics, test_predictions, test_scores, test_golds, test_ids= eval_model(model,
                                                                                              test_data,
                                                                                              metric_meta=task_def.metric_meta,
                                                                                              device=device,
                                                                                              with_label=with_label,
                                                                                              label_mapper=label_dict,
                                                                                              task_type=task_def.task_type)
            for key, val in test_metrics.items():
                if tensorboard:
                    tensorboard.add_scalar('{}/{}/{}'.format(test_prefix, dataset, key), val, global_step=updates)
                if isinstance(val, str):
                    print_message(logger, '任务是 {0} -- {1} {2} -- {3} {4}: {5}'.format(dataset, updates_str, updates, test_prefix, key, val), level=1)
                elif isinstance(val, float):
                    print_message(logger, '任务是 {0} -- {1} {2} -- {3} {4}: {5:.3f}'.format(dataset, updates_str, updates, test_prefix, key, val), level=1)
                else:
                    # Non-str/float metrics are stringified so they survive json.dump.
                    test_metrics[key] = str(val)
                    print_message(logger, 'Task {0} -- {1} {2} -- {3} {4}: \n{5}'.format(dataset, updates_str, updates, test_prefix, key, val), level=1)
            # NOTE(review): reads the module-level `args` rather than a
            # parameter; only rank 0 (or a single process) writes files.
            if args.local_rank in [-1, 0]:
                score_file = os.path.join(output_dir, '{}_{}_scores_{}_{}.json'.format(dataset, test_prefix.lower(), updates_str, updates))
                results = {'metrics': test_metrics, 'predictions': test_predictions, 'uids': test_ids, 'scores': test_scores}
                dump(score_file, results)
                if glue_format_on:
                    from experiments.glue.glue_utils import submit
                    official_score_file = os.path.join(output_dir, '{}_{}_scores_{}.tsv'.format(dataset, test_prefix.lower(), updates_str))
                    submit(official_score_file, results, label_dict)
def initialize_distributed(args):
    """Initialize torch.distributed and return this process's CUDA device.

    Rank and world size come from the RANK / WORLD_SIZE environment
    variables, or from the OpenMPI environment when launched via mpirun.
    """
    args.rank = int(os.getenv('RANK', '0'))
    args.world_size = int(os.getenv("WORLD_SIZE", '1'))
    if os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'):
        # We are using (OpenMPI) mpirun for launching distributed data parallel processes
        local_rank = int(os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'))
        local_size = int(os.getenv('OMPI_COMM_WORLD_LOCAL_SIZE'))
        # BUG FIX: `nodeid` and `num_nodes` were undefined names here
        # (NameError under mpirun).  Derive them from OpenMPI's global
        # rank/size, assuming ranks are block-distributed across nodes
        # -- TODO confirm for the actual launcher configuration.
        global_rank = int(os.getenv('OMPI_COMM_WORLD_RANK', '0'))
        global_size = int(os.getenv('OMPI_COMM_WORLD_SIZE', str(local_size)))
        nodeid = global_rank // local_size
        num_nodes = global_size // local_size
        args.local_rank = local_rank
        args.rank = nodeid * local_size + local_rank
        args.world_size = num_nodes * local_size
    #args.batch_size = args.batch_size * args.world_size
    device = args.rank % torch.cuda.device_count()
    if args.local_rank is not None:
        device = args.local_rank
    torch.cuda.set_device(device)
    device = torch.device('cuda', args.local_rank)
    # Call the init process
    init_method = 'tcp://'
    master_ip = os.getenv('MASTER_ADDR', 'localhost')
    master_port = os.getenv('MASTER_PORT', '6600')
    init_method += master_ip + ':' + master_port
    torch.distributed.init_process_group(
        backend=args.backend,
        world_size=args.world_size, rank=args.rank,
        init_method=init_method)
    return device
def print_message(logger, message, level=0):
    """Log *message*, but only on rank 0 when running distributed.

    level 1 logs at WARNING; anything else logs at INFO.
    """
    # Suppress output on non-zero ranks; single-process runs always log.
    if torch.distributed.is_initialized() and torch.distributed.get_rank() != 0:
        return
    if level == 1:
        logger.warning(message)
    else:
        logger.info(message)
def main():
# set up dist
if args.local_rank > -1:
device = initialize_distributed(args)
elif torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
# opt还是args,只不过是字典格式
opt = vars(args)
# update data dir
opt['data_dir'] = data_dir
batch_size = args.batch_size
print_message(logger, '开始MT-DNN训练')
#return
tasks = {}
task_def_list = []
dropout_list = []
# 不是分布式,那么就打印
printable = args.local_rank in [-1, 0]
train_datasets = []
# 初始化每个任务的数据集
for dataset in args.train_datasets:
prefix = dataset.split('_')[0]
if prefix in tasks:
continue
task_id = len(tasks)
tasks[prefix] = task_id
#训练的基本数据信息,例如用哪个损失,任务类型,任务标签等
task_def = task_defs.get_task_def(prefix)
task_def_list.append(task_def)
assert len(task_def.label_vocab.ind2tok) == task_def.n_class, "配置中的类别数量和标签数量不相等,请检查"
train_path = os.path.join(data_dir, '{}_train.json'.format(dataset))
print_message(logger, '加载训练任务 {},训练任务的顺序id是: {}'.format(train_path, task_id))
# 训练的数据的json文件, train_path = 'data_my/canonical_data/bert-base-chinese/absa_train.json'
train_data_set = SingleTaskDataset(path=train_path, is_train=True, maxlen=args.max_seq_len, task_id=task_id, task_def=task_def, printable=printable)
train_datasets.append(train_data_set)
#Collater函数
train_collater = Collater(dropout_w=args.dropout_w, encoder_type=encoder_type, soft_label=args.mkd_opt > 0, max_seq_len=args.max_seq_len, do_padding=args.do_padding)
#把数据放到一起
multi_task_train_dataset = MultiTaskDataset(train_datasets)
if args.local_rank != -1:
multi_task_batch_sampler = DistMultiTaskBatchSampler(train_datasets, args.batch_size, args.mix_opt, args.ratio, rank=args.local_rank, world_size=args.world_size)
else:
# 一个batch的数据集采用器
multi_task_batch_sampler = MultiTaskBatchSampler(train_datasets, args.batch_size, args.mix_opt, args.ratio, bin_on=args.bin_on, bin_size=args.bin_size, bin_grow_ratio=args.bin_grow_ratio)
# Dataloader格式
multi_task_train_data = DataLoader(multi_task_train_dataset, batch_sampler=multi_task_batch_sampler, collate_fn=train_collater.collate_fn, pin_memory=args.cuda)
# len(task_def_list),里面包含几个task,长度就是几
opt['task_def_list'] = task_def_list
# 测试数据,同理
dev_data_list = []
test_data_list = []
test_collater = Collater(is_train=False, encoder_type=encoder_type, max_seq_len=args.max_seq_len, do_padding=args.do_padding)
for dataset in args.test_datasets:
prefix = dataset.split('_')[0]
task_def = task_defs.get_task_def(prefix)
task_id = tasks[prefix]
task_type = task_def.task_type
data_type = task_def.data_type
dev_path = os.path.join(data_dir, '{}_dev.json'.format(dataset))
dev_data = None
if os.path.exists(dev_path):
dev_data_set = SingleTaskDataset(dev_path, False, maxlen=args.max_seq_len, task_id=task_id, task_def=task_def, printable=printable)
if args.local_rank != -1:
dev_data_set = DistTaskDataset(dev_data_set, task_id)
single_task_batch_sampler = DistSingleTaskBatchSampler(dev_data_set, args.batch_size_eval, rank=args.local_rank, world_size=args.world_size)
dev_data = DataLoader(dev_data_set, batch_sampler=single_task_batch_sampler, collate_fn=test_collater.collate_fn, pin_memory=args.cuda)
else:
dev_data = DataLoader(dev_data_set, batch_size=args.batch_size_eval, collate_fn=test_collater.collate_fn, pin_memory=args.cuda)
dev_data_list.append(dev_data)
test_path = os.path.join(data_dir, '{}_test.json'.format(dataset))
test_data = None
if os.path.exists(test_path):
test_data_set = SingleTaskDataset(test_path, False, maxlen=args.max_seq_len, task_id=task_id, task_def=task_def, printable=printable)
if args.local_rank != -1:
test_data_set = DistTaskDataset(test_data_set, task_id)
single_task_batch_sampler = DistSingleTaskBatchSampler(test_data_set, args.batch_size_eval, rank=args.local_rank, world_size=args.world_size)
test_data = DataLoader(test_data_set, batch_sampler=single_task_batch_sampler, collate_fn=test_collater.collate_fn, pin_memory=args.cuda)
else:
test_data = DataLoader(test_data_set, batch_size=args.batch_size_eval, collate_fn=test_collater.collate_fn, pin_memory=args.cuda)
test_data_list.append(test_data)
# 打印默认参数
print_message(logger, '#' * 20)
print_message(logger, opt)
print_message(logger, '#' * 20)
# 需要除以grad accumulation,来计算一共需要多少个batch step
num_all_batches = args.epochs * len(multi_task_train_data) // args.grad_accumulation_step
print_message(logger, '############# Gradient Accumulation 信息 #############')
print_message(logger, '原有训练的step数是: {}'.format(args.epochs * len(multi_task_train_data)))
print_message(logger, '梯度度累积参数 grad_accumulation 为: {}'.format(args.grad_accumulation_step))
print_message(logger, '经过梯度累积后的训练step数是: {}'.format(num_all_batches))
print_message(logger, '############# Gradient Accumulation 信息 #############')
#使用哪个模型初始化参数
init_model = args.init_checkpoint
state_dict = None
# 加载模型参数,可选bert和roberta
if os.path.exists(init_model):
if encoder_type == EncoderModelType.BERT or \
encoder_type == EncoderModelType.DEBERTA or \
encoder_type == EncoderModelType.ELECTRA:
state_dict = torch.load(init_model, map_location=device)
config = state_dict['config']
elif encoder_type == EncoderModelType.ROBERTA or encoder_type == EncoderModelType.XLM:
model_path = '{}/model.pt'.format(init_model)
state_dict = torch.load(model_path, map_location=device)
arch = state_dict['args'].arch
arch = arch.replace('_', '-')
if encoder_type == EncoderModelType.XLM:
arch = "xlm-{}".format(arch)
# convert model arch
from data_utils.roberta_utils import update_roberta_keys
from data_utils.roberta_utils import patch_name_dict
state = update_roberta_keys(state_dict['model'], nlayer=state_dict['args'].encoder_layers)
state = patch_name_dict(state)
literal_encoder_type = EncoderModelType(opt['encoder_type']).name.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[literal_encoder_type]
config = config_class.from_pretrained(arch).to_dict()
state_dict = {'state': state}
else:
if opt['encoder_type'] not in EncoderModelType._value2member_map_:
raise ValueError("encoder_type is out of pre-defined types")
literal_encoder_type = EncoderModelType(opt['encoder_type']).name.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[literal_encoder_type]
config = config_class.from_pretrained(init_model).to_dict()
# config是预训练模型的参数,设置一下,dropout默认0.1
config['attention_probs_dropout_prob'] = args.bert_dropout_p
config['hidden_dropout_prob'] = args.bert_dropout_p
# 是否开启多GPU
config['multi_gpu_on'] = opt["multi_gpu_on"]
# 如果大于0,说明模型的修改隐藏层参数
if args.num_hidden_layers > 0:
config['num_hidden_layers'] = args.num_hidden_layers
#更新下opt,用于保存所有参数
opt.update(config)
#MTDNN模型初始化
| |
# repository: CMU-cabot/cabot
# Copyright (c) 2020 Carnegie Mellon University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This is a menu implementation for cabot handle
Author: <NAME> <<EMAIL>>
"""
import json
import subprocess
import os
import rospy
import std_msgs.msg
import mongodb_store.srv
import dynamic_reconfigure.client
import cabot.util
from cabot_ui import i18n
class Action(object):
    """Abstract base class for menu actions.

    Concrete subclasses override :meth:`do_action`.
    """
    def __init__(self, config, menu):
        # Keep the raw action config and the menu this action belongs to.
        self._config = config
        self._menu = menu

    def do_action(self):
        """Execute the action; the base implementation does nothing."""
        return False
class Actions(Action):
    """List of Actions built from the "actions" entry of a menu config."""
    @staticmethod
    def create_actions(config, menu):
        """Create the Actions container for *menu* from its config dict."""
        actions = Menu.get_menu_config(config, "actions")
        return Actions(actions, menu)
    def __init__(self, config, menu):
        super(Actions, self).__init__(config, menu)
        temp = []
        if config:
            for action in config:
                # "type" selects the concrete action class.
                _type = Menu.get_menu_config(action, "type", error=True)
                if _type == "publish_topic":
                    temp.append(PublishTopicAction(action, menu))
                elif _type == "reconfigure":
                    temp.append(ReconfigureAction(action, menu))
                elif _type == "syscommand":
                    temp.append(SyscommandAction(action, menu))
                else:
                    raise RuntimeError("%s action is not defined" % (_type))
        # Every menu gets a selection-notification action appended.
        temp.append(MenuSelectAction(None, menu))
        self.actions = temp
    def do_action(self):
        """Run every contained action; return True only if all succeeded."""
        result = True
        for action in self.actions:
            # BUG FIX: `result and action.do_action()` short-circuited, so
            # after one failure the remaining actions never executed.
            # Run the action first, then fold into the aggregate result.
            result = action.do_action() and result
        return result
    def __str__(self):
        return str(self.actions)
def my_import(name):
    """Resolve a dotted path (e.g. "std_msgs.msg.String") to an object."""
    parts = name.split('.')
    obj = __import__(parts[0])
    # __import__ returns the top-level package; walk down the attributes.
    for attr in parts[1:]:
        obj = getattr(obj, attr)
    return obj
class PublishTopicAction(Action):
    """Menu Action for publishing topic"""
    def __init__(self, config, menu):
        super(PublishTopicAction, self).__init__(config, menu)
        # "topic" is mandatory; "msg_type" defaults to std_msgs String.
        self._topic = Menu.get_menu_config(config, "topic", error=True)
        self._msg_type = Menu.get_menu_config(config, "msg_type", default="std_msgs.msg.String")
        if self._topic is not None:
            ### needs to update with custom message type
            self._pub = rospy.Publisher(self._topic, my_import(self._msg_type), queue_size=1)
    def do_action(self):
        # Publish the current menu value; unwrap one level when the value
        # is itself a Menu (e.g. the selected item of a list).
        curr = self._menu.value
        if curr is not None:
            if isinstance(curr, Menu):
                curr = curr.value
            if curr is not None:
                self._pub.publish(curr)
                return True
        # Nothing to publish (no value selected).
        return False
class ReconfigureAction(Action):
    """Menu Action for reconfiguration"""
    def __init__(self, config, menu):
        super(ReconfigureAction, self).__init__(config, menu)
        # Each target names a dynamic_reconfigure node plus the parameters
        # to update.
        self._targets = Menu.get_menu_config(config, "targets", error=True)
        self._error_count = 0
    # Class-level cache of dynamic_reconfigure clients keyed by node name,
    # shared across all ReconfigureAction instances.
    _clients = {}
    def do_action(self):
        for target in self._targets:
            target_name = target["name"]
            if target_name not in ReconfigureAction._clients:
                try:
                    rospy.loginfo("Trying to connect dynamic_reconfigure client")
                    ReconfigureAction._clients[target_name] \
                        = dynamic_reconfigure.client.Client(target_name, timeout=3)
                except rospy.ROSException:
                    rospy.loginfo("Timed out connecting dynamic_reconfigure client")
                    #return True
            if target_name in ReconfigureAction._clients:
                client = ReconfigureAction._clients[target_name]
                config = target["config"]
                if client is not None:
                    temp = {}
                    for key in config:
                        val = config[key]
                        if isinstance(val, (float,int)):
                            # Numeric configs are scaled by the menu value.
                            temp[key] = val * self._menu.value
                        elif isinstance(val, str):
                            # TODO (security issue)
                            # NOTE(review): evaluates a config-supplied
                            # expression with `value` bound to the menu value
                            # -- safe only while configs are trusted.
                            value = self._menu.value
                            temp[key] = eval(val)
                    rospy.loginfo(temp)
                    result = client.update_configuration(temp)
                    rospy.loginfo(result)
                    return True
        # No client could be reached; give up after repeated failures.
        self._error_count += 1
        if self._error_count > 10:
            raise RuntimeError("dynamic_reconfigure server is not responded")
        return False
class SyscommandAction(Action):
    """Menu Action for system command"""
    def __init__(self, config, menu):
        super(SyscommandAction, self).__init__(config, menu)
        # Shell command template; "%s" is filled with the menu value.
        self._command = Menu.get_menu_config(config, "command", error=True)
    def do_action(self):
        rospy.loginfo("do_action for system command")
        command = self._command % (self._menu.value)
        rospy.loginfo(command)
        # NOTE(review): shell=True with an interpolated menu value -- safe
        # only while commands and menu values come from trusted config.
        # os.setsid puts the child in its own process group.
        process = subprocess.Popen(command, preexec_fn=os.setsid, shell=True)
        process.wait()
        return True
class Event(object):
    """Simple value object pairing an event origin with its value."""
    def __init__(self, origin, value):
        self.value = value
        self.origin = origin
class MenuSelectAction(Action):
    """Action that notifies the menu tree that its menu was selected."""
    def __init__(self, config, menu):
        super(MenuSelectAction, self).__init__(config, menu)

    def do_action(self):
        # Bubble the selection event up from the owning menu.
        self._menu._menu_selected(self._menu)
        return True
class Menu(object):
    """Base class for the cabot handle menu tree.

    A menu node is one of List / Action / Adjust (see the type constants
    below) and is built from a config dict via create_menu().  Selected
    values are persisted through the mongodb_store parameter service.
    """
    # Menu type constants used for self._type.
    Undefined = 0
    List = 1
    Action = 2
    Adjust = 3
    def _get_path(self, name):
        # ROS parameter path under which this menu's value is persisted.
        return "/".join([self._name_space, name, "value"])
    def _get_saved_config(self, name, default=None):
        # Read the persisted value; if absent, store and return `default`.
        try:
            return rospy.get_param(self._get_path(name))
        except KeyError:
            if default is not None:
                self._save_config(name, default)
            return default
    def _save_config(self, name, value):
        # Persist `value` through the mongodb_store config_manager service
        # (blocks until the service becomes available).
        rospy.wait_for_service('/config_manager/set_param')
        service = rospy.ServiceProxy('/config_manager/set_param',
                                     mongodb_store.srv.SetParam)
        path = self._get_path(name)
        rospy.loginfo("%s = %s", path, str(value))
        service(json.dumps({"path":path, "value":value}))
    @staticmethod
    def get_menu_config(config, name, default=None, error=False):
        """Utility function to get config value specified by name.
        if value is not exists return 'default' value
        if error is True and value is not exists raise KeyError
        """
        if name in config:
            return config[name]
        elif error:
            raise KeyError("Config does not have '%s'"%name)
        return default
    @staticmethod
    def create_menu(config, identifier=None, name_space=None, title=None, usage=None, parent=None):
        """Create a menu node from a config dict.

        A config with a "menu" key is an indirect reference resolved from
        the ROS parameter server under name_space; otherwise "type"
        (list/adjust/item, default item) selects the concrete class.
        """
        if not config:
            return None
        # refer menu
        menu = config["menu"] if "menu" in config else None
        if menu is not None:
            path = "%s/%s"%(name_space, menu) if name_space is not None else menu
            config2 = rospy.get_param(path, [])
            return Menu.create_menu(config2, identifier=menu, name_space=name_space, title=title, usage=usage, parent=parent)
        # otherwise
        _type = Menu.get_menu_config(config, "type", "item")
        if _type == "list":
            return MenuList(config, identifier=identifier, name_space=name_space, parent=parent)
        elif _type == "adjust":
            return MenuAdjust(config, identifier=identifier, name_space=name_space, parent=parent)
        elif _type == "item":
            return MenuItem(config, identifier=identifier, name_space=name_space, parent=parent)
        raise ValueError("%s is not a menu type" % (_type))
    def __init__(self, config=None, identifier=None, name_space=None, parent=None):
        self._title = Menu.get_menu_config(config, "title")
        self._usage = Menu.get_menu_config(config, "usage")
        self._type = Menu.Undefined
        self._config = config
        self._identifier = identifier
        self._name_space = name_space
        self._parent = parent
        self._items = []
        self._actions = None
        self._listeners = []
        # Optional object notified via menu_selected() on selection.
        self.delegate = None
    def __str__(self):
        text = ""
        if self._type == Menu.List:
            text += "Menu List (%s, %s)\n" % (self._identifier, self._title) \
                + "\n".join([" "+str(x) for x in self._items])
        elif self._type == Menu.Action:
            text += "Menu Action (%s, %s)" % (self._identifier, self._title)
        elif self._type == Menu.Adjust:
            text += "Menu Adjust (%s, %s)" % (self._identifier, self._title)
        else:
            text += super(Menu, self).__str__()
        if self._actions is not None:
            text += "\n with Action (%s)" % (self._actions)
        return text
    @property
    def identifier(self):
        """Menu identifier"""
        return self._identifier
    @property
    def type(self):
        """Menu type (Undefined/List/Action/Adjust)"""
        return self._type
    @property
    def title(self):
        """Menu title, localized for the current language"""
        return i18n.localized_string(self._title)
    @property
    def usage(self):
        """Menu usage which is read by TTS"""
        return i18n.localized_string(self._usage)
    @property
    def description(self):
        """Description of the menu (localized title by default)"""
        return i18n.localized_string(self._title)
    @property
    def value(self):
        """Value of the menu; None for the base class"""
        return None
    def sev_value(self, value):
        # NOTE(review): name looks like a typo of `set_value`, but renaming
        # would break any existing callers -- confirm before changing.
        raise RuntimeError("not implemented")
    @property
    def can_explore(self):
        # Whether this node has navigable children (overridden by lists).
        return False
    def next(self):
        """Move to next item or value"""
        pass
    def prev(self):
        """Move to previous item or value"""
        pass
    def select(self):
        """Do action for selection"""
        return self
    def reset(self):
        """Reset for reuse"""
        pass
    def _menu_selected(self, origin):
        """menu selected: notify the delegate and bubble up to the parent"""
        if self.delegate:
            self.delegate.menu_selected(origin)
        if self._parent is not None:
            self._parent._menu_selected(origin)
class MenuList(Menu):
    """Menu entry holding an ordered list of child menu entries."""

    def __init__(self, config=None, identifier=None, name_space=None, parent=None):
        # Fall back to the generic navigation usage text when none is configured.
        if Menu.get_menu_config(config, "usage") is None:
            config["usage"] = "MENU_NAVIGATE_USAGE"
        super(MenuList, self).__init__(config=config, identifier=identifier, name_space=name_space, parent=parent)
        self._type = Menu.List
        self._actions = Actions.create_actions(config, self)
        # Instantiate every configured child; log and skip unknown entries.
        children = []
        for item in Menu.get_menu_config(config, "items"):
            child = Menu.create_menu(item, name_space=self._name_space, parent=self)
            if child:
                children.append(child)
            else:
                rospy.logerr("menu {} is not found".format(item))
        self._items = children
        # Index of the currently highlighted child; None until first navigation.
        self._current = None

    def _get_item(self, diff, default):
        # Move the cursor by `diff`, starting from `default` on first use,
        # wrapping around the list with a modulo.
        if self._current is None:
            self._current = default
        else:
            self._current = (self._current + diff) % len(self._items)
        if self._current is None:
            return None
        return self._items[self._current]

    @property
    def value(self):
        """Currently highlighted child entry, or None before any navigation."""
        return self._get_item(0, None)

    @property
    def can_explore(self):
        """Lists can always be explored."""
        return True

    def next(self):
        """Highlight and return the next child (wraps around)."""
        return self._get_item(+1, 0)

    def prev(self):
        """Highlight and return the previous child (wraps around)."""
        return self._get_item(-1, -1)

    def select(self):
        """Run any attached actions, then return the highlighted child."""
        if self._actions is not None:
            self._actions.do_action()
        return self.value

    def get_menu_by_identifier(self, identifier):
        """Return the direct child with the given identifier, or None."""
        for child in self._items:
            if child._identifier == identifier:
                return child
        return None

    @property
    def description(self):
        """Localized title of the highlighted child, or None if nothing is highlighted."""
        current = self.value
        if current is None:
            return None
        return i18n.localized_string(current._title)

    def reset(self):
        """Forget the cursor position and reset every child recursively."""
        self._current = None
        for child in self._items:
            child.reset()
class MenuAdjust(Menu):
"""Adjustable menu"""
def __init__(self, config=None, identifier=None, name_space=None, parent=None):
super(MenuAdjust, self).__init__(config=config, identifier=identifier, name_space=name_space, parent=parent)
self._type = Menu.Adjust
self._max = Menu.get_menu_config(config, "max", error=True)
self._min = Menu.get_menu_config(config, "min", error=True)
self._values = Menu.get_menu_config(config, "values")
if self._values is not None:
self._format = Menu.get_menu_config(config, "format", default="{}")
else:
self._format = Menu.get_menu_config(config, "format", default="{}")
if self._min >= self._max:
raise ValueError("min value should be smaller than max value " \
+ "(%f < %f)"%(self._min, self._max))
self._default = Menu.get_menu_config(config, "default", error=True)
if self._default < self._min or self._max < self._default:
raise ValueError("default value should be in min-max range " \
+ "(%f < %f | |
# Repository: raoulbq/WaveBlocksND
"""The WaveBlocks Project
IOM plugin providing functions for handling
linear combinations of general wavepackets.
@author: <NAME>
@copyright: Copyright (C) 2013, 2016 <NAME>
@license: Modified BSD License
"""
import numpy as np
def add_lincombwp(self, parameters, timeslots=None, lincombsize=None, blockid=0):
    r"""Add storage for the linear combination of general wavepackets.

    :param parameters: An :py:class:`ParameterProvider` instance with at
                       least the key ``ncomponents``.
    :param timeslots: The number of time slots we need. Can be set to ``None``
                      to get automatically growing datasets.
    :param lincombsize: The (maximal) size ``J`` of the linear combination of wavepackets. If specified
                        this remains fixed for all timeslots. Can be set to ``None`` (default)
                        to get automatically growing datasets.
    :param blockid: The ID of the data block to operate on.
    """
    N = parameters["ncomponents"]
    # TODO: Handle multi-component packets
    assert N == 1
    if timeslots is None:
        # Zero initial size, unlimited maximal size (growable dataset).
        T = 0
        Ts = None
    else:
        T = timeslots
        Ts = timeslots
    if lincombsize is None:
        # Zero initial size, unlimited maximal size, default chunk width.
        J = 0
        Js = None
        csJs = 32
    else:
        J = lincombsize
        Js = lincombsize
        csJs = min(32, Js)
    # The overall group containing all lincombwp data
    grp_lc = self._srf[self._prefixb + str(blockid)].require_group("lincombwp")
    # Create the datasets with appropriate parameters.
    # FIX: use concrete dtypes. The abstract scalar types np.integer and
    # np.complexfloating are no longer accepted as dtype arguments by modern
    # NumPy (deprecated in 1.20, later removed); np.int_ and np.complex128
    # are what the abstract names used to resolve to.
    daset_tg_c = grp_lc.create_dataset("timegrid_coefficients", (T,), dtype=np.int_, chunks=True, maxshape=(Ts,), fillvalue=-1)
    daset_tg_p = grp_lc.create_dataset("timegrid_packets", (T,), dtype=np.int_, chunks=True, maxshape=(Ts,), fillvalue=-1)
    grp_lc.create_dataset("lincomb_size", (T,), dtype=np.int_, chunks=True, maxshape=(Ts,))
    # Coefficients
    grp_lc.create_dataset("coefficients", (T, J), dtype=np.complex128, chunks=(1, csJs), maxshape=(Ts, Js))
    # Packet IDs (32 characters is the length of a 'md5' digest in hex representation)
    daset_refs = grp_lc.create_dataset("packet_refs", (T, J), dtype=np.dtype((str, 32)), chunks=(1, csJs), maxshape=(Ts, Js))
    gid = self.create_group(groupid="wavepacketsLCblock" + str(blockid))
    daset_refs.attrs["packet_gid"] = gid
    # Attach pointer to timegrid: index of the next free timeslot.
    daset_tg_c.attrs["pointer"] = 0
    daset_tg_p.attrs["pointer"] = 0
def delete_lincombwp(self, blockid=0):
    r"""Remove the stored linear combination.

    :param blockid: The ID of the data block to operate on.
    """
    path = self._prefixb + str(blockid) + "/lincombwp"
    try:
        del self._srf[path]
    except KeyError:
        # Nothing stored for this block; deleting is then a no-op.
        pass
def has_lincombwp(self, blockid=0):
    r"""Ask if the specified data block has the desired data tensor.

    :param blockid: The ID of the data block to operate on.
    :return: ``True`` if the block contains a ``lincombwp`` group.
    """
    # Membership test directly on the group; no need for the .keys() view.
    return "lincombwp" in self._srf[self._prefixb + str(blockid)]
def save_lincombwp_description(self, descr, blockid=0):
    r"""Save the description of this linear combination.

    :param descr: The description dictionary.
    :param blockid: The ID of the data block to operate on.
    """
    pathd = "/" + self._prefixb + str(blockid) + "/lincombwp"
    attrs = self._srf[pathd].attrs
    # Store each key/value pair as an attribute on the lincombwp group.
    for key, value in descr.items():
        attrs[key] = self._save_attr_value(value)
def save_lincombwp_coefficients(self, coefficients, timestep=None, blockid=0):
    r"""Save the coefficients of the linear combination to a file.

    :param coefficients: The coefficients of the linear combination of wavepackets.
    :type coefficients: A single, suitable :py:class:`ndarray`.
    :param timestep: The timestep at which we save the data.
    :param blockid: The ID of the data block to operate on.
    """
    pathtg = "/" + self._prefixb + str(blockid) + "/lincombwp/timegrid_coefficients"
    pathlcs = "/" + self._prefixb + str(blockid) + "/lincombwp/lincomb_size"
    pathd = "/" + self._prefixb + str(blockid) + "/lincombwp/coefficients"
    # The 'pointer' attribute tracks the next free timeslot to write to.
    timeslot = self._srf[pathtg].attrs["pointer"]
    # Write the data
    self.must_resize(pathlcs, timeslot)
    J = np.size(coefficients)
    self._srf[pathlcs][timeslot] = J
    self.must_resize(pathd, timeslot)
    if not J == 0:
        # Grow the second axis so that J coefficients fit (indices 0..J-1),
        # then write the (flattened) coefficient vector into this timeslot.
        self.must_resize(pathd, J - 1, axis=1)
        self._srf[pathd][timeslot, :J] = np.squeeze(coefficients)
    # Write the timestep to which the stored values belong into the timegrid
    self.must_resize(pathtg, timeslot)
    self._srf[pathtg][timeslot] = timestep
    # Update the pointer
    self._srf[pathtg].attrs["pointer"] += 1
def save_lincombwp_wavepackets(self, packetlist, timestep=None, blockid=0):
    r"""Save the wavepackets being part of this linear combination.

    .. warning:: This is quite an expensive operation.

    :param packetlist: The list of wavepacket objects to store.
    :param timestep: The timestep at which we save the data.
    :param blockid: The ID of the data block to operate on.
    """
    pathtg = "/" + self._prefixb + str(blockid) + "/lincombwp/timegrid_packets"
    pathd = "/" + self._prefixb + str(blockid) + "/lincombwp/packet_refs"
    gid = self._srf[pathd].attrs["packet_gid"]
    # The 'pointer' attribute tracks the next free timeslot to write to.
    timeslot = self._srf[pathtg].attrs["pointer"]
    # Book keeping
    self.must_resize(pathd, timeslot)
    K = len(packetlist)
    if not K == 0:
        # Grow the second axis so that K packet references fit.
        self.must_resize(pathd, K - 1, axis=1)
    # Save the packets
    known_packets = self.get_block_ids(groupid=gid)
    for k, packet in enumerate(packetlist):
        bid = "LC" + str(blockid) + "WP" + str(packet.get_id())
        if bid not in known_packets:
            # First time we see this packet: create its own data block
            # inside the group and register its description.
            bid = self.create_block(blockid=bid, groupid=gid)
            descr = packet.get_description()
            self.add_genericwp(descr, blockid=bid)
        self.save_genericwp(packet, timestep=timestep, blockid=bid)
        # Book keeping
        self._srf[pathd][timeslot, k] = packet.get_id()
    # Write the timestep to which the stored packets belong into the timegrid
    self.must_resize(pathtg, timeslot)
    self._srf[pathtg][timeslot] = timestep
    # Update the pointer
    self._srf[pathtg].attrs["pointer"] += 1
def load_lincombwp_description(self, blockid=0):
    r"""Load the description of this linear combination.

    :param blockid: The ID of the data block to operate on.
    :return: A dictionary with all stored description entries.
    """
    pathd = "/" + self._prefixb + str(blockid) + "/lincombwp"
    # Decode every stored attribute back into a plain python value.
    return {key: self._load_attr_value(value)
            for key, value in self._srf[pathd].attrs.items()}
def load_lincombwp_timegrid(self, blockid=0, key=("coeffs", "packets")):
    r"""Load the timegrid of this linear combination.

    :param blockid: The ID of the data block to operate on.
    :param key: Specify which linear combination timegrids to load. All are independent.
    :type key: Tuple of valid identifier strings that are ``coeffs`` and ``packets``.
               Default is ``("coeffs", "packets")``.
    """
    base = "/" + self._prefixb + str(blockid) + "/lincombwp/"
    # Map the public key names onto the dataset names in the file;
    # unknown keys are silently ignored (same as before).
    names = {"coeffs": "timegrid_coefficients", "packets": "timegrid_packets"}
    grids = []
    for item in key:
        if item in names:
            grids.append(self._srf[base + names[item]][:])
    # A single requested grid is returned bare, several as a tuple.
    if len(grids) == 1:
        return grids[0]
    return tuple(grids)
def load_lincombwp_size(self, timestep=None, blockid=0):
    r"""Load the size (number of packets) of this linear combination.

    :param timestep: Load only the data of this timestep.
    :param blockid: The ID of the data block to operate on.
    """
    pathtg = "/" + self._prefixb + str(blockid) + "/lincombwp/timegrid_coefficients"
    pathlcs = "/" + self._prefixb + str(blockid) + "/lincombwp/lincomb_size"
    # A concrete timestep selects a single entry, otherwise take all of them.
    # (Both branches previously duplicated the same return statement.)
    if timestep is not None:
        index = self.find_timestep_index(pathtg, timestep)
    else:
        index = slice(None)
    return self._srf[pathlcs][index]
def load_lincombwp_coefficients(self, timestep=None, blockid=0):
    r"""Load the coefficients of this linear combination.

    :param timestep: Load only the data of this timestep.
    :param blockid: The ID of the data block to operate on.
    """
    base = "/" + self._prefixb + str(blockid) + "/lincombwp"
    pathtg = base + "/timegrid_coefficients"
    pathlcs = base + "/lincomb_size"
    pathd = base + "/coefficients"
    if timestep is None:
        # All timesteps, full coefficient rows.
        return self._srf[pathd][slice(None), :]
    # Single timestep: clip the row to the actual lincomb size J.
    index = self.find_timestep_index(pathtg, timestep)
    J = self._srf[pathlcs][index]
    return self._srf[pathd][index, :J]
def load_lincombwp_wavepackets(self, timestep, packetindex=None, blockid=0):
    r"""Load the wavepackets being part of this linear combination.

    Note that this is quite an expensive operation.

    :param timestep: Load only the data of this timestep.
    :param packetindex: Load only the packet with this index. If ``None``
                        then load all packets for the given timestep.
    :param blockid: The ID of the data block to operate on.
    """
    base = "/" + self._prefixb + str(blockid) + "/lincombwp"
    pathtg = base + "/timegrid_packets"
    pathlcs = base + "/lincomb_size"
    pathd = base + "/packet_refs"
    index = self.find_timestep_index(pathtg, timestep)
    J = self._srf[pathlcs][index]
    refs = self._srf[pathd][index, :J]
    if packetindex is None:
        # Load every packet referenced at this timestep.
        return tuple(self.load_genericwp(timestep=timestep,
                                         blockid="LC" + str(blockid) + "WP" + str(ref))
                     for ref in refs)
    if packetindex >= J:
        raise ValueError("Packet index is invalid.")
    bid = "LC" + str(blockid) + "WP" + str(refs[packetindex])
    return self.load_genericwp(timestep=timestep, blockid=bid)
def load_lincombwp_wavepacket_refs(self, timestep=None, blockid=0):
    r"""Load the references of the wavepackets being part of
    this linear combination.

    A reference can be turned into a ``blockid`` for loading the data of a
    single wavepacket manually. Given a ``ref`` obtained from this method,
    the corresponding block ID is::

        bid = "LC" + str(blockid) + "WP" + ref

    where ``blockid`` is the block the linear combination was stored in.
    That ``bid`` can then be used to load data of the selected wavepacket,
    for example its parameters in case of a Hagedorn wavepacket.

    :param timestep: Load only the data of this timestep.
    :param blockid: The ID of the data block to operate on.
    :return: A :py:class:`ndarray` of strings.
    """
    pathtg = "/" + self._prefixb + str(blockid) + "/lincombwp/timegrid_packets"
    pathd = "/" + self._prefixb + str(blockid) + "/lincombwp/packet_refs"
    if timestep is None:
        index = slice(None)
    else:
        index = self.find_timestep_index(pathtg, timestep)
    return self._srf[pathd][index, :]
#
# The following two methods are only for convenience and are NOT particularly efficient.
#
def load_lincombwp(self, timestep, blockid=0):
r"""Load a linear combination at | |
#!/usr/bin/env python3
'''
###############################################################################
###############################################################################
## ##
## _ ___ ___ ___ ___ ___ ##
## | | | __ / \ / __| _ | __| ##
## | |__| __ ( ) | (_ | _|__ \ ##
## |____|___ \___/ \___|_| \___/ ##
## v 1.0 (Stable) ##
## ##
## FILE DESCRIPTION: ##
## ##
## Screens the phase information in RINEX observables (e.g. C1,P2,L1,L2) ##
## RINEX observables are stored as a dictionary (produced by 'rinxtr.py') ##
## If freqnum == 1 (single frequency): ##
## We will perform a code-phase Melbourne-Wubbena linear combination ##
## If freqnum == 2 (dual frequency) ##
## We will perform a geometry-free linear combination to screen cycle slips ##
## ##
## INPUTS: ##
## ##
## No file inputs required; this programme is meant as a sub-routine. ##
## To be called by routine 'rinxtr.py', where it takes in a dictionary of ##
## RINEX observations as values, with SVs as the keys to the observations, ##
## and with the epochs as the keys to the SVs. ##
## ##
## ##
## OUTPUT: ##
## ##
## It outputs a phase-processed nested dictionary of RINEX observations. ##
## The format is essentially the same, except in the third sub-dictionary: ##
## rnxdata[epoch][SV] has two new key-value pairs added. ##
## One, L4: the geometry-free linear combination / MBWB linear combination ##
## Two, a cycle slip flag based on L4 observables computed  ##
## The format is formatted as (and will be the format of 'rinxtr.py) ##
## ##
## Output = {epoch1:{5:{'C1':123,'L1':123, ... 'L4':321,'flag':'none'}...}...##
## epoch2:{3:{'C1':123,'L1':123, ... 'L4':321,'flag':'slip'}...}...##
## ... ... ... ... ... ... ##
## epochX:{3:{'C1':123,'L1':123, ... 'L4':321,'flag':'none'}...}} ##
## ##
## REMARKS: ##
## ##
## This programme is run as a subroutine in 'rinxtr.py' only. ##
## It does not take in any other observation file, except 'config.txt' ##
## ##
## AUTHOR MODIFIED: 02-12-2019, by <NAME> ##
## ##
###############################################################################
###############################################################################
'''
import copy
import warnings
import numpy as np
''' The first function flags the carrier phase status at each epoch '''
def phsmrk(rnxdata, rnxstep, goodsats, inps):
# Get the desired input parameters.
freqnum = inps['freq']
cycleslip_tolerance = float(inps['cycsliptol'])
cycleslip_filtlength = inps['cycsliplen']
# Ignore polyfit warnings
warnings.simplefilter('ignore', np.RankWarning)
# Wavelengths of L1 and L2
WL1 = 0.190293672798
WL2 = 0.244210213425
print('Marking and screening observables for cycle slips.')
rnxproc = copy.deepcopy(rnxdata) # Dictionary of processed RINEX data
slipcount = 0 # Counting the number of cycle slips
# For each particular SV ID
for SV in goodsats:
# Across all the epochs recorded
for epoch in rnxproc:
# Initialise a time variable for the previous and next step
prev_epoch = epoch-rnxstep
next_epoch = epoch+rnxstep
# Now, we check if the current SV is being observed, and flag it.
if SV in rnxproc[epoch]:
# Check if this is the first observation.
if prev_epoch in rnxproc:
if SV not in rnxproc[prev_epoch]:
rnxproc[epoch][SV]['flag'] = 'start'
else:
rnxproc[epoch][SV]['flag'] = 'start'
# Check if this is the final observation.
if next_epoch in rnxproc:
if SV not in rnxproc[next_epoch]:
rnxproc[epoch][SV]['flag'] = 'end'
else:
rnxproc[epoch][SV]['flag'] = 'end'
# Check if this is a lone observable
if prev_epoch in rnxproc and next_epoch in rnxproc:
if SV not in rnxproc[prev_epoch]:
if SV not in rnxproc[next_epoch]:
rnxproc[epoch][SV]['flag'] = 'solo'
if SV in rnxproc[prev_epoch]:
if SV in rnxproc[next_epoch]:
rnxproc[epoch][SV]['flag'] = 'none'
elif prev_epoch not in rnxproc and next_epoch in rnxproc:
if SV not in rnxproc[next_epoch]:
rnxproc[epoch][SV]['flag'] = 'solo'
elif next_epoch not in rnxproc and prev_epoch in rnxproc:
if SV not in rnxproc[prev_epoch]:
rnxproc[epoch][SV]['flag'] = 'solo'
# At this stage, we will calculate an intermediate value L4.
# L4 will then be poly-fitted with adjacent values across time.
# If L4 remains an outlier from the polynomial fit,
# Then that epoch's L4 value identifies as a cycle slip.
# Perform Melbourne-Wubbena linear combination as the L4.
if freqnum == 1:
if 'P1' in rnxproc[epoch][SV]:
L1p = rnxproc[epoch][SV]['P1'] # P1 code observable
elif 'C1' in rnxproc[epoch][SV]:
L1p = rnxproc[epoch][SV]['C1'] # P1 code observable
else:
print('Problem found, no C1 or P1 observable in: ')
print(str(epoch) + ' for satellite SV ' + str(SV))
return None
L2p = rnxproc[epoch][SV]['L1'] # L1 phase observable
L2p = L2p * WL1 # Multiply phase by wavelength
# Perform the geometry-free linear combination as the L4.
elif freqnum == 2:
L1p = rnxproc[epoch][SV]['L1'] # L1 phase observable
L1p = L1p * WL1 # Multiply phase by wavelength
L2p = rnxproc[epoch][SV]['L2'] # L2 phase observable
L2p = L2p * WL2 # Multiply phase by wavelength
# Conclude the entry for the geometry-free LC and the flag
L4 = L1p - L2p # Time-stamped geometry-free LC for dual freq
rnxproc[epoch][SV]['L4'] = L4 # Throw in L4 value
# Now we begin the process of an N-window polynomial fitting filter
# We will use a quadratic filter for this approach
N = cycleslip_filtlength # Length of sliding window filter for poly-fit
# For each SV ID
for SV in goodsats:
k = 1 # Restart the time counter
t = [] # Restart the time array
L = [] # Restart the observation array
# Then check through each epoch based on a SV ID
for epoch in rnxproc:
prev_epoch = epoch - rnxstep
# If this SV exists
if SV in rnxproc[epoch]:
# Read the flag of this SV observable
obs = rnxproc[epoch][SV]['L4']
flag = rnxproc[epoch][SV]['flag']
if prev_epoch in rnxproc:
if SV in rnxproc[prev_epoch]:
preflag = rnxproc[prev_epoch][SV]['flag']
else:
preflag = 'none'
# Now is the critical steps that lead to the poly-fit...
# We want to ensure that L4 observations are recorded for
# Flags: start, none, end...
# We would never have a case where the current flag is slip
# First, let's check if it is the last entry of this SV...
if flag == 'end':
trigger = True # Trigger to do polynomial fitting
k += 1 # Increase the time stamp by 1
t.append(k) # Add in the first element
L.append(obs) # Add in the L4 observation
# Next, let's see if this is a stand-alone observable.
elif flag == 'solo':
trigger = False # Do not record the poly-fit results.
k = 1 # Restart the time counter
t = [] # Restart the time array
L = [] # Restart the observation array
# If it isn't, then let's check if it is the starting entry
elif flag == 'start' or preflag == 'slip':
trigger = False # Reset the poly-fit trigger
k = 1 # Restart the time counter
t = [] # Restart the time array
L = [] # Restart the observation array
t.append(k) # Add in the first element
L.append(obs) # Add in the L4 observation
# Otherwise then it is just a normal entry...
elif flag == 'none':
k += 1 # Increase the time stamp by 1
t.append(k) # Add in the first element
L.append(obs) # Add in the L4 observation
elif flag == 'slip':
print('Something is wrong here...')
print('Current epoch is recorded as a cycle slip?')
print('Epoch and SV ID:')
print(str(epoch) + ' ' + str(SV))
else:
print('Something else is wrong here...')
print('Flag string does not match any known flags...')
print('Current flag is: ' + str(flag))
# Be mindful that the tolerance depends on the time step
# Due to the time-dependence on the ionospheric variation
# Now, is where we decide if we wish to do poly-fitting
if k == N+1: # It has reached the filter size limit
trigger = True
# If the trigger is true...
# Then this is where we screen for cycle slips
if trigger == True:
# Over here, we do the polynomial curve fitting
tn = np.array(t) # Numpy-rize the time array
Ln = np.array(L) # Numpy-rize the original L4 data
pn = np.polyfit(tn,Ln,2)
| |
* FROM {table_name}").output
assert "1" in output, error()
@TestScenario
@Requirements(RQ_SRS_006_RBAC_RowPolicy_Alter_Rename("1.0"))
def rename(self, node=None):
    """Check that a row policy renamed via ALTER ... RENAME keeps restricting rows as expected."""
    tbl = f"table_{getuid()}"
    policy = f"pol_{getuid()}"
    policy_renamed = f"pol_new_{getuid()}"
    node = self.context.node if node is None else node
    with table(node, tbl):
        try:
            with Given("I have a row policy"):
                row_policy(name=policy, table=tbl)
            with And("The table has some values"):
                node.query(f"INSERT INTO {tbl} (y) VALUES (1)")
            with And("The row policy is permissive"):
                node.query(
                    f"ALTER ROW POLICY {policy} ON {tbl} FOR SELECT USING y=1 TO default"
                )
            with When("I have alter a row policy by renaming it"):
                node.query(
                    f"ALTER ROW POLICY {policy} ON {tbl} RENAME TO {policy_renamed}"
                )
            with Then("I select from the table"):
                # The renamed policy must still let the matching row through.
                output = node.query(f"SELECT * FROM {tbl}").output
                assert "1" in output, error()
        finally:
            with Finally("I drop the row policy"):
                # Drop under the new name, since the rename succeeded.
                node.query(f"DROP ROW POLICY IF EXISTS {policy_renamed} ON {tbl}")
@TestScenario
@Requirements(RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster("1.0"))
def on_cluster(self, node=None):
    """Check that a row policy altered using ON CLUSTER applies to the nodes of the cluster correctly."""
    table_name = f"table_{getuid()}"
    pol_name = f"pol_{getuid()}"
    if node is None:
        node = self.context.node
    node2 = self.context.node2
    try:
        with Given("I have a table on a cluster"):
            node.query(
                f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory"
            )
        with And("I have a row policy on a cluster on that table"):
            node.query(
                f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name}"
            )
        with And("The table has some values on the first node"):
            node.query(f"INSERT INTO {table_name} (x) VALUES (1)")
        with And("The table has some values on the second node"):
            node2.query(f"INSERT INTO {table_name} (x) VALUES (1)")
        with When("I alter the row policy to have a condition"):
            # The alter itself uses ON CLUSTER, so it should reach every node.
            node.query(
                f"ALTER ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name} FOR SELECT USING 1"
            )
        with Then("I select from the table"):
            # The policy (with no TO assignment) hides the rows on this node.
            output = node.query(f"SELECT * FROM {table_name}").output
            assert "" == output, error()
        with And("I select from another node on the cluster"):
            # The ON CLUSTER alter must have propagated to the second node too.
            output = node2.query(f"SELECT * FROM {table_name}").output
            assert "" == output, error()
    finally:
        # TE flag: keep executing the remaining cleanup steps even if one fails.
        with Finally("I drop the row policy", flags=TE):
            node.query(
                f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}"
            )
        with And("I drop the table", flags=TE):
            node.query(f"DROP TABLE {table_name} ON CLUSTER sharded_cluster")
@TestScenario
def diff_policies_on_diff_nodes(self, node=None):
    """Check that a row policy altered on one node does not affect the row policy on a different node."""
    table_name = f"table_{getuid()}"
    pol_name = f"pol_{getuid()}"
    if node is None:
        node = self.context.node
    node2 = self.context.node2
    try:
        with Given("I have a table on a cluster"):
            node.query(
                f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory"
            )
        with And("I have a row policy on the cluster"):
            node.query(
                f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name}"
            )
        with And("The table has some values on the first node"):
            node.query(f"INSERT INTO {table_name} (x) VALUES (1)")
        with And("The table has some values on the second node"):
            node2.query(f"INSERT INTO {table_name} (x) VALUES (1)")
        with When("I alter the row policy on the first node"):
            # Deliberately no ON CLUSTER clause: the alter stays local to `node`.
            node.query(
                f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1"
            )
        with Then("I select from the table"):
            # On the altered node the policy now hides all rows.
            output = node.query(f"SELECT * FROM {table_name}").output
            assert "" == output, error()
        with And("I select from another node on the cluster"):
            # The second node's policy was not altered, so its rows stay visible.
            output = node2.query(f"SELECT * FROM {table_name}").output
            assert "1" in output, error()
    finally:
        # TE flag: keep executing the remaining cleanup steps even if one fails.
        with Finally("I drop the row policy", flags=TE):
            node.query(
                f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}"
            )
        with And("I drop the table", flags=TE):
            node.query(f"DROP TABLE {table_name} ON CLUSTER sharded_cluster")
@TestScenario
@Requirements(
    RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment("1.0"),
)
def assignment(self, node=None):
    """Check that a user sees the table rows once a PERMISSIVE policy is assigned to them."""
    tbl = f"table_{getuid()}"
    policy = f"pol_{getuid()}"
    node = self.context.node if node is None else node
    with table(node, tbl):
        with Given("I have a row policy"):
            row_policy(name=policy, table=tbl)
        with And("The row policy is permissive"):
            node.query(f"ALTER ROW POLICY {policy} ON {tbl} FOR SELECT USING 1")
        with And("The table has some values"):
            node.query(f"INSERT INTO {tbl} (y) VALUES (1)")
        with When("I alter a row policy to be assigned to default"):
            node.query(f"ALTER ROW POLICY {policy} ON {tbl} TO default")
        with Then("I try to select from the table"):
            # Policy assigned to default: the inserted row is visible.
            output = node.query(f"SELECT * FROM {tbl}").output
            assert "1" in output, error()
@TestScenario
@Requirements(
    RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_None("1.0"),
)
def assignment_none(self, node=None):
    """Check that no one is affected when a row policy is altered to be assigned to NONE."""
    tbl = f"table_{getuid()}"
    policy = f"pol_{getuid()}"
    node = self.context.node if node is None else node
    with table(node, tbl):
        with Given("I have a row policy"):
            row_policy(name=policy, table=tbl)
        with And("The row policy is permissive"):
            node.query(f"ALTER ROW POLICY {policy} ON {tbl} FOR SELECT USING 1")
        with And("The table has some values"):
            node.query(f"INSERT INTO {tbl} (y) VALUES (1)")
        with When("I alter a row policy to be assigned to NONE"):
            node.query(f"ALTER ROW POLICY {policy} ON {tbl} TO NONE")
        with Then("I try to select from the table"):
            # Assigned to NONE: no rows are visible to the default user.
            output = node.query(f"SELECT * FROM {tbl}").output
            assert "" == output, error()
@TestScenario
@Requirements(
    RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_All("1.0"),
)
def assignment_all(self, node=None):
    """Check that everyone is affected when a row policy is altered to be assigned to ALL."""
    tbl = f"table_{getuid()}"
    policy = f"pol_{getuid()}"
    node = self.context.node if node is None else node
    with table(node, tbl):
        with Given("I have a row policy"):
            row_policy(name=policy, table=tbl)
        with And("The row policy is permissive"):
            node.query(f"ALTER ROW POLICY {policy} ON {tbl} FOR SELECT USING 1")
        with And("The table has some values"):
            node.query(f"INSERT INTO {tbl} (y) VALUES (1)")
        with When("I alter a row policy to be assigned to ALL"):
            node.query(f"ALTER ROW POLICY {policy} ON {tbl} TO ALL")
        with Then("I try to select from the table"):
            # Assigned to ALL: the permissive policy lets the row through.
            output = node.query(f"SELECT * FROM {tbl}").output
            assert "1" in output, error()
@TestScenario
@Requirements(
    RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_AllExcept("1.0"),
)
def assignment_all_except(self, node=None):
    """Check that everyone except the excluded user is affected when a row policy is altered to be assigned to ALL EXCEPT."""
    tbl = f"table_{getuid()}"
    policy = f"pol_{getuid()}"
    node = self.context.node if node is None else node
    with table(node, tbl):
        with Given("I have a row policy"):
            row_policy(name=policy, table=tbl)
        with And("The row policy is permissive"):
            node.query(f"ALTER ROW POLICY {policy} ON {tbl} FOR SELECT USING 1")
        with And("The table has some values"):
            node.query(f"INSERT INTO {tbl} (y) VALUES (1)")
        with When("I alter a row policy to be assigned to ALL EXCEPT default"):
            node.query(
                f"ALTER ROW POLICY {policy} ON {tbl} TO ALL EXCEPT default"
            )
        with Then("I try to select from the table"):
            # default is excluded from the permissive policy, so no rows are visible.
            output = node.query(f"SELECT * FROM {tbl}").output
            assert "" == output, error()
@TestScenario
@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0"))
def nested_view(self, node=None):
    """Check that if a user has a row policy on a table and a view is altered to use a condition on that table,
    the user is only able to access the rows specified by the assigned policies.
    """
    table_name = f"table_{getuid()}"
    view_name = f"view_{getuid()}"
    pol_name = f"pol_{getuid()}"
    if node is None:
        node = self.context.node
    with table(node, table_name):
        try:
            with Given("I have a row policy"):
                row_policy(name=pol_name, table=table_name)
            with And("The table has some values"):
                node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)")
            with And("There is a view on the table"):
                node.query(f"CREATE VIEW {view_name} AS SELECT * FROM {table_name}")
            with When("I alter the row policy to be permissive"):
                node.query(
                    f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default"
                )
            with Then("I try to select from the view"):
                # Only the row matching the policy condition (y=1) passes
                # through the view; y=2 must be filtered out.
                output = node.query(f"SELECT * FROM {view_name}").output
                assert "1" in output and "2" not in output, error()
        finally:
            # TE flag: continue cleanup even if this step fails.
            with Finally("I drop the view", flags=TE):
                node.query(f"DROP VIEW IF EXISTS {view_name}")
@TestScenario
@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0"))
def nested_live_view_before_policy(self, node=None):
"""Check that if a live view exists on a table and then a row policy is created,
the user is only able to select rows specified by the assigned policies from the view.
"""
table_name = f"table_{getuid()}"
view_name = f"view_{getuid()}"
pol_name = f"pol_{getuid()}"
if node is None:
node = self.context.node
with table(node, table_name):
try:
with Given(
"I add allow_experimental_live_view to the default query settings"
):
default_query_settings = getsattr(
current().context, "default_query_settings", []
)
default_query_settings.append(("allow_experimental_live_view", 1))
with And("I have a row policy"):
row_policy(name=pol_name, table=table_name)
with And("The table has some values"):
node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)")
with And("There exists a live view on the table"):
node.query(
f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}"
)
with When("I alter the | |
│
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 1 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 04:00:00 ┆ 2021-12-16 03:00:00 ┆ 1 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ b ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ b ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 1 │
└────────┴─────────────────────┴─────────────────────┴─────────────────────┴────────────┘
Dynamic groupby on an index column
>>> df = pl.DataFrame(
... {
... "idx": np.arange(6),
... "A": ["A", "A", "B", "B", "B", "C"],
... }
... )
>>> (
... df.groupby_dynamic(
... "idx",
... every="2i",
... period="3i",
... include_boundaries=True,
... ).agg(pl.col("A").list().alias("A_agg_list"))
... )
shape: (3, 4)
┌─────────────────┬─────────────────┬─────┬─────────────────┐
│ _lower_boundary ┆ _upper_boundary ┆ idx ┆ A_agg_list │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 ┆ list [str] │
╞═════════════════╪═════════════════╪═════╪═════════════════╡
│ 0 ┆ 3 ┆ 0 ┆ ["A", "B", "B"] │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ 5 ┆ 2 ┆ ["B", "B", "C"] │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 4 ┆ 7 ┆ 4 ┆ ["C"] │
└─────────────────┴─────────────────┴─────┴─────────────────┘
"""
return DynamicGroupBy(
self,
index_column,
every,
period,
offset,
truncate,
include_boundaries,
closed,
by,
)
def upsample(
self: DF,
time_column: str,
every: str,
offset: Optional[str] = None,
by: Optional[Union[str, Sequence[str]]] = None,
maintain_order: bool = False,
) -> DF:
"""
Upsample a DataFrame at a regular frequency.
Parameters
----------
time_column
time column will be used to determine a date_range.
Note that this column has to be sorted for the output to make sense.
every
interval will start 'every' duration
offset
change the start of the date_range by this offset.
by
First group by these columns and then upsample for every group
maintain_order
Keep the ordering predictable. This is slower.
The `period` and `offset` arguments are created with
the following string language:
- 1ns (1 nanosecond)
- 1us (1 microsecond)
- 1ms (1 millisecond)
- 1s (1 second)
- 1m (1 minute)
- 1h (1 hour)
- 1d (1 day)
- 1w (1 week)
- 1mo (1 calendar month)
- 1y (1 calendar year)
- 1i (1 index count)
Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
Examples
--------
Upsample a DataFrame by a certain interval.
>>> from datetime import datetime
>>> df = pl.DataFrame(
... {
... "time": [
... datetime(2021, 2, 1),
... datetime(2021, 4, 1),
... datetime(2021, 5, 1),
... datetime(2021, 6, 1),
... ],
... "groups": ["A", "B", "A", "B"],
... "values": [0, 1, 2, 3],
... }
... )
>>> (
... df.upsample(
... time_column="time", every="1mo", by="groups", maintain_order=True
... ).select(pl.all().forward_fill())
... )
shape: (7, 3)
┌─────────────────────┬────────┬────────┐
│ time ┆ groups ┆ values │
│ --- ┆ --- ┆ --- │
│ datetime[ns] ┆ str ┆ i64 │
╞═════════════════════╪════════╪════════╡
│ 2021-02-01 00:00:00 ┆ A ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-03-01 00:00:00 ┆ A ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-04-01 00:00:00 ┆ A ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-05-01 00:00:00 ┆ A ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-04-01 00:00:00 ┆ B ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-05-01 00:00:00 ┆ B ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-06-01 00:00:00 ┆ B ┆ 3 │
└─────────────────────┴────────┴────────┘
"""
if by is None:
by = []
if isinstance(by, str):
by = [by]
if offset is None:
offset = "0ns"
return self._from_pydf(
self._df.upsample(by, time_column, every, offset, maintain_order)
)
def join_asof(
self: DF,
df: "DataFrame",
left_on: Optional[str] = None,
right_on: Optional[str] = None,
on: Optional[str] = None,
by_left: Optional[Union[str, List[str]]] = None,
by_right: Optional[Union[str, List[str]]] = None,
by: Optional[Union[str, List[str]]] = None,
strategy: str = "backward",
suffix: str = "_right",
tolerance: Optional[Union[str, int, float]] = None,
allow_parallel: bool = True,
force_parallel: bool = False,
) -> DF:
"""
Perform an asof join. This is similar to a left-join except that we
match on nearest key rather than equal keys.
Both DataFrames must be sorted by the asof_join key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
The default is "backward".
Parameters
----------
ldf
Lazy DataFrame to join with.
left_on
Join column of the left DataFrame.
right_on
Join column of the right DataFrame.
on
Join column of both DataFrames. If set, `left_on` and `right_on` should be None.
by
join on these columns before doing asof join
by_left
join on these columns before doing asof join
by_right
join on these columns before doing asof join
strategy
One of {'forward', 'backward'}
suffix
Suffix to append to columns with a duplicate name.
tolerance
Numeric tolerance. By setting this the join will only be done if the near keys are within this distance.
If an asof join is done on columns of dtype "Date", "Datetime", "Duration" or "Time" you
use the following string language:
- 1ns (1 nanosecond)
- 1us (1 microsecond)
- 1ms (1 millisecond)
- 1s (1 second)
- 1m (1 minute)
- 1h (1 hour)
- 1d (1 day)
- 1w (1 week)
- 1mo (1 calendar month)
- 1y (1 calendar year)
- 1i (1 index count)
Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
allow_parallel
Allow the physical plan to optionally evaluate the computation of both DataFrames up to the join in parallel.
force_parallel
Force the physical plan to evaluate the computation of both DataFrames up to the join in parallel.
Examples
--------
>>> from datetime import datetime
>>> gdp = pl.DataFrame(
... {
... "date": [
... datetime(2016, 1, 1),
... datetime(2017, 1, 1),
... datetime(2018, 1, 1),
... datetime(2019, 1, 1),
... ], # note record date: Jan 1st (sorted!)
... "gdp": [4164, 4411, 4566, 4696],
... }
... )
>>> population = pl.DataFrame(
... {
... "date": [
... datetime(2016, 5, 12),
... datetime(2017, 5, 12),
... datetime(2018, 5, 12),
... datetime(2019, 5, 12),
... ], # note record date: May 12th (sorted!)
... "population": [82.19, 82.66, 83.12, 83.52],
... }
... )
>>> population.join_asof(
... gdp, left_on="date", right_on="date", strategy="backward"
... )
shape: (4, 3)
┌─────────────────────┬────────────┬──────┐
│ date ┆ population ┆ gdp │
│ --- ┆ --- ┆ --- │
│ datetime[μs] ┆ f64 ┆ i64 │
╞═════════════════════╪════════════╪══════╡
│ 2016-05-12 00:00:00 ┆ 82.19 ┆ 4164 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2017-05-12 00:00:00 ┆ 82.66 ┆ 4411 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2018-05-12 00:00:00 ┆ 83.12 ┆ 4566 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2019-05-12 00:00:00 ┆ 83.52 ┆ 4696 │
└─────────────────────┴────────────┴──────┘
"""
return (
self.lazy()
.join_asof(
df.lazy(),
left_on=left_on,
right_on=right_on,
on=on,
by_left=by_left,
by_right=by_right,
by=by,
strategy=strategy,
suffix=suffix,
tolerance=tolerance,
allow_parallel=allow_parallel,
force_parallel=force_parallel,
)
.collect(no_optimization=True)
)
def join(
self: DF,
df: "DataFrame",
left_on: Optional[Union[str, "pli.Expr", List[Union[str, "pli.Expr"]]]] = None,
right_on: Optional[Union[str, "pli.Expr", List[Union[str, "pli.Expr"]]]] = None,
on: Optional[Union[str, "pli.Expr", List[Union[str, "pli.Expr"]]]] = None,
how: str = "inner",
suffix: str = "_right",
asof_by: Optional[Union[str, List[str]]] = None,
asof_by_left: Optional[Union[str, List[str]]] = None,
asof_by_right: Optional[Union[str, List[str]]] = None,
) -> DF:
"""
SQL like joins.
Parameters
----------
df
DataFrame to join with.
left_on
Name(s) of the left join column(s).
right_on
Name(s) of the right join column(s).
on
Name(s) of the join columns in both DataFrames.
how
Join strategy
- "inner"
- "left"
- "outer"
- "asof"
- "cross"
- "semi"
- "anti"
suffix
Suffix to append to columns with a duplicate name.
asof_by
join on these columns before doing asof join
asof_by_left
join on these columns before doing asof join
asof_by_right
join on these columns before doing asof join
Returns
-------
Joined DataFrame
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> other_df | |
#!/usr/bin/env python3
import os
import sys
import platform
import subprocess
from multiprocessing import Process
from textwrap import dedent
from datetime import datetime,timedelta
from python_utils import print_info_msg, print_err_msg_exit, import_vars, cp_vrfy, cd_vrfy,\
rm_vrfy, ln_vrfy, mkdir_vrfy, mv_vrfy, run_command, date_to_str, \
define_macos_utilities, create_symlink_to_file, check_for_preexist_dir_file, \
cfg_to_yaml_str, find_pattern_in_str
from setup import setup
from set_FV3nml_sfc_climo_filenames import set_FV3nml_sfc_climo_filenames
from get_crontab_contents import get_crontab_contents
from fill_jinja_template import fill_jinja_template
from set_namelist import set_namelist
def python_error_handler():
    """Print setup guidance and terminate when required packages are missing.

    Called from the import check below after the original ImportError has
    already been reported via print_info_msg.
    """
    # stack_trace=False: the traceback adds nothing here — the caller has
    # already printed the ImportError, so only the remediation hint is shown.
    print_err_msg_exit('''
        Errors found: check your python environment
        Instructions for setting up python environments can be found on the web:
        https://github.com/ufs-community/ufs-srweather-app/wiki/Getting-Started
        ''', stack_trace=False)
# Check for non-standard python packages
# These third-party modules are needed later by the workflow generation
# steps (jinja templating, YAML settings, Fortran namelists). Probe them at
# import time so a misconfigured environment fails fast with guidance.
# NOTE: the import order is preserved — it determines which missing package
# gets reported first.
try:
    import jinja2
    import yaml
    import f90nml
except ImportError as import_err:
    print_info_msg(f"{type(import_err).__name__}: {import_err}")
    python_error_handler()
def generate_FV3LAM_wflow():
""" Function to setup a forecast experiment and create a workflow
(according to the parameters specified in the config file
Args:
None
Returns:
None
"""
print(dedent('''
========================================================================
========================================================================
Starting experiment generation...
========================================================================
========================================================================'''))
#set ushdir
ushdir = os.path.dirname(os.path.abspath(__file__))
#check python version
major,minor,patch = platform.python_version_tuple()
if int(major) < 3 or int(minor) < 6:
print_info_msg(f'''
Error: python version must be 3.6 or higher
python version: {major}.{minor}''')
#define macros
define_macos_utilities()
#
#-----------------------------------------------------------------------
#
# Source the file that defines and then calls the setup function. The
# setup function in turn first sources the default configuration file
# (which contains default values for the experiment/workflow parameters)
# and then sources the user-specified configuration file (which contains
# user-specified values for a subset of the experiment/workflow parame-
# ters that override their default values).
#
#-----------------------------------------------------------------------
#
setup()
#import all environment variables
import_vars()
#
#-----------------------------------------------------------------------
#
# Set the full path to the experiment's rocoto workflow xml file. This
# file will be placed at the top level of the experiment directory and
# then used by rocoto to run the workflow.
#
#-----------------------------------------------------------------------
#
WFLOW_XML_FP = os.path.join(EXPTDIR, WFLOW_XML_FN)
#
#-----------------------------------------------------------------------
#
# Create a multiline variable that consists of a yaml-compliant string
# specifying the values that the jinja variables in the template rocoto
# XML should be set to. These values are set either in the user-specified
# workflow configuration file (EXPT_CONFIG_FN) or in the setup.sh script
# sourced above. Then call the python script that generates the XML.
#
#-----------------------------------------------------------------------
#
if WORKFLOW_MANAGER == "rocoto":
template_xml_fp = os.path.join(TEMPLATE_DIR, WFLOW_XML_FN)
print_info_msg(f'''
Creating rocoto workflow XML file (WFLOW_XML_FP) from jinja template XML
file (template_xml_fp):
template_xml_fp = \"{template_xml_fp}\"
WFLOW_XML_FP = \"{WFLOW_XML_FP}\"''')
ensmem_indx_name = ""
uscore_ensmem_name = ""
slash_ensmem_subdir = ""
if DO_ENSEMBLE:
ensmem_indx_name = "mem"
uscore_ensmem_name = f"_mem#{ensmem_indx_name}#"
slash_ensmem_subdir = f"/mem#{ensmem_indx_name}#"
#get time string
d = DATE_FIRST_CYCL + timedelta(seconds=DT_ATMOS)
time_str = d.strftime("%M:%S")
cycl_hrs_str = [ f"{c:02d}" for c in CYCL_HRS ]
cdate_first_cycl = DATE_FIRST_CYCL + timedelta(hours=CYCL_HRS[0])
# Dictionary of settings
settings = {
#
# Parameters needed by the job scheduler.
#
'account': ACCOUNT,
'sched': SCHED,
'partition_default': PARTITION_DEFAULT,
'queue_default': QUEUE_DEFAULT,
'partition_hpss': PARTITION_HPSS,
'queue_hpss': QUEUE_HPSS,
'partition_fcst': PARTITION_FCST,
'queue_fcst': QUEUE_FCST,
'machine': MACHINE,
'slurm_native_cmd': SLURM_NATIVE_CMD,
#
# Workflow task names.
#
'make_grid_tn': MAKE_GRID_TN,
'make_orog_tn': MAKE_OROG_TN,
'make_sfc_climo_tn': MAKE_SFC_CLIMO_TN,
'get_extrn_ics_tn': GET_EXTRN_ICS_TN,
'get_extrn_lbcs_tn': GET_EXTRN_LBCS_TN,
'make_ics_tn': MAKE_ICS_TN,
'make_lbcs_tn': MAKE_LBCS_TN,
'run_fcst_tn': RUN_FCST_TN,
'run_post_tn': RUN_POST_TN,
'get_obs_ccpa_tn': GET_OBS_CCPA_TN,
'get_obs_ndas_tn': GET_OBS_NDAS_TN,
'get_obs_mrms_tn': GET_OBS_MRMS_TN,
'vx_tn': VX_TN,
'vx_gridstat_tn': VX_GRIDSTAT_TN,
'vx_gridstat_refc_tn': VX_GRIDSTAT_REFC_TN,
'vx_gridstat_retop_tn': VX_GRIDSTAT_RETOP_TN,
'vx_gridstat_03h_tn': VX_GRIDSTAT_03h_TN,
'vx_gridstat_06h_tn': VX_GRIDSTAT_06h_TN,
'vx_gridstat_24h_tn': VX_GRIDSTAT_24h_TN,
'vx_pointstat_tn': VX_POINTSTAT_TN,
'vx_ensgrid_tn': VX_ENSGRID_TN,
'vx_ensgrid_refc_tn': VX_ENSGRID_REFC_TN,
'vx_ensgrid_retop_tn': VX_ENSGRID_RETOP_TN,
'vx_ensgrid_03h_tn': VX_ENSGRID_03h_TN,
'vx_ensgrid_06h_tn': VX_ENSGRID_06h_TN,
'vx_ensgrid_24h_tn': VX_ENSGRID_24h_TN,
'vx_ensgrid_mean_tn': VX_ENSGRID_MEAN_TN,
'vx_ensgrid_prob_tn': VX_ENSGRID_PROB_TN,
'vx_ensgrid_mean_03h_tn': VX_ENSGRID_MEAN_03h_TN,
'vx_ensgrid_prob_03h_tn': VX_ENSGRID_PROB_03h_TN,
'vx_ensgrid_mean_06h_tn': VX_ENSGRID_MEAN_06h_TN,
'vx_ensgrid_prob_06h_tn': VX_ENSGRID_PROB_06h_TN,
'vx_ensgrid_mean_24h_tn': VX_ENSGRID_MEAN_24h_TN,
'vx_ensgrid_prob_24h_tn': VX_ENSGRID_PROB_24h_TN,
'vx_ensgrid_prob_refc_tn': VX_ENSGRID_PROB_REFC_TN,
'vx_ensgrid_prob_retop_tn': VX_ENSGRID_PROB_RETOP_TN,
'vx_enspoint_tn': VX_ENSPOINT_TN,
'vx_enspoint_mean_tn': VX_ENSPOINT_MEAN_TN,
'vx_enspoint_prob_tn': VX_ENSPOINT_PROB_TN,
#
# Entity used to load the module file for each GET_OBS_* task.
#
'get_obs': GET_OBS,
#
# Number of nodes to use for each task.
#
'nnodes_make_grid': NNODES_MAKE_GRID,
'nnodes_make_orog': NNODES_MAKE_OROG,
'nnodes_make_sfc_climo': NNODES_MAKE_SFC_CLIMO,
'nnodes_get_extrn_ics': NNODES_GET_EXTRN_ICS,
'nnodes_get_extrn_lbcs': NNODES_GET_EXTRN_LBCS,
'nnodes_make_ics': NNODES_MAKE_ICS,
'nnodes_make_lbcs': NNODES_MAKE_LBCS,
'nnodes_run_fcst': NNODES_RUN_FCST,
'nnodes_run_post': NNODES_RUN_POST,
'nnodes_get_obs_ccpa': NNODES_GET_OBS_CCPA,
'nnodes_get_obs_mrms': NNODES_GET_OBS_MRMS,
'nnodes_get_obs_ndas': NNODES_GET_OBS_NDAS,
'nnodes_vx_gridstat': NNODES_VX_GRIDSTAT,
'nnodes_vx_pointstat': NNODES_VX_POINTSTAT,
'nnodes_vx_ensgrid': NNODES_VX_ENSGRID,
'nnodes_vx_ensgrid_mean': NNODES_VX_ENSGRID_MEAN,
'nnodes_vx_ensgrid_prob': NNODES_VX_ENSGRID_PROB,
'nnodes_vx_enspoint': NNODES_VX_ENSPOINT,
'nnodes_vx_enspoint_mean': NNODES_VX_ENSPOINT_MEAN,
'nnodes_vx_enspoint_prob': NNODES_VX_ENSPOINT_PROB,
#
# Number of cores used for a task
#
'ncores_run_fcst': PE_MEMBER01,
'native_run_fcst': f"--cpus-per-task {OMP_NUM_THREADS_RUN_FCST} --exclusive",
#
# Number of logical processes per node for each task. If running without
# threading, this is equal to the number of MPI processes per node.
#
'ppn_make_grid': PPN_MAKE_GRID,
'ppn_make_orog': PPN_MAKE_OROG,
'ppn_make_sfc_climo': PPN_MAKE_SFC_CLIMO,
'ppn_get_extrn_ics': PPN_GET_EXTRN_ICS,
'ppn_get_extrn_lbcs': PPN_GET_EXTRN_LBCS,
'ppn_make_ics': PPN_MAKE_ICS,
'ppn_make_lbcs': PPN_MAKE_LBCS,
'ppn_run_fcst': PPN_RUN_FCST,
'ppn_run_post': PPN_RUN_POST,
'ppn_get_obs_ccpa': PPN_GET_OBS_CCPA,
'ppn_get_obs_mrms': PPN_GET_OBS_MRMS,
'ppn_get_obs_ndas': PPN_GET_OBS_NDAS,
'ppn_vx_gridstat': PPN_VX_GRIDSTAT,
'ppn_vx_pointstat': PPN_VX_POINTSTAT,
'ppn_vx_ensgrid': PPN_VX_ENSGRID,
'ppn_vx_ensgrid_mean': PPN_VX_ENSGRID_MEAN,
'ppn_vx_ensgrid_prob': PPN_VX_ENSGRID_PROB,
'ppn_vx_enspoint': PPN_VX_ENSPOINT,
'ppn_vx_enspoint_mean': PPN_VX_ENSPOINT_MEAN,
'ppn_vx_enspoint_prob': PPN_VX_ENSPOINT_PROB,
#
# Maximum wallclock time for each task.
#
'wtime_make_grid': WTIME_MAKE_GRID,
'wtime_make_orog': WTIME_MAKE_OROG,
'wtime_make_sfc_climo': WTIME_MAKE_SFC_CLIMO,
'wtime_get_extrn_ics': WTIME_GET_EXTRN_ICS,
'wtime_get_extrn_lbcs': WTIME_GET_EXTRN_LBCS,
'wtime_make_ics': WTIME_MAKE_ICS,
'wtime_make_lbcs': WTIME_MAKE_LBCS,
'wtime_run_fcst': WTIME_RUN_FCST,
'wtime_run_post': WTIME_RUN_POST,
'wtime_get_obs_ccpa': WTIME_GET_OBS_CCPA,
'wtime_get_obs_mrms': WTIME_GET_OBS_MRMS,
'wtime_get_obs_ndas': WTIME_GET_OBS_NDAS,
'wtime_vx_gridstat': WTIME_VX_GRIDSTAT,
'wtime_vx_pointstat': WTIME_VX_POINTSTAT,
'wtime_vx_ensgrid': WTIME_VX_ENSGRID,
'wtime_vx_ensgrid_mean': WTIME_VX_ENSGRID_MEAN,
'wtime_vx_ensgrid_prob': WTIME_VX_ENSGRID_PROB,
'wtime_vx_enspoint': WTIME_VX_ENSPOINT,
'wtime_vx_enspoint_mean': WTIME_VX_ENSPOINT_MEAN,
'wtime_vx_enspoint_prob': WTIME_VX_ENSPOINT_PROB,
#
# Maximum number of tries for each task.
#
'maxtries_make_grid': MAXTRIES_MAKE_GRID,
'maxtries_make_orog': MAXTRIES_MAKE_OROG,
'maxtries_make_sfc_climo': MAXTRIES_MAKE_SFC_CLIMO,
'maxtries_get_extrn_ics': MAXTRIES_GET_EXTRN_ICS,
'maxtries_get_extrn_lbcs': MAXTRIES_GET_EXTRN_LBCS,
'maxtries_make_ics': MAXTRIES_MAKE_ICS,
'maxtries_make_lbcs': MAXTRIES_MAKE_LBCS,
'maxtries_run_fcst': MAXTRIES_RUN_FCST,
'maxtries_run_post': MAXTRIES_RUN_POST,
'maxtries_get_obs_ccpa': MAXTRIES_GET_OBS_CCPA,
'maxtries_get_obs_mrms': MAXTRIES_GET_OBS_MRMS,
'maxtries_get_obs_ndas': MAXTRIES_GET_OBS_NDAS,
'maxtries_vx_gridstat': MAXTRIES_VX_GRIDSTAT,
'maxtries_vx_gridstat_refc': MAXTRIES_VX_GRIDSTAT_REFC,
'maxtries_vx_gridstat_retop': MAXTRIES_VX_GRIDSTAT_RETOP,
'maxtries_vx_gridstat_03h': MAXTRIES_VX_GRIDSTAT_03h,
'maxtries_vx_gridstat_06h': MAXTRIES_VX_GRIDSTAT_06h,
'maxtries_vx_gridstat_24h': MAXTRIES_VX_GRIDSTAT_24h,
'maxtries_vx_pointstat': MAXTRIES_VX_POINTSTAT,
'maxtries_vx_ensgrid': MAXTRIES_VX_ENSGRID,
'maxtries_vx_ensgrid_refc': MAXTRIES_VX_ENSGRID_REFC,
'maxtries_vx_ensgrid_retop': MAXTRIES_VX_ENSGRID_RETOP,
'maxtries_vx_ensgrid_03h': MAXTRIES_VX_ENSGRID_03h,
'maxtries_vx_ensgrid_06h': MAXTRIES_VX_ENSGRID_06h,
'maxtries_vx_ensgrid_24h': MAXTRIES_VX_ENSGRID_24h,
'maxtries_vx_ensgrid_mean': MAXTRIES_VX_ENSGRID_MEAN,
'maxtries_vx_ensgrid_prob': MAXTRIES_VX_ENSGRID_PROB,
'maxtries_vx_ensgrid_mean_03h': MAXTRIES_VX_ENSGRID_MEAN_03h,
'maxtries_vx_ensgrid_prob_03h': MAXTRIES_VX_ENSGRID_PROB_03h,
'maxtries_vx_ensgrid_mean_06h': MAXTRIES_VX_ENSGRID_MEAN_06h,
'maxtries_vx_ensgrid_prob_06h': MAXTRIES_VX_ENSGRID_PROB_06h,
'maxtries_vx_ensgrid_mean_24h': MAXTRIES_VX_ENSGRID_MEAN_24h,
'maxtries_vx_ensgrid_prob_24h': MAXTRIES_VX_ENSGRID_PROB_24h,
'maxtries_vx_ensgrid_prob_refc': MAXTRIES_VX_ENSGRID_PROB_REFC,
'maxtries_vx_ensgrid_prob_retop': MAXTRIES_VX_ENSGRID_PROB_RETOP,
'maxtries_vx_enspoint': MAXTRIES_VX_ENSPOINT,
'maxtries_vx_enspoint_mean': MAXTRIES_VX_ENSPOINT_MEAN,
'maxtries_vx_enspoint_prob': MAXTRIES_VX_ENSPOINT_PROB,
#
# Flags that specify whether to run the preprocessing or
# verification-related tasks.
#
'run_task_make_grid': RUN_TASK_MAKE_GRID,
'run_task_make_orog': RUN_TASK_MAKE_OROG,
'run_task_make_sfc_climo': RUN_TASK_MAKE_SFC_CLIMO,
'run_task_get_extrn_ics': RUN_TASK_GET_EXTRN_ICS,
'run_task_get_extrn_lbcs': RUN_TASK_GET_EXTRN_LBCS,
'run_task_make_ics': RUN_TASK_MAKE_ICS,
'run_task_make_lbcs': RUN_TASK_MAKE_LBCS,
'run_task_run_fcst': RUN_TASK_RUN_FCST,
'run_task_run_post': RUN_TASK_RUN_POST,
'run_task_get_obs_ccpa': RUN_TASK_GET_OBS_CCPA,
'run_task_get_obs_mrms': RUN_TASK_GET_OBS_MRMS,
'run_task_get_obs_ndas': RUN_TASK_GET_OBS_NDAS,
'run_task_vx_gridstat': RUN_TASK_VX_GRIDSTAT,
'run_task_vx_pointstat': RUN_TASK_VX_POINTSTAT,
'run_task_vx_ensgrid': RUN_TASK_VX_ENSGRID,
'run_task_vx_enspoint': RUN_TASK_VX_ENSPOINT,
#
# Number of physical cores per node for the current machine.
#
'ncores_per_node': NCORES_PER_NODE,
#
# Directories and files.
#
'jobsdir': JOBSDIR,
'logdir': LOGDIR,
'scriptsdir': SCRIPTSDIR,
'cycle_basedir': CYCLE_BASEDIR,
'global_var_defns_fp': GLOBAL_VAR_DEFNS_FP,
'load_modules_run_task_fp': LOAD_MODULES_RUN_TASK_FP,
#
# External model information for generating ICs and LBCs.
#
'extrn_mdl_name_ics': EXTRN_MDL_NAME_ICS,
'extrn_mdl_name_lbcs': EXTRN_MDL_NAME_LBCS,
#
# Parameters that determine the set of cycles to run.
#
'date_first_cycl': date_to_str(DATE_FIRST_CYCL,True),
'date_last_cycl': date_to_str(DATE_LAST_CYCL,True),
'cdate_first_cycl': cdate_first_cycl,
'cycl_hrs': cycl_hrs_str,
'cycl_freq': f"{INCR_CYCL_FREQ:02d}:00:00",
#
# Forecast length (same for all cycles).
#
'fcst_len_hrs': FCST_LEN_HRS,
#
# Inline post
#
'write_dopost': WRITE_DOPOST,
#
# METPlus-specific information
#
'model': MODEL,
'met_install_dir': MET_INSTALL_DIR,
'met_bin_exec': MET_BIN_EXEC,
'metplus_path': METPLUS_PATH,
'vx_config_dir': VX_CONFIG_DIR,
'metplus_conf': METPLUS_CONF,
'met_config': MET_CONFIG,
'ccpa_obs_dir': CCPA_OBS_DIR,
'mrms_obs_dir': MRMS_OBS_DIR,
'ndas_obs_dir': NDAS_OBS_DIR,
#
# Ensemble-related parameters.
#
'do_ensemble': DO_ENSEMBLE,
'num_ens_members': NUM_ENS_MEMBERS,
'ndigits_ensmem_names': f"{NDIGITS_ENSMEM_NAMES}",
'ensmem_indx_name': ensmem_indx_name,
'uscore_ensmem_name': uscore_ensmem_name,
'slash_ensmem_subdir': slash_ensmem_subdir,
#
# Parameters associated with subhourly post-processed output
#
'sub_hourly_post': SUB_HOURLY_POST,
'delta_min': DT_SUBHOURLY_POST_MNTS,
'first_fv3_file_tstr': f"000:{time_str}"
}
# End of "settings" variable.
settings_str = cfg_to_yaml_str(settings)
print_info_msg(dedent(f'''
The variable \"settings\" specifying values of the rococo XML variables
has been set as follows:
#-----------------------------------------------------------------------
settings =\n''') + settings_str, verbose=VERBOSE)
#
# Call the python script to generate the experiment's actual XML file
# from the jinja template file.
#
try:
fill_jinja_template(["-q", "-u", settings_str, "-t", template_xml_fp, "-o", WFLOW_XML_FP])
except:
print_err_msg_exit(f'''
Call to python script fill_jinja_template.py to create a rocoto workflow
XML file from a template file failed. Parameters passed to this script
are:
Full path to template rocoto XML file:
template_xml_fp = \"{template_xml_fp}\"
Full path to output rocoto XML file:
WFLOW_XML_FP = \"{WFLOW_XML_FP}\"
Namelist settings specified on command line:
settings =
{settings_str}''')
#
#-----------------------------------------------------------------------
#
# Create a symlink in the experiment directory that points to the workflow
# (re)launch script.
#
#-----------------------------------------------------------------------
#
print_info_msg(f'''
Creating symlink in the experiment directory (EXPTDIR) that points to the
workflow launch script (WFLOW_LAUNCH_SCRIPT_FP):
EXPTDIR = \"{EXPTDIR}\"
WFLOW_LAUNCH_SCRIPT_FP = \"{WFLOW_LAUNCH_SCRIPT_FP}\"''',verbose=VERBOSE)
create_symlink_to_file(WFLOW_LAUNCH_SCRIPT_FP,
os.path.join(EXPTDIR,WFLOW_LAUNCH_SCRIPT_FN),
False)
#
#-----------------------------------------------------------------------
#
# If USE_CRON_TO_RELAUNCH is set to TRUE, add a line to the user's cron
# table to call the (re)launch script every CRON_RELAUNCH_INTVL_MNTS mi-
# nutes.
#
#-----------------------------------------------------------------------
#
if USE_CRON_TO_RELAUNCH:
#
# Make a backup copy of the user's crontab file and save it in a file.
#
time_stamp = datetime.now().strftime("%F_%T")
crontab_backup_fp=os.path.join(EXPTDIR,f"crontab.bak.{time_stamp}")
print_info_msg(f'''
Copying contents of user cron table to backup file:
crontab_backup_fp = \"{crontab_backup_fp}\"''',verbose=VERBOSE)
global called_from_cron
try: called_from_cron
except: called_from_cron = False
crontab_cmd,crontab_contents = get_crontab_contents(called_from_cron=called_from_cron)
# To create the backup crontab file and add a | |
"serverless")
    @serverless.setter
    def serverless(self, value: Optional[pulumi.Input[bool]]):
        # Store the new value in pulumi's per-instance input-property table.
        pulumi.set(self, "serverless", value)
    @property
    @pulumi.getter(name="webSql")
    def web_sql(self) -> Optional[pulumi.Input[bool]]:
        """
        Allow access for Web SQL. Can be either `true` or `false`.
        """
        # Read back from pulumi's input-property table (camelCase wire name
        # "webSql" is handled by the @pulumi.getter decorator).
        return pulumi.get(self, "web_sql")
    @web_sql.setter
    def web_sql(self, value: Optional[pulumi.Input[bool]]):
        # Store the new value in pulumi's per-instance input-property table.
        pulumi.set(self, "web_sql", value)
@pulumi.input_type
class MdbClickhouseClusterBackupWindowStartArgs:
    """Start time (hour/minute) of the backup window for a ClickHouse cluster."""

    def __init__(__self__, *,
                 hours: Optional[pulumi.Input[int]] = None,
                 minutes: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] hours: The hour at which backup will be started.
        :param pulumi.Input[int] minutes: The minute at which backup will be started.
        """
        # Only record explicitly supplied values; omitted inputs stay unset.
        for prop_name, prop_value in (("hours", hours), ("minutes", minutes)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def hours(self) -> Optional[pulumi.Input[int]]:
        """
        The hour at which backup will be started.
        """
        return pulumi.get(self, "hours")

    @hours.setter
    def hours(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "hours", new_value)

    @property
    @pulumi.getter
    def minutes(self) -> Optional[pulumi.Input[int]]:
        """
        The minute at which backup will be started.
        """
        return pulumi.get(self, "minutes")

    @minutes.setter
    def minutes(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "minutes", new_value)
@pulumi.input_type
class MdbClickhouseClusterClickhouseArgs:
    """ClickHouse subcluster arguments: required resources plus optional config."""

    def __init__(__self__, *,
                 resources: pulumi.Input['MdbClickhouseClusterClickhouseResourcesArgs'],
                 config: Optional[pulumi.Input['MdbClickhouseClusterClickhouseConfigArgs']] = None):
        """
        :param pulumi.Input['MdbClickhouseClusterClickhouseResourcesArgs'] resources: Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
        :param pulumi.Input['MdbClickhouseClusterClickhouseConfigArgs'] config: Main ClickHouse cluster configuration.
        """
        # `resources` is mandatory and always stored.
        pulumi.set(__self__, "resources", resources)
        # `config` is optional; record it only when the caller provided one.
        if config is not None:
            pulumi.set(__self__, "config", config)

    @property
    @pulumi.getter
    def resources(self) -> pulumi.Input['MdbClickhouseClusterClickhouseResourcesArgs']:
        """
        Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
        """
        return pulumi.get(self, "resources")

    @resources.setter
    def resources(self, new_value: pulumi.Input['MdbClickhouseClusterClickhouseResourcesArgs']):
        pulumi.set(self, "resources", new_value)

    @property
    @pulumi.getter
    def config(self) -> Optional[pulumi.Input['MdbClickhouseClusterClickhouseConfigArgs']]:
        """
        Main ClickHouse cluster configuration.
        """
        return pulumi.get(self, "config")

    @config.setter
    def config(self, new_value: Optional[pulumi.Input['MdbClickhouseClusterClickhouseConfigArgs']]):
        pulumi.set(self, "config", new_value)
@pulumi.input_type
class MdbClickhouseClusterClickhouseConfigArgs:
def __init__(__self__, *,
background_pool_size: Optional[pulumi.Input[int]] = None,
background_schedule_pool_size: Optional[pulumi.Input[int]] = None,
compressions: Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterClickhouseConfigCompressionArgs']]]] = None,
geobase_uri: Optional[pulumi.Input[str]] = None,
graphite_rollups: Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs']]]] = None,
kafka: Optional[pulumi.Input['MdbClickhouseClusterClickhouseConfigKafkaArgs']] = None,
kafka_topics: Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterClickhouseConfigKafkaTopicArgs']]]] = None,
keep_alive_timeout: Optional[pulumi.Input[int]] = None,
log_level: Optional[pulumi.Input[str]] = None,
mark_cache_size: Optional[pulumi.Input[int]] = None,
max_concurrent_queries: Optional[pulumi.Input[int]] = None,
max_connections: Optional[pulumi.Input[int]] = None,
max_partition_size_to_drop: Optional[pulumi.Input[int]] = None,
max_table_size_to_drop: Optional[pulumi.Input[int]] = None,
merge_tree: Optional[pulumi.Input['MdbClickhouseClusterClickhouseConfigMergeTreeArgs']] = None,
metric_log_enabled: Optional[pulumi.Input[bool]] = None,
metric_log_retention_size: Optional[pulumi.Input[int]] = None,
metric_log_retention_time: Optional[pulumi.Input[int]] = None,
part_log_retention_size: Optional[pulumi.Input[int]] = None,
part_log_retention_time: Optional[pulumi.Input[int]] = None,
query_log_retention_size: Optional[pulumi.Input[int]] = None,
query_log_retention_time: Optional[pulumi.Input[int]] = None,
query_thread_log_enabled: Optional[pulumi.Input[bool]] = None,
query_thread_log_retention_size: Optional[pulumi.Input[int]] = None,
query_thread_log_retention_time: Optional[pulumi.Input[int]] = None,
rabbitmq: Optional[pulumi.Input['MdbClickhouseClusterClickhouseConfigRabbitmqArgs']] = None,
text_log_enabled: Optional[pulumi.Input[bool]] = None,
text_log_level: Optional[pulumi.Input[str]] = None,
text_log_retention_size: Optional[pulumi.Input[int]] = None,
text_log_retention_time: Optional[pulumi.Input[int]] = None,
timezone: Optional[pulumi.Input[str]] = None,
trace_log_enabled: Optional[pulumi.Input[bool]] = None,
trace_log_retention_size: Optional[pulumi.Input[int]] = None,
trace_log_retention_time: Optional[pulumi.Input[int]] = None,
uncompressed_cache_size: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterClickhouseConfigCompressionArgs']]] compressions: Data compression configuration. The structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs']]] graphite_rollups: Graphite rollup configuration. The structure is documented below.
:param pulumi.Input['MdbClickhouseClusterClickhouseConfigKafkaArgs'] kafka: Kafka connection configuration. The structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterClickhouseConfigKafkaTopicArgs']]] kafka_topics: Kafka topic connection configuration. The structure is documented below.
:param pulumi.Input['MdbClickhouseClusterClickhouseConfigMergeTreeArgs'] merge_tree: MergeTree engine configuration. The structure is documented below.
:param pulumi.Input['MdbClickhouseClusterClickhouseConfigRabbitmqArgs'] rabbitmq: RabbitMQ connection configuration. The structure is documented below.
"""
if background_pool_size is not None:
pulumi.set(__self__, "background_pool_size", background_pool_size)
if background_schedule_pool_size is not None:
pulumi.set(__self__, "background_schedule_pool_size", background_schedule_pool_size)
if compressions is not None:
pulumi.set(__self__, "compressions", compressions)
if geobase_uri is not None:
pulumi.set(__self__, "geobase_uri", geobase_uri)
if graphite_rollups is not None:
pulumi.set(__self__, "graphite_rollups", graphite_rollups)
if kafka is not None:
pulumi.set(__self__, "kafka", kafka)
if kafka_topics is not None:
pulumi.set(__self__, "kafka_topics", kafka_topics)
if keep_alive_timeout is not None:
pulumi.set(__self__, "keep_alive_timeout", keep_alive_timeout)
if log_level is not None:
pulumi.set(__self__, "log_level", log_level)
if mark_cache_size is not None:
pulumi.set(__self__, "mark_cache_size", mark_cache_size)
if max_concurrent_queries is not None:
pulumi.set(__self__, "max_concurrent_queries", max_concurrent_queries)
if max_connections is not None:
pulumi.set(__self__, "max_connections", max_connections)
if max_partition_size_to_drop is not None:
pulumi.set(__self__, "max_partition_size_to_drop", max_partition_size_to_drop)
if max_table_size_to_drop is not None:
pulumi.set(__self__, "max_table_size_to_drop", max_table_size_to_drop)
if merge_tree is not None:
pulumi.set(__self__, "merge_tree", merge_tree)
if metric_log_enabled is not None:
pulumi.set(__self__, "metric_log_enabled", metric_log_enabled)
if metric_log_retention_size is not None:
pulumi.set(__self__, "metric_log_retention_size", metric_log_retention_size)
if metric_log_retention_time is not None:
pulumi.set(__self__, "metric_log_retention_time", metric_log_retention_time)
if part_log_retention_size is not None:
pulumi.set(__self__, "part_log_retention_size", part_log_retention_size)
if part_log_retention_time is not None:
pulumi.set(__self__, "part_log_retention_time", part_log_retention_time)
if query_log_retention_size is not None:
pulumi.set(__self__, "query_log_retention_size", query_log_retention_size)
if query_log_retention_time is not None:
pulumi.set(__self__, "query_log_retention_time", query_log_retention_time)
if query_thread_log_enabled is not None:
pulumi.set(__self__, "query_thread_log_enabled", query_thread_log_enabled)
if query_thread_log_retention_size is not None:
pulumi.set(__self__, "query_thread_log_retention_size", query_thread_log_retention_size)
if query_thread_log_retention_time is not None:
pulumi.set(__self__, "query_thread_log_retention_time", query_thread_log_retention_time)
if rabbitmq is not None:
pulumi.set(__self__, "rabbitmq", rabbitmq)
if text_log_enabled is not None:
pulumi.set(__self__, "text_log_enabled", text_log_enabled)
if text_log_level is not None:
pulumi.set(__self__, "text_log_level", text_log_level)
if text_log_retention_size is not None:
pulumi.set(__self__, "text_log_retention_size", text_log_retention_size)
if text_log_retention_time is not None:
pulumi.set(__self__, "text_log_retention_time", text_log_retention_time)
if timezone is not None:
pulumi.set(__self__, "timezone", timezone)
if trace_log_enabled is not None:
pulumi.set(__self__, "trace_log_enabled", trace_log_enabled)
if trace_log_retention_size is not None:
pulumi.set(__self__, "trace_log_retention_size", trace_log_retention_size)
if trace_log_retention_time is not None:
pulumi.set(__self__, "trace_log_retention_time", trace_log_retention_time)
if uncompressed_cache_size is not None:
pulumi.set(__self__, "uncompressed_cache_size", uncompressed_cache_size)
    # NOTE: auto-generated Pulumi accessor pairs. Each @property reads the
    # wired input value via pulumi.get and each setter overwrites it via
    # pulumi.set.
    @property
    @pulumi.getter(name="backgroundPoolSize")
    def background_pool_size(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``background_pool_size`` config input."""
        return pulumi.get(self, "background_pool_size")
    @background_pool_size.setter
    def background_pool_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "background_pool_size", value)
    @property
    @pulumi.getter(name="backgroundSchedulePoolSize")
    def background_schedule_pool_size(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``background_schedule_pool_size`` config input."""
        return pulumi.get(self, "background_schedule_pool_size")
    @background_schedule_pool_size.setter
    def background_schedule_pool_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "background_schedule_pool_size", value)
    @property
    @pulumi.getter
    def compressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterClickhouseConfigCompressionArgs']]]]:
        """
        Data compression configuration. The structure is documented below.
        """
        return pulumi.get(self, "compressions")
    @compressions.setter
    def compressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterClickhouseConfigCompressionArgs']]]]):
        pulumi.set(self, "compressions", value)
    @property
    @pulumi.getter(name="geobaseUri")
    def geobase_uri(self) -> Optional[pulumi.Input[str]]:
        """Value of the ``geobase_uri`` config input."""
        return pulumi.get(self, "geobase_uri")
    @geobase_uri.setter
    def geobase_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "geobase_uri", value)
    @property
    @pulumi.getter(name="graphiteRollups")
    def graphite_rollups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs']]]]:
        """
        Graphite rollup configuration. The structure is documented below.
        """
        return pulumi.get(self, "graphite_rollups")
    @graphite_rollups.setter
    def graphite_rollups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs']]]]):
        pulumi.set(self, "graphite_rollups", value)
    @property
    @pulumi.getter
    def kafka(self) -> Optional[pulumi.Input['MdbClickhouseClusterClickhouseConfigKafkaArgs']]:
        """
        Kafka connection configuration. The structure is documented below.
        """
        return pulumi.get(self, "kafka")
    @kafka.setter
    def kafka(self, value: Optional[pulumi.Input['MdbClickhouseClusterClickhouseConfigKafkaArgs']]):
        pulumi.set(self, "kafka", value)
    @property
    @pulumi.getter(name="kafkaTopics")
    def kafka_topics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterClickhouseConfigKafkaTopicArgs']]]]:
        """
        Kafka topic connection configuration. The structure is documented below.
        """
        return pulumi.get(self, "kafka_topics")
    @kafka_topics.setter
    def kafka_topics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MdbClickhouseClusterClickhouseConfigKafkaTopicArgs']]]]):
        pulumi.set(self, "kafka_topics", value)
    @property
    @pulumi.getter(name="keepAliveTimeout")
    def keep_alive_timeout(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``keep_alive_timeout`` config input."""
        return pulumi.get(self, "keep_alive_timeout")
    @keep_alive_timeout.setter
    def keep_alive_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "keep_alive_timeout", value)
    @property
    @pulumi.getter(name="logLevel")
    def log_level(self) -> Optional[pulumi.Input[str]]:
        """Value of the ``log_level`` config input."""
        return pulumi.get(self, "log_level")
    @log_level.setter
    def log_level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "log_level", value)
    @property
    @pulumi.getter(name="markCacheSize")
    def mark_cache_size(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``mark_cache_size`` config input."""
        return pulumi.get(self, "mark_cache_size")
    @mark_cache_size.setter
    def mark_cache_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "mark_cache_size", value)
    @property
    @pulumi.getter(name="maxConcurrentQueries")
    def max_concurrent_queries(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``max_concurrent_queries`` config input."""
        return pulumi.get(self, "max_concurrent_queries")
    @max_concurrent_queries.setter
    def max_concurrent_queries(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_concurrent_queries", value)
    @property
    @pulumi.getter(name="maxConnections")
    def max_connections(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``max_connections`` config input."""
        return pulumi.get(self, "max_connections")
    @max_connections.setter
    def max_connections(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_connections", value)
    @property
    @pulumi.getter(name="maxPartitionSizeToDrop")
    def max_partition_size_to_drop(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``max_partition_size_to_drop`` config input."""
        return pulumi.get(self, "max_partition_size_to_drop")
    @max_partition_size_to_drop.setter
    def max_partition_size_to_drop(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_partition_size_to_drop", value)
    @property
    @pulumi.getter(name="maxTableSizeToDrop")
    def max_table_size_to_drop(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``max_table_size_to_drop`` config input."""
        return pulumi.get(self, "max_table_size_to_drop")
    @max_table_size_to_drop.setter
    def max_table_size_to_drop(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_table_size_to_drop", value)
    @property
    @pulumi.getter(name="mergeTree")
    def merge_tree(self) -> Optional[pulumi.Input['MdbClickhouseClusterClickhouseConfigMergeTreeArgs']]:
        """
        MergeTree engine configuration. The structure is documented below.
        """
        return pulumi.get(self, "merge_tree")
    @merge_tree.setter
    def merge_tree(self, value: Optional[pulumi.Input['MdbClickhouseClusterClickhouseConfigMergeTreeArgs']]):
        pulumi.set(self, "merge_tree", value)
    @property
    @pulumi.getter(name="metricLogEnabled")
    def metric_log_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Value of the ``metric_log_enabled`` config input."""
        return pulumi.get(self, "metric_log_enabled")
    @metric_log_enabled.setter
    def metric_log_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "metric_log_enabled", value)
    @property
    @pulumi.getter(name="metricLogRetentionSize")
    def metric_log_retention_size(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``metric_log_retention_size`` config input."""
        return pulumi.get(self, "metric_log_retention_size")
    @metric_log_retention_size.setter
    def metric_log_retention_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "metric_log_retention_size", value)
    @property
    @pulumi.getter(name="metricLogRetentionTime")
    def metric_log_retention_time(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``metric_log_retention_time`` config input."""
        return pulumi.get(self, "metric_log_retention_time")
    @metric_log_retention_time.setter
    def metric_log_retention_time(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "metric_log_retention_time", value)
    @property
    @pulumi.getter(name="partLogRetentionSize")
    def part_log_retention_size(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``part_log_retention_size`` config input."""
        return pulumi.get(self, "part_log_retention_size")
    @part_log_retention_size.setter
    def part_log_retention_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "part_log_retention_size", value)
    @property
    @pulumi.getter(name="partLogRetentionTime")
    def part_log_retention_time(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``part_log_retention_time`` config input."""
        return pulumi.get(self, "part_log_retention_time")
    @part_log_retention_time.setter
    def part_log_retention_time(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "part_log_retention_time", value)
    @property
    @pulumi.getter(name="queryLogRetentionSize")
    def query_log_retention_size(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``query_log_retention_size`` config input."""
        return pulumi.get(self, "query_log_retention_size")
    @query_log_retention_size.setter
    def query_log_retention_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "query_log_retention_size", value)
    @property
    @pulumi.getter(name="queryLogRetentionTime")
    def query_log_retention_time(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``query_log_retention_time`` config input."""
        return pulumi.get(self, "query_log_retention_time")
    @query_log_retention_time.setter
    def query_log_retention_time(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "query_log_retention_time", value)
    @property
    @pulumi.getter(name="queryThreadLogEnabled")
    def query_thread_log_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Value of the ``query_thread_log_enabled`` config input."""
        return pulumi.get(self, "query_thread_log_enabled")
    @query_thread_log_enabled.setter
    def query_thread_log_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "query_thread_log_enabled", value)
    @property
    @pulumi.getter(name="queryThreadLogRetentionSize")
    def query_thread_log_retention_size(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``query_thread_log_retention_size`` config input."""
        return pulumi.get(self, "query_thread_log_retention_size")
    @query_thread_log_retention_size.setter
    def query_thread_log_retention_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "query_thread_log_retention_size", value)
    @property
    @pulumi.getter(name="queryThreadLogRetentionTime")
    def query_thread_log_retention_time(self) -> Optional[pulumi.Input[int]]:
        """Value of the ``query_thread_log_retention_time`` config input."""
        return pulumi.get(self, "query_thread_log_retention_time")
    @query_thread_log_retention_time.setter
    def query_thread_log_retention_time(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "query_thread_log_retention_time", value)
    @property
    @pulumi.getter
    def rabbitmq(self) -> Optional[pulumi.Input['MdbClickhouseClusterClickhouseConfigRabbitmqArgs']]:
        """
        RabbitMQ connection configuration. The structure is documented below.
        """
        return pulumi.get(self, "rabbitmq")
    @rabbitmq.setter
    def rabbitmq(self, value: Optional[pulumi.Input['MdbClickhouseClusterClickhouseConfigRabbitmqArgs']]):
        pulumi.set(self, "rabbitmq", value)
    @property
    @pulumi.getter(name="textLogEnabled")
    def text_log_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Value of the ``text_log_enabled`` config input."""
        return pulumi.get(self, "text_log_enabled")
    @text_log_enabled.setter
    def text_log_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "text_log_enabled", value)
    @property
    @pulumi.getter(name="textLogLevel")
    def text_log_level(self) -> Optional[pulumi.Input[str]]:
        """Value of the ``text_log_level`` config input."""
        return pulumi.get(self, "text_log_level")
    @text_log_level.setter
    def text_log_level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "text_log_level", value)
@property
@pulumi.getter(name="textLogRetentionSize")
def text_log_retention_size(self) | |
import pygef.utils as utils
import pandas as pd
import io
import numpy as np
import pygef.plot_utils as plot
from pygef import robertson, been_jefferies
import logging
from pygef.grouping import GroupClassification
logger = logging.getLogger(__name__)
# Maps a GEF #COLUMNINFO quantity number to the cpt column name used by pygef.
# The "found in" notes record gef files in which the non-standard quantity
# numbers were first encountered.
MAP_QUANTITY_NUMBER_COLUMN_NAME_CPT = {
    1: "penetration_length",
    2: "qc",  # 2
    3: "fs",  # 3
    4: "friction_number",  # 4
    5: "u1",  # 5
    6: "u2",  # 6
    7: "u3",  # 7
    8: "inclination",  # 8
    9: "inclination_NS",  # 9
    10: "inclination_EW",  # 10
    11: "corrected_depth",  # 11
    12: "time",  # 12
    13: "corrected_qc",  # 13
    14: "net_cone_resistance",  # 14
    15: "pore_ratio",  # 15
    16: "cone_resistance_number",  # 16
    17: "weight_per_unit_volume",  # 17
    18: "initial_pore_pressure",  # 18
    19: "total_vertical_soil_pressure",  # 19
    20: "effective_vertical_soil_pressure",
    21: "Inclination_in_X_direction",
    22: "Inclination_in_Y_direction",
    23: "Electric_conductivity",
    31: "magnetic_field_x",
    32: "magnetic_field_y",
    33: "magnetic_field_z",
    34: "total_magnetic_field",
    35: "magnetic_inclination",
    36: "magnetic_declination",
    99: "Classification_zone_Robertson_1990",
    # found in:#COMPANYID= Fugro GeoServices B.V., NL005621409B08, 31
    131: "speed",  # found in:COMPANYID= Multiconsult, 09073590, 31
    135: "Temperature_C",  # found in:#COMPANYID= Inpijn-Blokpoel,
    250: "magneto_slope_y",  # found in:COMPANYID= Danny, Tjaden, 31
    251: "magneto_slope_x",  # found in:COMPANYID= Danny, Tjaden, 31
}
# Column names of the main bore data block; list position i corresponds to
# GEF quantity number i + 1 (see the enumerate(..., 1) mapping below).
COLUMN_NAMES_BORE = [
    "depth_top",  # 1
    "depth_bottom",  # 2
    "lutum_percentage",  # 3
    "silt_percentage",  # 4
    "sand_percentage",  # 5
    "gravel_percentage",  # 6
    "organic_matter_percentage",  # 7
    "sand_median",  # 8
    "gravel_median",  # 9
]
# GEF quantity number (1-based) -> bore column name.
MAP_QUANTITY_NUMBER_COLUMN_NAME_BORE = dict(enumerate(COLUMN_NAMES_BORE, 1))
# Column names of the bore child data block; list position i corresponds to
# GEF quantity number i + 1 (see the enumerate(..., 1) mapping below).
COLUMN_NAMES_BORE_CHILD = [
    "depth_top",  # 1
    "depth_bottom",  # 2
    "undrained_shear_strength",  # 3
    "vertical_permeability",  # 4
    "horizontal_permeability",  # 5
    "effective_cohesion_at_x%_strain",  # 6
    "friction_angle_at_x%_strain",  # 7
    "water_content",  # 8
    "dry_volumetric_mass",  # 9
    "wet_volumetric_mass",  # 10
    "d_50",  # 11
    "d_60/d_10_uniformity",  # 12
    "d_90/d_10_gradation",  # 13
    "dry_volumetric_weight",  # 14
    "wet_volumetric_weight",  # 15
    "vertical_strain",  # 16
]
# GEF quantity number (1-based) -> bore child column name.
MAP_QUANTITY_NUMBER_COLUMN_NAME_BORE_CHILD = dict(enumerate(COLUMN_NAMES_BORE_CHILD, 1))
# Robertson soil-type description -> integer class id.
dict_soil_type_rob = {
    "Peat": 1,
    "Clays - silty clay to clay": 2,
    "Silt mixtures - clayey silt to silty clay": 3,
    "Sand mixtures - silty sand to sandy silt": 4,
    "Sands - clean sand to silty sand": 5,
    "Gravelly sand to dense sand": 6,
}
# Been & Jefferies soil-type description -> integer class id.
dict_soil_type_been = {
    "Peat": 1,
    "Clays": 2,
    "Clayey silt to silty clay": 3,
    "Silty sand to sandy silt": 4,
    "Sands: clean sand to silty": 5,
    "Gravelly sands": 6,
}
class ParseGEF:
"""
The ParseGEF file can be used to parse a *.gef file and use it as an ParseGEF object.
The gef parser is built following the conventional format described in:
https://publicwiki.deltares.nl/download/attachments/102204318/GEF-CPT.pdf?version=1&modificationDate=1409732008000&api=v2
For more information on initialization of this class type:
print(ParseGEF.__init__.__doc__)
To check the available methods, type:
print(dir(ParseGEF))
**Attributes**:
    The ParseGEF class accepts as input the *.gef file of a bore or cpt type.
    Some attributes are common to both types, others are specific to the type (cpt or bore).
Check the list below for the available attributes.
** Common attributes:**
type: str
Type of the gef file
project_id: str
Project id
x: float
X coordinate respect to the coordinate system
y: float
Y coordinate respect to the coordinate system
zid: float
Z coordinate respect to the height system
height_system: float
Type of coordinate system, 31000 is NAP
    file_date: datetime.datetime
Start date time
test_id: str
Identifying name of gef file.
s: str
String version of gef file.
** Cpt attributes:**
*Always present:*
df: pandas.DataFrame
DataFrame containing the same column contained in the original .gef file and
some additional columns [depth, elevation_with_respect_to_NAP]
Tip: Use depth column instead of the penetration_length, the depth is corrected
with the inclination(if present).
Note that the Friction ratio is always calculated from the fs and qc values and not parsed from the file.
If this attribute is called after the classify method the columns relative to the classification
are also contained.
*Not always present* default: None
The description is added only for the most important attributes, for the others check:
https://publicwiki.deltares.nl/download/attachments/102204318/GEF-CPT.pdf?version=1&modificationDate=1409732008000&api=v2
cpt_class: str
        Cpt class. The format is not standard so it might not always be properly parsed.
column_void: str
It is the definition of no value for the gef file
nom_surface_area_cone_tip: float
Nom. surface area of cone tip [mm2]
nom_surface_area_friction_element: float
Nom. surface area of friction casing [mm2]
net_surface_area_quotient_of_the_cone_tip: float
Net surface area quotient of cone tip [-]
net_surface_area_quotient_of_the_friction_casing: float
Net surface area quotient of friction casing [-]
distance_between_cone_and_centre_of_friction_casing: float
friction_present: float
ppt_u1_present: float
ppt_u2_present: float
ppt_u3_present: float
inclination_measurement_present: float
use_of_back_flow_compensator: float
type_of_cone_penetration_test: float
pre_excavated_depth: float
Pre excavate depth [m]
groundwater_level: float
Ground water level [m]
water_depth_offshore_activities: float
end_depth_of_penetration_test: float
stop_criteria: float
zero_measurement_cone_before_penetration_test: float
zero_measurement_cone_after_penetration_test: float
zero_measurement_friction_before_penetration_test: float
zero_measurement_friction_after_penetration_test: float
zero_measurement_ppt_u1_before_penetration_test: float
zero_measurement_ppt_u1_after_penetration_test: float
zero_measurement_ppt_u2_before_penetration_test: float
zero_measurement_ppt_u2_after_penetration_test: float
zero_measurement_ppt_u3_before_penetration_test: float
zero_measurement_ppt_u3_after_penetration_test: float
zero_measurement_inclination_before_penetration_test: float
zero_measurement_inclination_after_penetration_test: float
zero_measurement_inclination_ns_before_penetration_test: float
zero_measurement_inclination_ns_after_penetration_test: float
zero_measurement_inclination_ew_before_penetration_test: float
zero_measurement_inclination_ew_after_penetration_test : float
mileage: float
** Bore attributes:**
df: pandas.DataFrame
DataFrame containing the columns: [
"depth_top",
"depth_bottom",
"soil_code",
"G", gravel component
"S", sand component
"C", clay component
"L", loam component
"P", peat component
"SI", silt component
"Remarks",
]
"""
def __init__(self, path=None, string=None):
"""
Base class of gef parser. It switches between the cpt or borehole parser.
It takes as input either the path to the gef file or the gef file as string.
Parameters
----------
path: str
Path to the *.gef file.
string: str
String version of the *.gef file.
"""
self.path = path
self.df = None
self.net_surface_area_quotient_of_the_cone_tip = None
self.pre_excavated_depth = None
if string is None:
with open(path, encoding="utf-8", errors="ignore") as f:
string = f.read()
self.s = string
end_of_header = utils.parse_end_of_header(self.s)
header_s, data_s = self.s.split(end_of_header)
self.zid = utils.parse_zid_as_float(header_s)
self.height_system = utils.parse_height_system(header_s)
self.x = utils.parse_xid_as_float(header_s)
self.y = utils.parse_yid_as_float(header_s)
self.file_date = utils.parse_file_date(header_s)
self.test_id = utils.parse_test_id(header_s)
self.type = utils.parse_gef_type(string)
if self.type == "cpt":
parsed = ParseCPT(header_s, data_s, self.zid, self.height_system)
elif self.type == "bore":
parsed = ParseBORE(header_s, data_s)
elif self.type == "borehole-report":
raise ValueError(
"The selected gef file is a GEF-BOREHOLE-Report. Can only parse "
"GEF-CPT-Report and GEF-BORE-Report. Check the PROCEDURECODE."
)
else:
raise ValueError(
"The selected gef file is not a cpt nor a borehole. "
"Check the REPORTCODE or the PROCEDURECODE."
)
self.__dict__.update(parsed.__dict__)
self.df = self.df.dropna().reset_index(drop=True)
def plot(
self,
classification=None,
water_level_NAP=None,
water_level_wrt_depth=None,
min_thickness=None,
p_a=0.1,
new=True,
show=False,
figsize=(11, 8),
df_group=None,
do_grouping=False,
grid_step_x=None,
dpi=100,
colors=None,
z_NAP=False,
):
"""
Plot the *.gef file and return matplotlib.pyplot.figure .
It works both with a cpt or borehole type file. If no argument it is passed it returns:
- CPT: plot of qc [MPa] and Friction ratio [%]
- BOREHOLE: plot of soil components over the depth.
Parameters
----------
classification: str, only for cpt type
If classification ("robertson", "been_jefferies") is specified a subplot is added with the classification
for each cpt row.
water_level_NAP: float, only for cpt type, necessary for the classification: give this or water_level_wrt_depth
Water level with respect to NAP
water_level_wrt_depth: float, only for cpt type, necessary for the classification: give this or water_level_NAP
Water level with respect to the ground_level [0], it should be a negative value.
min_thickness: float, only for cpt type, optional for the classification [m]
If specified together with the do_grouping set to True, a group classification is added to the plot.
The grouping is a simple algorithm that merge all the layers < min_thickness with the last above one >
min_thickness.
In order to not make a big error do not use a value bigger then 0.2 m
p_a: float, only for cpt type, optional for the classification
Atmospheric pressure. Default: 0.1 MPa.
new: bool, only for cpt type, optional for the classification default:True
If True and the classification is robertson, the new(2016) implementation of robertson is used.
show: bool
If True the plot is showed, else the matplotlib.pytplot.figure is returned
figsize: tuple
Figsize of the plot, default (11, 8).
df_group: pd.DataFrame, only for cpt type, optional for the classification
Use this argument to plot a defined soil layering next to the other subplots.
It should contain the columns:
- layer
Name of layer, should be either BeenJefferies of Robertson soil type,
if it is different then also the argument colors should be passed.
- z_centr_NAP
Z value of the middle of the layer
- thickness
Thickness of the layer
do_grouping: bool, only for cpt type, optional for the classification
If True a group classification is added to the plot.
grid_step_x: float, only for cpt type, | |
(
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count is not None
):
load_balancer_managed_outbound_ip_count = (
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count
)
elif self.decorator_mode == DecoratorMode.UPDATE:
if (
not self.get_load_balancer_outbound_ips() and
not self.get_load_balancer_outbound_ip_prefixes() and
load_balancer_managed_outbound_ip_count is None
):
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count is not None
):
load_balancer_managed_outbound_ip_count = (
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return load_balancer_managed_outbound_ip_count
def get_load_balancer_managed_outbound_ipv6_count(self) -> Union[int, None]:
"""Obtain the expected count of IPv6 managed outbound IPs.
Note: SDK provides default value 0 and performs the following validation {'maximum': 100, 'minimum': 0}.
:return: int or None
"""
count_ipv6 = self.raw_param.get(
'load_balancer_managed_outbound_ipv6_count')
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None
):
count_ipv6 = (
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6
)
elif self.decorator_mode == DecoratorMode.UPDATE:
if (
not self.get_load_balancer_outbound_ips() and
not self.get_load_balancer_outbound_ip_prefixes() and
count_ipv6 is None
):
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None
):
count_ipv6 = (
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6
)
return count_ipv6
# pylint: disable=unused-argument
def _get_outbound_type(
self,
enable_validation: bool = False,
read_only: bool = False,
load_balancer_profile: ManagedClusterLoadBalancerProfile = None,
) -> Union[str, None]:
"""Internal function to dynamically obtain the value of outbound_type according to the context.
Note: Overwritten in aks-preview to add support for the newly added nat related constants.
Note: All the external parameters involved in the validation are not verified in their own getters.
When outbound_type is not assigned, dynamic completion will be triggerd. By default, the value is set to
CONST_OUTBOUND_TYPE_LOAD_BALANCER.
This function supports the option of enable_validation. When enabled, if the value of outbound_type is one of
CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY, CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY or
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING, the following checks will be performed. If load_balancer_sku is set
to basic, an InvalidArgumentValueError will be raised. If the value of outbound_type is not
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING and vnet_subnet_id is not assigned, a RequiredArgumentMissingError
will be raised. If the value of outbound_type equals to CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING and
any of load_balancer_managed_outbound_ip_count, load_balancer_outbound_ips or load_balancer_outbound_ip_prefixes
is assigned, a MutuallyExclusiveArgumentError will be raised.
This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
This function supports the option of load_balancer_profile, if provided, when verifying loadbalancer-related
parameters, the value in load_balancer_profile will be used for validation.
:return: string or None
"""
# read the original value passed by the command
outbound_type = self.raw_param.get("outbound_type")
# try to read the property value corresponding to the parameter from the `mc` object
read_from_mc = False
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.outbound_type is not None
):
outbound_type = self.mc.network_profile.outbound_type
read_from_mc = True
# skip dynamic completion & validation if option read_only is specified
if read_only:
return outbound_type
# dynamic completion
if (
not read_from_mc and
outbound_type != CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY and
outbound_type != CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY and
outbound_type != CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING
):
outbound_type = CONST_OUTBOUND_TYPE_LOAD_BALANCER
# validation
# Note: The parameters involved in the validation are not verified in their own getters.
if enable_validation:
if outbound_type in [
CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY,
CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY,
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
]:
# Should not enable read_only for get_load_balancer_sku, since its default value is None, and it has
# not been decorated into the mc object at this time, only the value after dynamic completion is
# meaningful here.
if safe_lower(self._get_load_balancer_sku(enable_validation=False)) == "basic":
raise InvalidArgumentValueError(
"{} doesn't support basic load balancer sku".format(outbound_type))
if outbound_type == CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY:
if self.get_vnet_subnet_id() in ["", None]:
raise RequiredArgumentMissingError(
"--vnet-subnet-id must be specified for userAssignedNATGateway and it must "
"be pre-associated with a NAT gateway with outbound public IPs or IP prefixes"
)
if outbound_type == CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING:
if self.get_vnet_subnet_id() in ["", None]:
raise RequiredArgumentMissingError(
"--vnet-subnet-id must be specified for userDefinedRouting and it must "
"be pre-configured with a route table with egress rules"
)
if load_balancer_profile:
if (
load_balancer_profile.managed_outbound_i_ps or
load_balancer_profile.outbound_i_ps or
load_balancer_profile.outbound_ip_prefixes
):
raise MutuallyExclusiveArgumentError(
"userDefinedRouting doesn't support customizing "
"a standard load balancer with IP addresses"
)
else:
if (
self.get_load_balancer_managed_outbound_ip_count() or
self.get_load_balancer_outbound_ips() or
self.get_load_balancer_outbound_ip_prefixes()
):
raise MutuallyExclusiveArgumentError(
"userDefinedRouting doesn't support customizing "
"a standard load balancer with IP addresses"
)
return outbound_type
def _get_enable_windows_gmsa(self, enable_validation: bool = False) -> bool:
"""Internal function to obtain the value of enable_windows_gmsa.
This function supports the option of enable_validation. Please refer to function __validate_gmsa_options for
details of validation.
:return: bool
"""
# read the original value passed by the command
enable_windows_gmsa = self.raw_param.get("enable_windows_gmsa")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.windows_profile and
# backward compatibility
hasattr(self.mc.windows_profile, "gmsa_profile") and
self.mc.windows_profile.gmsa_profile and
self.mc.windows_profile.gmsa_profile.enabled is not None
):
enable_windows_gmsa = self.mc.windows_profile.gmsa_profile.enabled
# this parameter does not need dynamic completion
# validation
if enable_validation:
(
gmsa_dns_server,
gmsa_root_domain_name,
) = self._get_gmsa_dns_server_and_root_domain_name(
enable_validation=False
)
self.__validate_gmsa_options(
enable_windows_gmsa, gmsa_dns_server, gmsa_root_domain_name, self.get_yes()
)
return enable_windows_gmsa
def get_enable_windows_gmsa(self) -> bool:
"""Obtain the value of enable_windows_gmsa.
This function will verify the parameter by default. When enable_windows_gmsa is specified, if both
gmsa_dns_server and gmsa_root_domain_name are not assigned and user does not confirm the operation,
a DecoratorEarlyExitException will be raised; if only one of gmsa_dns_server or gmsa_root_domain_name is
assigned, raise a RequiredArgumentMissingError. When enable_windows_gmsa is not specified, if any of
gmsa_dns_server or gmsa_root_domain_name is assigned, raise a RequiredArgumentMissingError.
:return: bool
"""
return self._get_enable_windows_gmsa(enable_validation=True)
def _get_gmsa_dns_server_and_root_domain_name(self, enable_validation: bool = False):
"""Internal function to obtain the values of gmsa_dns_server and gmsa_root_domain_name.
This function supports the option of enable_validation. Please refer to function __validate_gmsa_options for
details of validation.
:return: a tuple containing two elements: gmsa_dns_server of string type or None and gmsa_root_domain_name of
string type or None
"""
# gmsa_dns_server
# read the original value passed by the command
gmsa_dns_server = self.raw_param.get("gmsa_dns_server")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
gmsa_dns_read_from_mc = False
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.windows_profile and
# backward compatibility
hasattr(self.mc.windows_profile, "gmsa_profile") and
self.mc.windows_profile.gmsa_profile and
self.mc.windows_profile.gmsa_profile.dns_server is not None
):
gmsa_dns_server = self.mc.windows_profile.gmsa_profile.dns_server
gmsa_dns_read_from_mc = True
# gmsa_root_domain_name
# read the original value passed by the command
gmsa_root_domain_name = self.raw_param.get("gmsa_root_domain_name")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
gmsa_root_read_from_mc = False
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.windows_profile and
# backward compatibility
hasattr(self.mc.windows_profile, "gmsa_profile") and
self.mc.windows_profile.gmsa_profile and
self.mc.windows_profile.gmsa_profile.root_domain_name is not None
):
gmsa_root_domain_name = self.mc.windows_profile.gmsa_profile.root_domain_name
gmsa_root_read_from_mc = True
# consistent check
if gmsa_dns_read_from_mc != gmsa_root_read_from_mc:
raise CLIInternalError(
"Inconsistent state detected, one of gmsa_dns_server and gmsa_root_domain_name "
"is read from the `mc` object."
)
# this parameter does not need dynamic completion
# validation
if enable_validation:
self.__validate_gmsa_options(
self._get_enable_windows_gmsa(enable_validation=False),
gmsa_dns_server,
gmsa_root_domain_name,
self.get_yes(),
)
return gmsa_dns_server, gmsa_root_domain_name
def get_gmsa_dns_server_and_root_domain_name(self) -> Tuple[Union[str, None], Union[str, None]]:
"""Obtain the values of gmsa_dns_server and gmsa_root_domain_name.
This function will verify the parameter by default. When enable_windows_gmsa is specified, if both
gmsa_dns_server and gmsa_root_domain_name are not assigned and user does not confirm the operation,
a DecoratorEarlyExitException will be raised; if only one of gmsa_dns_server or gmsa_root_domain_name is
assigned, raise a RequiredArgumentMissingError. When enable_windows_gmsa is not specified, if any of
gmsa_dns_server or gmsa_root_domain_name is assigned, raise a RequiredArgumentMissingError.
:return: a tuple containing two elements: gmsa_dns_server of string type or None and gmsa_root_domain_name of
string type or None
"""
return self._get_gmsa_dns_server_and_root_domain_name(enable_validation=True)
def get_snapshot_id(self) -> Union[str, None]:
    """Obtain the value of snapshot_id.

    The raw command-line value is used unless the first agent pool profile on
    the `mc` object carries creation data with a source resource id, which
    then takes precedence.

    :return: string or None
    """
    # raw value passed on the command line
    snapshot_id = self.raw_param.get("snapshot_id")
    # prefer the value recorded on the mc object, when present
    if self.mc and self.mc.agent_pool_profiles:
        pool = safe_list_get(self.mc.agent_pool_profiles, 0, None)
        creation = pool.creation_data if pool else None
        if creation and creation.source_resource_id is not None:
            snapshot_id = creation.source_resource_id
    # no dynamic completion and no validation required for this parameter
    return snapshot_id
def get_snapshot(self) -> Union[Snapshot, None]:
"""Helper function to retrieve the Snapshot object corresponding to a snapshot id.
This function will store an intermediate "snapshot"
distance, to_unit_cell=True):
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
"""
def get_rand_vec():
#deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
return vector / vnorm * distance if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec(), frac_coords=False,to_unit_cell=to_unit_cell)
def displace_by(self, dr, to_unit_cell=True):
    """Translate each site by its own Cartesian displacement vector.

    Args:
        dr: per-site displacement vectors (Cartesian), indexed like the sites.
        to_unit_cell (bool): whether to fold displaced sites back into the cell.
    """
    for idx in range(len(self._sites)):
        self.translate_sites([idx], dr[idx], frac_coords=False,
                             to_unit_cell=to_unit_cell)
def from_displacement(self, dr, to_unit_cell=True):
    """Return a displaced copy of this structure; self is left untouched.

    Args:
        dr: per-site Cartesian displacement vectors.
        to_unit_cell (bool): fold displaced sites back into the unit cell.
    """
    displaced = Structure(self._lattice, self.species_and_occu, self.frac_coords)
    displaced.displace_by(dr, to_unit_cell=to_unit_cell)
    return displaced
def add_oxidation_state_by_element(self, oxidation_states):
    """Add oxidation states to a structure.

    Args:
        oxidation_states (dict): Dict of oxidation states,
            e.g. {"Li": 1, "Fe": 2, "P": 5, "O": -2}.

    Raises:
        ValueError: if any element present in the structure is missing
            from the dictionary.
    """
    try:
        for idx, site in enumerate(self._sites):
            decorated = {
                Specie(el.symbol, oxidation_states[el.symbol]): occu
                for el, occu in site.species_and_occu.items()
            }
            self._sites[idx] = PeriodicSite(decorated, site.frac_coords,
                                            self._lattice,
                                            coords_are_cartesian=False,
                                            properties=site.properties)
    except KeyError:
        raise ValueError("Oxidation state of all elements must be "
                         "specified in the dictionary.")
def add_oxidation_state_by_site(self, oxidation_states):
    """Add oxidation states to a structure by site.

    Args:
        oxidation_states (list): one oxidation state per site, in site order.
            E.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]

    Raises:
        ValueError: if the list is shorter than the number of sites.
    """
    try:
        for i, site in enumerate(self._sites):
            new_sp = {}
            for el, occu in site.species_and_occu.items():
                sym = el.symbol
                # every species on a site gets that site's oxidation state
                new_sp[Specie(sym, oxidation_states[i])] = occu
            new_site = PeriodicSite(new_sp, site.frac_coords,
                                    self._lattice,
                                    coords_are_cartesian=False,
                                    properties=site.properties)
            self._sites[i] = new_site
    except IndexError:
        # FIX: the input is a list, not a dictionary — message corrected
        raise ValueError("Oxidation state of all sites must be "
                         "specified in the list.")
def remove_oxidation_states(self):
    """Strip oxidation states from every site, merging occupancies per element."""
    for idx, site in enumerate(self._sites):
        plain_species = collections.defaultdict(float)
        for sp, occu in site.species_and_occu.items():
            # different oxidation states of one element collapse together
            plain_species[Element(sp.symbol)] += occu
        self._sites[idx] = PeriodicSite(plain_species, site.frac_coords,
                                        self._lattice,
                                        coords_are_cartesian=False,
                                        properties=site.properties)
def generate_supercell(self, scaling_matrix, scref=None):
    """Create a supercell.

    Args:
        scaling_matrix: A scaling matrix for transforming the lattice
            vectors. Has to be all integers. Several options are possible:

            a. A full 3x3 scaling matrix defining the linear combination of
               the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,1]]
               generates a new structure with lattice vectors a' = 2a + b,
               b' = 3b, c' = c where a, b, and c are the lattice vectors of
               the original structure.
            b. A sequence of three scaling factors. E.g., [2, 1, 1]
               specifies that the supercell should have dimensions 2a x b x c.
            c. A number, which simply scales all lattice vectors by the
               same factor.
        scref: optional precomputed list of lattice points within the
            supercell; computed via supercell_latticepoints(scmat) when None.

    Returns:
        Structure: the supercell.
    """
    scmat = np.array(scaling_matrix, np.int16)
    if scmat.shape != (3, 3):
        # promote a scalar or a 3-vector of factors to a diagonal matrix
        scmat = np.array(scmat * np.eye(3), np.int16)
    n_cell = int(round(np.linalg.det(scmat)))
    # FIX: the new lattice and the lattice points were each computed twice;
    # compute once and reuse.
    new_lattice = Lattice(np.dot(scmat, self._lattice.matrix))
    inv = np.linalg.inv(scmat)
    sc_ref = supercell_latticepoints(scmat) if scref is None else scref
    return Structure(new_lattice,
                     [s.species_and_occu for s in self for _ in range(n_cell)],
                     (self.frac_coords[:, None, :] + sc_ref[None, :, :]).reshape((-1, 3)).dot(inv),
                     coords_are_cartesian=False, to_unit_cell=True,
                     site_properties_T=[s.properties for s in self for _ in range(n_cell)],
                     intensive_properties=self.intensive_properties,
                     # NOTE(review): scales by self.n_cell, not the local n_cell —
                     # confirm this is the intended total-cell count.
                     extensive_properties={k: v * self.n_cell for k, v in self.extensive_properties.items()})
def optimize_supercell(nsc1, maxIter=2000):
    """
    search for optimal supercell shape (as cubic like as possible)

    NOTE(review): this method looks unfinished and cannot work as written:
      * the signature is missing ``self`` although the body reads
        ``self.num_sites`` / ``self.volume`` / ``self.lattice`` — calling it as
        a method binds the instance to ``nsc1``;
      * for negative ``nsc1`` ("number of atoms desired") the conversion keeps
        the negative sign, so ``volsc`` goes negative — presumably ``-nsc`` was
        intended;
      * the search loop only assigns ``scmat=1``, never updates
        ``bestsc``/``bestlen``, and the function returns nothing.

    Args:
        nsc1: positive means number of supercells targeted
            negative means a certain number of atoms desired
        maxIter: number of iterations
    """
    nsc=nsc1
    if nsc<0:
        # NOTE(review): likely should be -nsc / abs(nsc) here (see docstring)
        nsc=int(round(nsc/self.num_sites))
    volsc = nsc*self.volume
    # NOTE(review): elsewhere this class uses self.lattice.matrix; passing the
    # lattice object itself to np.linalg.inv presumably relies on __array__ —
    # confirm.
    invprim = np.linalg.inv(self.lattice)
    ntry=0
    bestsc=np.identity(3, dtype=int)
    bestlen=999999.0
    for i in range(maxIter):
        scmat=1  # placeholder — the optimization body was never written
def scale_lattice(self, volume):
    """Rescale the lattice to a target volume.

    Length proportions and angles are preserved; only the overall size changes.

    Args:
        volume (float): New volume of the unit cell in A^3.
    """
    rescaled = self._lattice.scale(volume)
    self.modify_lattice(rescaled)
def ijkl2frac(self, ijkl):
    """Fractional coordinates of atom ``l`` translated by image vector (i, j, k).

    Args:
        ijkl: array-like of four numbers — image offsets i, j, k plus site index l.
    """
    image_offset = np.array(ijkl[:3])
    site_index = int(ijkl[3])
    return image_offset + self._sites[site_index].frac_coords
def ijkl2cart(self, ijkl):
    """Cartesian coordinates of atom ``l`` in image cell (i, j, k).

    :return: cartesian coordinates
    """
    frac = self.ijkl2frac(ijkl)
    return np.dot(frac, self.lattice._matrix)
def frac2ijkl(self, coords, frac_coords=True, tolerance=1E-3):
    """Identify which atom corresponds to the specified coordinates.

    Args:
        coords (3x1 array): Array-like object with the coordinates.
        frac_coords (bool): must be True; only fractional lookup is supported.
        tolerance (float): matching tolerance in fractional units.

    Returns:
        integer ijkl array identifying the site and its image cell.
    """
    assert frac_coords == True
    return Structure.frac2ijkl_fromapos(coords, self.frac_coords, tolerance)
@staticmethod
def frac2ijkl_fromapos(c_f, apos, tolerance):
for l in range(len(apos)):
rvec= c_f - apos[l]
rvec_frac = rvec - np.round(rvec)
if np.linalg.norm(rvec_frac) < tolerance:
return np.append(np.round(rvec), [l]).astype(int)
print('debug from apos', c_f)
raise ValueError("could not find [%f, %f, %f] in cell" % (c_f[0], c_f[1], c_f[2]))
@staticmethod
def ijkl_in_supercell(scmat, ijkl):
    """Re-express a primitive-cell ijkl index in a supercell.

    :param scmat: supercell scaling matrix
    :param ijkl: [i, j, k, l] index in the primitive cell
    :return: new ijkl in the supercell.

    The lattice points returned by supercell_latticepoints(scmat) are assumed
    to lie within the supercell spanned by scmat.
    """
    inv = np.linalg.inv(scmat)
    nsc = abs(int(round(np.linalg.det(scmat))))
    # FIX: _ijkl_in_supercell is a staticmethod of Structure — the bare name is
    # not in scope inside another staticmethod and raised NameError.
    return Structure._ijkl_in_supercell(nsc, inv, supercell_latticepoints(scmat).dot(inv), ijkl)
@staticmethod
def _ijkl_in_supercell(nsc, invscmat, refpts, ijkl):
    """Map a primitive-cell ijkl onto supercell lattice points ``refpts``.

    Args:
        nsc: number of primitive cells per supercell.
        invscmat: inverse of the supercell scaling matrix.
        refpts: primitive lattice points inside the supercell (supercell frame).
        ijkl: [i, j, k, l] index in the primitive cell.
    """
    frac_in_sc = np.dot(ijkl[:3], invscmat)
    result = Structure.frac2ijkl_fromapos(frac_in_sc, refpts, 1E-3)
    # each primitive site expands into nsc supercell sites
    result[3] += ijkl[3] * nsc
    return result
def nbtable(self, r):
    """Neighbor table: for each atom, the ijkl entries within cutoff ``r``.

    Args:
        r: cutoff distance (angstroms).

    Returns:
        list of (k, 4) int-like arrays, one per atom, each row an
        [i, j, k, l] neighbor entry.

    Results are cached per cutoff in ``self._nbtable``.
    """
    if not hasattr(self, '_nbtable'):
        self._nbtable = {}
    if r in self._nbtable:
        return self._nbtable[r]
    recp_len = np.array(self.lattice.reciprocal_lattice.abc)
    sr = r + 0.15  # small padding on the search radius
    nmax = sr * recp_len / (2 * math.pi)
    floor = math.floor
    n = self.num_sites
    fcoords = self.frac_coords
    nbtable_per_atom = []
    pmin = np.amin(fcoords, axis=0)
    pmax = np.amax(fcoords, axis=0)
    # image-cell ranges wide enough to cover the padded cutoff
    arange = np.arange(int(floor(pmin[0] - nmax[0])),
                       int(floor(pmax[0] + nmax[0])) + 1)
    brange = np.arange(int(floor(pmin[1] - nmax[1])),
                       int(floor(pmax[1] + nmax[1])) + 1)
    crange = np.arange(int(floor(pmin[2] - nmax[2])),
                       int(floor(pmax[2] + nmax[2])) + 1)
    arange = arange[:, None] * np.array([1, 0, 0])[None, :]
    brange = brange[:, None] * np.array([0, 1, 0])[None, :]
    crange = crange[:, None] * np.array([0, 0, 1])[None, :]
    images = arange[:, None, None] + brange[None, :, None] + crange[None, None, :]
    images = images.reshape((-1, 3))
    shifted_coords = (fcoords[:, None, :] + images[None, :, :]).reshape((-1, 3))
    coords = self.lattice.get_cartesian_coords(shifted_coords)
    ijkls = np.array([[img[0], img[1], img[2], l] for l in range(n) for img in images])
    for i in range(n):
        pct = self.cart_coords[i]
        # FIX: vectorized distance computation replaces the per-point Python
        # loop (was a list comprehension with per-element sqrt).
        dists = np.linalg.norm(coords - pct, axis=1)
        within_r = np.where(dists <= r)
        nbtable_per_atom.append(ijkls[within_r])
    self._nbtable[r] = nbtable_per_atom
    return nbtable_per_atom
def find_nb_cluster(self, ijkls, cut):
    """Find atoms within ``cut`` of EVERY atom listed in ``ijkls``.

    Args:
        ijkls: (nsite, 4) array of [i, j, k, l] cluster atoms.
        cut: cutoff distance.

    Returns:
        list of ijkl arrays (expressed relative to the first cluster atom's
        image) that are neighbors of every cluster atom.

    Raises:
        ValueError: if the cluster is empty.
    """
    nbtable = self.nbtable(cut)
    nsite = ijkls.shape[0]
    if nsite <= 0:
        raise ValueError("At least 1 atom in cluster needed")
    # FIX: precompute one membership set per remaining cluster atom; the
    # original rescanned the neighbor list linearly for every candidate.
    nb_sets = [{tuple(x) for x in nbtable[ijkls[j][3]]} for j in range(1, nsite)]
    atoms = []
    for _atom in nbtable[ijkls[0][3]]:
        atom = _atom.copy()
        atom[:3] += ijkls[0][:3]  # shift into the first atom's image frame
        within = True
        for j in range(1, nsite):
            atom_wrt_j = atom.copy()
            atom_wrt_j[:3] -= ijkls[j, :3]
            if tuple(atom_wrt_j) not in nb_sets[j - 1]:
                within = False
                break
        if within:
            atoms.append(atom)
    return atoms
def get_scmat(self, sc_R):
    """Integer scaling matrix relating this primitive cell to a supercell.

    :param sc_R: supercell lattice vectors, or a Structure whose lattice is used
    :return: integer scaling matrix
    """
    if isinstance(sc_R, Structure):
        sc_matrix = sc_R.lattice.matrix
    else:
        sc_matrix = sc_R
    product = np.dot(sc_matrix, self.lattice.inv_matrix)
    return product.round().astype(int)
def map_to_prim(self, prim):
    """Given supercell (self) and primitive cell, get the ideal (undisplaced) supercell.

    :param prim: primitive structure
    :return: supercell without displacement
    """
    scmat = prim.get_scmat(self.lattice.matrix)
    ideal_sc = prim.generate_supercell(scmat)
    return self.map_to_reference(ideal_sc)
def map_to_reference(self, sc):
    """Snap this (displaced) supercell onto the ideal reference supercell ``sc``.

    Each site is mapped to its nearest reference site; conflicting (non 1-to-1)
    mappings are resolved by brute-force permutation of the contested targets,
    minimizing the total distance.

    :param sc: ideal reference supercell with the same number of sites
    :return: self, with site coordinates replaced by the nearest reference
        position plus an integer lattice translation
    """
    assert self.num_sites == sc.num_sites
    dist_mat = self.lattice.get_all_distances(self.frac_coords, sc.frac_coords)
    jmin = np.argmin(dist_mat, axis=1)
    bad_i, bad_j = non_1to1(jmin)
    if len(bad_i) > 0:
        print("** WARNING** found %d conflicting mappings" % (len(bad_i)))
        print(self.frac_coords[bad_i], "==>", sc.frac_coords[bad_j])
        # try to resolve the conflict: brute-force over assignments of the
        # contested reference sites (factorial in len(bad_j) — assumed small)
        from itertools import permutations
        min_dist = 1E99
        solve_j = bad_j
        for try_j in permutations(bad_j):
            dist = np.sum([dist_mat[bad_i[i], try_j[i]] for i in range(len(bad_i))])
            if dist < min_dist:
                min_dist = dist
                # FIX: was `solve_j = [try_j]`, which wrapped the whole tuple in
                # a one-element list; `jmin[bad_i] = solve_j` then had shape
                # (1, k) and could not be assigned into the (k,) slot.
                solve_j = list(try_j)
        jmin[bad_i] = solve_j
        print(bad_j, solve_j)
        print("resolved", self.frac_coords[bad_i], "==>", sc.frac_coords[solve_j])
    for i in range(self.num_sites):
        # reference position plus the integer lattice translation that brings
        # it closest to the current (displaced) position
        self[i].set_coords(np.round(self.frac_coords[i] - sc.frac_coords[jmin[i]]) + sc.frac_coords[jmin[i]], cart=False)
    return self
def set_coords(self, c, cart=True):
    """Assign new coordinates to every site.

    Args:
        c: per-site coordinate array, indexed like the sites.
        cart (bool): whether ``c`` holds Cartesian (True) or fractional coords.
    """
    for idx in range(self.num_sites):
        self[idx].set_coords(c[idx], cart=cart)
def get_order_wrt(self, p1, inverse=False, tol=1E-4):
from _c_util import get_structure_ordering
if p1 is None:
return list(range(self.num_sites))
if isinstance(p1, Structure):
assert (np.abs(self.lattice._matrix-p1.lattice._matrix)<1E-6).all(), "ERROR difference lattice"
pos= p1.frac_coords if isinstance(p1, Structure) else p1
if inverse:
ordering= get_structure_ordering(pos, self.frac_coords, 1, tol).tolist()
else:
ordering= get_structure_ordering(self.frac_coords, pos, 1, | |
"""
try :
self._destip = destip
except Exception as e:
raise e
@property
def destipop(self):
    """Either the equals (=) or does not equal (!=) logical operator.<br/>Possible values = =, !=, EQ, NEQ."""
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._destipop

@destipop.setter
def destipop(self, destipop):
    """Either the equals (=) or does not equal (!=) logical operator.<br/>Possible values = =, !=, EQ, NEQ

    :param destipop: value to assign
    """
    self._destipop = destipop
@property
def destipval(self):
    """IP address or range of IP addresses to match against the destination IP address of an incoming IPv4 packet. In the command line interface, separate the range with a hyphen and enclose within brackets. For example: [10.102.29.30-10.102.29.189]."""
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._destipval

@destipval.setter
def destipval(self, destipval):
    """IP address or range of IP addresses to match against the destination IP address of an incoming IPv4 packet. In the command line interface, separate the range with a hyphen and enclose within brackets. For example: [10.102.29.30-10.102.29.189].

    :param destipval: value to assign
    """
    self._destipval = destipval
@property
def destport(self):
    """Port number or range of port numbers to match against the destination port number of an incoming IPv4 packet. In the command line interface, separate the range with a hyphen and enclose within brackets. For example: [40-90].
    Note: The destination port can be specified only for TCP and UDP protocols.
    """
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._destport

@destport.setter
def destport(self, destport):
    """Port number or range of port numbers to match against the destination port number of an incoming IPv4 packet. In the command line interface, separate the range with a hyphen and enclose within brackets. For example: [40-90].
    Note: The destination port can be specified only for TCP and UDP protocols.

    :param destport: value to assign
    """
    self._destport = destport
@property
def destportop(self):
    """Either the equals (=) or does not equal (!=) logical operator.<br/>Possible values = =, !=, EQ, NEQ."""
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._destportop

@destportop.setter
def destportop(self, destportop):
    """Either the equals (=) or does not equal (!=) logical operator.<br/>Possible values = =, !=, EQ, NEQ

    :param destportop: value to assign
    """
    self._destportop = destportop
@property
def destportval(self):
    """Port number or range of port numbers to match against the destination port number of an incoming IPv4 packet. In the command line interface, separate the range with a hyphen and enclose within brackets. For example: [40-90].
    Note: The destination port can be specified only for TCP and UDP protocols.<br/>Maximum length = 65535.
    """
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._destportval

@destportval.setter
def destportval(self, destportval):
    """Port number or range of port numbers to match against the destination port number of an incoming IPv4 packet. In the command line interface, separate the range with a hyphen and enclose within brackets. For example: [40-90].
    Note: The destination port can be specified only for TCP and UDP protocols.<br/>Maximum length = 65535

    :param destportval: value to assign
    """
    self._destportval = destportval
@property
def ttl(self):
    """Number of seconds, in multiples of four, after which the extended ACL rule expires. If you do not want the extended ACL rule to expire, do not specify a TTL value.<br/>Minimum length = 1<br/>Maximum length = 0x7FFFFFFF."""
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._ttl

@ttl.setter
def ttl(self, ttl):
    """Number of seconds, in multiples of four, after which the extended ACL rule expires. If you do not want the extended ACL rule to expire, do not specify a TTL value.<br/>Minimum length = 1<br/>Maximum length = 0x7FFFFFFF

    :param ttl: value to assign
    """
    self._ttl = ttl
@property
def srcmac(self):
    """MAC address to match against the source MAC address of an incoming IPv4 packet."""
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._srcmac

@srcmac.setter
def srcmac(self, srcmac):
    """MAC address to match against the source MAC address of an incoming IPv4 packet.

    :param srcmac: value to assign
    """
    self._srcmac = srcmac
@property
def srcmacmask(self):
    """Used to define range of Source MAC address. It takes string of 0 and 1, 0s are for exact match and 1s for wildcard. For matching first 3 bytes of MAC address, srcMacMask value "000000111111". .<br/>Default value: "000000000000"."""
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._srcmacmask

@srcmacmask.setter
def srcmacmask(self, srcmacmask):
    """Used to define range of Source MAC address. It takes string of 0 and 1, 0s are for exact match and 1s for wildcard. For matching first 3 bytes of MAC address, srcMacMask value "000000111111". .<br/>Default value: "000000000000"

    :param srcmacmask: value to assign
    """
    self._srcmacmask = srcmacmask
@property
def protocol(self):
    """Protocol to match against the protocol of an incoming IPv4 packet.<br/>Possible values = ICMP, IGMP, TCP, EGP, IGP, ARGUS, UDP, RDP, RSVP, EIGRP, L2TP, ISIS."""
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._protocol

@protocol.setter
def protocol(self, protocol):
    """Protocol to match against the protocol of an incoming IPv4 packet.<br/>Possible values = ICMP, IGMP, TCP, EGP, IGP, ARGUS, UDP, RDP, RSVP, EIGRP, L2TP, ISIS

    :param protocol: value to assign
    """
    self._protocol = protocol
@property
def protocolnumber(self):
    """Protocol to match against the protocol of an incoming IPv4 packet.<br/>Minimum length = 1<br/>Maximum length = 255."""
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._protocolnumber

@protocolnumber.setter
def protocolnumber(self, protocolnumber):
    """Protocol to match against the protocol of an incoming IPv4 packet.<br/>Minimum length = 1<br/>Maximum length = 255

    :param protocolnumber: value to assign
    """
    self._protocolnumber = protocolnumber
@property
def vlan(self):
    """ID of the VLAN. The NetScaler appliance applies the ACL rule only to the incoming packets of the specified VLAN. If you do not specify a VLAN ID, the appliance applies the ACL rule to the incoming packets on all VLANs.<br/>Minimum length = 1<br/>Maximum length = 4094."""
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._vlan

@vlan.setter
def vlan(self, vlan):
    """ID of the VLAN. The NetScaler appliance applies the ACL rule only to the incoming packets of the specified VLAN. If you do not specify a VLAN ID, the appliance applies the ACL rule to the incoming packets on all VLANs.<br/>Minimum length = 1<br/>Maximum length = 4094

    :param vlan: value to assign
    """
    self._vlan = vlan
@property
def vxlan(self):
    """ID of the VXLAN. The NetScaler appliance applies the ACL rule only to the incoming packets of the specified VXLAN. If you do not specify a VXLAN ID, the appliance applies the ACL rule to the incoming packets on all VXLANs.<br/>Minimum length = 1<br/>Maximum length = 16777215."""
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._vxlan

@vxlan.setter
def vxlan(self, vxlan):
    """ID of the VXLAN. The NetScaler appliance applies the ACL rule only to the incoming packets of the specified VXLAN. If you do not specify a VXLAN ID, the appliance applies the ACL rule to the incoming packets on all VXLANs.<br/>Minimum length = 1<br/>Maximum length = 16777215

    :param vxlan: value to assign
    """
    self._vxlan = vxlan
@property
def Interface(self):
    """ID of an interface. The NetScaler appliance applies the ACL rule only to the incoming packets from the specified interface. If you do not specify any value, the appliance applies the ACL rule to the incoming packets of all interfaces."""
    # FIX: removed no-op `try/except Exception as e: raise e` wrapper
    return self._Interface
@Interface.setter
def Interface(self, Interface) :
| |
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 01 10:00:58 2021
@author: <NAME>
"""
#------------------------------------------------------------------#
# # # # # Imports # # # # #
#------------------------------------------------------------------#
import numpy as np
import pandas as pd
import os
import time
from scipy import ndimage
import losses
import models
from generate_files import GenerateFiles
from make_data import MakeData
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LogNorm
import seaborn as sns
import matplotlib.style as style
style.use('seaborn-poster') #sets the size of the charts
style.use('ggplot')
from astropy.io import fits
from astropy.wcs import WCS
from astropy.utils.data import get_pkg_data_filename
from astropy.coordinates import SkyCoord, match_coordinates_sky
import astropy.units as u
from astropy.stats import mad_std
import astrotools.healpytools as hpt
import astropy_healpix as ahp
from astropy.coordinates import ICRS
from tqdm import tqdm
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger, TensorBoard
from tensorflow.keras.losses import binary_crossentropy
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
import tensorflow.keras.backend as K
from keras_unet_collection import models as tf_models
import healpy as hp
from hpproj import CutSky, to_coord
# import logging
# cs_logger = logging.getLogger('cutsky')
# cs_logger.setLevel(logging.WARNING)
# cs_logger.propagate = False
# hpproj_logger = logging.getLogger('hpproj')
# hpproj_logger.setLevel(logging.WARNING)
# mpl_logger = logging.getLogger('matplotlib')
# mpl_logger.setLevel(logging.WARNING)
class CNNSegmentation(MakeData):
def __init__(self, dataset, bands, npix, n_labels, cold_cores, planck_path, milca_path, model, range_comp, epochs, batch, lr, patience, loss, optimizer, loops, size=64, disk_radius = None, delta=0.4, gamma=0.75, drop_out=False, output_path = None):
    """Configure a CNN segmentation experiment.

    Stores training hyper-parameters, encodes the selected frequency bands as
    a bitmask (self.freq / self.planck_freq), resolves the optimizer and loss
    by name, and builds the requested keras_unet_collection model.

    NOTE(review): an unrecognized `model` string leaves self.model unset —
    confirm callers always pass one of the supported names.
    """
    super().__init__(dataset, npix, loops, planck_path, milca_path, disk_radius=disk_radius, output_path=output_path)
    self.range_comp = range_comp
    self.loops = loops
    self.size = size
    self.drop_out = drop_out
    self.epochs = epochs
    self.batch = batch
    self.lr = lr
    self.patience = patience
    # detection thresholds — presumably peak probability and min/max disk size; confirm
    self.pmax=0.6
    self.dmin=2
    self.dmax=60
    self.bands = bands
    self.delta = delta
    self.gamma = gamma
    self.cold_cores = cold_cores
    # encode the selected bands as a bitmask: one power of two per band
    self.freq = 0
    self.planck_freq = 0
    if '100GHz' in bands:
        self.freq += 2
        self.planck_freq += 2
    if '143GHz' in bands:
        self.freq += 4
        self.planck_freq += 4
    if '217GHz' in bands:
        self.freq += 8
        self.planck_freq += 8
    if '353GHz' in bands:
        self.freq += 16
        self.planck_freq += 16
    if '545GHz' in bands:
        self.freq += 32
        self.planck_freq += 32
    if '857GHz' in bands:
        self.freq += 64
        self.planck_freq += 64
    # derived maps are counted in self.freq only (not planck_freq)
    if 'y-map' in bands:
        self.freq += 128
    if 'CO' in bands:
        self.freq += 256
    if 'p-noise' in bands:
        self.freq += 512
    # suffix encoding the main run hyper-parameters, used in save paths
    self.output_name = 'l%s_e%s_b%s_lr%s_p%s_d%s'%(n_labels, epochs, batch, lr, patience, disk_radius)
    # resolve optimizer by name; NOTE(review): SGD uses the deprecated `lr=`
    # keyword while the others use `learning_rate=` — confirm the TF version in use
    optimizers_dict = {'sgd': SGD(lr=self.lr, momentum=0.9), 'adam': Adam(learning_rate=self.lr), 'rmsprop': tf.keras.optimizers.RMSprop(learning_rate=self.lr, rho=0.9, momentum=0.5)}
    self.optimizer = optimizers_dict[optimizer]
    self.optimizer_name = optimizer
    # resolve loss by name (mix of keras losses and custom losses module)
    losses_dict = {'categorical_crossentropy': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'binary_crossentropy': 'binary_crossentropy', 'weighted_binary_crossentropy': 'binary_crossentropy',
    'tversky_loss': losses.tversky_loss(delta=self.delta), 'focal_tversky_loss': losses.focal_tversky_loss(delta=self.delta, gamma=self.gamma), 'dice_loss': losses.dice_loss,
    'modified_tversky_loss': losses.modified_tversky_loss,
    'combo_loss': losses.combo_loss(alpha=0.5,beta=0.5), 'focal_dice_loss': losses.focal_dice_loss(delta=0.5, gamma_fd=self.gamma),
    'focal_loss': losses.focal_loss(alpha=None, beta=None, gamma_f=2.), 'mixed_focal_loss': losses.mixed_focal_loss(weight=None, alpha=None, beta=None, delta=0.7, gamma_f=2.,gamma_fd=0.75)}
    self.loss = losses_dict[loss]
    self.loss_name = loss
    # one input channel per selected band
    input_size = (self.npix, self.npix, len(self.bands))
    # filter_num = [8, 16, 32, 64, 128]
    filter_num = [64, 128, 256, 512, 1024]
    self.n_labels = n_labels
    dilation_num = [1, 3, 15, 31]
    filter_num_down = [64, 128, 256, 512]#, 1024]
    # 'vnet': tf_models.vnet_2d(input_size, filter_num, n_labels, res_num_ini=1, res_num_max=3,
    # activation='ReLU', output_activation='Softmax', batch_norm=False, pool=True, unpool=True, name='vnet'),
    # build the requested architecture from keras_unet_collection
    if model == 'unet':
        self.model = tf_models.unet_2d(input_size, filter_num, n_labels, stack_num_down=2, stack_num_up=2,
        activation='ReLU', output_activation='Sigmoid', batch_norm=False, pool=True, unpool=True,
        backbone=None, weights=None, freeze_backbone=True, freeze_batch_norm=True, name='unet')
    elif model == 'attn_unet':
        self.model = tf_models.att_unet_2d(input_size, filter_num, self.n_labels, stack_num_down=2, stack_num_up=2,
        activation='ReLU', atten_activation='ReLU', attention='add', output_activation='Sigmoid',
        batch_norm=True, weights=None, pool=False, unpool=False, freeze_batch_norm=True, name='attunet')
    elif model == 'r2u_net':
        self.model = tf_models.r2_unet_2d(input_size, filter_num, self.n_labels,
        stack_num_down=2, stack_num_up=2, recur_num=2,
        activation='ReLU', output_activation='Sigmoid',
        batch_norm=False, pool=True, unpool=True, name='r2_unet')
    elif model == 'unet_plus':
        self.model = tf_models.unet_plus_2d(input_size, filter_num, self.n_labels, stack_num_down=2, stack_num_up=2,
        activation='ReLU', output_activation='Sigmoid', batch_norm=False, pool=True, unpool=True, deep_supervision=False,
        backbone=None, weights=None, freeze_backbone=True, freeze_batch_norm=True, name='xnet')
    elif model == 'resunet_a':
        self.model = tf_models.resunet_a_2d(input_size, filter_num, dilation_num, self.n_labels,
        aspp_num_down=256, aspp_num_up=128, activation='ReLU', output_activation='Sigmoid',
        batch_norm=True, pool=True, unpool=True, name='resunet')
    elif model == 'u2net':
        self.model = tf_models.u2net_2d(input_size, self.n_labels, filter_num_down, filter_num_up='auto', filter_mid_num_down='auto', filter_mid_num_up='auto',
        filter_4f_num='auto', filter_4f_mid_num='auto', activation='ReLU', output_activation='Sigmoid',
        batch_norm=False, pool=True, unpool=True, deep_supervision=False, name='u2net')
    elif model == 'unet_3plus':
        self.model = tf_models.unet_3plus_2d(input_size, self.n_labels, filter_num_down, filter_num_skip='auto', filter_num_aggregate='auto',
        stack_num_down=2, stack_num_up=1, activation='ReLU', output_activation='Sigmoid',
        batch_norm=False, pool=True, unpool=True, deep_supervision=False,
        backbone=None, weights=None, freeze_backbone=True, freeze_batch_norm=True, name='unet3plus')
    self.model_name = model
    # prefix encoding the data/model configuration; tversky variants embed
    # their extra hyper-parameters in the name
    if loss == 'tversky_loss':
        self.pre_output_name = 'f%s_s%s_c%s_%s_%s_%s_%s'%(self.freq, self.npix, int(self.cold_cores), self.model_name, self.loss_name, self.delta, self.optimizer_name)
    elif loss == 'focal_tversky_loss':
        self.pre_output_name = 'f%s_s%s_c%s_%s_%s_%s_%s_%s'%(self.freq, self.npix, int(self.cold_cores), self.model_name, self.loss_name, self.delta, self.gamma, self.optimizer_name)
    else:
        self.pre_output_name = 'f%s_s%s_c%s_%s_%s_%s'%(self.freq, self.npix, int(self.cold_cores), self.model_name, self.loss_name, self.optimizer_name)
def dec_to_bin(self, n):
    """Binary representation of non-negative integer ``n``, without the '0b' prefix."""
    # FIX: bin() already returns a str; the redundant str() wrapper is removed
    return bin(n)[2:]
def bin_to_dec(self, n):
    """Integer value of the binary string ``n``."""
    return int(n, base=2)
def remove_intersection(self, lst1, lst2):
    """Elements of ``lst1`` not present in ``lst2``, preserving order.

    Elements must be hashable (they are small ints in this class).
    """
    # FIX: membership test against a set is O(1) instead of O(len(lst2))
    exclude = set(lst2)
    return [value for value in lst1 if value not in exclude]
def missing_frequencies(self):
    """Channel indices (axis-3 positions) of bands absent from ``self.freq``.

    ``self.freq`` is a bitmask over the nine possible input bands; each bit
    value (2, 4, ..., 512) maps to a fixed channel index (0..8) in the stacked
    input cube.

    Returns:
        list[int]: channel indices to delete, in increasing order.
    """
    freq_list = [2, 4, 8, 16, 32, 64, 128, 256, 512]
    # FIX: the nine-branch if-chain is replaced by a value -> index mapping
    index_of = {f: i for i, f in enumerate(freq_list)}
    bin_freq = self.dec_to_bin(self.freq)
    n = len(bin_freq)
    # decompose the bitmask into its set bit values
    freq_num = []
    for i, digit in enumerate(bin_freq):
        value = self.bin_to_dec(digit + '0' * (n - 1 - i))
        if value != 0:
            freq_num.append(value)
    missing = self.remove_intersection(freq_list, freq_num)
    return [index_of[f] for f in missing]
def remove_index(self, input):
    """Drop the channels of absent bands from a stacked input cube (axis 3)."""
    absent_channels = self.missing_frequencies()
    return np.delete(input, absent_channels, axis=3)
def remove_input_index(self, input, cluster):
    """(Unfinished) Build sample indices to drop from a stacked input set.

    NOTE(review): this method is incomplete — the original author's comment
    reads "not gona work, needs to account for validation". It computes
    ``index_to_remove`` (presumably the cluster-sample positions within each
    loop's 1072+1049 chunk) but never uses or returns it, and the ``input``
    argument is never touched.
    """
    # author note kept: does not account for the validation split yet
    if cluster:
        index_to_remove = np.arange(0,1072,1)
        for loop in range(self.loops - 1):
            index_to_remove = np.concatenate((index_to_remove, np.arange( (1072 + 1049)*loop,(1072 + 1049)*loop + 1072 ,1)) )
def npy_to_tfdata(self, region, batch_size=10, buffer_size=1000, only_train=True, only_test=False):
    """Load preprocessed .npz arrays for a sky region and wrap them as tf.data datasets.

    Args:
        region: sky-region identifier used in the dataset file names.
        batch_size (int): batch size for the returned dataset(s).
        buffer_size (int): shuffle buffer size (train/val only).
        only_train (bool): when True, return (train_dataset, val_dataset).
        only_test (bool): when True (and only_train False), return test_dataset.

    NOTE(review): each np.load is wrapped in a bare ``except:`` that falls back
    to an older file-name scheme without the s/c suffixes — any other failure
    (missing file, corrupt archive) is silently retried on the legacy name.
    If both only_train and only_test are False, the method returns None.
    """
    if only_train:
        # inputs: prefer range-compressed files, falling back to legacy names
        if self.range_comp:
            try:
                input_train = np.load(self.dataset_path + 'input_train_pre_r%s_f%s_s%s_c%s_'%(region, 1022, self.npix, int(self.cold_cores)) + self.dataset + '.npz')['arr_0']
            except:
                input_train = np.load(self.dataset_path + 'input_train_pre_r%s_f%s_'%(region, 1022) + self.dataset + '.npz')['arr_0']
        else:
            input_train = np.load(self.dataset_path + 'input_train_pre_r%s_f%s_r0_s%s_c%s_'%(region, 1022, self.npix, int(self.cold_cores)) + self.dataset + '.npz')['arr_0']
        # files are stored with all 9 channels (f1022); drop unselected bands
        input_train = self.remove_index(input_train)
        if self.range_comp:
            try:
                input_val = np.load(self.dataset_path + 'input_val_pre_r%s_f%s_s%s_c%s_'%(region, 1022, self.npix, int(self.cold_cores)) + self.dataset + '.npz')['arr_0']
            except:
                input_val = np.load(self.dataset_path + 'input_val_pre_r%s_f%s_'%(region, 1022) + self.dataset + '.npz')['arr_0']
        else:
            input_val = np.load(self.dataset_path + 'input_val_pre_r%s_f%s_r0_s%s_c%s_'%(region, 1022, self.npix, int(self.cold_cores)) + self.dataset + '.npz')['arr_0']
        input_val = self.remove_index(input_val)
        # labels: copy the stored masks into an (N, npix, npix, n_labels) array;
        # the second channel is optional and silently skipped when absent
        try:
            output_train = np.load(self.dataset_path + 'label_train_pre_r%s_f%s_d%s_s%s_c%s_'%(region, 1022, self.disk_radius, self.npix, int(self.cold_cores)) + self.dataset + '.npz')['arr_0']
        except:
            output_train = np.load(self.dataset_path + 'label_train_pre_r%s_f%s_d%s_'%(region, 1022, self.disk_radius) + self.dataset + '.npz')['arr_0']
        label_train = np.ndarray((np.shape(output_train)[0], self.npix, self.npix, self.n_labels))
        label_train[:,:,:,0] = output_train[:,:,:,0].astype(int)
        try:
            label_train[:,:,:,1] = output_train[:,:,:,1].astype(int)
        except:
            pass
        try:
            output_val = np.load(self.dataset_path + 'label_val_pre_r%s_f%s_d%s_s%s_c%s_'%(region, 1022, self.disk_radius, self.npix, int(self.cold_cores)) + self.dataset + '.npz')['arr_0']
        except:
            output_val = np.load(self.dataset_path + 'label_val_pre_r%s_f%s_d%s_'%(region, 1022, self.disk_radius) + self.dataset + '.npz')['arr_0']
        label_val = np.ndarray((np.shape(output_val)[0], self.npix, self.npix, self.n_labels))
        label_val[:,:,:,0] = output_val[:,:,:,0].astype(int)
        try:
            label_val[:,:,:,1] = output_val[:,:,:,1].astype(int)
        except:
            pass
        # shuffled, repeating, prefetching pipelines for training
        train_dataset = tf.data.Dataset.from_tensor_slices((input_train, label_train))
        val_dataset = tf.data.Dataset.from_tensor_slices((input_val, label_val))
        train_dataset = train_dataset.shuffle(buffer_size).batch(batch_size).repeat()
        train_dataset = train_dataset.prefetch(buffer_size=tf.data.AUTOTUNE)
        val_dataset = val_dataset.shuffle(buffer_size).batch(batch_size).repeat()
        val_dataset = val_dataset.prefetch(buffer_size=tf.data.AUTOTUNE)
        return train_dataset, val_dataset
    if only_test:
        if self.range_comp:
            try:
                input_test = np.load(self.dataset_path + 'input_test_pre_r%s_f%s_s%s_c%s_'%(region, 1022, self.npix, int(self.cold_cores)) + self.dataset + '.npz')['arr_0']
            except:
                input_test = np.load(self.dataset_path + 'input_test_pre_r%s_f%s_'%(region, 1022) + self.dataset + '.npz')['arr_0']
        else:
            input_test = np.load(self.dataset_path + 'input_test_pre_r%s_f%s_r0_'%(region, 1022) + self.dataset + '.npz')['arr_0']
        input_test = self.remove_index(input_test)
        try:
            output_test = np.load(self.dataset_path + 'label_test_pre_r%s_f%s_d%s_s%s_c%s_'%(region, 1022, self.disk_radius, self.npix, int(self.cold_cores)) + self.dataset + '.npz')['arr_0']
        except:
            output_test = np.load(self.dataset_path + 'label_test_pre_r%s_f%s_d%s_'%(region, 1022, self.disk_radius) + self.dataset + '.npz')['arr_0']
        label_test = np.ndarray((np.shape(output_test)[0], self.npix, self.npix, self.n_labels))
        label_test[:,:,:,0] = output_test[:,:,:,0].astype(int)
        try:
            label_test[:,:,:,1] = output_test[:,:,:,1].astype(int)
        except:
            pass
        # test pipeline: batched only — no shuffle, no repeat
        test_dataset = tf.data.Dataset.from_tensor_slices((input_test, label_test))
        test_dataset = test_dataset.batch(batch_size)
        return test_dataset
def prepare(self, ds, shuffle=False, augment=False):
    """Shuffle, batch, optionally augment, and prefetch a tf.data pipeline.

    Parameters
    ----------
    ds : tf.data.Dataset
        Dataset of (image, label) pairs to prepare.
    shuffle : bool
        When True, shuffle with a buffer of 1000 elements before batching.
    augment : bool
        When True, apply random flips/rotations (training-time only).

    Returns
    -------
    tf.data.Dataset
        The prepared, prefetching dataset.
    """
    autotune = tf.data.AUTOTUNE
    # Augmentation pipeline; only applied when `augment` is requested.
    augmenter = tf.keras.Sequential([
        tf.keras.layers.experimental.preprocessing.RandomFlip(
            "horizontal_and_vertical"),
        tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
    ])
    if shuffle:
        ds = ds.shuffle(1000)
    # Batch every dataset the same way.
    ds = ds.batch(self.batch)
    # Data augmentation is reserved for the training set.
    if augment:
        ds = ds.map(lambda x, y: (augmenter(x, training=True), y),
                    num_parallel_calls=autotune)
    # Buffered prefetching overlaps preprocessing with model execution.
    return ds.prefetch(buffer_size=autotune)
def train_model(self, regions):
for region in regions:
tf.keras.backend.clear_session()
# train_size = 9300
# val_size = 800
# if self.cold_cores:
# train_size = train_size*2
# val_size = val_size*2
############ COULD BE DONE BETTER ############
try:
train_size = int(len(np.load(self.dataset_path + 'label_train_pre_r%s_f%s_d%s_s%s_c%s_'%(region, 1022, self.disk_radius, self.npix, int(self.cold_cores)) + self.dataset + '.npz')['arr_0']))
val_size = int(len(np.load(self.dataset_path + 'label_val_pre_r%s_f%s_d%s_s%s_c%s_'%(region, 1022, self.disk_radius, self.npix, int(self.cold_cores)) + self.dataset + '.npz')['arr_0']))
except:
train_size = int(len(np.load(self.dataset_path + 'label_train_pre_r%s_f%s_d%s_'%(region, 1022, self.disk_radius) + self.dataset + '.npz')['arr_0']))
val_size = int(len(np.load(self.dataset_path + 'label_val_pre_r%s_f%s_d%s_'%(region, 1022, self.disk_radius) + self.dataset + '.npz')['arr_0']))
##############################################
train_dataset, valid_dataset = self.npy_to_tfdata(region, batch_size=self.batch, buffer_size=1000, only_train=True, only_test=False)
print('\n')
print('---------------------------------------')
print('[REGION] %s'%region)
print('[TRAINING SIZE] %s'%train_size)
print('[VALIDATION SIZE] %s'%val_size)
print('---------------------------------------')
print('\n')
callbacks = [
ModelCheckpoint(monitor='val_loss', filepath=self.path + "tf_saves/" + self.dataset + "/model_r%s_%s_%s"%(region, self.pre_output_name, self.output_name) + ".h5", save_best_only=True),
ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=self.patience),
CSVLogger(self.path + "tf_saves/" + self.dataset + "/data_r%s_%s_%s"%(region, self.pre_output_name, self.output_name) + ".csv"),
TensorBoard(),
EarlyStopping(monitor='val_loss', patience=self.patience, restore_best_weights=True)
]
model = self.model
metrics = [losses.dice_coefficient, tf.keras.metrics.Recall(), | |
r"""
Emperor 3D PCoA viewer (:mod:`emperor.core`)
============================================
This module provides an Object to interact and visualize an Emperor plot
from the IPython notebook.
.. currentmodule:: emperor.core
Classes
-------
.. autosummary::
:toctree: generated/
Emperor
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, emperor development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE.md, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from copy import deepcopy
from os.path import join, basename
from distutils.dir_util import copy_tree
import warnings
import numpy as np
import pandas as pd
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
from skbio import OrdinationResults
from emperor import __version__ as emperor_version
from emperor.util import (get_emperor_support_files_dir,
preprocess_coords_file, resolve_stable_url,
validate_and_process_custom_axes, EmperorWarning)
# we are going to use this remote location to load external resources
REMOTE_URL = ('https://cdn.rawgit.com/biocore/emperor/%s/emperor'
'/support_files')
LOCAL_URL = "/nbextensions/emperor/support_files"
STYLE_PATH = join(get_emperor_support_files_dir(), 'templates',
'style-template.html')
LOGIC_PATH = join(get_emperor_support_files_dir(), 'templates',
'logic-template.html')
STANDALONE_PATH = join(get_emperor_support_files_dir(), 'templates',
'standalone-template.html')
JUPYTER_PATH = join(get_emperor_support_files_dir(), 'templates',
'jupyter-template.html')
class Emperor(object):
"""Display principal coordinates analysis plots
Use this object to interactively display a PCoA plot using the Emperor
GUI. IPython provides a rich display system that will let you display a
plot inline, without the need of creating a temporary file or having to
write to disk.
Parameters
----------
ordination: skbio.OrdinationResults
Object containing the computed values for an ordination method in
scikit-bio. Currently supports skbio.stats.ordination.PCoA and
skbio.stats.ordination.RDA results.
mapping_file: pd.DataFrame
DataFrame object with the metadata associated to the samples in the
``ordination`` object, should have an index set and it should match the
identifiers in the ``ordination`` object.
feature_mapping_file: pd.DataFrame, optional
DataFrame object with the metadata associated to the features in the
``ordination`` object, should have an index set and it should match the
identifiers in the ``ordination.features`` object.
dimensions: int, optional
Number of dimensions to keep from the ordination data, defaults to 5.
Be aware that this value will determine the number of dimensions for
all computations.
remote: bool or str, optional
This parameter can have one of the following three behaviors according
to the value: (1) ``str`` - load the resources from a user-specified
remote location, (2) ``False`` - load the resources from the
nbextensions folder in the Jupyter installation or (3) ``True`` - load
the resources from the GitHub repository. This parameter defaults to
``True``. See the Notes section for more information.
jackknifed: list of OrdinationResults, optional
A list of the OrdinationResults objects with the same sample
identifiers as the identifiers in ``ordination``.
procrustes: list of OrdinationResults, optional
A list of the OrdinationResults objects with the same sample
identifiers as the identifiers in ``ordination``.
ignore_missing_samples: bool, optional
If set to `True` samples without metadata are included by setting all
metadata values to: ``This sample has not metadata``. By default an
exception will be raised if missing samples are encountered. Note, this
flag only takes effect if there's at least one overlapping sample.
Attributes
----------
jackknifed: list
List of OrdinationResults objects in the same sample-order as
``self.ordination``.
procrustes: list
List of OrdinationResults objects in the same sample-order as
``self.ordination``.
procrustes_names: list
A list of names that will be used to distinguish samples from each
ordination in a procrustes plot. The GUI will display a category
labeled ``__Procrustes_Names__``.
width: str
Width of the plot when displayed in the Jupyter notebook (in CSS
units).
height: str
Height of the plot when displayed in the Jupyter notebook (in CSS
units).
settings: dict
A dictionary of settings that is loaded when a plot is displayed.
Settings generated from the graphical user interface are stored as JSON
files that can be loaded, and directly set to this attribute.
Alternatively, each aspect of the plot can be changed with dedicated
methods, for example see ``color_by``, ``set_background_color``, etc.
This attribute can also be serialized as a JSON string and loaded from
the GUI.
feature_mf: pd.DataFrame
DataFrame object with the metadata associated to the features in the
``ordination`` object, should have an index set and it should match the
identifiers in the ``ordination.features`` property.
custom_axes : list of str, optional
Custom axes to embed in the ordination.
jackknifing_method : {'IQR', 'sdev'}, optional
Used only when plotting ellipsoids for jackknifed beta diversity
(i.e. using a directory of coord files instead of a single coord
file). Valid values are ``"IQR"`` (for inter-quartile ranges) and
``"sdev"`` (for standard deviation). This argument is ignored if
``self.jackknifed`` is ``None`` or an empty list.
Examples
--------
Create an Emperor object and display it from the Jupyter notebook:
>>> import pandas as pd, numpy as np
>>> from emperor import Emperor
>>> from skbio import OrdinationResults
Ordination plots are almost invariantly associated with a set of data, that
relates each sample to its scientific context, we refer to this as the
*sample metadata*, and represent it using Pandas DataFrames. For this
example we will need some metadata, we start by creating our metadata
object:
>>> data = [['PC.354', 'Control', '20061218', 'Control_mouse_I.D._354'],
... ['PC.355', 'Control', '20061218', 'Control_mouse_I.D._355'],
... ['PC.356', 'Control', '20061126', 'Control_mouse_I.D._356'],
... ['PC.481', 'Control', '20070314', 'Control_mouse_I.D._481'],
... ['PC.593', 'Control', '20071210', 'Control_mouse_I.D._593'],
... ['PC.607', 'Fast', '20071112', 'Fasting_mouse_I.D._607'],
... ['PC.634', 'Fast', '20080116', 'Fasting_mouse_I.D._634'],
... ['PC.635', 'Fast', '20080116', 'Fasting_mouse_I.D._635'],
... ['PC.636', 'Fast', '20080116', 'Fasting_mouse_I.D._636']]
>>> columns = ['SampleID', 'Treatment', 'DOB', 'Description']
>>> mf = pd.DataFrame(columns=columns, data=data)
Before we can use this mapping file in Emperor, we should set the index
to be `SampleID`.
>>> mf.set_index('SampleID', inplace=True)
Then let's create some artificial ordination data:
>>> ids = ('PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593',
... 'PC.355', 'PC.607', 'PC.634')
>>> eigvals = np.array([0.47941212, 0.29201496, 0.24744925,
... 0.20149607, 0.18007613, 0.14780677,
... 0.13579593, 0.1122597, 0.])
>>> eigvals = pd.Series(data=eigvals, index=ids)
>>> n = eigvals.shape[0]
>>> samples = np.random.randn(n, n)
>>> samples = pd.DataFrame(data=samples, index=ids)
>>> p_explained = np.array([0.26688705, 0.1625637, 0.13775413, 0.11217216,
... 0.10024775, 0.08228351, 0.07559712, 0.06249458,
... 0.])
>>> p_explained = pd.Series(data=p_explained, index=ids)
And encapsulate it inside an ``OrdinationResults`` object:
>>> ores = OrdinationResults(eigvals, samples=samples,
... proportion_explained=p_explained)
Finally import the Emperor object and display it using Jupyter, note that
this call will have no effect under a regular Python session:
>>> Emperor(ores, mf)
Notes
-----
This object currently does not support the full range of actions that the
GUI does support and should be considered experimental at the moment.
The ``remote`` parameter is intended for different use-cases, you should
use the first option "(1) - URL" when you want to load the data from a
location different than the GitHub repository or your Jupyter notebook
resources i.e. a custom URL. The second option "(2) - ``False``" loads
resources from your local Jupyter installation, note that you **need** to
execute ``nbinstall`` at least once or the application will error, this
option is ideal for developers modifying the JavaScript source code, and in
environments of limited internet connection. Finally, the third option "(3)
- ``True``" should be used if you intend to embed an Emperor plot in a
notebook and then publish it using http://nbviewer.jupyter.org.
Raises
------
ValueError
If the remote argument is not of ``bool`` or ``str`` type.
If none of the samples in the ordination matrix are in the metadata.
KeyError
If there's samples in the ordination matrix but not in the metadata.
References
----------
.. [1] EMPeror: a tool for visualizing high-throughput microbial community
data <NAME>, <NAME>, <NAME>, <NAME>. Gigascience.
2013 Nov 26;2(1):16.
"""
def __init__(self, ordination, mapping_file, feature_mapping_file=None,
dimensions=5, remote=True, jackknifed=None, procrustes=None,
ignore_missing_samples=False):
self.ordination = ordination
self.jackknifed = jackknifed if jackknifed is not None else []
self.procrustes = procrustes if procrustes is not None else []
self.mf = mapping_file.copy()
self.mf = self._validate_metadata(self.mf, self.ordination.samples,
ignore_missing_samples)
# if biplots are to be visualized
if self.ordination.features is not None:
self.feature_mf = \
self._validate_metadata(feature_mapping_file,
self.ordination.features,
ignore_missing_samples=False)
self._validate_ordinations()
self._html = None
if self.ordination.proportion_explained.shape[0] < dimensions:
self.dimensions = self.ordination.proportion_explained.shape[0]
else:
self.dimensions = dimensions
if isinstance(remote, bool):
if remote:
self.base_url = resolve_stable_url(emperor_version,
REMOTE_URL)
else:
self.base_url = LOCAL_URL
elif isinstance(remote, str):
self.base_url = remote
else:
raise ValueError("Unsupported type for `remote` argument, should "
"be a bool or str")
# dimensions for the div containing the plot in the context | |
and error bar pair in `Decimal` to a string.
Parameters
----------
mu : Decimal
Value of estimate in `Decimal`. Mu must have enough precision to be
defined to dot after shifting. Can be inf or nan.
EB : Decimal
Error bar on estimate in `Decimal`. Must be non-negative. It must be
defined to same precision (quantum) as `mu` if `EB` is finite positive
and `mu` is positive.
shift : int
How many decimal points to shift `mu` for display purposes. If `mu`
is in meters and shift=3 than we display the result in mm, i.e., x1e3.
min_clip : Decimal
Lower limit clip value on estimate. If ``mu < min_clip`` then simply
return ``< min_clip`` for string. This is used for score metric where a
lower metric is simply on another order of magnitude to other methods.
max_clip : Decimal
Upper limit clip value on estimate. If ``mu > max_clip`` then simply
return ``> max_clip`` for string. This is used for loss metric where a
high metric is simply on another order of magnitude to other methods.
below_fmt : str (format string)
Format string to display when estimate is lower limit clipped, often:
'<{0:,f}'.
above_fmt : str (format string)
Format string to display when estimate is upper limit clipped, often:
'>{0:,f}'.
non_finite_fmt : dict of str to str
Display format when estimate is non-finite. For example, for latex
looking output, one could use:
``{'inf': r'\infty', '-inf': r'-\infty', 'nan': '--'}``.
Returns
-------
std_str : str
String representation of `mu` and `EB`. This is in format 1.234(56)
for ``mu=1.234`` and ``EB=0.056`` unless there are non-finite values
or a value has been clipped.
"""
assert min_clip == D_NINF or min_clip.is_finite()
assert max_clip == D_INF or max_clip.is_finite()
assert min_clip < max_clip
shift = int(shift) # scaleb doesn't like np ints in Py3 => cast to int
# First check the clipped case
if (not mu.is_nan()) and max_clip < mu: # above max
assert max_clip.is_finite()
return above_fmt.format(max_clip.scaleb(shift))
if (not mu.is_nan()) and mu < min_clip: # below min
assert min_clip.is_finite()
return below_fmt.format(min_clip.scaleb(shift))
# Now let's process the non-finite estimate case
if not mu.is_finite():
mu_str = NAN_STR if mu.is_nan() else str(float(mu))
# Default to float string rep if no instructions
return non_finite_fmt.get(mu_str, mu_str)
mu_shifted = mu.scaleb(shift)
if not decimal_to_dot(mu_shifted):
raise ValueError("Shifting mu too far left for its precision.")
std_str = GEN_FMT.format(mu_shifted)
if EB.is_finite():
# At this point everything should be finite and match quantums
assert EB.is_zero() or as_tuple_chk(mu).exponent == as_tuple_chk(EB).exponent
assert EB >= 0
EB_str = digit_str(EB)
std_str = "%s(%s)" % (std_str, EB_str)
assert "E" not in std_str
return std_str
def print_pval(pval, below_fmt=BELOW_FMT, non_finite_fmt={}):
    """Convert a Decimal p-value into its string representation.

    Parameters
    ----------
    pval : Decimal
        Decimal p-value to represent as string. Must be in [0,1] or nan.
    below_fmt : str (format string)
        Format string used when the p-value is clipped at its lower limit,
        often: ``'<{0:,f}'``.
    non_finite_fmt : dict of str to str
        Display format when the value is non-finite. For example, for latex
        looking output, one could use: ``{'nan': '--'}``.

    Returns
    -------
    pval_str : str
        String representation of the p-value. A value of zero, or equal to
        the minimum Decimal representable at `pval`'s precision, is rendered
        as a clipped string, e.g. ``'<0.0001'``.
    """
    eps = decimal_eps(pval)
    if pval.is_nan():
        return non_finite_fmt.get(NAN_STR, NAN_STR)
    if pval <= eps:
        assert 0 <= pval <= eps
        # A p-value that rounded up to its quantum is assumed to be strictly
        # below it rather than equal. This sounds shaky, but the quantum is
        # not exactly representable in binary fp anyway, so it holds.
        return below_fmt.format(eps)
    assert eps < pval <= 1
    # Some style guides suggest dropping the leading zero here, but format
    # strings offer no easy way to do that; could be added as an option.
    return GEN_FMT.format(pval)
def get_shift_range(x_dec_list, shift_mod=1):
    """Bound the search space of decimal-point shifts for `find_shift`.

    Attempts to fulfill three constraints:
    1) every estimate remains displayable to the dot after shifting,
    2) at least one estimate is >= 1 after the shift (no wasted zeros),
    3) ``shift % shift_mod == 0``.
    When all three cannot hold at once, requirement 2 is sacrificed.

    Parameters
    ----------
    x_dec_list : array-like of Decimal
        Finite `Decimal` estimates to format; non-finite and clipped values
        must already have been removed.
    shift_mod : int
        Required modulus for the shift, usually 1 or 3 (3 when an SI prefix
        is wanted). Must be >= 1.

    Returns
    -------
    min_shift : int
        Minimum shift (inclusive) satisfying the constraints.
    max_shift : int
        Maximum shift (inclusive) satisfying the constraints.
    all_small : bool
        True when constraint 2 had to be violated; may be used as a warning
        flag.
    """
    assert len(x_dec_list) >= 1
    assert shift_mod >= 1
    assert all(x.is_finite() for x in x_dec_list)
    # Largest shift keeping everything decimal-to-dot. Arguably only mean
    # estimates with finite error bars matter here, but keep it simple.
    upper_raw = -max(x.as_tuple().exponent for x in x_dec_list)
    # Round down so the result obeys shift_mod.
    upper = floor_mod(upper_raw, shift_mod)
    assert upper % shift_mod == 0 and upper <= upper_raw
    # Smallest shift keeping at least one number >= 1 (avoid leading zeros).
    lower_raw = -max(x.adjusted() for x in x_dec_list)
    # Round up so the result obeys shift_mod.
    lower = ceil_mod(lower_raw, shift_mod)
    assert lower % shift_mod == 0 and lower >= lower_raw
    # If the window is empty, sacrifice the >= 1 requirement.
    all_small = lower > upper
    if all_small:
        lower = upper
    assert lower <= upper
    assert any(k % shift_mod == 0 for k in range(lower, upper + 1))
    return lower, upper, all_small
def find_shift(mean_list, err_list, shift_mod=1):
"""Find optimal decimal point shift to display the numbers in `mean_list`
for display compactness.
Finds optimal shift of Decimal numbers with potentially varying significant
figures and varying magnitudes to limit the length of the longest resulting
string of all the numbers. This is to limit the length of the resulting
column which is determined by the longest number. This function assumes the
number will *not* be displayed in a fixed width font and hence the decimal
point only adds a neglible width. Assumes all clipped and non-finite values
have been removed from list.
Attempts to fulfill three constraints:
1) All estimates displayed to dot after shifting
2) At least one estimate is >= 1 after shift to avoid space waste with 0s.
3) ``shift % shift_mod == 0``
If not all 3 are possible then requirement 2 is violated.
Parameters
----------
mean_list : array-like of Decimal, shape (n,)
List of `Decimal` estimates to format. Assumes all non-finite and
clipped values are already removed.
err_list : array-like of Decimal, shape (n,)
List of `Decimal` error bars. Must be of same length as `mean_list`.
shift_mod : int
Required modulus for output. This is usually 1 or 3. When an SI prefix
is desired on the shift then a modulus of 3 is used. Must be >= 1.
Returns
-------
best_shift : int
Best shift of mean_list for compactness. This is number of digits
to move point to right, e.g. ``shift=3`` => change 1.2345 to 1234.5
Notes
-----
This function is fairly inefficient and could be done implicitly, but it
shouldn't be the bottleneck anyway for most usages.
"""
assert len(mean_list) == len(err_list)
# Check all non-finite values for mean removed, but allow non-finite EB
assert all(x.is_finite() for x in mean_list)
assert shift_mod >= 1
if len(mean_list) == 0:
return 0 # Just return 0 to keep | |
<gh_stars>1-10
# Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
def safe_get(key_val, key):
    """Return ``key_val[key]``, or ``""`` when *key* is absent.

    Single dict lookup via ``dict.get`` instead of the membership-test-then-
    index pattern (same result, one lookup instead of two).
    """
    return key_val.get(key, "")
def find_node_name(node_id, node_list):
    """Return the ``template_name`` of the node in *node_list* whose ``id``
    equals *node_id*; raise when no such node exists."""
    match = next((n for n in node_list if n['id'] == node_id), None)
    if match is None:
        raise Exception('can not find node(%s).' % node_id)
    return match['template_name']
def find_node_type(node_id, node_list):
    """Return the ``type_name`` of the node in *node_list* whose ``id``
    equals *node_id*; raise when no such node exists."""
    match = next((n for n in node_list if n['id'] == node_id), None)
    if match is None:
        raise Exception('can not find node(%s).' % node_id)
    return match['type_name']
def find_related_node(node_id, src_json_model, requirement_name):
    """Collect the names of node templates whose requirement named
    *requirement_name* targets the template *node_id*."""
    return [
        tpl['name']
        for tpl in safe_get(src_json_model, "node_templates")
        for req in safe_get(tpl, 'requirement_templates')
        if safe_get(req, 'name') == requirement_name
        and safe_get(req, 'target_node_template_name') == node_id
    ]
def convert_props(src_node, dest_node):
    """Copy every property carrying a ``value`` from *src_node* into
    *dest_node*'s ``properties`` mapping (in place)."""
    # A missing or falsy 'properties' section means nothing to copy.
    source_props = src_node.get('properties') or {}
    for name, info in source_props.items():
        if 'value' in info:
            dest_node['properties'][name] = info['value']
def convert_metadata(src_json):
    """Return the ``metadata`` section of *src_json*, or ``{}`` if absent.

    Uses ``dict.get`` with a default instead of the membership-test-then-
    index pattern (same behavior, single lookup).
    """
    return src_json.get('metadata', {})
def convert_factor_unit(value):
    """Render a ``{"factor": f, "unit": u}`` mapping as the string
    ``"f u"``; strings are passed through unchanged."""
    if isinstance(value, str):
        return value
    return "{} {}".format(value["factor"], value["unit"])
def convert_inputs(src_json):
    """Translate the ``inputs`` section of *src_json* into the target shape:
    ``{name: {'type'/'description'/'value': ...}}`` (keys kept only when
    present in the source)."""
    converted = {}
    for name, info in src_json.get('inputs', {}).items():
        entry = {}
        if 'type_name' in info:
            entry['type'] = info['type_name']
        if 'description' in info:
            entry['description'] = info['description']
        if 'value' in info:
            entry['value'] = info['value']
        converted[name] = entry
    return converted
def convert_vnf_node(src_node, src_json_model):
    """Build the target-format VNF node from a source node.

    Dependencies come from requirement templates targeting a
    ``virtual_linkable`` capability; networks come from requirements named
    ``dependency``.
    """
    vnf_node = {
        'type': src_node['type_name'],
        'vnf_id': src_node['template_name'],
        'description': '',
        'properties': {},
        'dependencies': [],
        'networks': [],
    }
    convert_props(src_node, vnf_node)
    for tpl in safe_get(src_json_model, "node_templates"):
        if tpl['name'] != vnf_node['vnf_id']:
            continue
        deps = []
        nets = []
        for req in safe_get(tpl, 'requirement_templates'):
            if safe_get(req, 'target_capability_name') == 'virtual_linkable':
                deps.append({'key_name': req['name'],
                             'vl_id': req['target_node_template_name']})
            if safe_get(req, 'name') == 'dependency':
                nets.append(req['target_node_template_name'])
        vnf_node['dependencies'] = deps
        vnf_node['networks'] = nets
    return vnf_node
def convert_pnf_node(src_node, src_json_model):
    """Build the target-format PNF node, including the CPs bound to it via
    ``virtualbinding`` requirements in the model."""
    pnf_node = {
        'pnf_id': src_node['template_name'],
        'description': '',
        'properties': {},
    }
    convert_props(src_node, pnf_node)
    pnf_node['cps'] = find_related_node(
        src_node['id'], src_json_model, 'virtualbinding')
    return pnf_node
def convert_vl_node(src_node, src_node_list):
    """Build the target-format VL node.

    The route is the first ``.VirtualLinksTo`` relationship target (empty
    string when none); ``route_external`` flags ``.RouteExternalVL`` types.
    """
    vl_node = {
        'vl_id': src_node['template_name'],
        'description': '',
        'properties': {},
    }
    convert_props(src_node, vl_node)
    linked = next(
        (rel for rel in safe_get(src_node, 'relationships')
         if safe_get(rel, 'type_name').endswith('.VirtualLinksTo')),
        None)
    if linked is None:
        vl_node['route_id'] = ''
    else:
        vl_node['route_id'] = find_node_name(linked['target_node_id'],
                                             src_node_list)
    vl_node['route_external'] = (
        src_node['type_name'].find('.RouteExternalVL') > 0)
    return vl_node
def convert_cp_node(src_node, src_node_list, model_type='NSD'):
    """Build the target-format CP node.

    The virtual-link relationship populates ``vl_id``; the virtual-binding
    relationship populates ``pnf_id`` for NSD models or ``vdu_id`` for VNFD
    models.
    """
    cp_node = {
        'cp_id': src_node['template_name'],
        'description': '',
        'properties': {},
    }
    convert_props(src_node, cp_node)
    for rel in src_node['relationships']:
        rel_name = safe_get(rel, 'name')
        if rel_name in ('virtualLink', 'virtual_link'):
            cp_node['vl_id'] = find_node_name(rel['target_node_id'],
                                              src_node_list)
        elif rel_name in ('virtualbinding', 'virtual_binding'):
            binding_key = 'pnf_id' if model_type == 'NSD' else 'vdu_id'
            cp_node[binding_key] = find_node_name(rel['target_node_id'],
                                                  src_node_list)
    return cp_node
def convert_router_node(src_node, src_node_list):
    """Build the target-format router node.

    Scans ``external_virtual_link`` relationships, recording the external VL
    and any ``router_ip_address`` property values; scanning stops after the
    first such relationship that carries properties (ones without properties
    are recorded but the scan continues).
    """
    router_node = {
        'router_id': src_node['template_name'],
        'description': '',
        'properties': {},
    }
    convert_props(src_node, router_node)
    for rel in src_node['relationships']:
        if safe_get(rel, 'name') != 'external_virtual_link':
            continue
        router_node['external_vl_id'] = find_node_name(rel['target_node_id'],
                                                       src_node_list)
        router_node['external_ip_addresses'] = []
        if 'properties' not in rel:
            # Keep looking for a relationship that does carry properties.
            continue
        for prop_key, prop_data in rel['properties'].items():
            if prop_key == 'router_ip_address':
                router_node['external_ip_addresses'].append(
                    prop_data['value'])
        break
    return router_node
def convert_fp_node(src_node, src_node_list, src_json_model):
    """Convert a forwarding-path (FP) node into target format.

    Walks the node's ``forwarder`` relationships; each target becomes an
    entry in ``forwarder_list`` typed ``'cp'`` (target type name contains or
    ends with ``.CP``) or ``'vnf'`` (anything else). For VNF forwarders the
    capability name is resolved from this FP's own requirement template in
    the model that targets the forwarder node.
    """
    fp_node = {'fp_id': src_node['template_name'], 'description': '',
               'properties': {}, 'forwarder_list': []}
    convert_props(src_node, fp_node)
    for relation in safe_get(src_node, 'relationships'):
        if safe_get(relation, 'name') != 'forwarder':
            continue
        forwarder_point = {'type': 'vnf'}
        # Classify the target by its (upper-cased) type name.
        target_node_type = find_node_type(relation['target_node_id'], src_node_list).upper()
        if target_node_type.find('.CP.') >= 0 or target_node_type.endswith('.CP'):
            forwarder_point['type'] = 'cp'
        forwarder_point['node_name'] = find_node_name(relation['target_node_id'], src_node_list)
        forwarder_point['capability'] = ''
        if forwarder_point['type'] == 'vnf':
            # Look up the capability via the FP's requirement template that
            # targets this forwarder; only the first match per level is used
            # (note the break after each inner loop).
            for node_tpl in src_json_model["node_templates"]:
                if fp_node['fp_id'] != node_tpl["name"]:
                    continue
                for r_tpl in safe_get(node_tpl, "requirement_templates"):
                    if safe_get(r_tpl, "target_node_template_name") != forwarder_point['node_name']:
                        continue
                    forwarder_point['capability'] = safe_get(r_tpl, "target_capability_name")
                    break
                break
        fp_node['forwarder_list'].append(forwarder_point)
    return fp_node
def convert_vnffg_group(src_group, src_group_list, src_node_list):
    """Build the target-format VNFFG from a source group, resolving each
    member node id to its template name (src_group_list is unused; kept for
    a signature uniform with the other converters)."""
    vnffg = {
        'vnffg_id': src_group['template_name'],
        'description': '',
        'properties': {},
        'members': [],
    }
    convert_props(src_group, vnffg)
    vnffg['members'] = [find_node_name(member_id, src_node_list)
                        for member_id in src_group['member_node_ids']]
    return vnffg
def convert_imagefile_node(src_node, src_node_list):
    """Build the target-format image-file node (src_node_list is unused;
    kept for a signature uniform with the other converters)."""
    image_node = {
        'image_file_id': src_node['template_name'],
        'description': '',
        'properties': {},
    }
    convert_props(src_node, image_node)
    return image_node
def convert_localstorage_node(src_node, src_node_list):
    """Build the target-format local-storage node (src_node_list is unused;
    kept for a signature uniform with the other converters)."""
    local_node = {
        'local_storage_id': src_node['template_name'],
        'description': '',
        'properties': {},
    }
    convert_props(src_node, local_node)
    return local_node
def convert_volumestorage_node(src_node, src_node_list):
    """Build the target-format volume-storage node; the source
    ``size_of_storage`` factor/unit pair is rendered into a ``size``
    string property."""
    volume_node = {
        'volume_storage_id': src_node['id'],
        'description': "",
        'properties': {},
    }
    convert_props(src_node, volume_node)
    raw_size = volume_node["properties"]["size_of_storage"]
    volume_node["properties"]["size"] = convert_factor_unit(raw_size)
    return volume_node
def convert_vdu_node(src_node, src_node_list, src_json_model):
    """Convert a VDU node into target format.

    Gathers, in order: storage/image relationships, the VirtualCompute
    capability (cpu, memory, flavor extra specs), the CPs bound to this VDU
    plus the VLs those CPs link to, and the artifacts (a SwImage artifact
    overrides any image file found earlier).
    """
    vdu_node = {'vdu_id': src_node['template_name'], 'description': '', 'properties': {},
                'image_file': '', 'local_storages': [], 'dependencies': [], 'nfv_compute': {},
                'vls': [], 'artifacts': [], 'volume_storages': []}
    convert_props(src_node, vdu_node)
    # Relationships: guest OS image, local/volume storage (either named
    # directly or attached via a '.AttachesTo' relationship type).
    for relation in src_node.get('relationships', ''):
        r_id, r_name = safe_get(relation, 'target_node_id'), safe_get(relation, 'name')
        if r_name == 'guest_os':
            vdu_node['image_file'] = find_node_name(r_id, src_node_list)
        elif r_name == 'local_storage':
            vdu_node['local_storages'].append(find_node_name(r_id, src_node_list))
        elif r_name == 'virtual_storage':
            # Volume storages are referenced by node id, not template name.
            vdu_node['volume_storages'].append(r_id)
        elif r_name.endswith('.AttachesTo'):
            nt = find_node_type(r_id, src_node_list)
            if nt.endswith('.BlockStorage.Local') or nt.endswith('.LocalStorage'):
                vdu_node['local_storages'].append(find_node_name(r_id, src_node_list))
    # VirtualCompute capability -> nfv_compute (cpu count/frequency, memory
    # size, and flavor extra specs from requested additional capabilities).
    for capability in src_node['capabilities']:
        if not capability['type_name'].endswith('.VirtualCompute'):
            continue
        vdu_node['nfv_compute']['flavor_extra_specs'] = {}
        for prop_name, prop_info in list(capability['properties'].items()):
            if prop_name == "virtual_cpu":
                vdu_node['nfv_compute']['num_cpus'] = prop_info["value"]["num_virtual_cpu"]
                vdu_node['nfv_compute']['cpu_frequency'] = convert_factor_unit(
                    prop_info["value"]["virtual_cpu_clock"])
            elif prop_name == "virtual_memory":
                vdu_node['nfv_compute']['mem_size'] = convert_factor_unit(
                    prop_info["value"]["virtual_mem_size"])
            elif prop_name == "requested_additional_capabilities":
                for key, val in list(prop_info["value"].items()):
                    vdu_node['nfv_compute']['flavor_extra_specs'].update(
                        val["target_performance_parameters"])
    # CPs bound to this VDU via 'virtualbinding', then the VLs those CPs
    # link to (deduplicated).
    vdu_node['cps'] = find_related_node(src_node['id'], src_json_model, 'virtualbinding')
    for cp_node in vdu_node['cps']:
        for src_cp_node in src_node_list:
            if src_cp_node['template_name'] != cp_node:
                continue
            for relation in safe_get(src_cp_node, 'relationships'):
                if relation['name'] != 'virtualLink':
                    continue
                vl_node_name = find_node_name(relation['target_node_id'], src_node_list)
                if vl_node_name not in vdu_node['vls']:
                    vdu_node['vls'].append(vl_node_name)
    # Artifacts: copy properties, flattening factor/unit pairs to strings;
    # a SwImage-typed artifact becomes the VDU's image file.
    for item in safe_get(src_node, 'artifacts'):
        artifact = {'artifact_name': item['name'], 'type': item['type_name'],
                    'file': item['source_path'], 'properties': {}}
        convert_props(item, artifact)
        for key in artifact['properties']:
            if 'factor' in artifact['properties'][key] and 'unit' in artifact['properties'][key]:
                artifact['properties'][key] = convert_factor_unit(artifact['properties'][key])
        vdu_node['artifacts'].append(artifact)
        if artifact["type"].endswith(".SwImage"):
            vdu_node['image_file'] = artifact["artifact_name"]
    return vdu_node
def convert_exposed_node(src_json, src_nodes, exposed):
    """Fill *exposed* (in place) with the substitution section's external
    CPs (from requirements) and forward CPs (from capabilities)."""
    substitution = safe_get(src_json, 'substitution')
    for bucket, section in (('external_cps', 'requirements'),
                            ('forward_cps', 'capabilities')):
        for item in safe_get(substitution, section):
            exposed[bucket].append({
                'key_name': item['mapped_name'],
                "cp_id": find_node_name(item['node_id'], src_nodes),
            })
def convert_vnffgs(src_json_inst, src_nodes):
    """Collect every VNFFG-typed group of the instance, converted to target
    format (type name contains '.VNFFG.' or ends with '.VNFFG')."""
    src_groups = safe_get(src_json_inst, 'groups')
    vnffgs = []
    for group in src_groups:
        upper_name = group['type_name'].upper()
        if '.VNFFG.' in upper_name or upper_name.endswith('.VNFFG'):
            vnffgs.append(convert_vnffg_group(group, src_groups, src_nodes))
    return vnffgs
def merge_imagefile_node(img_nodes, vdu_nodes):
    """Append an image-file node for every SwImage artifact referenced by a
    VDU that is not already present in *img_nodes* (mutates *img_nodes* in
    place).

    Keeps a set of known ids updated incrementally, so each duplicate check
    is O(1) instead of rebuilding the full id list for every artifact
    (behavior is unchanged: later duplicates are still skipped).
    """
    known_ids = {img["image_file_id"] for img in img_nodes}
    for vdu_node in vdu_nodes:
        for artifact in vdu_node.get("artifacts", []):
            if not artifact["type"].endswith(".SwImage"):
                continue
            name = artifact["artifact_name"]
            if name in known_ids:
                continue
            known_ids.add(name)
            img_nodes.append({
                "image_file_id": name,
                "description": "",
                "properties": artifact["properties"]
            })
def convert_common(src_json, target_json):
    """Initialise the shared target fields and split the source document.

    Accepts either a JSON string or an already-parsed dict, converts the
    metadata/inputs sections, seeds the always-present list fields, and
    returns the ``instance`` and (possibly empty) ``model`` parts.
    """
    parsed = json.loads(src_json) if isinstance(src_json, str) else src_json
    src_json_inst = parsed["instance"]
    src_json_model = parsed.get("model", {})
    target_json['metadata'] = convert_metadata(src_json_inst)
    target_json['inputs'] = convert_inputs(src_json_inst)
    for key in ('vls', 'cps', 'routers'):
        target_json[key] = []
    return src_json_inst, src_json_model
def convert_nsd_model(src_json):
    """Convert a parsed NSD TOSCA document into the simplified JSON string."""
    target_json = {'vnfs': [], 'pnfs': [], 'fps': []}
    src_json_inst, src_json_model = convert_common(src_json, target_json)
    src_nodes = src_json_inst['nodes']

    def _is_kind(tname, token):
        # Matches '.<token>.' occurring past the start of the type name,
        # or a '.<token>' suffix.
        return tname.find('.%s.' % token) > 0 or tname.endswith('.%s' % token)

    for node in src_nodes:
        tname = node['type_name']
        if _is_kind(tname, 'VNF'):
            target_json['vnfs'].append(convert_vnf_node(node, src_json_model))
        elif _is_kind(tname, 'PNF'):
            target_json['pnfs'].append(convert_pnf_node(node, src_json_model))
        elif _is_kind(tname, 'VL') or tname.find('.RouteExternalVL') > 0:
            target_json['vls'].append(convert_vl_node(node, src_nodes))
        elif _is_kind(tname, 'CP'):
            target_json['cps'].append(convert_cp_node(node, src_nodes))
        elif _is_kind(tname, 'FP'):
            target_json['fps'].append(convert_fp_node(node, src_nodes, src_json_model))
        elif tname.endswith('.Router'):
            target_json['routers'].append(convert_router_node(node, src_nodes))
    target_json['vnffgs'] = convert_vnffgs(src_json_inst, src_nodes)
    target_json['ns_exposed'] = {'external_cps': [], 'forward_cps': []}
    convert_exposed_node(src_json_inst, src_nodes, target_json['ns_exposed'])
    return json.dumps(target_json)
def convert_vnfd_model(src_json):
    """Convert a parsed VNFD TOSCA document into the simplified JSON string."""
    target_json = {'image_files': [], 'local_storages': [], 'vdus': [], 'volume_storages': []}
    src_json_inst, src_json_model = convert_common(src_json, target_json)
    src_nodes = src_json_inst['nodes']
    for node in src_nodes:
        tname = node['type_name']
        # Branch order matters: 'VDU.VirtualStorage' must be tested before
        # the plain 'VDU.Compute' suffix check.
        if tname.endswith('.ImageFile'):
            target_json['image_files'].append(convert_imagefile_node(node, src_nodes))
        elif tname.endswith(('.BlockStorage.Local', '.LocalStorage')):
            target_json['local_storages'].append(convert_localstorage_node(node, src_nodes))
        elif tname.endswith('VDU.VirtualStorage'):
            target_json['volume_storages'].append(convert_volumestorage_node(node, src_nodes))
        elif tname.endswith('VDU.Compute'):
            target_json['vdus'].append(convert_vdu_node(node, src_nodes, src_json_model))
        elif (tname.find('.VL.') > 0
                or tname.endswith(('.VL', '.VnfVirtualLinkDesc', '.RouteExternalVL'))):
            target_json['vls'].append(convert_vl_node(node, src_nodes))
        elif tname.find('.CP.') > 0 or tname.endswith(('.CP', ".VduCpd")):
            target_json['cps'].append(convert_cp_node(node, src_nodes, 'VNFD'))
        elif tname.endswith('.Router'):
            target_json['routers'].append(convert_router_node(node, src_nodes))
    target_json['vnf_exposed'] = {'external_cps': [], 'forward_cps': []}
    convert_exposed_node(src_json_inst, src_nodes, target_json['vnf_exposed'])
    merge_imagefile_node(target_json['image_files'], target_json['vdus'])
    return json.dumps(target_json)
if __name__ == '__main__':
src_json = json.dumps({
"instance": {
"metadata": {
"vnfSoftwareVersion": "1.0.0",
"vnfProductName": "zte",
"localizationLanguage": [
"english",
"chinese"
],
"vnfProvider": "zte",
"vnfmInfo": "zte",
"defaultLocalizationLanguage": "english",
"vnfdId": "zte-hss-1.0",
"vnfProductInfoDescription": "hss",
"vnfdVersion": "1.0.0",
"vnfProductInfoName": "hss"
},
"nodes": [
{
"id": "vNAT_Storage_6wdgwzedlb6sq18uzrr41sof7",
"type_name": "tosca.nodes.nfv.VDU.VirtualStorage",
"template_name": "vNAT_Storage",
"properties": {
"size_of_storage": {
"type_name": "scalar-unit.size",
"value": {
"value": 10000000000,
"factor": 10,
"unit": "GB",
"unit_size": 1000000000
}
},
"type_of_storage": {
"type_name": "string",
"value": "volume"
},
"rdma_enabled": {
"type_name": "boolean",
"value": False
}
},
"interfaces": [
{
"name": "Standard",
"description": "This lifecycle interface defines the essential, normative operations that TOSCA nodes may support.",
"type_name": "tosca.interfaces.node.lifecycle.Standard",
"operations": [
{
"name": "create",
"description": "Standard lifecycle create operation."
},
{
"name": "stop",
"description": "Standard lifecycle stop operation."
},
{
"name": "start",
"description": "Standard lifecycle start operation."
},
{
"name": "delete",
"description": "Standard lifecycle delete operation."
},
{
"name": "configure",
"description": "Standard lifecycle configure operation."
}
]
}
],
"capabilities": [
{
"name": "feature",
"type_name": "tosca.capabilities.Node"
},
{
"name": "virtual_storage",
"type_name": "tosca.capabilities.nfv.VirtualStorage"
}
]
},
{
"id": "sriov_link_2610d7gund4e645wo39dvp238",
"type_name": | |
= position if position is not None else self.cursor
if self.number_of_chars < self.limit:
self.text_char_list.insert(self.cursor, character)
self.cursor += 1
return True
return False
def print_text_to_screen(self, console:tcod.Console, x:int, y:int,
fg:Optional[Tuple[int,int,int]]=None, bg:Optional[Tuple[int,int,int]]=None) -> None:
raise NotImplementedError("You are trying to access the unimplemented method 'print_text_to_screen' from an abstract 'InputHanderer' object")
def render(
self,
console: tcod.Console,
*,
x: int = 0, y: int = 0,
fg: Optional[Tuple[int, int, int]] = None, bg: Optional[Tuple[int, int, int]] = None, text: Optional[str] = None,
cursor_position: Optional[int] = None
):
super().render(
console,
x=x, y=y,
fg=fg, bg=bg,
text=text if text else self.text_to_print,
cursor_position=cursor_position if cursor_position else self.cursor
)
class TextHandeler(InputHanderer):
    """Handles free-text input.

    Args:
        limit (int): The maximum number of characters that may be present.
        text_char_list (List[IntOrString], optional): A list of strings. Each string must contain only one character. Defaults to None.
    """
    def __init__(
        self,
        *,
        limit: int,
        text_char_list: Optional[List[IntOrString]] = None,
        x: int, y: int,
        height: int, width: int,
        title: str,
        active_fg: Tuple[int, int, int], inactive_fg: Tuple[int, int, int], bg: Tuple[int, int, int],
        initally_active: bool = True,
        alignment = tcod.LEFT
    ):
        super().__init__(
            limit=limit,
            text_char_list=text_char_list,
            x=x, y=y,
            height=height, width=width,
            title=title,
            active_fg=active_fg, inactive_fg=inactive_fg, bg=bg,
            initally_active=initally_active, alignment=alignment
        )
        # Cache the initial text both as the canonical string and as the
        # string that gets drawn.
        self.text = "".join(self.text_char_list)
        self.text_to_print = self.text

    def set_text(self, character: str):
        """Replace the stored text, truncating it to ``self.limit`` characters."""
        if len(character) > self.limit:
            character = character[:self.limit]
        self.text_to_print = character
        self.text_char_list = list(character)

    def send(self) -> str:
        """Return the current text as a single string."""
        return "".join(self.text_char_list)

    @send_text_after_call
    def delete(self, reverse: bool = False):
        """Delete at the cursor (forward delete when ``reverse`` is True)."""
        return super().delete(reverse=reverse)

    def handle_key(self, event: tcod.event.KeyDown):
        """Dispatch a key press: cursor movement, deletion, or insertion."""
        if event.sym in cursor_move_left_right:
            return self.cursor_move(event.sym)
        elif event.sym in delete_keys:
            return self.delete(event.sym == tcod.event.K_DELETE)
        else:
            key = self.translate_key(event)
            if key is not None:
                return self.insert(character=key)
            return False

    def translate_key(self, event: tcod.event.KeyDown):
        """Translate a KeyDown event into the printable character it produces.

        Returns None for navigation, modifier and other non-printing keys.
        """
        if event.sym in {tcod.event.K_HOME, tcod.event.K_END,
                         tcod.event.K_PAGEUP, tcod.event.K_PAGEDOWN,
                         tcod.event.K_LEFT, tcod.event.K_RIGHT, tcod.event.K_UP, tcod.event.K_DOWN,
                         tcod.event.K_TAB, tcod.event.K_KP_TAB,
                         tcod.event.K_ESCAPE,
                         tcod.event.K_CLEAR, tcod.event.K_KP_CLEAR,
                         tcod.event.K_RETURN, tcod.event.K_KP_ENTER}:
            return None
        if event.sym in modifiers:
            return None
        # Bug fix: the keypad keycodes K_KP_1..K_KP_9 are the consecutive
        # SDL values 1073741913..1073741921, so `sym - 1073741912` is the
        # digit VALUE. The old code passed that value to chr(), which
        # produced the control characters '\x01'..'\x09' instead of the
        # characters '1'..'9' (and invalid results for the keypad
        # period/plus/minus keys). Map them explicitly instead.
        if event.sym in range(tcod.event.K_KP_1, tcod.event.K_KP_9 + 1):
            return str(event.sym - 1073741912)
        if event.sym == tcod.event.K_KP_0:
            return "0"
        if event.sym == tcod.event.K_KP_PERIOD:
            return "."
        if event.sym == tcod.event.K_KP_PLUS:
            return "+"
        if event.sym == tcod.event.K_KP_MINUS:
            return "-"
        if event.sym == tcod.event.K_SPACE:
            return " "
        # Letters: subtract 32 to upper-case when shift or caps-lock is held.
        return chr(event.sym - (32 if event.mod & tcod.event.KMOD_SHIFT != 0 or event.mod & tcod.event.KMOD_CAPS != 0 else 0))

    @send_text_after_call
    def insert(self, *, character: str, position: Optional[int] = None) -> bool:
        """Insert a one-character string at ``position`` (default: the cursor).

        Raises:
            TypeError: If ``character`` is not a string.
            ValueError: If ``character`` is not exactly one character long.
        """
        if not isinstance(character, str):
            raise TypeError("The paramiter 'character' must be a string")
        if len(character) != 1:
            raise ValueError(f"The string 'character' must have only one character. You are passing in a string with {len(character)} characters")
        return super().insert(character=character, position=position)

    def print_text_to_screen(self, console: tcod.Console, x: int, y: int,
                             fg: Optional[Tuple[int, int, int]] = None, bg: Optional[Tuple[int, int, int]] = None) -> None:
        """Draw the text and highlight the character under the cursor."""
        s = self.text_to_print if 0 < self.number_of_chars else " "
        try:
            s2 = s[self.cursor]
        except IndexError:
            # Cursor sits one past the end of the text.
            s2 = " "
        console.print(x=x, y=y, string=s, fg=fg, bg=bg, bg_blend=constants.BKGND_SET)
        # Draw the cursor cell with swapped colours.
        console.print(x=x + self.cursor, y=y, string=s2, fg=bg, bg=fg)
"""
1, 1 (1*1)
2, 2 (1*2)
3, 6 (2*3)
4, 24 (6*4)
5, 120, (24*5)
6, 720, (120*6)
7, 5040 (720*7)
"""
class NumberHandeler(InputHanderer):
    def __init__(
        self,
        *,
        limit:int,
        max_value:int, min_value:int,
        wrap_around:bool=False,
        starting_value:Optional[int]=None,
        x:int, y:int,
        height:int, width:int,
        title:str,
        active_fg:Tuple[int, int, int], inactive_fg:Tuple[int, int, int], bg:Tuple[int, int, int],
        initally_active:bool=True, alignment = tcod.RIGHT
    ) -> None:
        """Numeric input field whose value is kept within [min_value, max_value].

        Args:
            limit (int): Maximum number of digits the field may hold.
            max_value (int): Upper bound for the stored value.
            min_value (int): Lower bound; silently swapped with max_value
                if the caller passed them reversed.
            wrap_around (bool, optional): Wrap to the opposite bound when
                stepping past a limit. Defaults to False.
            starting_value (Optional[int], optional): Initial value; falls
                back to min_value when None. Defaults to None.
        """
        # Tolerate swapped bounds rather than raising.
        if min_value > max_value:
            min_value, max_value = max_value, min_value
        s_value = starting_value if starting_value is not None else min_value
        self.is_negitive = s_value < 0
        super().__init__(
            limit=limit,
            x=x, y=y,
            height=height, width=width,
            title=title,
            active_fg=active_fg, inactive_fg=inactive_fg, bg=bg,
            initally_active=initally_active, alignment=alignment
        )
        self.max_value = max_value
        self.min_value = min_value
        self.wrap_around = wrap_around
        # The value is stored as a list of digits (most-significant first);
        # the sign is tracked separately in self.is_negitive.
        self.text_char_list = self.break_up(s_value)
        self.text_to_print = ("-" if self.is_negitive else "") + "".join([str(i) for i in self.text_char_list])
def set_text(self, character: int):
"""Takes a intiger and breaks it up and assigns it to the self.text_char_list. Calls 'self.check_if_is_in_bounds()' afterwards.
Args:
character (int): The intiger that is to broken up and assigned
"""
self.is_negitive = character < 0
self.text_char_list = self.break_up(character)
#print(f"New char list {self.text_char_list}")
self.check_if_is_in_bounds()
n_of_chars = self.number_of_chars
if self.cursor > n_of_chars:
self.cursor = n_of_chars
#print(f"New char list {self.text_char_list}, new chars: {character}")
@property
def can_be_negative(self):
return self.min_value < 0
@property
def can_be_positive(self):
return self.max_value > -1
    def handle_key(self, event: tcod.event.KeyDown):
        """Dispatch a key press: sign toggling, cursor moves, digit stepping, deletion, or digit insertion."""
        # '+' clears the negative flag; '-' toggles it. Each change is
        # followed by a re-clamp so the value stays within bounds.
        if event.sym in plus and self.can_be_positive:
            self.is_negitive = False
            self.check_if_is_in_bounds()
        elif event.sym in minus:
            if not self.is_negitive and self.can_be_negative:
                self.is_negitive = True
                self.check_if_is_in_bounds()
            elif self.is_negitive and self.can_be_positive:
                self.is_negitive = False
                self.check_if_is_in_bounds()
        elif event.sym in cursor_move_left_right:
            self.cursor_move(event.sym)
        elif event.sym in cursor_move_up_down:
            # Up/down arrows step the digit under the cursor.
            self.increment(is_up=event.sym == tcod.event.K_UP, cursor=self.cursor)
        elif event.sym in delete_keys:
            self.delete(event.sym == tcod.event.K_DELETE)
        else:
            key = self.translate_key(event)
            if key is not None:
                self.insert(character=key)
def translate_key(self, event: tcod.event.KeyDown):
if event.sym in range(tcod.event.K_0, tcod.event.K_9 + 1):
return event.sym - 48
if event.sym in range(tcod.event.K_KP_1, tcod.event.K_KP_9 + 1):
return event.sym - 1073741912
if event.sym in {tcod.event.K_KP_0, tcod.event.K_KP_00, tcod.event.K_KP_000}:
return 0
return None
def send(self) -> str:
return ("-" if self.is_negitive else "") + "".join([str(i) for i in self.text_char_list])
    @send_text_after_call
    @clamp_number_after_call_strict
    @check_for_unwanted_zeros
    def delete(self, reverse: bool = False):
        """Delete the digit at the cursor; the field always keeps at least one digit."""
        # Delegate to the base-class delete; if that emptied the digit
        # list, restore a single 0 so the field still shows a number.
        if super().delete(reverse=reverse):
            if self.is_empty:
                self.text_char_list.append(0)
            return True
        return False
    @send_text_after_call
    @clamp_number_after_call
    @check_for_unwanted_zeros
    def increment(self, *, is_up:bool=True, cursor:Optional[int]=None):
        """Increments the number based on the position of the cursor. If the cursor is all the way to the left, and the leftmost digit is 5, then the leftmost digit will be incremented 'up' to 6; carries and borrows propagate to the digit on the left.
        The clamping decorator keeps the result within bounds afterwards.
        Args:
            is_up (bool, optional): This determins if the digit will be incremented up or down. Defaults to True.
            cursor (Optional[int], optional): If this is None, then self.cursor will be used. Defaults to None.
        """
        cursor = cursor if cursor is not None else self.cursor
        def _increment(*, is_up:bool, cursor:int):
            up_or_down = (1 if is_up else -1)
            try:
                old = self.text_char_list[cursor]
                self.text_char_list[cursor]+=up_or_down
                new_ = self.text_char_list[cursor]
                if is_up and old == 9 and new_ == 10:
                    # Carry: 9 -> 0, then bump the digit to the left.
                    self.text_char_list[cursor] = 0
                    if cursor - 1 >= 0:
                        _increment(is_up=is_up, cursor=cursor-1)
                    elif cursor == 0 and self.number_of_chars < self.limit:
                        # Carry past the left edge: grow a new leading 1.
                        self.insert(character=1)
                elif not is_up and old == 0 and new_ == -1:
                    # Borrow: 0 -> 9, then decrement the digit to the left.
                    self.text_char_list[cursor] = 9
                    if cursor < self.limit:
                        _increment(is_up=is_up, cursor=cursor-1)
            except IndexError:
                # Cursor beyond the digit list: nothing to step.
                pass
        #elif cursor == self.limit and self.number_of_chars < self.limit:
        _increment(is_up=is_up, cursor=cursor)
    @send_text_after_call
    def check_if_is_in_bounds(self):
        """Clamp the current value to [min_value, max_value] and re-store its digits and sign."""
        # Recombine the digit list (and tracked sign) into a signed integer.
        added = self.add_up() * (-1 if self.is_negitive else 1)
        clamped = clamp(number=added, min_value=self.min_value, max_value=self.max_value, wrap_around=self.wrap_around)
        self.is_negitive = clamped < 0
        #print(f"In bounds: {clamped} ")
        self.text_char_list = self.break_up(clamped)
def add_up(self, to_add:Optional[Iterator[int]]=None) -> int:
to_add = self.text_char_list if to_add is None else to_add
total = 0
for i, n in enumerate(reversed(to_add)):
total += n * pow(10, i)
return total
#return reduce(lambda a,b: b * pow(10, a), enumerate(reversed(self.int_list)))
@staticmethod
def break_up(num:int):
"""Breaks up an intiger into a list of ints, each of which is less then 10 and greater to or equal to 0.
Args:
num (int): The intiger that is to be broken up
Returns:
list[int]: A list of intigers.
How it works:
the argument 'num' is converted to a positive number. If it is lower then 10, it will be convirted directly into a list and returned. Otherwise, the sub function, __break_up will be called.
c = 0
p = pow(10, c)
while p <= num:
yield (num % pow(10, c+1)) // p
c += 1
p = pow(10, c)
so assuming that num is 280...
c = 0
p = pow(10, 0)
p = 1
while 1 <= 280:
yield (280 % pow(10, 0+1)) // 1
(280 % 10) // 1
0 // 1
yield 0
c += 1
c = 1
p = pow(10, 1)
p = 10
(second loop)
while 10 <= 280:
yield (280 % pow(10, 1+1)) // 10
(280 % 100) // 10
80 // 10
yield 8
"""
#print(f"Num to be broken up: {num}")
num = abs(num)
if num < 10:
return [num]
def __break_up():
c = 0
p = pow(10, c)
while p <= num:
yield (num % pow(10, c+1)) // p
c += 1
p = pow(10, c)
bu:List[int] = list(__break_up())
#print(f"{bu}")
bu.reverse()
#print(f"{bu}")
return bu
@send_text_after_call
@clamp_number_after_call_strict
@check_for_unwanted_zeros
def insert(self, *, character:int, position:Optional[int]=None) -> bool:
if not isinstance(character, int):
raise TypeError("The paramiter 'character' must be a integer")
if character not in range(0,10):
raise ValueError(
f"The integer 'character' | |
import torch
import os
from datetime import datetime
from time import time
import numpy as np
from mpi4py import MPI
from mpi_utils.mpi_utils import sync_networks, sync_grads
from rl_modules.replay_buffer import replay_buffer
from rl_modules.models import actor, actor_bilinear, critic, critic_bilinear, critic_sum,\
actor_large, critic_large
from rl_modules.renn_models import actor_ReNN, critic_ReNN
from rl_modules.attn_models import actor_attn, critic_attn
from rl_modules.biattn_models import critic_biattn, actor_biattn
from rl_modules.ma_models import actor_shared, actor_separated, actor_dropout, actor_multihead
from mpi_utils.normalizer import normalizer
from her_modules.her import her_sampler
import wandb
from tqdm import tqdm
"""
ddpg with HER (MPI-version)
"""
class ddpg_agent:
    def __init__(self, args, env, env_params):
        """Build the actor/critic networks (plus targets), optimizers, HER
        sampler, replay buffer and normalizers; optionally resume from a
        checkpoint, and initialise model dir / wandb logging on MPI rank 0.

        Args:
            args: Parsed command-line arguments selecting the network
                architecture and hyper-parameters.
            env: The goal-conditioned gym-style environment.
            env_params: Dict of environment sizes/limits ('obs', 'goal', ...).
        """
        self.args = args
        self.env = env
        self.env_params = env_params
        # MPI
        self.comm = MPI.COMM_WORLD
        self.nprocs = self.comm.Get_size()
        # create the network and target network
        # (each args flag selects a different actor/critic architecture;
        # the final else branch is the plain MLP actor/critic pair)
        if args.actor_shared:
            self.actor_network = actor_shared(env_params)
            self.actor_target_network = actor_shared(env_params)
            self.critic_network = critic(env_params)
            self.critic_target_network = critic(env_params)
        elif args.actor_separated:
            self.actor_network = actor_separated(env_params)
            self.actor_target_network = actor_separated(env_params)
            self.critic_network = critic(env_params)
            self.critic_target_network = critic(env_params)
        elif args.actor_dropout:
            self.actor_network = actor_dropout(env_params)
            self.actor_target_network = actor_dropout(env_params)
            self.critic_network = critic(env_params)
            self.critic_target_network = critic(env_params)
        elif args.actor_multihead:
            self.actor_network = actor_multihead(env_params)
            self.actor_target_network = actor_multihead(env_params)
            self.critic_network = critic(env_params)
            self.critic_target_network = critic(env_params)
        elif args.use_renn:
            self.actor_network = actor_ReNN(env_params)
            self.actor_target_network = actor_ReNN(env_params)
            self.critic_network = critic_ReNN(env_params)
            self.critic_target_network = critic_ReNN(env_params)
        elif args.use_bilinear:
            self.actor_network = actor_bilinear(env_params)
            self.actor_target_network = actor_bilinear(env_params)
            self.critic_network = critic_bilinear(env_params)
            self.critic_target_network = critic_bilinear(env_params)
        elif args.use_critic_sum:
            self.actor_network = actor(env_params)
            self.actor_target_network = actor(env_params)
            self.critic_network = critic_sum(env_params)
            self.critic_target_network = critic_sum(env_params)
        elif args.use_attn:
            self.actor_network = actor_attn(env_params)
            self.actor_target_network = actor_attn(env_params)
            self.critic_network = critic_attn(env_params)
            self.critic_target_network = critic_attn(env_params)
        elif args.use_biattn:
            self.actor_network = actor_attn(env_params)
            self.actor_target_network = actor_attn(env_params)
            self.critic_network = critic_biattn(env_params)
            self.critic_target_network = critic_biattn(env_params)
        elif args.actor_large:
            self.actor_network = actor_large(env_params)
            self.actor_target_network = actor_large(env_params)
            self.critic_network = critic_large(env_params)
            self.critic_target_network = critic_large(env_params)
        else:
            self.actor_network = actor(env_params)
            self.actor_target_network = actor(env_params)
            self.critic_network = critic(env_params)
            self.critic_target_network = critic(env_params)
        if self.args.learn_from_expert:
            # The expert is a frozen (eval-mode) actor loaded from the checkpoint.
            assert args.resume, 'expert need model!'
            self.new_actor_loss = []
            self.expert_network = actor(env_params).eval()
        # load paramters
        if args.resume:
            if self.args.model_path == None:
                path = os.path.join(self.args.save_dir, self.args.env_name, self.args.name, 'model.pt')
            else:
                path = self.args.model_path
            try:
                # Checkpoint layout: (o_norm state, g_norm state, actor state, critic state).
                o_dict, g_dict, actor_model, critic_model = torch.load(path, map_location=lambda storage, loc: storage)
                # OLD Version o_mean, o_std, g_mean, g_std, actor_model, critic_model = torch.load(path, map_location=lambda storage, loc: storage)
            except:
                print('fail to load the model!')
                exit()
            print('loaded done!')
            if self.args.learn_from_expert:
                self.expert_network.load_state_dict(actor_model)
            else:
                self.actor_network.load_state_dict(actor_model)
                self.critic_network.load_state_dict(critic_model)
        # sync the networks across the cpus
        sync_networks(self.actor_network)
        sync_networks(self.critic_network)
        # load the weights into the target networks
        self.actor_target_network.load_state_dict(self.actor_network.state_dict())
        self.critic_target_network.load_state_dict(self.critic_network.state_dict())
        # if use gpu
        if self.args.cuda:
            self.actor_network.cuda()
            self.critic_network.cuda()
            self.actor_target_network.cuda()
            self.critic_target_network.cuda()
        # create the optimizer
        self.actor_optim = torch.optim.Adam(self.actor_network.parameters(), lr=self.args.lr_actor)
        self.critic_optim = torch.optim.Adam(self.critic_network.parameters(), lr=self.args.lr_critic)
        # her sampler
        self.her_module = her_sampler(self.args.replay_strategy, self.args.replay_k, self.env.compute_reward, random_unmoved = self.args.random_unmoved, not_relabel_unmoved = self.args.not_relabel_unmoved)
        # create the replay buffer
        self.buffer = replay_buffer(self.env_params, self.args.buffer_size, self.her_module.sample_her_transitions)
        # create the normalizer
        self.o_norm = normalizer(size=env_params['obs'], default_clip_range=self.args.clip_range)
        self.g_norm = normalizer(size=env_params['goal'], default_clip_range=self.args.clip_range)
        if args.resume:
            # Note: if use object number curriculum, the normalizer need to be extended
            self.o_norm.load(o_dict)
            self.g_norm.load(g_dict)
            # OLD VERSION self.o_norm.mean = o_mean
            # self.o_norm.std = o_std
            # self.g_norm.mean = g_mean
            # self.g_norm.std = g_std
        # create the dict for store the model
        if MPI.COMM_WORLD.Get_rank() == 0:
            # if not os.path.exists(self.args.save_dir):
            #     os.mkdir(self.args.save_dir, exist_ok=True)
            # path to save the model
            self.model_path = os.path.join(self.args.save_dir, self.args.env_name, self.args.name)
            if not os.path.exists(self.model_path):
                os.makedirs(self.model_path)
            # start wandb to log
            if self.args.wandb:
                wandb.init(
                    project = self.args.project,
                    group = self.args.group,
                    tags = self.args.tags,
                    name = self.args.name,
                    notes = f'Env:{self.args.env_name},Note:{self.args.note}'
                )
    def learn(self):
        """
        train the network

        Per epoch: optionally advance the curriculum (resizing normalizers
        and the replay buffer to the new observation sizes), collect
        n_cycles batches of rollouts, train and soft-update the targets
        after every cycle, evaluate, and on MPI rank 0 checkpoint the
        model and log metrics to wandb.
        """
        # warm up
        if self.args.warmup:
            self.warmup(100)
        # start to collect samples
        start_time = time()
        collect_per_epoch = self.args.n_cycles * self.args.num_rollouts_per_mpi * self.env_params['max_timesteps']
        self.global_relabel_rate = 0.3
        curriculum_param = self.args.curriculum_init
        curri_indicator = 0
        for epoch in range(self.args.n_epochs):
            # start curriculum
            if self.args.curriculum and curri_indicator > self.args.curriculum_bar:
                if curriculum_param < self.args.curriculum_end:
                    curriculum_param += self.args.curriculum_step
                    self.env.change(curriculum_param)
                    observation = self.env.reset()
                    # extend normalizer to new observation
                    o_size = len(observation['observation'])
                    g_size = len(observation['desired_goal'])
                    self.o_norm.change_size(new_size = o_size)
                    self.g_norm.change_size(new_size = g_size)
                    # extend buffer to new observation
                    self.buffer.change_size(max_timesteps=self.env._max_episode_steps,\
                        obs_size=o_size, goal_size=g_size)
            num_useless_rollout = 0 # record number of useless rollout(ag not change)
            for _ in tqdm(range(self.args.n_cycles)):
                mb_obs, mb_ag, mb_g, mb_info, mb_actions = [], [], [], [], []
                for _ in range(self.args.num_rollouts_per_mpi):
                    # try until collect successful experience
                    for j in range(self.args.max_trail_time):
                        # reset the rollouts
                        ep_obs, ep_ag, ep_g, ep_info, ep_actions = [], [], [], [], []
                        # reset the environment
                        observation = self.env.reset()
                        obs = observation['observation']
                        ag = observation['achieved_goal']
                        g = observation['desired_goal']
                        info = observation.get('info') # if no info, return None
                        # start to collect samples
                        ag_origin = ag
                        for t in range(self.env._max_episode_steps):
                            with torch.no_grad():
                                input_tensor = self._preproc_inputs(obs, g)
                                if self.args.collect_from_expert:
                                    pi = self.expert_network(input_tensor)
                                else:
                                    pi = self.actor_network(input_tensor)
                                action = self._select_actions(pi)
                            # feed the actions into the environment
                            observation_new, _, _, info = self.env.step(action)
                            # self.env.render()
                            obs_new = observation_new['observation']
                            ag_new = observation_new['achieved_goal']
                            # append rollouts
                            ep_obs.append(obs.copy())
                            ep_ag.append(ag.copy())
                            ep_g.append(g.copy())
                            ep_info.append(info.copy())
                            ep_actions.append(action.copy())
                            # re-assign the observation
                            obs = obs_new
                            ag = ag_new
                        # check if use this rollout
                        # (a rollout is "useless" when no tracked object moved
                        # more than 5mm from its starting achieved goal)
                        if_moved = np.linalg.norm(ag.reshape(-1,self.args.dim) - ag_origin.reshape(-1,self.args.dim), axis=-1) > 0.005
                        if self.args.trail_mode == 'all':
                            if_moved = if_moved.all()
                        elif self.args.trail_mode == 'any':
                            if_moved = if_moved.any()
                        else:
                            raise NotImplementedError
                        if if_moved:
                            break
                        else:
                            num_useless_rollout += 1
                    # append the terminal obs/ag so trajectories hold T+1 states
                    ep_obs.append(obs.copy())
                    ep_ag.append(ag.copy())
                    mb_obs.append(ep_obs)
                    mb_ag.append(ep_ag)
                    mb_info.append(ep_info)
                    mb_g.append(ep_g)
                    mb_actions.append(ep_actions)
                # convert them into arrays
                mb_obs = np.array(mb_obs)
                mb_ag = np.array(mb_ag)
                mb_g = np.array(mb_g)
                mb_info = np.array(mb_info)
                mb_actions = np.array(mb_actions)
                # store the episodes
                self.buffer.store_episode([mb_obs, mb_ag, mb_g, mb_info, mb_actions])
                self._update_normalizer([mb_obs, mb_ag, mb_g, mb_info, mb_actions])
                # train the network
                self._update_network()
                # soft update
                self._soft_update_target_network(self.actor_target_network, self.actor_network)
                self._soft_update_target_network(self.critic_target_network, self.critic_network)
            # start to do the evaluation
            data = self._eval_agent(render = ((epoch%10)==0 and self.args.render))
            if self.args.curriculum_reward:
                curri_indicator = data['reward']
            else:
                curri_indicator = data['success_rate']
            # record relabel rate (averaged over all MPI workers below)
            local_relabel_rate = self.her_module.relabel_num/self.her_module.total_sample_num
            local_random_relabel_rate = self.her_module.random_num/self.her_module.total_sample_num
            local_not_relabel_rate = self.her_module.nochange_num/self.her_module.total_sample_num
            local_data = np.array([local_relabel_rate, local_random_relabel_rate, local_not_relabel_rate])
            global_data = np.zeros(3)
            self.comm.Allreduce(local_data, global_data, op=MPI.SUM)
            self.global_relabel_rate, global_random_relabel_rate, global_not_relabel_rate = global_data/self.nprocs
            # local
            if MPI.COMM_WORLD.Get_rank() == 0:
                # save data
                print('[{}] epoch is: {}, eval success rate is: {:.3f}, reward is: {:.3f}'.format(datetime.now(), epoch, data['success_rate'], data['reward']))
                torch.save([self.o_norm.state_dict(), self.g_norm.state_dict(), self.actor_network.state_dict(), self.critic_network.state_dict()], \
                            self.model_path + '/model.pt')
                if self.args.wandb:
                    # log data
                    wandb.log(
                        {
                            'success rate': data['success_rate'],
                            "reward": data['reward'],
                            "curriculum param": curriculum_param,
                            "run time": (time()-start_time)/3600,
                            "useless rollout per epoch": num_useless_rollout/(self.args.n_cycles*self.args.num_rollouts_per_mpi),
                            "future relabel rate": self.global_relabel_rate,
                            "random relabel rate": global_random_relabel_rate,
                            "not change relabel rate": global_not_relabel_rate,
                        },
                        step=(epoch+1)*collect_per_epoch
                    )
            # reset record parameters
            self.her_module.total_sample_num = 1
            self.her_module.relabel_num = 0
            self.her_module.random_num = 0
            self.her_module.nochange_num = 0
# pre_process the inputs
def _preproc_inputs(self, obs, g):
obs_norm = self.o_norm.normalize(obs)
g_norm = self.g_norm.normalize(g)
# concatenate the stuffs
inputs = np.concatenate([obs_norm, g_norm])
inputs = torch.tensor(inputs, dtype=torch.float32).unsqueeze(0)
if self.args.cuda:
inputs = inputs.cuda()
return inputs
# this function will choose action for the agent and do the exploration
def _select_actions(self, pi):
action = pi.cpu().numpy().squeeze()
# add the gaussian
action += self.args.noise_eps * self.env_params['action_max'] * np.random.randn(*action.shape)
action = np.clip(action, -self.env_params['action_max'], self.env_params['action_max'])
# random actions...
random_actions = np.random.uniform(low=-self.env_params['action_max'], high=self.env_params['action_max'], \
size=self.env_params['action'])
# choose if use the random actions
action += np.random.binomial(1, self.args.random_eps, 1)[0] * (random_actions - action)
return action
    # update the normalizer
    def _update_normalizer(self, episode_batch):
        """Feed one rollout batch through HER sampling and update the obs/goal normalizers."""
        mb_obs, mb_ag, mb_g, mb_info, mb_actions = episode_batch
        mb_obs_next = mb_obs[:, 1:, :]
        mb_ag_next = mb_ag[:, 1:, :]
        # get the number of normalization transitions
        num_transitions = mb_actions.shape[1]
        # create the new buffer to store them
        buffer_temp = {'obs': mb_obs,
                       'ag': mb_ag,
                       'g': mb_g,
                       'info': mb_info,
                       'actions': mb_actions,
                       'obs_next': mb_obs_next,
                       'ag_next': mb_ag_next,
                       }
        # relabel goals exactly the way training will, so the normalizer
        # sees the same (obs, g) distribution as the critic/actor updates
        transitions = self.her_module.sample_her_transitions(buffer_temp, num_transitions)
        obs, g = transitions['obs'], transitions['g']
        # pre process the obs and g
        transitions['obs'], transitions['g'] = self._preproc_og(obs, g)
        # update
        self.o_norm.update(transitions['obs'])
        self.g_norm.update(transitions['g'])
        # recompute the stats
        self.o_norm.recompute_stats()
        self.g_norm.recompute_stats()
def _preproc_og(self, o, g):
o = np.clip(o, -self.args.clip_obs, self.args.clip_obs)
g = np.clip(g, -self.args.clip_obs, self.args.clip_obs)
return o, g
# soft update
def _soft_update_target_network(self, target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_((1 - self.args.polyak) * param.data + self.args.polyak * target_param.data)
# update the network
def _update_network(self):
if self.args.dynamic_batch: # update according to buffer size
update_times = int(self.args.n_batches * self.buffer.current_size / self.buffer.size)
elif self.args.her_batch:
update_times = int(self.args.n_batches / self.global_relabel_rate)
else:
update_times = self.args.n_batches
for _ in range(update_times):
# sample the episodes
transitions = self.buffer.sample(self.args.batch_size)
# pre-process the observation and goal
o, o_next, g = transitions['obs'], transitions['obs_next'], transitions['g']
transitions['obs'], transitions['g'] = self._preproc_og(o, g)
transitions['obs_next'], transitions['g_next'] = self._preproc_og(o_next, g)
# start to do the update
obs_norm = self.o_norm.normalize(transitions['obs'])
g_norm = self.g_norm.normalize(transitions['g'])
inputs_norm = np.concatenate([obs_norm, g_norm], axis=1)
obs_next_norm = self.o_norm.normalize(transitions['obs_next'])
g_next_norm | |
#!/usr/bin/env python3
import os
import glob
import logging
import ctypes
import unittest
import ast
from time import sleep
import threading
import multiprocessing as mp
from queue import Empty
import socket
from shutil import which, rmtree, copyfile
import numpy as np
from astropy.time import Time, TimeDelta
import h5py
from darc import Processor, ProcessorManager, AMBERListener
from darc.processor import Clustering, Extractor, Classifier, Visualizer
from darc import util
from darc.definitions import TIME_UNIT
# disable debug log messages from matplotlib
logging.getLogger('matplotlib').setLevel(logging.ERROR)
# An simple idling thread to test the thread scavenger
class Idling(threading.Thread):
    """A thread that does nothing until told to stop.

    Used to exercise the thread scavenger: run() idles in short waits
    until ``event`` is set, either directly or via stop_observation().
    """

    def __init__(self):
        super().__init__()
        # multiprocessing Event, matching what the real observations use
        self.event = mp.Event()

    def run(self):
        """Idle in 0.1 s waits until the stop event fires."""
        while True:
            if self.event.wait(.1):
                break

    def stop_observation(self, abort=False):
        """Signal the thread to exit; ``abort`` is accepted but unused."""
        self.event.set()
class TestProcessorManager(unittest.TestCase):
    """Tests for the ProcessorManager's dead-thread scavenger."""

    def test_scavenger(self):
        """A live observation thread must survive a scavenger pass."""
        queue = mp.Queue()
        # initialize the processor manager
        manager = ProcessorManager(queue)
        # set the scavenger interval
        manager.scavenger_interval = 0.1
        # create a thread that idles forever
        thread = Idling()
        thread.name = 'obs'
        thread.start()
        # add the thread to the manager observation list
        # ToDo: fix this in Process setup
        manager.observations['0'] = thread
        manager.start()
        # give it some time to start
        sleep(.2)
        # the scavenger should not remove the thread
        sleep(manager.scavenger_interval)
        self.assertTrue(thread.is_alive())
        # now stop thread
        thread.event.set()
        # manager should remove thread, but how to check in Process setup?
        # stop the manager
        queue.put('stop')
        manager.join()
# skip if not running on arts041 or zeus
@unittest.skipUnless(socket.gethostname() in ('arts041', 'zeus'), "Test can only run on arts041 or zeus")
# Skip if psrdada not available
@unittest.skipIf(which('dada_db') is None, "psrdada not available")
class TestProcessor(unittest.TestCase):
    """End-to-end test of the Processor pipeline.

    Reads recorded PSRDADA data into a ringbuffer, runs AMBER and
    dadafilterbank on it, and feeds the resulting triggers through an
    AMBERListener into a Processor. Requires host-specific data paths and
    the psrdada tools, hence the skip conditions above.
    """

    def setUp(self):
        """Prepare input data, output directories, ringbuffer and pipeline processes."""
        # Host-specific input file selection and directory layout
        if socket.gethostname() == 'zeus':
            self.dada_files = glob.glob('/data/arts/data/dada/*.dada')[:1]
            self.dada_files.sort()
            output_dir = '/data/arts/darc/output'
            log_dir = f'{output_dir}/log'
            filterbank_dir = f'{output_dir}/filterbank'
            amber_dir = f'{output_dir}/amber'
            amber_conf_dir = '/data/arts/darc/amber_conf'
            amber_conf_file = '/data/arts/darc/amber.conf'
            sb_table = '/data/arts/darc/sbtable-sc4-12tabs-71sbs.txt'
        else:
            # arts041: paths depend on which user runs the test
            self.dada_files = glob.glob('/tank/data/sky/B1933+16/20200211_dump/dada/*.dada')[:1]
            self.dada_files.sort()
            user = os.getlogin()
            if user == 'oostrum':
                main_dir = '/tank/users/oostrum/darc/automated_testing'
            elif user == 'arts':
                main_dir = '/tank/users/arts/darc_automated_testing'
            else:
                self.skipTest(f"Cannot run test as user {user}")
            output_dir = f'{main_dir}/output'
            log_dir = f'{output_dir}/log'
            filterbank_dir = f'{output_dir}/filterbank'
            amber_dir = f'{output_dir}/amber'
            amber_conf_dir = f'{main_dir}/amber/amber_conf'
            amber_conf_file = f'{main_dir}/amber/amber.conf'
            sb_table = '/home/arts/.controller/synthesized_beam_tables/sbtable-sc4-12tabs-71sbs.txt'
        # ensure we start clean
        try:
            rmtree(output_dir)
        except FileNotFoundError:
            pass
        for d in (output_dir, log_dir, amber_dir, filterbank_dir):
            util.makedirs(d)
        self.processes = {}
        # extract PSRDADA header
        self.header = self.get_psrdada_header(self.dada_files[0])
        # observation start time, a few seconds in the future
        self.tstart = Time.now() + TimeDelta(5, format='sec')
        # add general settings
        self.header['nreader'] = 2
        self.header['nbuffer'] = 5
        self.header['key_i'] = '5000'
        self.header['beam'] = 0
        self.header['ntab'] = 12
        self.header['nsb'] = 71
        # self.header['nbatch'] = int(float(self.header['SCANLEN']) / 1.024)
        self.header['nbatch'] = 10
        self.header['duration'] = float(self.header['SCANLEN'])
        self.header['log_dir'] = log_dir
        self.header['output_dir'] = output_dir
        self.header['filterbank_dir'] = filterbank_dir
        self.header['amber_dir'] = amber_dir
        self.header['amber_conf_dir'] = amber_conf_dir
        self.header['amber_config'] = amber_conf_file
        self.header['sb_table'] = sb_table
        self.header['date'] = '20200101'
        self.header['datetimesource'] = '2020-01-01-00:00:00.FAKE'
        self.header['freq'] = int(np.round(float(self.header['FREQ'])))
        self.header['snrmin'] = 8
        self.header['min_freq'] = 1220.7
        # start time expressed in packets (unix time * TIME_UNIT)
        self.header['startpacket'] = int(self.tstart.unix * TIME_UNIT)
        # add parset
        parset = {'task.duration': self.header['SCANLEN'],
                  'task.startTime': self.tstart.isot,
                  'task.taskID': '001122',
                  'task.beamSet.0.compoundBeam.0.phaseCenter': '[293.94876deg, 16.27778deg]',
                  'task.directionReferenceFrame': 'J2000'}
        self.header['parset'] = parset
        # create ringbuffer
        self.create_ringbuffer()
        # create processes for the different pipeline steps
        self.diskdb_proc = self.diskdb_command()
        self.amber_proc = self.amber_command()
        self.dadafilterbank_proc = self.dadafilterbank_command()
        # start all except data reader
        self.dadafilterbank_proc.start()
        self.amber_proc.start()
        # initialize AMBERListener, used for feeding triggers to Processor
        self.amber_queue = mp.Queue()
        self.processor_queue = mp.Queue()
        self.amber_listener = AMBERListener(self.amber_queue, target_queue=self.processor_queue)
        self.amber_listener.start()
        # initialize Processor, connect input queue to output of AMBERListener
        self.processor = Processor(self.processor_queue)

    def tearDown(self):
        """Destroy the PSRDADA ringbuffer(s) created in setUp."""
        # remove ringbuffers
        for key in ('key_i', ):
            cmd = f'dada_db -d -k {self.header[key]}'
            os.system(cmd)

    @staticmethod
    def get_psrdada_header(fname):
        """Read the PSRDADA header from a .dada file.

        The header's own size is stored in it under the HDR_SIZE key, so the
        file is read in increasing chunks until that key appears, then re-read
        with the known size.

        :param str fname: path to a .dada file
        :return: header as a dict of key -> value strings
        :raises ValueError: if HDR_SIZE is not found in the first MB
        """
        # load a typical amount of bytes from the file and look for header size keyword
        nbyte = 1
        raw_header = ''
        with open(fname, 'r') as f:
            while True:
                raw_header = raw_header + f.read(nbyte)
                # parse what we have so far into (key, value) pairs
                header = [line.strip().split(maxsplit=1) for line in raw_header.split('\n')]
                header = np.array(header)
                try:
                    key_index = np.where(header == 'HDR_SIZE')[0]
                    hdr_size = header[key_index, 1][0].astype(int)
                except (IndexError, ValueError):
                    # HDR_SIZE not (fully) read yet: read more, up to 1 MB total
                    if nbyte > 1e6:
                        raise ValueError("Key HDR_SIZE not found in first MB of file")
                    nbyte += 4096
                else:
                    break
        # load the full header with known size
        with open(fname, 'r') as f:
            header = f.read(hdr_size)
        # convert to dict, skipping empty lines and zero padding at the end
        header = dict([line.strip().split(maxsplit=1) for line in header.split('\n') if line][:-1])
        return header

    def create_ringbuffer(self):
        """Create the PSRDADA ringbuffer using sizes from the header."""
        # run ringbuffer
        cmd = 'dada_db -a {HDR_SIZE} -b {RESOLUTION} -k {key_i} -n {nbuffer} -r {nreader}'.format(**self.header)
        os.system(cmd)

    def diskdb_command(self):
        """Return a Process that writes the input .dada files into the ringbuffer."""
        cmd = 'dada_diskdb -k {key_i} '.format(**self.header)
        for fname in self.dada_files:
            cmd += f' -f {fname}'
        proc = mp.Process(target=os.system, args=(cmd, ))
        return proc

    def amber_command(self):
        """Return a Process that runs a single-step AMBER search on the ringbuffer."""
        # load amber config file
        with open(self.header['amber_config']) as f:
            amber_conf = util.parse_parset(f.read())
        # extract step1 settings and add to a full config dict
        fullconfig = self.header.copy()
        for key, value in amber_conf.items():
            # some values are lists, interpret these
            if value.startswith('['):
                value = ast.literal_eval(value)
            if isinstance(value, list):
                # extract 1st item
                fullconfig[key] = value[0]
            else:
                fullconfig[key] = value
        # add freq to device name
        fullconfig['device_name'] = fullconfig['device_name'].format(**self.header)
        # full step1 AMBER command line, built from config + header values
        amber_step1 = "taskset -c 3 amber -sync -print -opencl_platform {opencl_platform} " \
                      "-opencl_device {opencl_device} " \
                      "-device_name {device_name} " \
                      "-padding_file {amber_conf_dir}/padding.conf " \
                      "-zapped_channels {amber_conf_dir}/zapped_channels_{freq}.conf " \
                      "-integration_steps {amber_conf_dir}/{integration_file} " \
                      "-subband_dedispersion " \
                      "-dedispersion_stepone_file {amber_conf_dir}/dedispersion_stepone.conf " \
                      "-dedispersion_steptwo_file {amber_conf_dir}/dedispersion_steptwo.conf " \
                      "-integration_file {amber_conf_dir}/integration.conf " \
                      "-snr_file {amber_conf_dir}/snr.conf " \
                      "-dms {num_dm} -dm_first {dm_first} -dm_step {dm_step} -subbands {subbands} " \
                      "-subbanding_dms {subbanding_dms} -subbanding_dm_first {subbanding_dm_first} " \
                      "-subbanding_dm_step {subbanding_dm_step} -snr_sc -nsigma {snr_nsigma} " \
                      "-downsampling_configuration {amber_conf_dir}/downsampling.conf " \
                      "-downsampling_factor {downsamp} -rfim -time_domain_sigma_cut -frequency_domain_sigma_cut " \
                      "-time_domain_sigma_cut_steps {amber_conf_dir}/tdsc_steps.conf" \
                      " -time_domain_sigma_cut_configuration {amber_conf_dir}/tdsc.conf " \
                      "-frequency_domain_sigma_cut_steps {amber_conf_dir}/fdsc_steps.conf " \
                      "-frequency_domain_sigma_cut_configuration {amber_conf_dir}/fdsc.conf " \
                      "-nr_bins {fdsc_nbins} -threshold {snrmin} " \
                      "-output {amber_dir}/CB{beam:02d}_step1 " \
                      "-beams {ntab} -synthesized_beams {nsb} -synthesized_beams_chunk {nsynbeams_chunk} " \
                      "-dada -dada_key {key_i} -batches {nbatch} {extra_flags} " \
                      "-synthesized_beams_file {sb_table}".format(**fullconfig)
        proc = mp.Process(target=os.system, args=(amber_step1,))
        return proc

    def dadafilterbank_command(self):
        """Return a Process that writes ringbuffer data to filterbank files."""
        cmd = 'dadafilterbank -l {log_dir}/dadafilterbank.log -k {key_i} ' \
              '-n {filterbank_dir}/CB{beam:02d}'.format(**self.header)
        proc = mp.Process(target=os.system, args=(cmd, ))
        return proc

    def test_processor_obs(self):
        """Run a full observation through the pipeline, then stop all services."""
        # start processor
        self.processor.start()
        # start amber listener and processor
        cmd = {'command': 'start_observation', 'obs_config': self.header, 'reload': False}
        self.amber_queue.put(cmd)
        self.processor_queue.put(cmd)
        # at start time, read data into buffer, other processes are already set up and waiting for data
        util.sleepuntil_utc(self.tstart)
        self.diskdb_proc.start()
        # wait until processes are done
        for proc in (self.diskdb_proc, self.amber_proc, self.dadafilterbank_proc):
            proc.join()
        # stop observation
        self.amber_queue.put({'command': 'stop_observation'})
        self.processor.source_queue.put({'command': 'stop_observation'})
        # stop services
        self.amber_listener.source_queue.put('stop')
        self.amber_listener.join()
        self.processor.source_queue.put('stop')
        self.processor.join()
@unittest.skipUnless(socket.gethostname() == 'zeus', "Test can only run on zeus")
class TestExtractor(unittest.TestCase):
    """Test the Extractor: cutting candidate data out of filterbank files."""

    def setUp(self):
        """Create an Extractor with a minimal observation config and a clean output dir."""
        self.output_dir = '/data/arts/darc/output'
        # NOTE(review): TestProcessor.setUp computes startpacket as
        # tstart.unix * TIME_UNIT, while floor division is used here —
        # confirm which form is intended.
        startpacket = Time.now().unix // TIME_UNIT
        obs_config = {'freq': 1370, 'min_freq': 1220.7, 'startpacket': startpacket,
                      'output_dir': self.output_dir, 'beam': 0}
        # logger that prints to the console
        logger = logging.getLogger('test_extractor')
        handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s.%(levelname)s.%(name)s: %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        # shared counter for the number of extracted candidates
        self.ncand = mp.Value('i', 0)
        self.extractor = Extractor(obs_config, self.output_dir + '/triggers_realtime', logger, mp.Queue(), mp.Queue(),
                                   self.ncand)
        # set filterbank reader (normally done in run method)
        self.extractor.filterbank_reader = self.extractor.init_filterbank_reader()
        # ensure we start clean
        for fname in glob.glob(os.path.join(self.output_dir, 'data', '*.hdf5')):
            os.remove(fname)

    def test_extract(self):
        """Extract one known candidate and check an output file is produced."""
        # parameters from an earlier amber run
        snr = 71.26
        dm = 159.8
        toa = 5.79174
        sb = 35
        downsamp = 100
        # run extractor
        self.extractor._extract(dm, snr, toa, downsamp, sb)
        # read output file name
        try:
            fname = self.extractor.output_queue.get(timeout=.1)
        except Empty:
            fname = None
        self.assertTrue(fname is not None)
        # check that the output file exists
        self.assertTrue(os.path.isfile(fname))
        # exactly one candidate should have been counted
        self.assertTrue(self.ncand.value == 1)
@unittest.skipUnless(socket.gethostname() == 'zeus', "Test can only run on zeus")
class TestClassifier(unittest.TestCase):
    """Test the ML classifier on a previously extracted candidate file."""

    def setUp(self):
        # work on a copy of an existing candidate file, so the original
        # is not overwritten
        source_file = glob.glob('/data/arts/darc/output/triggers_realtime/data/*.hdf5')[0]
        self.fname = source_file.replace('.hdf5', '_test.hdf5')
        copyfile(source_file, self.fname)
        # logger with console output
        logger = logging.getLogger('test_classifier')
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter('%(asctime)s.%(levelname)s.%(name)s: %(message)s'))
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        # pipe over which the classifier reports its candidates
        self.conn, child_conn = mp.Pipe()
        self.classifier = Classifier(logger, mp.Queue(), child_conn)

    def test_classify(self):
        self.classifier.start()
        # feed the candidate file, then request a stop
        self.classifier.input_queue.put(self.fname)
        self.classifier.input_queue.put('stop')
        # collect the surviving candidates and wait for shutdown
        candidates = self.conn.recv()
        self.classifier.join()
        # exactly one hdf5 candidate should come out
        self.assertEqual(len(candidates), 1)
        self.assertTrue(candidates[0].endswith('.hdf5'))
        # the classification probabilities must be stored in the file
        with h5py.File(candidates[0], 'r') as f:
            self.assertTrue('prob_freqtime' in f.attrs.keys())
            self.assertTrue('prob_dmtime' in f.attrs.keys())

    def tearDown(self):
        # remove the copied test file
        os.remove(self.fname)
@unittest.skipUnless(socket.gethostname() == 'zeus', "Test can only run on zeus")
class TestVisualizer(unittest.TestCase):
def setUp(self):
self.output_dir = '/data/arts/darc/output/triggers_realtime'
self.result_dir = '/data/arts/darc/output/central'
# ensure we start clean
try:
rmtree(self.result_dir)
except FileNotFoundError:
pass
util.makedirs(self.result_dir)
for fname in glob.glob(os.path.join(self.output_dir, '*.pdf')):
os.remove(fname)
def test_visualize(self):
files = glob.glob('/data/arts/darc/output/triggers_realtime/data/*.hdf5')
logger = logging.getLogger('test_visualizer')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s.%(levelname)s.%(name)s: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
parset = {'task.taskID': '001122',
'task.beamSet.0.compoundBeam.0.phaseCenter': '[293.94876deg, 16.27778deg]',
'task.directionReferenceFrame': 'J2000'}
obs_config = {'date': '20200101',
'datetimesource': '2020-01-01-00:00:00.FAKE',
'min_freq': 1220.7,
'beam': 0,
'parset': | |
# Source: maistra/proxy vendored Chromium v8 — build/locale_tool.py
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script used to manage locale-related files in Chromium.
This script is used to check, and potentially fix, many locale-related files
in your Chromium workspace, such as:
- GRIT input files (.grd) and the corresponding translations (.xtb).
- BUILD.gn files listing Android localized resource string resource .xml
generated by GRIT for all supported Chrome locales. These correspond to
<output> elements that use the type="android" attribute.
The --scan-dir <dir> option can be used to check for all files under a specific
directory, and the --fix-inplace option can be used to try fixing any file
that doesn't pass the check.
This can be very handy to avoid tedious and repetitive work when adding new
translations / locales to the Chrome code base, since this script can update
said input files for you.
Important note: checks and fix may fail on some input files. For example
remoting/resources/remoting_strings.grd contains an in-line comment element
inside its <outputs> section that breaks the script. The check will fail, and
trying to fix it too, but at least the file will not be modified.
"""
from __future__ import print_function
import argparse
import json
import os
import re
import shutil
import subprocess
import sys
import unittest
# Assume this script is under build/
_SCRIPT_DIR = os.path.dirname(__file__)
_SCRIPT_NAME = os.path.join(_SCRIPT_DIR, os.path.basename(__file__))
_TOP_SRC_DIR = os.path.join(_SCRIPT_DIR, '..')
# Need to import android/gyp/util/resource_utils.py here.
sys.path.insert(0, os.path.join(_SCRIPT_DIR, 'android/gyp'))
from util import build_utils
from util import resource_utils
# This locale is the default and doesn't have translations.
_DEFAULT_LOCALE = 'en-US'
# Misc terminal codes to provide human friendly progress output.
_CONSOLE_CODE_MOVE_CURSOR_TO_COLUMN_0 = '\x1b[0G'
_CONSOLE_CODE_ERASE_LINE = '\x1b[K'
_CONSOLE_START_LINE = (
_CONSOLE_CODE_MOVE_CURSOR_TO_COLUMN_0 + _CONSOLE_CODE_ERASE_LINE)
##########################################################################
##########################################################################
#####
##### G E N E R I C H E L P E R F U N C T I O N S
#####
##########################################################################
##########################################################################
def _FixChromiumLangAttribute(lang):
"""Map XML "lang" attribute values to Chromium locale names."""
_CHROMIUM_LANG_FIXES = {
'en': 'en-US', # For now, Chromium doesn't have an 'en' locale.
'iw': 'he', # 'iw' is the obsolete form of ISO 639-1 for Hebrew
'no': 'nb', # 'no' is used by the Translation Console for Norwegian (nb).
}
return _CHROMIUM_LANG_FIXES.get(lang, lang)
def _FixTranslationConsoleLocaleName(locale):
_FIXES = {
'nb': 'no', # Norwegian.
'he': 'iw', # Hebrew
}
return _FIXES.get(locale, locale)
def _CompareLocaleLists(list_a, list_expected, list_name):
"""Compare two lists of locale names. Print errors if they differ.
Args:
list_a: First list of locales.
list_expected: Second list of locales, as expected.
list_name: Name of list printed in error messages.
Returns:
On success, return False. On error, print error messages and return True.
"""
errors = []
missing_locales = sorted(set(list_a) - set(list_expected))
if missing_locales:
errors.append('Missing locales: %s' % missing_locales)
extra_locales = sorted(set(list_expected) - set(list_a))
if extra_locales:
errors.append('Unexpected locales: %s' % extra_locales)
if errors:
print('Errors in %s definition:' % list_name)
for error in errors:
print(' %s\n' % error)
return True
return False
def _BuildIntervalList(input_list, predicate):
"""Find ranges of contiguous list items that pass a given predicate.
Args:
input_list: An input list of items of any type.
predicate: A function that takes a list item and return True if it
passes a given test.
Returns:
A list of (start_pos, end_pos) tuples, where all items in
[start_pos, end_pos) pass the predicate.
"""
result = []
size = len(input_list)
start = 0
while True:
# Find first item in list that passes the predicate.
while start < size and not predicate(input_list[start]):
start += 1
if start >= size:
return result
# Find first item in the rest of the list that does not pass the
# predicate.
end = start + 1
while end < size and predicate(input_list[end]):
end += 1
result.append((start, end))
start = end + 1
def _SortListSubRange(input_list, start, end, key_func):
"""Sort an input list's sub-range according to a specific key function.
Args:
input_list: An input list.
start: Sub-range starting position in list.
end: Sub-range limit position in list.
key_func: A function that extracts a sort key from a line.
Returns:
A copy of |input_list|, with all items in [|start|, |end|) sorted
according to |key_func|.
"""
result = input_list[:start]
inputs = []
for pos in xrange(start, end):
line = input_list[pos]
key = key_func(line)
inputs.append((key, line))
for _, line in sorted(inputs):
result.append(line)
result += input_list[end:]
return result
def _SortElementsRanges(lines, element_predicate, element_key):
  """Sort all elements of a given type in a list of lines by a given key.

  Args:
    lines: input lines.
    element_predicate: predicate function to select elements to sort.
    element_key: lambda returning a comparison key for each element that
      passes the predicate.
  Returns:
    A new list of input lines, with lines [start..end) sorted.
  """
  result = lines
  # Intervals are computed once, up front; sorting within an interval does
  # not change which lines pass the predicate.
  for interval_start, interval_end in _BuildIntervalList(result, element_predicate):
    result = _SortListSubRange(result, interval_start, interval_end, element_key)
  return result
def _ProcessFile(input_file, locales, check_func, fix_func):
  """Process a given input file, potentially fixing it.

  Args:
    input_file: Input file path.
    locales: List of Chrome locales to consider / expect.
    check_func: A lambda called to check the input file lines with
      (input_lines, locales) argument. It must return an list of error
      messages, or None on success.
    fix_func: None, or a lambda called to fix the input file lines with
      (input_lines, locales). It must return the new list of lines for
      the input file, and may raise an Exception in case of error.
  Returns:
    True at the moment.
  """
  print('%sProcessing %s...' % (_CONSOLE_START_LINE, input_file), end=' ')
  sys.stdout.flush()
  with open(input_file) as f:
    lines = f.readlines()
  # Early-out when the file already passes the check.
  error_messages = check_func(input_file, lines, locales)
  if not error_messages:
    return True
  print('\n%s%s' % (_CONSOLE_START_LINE, '\n'.join(error_messages)))
  if not fix_func:
    return True
  # Attempt the fix; a failing fixer leaves the file untouched.
  try:
    lines = fix_func(input_file, lines, locales)
    with open(input_file, 'wt') as f:
      f.write(''.join(lines))
    print('Fixed %s.' % input_file)
  except Exception as e:  # pylint: disable=broad-except
    print('Skipped %s: %s' % (input_file, e))
  return True
def _ScanDirectoriesForFiles(scan_dirs, file_predicate):
"""Scan a directory for files that match a given predicate.
Args:
scan_dir: A list of top-level directories to start scan in.
file_predicate: lambda function which is passed the file's base name
and returns True if its full path, relative to |scan_dir|, should be
passed in the result.
Returns:
A list of file full paths.
"""
result = []
for src_dir in scan_dirs:
for root, _, files in os.walk(src_dir):
result.extend(os.path.join(root, f) for f in files if file_predicate(f))
return result
def _WriteFile(file_path, file_data):
"""Write |file_data| to |file_path|."""
with open(file_path, 'w') as f:
f.write(file_data)
def _FindGnExecutable():
  """Locate the real GN executable used by this Chromium checkout.

  This is needed because the depot_tools 'gn' wrapper script will look
  for .gclient and other things we really don't need here.

  Returns:
    Path of real host GN executable from current Chromium src/ checkout.
  """
  # Simply scan buildtools/*/gn and return the first one found so we don't
  # have to guess the platform-specific sub-directory name (e.g. 'linux64'
  # for 64-bit Linux machines).
  buildtools_dir = os.path.join(_TOP_SRC_DIR, 'buildtools')
  for entry in os.listdir(buildtools_dir):
    entry_path = os.path.join(buildtools_dir, entry)
    if not os.path.isdir(entry_path):
      continue
    candidate = os.path.join(entry_path, 'gn')
    if os.path.exists(candidate):
      return candidate
  return None
def _PrettyPrintListAsLines(input_list, available_width, trailing_comma=False):
result = []
input_str = ', '.join(input_list)
while len(input_str) > available_width:
pos = input_str.rfind(',', 0, available_width)
result.append(input_str[:pos + 1])
input_str = input_str[pos + 1:].lstrip()
if trailing_comma and input_str:
input_str += ','
result.append(input_str)
return result
class _PrettyPrintListAsLinesTest(unittest.TestCase):
  """Unit tests for _PrettyPrintListAsLines."""

  def test_empty_list(self):
    # An empty list still yields a single (empty) output line.
    self.assertListEqual([''], _PrettyPrintListAsLines([], 10))

  def test_wrapping(self):
    words = ['foo', 'bar', 'zoo', 'tool']
    expectations = [
        (8, ['foo,', 'bar,', 'zoo,', 'tool']),
        (12, ['foo, bar,', 'zoo, tool']),
        (79, ['foo, bar, zoo, tool']),
    ]
    for width, expected in expectations:
      self.assertListEqual(_PrettyPrintListAsLines(words, width), expected)

  def test_trailing_comma(self):
    words = ['foo', 'bar', 'zoo', 'tool']
    expectations = [
        (8, ['foo,', 'bar,', 'zoo,', 'tool,']),
        (12, ['foo, bar,', 'zoo, tool,']),
        (79, ['foo, bar, zoo, tool,']),
    ]
    for width, expected in expectations:
      self.assertListEqual(
          _PrettyPrintListAsLines(words, width, trailing_comma=True), expected)
##########################################################################
##########################################################################
#####
##### L O C A L E S L I S T S
#####
##########################################################################
##########################################################################
# Various list of locales that will be extracted from build/config/locales.gni
# Do not use these directly, use ChromeLocales(), AndroidOmittedLocales() and
# IosUnsupportedLocales() instead to access these lists.
_INTERNAL_CHROME_LOCALES = []
_INTERNAL_ANDROID_OMITTED_LOCALES = []
_INTERNAL_IOS_UNSUPPORTED_LOCALES = []
def ChromeLocales():
  """Return the list of all locales supported by Chrome.

  The list is lazily extracted from build/config/locales.gni on first call
  and cached in _INTERNAL_CHROME_LOCALES.
  """
  if not _INTERNAL_CHROME_LOCALES:
    _ExtractAllChromeLocalesLists()
  return _INTERNAL_CHROME_LOCALES
def AndroidOmittedLocales():
  """Return the list of locales omitted from Android APKs.

  Lazily extracted and cached, like ChromeLocales().
  """
  if not _INTERNAL_ANDROID_OMITTED_LOCALES:
    _ExtractAllChromeLocalesLists()
  return _INTERNAL_ANDROID_OMITTED_LOCALES
def IosUnsupportedLocales():
"""Return the list of locales that are unsupported on iOS."""
if not | |
# Source: ggirelli/gpseq-img-py — pygpseq/anim/condition.py
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@contact: <EMAIL>
@description: contains Condition wrapper, which in turn contains Series.
"""
# DEPENDENCIES =================================================================
from joblib import Parallel, delayed
import multiprocessing
import os
import time
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pygpseq import const
from pygpseq.tools import path as pt, io as iot, plot, stat as stt, string as st
from pygpseq.anim.series import Series
# CLASSES ======================================================================
class Condition(iot.IOinterface):
"""GPSeq condition, i.e., ensemble of series with nuclei.
Args:
__version__ (string): package string.
path (string): condition folder path.
name (string): condition name.
ext (string): condition series extension.
reg (string): condition series regexp.
series (list[series]): condition series.
"""
__version__ = const.VERSION
path = "."
name = ""
ext = ".tif"
reg = "^(?P<channel_name>[^/]*)"
reg += "\.(?P<channel_str>channel[0-9]+)"
reg += "\.(?P<series_str>series[0-9]+)"
reg += "(?P<ext>\.tif)$"
series = []
    def __init__(self, path, dna_channels, sig_channels, main=None):
        """Run IOinterface __init__ method.

        Args:
            path (string): path to the condition folder.
            dna_channels (list): DNA channel names; a warning is issued for
                any series containing none of them.
            sig_channels (list): signal channel names; a warning is issued
                for any series containing none of them.
            main (pyGPSeq.main): main wrapper (opt); when given, its log
                path, extension, verbosity and regexp are inherited.
        """
        # If required, inherit from `main` wrap
        if main is None:
            super(Condition, self).__init__()
        else:
            logpath = main.logpath
            super(Condition, self).__init__(path=logpath, append=True)
            self.ext = main.ext
            self.verbose = main.verbose
            self.reg = main.reg
        # Save input parameters
        self.path = pt.add_trailing_slash(os.path.abspath(path))
        # Condition name is the last component of the folder path
        self.name = self.path[: len(self.path) - 1].split("/")
        self.name = self.name[len(self.name) - 1]
        self.printout('Initializing condition: "' + self.name + '"', 0)
        # Select condition's series
        self.series = pt.select_files(self.path, self.ext)
        self.series = pt.select_series(self.series, self.reg).items()
        # Check that each series has at least one dna_channel and sig_channel
        for s in self.series:
            if all(
                cdata["channel_name"] not in dna_channels
                for (cname, cdata) in s[1].items()
            ):
                self.printout(
                    "No DNA channel found in '%s' of '%s'" % (s[0], self.name), -2
                )
            if all(
                cdata["channel_name"] not in sig_channels
                for (cname, cdata) in s[1].items()
            ):
                self.printout(
                    "No Signal channel found in '%s' of '%s'" % (s[0], self.name), -2
                )
        # If no series, stop and trigger error
        if len(self.series) == 0:
            msg = "No series found in condition %s." % (self.name,)
            self.printout(msg, -2)
        else:
            msg = "Found %d series..." % (len(self.series),)
            self.printout(msg, 1)
        # Instantiate series
        self.series = [list(ds) for ds in self.series]
        # Append a 1-based series index before wrapping in Series objects
        [self.series[i].append(i + 1) for i in range(len(self.series))]
        self.series = [Series(s, condition=self) for s in self.series]
def __getitem__(self, key):
"""Allow get item."""
if key in dir(self):
return getattr(self, key)
else:
return None
def __setitem__(self, key, value):
"""Allow set item."""
if key in dir(self):
self.__setattr__(key, value)
def adjust_options(self, **kwargs):
"""Adjust options to be passed to the Series class.
Args:
**kwargs
Returns:
dict: adds the following kawrgs:
cond_name (string): condition wrapper name.
"""
kwargs["cond_name"] = self.name
return kwargs
def analyze_nuclei(self, **kwargs):
"""Export current condition nuclei.
Args:
sigma (float): sigma for smoothing and covariance calculation (opt).
**kwargs
Returns:
tuple: profiles, summaries and single-pixel tables.
"""
# CHECK PARAMS =========================================================
# Get default number of cores
ncores = 1 if not "ncores" in kwargs.keys() else kwargs["ncores"]
# Set output suffix
if not "suffix" in kwargs.keys():
suffix = ""
else:
suffix = st.add_leading_dot(kwargs["suffix"])
# Check plotting
if not "plotting" in kwargs.keys():
kwargs["plotting"] = True
# Check number of cores
if ncores > multiprocessing.cpu_count():
ncores = multiprocessing.cpu_count()
msg = "Decreased core number to maximum allowed: %i" % ncores
msg += "\nPlease, don't ask for the impossible... ಠ_ಠ"
self.printout(msg, -1)
# Add necessary options
self.printout('Current condition: "' + self.name + '"...', 0)
kwargs = self.adjust_options(**kwargs)
# Create condition nuclear data directory if necessary
if not os.path.isdir(kwargs["out_dir"]):
os.mkdir(kwargs["out_dir"])
# GET NUCLEAR DATA =====================================================
# Retrieve nuclei
nuclei = self.get_nuclei()
# Check that the condition contains nuclei
if len(nuclei) == 0:
return (None, None, None, None)
if kwargs["seg_type"] == const.SEG_3D:
DTYPE_NUCLEAR_SUMMARY = const.DTYPE_NUCLEAR_SUMMARY_3D
else:
DTYPE_NUCLEAR_SUMMARY = const.DTYPE_NUCLEAR_SUMMARY_2D
# Retrieve nuclei summaries
self.printout("Retrieving nuclear summary...", 1)
summary = np.zeros(len(nuclei), dtype=DTYPE_NUCLEAR_SUMMARY)
for i in range(len(nuclei)):
summary[i] = nuclei[i].get_summary()
# Filter nuclei
msg = "Filtering nuclei based on size, intensity and shape..."
self.printout(msg, 1)
selected = self.multi_threshold_nuclei(data=summary, **kwargs)
# Check that nuclei are selected
if len(selected) == 0:
return (None, None, None, None)
# Apply selection
summary = np.asarray(
[summary[i] for i in selected], dtype=DTYPE_NUCLEAR_SUMMARY
)
# Retrieve selected nuclei single-pixel data
data_nested = Parallel(n_jobs=ncores)(
delayed(get_series_nuclear_data)(self, summary, sidx, **kwargs)
for sidx in list(set(summary["s"]))
)
# Un-nest nuclear data
data = []
[data.extend(nested["spx_data"]) for nested in data_nested]
# Assemble into a single array
self.printout("Merging into a single table...", 1)
merged = np.zeros(
sum([d.shape[0] for d in data]), dtype=const.DTYPE_NUCLEAR_DATA
)
currpos = 0
for d in data:
merged[currpos : (currpos + d.shape[0])] = d
currpos += d.shape[0]
# Remove rows with no DNA signal
self.printout("Removing pixels without DNA signal...", 1)
self.printout("Identifying pixels...", 2)
toKeep = np.where(merged["dna"] != 0)[0]
nToRemove = len(merged) - len(toKeep)
if not nToRemove == 0:
merged = merged[toKeep]
msg = "Removed %i pixels without DNA signal..." % nToRemove
self.printout(msg, 2)
# Density profile ------------------------------------------------------
dp = np.vstack([nested["density"] for nested in data_nested])
dp = pd.DataFrame(dp)
col_labs = ["c", "s", "n"]
col_labs.extend(
["nd_%f" % b for b in np.linspace(0, 1, kwargs["nbins"] + 1)[1:]]
)
dp.columns = col_labs
# Volume profile -------------------------------------------------------
vp = np.vstack([nested["volume"] for nested in data_nested])
vp = pd.DataFrame(vp)
col_labs = ["c", "s", "n"]
col_labs.extend(
["nd_%f" % b for b in np.linspace(0, 1, kwargs["nbins"] + 1)[1:]]
)
vp.columns = col_labs
# PLOT =================================================================
# EVERY PIXEL ----------------------------------------------------------
# Produce profile plot
self.printout("Generating profiles...", 1)
profiles = self.make_profiles(merged, len(data), **kwargs)
# Export single profile study
self.printout("Studying single-pixel behaviour...", 1)
self.check_single_pixels(merged, profiles, **kwargs)
# Export single-condition plot
self.printout("Exporting profiles...", 1)
# Mean/median/mode profile plot
fig = plot.single_condition_profiles(profiles, n_nuclei=len(data), **kwargs)
plot.single_condition_profiles(
profiles, n_nuclei=len(data), yfield="median", new_figure=False, **kwargs
)
plot.single_condition_profiles(
profiles, n_nuclei=len(data), yfield="mode", new_figure=False, **kwargs
)
plot.single_condition_profiles(
profiles, n_nuclei=len(data), yfield="max", new_figure=False, **kwargs
)
# Add legend
plt.subplot(3, 2, 1)
plot.set_font_size(12)
plt.legend(
labels=["mean", "median", "mode", "max"],
bbox_to_anchor=(0.0, 1.12, 1.0, 0.102),
loc=3,
ncol=2,
mode="expand",
borderaxespad=0.0,
)
# Export PDF
fname = kwargs["out_dir"] + const.OUTDIR_PDF + self.name
fname += ".profiles" + suffix + ".pdf"
if kwargs["plotting"]:
plot.export(fname, "pdf")
# Export PNG
fname = kwargs["out_dir"] + const.OUTDIR_PNG + self.name
fname += ".profiles" + suffix + ".png"
if kwargs["plotting"]:
plot.export(fname, "png")
# Close figure
plt.close(fig)
# Output
self.printout("", 0)
return (profiles, summary, merged, dp, vp)
    def check_single_pixels(
        self, indata, profiles, partial=None, supcomm=None, **kwargs
    ):
        """Produce single pixel behaviour study plot.

        Plots DNA, signal and signal/DNA ratio against the distance field
        and exports each plot to a multi-page PDF and to per-plot PNGs.

        Args:
            indata (np.array): single-pixel table, const.DTYPE_NUCLEAR_DATA.
            profiles (dict): smoothened and raw profiles (I ~ d).
            partial (bool): True if working on partial volume.
            supcomm (string): a comment to be added to the plot main title.
            **kwargs: reads out_dir, plotting, cdescr, an_type, sigma_smooth,
                nbins, dfield, and optionally suffix.
        """
        # CHECK PARAMS =========================================================
        # Set output suffix
        if not "suffix" in kwargs.keys():
            suffix = ""
        else:
            suffix = st.add_leading_dot(kwargs["suffix"])
        # Partial volume data
        if None == partial:
            partial = False
        # NOTE(review): the next line unconditionally forces `partial` to
        # False, overriding any caller-supplied value — looks like leftover
        # debug code; confirm intent before relying on partial-volume output.
        partial = False
        # Check plotting
        if not "plotting" in kwargs.keys():
            kwargs["plotting"] = True
        # Output file pointers
        fname = kwargs["out_dir"] + const.OUTDIR_PDF
        out_png = kwargs["out_dir"] + const.OUTDIR_PNG
        out_png += self.name + ".pixel_study."
        if partial:
            fname += self.name + ".pixel_study.part" + suffix + ".pdf"
        else:
            fname += self.name + ".pixel_study" + suffix + ".pdf"
        if kwargs["plotting"]:
            # NOTE(review): `pp` is presumably closed after the plotting loop
            # (beyond this excerpt) — verify, otherwise the PDF stays empty.
            pp = PdfPages(fname)
        # PREPARE DATA =========================================================
        # Setup data for plotting
        dna = indata["dna"].astype("float")
        # Division can yield inf where dna == 0; inf values are filtered below.
        rat = indata["sig"] / dna
        # (message, y values, y label, profiles key / filename tag)
        pltitems = [
            ("DNA channel...", indata["dna"], "DNA [a.u.]", "dna"),
            ("Signal channel...", indata["sig"], "Signal [a.u.]", "sig"),
            ("Signal/DNA ratio...", rat[rat != np.inf], "Signal/DNA", "ratio"),
        ]
        # PLOT =================================================================
        # Set plot super title
        if self.name in kwargs["cdescr"].keys():
            suptitle = 'Analysis "' + kwargs["cdescr"][self.name] + '"'
        else:
            suptitle = 'Analysis "' + self.name + '"'
        if None != supcomm:
            suptitle += supcomm
        suptitle += " [" + str(kwargs["an_type"]) + "]"
        suptitle += " [sigma = " + str(kwargs["sigma_smooth"]) + "]"
        suptitle += " [nbins = " + str(kwargs["nbins"]) + "]"
        # Plot
        for (msg, y, ylab, lab) in pltitems:
            self.printout(msg, 2)
            # Actual plot
            fig = plot.single_pixel_study(
                indata[kwargs["dfield"]],
                y,
                ylab,
                profiles[lab],
                partial=partial,
                **kwargs
            )
            fig.tight_layout()
            plt.subplots_adjust(top=0.95)
            plt.suptitle(suptitle)
            # Export PDF
            if kwargs["plotting"]:
                plt.savefig(pp, format="pdf")
            # Export PNG
            if partial:
                fname = out_png + lab + ".part" + suffix + ".png"
            else:
                fname = out_png + lab + suffix + ".png"
            if kwargs["plotting"]:
                plt.savefig(fname, format="png")
| |
from tensorflow.keras import regularizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, LSTM, RNN, Bidirectional, Flatten, Activation, \
RepeatVector, Permute, Multiply, Lambda, Concatenate, BatchNormalization
from tensorflow.keras.layers import Embedding, Conv1D, MaxPooling1D, GlobalMaxPooling1D, GlobalAveragePooling1D
from tensorflow.keras.utils import plot_model
from tensorflow.keras.optimizers import SGD, Adam
from sklearn.metrics import confusion_matrix
from mlearning.attention import AttentionLayer
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import TimeDistributed, GRU
import tensorflow.keras.backend as K
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import RMSprop, Adagrad
from transformers import TFBertModel
from mlearning.attention_context import AttentionWithContext
from tensorflow.keras.layers import SpatialDropout1D
from transformers import TFBertForSequenceClassification, BertConfig
from tensorflow.keras.callbacks import Callback
from sklearn.metrics import f1_score, recall_score, precision_score
def compile(model, optimizer, lr, dl_config, loss, n_classes):
    """Compile *model* with a task-appropriate loss and record the LR.

    NOTE(review): this shadows the builtin ``compile``; renaming would be
    cleaner, but every builder in this module calls it by this name.

    Args:
        model: keras Model, compiled in place.
        optimizer: optimizer instance, or falsy to default to ``Adam(lr=lr)``.
        lr: learning rate; stored on ``dl_config.learning_rate`` as a side
            effect so downstream code can report it.
        dl_config: config object with a writable ``learning_rate`` attribute.
        loss: explicit loss (name or object), or falsy to derive from
            ``n_classes`` (binary vs. categorical crossentropy).
        n_classes: number of target classes; must be >= 2 when *loss* is falsy.

    Returns:
        The same (now compiled) model.

    Raises:
        ValueError: if *loss* is falsy and ``n_classes < 2`` (previously the
            model was silently left uncompiled in that case).
    """
    if not optimizer:
        optimizer = Adam(lr=lr)
    if not loss:
        # Derive the loss from the class count.
        if n_classes == 2:
            loss = 'binary_crossentropy'
        elif n_classes > 2:
            loss = 'categorical_crossentropy'
        else:
            raise ValueError(
                "n_classes must be >= 2 when no loss is given, got %r" % (n_classes,))
    model.compile(loss=loss,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    dl_config.learning_rate = lr
    return model
def Burns_CNNBiLSTM(embedding_matrix, dl_config, n_classes=2, loss=None, learning_rate=None,
                    optimizer=None, seed_value=None):
    """CNN + BiLSTM sentence classifier with a soft attention head.

    Frozen pre-trained embeddings feed two L2-regularized Conv1D stages with
    max-pooling, then a bidirectional LSTM; a learned softmax attention
    re-weights the 128-d BiLSTM features before a sigmoid output unit.
    Returns the compiled keras Model.
    """
    sent_len = dl_config.max_sent_len
    n_vocab = embedding_matrix.shape[0]
    n_embed = embedding_matrix.shape[1]
    # Explicit LR, else the config's, else 0.001 (falsy values treated alike).
    learning_rate = learning_rate or dl_config.learning_rate or 0.001
    words = Input(shape=(sent_len,), dtype='int32')
    x = Embedding(n_vocab, n_embed,
                  weights=[embedding_matrix], input_length=sent_len, trainable=False)(words)
    x = Dropout(0.4, seed=seed_value)(x)
    x = Conv1D(16, 5, strides=1, activation='relu', padding='same',
               kernel_regularizer=regularizers.l2(1e-4))(x)
    x = MaxPooling1D(4)(x)
    x = Conv1D(16, 5, strides=1, activation='relu', padding='same',
               kernel_regularizer=regularizers.l2(1e-4))(x)
    x = Dropout(0.4, seed=seed_value)(x)
    x = Bidirectional(LSTM(64))(x)
    x = Dropout(0.4, seed=seed_value)(x)
    # Soft attention over the BiLSTM output.
    att = Dense(1, activation='tanh')(x)
    att = Flatten()(att)
    att = Activation('softmax')(att)
    att = RepeatVector(128)(att)
    att = Permute([2, 1])(att)
    weighted = Multiply()([x, att])
    doc_vec = Lambda(lambda t: K.sum(t, axis=-2), output_shape=(128,))(weighted)
    out = Dense(1, activation='sigmoid')(doc_vec)
    model = Model(words, out)
    if not optimizer:
        optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model = compile(model=model, optimizer=optimizer, lr=learning_rate,
                    dl_config=dl_config, loss=loss, n_classes=n_classes)
    model.summary()
    return model
def Hierarchical_Attention_GRU(embedding_matrix, dl_config, n_classes=2, loss=None, learning_rate=None,
                               optimizer=None, seed_value=None):
    """Hierarchical attention network with BiGRU encoders.

    A word-level BiGRU + attention encoder summarizes each sentence; the
    per-sentence vectors are encoded by a sentence-level BiGRU + attention
    (with dropout), and a single sigmoid unit scores the document.
    Returns the compiled document-level keras Model.
    """
    sent_len = dl_config.max_sent_len
    n_sents = dl_config.max_nb_sentences
    n_vocab = embedding_matrix.shape[0]
    n_embed = embedding_matrix.shape[1]
    # Explicit LR, else the config's, else 0.001 (falsy values treated alike).
    learning_rate = learning_rate or dl_config.learning_rate or 0.001
    embed = Embedding(n_vocab, n_embed, weights=[embedding_matrix],
                      input_length=sent_len, trainable=False, name='word_embedding')
    # Word-level encoder.
    w_in = Input(shape=(sent_len,), dtype='int32', name='word_input')
    w = embed(w_in)
    w = Bidirectional(GRU(50, return_sequences=True), name='word_gru')(w)
    w = Dense(100, activation='relu', name='word_dense')(w)
    w_vec, _ = AttentionLayer(n_embed, True, name='word_attention')(w)
    wordEncoder = Model(inputs=w_in, outputs=w_vec)
    # Sentence-level encoder on top of the shared word encoder.
    s_in = Input(shape=(n_sents, sent_len), dtype='int32', name='sent_input')
    s = TimeDistributed(wordEncoder, name='sent_linking')(s_in)
    s = Bidirectional(GRU(50, return_sequences=True), name='sent_gru')(s)
    s = Dense(100, activation='relu', name='sent_dense')(s)
    s_vec, _ = AttentionLayer(n_embed, return_coefficients=True, name='sent_attention')(s)
    s_vec = Dropout(0.5, name='sent_dropout', seed=seed_value)(s_vec)
    preds = Dense(1, activation='sigmoid', name='output')(s_vec)
    # Build and compile the document-level model.
    model = Model(s_in, preds)
    model = compile(model=model, optimizer=optimizer, lr=learning_rate,
                    dl_config=dl_config, loss=loss, n_classes=n_classes)
    print(wordEncoder.summary())
    print(model.summary())
    return model
def Hierarchical_Attention_GRU2(embedding_matrix, dl_config, n_classes=2, loss=None, learning_rate=None,
                                optimizer=None, seed_value=None):
    """Hierarchical attention network with BiGRU encoders (variant 2).

    Same wiring as Hierarchical_Attention_GRU: a word-level BiGRU + attention
    encoder applied per sentence via TimeDistributed, a sentence-level BiGRU
    + attention on top, dropout, and a sigmoid output.

    Args:
        embedding_matrix (np.ndarray): (vocab_size, embed_dim) pre-trained
            word vectors, used frozen.
        dl_config: config with max_sent_len, max_nb_sentences and an optional
            learning_rate (also updated by compile()).
        n_classes (int): number of classes; drives the default loss.
        loss: explicit loss, or None to derive it from n_classes.
        learning_rate (float): explicit LR; falls back to dl_config, then 0.001.
        optimizer: explicit optimizer, or None for compile()'s default Adam.
        seed_value (int): seed for the dropout layer.

    Returns:
        The compiled document-level keras Model.
    """
    max_sent_len = dl_config.max_sent_len
    max_nb_sentences = dl_config.max_nb_sentences
    vocab_size = embedding_matrix.shape[0]
    embed_dim = embedding_matrix.shape[1]
    if not learning_rate:
        if not dl_config.learning_rate:
            learning_rate = 0.001
        else:
            learning_rate = dl_config.learning_rate
    embedding_layer = Embedding(vocab_size, embed_dim, weights=[embedding_matrix],
                                input_length=max_sent_len, trainable=False, name='word_embedding')
    # Words level attention model
    word_input = Input(shape=(max_sent_len,), dtype='int32', name='word_input')
    word_sequences = embedding_layer(word_input)
    word_gru = Bidirectional(GRU(50, return_sequences=True), name='word_gru')(word_sequences)
    word_dense = Dense(100, activation='relu', name='word_dense')(word_gru)
    word_att, word_coeffs = AttentionLayer(embed_dim, True, name='word_attention')(word_dense)
    wordEncoder = Model(inputs=word_input, outputs=word_att)
    # Sentence level attention model
    sent_input = Input(shape=(max_nb_sentences, max_sent_len), dtype='int32', name='sent_input')
    sent_encoder = TimeDistributed(wordEncoder, name='sent_linking')(sent_input)
    sent_gru = Bidirectional(GRU(50, return_sequences=True), name='sent_gru')(sent_encoder)
    sent_dense = Dense(100, activation='relu', name='sent_dense')(sent_gru)
    sent_att, sent_coeffs = AttentionLayer(embed_dim, return_coefficients=True, name='sent_attention')(sent_dense)
    # BUG FIX: Dropout takes `seed=`, not `seed_value=` (cf. the sibling
    # builders); the old call raised TypeError as soon as the model was built.
    sent_drop = Dropout(0.5, name='sent_dropout', seed=seed_value)(sent_att)
    preds = Dense(1, activation='sigmoid', name='output')(sent_drop)
    # Model compile
    model = Model(sent_input, preds)
    model = compile(model=model,
                    optimizer=optimizer,
                    lr=learning_rate,
                    dl_config=dl_config,
                    loss=loss,
                    n_classes=n_classes)
    print(wordEncoder.summary())
    print(model.summary())
    return model
def Hierarchical_Attention_LSTM(embedding_matrix, dl_config, n_classes=2, loss=None, learning_rate=None,
                                optimizer=None, seed_value=None):
    """Hierarchical attention network built on BiLSTM encoders.

    A word-level BiLSTM + attention encoder summarizes each sentence; a
    sentence-level BiLSTM (with dropout) + attention summarizes the document,
    which a single sigmoid unit then scores. Returns the compiled model.
    """
    sent_len = dl_config.max_sent_len
    n_sents = dl_config.max_nb_sentences
    n_vocab = embedding_matrix.shape[0]
    n_embed = embedding_matrix.shape[1]
    # Explicit LR, else the config's, else 0.001 (falsy values treated alike).
    learning_rate = learning_rate or dl_config.learning_rate or 0.001
    embed = Embedding(n_vocab, n_embed, weights=[embedding_matrix],
                      input_length=sent_len, trainable=False, name='word_embedding')
    # Word-level encoder.
    w_in = Input(shape=(sent_len,), dtype='int32', name='word_input')
    w = embed(w_in)
    w = Bidirectional(LSTM(50, return_sequences=True), name='word_gru')(w)
    w = Dense(100, activation='relu', name='word_dense')(w)
    w_vec, _ = AttentionLayer(n_embed, True, name='word_attention')(w)
    wordEncoder = Model(inputs=w_in, outputs=w_vec)
    # Sentence-level encoder on top of the shared word encoder.
    s_in = Input(shape=(n_sents, sent_len), dtype='int32', name='sent_input')
    s = TimeDistributed(wordEncoder, name='sent_linking')(s_in)
    s = Bidirectional(LSTM(50, return_sequences=True), name='sent_gru')(s)
    s = Dropout(0.2, seed=seed_value)(s)
    s = Dense(100, activation='relu', name='sent_dense')(s)
    s_vec, _ = AttentionLayer(n_embed, return_coefficients=True, name='sent_attention')(s)
    s_vec = Dropout(0.5, name='sent_dropout', seed=seed_value)(s_vec)
    preds = Dense(1, activation='sigmoid', name='output')(s_vec)
    # Build and compile the document-level model.
    model = Model(s_in, preds)
    model = compile(model=model, optimizer=optimizer, lr=learning_rate,
                    dl_config=dl_config, loss=loss, n_classes=n_classes)
    print(wordEncoder.summary())
    print(model.summary())
    return model
def Hierarchical_Attention_LSTM2(embedding_matrix, dl_config, n_classes=2, loss=None, learning_rate=None,
                                 optimizer=None, seed_value=None):
    """Hierarchical attention network, regularized variant.

    Word level: frozen embedding -> BiLSTM -> GaussianNoise + Dropout ->
    Dense -> attention. Sentence level: TimeDistributed word encoder ->
    BiGRU -> Dropout -> Dense -> attention -> Dropout -> sigmoid output.
    Also writes model diagrams to model_plot1.png / model_plot2.png.
    """
    from tensorflow.keras.layers import GaussianNoise

    sent_len = dl_config.max_sent_len
    n_sents = dl_config.max_nb_sentences
    n_vocab = embedding_matrix.shape[0]
    n_embed = embedding_matrix.shape[1]
    # Explicit LR, else the config's, else 0.001 (falsy values treated alike).
    learning_rate = learning_rate or dl_config.learning_rate or 0.001
    embed = Embedding(n_vocab, n_embed, weights=[embedding_matrix],
                      input_length=sent_len, trainable=False, name='word_embedding')
    # Word-level encoder with noise + dropout regularization.
    w_in = Input(shape=(sent_len,), dtype='int32', name='word_input')
    w = embed(w_in)
    w = Bidirectional(LSTM(50, return_sequences=True), name='word_gru')(w)
    w = GaussianNoise(0.4)(w)
    w = Dropout(0.5, seed=seed_value)(w)
    w = Dense(100, activation='relu', name='word_dense')(w)
    w_vec, _ = AttentionLayer(n_embed, True, name='word_attention')(w)
    wordEncoder = Model(inputs=w_in, outputs=w_vec)
    # Sentence-level encoder (note: GRU here, LSTM at the word level).
    s_in = Input(shape=(n_sents, sent_len), dtype='int32', name='sent_input')
    s = TimeDistributed(wordEncoder, name='sent_linking')(s_in)
    s = Bidirectional(GRU(50, return_sequences=True), name='sent_gru')(s)
    s = Dropout(0.5, seed=seed_value)(s)
    s = Dense(100, activation='relu', name='sent_dense')(s)
    s_vec, _ = AttentionLayer(n_embed, return_coefficients=True, name='sent_attention')(s)
    s_vec = Dropout(0.5, name='sent_dropout', seed=seed_value)(s_vec)
    preds = Dense(1, activation='sigmoid', name='output')(s_vec)
    model = Model(s_in, preds)
    model = compile(model=model, optimizer=optimizer, lr=learning_rate,
                    dl_config=dl_config, loss=loss, n_classes=n_classes)
    print(wordEncoder.summary())
    print(model.summary())
    # plot_model is imported at module level; diagram both sub-model and model.
    plot_model(wordEncoder, to_file='model_plot1.png', show_shapes=True, show_layer_names=True)
    plot_model(model, to_file='model_plot2.png', show_shapes=True, show_layer_names=True)
    return model
def Hierarchical_Attention_LSTM3(embedding_matrix, dl_config, n_classes=2, loss=None, learning_rate=None,
                                 optimizer=None, seed_value=None):
    """Hierarchical attention network with wider BiLSTM encoders (variant 3).

    Word level: frozen embedding -> BiLSTM(100) -> Dense(200) -> attention.
    Sentence level: TimeDistributed word encoder -> BiLSTM(100) ->
    TimeDistributed Dense(200) -> attention -> sigmoid output. Unlike the
    other variants, this one applies no dropout or noise; `seed_value` is
    accepted only for signature parity.

    Args/returns: same contract as Hierarchical_Attention_LSTM.
    """
    max_sent_len = dl_config.max_sent_len
    max_nb_sentences = dl_config.max_nb_sentences
    vocab_size = embedding_matrix.shape[0]
    embed_dim = embedding_matrix.shape[1]
    if not learning_rate:
        if not dl_config.learning_rate:
            learning_rate = 0.001
        else:
            learning_rate = dl_config.learning_rate
    embedding_layer = Embedding(vocab_size, embed_dim, weights=[embedding_matrix],
                                input_length=max_sent_len, trainable=False, name='word_embedding')
    # Words level attention model
    # (removed an unused function-local GaussianNoise import and dead
    # commented-out layers left over from experimentation)
    word_input = Input(shape=(max_sent_len,), dtype='int32', name='word_input')
    word_sequences = embedding_layer(word_input)
    word_gru = Bidirectional(LSTM(100, return_sequences=True), name='word_gru')(word_sequences)
    word_dense = Dense(200, activation='relu', name='word_dense')(word_gru)
    word_att, word_coeffs = AttentionLayer(embed_dim, True, name='word_attention')(word_dense)
    wordEncoder = Model(inputs=word_input, outputs=word_att)
    # Sentence level attention model
    sent_input = Input(shape=(max_nb_sentences, max_sent_len), dtype='int32', name='sent_input')
    sent_encoder = TimeDistributed(wordEncoder, name='sent_linking')(sent_input)
    sent_gru = Bidirectional(LSTM(100, return_sequences=True), name='sent_gru')(sent_encoder)
    sent_dense = TimeDistributed(Dense(200, activation='relu', name='sent_dense'))(sent_gru)
    sent_att, sent_coeffs = AttentionLayer(embed_dim, return_coefficients=True, name='sent_attention')(sent_dense)
    preds = Dense(1, activation='sigmoid', name='output')(sent_att)
    # Model compile
    model = Model(sent_input, preds)
    model = compile(model=model,
                    optimizer=optimizer,
                    lr=learning_rate,
                    dl_config=dl_config,
                    loss=loss,
                    n_classes=n_classes)
    print(wordEncoder.summary())
    print(model.summary())
    return model
def Hierarchical_Attention_Context(embedding_matrix, dl_config, n_classes=2, loss=None, learning_rate=None,
                                   optimizer=None, seed_value=None):
    """Hierarchical network using AttentionWithContext at both levels.

    Word level: frozen embedding -> SpatialDropout1D -> BiLSTM(128) ->
    context attention. Document level: TimeDistributed word encoder ->
    SpatialDropout1D -> BiLSTM(128) -> context attention -> sigmoid output.
    Returns the compiled document-level model.
    """
    sent_len = dl_config.max_sent_len
    n_sents = dl_config.max_nb_sentences
    n_vocab = embedding_matrix.shape[0]
    n_embed = embedding_matrix.shape[1]
    # Explicit LR, else the config's, else 0.001 (falsy values treated alike).
    learning_rate = learning_rate or dl_config.learning_rate or 0.001
    embed = Embedding(n_vocab, n_embed, weights=[embedding_matrix],
                      input_length=sent_len, trainable=False, name='word_embedding')
    # Word-level encoder.
    w_in = Input(shape=(sent_len,), dtype='int32')
    w = embed(w_in)
    w = SpatialDropout1D(0.2, seed=seed_value)(w)
    w = Bidirectional(LSTM(128, return_sequences=True))(w)
    w_vec = AttentionWithContext()(w)
    wordEncoder = Model(w_in, w_vec)
    # Document-level encoder over the per-sentence vectors.
    d_in = Input(shape=(n_sents, sent_len), dtype='int32')
    d = TimeDistributed(wordEncoder)(d_in)
    d = SpatialDropout1D(0.2, seed=seed_value)(d)
    d = Bidirectional(LSTM(128, return_sequences=True))(d)
    d_vec = AttentionWithContext()(d)
    preds = Dense(1, activation='sigmoid')(d_vec)
    model = Model(d_in, preds)
    model = compile(model=model, optimizer=optimizer, lr=learning_rate,
                    dl_config=dl_config, loss=loss, n_classes=n_classes)
    print(wordEncoder.summary())
    print(model.summary())
    return model
def Bert_Dense(dl_config, n_classes=2, loss=None, learning_rate=None,
               optimizer=None, seed_value=None, static_bert=True, bert_name_or_path="bert-base-uncased", bert_config=False):
    """BERT encoder with a pooled dense classification head.

    Args:
        dl_config: config with max_sent_len and an optional learning_rate.
        n_classes (int): number of classes; must be >= 2.
        loss: explicit loss, or None to derive from n_classes in compile().
        learning_rate (float): explicit LR; falls back to dl_config, then 0.001.
        optimizer: explicit optimizer, or None for compile()'s default.
        seed_value: unused here; kept for signature parity with the other builders.
        static_bert (bool): if True, freeze model.layers[:4] so only the
            head trains.
        bert_name_or_path (str): HF model name or local checkpoint directory.
        bert_config: falsy to load the config automatically; truthy to read
            bert_config.json from the checkpoint dir and load PyTorch weights.

    Returns:
        The compiled keras Model over [ids, masks, segments] inputs.

    Raises:
        ValueError: if n_classes < 2 (previously this crashed later with a
            NameError because y_out was never assigned).
    """
    max_sent_len = dl_config.max_sent_len
    # Resolve LR: explicit arg, else config, else 0.001 (falsy treated alike).
    learning_rate = learning_rate or dl_config.learning_rate or 0.001
    idx = Input((max_sent_len), dtype="int32", name="input_idx")
    masks = Input((max_sent_len), dtype="int32", name="input_masks")
    segments = Input((max_sent_len), dtype="int32", name="input_segments")
    ## pre-trained bert
    if bert_config:
        bert_config = BertConfig.from_json_file(bert_name_or_path + '/bert_config.json')
        bert_model = TFBertModel.from_pretrained(bert_name_or_path, from_pt=True, config=bert_config)
    else:
        bert_model = TFBertModel.from_pretrained(bert_name_or_path)
    embedding = bert_model([idx, masks, segments])[0]  # last hidden state
    ## fine-tuning head
    x = GlobalAveragePooling1D()(embedding)
    x = Dense(100, activation="relu")(x)
    if n_classes == 2:
        y_out = Dense(1, activation='sigmoid')(x)
    elif n_classes > 2:
        y_out = Dense(n_classes, activation='softmax')(x)
    else:
        # BUG FIX: guard invalid class counts explicitly.
        raise ValueError("n_classes must be >= 2, got %r" % (n_classes,))
    model = Model([idx, masks, segments], y_out)
    if static_bert:
        # NOTE(review): assumes layers[:4] are the three Inputs plus the BERT
        # model — confirm against model.summary() if the topology changes.
        for layer in model.layers[:4]:
            layer.trainable = False
    model = compile(model=model,
                    optimizer=optimizer,
                    lr=learning_rate,
                    dl_config=dl_config,
                    loss=loss,
                    n_classes=n_classes)
    print(model.summary())
    return model
def Bert_LSTM(dl_config, n_classes=2, loss=None, learning_rate=None,
              optimizer=None, seed_value=None, static_bert=True, bert_name_or_path="bert-base-uncased", bert_config=False):
    # BERT encoder topped with a BiLSTM + pooled dense head.
    # NOTE(review): this excerpt ends right after y_out; the Model(...)
    # construction, freezing and compile() happen beyond this view.
    max_sent_len = dl_config.max_sent_len
    # Resolve the learning rate: explicit arg, else dl_config, else 0.001.
    if not learning_rate:
        if not dl_config.learning_rate:
            learning_rate = 0.001
        else:
            learning_rate = dl_config.learning_rate
    # The three standard BERT inputs: token ids, attention masks, segment ids.
    idx = Input((max_sent_len), dtype="int32", name="input_idx")
    masks = Input((max_sent_len), dtype="int32", name="input_masks")
    segments = Input((max_sent_len), dtype="int32", name="input_segments")
    ## pre-trained bert
    if bert_config:
        # Load PyTorch weights with an explicit config file from the checkpoint dir.
        bert_config = BertConfig.from_json_file(bert_name_or_path+ '/bert_config.json')
        bert_model = TFBertModel.from_pretrained(bert_name_or_path, from_pt=True, config = bert_config)
    else:
        bert_model = TFBertModel.from_pretrained(bert_name_or_path)
    embedding = bert_model([idx, masks, segments])[0]  # last hidden state
    ## fine-tuning
    x = Bidirectional(LSTM(50, return_sequences=True, recurrent_dropout=0.1))(embedding)
    x = Dropout(0.1, seed=seed_value)(x)
    x = GlobalAveragePooling1D()(x)
    x = Dense(62, activation="relu")(x)
    x = Dropout(0.2, seed=seed_value)(x)
    # Output head: 1 sigmoid unit for binary, softmax for multi-class.
    # NOTE(review): n_classes < 2 leaves y_out unassigned (same issue fixed in
    # Bert_Dense) — verify downstream.
    if n_classes==2:
        y_out = Dense(1, activation='sigmoid')(x)
    elif n_classes>2:
        y_out = Dense(n_classes, activation='softmax')(x)
| |
<gh_stars>1-10
import pandas as pd

## READ & WRITE
# Pickle
df = pd.read_pickle('psi.pickle')
df.to_pickle('normal.pkl')
# CSV
df = pd.read_csv('shenzhen_processed.csv', low_memory=False)
df = pd.read_csv('olympics.csv', index_col=0, skiprows=1) #take 1st col as index, and remove 1st row
df.to_csv('shenzhen_processed.csv', index=False)
df = pd.read_csv(file, usecols=['col1','col2']) #use only specific columns; can save a lot of memory
# APPENDING DF TO EXISTING CSV
# BUG FIX: to_csv returns None when given a path; `df = df.to_csv(...)` clobbered df
df.to_csv('my_csv.csv', mode='a', header=False)
# EXCEL
# reading excel has various differences compared to csv
# dtype of a col to str will convert NaN into 'nan', while csv preserves the NaN
# dtype of a col to str will not preserve numeric 0 padding, while csv preserves
df = pd.read_excel('shenzhen_processed.xlsx', sheet_name=0) #sheet_name starts from 0
df = pd.read_csv("P00000001-ALL.csv", nrows=20) # limit to only 20 rows
df.to_excel('output.xlsx', index=False)
# output multiple df in different Excel sheets
from pandas import ExcelWriter
writer = ExcelWriter(xls_path)
for n, df in enumerate(list_dfs):
    df.to_excel(writer, 'sheet%s' % n)  # BUG FIX: was `df1`, which ignored the loop variable
writer.save()  # NOTE(review): deprecated in recent pandas; use writer.close()
# TXT
utown=pd.read_table('university_towns.txt', sep=',', header=None)
df1 = pd.read_table('training_text', sep='\|\|', engine='python', skiprows=1, names=["ID","Text"]) #a delimiter longer than 1 char is treated as a regex; use \ to escape
# convert a clip board into dataframe!!!
pd.read_clipboard()
# JSON
df=pd.read_json(path)
df.to_json('/Users/xxx/Desktop/d.json') # display by index
out = df.to_json(orient="records") # display by row, key=colname, value=cell value
dict_ = df.to_dict(orient="list") # display by cols, key=colname, value=list of values
# DBF
from simpledbf import Dbf5
dbf = Dbf5('test.dbf')
df = dbf.to_dataframe()
# Sample the data to speed up computation
df = df.sample(frac=0.1, random_state=10)
# set column as string
df = pd.read_csv('sample.csv', dtype={'ID': object}) #no diff if you '' the data type
# encoding error, eg: 'utf-8' codec can't decode byte 0x92 in position 763: invalid start byte
# use below to decode
df = pd.read_csv(email, encoding = "ISO-8859-1")
#--------------------------------------------------------
## SETTINGS
pd.set_option('display.max_columns',1) # max number of columns to display (use None for no limit)
pd.set_option('display.max_rows', None) # show all rows
pd.set_option('display.max_colwidth', -1) # no limit to column width (-1 is deprecated in newer pandas; use None)
pd.reset_option('all') # reset set options
#--------------------------------------------------------
## PERFORMANCE
first_five = pd.read_csv('loans_2007.csv', nrows=5) #read only first 5 rows
df._data #see how the internal BlockManager groups the dataframe by dtype (private API)
df.info(memory_usage="deep") # display memory usage, dtype, null/non-null
df.memory_usage(deep=True) # display only memory usage for each column
# step1: filter dataframe to select certain dtype
# NOTE(review): the name says "obj" but this selects the integer columns
df_obj = df.select_dtypes(include=['integer'])
# step2: auto-determine optimal dtype so that memory usage is minimised; eg change from int64 to int16
df['columnNm'] = pd.to_numeric(df['columnNm'], downcast='integer')
df['columnNm'].dtype
# convert objects into category to minimise memory usage as its converted to int backend
# only use this when unique values <50% of rows & that there is no need for numeric calculations
df['columnNm'] = df['columnNm'].astype('category')
#--------------------------------------------------------
## BUILDING A NEW DATAFRAME
#build dataframe from a for loop
records = []  # BUG FIX: was named `list`, shadowing the builtin
for x, i in enumerate(df.columns):  # how many nan in each column? Value
    records.append({'column': x, 'nan_count': df[i].isnull().values.sum(), 'variable': i})
df_nan = pd.DataFrame(records)
#create random dataframe
df1 = pd.DataFrame(np.random.randint(1, 5, (10,2)), columns=['a','b']) #10 rows, 2 columns, values 1..4 (high bound exclusive)
#from a dictionary
df = pd.DataFrame()
df['Date'] = x.keys()  # BUG FIX: was `newdf['Date']` while the rest used `df` (x is a dictionary here)
df['DateValue'] = x.values()
# build a df with just one row of data. Note the nested list to change it to row
prediction = pd.DataFrame(
    [[4, 21, 1, 5, 91, 1984]],
    columns=['flat_type_code', 'town_code', 'flat_model_code',
             'storey_range_code', 'floor_area_sqm', 'lease_commence_date'])
# from dictionary
d = {'Desert': 2345, 'Mountain': 8764, 'Water': 6689, 'Land': 7332, 'Forest': 1050, 'Snow': 3741,
     'Is_Raining_ec': 0, 'Had_A_Good_Sleep_ec': 0, 'Average_Temperature': 40}
df = pd.DataFrame([list(d.values())], columns=list(d.keys()))
# from nested list (with headers)
df = pd.DataFrame(data[1:],columns=data[0])
#duplicate a dataframe
df2 = df.copy()
#--------------------------------------------------------
## EXPLORATORY
df.info() #total non-null rows, dtypes, columns
df.shape #total number of rows by columns
df.size #total number of cells (rows x columns), not rows
len(df) #total number of rows
len(df.columns) #total number of columns
df.head(2) #top 2 rows
df.dtypes #format
df.describe() #mean, std, count, etc. only numeric formated columns
#--------------------------------------------------------
## FORMAT
df.dtypes
df['hour'] = df['hour'].astype('int64')
df['text'] = df['text'].astype('str') # string will not preserve NaN, unlike object
df['col3'] = df['col2'].astype('category') # category type has int code in the backend
#coerce: any errors will be converted to NaN (NaT for datetimes)
df['price'] = pd.to_numeric(df['price'], errors='coerce')
df['Time'] = pd.to_datetime(df['Time'], errors='coerce')
#--------------------------------------------------------
## CHUNK-SIZE
# Process a large CSV incrementally; each chunk is an independent DataFrame.
dtypes = {"ConstituentBeginDate": "float", "ConstituentEndDate": "float"}
chunk_iter = pd.read_csv("moma.csv", chunksize=250, dtype=dtypes) #each chunk is 250 rows
lifespans = []
for chunk in chunk_iter:
    diff = chunk['ConstituentEndDate'] - chunk['ConstituentBeginDate']
    lifespans.append(diff)
lifespans_dist = pd.concat(lifespans) # stitch the per-chunk results back together
print(lifespans_dist)
#--------------------------------------------------------
## Using SQL
# Connection to database
# sqlite connection
import sqlite3
conn = sqlite3.connect(sqlitePath)
df= pd.read_sql_query("SELECT * FROM table", conn)
from sqlalchemy import create_engine
engine = create_engine('sqlite:///my_db.sqlite')  # BUG FIX: only `create_engine` was imported, not the `sqlalchemy` module
# postgres connection
import psycopg2
conn = psycopg2.connect(database="postgres", user="postgres", password="***", host="127.0.0.1", port="5432")
# OR use sqlalchemy, which supports most databases
# database engine + database connector package://username:password @ host ip / database? client encoding
# latin1 or utf8 depending on client encoding
from sqlalchemy import create_engine
import psycopg2
conn = create_engine('postgresql+psycopg2://postgres:password@localhost:5432/postgres?client_encoding=latin1') # postgres
conn = create_engine('mysql+pymysql://{}:{}@{}:{}/{}'.format(username, password, address, port, db_name)) # mysql (BUG FIX: driver was misspelled `pymysal`)
query = ''' SELECT * FROM customer '''
# reading from sql
df = pd.read_sql(query, conn)
df = pd.read_sql_query(query, conn)
# upload dataframe to database as new table by default (use if_exists for appending), only available using sqlalchemy as engine
# if_exists can 'replace' entire table, default is fail
df.to_sql(name='wsg_ap_list3', con=conn, index=False, if_exists='append') #OR
df.to_sql('pa', conn, if_exists='append', index=False, dtype='text') # NOTE(review): to_sql's `dtype` should be a dict of column -> SQL type
# upload in chunks of 20000 rows at a time
df.to_sql('table', engine, chunksize=20000)
#--------------------------------------------------------
## INDEX NAMES
df.index
df['country'] = df.index #transfer index to a column
df = df.set_index('Gold') #set index from a column
df = df.set_index(['STNAME', 'CTYNAME']) #can set hierarchical index
df.loc['Michigan', 'Washtenaw County'] #querying from index
df = df.reset_index(drop=True) #reset index; drop=True to remove original index as a column
df.loc['Animal'] #index name
df.iloc[5:10] #index location; row number
#iloc can also detect by both index & column
df.iloc[1,0] #index 1, column 0
df.iloc[:,0] #all index, column 0
# ix indexing works just the same as .loc when passed strings
# NOTE(review): .ix was deprecated in pandas 0.20 and removed in 1.0 — use .loc/.iloc
df.ix[['Andrade']] == df.loc[['Andrade']]
# ix indexing works the same as .iloc when passed integers.
df.ix[[33]] == df.iloc[[33]]
#--------------------------------------------------------
## COLUMNS NAMES
## identify column names
df.columns
df.columns[:2] ## first 2 columns
## show column names and position
x = 0
for i in df.columns:
    print(x, i)
    x += 1
#renaming columns
df.columns = ['newcolumn1', 'newcolumn2', 'newcolumn3'] #easiest way to change, but error if total columns does not match
df.name = 'Original' #changing pd.Series name
df2 = df.rename(columns={'diam_circle_image':'diameter','depth_rimfloor_topog':'depth', 'number_layers':'layers'})
hdata2.rename(columns=dict(zip(hdata2.columns,date_change.tolist())), inplace=True) #zip two lists into a rename dictionary
df.columns = map(str.lower, df.columns) # change to lower case
#drop columns
df.drop(df.columns[[0, 1, 3]], axis=1) # returns a copy; not in-place
df.drop('column_name', axis=1, inplace=True) #note that if inplace value is not set to true, need to reassign a new df
del df['column_name']
#choose column names by condition
col = [i for i in df.columns if i[-4:]=='2012']
#concat two lists of columns together
df[df.columns[1:11] | df.columns[12:14]] # NOTE(review): `|` on an Index is set union (deprecated in newer pandas; prefer .union)
#ordering columns in a df
df[sorted(df.columns.tolist())].head(3)
#specific ordering of columns
df = df[['a', 'b', 'd', 'c']]
#--------------------------------------------------------
## CREATE DATAFRAMES BASED ON UNIQUE COLUMN CATEGORY VALUE, STORED IN A DICT
# https://datascience.stackexchange.com/questions/29825/create-new-data-frames-from-existing-data-frame-based-on-unique-column-values
# store dataframes in dict, based on unique column value 'company_id'
dict_of_companies = {key: value for key, value in df.groupby('company_id')}
# get all keys
keys = [i for i in dict_of_companies]
# print each dataframe out
for i in keys:
    print(dict_of_companies[i])
#--------------------------------------------------------
## SET VALUES PER CELL, GOOD FOR ITERATION
# NOTE(review): DataFrame.set_value was deprecated in 0.21 and removed in 1.0; use .at/.iat
df.set_value(i, 'Y_svy', svy[1]) # index, column name, value
# new alternative
df.at[4, 'B'] = 10 # index, column name = value
df.at[4, 'B'] #querying a cell
# >>> 10
#--------------------------------------------------------
## COUNTING
df['EVENT_TYPE'].value_counts()
# using groupby
df.groupby(['Fruit','Name'])['Number'].sum() #sum of Number grouping by fruit and name
df.groupby('name')['activity'].value_counts() #multi-dimension counts
df['number_layers'].value_counts(normalize=True)*100 # by percentage
df.describe()
#--------------------------------------------------------
## DELETE ROWS
df.drop(df.index[[2,3,10,20]]) # drop rows by position; returns a copy
#--------------------------------------------------------
## NAN NULL VALUES
# note that NAN is only for numerical null values
df.isnull().any().any() # is there any nan in entire dataframe? Boolean
df.isnull().values.sum() #total number of nan in dataframe? Value
df.isnull().any() # which column is the nan in? Boolean
df[df['Timestamp'].isnull()] #filter rows with nan
# how many nan in each column? Value
df.isnull().sum() # by counts
df.isnull().sum() / len(df) # by percent
#filter dataframe to only rows NaN of a specific column
df[df['colnm'].isnull()]
#drop NaN
df3 = df2.dropna() #drop all rows with nan in any columns
df3 = df2.dropna(how='all') #drop only rows with all nan values
df3 = df2.dropna(thresh=2) #keep only rows with at least 2 non-nan values (BUG FIX: keyword was misspelled `threa`)
df.dropna(subset=['x277_2012'],inplace=True) #drop all rows with nan in specific columns
df[df['Col2'].notnull()] # same as above
#fill NaN
df = df.fillna(value=99) #change NaN a value
df = df.fillna(method='ffill') #forward filling, note need to sort index
df = df.fillna(method='bfill') #back filling, note to sort index
df['colname'].interpolate(method='linear', limit=2) #interpolation, very useful for timeseries
#set value as NaN
import numpy as np
df2=df2.replace('nan',np.nan)
df.replace({'99':np.nan}, inplace=True) #multiple rows
#select null within lambda
df['colnm'] = df['colnm'].apply(lambda x: '' if pd.isnull(x) else x) # BUG FIX: removed a stray ')' that made this line a SyntaxError
#--------------------------------------------------------
# CHECK NEGATIVE VALUES
#entire df
# BUG FIX: `any(df<0)` iterates the COLUMN LABELS of the boolean frame, so it
# tested the truthiness of the column names rather than the values.
(df < 0).any().any()
# each columns
for i in cfoul.columns:
    if any(cfoul[i]<0) == True:  # iterating a Series yields its values, so this one is fine
        print(i)
#--------------------------------------------------------
## SORTING
# sort by index
df.sort_index(ascending=False) # reverse order
df.sort_values #note no brackets: just the bound method, not a call
# sort by value (column)
df.sort_values(ascending=False) # NOTE(review): DataFrame.sort_values requires `by=`; this form only works on a Series
# sort 1 column out of many
df2=df[['country','x277_2012']]
df2.sort_values('x277_2012',ascending=False)
# sort multiple columns
df1.sort_values(['a', 'b'], ascending=[True, False])
df3[['a','b']].sort_values(['a','b'], ascending=[True, True]) # sort a first then b
#--------------------------------------------------------
## ENCODING
encode = {'Y':0, 'N':1}
df['Hired'] = df['Hired'].map(encode)
# Convert dummies back to single column
wild_dummies = df[['Wilderness_Area1','Wilderness_Area2','Wilderness_Area3','Wilderness_Area4']]
wild = wild_dummies.idxmax(axis=1)
wild.name = 'Wilderness' # set pd.Series name
wild = pd.concat([df2['Cover_Type'],wild], axis=1)
#--------------------------------------------------------
## STRING MANIPULATIONS
df['column1'].str.len() # length of | |
# Copyright (c) 2020-2021 impersonator.org authors (<NAME> and <NAME>). All rights reserved.
import torch
from torch.nn import functional as F
import numpy as np
def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
    """Convert 3x4 rotation matrix to 4d quaternion vector.

    This algorithm is based on the algorithm described in
    https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201

    Four candidate quaternions are computed (one per diagonal-dominance
    case) and the numerically stable one is selected per batch element
    with float 0/1 masks, avoiding data-dependent Python branching.

    Args:
        rotation_matrix (Tensor): the rotation matrix to convert.
        eps (float): threshold used when classifying the diagonal entries
            into cases (it stands in for a ``< 0`` test on ``r22``); it is
            not a division guard.
    Return:
        Tensor: the rotation in quaternion, ``(w, x, y, z)`` ordering.
    Shape:
        - Input: :math:`(N, 3, 4)`
        - Output: :math:`(N, 4)`
    Example:
        >>> input = torch.rand(4, 3, 4)  # Nx3x4
        >>> output = tgm.rotation_matrix_to_quaternion(input)  # Nx4
    """
    if not torch.is_tensor(rotation_matrix):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(rotation_matrix)))
    if len(rotation_matrix.shape) > 3:
        raise ValueError(
            "Input size must be a three dimensional tensor. Got {}".format(
                rotation_matrix.shape))
    if not rotation_matrix.shape[-2:] == (3, 4):
        raise ValueError(
            "Input size must be a N x 3 x 4 tensor. Got {}".format(
                rotation_matrix.shape))
    # Transposed view: rmat_t[:, i, j] == rotation_matrix[:, j, i].
    rmat_t = torch.transpose(rotation_matrix, 1, 2)
    # Float (0./1.) case masks built from the diagonal entries.
    mask_d2 = (rmat_t[:, 2, 2] < eps).float()
    mask_d0_d1 = (rmat_t[:, 0, 0] > rmat_t[:, 1, 1]).float()
    mask_d0_nd1 = (rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]).float()
    # Candidate quaternion when the x component dominates.
    t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q0 = torch.stack([rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2]], -1)
    t0_rep = t0.repeat(4, 1).t()
    # Candidate quaternion when the y component dominates.
    t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q1 = torch.stack([rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]], -1)
    t1_rep = t1.repeat(4, 1).t()
    # Candidate quaternion when the z component dominates.
    t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q2 = torch.stack([rmat_t[:, 0, 1] - rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2],
                      rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2], -1)
    t2_rep = t2.repeat(4, 1).t()
    # Candidate quaternion when the trace is large (w dominates).
    t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q3 = torch.stack([t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] - rmat_t[:, 1, 0]], -1)
    t3_rep = t3.repeat(4, 1).t()
    # Exactly one of these four masks is 1 for each batch element.
    mask_c0 = mask_d2 * mask_d0_d1
    mask_c1 = mask_d2 * (1 - mask_d0_d1)
    mask_c2 = (1 - mask_d2) * mask_d0_nd1
    mask_c3 = (1 - mask_d2) * (1 - mask_d0_nd1)
    mask_c0 = mask_c0.view(-1, 1).type_as(q0)
    mask_c1 = mask_c1.view(-1, 1).type_as(q1)
    mask_c2 = mask_c2.view(-1, 1).type_as(q2)
    mask_c3 = mask_c3.view(-1, 1).type_as(q3)
    # Masked blend of the candidates, then normalization: each candidate
    # must be divided by sqrt(t_i) and scaled by 0.5.
    q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3
    q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 +  # noqa
                    t2_rep * mask_c2 + t3_rep * mask_c3)  # noqa
    q *= 0.5
    return q
def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
    """Convert a quaternion ``(w, x, y, z)`` to an angle-axis vector.

    Adapted from the ceres C++ library: ceres-solver/include/ceres/rotation.h

    Args:
        quaternion (torch.Tensor): tensor with quaternions.

    Returns:
        torch.Tensor: tensor with angle axis of rotation.

    Shape:
        - Input: :math:`(*, 4)` where `*` means, any number of dimensions
        - Output: :math:`(*, 3)`

    Example:
        >>> quaternion = torch.rand(2, 4)  # Nx4
        >>> angle_axis = tgm.quaternion_to_angle_axis(quaternion)  # Nx3
    """
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError("Input must be a tensor of shape Nx4 or 4. Got {}"
                         .format(quaternion.shape))
    # Split the scalar part from the vector part.
    cos_theta = quaternion[..., 0]
    vec = quaternion[..., 1:]
    qx, qy, qz = vec[..., 0], vec[..., 1], vec[..., 2]
    sin_sq = qx * qx + qy * qy + qz * qz
    sin_theta = torch.sqrt(sin_sq)
    # Negating both atan2 arguments when w < 0 keeps the recovered
    # rotation angle canonical.
    angle = 2.0 * torch.where(
        cos_theta < 0.0,
        torch.atan2(-sin_theta, -cos_theta),
        torch.atan2(sin_theta, cos_theta))
    # Near-zero rotations: sin(theta) ~ theta, so the scale tends to 2.
    scale = torch.where(sin_sq > 0.0,
                        angle / sin_theta,
                        2.0 * torch.ones_like(sin_theta))
    return vec * scale.unsqueeze(-1)
def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:
    """Convert an angle-axis vector to a quaternion ``(w, x, y, z)``.

    Adapted from the ceres C++ library: ceres-solver/include/ceres/rotation.h

    Args:
        angle_axis (torch.Tensor): tensor with angle axis vectors.

    Returns:
        torch.Tensor: tensor with quaternions.

    Shape:
        - Input: :math:`(*, 3)` where `*` means, any number of dimensions
        - Output: :math:`(*, 4)`

    Example:
        >>> angle_axis = torch.rand(2, 3)  # Nx3
        >>> quaternion = tgm.angle_axis_to_quaternion(angle_axis)  # Nx4
    """
    if not torch.is_tensor(angle_axis):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(angle_axis)))
    if not angle_axis.shape[-1] == 3:
        raise ValueError("Input must be a tensor of shape Nx3 or 3. Got {}"
                         .format(angle_axis.shape))
    # Keep a trailing singleton dim so everything broadcasts per element.
    ax = angle_axis[..., 0:1]
    ay = angle_axis[..., 1:2]
    az = angle_axis[..., 2:3]
    theta_sq = ax * ax + ay * ay + az * az
    theta = torch.sqrt(theta_sq)
    half_theta = theta * 0.5
    nonzero = theta_sq > 0.0
    ones = torch.ones_like(half_theta)
    # As theta -> 0: sin(theta/2)/theta -> 1/2 and cos(theta/2) -> 1.
    scale = torch.where(nonzero, torch.sin(half_theta) / theta, 0.5 * ones)
    w = torch.where(nonzero, torch.cos(half_theta), ones)
    return torch.cat([w, angle_axis * scale], dim=-1)
def rotation_matrix_to_angle_axis(rotation_matrix):
    """Convert a 3x4 rotation matrix to a Rodrigues (angle-axis) vector.

    The conversion runs in two stages, matrix -> quaternion -> angle-axis,
    reusing the dedicated helper for each stage.

    Args:
        rotation_matrix (Tensor): rotation matrix.

    Returns:
        Tensor: Rodrigues vector transformation.

    Shape:
        - Input: :math:`(N, 3, 4)`
        - Output: :math:`(N, 3)`

    Example:
        >>> input = torch.rand(2, 3, 4)  # Nx3x4
        >>> output = tgm.rotation_matrix_to_angle_axis(input)  # Nx3
    """
    # todo add check that matrix is a valid rotation matrix
    return quaternion_to_angle_axis(
        rotation_matrix_to_quaternion(rotation_matrix))
def angle_axis_to_rotation_matrix(angle_axis):
    """Convert axis-angle rotation vectors to 4x4 homogeneous rotation
    matrices.

    Args:
        angle_axis (Tensor): tensor of 3d vector of axis-angle rotations.

    Returns:
        Tensor: tensor of 4x4 rotation matrices.

    Shape:
        - Input: :math:`(N, 3)`
        - Output: :math:`(N, 4, 4)`

    Example:
        >>> input = torch.rand(1, 3)  # Nx3
        >>> output = tgm.angle_axis_to_rotation_matrix(input)  # Nx4x4
    """
    def _rodrigues(vecs, sq_angle, eps=1e-6):
        # Rodrigues' formula; eps guards the axis normalization against a
        # division by zero for near-zero angles.
        angle = torch.sqrt(sq_angle)
        axis = vecs / (angle + eps)
        ux, uy, uz = torch.chunk(axis, 3, dim=1)
        c = torch.cos(angle)
        s = torch.sin(angle)
        one_c = 1.0 - c
        entries = [c + ux * ux * one_c,
                   ux * uy * one_c - uz * s,
                   uy * s + ux * uz * one_c,
                   uz * s + ux * uy * one_c,
                   c + uy * uy * one_c,
                   -ux * s + uy * uz * one_c,
                   -uy * s + ux * uz * one_c,
                   ux * s + uy * uz * one_c,
                   c + uz * uz * one_c]
        return torch.cat(entries, dim=1).view(-1, 3, 3)

    def _taylor(vecs):
        # First-order (small-angle) approximation: R ~ I + [v]_x.
        vx, vy, vz = torch.chunk(vecs, 3, dim=1)
        one = torch.ones_like(vx)
        entries = [one, -vz, vy,
                   vz, one, -vx,
                   -vy, vx, one]
        return torch.cat(entries, dim=1).view(-1, 3, 3)

    # Squared rotation angle per batch element, shape (N, 1).
    row = torch.unsqueeze(angle_axis, dim=1)
    sq_angle = torch.squeeze(torch.matmul(row, row.transpose(1, 2)), dim=1)
    exact = _rodrigues(angle_axis, sq_angle)
    approx = _taylor(angle_axis)
    # Select per batch element: the exact formula away from zero, the
    # Taylor expansion near zero.
    eps = 1e-6
    use_exact = (sq_angle > eps).view(-1, 1, 1).to(sq_angle.device)
    w_exact = use_exact.type_as(sq_angle)
    w_approx = (use_exact == False).type_as(sq_angle)  # noqa
    # Start from identity so the homogeneous part (last row/column) is set.
    batch = angle_axis.shape[0]
    out = torch.eye(4).to(angle_axis.device).type_as(angle_axis)
    out = out.view(1, 4, 4).repeat(batch, 1, 1)
    out[..., :3, :3] = w_exact * exact + w_approx * approx
    return out  # Nx4x4
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
''' Calculates the rotation matrices for a batch of rotation vectors
Parameters
----------
rot_vecs: torch.tensor Nx3
array of N axis-angle vectors
Returns
-------
R: torch.tensor Nx3x3
The rotation matrices for | |
value is 5MB.
:param pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs'] cloudwatch_logging_options: The CloudWatch Logging Options for the delivery stream. More details are given below
:param pulumi.Input[str] cluster_endpoint: The endpoint to use when communicating with the cluster. Conflicts with `domain_arn`.
:param pulumi.Input[str] domain_arn: The ARN of the Amazon ES domain. The IAM role must have permission for `DescribeElasticsearchDomain`, `DescribeElasticsearchDomains`, and `DescribeElasticsearchDomainConfig` after assuming `RoleARN`. The pattern needs to be `arn:.*`. Conflicts with `cluster_endpoint`.
:param pulumi.Input[str] index_rotation_period: The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are `NoRotation`, `OneHour`, `OneDay`, `OneWeek`, and `OneMonth`. The default value is `OneDay`.
:param pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs'] processing_configuration: The data processing configuration. More details are given below.
:param pulumi.Input[int] retry_duration: After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
:param pulumi.Input[str] s3_backup_mode: Defines how documents should be delivered to Amazon S3. Valid values are `FailedDocumentsOnly` and `AllDocuments`. Default value is `FailedDocumentsOnly`.
:param pulumi.Input[str] type_name: The Elasticsearch type name with maximum length of 100 characters.
:param pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs'] vpc_config: The VPC configuration for the delivery stream to connect to Elastic Search associated with the VPC. More details are given below
"""
pulumi.set(__self__, "index_name", index_name)
pulumi.set(__self__, "role_arn", role_arn)
if buffering_interval is not None:
pulumi.set(__self__, "buffering_interval", buffering_interval)
if buffering_size is not None:
pulumi.set(__self__, "buffering_size", buffering_size)
if cloudwatch_logging_options is not None:
pulumi.set(__self__, "cloudwatch_logging_options", cloudwatch_logging_options)
if cluster_endpoint is not None:
pulumi.set(__self__, "cluster_endpoint", cluster_endpoint)
if domain_arn is not None:
pulumi.set(__self__, "domain_arn", domain_arn)
if index_rotation_period is not None:
pulumi.set(__self__, "index_rotation_period", index_rotation_period)
if processing_configuration is not None:
pulumi.set(__self__, "processing_configuration", processing_configuration)
if retry_duration is not None:
pulumi.set(__self__, "retry_duration", retry_duration)
if s3_backup_mode is not None:
pulumi.set(__self__, "s3_backup_mode", s3_backup_mode)
if type_name is not None:
pulumi.set(__self__, "type_name", type_name)
if vpc_config is not None:
pulumi.set(__self__, "vpc_config", vpc_config)
    @property
    @pulumi.getter(name="indexName")
    def index_name(self) -> pulumi.Input[str]:
        """
        The Elasticsearch index name.
        """
        return pulumi.get(self, "index_name")
    @index_name.setter
    def index_name(self, value: pulumi.Input[str]):
        """Set the Elasticsearch index name."""
        pulumi.set(self, "index_name", value)
    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> pulumi.Input[str]:
        """
        The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The pattern needs to be `arn:.*`.
        """
        return pulumi.get(self, "role_arn")
    @role_arn.setter
    def role_arn(self, value: pulumi.Input[str]):
        """Set the ARN of the IAM role assumed by Firehose."""
        pulumi.set(self, "role_arn", value)
    @property
    @pulumi.getter(name="bufferingInterval")
    def buffering_interval(self) -> Optional[pulumi.Input[int]]:
        """
        Buffer incoming data for the specified period of time, in seconds between 60 to 900, before delivering it to the destination. The default value is 300s.
        """
        return pulumi.get(self, "buffering_interval")
    @buffering_interval.setter
    def buffering_interval(self, value: Optional[pulumi.Input[int]]):
        """Set the buffering interval in seconds (60-900)."""
        pulumi.set(self, "buffering_interval", value)
    @property
    @pulumi.getter(name="bufferingSize")
    def buffering_size(self) -> Optional[pulumi.Input[int]]:
        """
        Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB.
        """
        return pulumi.get(self, "buffering_size")
    @buffering_size.setter
    def buffering_size(self, value: Optional[pulumi.Input[int]]):
        """Set the buffering size in MBs (1-100)."""
        pulumi.set(self, "buffering_size", value)
    @property
    @pulumi.getter(name="cloudwatchLoggingOptions")
    def cloudwatch_logging_options(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs']]:
        """
        The CloudWatch Logging Options for the delivery stream. More details are given below
        """
        return pulumi.get(self, "cloudwatch_logging_options")
    @cloudwatch_logging_options.setter
    def cloudwatch_logging_options(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs']]):
        """Set the CloudWatch logging options."""
        pulumi.set(self, "cloudwatch_logging_options", value)
    @property
    @pulumi.getter(name="clusterEndpoint")
    def cluster_endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        The endpoint to use when communicating with the cluster. Conflicts with `domain_arn`.
        """
        return pulumi.get(self, "cluster_endpoint")
    @cluster_endpoint.setter
    def cluster_endpoint(self, value: Optional[pulumi.Input[str]]):
        """Set the cluster endpoint (conflicts with `domain_arn`)."""
        pulumi.set(self, "cluster_endpoint", value)
    @property
    @pulumi.getter(name="domainArn")
    def domain_arn(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN of the Amazon ES domain. The IAM role must have permission for `DescribeElasticsearchDomain`, `DescribeElasticsearchDomains`, and `DescribeElasticsearchDomainConfig` after assuming `RoleARN`. The pattern needs to be `arn:.*`. Conflicts with `cluster_endpoint`.
        """
        return pulumi.get(self, "domain_arn")
    @domain_arn.setter
    def domain_arn(self, value: Optional[pulumi.Input[str]]):
        """Set the Amazon ES domain ARN (conflicts with `cluster_endpoint`)."""
        pulumi.set(self, "domain_arn", value)
    @property
    @pulumi.getter(name="indexRotationPeriod")
    def index_rotation_period(self) -> Optional[pulumi.Input[str]]:
        """
        The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are `NoRotation`, `OneHour`, `OneDay`, `OneWeek`, and `OneMonth`. The default value is `OneDay`.
        """
        return pulumi.get(self, "index_rotation_period")
    @index_rotation_period.setter
    def index_rotation_period(self, value: Optional[pulumi.Input[str]]):
        """Set the Elasticsearch index rotation period."""
        pulumi.set(self, "index_rotation_period", value)
    @property
    @pulumi.getter(name="processingConfiguration")
    def processing_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs']]:
        """
        The data processing configuration. More details are given below.
        """
        return pulumi.get(self, "processing_configuration")
    @processing_configuration.setter
    def processing_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs']]):
        """Set the data processing configuration."""
        pulumi.set(self, "processing_configuration", value)
    @property
    @pulumi.getter(name="retryDuration")
    def retry_duration(self) -> Optional[pulumi.Input[int]]:
        """
        After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
        """
        return pulumi.get(self, "retry_duration")
    @retry_duration.setter
    def retry_duration(self, value: Optional[pulumi.Input[int]]):
        """Set the retry duration in seconds (0-7200)."""
        pulumi.set(self, "retry_duration", value)
    @property
    @pulumi.getter(name="s3BackupMode")
    def s3_backup_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Defines how documents should be delivered to Amazon S3. Valid values are `FailedDocumentsOnly` and `AllDocuments`. Default value is `FailedDocumentsOnly`.
        """
        return pulumi.get(self, "s3_backup_mode")
    @s3_backup_mode.setter
    def s3_backup_mode(self, value: Optional[pulumi.Input[str]]):
        """Set the S3 backup mode."""
        pulumi.set(self, "s3_backup_mode", value)
    @property
    @pulumi.getter(name="typeName")
    def type_name(self) -> Optional[pulumi.Input[str]]:
        """
        The Elasticsearch type name with maximum length of 100 characters.
        """
        return pulumi.get(self, "type_name")
    @type_name.setter
    def type_name(self, value: Optional[pulumi.Input[str]]):
        """Set the Elasticsearch type name."""
        pulumi.set(self, "type_name", value)
    @property
    @pulumi.getter(name="vpcConfig")
    def vpc_config(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs']]:
        """
        The VPC configuration for the delivery stream to connect to Elastic Search associated with the VPC. More details are given below
        """
        return pulumi.get(self, "vpc_config")
    @vpc_config.setter
    def vpc_config(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs']]):
        """Set the VPC configuration."""
        pulumi.set(self, "vpc_config", value)
@pulumi.input_type
class FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs:
    """CloudWatch logging options for the Elasticsearch destination of a
    Firehose delivery stream.
    """
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 log_group_name: Optional[pulumi.Input[str]] = None,
                 log_stream_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[bool] enabled: Enables or disables the logging. Defaults to `false`.
        :param pulumi.Input[str] log_group_name: The CloudWatch group name for logging. This value is required if `enabled` is true.
        :param pulumi.Input[str] log_stream_name: The CloudWatch log stream name for logging. This value is required if `enabled` is true.
        """
        # Only store arguments that were explicitly provided.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if log_group_name is not None:
            pulumi.set(__self__, "log_group_name", log_group_name)
        if log_stream_name is not None:
            pulumi.set(__self__, "log_stream_name", log_stream_name)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables or disables the logging. Defaults to `false`.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        """Set whether logging is enabled."""
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="logGroupName")
    def log_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The CloudWatch group name for logging. This value is required if `enabled` is true.
        """
        return pulumi.get(self, "log_group_name")
    @log_group_name.setter
    def log_group_name(self, value: Optional[pulumi.Input[str]]):
        """Set the CloudWatch log group name."""
        pulumi.set(self, "log_group_name", value)
    @property
    @pulumi.getter(name="logStreamName")
    def log_stream_name(self) -> Optional[pulumi.Input[str]]:
        """
        The CloudWatch log stream name for logging. This value is required if `enabled` is true.
        """
        return pulumi.get(self, "log_stream_name")
    @log_stream_name.setter
    def log_stream_name(self, value: Optional[pulumi.Input[str]]):
        """Set the CloudWatch log stream name."""
        pulumi.set(self, "log_stream_name", value)
@pulumi.input_type
class FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs:
    """Data processing configuration for the Elasticsearch destination of a
    Firehose delivery stream.
    """
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 processors: Optional[pulumi.Input[Sequence[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs']]]] = None):
        """
        :param pulumi.Input[bool] enabled: Enables or disables data processing.
        :param pulumi.Input[Sequence[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs']]] processors: Array of data processors. More details are given below
        """
        # Only store arguments that were explicitly provided.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if processors is not None:
            pulumi.set(__self__, "processors", processors)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables or disables data processing.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        """Set whether data processing is enabled."""
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter
    def processors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs']]]]:
        """
        Array of data processors. More details are given below
        """
        return pulumi.get(self, "processors")
    @processors.setter
    def processors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs']]]]):
        """Set the array of data processors."""
        pulumi.set(self, "processors", value)
@pulumi.input_type
class FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs:
    """A single data processor in the processing configuration of a Firehose
    delivery stream's Elasticsearch destination.
    """
    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 parameters: Optional[pulumi.Input[Sequence[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs']]]] = None):
        """
        :param pulumi.Input[str] type: The type of processor. Valid Values: `Lambda`
        :param pulumi.Input[Sequence[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs']]] parameters: Array of processor parameters. More details are given below
        """
        # `type` is required; `parameters` is only stored when provided.
        pulumi.set(__self__, "type", type)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The type of processor. Valid Values: `Lambda`
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: pulumi.Input[str]):
        """Set the processor type."""
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs']]]]:
        """
        Array of processor parameters. More details are given below
        """
        return pulumi.get(self, "parameters")
    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs']]]]):
        """Set the processor parameters."""
        pulumi.set(self, "parameters", value)
@pulumi.input_type
class FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs:
def __init__(__self__, *,
parameter_name: pulumi.Input[str],
parameter_value: pulumi.Input[str]):
"""
:param pulumi.Input[str] parameter_name: Parameter name. Valid Values: `LambdaArn`, `NumberOfRetries`, `RoleArn`, `BufferSizeInMBs`, `BufferIntervalInSeconds`
:param pulumi.Input[str] parameter_value: Parameter value. Must be between 1 and 512 length (inclusive). When | |
that if :math:`\hat{y} < 0`, then the exponential term
:math:`e^{-\hat{y}}` could become very large. In this case, we can instead
observe that
.. math::
\begin{align*}
\log(1 + e^{-\hat{y}})
&= \log(1 + e^{-\hat{y}}) + \hat{y} - \hat{y} \\
&= \log(1 + e^{-\hat{y}}) + \log(e^{\hat{y}}) - \hat{y} \\
&= \log(1 + e^{\hat{y}}) - \hat{y}.
\end{align*}
Moreover, the :math:`\hat{y} < 0` and :math:`\hat{y} \geq 0` cases can be
unified by writing
.. math::
\log(1 + e^{-\hat{y}})
= \log(1 + e^{-|\hat{y}|}) + \max\{-\hat{y}, 0\}.
Thus, we arrive at the numerically stable formula shown earlier.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. Focal loss for
dense object detection. IEEE Transactions on Pattern Analysis and
Machine Intelligence, 2018.
(`DOI <https://doi.org/10.1109/TPAMI.2018.2858826>`__)
(`arXiv preprint <https://arxiv.org/abs/1708.02002>`__)
See Also
--------
:meth:`~focal_loss.BinaryFocalLoss`
A wrapper around this function that makes it a
:class:`tf.keras.losses.Loss`.
"""
# Validate arguments
gamma = check_float(gamma, name='gamma', minimum=0)
pos_weight = check_float(pos_weight, name='pos_weight', minimum=0,
allow_none=True)
from_logits = check_bool(from_logits, name='from_logits')
label_smoothing = check_float(label_smoothing, name='label_smoothing',
minimum=0, maximum=1, allow_none=True)
# Ensure predictions are a floating point tensor; converting labels to a
# tensor will be done in the helper functions
y_pred = tf.convert_to_tensor(y_pred)
if not y_pred.dtype.is_floating:
y_pred = tf.dtypes.cast(y_pred, dtype=tf.float32)
# calculate Pos weights
count_neg = tf.reduce_sum(1. - y_true)
count_pos = tf.reduce_sum(y_true) + 1
# Equation [2]
beta = count_neg / (count_neg + count_pos)
# Equation [2] divide by 1 - beta
pos_weight = beta / (1.001 - beta)
# Delegate per-example loss computation to helpers depending on whether
# predictions are logits or probabilities
if from_logits:
return _binary_focal_loss_from_logits(labels=y_true, logits=y_pred,
gamma=gamma,
pos_weight=pos_weight,
label_smoothing=label_smoothing)
else:
return _binary_focal_loss_from_probs(labels=y_true, p=y_pred,
gamma=gamma, pos_weight=pos_weight,
label_smoothing=label_smoothing)
@tf.keras.utils.register_keras_serializable()
class BinaryFocalLossCustom(tf.keras.losses.Loss):
    r"""Focal loss function for binary classification.

    This loss function generalizes binary cross-entropy by introducing a
    hyperparameter called the *focusing parameter* that allows hard-to-classify
    examples to be penalized more heavily relative to easy-to-classify examples.

    This class is a wrapper around :meth:`binary_focal_loss_custom`. See the
    documentation there for details about this loss function.

    Parameters
    ----------
    gamma : float
        The focusing parameter :math:`\gamma`. Must be non-negative.
    pos_weight : float, optional
        The coefficient :math:`\alpha` to use on the positive examples. Must be
        non-negative.
        NOTE(review): :meth:`binary_focal_loss_custom` appears to recompute
        the positive-class weight from the batch's label balance, which would
        override this value -- confirm before relying on it.
    from_logits : bool, optional
        Whether model prediction will be logits or probabilities.
    label_smoothing : float, optional
        Float in [0, 1]. When 0, no smoothing occurs. When positive, the binary
        ground truth labels are squeezed toward 0.5, with larger values of
        `label_smoothing` leading to label values closer to 0.5.
    **kwargs : keyword arguments
        Other keyword arguments for :class:`tf.keras.losses.Loss` (e.g., `name`
        or `reduction`).

    Examples
    --------
    An instance of this class is a callable that takes a tensor of binary ground
    truth labels `y_true` and a tensor of model predictions `y_pred` and returns
    a scalar tensor obtained by reducing the per-example focal loss (the default
    reduction is a batch-wise average).

    >>> loss_func = BinaryFocalLossCustom(gamma=2)
    >>> loss = loss_func([0, 1, 1], [0.1, 0.7, 0.9])  # A scalar tensor

    Use this class in the :mod:`tf.keras` API like any other binary
    classification loss function class found in :mod:`tf.keras.losses` (e.g.,
    :class:`tf.keras.losses.BinaryCrossentropy`):

    .. code-block:: python

        # Typical usage
        model = tf.keras.Model(...)
        model.compile(
            optimizer=...,
            loss=BinaryFocalLossCustom(gamma=2),  # Used here like a tf.keras loss
            metrics=...,
        )
        history = model.fit(...)

    See Also
    --------
    :meth:`binary_focal_loss_custom`
        The function that performs the focal loss computation, taking a label
        tensor and a prediction tensor and outputting a loss.
    """
    def __init__(self, gamma, *, pos_weight=None, from_logits=False,
                 label_smoothing=None, **kwargs):
        # Validate the hyperparameters up front so bad values fail fast at
        # construction time, then delegate the rest to the Keras base class.
        gamma = check_float(gamma, name='gamma', minimum=0)
        pos_weight = check_float(pos_weight, name='pos_weight', minimum=0,
                                 allow_none=True)
        from_logits = check_bool(from_logits, name='from_logits')
        label_smoothing = check_float(label_smoothing, name='label_smoothing',
                                      minimum=0, maximum=1, allow_none=True)
        super().__init__(**kwargs)
        self.gamma = gamma
        self.pos_weight = pos_weight
        self.from_logits = from_logits
        self.label_smoothing = label_smoothing
    def get_config(self):
        """Returns the config of the layer.

        A layer config is a Python dictionary containing the configuration of a
        layer. The same layer can be re-instantiated later (without its trained
        weights) from this configuration.

        Returns
        -------
        dict
            This layer's config.
        """
        config = super().get_config()
        config.update(gamma=self.gamma, pos_weight=self.pos_weight,
                      from_logits=self.from_logits,
                      label_smoothing=self.label_smoothing)
        return config
    def call(self, y_true, y_pred):
        """Compute the per-example focal loss.

        This method simply calls :meth:`binary_focal_loss_custom` with the
        appropriate arguments.

        Parameters
        ----------
        y_true : tensor-like
            Binary (0 or 1) class labels.
        y_pred : tensor-like
            Either probabilities for the positive class or logits for the
            positive class, depending on the `from_logits` attribute. The shapes
            of `y_true` and `y_pred` should be broadcastable.

        Returns
        -------
        :class:`tf.Tensor`
            The per-example focal loss. Reduction to a scalar is handled by
            this layer's :meth:`__call__` method.
        """
        return binary_focal_loss_custom(y_true=y_true, y_pred=y_pred, gamma=self.gamma,
                                        pos_weight=self.pos_weight,
                                        from_logits=self.from_logits,
                                        label_smoothing=self.label_smoothing)
# Helper functions below
def _process_labels(labels, label_smoothing, dtype):
    """Cast binary labels to ``dtype`` and optionally smooth them.

    Parameters
    ----------
    labels : tensor-like
        Tensor of 0's and 1's.
    label_smoothing : float or None
        Float in [0, 1]. When 0, no smoothing occurs. When positive, the
        binary ground truth labels are squeezed toward 0.5, with larger
        values of `label_smoothing` leading to label values closer to 0.5.
    dtype : tf.dtypes.DType
        Desired type of the elements of `labels`.

    Returns
    -------
    tf.Tensor
        The processed labels.
    """
    cast_labels = tf.dtypes.cast(labels, dtype=dtype)
    if label_smoothing is None:
        return cast_labels
    # Affine squeeze toward 0.5: 0 -> s/2 and 1 -> 1 - s/2.
    return (1 - label_smoothing) * cast_labels + label_smoothing * 0.5
def _binary_focal_loss_from_logits(labels, logits, gamma, pos_weight,
                                   label_smoothing):
    """Compute focal loss from logits using a numerically stable formula.
    Parameters
    ----------
    labels : tensor-like
        Tensor of 0's and 1's: binary class labels.
    logits : tf.Tensor
        Logits for the positive class.
    gamma : float
        Focusing parameter.
    pos_weight : float or None
        If not None, losses for the positive class will be scaled by this
        weight.
    label_smoothing : float or None
        Float in [0, 1]. When 0, no smoothing occurs. When positive, the binary
        ground truth labels `y_true` are squeezed toward 0.5, with larger values
        of `label_smoothing` leading to label values closer to 0.5.
    Returns
    -------
    tf.Tensor
        The loss for each example.
    """
    # Cast labels to the logits dtype and optionally smooth them toward 0.5
    labels = _process_labels(labels=labels, label_smoothing=label_smoothing,
                             dtype=logits.dtype)
    # Compute probabilities for the positive class
    p = tf.math.sigmoid(logits)
    # Without label smoothing we can use TensorFlow's built-in per-example cross
    # entropy loss functions and multiply the result by the modulating factor.
    # Otherwise, we compute the focal loss ourselves using a numerically stable
    # formula below
    if label_smoothing is None:
        # The labels and logits tensors' shapes need to be the same for the
        # built-in cross-entropy functions. Since we want to allow broadcasting,
        # we do some checks on the shapes and possibly broadcast explicitly
        # Note: tensor.shape returns a tf.TensorShape, whereas tf.shape(tensor)
        # returns an int tf.Tensor; this is why both are used below
        labels_shape = labels.shape
        logits_shape = logits.shape
        if not labels_shape.is_fully_defined() or labels_shape != logits_shape:
            labels_shape = tf.shape(labels)
            logits_shape = tf.shape(logits)
            shape = tf.broadcast_dynamic_shape(labels_shape, logits_shape)
            labels = tf.broadcast_to(labels, shape)
            logits = tf.broadcast_to(logits, shape)
        if pos_weight is None:
            loss_func = tf.nn.sigmoid_cross_entropy_with_logits
        else:
            # Scale the positive-class term via the weighted variant
            loss_func = partial(tf.nn.weighted_cross_entropy_with_logits,
                                pos_weight=pos_weight)
        loss = loss_func(labels=labels, logits=logits)
        # Focal modulation: (1 - p)**gamma for positive examples, p**gamma
        # for negative ones, selected per element using the 0/1 labels
        modulation_pos = (1 - p) ** gamma
        modulation_neg = p ** gamma
        mask = tf.dtypes.cast(labels, dtype=tf.bool)
        modulation = tf.where(mask, modulation_pos, modulation_neg)
        return modulation * loss
    # Terms for the positive and negative class components of the loss
    pos_term = labels * ((1 - p) ** gamma)
    neg_term = (1 - labels) * (p ** gamma)
    # Term involving the log and ReLU
    # NOTE: the in-place ops below rebind log_weight to new tensors (tf
    # tensors are immutable), so pos_term itself is not modified.
    log_weight = pos_term
    if pos_weight is not None:
        log_weight *= pos_weight
    log_weight += neg_term
    # log1p(exp(-|x|)) + relu(-x) is a numerically stable evaluation of
    # softplus(-x) = -log(sigmoid(x)) that avoids overflow for large |x|
    log_term = tf.math.log1p(tf.math.exp(-tf.math.abs(logits)))
    log_term += tf.nn.relu(-logits)
    log_term *= log_weight
    # Combine all the terms into the loss
    loss = neg_term * logits + log_term
    return loss
def _binary_focal_loss_from_probs(labels, p, gamma, pos_weight,
label_smoothing):
"""Compute focal loss from probabilities.
Parameters
----------
labels : tensor-like
Tensor of 0's and 1's: binary class labels.
p : tf.Tensor
Estimated probabilities for the positive class.
gamma : float
Focusing parameter.
pos_weight : float or None
If not None, losses for the positive class will be scaled by this
weight.
label_smoothing : float or None
Float in [0, 1]. When 0, no smoothing occurs. When positive, the binary
ground truth labels `y_true` are squeezed | |
import copy
class NormaMachine:
def __init__(self):
self.registers = dict(
A={"signal": 0, "magnitude": 0},
B={"signal": 0, "magnitude": 0},
C={"signal": 0, "magnitude": 0},
D={"signal": 0, "magnitude": 0},
E={"signal": 0, "magnitude": 0},
F={"signal": 0, "magnitude": 0},
)
self.stack = []
self.stack_pointer = -1 # pilha vazia
self.response = [] # resposta (history) das ações
def __str__(self):
msg = ""
for reg in self.registers:
msg += "{}: ({},{}) | ".format(reg, self.registers[reg]["signal"], self.registers[reg]["magnitude"])
msg += "Stack: {} | Stack Pointer: {}".format(self.stack, self.stack_pointer)
return msg
def clear_response(self):
self.response = []
def append_to_response(self):
log_entry = {
'registers': copy.deepcopy(self.registers),
'stack': copy.deepcopy(self.stack),
'stack_pointer': copy.deepcopy(self.stack_pointer)
}
self.response.append(log_entry)
    # TODO implement change of signal as a function
    def change_signal(self, reg):
        # Placeholder: presumably intended to flip the sign bit ("signal")
        # of register *reg* — TODO confirm intended semantics. Currently a
        # no-op.
        pass
def get_reg_magnitude(self, reg):
return self.registers[reg]["magnitude"]
def get_reg_signal(self, reg):
return self.registers[reg]["signal"]
def reset_machine(self):
for reg in self.registers:
self.set_0_to_reg(reg)
self.stack = []
self.stack_pointer = -1
self.response = self.response[-1:]
return self.response
def push_to_stack(self, value):
print("Pushing {} to the stack".format(value))
self.stack.append(value)
self.stack_pointer += 1
self.append_to_response()
return self.response
def pop_from_stack(self, reg="A"):
"""
Pops the stack.
A chosen register ca be passed as the target for the popped value
:param reg: (optional, default "A") the register where the popped
value will be stored
:return: None
"""
print("Popping the stack".format(reg))
msg = ''
if len(self.stack) == 0:
msg += "Stack is empty"
self.stack_pointer = -1
else:
val = self.stack.pop()
self.stack_pointer -= 1
self.set_n_to_reg(reg, val)
if len(self.stack) == 0:
msg += "Stack is now empty"
self.stack_pointer = -1
self.append_to_response()
return self.response, msg
def set_0_to_reg(self, reg):
print("{}:= 0".format(reg))
self.append_to_response()
while True:
if self.get_reg_magnitude(reg) == 0:
break
else:
self.registers[reg]["magnitude"] = self.registers[reg]["magnitude"] - 1
if self.get_reg_magnitude(reg) == 0:
self.registers[reg]["signal"] = 0
self.append_to_response()
# print(self)
return self.response
def set_n_to_reg(self, reg, n):
print("{}:= {}".format(reg, n))
self.set_0_to_reg(reg)
cont = abs(n)
if n < 0:
self.registers[reg]["signal"] = 1
while True:
if cont == 0:
break
else:
self.registers[reg]["magnitude"] = self.registers[reg]["magnitude"] + 1
cont -= 1
self.append_to_response()
# print(self)
return self.response
    def add_b_to_a(self, reg_b="B", reg_a="A"):
        """Destructively add register *reg_b* into register *reg_a*.

        Signed-magnitude addition performed one unit at a time: *reg_b* is
        decremented to zero while *reg_a* is incremented or decremented
        according to both sign bits. On exit *reg_b* holds zero (sign bit
        cleared) and *reg_a* holds the sum.
        """
        print("{0}:={0} + {1}".format(reg_a, reg_b))
        while True:
            if self.get_reg_magnitude(reg_b) == 0:
                break
            else:  # b is non-zero: transfer one unit from b into a
                self.registers[reg_b]["magnitude"] = self.registers[reg_b]["magnitude"] - 1  # decrement b
                if self.get_reg_signal(reg_b) == 0:  # b is positive
                    if self.get_reg_signal(reg_a) == 0:  # a is positive
                        self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] + 1
                    else:  # a is negative
                        self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] - 1
                        if self.get_reg_magnitude(reg_a) == 0:  # a crosses from negative to non-negative
                            self.registers[reg_a]["signal"] = 0
                else:  # b is negative
                    if self.get_reg_signal(reg_a) == 0:  # a is positive
                        # a is about to cross from positive to negative, after which it accumulates
                        if self.get_reg_magnitude(reg_a) == 0:
                            self.registers[reg_a]["signal"] = 1
                            self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] + 1
                        else:
                            self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] - 1
                    else:  # a is negative
                        self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] + 1
                if self.get_reg_magnitude(reg_b) == 0:  # clear b's sign bit once it reaches 0
                    self.registers[reg_b]["signal"] = 0
        # print(self)
        self.append_to_response()
        return self.response
    def add_b_to_a_with_c(self, reg_b="B", reg_a="A", reg_c="C"):
        """Add *reg_b* into *reg_a* while preserving *reg_b*, using *reg_c*.

        Same stepping scheme as :meth:`add_b_to_a`, but every unit moved out
        of *reg_b* is also counted in *reg_c*; when *reg_b* empties, *reg_c*
        inherits its original sign and the final ``add_b_to_a(reg_c, reg_b)``
        call pours the count back, restoring *reg_b* (and zeroing *reg_c*).
        """
        print("{0}:={0} + {1} usando {2}".format(reg_a, reg_b, reg_c))
        self.set_0_to_reg(reg_c)
        while True:
            if self.get_reg_magnitude(reg_b) == 0:
                break
            else:  # b is non-zero
                self.registers[reg_b]["magnitude"] = self.registers[reg_b]["magnitude"] - 1  # decrement b
                self.registers[reg_c]["magnitude"] = self.registers[reg_c]["magnitude"] + 1  # increment c
                if self.get_reg_signal(reg_b) == 0:  # b is positive
                    if self.get_reg_signal(reg_a) == 0:  # a is positive
                        self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] + 1
                    else:  # a is negative
                        self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] - 1
                        if self.get_reg_magnitude(reg_a) == 0:  # a crosses from negative to non-negative
                            self.registers[reg_a]["signal"] = 0
                else:  # b is negative
                    if self.get_reg_signal(reg_a) == 0:  # a is positive
                        if self.get_reg_magnitude(
                                reg_a) == 0:  # a is about to cross from positive to negative and will then accumulate
                            self.registers[reg_a]["signal"] = 1
                            self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] + 1
                        else:
                            self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] - 1
                    else:  # a is negative
                        self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] + 1
                if self.get_reg_magnitude(reg_b) == 0:  # b reached 0: move its sign to c, clear b's sign
                    self.registers[reg_c]["signal"] = self.registers[reg_b]["signal"]
                    self.registers[reg_b]["signal"] = 0
        # print(self)
        self.append_to_response()
        self.add_b_to_a(reg_c, reg_b)
        return self.response
def set_b_to_a_with_c(self, reg_b="B", reg_a="A", reg_c="C"):
print("{}:= {} usando {}".format(reg_a, reg_b, reg_c))
self.set_0_to_reg(reg_a)
self.add_b_to_a_with_c(reg_b, reg_a, reg_c)
return self.response
    def mult_a_with_b_with_c_and_d(self, reg_a="A", reg_b="B", reg_c="C", reg_d="D"):
        """Multiply *reg_a* by *reg_b*, leaving the product in *reg_a*.

        The sign of the product is computed up front from both sign bits.
        *reg_a* is first drained into *reg_c* (the loop counter); then
        *reg_b* is added into *reg_a* once per original unit of *reg_a*,
        using *reg_d* as scratch so *reg_b* is preserved. Finally the
        precomputed sign is written to *reg_a*.

        NOTE(review): when the product is zero but the operand signs differ,
        the result keeps sign bit 1 ("negative zero") — confirm whether
        callers rely on zero always having sign 0.
        """
        print("{0}:={0} x {1} usando {2}, {3}".format(reg_a, reg_b, reg_c, reg_d))
        # signal: 0 when operand signs match (positive product), else 1.
        signal = 0
        if (self.get_reg_signal(reg_a) == 0 and self.get_reg_signal(reg_b) == 0) or \
                (self.get_reg_signal(reg_a) == 1 and self.get_reg_signal(reg_b) == 1):
            pass
        else:
            signal = 1
        self.set_0_to_reg(reg_c)
        # Drain a into c: c becomes the repetition counter, a becomes 0.
        self.add_b_to_a(reg_a, reg_c)
        while True:
            if self.get_reg_magnitude(reg_c) == 0:
                break
            else:
                # a += b (b preserved via d), once per original unit of a.
                self.add_b_to_a_with_c(reg_b, reg_a, reg_d)
                self.registers[reg_c]["magnitude"] = self.registers[reg_c]["magnitude"] - 1
                if self.get_reg_magnitude(reg_c) == 0:
                    self.registers[reg_c]["signal"] = 0
        # print(self)
        self.append_to_response()
        self.registers[reg_a]["signal"] = signal
        self.append_to_response()
        return self.response
    def test_a_lower_eq_than_b_auxc_auxd(self, reg_a="A", reg_b="B", reg_c="C", reg_d="D"):
        """Test whether *reg_a* <= *reg_b*, restoring both registers.

        Both magnitudes are decremented in lockstep while *reg_c*/*reg_d*
        count the units removed, so the originals can be rebuilt afterwards.
        Returns (response history, flag).

        NOTE(review): unlike :meth:`test_a_lower_than_b_auxc_auxd`, the aux
        registers are NOT cleared on entry — confirm callers zero them first.
        """
        flag = False
        if self.get_reg_signal(reg_a) == 0:
            if self.get_reg_signal(reg_b) == 0:  # both non-negative: decrement both and see which hits 0 first
                while True:
                    if self.get_reg_magnitude(reg_a) == 0:
                        if self.get_reg_magnitude(reg_b) == 0:  # both are 0: a <= b holds
                            flag = True
                        else:
                            flag = True
                        break
                    elif self.get_reg_magnitude(reg_b) == 0:
                        flag = False
                        break
                    self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] - 1
                    self.registers[reg_c]["magnitude"] = self.registers[reg_c]["magnitude"] + 1
                    self.registers[reg_b]["magnitude"] = self.registers[reg_b]["magnitude"] - 1
                    self.registers[reg_d]["magnitude"] = self.registers[reg_d]["magnitude"] + 1
                    self.append_to_response()
            else:  # a non-negative, b negative: a > b, return False
                flag = False
                return self.response, flag
        else:  # a is negative
            if self.get_reg_signal(reg_b) == 0:  # and b non-negative: return True
                flag = True
                return self.response, flag
            else:  # both negative
                while True:
                    if self.get_reg_magnitude(reg_a) == 0:
                        if self.get_reg_magnitude(reg_b) == 0:  # both are 0: equal, return True
                            flag = True
                        else:
                            flag = False
                        break
                    elif self.get_reg_magnitude(reg_b) == 0:
                        flag = True
                        break
                    self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] - 1
                    self.registers[reg_c]["magnitude"] = self.registers[reg_c]["magnitude"] + 1
                    self.registers[reg_b]["magnitude"] = self.registers[reg_b]["magnitude"] - 1
                    self.registers[reg_d]["magnitude"] = self.registers[reg_d]["magnitude"] + 1
                    self.append_to_response()
        # Restore a and b from the unit counts accumulated in c and d.
        while True:
            if self.get_reg_magnitude(reg_c) == 0:
                break
            if self.get_reg_magnitude(reg_d) == 0:
                break
            self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] + 1
            self.registers[reg_c]["magnitude"] = self.registers[reg_c]["magnitude"] - 1
            self.registers[reg_b]["magnitude"] = self.registers[reg_b]["magnitude"] + 1
            self.registers[reg_d]["magnitude"] = self.registers[reg_d]["magnitude"] - 1
        return self.response, flag
    def test_a_lower_than_b_auxc_auxd(self, reg_a="A", reg_b="B", reg_c="E", reg_d="F"):
        """Test whether *reg_a* < *reg_b* (strict), restoring both registers.

        Clears the aux registers first, then decrements both magnitudes in
        lockstep while *reg_c*/*reg_d* count the units removed so the
        originals can be rebuilt afterwards. Returns (response history, flag).
        """
        self.set_0_to_reg(reg_c)
        self.set_0_to_reg(reg_d)
        flag = False
        if self.get_reg_signal(reg_a) == 0:
            if self.get_reg_signal(reg_b) == 0:  # both non-negative: decrement both and see which hits 0 first
                while True:
                    if self.get_reg_magnitude(reg_a) == 0:
                        if self.get_reg_magnitude(reg_b) == 0:  # both are 0: equal, return False
                            flag = False
                        else:
                            flag = True
                        break
                    elif self.get_reg_magnitude(reg_b) == 0:
                        flag = False
                        break
                    self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] - 1
                    self.registers[reg_c]["magnitude"] = self.registers[reg_c]["magnitude"] + 1
                    self.registers[reg_b]["magnitude"] = self.registers[reg_b]["magnitude"] - 1
                    self.registers[reg_d]["magnitude"] = self.registers[reg_d]["magnitude"] + 1
                    self.append_to_response()
            else:  # a non-negative, b negative: a > b, return False
                flag = False
                return self.response, flag
        else:  # a is negative
            if self.get_reg_signal(reg_b) == 0:  # and b non-negative: return True
                flag = True
                return self.response, flag
            else:  # both negative
                while True:
                    if self.get_reg_magnitude(reg_a) == 0:
                        if self.get_reg_magnitude(reg_b) == 0:  # both are 0: equal, return False
                            flag = False
                        else:
                            flag = False
                        break
                    if self.get_reg_magnitude(reg_b) == 0:  # a is not 0 but b already reached 0
                        flag = True
                        break
                    self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] - 1
                    self.registers[reg_c]["magnitude"] = self.registers[reg_c]["magnitude"] + 1
                    self.registers[reg_b]["magnitude"] = self.registers[reg_b]["magnitude"] - 1
                    self.registers[reg_d]["magnitude"] = self.registers[reg_d]["magnitude"] + 1
                    self.append_to_response()
        # Restore a and b from the unit counts accumulated in c and d.
        while True:
            if self.registers[reg_c]["magnitude"] == 0:
                break
            if self.registers[reg_d]["magnitude"] == 0:
                break
            self.registers[reg_a]["magnitude"] = self.registers[reg_a]["magnitude"] + 1
            self.registers[reg_c]["magnitude"] = self.registers[reg_c]["magnitude"] - 1
            self.registers[reg_b]["magnitude"] = self.registers[reg_b]["magnitude"] + 1
            self.registers[reg_d]["magnitude"] = self.registers[reg_d]["magnitude"] - 1
        return self.response, flag
def factorial(self, n):
error_msg = ''
# Para evitar problemas na máquina, essa condição foi acrescentada
if n < 0: # não existe fatorial negativo
error_msg += "ERROR: Illegal Instruction."
return self.response, error_msg
print("Calculando o fatorial de {}".format(n))
if n == 0: # 0! = 1
self.set_n_to_reg("A", 1)
return self.response, error_msg
self.set_n_to_reg("B", n) # B := n, the value we want factorial from
self.set_n_to_reg("A", 1)
while True:
if self.registers["B"]["magnitude"] == 0:
break
else:
self.mult_a_with_b_with_c_and_d()
self.registers["B"]["magnitude"] -= 1
self.append_to_response()
return self.response, error_msg
def power(self, a, b):
error_msg = ''
if b < 0:
error_msg += "ERROR: Not | |
respBuf = self.dispatchCommand(TPM_CC.PolicyAuthorizeNV, req)
return self.processResponse(respBuf)
# PolicyAuthorizeNV()
def CreatePrimary(self, primaryHandle, inSensitive, inPublic, outsideInfo, creationPCR):
""" This command is used to create a Primary Object under one of the
Primary Seeds or a Temporary Object under TPM_RH_NULL. The command uses
a TPM2B_PUBLIC as a template for the object to be created. The size of
the unique field shall not be checked for consistency with the other
object parameters. The command will create and load a Primary Object.
The sensitive area is not returned.
Args:
primaryHandle (TPM_HANDLE): TPM_RH_ENDORSEMENT, TPM_RH_OWNER,
TPM_RH_PLATFORM+{PP}, or TPM_RH_NULL
Auth Index: 1
Auth Role: USER
inSensitive (TPMS_SENSITIVE_CREATE): The sensitive data, see TPM 2.0
Part 1 Sensitive Values
inPublic (TPMT_PUBLIC): The public template
outsideInfo (int): Data that will be included in the creation data
for this object to provide permanent, verifiable linkage between
this object and some object owner data
creationPCR (TPMS_PCR_SELECTION): PCR that will be used in creation data
Returns:
handle - Handle of type TPM_HT_TRANSIENT for created Primary Object
outPublic - The public portion of the created object
creationData - Contains a TPMT_CREATION_DATA
creationHash - Digest of creationData using nameAlg of outPublic
creationTicket - Ticket used by TPM2_CertifyCreation() to validate
that the creation data was produced by the TPM
name - The name of the created object
"""
req = TPM2_CreatePrimary_REQUEST(primaryHandle, inSensitive, inPublic, outsideInfo, creationPCR)
respBuf = self.dispatchCommand(TPM_CC.CreatePrimary, req)
return self.processResponse(respBuf, CreatePrimaryResponse)
# CreatePrimary()
def HierarchyControl(self, authHandle, enable, state):
""" This command enables and disables use of a hierarchy and its
associated NV storage. The command allows phEnable, phEnableNV,
shEnable, and ehEnable to be changed when the proper authorization is provided.
Args:
authHandle (TPM_HANDLE): TPM_RH_ENDORSEMENT, TPM_RH_OWNER or
TPM_RH_PLATFORM+{PP}
Auth Index: 1
Auth Role: USER
enable (TPM_HANDLE): The enable being modified
TPM_RH_ENDORSEMENT, TPM_RH_OWNER, TPM_RH_PLATFORM, or
TPM_RH_PLATFORM_NV
state (int): YES if the enable should be SET, NO if the enable
should be CLEAR
"""
req = TPM2_HierarchyControl_REQUEST(authHandle, enable, state)
respBuf = self.dispatchCommand(TPM_CC.HierarchyControl, req)
return self.processResponse(respBuf)
# HierarchyControl()
def SetPrimaryPolicy(self, authHandle, authPolicy, hashAlg):
""" This command allows setting of the authorization policy for the
lockout (lockoutPolicy), the platform hierarchy (platformPolicy), the
storage hierarchy (ownerPolicy), and the endorsement hierarchy
(endorsementPolicy). On TPMs implementing Authenticated Countdown Timers
(ACT), this command may also be used to set the authorization policy for
an ACT.
Args:
authHandle (TPM_HANDLE): TPM_RH_LOCKOUT, TPM_RH_ENDORSEMENT,
TPM_RH_OWNER, TPMI_RH_ACT or TPM_RH_PLATFORM+{PP}
Auth Index: 1
Auth Role: USER
authPolicy (int): An authorization policy digest; may be the Empty Buffer
If hashAlg is TPM_ALG_NULL, then this shall be an Empty Buffer.
hashAlg (TPM_ALG_ID): The hash algorithm to use for the policy
If the authPolicy is an Empty Buffer, then this field shall be
TPM_ALG_NULL.
"""
req = TPM2_SetPrimaryPolicy_REQUEST(authHandle, authPolicy, hashAlg)
respBuf = self.dispatchCommand(TPM_CC.SetPrimaryPolicy, req)
return self.processResponse(respBuf)
# SetPrimaryPolicy()
def ChangePPS(self, authHandle):
""" This replaces the current platform primary seed (PPS) with a value
from the RNG and sets platformPolicy to the default initialization value
(the Empty Buffer).
Args:
authHandle (TPM_HANDLE): TPM_RH_PLATFORM+{PP}
Auth Index: 1
Auth Role: USER
"""
req = TPM2_ChangePPS_REQUEST(authHandle)
respBuf = self.dispatchCommand(TPM_CC.ChangePPS, req)
return self.processResponse(respBuf)
# ChangePPS()
def ChangeEPS(self, authHandle):
""" This replaces the current endorsement primary seed (EPS) with a
value from the RNG and sets the Endorsement hierarchy controls to their
default initialization values: ehEnable is SET, endorsementAuth and
endorsementPolicy are both set to the Empty Buffer. It will flush any
resident objects (transient or persistent) in the Endorsement hierarchy
and not allow objects in the hierarchy associated with the previous EPS
to be loaded.
Args:
authHandle (TPM_HANDLE): TPM_RH_PLATFORM+{PP}
Auth Handle: 1
Auth Role: USER
"""
req = TPM2_ChangeEPS_REQUEST(authHandle)
respBuf = self.dispatchCommand(TPM_CC.ChangeEPS, req)
return self.processResponse(respBuf)
# ChangeEPS()
def Clear(self, authHandle):
""" This command removes all TPM context associated with a specific Owner.
Args:
authHandle (TPM_HANDLE): TPM_RH_LOCKOUT or TPM_RH_PLATFORM+{PP}
Auth Handle: 1
Auth Role: USER
"""
req = TPM2_Clear_REQUEST(authHandle)
respBuf = self.dispatchCommand(TPM_CC.Clear, req)
return self.processResponse(respBuf)
# Clear()
def ClearControl(self, auth, disable):
""" TPM2_ClearControl() disables and enables the execution of TPM2_Clear().
Args:
auth (TPM_HANDLE): TPM_RH_LOCKOUT or TPM_RH_PLATFORM+{PP}
Auth Handle: 1
Auth Role: USER
disable (int): YES if the disableOwnerClear flag is to be SET, NO if
the flag is to be CLEAR.
"""
req = TPM2_ClearControl_REQUEST(auth, disable)
respBuf = self.dispatchCommand(TPM_CC.ClearControl, req)
return self.processResponse(respBuf)
# ClearControl()
def HierarchyChangeAuth(self, authHandle, newAuth):
""" This command allows the authorization secret for a hierarchy or
lockout to be changed using the current authorization value as the
command authorization.
Args:
authHandle (TPM_HANDLE): TPM_RH_LOCKOUT, TPM_RH_ENDORSEMENT,
TPM_RH_OWNER or TPM_RH_PLATFORM+{PP}
Auth Index: 1
Auth Role: USER
newAuth (int): New authorization value
"""
req = TPM2_HierarchyChangeAuth_REQUEST(authHandle, newAuth)
respBuf = self.dispatchCommand(TPM_CC.HierarchyChangeAuth, req)
return self.processResponse(respBuf)
# HierarchyChangeAuth()
def DictionaryAttackLockReset(self, lockHandle):
""" This command cancels the effect of a TPM lockout due to a number of
successive authorization failures. If this command is properly
authorized, the lockout counter is set to zero.
Args:
lockHandle (TPM_HANDLE): TPM_RH_LOCKOUT
Auth Index: 1
Auth Role: USER
"""
req = TPM2_DictionaryAttackLockReset_REQUEST(lockHandle)
respBuf = self.dispatchCommand(TPM_CC.DictionaryAttackLockReset, req)
return self.processResponse(respBuf)
# DictionaryAttackLockReset()
def DictionaryAttackParameters(self, lockHandle, newMaxTries, newRecoveryTime, lockoutRecovery):
""" This command changes the lockout parameters.
Args:
lockHandle (TPM_HANDLE): TPM_RH_LOCKOUT
Auth Index: 1
Auth Role: USER
newMaxTries (int): Count of authorization failures before the
lockout is imposed
newRecoveryTime (int): Time in seconds before the authorization
failure count is automatically decremented
A value of zero indicates that DA protection is disabled.
lockoutRecovery (int): Time in seconds after a lockoutAuth failure
before use of lockoutAuth is allowed
A value of zero indicates that a reboot is required.
"""
req = TPM2_DictionaryAttackParameters_REQUEST(lockHandle, newMaxTries, newRecoveryTime, lockoutRecovery)
respBuf = self.dispatchCommand(TPM_CC.DictionaryAttackParameters, req)
return self.processResponse(respBuf)
# DictionaryAttackParameters()
def PP_Commands(self, auth, setList, clearList):
""" This command is used to determine which commands require assertion
of Physical Presence (PP) in addition to platformAuth/platformPolicy.
Args:
auth (TPM_HANDLE): TPM_RH_PLATFORM+PP
Auth Index: 1
Auth Role: USER + Physical Presence
setList (TPM_CC): List of commands to be added to those that will
require that Physical Presence be asserted
clearList (TPM_CC): List of commands that will no longer require
that Physical Presence be asserted
"""
req = TPM2_PP_Commands_REQUEST(auth, setList, clearList)
respBuf = self.dispatchCommand(TPM_CC.PP_Commands, req)
return self.processResponse(respBuf)
# PP_Commands()
def SetAlgorithmSet(self, authHandle, algorithmSet):
""" This command allows the platform to change the set of algorithms
that are used by the TPM. The algorithmSet setting is a vendor-dependent
value.
Args:
authHandle (TPM_HANDLE): TPM_RH_PLATFORM
Auth Index: 1
Auth Role: USER
algorithmSet (int): A TPM vendor-dependent value indicating the
algorithm set selection
"""
req = TPM2_SetAlgorithmSet_REQUEST(authHandle, algorithmSet)
respBuf = self.dispatchCommand(TPM_CC.SetAlgorithmSet, req)
return self.processResponse(respBuf)
# SetAlgorithmSet()
def FieldUpgradeStart(self, authorization, keyHandle, fuDigest, manifestSignature):
""" This command uses platformPolicy and a TPM Vendor Authorization Key
to authorize a Field Upgrade Manifest.
Args:
authorization (TPM_HANDLE): TPM_RH_PLATFORM+{PP}
Auth Index:1
Auth Role: ADMIN
keyHandle (TPM_HANDLE): Handle of a public area that contains the
TPM Vendor Authorization Key that will be used to validate
manifestSignature
Auth Index: None
fuDigest (int): Digest of the first block in the field upgrade sequence
manifestSignature (TPMU_SIGNATURE): Signature over fuDigest using
the key associated with keyHandle (not optional)
(One of [TPMS_SIGNATURE_RSASSA, TPMS_SIGNATURE_RSAPSS,
TPMS_SIGNATURE_ECDSA, TPMS_SIGNATURE_ECDAA, TPMS_SIGNATURE_SM2,
TPMS_SIGNATURE_ECSCHNORR, TPMT_HA, TPMS_SCHEME_HASH,
TPMS_NULL_SIGNATURE])
"""
req = TPM2_FieldUpgradeStart_REQUEST(authorization, keyHandle, fuDigest, manifestSignature)
respBuf = self.dispatchCommand(TPM_CC.FieldUpgradeStart, req)
return self.processResponse(respBuf)
# FieldUpgradeStart()
def FieldUpgradeData(self, fuData):
""" This command will take the actual field upgrade image to be
installed on the TPM. The exact format of fuData is vendor-specific.
This command is only possible following a successful
TPM2_FieldUpgradeStart(). If the TPM has not received a properly
authorized TPM2_FieldUpgradeStart(), then the TPM shall return
TPM_RC_FIELDUPGRADE.
Args:
fuData (int): Field upgrade image data
Returns:
nextDigest - Tagged digest of the next block
TPM_ALG_NULL if field update is complete
firstDigest - Tagged digest of the first block of the sequence
"""
req = TPM2_FieldUpgradeData_REQUEST(fuData)
respBuf = self.dispatchCommand(TPM_CC.FieldUpgradeData, req)
return self.processResponse(respBuf, FieldUpgradeDataResponse)
# FieldUpgradeData()
def FirmwareRead(self, | |
# synapse/federation/sender/__init__.py
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
from collections import OrderedDict
from typing import (
TYPE_CHECKING,
Collection,
Dict,
Hashable,
Iterable,
List,
Optional,
Set,
Tuple,
)
import attr
from prometheus_client import Counter
from typing_extensions import Literal
from twisted.internet import defer
from twisted.internet.interfaces import IDelayedCall
import synapse.metrics
from synapse.api.presence import UserPresenceState
from synapse.events import EventBase
from synapse.federation.sender.per_destination_queue import PerDestinationQueue
from synapse.federation.sender.transaction_manager import TransactionManager
from synapse.federation.units import Edu
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics import (
LaterGauge,
event_processing_loop_counter,
event_processing_loop_room_count,
events_processed_counter,
)
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
from synapse.util import Clock
from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.events.presence_router import PresenceRouter
from synapse.server import HomeServer
# Module-level logger for this package.
logger = logging.getLogger(__name__)
# Prometheus counter: per its help string, PDUs queued for sending to one or
# more destinations.
sent_pdus_destination_dist_count = Counter(
    "synapse_federation_client_sent_pdu_destinations:count",
    "Number of PDUs queued for sending to one or more destinations",
)
# Prometheus counter: per its help string, total PDUs queued across all
# destinations.
sent_pdus_destination_dist_total = Counter(
    "synapse_federation_client_sent_pdu_destinations:total",
    "Total number of PDUs queued for sending across all destinations",
)
# Time (in s) after Synapse's startup that we will begin to wake up destinations
# that have catch-up outstanding.
CATCH_UP_STARTUP_DELAY_SEC = 15
# Time (in s) to wait in between waking up each destination, i.e. one destination
# will be woken up every <x> seconds after Synapse's startup until we have woken
# every destination that has outstanding catch-up.
CATCH_UP_STARTUP_INTERVAL_SEC = 5
class AbstractFederationSender(metaclass=abc.ABCMeta):
    """Interface for the federation sender: queues new events, read
    receipts, presence and EDUs for delivery to remote homeservers.
    """
    @abc.abstractmethod
    def notify_new_events(self, max_token: RoomStreamToken) -> None:
        """This gets called when we have some new events we might want to
        send out to other servers.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    async def send_read_receipt(self, receipt: ReadReceipt) -> None:
        """Send a RR to any other servers in the room
        Args:
            receipt: receipt to be sent
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def send_presence_to_destinations(
        self, states: Iterable[UserPresenceState], destinations: Iterable[str]
    ) -> None:
        """Send the given presence states to the given destinations.
        Args:
            states: the presence states to send
            destinations: the remote servers to send them to
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def build_and_send_edu(
        self,
        destination: str,
        edu_type: str,
        content: JsonDict,
        key: Optional[Hashable] = None,
    ) -> None:
        """Construct an Edu object, and queue it for sending
        Args:
            destination: name of server to send to
            edu_type: type of EDU to send
            content: content of EDU
            key: clobbering key for this edu
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def send_device_messages(self, destination: str, immediate: bool = True) -> None:
        """Tells the sender that a new device message is ready to be sent to the
        destination. The `immediate` flag specifies whether the messages should
        be tried to be sent immediately, or whether it can be delayed for a
        short while (to aid performance).
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def wake_destination(self, destination: str) -> None:
        """Called when we want to retry sending transactions to a remote.
        This is mainly useful if the remote server has been down and we think it
        might have come back.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def get_current_token(self) -> int:
        # NOTE(review): presumably the current position of the outbound
        # federation stream — confirm against implementations.
        raise NotImplementedError()
    @abc.abstractmethod
    def federation_ack(self, instance_name: str, token: int) -> None:
        # NOTE(review): presumably acknowledges that *instance_name* has
        # processed the stream up to *token* — confirm against implementations.
        raise NotImplementedError()
    @abc.abstractmethod
    async def get_replication_rows(
        self, instance_name: str, from_token: int, to_token: int, target_row_count: int
    ) -> Tuple[List[Tuple[int, Tuple]], int, bool]:
        raise NotImplementedError()
@attr.s
class _DestinationWakeupQueue:
    """A queue of destinations that need to be woken up due to new updates.
    Staggers waking up of per destination queues to ensure that we don't attempt
    to start TLS connections with many hosts all at once, leading to pinned CPU.
    """
    # The maximum duration in seconds between queuing up a destination and it
    # being woken up.
    _MAX_TIME_IN_QUEUE = 30.0
    # The maximum duration in seconds between waking up consecutive destination
    # queues.
    _MAX_DELAY = 0.1
    sender: "FederationSender" = attr.ib()
    clock: Clock = attr.ib()
    # OrderedDict used as an ordered set of destination names (values always None).
    queue: "OrderedDict[str, Literal[None]]" = attr.ib(factory=OrderedDict)
    # True while _handle() is draining the queue in the background.
    processing: bool = attr.ib(default=False)
    def add_to_queue(self, destination: str) -> None:
        """Add a destination to the queue to be woken up."""
        self.queue[destination] = None
        if not self.processing:
            # _handle is wrapped as a background process, so this call is
            # fire-and-forget rather than awaited.
            self._handle()
    @wrap_as_background_process("_DestinationWakeupQueue.handle")
    async def _handle(self) -> None:
        """Background process to drain the queue."""
        if not self.queue:
            return
        assert not self.processing
        self.processing = True
        try:
            # We start with a delay that should drain the queue quickly enough that
            # we process all destinations in the queue in _MAX_TIME_IN_QUEUE
            # seconds.
            #
            # We also add an upper bound to the delay, to gracefully handle the
            # case where the queue only has a few entries in it.
            current_sleep_seconds = min(
                self._MAX_DELAY, self._MAX_TIME_IN_QUEUE / len(self.queue)
            )
            while self.queue:
                # FIFO: take the oldest queued destination.
                destination, _ = self.queue.popitem(last=False)
                queue = self.sender._get_per_destination_queue(destination)
                if not queue._new_data_to_send:
                    # The per destination queue has already been woken up.
                    continue
                queue.attempt_new_transaction()
                await self.clock.sleep(current_sleep_seconds)
                if not self.queue:
                    break
                # More destinations may have been added to the queue, so we may
                # need to reduce the delay to ensure everything gets processed
                # within _MAX_TIME_IN_QUEUE seconds.
                current_sleep_seconds = min(
                    current_sleep_seconds, self._MAX_TIME_IN_QUEUE / len(self.queue)
                )
        finally:
            self.processing = False
class FederationSender(AbstractFederationSender):
def __init__(self, hs: "HomeServer"):
    """Wire up the federation sender against the homeserver's services."""
    self.hs = hs
    self.server_name = hs.hostname
    self.store = hs.get_datastores().main
    self.state = hs.get_state_handler()
    self.clock = hs.get_clock()
    self.is_mine_id = hs.is_mine_id
    # Lazily initialised elsewhere; None until first needed.
    self._presence_router: Optional["PresenceRouter"] = None
    self._transaction_manager = TransactionManager(hs)
    self._instance_name = hs.get_instance_name()
    self._federation_shard_config = hs.config.worker.federation_shard_config
    # map from destination to PerDestinationQueue
    self._per_destination_queues: Dict[str, PerDestinationQueue] = {}
    # Prometheus gauges computed on demand from the per-destination queues.
    LaterGauge(
        "synapse_federation_transaction_queue_pending_destinations",
        "",
        [],
        lambda: sum(
            1
            for d in self._per_destination_queues.values()
            if d.transmission_loop_running
        ),
    )
    LaterGauge(
        "synapse_federation_transaction_queue_pending_pdus",
        "",
        [],
        lambda: sum(
            d.pending_pdu_count() for d in self._per_destination_queues.values()
        ),
    )
    LaterGauge(
        "synapse_federation_transaction_queue_pending_edus",
        "",
        [],
        lambda: sum(
            d.pending_edu_count() for d in self._per_destination_queues.values()
        ),
    )
    # True while _process_event_queue_loop is running.
    self._is_processing = False
    # Highest event stream position we have been notified about.
    self._last_poked_id = -1
    # map from room_id to a set of PerDestinationQueues which we believe are
    # awaiting a call to flush_read_receipts_for_room. The presence of an entry
    # here for a given room means that we are rate-limiting RR flushes to that room,
    # and that there is a pending call to _flush_rrs_for_room in the system.
    self._queues_awaiting_rr_flush_by_room: Dict[str, Set[PerDestinationQueue]] = {}
    # Minimum interval (ms) between read-receipt transactions per room,
    # derived from the configured per-second rate limit.
    self._rr_txn_interval_per_room_ms = (
        1000.0
        / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
    )
    # wake up destinations that have outstanding PDUs to be caught up
    self._catchup_after_startup_timer: Optional[
        IDelayedCall
    ] = self.clock.call_later(
        CATCH_UP_STARTUP_DELAY_SEC,
        run_as_background_process,
        "wake_destinations_needing_catchup",
        self._wake_destinations_needing_catchup,
    )
    self._external_cache = hs.get_external_cache()
    # Staggers wake-ups of per-destination queues (see _DestinationWakeupQueue).
    self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock)
def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
    """Return the PerDestinationQueue for `destination`, creating it lazily.

    Args:
        destination: server_name of remote server
    """
    existing = self._per_destination_queues.get(destination)
    if existing:
        return existing
    created = PerDestinationQueue(self.hs, self._transaction_manager, destination)
    self._per_destination_queues[destination] = created
    return created
def notify_new_events(self, max_token: RoomStreamToken) -> None:
    """Notify the sender that new events may need federating to other servers.

    Records the latest event stream position and, unless a processing loop
    is already running, starts one in the background.
    """
    # We just use the minimum stream ordering and ignore the vector clock
    # component. This is safe to do as long as we *always* ignore the vector
    # clock components.
    if max_token.stream > self._last_poked_id:
        self._last_poked_id = max_token.stream
    if self._is_processing:
        # An existing loop will observe the updated _last_poked_id.
        return
    # fire off a processing loop in the background
    run_as_background_process(
        "process_event_queue_for_federation", self._process_event_queue_loop
    )
async def _process_event_queue_loop(self) -> None:
try:
self._is_processing = True
while True:
last_token = await self.store.get_federation_out_pos("events")
next_token, events = await self.store.get_all_new_events_stream(
last_token, self._last_poked_id, limit=100
)
logger.debug(
"Handling %i -> %i: %i events to send (current id %i)",
last_token,
next_token,
len(events),
self._last_poked_id,
)
if not events and next_token >= self._last_poked_id:
logger.debug("All events processed")
break
async def handle_event(event: EventBase) -> None:
# Only send events for this server.
send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
is_mine = self.is_mine_id(event.sender)
if not is_mine and send_on_behalf_of is None:
logger.debug("Not sending remote-origin event %s", event)
return
# We also want to not send out-of-band membership events.
#
# OOB memberships are used in three (and a half) situations:
#
# (1) invite events which we have received over federation. Those
# will have a `sender` on a different server, so will be
# skipped by the "is_mine" test above anyway.
#
# (2) rejections of invites to federated rooms - either remotely
# or locally generated. (Such rejections are normally
# created via federation, in which case the remote server is
# responsible for sending out the rejection. If that fails,
# | |
# Generated from resolver: lts-10.5
# Core packages pinned by the Stackage lts-10.5 snapshot. These are the
# compiler-provided ("wired-in") packages — note the "ghc": "8.2.2" entry —
# mapped name -> version; unlike `packages` below they carry no sha256
# because they are not fetched from Hackage.
core_packages = (
    { "array": "0.5.2.0",
      "base": "4.10.1.0",
      "binary": "0.8.5.1",
      "bytestring": "0.10.8.2",
      "containers": "0.5.10.2",
      "deepseq": "1.4.3.0",
      "directory": "1.3.0.2",
      "filepath": "1.4.1.2",
      "ghc": "8.2.2",
      "ghc-boot": "8.2.2",
      "ghc-boot-th": "8.2.2",
      "ghc-prim": "0.5.1.1",
      "ghci": "8.2.2",
      "hoopl": "3.10.2.2",
      "hpc": "0.6.0.3",
      "integer-gmp": "1.0.1.0",
      "pretty": "1.1.3.3",
      "process": "1.6.1.0",
      "rts": "1.0",
      "template-haskell": "2.12.0.0",
      "terminfo": "0.4.1.0",
      "time": "1.8.0.2",
      "transformers": "0.5.2.0",
      "unix": "2.7.2.2",
    }
)
packages = (
{ "ALUT":
struct(
version = "2.4.0.2",
sha256 =
"b8364da380f5f1d85d13e427851a153be2809e1838d16393e37566f34b384b87",
),
"Agda":
struct(
version = "2.5.3",
sha256 =
"aa14d4a3582013100f71e64d71c5deff6caa2a286083e20fc16f6dbb0fdf0065",
),
"BiobaseNewick":
struct(
version = "0.0.0.2",
sha256 =
"6432f684a75fd8a2cea59a5359a59f48020ead19119efaed7018ecae726d13bd",
),
"BlastHTTP":
struct(
version = "1.2.1",
sha256 =
"cee85e0fba0530aff57209b3d91a800db52b63c3f7e4a431a04e7a9cbd355bd5",
),
"Boolean":
struct(
version = "0.2.4",
sha256 =
"67216013b02b8ac5b534a1ef25f409f930eea1a85eae801933a01ad43145eef8",
),
"BoundedChan":
struct(
version = "1.0.3.0",
sha256 =
"531ceaed7f62844c2a63a7cbfdcab332ea5eaa218e9922ca3305580438adc46d",
),
"Cabal":
struct(
version = "2.0.1.1",
sha256 =
"802bc6d0113fdb734ea938ad2aadc14f590e372b55d56be6712de319bb343d1b",
),
"ChannelT":
struct(
version = "0.0.0.4",
sha256 =
"76437ff58b44bfcf805494a12916b127c0e591718b6e7372315992782720d91b",
),
"Chart":
struct(
version = "1.8.2",
sha256 =
"8442c16959e2a46355418b82c0c6fc3174d04b41ea6e2e320c56588a563be28d",
),
"Chart-cairo":
struct(
version = "1.8.2",
sha256 =
"7cd8ba9da4c43ff4d6ba468d65e91b7239a0543038996a9a626818dc1a408fc1",
),
"Chart-diagrams":
struct(
version = "1.8.2",
sha256 =
"ca181dec04bac1029101dd75951f48710ebc42f5333e06c57943e3245bba9f41",
),
"ChasingBottoms":
struct(
version = "1.3.1.3",
sha256 =
"38984a3514fb6a3be12c7a5ea50bced9c674d67d83917b37240c5721b4e45c12",
),
"Clipboard":
struct(
version = "2.3.2.0",
sha256 =
"3f82c8183a599025c5199ba50d0661512683e9cf29e6054858f1abe2ab8b25b7",
),
"ClustalParser":
struct(
version = "1.2.1",
sha256 =
"0034a9fdca3e4bcb70edb961536ee4acb162fec0ab1b2c67108598bfcd75879d",
),
"ConfigFile":
struct(
version = "1.1.4",
sha256 =
"ae087b359ff2945a62b671449227e0a811d143ee651179f4e7e9c66548e0f514",
),
"DAV":
struct(
version = "1.3.2",
sha256 =
"613314357579b29e1d3fa8451b51e8b9a1307a2b33b65a3f2b2ef2bece025169",
),
"DRBG":
struct(
version = "0.5.5",
sha256 =
"21df3202486cc83c7cc3f867cb139eac9a3f69bd91b5f6b016ae026e03c33bfd",
),
"Decimal":
struct(
version = "0.4.2",
sha256 =
"c5f53652949eedd48dbafc1bb3e08c05348d5e25c248e8e1543bc380a9f84261",
),
"Diff":
struct(
version = "0.3.4",
sha256 =
"77b7daec5a79ade779706748f11b4d9b8f805e57a68e7406c3b5a1dee16e0c2f",
),
"Earley":
struct(
version = "0.12.0.1",
sha256 =
"fc26a93c20b2c13fc9d4717926e724f1e9abfb7c8543657d835e35ba6e56d5b1",
),
"Ebnf2ps":
struct(
version = "1.0.15",
sha256 =
"0ecce7d721d6c8993fa6ba6cfb16f1101d85e00bbaf0b6941d36a00badea2b4b",
),
"EdisonAPI":
struct(
version = "1.3.1",
sha256 =
"95a3b8d01599520a50456219b5a2e9f7832bcddaaeb8e94ce777bd87a4a6b56e",
),
"EdisonCore":
struct(
version = "1.3.1.1",
sha256 =
"3e0720ee3b179304f563b99dd446c1d6911e31ddc4d0f78d6550b18e59ed501b",
),
"EntrezHTTP":
struct(
version = "1.0.4",
sha256 =
"b86ffe46c049bdfa7d7ebe99215ac994735fe5772dadf6c2f48ae702f278e5be",
),
"FPretty":
struct(
version = "1.1",
sha256 =
"b8ac0122e923b0e20cee6ba77ffb07dfeaa96a194cdc1622808e97f443a8eb42",
),
"FenwickTree":
struct(
version = "0.1.2.1",
sha256 =
"9c172d62b24365e663a0355e8eaa34362a1a769c18a64391939a9b50e384f03c",
),
"FindBin":
struct(
version = "0.0.5",
sha256 =
"279c7967e0803ca3b9a0a1956ce7ba9b9a2294eb9f971bea8a557b5f80ddfda4",
),
"FloatingHex":
struct(
version = "0.4",
sha256 =
"b277054db48d2dec62e3831586f218cbe0a056dec44dbc032e9a73087425a24c",
),
"FontyFruity":
struct(
version = "0.5.3.3",
sha256 =
"b0d06e7c5eb1aa7ffd11a85598df05d034bab3a83130fdf37121f89136e0025c",
),
"ForestStructures":
struct(
version = "0.0.0.2",
sha256 =
"fe74067fee601844de5c839a115f2bd75d4a1be9f0ee8ec42c0150bcf886693f",
),
"Frames":
struct(
version = "0.3.0.2",
sha256 =
"26a1b821f1dca29ac25c6c964984cba1cca3db0176c73271b545e2e8dac00da8",
),
"GLFW-b":
struct(
version = "1.4.8.1",
sha256 =
"438a49ec5cf6cbda95966fcc42750b9245f54fe7daf69a6493e7703c3f178ae9",
),
"GLURaw":
struct(
version = "2.0.0.3",
sha256 =
"582cf8c0c1b8c0123ee9a8a06eba65fffded6decfe4e2e08bfea308f55f7ccee",
),
"GLUT":
struct(
version = "2.7.0.12",
sha256 =
"66f516bd9f836e5252fe0186e447b68a61b594d9247466c502b74994d3e9f1b5",
),
"GPipe":
struct(
version = "2.2.3",
sha256 =
"77baca8d7a7933d069a3b20d6a16270e8560f1f6aff941c950e71a180e1976a5",
),
"Genbank":
struct(
version = "1.0.3",
sha256 =
"2baf631ac851b1c29ba531ae1c16b8ba3c4b672bac9d4840a3b9afc0a89d2b93",
),
"GenericPretty":
struct(
version = "1.2.1",
sha256 =
"175e334292904d365c630c9dfcc5a94f0c052a88a10d34513f39ebc36205672d",
),
"Glob":
struct(
version = "0.9.1",
sha256 =
"80cb0b048d78f71ba5af1e58c8d651f5b6f1b37766d4da9b18e30a40edd4f567",
),
"H":
struct(
version = "0.9.0.1",
sha256 =
"5fc04dfefcac9f6882cea9e65755479f7b1d853618c89258a005df63c8d57134",
),
"HCodecs":
struct(
version = "0.5",
sha256 =
"b1bf109a5e0877b47eb2f942ad0d1aa2368b9c006882ba07fe345dd0a90a1756",
),
"HDBC":
struct(
version = "2.4.0.2",
sha256 =
"670757fd674b6caf2f456034bdcb54812af2cdf2a32465d7f4b7f0baa377db5a",
),
"HDBC-mysql":
struct(
version = "0.7.1.0",
sha256 =
"81c985d4a243c965930fb412b3175ca799ba66985f8b6844014fd600df1da7cf",
),
"HDBC-session":
struct(
version = "0.1.1.1",
sha256 =
"255c4e55f888c873bfa6f9af25ccb7fb0eb004f398b86b74ed7878d39c59ce99",
),
"HPDF":
struct(
version = "1.4.10",
sha256 =
"de2bfddd93eeef2129a2378e8dce486d086bec3c48ee2a1bf1a5fb01581607d4",
),
"HSet":
struct(
version = "0.0.1",
sha256 =
"eba93be5a76581585ae33af6babe9c2718fae307d41989cd36a605d9b0e8d16a",
),
"HStringTemplate":
struct(
version = "0.8.6",
sha256 =
"7022cb9c1e1c223cfb8adf5ca6994b9f4709399ae197cb7541247c0b5d0255cd",
),
"HSvm":
struct(
version = "0.1.0.3.22",
sha256 =
"8dac8a583c762675f2d64138303618f017d6be95d59e60774ea7cbfc040dab04",
),
"HTF":
struct(
version = "0.13.2.2",
sha256 =
"1db49f6b796699e5f86ae9485bd3f5874eca23bc01a0c8e1ac58519f47e1c3ba",
),
"HTTP":
struct(
version = "4000.3.9",
sha256 =
"05962b8a6248d348977af2a755f0ed57a6ab523185235546dd66cf90a54663ff",
),
"HUnit":
struct(
version = "1.6.0.0",
sha256 =
"7448e6b966e98e84b7627deba23f71b508e9a61e7bc571d74304a25d30e6d0de",
),
"HUnit-approx":
struct(
version = "1.1.1.1",
sha256 =
"4a4327d328bb8b944c73ec211dd29e953e477f99fd3f9e28fe5200f02fa62baf",
),
"HaTeX":
struct(
version = "3.17.3.1",
sha256 =
"ab19f779ba7c265f80d14d2bae85d26c611c031b877f228432b833909c1702ef",
),
"HaXml":
struct(
version = "1.25.4",
sha256 =
"d77467b8c855ba85d900b5d1a9b771aa498c80d570f9ac60a9f10803cfc01db5",
),
"HandsomeSoup":
struct(
version = "0.4.2",
sha256 =
"0ae2dad3fbde1efee9e45b84b2aeb5b526cc7b3ea2cbc5715494f7bde3ceeefb",
),
"HaskellNet":
struct(
version = "0.5.1",
sha256 =
"3245d31ad76f9f9013a2f6e2285d73ed37376eeb073c100b9a6d19e87f0ca838",
),
"HaskellNet-SSL":
struct(
version = "0.3.4.0",
sha256 =
"83ae92547fd5d52b5b74402101ec254423abeac0c0725e14a112d6ffc843040f",
),
"Hclip":
struct(
version = "3.0.0.4",
sha256 =
"d8c80bd2d035571cd76ce4f69453e9fcef4096dbc8868eb4cfcd7eb74fe5f712",
),
"Hoed":
struct(
version = "0.4.1",
sha256 =
"074f44d54aa0ed0334d9ff317b1293b03802f8a6971217d082b597d3afe7a491",
),
"HsOpenSSL":
struct(
version = "0.11.4.12",
sha256 =
"356a526263d988254d5830dd5a368380163975174dfc9230b697e6129e5c15a2",
),
"HsOpenSSL-x509-system":
struct(
version = "0.1.0.3",
sha256 =
"5bdcb7ae2faba07a374109fea0a1431ae09d080f8574e60ab7a351b46f931f92",
),
"IPv6Addr":
struct(
version = "1.0.1",
sha256 =
"dff7e9d19e60f08401fd79a8d5004b2166d45d0a1160e5705aac821268a54207",
),
"IPv6DB":
struct(
version = "0.2.4",
sha256 =
"eda3378299623ca8aceb7a6ade18ebc5a06d8e7a0df1cae41c90b5c960bbb7ab",
),
"IfElse":
struct(
version = "0.85",
sha256 =
"8ad3bfc3e2c867e6330d9bff874b3105476c35b2e1638fd448f233e9f80addcd",
),
"Imlib":
struct(
version = "0.1.2",
sha256 =
"3ed318a7777a3b0752327b7b128edb3a1d562202b480a6d6b793b79ed90ebd1c",
),
"Interpolation":
struct(
version = "0.3.0",
sha256 =
"1bf68489dafd52f25d93a3aad672a2dc7110d77ffb85348cb82c3e5a51e8cb10",
),
"IntervalMap":
struct(
version = "0.5.3.1",
sha256 =
"9a575459f66ad48b734ca79885b599ab5a5eed800bb409b11f08c8a7d53f8c21",
),
"JuicyPixels":
struct(
version = "3.2.9.4",
sha256 =
"ff35047d6f453f9fd5cccb99b2170375ecbf7f73ba350db6ac89b091d91f92d6",
),
"JuicyPixels-extra":
struct(
version = "0.2.2",
sha256 =
"8d7e375f8f30b0f98912dd24365920a4b466aecb49e28f7325408fd378d71eb8",
),
"JuicyPixels-scale-dct":
struct(
version = "0.1.1.2",
sha256 =
"9abd9d00520424912201b58343f252362b9f34760023d3324732ca00a906fe96",
),
"LibZip":
struct(
version = "1.0.1",
sha256 =
"a636e0202d2a3f60d894a814bd9834cf8c62313b67ccc48c295f02a4bebe425f",
),
"List":
struct(
version = "0.6.2",
sha256 =
"c4b92be1202fc59112018f76d5b17cd3a659ebc36384a46e000ab2fbaf99b878",
),
"ListLike":
struct(
version = "4.5.1",
sha256 =
"b70745335b563cd9039bb17a1e2faf7edb1b68febdd19586b28ab67c55562a8d",
),
"MFlow":
struct(
version = "0.4.6.0",
sha256 =
"4e93f7488152d88359fd100a742c2ea96788284d262f3cd1b50d936f80f1a342",
),
"MemoTrie":
struct(
version = "0.6.9",
sha256 =
"1d6045b8fdf7b89ed6b495e535613f5091cdfc9cdfe05a862207e76ce205f794",
),
"MissingH":
struct(
version = "1.4.0.1",
sha256 =
"283f2afd46625d98b4e29f77edadfa5e6009a227812ee2ece10091ad6a7e9b71",
),
"MonadPrompt":
struct(
version = "1.0.0.5",
sha256 =
"b012cbbe83650f741c7b7f6eafcc89dec299b0ac74a758b6f3a8cdfc5d3bbeda",
),
"MonadRandom":
struct(
version = "0.5.1",
sha256 =
"9e3f0f92807285302036dc504066ae6d968c8b0b4c25d9360888f31fe1730d87",
),
"MusicBrainz":
struct(
version = "0.3.1",
sha256 =
"3518fd97581cbb90a15c5dc62b637cde5d71911b3f10d62c37ed17157415f3fd",
),
"Network-NineP":
struct(
version = "0.4.1",
sha256 =
"9d7a456c672c1e7ef1075e27654b21ecacd8062917e1482c8060e404f3960f4a",
),
"NineP":
struct(
version = "0.0.2.1",
sha256 =
"4bb1516b9fb340118960043e0c72aa62316be8ff3f78cc8c1354e2fac96dd8cc",
),
"NoHoed":
struct(
version = "0.1.1",
sha256 =
"9b663a234c034e0049126ae7f06d1756dc496012177bf18548c6d8caeec43b3d",
),
"NoTrace":
struct(
version = "0.3.0.2",
sha256 =
"39ea78488aa2a172691b2d97b3bc6673a423f1eb0c184381da546de61d94125b",
),
"NumInstances":
struct(
version = "1.4",
sha256 =
"cbdb2a49346f59ceb5ab38592d7bc52e5205580d431d0ac6d852fd9880e59679",
),
"ObjectName":
struct(
version = "1.1.0.1",
sha256 =
"72dbef237580fd4e8567de2de752835bbadd3629f486d1586486d49a49aad210",
),
"OneTuple":
struct(
version = "0.2.1",
sha256 =
"4b6f74b6d92df112b0f4eaf15ccdc5fbb763d59f07e9a2afa5690ef89159a2f4",
),
"Only":
struct(
version = "0.1",
sha256 =
"ab7aa193e8c257d3bda6b0b3c1cbcf74cdaa85ab08cb20c2dd62ba248c1ab265",
),
"OpenAL":
struct(
version = "1.7.0.4",
sha256 =
"3989f6c4fe437843551004dd011c4308bf63d787ae4fbb8ce71d44b1b0b1f118",
),
"OpenGL":
struct(
version = "3.0.2.0",
sha256 =
"faa99459724d614d2cf2d2b83c7bda4898ee71752a253bf4699c096822450efb",
),
"OpenGLRaw":
struct(
version = "3.2.7.0",
sha256 =
"62723d0fc287e5e5e93853b1fed0ca76495e6b693261aa9aae35340182a58a08",
),
"PSQueue":
struct(
version = "1.1",
sha256 =
"a8e0871ad10f916f55c3b9baec53eff23c4e97e09cf96d6c66771789e00a49cc",
),
"ParsecTools":
struct(
version = "0.0.2.0",
sha256 =
"ef4843353127aa3e6f6ab0aece9f4245225d375802921e151a1751d797857a87",
),
"PortMidi":
struct(
version = "0.1.6.1",
sha256 =
"b89e9293d5b80d23b197dbb9bf196737765c66ffe96eaabdb9517fe20b516690",
),
"QuasiText":
struct(
version = "0.1.2.6",
sha256 =
"e801d269e25263645ee66fc090040fe9b9c8a8e5bf10485801dd7a5a30e0f119",
),
"QuickCheck":
struct(
version = "2.10.1",
sha256 =
"1dbb56786854fd539315497086284dfff039a52a487319e648140e4987b6d5e5",
),
"RNAlien":
struct(
version = "1.3.7",
sha256 =
"de54278982eecd9568ee155a3155f632b503776fff7634b8b3746e29d28248a5",
),
"RSA":
struct(
version = "2.3.0",
sha256 =
"eee76dc7f9dd2d2cdeb014af728ff56f2f5d2908212bd3bb8c5e89f5c6485333",
),
"Rasterific":
struct(
version = "0.7.2.1",
sha256 =
"7f6d86495a5a3aa72dd9c13f2dd8d93526cd5166889f39c5e7dde529cef44d74",
),
"RefSerialize":
struct(
version = "0.4.0",
sha256 =
"05b25eb1ab943d96119aa2acca678fc8f194c3411af521e3835f4de5c752bbb2",
),
"SCalendar":
struct(
version = "1.1.0",
sha256 =
"4971bf6df45953434088ba50d0e17dcc49a0e4c2dd37ad06385c1f87d87b348d",
),
"SHA":
struct(
version = "1.6.4.2",
sha256 =
"c470176f63cbe49fd0502a1b32ef22bc01b1af42385583b8be94547750958a8c",
),
"STMonadTrans":
struct(
version = "0.4.3",
sha256 =
"574fd56cf74036c20d00a09d815659dbbb0ae51c8103d00c93cd9558ad3322db",
),
"SVGFonts":
struct(
version = "1.6.0.3",
sha256 =
"bc8f8863799070c345fdd88c065852c6434af9e802fd0171df2a3dbd37f35887",
),
"SafeSemaphore":
struct(
version = "0.10.1",
sha256 =
"21e5b737a378cae9e1faf85cab015316d4c84d4b37e6d9d202111cef8c4cef66",
),
"SegmentTree":
struct(
version = "0.3",
sha256 =
"6188c1b1276d7fa0391098a563df73dd522d20b57dc5321fe3418a9e3ca84fc1",
),
"ShellCheck":
struct(
version = "0.4.7",
sha256 =
"184955264d42c5dca0300fb9688b9a6c9a1c70c345dbcd8e30bb48a049a70d7c",
),
"Spintax":
struct(
version = "0.3.2",
sha256 =
"f7e620817ce065f06ae163b08461eb3ce3dc0254caf0dcbd00d01836759bf048",
),
"Spock":
struct(
version = "0.12.0.0",
sha256 =
"8392d1ee34b46238c6bfe951080f06e11e1f3622d8402e7762c70aa61430e3d9",
),
"Spock-api":
struct(
version = "0.12.0.0",
sha256 =
"8cfdbcbd2fa426c595fb7d29f8a6395dea17476c15d5ae863da2605b1c6ebe00",
),
"Spock-api-server":
struct(
version = "0.12.0.0",
sha256 =
"29734206823875ec71d7cad14bf012adb70b01700975e2181a7cb52713b131ce",
),
"Spock-core":
struct(
version = "0.12.0.0",
sha256 =
"e69b70ea3027fa644d546bcae25bbf75e38abd6f4a7f88f0628fea6e16e97895",
),
"Spock-lucid":
struct(
version = "0.4.0.1",
sha256 =
"3126d512e9528a6cf8830ad355dd2f0429bfd41f0ae048138818ae8dcedc2397",
),
"Spock-worker":
struct(
version = "0.3.1.0",
sha256 =
"edc009d59fe528ab3bee887b8092f720a8a4ee85b550dec065964ed55c76dc4b",
),
"StateVar":
struct(
version = "1.1.0.4",
sha256 =
"7ad68decb5c9a76f83c95ece5fa13d1b053e4fb1079bd2d3538f6b05014dffb7",
),
"Strafunski-StrategyLib":
struct(
version = "5.0.0.10",
sha256 =
"308a1a051df6bb617c9d37bda297fdbedfb8b4c7f6ea5864443cfb9f15e80cc2",
),
"Stream":
struct(
version = "0.4.7.2",
sha256 =
"990be249b3ef1b0075563026d4d2c803b86e3cbf168965ba6f9f2b4227a007d1",
),
"TCache":
struct(
version = "0.12.1",
sha256 =
"f134b45fcdd127fa1a4214f01d44dc34e994fed137cec63f4c4ea632363ab7bd",
),
"Taxonomy":
struct(
version = "1.0.3",
sha256 =
"b6f793127ba68fce97e1ab5482e41c8833b9577f01ef9b41470ab143c50e9270",
),
"TypeCompose":
struct(
version = "0.9.12",
sha256 =
"3a182c2cc93f8291b3aedfc32c0b1faa84a982601c1a24cbe7fe1ecc50e333e2",
),
"Unique":
struct(
version = "0.4.7.2",
sha256 =
"b56155043817187170d02e6fa7c5ec69c72dc2a1c00b50bdd34d6d2875795b6b",
),
"ViennaRNAParser":
struct(
version = "1.3.3",
sha256 =
"7ee941d106b8b0c57e1ca5104d19b94215721e4a7b8aeb53fa353d246efbaefe",
),
"Win32":
struct(
version = "2.5.4.1",
sha256 =
"cc183e9e545ad04fe8e509eb9447e9d11b160b2027482230cee8cdc141fd3d64",
),
"Win32-notify":
struct(
version = "0.3.0.3",
sha256 =
"0c21dbe06cb1ce3b3e5f1aace0b7ee359b36e7cb057f8fe2c28c943150044116",
),
"Workflow":
struct(
version = "0.8.3",
sha256 =
"c89b4b3a4a29fe576f8972ffa1e698eff8ac0ceb433642fc0b3f9c0308d22123",
),
"X11":
struct(
version = "1.8",
sha256 =
"541b166aab1e05a92dc8f42a511d827e7aad373af12ae283b9df9982ccc09d8e",
),
"X11-xft":
struct(
version = "0.3.1",
sha256 =
"4eba3fee62570e06447654030a62fb55f19587884bc2cef77a9c3b2c3458f8d1",
),
"Xauth":
struct(
version = "0.1",
sha256 =
"ba332dea9ec152b3f676d22461eee81a657e16987c3dfb8249e9dbe0cda56ed7",
),
"Yampa":
struct(
version = "0.10.7",
sha256 =
"14b13dcb9e52a4c6f738d7515d82d681618720de5598ec11448646333193d1c5",
),
"YampaSynth":
struct(
version = "0.2",
sha256 =
"a1c6a0ea57aee855ca3f558f1b6d7ec167abb57333052d8a9f7b46ef323d0a09",
),
"abstract-deque":
struct(
version = "0.3",
sha256 =
"09aa10f38193a8275a7791b92a4f3a7192a304874637e2a35c897dde25d75ca2",
),
"abstract-deque-tests":
struct(
version = "0.3",
sha256 =
"5f17fb4cc26559f81c777f494622907e8927181175eaa172fb6adbf14b2feba5",
),
"abstract-par":
struct(
version = "0.3.3",
sha256 =
"248a8739bd902462cb16755b690b55660e196e58cc7e6ef8157a72c2a3d5d860",
),
"accelerate":
struct(
version = "1.1.1.0",
sha256 =
"a4f482472bbd0e858bbe568834490af46d882bafb598576213b63a44be828ee1",
),
"accelerate-arithmetic":
struct(
version = "1.0",
sha256 =
"62a467818285031330ecc85968d58d86986e1dacebe901c9d86b0fa53ba60c3f",
),
"accelerate-bignum":
struct(
version = "0.1.0.0",
sha256 =
"7c18c467d646ed30131ad197144c4f7fa6ce3e821d41c6db3dba4361f04e30a5",
),
"accelerate-blas":
struct(
version = "0.1.0.1",
sha256 =
"cda96b600cfa251036db1c3568778235cb766d6f9bcff80420b4de48867a4c66",
),
"accelerate-examples":
struct(
version = "1.1.0.0",
sha256 =
"3de4806f0d4e76733dc76824e737820e53ece0eb42787754739284b1cdacf27e",
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['DataRepositoryAssociationArgs', 'DataRepositoryAssociation']
@pulumi.input_type
class DataRepositoryAssociationArgs:
    """Input argument set for the ``DataRepositoryAssociation`` resource.

    NOTE: tfgen-generated code (see file header); ``@pulumi.input_type``
    introspects the ``__init__`` signature and the getter/setter properties
    below, so their shape must not be changed by hand.
    """
    def __init__(__self__, *,
                 data_repository_path: pulumi.Input[str],
                 file_system_id: pulumi.Input[str],
                 file_system_path: pulumi.Input[str],
                 batch_import_meta_data_on_create: Optional[pulumi.Input[bool]] = None,
                 delete_data_in_filesystem: Optional[pulumi.Input[bool]] = None,
                 imported_file_chunk_size: Optional[pulumi.Input[int]] = None,
                 s3: Optional[pulumi.Input['DataRepositoryAssociationS3Args']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a DataRepositoryAssociation resource.
        :param pulumi.Input[str] data_repository_path: The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
        :param pulumi.Input[str] file_system_id: The ID of the Amazon FSx file system to on which to create a data repository association.
        :param pulumi.Input[str] file_system_path: A path on the file system that points to a high-level directory (such as `/ns1/`) or subdirectory (such as `/ns1/subdir/`) that will be mapped 1-1 with `data_repository_path`. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path `/ns1/`, then you cannot link another data repository with file system path `/ns1/ns2`. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.
        :param pulumi.Input[bool] batch_import_meta_data_on_create: Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to `false`.
        :param pulumi.Input[bool] delete_data_in_filesystem: Set to true to delete files from the file system upon deleting this data repository association. Defaults to `false`.
        :param pulumi.Input[int] imported_file_chunk_size: For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
        :param pulumi.Input['DataRepositoryAssociationS3Args'] s3: See the `s3` configuration block. Max of 1.
               The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the data repository association. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        # Required arguments are always recorded.
        pulumi.set(__self__, "data_repository_path", data_repository_path)
        pulumi.set(__self__, "file_system_id", file_system_id)
        pulumi.set(__self__, "file_system_path", file_system_path)
        # Optional arguments are recorded only when explicitly provided, so
        # that unset values do not appear in the resource's input map.
        if batch_import_meta_data_on_create is not None:
            pulumi.set(__self__, "batch_import_meta_data_on_create", batch_import_meta_data_on_create)
        if delete_data_in_filesystem is not None:
            pulumi.set(__self__, "delete_data_in_filesystem", delete_data_in_filesystem)
        if imported_file_chunk_size is not None:
            pulumi.set(__self__, "imported_file_chunk_size", imported_file_chunk_size)
        if s3 is not None:
            pulumi.set(__self__, "s3", s3)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)

    @property
    @pulumi.getter(name="dataRepositoryPath")
    def data_repository_path(self) -> pulumi.Input[str]:
        """
        The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
        """
        return pulumi.get(self, "data_repository_path")

    @data_repository_path.setter
    def data_repository_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "data_repository_path", value)

    @property
    @pulumi.getter(name="fileSystemId")
    def file_system_id(self) -> pulumi.Input[str]:
        """
        The ID of the Amazon FSx file system to on which to create a data repository association.
        """
        return pulumi.get(self, "file_system_id")

    @file_system_id.setter
    def file_system_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "file_system_id", value)

    @property
    @pulumi.getter(name="fileSystemPath")
    def file_system_path(self) -> pulumi.Input[str]:
        """
        A path on the file system that points to a high-level directory (such as `/ns1/`) or subdirectory (such as `/ns1/subdir/`) that will be mapped 1-1 with `data_repository_path`. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path `/ns1/`, then you cannot link another data repository with file system path `/ns1/ns2`. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.
        """
        return pulumi.get(self, "file_system_path")

    @file_system_path.setter
    def file_system_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "file_system_path", value)

    @property
    @pulumi.getter(name="batchImportMetaDataOnCreate")
    def batch_import_meta_data_on_create(self) -> Optional[pulumi.Input[bool]]:
        """
        Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to `false`.
        """
        return pulumi.get(self, "batch_import_meta_data_on_create")

    @batch_import_meta_data_on_create.setter
    def batch_import_meta_data_on_create(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "batch_import_meta_data_on_create", value)

    @property
    @pulumi.getter(name="deleteDataInFilesystem")
    def delete_data_in_filesystem(self) -> Optional[pulumi.Input[bool]]:
        """
        Set to true to delete files from the file system upon deleting this data repository association. Defaults to `false`.
        """
        return pulumi.get(self, "delete_data_in_filesystem")

    @delete_data_in_filesystem.setter
    def delete_data_in_filesystem(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "delete_data_in_filesystem", value)

    @property
    @pulumi.getter(name="importedFileChunkSize")
    def imported_file_chunk_size(self) -> Optional[pulumi.Input[int]]:
        """
        For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
        """
        return pulumi.get(self, "imported_file_chunk_size")

    @imported_file_chunk_size.setter
    def imported_file_chunk_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "imported_file_chunk_size", value)

    @property
    @pulumi.getter
    def s3(self) -> Optional[pulumi.Input['DataRepositoryAssociationS3Args']]:
        """
        See the `s3` configuration block. Max of 1.
        The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository.
        """
        return pulumi.get(self, "s3")

    @s3.setter
    def s3(self, value: Optional[pulumi.Input['DataRepositoryAssociationS3Args']]):
        pulumi.set(self, "s3", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the data repository association. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        return pulumi.get(self, "tags_all")

    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
@pulumi.input_type
class _DataRepositoryAssociationState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
association_id: Optional[pulumi.Input[str]] = None,
batch_import_meta_data_on_create: Optional[pulumi.Input[bool]] = None,
data_repository_path: Optional[pulumi.Input[str]] = None,
delete_data_in_filesystem: Optional[pulumi.Input[bool]] = None,
file_system_id: Optional[pulumi.Input[str]] = None,
file_system_path: Optional[pulumi.Input[str]] = None,
imported_file_chunk_size: Optional[pulumi.Input[int]] = None,
s3: Optional[pulumi.Input['DataRepositoryAssociationS3Args']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering DataRepositoryAssociation resources.
:param pulumi.Input[str] arn: Amazon Resource Name of the file system.
:param pulumi.Input[bool] batch_import_meta_data_on_create: Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to `false`.
:param pulumi.Input[str] data_repository_path: The path to the Amazon | |
from RLTest import Env
from sqlalchemy import create_engine
from sqlalchemy.sql import text
import time
import subprocess
import signal
from threading import Thread
# Backend selector values for genericTest(db=...): choose which SQL
# backend the suite runs against.
MYSQL_DB = 1
ORACLESQL_DB = 2
class Background(object):
    """
    Run a callable on a background thread for the duration of a ``with``
    block.

    Entering the context starts a thread executing ``f``; leaving the
    context blocks until ``f`` has finished.  ``isAlive`` is True while
    ``f`` is still running.

    NOTE(review): the original docstring was copy-pasted from
    ``TimeLimit`` and described timeout behavior this class does not
    have; this class only runs ``f`` concurrently with the block body.
    """

    def __init__(self, f):
        # f: zero-argument callable executed on the worker thread.
        self.f = f
        self.isAlive = True

    def doJob(self):
        # Thread target: run the job, then flip the liveness flag.
        self.f()
        self.isAlive = False

    def __enter__(self):
        self.t = Thread(target=self.doJob)
        self.t.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Wait for the background job to finish before leaving the block.
        self.t.join()
class TimeLimit(object):
    """
    A context manager that aborts the enclosed block if it does not
    return within the specified amount of time.

    Implemented with a one-shot SIGALRM real-time timer: on expiry the
    handler records a failure on ``env`` and raises ``Exception(msg)``.
    Signal handlers can only be installed from the main thread.
    """

    def __init__(self, timeout, env, msg):
        self.timeout = timeout  # seconds; floats accepted by setitimer
        self.env = env          # RLTest Env used to record the failure
        self.msg = msg          # message for the failure / exception

    def __enter__(self):
        # Arm a one-shot real-time alarm for `timeout` seconds.
        signal.signal(signal.SIGALRM, self.handler)
        signal.setitimer(signal.ITIMER_REAL, self.timeout, 0)

    def __exit__(self, exc_type, exc_value, traceback):
        # Disarm the timer (a 0 value cancels it) and restore the
        # default SIGALRM disposition.
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, signal.SIG_DFL)

    def handler(self, signum, frame):
        # Record the failure on the test env, then abort the block by
        # raising out of the signal handler.
        self.env.assertTrue(False, message=self.msg)
        raise Exception(self.msg)
class MysqlBackend:
    """
    Backend driver that manages a locally installed MySQL service and
    hands out SQLAlchemy connections to its ``test`` database.
    """

    def __init__(self):
        # Restart MySQL so each run starts from a live server.
        # NOTE(review): '/bin/bash service ...' runs bash with 'service'
        # as a script path rather than invoking the system `service`
        # command — presumably a local wrapper script; confirm it exists
        # on the test host.
        subprocess.Popen(['/bin/bash', 'service', 'mysql', 'restart'], stdout=subprocess.PIPE).wait()

    def disconnect(self):
        # Simulate a DB outage by stopping the service.
        subprocess.Popen(['/bin/bash', 'service', 'mysql', 'stop'], stdout=subprocess.PIPE).wait()

    def connect(self):
        # Bring the service back after a simulated outage.
        subprocess.Popen(['/bin/bash', 'service', 'mysql', 'start'], stdout=subprocess.PIPE).wait()

    def getDBConn(self):
        # Blocking: retries until a connection is obtained.
        return self.getConnection()

    def getConnectionHibernateFile(self):
        # Hibernate configuration matching this backend's connection details.
        return '../src/test/resources/mysql_hibernate.cfg.xml'

    def connectToDB(self):
        # Single connection attempt; raises if the server is unreachable.
        connection_str = 'mysql+pymysql://{user}:{password}@{db}'.format(user='demouser', password='<PASSWORD>!', db='localhost:3306/test')
        engine = create_engine(connection_str).execution_options(autocommit=True)
        conn = engine.connect()
        return conn

    def getConnection(self):
        # Retry once per second until the server accepts connections
        # (e.g. while it is restarting).
        while True:
            try:
                return self.connectToDB()
            except Exception as e:
                print(e)
                time.sleep(1)
class OracleBackend:
    """
    Backend driver for an Oracle 12c docker container.

    Outages are simulated by detaching/reattaching the container on the
    default docker ``bridge`` network rather than stopping the container.
    """

    def __init__(self):
        import docker
        self.client = docker.from_env()
        self.container = self.getDockerContainer()
        # Default docker bridge network; used to simulate disconnects.
        self.network = [n for n in self.client.networks.list() if n.name == 'bridge'][0]
        try:
            # A previous test may have stopped mid-run with the network
            # detached; reattach it (ignore "already connected" errors).
            # BUG FIX: the original called self.connectDockerToNetwork(),
            # which is not defined on this class — the AttributeError was
            # silently swallowed and the reattach never happened.
            self.connect()
        except Exception:
            pass

    def getDockerContainer(self):
        # Find the oracle-12c container; launch it via the install script
        # if it is not running yet.
        container = [container for container in self.client.containers.list() if container.attrs['Config']['Image'] == 'quay.io/maksymbilenko/oracle-12c']
        if len(container) == 0:
            print('Starting oracle container')
            subprocess.Popen(['/bin/bash', '../install_oracle.sh'], stdout=subprocess.PIPE)
            # Busy-wait until the container appears in the list.
            while len(container) == 0:
                container = [container for container in self.client.containers.list() if container.attrs['Config']['Image'] == 'quay.io/maksymbilenko/oracle-12c']
        else:
            print('Oracle container already running')
        return container[0]

    def disconnect(self):
        # Simulate a DB outage: detach the container from the network.
        self.network.disconnect(self.container.attrs['Id'])

    def connect(self):
        # End the simulated outage: reattach the container.
        self.network.connect(self.container.attrs['Id'])

    def getDBConn(self):
        # Blocking: retries until a connection is obtained.
        return self.getConnection()

    def getConnectionHibernateFile(self):
        # Hibernate configuration matching this backend's connection details.
        return '../src/test/resources/hibernate.cfg.xml'

    def connectToDB(self):
        # Single connection attempt against the container's Oracle XE instance.
        connection_str = 'oracle://{user}:{password}@{db}'.format(user='system', password='<PASSWORD>', db='localhost:1521/xe')
        engine = create_engine(connection_str).execution_options(autocommit=True)
        conn = engine.connect()
        return conn

    def getConnection(self):
        # Retry once per second until the DB accepts connections
        # (the container may still be booting).
        while True:
            try:
                return self.connectToDB()
            except Exception as e:
                time.sleep(1)
class genericTest:
    """
    Shared fixture for the write-behind / write-through suites.

    Spins up the requested SQL backend, starts Redis with the RedisGears
    module and JVM plugin, loads the rghibernate gear, and registers the
    connector plus the Student source with the given write policy.
    Exposes ``env`` (RLTest Env) and ``dbConn`` (SQLAlchemy connection)
    to the tests.
    """

    def __init__(self, writePolicy, retryInterval=5, timeout=10, db=MYSQL_DB):
        # db selects the backend: ORACLESQL_DB -> docker Oracle, else MySQL.
        self.backend = OracleBackend() if db == ORACLESQL_DB else MysqlBackend()
        self.dbConn = self.backend.getDBConn()
        self.env = Env(module='../bin/RedisGears/redisgears.so', moduleArgs='CreateVenv 1 pythonInstallationDir ../../bin/RedisGears/ Plugin ../../bin/RedisGears_JVMPlugin/plugin/gears_jvm.so JvmOptions -Djava.class.path=../../bin/RedisGears_JVMPlugin/gears_runtime/target/gear_runtime-jar-with-dependencies.jar JvmPath ../../bin/RedisGears_JVMPlugin/bin/OpenJDK/jdk-11.0.9.1+1/')
        # Upload the rghibernate jar into the JVM plugin.
        with open('../target/rghibernate-jar-with-dependencies.jar', 'rb') as f:
            self.env.cmd('RG.JEXECUTE', 'com.redislabs.WriteBehind', f.read())
        # Register the connector using the backend's hibernate config.
        with open(self.backend.getConnectionHibernateFile(), 'rt') as f:
            self.env.cmd('RG.TRIGGER', 'SYNC.REGISTERCONNECTOR', 'oracle_connector', '10', '10', str(retryInterval), f.read())
        # Register the Student source with the requested write policy.
        with open('../src/test/resources/Student.hbm.xml', 'rt') as f:
            cmd = ['RG.TRIGGER', 'SYNC.REGISTERSOURCE', 'students_src', 'oracle_connector', writePolicy]
            if writePolicy == 'WriteThrough':
                # WriteThrough additionally takes a per-command timeout.
                cmd += [str(timeout)]
            cmd += [f.read()]
            self.env.cmd(*cmd)

    def setUp(self):
        # verify all executions are done
        done = False
        while not done:
            executions = self.env.cmd('RG.DUMPEXECUTIONS')
            done = True
            for r in executions:
                # r[3] is the execution status reported by RedisGears.
                if r[3] != b'done' and r[3] != b'aborted':
                    done = False
                    time.sleep(0.1)
                    break
        # Start each test from an empty table and an empty keyspace.
        try:
            self.dbConn.execute(text('delete from student'))
        except Exception:
            pass
        self.env.cmd('flushall')

    def disconnectBackend(self):
        self.backend.disconnect()

    def connectBackend(self):
        self.backend.connect()
        # The old connection is stale after an outage; fetch a fresh one.
        self.dbConn = self.backend.getDBConn()
class testWriteBehind(genericTest):
    """
    Write-behind tests: Redis commands return immediately and the data
    is replicated to the SQL backend asynchronously, so every check
    polls the DB under a TimeLimit.
    """

    def __init__(self):
        genericTest.__init__(self, 'WriteBehind')

    def testSimpleWriteBehind(self):
        # Write one hash and poll until the row appears in the DB.
        self.env.cmd('hset', 'Student:1', 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', '10')
        result = None
        res = None
        with TimeLimit(10, self.env, 'Failed waiting for data to reach the db'):
            while result is None or res is None:
                time.sleep(0.1)
                try:
                    result = self.dbConn.execute(text('select * from student'))
                    res = result.next()
                except Exception as e:
                    pass
        self.env.assertEqual(res, (1, 'foo', 'bar', 'email', 10))
        # Delete the key and poll until the row disappears.
        self.env.cmd('del', 'Student:1')
        with TimeLimit(10, self.env, 'Failed waiting for data to delete from db'):
            while res is not None:
                time.sleep(0.1)
                result = self.dbConn.execute(text('select * from student'))
                res = None
                try:
                    res = result.next()
                except Exception:
                    pass

    def testSimpleWriteBehind2(self):
        # Write 100 hashes and poll until the row count reaches 100.
        for i in range(100):
            self.env.cmd('hmset', 'Student:%d' % i, 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', '10')
        result = None
        res = None
        with TimeLimit(10, self.env, 'Failed waiting for data to reach the db'):
            while result is None or res is None or res[0] != 100:
                time.sleep(0.1)
                try:
                    result = self.dbConn.execute(text('select count(*) from student'))
                    res = result.next()
                except Exception as e:
                    pass
        self.env.assertEqual(res, (100,))
        # Delete all keys and poll until the table is empty again.
        for i in range(100):
            self.env.cmd('del', 'Student:%d' % i)
        with TimeLimit(10, self.env, 'Failed waiting for data to delete from db'):
            while res is not None:
                time.sleep(0.1)
                result = self.dbConn.execute(text('select * from student'))
                res = None
                try:
                    res = result.next()
                except Exception:
                    pass

    def testStopDBOnTrafic(self):
        # Take the backend down mid-traffic; write-behind must buffer
        # the writes and flush everything once the backend returns.
        for i in range(100):
            self.env.cmd('hmset', 'Student:%d' % i, 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', '10')
            if i == 50:
                self.disconnectBackend()
        self.connectBackend()
        # NOTE(review): connectBackend() already refreshes self.dbConn;
        # this extra assignment is redundant but harmless.
        self.dbConn = self.backend.getDBConn()
        # make sure all data was written
        result = None
        res = None
        with TimeLimit(10, self.env, 'Failed waiting for data to reach the db'):
            while result is None or res is None or res[0] != 100:
                time.sleep(0.1)
                try:
                    result = self.dbConn.execute(text('select count(*) from student'))
                    res = result.next()
                except Exception as e:
                    pass
        self.env.assertEqual(res, (100,))
class testWriteThroughTimeout(genericTest):
    """
    Write-through registration with a 1s command timeout and a 1s
    connector retry interval, so timeout behavior can be observed fast.
    """

    def __init__(self):
        genericTest.__init__(self, 'WriteThrough', retryInterval=1, timeout=1)

    def testWriteThroughTimeout(self):
        # With the backend down, a write-through HSET must fail with a
        # timeout error instead of blocking forever.
        self.disconnectBackend()
        with TimeLimit(4, self.env, 'Failed waiting for timeout response'):
            self.env.expect('hset', 'Student:1', 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', '10').error().contains('Write Timed out')
        # BUG FIX: the original read the bound method without calling it
        # ('self.connectBackend'), so the backend was never reconnected
        # and subsequent tests ran against a dead backend.
        self.connectBackend()

    def testWriteThroughWithoutTimeout(self):
        # Registering a source with a non-numeric timeout must be rejected.
        self.env.expect('RG.TRIGGER', 'SYNC.REGISTERSOURCE', 'students_src', 'oracle_connector', 'WriteThrough', 'bad timeout', 'xml').error().contains('Could not parse timeout argument')
class testWriteThrough(genericTest):
    """
    Write-through tests: each Redis hash command is propagated to the
    SQL backend before the command returns, so the table is asserted on
    immediately after each command.

    NOTE(review): expected error substrings (e.g. 'acheme') mirror the
    server-side messages verbatim and must not be "corrected" here.
    """

    def __init__(self):
        genericTest.__init__(self, 'WriteThrough')

    def testSimpleWriteThrough(self):
        self.env.cmd('hset', 'Student:1', 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', '10')
        result = self.dbConn.execute(text('select * from student'))
        res = result.next()
        self.env.assertEqual(res, (1, 'foo', 'bar', 'email', 10))
        self.env.cmd('del', 'Student:1')
        # The delete must be reflected immediately: expect no rows.
        result = self.dbConn.execute(text('select * from student'))
        try:
            result.next()
            self.env.assertTrue(False, message='got results when expecting no results')
        except Exception:
            pass

    def testSimpleWriteThrough2(self):
        for i in range(100):
            self.env.cmd('hmset', 'Student:%d' % i, 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', '10')
        result = self.dbConn.execute(text('select count(*) from student'))
        res = result.next()
        self.env.assertEqual(res, (100,))
        for i in range(100):
            self.env.cmd('del', 'Student:%d' % i)
        result = self.dbConn.execute(text('select * from student'))
        try:
            result.next()
            self.env.assertTrue(False, message='got results when expecting no results')
        except Exception:
            pass

    def testStopDBOnTrafic(self):
        def background():
            # Keep writing while the backend goes down and comes back,
            # then verify all 100 rows eventually land in the DB.
            for i in range(100):
                self.env.cmd('hmset', 'Student:%d' % i, 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', '10')
            start = time.time()
            # 10 seconds timeout
            # BUG FIX: the original loop condition was inverted
            # ('while (start + 10) < time.time()'), so the verification
            # loop never executed; it also called the undefined name
            # GetConnection() — use the backend's connection factory.
            while time.time() < (start + 10):
                try:
                    self.dbConn = self.backend.getDBConn()
                    result = self.dbConn.execute(text('select count(*) from student'))
                    res = result.next()
                    self.env.assertEqual(res, (100,))
                    break
                except Exception:
                    pass
        with TimeLimit(60*5, self.env, 'Failed waiting for data to reach the database'):
            with Background(background):
                time.sleep(0.5)
                self.disconnectBackend()
                time.sleep(0.5)
                self.connectBackend()

    def testMandatoryValueMissing(self):
        # Omitting the mandatory 'email' field must be rejected.
        self.env.expect('hmset', 'Student:1', 'firstName', 'foo', 'lastName', 'bar', 'age', '10').error().contains('mandatory "email" value is not set')

    def testBadValueAccordingToSchema(self):
        # Non-numeric 'age' violates the mapping schema.
        self.env.expect('hmset', 'Student:1', 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', 'test').error().contains('Failed parsing acheme for field "age"')

    def testBadIdValue(self):
        # Non-numeric key suffix cannot be parsed as the id column.
        self.env.expect('hmset', 'Student:test', 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', 'test').error().contains('Failed parsing id field "id"')

    def testExtraHashFieldsAreIgnored(self):
        # Fields absent from the mapping stay in Redis but are not synced.
        self.env.cmd('hset', 'Student:1', 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', '10', 'bearth_year', 1999)
        self.env.expect('hget', 'Student:1', 'bearth_year').equal(b'1999')
        result = self.dbConn.execute(text('select * from student'))
        res = result.next()
        self.env.assertEqual(res, (1, 'foo', 'bar', 'email', 10))

    def testHIncrByFloat(self):
        self.env.cmd('hset', 'Student:1', 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', '10')
        self.env.cmd('hincrbyfloat', 'Student:1', 'age', '2')
        result = self.dbConn.execute(text('select * from student'))
        res = result.next()
        self.env.assertEqual(res, (1, 'foo', 'bar', 'email', 12))

    def testHIncr(self):
        self.env.cmd('hset', 'Student:1', 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', '10')
        self.env.cmd('hincrby', 'Student:1', 'age', '2')
        result = self.dbConn.execute(text('select * from student'))
        res = result.next()
        self.env.assertEqual(res, (1, 'foo', 'bar', 'email', 12))

    def testNotMandatoryValue(self):
        # Omitting an optional column maps to NULL in the table.
        self.env.cmd('hset', 'Student:1', 'lastName', 'bar', 'email', 'email', 'age', '10')
        result = self.dbConn.execute(text('select * from student'))
        res = result.next()
        self.env.assertEqual(res, (1, None, 'bar', 'email', 10))

    def testHdel(self):
        self.env.cmd('hset', 'Student:1', 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', '10')
        result = self.dbConn.execute(text('select * from student'))
        res = result.next()
        self.env.assertEqual(res, (1, 'foo', 'bar', 'email', 10))
        # Deleting an optional field sets its column to NULL.
        self.env.cmd('hdel', 'Student:1', 'firstName')
        result = self.dbConn.execute(text('select * from student'))
        res = result.next()
        self.env.assertEqual(res, (1, None, 'bar', 'email', 10))

    def testHsetnx(self):
        self.env.cmd('hset', 'Student:1', 'lastName', 'bar', 'email', 'email', 'age', '10')
        result = self.dbConn.execute(text('select * from student'))
        res = result.next()
        self.env.assertEqual(res, (1, None, 'bar', 'email', 10))
        self.env.cmd('hsetnx', 'Student:1', 'firstName', 'foo')
        result = self.dbConn.execute(text('select * from student'))
        res = result.next()
        self.env.assertEqual(res, (1, 'foo', 'bar', 'email', 10))

    def testHdelOnMandatoryField(self):
        self.env.cmd('hset', 'Student:1', 'firstName', 'foo', 'lastName', 'bar', 'email', 'email', 'age', '10')
        result = self.dbConn.execute(text('select * from student'))
        res = result.next()
        self.env.assertEqual(res, (1, 'foo', 'bar', 'email', 10))
        # Removing the mandatory column must be rejected...
        self.env.expect('hdel', 'Student:1', 'email').error().contains('mandatory "email" value is not set')
        # ...but removing every non-mandatory field deletes the row.
        self.env.cmd('hdel', 'Student:1', 'firstName', 'lastName', 'age')
        result = self.dbConn.execute(text('select * from student'))
        try:
            result.next()
            self.env.assertTrue(False, message='got results when expecting no results')
        except Exception:
            pass
| |
in range(n_traj):
traj = all_results[i, :].reshape((N_rib, len(time_vec_fixed))).T
I[i, :] = np.sum(pv[0][traj], axis=1)[startindex:].T
else:
for j in range(pv.shape[0]):
for i in range(n_traj):
traj = all_results[i, :].reshape((N_rib, len(time_vec_fixed))).T
I[j,i, :] = np.sum(pv[j][traj], axis=1)[startindex:].T
intensity_vec = I
else:
fraptime = time_inhibit
inds = np.where(truetime > fraptime)
inds2 = np.where(truetime < fraptime+20)
inds = np.intersect1d(inds,inds2)
endfrap = inds[-1]-1
for i in range(n_traj):
traj = all_results[i, :].reshape((N_rib, len(time_vec_fixed))).T
nribs = np.sum(solutionssave[i][:,endfrap]!=0)
#ribloc = solutionssave[i][:,endfrap]
#adj_pv = pv[solutionssave[i][:,inds[-1]][:nribs]]
frap_app = 20
revI = self.get_negative_intensity(traj,genelength,pv,truetime,fraptime+start_time,fraptime+start_time+frap_app)
I[i, :] = np.sum(pv[traj], axis=1)[startindex:].T
I[i,inds[0]:inds[0]+20] = 0
#I[i,endfrap-startindex:] = np.sum(pv[traj],axis=1)[endfrap-startindex:].T
I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] = I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] + revI
intensity_vec = I
new_ssa_obj = ssa()
new_ssa_obj.no_ribosomes = np.vstack(( ssa_obj.no_ribosomes , no_ribosomes))
new_ssa_obj.n_traj = n_traj+ssa_obj.n_traj
new_ssa_obj.k = all_k
new_ssa_obj.no_rib_per_mrna = float(n_traj)/(n_traj+ssa_obj.n_traj) * no_ribosomes_per_mrna + float(ssa_obj.n_traj)/(n_traj+ssa_obj.n_traj) * ssa_obj.no_rib_per_mrna
new_ssa_obj.rib_density = ribosome_density
new_ssa_obj.rib_means = ribosome_means
new_ssa_obj.rib_means = np.mean(np.vstack((ssa_obj.rib_means,ribosome_means)),0)
new_ssa_obj.rib_vec = rib_vec
new_ssa_obj.intensity_vec = np.vstack((ssa_obj.intensity_vec,intensity_vec))
new_ssa_obj.time_vec_fixed = time_vec_fixed
new_ssa_obj.time = truetime
new_ssa_obj.time_rec = truetime[startindex:]
new_ssa_obj.start_time = non_consider_time
new_ssa_obj.watched_ribs = ssa_obj.watched_ribs + watched_ribs
try:
new_ssa_obj.col_points = ssa_obj.col_points + all_col_points
except:
pass
new_ssa_obj.evaluating_inhibitor = evaluating_inhibitor
new_ssa_obj.evaluating_frap = evaluating_frap
new_ssa_obj.time_inhibit = time_inhibit
new_ssa_obj.solutions = ssa_obj.solutions + solutionssave
new_ssa_obj.solvetime = sttime
new_ssa_obj.collisions = np.hstack((ssa_obj.collisions,collisions))
try:
new_ssa_obj.ribtimes = np.hstack((ssa_obj.ribtimes, all_ribtimes[np.where(all_ribtimes > 0)]))
except:
pass
#solt = solutions.T
fragmented_trajectories = []
fragtimes = []
maxlen = 0
fragmentspertraj= []
for k in range(n_traj):
ind = np.array([next(j for j in range(0,solutions[k].shape[0]) if int(solutions[k][j, i]) == 0 or int(solutions[k][j, i]) == -1) for i in range(0, solutions[k].shape[1])])
changes = ind[1:] - ind[:-1]
addindexes = np.where(changes > 0)[0]
subindexes = np.where(changes < 0)[0]
sub = solutions[k][:,1:] - solutions[k][:,:-1]
neutralindexes = np.unique(np.where(sub < 0)[1])
neutralindexes = np.setxor1d(neutralindexes, subindexes)
for index in neutralindexes:
pre = solutions[k][:,index]
post = solutions[k][:,index+1]
changecount = 0
while len(np.where(post - pre < 0)[0]) > 0:
post = np.append([genelength],post)
pre = np.append(pre,0)
changecount+=1
for i in range(changecount):
addindexes = np.sort(np.append(addindexes,index))
subindexes = np.sort(np.append(subindexes,index))
changes[index] = -changecount
ind[index] += changecount
for index in np.where(np.abs(changes)>1)[0]:
if changes[index] < 0:
for i in range(np.abs(changes[index])-1):
subindexes = np.sort(np.append(subindexes,index))
else:
for i in range(np.abs(changes[index])-1):
addindexes = np.sort(np.append(addindexes,index))
truefrags = len(subindexes)
if len(subindexes) < len(addindexes):
subindexes = np.append(subindexes, (np.ones((len(addindexes)-len(subindexes)))*(len(truetime)-1)).astype(int))
fragmentspertraj.append(len(subindexes))
for m in range(min(len(subindexes),len(addindexes))):
traj = solutions[k][:, addindexes[m]:subindexes[m]+1]
traj_ind = changes[addindexes[m]:subindexes[m]+1]
startind = ind[addindexes[m]]
minusloc = [0] + np.where(traj_ind < 0)[0].astype(int).tolist()
fragment = np.array([])
iterind = startind
if subindexes[m]-addindexes[m] > 0:
if len(minusloc) > 1:
if m <= truefrags:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[0, minusloc[-1]+1:].flatten())
else:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[m-truefrags, minusloc[-1]+1:].flatten())
else:
fragment = solutions[k][startind][addindexes[m]:subindexes[m]+1].flatten()
fragtimes.append(addindexes[m]+1)
fragmented_trajectories.append(fragment)
#if m <= truefrags:
#kes.append(genelength/truetime[len(fragment)])
if len(fragment) > maxlen:
maxlen = len(fragment)
fragarray = np.zeros((len(fragmented_trajectories), maxlen))
for i in range(len(fragmented_trajectories)):
fragarray[i][0:len(fragmented_trajectories[i])] = fragmented_trajectories[i]
fraglen_size = max(fragarray.shape[1],ssa_obj.fragments.shape[1])
if fragarray.shape[1] != fraglen_size:
fragarray = np.hstack((fragarray, np.zeros((fragarray.shape[0],fraglen_size-fragarray.shape[1]))) )
if ssa_obj.fragments.shape[1] != fraglen_size:
ssa_obj.fragments = np.hstack((ssa_obj.fragments, np.zeros((ssa_obj.fragments.shape[0],fraglen_size-ssa_obj.fragments.shape[1]))) )
new_ssa_obj.fragments = np.vstack((ssa_obj.fragments,fragarray))
new_ssa_obj.fragtimes = ssa_obj.fragtimes+fragtimes
new_ssa_obj.frag_per_traj = fragmentspertraj
new_ssa_obj.full_frags = ssa_obj.full_frags + truefrags
new_ssa_obj.all_results = np.vstack((ssa_obj.all_results,all_results))
if pv.shape[0] > 1:
for i in range(pv.shape[0]):
if i > 0:
autocorr_vec2, mean_autocorr2, error_autocorr2, dwelltime2, ke_sim2 = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec = np.vstack((autocorr_vec,autocorr_vec2))
mean_autocorr = np.vstack((mean_autocorr,mean_autocorr2))
error_autocorr = np.vstack((error_autocorr,error_autocorr2))
dwelltime.append(dwelltime2)
ke_sim.append(ke_sim2)
else:
autocorr_vec, mean_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec_norm, mean_autocorr_norm, error_autocorr_norm, dwelltime, ke_sim = self.get_autocorr_norm(intensity_vec[i], truetime, 0, genelength)
dwelltime = [dwelltime]
ke_sim = [ke_sim]
else:
autocorr_vec, mean_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec, truetime, 0, genelength)
autocorr_vec_norm, mean_autocorr_norm, error_autocorr_norm, dwelltime, ke_sim = self.get_autocorr_norm(intensity_vec, truetime, 0, genelength)
acov,nacov = self.get_all_autocovariances(intensity_vec,truetime,genelength )
new_ssa_obj.autocorr_vec = autocorr_vec
new_ssa_obj.mean_autocorr = mean_autocorr
new_ssa_obj.error_autocorr = error_autocorr
new_ssa_obj.autocorr_vec_norm = autocorr_vec_norm
new_ssa_obj.mean_autocorr_norm = mean_autocorr_norm
new_ssa_obj.error_autocorr_norm = error_autocorr_norm
new_ssa_obj.dwelltime = dwelltime
new_ssa_obj.ke_sim = float(n_traj)/(n_traj+ssa_obj.n_traj) * ke_sim + float(ssa_obj.n_traj)/(n_traj+ssa_obj.n_traj) * ssa_obj.ke_sim
new_ssa_obj.ke_true = float(genelength)/np.mean( new_ssa_obj.ribtimes )
new_ssa_obj.probe = ssa_obj.probe
new_ssa_obj.autocovariance_dict = acov
new_ssa_obj.autocovariance_norm_dict = nacov
# try:
# probePosition = []
# for key in self.POI.tag_epitopes.keys():
# probePosition = probePosition + self.POI.tag_epitopes[key]
# probePosition = np.unique(probePosition).tolist()
# except:
# print('No POI found')
# #nt_seq = self.tag_full['T_flag'] + nt_seq
#
#
# nt_seq = self.POI.nt_seq
# genelength = int(len(nt_seq)/3)
#
#
#
# pv = np.zeros((1, genelength)).astype(int).flatten()
#
# for i in range(len(probePosition)):
# pv[probePosition[i]:] = i
#
#
#
#
#
# npoints = len(time_vec_fixed)
# tstep = npoints-non_consider_time
# for i in range(n):
#
# soln = self.SSA(all_k, time_vec_fixed, inhibit_time=time_inhibit+non_consider_time, FRAP=evaluating_frap, Inhibitor=evaluating_inhibitor)
#
# rb = sparse.lil_matrix((len(time_vec_fixed), genelength), dtype=int)
# for j in range(soln.shape[1]):
#
# #if len(np.where(soln[:,j]!=0)[0]) !=0:
# #print(np.where(soln[:,j]!=0)[0])
#
#
# #rb[j,np.where(soln[:,j]!=0)[0]] = 1
#
#
# for value in soln[:, j][np.where(soln[:, j] != 0 )[0]].astype(int):
#
# rb[j, value-1] = 1
#
# rib_vec.append(rb)
#
#
# no_ribosomes = np.zeros((len(rib_vec), genelength))
#
#
#
# for i in range(len(rib_vec)):
# no_ribosomes[i] = np.sum(rib_vec[i].todense()[non_consider_time:], axis=0).flatten()
#
# ribosome_means = np.mean(no_ribosomes, axis=0)
# ribosome_density = ribosome_means/npoints
#
# no_ribosomes_per_mrna = np.mean(no_ribosomes)
#
# intensity_vec = np.zeros((len(rib_vec), tstep+1))
#
# I = np.zeros((1, tstep+1))
# for i in range(len(rib_vec)):
# for j in range(tstep):
# temp_output = rib_vec[i][non_consider_time + j, :].todense()
#
# I[0, j] = np.sum(pv * temp_output.flatten().T)
# intensity_vec[i] = I
#
#
#
# ssa_obj = ssa()
#
# ssa_obj.n_traj = nRepetitions + n
# ssa_obj.k = all_k
# ssa_obj.no_rib_per_mrna = no_ribosomes_per_mrna
# ssa_obj.rib_density = ribosome_density
# ssa_obj.rib_means = ribosome_means
# ssa_obj.rib_vec = rib_vec
# ssa_obj.intensity_vec = intensity_vec
# ssa_obj.time_vec_fixed = time_vec_fixed
# ssa_obj.start_time = non_consider_time
# ssa_obj.probe = probePosition
# ssa_obj.evaluating_inhibitor = evaluating_inhibitor
# ssa_obj.evaluating_frap = evaluating_frap
# ssa_obj.time_inhibit = time_inhibit
#
#
#
# if evaluating_inhibitor == False:
# autocorr_vec, mean_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec, time_vec_fixed, 0, genelength)
# ssa_obj.autocorr_vec = autocorr_vec
# ssa_obj.mean_autocorr = mean_autocorr
# ssa_obj.error_autocorr = error_autocorr
# ssa_obj.dwelltime = dwelltime
# ssa_obj.ke_sim = ke_sim
return new_ssa_obj
def multitau_acc(self, ivec, n, sampling_rate, sample_rate_seconds):
    '''
    Multi-tau autocorrelation accumulation over ``n`` intensity trajectories.

    For each trajectory: zero out outliers beyond mean +/- 3 sigma, trim a
    leading/trailing NaN, linearly interpolate the zeroed outliers from
    their neighbors, mean-center, and accumulate the autocorrelation
    returned by ``get_acc2``.

    NOTE(review): this routine appears unfinished upstream — the final
    binning loop computes ``binnedData_1`` but nothing is returned; the
    None-returning interface is preserved here.
    '''
    sigmas = 3
    acc = np.array([[]])
    for i in range(0, n):
        tempdata = ivec[i, :].flatten()
        # BUG FIX: the closing parenthesis of np.where() was misplaced in
        # the original ("np.where(cond) + 3*std"), which added a float to
        # the index tuple and raised a TypeError.  The intended test is
        # value > mean + 3*std (and symmetrically below).
        tempdata[np.where(tempdata > tmean(tempdata, 10) + sigmas*np.std(tempdata))] = 0
        tempdata[np.where(tempdata < tmean(tempdata, 10) - sigmas*np.std(tempdata))] = 0
        if np.isnan(tempdata[0]):
            tempdata = tempdata[1:]
        if np.isnan(tempdata[-1]):
            tempdata = tempdata[:-1]
        outliers = np.where(tempdata == 0)[0]
        if len(outliers) > 0:
            # Edge outliers have only one neighbor, so exclude them from
            # the interpolation set (the original indexed outliers[-1]
            # unconditionally and crashed when there were no outliers).
            if outliers[-1] == len(tempdata)-1:
                outliers = outliers[:-1]
            if len(outliers) > 0 and outliers[0] == 0:
                outliers = outliers[1:]
            # Replace each interior outlier with the mean of its neighbors.
            tempdata[outliers] = 1/2*(tempdata[outliers-1] + tempdata[outliers+1])
        tempdata = tempdata-np.mean(tempdata)
        preacc = self.get_acc2(tempdata)
        if i == 0:
            acc = preacc
        else:
            acc = np.hstack((acc, preacc))
    for i in range(0, n):
        # BUG FIX: ndarray slices cannot be removed with "data[0:k] = []"
        # (ValueError); re-slice instead.  Presumably this drops the first
        # sample_rate_seconds lags — TODO confirm intent.
        data = acc[i][sample_rate_seconds:]
        binnedData_1 = data  # TODO(review): currently unused / not returned upstream.
def geomean(self, iterable):
    """Geometric mean, used for codon sensitivity calculations."""
    values = np.asarray(iterable)
    count = len(values)
    product = values.prod()
    # nth root of the product of the n values.
    return product ** (1.0 / count)
def SSA(self, k, t_array, inhibit_time=0, FRAP=False, Inhibitor=False):
'''
mRNA Translation simulation python implementation
given a propensity vector k, time array to record, and inhibitory conditions, run a single trajectory of translation simulation
The simulation is described here: [PUT LINK HERE TO PAPER]
*args*
**k**, propensity vector of size gene length + 2, [initiation rate, Codon dependent rates, completion rate / unbinding rate]
for reference the codon dependent rates are refering to the time rate of a ribosome to move on to the next codon
**t_array**, time points to record the ribosome posistions at
*keyword args*
**inhibit_time**, the time to start inhibition assays if FRAP or Inhibitor (harringtonine) == True
**FRAP**, True or false to apply Fluorescence Recovery After Photobleaching (FRAP) https://en.wikipedia.org/wiki/Fluorescence_recovery_after_photobleaching
**Inhibitor**, True or false to apply harringtonine at inhibit_time. Harringtonine acts as a protien translation initiation inhibitor
'''
#SSA params and propensities
R = 10 #exclusion volume (ribosome footprint), ribosomes cant be less than 10 codons apart because of their physical size
kelong = np.array([k[1:-1]]).T #rates for ribosomes moving to the next codon, based on tRNA concentrations
N = len(kelong) #Number of codons in the mRNA
kbind = k[0] #rate for a ribosome to bind and start translation
kcompl = k[-1] #rate for a ribosome at the end of the mRNA to unbind
X = np.array([0, 0], dtype=int) #the updating ribosome posistion vector that is changed in the simulation
Ncol = np.zeros((1,0))
| |
<reponame>themagicalmammal/wikibot
from os import environ
from firebase_admin import credentials, db, initialize_app
from flask import Flask, request
from telebot import TeleBot, types
from wikipedia import (
WikipediaPage,
geosearch,
page,
random,
search,
set_lang,
suggest,
summary,
)
# Firebase connection — the realtime DB stores each user's language choice.
cred = credentials.Certificate("firebase.json")  # Firebase service-account key file
initialize_app(
    cred, {"databaseURL": "https://yourappname-user-default-rtdb.firebaseio.com/"}
)
ref = db.reference("/")
# Snapshot of the whole DB: maps Telegram user id -> language code.
# NOTE(review): read once at startup; later ref.update() calls do not
# refresh this dict until the process restarts — confirm this is intended.
z = ref.get()
# Telegram API
TOKEN = ""  # Bot key (left empty in the repo; fill in before deploying)
bot = TeleBot(TOKEN)
# Flask connection
server = Flask(__name__)
# Common Messages
error = "Wrong word, use <b>title</b>"
error2 = "Wrong word, use <b>suggest</b>"
word = " for the word..."
# Languages
lang_list = [
"aa",
"ab",
"abs",
"ace",
"ady",
"ady-cyrl",
"aeb",
"aeb-arab",
"aeb-latn",
"af",
"ak",
"aln",
"als",
"alt",
"am",
"ami",
"an",
"ang",
"anp",
"ar",
"arc",
"arn",
"arq",
"ary",
"arz",
"as",
"ase",
"ast",
"atj",
"av",
"avk",
"awa",
"ay",
"az",
"azb",
"ba",
"ban",
"ban-bali",
"bar",
"bat-smg",
"bbc",
"bbc-latn",
"bcc",
"bcl",
"be",
"be-tarask",
"be-x-old",
"bg",
"bgn",
"bh",
"bho",
"bi",
"bjn",
"bm",
"bn",
"bo",
"bpy",
"bqi",
"br",
"brh",
"bs",
"btm",
"bto",
"bug",
"bxr",
"ca",
"cbk-zam",
"cdo",
"ce",
"ceb",
"ch",
"cho",
"chr",
"chy",
"ckb",
"co",
"cps",
"cr",
"crh",
"crh-cyrl",
"crh-latn",
"cs",
"csb",
"cu",
"cv",
"cy",
"da",
"de",
"de-at",
"de-ch",
"de-formal",
"din",
"diq",
"dsb",
"dtp",
"dty",
"dv",
"dz",
"ee",
"egl",
"el",
"eml",
"en",
"en-ca",
"en-gb",
"eo",
"es",
"es-419",
"es-formal",
"et",
"eu",
"ext",
"fa",
"ff",
"fi",
"fit",
"fiu-vro",
"fj",
"fo",
"fr",
"frc",
"frp",
"frr",
"fur",
"fy",
"ga",
"gag",
"gan",
"gan-hans",
"gan-hant",
"gcr",
"gd",
"gl",
"glk",
"gn",
"gom",
"gom-deva",
"gom-latn",
"gor",
"got",
"grc",
"gsw",
"gu",
"gv",
"ha",
"hak",
"haw",
"he",
"hi",
"hif",
"hif-latn",
"hil",
"ho",
"hr",
"hrx",
"hsb",
"ht",
"hu",
"hu-formal",
"hy",
"hyw",
"hz",
"ia",
"id",
"ie",
"ig",
"ii",
"ik",
"ike-cans",
"ike-latn",
"ilo",
"inh",
"io",
"is",
"it",
"iu",
"ja",
"jam",
"jbo",
"jut",
"jv",
"ka",
"kaa",
"kab",
"kbd",
"kbd-cyrl",
"kbp",
"kg",
"khw",
"ki",
"kiu",
"kj",
"kjp",
"kk",
"kk-arab",
"kk-cn",
"kk-cyrl",
"kk-kz",
"kk-latn",
"kk-tr",
"kl",
"km",
"kn",
"ko",
"ko-kp",
"koi",
"kr",
"krc",
"kri",
"krj",
"krl",
"ks",
"ks-arab",
"ks-deva",
"ksh",
"ku",
"ku-arab",
"ku-latn",
"kum",
"kv",
"kw",
"ky",
"la",
"lad",
"lb",
"lbe",
"lez",
"lfn",
"lg",
"li",
"lij",
"liv",
"lki",
"lld",
"lmo",
"ln",
"lo",
"loz",
"lrc",
"lt",
"ltg",
"lus",
"luz",
"lv",
"lzh",
"lzz",
"mad",
"mai",
"map-bms",
"mdf",
"mg",
"mh",
"mhr",
"mi",
"min",
"mk",
"ml",
"mn",
"mni",
"mnw",
"mo",
"mr",
"mrh",
"mrj",
"ms",
"mt",
"mus",
"mwl",
"my",
"myv",
"mzn",
"na",
"nah",
"nan",
"nap",
"nb",
"nds",
"nds-nl",
"ne",
"new",
"ng",
"nia",
"niu",
"nl",
"nl-informal",
"nn",
"no",
"nov",
"nqo",
"nrm",
"nso",
"nv",
"ny",
"nys",
"oc",
"olo",
"om",
"or",
"os",
"pa",
"pag",
"pam",
"pap",
"pcd",
"pdc",
"pdt",
"pfl",
"pi",
"pih",
"pl",
"pms",
"pnb",
"pnt",
"prg",
"ps",
"pt",
"pt-br",
"qu",
"qug",
"rgn",
"rif",
"rm",
"rmy",
"rn",
"ro",
"roa-rup",
"roa-tara",
"ru",
"rue",
"rup",
"ruq",
"ruq-cyrl",
"ruq-latn",
"rw",
"sa",
"sah",
"sat",
"sc",
"scn",
"sco",
"sd",
"sdc",
"sdh",
"se",
"sei",
"ses",
"sg",
"sgs",
"sh",
"shi",
"shi-latn",
"shi-tfng",
"shn",
"shy-latn",
"si",
"simple",
"sk",
"skr",
"skr-arab",
"sl",
"sli",
"sm",
"sma",
"smn",
"sn",
"so",
"sq",
"sr",
"sr-ec",
"sr-el",
"srn",
"ss",
"st",
"stq",
"sty",
"su",
"sv",
"sw",
"szl",
"szy",
"ta",
"tay",
"tcy",
"te",
"tet",
"tg",
"tg-cyrl",
"tg-latn",
"th",
"ti",
"tk",
"tl",
"tly",
"tn",
"to",
"tpi",
"tr",
"tru",
"trv",
"ts",
"tt",
"tt-cyrl",
"tt-latn",
"tum",
"tw",
"ty",
"tyv",
"tzm",
"udm",
"ug",
"ug-arab",
"ug-latn",
"uk",
"ur",
"uz",
"uz-cyrl",
"uz-latn",
"ve",
"vec",
"vep",
"vi",
"vls",
"vmf",
"vo",
"vot",
"vro",
"wa",
"war",
"wo",
"wuu",
"xal",
"xh",
"xmf",
"xsy",
"yi",
"yo",
"yue",
"za",
"zea",
"zgh",
"zh",
"zh-classical",
"zh-cn",
"zh-hans",
"zh-hant",
"zh-hk",
"zh-min-nan",
"zh-mo",
"zh-my",
"zh-sg",
"zh-tw",
"zh-yue",
"zu",
]
def main_keyboard():
    """Build the main reply keyboard with the bot's primary actions."""
    markup = types.ReplyKeyboardMarkup(
        row_width=2, resize_keyboard=True, one_time_keyboard=True
    )
    labels = (
        "Definition 📖",
        "Title 🖊️️",
        "URL 🔗",
        "Language 🔣",
        "Random 🔀",
        "Help ⚠️",
        "Map 🗺️",
        "Nearby 📍",
    )
    markup.add(*(types.KeyboardButton(label) for label in labels))
    return markup
def support_keyboard():
    """Build the help/support reply keyboard."""
    markup = types.ReplyKeyboardMarkup(
        row_width=2, resize_keyboard=True, one_time_keyboard=True
    )
    labels = ("🧑🏻💻️ Dev", "🐛 Bug", "💻️ Source", "🔙 Back")
    markup.add(*(types.KeyboardButton(label) for label in labels))
    return markup
def extra_keyboard():
    """Build the secondary reply keyboard with extra actions."""
    markup = types.ReplyKeyboardMarkup(
        row_width=2, resize_keyboard=True, one_time_keyboard=True
    )
    labels = ("Suggest 💡", "Fluky 💫", "Back ⬅️")
    markup.add(*(types.KeyboardButton(label) for label in labels))
    return markup
def check(text, command):
    """Return True when *command* appears as a word in *text*.

    The text is lowercased and '/'/'#' characters are stripped before
    splitting on spaces, so '/Start', '#start' and 'start' all match the
    command 'start'. *text* may be None (handled via str()).

    Returns a bool instead of the original 1/0 ints; bool is a subclass
    of int, so all existing truthiness-based callers keep working.
    """
    words = str(text).replace("/", "").replace("#", "").lower().split(" ")
    return command in words
def change_lan(message):
    """Switch the wiki language to the sender's stored preference (from z)."""
    set_lang(z[str(message.from_user.id)])
@bot.message_handler(func=lambda message: check(message.text, "start"))
def welcome(message):
    """Handle 'start': register the user with default language 'en' and greet."""
    ref.update({message.from_user.id: "en"})
    greeting = (
        "Greetings "
        + message.from_user.first_name
        + ", I am Wikibot 🤖\n\n"
        + "What can I do? Use <b>help</b>."
    )
    bot.send_message(
        chat_id=message.chat.id,
        text=greeting,
        parse_mode="html",
        reply_markup=main_keyboard(),
    )
@bot.message_handler(func=lambda message: check(message.text, "definition"))
def definition(message):
    """Ask which word to define, then hand the reply to process_definition."""
    prompt = bot.reply_to(message, "<b>Definition</b>" + word, parse_mode="html")
    bot.register_next_step_handler(prompt, process_definition)
def process_definition(message):
    """Fetch a wiki summary for the requested word and send its first paragraph."""
    try:
        query = str(message.text)
        change_lan(message)
        # Keep only the first paragraph of the 10-sentence summary.
        first_paragraph = summary(query, sentences=10).split("\n\n", 1)[0]
        bot.send_message(
            chat_id=message.chat.id,
            text="<b>" + query + "</b> \n\n" + first_paragraph,
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
    except Exception as err:
        # "Page id ..." errors mean the lookup found nothing; other errors
        # are usually disambiguation lists, so bold the "may refer to" part.
        reply = (
            "<b>Not Found!</b>"
            if str(err).startswith("Page id")
            else str(err).replace("may refer to", "<b>may refer to</b>")
        )
        bot.send_message(
            chat_id=message.chat.id,
            text=reply,
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
@bot.message_handler(func=lambda message: check(message.text, "title"))
def title(message):
    """Ask for a search term, then hand the reply to process_title."""
    prompt = bot.reply_to(message, "<b>Title</b>" + word, parse_mode="html")
    bot.register_next_step_handler(prompt, process_title)
def process_title(message):
    """Search wiki titles matching the user's text and send each hit."""
    try:
        query = str(message.text)
        change_lan(message)
        matches = search(query)
        if matches:
            bot.send_message(
                chat_id=message.chat.id,
                text="Possible titles are...",
                parse_mode="html",
            )
            for match in matches:
                # Bold the query inside each title, both as typed and lowercased.
                highlighted = match.replace(query, "<b>" + query + "</b>")
                highlighted = highlighted.replace(
                    query.lower(), "<b>" + query.lower() + "</b>"
                )
                bot.send_message(
                    chat_id=message.chat.id,
                    text=highlighted,
                    parse_mode="html",
                    reply_markup=main_keyboard(),
                )
        else:
            bot.send_message(
                chat_id=message.chat.id,
                text=error2,
                parse_mode="html",
                reply_markup=main_keyboard(),
            )
    except Exception:
        bot.send_message(
            chat_id=message.chat.id,
            text=error2,
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
@bot.message_handler(func=lambda message: check(message.text, "help"))
def aid(message):
    """Send the list of keywords the bot understands."""
    help_text = "".join(
        (
            "These keywords can be used to control me - \n\n",
            "<b>Primary</b> \n",
            "Definition 📖 - fetches definition of a word \n",
            "Title 🖊️️ - fetches a bunch of related titles\n",
            "URL 🔗 - gives the URL of wiki page of the word \n",
            "Prefix 🔡 - show all available languages \n",
            "Language 🔣 - set the language you want \n\n",
            "<b>Secondary</b> \n",
            "Nearby 📍 - locations near a coordinate \n",
            "Map 🗺️ - location in map with wiki database \n",
            "Random 🔀 - pops a random article from wiki \n\n",
            "<b>Extra</b> \n",
            "Fluky 💫 - fetches a random title from wiki \n",
            "Suggest 💡 - returns a suggested word if found \n",
        )
    )
    bot.send_message(
        chat_id=message.chat.id,
        text=help_text,
        parse_mode="html",
        reply_markup=main_keyboard(),
    )
@bot.message_handler(func=lambda message: check(message.text, "url"))
def url(message):
    """Ask for a title, then hand the reply to process_url."""
    prompt = bot.reply_to(message, "<b>URL</b>" + word, parse_mode="html")
    bot.register_next_step_handler(prompt, process_url)
def process_url(message):
    """Send the wiki page URL for the requested title."""
    try:
        query = str(message.text)
        change_lan(message)
        page_url = page(query).url
        link_html = "<a href='" + page_url + "'>🔗</a>"
        bot.send_message(
            chat_id=message.chat.id,
            text=link_html + "for <b>" + query + "</b>",
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
    except Exception:
        bot.send_message(
            chat_id=message.chat.id,
            text=error,
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
@bot.message_handler(func=lambda message: check(message.text, "language"))
def ln(message):
    """Ask for a language prefix, then hand the reply to process_ln."""
    prompt = bot.reply_to(
        message, "Type the prefix of your <b>language</b>...", parse_mode="html"
    )
    bot.register_next_step_handler(prompt, process_ln)
def process_ln(message):
    """Validate the language prefix and persist it for this user."""
    try:
        prefix_text = str(message.text).lower()
        if prefix_text in lang_list:
            ref.update({message.from_user.id: prefix_text})
            # Refresh the cached user -> language map after the write.
            global z
            z = ref.get()
            text = "Set Successfully."
        else:
            text = (
                "Please, check for the correct <a href="
                '"https://github.com/themagicalmammal/wikibot/blob/master/Lang.md"'
                ">prefix</a>."
            )
        bot.send_message(
            chat_id=message.chat.id,
            text=text,
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
    except Exception:
        bot.send_message(
            chat_id=message.chat.id,
            text="Error, changing language",
            reply_markup=main_keyboard(),
        )
@bot.message_handler(func=lambda message: check(message.text, "support"))
def support(message):
    """List the support commands and switch to the support keyboard."""
    support_text = "".join(
        (
            "Support commands that I provide - \n\n",
            "Bugs 🐛 - to report bugs or suggest mods\n",
            "Dev 🧑🏻💻️ - provides information about my creator\n",
            "Source 💻️ - to view the source code",
        )
    )
    bot.send_message(
        chat_id=message.chat.id,
        text=support_text,
        parse_mode="html",
        reply_markup=support_keyboard(),
    )
@bot.message_handler(func=lambda message: check(message.text, "prefix"))
def prefix(message):
    """Explain how language prefixes work and link the prefix list."""
    prefix_text = "".join(
        (
            "Language is set with the help of it's Prefix. \n<b>Example</b> - English:en<a ",
            'href="https://github.com/themagicalmammal/wikibot/blob/master/Lang.md"',
            ">.</a>",
        )
    )
    bot.send_message(
        chat_id=message.chat.id,
        text=prefix_text,
        parse_mode="html",
        reply_markup=main_keyboard(),
    )
@bot.message_handler(func=lambda message: check(message.text, "random"))
def randomize(message):
    """Send a link to a random wiki article.

    The wiki lookup occasionally fails for a picked title, so retry a few
    times. The original code recursed into itself from a bare ``except:``,
    which swallowed KeyboardInterrupt/SystemExit and could overflow the
    stack on persistent failures; a bounded loop with ``except Exception``
    fixes both problems.
    """
    for _ in range(5):  # bounded retries instead of unbounded recursion
        try:
            change_lan(message)
            random_url = page(random(pages=1)).url
            bot.send_message(
                chat_id=message.chat.id,
                text="<a href='" + random_url + "'>✨</a>",
                parse_mode="html",
                reply_markup=main_keyboard(),
            )
            return
        except Exception:
            continue
@bot.message_handler(func=lambda message: check(message.text, "map"))
def chart(message):
    """Ask for a place name, then hand the reply to process_co."""
    prompt = bot.reply_to(message, "<b>Location</b> of the place...", parse_mode="html")
    bot.register_next_step_handler(prompt, process_co)
def process_co(message):
    """Look up the coordinates of a place and send them plus a map pin."""
    try:
        place = str(message.text)
        # Coordinates are fetched from the English wiki regardless of the
        # user's language preference.
        set_lang("en")
        lat, lon = WikipediaPage(place).coordinates
        coords_text = ", ".join(str(round(value, 5)) for value in (lat, lon))
        bot.send_message(chat_id=message.chat.id, text=coords_text)
        bot.send_location(
            chat_id=message.chat.id,
            latitude=lat,
            longitude=lon,
            reply_markup=main_keyboard(),
        )
    except Exception:
        bot.send_message(
            chat_id=message.chat.id,
            text="Not a location.",
            reply_markup=main_keyboard(),
        )
@bot.message_handler(func=lambda message: check(message.text, "nearby"))
def geo(message):
    """Ask for coordinates, then hand the reply to process_geo."""
    prompt = bot.reply_to(
        message, "Send me the <b>coordinates</b>...", parse_mode="html"
    )
    bot.register_next_step_handler(prompt, process_geo)
def process_geo(message):
try:
lat, lan = (
str(message.text)
.replace("E", "")
.replace("W", "")
.replace("N", "")
.replace("S", "")
.replace("° ", "")
.replace("°", "")
.replace(",", "")
.replace(" ", " ")
.split(" ")
)
set_lang("en")
locations = geosearch(latitude=lat, longitude=lan, results=10, radius=1000)
if locations:
nearby = "<b>Nearby locations</b> are..."
| |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks_dev/neighbors.ipynb (unless otherwise specified).
__all__ = ['sparsify', 'hstack', 'vstack', 'stack', 'NMSLibSklearnWrapper', 'FastCosineNN', 'FastJaccardNN', 'FastL2NN',
'FastKLDivNN', 'sparsify', 'hstack', 'vstack', 'stack', 'NMSLibSklearnWrapper', 'FastCosineNN',
'FastJaccardNN', 'FastL2NN', 'FastKLDivNN']
# Cell
from pathlib import Path
import time
import numpy as np
from scipy import sparse
import nmslib
from sklearn.base import BaseEstimator, TransformerMixin
# Cell
def sparsify(*arrs):
    '''
    Return the inputs as a list, converting any dense array to CSR sparse.
    Inputs that are already sparse are passed through unchanged.
    '''
    return [
        arr if sparse.issparse(arr) else sparse.csr_matrix(arr)
        for arr in arrs
    ]
def _robust_stack(blocks, stack_method = 'stack', **kwargs):
    '''
    Stack *blocks* with either scipy.sparse or numpy, depending on content.

    If any block is sparse the scipy.sparse function named *stack_method*
    is used, otherwise the numpy one; **kwargs are forwarded to it.
    '''
    module = sparse if any(sparse.issparse(block) for block in blocks) else np
    return getattr(module, stack_method)(blocks, **kwargs)
def hstack(blocks, **kwargs):
    '''Horizontally stack blocks, sparse-aware (see _robust_stack).'''
    return _robust_stack(blocks, stack_method = 'hstack', **kwargs)
def vstack(blocks, **kwargs):
    '''Vertically stack blocks, sparse-aware (see _robust_stack).'''
    return _robust_stack(blocks, stack_method = 'vstack', **kwargs)
def stack(blocks, **kwargs):
    '''Stack blocks along a new axis, sparse-aware (see _robust_stack).'''
    return _robust_stack(blocks, stack_method = 'stack', **kwargs)
# Cell
class NMSLibSklearnWrapper(BaseEstimator):
    '''
    Generic wrapper exposing nmslib approximate nearest neighbors through a
    sklearn NearestNeighbors-style API (fit / partial_fit / kneighbors /
    kneighbors_graph), with pickling support via __getstate__/__setstate__.

    For the distance types ("spaces") available, refer to
    https://github.com/nmslib/nmslib/blob/master/manual/spaces.md
    '''
    def __init__(
        self,
        #init index params (forwarded verbatim to nmslib.init)
        nmslib_method='hnsw',
        nmslib_space='jaccard_sparse',
        nmslib_data_type=nmslib.DataType.OBJECT_AS_STRING,
        nmslib_dtype = nmslib.DistType.FLOAT,
        nmslib_space_params = {},
        #index creation params
        #NOTE(review): dict defaults are shared across instances (Python
        #mutable-default caveat); they are only read here, never mutated,
        #and are kept as-is so sklearn get_params()/clone() stay unchanged.
        index_time_params = {'M': 30, 'indexThreadQty': 4, 'efConstruction': 100, 'post' : 0},
        query_time_params = {'efSearch': 100},
        #default number of neighbors returned by kneighbors()
        n_neighbors = 30,
        verbose = False,
        #optional callable applied to X before indexing and querying
        X_prep_function = None
    ):
        # sklearn convention: __init__ only stores parameters verbatim.
        self.nmslib_method = nmslib_method
        self.nmslib_space = nmslib_space
        self.nmslib_data_type = nmslib_data_type
        self.nmslib_space_params = nmslib_space_params
        self.nmslib_dtype = nmslib_dtype
        #index creation params
        self.index_time_params = index_time_params
        self.query_time_params = query_time_params
        #
        self.n_neighbors = n_neighbors
        self.verbose = verbose
        #x_prep_function
        self.X_prep_function = X_prep_function
    def _preprocess_X(self, X):
        '''
        Apply the optional X_prep_function to X (identity when it is None).
        '''
        if self.X_prep_function is not None:
            X = self.X_prep_function(X)
        return X
    def _instantiate_index(self):
        '''
        Instantiate a fresh (empty) nmslib index from the stored parameters.
        Kept as its own method so __setstate__ can rebuild it on unpickling.
        '''
        index = nmslib.init(
            method = self.nmslib_method,
            space = self.nmslib_space,
            data_type = self.nmslib_data_type,
            space_params = self.nmslib_space_params,
            dtype = self.nmslib_dtype,
        )
        return index
    def fit(self, X, y = None, **kwargs):
        '''
        Instantiate the index, add all points of X and build it.

        Args:
            X: data to index; accepted layout depends on the nmslib space
               and on X_prep_function.
            y: optional per-sample payload stored alongside the index; when
               None an empty (n_samples, 0) placeholder is stored.

        Returns:
            self
        '''
        #instantiate index
        index = self._instantiate_index()
        # preprocess X
        X_prep = self._preprocess_X(X)
        #add points to index
        index.addDataPointBatch(X_prep)
        # Create an index
        index.createIndex(self.index_time_params, self.verbose)
        #handle None for y (data to save under indexes)
        if y is None:
            y = np.zeros((X.shape[0], 0)) #empty array
        # save states
        self.index_ = index
        self.y_ = y
        self.X_ = X
        self.n_samples_fit_ = self.X_.shape[0]
        return self
    def partial_fit(self, X, y = None, **kwargs):
        '''
        Add new data points to an existing index and rebuild it.

        The estimator needs to be fit prior to calling partial_fit: call
        fit on the first batch of data, then partial_fit on the subsequent
        batches.
        '''
        #assume index is already instantiated
        # preprocess X
        X_prep = self._preprocess_X(X)
        #add points to index
        self.index_.addDataPointBatch(X_prep)
        # Create an index
        self.index_.createIndex(self.index_time_params, self.verbose)
        #handle None for y (data to save under indexes)
        if y is None:
            # empty (n, 0) placeholder -- now consistent with fit(), which
            # used np.zeros where this method previously used np.ones
            y = np.zeros((X.shape[0], 0))
        # save states
        self.y_ = vstack([self.y_, y])
        self.X_ = vstack([self.X_, X])
        self.n_samples_fit_ = self.X_.shape[0]
        return self
    def kneighbors(self, X = None, n_neighbors = None, return_distance = True, query_time_params = None, n_jobs = 4):
        '''
        Query neighbors. If X is None, the indexed points themselves are
        queried (each point may then appear as its own neighbor).

        Returns:
            (distances, indexes) when return_distance is True, otherwise
            indexes only. Both are lists with one array per query point.
        '''
        if query_time_params is None:
            query_time_params = self.query_time_params
        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        if X is None:
            X = self.X_
        #preprocess X
        X = self._preprocess_X(X)
        self.index_.setQueryTimeParams(query_time_params)
        # Querying
        start = time.time()
        nbrs = self.index_.knnQueryBatch(X, k = n_neighbors, num_threads = n_jobs)
        end = time.time()
        if self.verbose:
            try:
                query_qty = len(X)
            except TypeError:
                # e.g. sparse matrices raise TypeError on len(); the
                # original bare except is narrowed to the actual failure
                query_qty = X.shape[0]
            print('kNN time total=%f (sec), per query=%f (sec), per query adjusted for thread number=%f (sec)' %
                  (end-start, float(end-start)/query_qty, n_jobs*float(end-start)/query_qty))
        if return_distance:
            distances = [nb[1] for nb in nbrs]
            nbrs = [nb[0] for nb in nbrs]
            return distances, nbrs
        else:
            nbrs = [nb[0] for nb in nbrs]
            return nbrs
    def kneighbors_graph(self, X=None, n_neighbors=None, mode="connectivity"):
        """Compute the (weighted) graph of k-Neighbors for points in X.
        Parameters
        ----------
        X : array-like of shape (n_queries, n_features), default=None
            The query point or points. If not provided, neighbors of each
            indexed point are returned. In this case, the query point is
            not considered its own neighbor.
        n_neighbors : int, default=None
            Number of neighbors for each sample. The default is the value
            passed to the constructor.
        mode : {'connectivity', 'distance', 'similarity'}, default='connectivity'
            Type of returned matrix: 'connectivity' will return the
            connectivity matrix with ones and zeros, in 'distance' the
            edges are distances between points (the distance type depends
            on the selected subclass), and 'similarity' returns
            1 - distance.
        Returns
        -------
        A : sparse matrix of shape (n_queries, n_samples_fit) in CSR format.
            `A[i, j]` gives the weight of the edge connecting `i` to `j`.
        """
        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        # construct CSR matrix representation of the k-NN graph
        if mode == "connectivity":
            A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
            # BUG FIX: kneighbors returns a Python list of index arrays,
            # which has no .shape attribute -- the old A_ind.shape[0] raised
            # AttributeError for mode="connectivity".
            n_queries = len(A_ind)
            A_data = np.ones(n_queries * n_neighbors)
        elif mode == "distance":
            A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True)
            A_data = np.ravel(A_data)
        elif mode == "similarity":
            A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True)
            A_data = 1 - np.ravel(A_data)
        else:
            raise ValueError(
                'Unsupported mode, must be one of "connectivity", "similarity" '
                'or "distance" but got "%s" instead' % mode
            )
        n_queries = len(A_ind)
        n_samples_fit = self.n_samples_fit_
        n_nonzero = n_queries * n_neighbors
        # NOTE(review): this fixed-stride indptr assumes nmslib returned
        # exactly n_neighbors hits for every query; fewer hits would make
        # the CSR construction fail -- confirm for small indexes.
        A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
        kneighbors_graph = sparse.csr_matrix(
            (A_data, np.ravel(A_ind), A_indptr), shape=(n_queries, n_samples_fit)
        )
        return kneighbors_graph
    def __getstate__(self):
        '''
        Serialize the nmslib index into bytes so the object can be pickled.

        nmslib indexes are C++ objects and cannot be pickled directly, so
        the index is saved to temporary files whose bytes are stashed in
        self.index_ as a (index_bytes, data_bytes) tuple.
        '''
        #read tempfiles with binaries to save binary str inside object
        tempfile_name = fr'.~nmslib_index_{str(int(time.time()*1e7))}'
        self.index_.saveIndex(tempfile_name, save_data = True)
        with open(tempfile_name, 'rb') as f:
            fb = f.read()
        with open(tempfile_name+'.dat', 'rb') as f:
            fb_dat = f.read()
        #save binary as attribute (index and data)
        self.index_ = (fb, fb_dat)
        #delete tempfiles
        Path(tempfile_name).unlink()
        Path(tempfile_name+'.dat').unlink()
        return self.__dict__
    def __setstate__(self, d):
        '''
        Restore state during unpickling: write the stashed index bytes to
        temporary files, rebuild the nmslib index and load them back.
        '''
        self.__dict__ = d
        #write tempfiles with binaries to load from index.loadIndex
        tempfile_name = fr'.~nmslib_index_{str(int(time.time()*1e7))}'
        with open(tempfile_name, 'wb') as f:
            f.write(self.index_[0])
        with open(tempfile_name+'.dat', 'wb') as f:
            f.write(self.index_[1])
        index = self._instantiate_index()
        index.loadIndex(tempfile_name, load_data = True)
        #sets self.index_
        self.index_ = index
        #delete tempfile
        Path(tempfile_name).unlink()
        Path(tempfile_name+'.dat').unlink()
        return
# Cell
def _preprocess_sparse_to_idx_str(X):
'''
encodes sparse rows into str of id of nonzero columns
'''
#ensure is sparse
X = sparse.csr_matrix(X)
indptr = X.indptr
cols = X.tocoo().col.astype(str)
id_strs = [*(' '.join(cols[slice(*indptr[i:i+2])]) for i in range(len(indptr)-1))]
return id_strs
class FastCosineNN(NMSLibSklearnWrapper):
    '''
    Approximate nearest neighbors under nmslib's fast sparse cosine space.
    Rows are encoded as nonzero-column-index strings before indexing.
    '''
    def __init__(
        self,
        n_neighbors = 30,
        index_time_params = {'M': 30, 'indexThreadQty': 4, 'efConstruction': 100, 'post' : 0},
        query_time_params = {'efSearch': 100},
        verbose = False,
    ):
        # Pin the space-specific settings; forward everything else.
        super().__init__(
            nmslib_method='hnsw',
            nmslib_space='cosinesimil_sparse_fast',
            nmslib_data_type=nmslib.DataType.OBJECT_AS_STRING,
            nmslib_dtype=nmslib.DistType.FLOAT,
            nmslib_space_params={},
            X_prep_function=_preprocess_sparse_to_idx_str,
            n_neighbors=n_neighbors,
            index_time_params=index_time_params,
            query_time_params=query_time_params,
            verbose=verbose,
        )
class FastJaccardNN(NMSLibSklearnWrapper):
    '''
    Approximate nearest neighbors under nmslib's sparse Jaccard space.
    Rows are encoded as nonzero-column-index strings before indexing.
    '''
    def __init__(
        self,
        n_neighbors = 30,
        index_time_params = {'M': 30, 'indexThreadQty': 4, 'efConstruction': 100, 'post' : 0},
        query_time_params = {'efSearch': 100},
        verbose = False,
    ):
        # Pin the space-specific settings; forward everything else.
        super().__init__(
            nmslib_method='hnsw',
            nmslib_space='jaccard_sparse',
            nmslib_data_type=nmslib.DataType.OBJECT_AS_STRING,
            nmslib_dtype=nmslib.DistType.FLOAT,
            nmslib_space_params={},
            X_prep_function=_preprocess_sparse_to_idx_str,
            n_neighbors=n_neighbors,
            index_time_params=index_time_params,
            query_time_params=query_time_params,
            verbose=verbose,
        )
    def kneighbors(self, X = None, n_neighbors = None, return_distance = True, query_time_params = None, n_jobs = 4):
        '''
        Find k nearest neighbors under Jaccard dissimilarity.

        Returns
        -------
        indexes or (distances, indexes)
        '''
        result = super().kneighbors(X, n_neighbors, return_distance, query_time_params, n_jobs)
        if not return_distance:
            return result
        dist, idxs = result
        # Invert the scores nmslib reports (the original comment called this
        # a dissimilarity conversion, though it said "cosine") -- presumably
        # similarity -> 1 - s; TODO(review): confirm against the
        # jaccard_sparse space documentation.
        dist = [1 - d for d in dist]
        return dist, idxs
class FastL2NN(NMSLibSklearnWrapper):
def __init__(
self,
n_neighbors = 30,
index_time_params = {'M': 30, 'indexThreadQty': | |
if not isinstance(labels, list):
labels = [labels]
b = np.zeros((input_image.shape[0],input_image.shape[1],4), dtype=np.uint8)
b[:,:,0] = input_image[:]
b[:,:,1] = input_image[:]
b[:,:,2] = input_image[:]
b[:,:,3] = 255
c = np.zeros((input_image.shape[0],input_image.shape[1],4), dtype=np.uint8)
c[:,:,0] = input_image[:]
c[:,:,1] = input_image[:]
c[:,:,2] = input_image[:]
c[:,:,3] = 255
d = np.zeros((input_image.shape[0],input_image.shape[1],4), dtype=np.uint8)
d[:,:,0] = input_image[:]
d[:,:,1] = input_image[:]
d[:,:,2] = input_image[:]
d[:,:,3] = 255
e = np.zeros((input_image.shape[0],input_image.shape[1],4), dtype=np.uint8)
e[:,:,0] = input_image[:]
e[:,:,1] = input_image[:]
e[:,:,2] = input_image[:]
e[:,:,3] = 255
f = np.zeros((input_image.shape[0],input_image.shape[1],4), dtype=np.uint8)
f[:,:,0] = input_image[:]
f[:,:,1] = input_image[:]
f[:,:,2] = input_image[:]
f[:,:,3] = 255
f[input_rhoana == labels[0]] = (255,0,0,255)
f[input_rhoana == labels[1]] = (255,0,0,255)
thresholded_rhoana = Util.view_labels(input_rhoana, labels, crop=False, return_it=True)
cropped_rhoana_dilated = mh.dilate(thresholded_rhoana.astype(np.uint64))
for dilate in range(30):
cropped_rhoana_dilated = mh.dilate(cropped_rhoana_dilated)
cropped_rhoana_bbox = mh.bbox(cropped_rhoana_dilated)
binary_border = mh.labeled.borders(thresholded_rhoana.astype(np.bool))
b[input_rhoana == labels[0]] = (255,0,0,255)
c[mh.labeled.borders(Util.threshold(input_rhoana, labels[0])) == 1] = (255,0,0,255)
d[binary_border == 1] = (255,0,0,255)
if len(labels) > 1:
b[input_rhoana == labels[1]] = (0,255,0,255)
c[mh.labeled.borders(Util.threshold(input_rhoana, labels[1])) == 1] = (0,255,0,255)
cropped_image = Util.crop_by_bbox(input_image, cropped_rhoana_bbox)
cropped_labels = Util.crop_by_bbox(b, cropped_rhoana_bbox)
cropped_borders = Util.crop_by_bbox(c, cropped_rhoana_bbox)
cropped_binary_border = Util.crop_by_bbox(d, cropped_rhoana_bbox)
cropped_binary_labels = Util.crop_by_bbox(f, cropped_rhoana_bbox)
cropped_slice_overview = Util.crop_by_bbox(e, cropped_rhoana_bbox).copy()
e[cropped_rhoana_bbox[0]:cropped_rhoana_bbox[1], cropped_rhoana_bbox[2]] = (255,255,0,255)
e[cropped_rhoana_bbox[0]:cropped_rhoana_bbox[1], cropped_rhoana_bbox[3]] = (255,255,0,255)
e[cropped_rhoana_bbox[0], cropped_rhoana_bbox[2]:cropped_rhoana_bbox[3]] = (255,255,0,255)
e[cropped_rhoana_bbox[1], cropped_rhoana_bbox[2]:cropped_rhoana_bbox[3]] = (255,255,0,255)
slice_overview = e
if returnbb:
return cropped_image, cropped_labels, cropped_borders, cropped_binary_border, cropped_binary_labels, slice_overview, cropped_slice_overview, cropped_rhoana_bbox
else:
return cropped_image, cropped_labels, cropped_borders, cropped_binary_border, cropped_binary_labels, slice_overview, cropped_slice_overview
@staticmethod
def remove_border_mess(e):
'''
'''
label_sizes = Util.get_histogram(e)
if len(label_sizes) < 2:
#print 'weird'
return e
# we only want to keep the two largest labels
largest1 = np.argmax(label_sizes[1:])+1
label_sizes[largest1] = 0
largest2 = np.argmax(label_sizes[1:])+1
label_sizes[largest2] = 0
for l,s in enumerate(label_sizes):
if l == 0 or s == 0:
# this label has zero pixels anyways or is the background
continue
# find neighbor for l
neighbors = Util.grab_neighbors(e, l)
if largest1 in neighbors:
# prefer the largest
e[e==l] = largest1
elif largest2 in neighbors:
e[e==l] = largest2
return e
    @staticmethod
    def correct_merge(input_rhoana, label, border):
        """Split a merged segment: cut *label* in input_rhoana along *border*.

        The border pixels are burnt into the binary mask of the label, the
        pieces are relabeled, sub-200px fragments are absorbed, and border
        leftovers are cleaned up until (ideally) exactly two labels remain.
        Returns the relabeled binary image of the split region.
        """
        # NOTE(review): rhoana_copy is never used below -- dead code?
        rhoana_copy = np.array(input_rhoana, dtype=np.uint64)
        # split the label using the border
        binary = Util.threshold(input_rhoana, label).astype(np.uint64)
        # restrict the border to pixels inside the label's mask
        border[binary==0] = 0
        # mark border pixels with a second value so relabeling separates them
        binary[border==1] = 2
        binary_relabeled = Util.relabel(binary)
        # Util.view(binary_relabeled, color=True, large=True)
        # drop the border pixels so the pieces become disconnected
        binary_no_border = np.array(binary_relabeled, dtype=np.uint64)
        binary_no_border[border==1] = 0
        # regions smaller than 200 px are treated as noise and filled in
        sizes = mh.labeled.labeled_size(binary_no_border)
        too_small = np.where(sizes < 200)
        labeled_small = mh.labeled.remove_regions(binary_no_border, too_small)
        labeled_small_zeros = Util.threshold(labeled_small, 0)
        labeled_small = Util.fill(labeled_small, labeled_small_zeros.astype(np.bool))
        binary_no_border = Util.frame_image(labeled_small).astype(np.uint64)
        # clip back to the original label's footprint
        binary_no_border[binary==0] = 0
        corrected_binary = binary_no_border
        # now let's remove the possible border mess
        # iterate (at most 6 times) until exactly labels {1, 2} remain
        n = 0
        while corrected_binary.max() != 2 and n < 6:
            corrected_binary = Legacy.remove_border_mess(corrected_binary)
            corrected_binary = skimage.measure.label(corrected_binary)
            n += 1
        return corrected_binary
    @staticmethod
    def perform_auto_merge_correction(cnn, big_M, input_image, input_prob, input_rhoana, merge_errors, p, input_gold=None):
        """Automatically fix every merge error whose CNN score is below p.

        For each accepted error the labeled slice is re-split, the old label
        is invalidated in the merge matrix (set to -3) and the two new
        labels are scored against their neighbors.

        Returns (bigM, corrected_rhoana, fixes, rhoanas) where fixes records
        'yes'/'no' per candidate and rhoanas tracks VI statistics against
        input_gold after each applied fix.
        """
        def dojoVI(gt, seg):
            # per-slice variation of information: (mean, median, list)
            # total_vi = 0
            slice_vi = []
            for i in range(len(gt)):
                current_vi = Util.vi(gt[i].astype(np.int64), seg[i].astype(np.int64))
                # total_vi += current_vi
                slice_vi.append(current_vi)
            # total_vi /= 10
            return np.mean(slice_vi), np.median(slice_vi), slice_vi
        rhoanas = []
        # explicit copy
        bigM = [None]*len(big_M)
        for z in range(len(big_M)):
            bigM[z] = np.array(big_M[z])
        rhoana_after_merge_correction = np.array(input_rhoana)
        # NOTE(review): old_labels/new_labels are never appended to -- dead?
        old_labels = []
        new_labels = []
        fixes = []
        for me in merge_errors:
            # me layout (inferred from the indexing below): (z, label,
            # prediction, border info) -- confirm against the producer
            pred = me[2]
            if pred < p:
                fixes.append('yes')
                print 'fixing', pred
                z = me[0]
                label = me[1]
                border = me[3][0][1]
                # f is the re-split segmentation for this slice
                a,b,c,d,e,f,g,h,i,j = Legacy.get_merge_error_image(input_image[z], rhoana_after_merge_correction[z], label, border)
                new_rhoana = f
                rhoana_after_merge_correction[z] = new_rhoana
                # vi = UITools.VI(self._input_gold, rhoana_after_merge_correction)
                # print 'New global VI', vi[1]
                # if input_gold:
                rhoanas.append(dojoVI(input_gold, rhoana_after_merge_correction))
                #
                # and remove the original label from our bigM matrix
                #
                bigM[z][label,:] = -3
                bigM[z][:,label] = -3
                # now add the two new labels
                label1 = new_rhoana.max()
                label2 = new_rhoana.max()-1
                # grow the matrix by two rows/columns initialized to -1
                new_m = np.zeros((bigM[z].shape[0]+2, bigM[z].shape[1]+2), dtype=bigM[z].dtype)
                new_m[:,:] = -1
                new_m[0:-2,0:-2] = bigM[z]
                # print 'adding', label1, 'to', z
                new_m = Legacy.add_new_label_to_M(cnn, new_m, input_image[z], input_prob[z], new_rhoana, label1)
                new_m = Legacy.add_new_label_to_M(cnn, new_m, input_image[z], input_prob[z], new_rhoana, label2)
                # re-propapage new_m to bigM
                bigM[z] = new_m
            else:
                fixes.append('no')
        return bigM, rhoana_after_merge_correction, fixes, rhoanas
    @staticmethod
    def perform_sim_user_merge_correction(cnn, big_M, input_image, input_prob, input_rhoana, input_gold, merge_errors):
        """Simulate a perfect user: apply a proposed merge split only when it
        lowers the per-slice VI against the ground truth input_gold.

        Returns (bigM, corrected_rhoana, fixes, rhoanas) where fixes records
        'Good'/'Bad' per candidate and rhoanas tracks VI statistics after
        each accepted fix.
        """
        def dojo3VI(gt, seg):
            # per-slice variation of information: (mean, median, list)
            # total_vi = 0
            slice_vi = []
            for i in range(len(gt)):
                current_vi = Util.vi(gt[i].astype(np.int64), seg[i].astype(np.int64))
                # total_vi += current_vi
                slice_vi.append(current_vi)
            # total_vi /= 10
            return np.mean(slice_vi), np.median(slice_vi), slice_vi
        rhoanas = []
        # explicit copy
        bigM = [None]*len(big_M)
        for z in range(len(big_M)):
            bigM[z] = np.array(big_M[z])
        rhoana_after_merge_correction = np.array(input_rhoana)
        fixes = []
        for me in merge_errors:
            # me layout (inferred from the indexing below): (z, label,
            # prediction, border info) -- confirm against the producer
            pred = me[2]
            z = me[0]
            label = me[1]
            border = me[3][0][1]
            # f is the re-split segmentation for this slice
            a,b,c,d,e,f,g,h,i,j = Legacy.get_merge_error_image(input_image[z], rhoana_after_merge_correction[z], label, border)
            new_rhoana = f
            # check VI for this slice
            vi_before = Util.vi(input_gold[z], input_rhoana[z])
            vi_after = Util.vi(input_gold[z], f)
            # global vi
            if (vi_after < vi_before):
                # this is a good fix
                rhoana_after_merge_correction[z] = new_rhoana
                rhoanas.append(dojo3VI(input_gold, rhoana_after_merge_correction))
                #
                # and remove the original label from our bigM matrix
                #
                bigM[z][label,:] = -3
                bigM[z][:,label] = -3
                # now add the two new labels
                label1 = new_rhoana.max()
                label2 = new_rhoana.max()-1
                # grow the matrix by two rows/columns initialized to -1
                new_m = np.zeros((bigM[z].shape[0]+2, bigM[z].shape[1]+2), dtype=bigM[z].dtype)
                new_m[:,:] = -1
                new_m[0:-2,0:-2] = bigM[z]
                # print 'adding', label1, 'to', z, new_rhoana.shape, new_rhoana.max(), len(bigM)
                # if label1 >= new_m.shape[0]:
                #     new_m2 = np.zeros((new_m.shape[0]+2, new_m.shape[1]+2), dtype=bigM[z].dtype)
                #     new_m2[:,:] = -1
                #     new_m2[0:-2,0:-2] = new_m
                #     new_m = new_m2
                new_m = Legacy.add_new_label_to_M(cnn, new_m, input_image[z], input_prob[z], new_rhoana, label1)
                new_m = Legacy.add_new_label_to_M(cnn, new_m, input_image[z], input_prob[z], new_rhoana, label2)
                # re-propapage new_m to bigM
                bigM[z] = new_m
                fixes.append('Good')
            else:
                # rhoanas.append(dojoVI(input_gold, rhoana_after_merge_correction))
                # skipping this one
                fixes.append('Bad')
                continue
        return bigM, rhoana_after_merge_correction, fixes, rhoanas
    @staticmethod
    def create_bigM_without_mask(cnn, volume, volume_prob, volume_segmentation, oversampling=False, verbose=False, max=100000):
        """Build the per-slice merge-probability matrix ("bigM").

        For every slice, neighboring-label patch pairs are generated,
        grouped, scored by the CNN, and written symmetrically into an
        (n_labels x n_labels) matrix initialized to -1 (meaning "no score").

        NOTE(review): parameter ``max`` shadows the builtin; kept because
        renaming would break callers passing it by keyword. The loop
        variable ``slice`` shadows the builtin slice type within this scope.
        """
        bigM = []
        global_patches = []
        if type(volume) is list:
            z_s = len(volume)
        else:
            z_s = volume.shape[0]
        t0 = time.time()
        for slice in range(z_s):
            image = volume[slice]
            prob = volume_prob[slice]
            segmentation = volume_segmentation[slice]
            patches = Patch.patchify(image, prob, segmentation, oversampling=oversampling, max=max, min_pixels=1)
            if verbose:
                print len(patches), 'generated in', time.time()-t0, 'seconds.'
            # return patches
            t0 = time.time()
            grouped_patches = Patch.group(patches)
            if verbose:
                print 'Grouped into', len(grouped_patches.keys()), 'patches in', time.time()-t0, 'seconds.'
            # NOTE(review): global_patches is accumulated but never returned
            global_patches.append(patches)
            hist = Util.get_histogram(segmentation.astype(np.float))
            labels = len(hist)
            # create Matrix
            M = np.zeros((labels, labels), dtype=np.float)
            # .. and initialize with -1
            M[:,:] = -1
            for l_n in grouped_patches.keys():
                # keys look like "<label>-<neighbor>"
                l = int(l_n.split('-')[0])
                n = int(l_n.split('-')[1])
                # test this patch group for l and n
                prediction = Patch.test_and_unify(grouped_patches[l_n], cnn)
                # fill value into matrix (symmetric)
                M[l,n] = prediction
                M[n,l] = prediction
            # now the matrix for this slice is filled
            bigM.append(M)
        return bigM
@staticmethod
def VI(gt, seg):
# total_vi = 0
slice_vi = []
if type(gt) is list:
z_s = len(gt)
else:
z_s = gt.shape[0]
for i in range(z_s):
current_vi = Util.vi(gt[i].astype(np.int64), seg[i].astype(np.int64))
# total_vi += current_vi
slice_vi.append(current_vi)
# total_vi /= 10
return np.mean(slice_vi), np.median(slice_vi), slice_vi
@staticmethod
def add_new_label_to_M(cnn, m, input_image, input_prob, input_rhoana, label1):
# calculate neighbors of the two new labels
label1_neighbors = Util.grab_neighbors(input_rhoana, label1)
for l_neighbor in label1_neighbors:
# recalculate new neighbors of l
if l_neighbor == 0:
# ignore neighbor zero
continue
prediction = Patch.grab_group_test_and_unify(cnn, input_image, input_prob, input_rhoana, label1, l_neighbor, oversampling=False)
m[label1,l_neighbor] = prediction
m[l_neighbor,label1] = prediction
return m
@staticmethod
def splits_global_from_M_automatic(cnn, big_M, volume, volume_prob, volume_segmentation, volume_groundtruth=np.zeros((1,1)), sureness_threshold=0.95, smallest_first=False, oversampling=False, verbose=True, maxi=10000, FP=False):
'''
'''
rhoanas = []
def dojoVI(gt, seg):
# total_vi = 0
slice_vi = []
for i in range(len(gt)):
current_vi = Util.vi(gt[i].astype(np.int64), seg[i].astype(np.int64))
# total_vi += current_vi
slice_vi.append(current_vi)
# total_vi /= 10
return np.mean(slice_vi), np.median(slice_vi), slice_vi
# explicit copy
bigM = [None]*len(big_M)
for z in range(len(big_M)):
bigM[z] = np.array(big_M[z])
# for development, we just need the matrix and the patches
# return bigM, None, global_patches
out_volume = np.array(volume_segmentation)
# return out_volume
good_fix_counter = 0
bad_fix_counter = 0
# error_rate = 0
fixes = []
vi_s_30mins = []
superMax = -np.inf
j = 0 # minute counter
# for i in range(60): # no. corrections in 1 minute
#for i in range(17280): # no. corrections in 24 h
i = 0
time_counter = 0
while True: # no time limit
# print 'Correction', i
if (j>0 and j % 30 == 0):
# compute VI every 30 minutes
vi_after_30_min = []
for ov in range(out_volume.shape[0]):
vi = Util.vi(volume_groundtruth[ov], out_volume[ov])
vi_after_30_min.append(vi)
| |
<filename>pypyr/utils/filesystem.py
"""Utility functions for file system operations. Read, format files, write."""
from abc import ABC, abstractmethod
import glob
from itertools import chain
import json
import logging
import os
from pathlib import Path
from tempfile import NamedTemporaryFile
from pypyr.errors import Error
import pypyr.yaml
# pypyr logger means the log level will be set correctly and output formatted.
logger = logging.getLogger(__name__)
class FileRewriter(ABC):
    """FileRewriter reads input file, formats it and write to output file.

    Use this abstract base class to implement rewriters.

    This base class contains useful functionality to loop through input and
    output paths, leaving the handling of individual files up to the deriving
    classes.
    """

    def __init__(self, formatter):
        """Initialize formatter.

        Args:
            formatter: Callable object that will format the IN file payload to
                       create OUT file.
        """
        self.formatter = formatter

    @abstractmethod
    def in_to_out(self, in_path, out_path):
        """Take in_path, applies formatting, writes to out_path.

        Input arguments can be str or path-like. Relative or absolute paths
        will work.

        Args:
            in_path: str or path-like. Must refer to a single existing file.
            out_path: str or path-like. Must refer to a single destination file
                      location. will create directory structure if it doesn't
                      exist.

        Returns:
            None.
        """
        raise NotImplementedError(
            'you must implement in_to_out(in_path, out_path) for a '
            'FileFormatter')

    def files_in_to_out(self, in_path, out_path=None):
        """Write in files to out, calling the line_handler on each line.

        Calls file_in_to_out under the hood to format the in_path payload. The
        formatting processing is done by the self.formatter instance.

        Args:
            in_path: str, path-like, or an iterable (list/tuple) of
                     strings/paths. Each str/path can be a glob, relative or
                     absolute path.
            out_path: str or path-like. Can refer to a file or a directory.
                      will create directory structure if it doesn't exist. If
                      in-path refers to >1 file (e.g it's a glob or list), out
                      path can only be a directory - it doesn't make sense to
                      write >1 file to the same single file (this is no an
                      appender.) To ensure out_path is read as a directory and
                      not a file, be sure to have the path separator (/) at the
                      end.
                      Top tip: Path-like objects strip the trailing slash. If
                      you want to pass in a dir that does not exist yet as
                      out-path with a trailing /, you should be passing it as a
                      str to preserve the /.
                      If out_path is not specified or None, will in-place edit
                      and overwrite the in-files.

        Returns:
            None.
        """
        in_paths = get_glob(in_path)

        in_count = len(in_paths)
        if in_count == 0:
            logger.debug(f'in path found {in_count} paths.')
        else:
            logger.debug(f'in path found {in_count} paths:')
            for path in in_paths:
                logger.debug(f'{path}')

            logger.debug(
                'herewith ends the paths. will now process each file.')

        if in_paths:
            # derive the destination directory, ensure it's ready for writing
            basedir_out = None
            is_outfile_name_known = False
            if out_path:
                # outpath could be a file, or a dir
                pathlib_out = Path(out_path)
                # yep, Path() strips trailing /, hence check original string
                if isinstance(out_path, str) and out_path.endswith(os.sep):
                    # ensure dir - mimic posix mkdir -p
                    pathlib_out.mkdir(parents=True, exist_ok=True)
                    basedir_out = pathlib_out
                elif pathlib_out.is_dir():
                    basedir_out = pathlib_out
                else:
                    if len(in_paths) > 1:
                        raise Error(
                            f'{in_path} resolves to {len(in_paths)} files, '
                            'but you specified only a single file as out '
                            f'{out_path}. If the outpath is meant to be a '
                            'directory, put a / at the end.')
                    # at this point it must be a file (not dir) path
                    # make sure that the parent dir exists
                    basedir_out = pathlib_out.parent
                    # BUG FIX: was basedir_out.parent.mkdir(...), which created
                    # the grandparent instead of the out file's parent dir and
                    # could leave the actual parent missing, breaking the later
                    # open(out_path, 'w') in deriving classes.
                    basedir_out.mkdir(parents=True, exist_ok=True)
                    is_outfile_name_known = True

            # loop through all the in files and write them to the out dir
            file_counter = 0
            is_edit = False
            for path in in_paths:
                actual_in = Path(path)
                # recursive glob returns dirs too, only interested in files
                if actual_in.is_file():
                    if basedir_out:
                        if is_outfile_name_known:
                            actual_out = pathlib_out
                        else:
                            # default to original src file name if only out dir
                            # specified without an out file name
                            actual_out = basedir_out.joinpath(actual_in.name)

                        logger.debug("writing %s to %s", path, actual_out)
                        self.in_to_out(in_path=actual_in, out_path=actual_out)
                    else:
                        logger.debug("editing %s", path)
                        self.in_to_out(in_path=actual_in)
                        is_edit = True
                    file_counter += 1

            if is_edit:
                logger.info(
                    "edited & wrote %s file(s) at %s", file_counter, in_path)
            else:
                logger.info(
                    "read %s, formatted and wrote %s file(s) to %s",
                    in_path, file_counter, out_path)
        else:
            logger.info("%s found no files", in_path)
class ObjectRewriter(FileRewriter):
    """Load a single file into an object, run formatter on it and write out.

    Object instantiation takes a formatter.

    writer = StreamRewriter(formatter)

    Formatter signature: iterable = formatter(iterable)
    It returns an iterator. The single input argument is an iterable.
    Tip, use function or callable object with __call__

    Object instantion also takes an ObjectRepresenter. An ObjectRepresenter
    has a load and a dump method that handles the object deserialization and
    serialization.
    """

    def __init__(self, formatter, object_representer):
        """Initialize formatter and object representer.

        Args:
            formatter: Callable object/function that will format object loaded
                       from in file. Formatter signature:
                       iterable = formatter(iterable)
            object_representer: An ObjectRepresenter instance.
        """
        super().__init__(formatter)
        self.object_representer = object_representer
        logger.debug('obj loader set')

    def in_to_out(self, in_path, out_path=None):
        """Load file into object, formats, writes object to out.

        If in_path and out_path point to the same thing it will in-place edit
        and overwrite the in path. Even easier, if you do want to edit a file
        in place, don't specify out_path, or set it to None.

        Args:
            in_path: str or path-like. Must refer to a single existing file.
            out_path: str or path-like. Must refer to a single destination file
                      location. will create directory structure if it doesn't
                      exist.
                      If out_path is not specified or None, will in-place edit
                      and overwrite the in-files.

        Returns:
            None.
        """
        # Writing straight back onto the open in-file would clobber it, so
        # fall back to the temp-file code path for in-place edits.
        if is_same_file(in_path, out_path):
            logger.debug(
                "in path and out path are the same file. writing to temp "
                "file and then replacing in path with the temp file.")
            out_path = None

        logger.debug("opening source file: %s", in_path)
        # The whole payload is deserialized into memory before formatting
        # (contrast with StreamRewriter, which works line-by-line).
        with open(in_path) as infile:
            obj = self.object_representer.load(infile)

        if out_path:
            logger.debug(
                f"opening destination file for writing: {out_path}")
            ensure_dir(out_path)
            with open(out_path, 'w') as outfile:
                self.object_representer.dump(outfile, self.formatter(obj))
            return
        else:
            # In-place edit: write to a temp file in the same directory
            # (same filesystem, so the final move is atomic-ish), then swap.
            logger.debug("opening temp file for writing...")
            with NamedTemporaryFile(mode='w+t',
                                    dir=os.path.dirname(in_path),
                                    delete=False) as outfile:
                self.object_representer.dump(outfile, self.formatter(obj))

            # Replace the original only after the temp file is closed.
            logger.debug("moving temp file to: %s", in_path)
            move_temp_file(outfile.name, infile.name)
class StreamRewriter(FileRewriter):
    """Streaming style in-to-out reader and writer.

    Reads IN file line-by-line, formats each line and writes to OUT in a
    stream. You can expect memory use to stay more or less flat, depending on
    how big your lines are.

    Object instantiation takes a formatter.

    writer = StreamRewriter(formatter)

    Formatter signature: iterator = formatter(iterable)
    It returns an iterator. The single input argument is an iterable.
    Tip, use function or callable object with __call__
    """

    def in_to_out(self, in_path, out_path=None):
        """Write a single file in to out, running self.formatter on each line.

        If in_path and out_path point to the same thing it will in-place edit
        and overwrite the in path. Even easier, if you do want to edit a file
        in place, don't specify out_path, or set it to None.

        Args:
            in_path: str or path-like. Must refer to a single existing file.
            out_path: str or path-like. Must refer to a single destination file
                      location. will create directory structure if it doesn't
                      exist.
                      If out_path is not specified or None, will in-place edit
                      and overwrite the in-files.

        Returns:
            None.
        """
        is_in_place_edit = False
        # Same source and destination means in-place edit: route through the
        # temp-file branch instead of writing onto the file being read.
        if is_same_file(in_path, out_path):
            logger.debug(
                "in path and out path are the same file. writing to temp "
                "file and then replacing in path with the temp file.")
            out_path = None
            is_in_place_edit = True

        logger.debug("opening source file: %s", in_path)
        with open(in_path) as infile:
            if out_path:
                logger.debug(
                    "opening destination file for writing: %s", out_path)
                ensure_dir(out_path)
                with open(out_path, 'w') as outfile:
                    # formatter yields lines lazily; writelines streams them.
                    outfile.writelines(self.formatter(infile))
                return
            else:
                # Temp file lives in the same dir as in_path so the final
                # replace stays on one filesystem.
                logger.debug("opening temp file for writing...")
                with NamedTemporaryFile(mode='w+t',
                                        dir=os.path.dirname(in_path),
                                        delete=False) as outfile:
                    outfile.writelines(self.formatter(infile))
                    is_in_place_edit = True

        # only replace infile AFTER it's closed, outside the with.
        # pragma exclude because func actually returns on 287 in if out_path,
        # and cov not smart enough to realize that !is_in_place_edit won't ever
        # happen here (the function will have exited already)
        if is_in_place_edit:  # pragma: no branch
            logger.debug("moving temp file to: %s", in_path)
            move_temp_file(outfile.name, infile.name)
class ObjectRepresenter(ABC):
"""Abstract base class to handle object serialization and deserialization.
Derive from this base | |
** 2 + 4), x) == asinh(x) / 2
assert manualintegrate(1 / sqrt(4 * x ** 2 + 1), x) == asinh(2 * x) / 2
assert manualintegrate(1 / sqrt(a * x ** 2 + 1), x) == Piecewise(
(sqrt(-1 / a) * asin(x * sqrt(-a)), a < 0),
(sqrt(1 / a) * asinh(sqrt(a) * x), a > 0),
)
assert manualintegrate(1 / sqrt(a + x ** 2), x) == Piecewise(
(asinh(x * sqrt(1 / a)), a > 0), (acosh(x * sqrt(-1 / a)), a < 0)
)
# acosh
assert manualintegrate(1 / sqrt(x ** 2 - 1), x) == acosh(x)
assert manualintegrate(1 / sqrt(x ** 2 - 4), x) == acosh(x / 2)
assert manualintegrate(1 / sqrt(4 * x ** 2 - 4), x) == acosh(x) / 2
assert manualintegrate(1 / sqrt(9 * x ** 2 - 1), x) == acosh(3 * x) / 3
assert manualintegrate(1 / sqrt(a * x ** 2 - 4), x) == Piecewise(
(sqrt(1 / a) * acosh(sqrt(a) * x / 2), a > 0)
)
assert manualintegrate(1 / sqrt(-a + 4 * x ** 2), x) == Piecewise(
(asinh(2 * x * sqrt(-1 / a)) / 2, -a > 0),
(acosh(2 * x * sqrt(1 / a)) / 2, -a < 0),
)
# piecewise
assert manualintegrate(1 / sqrt(a - b * x ** 2), x) == Piecewise(
(sqrt(a / b) * asin(x * sqrt(b / a)) / sqrt(a), And(-b < 0, a > 0)),
(sqrt(-a / b) * asinh(x * sqrt(-b / a)) / sqrt(a), And(-b > 0, a > 0)),
(sqrt(a / b) * acosh(x * sqrt(b / a)) / sqrt(-a), And(-b > 0, a < 0)),
)
assert manualintegrate(1 / sqrt(a + b * x ** 2), x) == Piecewise(
(sqrt(-a / b) * asin(x * sqrt(-b / a)) / sqrt(a), And(a > 0, b < 0)),
(sqrt(a / b) * asinh(x * sqrt(b / a)) / sqrt(a), And(a > 0, b > 0)),
(sqrt(-a / b) * acosh(x * sqrt(-b / a)) / sqrt(-a), And(a < 0, b > 0)),
)
def test_manualintegrate_trig_substitution():
    """Integrands solved via trigonometric substitution.

    Results with domain restrictions come back as Piecewise with the
    validity condition attached.
    """
    assert manualintegrate(sqrt(16 * x ** 2 - 9) / x, x) == Piecewise(
        (
            sqrt(16 * x ** 2 - 9) - 3 * acos(3 / (4 * x)),
            And(x < Rational(3, 4), x > Rational(-3, 4)),
        )
    )
    assert manualintegrate(1 / (x ** 4 * sqrt(25 - x ** 2)), x) == Piecewise(
        (
            -sqrt(-(x ** 2) / 25 + 1) / (125 * x)
            - (-(x ** 2) / 25 + 1) ** (3 * S.Half) / (15 * x ** 3),
            And(x < 5, x > -5),
        )
    )
    assert manualintegrate(x ** 7 / (49 * x ** 2 + 1) ** (3 * S.Half), x) == (
        (49 * x ** 2 + 1) ** (5 * S.Half) / 28824005
        - (49 * x ** 2 + 1) ** (3 * S.Half) / 5764801
        + 3 * sqrt(49 * x ** 2 + 1) / 5764801
        + 1 / (5764801 * sqrt(49 * x ** 2 + 1))
    )
def test_manualintegrate_trivial_substitution():
    """(f(x) - f(-x))/x integrands: the -x substitution splits the integral."""
    assert manualintegrate((exp(x) - exp(-x)) / x, x) == -Ei(-x) + Ei(x)
    f = Function("f")
    # For an unknown f the result stays as unevaluated Integrals.
    assert manualintegrate((f(x) - f(-x)) / x, x) == -Integral(f(-x) / x, x) + Integral(
        f(x) / x, x
    )
def test_manualintegrate_rational():
    """Rational integrands 1/(a - x**2): atanh inside, acoth outside |a|."""
    assert manualintegrate(1 / (4 - x ** 2), x) == Piecewise(
        (acoth(x / 2) / 2, x ** 2 > 4), (atanh(x / 2) / 2, x ** 2 < 4)
    )
    assert manualintegrate(1 / (-1 + x ** 2), x) == Piecewise(
        (-acoth(x), x ** 2 > 1), (-atanh(x), x ** 2 < 1)
    )
def test_manualintegrate_special():
    """Integrands whose antiderivatives are special functions.

    Covers erf/erfi, uppergamma, Ei, Fresnel, Ci/Si, Chi/Shi, li, polylog
    and elliptic integrals. Every case cross-checks the closed form F by
    verifying F.diff(x) equals the integrand f.
    """
    f, F = 4 * exp(-(x ** 2) / 3), 2 * sqrt(3) * sqrt(pi) * erf(sqrt(3) * x / 3)
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = 3 * exp(4 * x ** 2), 3 * sqrt(pi) * erfi(2 * x) / 4
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = x ** Rational(1, 3) * exp(-x / 8), -16 * uppergamma(Rational(4, 3), x / 8)
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = exp(2 * x) / x, Ei(2 * x)
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = exp(1 + 2 * x - x ** 2), sqrt(pi) * exp(2) * erf(x - 1) / 2
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f = sin(x ** 2 + 4 * x + 1)
    F = (
        sqrt(2)
        * sqrt(pi)
        * (
            -sin(3) * fresnelc(sqrt(2) * (2 * x + 4) / (2 * sqrt(pi)))
            + cos(3) * fresnels(sqrt(2) * (2 * x + 4) / (2 * sqrt(pi)))
        )
        / 2
    )
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = (
        cos(4 * x ** 2),
        sqrt(2) * sqrt(pi) * fresnelc(2 * sqrt(2) * x / sqrt(pi)) / 4,
    )
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = sin(3 * x + 2) / x, sin(2) * Ci(3 * x) + cos(2) * Si(3 * x)
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = sinh(3 * x - 2) / x, -sinh(2) * Chi(3 * x) + cosh(2) * Shi(3 * x)
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = 5 * cos(2 * x - 3) / x, 5 * cos(3) * Ci(2 * x) + 5 * sin(3) * Si(2 * x)
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = cosh(x / 2) / x, Chi(x / 2)
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = cos(x ** 2) / x, Ci(x ** 2) / 2
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = 1 / log(2 * x + 1), li(2 * x + 1) / 2
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = polylog(2, 5 * x) / x, polylog(3, 5 * x)
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = (
        5 / sqrt(3 - 2 * sin(x) ** 2),
        5 * sqrt(3) * elliptic_f(x, Rational(2, 3)) / 3,
    )
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
    f, F = sqrt(4 + 9 * sin(x) ** 2), 2 * elliptic_e(x, Rational(-9, 4))
    assert manualintegrate(f, x) == F and F.diff(x).equals(f)
def test_manualintegrate_derivative():
    """Integrating explicit Derivative objects.

    A derivative w.r.t. x antidifferentiates back to the expression; a
    derivative w.r.t. another variable stays an unevaluated Integral, or
    just drops one order in x for mixed derivatives.
    """
    assert manualintegrate(pi * Derivative(x ** 2 + 2 * x + 3), x) == pi * (
        (x ** 2 + 2 * x + 3)
    )
    assert manualintegrate(Derivative(x ** 2 + 2 * x + 3, y), x) == Integral(
        Derivative(x ** 2 + 2 * x + 3, y)
    )
    assert manualintegrate(Derivative(sin(x), x, x, x, y), x) == Derivative(
        sin(x), x, x, y
    )
def test_manualintegrate_Heaviside():
    """Integrands containing Heaviside step functions.

    Constant-argument steps collapse to 1 or 0; x-dependent steps carry
    through as Heaviside factors on the antiderivative.
    """
    assert manualintegrate(Heaviside(x), x) == x * Heaviside(x)
    assert manualintegrate(x * Heaviside(2), x) == x ** 2 / 2
    assert manualintegrate(x * Heaviside(-2), x) == 0
    assert manualintegrate(x * Heaviside(x), x) == x ** 2 * Heaviside(x) / 2
    assert manualintegrate(x * Heaviside(-x), x) == x ** 2 * Heaviside(-x) / 2
    assert manualintegrate(Heaviside(2 * x + 4), x) == (x + 2) * Heaviside(2 * x + 4)
    assert manualintegrate(x * Heaviside(x), x) == x ** 2 * Heaviside(x) / 2
    assert manualintegrate(Heaviside(x + 1) * Heaviside(1 - x) * x ** 2, x) == (
        (x ** 3 / 3 + Rational(1, 3)) * Heaviside(x + 1) - Rational(2, 3)
    ) * Heaviside(-x + 1)
    y = Symbol("y")
    assert manualintegrate(sin(7 + x) * Heaviside(3 * x - 7), x) == (
        -cos(x + 7) + cos(Rational(28, 3))
    ) * Heaviside(3 * x - S(7))
    assert manualintegrate(sin(y + x) * Heaviside(3 * x - y), x) == (
        cos(y * Rational(4, 3)) - cos(x + y)
    ) * Heaviside(3 * x - y)
def test_manualintegrate_orthogonal_poly():
n = symbols("n")
a, | |
<reponame>SubstraFoundation/distributed-learning-contributivity
# -*- coding: utf-8 -*-
"""
This enables to parameterize a desired scenario to mock a multi-partner ML project.
"""
import datetime
import os
import re
import uuid
from pathlib import Path
import numpy as np
import pandas as pd
from loguru import logger
from sklearn.preprocessing import LabelEncoder
from mplc.multi_partner_learning import MULTI_PARTNER_LEARNING_APPROACHES
from mplc.multi_partner_learning.utils import AGGREGATORS, Aggregator
from . import contributivity, constants, utils
from . import dataset as dataset_module
from .corruption import Corruption, NoCorruption, IMPLEMENTED_CORRUPTION, Duplication
from .partner import Partner
from .splitter import Splitter, IMPLEMENTED_SPLITTERS
class Scenario:
def __init__(
self,
partners_count,
amounts_per_partner,
active_partners_count=1,
dataset=constants.MNIST,
dataset_proportion=1,
samples_split_option='random',
corruption_parameters=None,
init_model_from="random_initialization",
multi_partner_learning_approach="fedavg",
aggregation="data-volume",
gradient_updates_per_pass_count=constants.DEFAULT_GRADIENT_UPDATES_PER_PASS_COUNT,
minibatch_count=constants.DEFAULT_BATCH_COUNT,
epoch_count=constants.DEFAULT_EPOCH_COUNT,
is_early_stopping=True,
contributivity_methods=None,
is_quick_demo=False,
save_path=constants.SINGLE_SCENARIOS_FOLDER_NAME,
scenario_id=1,
val_set='global',
test_set='global',
**kwargs,
):
"""
:param partners_count: int, number of partners. Example: partners_count = 3
:param amounts_per_partner: [float]. Fractions of the
original dataset each partner receives to mock a collaborative ML scenario where each partner provides data
for the ML training.
:param active_partners_count: int, the size of the subset of partners that will participate in
each collaborative learning round, this parameter is only used when 'drfa' is specified as a
learning approach.
:param dataset: dataset.Dataset object, or its string identifier. Default is MNIST.
:param dataset_proportion: float (default: 1)
:param samples_split_option: Splitter object, or its string identifier (for instance 'random', or 'stratified')
Define the strategy to use to split the data samples between the partners.
Default, RandomSplitter.
:param corruption_parameters: list of Corruption object, or its string identifier, one for each partner.
Enable to artificially corrupt partner's data.
For instance: [Permutation(proportion=0.2), 'random', 'not-corrupted']
:param init_model_from: None (default) or path
:param multi_partner_learning_approach: 'fedavg' (default), 'seq-pure', 'seq-with-final-agg' or 'seqavg'
Define the multi-partner learning approach
:param aggregation:Aggregator object, or string identifier: 'data_volume' (default), 'uniform' or 'local_score'
:param gradient_updates_per_pass_count: int
:param minibatch_count: int
:param epoch_count: int
:param is_early_stopping: boolean. Stop the training if scores on val_set reach a plateau
:param contributivity_methods: A declarative list `[]` of the contributivity measurement methods to be executed.
:param is_quick_demo: boolean. Useful for debugging
:param save_path: path where to save the scenario outputs (relative to current working directory)
:param scenario_id: str
:param **kwargs:
"""
# ---------------------------------------------------------------------
# Initialization of the dataset defined in the config of the experiment
# ---------------------------------------------------------------------
# Raise Exception if unknown parameters in the config of the scenario
params_known = [
"dataset",
"dataset_proportion",
"val_set",
"test_set"
] # Dataset related
params_known += [
"contributivity_methods",
"multi_partner_learning_approach",
"aggregation",
] # Federated learning related
params_known += [
"partners_count",
"active_partners_count",
"amounts_per_partner",
"corruption_parameters",
"samples_split_option",
"samples_split_configuration"
] # Partners related
params_known += [
"gradient_updates_per_pass_count",
"epoch_count",
"minibatch_count",
"is_early_stopping",
] # Computation related
params_known += ["init_model_from"] # Model related
params_known += ["is_quick_demo"]
params_known += ["is_run_as_part_of_an_experiment",
"save_path",
"scenario_name",
"repeat_count",
]
unrecognised_parameters = [x for x in kwargs.keys() if (x not in params_known and not x.startswith('mpl_'))]
if len(unrecognised_parameters) > 0:
for x in unrecognised_parameters:
logger.debug(f"Unrecognised parameter: {x}")
raise Exception(
f"Unrecognised parameters {unrecognised_parameters}, check your configuration"
)
# Get and verify which dataset is configured
if isinstance(dataset, dataset_module.Dataset):
self.dataset = dataset
elif isinstance(dataset, str):
# Reference the object corresponding to the dataset selected and initialize it
if dataset == constants.MNIST: # default
self.dataset = dataset_module.Mnist()
elif dataset == constants.CIFAR10:
self.dataset = dataset_module.Cifar10()
elif dataset == constants.TITANIC:
self.dataset = dataset_module.Titanic()
elif dataset == constants.ESC50:
self.dataset = dataset_module.Esc50()
elif dataset == constants.IMDB:
self.dataset = dataset_module.Imdb()
elif dataset == constants.FMNIST:
self.dataset = dataset_module.Fmnist()
else:
raise Exception(
f"Dataset named '{dataset}' is not supported (yet). You can construct your own "
f"dataset object, or even add it by contributing to the project !"
)
logger.debug(f"Dataset selected: {self.dataset.name}")
else:
raise AttributeError(f'The dataset parameter cannot be an {type(dataset)}.'
f' Please provides a Dataset instance or a string identifier')
# Proportion of the dataset the computation will used
self.dataset_proportion = dataset_proportion
assert (
self.dataset_proportion > 0
), "Error in the config file, dataset_proportion should be > 0"
assert (
self.dataset_proportion <= 1
), "Error in the config file, dataset_proportion should be <= 1"
if self.dataset_proportion < 1:
self.dataset.shorten_dataset_proportion(self.dataset_proportion)
else:
logger.debug("The full dataset will be used (dataset_proportion is configured to 1)")
logger.debug(
f"Computation use the full dataset for scenario #{scenario_id}"
)
# --------------------------------------
# Definition of collaborative scenarios
# --------------------------------------
# Partners mock different partners in a collaborative data science project
self.partners_list = [] # List of all partners defined in the scenario
self.partners_count = partners_count # Number of partners in the scenario
# For configuring the respective sizes of the partners' datasets
# (% of samples of the dataset for each partner, ...
# ... has to sum to 1, and number of items has to equal partners_count)
self.amounts_per_partner = amounts_per_partner
if np.sum(self.amounts_per_partner) != 1:
raise ValueError("The sum of the amount per partners you provided isn't equal to 1")
if len(self.amounts_per_partner) != self.partners_count:
raise AttributeError(f"The amounts_per_partner list should have a size ({len(self.amounts_per_partner)}) "
f"equals to partners_count ({self.partners_count})")
# To configure how validation set and test set will be organized.
if test_set in ['local', 'global']:
self.test_set = test_set
else:
raise ValueError(f'Test set can be \'local\' or \'global\' not {test_set}')
if val_set in ['local', 'global']:
self.val_set = val_set
else:
raise ValueError(f'Validation set can be \'local\' or \'global\' not {val_set}')
# To configure if data samples are split between partners randomly or in a stratified way...
# ... so that they cover distinct areas of the samples space
if isinstance(samples_split_option, Splitter):
if self.val_set != samples_split_option.val_set:
logger.warning('The validation set organisation (local/global) is differently configured between the '
'provided Splitter and Scenario')
if self.test_set != samples_split_option.test_set:
logger.warning('The test set organisation (local/global) is differently configured between the '
'provided Splitter and Scenario')
self.splitter = samples_split_option
else:
splitter_param = {'amounts_per_partner': self.amounts_per_partner,
'val_set': self.val_set,
'test_set': self.test_set,
}
if "samples_split_configuration" in kwargs.keys():
splitter_param.update({'configuration': kwargs["samples_split_configuration"]})
self.splitter = IMPLEMENTED_SPLITTERS[samples_split_option](**splitter_param)
# To configure if the data of the partners are corrupted or not (useful for testing contributivity measures)
if corruption_parameters:
self.corruption_parameters = list(
map(lambda x: x if isinstance(x, Corruption) else IMPLEMENTED_CORRUPTION[x](),
corruption_parameters))
else:
self.corruption_parameters = [NoCorruption() for _ in range(self.partners_count)] # default
# ---------------------------------------------------
# Configuration of the distributed learning approach
# ---------------------------------------------------
self.mpl = None
# Multi-partner learning approach
self.multi_partner_learning_approach = multi_partner_learning_approach
try:
self._multi_partner_learning_approach = MULTI_PARTNER_LEARNING_APPROACHES[
multi_partner_learning_approach]
except KeyError:
text_error = f"Multi-partner learning approach '{multi_partner_learning_approach}' is not a valid "
text_error += "approach. List of supported approach : "
for key in MULTI_PARTNER_LEARNING_APPROACHES.keys():
text_error += f"{key}, "
raise KeyError(text_error)
# Number of active partners per collaborative learning round
self.active_partners_count = active_partners_count
if self._multi_partner_learning_approach == 'drfa':
assert (
1 <= self.active_partners_count < partners_count
), "Number of active partners must be strictly smaller than the total number of partners"
# Define how federated learning aggregation steps are weighted...
# ... Toggle between 'uniform' (default) and 'data_volume'
if isinstance(aggregation, Aggregator):
self.aggregation = aggregation
else:
try:
self.aggregation = AGGREGATORS[aggregation]
except KeyError:
raise ValueError(f"aggregation approach '{aggregation}' is not a valid approach. ")
# Number of epochs, mini-batches and fit_batches in ML training
self.epoch_count = epoch_count
assert (
self.epoch_count > 0
), "Error: in the provided config file, epoch_count should be > 0"
self.minibatch_count = minibatch_count
assert (
self.minibatch_count > 0
), "Error: in the provided config file, minibatch_count should be > 0"
self.gradient_updates_per_pass_count = gradient_updates_per_pass_count
assert self.gradient_updates_per_pass_count > 0, (
"Error: in the provided config file, "
"gradient_updates_per_pass_count should be > 0 "
)
# Early stopping stops ML training when performance increase is not significant anymore
# It is used to optimize the number of epochs and the execution time
self.is_early_stopping = is_early_stopping
# Model used to initialise model
self.init_model_from = init_model_from
if init_model_from == "random_initialization":
self.use_saved_weights = False
else:
self.use_saved_weights = True
# -----------------------------------------------------------------
# Configuration of contributivity measurement contributivity_methods to be tested
# -----------------------------------------------------------------
# List of contributivity measures selected and computed in the scenario
self.contributivity_list = []
# Contributivity methods
self.contributivity_methods = []
if contributivity_methods is not None:
for method in contributivity_methods:
if method in constants.CONTRIBUTIVITY_METHODS:
self.contributivity_methods.append(method)
else:
raise Exception(f"Contributivity method '{method}' is not in contributivity_methods list.")
# -------------
# Miscellaneous
# -------------
# Misc.
self.scenario_id = scenario_id
self.repeat_count = kwargs.get('repeat_count', 1)
# The quick demo parameters overwrites previously defined parameters | |
# Space Invaders
# Created by <NAME>
# Adapted by <NAME>
# !/usr/bin/env python
from pygame import *
import pygame.draw
from nave import Ship
from tiro import Bullet
from inimigo import Enemy
from barreira import Blocker
from ufo import Mystery
from explosao import Explosion
from vida import Life
from texto import Text
import sys
from random import shuffle, choice
import numpy as np
import peewee
from pontos_orm import ScoreOrm
from estados_orm import StateOrm
from estados import State
from pontos import Score
# RGB Constants
WHITE = (255, 255, 255)
GREEN = (153, 255, 187)
YELLOW = (241, 255, 0)
BLUE = (80, 255, 239)
PURPLE = (203, 0, 255)
RED = (237, 28, 36)
SCREEN = display.set_mode((800, 600))
FONT = "fonts/space_invaders.ttf"
IMG_NAMES = ["ship", "ship", "mystery", "enemy1_1", "enemy1_2", "enemy2_1", "enemy2_2",
"enemy3_1", "enemy3_2", "explosionblue", "explosiongreen", "explosionpurple", "laser", "enemylaser"]
IMAGE = {name: image.load("images/{}.png".format(name)).convert_alpha() for name in IMG_NAMES}
class SpaceInvaders(object):
    def __init__(self):
        """Initialise pygame, the window, and session-level game state."""
        # Small audio buffer (512) to cut sound-effect latency.
        mixer.pre_init(44100, -16, 1, 512)
        init()
        self.caption = display.set_caption('Space Invaders')
        self.screen = SCREEN
        self.background = image.load('images/background.jpg').convert()
        # Screen-flow flags: main menu -> running game -> game over/scoreboard.
        self.startGame = False
        self.mainScreen = True
        self.gameOver = False
        self.scoreBoard = False
        # Initial value for a new game
        self.enemyPositionDefault = 65
        # Counter for enemy starting position (increased each new round)
        self.enemyPositionStart = self.enemyPositionDefault
        # Current enemy starting position
        self.enemyPosition = self.enemyPositionStart
    def reset(self, score, lives, newGame=False):
        """Rebuild sprites, timers and counters for a new round.

        Args:
            score: score carried into the round.
            lives: number of player lives to show in the HUD.
            newGame: when True also rebuild the four blocker barriers
                (fresh game rather than just the next round).
        """
        self.player = Ship(IMAGE, game)
        self.playerGroup = sprite.Group(self.player)
        self.explosionsGroup = sprite.Group()
        self.bullets = sprite.Group()
        self.mysteryShip = Mystery(IMAGE, game)
        self.mysteryGroup = sprite.Group(self.mysteryShip)
        self.enemyBullets = sprite.Group()
        self.reset_lives(lives)
        self.enemyPosition = self.enemyPositionStart
        self.make_enemies()
        # Only create blockers on a new game, not a new round
        if newGame:
            self.allBlockers = sprite.Group(self.make_blockers(0), self.make_blockers(1), self.make_blockers(2),
                                            self.make_blockers(3))
        self.keys = key.get_pressed()
        self.clock = time.Clock()
        # Independent timers: enemy fire cadence, music notes, ship respawn.
        self.timer = time.get_ticks()
        self.noteTimer = time.get_ticks()
        self.shipTimer = time.get_ticks()
        self.score = score
        self.lives = lives
        self.create_audio()
        self.create_text()
        # Grid coordinates of the most recent kill (-1 = none yet).
        self.killedRow = -1
        self.killedColumn = -1
        self.makeNewShip = False
        self.shipAlive = True
        # 5x10 grid mirroring the enemy formation; presumably marks killed
        # slots in the collision code — confirm against check_collisions.
        self.killedArray = [[0] * 10 for x in range(5)]
def make_blockers(self, number):
blockerGroup = sprite.Group()
for row in range(4):
for column in range(9):
blocker = Blocker(10, GREEN, row, column, game)
blocker.rect.x = 50 + (200 * number) + (column * blocker.width)
blocker.rect.y = 450 + (row * blocker.height)
blockerGroup.add(blocker)
return blockerGroup
def reset_lives_sprites(self):
self.life1 = Life(657, 3, IMAGE, game)
self.life2 = Life(685, 3, IMAGE, game)
self.life3 = Life(713, 3, IMAGE, game)
self.life4 = Life(741, 3, IMAGE, game)
self.life5 = Life(769, 3, IMAGE, game)
if self.lives == 5:
self.livesGroup = sprite.Group(self.life1, self.life2, self.life3, self.life4, self.life5)
elif self.lives == 4:
self.livesGroup = sprite.Group(self.life1, self.life2, self.life3, self.life4)
elif self.lives == 3:
self.livesGroup = sprite.Group(self.life1, self.life2, self.life3)
elif self.lives == 2:
self.livesGroup = sprite.Group(self.life1, self.life2)
elif self.lives == 1:
self.livesGroup = sprite.Group(self.life1)
    def reset_lives(self, lives):
        """Set the life count and rebuild the HUD life sprites to match."""
        self.lives = lives
        self.reset_lives_sprites()
def create_audio(self):
self.sounds = {}
for sound_name in ["shoot", "shoot2", "invaderkilled", "mysterykilled", "shipexplosion"]:
self.sounds[sound_name] = mixer.Sound("sounds/{}.wav".format(sound_name))
self.sounds[sound_name].set_volume(0.2)
self.musicNotes = [mixer.Sound("sounds/{}.wav".format(i)) for i in range(4)]
for sound in self.musicNotes:
sound.set_volume(0.5)
self.noteIndex = 0
def play_main_music(self, currentTime):
moveTime = self.enemies.sprites()[0].moveTime
if currentTime - self.noteTimer > moveTime:
self.note = self.musicNotes[self.noteIndex]
if self.noteIndex < 3:
self.noteIndex += 1
else:
self.noteIndex = 0
self.note.play()
self.noteTimer += moveTime
def background_stars(self, game):
# The background stars:
# Set the position:
self.stars_x = np.random.rand(5) * 800
self.stars_y = np.random.rand(5) * 600
# Set the velocity:
self.stars_v = np.zeros(5)
for i in np.arange(5):
self.stars_v[i] = int(0.5 + np.random.uniform() * 0.1)
game.stars_y = (game.stars_y + game.stars_v * 0.2) % 600
for i in range(5):
game.stars_x[i] = game.stars_x[i] if not game.stars_v[i] else \
game.stars_x[i] + 0.1 * int((np.random.rand() - 0.5) * 2.1)
pygame.draw.aaline(game.screen, WHITE,
(int(game.stars_x[i]), int(game.stars_y[i])),
(int(game.stars_x[i]), int(game.stars_y[i])))
    def create_text(self):
        """Pre-render every static text label used by the game screens."""
        # Main menu titles.
        self.titleText = Text(FONT, 50, "Space Invaders", WHITE, 164, 155)
        self.titleText2 = Text(FONT, 25, "Press any key to continue ...", WHITE, 201, 225)
        self.gameOverText = Text(FONT, 50, "Game Over! ", WHITE, 250, 270)
        self.nextRoundText = Text(FONT, 50, "Next Round! ", WHITE, 240, 270)
        # Menu legend: point values per enemy type.
        self.enemy1Text = Text(FONT, 25, "   =  10 pts", GREEN, 368, 270)
        self.enemy2Text = Text(FONT, 25, "   =  20 pts", BLUE, 368, 320)
        self.enemy3Text = Text(FONT, 25, "   =  30 pts", PURPLE, 368, 370)
        self.enemy4Text = Text(FONT, 25, "   =  ?????", RED, 368, 420)
        # In-game HUD labels.
        self.scoreText = Text(FONT, 20, "Score:", WHITE, 4, 5)
        self.livesText = Text(FONT, 20, "Lives: ", WHITE, 580, 5)
        self.leaderboardText = Text(FONT, 50, "Scoreboard: ", WHITE, 150, 100)
    def check_input(self):
        """Poll events; fire player lasers on SPACE.

        Below 1000 points the ship fires a single centre laser; from 1000
        points on it fires two side-by-side lasers per volley.
        """
        self.keys = key.get_pressed()
        for e in event.get():
            if e.type == QUIT:
                sys.exit()
            if e.type == KEYDOWN:
                if e.key == K_SPACE:
                    # Only one volley on screen at a time, and only while
                    # the ship is alive.
                    if len(self.bullets) == 0 and self.shipAlive:
                        if self.score < 1000:
                            bullet = Bullet(self.player.rect.x + 23, self.player.rect.y + 5, -1, 15, "laser", "center",
                                            game, IMAGE)
                            self.bullets.add(bullet)
                            self.allSprites.add(self.bullets)
                            self.sounds["shoot"].play()
                        else:
                            leftbullet = Bullet(self.player.rect.x + 8, self.player.rect.y + 5, -1, 15, "laser", "left",
                                                game, IMAGE)
                            rightbullet = Bullet(self.player.rect.x + 38, self.player.rect.y + 5, -1, 15, "laser",
                                                 "right", game, IMAGE)
                            self.bullets.add(leftbullet)
                            self.bullets.add(rightbullet)
                            self.allSprites.add(self.bullets)
                            self.sounds["shoot2"].play()
def make_enemies(self):
enemies = sprite.Group()
for row in range(5):
for column in range(10):
enemy = Enemy(row, column, IMAGE, game)
enemy.rect.x = 157 + (column * 50)
enemy.rect.y = self.enemyPosition + (row * 45)
enemies.add(enemy)
self.enemies = enemies
self.allSprites = sprite.Group(self.player, self.enemies, self.livesGroup, self.mysteryShip)
def make_enemies_shoot(self):
columnList = []
for enemy in self.enemies:
columnList.append(enemy.column)
columnSet = set(columnList)
columnList = list(columnSet)
shuffle(columnList)
column = columnList[0]
enemyList = []
rowList = []
for enemy in self.enemies:
if enemy.column == column:
rowList.append(enemy.row)
row = max(rowList)
for enemy in self.enemies:
if enemy.column == column and enemy.row == row:
if (time.get_ticks() - self.timer) > 700:
self.enemyBullets.add(
Bullet(enemy.rect.x + 14, enemy.rect.y + 20, 1, 5, "enemylaser", "center", game, IMAGE))
self.allSprites.add(self.enemyBullets)
self.timer = time.get_ticks()
def calculate_score(self, row):
scores = {0: 30,
1: 20,
2: 20,
3: 10,
4: 10,
5: choice([50, 100, 150, 300])
}
score = scores[row]
self.score += score
return score
def create_main_menu(self):
self.enemy1 = IMAGE["enemy3_1"]
self.enemy1 = transform.scale(self.enemy1, (40, 40))
self.enemy2 = IMAGE["enemy2_2"]
self.enemy2 = transform.scale(self.enemy2, (40, 40))
self.enemy3 = IMAGE["enemy1_2"]
self.enemy3 = transform.scale(self.enemy3, (40, 40))
self.enemy4 = IMAGE["mystery"]
self.enemy4 = transform.scale(self.enemy4, (80, 40))
self.screen.blit(self.enemy1, (318, 270))
self.screen.blit(self.enemy2, (318, 320))
self.screen.blit(self.enemy3, (318, 370))
self.screen.blit(self.enemy4, (299, 420))
for e in event.get():
if e.type == QUIT:
sys.exit()
if e.type == KEYUP:
self.startGame = True
self.mainScreen = False
def update_enemy_speed(self):
if len(self.enemies) <= 10:
for enemy in self.enemies:
enemy.moveTime = 400
if len(self.enemies) == 5:
for enemy in self.enemies:
enemy.moveTime = 200
    def check_collisions(self):
        """Resolve every bullet/sprite collision for the current frame.

        Handles, in order: player bullets vs enemy bullets, player bullets vs
        invaders, player bullets vs the mystery ship, enemy bullets vs the
        player, invaders reaching the player, and all projectiles/invaders vs
        the defensive blockers.
        """
        # Player bullets vs enemy bullets: both projectiles are destroyed.
        collidedict = sprite.groupcollide(self.bullets, self.enemyBullets, True, False)
        if collidedict:
            for value in collidedict.values():
                for currentSprite in value:
                    self.enemyBullets.remove(currentSprite)
                    self.allSprites.remove(currentSprite)
        # Player bullets vs invaders: play the kill sound, snapshot state,
        # record which grid cell died, score it, and spawn an explosion.
        enemiesdict = sprite.groupcollide(self.bullets, self.enemies, True, False)
        if enemiesdict:
            for value in enemiesdict.values():
                for currentSprite in value:
                    self.sounds["invaderkilled"].play()
                    player_state = State()
                    player_state.save_state(self.player.rect.x, self.lives, "invader")
                    self.killedRow = currentSprite.row
                    self.killedColumn = currentSprite.column
                    score = self.calculate_score(currentSprite.row)
                    explosion = Explosion(currentSprite.rect.x, currentSprite.rect.y, currentSprite.row, False, False,
                                          score, FONT, WHITE, IMAGE, game)
                    self.explosionsGroup.add(explosion)
                    self.allSprites.remove(currentSprite)
                    self.enemies.remove(currentSprite)
                    self.gameTimer = time.get_ticks()
                    # Only one invader kill is processed per frame.
                    break
        # Player bullets vs the mystery ship: score it and spawn a new one.
        mysterydict = sprite.groupcollide(self.bullets, self.mysteryGroup, True, True)
        if mysterydict:
            for value in mysterydict.values():
                for currentSprite in value:
                    currentSprite.mysteryEntered.stop()
                    self.sounds["mysterykilled"].play()
                    player_state = State()
                    player_state.save_state(self.player.rect.x, self.lives, "mystery")
                    score = self.calculate_score(currentSprite.row)
                    explosion = Explosion(currentSprite.rect.x, currentSprite.rect.y, currentSprite.row, False, True,
                                          score, FONT, WHITE, IMAGE, game)
                    self.explosionsGroup.add(explosion)
                    self.allSprites.remove(currentSprite)
                    self.mysteryGroup.remove(currentSprite)
                    # Replace the destroyed mystery ship immediately.
                    newShip = Mystery(IMAGE, game)
                    self.allSprites.add(newShip)
                    self.mysteryGroup.add(newShip)
                    break
        # Enemy bullets vs the player: remove the matching life icon,
        # explode the ship, and schedule a respawn.
        bulletsdict = sprite.groupcollide(self.enemyBullets, self.playerGroup, True, False)
        if bulletsdict:
            for value in bulletsdict.values():
                for playerShip in value:
                    # Each life count has a dedicated sprite (life5..life1)
                    # that must be removed from both groups.
                    if self.lives == 5:
                        self.lives -= 1
                        self.livesGroup.remove(self.life5)
                        self.allSprites.remove(self.life5)
                    elif self.lives == 4:
                        self.lives -= 1
                        self.livesGroup.remove(self.life4)
                        self.allSprites.remove(self.life4)
                    elif self.lives == 3:
                        self.lives -= 1
                        self.livesGroup.remove(self.life3)
                        self.allSprites.remove(self.life3)
                    elif self.lives == 2:
                        self.lives -= 1
                        self.livesGroup.remove(self.life2)
                        self.allSprites.remove(self.life2)
                    elif self.lives == 1:
                        self.lives -= 1
                        self.livesGroup.remove(self.life1)
                        self.allSprites.remove(self.life1)
                    elif self.lives == 0:
                        # Hit with no lives left: the game ends.
                        self.gameOver = True
                        self.startGame = False
                    self.sounds["shipexplosion"].play()
                    explosion = Explosion(playerShip.rect.x, playerShip.rect.y, 0, True, False, 0,
                                          FONT, WHITE, IMAGE, game)
                    self.explosionsGroup.add(explosion)
                    self.allSprites.remove(playerShip)
                    self.playerGroup.remove(playerShip)
                    # Flag a respawn; create_new_ship() honors this after a delay.
                    self.makeNewShip = True
                    self.shipTimer = time.get_ticks()
                    self.shipAlive = False
        # Invaders reaching the player end the game immediately.
        if sprite.groupcollide(self.enemies, self.playerGroup, True, True):
            self.gameOver = True
            self.startGame = False
        # Bullets and invaders chew through the defensive blockers.
        sprite.groupcollide(self.bullets, self.allBlockers, True, True)
        sprite.groupcollide(self.enemyBullets, self.allBlockers, True, True)
        sprite.groupcollide(self.enemies, self.allBlockers, False, True)
def create_new_ship(self, createShip, currentTime):
if createShip and (currentTime - self.shipTimer > 900):
self.player = Ship(IMAGE, game)
self.allSprites.add(self.player)
self.playerGroup.add(self.player)
self.makeNewShip = False
self.shipAlive = True
def create_game_over(self, current_time):
self.screen.blit(self.background, (0, 0))
self.background_stars(game)
if current_time - self.timer < 750:
self.gameOverText.draw(self.screen)
if current_time - self.timer > 750 and current_time - self.timer < 1500:
self.screen.blit(self.background, (0, 0))
if current_time - self.timer > 1500 and current_time - self.timer < 2250:
self.gameOverText.draw(self.screen)
if current_time - self.timer > 2250 and current_time - self.timer < 2750:
| |
on an eCommerce platform
Categorical
These are data that have no inherent numerical meaning — for example,
gender (man, woman) or the state of birth of a group of people.
A good way to present categorical values is through graphs.
Ordinal
This is the mixture of numerical and categorical data
Ratings given by a customer as in 5 stars is better than 1 star
"""
"""
# Mean, Median, Mode
# These are the measures of central tendency of a data set.
# Mean
# Mean is given by the total of the values of the samples divided by the number of samples
# x = [10,20,30,40,50]
# mean = (10+20+30+40+50)/5 = 30
# Median
# To calculate the median, sort the values and take the middle value.
# Now, in case there are even number of values then
# the average of the two middle values are taken as the median.
# x = [23, 40, 6, 74, 38, 1, 70]
# sorted_x = [1, 6, 23, 38, 40, 70, 74]
# Median = 38
# The advantage of the median over the mean is that median is less susceptible to outliers
# So, in situations where there is a high chance that there may be outliers present
# in the data set, it is wiser to take the median instead of the mean.
# For example, to understand what is the per capita income of a country
# Mode
# Mode represents the most common value in a data set.
# The mode is the number that is repeated more often than any other
# For example, a retailer may want to understand the mode of sizes purchased
# so that he can set stocking labels optimally.
"""
"""
# Variance and Standard Deviation
Variance and Standard Deviation are essentially a measure
of the spread of the data in the data set.
Variance is the average of the squared differences from the mean.
Standard deviation is the square root of the variance
1. Calculate the mean
2. Calculate the difference from the mean
3. find the square of the differences
4. Variance is the Sum of the squares of the differences
5. Standard deviation is the square root of the Variance
# observations = [23, 40, 6, 74, 38, 1, 70]
# mean = (23+40+6+74+38+1+70) / 7 = 252 /7 = 36
# difference_from_the_mean = [-13, 4, -30, 38, 2, -35, 34]
# square_of_the_differences = [169, 16, 900, 1444, 4, 1225, 1156]
# variance = (169+16+900+1444+4+1225+1156)/7 = 4914/7 = 702
# standard deviation = square_root(702)= 26.49
# Standard deviation is an excellent way to identify outliers.
# Data points that lie more than one standard deviation from the mean can be considered unusual.
# Data points that are more than two standard deviations away from the mean are often treated as outliers and may be excluded from analysis.
"""
"""
Mean, Median, Mode
Let's create some fake income data, centered around 27,000
with a normal distribution and standard deviation of 15,000, with 10,000 data points.
Then, compute the mean (average)
"""
# Demo: generate a synthetic income sample and explore its central tendency.
import numpy as np
# mean=27000, sd=15000, number of samples=10000
incomes = np.random.normal(27000, 15000, 10000)
#loc=150, scale=20, size=1000
# Basic facts about the ndarray itself.
print(type(incomes))
print(incomes.size)
print(incomes)
print(len(incomes))
print(incomes.ndim)
print(incomes.shape)
print(incomes.dtype)
print("Mean value is: ", np.mean(incomes))
print("Median value is: ", np.median(incomes))
from scipy import stats
print("Mode value is: ", stats.mode(incomes)[0])
print("Minimum value is: ", np.min(incomes))
print("Maximum value is: ", np.max(incomes))
print("Standard Deviation is: ", np.std(incomes))
#print("Correlation coefficient value is: ", np.corrcoef(incomes))
# We can segment the income data into buckets and plot it as a histogram:
import matplotlib.pyplot as plt
plt.hist(incomes, 20)
plt.show()
# Box and whisker plot to show distribution
# https://chartio.com/resources/tutorials/what-is-a-box-plot/
plt.boxplot(incomes)
# Explain NumPy_boxplot.png
print("Mean value is: ", np.mean(incomes))
print("Median value is: ", np.median(incomes))
# Adding Bill Gates into the mix. income inequality!(Outliers)
incomes = np.append(incomes, [10000000000])
# Median remains almost the SAME
print("Median value is: ", np.median(incomes))
# Mean changes distinctly
print("Mean value is: ", np.mean(incomes))
# Example for the bincount function.
# NOTE: np.bincount requires non-negative integers and allocates max(x)+1
# bins, so calling it on the raw float incomes (which contain negatives and
# a 10-billion outlier) raises TypeError / exhausts memory. Demonstrate it
# on a small integer sample instead: most frequent value in the sample.
small_ints = np.random.randint(0, 10, 100)
num = np.bincount(small_ints).argmax()
print("Most frequent value is: ", num)
"""
Take this rest on Day 13
"""
# Explain the NumPy_Normal_Distribution.png
# https://www.mathsisfun.com/data/standard-normal-distribution.html
"""
Some of the properties of a standard normal distribution are mentioned below:
The normal curve is symmetric about the mean and bell shaped.
mean = median = mode is zero which is the centre of the curve.
symmetry about the center
50% of values less than the mean and 50% greater than the mean
Approximately 68.26% of the data will be between -1 and +1
(i.e. within 1 standard deviation from the mean),
95.44% between -2 and +2 (within 2 SD from the mean) and
99.72% between -3 and 3 (within 3 SD from the mean)
68 | 95 | 99.7 Rule
Question 1:
Suppose that IQ scores have a bell shaped distribution with a mean of 100
and a standard deviation of 15.
What percentage of people should have an IQ score between 85 and 115
What percentage of people should have an IQ score between 70 and 130
What percentage of people should have an IQ score more than 130
A person with an IQ score greater than 145 is considered genius.
Does empirical rule support this statement?
Sigma_sd = 15
Mu = 100
x1 = 85
x2 = 115
Z Score for x1 = (x1- Mu) / Sigma_sd
= (85 - 100) / 15 = -1.00
Z Score for x2 = (x2- Mu) / Sigma_sd
= (115 - 100) / 15 = +1.00
Refer now to the SD(Z) table to get .3413
"""
"""
Example 1:
A town has 330,000 adults. Their heights are normally distributed with a
mean of 175 cm and a variance of 100 cm 2 .
How many people would you expect to be taller than 205 cm?
The variance of the data set is given to be 100cm 2 .
So, the standard deviation is √100 or 10 cm.
Now, 175+3(10)=205, so the number of people taller than 205 cm corresponds
to the subset of data which lies more than 3 standard deviations above the mean.
The graph above shows that this represents about 0.15%of the data.
However, this percentage is approximate, and in this case, we need more precision.
The actual percentage, correct to 4 decimal places, is 0.1318%.
330,000×0.001318≈435
So, there will be about 435 people in the town taller than 205 cm.
"""
"""
Example 2:
The life of a fully-charged cell phone battery is normally distributed with a
mean of 14 hours with a standard deviation of 1 hour.
What is the probability that a battery lasts at least 13 hours?
The mean is 14 and the standard deviation is 1.
50% of the normal distribution lies to the right of the mean, so 50% of the time,
the battery will last longer than 14 hours.
The interval from 13 to 14 hours represents one standard deviation to the left
of the mean.
So, about 34% of time, the battery will last between 13 and 14 hours.
Therefore, the probability that the battery lasts at least 13 hours is about 34%+50% or 0.84 .
"""
"""
Example 3:
The average weight of a raspberry is 4.4 gm with a standard deviation of 1.3 gm.
What is the probability that a randomly selected raspberry would weigh at
least 3.1 gm but not more than 7.0 gm?
The mean is 4.4 and the standard deviation is 1.3.
Note that
4.4−1.3=3.1
and
4.4+2(1.3)=7.0
So, the interval 3.1≤x≤7.0 is actually between one standard deviation below the mean
and 2 standard deviations above the mean.
In normally distributed data, about 34% of the values lie between the mean
and one standard deviation below the mean, and 34% between the mean and
one standard deviation above the mean.
In addition, 13.5% of the values lie between the first
and second standard deviations above the mean.
Adding the areas, we get 34%+34%+13.5%=81.5%.
Therefore, the probability that a randomly selected raspberry will weigh at least 3.1
gm but not more than 7.0 gm is 81.5% or 0.815 .
"""
"""
import numpy as np
import scipy.stats as stats
import pylab as pl
h = sorted([186, 176, 158, 180, 186, 168, 168, 164, 178, 170, 189, 195, 172,
187, 180, 186, 185, 168, 179, 178, 183, 179, 170, 175, 186, 159,
161, 178, 175, 185, 175, 162, 173, 172, 177, 175, 172, 177, 180]) | |
lindex = next_line(fin_lines, max_line, cindex=lindex)
words = line.split('|')
if len(words) <= 1:
# End of table, just write and continue
file.write(line+'\n')
continue
# End if
entries = [x.strip() for x in words[1:-1]]
# Okay, one check
if len(entries) != len(header_locs):
raise ValueError("Malformed table entry")
# End if
# First output the local name
local_name = entries[local_name_ind]
# Then check the local name, skip variables without a standard_name
standard_name = entries[standard_name_ind]
if not standard_name:
if logger is None:
raise ValueError("{} does not have a standard name in {}".format(local_name, table_name))
else:
logger.debug("{} does not have a standard name in {}".format(local_name, table_name))
continue
else:
# Standard names cannot have dashes or periods
standard_name = standard_name.replace('-', '_').replace('.', '_')
# Create var_name: strip old-style DDT references from local_name and try to substitute array indices
var_name = local_name
if "(" in var_name:
if "%" in var_name and var_name.rfind("%") > var_name.rfind(")"):
if mdtable.type == 'ddt':
ddt_reference = var_name[:var_name.rfind('%')]
var_name = var_name[var_name.rfind('%')+1:]
else:
(actual_var_name, array_reference) = split_var_name_and_array_reference(var_name)
if mdtable.type == 'ddt':
ddt_reference = actual_var_name[:actual_var_name.rfind('%')]
actual_var_name = actual_var_name[actual_var_name.rfind('%')+1:]
for index in array_reference.lstrip("(").rstrip(")").split(","):
# Keep literals and colons, substitute variables
match = re.match(r"[0-9]+|:", index)
if match:
continue
else:
if index.lower() in standard_names.keys():
array_reference = array_reference.replace(index, standard_names[index.lower()])
else:
array_reference = array_reference.replace(index, index + "_XX_SubstituteWithStandardName_XX")
# End if
# End if
# End for
var_name = actual_var_name + array_reference
# End if
elif "%" in var_name:
if mdtable.type == 'ddt':
ddt_reference = var_name[:var_name.rfind('%')]
var_name = var_name[var_name.rfind('%')+1:]
else:
ddt_reference = ''
# End if
#
if mdtable.type == 'module':
ddt_reference = ''
if not current_module in ddt_references.keys():
ddt_references[current_module] = {}
if not table_name in ddt_references[current_module].keys():
ddt_references[current_module][table_name] = ddt_reference
elif not ddt_references[current_module][table_name] == ddt_reference:
raise Exception("Conflicting DDT references in table {}: {} vs {}".format(
table_name, ddt_references[current_module][table_name], ddt_reference))
#
mdobj = MetadataEntry(var_name)
mdtable[var_name] = mdobj
# Now, create the rest of the entries
for ind in xrange(len(entries)):
attr_name = table_header[ind]
entry = entries[ind]
if attr_name == 'local_name':
# Already handled this
continue
elif attr_name == 'rank':
attr_name = 'dimensions'
rank = int(entry)
if rank>0:
# Search for key in dimensions dictionary
if local_name.lower() in dimensions.keys():
dim_key = local_name.lower()
# Begin model and file-dependent substitutions
elif model == 'FV3':
if local_name.replace("GFS_Data(cdata%blk_no)%","").lower() in dimensions.keys():
dim_key = local_name.replace("GFS_Data(cdata%blk_no)%","").lower()
elif local_name.replace("GFS_Data(cdata%blk_no)%Intdiag%","Diag%").lower() in dimensions.keys():
dim_key = local_name.replace("GFS_Data(cdata%blk_no)%Intdiag%","Diag%").lower()
elif local_name.replace("GFS_Interstitial(cdata%thrd_no)%","Interstitial%").lower() in dimensions.keys():
dim_key = local_name.replace("GFS_Interstitial(cdata%thrd_no)%","Interstitial%").lower()
elif local_name.replace("CCPP_Interstitial%","Interstitial%").lower() in dimensions.keys():
dim_key = local_name.replace("CCPP_Interstitial%","Interstitial%").lower()
else:
dim_key = None
# End model and file-dependent substitution
else:
dim_key = None
# Begin model and file-dependent substitutions
if model == 'FV3':
if dim_key and 'n_XX_SubstituteWithStandardName_XX' in dimensions[dim_key]:
if local_name in [ 'GFS_Data(cdata%blk_no)%Intdiag%sedim',
'GFS_Data(cdata%blk_no)%Intdiag%drydep',
'GFS_Data(cdata%blk_no)%Intdiag%wetdpl',
'GFS_Data(cdata%blk_no)%Intdiag%wetdpc' ]:
entry = '(horizonal_dimension,number_of_chemical_tracers_for_diagnostics)'
elif local_name == 'GFS_Data(cdata%blk_no)%Intdiag%duem':
entry = '(horizonal_dimension,number_of_dust_bins_for_diagnostics)'
elif local_name == 'GFS_Data(cdata%blk_no)%Intdiag%ssem':
entry = '(horizonal_dimension,number_of_seasalt_bins_for_diagnostics)'
else:
raise Exception("No entry defined for variable {} with dimensions {}".format(
local_name, dimensions[dim_key]))
elif dim_key:
if not rank == len(dimensions[dim_key]):
raise Exception("ERROR, mismatch of variable rank and dimensions for variable {}".format(local_name))
entry = '(' + ','.join(dimensions[dim_key]) + ')'
# Special handling for slices of arrays that do not have an entry in the dimensions dictionary
elif local_name.endswith('(:,1)') and ('at_lowest_model_layer' in standard_name or \
'at_lowest_model_interface' in standard_name):
entry = '(horizontal_dimension)'
elif 'GFS_Data(cdata%blk_no)%Tbd%phy_f2d(:,' in local_name and rank==1:
entry = '(horizontal_dimension)'
elif 'GFS_Data(cdata%blk_no)%Tbd%phy_f3d(:,:' in local_name and rank==2:
entry = '(horizontal_dimension,vertical_dimension)'
elif 'GFS_Data(cdata%blk_no)%Statein%qgrs(:,:,GFS_Control' in local_name or \
'GFS_Data(cdata%blk_no)%Stateout%gq0(:,:,GFS_Control' in local_name or \
'GFS_Interstitial(cdata%thrd_no)%save_q(:,:,GFS_Control' in local_name:
entry = '(horizontal_dimension,vertical_dimension)'
elif 'GFS_Data(cdata%blk_no)%Statein%qgrs(:,1,GFS_Control' in local_name or \
'GFS_Data(cdata%blk_no)%Stateout%gq0(:,1,GFS_Control' in local_name:
entry = '(horizontal_dimension)'
elif ("Intdiag%du3dt" in local_name or \
"Intdiag%dv3dt" in local_name or \
"Intdiag%dt3dt" in local_name or \
"Intdiag%dq3dt" in local_name) and rank==2:
entry = '(horizontal_dimension,vertical_dimension)'
elif ("GFS_Interstitial(cdata%thrd_no)%clouds(:,:" in local_name or \
"GFS_Interstitial(cdata%thrd_no)%clw(:,:" in local_name) and rank==2:
entry = '(horizontal_dimension,vertical_dimension)'
elif "GFS_Interstitial(cdata%thrd_no)%dqdt(:,:,GFS_Control" in local_name:
entry = '(horizontal_dimension,vertical_dimension)'
elif local_name == "GFS_Control%input_nml_file":
entry = '(number_of_lines_of_namelist_filename_for_internal_file_reads)'
elif local_name == 'GFS_Control%blksz':
entry = '(number_of_blocks)'
elif local_name in [ 'GFS_Control%idat',
'GFS_Control%jdat',
]:
entry = '(8)'
elif local_name == 'GFS_Control%idate':
entry = '(4)'
elif local_name in [ 'GFS_Control%psautco',
'GFS_Control%prautco',
'GFS_Control%wminco',
'GFS_Control%mg_ts_auto_ice',
'GFS_Control%mg_qcmin',
'GFS_Control%flgmin',
'GFS_Control%cgwf',
'GFS_Control%ccwf',
'GFS_Control%cdmbgwd',
'GFS_Control%ctei_rm',
'GFS_Control%dlqf',
'GFS_Control%psauras',
'GFS_Control%prauras',
'GFS_Control%wminras',
]:
entry = '(2)'
elif local_name in [ 'GFS_Control%cs_parm' ]:
entry = '(10)'
elif local_name in [ 'GFS_Control%crtrh' ]:
entry = '(3)'
elif local_name in [ 'GFS_Control%pertz0',
'GFS_Control%pertzt',
'GFS_Control%pertshc',
'GFS_Control%pertlai',
'GFS_Control%pertalb',
'GFS_Control%pertvegf',
]:
entry = '(5)'
elif 'GFS_Interstitial(cdata%thrd_no)%faerlw(:,:,:' in local_name and rank==3:
entry = '(horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_longwave_radiation)'
elif 'GFS_Interstitial(cdata%thrd_no)%faersw(:,:,:' in local_name and rank==3:
entry = '(horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation,number_of_aerosol_bands_for_shortwave_radiation)'
elif 'GFS_Interstitial(cdata%thrd_no)%gasvmr(:,:' in local_name and rank==2:
entry = '(horizontal_dimension,adjusted_vertical_layer_dimension_for_radiation)'
elif 'GFS_Interstitial(cdata%thrd_no)%sfcalb(:,' in local_name and rank==1:
entry = '(horizontal_dimension)'
elif local_name in [
'CCPP_interstitial%delp',
'CCPP_interstitial%pt',
'CCPP_interstitial%qv',
'CCPP_interstitial%ql',
'CCPP_interstitial%qi',
'CCPP_interstitial%qr',
'CCPP_interstitial%qs',
'CCPP_interstitial%qg',
'CCPP_interstitial%qc',
]:
entry = '(starting_x_direction_index_domain:ending_x_direction_index_domain,starting_y_direction_index_domain:ending_y_direction_index_domain,1:vertical_dimension_for_fast_physics)'
elif local_name in [
'CCPP_interstitial%delz',
]:
entry = '(starting_x_direction_index_domain:ending_x_direction_index_domain,starting_y_direction_index_domain:ending_y_direction_index_domain,1:vertical_dimension_for_thickness_at_Lagrangian_surface)'
elif local_name in [
'CCPP_interstitial%area',
'CCPP_interstitial%phis',
]:
entry = '(starting_x_direction_index_domain:ending_x_direction_index_domain,starting_y_direction_index_domain:ending_y_direction_index_domain)'
elif local_name in [
'CCPP_interstitial%peln',
]:
entry = '(starting_x_direction_index:ending_x_direction_index,1:vertical_dimension_for_fast_physics_plus_one,starting_y_direction_index:ending_y_direction_index)'
elif local_name in [
'CCPP_interstitial%pkz',
]:
entry = '(starting_x_direction_index:ending_x_direction_index,starting_y_direction_index:ending_y_direction_index,1:vertical_dimension_for_fast_physics)'
elif local_name in [
'CCPP_interstitial%qvi',
]:
entry = '(starting_x_direction_index_domain:ending_x_direction_index_domain,starting_y_direction_index_domain:ending_y_direction_index_domain,1:vertical_dimension_for_fast_physics,1:number_of_gases_for_multi_gases_physics)'
elif local_name in [
'CCPP_interstitial%q_con',
]:
entry = '(starting_x_direction_index_domain:ending_x_direction_index_domain,starting_y_direction_index_domain:ending_y_direction_index_domain,1:vertical_dimension_for_condensed_water_at_Lagrangian_surface)'
elif "CCPP_data" in filename_in and standard_name == 'GFS_data_type_instance_all_blocks':
entry = '(ccpp_block_number)'
elif "CCPP_data" in filename_in and standard_name == 'GFS_interstitial_type_instance_all_threads':
entry = '(ccpp_thread_number)'
else:
entry = '(' + ','.join(dim_names[0:rank]) + ')'
# End model and file-dependent substitutions
else:
if dim_key:
if not rank == len(dimensions[dim_key]):
raise Exception("ERROR, mismatch of variable rank and dimensions for variable {}".format(local_name))
entry = '(' + ','.join(dimensions[dim_key]) + ')'
else:
entry = '(' + ','.join(dim_names[0:rank]) + ')'
# rank == 0
else:
entry = '(' + ','.join(dim_names[0:rank]) + ')'
# End if
elif attr_name == 'standard_name':
# Parsing done earlier
entries[ind] = standard_name
entry = standard_name
elif attr_name == 'intent':
# Don't write intent attribute for variable/type definitions
if in_preamble:
entry = ''
elif entry.lower() == 'none':
if logger is None:
raise ValueError("{} has intent = none in {}".format(var_name, table_name))
else:
logger.warning("{} has intent = none in {}".format(var_name, table_name))
elif attr_name == 'optional':
# Don't write optional attribute for variable/type definitions
if in_preamble:
entry = ''
elif not entry in ['F', 'T']:
if logger is None:
raise ValueError("{} has optional = {} in {}".format(var_name, entry, table_name))
else:
logger.warning("{} has optional = {} in {}".format(var_name, entry, table_name))
# End if
# End if
# No else needed
# End if
# Add attribute
if (len(entry) > 0) or (attr_name in required_attrs):
mdobj[attr_name] = entry
# End if
# End for (done with entry)
# End while (done with table)
else:
# Just write the line (should be a table ending)
if line.strip() != '!!':
raise ValueError("All tables must end with !! line")
# End if
file.write(line+'\n')
# End if (blank table)
else:
# Not a table, just write and continue
file.write(line+'\n')
# End if
# Always load a new line
line, lindex = next_line(fin_lines, max_line, cindex=lindex)
# End while
# End with (file)
# Write out finalized metadata file
with open(metadata_filename_out, 'w') as mdfile:
spacer = ""
# First pass: write type definitions,
# second pass: write module table
for count in xrange(2):
for table in mdconfig:
if (count == 0 and not table.type == 'ddt') or \
(count == 1 and table.type == 'ddt'):
continue
if len(spacer) > 0:
mdfile.write(spacer)
# End if
table.write(mdfile)
spacer = '\n'+72*'#'+'\n'
# End for
# End for
# End with (mdfile)
if ddt_references:
message = """Add the following statement to the CCPP prebuild config (add to existing entry):
TYPEDEFS_NEW_METADATA = {
"""
for module_name in ddt_references.keys():
message += " '{module_name}' : {{\n".format(module_name=module_name)
for table_name in ddt_references[module_name].keys():
message += " '{table_name}' : '{ddt_reference}',\n".format(table_name=table_name,
ddt_reference=ddt_references[module_name][table_name])
message += " },\n"
message += " }\n"
if logger is not None:
logger.info(message)
else:
print message
########################################################################
def usage(cmd):
    """Print command-line usage for this script and abort via an exception.

    Args:
        cmd: the program name (typically sys.argv[0]), echoed in the usage line.

    Raises:
        Exception: always, so the caller terminates after the help text.
    """
    print("Usage:")
    print("{} <source_file> <target_file> <model>".format(cmd))
    print("")
    # MODELS is a module-level collection of supported model names defined
    # elsewhere in this file.
    print("<model> can be one of '{}'".format(MODELS))
    print("")
    print("Translate the metadata in <source_file> into a new file")
    raise Exception
########################################################################
if __name__ == "__main__":
# Process the files passed in
num_args = len(sys.argv)
if not num_args == 4:
usage(sys.argv[0])
else:
## Init this now so that all Exceptions can be trapped
logger = init_log('ccpp_capgen')
set_log_level(logger, logging.INFO)
| |
################################################################################
## Basic Matrices ##
################################################################################
def null(n, m=None):
    """Create an n x m null (all-zero) matrix.

    Parameters
    ----------
    n : int
        Number of rows of the matrix.
    m : int, optional
        Number of columns of the matrix. Defaults to the number of rows.

    Returns
    -------
    Matrix
        A null matrix of order n x m with independently allocated rows.
    """
    if m is None:
        m = n
    # NOTE: the previous [[0] * m] * n aliased one row object n times, so
    # writing to any row mutated all of them; build each row independently.
    return [[0] * m for _ in range(n)]
def identity(n, mul=1):
    """Create an n x n identity matrix scaled by ``mul``.

    Parameters
    ----------
    n : int
        Number of rows (and columns) of the matrix.
    mul : int/float, optional
        The multiplication factor. Defaults to 1.

    Returns
    -------
    Matrix
        An identity matrix multiplied by the multiplication factor.
    """
    # Build rows directly rather than calling null(): the aliased rows
    # returned by the original null() made every diagonal write bleed into
    # all rows, so identity(2) came out as [[1, 1], [1, 1]].
    return [[mul if i == j else 0 for j in range(n)] for i in range(n)]
################################################################################
## Types of Matrices ##
################################################################################
def is_matrix(A):
    """Tell whether the input is a well-formed numeric matrix.

    Args
    ----
    A (compulsory)
        A candidate matrix (list of rows).

    Returns
    -------
    bool
        True if every row has the same length as the first and every element
        is an int, float, or complex; False otherwise.
    """
    numeric_types = (int, float, complex)
    for row in A:
        if len(row) != len(A[0]):
            return False
        # Exact type check (not isinstance) on purpose: bools are rejected.
        if any(type(element) not in numeric_types for element in row):
            return False
    return True
def is_null(A):
    """Tell whether a matrix is a null (all-zero) matrix.

    Args
    ----
    A (compulsory)
        A matrix.

    Returns
    -------
    bool
        True if every element is zero and all rows share the first row's
        length, False otherwise. The empty matrix counts as null (the
        original raised IndexError on ``A == []``).
    """
    if not A:
        return True
    width = len(A[0])
    return all(len(row) == width and all(element == 0 for element in row)
               for row in A)
def is_identity(A):
    """Tell whether a matrix is an identity matrix.

    Args
    ----
    A (compulsory)
        A matrix.

    Returns
    -------
    bool
        True if A is square with ones on the diagonal and zeros elsewhere,
        False otherwise.
    """
    # Check elements directly instead of comparing against identity(len(A)):
    # the original identity() built its result on aliased rows and therefore
    # never returned a correct identity matrix.
    n = len(A)
    for i, row in enumerate(A):
        if len(row) != n:
            return False
        for j, element in enumerate(row):
            if element != (1 if i == j else 0):
                return False
    return True
def is_symmetric(A):
    """Tell whether a matrix is symmetric (A[i][j] == A[j][i] for all i, j).

    Args
    ----
    A (compulsory)
        A matrix.

    Returns
    -------
    bool
        True if the matrix is symmetric, False otherwise (including when the
        matrix is not square, which surfaces as an IndexError internally).
    """
    try:
        return all(
            A[i][j] == A[j][i]
            for i in range(len(A))
            for j in range(len(A[0]))
        )
    except IndexError:  # non-square matrix
        return False
def is_skew_symmetric(A):
    """Tell whether a matrix is skew-symmetric (A[i][j] == -A[j][i]).

    Args
    ----
    A (compulsory)
        A matrix.

    Returns
    -------
    bool
        True if the matrix is skew-symmetric, False otherwise (including when
        the matrix is not square, which surfaces as an IndexError internally).
    """
    try:
        return all(
            A[i][j] == -1 * A[j][i]
            for i in range(len(A))
            for j in range(len(A[0]))
        )
    except IndexError:  # non-square matrix
        return False
def is_diagonal(A):
    """Tell whether a matrix is diagonal (off-diagonal elements are zero).

    Args
    ----
    A (compulsory)
        A matrix.

    Returns
    -------
    bool
        True if the matrix is diagonal, False otherwise (including when the
        matrix is not square, which surfaces as an IndexError internally).
    """
    try:
        # `i == j or ...` mirrors the original short-circuit: diagonal cells
        # are never dereferenced.
        return all(
            i == j or A[i][j] == 0
            for i in range(len(A))
            for j in range(len(A[0]))
        )
    except IndexError:  # non-square matrix
        return False
def is_square(A):
    """Tell whether a matrix is square (as many rows as columns).

    Args
    ----
    A (compulsory)
        A matrix.

    Raises
    ------
    ValueError
        Raised if the given input is not a matrix.

    Returns
    -------
    bool
        True if the matrix is square, False otherwise.
    """
    if not is_matrix(A):
        raise ValueError("The given matrix is not a matrix.")
    return len(A) == len(A[0])
def is_utriangular(A):
    """Tell whether a matrix is upper triangular (zeros below the diagonal).

    Args
    ----
    A (compulsory)
        A matrix.

    Returns
    -------
    bool
        True if the matrix is upper triangular, False otherwise (including
        when the matrix is not square, which surfaces as an IndexError
        internally).
    """
    try:
        # A[i][j] is dereferenced for every cell, as in the original, so a
        # ragged row still triggers the IndexError -> False path.
        return all(
            A[i][j] == 0 or i <= j
            for i in range(len(A))
            for j in range(len(A[0]))
        )
    except IndexError:  # non-square matrix
        return False
def is_ltriangular(A):
    """Tell whether a matrix is lower triangular (zeros above the diagonal).

    Args
    ----
    A (compulsory)
        A matrix.

    Returns
    -------
    bool
        True if the matrix is lower triangular, False otherwise (including
        when the matrix is not square, which surfaces as an IndexError
        internally).
    """
    try:
        # A[i][j] is dereferenced for every cell, as in the original, so a
        # ragged row still triggers the IndexError -> False path.
        return all(
            A[i][j] == 0 or i >= j
            for i in range(len(A))
            for j in range(len(A[0]))
        )
    except IndexError:  # non-square matrix
        return False
################################################################################
## Matrix Compatibility ##
################################################################################
def compatAS(a, b):
    """Tell whether two matrices can be added/subtracted (same order).

    Args
    ----
    a (compulsory)
        A matrix.
    b (compulsory)
        Another matrix.

    Raises
    ------
    ValueError
        Raised if either input is not a matrix.

    Returns
    -------
    bool
        True if the two matrices have the same order, False otherwise.
    """
    if is_matrix(a) and is_matrix(b):
        # Direct boolean instead of the `False if ... else True` anti-idiom.
        return len(a) == len(b) and len(a[0]) == len(b[0])
    raise ValueError("The given parameter is not a matrix.")
def compatM(A, B):
    """Tells if the 2 matrices can be multiplied.

    A and B are multiplication-compatible when A's column count equals
    B's row count.

    Args
    ----
    A (compulsory)
        A matrix.
    B (compulsory)
        Another matrix.

    Raises
    ------
    ValueError
        Raised if the given input is not a matrix.

    Returns
    -------
    bool
        True if the given matrices can be multiplied, False otherwise.
    """
    if not is_matrix(A):
        raise ValueError(f"{A} is not a matrix.")
    if not is_matrix(B):
        raise ValueError(f"{B} is not a matrix.")
    return len(A[0]) == len(B)
################################################################################
## Arithmetic Operations ##
################################################################################
def matAdd(a, b):
    """Returns the sum matrix (A+B), if mathematically possible.

    Args
    ----
    a (compulsory)
        A matrix.
    b (compulsory)
        Another matrix.

    Raises
    ------
    ValueError
        Raised when the addition of the two matrices is not possible.
    ValueError
        Raised if the given input is not a matrix.

    Returns
    -------
    Matrix
        The matrix representing the sum of the two matrices.
    """
    if compatAS(a, b):
        # Bug fix: the previous version appended an empty list before every
        # row, yielding [[], row0, [], row1, ...] instead of [row0, row1, ...].
        return [[a[i][j] + b[i][j] for j in range(len(a[0]))]
                for i in range(len(a))]
    raise ValueError("The 2 matrices do not have the same order.")
def matSub(a, b):
    """Returns the difference matrix (A-B), if mathematically possible.

    Args
    ----
    a (compulsory)
        A matrix.
    b (compulsory)
        Another matrix.

    Raises
    ------
    ValueError
        Raised when the subtraction of the two matrices is not possible.
    ValueError
        Raised if the given input is not a matrix.

    Returns
    -------
    Matrix
        The matrix representing the difference of the two matrices.
    """
    if compatAS(a, b):
        # Bug fix: the previous version appended an empty list before every
        # row, yielding [[], row0, [], row1, ...] instead of [row0, row1, ...].
        return [[a[i][j] - b[i][j] for j in range(len(a[0]))]
                for i in range(len(a))]
    raise ValueError("The 2 matrices do not have the same order.")
def matMul(a, b):
    """Returns the product matrix (AB), if mathematically possible.

    Args
    ----
    a (compulsory)
        A matrix.
    b (compulsory)
        Another matrix.

    Raises
    ------
    ValueError
        Raised when the multiplication of the two matrices is not possible.
    ValueError
        Raised if the given input is not a matrix.

    Returns
    -------
    Matrix
        The matrix representing the product of the two matrices.
    """
    if not compatM(a, b):
        raise ValueError("The 2 matrices are not compatible for multiplication.")
    rows, inner, cols = len(a), len(b), len(b[0])
    product = []
    for i in range(rows):
        row = []
        for j in range(cols):
            # Dot product of a's row i with b's column j.
            row.append(sum(a[i][k] * b[k][j] for k in range(inner)))
        product.append(row)
    return product
def power(a, power=2):
    """Returns the matrix representing the n^th power of matrix A, if mathematically possible.

    Args
    ----
    a (compulsory)
        A matrix.
    power (optional, int)
        The power of the matrix. Defaults to 2.

    Raises
    ------
    ValueError
        Raised if the matrix is not a square matrix.
    ValueError
        Raised if the power is negative.
    ValueError
        Raised if the given input is not a matrix.

    Returns
    -------
    Matrix
        The matrix representing the n^th power of the matrix A.
    """
    if is_square(a):
        if power < 0:
            # Previously negative powers silently returned A itself.
            raise ValueError("Negative powers are not supported.")
        if power == 0:
            # Bug fix: A^0 is the identity matrix, not A itself.
            return [[1 if i == j else 0 for j in range(len(a))]
                    for i in range(len(a))]
        matrix = a
        for _ in range(power - 1):
            matrix = matMul(a, matrix)
        return matrix
    raise ValueError("The given matrix is not a square matrix.")
def scalarMul(a, mul=1):
    """Returns the scalar product of A and n (nA).

    Args
    ----
    a (compulsory)
        A matrix. Modified in place and also returned.
    mul (int/float, optional)
        The multiplication factor. Defaults to 1.

    Returns
    -------
    Matrix
        The matrix multiplied with the multiplication factor.
    """
    for i in range(len(a)):
        for j in range(len(a[i])):
            # Bug fix: the previous version indexed a[j][i] (swapped indices),
            # which raises IndexError on any non-square matrix and was only
            # correct on square ones by coincidence.
            a[i][j] *= mul
    return a
################################################################################
## Matrix Operations ##
################################################################################
def cut(A, row=0, column=0):
    """Returns a smaller matrix by removing the required row and column.

    Args
    ----
    A (compulsory)
        A matrix.
    row (int, optional)
        The row to be removed. Defaults to 0.
    column (int, optional)
        The column to be removed. Defaults to 0.

    Returns
    -------
    Matrix
        A new matrix with the required row and column removed.
    """
    return [
        [value for j, value in enumerate(current_row) if j != column]
        for i, current_row in enumerate(A)
        if i != row
    ]
def rotate(A, turns=1):
"""A matrix which is formed by rotating the given matrix, n times, in clockwise sense.
Args
----
A (compulsory)
A matrix.
| |
word[3] == "l" :
toGuess = toGuess[:3] + "l" + toGuess[4:]
if word[4] == "L" or word[4] == "l" :
toGuess = toGuess[:4] + "l" + toGuess[5:]
if word[5] == "L" or word[5] == "l" :
toGuess = toGuess[:5] + "l" + toGuess[6:]
if word[6] == "L" or word[6] == "l" :
toGuess = toGuess[:6] + "l" + toGuess[7:]
if word[1] != "L" and word[1] != "l" and word[2] != "L" and word[2] != "l" and word[3] != "L" and word[3] != "l" and word[4] != "L" and word[4] != "l" and word[5] != "L" and word[5] != "l" and word[6] != "L" and word[6] != "l" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "l" + ", "
# Handle guesses for the letters M through S with one data-driven loop instead
# of seven copy-pasted if-chains.  For a matching guess: reveal every position
# (1..6) of the secret word holding that letter; if no position matches, count
# an error and record the letter as a wrong guess.
for _letter in "mnopqrs":
    _upper = _letter.upper()
    if guessChar == _upper or guessChar == _letter:
        for _pos in range(1, 7):
            if word[_pos] == _upper or word[_pos] == _letter:
                toGuess = toGuess[:_pos] + _letter + toGuess[_pos + 1:]
        if all(word[_pos] != _upper and word[_pos] != _letter for _pos in range(1, 7)):
            print("\nWrong!\n")
            numberOfErrors = numberOfErrors + 1
            wrongChars = wrongChars + _letter + ", "
if guessChar == "T" or guessChar == "t" :
if word[1] == "T" or word[1] == "t" :
toGuess = toGuess[:1] + "t" + toGuess[2:]
if word[2] == "T" or word[2] == "t" :
toGuess = toGuess[:2] + "t" + toGuess[3:]
if word[3] == "T" or word[3] == "t" :
toGuess = toGuess[:3] + "t" + toGuess[4:]
if word[4] == "T" or word[4] == "t" :
toGuess = toGuess[:4] + "t" + toGuess[5:]
if word[5] == "T" or word[5] == "t" :
toGuess = toGuess[:5] + "t" + toGuess[6:]
if word[6] == "T" or word[6] == "t" :
toGuess = toGuess[:6] + "t" + toGuess[7:]
if word[1] != "T" and word[1] != "t" and word[2] != "T" and word[2] != "t" and word[3] != "T" and | |
worker_pool = [worker for worker in self.workers.idle]
bases = self.townhalls.ready
geysers = self.geysers.ready
# list of places that need more workers
deficit_mining_places = []
for mining_place in bases | geysers:
difference = mining_place.surplus_harvesters
# perfect amount of workers, skip mining place
if not difference:
continue
if mining_place.is_vespene_geyser:
# get all workers that target the gas extraction site
# or are on their way back from it
local_workers = self.workers.filter(
lambda unit: unit.order_target == mining_place.tag
or (unit.is_carrying_vespene and unit.order_target == bases.closest_to(mining_place).tag)
)
else:
# get tags of minerals around expansion
local_minerals_tags = {
mineral.tag for mineral in self.state.mineral_field if mineral.distance_to(mining_place) <= 8
}
# get all target tags a worker can have
# tags of the minerals he could mine at that base
# get workers that work at that gather site
local_workers = self.workers.filter(
lambda unit: unit.order_target in local_minerals_tags
or (unit.is_carrying_minerals and unit.order_target == mining_place.tag)
)
# too many workers
if difference > 0:
for worker in local_workers[:difference]:
worker_pool.append(worker)
# too few workers
# add mining place to deficit bases for every missing worker
else:
deficit_mining_places += [mining_place for _ in range(-difference)]
# prepare all minerals near a base if we have too many workers
# and need to send them to the closest patch
if len(worker_pool) > len(deficit_mining_places):
all_minerals_near_base = [
mineral
for mineral in self.state.mineral_field
if any(mineral.distance_to(base) <= 8 for base in self.townhalls.ready)
]
# distribute every worker in the pool
for worker in worker_pool:
# as long as have workers and mining places
if deficit_mining_places:
# choose only mineral fields first if current mineral to gas ratio is less than target ratio
if self.vespene and self.minerals / self.vespene < resource_ratio:
possible_mining_places = [place for place in deficit_mining_places if not place.vespene_contents]
# else prefer gas
else:
possible_mining_places = [place for place in deficit_mining_places if place.vespene_contents]
# if preferred type is not available any more, get all other places
if not possible_mining_places:
possible_mining_places = deficit_mining_places
# find closest mining place
current_place = min(deficit_mining_places, key=lambda place: place.distance_to(worker))
# remove it from the list
deficit_mining_places.remove(current_place)
# if current place is a gas extraction site, go there
if current_place.vespene_contents:
actions.append(worker.gather(current_place))
# if current place is a gas extraction site,
# go to the mineral field that is near and has the most minerals left
else:
local_minerals = [
mineral for mineral in self.state.mineral_field if mineral.distance_to(current_place) <= 8
]
target_mineral = max(local_minerals, key=lambda mineral: mineral.mineral_contents)
actions.append(worker.gather(target_mineral))
# more workers to distribute than free mining spots
# send to closest if worker is doing nothing
elif worker.is_idle and all_minerals_near_base:
target_mineral = min(all_minerals_near_base, key=lambda mineral: mineral.distance_to(worker))
actions.append(worker.gather(target_mineral))
else:
# there are no deficit mining places and worker is not idle
# so dont move him
pass
await self.do_actions(actions)
@property
def owned_expansions(self) -> Dict[Point2, Unit]:
    """Mapping of expansion locations to the friendly townhall occupying each one.

    A townhall counts as occupying a location when it stands within
    EXPANSION_GAP_THRESHOLD of it; locations without a townhall are omitted.
    """
    result = {}
    for location in self.expansion_locations:
        townhall = next(
            (th for th in self.townhalls if th.distance_to(location) < self.EXPANSION_GAP_THRESHOLD),
            None,
        )
        if townhall:
            result[location] = townhall
    return result
def can_feed(self, unit_type: UnitTypeId) -> bool:
    """Checks if there is enough free supply to build the given unit type."""
    needed = self._game_data.units[unit_type.value]._proto.food_required
    # Zero-supply units (e.g. structures) can always be fed.
    return needed == 0 or self.supply_left >= needed
def can_afford(
    self, item_id: Union[UnitTypeId, UpgradeId, AbilityId], check_supply_cost: bool = True
) -> "CanAffordWrapper":
    """Tests if the player has enough resources to build a unit or cast an ability.

    Supply is only considered for unit types (and only when check_supply_cost
    is True); upgrades and abilities cost no supply.
    """
    supply_ok = True
    if isinstance(item_id, UnitTypeId):
        creation = self._game_data.units[item_id.value].creation_ability
        cost = self._game_data.calculate_ability_cost(creation)
        if check_supply_cost:
            supply_ok = self.can_feed(item_id)
    elif isinstance(item_id, UpgradeId):
        cost = self._game_data.upgrades[item_id.value].cost
    else:
        cost = self._game_data.calculate_ability_cost(item_id)
    return CanAffordWrapper(self.minerals >= cost.minerals, self.vespene >= cost.vespene, supply_ok)
async def can_cast(
    self,
    unit: Unit,
    ability_id: AbilityId,
    target: Optional[Union[Unit, Point2, Point3]] = None,
    only_check_energy_and_cooldown: bool = False,
    cached_abilities_of_unit: List[AbilityId] = None,
) -> bool:
    """Tests if a unit has an ability available and enough energy to cast it.

    The game's available-abilities query already excludes abilities the unit
    cannot currently use (insufficient energy, on cooldown), so once the
    ability appears in that list only the targeting rules remain to be checked.
    See data_pb2.py (line 161) for the numbers 1-5 to make sense.

    :param unit: the prospective caster
    :param ability_id: the ability to test for
    :param target: optional cast target (a unit or a position)
    :param only_check_energy_and_cooldown: if True, skip target/range checks
    :param cached_abilities_of_unit: previously fetched ability list for this
        unit, to avoid issuing another async query
    """
    assert isinstance(unit, Unit)
    assert isinstance(ability_id, AbilityId)
    assert isinstance(target, (type(None), Unit, Point2, Point3))
    # Reuse the caller-supplied ability list when given; otherwise query the
    # game (the query accounts for energy cost and cooldown).
    if cached_abilities_of_unit:
        abilities = cached_abilities_of_unit
    else:
        abilities = (await self.get_available_abilities([unit]))[0]
    if ability_id in abilities:
        if only_check_energy_and_cooldown:
            return True
        cast_range = self._game_data.abilities[ability_id.value]._proto.cast_range
        ability_target = self._game_data.abilities[ability_id.value]._proto.target
        # Check if target is in range (or is a self cast like stimpack).
        # NOTE(review): `and` binds tighter than `or`, so `ability_target == 1`
        # alone returns True regardless of `target` -- presumably intentional
        # for no-target (self-cast) abilities; confirm against the proto enum.
        if (
            ability_target == 1
            or ability_target == Target.PointOrNone.value
            and isinstance(target, (Point2, Point3))
            and unit.distance_to(target) <= cast_range
        ):  # cant replace 1 with "Target.None.value" because ".None" doesnt seem to be a valid enum name
            return True
        # Check if able to use ability on a unit
        elif (
            ability_target in {Target.Unit.value, Target.PointOrUnit.value}
            and isinstance(target, Unit)
            and unit.distance_to(target) <= cast_range
        ):
            return True
        # Check if able to use ability on a position
        elif (
            ability_target in {Target.Point.value, Target.PointOrUnit.value}
            and isinstance(target, (Point2, Point3))
            and unit.distance_to(target) <= cast_range
        ):
            return True
    return False
def select_build_worker(self, pos: Union[Unit, Point2, Point3], force: bool = False) -> Optional[Unit]:
    """Select a worker to build a building with.

    Prefers gathering/idle workers within 20 of the position; falls back to
    all workers. Returns a worker whose current orders are interruptible, or
    a random worker when force is True, otherwise None.
    """
    nearby = self.workers.filter(lambda w: (w.is_gathering or w.is_idle) and w.distance_to(pos) < 20)
    candidates = nearby or self.workers
    if not candidates:
        return None
    for candidate in candidates.sorted_by_distance_to(pos).prefer_idle:
        orders = candidate.orders
        if not orders:
            return candidate
        # A single move/gather order can be safely interrupted.
        if len(orders) == 1 and orders[0].ability.id in {AbilityId.MOVE, AbilityId.HARVEST_GATHER}:
            return candidate
    return candidates.random if force else None
async def can_place(self, building: Union[AbilityData, AbilityId, UnitTypeId], position: Point2) -> bool:
    """Tests if a building can be placed in the given location.

    Accepts a unit type, its creation ability id, or the ability data itself.
    """
    kind = type(building)
    assert kind in {AbilityData, AbilityId, UnitTypeId}
    if kind == UnitTypeId:
        ability = self._game_data.units[building.value].creation_ability
    elif kind == AbilityId:
        ability = self._game_data.abilities[building.value]
    else:
        ability = building
    results = await self._client.query_building_placement(ability, [position])
    return results[0] == ActionResult.Success
async def find_placement(
    self,
    building: UnitTypeId,
    near: Union[Unit, Point2, Point3],
    max_distance: int = 20,
    random_alternative: bool = True,
    placement_step: int = 2,
) -> Optional[Point2]:
    """Finds a placement location for building.

    Tries `near` itself first, then searches outward in concentric square
    rings spaced `placement_step` apart up to `max_distance`. On the first
    ring containing any valid spot, returns either a random valid spot
    (random_alternative=True) or the valid spot closest to `near`.
    Returns None when nothing fits.

    NOTE(review): the type hint advertises Unit/Point3 for `near`, but the
    assert below only accepts Point2 -- confirm whether callers are expected
    to convert units to positions themselves.
    """
    assert isinstance(building, (AbilityId, UnitTypeId))
    assert isinstance(near, Point2)
    # Resolve a unit type to its creation ability; ability ids resolve directly.
    if isinstance(building, UnitTypeId):
        building = self._game_data.units[building.value].creation_ability
    else:  # AbilityId
        building = self._game_data.abilities[building.value]
    # Fast path: the requested spot itself.
    if await self.can_place(building, near):
        return near
    if max_distance == 0:
        return None
    for distance in range(placement_step, max_distance, placement_step):
        # Candidate points on the perimeter of a square of half-width `distance`.
        possible_positions = [
            Point2(p).offset(near).to2
            for p in (
                [(dx, -distance) for dx in range(-distance, distance + 1, placement_step)]
                + [(dx, distance) for dx in range(-distance, distance + 1, placement_step)]
                + [(-distance, dy) for dy in range(-distance, distance + 1, placement_step)]
                + [(distance, dy) for dy in range(-distance, distance + 1, placement_step)]
            )
        ]
        res = await self._client.query_building_placement(building, possible_positions)
        possible = [p for r, p in zip(res, possible_positions) if r == ActionResult.Success]
        if not possible:
            continue
        if random_alternative:
            return random.choice(possible)
        else:
            return min(possible, key=lambda p: p.distance_to_point2(near))
    return None
def already_pending_upgrade(self, upgrade_type: UpgradeId) -> Union[int, float]:
    """ Check if an upgrade is being researched

    Return values:
    0: not started
    0 < x < 1: researching
    1: finished

    For leveled upgrades (name contains "LEVEL") the order's button name is
    compared so that, e.g., researching level 2 does not count as pending
    level 1.
    """
    assert isinstance(upgrade_type, UpgradeId)
    if upgrade_type in self.state.upgrades:
        return 1
    level = None
    if "LEVEL" in upgrade_type.name:
        level = upgrade_type.name[-1]
    creationAbilityID = self._game_data.upgrades[upgrade_type.value].research_ability.id
    for structure in self.units.filter(lambda unit: unit.is_structure and unit.is_ready):
        for order in structure.orders:
            # Fix: compare with == rather than `is` -- identity comparison of
            # ids only works when both sides happen to be the same interned
            # object, which is fragile.
            if order.ability.id == creationAbilityID:
                if level and order.ability.button_name[-1] != level:
                    return 0
                return order.progress
    return 0
@property_cache_once_per_frame
def _abilities_all_units(self) -> Counter:
    """ Cache for the already_pending function, includes protoss units warping in, and all units in production, and all structures, and all morphs """
    counts = Counter()
    for unit in self.units:  # type: Unit
        for order in unit.orders:
            counts[order.ability] += 1
        # Count units/structures still in production by their creation ability.
        # Terran structures are excluded because the constructing SCV's order
        # was already counted above and would double-count the structure.
        if not unit.is_ready and (self.race != Race.Terran or not unit.is_structure):
            counts[self._game_data.units[unit.type_id.value].creation_ability] += 1
    return counts
@property_cache_once_per_frame
def _abilities_workers_and_eggs(self) -> Counter:
""" Cache for the already_pending function, includes all worker orders (including pending).
Zerg units in production (except queens and morphing units) and structures in production,
counts double for terran """
abilities_amount = Counter()
for worker in self.workers: # type: Unit
for order in worker.orders:
abilities_amount[order.ability] += 1
if self.race == Race.Zerg:
for egg in | |
calculated in %s seconds" % (geneset_type,time_diff)
permute_mapp_inputs=[]
return 1, mapp_to_mod_genes
def performOntologyORA(ontology_dir):
    """ Perform over-representation analysis (ORA) on any provided Ontology.

    Returns (1, ontology_to_mod_genes) on success, or (0, None) when no
    gene-to-ontology associations could be imported.  Relies on module-level
    state set up by the caller (input_gene_list, denominator_gene_list,
    permute_inputs, use_FET, PoolVar, permutations, mappfinder output
    settings, ...).
    """
    start_time = time.time()
    ontology_type = getResourceType(ontology_dir)
    ######### Import Gene-to-Nested-Ontology #########
    gene_to_ontology = gene_associations.importGeneToOntologyData(species_code,mod,'nested',ontology_type)
    ontology_to_gene = OBO_import.swapKeyValues(gene_to_ontology)
    if len(gene_to_ontology)==0:
        return 0, None
    else:
        ######### Calculate primary z-scores for GO terms
        #a = time.time()
        ontology_to_mod_genes = getGenesInPathway(input_gene_list,gene_to_ontology) ### For summary gene reporting
        #b = time.time(); print 'a',b-a
        ontology_input_gene_count,Rg,input_linked_ontology = countGenesInPathway(input_gene_list,gene_to_ontology,'yes')
        #c = time.time(); print 'b',c-b
        ontology_denominator_gene_count,Ng,denom_linked_ontology = countGenesInPathway(denominator_gene_list,gene_to_ontology,'yes')
        #d = time.time(); print 'c',d-c
        #print Ng,"unique genes, linked to GO and in dataset and", Rg, "unique GO linked genes matching criterion."
        ### Score pathways; fall back to the serial implementation on any failure.
        try:
            if PoolVar==False:
                multiZScores(ontology_input_gene_count,ontology_denominator_gene_count,Ng,Rg,ontology_to_gene,'Ontology')
            else:
                calculateZScores(ontology_input_gene_count,ontology_denominator_gene_count,Ng,Rg,ontology_to_gene,'Ontology')
        except Exception: calculateZScores(ontology_input_gene_count,ontology_denominator_gene_count,Ng,Rg,ontology_to_gene,'Ontology')
        #e = time.time(); print 'd',e-d; sys.exit()
        if use_FET == 'no':
            ###Begining Ontology Permutation Analysis
            try: original_increment = int(permutations/10); increment = original_increment
            except Exception: null=None
            x=0
            permute_ontology_inputs=[]
            if PoolVar==False:
                if permutations!=0: print '*',
            for permute_input_list in permute_inputs:
                ### http://docs.python.org/library/multiprocessing.html
                if PoolVar==False:
                    ### Progress indicator: print one '*' per ~10% of permutations
                    if x == increment: increment+=original_increment; print '*',
                    x+=1
                permute_ontology_input_gene_count,null,null = countGenesInPathway(permute_input_list,gene_to_ontology,'no'); permute_input_list=[]
                permute_ontology_inputs.append(permute_ontology_input_gene_count)
            if PoolVar==False:
                if permutations !=0: print 'Ontology finished'
            calculatePermuteZScores(permute_ontology_inputs,ontology_denominator_gene_count,Ng,Rg)
            calculatePermuteStats(original_ontology_z_score_data)
            adjustPermuteStats(original_ontology_z_score_data)
        go_headers = formatHeaders(gene_file,input_count,input_linked_ontology,denom_count,denom_linked_ontology,Rg,Ng,'Ontology',OBO_date)
        exportPathwayData(original_ontology_z_score_data,gene_file,go_headers,ontology_type,'Ontology')
        ### Export all gene associations (added in version 1.21)
        exportPathwayToGeneAssociations(ontology_to_mod_genes,mod,gene_file,gene_annotations,ontology_type,'Ontology')
        end_time = time.time()
        time_diff = formatTime(start_time,end_time)
        if PoolVar==False:
            print "Initial results for %s calculated in %s seconds" % (ontology_type,time_diff)
        ### Release the permutation buffers
        permute_ontology_inputs=[]
        return 1, ontology_to_mod_genes
def exportPathwayToGeneAssociations(pathway_to_mod_genes,mod,gene_file,gene_annotations,resource_name,pathway_type):
    """ Export one tab-delimited row (gene, symbol, pathway) per association. """
    headers = '\t'.join([mod,'symbol',resource_name])+'\n'
    ### Filename aliases keep the output compatible with GenMAPP-CS plugin filenames
    if resource_name == 'GeneOntology': resource_name = 'GO'
    if resource_name == 'WikiPathways': resource_name = 'local'
    new_file = mappfinder_output_dir+'/'+gene_file[:-4]+'-'+resource_name+'-associations.tab'
    data = export.ExportFile(new_file)
    data.write(headers)
    for pathway in pathway_to_mod_genes:
        for gene in pathway_to_mod_genes[pathway]:
            try: symbol = gene_annotations[gene].Symbol()
            except Exception: symbol = ''
            ### Prefix bare ontology ids with 'GO:' exactly once
            if pathway_type == 'Ontology' and ':' not in pathway: pathway = 'GO:'+ pathway
            data.write('\t'.join([gene,symbol,pathway])+'\n')
    data.close()
def formatHeaders(gene_file,input_count,input_linked,denom_count,denom_linked,R,N,pathway_type,OBO_date):
    """ Assemble the multi-line header text for a GO-Elite ORA result file,
    ending with the tab-delimited column-title row. """
    t = time.localtime(); dt = str(t[1])+'/'+str(t[2])+'/'+str(t[0])
    headers = ['GO-Elite ORA Results','File:','Table:']
    if pathway_type == 'Ontology':
        headers.append('Database: Based on OBO-Database version: '+OBO_date)
    headers.append('colors:')
    headers.append(dt)
    headers.append(species_name)
    headers += [
        'Pvalues = true',
        'Calculation Summary:',
        str(input_count)+' '+source_data+' source identifiers supplied in the input file:'+gene_file,
        str(input_linked)+' source identifiers meeting the filter linked to a '+mod+' ID.',
        str(R)+' genes meeting the criterion linked to a term.',
        str(denom_count)+' source identifiers in this dataset.',
        str(denom_linked)+' source identifiers linked to a '+mod+' ID.',
        str(N)+' Genes linked to a term.',
        'The z score is based on an N of '+str(N)+' and a R of '+str(R)+' distinct genes in all terms.\n',
    ]
    ### Probability column name depends on the statistics mode in use
    if use_FET == 'yes': prob = "FisherExactP"
    else: prob = "PermuteP"
    if pathway_type == 'Ontology':
        title = ['Ontology-ID','Ontology Name','Ontology Type','Number Changed','Number Measured','Number in Ontology','Percent Changed','Percent Present','Z Score',prob,'AdjustedP']
    else:
        title = ['Gene-Set Name','Number Changed','Number Measured','Number in Gene-Set','Percent Changed','Percent Present','Z Score',prob,'AdjustedP']
    headers.append('\t'.join(title))
    return '\n'.join(headers)+'\n'
def exportPathwayData(original_pathway_z_score_data,gene_file,headers,resource_name,pathway_type,altOutputDir=None):
    """ Write the ranked ORA results to a tab-delimited file.

    Rows are sorted by descending z-score; for equal z-scores, pathways with
    more measured genes rank first (via the -1/Measured tie-break key).
    On a missing statistic (AttributeError) the export aborts and the
    traceback string is returned to the caller instead of raising.
    """
    ### Filename aliases keep the output compatible with the GenMAPP-CS plugin
    if resource_name == 'GeneOntology': resource_name = 'GO' ### Makes the output filename compatible with GenMAPP-CS plugin filenames
    if resource_name == 'WikiPathways': resource_name = 'local' ### Makes the output filename compatible with GenMAPP-CS plugin filenames
    new_file = mappfinder_output_dir+'/'+gene_file[:-4]+'-'+resource_name+'.txt'
    global sort_results
    data = export.ExportFile(new_file); data.write(headers); sort_results=[]
    #print "Results for",len(original_pathway_z_score_data),"pathways exported to",new_file
    for pathway in original_pathway_z_score_data:
        zsd=original_pathway_z_score_data[pathway]
        ### e.g. PermuteP is only set when permutation statistics were computed
        try: results = [zsd.Changed(), zsd.Measured(), zsd.InPathway(), zsd.PercentChanged(), zsd.PercentPresent(), zsd.ZScore(), zsd.PermuteP(), zsd.AdjP()]
        except AttributeError:
            return traceback.format_exc()
            #print pathway,len(permuted_z_scores[pathway]);kill
        try: ###This is unnecessary, unless using the non-nested GO associations (which can have out of sync GOIDs)
            if pathway_type == 'Ontology':
                s = ontology_annotations[pathway]
                annotations = [s.OntologyID(),s.OntologyTerm(),s.OntologyType()]; results = annotations + results
            else:
                results = [pathway] + results
            results = string.join(results,'\t') + '\n'
            ### Sort key: z-score first, then -1/Measured so larger pathways win ties
            sort_results.append([float(zsd.ZScore()),-1/float(zsd.Measured()),results])
        except KeyError: null = []
    sort_results.sort(); sort_results.reverse()
    for values in sort_results:
        results = values[2]
        data.write(results)
    data.close()
def swapKeyValuesTuple(db):
    """ Invert a dictionary: each value list becomes a tuple key mapping back
    to a list holding its original key, with redundant values collapsed. """
    inverted = {}
    for original_key in db:
        ### Tuples are hashable, so value lists can serve as keys
        inverted[tuple(db[original_key])] = [original_key]
    return eliminate_redundant_dict_values(inverted)
class ZScoreData:
    """ Container for one pathway's over-representation statistics.

    Accessors return strings so values can be written directly into the
    tab-delimited result files.
    """
    def __init__(self,pathway,changed,measured,zscore,null_z,in_pathway):
        self._pathway = pathway
        self._changed = changed
        self._measured = measured
        self._zscore = zscore
        self._null_z = null_z
        self._in_pathway = in_pathway
    def PathwayID(self): return self._pathway
    def Changed(self): return str(int(self._changed))
    def Measured(self): return str(int(self._measured))
    def InPathway(self): return str(self._in_pathway)
    def ZScore(self): return str(self._zscore)
    def SetP(self,p): self._permute_p = p
    def PermuteP(self): return str(self._permute_p)
    def SetAdjP(self,adjp): self._adj_p = adjp
    def AdjP(self): return str(self._adj_p)
    def setAssociatedIDs(self,ids): self.ids = ids
    def AssociatedIDs(self): return self.ids
    def PercentChanged(self):
        ### Changed/Measured as a percentage; any failure (e.g. zero
        ### denominator) yields 0 rather than raising
        try:
            percent = float(self.Changed())/float(self.Measured())*100
        except Exception:
            percent = 0
        return str(percent)
    def PercentPresent(self):
        try:
            percent = float(self.Measured())/float(self.InPathway())*100
        except Exception:
            percent = 0
        return str(percent)
    def NullZ(self): return self._null_z
    def Report(self):
        return self.PathwayID()
    def __repr__(self): return self.Report()
def workerExhaustive(queue,pathway,genes_in_pathway,pathway_input_gene_count,pathway_denominator_gene_count,N,R,pathway_type):
    ### Child-process worker: score one pathway and post the per-pathway result
    ### dicts to `queue` (empty dicts when the pathway has no measured genes,
    ### so the parent's one-get-per-process loop never blocks).
    permuted_z_scores_instance={}; original_mapp_z_score_data_instance={}; original_ontology_z_score_data_instance={}
    """ Exhaustive multiprocessing execution """
    ### n = measured genes in the pathway, r = changed (criterion) genes;
    ### missing dictionary entries default to zero
    try:
        n = pathway_denominator_gene_count[pathway]
        try: r = pathway_input_gene_count[pathway]
        except Exception: r = 0.0000
    except Exception: n = 0.0000; r = 0.0000
    if n != 0:
        try: z = Zscore(r,n,N,R)
        except ZeroDivisionError: z = 0.0000
        ### null_z: the score this pathway would receive with zero hits
        try: null_z = Zscore(0,n,N,R)
        except ZeroDivisionError: null_z = 0.000
        zsd = ZScoreData(pathway,r,n,z,null_z,genes_in_pathway)
        if pathway_type == 'Ontology': original_ontology_z_score_data_instance[pathway] = zsd
        else: original_mapp_z_score_data_instance[pathway] = zsd
        permuted_z_scores_instance[pathway] = [z]
        #if '06878' in pathway: print pathway, z, null_z, r,n, N, R;kill
        if use_FET == 'yes':
            ### Alternatively calculate p using the Fisher's Exact Test
            p = FishersExactTest(r,n,R,N)
            zsd.SetP(p)
    queue.put((permuted_z_scores_instance,original_mapp_z_score_data_instance,original_ontology_z_score_data_instance))
def exhaustiveMultiZScores(pathway_input_gene_count,pathway_denominator_gene_count,N,R,pathway_db,pathway_type):
    """Exhaustive multiprocessing - create a process for each entry in the
    dictionary for Z-score calculation.

    Merges the per-process result dictionaries into the module-level
    ``permuted_z_scores``, ``original_mapp_z_score_data`` and
    ``original_ontology_z_score_data`` dictionaries.
    NOTE(review): this spawns one OS process per pathway, which can be a very
    large number of processes for big databases -- confirm intended.
    """
    procs=list()
    queue = mlp.Queue()
    for pathway in pathway_db:
        genes_in_pathway = len(pathway_db[pathway])
        p = mlp.Process(target=workerExhaustive, args=(queue,pathway,genes_in_pathway,pathway_input_gene_count,pathway_denominator_gene_count,N,R,pathway_type))
        procs.append(p)
        p.start()
    permuted_z_scores_list=[]; original_mapp_z_score_data_list=[]; original_ontology_z_score_data_list=[]
    # Drain one queue item per spawned process BEFORE join(): joining first
    # can deadlock when a child blocks writing its result to the queue pipe.
    for _ in procs:
        val = queue.get()
        permuted_z_scores_list.append(val[0]); original_mapp_z_score_data_list.append(val[1])
        original_ontology_z_score_data_list.append(val[2])
    for p in procs:
        p.join()
    # Merge per-process results into the module-level dictionaries.
    for i in permuted_z_scores_list: permuted_z_scores.update(i)
    for i in original_mapp_z_score_data_list: original_mapp_z_score_data.update(i)
    for i in original_ontology_z_score_data_list: original_ontology_z_score_data.update(i)
def multiZScores(pathway_input_gene_count, pathway_denominator_gene_count, N, R, pathway_db, pathway_type):
    """Score pathways using a finite pool of worker processes (at most 4).

    The pathway database is split into one chunk per worker and handed to a
    ``MultiZscoreWorker`` instance via ``pool.map``; the per-chunk result
    dictionaries are merged into the module-level ``permuted_z_scores``,
    ``original_mapp_z_score_data`` and ``original_ontology_z_score_data``.

    Raises ``ValueError`` when fewer than 10 pathways are supplied (the chunk
    size would be 0 and the slicing loop below would never advance).
    """
    if mlp.cpu_count() < 3:
        processors = mlp.cpu_count()
    else:
        processors = 4
    pool = mlp.Pool(processes=processors)
    # Materialize the items once so slicing also works on Python 3
    # (dict.items() views are not subscriptable there), and use integer
    # division so the slice bounds stay ints.
    pathway_items = list(pathway_db.items())
    si = len(pathway_items) // processors
    if len(pathway_items) < 10:
        # Previously a bare `forceError` NameError was used to abort here.
        raise ValueError('too few pathways (<10) for multiprocessed Z-score calculation')
    s = si
    b = 0
    db_ls = []
    while s < len(pathway_items):
        db_ls.append(dict(pathway_items[b:s]))
        b += si
        s += si
    db_ls.append(dict(pathway_items[b:s]))  # remainder chunk (may be empty)
    ### Create an instance of MultiZscoreWorker (store the variables to save memory)
    workerMulti = MultiZscoreWorker(pathway_input_gene_count, pathway_denominator_gene_count, N, R, pathway_type, use_FET)
    results = pool.map(workerMulti, db_ls)
    pool.close()
    pool.join()
    pool = None
    # Merge each chunk's three result dictionaries into the module globals.
    for (a, b, c) in results:
        permuted_z_scores.update(a)
        original_mapp_z_score_data.update(b)
        original_ontology_z_score_data.update(c)
class MultiZscoreWorker:
    """Picklable callable used with ``multiprocessing.Pool.map``.

    Stores the invariant inputs once at construction so only the per-chunk
    pathway dictionary is shipped to each worker call.

    Fix: removed dead timing instrumentation (unused counters
    ``zt/nzt/zsdt/rt/ft`` and a ``time.time()`` call executed on every
    iteration whose result was never read).
    """

    def __init__(self, pathway_input_gene_count, pathway_denominator_gene_count, N, R, pathway_type, use_FET):
        self.pathway_input_gene_count = pathway_input_gene_count
        self.pathway_denominator_gene_count = pathway_denominator_gene_count
        self.N = N  # total number of genes measured
        self.R = R  # total number of genes meeting the criterion
        self.pathway_type = pathway_type
        self.use_FET = use_FET  # 'yes' -> also compute Fisher's Exact Test p

    def __call__(self, pathway_db):
        """Score every pathway in ``pathway_db``; return the three result dicts."""
        N = self.N
        R = self.R
        use_FET = self.use_FET
        permuted_z_scores_instance = {}
        original_mapp_z_score_data_instance = {}
        original_ontology_z_score_data_instance = {}
        for pathway in pathway_db:
            genes_in_pathway = len(pathway_db[pathway])
            try:
                n = self.pathway_denominator_gene_count[pathway]
                try:
                    r = self.pathway_input_gene_count[pathway]
                except Exception:
                    r = 0.0000
            except Exception:
                n = 0.0000
                r = 0.0000
            if n != 0:
                try:
                    z = Zscore(r, n, N, R)
                except ZeroDivisionError:
                    z = 0.0000
                try:
                    null_z = Zscore(0, n, N, R)
                except ZeroDivisionError:
                    null_z = 0.000
                zsd = ZScoreData(pathway, r, n, z, null_z, genes_in_pathway)
                if self.pathway_type == 'Ontology':
                    original_ontology_z_score_data_instance[pathway] = zsd
                else:
                    original_mapp_z_score_data_instance[pathway] = zsd
                permuted_z_scores_instance[pathway] = [z]
                if use_FET == 'yes':
                    ### Alternatively calculate p using the Fisher's Exact Test
                    p = FishersExactTest(r, n, R, N)
                    zsd.SetP(p)
        return permuted_z_scores_instance, original_mapp_z_score_data_instance, original_ontology_z_score_data_instance
def calculateZScores(pathway_input_gene_count,pathway_denominator_gene_count,N,R,pathway_db,pathway_type):
"""where N is the total number of genes measured:
R is the total number of genes meeting the criterion:
n is the total number of genes in this specific MAPP:
r is the number of genes meeting the criterion in this MAPP: """
for pathway in pathway_db:
try:
n = pathway_denominator_gene_count[pathway]
try: r = pathway_input_gene_count[pathway]
except Exception: r = 0.0000
except Exception: n = 0.0000; r = 0.0000
if n != 0:
try: z = Zscore(r,n,N,R)
except ZeroDivisionError: z = 0.0000
try: null_z = Zscore(0,n,N,R)
except ZeroDivisionError: null_z = 0.000
genes_in_pathway | |
options_list=['--format'], arg_type=get_enum_type(['default', 'email']), help='',
arg_group='Content Info')
c.argument('identifier', type=str, help='', arg_group='Content Info')
c.argument('metadata', action=AddMetadata, nargs='+', help='', arg_group='Content Info')
c.argument('state', arg_type=get_enum_type(['rest', 'motion', 'use']), help='', arg_group='Content Info')
with self.argument_context('identitysignins information-protection-sensitivity-label create-sublabel') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('applicable_to', arg_type=get_enum_type(['email', 'site', 'unifiedGroup', 'unknownFutureValue']),
help='')
c.argument('application_mode', arg_type=get_enum_type(['manual', 'automatic', 'recommended']), help='')
c.argument('assigned_policies', action=AddAssignedPolicies, nargs='+', help='')
c.argument('auto_labeling', action=AddAutoLabeling, nargs='+', help='autoLabeling')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='')
c.argument('is_default', arg_type=get_three_state_flag(), help='')
c.argument('is_endpoint_protection_enabled', arg_type=get_three_state_flag(), help='')
c.argument('label_actions', action=AddLabelActions, nargs='+', help='')
c.argument('name', type=str, help='')
c.argument('priority', type=int, help='')
c.argument('tool_tip', type=str, help='')
c.argument('sublabels', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
with self.argument_context('identitysignins information-protection-sensitivity-label delete-sublabel') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('sensitivity_label_id1', type=str, help='key: id of sensitivityLabel')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins information-protection-sensitivity-label evaluate') as c:
c.argument('discovered_sensitive_types', type=validate_file_or_dict, help=' Expected value: '
'json-string/@json-file.')
c.argument('current_label', action=AddCurrentLabel, nargs='+', help='currentLabel')
with self.argument_context('identitysignins information-protection-sensitivity-label list-sublabel') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection-sensitivity-label show-sublabel') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('sensitivity_label_id1', type=str, help='key: id of sensitivityLabel')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection-sensitivity-label update-sublabel') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('sensitivity_label_id1', type=str, help='key: id of sensitivityLabel')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('applicable_to', arg_type=get_enum_type(['email', 'site', 'unifiedGroup', 'unknownFutureValue']),
help='')
c.argument('application_mode', arg_type=get_enum_type(['manual', 'automatic', 'recommended']), help='')
c.argument('assigned_policies', action=AddAssignedPolicies, nargs='+', help='')
c.argument('auto_labeling', action=AddAutoLabeling, nargs='+', help='autoLabeling')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='')
c.argument('is_default', arg_type=get_three_state_flag(), help='')
c.argument('is_endpoint_protection_enabled', arg_type=get_three_state_flag(), help='')
c.argument('label_actions', action=AddLabelActions, nargs='+', help='')
c.argument('name', type=str, help='')
c.argument('priority', type=int, help='')
c.argument('tool_tip', type=str, help='')
c.argument('sublabels', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
with self.argument_context('identitysignins information-protection-sensitivity-label-sublabel evaluate') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('discovered_sensitive_types', type=validate_file_or_dict, help=' Expected value: '
'json-string/@json-file.')
c.argument('current_label', action=AddCurrentLabel, nargs='+', help='currentLabel')
with self.argument_context('identitysignins information-protection-threat-assessment-request create-result') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'.')
c.argument('message', type=str, help='The result message for each threat assessment.')
c.argument('result_type', arg_type=get_enum_type(['checkPolicy', 'rescan', 'unknownFutureValue']), help='')
with self.argument_context('identitysignins information-protection-threat-assessment-request delete-result') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('threat_assessment_result_id', type=str, help='key: id of threatAssessmentResult')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins information-protection-threat-assessment-request list-result') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection-threat-assessment-request show-result') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('threat_assessment_result_id', type=str, help='key: id of threatAssessmentResult')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection-threat-assessment-request update-result') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('threat_assessment_result_id', type=str, help='key: id of threatAssessmentResult')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'.')
c.argument('message', type=str, help='The result message for each threat assessment.')
c.argument('result_type', arg_type=get_enum_type(['checkPolicy', 'rescan', 'unknownFutureValue']), help='')
with self.argument_context('identitysignins invitation-invitation create-invitation') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('invited_user_display_name', type=str, help='The display name of the user being invited.')
c.argument('invited_user_email_address', type=str, help='The email address of the user being invited. '
'Required. The following special characters are not permitted in the email address:Tilde '
'(~)Exclamation point (!)Number sign (#)Dollar sign ($)Percent (%)Circumflex (^)Ampersand '
'(&)Asterisk (*)Parentheses (( ))Plus sign (+)Equal sign (=)Brackets ([ ])Braces ({ })Backslash '
'(/)Slash mark (/)Pipe (/|)Semicolon (;)Colon (:)Quotation marks (\')Angle brackets (< >)Question '
'mark (?)Comma (,)However, the following exceptions apply:A period (.) or a hyphen (-) is permitted '
'anywhere in the user name, except at the beginning or end of the name.An underscore (_) is '
'permitted anywhere in the user name. This includes at the beginning or end of the name.')
c.argument('invited_user_type', type=str, help='The userType of the user being invited. By default, this is '
'Guest. You can invite as Member if you are a company administrator.')
c.argument('invite_redeem_url', type=str,
help='The URL the user can use to redeem their invitation. Read-only')
c.argument('invite_redirect_url', type=str, help='The URL the user should be redirected to once the invitation '
'is redeemed. Required.')
c.argument('reset_redemption', arg_type=get_three_state_flag(), help='')
c.argument('send_invitation_message', arg_type=get_three_state_flag(), help='Indicates whether an email should '
'be sent to the user being invited or not. The default is false.')
c.argument('status', type=str, help='The status of the invitation. Possible values: PendingAcceptance, '
'Completed, InProgress, and Error')
c.argument('invited_user', type=validate_file_or_dict, help='Represents an Azure Active Directory user object. '
'Expected value: json-string/@json-file.')
c.argument('cc_recipients', type=validate_file_or_dict, help='Additional recipients the invitation message '
'should be sent to. Currently only 1 additional recipient is supported. Expected value: '
'json-string/@json-file.', arg_group='Invited User Message Info')
c.argument('customized_message_body', type=str, help='Customized message body you want to send if you don\'t '
'want the default message.', arg_group='Invited User Message Info')
c.argument('message_language', type=str, help='The language you want to send the default message in. If the '
'customizedMessageBody is specified, this property is ignored, and the message is sent using the '
'customizedMessageBody. The language format should be in ISO 639. The default is en-US.',
arg_group='Invited User Message Info')
with self.argument_context('identitysignins invitation-invitation delete-invitation') as c:
c.argument('invitation_id', type=str, help='key: id of invitation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins invitation-invitation list-invitation') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins invitation-invitation show-invitation') as c:
c.argument('invitation_id', type=str, help='key: id of invitation')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins invitation-invitation update-invitation') as c:
c.argument('invitation_id', type=str, help='key: id of invitation')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('invited_user_display_name', type=str, help='The display name of the user being invited.')
c.argument('invited_user_email_address', type=str, help='The email address of the user being invited. '
'Required. The following special characters are not permitted in the email address:Tilde '
'(~)Exclamation point (!)Number sign (#)Dollar sign ($)Percent (%)Circumflex (^)Ampersand '
'(&)Asterisk (*)Parentheses (( ))Plus sign (+)Equal sign (=)Brackets ([ ])Braces ({ })Backslash '
'(/)Slash mark (/)Pipe (/|)Semicolon (;)Colon (:)Quotation marks (\')Angle brackets (< >)Question '
'mark (?)Comma (,)However, the following exceptions apply:A period (.) or a hyphen (-) is permitted '
'anywhere in the user name, except at the beginning or end of the name.An underscore (_) is '
'permitted anywhere in the user name. This includes at the beginning or end of the name.')
c.argument('invited_user_type', type=str, help='The userType of the user being invited. By default, this is '
'Guest. You can invite as Member if you are a company administrator.')
c.argument('invite_redeem_url', type=str,
help='The URL the user can use to redeem their invitation. Read-only')
c.argument('invite_redirect_url', type=str, help='The URL the user should be redirected to once the invitation '
'is redeemed. Required.')
c.argument('reset_redemption', arg_type=get_three_state_flag(), help='')
c.argument('send_invitation_message', arg_type=get_three_state_flag(), help='Indicates whether an email should '
'be sent to the user being invited or not. The default is false.')
c.argument('status', type=str, help='The status of the invitation. Possible values: PendingAcceptance, '
'Completed, InProgress, and Error')
c.argument('invited_user', type=validate_file_or_dict, help='Represents an Azure Active Directory user object. '
'Expected value: json-string/@json-file.')
c.argument('cc_recipients', type=validate_file_or_dict, help='Additional recipients the invitation message '
'should be sent to. Currently only 1 additional recipient is supported. Expected value: '
'json-string/@json-file.', arg_group='Invited User Message Info')
c.argument('customized_message_body', type=str, help='Customized message body you want to send if you don\'t '
'want the default message.', arg_group='Invited User Message Info')
c.argument('message_language', type=str, help='The language you want to send the default message in. If the '
'customizedMessageBody is specified, this property is ignored, and the message is sent using the '
'customizedMessageBody. The language format should be in ISO 639. The default is en-US.',
arg_group='Invited User Message Info')
with self.argument_context('identitysignins invitation delete-ref-invited-user') as c:
c.argument('invitation_id', type=str, help='key: id of invitation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins invitation set-ref-invited-user') as c:
c.argument('invitation_id', type=str, help='key: id of invitation')
c.argument('body', type=validate_file_or_dict, help='New navigation property ref values Expected value: '
'json-string/@json-file.')
with self.argument_context('identitysignins invitation show-invited-user') as c:
c.argument('invitation_id', type=str, | |
A vertex at (-1.0, -0.4, -1.0),
A vertex at (-1.0, -1.0, -0.4))
sage: edge_trunc = Cube.face_truncation(Cube.faces(1)[11])
sage: edge_trunc.f_vector()
(1, 10, 15, 7, 1)
sage: tuple(f.ambient_V_indices() for f in edge_trunc.faces(2))
((0, 5, 6, 7),
(1, 4, 5, 6, 8),
(6, 7, 8, 9),
(0, 2, 3, 7, 9),
(1, 2, 8, 9),
(0, 3, 4, 5),
(1, 2, 3, 4))
sage: face_trunc = Cube.face_truncation(Cube.faces(2)[2])
sage: face_trunc.vertices()
(A vertex at (1, -1, -1),
A vertex at (1, 1, -1),
A vertex at (1, 1, 1),
A vertex at (1, -1, 1),
A vertex at (-1/3, -1, 1),
A vertex at (-1/3, 1, 1),
A vertex at (-1/3, 1, -1),
A vertex at (-1/3, -1, -1))
sage: face_trunc.face_lattice().is_isomorphic(Cube.face_lattice())
True
TESTS:
Testing that the backend is preserved::
sage: Cube = polytopes.cube(backend='field')
sage: face_trunc = Cube.face_truncation(Cube.faces(2)[0])
sage: face_trunc.backend()
'field'
Testing that :trac:`28506` is fixed::
sage: P = polytopes.twenty_four_cell()
sage: P = P.dilation(6)
sage: P = P.change_ring(ZZ)
sage: P.face_truncation(P.faces(2)[0], cut_frac=1)
A 4-dimensional polyhedron in QQ^4 defined as the convex hull of 27 vertices
"""
if cut_frac is None:
cut_frac = ZZ.one() / 3
face_vertices = face.vertices()
normal_vectors = []
for facet in self.Hrepresentation():
if all(facet.contains(x) and not facet.interior_contains(x)
for x in face_vertices):
# The facet contains the face
normal_vectors.append(facet.A())
if linear_coefficients is not None:
normal_vector = sum(linear_coefficients[i]*normal_vectors[i]
for i in range(len(normal_vectors)))
else:
normal_vector = sum(normal_vectors)
B = - normal_vector * (face_vertices[0].vector())
linear_evaluation = set(-normal_vector * (v.vector()) for v in self.vertices())
if B == max(linear_evaluation):
C = max(linear_evaluation.difference(set([B])))
else:
C = min(linear_evaluation.difference(set([B])))
cut_height = (1 - cut_frac) * B + cut_frac * C
ineq_vector = tuple([cut_height]) + tuple(normal_vector)
new_ieqs = self.inequalities_list() + [ineq_vector]
new_eqns = self.equations_list()
# Some vertices might need fractions.
parent = self.parent().base_extend(cut_frac/1)
return parent.element_class(parent, None, [new_ieqs, new_eqns])
    def stack(self, face, position=None):
        r"""
        Return a new polyhedron formed by stacking onto a ``face``. Stacking a
        face adds a new vertex located slightly outside of the designated face.

        INPUT:

        - ``face`` -- a PolyhedronFace

        - ``position`` -- a positive number. Determines a relative distance
          from the barycenter of ``face``. A value close to 0 will place the
          new vertex close to the face and a large value further away. Default
          is `1`. If the given value is too large, an error is returned.

        OUTPUT:

        A Polyhedron object

        EXAMPLES::

            sage: cube = polytopes.cube()
            sage: square_face = cube.facets()[2]
            sage: stacked_square = cube.stack(square_face)
            sage: stacked_square.f_vector()
            (1, 9, 16, 9, 1)

            sage: edge_face = cube.faces(1)[3]
            sage: stacked_edge = cube.stack(edge_face)
            sage: stacked_edge.f_vector()
            (1, 9, 17, 10, 1)

            sage: cube.stack(cube.faces(0)[0])
            Traceback (most recent call last):
            ...
            ValueError: cannot stack onto a vertex

            sage: stacked_square_half = cube.stack(square_face,position=1/2)
            sage: stacked_square_half.f_vector()
            (1, 9, 16, 9, 1)
            sage: stacked_square_large = cube.stack(square_face,position=10)

            sage: hexaprism = polytopes.regular_polygon(6).prism()
            sage: hexaprism.f_vector()
            (1, 12, 18, 8, 1)
            sage: square_face = hexaprism.faces(2)[0]
            sage: stacked_hexaprism = hexaprism.stack(square_face)
            sage: stacked_hexaprism.f_vector()
            (1, 13, 22, 11, 1)

            sage: hexaprism.stack(square_face,position=4)
            Traceback (most recent call last):
            ...
            ValueError: the chosen position is too large

            sage: s = polytopes.simplex(7)
            sage: f = s.faces(3)[69]
            sage: sf = s.stack(f); sf
            A 7-dimensional polyhedron in QQ^8 defined as the convex hull of 9 vertices
            sage: sf.vertices()
            (A vertex at (-4, -4, -4, -4, 17/4, 17/4, 17/4, 17/4),
             A vertex at (0, 0, 0, 0, 0, 0, 0, 1),
             A vertex at (0, 0, 0, 0, 0, 0, 1, 0),
             A vertex at (0, 0, 0, 0, 0, 1, 0, 0),
             A vertex at (0, 0, 0, 0, 1, 0, 0, 0),
             A vertex at (0, 0, 0, 1, 0, 0, 0, 0),
             A vertex at (0, 0, 1, 0, 0, 0, 0, 0),
             A vertex at (0, 1, 0, 0, 0, 0, 0, 0),
             A vertex at (1, 0, 0, 0, 0, 0, 0, 0))

        It is possible to stack on unbounded faces::

            sage: Q = Polyhedron(vertices=[[0,1],[1,0]],rays=[[1,1]])
            sage: E = Q.faces(1)
            sage: Q.stack(E[0],1/2).Vrepresentation()
            (A vertex at (0, 1),
             A vertex at (1, 0),
             A ray in the direction (1, 1),
             A vertex at (2, 0))
            sage: Q.stack(E[1],1/2).Vrepresentation()
            (A vertex at (0, 1),
             A vertex at (0, 2),
             A vertex at (1, 0),
             A ray in the direction (1, 1))
            sage: Q.stack(E[2],1/2).Vrepresentation()
            (A vertex at (0, 0),
             A vertex at (0, 1),
             A vertex at (1, 0),
             A ray in the direction (1, 1))

        Stacking requires a proper face::

            sage: Q.stack(Q.faces(2)[0])
            Traceback (most recent call last):
            ...
            ValueError: can only stack on proper face

        TESTS:

        Checking that the backend is preserved::

            sage: Cube = polytopes.cube(backend='field')
            sage: stack = Cube.stack(Cube.faces(2)[0])
            sage: stack.backend()
            'field'

        Taking the stacking vertex too far with the parameter ``position``
        may result in a failure to produce the desired
        (combinatorial type of) polytope.
        The interval of permitted values is always open.
        This is the smallest unpermitted value::

            sage: P = polytopes.octahedron()
            sage: P.stack(P.faces(2)[0], position=4)
            Traceback (most recent call last):
            ...
            ValueError: the chosen position is too large

        Testing that :trac:`29057` is fixed::

            sage: P = polytopes.cross_polytope(4)
            sage: P.stack(P.faces(3)[0])
            A 4-dimensional polyhedron in QQ^4 defined as the convex hull of 9 vertices
        """
        from sage.geometry.polyhedron.face import PolyhedronFace
        # Input validation: stacking is only defined for proper faces of
        # positive dimension.
        if not isinstance(face, PolyhedronFace):
            raise TypeError("{} should be a PolyhedronFace of {}".format(face, self))
        elif face.dim() == 0:
            raise ValueError("cannot stack onto a vertex")
        elif face.dim() == -1 or face.dim() == self.dim():
            raise ValueError("can only stack on proper face")
        if position is None:
            position = 1

        face_vertices = face.vertices()
        n_vertices = len(face_vertices)
        # Multiplying by ZZ.one() keeps the barycenter in a ring where the
        # division by n_vertices is meaningful even for integral vertices.
        barycenter = ZZ.one()*sum([v.vector() for v in face_vertices]) / n_vertices

        # Taking all facets that contain the face
        if face.dim() == self.dim() - 1:
            face_star = set([face.ambient_Hrepresentation()[-1]])
        else:
            # An inequality facet contains the face iff no face vertex lies in
            # the facet's interior.
            face_star = set(facet for facet in self.Hrepresentation() if facet.is_inequality()
                            if all(not facet.interior_contains(x) for x in face_vertices))

        # Facets adjacent to, but not in, the face star bound the region where
        # the new vertex may be placed.
        neighboring_facets = set()
        for facet in face_star:
            for neighbor_facet in facet.neighbors():
                if neighbor_facet not in face_star:
                    neighboring_facets.add(neighbor_facet)

        # Create the polyhedron where we can put the new vertex
        # (negating the face-star inequalities puts the locus on the OUTSIDE
        # of the stacked face).
        locus_ieqs = [facet.vector() for facet in neighboring_facets]
        locus_ieqs += [-facet.vector() for facet in face_star]
        locus_eqns = self.equations_list()

        locus_polyhedron = Polyhedron(ieqs=locus_ieqs, eqns=locus_eqns,
                                      base_ring=self.base_ring().fraction_field(),
                                      backend=self.backend())

        repr_point = locus_polyhedron.representative_point()
        # Interpolate from the barycenter (position=0) outward through a
        # representative interior point of the locus.
        new_vertex = (1-position)*barycenter + position*repr_point
        # A point on (or beyond) the locus boundary would not yield the
        # intended stacked combinatorial type.
        if not locus_polyhedron.relative_interior_contains(new_vertex):
            raise ValueError("the chosen position is too large")

        parent = self.parent().base_extend(new_vertex)
        return parent.element_class(parent, [self.vertices() + (new_vertex,), self.rays(), self.lines()], None)
def wedge(self, face, width=1):
r"""
Return the wedge over a ``face`` of the polytope ``self``.
The wedge over a face `F` of a polytope `P` with width `w \not= 0`
is defined as:
.. MATH::
(P \times \mathbb{R}) \cap \{a^\top x + |w x_{d+1}| \leq b\}
where `\{x | a^\top x = b\}` is a supporting hyperplane defining `F`.
INPUT:
- ``face`` -- a PolyhedronFace of ``self``, the face which we take
the wedge over
- ``width`` -- a nonzero number (default: ``1``);
specifies how wide the wedge will be
OUTPUT:
A (bounded) polyhedron
EXAMPLES::
sage: P_4 = polytopes.regular_polygon(4)
sage: W1 = P_4.wedge(P_4.faces(1)[0]); W1
A 3-dimensional polyhedron in AA^3 defined as the convex hull of 6 vertices
sage: triangular_prism = polytopes.regular_polygon(3).prism()
sage: W1.is_combinatorially_isomorphic(triangular_prism)
True
sage: Q = polytopes.hypersimplex(4,2)
sage: W2 = Q.wedge(Q.faces(2)[7]); W2
A 4-dimensional polyhedron in QQ^5 defined as the convex hull of 9 vertices
sage: W2.vertices()
(A vertex at (0, 1, 0, 1, 0),
A vertex at (0, 0, 1, 1, 0),
A vertex at (1, 0, 0, 1, -1),
A vertex at (1, 0, 0, 1, 1),
A vertex at (1, 0, 1, 0, 1),
A vertex at (1, 1, 0, 0, -1),
A vertex at (0, 1, 1, 0, 0),
A vertex at (1, 0, 1, 0, -1),
A vertex at (1, 1, 0, 0, 1))
sage: W3 = Q.wedge(Q.faces(1)[11]); W3
A 4-dimensional polyhedron in QQ^5 defined as the convex hull of 10 vertices
sage: W3.vertices()
(A vertex at (0, 1, 0, 1, 0),
A vertex at (0, 0, 1, 1, 0),
A vertex at (1, 0, 0, 1, -1),
A vertex at (1, 0, 0, 1, 1),
A vertex at (1, 0, 1, 0, 2),
A vertex at (0, 1, 1, 0, 1),
A vertex at | |
# coding: utf-8
"""
Masking API
Schema for the Masking Engine API # noqa: E501
OpenAPI spec version: 5.1.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dxm.lib.masking_api.api_client import ApiClient
class MountFilesystemApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def connect_mount_filesystem(self, mount_id, **kwargs): # noqa: E501
"""Connect filesystem mount # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_mount_filesystem(mount_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int mount_id: The ID of the mount to connect (required)
:return: MountInformation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_mount_filesystem_with_http_info(mount_id, **kwargs) # noqa: E501
else:
(data) = self.connect_mount_filesystem_with_http_info(mount_id, **kwargs) # noqa: E501
return data
    def connect_mount_filesystem_with_http_info(self, mount_id, **kwargs):  # noqa: E501
        """Connect filesystem mount  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_mount_filesystem_with_http_info(mount_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int mount_id: The ID of the mount to connect (required)
        :return: MountInformation
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Endpoint parameters plus the client-control kwargs accepted by every
        # generated method; any other keyword is rejected below.
        all_params = ['mount_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_mount_filesystem" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'mount_id' is set
        if self.api_client.client_side_validation and ('mount_id' not in params or
                                                       params['mount_id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `mount_id` when calling `connect_mount_filesystem`")  # noqa: E501

        collection_formats = {}

        # 'mountID' is the placeholder name in the URL template below.
        path_params = {}
        if 'mount_id' in params:
            path_params['mountID'] = params['mount_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/mount-filesystem/{mountID}/connect', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='MountInformation',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def create_mount_filesystem(self, body, **kwargs): # noqa: E501
"""Create filesystem mount # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_mount_filesystem(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MountInformation body: The filesystem to mount (required)
:return: MountInformation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_mount_filesystem_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_mount_filesystem_with_http_info(body, **kwargs) # noqa: E501
return data
def create_mount_filesystem_with_http_info(self, body, **kwargs): # noqa: E501
"""Create filesystem mount # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_mount_filesystem_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MountInformation body: The filesystem to mount (required)
:return: MountInformation
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_mount_filesystem" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `create_mount_filesystem`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/mount-filesystem', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MountInformation', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_mount_filesystem(self, mount_id, **kwargs): # noqa: E501
"""Delete filesystem mount # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_mount_filesystem(mount_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int mount_id: The ID of the mount to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_mount_filesystem_with_http_info(mount_id, **kwargs) # noqa: E501
else:
(data) = self.delete_mount_filesystem_with_http_info(mount_id, **kwargs) # noqa: E501
return data
def delete_mount_filesystem_with_http_info(self, mount_id, **kwargs): # noqa: E501
"""Delete filesystem mount # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_mount_filesystem_with_http_info(mount_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int mount_id: The ID of the mount to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['mount_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_mount_filesystem" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'mount_id' is set
if self.api_client.client_side_validation and ('mount_id' not in params or
params['mount_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `mount_id` when calling `delete_mount_filesystem`") # noqa: E501
collection_formats = {}
path_params = {}
if 'mount_id' in params:
path_params['mountID'] = params['mount_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/mount-filesystem/{mountID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def disconnect_mount_filesystem(self, mount_id, **kwargs): # noqa: E501
"""Disconnect filesystem mount # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.disconnect_mount_filesystem(mount_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int mount_id: The ID of the mount to disconnect (required)
:return: MountInformation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.disconnect_mount_filesystem_with_http_info(mount_id, **kwargs) # noqa: E501
else:
(data) = self.disconnect_mount_filesystem_with_http_info(mount_id, **kwargs) # noqa: E501
return data
def disconnect_mount_filesystem_with_http_info(self, mount_id, **kwargs): # noqa: E501
"""Disconnect filesystem mount # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.disconnect_mount_filesystem_with_http_info(mount_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int mount_id: The ID of the mount to disconnect (required)
:return: MountInformation
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['mount_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method disconnect_mount_filesystem" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'mount_id' is set
if self.api_client.client_side_validation and ('mount_id' not in params or
params['mount_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `mount_id` when calling `disconnect_mount_filesystem`") # noqa: E501
collection_formats = {}
path_params = {}
if 'mount_id' in params:
path_params['mountID'] = params['mount_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/mount-filesystem/{mountID}/disconnect', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MountInformation', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
| |
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
from KratosMultiphysics import *
try: # test to import the modules for the parallel execution
from KratosMultiphysics.mpi import *
from KratosMultiphysics.MetisApplication import *
from KratosMultiphysics.TrilinosApplication import *
except:
pass
from KratosMultiphysics.MappingApplication import *
import KratosMultiphysics.KratosUnittest as KratosUnittest
class KratosExecuteMapperTests(KratosUnittest.TestCase):
def __init__(self, GidOutput, set_up_test_1, set_up_test_2):
self.GiD_output = GidOutput
self.set_up_test_1 = set_up_test_1
self.set_up_test_2 = set_up_test_2
# Mdpa Input files
input_file_origin = "MapperTests_mdpa/MappingApplication_test_geometry_tri"
input_file_destination = "MapperTests_mdpa/MappingApplication_test_geometry_quad"
self.variable_list_scalar = [PRESSURE, TEMPERATURE]
self.variable_list_vector = [FORCE, VELOCITY]
variable_list = []
variable_list.extend(self.variable_list_scalar)
variable_list.extend(self.variable_list_vector)
# check if executed in parallel
try:
num_processors = mpi.size
except:
num_processors = 1
if (num_processors == 1): # serial execution
self.parallel_execution = False
else:
# Partition and Read Model Parts
variable_list.extend([PARTITION_INDEX])
self.parallel_execution = True
self.model = Model()
self.model_part_origin = self.partition_and_read_model_part(self.model,
"ModelPartNameOrigin",
input_file_origin, 3,
variable_list,
num_processors)
self.model_part_destination = self.partition_and_read_model_part(self.model,
"ModelPartNameDestination",
input_file_destination, 3,
variable_list,
num_processors)
def SetUpMapper(self, file_name):
self.ResetValuesModelParts()
if (self.GiD_output):
self.InitializeGiD(file_name)
parameter_file_name = "MapperTests_json/" + file_name + "_parameters.json"
result_file_name = "MapperTests_results/" + file_name + "_results.json"
try: # to read project parameters file
parameter_file = open(parameter_file_name, 'r')
project_parameters = Parameters(parameter_file.read())
mapper_settings = project_parameters["mapper_settings"][0]
except:
raise("Project Parameter JSON File \"", parameter_file_name, "\" could not be read")
results_read = False
try: # to read the result ifle
result_file = open(result_file_name, 'r')
self.results = Parameters(result_file.read())
results_read = True
except:
print("Warning: Result JSON File \"", result_file_name, "\" could not be read")
# needed for the tests only, usually one does not need to get the submodel-parts for the mapper
self.interface_sub_model_part_origin = self.model_part_origin.GetSubModelPart(
mapper_settings["interface_submodel_part_origin"].GetString())
self.interface_sub_model_part_destination = self.model_part_destination.GetSubModelPart(
mapper_settings["interface_submodel_part_destination"].GetString())
# Initialize Mapper
if self.parallel_execution:
fct_ptr = MapperFactory.CreateMPIMapper
print("Creating an MPI Mapper")
else:
fct_ptr = MapperFactory.CreateMapper
print("Creating a serial Mapper")
self.mapper = fct_ptr(self.model_part_origin,
self.model_part_destination,
mapper_settings)
if (self.set_up_test_1):
self.PrintValuesForJson() # needed to set up the test
if (results_read):
self.SetPrescribedValues()
else:
raise("Result JSON File \"", result_file_name, "\" could not be read")
##### Testing Functions
    def TestMapConstantScalarValues(self, output_time):
        """Map a constant scalar (PRESSURE -> TEMPERATURE) from origin to
        destination and verify plain Map as well as the ADD_VALUES, SWAP_SIGN
        and combined ADD_VALUES|SWAP_SIGN options; finally the interface is
        updated."""
        map_value = 5.123
        variable_origin = PRESSURE
        variable_destination = TEMPERATURE
        self.SetValuesOnNodes(self.model_part_origin,
                              variable_origin,
                              map_value)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_origin,
                                         self.model_part_origin,
                                         variable_origin,
                                         output_time)
        # Overwriting Values: destination must carry map_value afterwards
        self.mapper.Map(variable_origin,
                        variable_destination)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_destination,
                                         self.model_part_destination,
                                         variable_destination,
                                         output_time)
        self.CheckValues(self.interface_sub_model_part_destination,
                         variable_destination, map_value)
        # Adding Values: v + v == 2*v
        self.mapper.Map(variable_origin,
                        variable_destination,
                        Mapper.ADD_VALUES)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_destination,
                                         self.model_part_destination,
                                         variable_destination,
                                         output_time + 0.1)
        self.CheckValues(self.interface_sub_model_part_destination,
                         variable_destination, map_value*2)
        # Swaping the sign of the Values (overwrite with -v)
        self.mapper.Map(variable_origin,
                        variable_destination,
                        Mapper.SWAP_SIGN)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_destination,
                                         self.model_part_destination,
                                         variable_destination,
                                         output_time + 0.2)
        self.CheckValues(self.interface_sub_model_part_destination,
                         variable_destination,
                         -map_value)
        # Adding the swapped-sign values (ADD_VALUES | SWAP_SIGN): -v + -v == -2*v
        self.mapper.Map(variable_origin,
                        variable_destination,
                        Mapper.ADD_VALUES | Mapper.SWAP_SIGN)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_destination,
                                         self.model_part_destination,
                                         variable_destination,
                                         output_time + 0.3)
        self.CheckValues(self.interface_sub_model_part_destination,
                         variable_destination,
                         -map_value*2)
        # # USE_TRANSPOSE Mapping
        # # Number of Nodes on Origin: 37
        # # Number of Nodes in Destination: 25
        # # => Values in Destination are multiplied with a factor of 1.48 (37/25)
        # # to conserve the sum of quantities aka USE_TRANSPOSE mapping
        # self.mapper.Map(variable_origin,
        #                 variable_destination,
        #                 Mapper.USE_TRANSPOSE)
        # if (self.GiD_output):
        #     self.WriteNodalResultsCustom(self.gid_io_destination,
        #                                  self.model_part_destination,
        #                                  variable_destination,
        #                                  output_time + 0.3)
        # self.CheckValues(self.interface_sub_model_part_destination,
        #                  variable_destination,
        #                  map_value*1.48)
        self.mapper.UpdateInterface()
def TestInverseMapConstantScalarValues(self, output_time):
map_value = -8.6647
variable_origin = TEMPERATURE
variable_destination = PRESSURE
self.SetValuesOnNodes(self.model_part_destination,
variable_destination,
map_value)
if (self.GiD_output):
self.WriteNodalResultsCustom(self.gid_io_destination,
self.model_part_destination,
variable_destination,
output_time)
# Overwriting Values
self.mapper.InverseMap(variable_origin,
variable_destination)
if (self.GiD_output):
self.WriteNodalResultsCustom(self.gid_io_origin,
self.model_part_origin,
variable_origin,
output_time)
self.CheckValues(self.interface_sub_model_part_origin,
variable_origin,
map_value)
# Adding Values
self.mapper.InverseMap(variable_origin,
variable_destination,
Mapper.ADD_VALUES)
if (self.GiD_output):
self.WriteNodalResultsCustom(self.gid_io_origin,
self.model_part_origin,
variable_origin,
output_time + 0.1)
self.CheckValues(self.interface_sub_model_part_origin,
variable_origin,
map_value*2)
# Swaping the sign of the Values and adding them
self.mapper.InverseMap(variable_origin,
variable_destination,
Mapper.ADD_VALUES | Mapper.SWAP_SIGN)
if (self.GiD_output):
self.WriteNodalResultsCustom(self.gid_io_origin,
self.model_part_origin,
variable_origin,
output_time + 0.2)
self.CheckValues(self.interface_sub_model_part_destination,
variable_destination,
map_value)
self.mapper.UpdateInterface(Mapper.REMESHED)
    def TestMapConstantVectorValues(self, output_time):
        """Map a constant vector (FORCE -> VELOCITY) from origin to destination
        and verify plain Map, ADD_VALUES and SWAP_SIGN; finally the interface
        is updated with a search radius of 0.05."""
        map_value = Vector([15.99, -2.88, 3.123])
        variable_origin = FORCE
        variable_destination = VELOCITY
        self.SetValuesOnNodes(self.model_part_origin,
                              variable_origin,
                              map_value)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_origin,
                                         self.model_part_origin,
                                         variable_origin,
                                         output_time)
        # Overwriting Values: destination must carry map_value afterwards
        self.mapper.Map(variable_origin,
                        variable_destination)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_destination,
                                         self.model_part_destination,
                                         variable_destination,
                                         output_time)
        self.CheckValues(self.interface_sub_model_part_destination,
                         variable_destination,
                         map_value)
        # Adding Values: v + v == 2*v (component-wise)
        self.mapper.Map(variable_origin,
                        variable_destination,
                        Mapper.ADD_VALUES)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_destination,
                                         self.model_part_destination,
                                         variable_destination,
                                         output_time + 0.1)
        self.CheckValues(self.interface_sub_model_part_destination,
                         variable_destination,
                         [2*x for x in map_value])
        # Swaping the sign of the Values (overwrite with -v)
        self.mapper.Map(variable_origin,
                        variable_destination,
                        Mapper.SWAP_SIGN)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_destination,
                                         self.model_part_destination,
                                         variable_destination,
                                         output_time + 0.2)
        self.CheckValues(self.interface_sub_model_part_destination,
                         variable_destination,
                         [-x for x in map_value])
        self.mapper.UpdateInterface(0.05)
    def TestInverseMapConstantVectorValues(self, output_time):
        """Inverse-map a constant vector (VELOCITY <- FORCE) from destination
        to origin and verify plain InverseMap and ADD_VALUES."""
        map_value = Vector([1.4785, -0.88, -33.123])
        variable_origin = VELOCITY
        variable_destination = FORCE
        self.SetValuesOnNodes(self.model_part_destination,
                              variable_destination,
                              map_value)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_destination,
                                         self.model_part_destination,
                                         variable_destination,
                                         output_time)
        # Overwriting Values: origin must carry map_value afterwards
        self.mapper.InverseMap(variable_origin,
                               variable_destination)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_origin,
                                         self.model_part_origin,
                                         variable_origin,
                                         output_time)
        self.CheckValues(self.interface_sub_model_part_origin,
                         variable_origin,
                         map_value)
        # Adding Values: v + v == 2*v (component-wise)
        self.mapper.InverseMap(variable_origin,
                               variable_destination,
                               Mapper.ADD_VALUES)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_origin,
                                         self.model_part_origin,
                                         variable_origin,
                                         output_time + 0.1)
        self.CheckValues(self.interface_sub_model_part_origin,
                         variable_origin,
                         [2*x for x in map_value])
        # # USE_TRANSPOSE Mapping
        # # Number of Nodes on Origin: 37
        # # Number of Nodes in Destination: 25
        # # => Values in Origin are multiplied with a factor of 0.675675676 (25/37)
        # # to conserve the sum of quantities aka USE_TRANSPOSE mapping
        # self.mapper.InverseMap(variable_origin,
        #                        variable_destination,
        #                        Mapper.USE_TRANSPOSE)
        # if (self.GiD_output):
        #     self.WriteNodalResultsCustom(self.gid_io_origin,
        #                                  self.model_part_origin,
        #                                  variable_origin,
        #                                  output_time + 0.2)
        # self.CheckValues(self.interface_sub_model_part_origin,
        #                  variable_origin,
        #                  [0.675675676*x for x in map_value])
    def TestMapNonConstantScalarValues(self, output_time):
        """Map prescribed per-node scalar values origin -> destination and
        compare against the reference results; in set-up mode the mapped
        values are printed instead of checked."""
        variable_origin = PRESSURE
        variable_destination = TEMPERATURE
        self.SetValuesOnNodesPrescribed(self.interface_sub_model_part_origin,
                                        variable_origin,
                                        self.scalar_values_origin_send)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_origin,
                                         self.model_part_origin,
                                         variable_origin,
                                         output_time)
        self.mapper.Map(variable_origin,
                        variable_destination)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_destination,
                                         self.model_part_destination,
                                         variable_destination,
                                         output_time)
        if (self.set_up_test_2):
            self.PrintMappedValues(self.interface_sub_model_part_destination,
                                   variable_destination,
                                   "Destination_receive Scalar")
        else:
            self.CheckValuesPrescribed(self.interface_sub_model_part_destination,
                                       variable_destination,
                                       self.scalar_values_destination_receive)
    def TestInverseMapNonConstantScalarValues(self, output_time):
        """Inverse-map prescribed per-node scalar values destination -> origin
        and compare against the reference results; in set-up mode the mapped
        values are printed instead of checked."""
        variable_origin = TEMPERATURE
        variable_destination = PRESSURE
        self.SetValuesOnNodesPrescribed(self.interface_sub_model_part_destination,
                                        variable_destination,
                                        self.scalar_values_destination_send)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_destination,
                                         self.model_part_destination,
                                         variable_destination,
                                         output_time)
        self.mapper.InverseMap(variable_origin,
                               variable_destination)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_origin,
                                         self.model_part_origin,
                                         variable_origin,
                                         output_time)
        if (self.set_up_test_2):
            self.PrintMappedValues(self.interface_sub_model_part_origin,
                                   variable_origin,
                                   "Origin_receive Scalar")
        else:
            self.CheckValuesPrescribed(self.interface_sub_model_part_origin,
                                       variable_origin,
                                       self.scalar_values_origin_receive)
    def TestMapNonConstantVectorValues(self, output_time):
        """Map prescribed per-node vector values origin -> destination and
        compare against the reference results; in set-up mode the mapped
        values are printed instead of checked."""
        variable_origin = FORCE
        variable_destination = VELOCITY
        self.SetValuesOnNodesPrescribed(self.interface_sub_model_part_origin,
                                        variable_origin,
                                        self.vector_values_origin_send)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_origin,
                                         self.model_part_origin,
                                         variable_origin,
                                         output_time)
        self.mapper.Map(variable_origin,
                        variable_destination)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_destination,
                                         self.model_part_destination,
                                         variable_destination,
                                         output_time)
        if (self.set_up_test_2):
            self.PrintMappedValues(self.interface_sub_model_part_destination,
                                   variable_destination,
                                   "Destination_receive Vector")
        else:
            self.CheckValuesPrescribed(self.interface_sub_model_part_destination,
                                       variable_destination,
                                       self.vector_values_destination_receive)
    def TestInverseMapNonConstantVectorValues(self, output_time):
        """Inverse-map prescribed per-node vector values destination -> origin
        and compare against the reference results; in set-up mode the mapped
        values are printed instead of checked."""
        variable_origin = VELOCITY
        variable_destination = FORCE
        self.SetValuesOnNodesPrescribed(self.interface_sub_model_part_destination,
                                        variable_destination,
                                        self.vector_values_destination_send)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_destination,
                                         self.model_part_destination,
                                         variable_destination,
                                         output_time)
        self.mapper.InverseMap(variable_origin,
                               variable_destination)
        if (self.GiD_output):
            self.WriteNodalResultsCustom(self.gid_io_origin,
                                         self.model_part_origin,
                                         variable_origin,
                                         output_time)
        if (self.set_up_test_2):
            self.PrintMappedValues(self.interface_sub_model_part_origin,
                                   variable_origin,
                                   "Origin_receive Vector")
        else:
            self.CheckValuesPrescribed(self.interface_sub_model_part_origin,
                                       variable_origin,
                                       self.vector_values_origin_receive)
##### Value Checking Functions
def ResetValuesModelParts(self):
for node in self.model_part_origin.Nodes:
for variable in self.variable_list_scalar:
node.SetSolutionStepValue(variable, 0.0)
for variable in self.variable_list_vector:
node.SetSolutionStepValue(variable, Vector([0.0, 0.0, 0.0]))
for node in self.model_part_destination.Nodes:
for variable in self.variable_list_scalar:
node.SetSolutionStepValue(variable, 0.0)
for variable in self.variable_list_vector:
node.SetSolutionStepValue(variable, Vector([0.0, 0.0, 0.0]))
def SetValuesOnNodes(self, model_part, variable, value):
for node in model_part.Nodes:
if (self.parallel_execution):
if (node.GetSolutionStepValue(PARTITION_INDEX) == mpi.rank):
self.SetValuesOnNodesExec(node, variable, value)
else:
self.SetValuesOnNodesExec(node, variable, value)
    def SetValuesOnNodesExec(self, node, variable, value):
        # Plain write of the nodal solution-step value (scalar or Vector).
        node.SetSolutionStepValue(variable, value)
def SetValuesOnNodesPrescribed(self, model_part, variable, nodal_values):
for node in model_part.Nodes:
if (self.parallel_execution):
if (node.GetSolutionStepValue(PARTITION_INDEX) == mpi.rank):
self.SetValuesOnNodesPrescribedExec(node, variable, nodal_values)
else:
self.SetValuesOnNodesPrescribedExec(node, variable, nodal_values)
def SetValuesOnNodesPrescribedExec(self, node, variable, nodal_values):
nodal_coords = (node.X, node.Y, node.Z)
value_to_prescribe = nodal_values[nodal_coords]
if isinstance(value_to_prescribe, tuple):
value_to_prescribe = Vector(list(value_to_prescribe))
node.SetSolutionStepValue(variable, value_to_prescribe)
def CheckValues(self, model_part, variable, value_mapped):
for node in model_part.Nodes:
if (self.parallel_execution):
if (node.GetSolutionStepValue(PARTITION_INDEX) == mpi.rank):
self.CheckValuesExec(node, variable, value_mapped)
else:
self.CheckValuesExec(node, variable, value_mapped)
def CheckValuesExec(self, node, variable, value_mapped):
value_expected = node.GetSolutionStepValue(variable)
self.assertAlmostEqualCustom(value_mapped,value_expected)
def CheckValuesPrescribed(self, model_part, variable, nodal_values):
for node in model_part.Nodes:
if (self.parallel_execution):
if (node.GetSolutionStepValue(PARTITION_INDEX) == mpi.rank):
self.CheckValuesPrescribedExec(node, variable, nodal_values)
else:
self.CheckValuesPrescribedExec(node, variable, nodal_values)
def CheckValuesPrescribedExec(self, node, variable, nodal_values):
value_mapped = node.GetSolutionStepValue(variable)
nodal_coords = (node.X, node.Y, node.Z)
value_expected = nodal_values[nodal_coords]
self.assertAlmostEqualCustom(value_mapped,value_expected)
def assertAlmostEqualCustom(self, value_mapped, value_expected):
if (isinstance(value_mapped, float) or isinstance(value_mapped, int)): # Variable is a scalar
self.assertAlmostEqual(value_mapped,value_expected,4)
else: # Variable is a vector
for i in range(0,3):
self.assertAlmostEqual(value_mapped[i],value_expected[i],4)
##### IO related Functions #####
    def partition_and_read_model_part(self, current_model, model_part_name,
                                      model_part_input_file,
                                      size_domain, variable_list,
                                      number_of_partitions):
        """Create a model part, partition the mdpa with Metis (rank 0 only)
        when running on several ranks, and read this rank's partition.

        :param current_model: Kratos Model to create the model part in
        :param model_part_name: name of the new model part
        :param model_part_input_file: mdpa file path (without extension)
        :param size_domain: spatial dimension, stored as DOMAIN_SIZE
        :param variable_list: nodal solution-step variables to allocate
        :param number_of_partitions: ranks to partition for (1 == serial)
        :return: the read (and, under MPI, distributed) model part
        """
        model_part = current_model.CreateModelPart(model_part_name)
        # Historical variables must be registered before the mdpa is read.
        for variable in variable_list:
            model_part.AddNodalSolutionStepVariable(variable)
        if (number_of_partitions > 1):
            if (mpi.size > 1):
                if (mpi.rank == 0):
                    # Only rank 0 partitions; the other ranks wait at the barrier.
                    model_part_io = ReorderConsecutiveModelPartIO(model_part_input_file)
                    partitioner = MetisDivideHeterogeneousInputProcess(
                        model_part_io,
                        number_of_partitions,
                        size_domain,
                        0, # verbosity, set to 1 for more detailed output
                        True)
                    partitioner.Execute()
                mpi.world.barrier()
                # Each rank reads the partition file Metis wrote for it.
                model_part_input_file = model_part_input_file + "_" + str(mpi.rank)
        model_part_io = ModelPartIO(model_part_input_file)
        model_part_io.ReadModelPart(model_part)
        if (number_of_partitions > 1):
            # Wire up the MPI communicator and fill the ghost-node
            # communication lists on the root model part.
            MPICommSetup = SetMPICommunicatorProcess(model_part)
            MPICommSetup.Execute()
            ParallelFillComm = ParallelFillCommunicator(model_part.GetRootModelPart())
            ParallelFillComm.Execute()
        model_part.ProcessInfo.SetValue(DOMAIN_SIZE, size_domain)
        model_part.SetBufferSize(1)
        return model_part
def InitializeGiD(self, file_name):
# Initialize GidIO
output_file_origin = "MapperTests_gid_output/output_" + file_name + "_origin"
output_file_destination = "MapperTests_gid_output/output_" + file_name + "_destination"
if (self.parallel_execution):
output_file_origin += "_r" + str(mpi.rank)
output_file_destination += "_r" + str(mpi.rank)
gid_mode = GiDPostMode.GiD_PostAscii
multifile = MultiFileFlag.MultipleFiles
deformed_mesh_flag = WriteDeformedMeshFlag.WriteUndeformed
write_conditions = WriteConditionsFlag.WriteConditions
self.gid_io_origin = GidIO(output_file_origin, gid_mode, multifile,
deformed_mesh_flag, write_conditions)
self.gid_io_destination = GidIO(output_file_destination, gid_mode, multifile,
deformed_mesh_flag, write_conditions)
# Initialize Results Output
self.gid_io_origin.InitializeResults(0, self.model_part_origin.GetMesh())
self.gid_io_destination.InitializeResults( 0, self.model_part_destination.GetMesh())
# Print original meshes
self.write_mesh(self.model_part_origin, self.gid_io_origin)
self.write_mesh(self.model_part_destination, self.gid_io_destination)
def WriteNodalResultsCustom(self, gid_io, model_part, variable, | |
<gh_stars>10-100
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Flax XGLM model."""
import math
import random
from functools import partial
from typing import Optional, Tuple
import numpy as np
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from jax import lax
from jax.random import PRNGKey
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_flax_outputs import (
FlaxBaseModelOutputWithPastAndCrossAttentions,
FlaxCausalLMOutputWithCrossAttentions,
)
from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
from ...utils import logging
from .configuration_xglm import XGLMConfig
logger = logging.get_logger(__name__)

# Names interpolated into the auto-generated usage docstrings for this model.
_CHECKPOINT_FOR_DOC = "facebook/xglm-564M"
_CONFIG_FOR_DOC = "XGLMConfig"
_TOKENIZER_FOR_DOC = "XGLMTokenizer"
XGLM_START_DOCSTRING = r"""
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a Flax Linen
[flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
config ([`XGLMConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
`jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified all the computation will be performed with the given `dtype`.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
[`~FlaxPreTrainedModel.to_bf16`].
"""
XGLM_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`~XGLMTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
def create_sinusoidal_positions(n_pos, dim, padding_idx=1):
    """Build an (n_pos, dim) sinusoidal position-embedding table.

    The first half of each row holds sines, the second half cosines, over
    geometrically spaced frequencies. The row at `padding_idx` (if not None)
    is zeroed so padding positions carry no positional signal.
    """
    half_dim = dim // 2
    # Geometric frequency spacing, as in "Attention Is All You Need".
    freq_scale = math.log(10000) / (half_dim - 1)
    inv_freq = np.exp(np.arange(half_dim) * -freq_scale)
    angles = np.expand_dims(np.arange(n_pos), 1) * np.expand_dims(inv_freq, 0)
    table = np.concatenate([np.sin(angles), np.cos(angles)], 1)
    table = np.reshape(table, (n_pos, dim))
    if padding_idx is not None:
        table[padding_idx, :] = 0
    return jnp.array(table)
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """
    Shift input ids one token to the right.

    Prepends `decoder_start_token_id`, drops the last token, and replaces any
    -100 label-ignore values with `pad_token_id`.
    """
    shifted_input_ids = jnp.roll(input_ids, 1, axis=-1)
    # `jax.ops.index_update` was removed from JAX; the `.at[...].set(...)`
    # indexed-update API is the supported equivalent.
    shifted_input_ids = shifted_input_ids.at[..., 0].set(decoder_start_token_id)
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
class FlaxXGLMAttention(nn.Module):
    """Multi-head attention for Flax XGLM.

    Serves as (optionally causal) self-attention, or as cross-attention when
    `key_value_states` is passed to `__call__`. When `causal` is set, an
    autoregressive key/value cache can be used for fast decoding.
    """

    config: XGLMConfig
    embed_dim: int
    num_heads: int
    dropout: float = 0.0
    causal: bool = False
    bias: bool = True
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self) -> None:
        self.head_dim = self.embed_dim // self.num_heads

        if self.head_dim * self.num_heads != self.embed_dim:
            # Bug fix: the second string was missing the f-prefix, so the
            # literal text "{self.num_heads}" appeared in the error message.
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} "
                f"and `num_heads`: {self.num_heads})."
            )

        dense = partial(
            nn.Dense,
            self.embed_dim,
            use_bias=self.bias,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )

        self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
        self.out_proj = dense()
        self.dropout_layer = nn.Dropout(rate=self.dropout)

        if self.causal:
            # Boolean lower-triangular mask over the maximum sequence length.
            self.causal_mask = make_causal_mask(
                jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
            )

    def _split_heads(self, hidden_states):
        # (batch, seq, embed) -> (batch, seq, num_heads, head_dim)
        return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))

    def _merge_heads(self, hidden_states):
        # (batch, seq, num_heads, head_dim) -> (batch, seq, embed)
        return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))

    @nn.compact
    def _concatenate_to_cache(self, key, value, query, attention_mask):
        """
        This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slighly adapted from the official Flax repository:
        https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
        """
        # detect if we're initializing by absence of existing cache data.
        is_initialized = self.has_variable("cache", "cached_key")
        cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
        cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
        cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))

        if is_initialized:
            *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
            # update key, value caches with our new 1d spatial slices
            cur_index = cache_index.value
            indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
            key = lax.dynamic_update_slice(cached_key.value, key, indices)
            value = lax.dynamic_update_slice(cached_value.value, value, indices)
            cached_key.value = key
            cached_value.value = value
            num_updated_cache_vectors = query.shape[1]
            cache_index.value = cache_index.value + num_updated_cache_vectors
            # causal mask for cached decoder self-attention: our single query position should only attend
            # to those key positions that have already been generated and cached, not the remaining zero elements.
            pad_mask = jnp.broadcast_to(
                jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
                tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
            )
            attention_mask = combine_masks(pad_mask, attention_mask)
        return key, value, attention_mask

    def __call__(
        self,
        hidden_states: jnp.ndarray,
        key_value_states: Optional[jnp.ndarray] = None,
        attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        batch_size = hidden_states.shape[0]

        # get query proj
        query_states = self.q_proj(hidden_states)
        # get key, value proj
        if is_cross_attention:
            # cross_attentions
            key_states = self.k_proj(key_value_states)
            value_states = self.v_proj(key_value_states)
        else:
            # self_attention
            key_states = self.k_proj(hidden_states)
            value_states = self.v_proj(hidden_states)

        query_states = self._split_heads(query_states)
        key_states = self._split_heads(key_states)
        value_states = self._split_heads(value_states)

        # handle cache prepare causal attention mask
        if self.causal:
            query_length, key_length = query_states.shape[1], key_states.shape[1]
            if self.has_variable("cache", "cached_key"):
                # While decoding with a cache, offset the causal window by the
                # number of positions already generated.
                mask_shift = self.variables["cache"]["cache_index"]
                max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
                causal_mask = lax.dynamic_slice(
                    self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
                )
            else:
                causal_mask = self.causal_mask[:, :, :query_length, :key_length]
            causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])

        # combine masks if needed
        if attention_mask is not None and self.causal:
            attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
            attention_mask = combine_masks(attention_mask, causal_mask)
        elif self.causal:
            attention_mask = causal_mask
        elif attention_mask is not None:
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))

        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
            key_states, value_states, attention_mask = self._concatenate_to_cache(
                key_states, value_states, query_states, attention_mask
            )

        # Convert the boolean attention mask to an attention bias.
        if attention_mask is not None:
            # attention mask in the form of attention bias
            attention_bias = lax.select(
                attention_mask > 0,
                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
                jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
            )
        else:
            attention_bias = None

        dropout_rng = None
        if not deterministic and self.dropout > 0.0:
            dropout_rng = self.make_rng("dropout")

        attn_weights = dot_product_attention_weights(
            query_states,
            key_states,
            bias=attention_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.dropout,
            broadcast_dropout=True,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
        )

        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
        attn_output = self._merge_heads(attn_output)
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights
class FlaxXGLMDecoderLayer(nn.Module):
config: XGLMConfig
dtype: jnp.dtype = jnp.float32
def setup(self) -> None:
self.embed_dim = self.config.d_model
self.self_attn = FlaxXGLMAttention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.attention_heads,
dropout=self.config.attention_dropout,
causal=True,
dtype=self.dtype,
)
self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
self.activation_fn = ACT2FN[self.config.activation_function]
self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
if self.config.add_cross_attention:
self.encoder_attn = FlaxXGLMAttention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
dtype=self.dtype,
)
self.encoder_attn_layer_norm = | |
# <reponame>ooblog/sensortray
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division,print_function,absolute_import,unicode_literals
import sys
import os
import subprocess
import codecs
import ctypes
import struct
import uuid
import datetime
import math
from LTsv_file import *
from LTsv_printf import *
LTsv_Tkinter=True
try:
import tkinter as Tk
import tkinter.scrolledtext as Tk_sc
import tkinter.filedialog as Tk_fd
# import messagebox as Tk_mb
except:
LTsv_Tkinter=False
# Handles to dynamically loaded GTK2 shared libraries (POSIX backend).
LTsv_libgtk,LTsv_libgdk,LTsv_libobj=None,None,None
# Handles to Windows system DLLs (WinAPI backend).
LTsv_user32,LTsv_shell32,LTsv_kernel32,LTsv_gdi32=None,None,None,None
# Backend identifiers; LTsv_GUI / LTsv_Notify hold the selected backends.
LTsv_GUI_ERROR,LTsv_GUI_GTK2,LTsv_GUI_Tkinter,LTsv_GUI_WinAPI="","GTK2","Tkinter","WinAPI"
LTsv_GUI,LTsv_Notify=LTsv_GUI_ERROR,LTsv_GUI_ERROR
#LTsv_CALLBACLTYPE=ctypes.CFUNCTYPE(ctypes.c_void_p,ctypes.POINTER(ctypes.c_ulong))
#LTsv_CALLBACLTYPE=ctypes.CFUNCTYPE(ctypes.c_bool,ctypes.c_void_p)
#LTsv_CALLBACLTYPE=ctypes.CFUNCTYPE(ctypes.c_void_p,ctypes.c_int)
# Generic C callback signature used for GTK signal handlers.
LTsv_CALLBACLTYPE=ctypes.CFUNCTYPE(ctypes.c_void_p,ctypes.c_void_p)
# LTsv-format store holding one "page" of key/value metadata per widget.
LTsv_widgetLTSV=LTsv_newfile("LTsv_gui",LTsv_default=None)
# Registry of native widget objects keyed by a numeric id (stored as str).
LTsv_widgetOBJ={}; LTsv_widgetOBJcount=0
LTsv_timerOBJ={}; LTsv_timer_cbk={}
# Last mouse-motion coordinates and the canvas ("Z") they belong to.
LTsv_canvas_motion_X,LTsv_canvas_motion_Y,LTsv_canvas_motion_Z=0,0,""
canvas_EMLenter,canvas_EMLmotion,canvas_EMLleave={},{},{}
canvas_CBKenter,canvas_CBKmotion,canvas_CBKleave,canvas_CBKtimeout,canvas_CBKafter,LTsv_canvasCBKpagename={},{},{},{},{},{}
# Loaded picture objects and their pixel dimensions, keyed by id.
LTsv_pictureOBJ,LTsv_pictureW,LTsv_pictureH={},{},{}
LTsv_iconOBJ={}; LTsv_iconOBJnotify=[]
LTsv_popupmenuOBJ={}
LTsv_default_iconuri=""
def LTsv_guiCDLLver(LTsv_libname,LTsv_libvermin,LTsv_libvermax):
	"""Load the first available version of a shared library.

	Each '?' in LTsv_libname is replaced with a version number from the
	range [min, max); the bounds are swapped if given in reverse order,
	and an equal min/max still probes that single version.
	Returns the loaded ctypes.CDLL, or None when no version loads.
	"""
	LTsv_min,LTsv_max=(LTsv_libvermin,LTsv_libvermax) if LTsv_libvermin <= LTsv_libvermax else (LTsv_libvermax,LTsv_libvermin)
	if LTsv_min == LTsv_max:
		LTsv_max+=1
	LTsv_CDLL=None
	for LTsv_libver in range(LTsv_min,LTsv_max):
		# Bug fix: ctypes.CDLL raises OSError for a missing library instead
		# of returning None, which aborted the probe loop on the first miss.
		try:
			LTsv_CDLL=ctypes.CDLL(LTsv_libname.replace('?',str(LTsv_libver)))
		except OSError:
			LTsv_CDLL=None
		if LTsv_CDLL != None:
			break
	return LTsv_CDLL
def LTsv_guiinit(LTsv_guistyle=LTsv_GUI_GTK2,LTsv_libvermin=0,LTsv_libvermax=0):
	"""Initialise the GUI backend and return the backend actually selected.

	Tries the requested backend (GTK2 by default); if the GTK shared
	libraries cannot be loaded, falls back to Tkinter. Sets the module
	globals LTsv_GUI (widget backend), LTsv_Notify (tray/notify backend)
	and LTsv_default_iconuri as side effects.
	"""
	global LTsv_GUI,LTsv_Notify,LTsv_default_iconuri
	global LTsv_libgtk,LTsv_libgdk,LTsv_libobj,LTsv_user32,LTsv_shell32,LTsv_kernel32,LTsv_gdi32
	LTsv_GUI=LTsv_guistyle
	if LTsv_GUI == LTsv_GUI_GTK2:
		LTsv_Notify=LTsv_GUI_GTK2; LTsv_default_iconuri="/usr/share/pixmaps/python.xpm"
		if sys.platform.startswith("linux"): #"/usr/lib/libgtk-x11-2.0.so.0"
			LTsv_libgtk=LTsv_guiCDLLver("libgtk-x11-2.0.so.?",LTsv_libvermin,LTsv_libvermax)
			# Declare the C return type so ctypes does not truncate the double.
			LTsv_libgtk.gtk_range_get_value.restype=ctypes.c_double
			LTsv_libgdk=LTsv_guiCDLLver("libgdk-x11-2.0.so.?",LTsv_libvermin,LTsv_libvermax)
			LTsv_libobj=LTsv_guiCDLLver("libgobject-2.0.so.?",LTsv_libvermin,LTsv_libvermax)
			LTsv_libobj.g_timeout_add.restype=ctypes.c_uint
#		if sys.platform.startswith("cygwin"):
#			LTsv_libgtk=LTsv_guiCDLLver("cyggtk-x11-2.0-?.dll",0,10)
#			LTsv_libgdk=LTsv_guiCDLLver("cyggdk-x11-2.0-?.dll",0,10)
#			LTsv_libobj=LTsv_guiCDLLver("cyggobject-2.0-?.dll",0,10)
#		if sys.platform.startswith("darwin"):
#			LTsv_libgtk=ctypes.CDLL("/opt/local/lib/libgtk-x11-2.0.0.dylib")#"/Library/Frameworks/Gtk.framework/Libraries/libgtk-quartz-2.0.0.dylib"
#			LTsv_libgdk=ctypes.CDLL("/opt/local/lib/libgdk-x11-2.0.0.dylib")#"/Library/Frameworks/Gtk.framework/Libraries/libgdk-quartz-2.0.0.dylib"
#			LTsv_libobj=ctypes.CDLL("/opt/local/lib/libgobject-2.0.0.dylib")#"/Library/Frameworks/Glib.framework/Libraries/libgobject-2.0.0.dylib"
		if LTsv_libgtk == None or LTsv_libgdk == None or LTsv_libobj == None:
#			if sys.platform.startswith("win"):
#				LTsv_GUI=LTsv_GUI_WinAPI
			# GTK2 unavailable: fall back to the Tkinter backend.
			LTsv_GUI=LTsv_GUI_Tkinter
		else:
			LTsv_libgtk.gtk_init(0,0)
	if LTsv_GUI == LTsv_GUI_WinAPI or LTsv_GUI == LTsv_GUI_Tkinter:
		if sys.platform.startswith("win"):
			# On Windows the tray/notification side always goes through WinAPI.
			LTsv_Notify=LTsv_GUI_WinAPI; LTsv_default_iconuri=sys.executable
			LTsv_shell32=ctypes.windll.shell32
			LTsv_user32=ctypes.windll.user32
			LTsv_kernel32=ctypes.windll.kernel32
			LTsv_gdi32=ctypes.windll.gdi32
		elif sys.platform.startswith("linux"):
			pass
		else:
			# Unsupported platform for these backends.
			LTsv_GUI,LTsv_Notify=LTsv_GUI_ERROR,LTsv_GUI_ERROR; LTsv_default_iconuri=""
	if not LTsv_GUI in [LTsv_GUI_ERROR,LTsv_GUI_GTK2,LTsv_GUI_Tkinter,LTsv_GUI_WinAPI]: LTsv_GUI=LTsv_GUI_ERROR
	return LTsv_GUI
# Read-only accessors for the module-level backend state.
def LTsv_global_GUI():
	"""Return the active widget backend identifier."""
	return LTsv_GUI

def LTsv_global_Notify():
	"""Return the active notification backend identifier."""
	return LTsv_Notify

def LTsv_global_GTK2():
	"""Return the GTK2 backend constant."""
	return LTsv_GUI_GTK2

def LTsv_global_Tkinter():
	"""Return the Tkinter backend constant."""
	return LTsv_GUI_Tkinter

def LTsv_global_WinAPI():
	"""Return the WinAPI backend constant."""
	return LTsv_GUI_WinAPI

def LTsv_global_libgtk():
	"""Return the loaded GTK library handle (or None)."""
	return LTsv_libgtk

def LTsv_global_libgdk():
	"""Return the loaded GDK library handle (or None)."""
	return LTsv_libgdk

def LTsv_global_libobj():
	"""Return the loaded GObject library handle (or None)."""
	return LTsv_libobj

def LTsv_global_canvasmotionZ():
	"""Return the identifier of the canvas that saw the last mouse motion."""
	return LTsv_canvas_motion_Z
def LTsv_global_canvasmotionX(motionZ=None):
	"""Return the last mouse X on a canvas; -1 when motionZ names another canvas."""
	global LTsv_canvas_motion_X
	if motionZ != None and motionZ != LTsv_canvas_motion_Z:
		LTsv_canvas_motion_X=-1
	return LTsv_canvas_motion_X

def LTsv_global_canvasmotionY(motionZ=None):
	"""Return the last mouse Y on a canvas; -1 when motionZ names another canvas."""
	global LTsv_canvas_motion_Y
	if motionZ != None and motionZ != LTsv_canvas_motion_Z:
		LTsv_canvas_motion_Y=-1
	return LTsv_canvas_motion_Y
def LTsv_global_canvascolor():
	"""Return the current canvas foreground color."""
	return LTsv_canvascolor

def LTsv_global_canvasbgcolor():
	"""Return the current canvas background color."""
	return LTsv_canvasbgcolor

def LTsv_global_canvasTAG(TkinterTAG=None):
	"""Get, and optionally set, the Tkinter canvas item tag."""
	global LTsv_Tkintercanvas_TAG
	if TkinterTAG != None:
		LTsv_Tkintercanvas_TAG=TkinterTAG
	return LTsv_Tkintercanvas_TAG

def LTsv_global_widgetltsv(new_LTSV=None):
	"""Get, and optionally replace, the whole widget LTsv store."""
	global LTsv_widgetLTSV
	if new_LTSV != None:
		LTsv_widgetLTSV=new_LTSV
	return LTsv_widgetLTSV

def LTsv_global_widgetgetpage(LTsv_widgetPAGENAME):
	"""Return the LTsv page stored under the given page name."""
	return LTsv_getpage(LTsv_widgetLTSV,LTsv_widgetPAGENAME)

def LTsv_global_widgetOBJ(LTsv_objid):
	"""Return the native widget object registered under an id."""
	return LTsv_widgetOBJ[LTsv_objid]

def LTsv_global_pictureOBJ(LTsv_objid):
	"""Return the picture object registered under an id."""
	return LTsv_pictureOBJ[LTsv_objid]

def LTsv_global_pictureW(LTsv_objid):
	"""Return the registered picture's width."""
	return LTsv_pictureW[LTsv_objid]

def LTsv_global_pictureH(LTsv_objid):
	"""Return the registered picture's height."""
	return LTsv_pictureH[LTsv_objid]

def LTsv_global_iconOBJ(LTsv_objid):
	"""Return the tray-icon object registered under an id."""
	return LTsv_iconOBJ[LTsv_objid]

def LTsv_global_popupmenuOBJ(LTsv_objid):
	"""Return the popup-menu object registered under an id."""
	return LTsv_popupmenuOBJ[LTsv_objid]
def LTsv_widget_newUUID(LTsv_widgetID=None):
	"""Return a fresh unique widget id ("<uuid4hex>+<timestamp>").

	When LTsv_widgetID is False, return the previously generated id
	instead of creating a new one.
	"""
	global LTsv_widget_oldID
	if LTsv_widgetID == False:
		return LTsv_widget_oldID
	LTsv_uuid="{0}+{1}".format(uuid.uuid4().hex,time.time())
	LTsv_widget_oldID=LTsv_uuid
	return LTsv_uuid
LTsv_widget_oldID=LTsv_widget_newUUID()
def LTsv_widget_newobj(LTsv_widgetPAGE,LTsv_widgetoption,widget_obj):
	"""Register widget_obj under a new numeric id and record that id on the page."""
	global LTsv_widgetOBJ,LTsv_widgetOBJcount
	LTsv_objid=str(LTsv_widgetOBJcount)
	LTsv_widgetOBJ[LTsv_objid]=widget_obj
	LTsv_widgetOBJcount+=1
	return LTsv_pushlinerest(LTsv_widgetPAGE,LTsv_widgetoption,LTsv_objid)
def LTsv_widget_getobj(LTsv_widgetPAGE,LTsv_widgetoption):
	"""Return the registered object whose id is stored on the page, or None."""
	LTsv_objid=LTsv_readlinerest(LTsv_widgetPAGE,LTsv_widgetoption)
	return LTsv_widgetOBJ.get(LTsv_objid)
def LTsv_widgetPAGEXYWH(LTsv_widgetPAGE,widget_o=None,widget_k=None,widget_t=None,widget_u=None,widget_s=None,widget_e=None,widget_a=None,widget_v=None,widget_b=None, \
		widget_p=None,widget_m=None,widget_g=None,widget_f=None,widget_x=None,widget_y=None,widget_w=None,widget_h=None,widget_c=None, \
		event_z=None,event_k=None,event_y=None,event_b=None,event_p=None,event_r=None,event_e=None,event_m=None,event_l=None,event_a=None,event_u=None, \
		menu_o=None,menu_b=None,menu_c=None,dialog_t=None,dialog_c=None):
	"""Record widget attributes on an LTsv page and return the updated page.

	Every non-None argument is stored under a fixed key: Python objects
	(widgets, callbacks, menus, images) go through LTsv_widget_newobj and
	are kept in the LTsv_widgetOBJ registry by id; plain values are
	stringified via LTsv_pushlinerest. Argument-name suffixes follow a
	convention: o=object, k=kind, t=text, u=uri, x/y/w/h=geometry,
	event_*=callbacks, menu_*=popup menus, dialog_*=dialog settings.
	"""
	if widget_o != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"widgetobj",widget_o)
	if widget_k != None: LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,"widgetkind",widget_k)
	if widget_t != None: LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,"widgettext",widget_t)
	if widget_u != None: LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,"widgeturi",widget_u)
	if widget_s != None: LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,"widgetstart",str(widget_s))
	if widget_e != None: LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,"widgetend",str(widget_e))
	if widget_a != None: LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,"widgetadd",str(widget_a))
	if widget_v != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"widgetstringvar",widget_v)
	if widget_b != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"widgetbooleanvar",widget_b)
	if widget_p != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"widgetphotoimage",widget_p)
	if widget_m != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"widgetpixmap",widget_m)
	if widget_g != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"widgetgc",widget_g)
	if widget_f != None: LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,"widgetfont",widget_f)
	if widget_x != None: LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,"widgetsizeX",str(widget_x))
	if widget_y != None: LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,"widgetsizeY",str(widget_y))
	if widget_w != None: LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,"widgetsizeW",str(widget_w))
	if widget_h != None: LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,"widgetsizeH",str(widget_h))
	if widget_c != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"widgetcontainer",widget_c)
	if event_z != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"widgetresize",event_z)
	if event_k != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"keyboard_press",event_k)
	if event_y != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"keyboard_release",event_y)
	if event_b != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"widgetcallback",event_b)
	if event_p != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"mouse_press",event_p)
	if event_r != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"mouse_release",event_r)
	if event_e != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"mouse_enter",event_e)
	if event_m != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"mouse_motion",event_m)
	if event_l != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"mouse_leave",event_l)
	if event_a != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"notify_activate",event_a)
	if event_u != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"notify_popupmenu",event_u)
	if menu_o != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"popupmenuobj",menu_o)
	if menu_b != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"popupmenulist",menu_b)
	if menu_c != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"popupmenuclick",menu_c)
	if dialog_t != None: LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,"dialog_type",str(dialog_t))
	if dialog_c != None: LTsv_widgetPAGE=LTsv_widget_newobj(LTsv_widgetPAGE,"dialog_close",dialog_c)
	return LTsv_widgetPAGE
def LTsv_fonttuple(LTsv_line):
	"""Parse a font spec line into a tuple of at most three fields.

	Newlines and tabs are treated as field separators; at most the first
	three fields (family, size, style) are kept. Returns None when
	LTsv_line is None.
	"""
	if LTsv_line == None:
		return None
	LTsv_fontopts=LTsv_line.replace('\n','\t').replace('\t',',').strip(',').split(',')
	return tuple(LTsv_fontopts[:3])
def LTsv_GTKwidget_fixed(window_c,widget_o,widget_x,widget_y,widget_w,widget_h,widget_f=None,widget_d=False):
	"""Place a GTK widget at a fixed position and size, optionally setting its font.

	window_c is the GtkFixed container; widget_f is a Pango font
	description string. When widget_d is True the font is applied to the
	widget's bin child (presumably the label inside a button-like widget)
	rather than the widget itself.
	"""
	LTsv_libgtk.gtk_widget_set_size_request(widget_o,widget_w,widget_h)
	LTsv_libgtk.gtk_fixed_put(window_c,widget_o,widget_x,widget_y)
	if widget_f != None:
		LTsv_fontDesc=LTsv_libgtk.pango_font_description_from_string(widget_f.encode("utf-8"))
		if widget_d:
			LTsv_libgtk.gtk_widget_modify_font(LTsv_libgtk.gtk_bin_get_child(widget_o),LTsv_fontDesc)
		else:
			LTsv_libgtk.gtk_widget_modify_font(widget_o,LTsv_fontDesc)
		# Free the PangoFontDescription once applied to avoid leaking it.
		LTsv_libgtk.pango_font_description_free(LTsv_fontDesc)
def LTsv_hideondelete_shell(LTsv_windowPAGENAME=None):
	"""Return a backend-appropriate "hide window instead of destroying it" handler."""
	def gtk_hideondelete_kernel(window_objvoid=None,window_objptr=None):
		# NOTE(review): this inner function appears unused — the GTK branch
		# below returns gtk_widget_hide_on_delete directly, and the bare
		# attribute access on the next line has no effect (it is not called).
		LTsv_libgtk.gtk_widget_hide_on_delete
		return 0
	def tkinter_hideondelete_kernel(window_objvoid=None,window_objptr=None):
		global LTsv_widgetLTSV
		# Withdraw (hide) the Tk toplevel recorded on the window's page
		# instead of destroying it.
		LTsv_windowPAGE=LTsv_getpage(LTsv_widgetLTSV,LTsv_windowPAGENAME)
		widget_o=LTsv_widgetOBJ[LTsv_readlinerest(LTsv_windowPAGE,"widgetobj")]
		widget_o.withdraw()
		return 0
	if LTsv_GUI == LTsv_GUI_GTK2: return LTsv_libgtk.gtk_widget_hide_on_delete
	if LTsv_GUI == LTsv_GUI_Tkinter: return tkinter_hideondelete_kernel
	return None
# GTK enum values, hard-coded so no GTK header bindings are required.
LTsv_GTK_WINDOW_TOPLEVEL=0
LTsv_GTK_WIN_POS_CENTER=1
class LTsv_GdkEventKey(ctypes.Structure):
	"""Partial ctypes mirror of GTK's GdkEventKey event struct.

	Only the leading fields up to `keyval` are declared — enough to read
	the key value from key-press/release callbacks. NOTE(review): field
	layout assumed to match the platform GdkEventKey ABI; verify against
	the installed GTK2 headers.
	"""
	_fields_ = [
		('type',ctypes.c_int),
		('window',ctypes.c_void_p),
		('send_event',ctypes.c_ubyte),
		('time',ctypes.c_uint),
		('state',ctypes.c_uint),
		('keyval',ctypes.c_uint),
	]
# Callback signature for key events: handler receives a GdkEventKey pointer.
LTsv_CALLBACLTYPE_GdkEventKey=ctypes.CFUNCTYPE(ctypes.c_void_p,ctypes.POINTER(LTsv_GdkEventKey))
def LTsv_window_new(widget_n=None,event_b=None,widget_t="LTsv_window",widget_w=200,widget_h=120,event_z=None,event_k=None,event_y=None):
	"""Create a top-level window and return its widget page name.

	widget_n: reuse the previous page id when False (see LTsv_widget_newUUID);
	event_b: close/delete handler; widget_t: title; widget_w/h: size;
	event_z: resize handler (also makes the window resizable);
	event_k/event_y: key press/release handlers.
	"""
	global LTsv_widgetLTSV
	LTsv_windowPAGENAME=LTsv_widget_newUUID(widget_n); LTsv_windowPAGE=""
	LTsv_windowPAGE=LTsv_widgetPAGEXYWH(LTsv_windowPAGE,widget_k="window",widget_t=widget_t,widget_w=widget_w,widget_h=widget_h)
	if LTsv_GUI == LTsv_GUI_GTK2:
		window_o=LTsv_libgtk.gtk_window_new(LTsv_GTK_WINDOW_TOPLEVEL)
		LTsv_libgtk.gtk_window_set_title(window_o,widget_t.encode("utf-8","xmlcharrefreplace"))
		LTsv_libgtk.gtk_widget_set_size_request(window_o,widget_w,widget_h)
		LTsv_libgtk.gtk_window_set_resizable(window_o,True if event_z !=None else False)
		LTsv_libgtk.gtk_window_set_position(window_o,LTsv_GTK_WIN_POS_CENTER)
		# A GtkFixed container fills the window; children are placed at
		# absolute coordinates via LTsv_GTKwidget_fixed.
		widget_c=LTsv_libgtk.gtk_fixed_new()
		LTsv_libgtk.gtk_container_add(window_o,widget_c)
#		event_b_cbk=LTsv_CALLBACLTYPE(event_b) if event_b != None else LTsv_libgtk.gtk_widget_hide_on_delete
#		event_b_cbk=LTsv_CALLBACLTYPE(event_b) if event_b != None else LTsv_hideondelete_shell(LTsv_windowPAGENAME)
		# Keep a reference to every ctypes callback on the page so it is not
		# garbage-collected while GTK still holds the signal connection.
		event_b_cbk=LTsv_CALLBACLTYPE(event_b) if event_b != None else LTsv_window_exit_cbk
		LTsv_libobj.g_signal_connect_data(window_o,"delete-event".encode("utf-8"),event_b_cbk,0,0,0)
		event_z_cbk,event_k_cbk,event_y_cbk=None,None,None
		if event_z:
			event_z_cbk=LTsv_CALLBACLTYPE(event_z)
			LTsv_libobj.g_signal_connect_data(window_o,"configure-event".encode("utf-8"),event_z_cbk,0,0,0)
		if event_k:
#			event_k_cbk=LTsv_CALLBACLTYPE(event_k)
			event_k_cbk=LTsv_CALLBACLTYPE_GdkEventKey(event_k)
			LTsv_libobj.g_signal_connect_data(window_o,"key-press-event".encode("utf-8"),event_k_cbk,0,0,0)
		if event_y:
#			event_y_cbk=LTsv_CALLBACLTYPE(event_y)
			event_y_cbk=LTsv_CALLBACLTYPE_GdkEventKey(event_y)
			LTsv_libobj.g_signal_connect_data(window_o,"key-release-event".encode("utf-8"),event_y_cbk,0,0,0)
		LTsv_windowPAGE=LTsv_widgetPAGEXYWH(LTsv_windowPAGE,widget_o=window_o,widget_t=widget_t,widget_c=widget_c,event_b=event_b_cbk,event_z=event_z_cbk,event_k=event_k_cbk,event_y=event_y_cbk)
	if LTsv_GUI == LTsv_GUI_Tkinter:
		window_o=Tk.Tk()
		window_o.title(widget_t)
		window_o.minsize(widget_w,widget_h)
		# Center the window on the (virtual root) screen.
		window_o.geometry("{0}x{1}+{2}+{3}".format(widget_w,widget_h,(window_o.winfo_vrootwidth()-widget_w)//2,(window_o.winfo_vrootheight()-widget_h)//2))
#		event_b_cbk=event_b if event_b != None else LTsv_hideondelete_shell(LTsv_windowPAGENAME)
#		window_o.protocol("WM_DELETE_WINDOW",event_b_cbk)
		if event_b != None:
			window_o.protocol("WM_DELETE_WINDOW",event_b)
		if event_z:
			window_o.maxsize(window_o.winfo_vrootwidth(),window_o.winfo_vrootheight())
			window_o.bind('<Configure>',event_z)
		else:
			# No resize handler: lock the window at its initial size.
			window_o.maxsize(widget_w,widget_h); window_o.resizable(0,0)
		if event_k:
			window_o.bind('<KeyPress>',event_k)
		if event_y:
			window_o.bind('<KeyRelease>',event_y)
		LTsv_windowPAGE=LTsv_widgetPAGEXYWH(LTsv_windowPAGE,widget_o=window_o,widget_t=widget_t,event_b=event_b,event_z=event_z,event_k=event_k,event_y=event_y)
	LTsv_widgetLTSV=LTsv_putpage(LTsv_widgetLTSV,LTsv_windowPAGENAME,LTsv_windowPAGE)
	return LTsv_windowPAGENAME
def LTsv_widget_settext(LTsv_widgetPAGENAME,widget_t=""):
	"""Set the text/title/value of the widget stored under the given page name.

	Dispatches on the page's "widgetkind" and on the active backend
	(GTK2 or Tkinter). The new text is also written back to the page so
	LTsv_widget_gettext can return it for backends without a query API.
	"""
	global LTsv_widgetLTSV
	LTsv_widgetPAGE=LTsv_getpage(LTsv_widgetLTSV,LTsv_widgetPAGENAME)
	widget_o=LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetobj")]
	widget_k=LTsv_readlinerest(LTsv_widgetPAGE,"widgetkind")
	widget_v=None
	if widget_k == "window":
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_window_set_title(widget_o,widget_t.encode("utf-8","xmlcharrefreplace"))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_o.title(widget_t)
		LTsv_widgetPAGE=LTsv_widgetPAGEXYWH(LTsv_widgetPAGE,widget_t=widget_t)
	if widget_k == "label":
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_label_set_text(widget_o,widget_t.encode("utf-8","xmlcharrefreplace"))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_v=LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetstringvar")]; widget_v.set(widget_t)
		LTsv_widgetPAGE=LTsv_widgetPAGEXYWH(LTsv_widgetPAGE,widget_t=widget_t)
	if widget_k == "button":
		# GTK buttons hold their label as a bin child widget.
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_label_set_text(LTsv_libgtk.gtk_bin_get_child(widget_o),widget_t.encode("utf-8","xmlcharrefreplace"))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_v=LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetstringvar")]; widget_v.set(widget_t)
		LTsv_widgetPAGE=LTsv_widgetPAGEXYWH(LTsv_widgetPAGE,widget_t=widget_t)
	if widget_k == "check":
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_label_set_text(LTsv_libgtk.gtk_bin_get_child(widget_o),widget_t.encode("utf-8","xmlcharrefreplace"))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_v=LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetstringvar")]; widget_v.set(widget_t)
		LTsv_widgetPAGE=LTsv_widgetPAGEXYWH(LTsv_widgetPAGE,widget_t=widget_t)
	if widget_k == "radio":
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_label_set_text(LTsv_libgtk.gtk_bin_get_child(widget_o),widget_t.encode("utf-8","xmlcharrefreplace"))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_v=LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetstringvar")]; widget_v.set(widget_t)
		LTsv_widgetPAGE=LTsv_widgetPAGEXYWH(LTsv_widgetPAGE,widget_t=widget_t)
	if widget_k == "clipboard":
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_clipboard_set_text(widget_o,widget_t.encode("utf-8","xmlcharrefreplace"),-1)
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_o.clipboard_append(widget_t)
	if widget_k == "edit":
		if LTsv_GUI == LTsv_GUI_GTK2: widget_v=LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetstringvar")]; LTsv_libgtk.gtk_text_buffer_set_text(widget_v,widget_t.encode("utf-8","xmlcharrefreplace"),-1)
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_o.delete(1.0,Tk.END); widget_o.insert(1.0,widget_t)
		LTsv_widgetPAGE=LTsv_widgetPAGEXYWH(LTsv_widgetPAGE)
	if widget_k == "entry":
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_entry_set_text(widget_o,widget_t.encode("utf-8","xmlcharrefreplace"))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_o.delete(0,Tk.END); widget_o.insert(0,widget_t)
		LTsv_widgetPAGE=LTsv_widgetPAGEXYWH(LTsv_widgetPAGE,widget_t=widget_t)
	if widget_k == "scale":
		# Numeric widgets accept the text as a number string.
		widget_s=int(float(widget_t))
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_range_set_value(widget_o,ctypes.c_double(widget_s))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_o.set(int(widget_s))
	if widget_k == "spin":
		widget_s=int(float(widget_t))
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_spin_button_set_value(widget_o,ctypes.c_double(int(float(widget_s))))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_o.delete(0,Tk.END); widget_o.insert(0,widget_t)
		LTsv_widgetPAGE=LTsv_widgetPAGEXYWH(LTsv_widgetPAGE,widget_t=widget_t)
	if widget_k == "notify":
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_status_icon_set_tooltip(widget_o,widget_t.encode("utf-8"))
		if LTsv_GUI == LTsv_GUI_Tkinter:
			# WinAPI tray tooltip (szTip) is limited to 64 characters.
			widget_o.szTip=widget_t[:64].encode("utf-8")
			LTsv_shell32.Shell_NotifyIcon(ctypes.c_ulong(LTsv_ICON_NIM_MODIFY),ctypes.pointer(widget_o))
		LTsv_widgetPAGE=LTsv_widgetPAGEXYWH(LTsv_widgetPAGE,widget_t=widget_t)
	if widget_k == "combobox":
		if LTsv_GUI == LTsv_GUI_GTK2:
			# Select the entry whose text matches; fall back to index 0.
			if str(widget_o) in LTsv_popupmenuOBJ:
				widget_combo=LTsv_popupmenuOBJ[str(widget_o)].split('\n')
				widget_s=widget_combo.index(widget_t) if widget_t in widget_combo else 0
				LTsv_libgtk.gtk_combo_box_set_active(widget_o,widget_s)
	if widget_k == "filedialog":
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_window_set_title(widget_o,widget_t.encode("utf-8","xmlcharrefreplace"))
		LTsv_widgetPAGE=LTsv_widgetPAGEXYWH(LTsv_widgetPAGE,widget_t=widget_t)
	LTsv_widgetLTSV=LTsv_putpage(LTsv_widgetLTSV,LTsv_widgetPAGENAME,LTsv_widgetPAGE)
class LTsv_TextIter(ctypes.Structure):
	"""Opaque stand-in for GTK's GtkTextIter.

	GtkTextIter is a stack-allocated struct whose fields are private; only
	its size matters so GTK can write into it. NOTE(review): the dummy
	field layout is assumed to match the platform GtkTextIter size —
	verify against the installed GTK2 headers.
	"""
	_fields_ = [
		('dummy1', ctypes.c_void_p),
		('dummy2', ctypes.c_void_p),
		('dummy3', ctypes.c_uint),
		('dummy4', ctypes.c_uint),
		('dummy5', ctypes.c_uint),
		('dummy6', ctypes.c_uint),
		('dummy7', ctypes.c_uint),
		('dummy8', ctypes.c_uint),
		('dummy9', ctypes.c_uint),
		('dummy10', ctypes.c_void_p),
		('dummy11', ctypes.c_void_p),
		('dummy12', ctypes.c_uint),
		('dummy13', ctypes.c_uint),
		('dummy14', ctypes.c_void_p),
	]
def LTsv_widget_gettext(LTsv_widgetPAGENAME):
	"""Return the current text/title/value of the widget on the given page.

	Dispatches on the page's "widgetkind" and the active backend. Numeric
	widgets (scale/spin) are returned as strings; kinds without a native
	query API fall back to the text cached on the page. Returns "" when
	nothing applies.
	"""
	global LTsv_widgetLTSV
	LTsv_widgetPAGE=LTsv_getpage(LTsv_widgetLTSV,LTsv_widgetPAGENAME)
	widget_o=LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetobj")]
	widget_k=LTsv_readlinerest(LTsv_widgetPAGE,"widgetkind")
	widget_t=""
	if widget_k == "window":
		if LTsv_GUI == LTsv_GUI_GTK2: widget_t=ctypes.c_char_p(LTsv_libgtk.gtk_window_get_title(widget_o)).value.decode("utf-8")
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_t=LTsv_readlinerest(LTsv_widgetPAGE,"widgettext")
	if widget_k == "label":
		if LTsv_GUI == LTsv_GUI_GTK2: widget_t=ctypes.c_char_p(LTsv_libgtk.gtk_label_get_text(widget_o)).value.decode("utf-8")
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_t=widget_o.cget("text")
	if widget_k == "button":
		# GTK buttons hold their label as a bin child widget.
		if LTsv_GUI == LTsv_GUI_GTK2: widget_t=ctypes.c_char_p(LTsv_libgtk.gtk_label_get_text(LTsv_libgtk.gtk_bin_get_child(widget_o))).value.decode("utf-8")
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_t=widget_o.cget("text")
	if widget_k == "check":
		if LTsv_GUI == LTsv_GUI_GTK2: widget_t=ctypes.c_char_p(LTsv_libgtk.gtk_label_get_text(LTsv_libgtk.gtk_bin_get_child(widget_o))).value.decode("utf-8")
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_t=widget_o.cget("text")
	if widget_k == "radio":
		if LTsv_GUI == LTsv_GUI_GTK2: widget_t=ctypes.c_char_p(LTsv_libgtk.gtk_label_get_text(LTsv_libgtk.gtk_bin_get_child(widget_o))).value.decode("utf-8")
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_t=widget_o.cget("text")
	if widget_k == "clipboard":
		# Clipboard may be empty or unavailable; return "" in that case.
		try:
			if LTsv_GUI == LTsv_GUI_GTK2: widget_t="{0}".format(ctypes.c_char_p(LTsv_libgtk.gtk_clipboard_wait_for_text(widget_o)).value.decode("utf-8"))
			if LTsv_GUI == LTsv_GUI_Tkinter: widget_t="{0}".format(widget_o.clipboard_get())
		except:
			widget_t=""
	if widget_k == "entry":
		if LTsv_GUI == LTsv_GUI_GTK2: widget_t=ctypes.c_char_p(LTsv_libgtk.gtk_entry_get_text(widget_o)).value.decode("utf-8")
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_t=widget_o.get()
	if widget_k == "edit":
		if LTsv_GUI == LTsv_GUI_GTK2:
			# Read the whole GtkTextBuffer between start and end iterators.
			widget_v=LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetstringvar")]
			start_iter=LTsv_TextIter(); end_iter=LTsv_TextIter()
			LTsv_libgtk.gtk_text_buffer_get_start_iter(widget_v,ctypes.pointer(start_iter)); LTsv_libgtk.gtk_text_buffer_get_end_iter(widget_v,ctypes.pointer(end_iter))
			widget_t=ctypes.c_char_p(LTsv_libgtk.gtk_text_buffer_get_text(widget_v,ctypes.pointer(start_iter),ctypes.pointer(end_iter),True)).value.decode("utf-8");
#			LTsv_libgtk.gtk_text_iter_free(ctypes.pointer(start_iter)); LTsv_libgtk.gtk_text_iter_free(ctypes.pointer(end_iter))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_t=widget_o.get(1.0,Tk.END)
	if widget_k == "scale":
		if LTsv_GUI == LTsv_GUI_GTK2: widget_t=str(int(ctypes.c_double(LTsv_libgtk.gtk_range_get_value(widget_o)).value))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_t=str(widget_o.get())
	if widget_k == "spin":
		if LTsv_GUI == LTsv_GUI_GTK2: widget_t=str(int(ctypes.c_int(LTsv_libgtk.gtk_spin_button_get_value_as_int(widget_o)).value))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_t=str(widget_o.get())
	if widget_k == "notify":
		if LTsv_GUI == LTsv_GUI_GTK2: widget_t=LTsv_readlinerest(LTsv_widgetPAGE,"widgettext")
	if widget_k == "combobox":
		# Guard against an empty model before asking for the active text.
		if LTsv_GUI == LTsv_GUI_GTK2: widget_t=ctypes.c_char_p(LTsv_libgtk.gtk_combo_box_text_get_active_text(widget_o)).value.decode("utf-8") if LTsv_libgtk.gtk_tree_model_iter_n_children(LTsv_libgtk.gtk_combo_box_get_model(widget_o),None) > 0 else ""
	if widget_k == "filedialog":
		if LTsv_GUI == LTsv_GUI_GTK2: widget_t=ctypes.c_char_p(LTsv_libgtk.gtk_window_get_title(widget_o)).value.decode("utf-8")
	return widget_t
def LTsv_widget_setnumber(LTsv_widgetPAGENAME,widget_s=0):
	"""Write the numeric value widget_s into the widget registered under
	LTsv_widgetPAGENAME.

	Dispatches on the widget kind recorded in the page ("check", "radio",
	"entry", "edit", "scale", "spin", "combobox") and on the active GUI
	backend (GTK2 via ctypes, or Tkinter).  Text widgets are routed through
	LTsv_widget_settext with the value formatted as a string.
	"""
	global LTsv_widgetLTSV
	LTsv_widgetPAGE=LTsv_getpage(LTsv_widgetLTSV,LTsv_widgetPAGENAME)
	widget_o=LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetobj")]
	widget_k=LTsv_readlinerest(LTsv_widgetPAGE,"widgetkind")
	widget_v=None  # NOTE(review): assigned but never used in this function
	if widget_k == "check":
		# Toggle state: clamp the incoming number to 0/1.
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_toggle_button_set_active(widget_o,ctypes.c_int(min(max(int(float(widget_s)),0),1)))
		if LTsv_GUI == LTsv_GUI_Tkinter: LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetbooleanvar")].set(True if int(float(widget_s)) !=0 else False)
	if widget_k == "radio":
		if LTsv_GUI == LTsv_GUI_GTK2:
			radio_group=LTsv_libgtk.gtk_radio_button_get_group(widget_o)
			# Clamp the index to the group size; the GSList index is reversed
			# (radio_len-widget_s-1), presumably because GTK prepends new
			# members to the group list -- confirm against GTK2 docs.
			radio_len=LTsv_libgtk.g_slist_length(radio_group); widget_s=min(max(int(float(widget_s)),0),radio_len-1)
			LTsv_libgtk.gtk_toggle_button_set_active(LTsv_libgtk.g_slist_nth_data(radio_group,radio_len-widget_s-1),ctypes.c_int(1))
		if LTsv_GUI == LTsv_GUI_Tkinter: LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetbooleanvar")].set(widget_s)
	if widget_k == "entry":
		# Text-based widgets delegate to the string setter.
		LTsv_widget_settext(LTsv_widgetPAGENAME,widget_t="{0}".format(widget_s))
	if widget_k == "edit":
		LTsv_widget_settext(LTsv_widgetPAGENAME,widget_t="{0}".format(widget_s))
	if widget_k == "scale":
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_range_set_value(widget_o,ctypes.c_double(int(float(widget_s))))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_o.set(int(widget_s))
	if widget_k == "spin":
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_spin_button_set_value(widget_o,ctypes.c_double(int(float(widget_s))))
		if LTsv_GUI == LTsv_GUI_Tkinter: widget_o.delete(0,Tk.END); widget_o.insert(0,str(widget_s))
	if widget_k == "combobox":
		# Clamp the active index into [0, item_count-1].
		if LTsv_GUI == LTsv_GUI_GTK2: LTsv_libgtk.gtk_combo_box_set_active(widget_o,max(min(widget_s,LTsv_libgtk.gtk_tree_model_iter_n_children(LTsv_libgtk.gtk_combo_box_get_model(widget_o),None)-1),0))
	LTsv_widgetLTSV=LTsv_putpage(LTsv_widgetLTSV,LTsv_widgetPAGENAME,LTsv_widgetPAGE)
def LTsv_widget_getnumber(LTsv_widgetPAGENAME):
global LTsv_widgetLTSV
LTsv_widgetPAGE=LTsv_getpage(LTsv_widgetLTSV,LTsv_widgetPAGENAME)
widget_o=LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetobj")]
widget_k=LTsv_readlinerest(LTsv_widgetPAGE,"widgetkind")
widget_s=0
if widget_k == "check":
if LTsv_GUI == LTsv_GUI_GTK2: widget_s=ctypes.c_int(LTsv_libgtk.gtk_toggle_button_get_active(widget_o)).value
if LTsv_GUI == LTsv_GUI_Tkinter: widget_s=1 if LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetbooleanvar")].get() == True else 0
if widget_k == "radio":
if LTsv_GUI == LTsv_GUI_GTK2:
radio_group=LTsv_libgtk.gtk_radio_button_get_group(widget_o)
radio_len=LTsv_libgtk.g_slist_length(radio_group); widget_s=radio_len
for radio_count in range(radio_len):
if ctypes.c_int(LTsv_libgtk.gtk_toggle_button_get_active(LTsv_libgtk.g_slist_nth_data(radio_group,radio_count))).value:
widget_s=radio_len-radio_count-1
if LTsv_GUI == LTsv_GUI_Tkinter: widget_s=LTsv_widgetOBJ[LTsv_readlinerest(LTsv_widgetPAGE,"widgetbooleanvar")].get()
if widget_k == "entry":
if LTsv_GUI == LTsv_GUI_GTK2: widget_t=ctypes.c_char_p(LTsv_libgtk.gtk_entry_get_text(widget_o)).value.decode("utf-8")
if LTsv_GUI == LTsv_GUI_Tkinter: widget_t=widget_o.get()
widget_s=int(widget_t) if widget_t.isdecimal() else 0
if widget_k == "scale":
if LTsv_GUI == LTsv_GUI_GTK2: widget_s=int(float(ctypes.c_double(LTsv_libgtk.gtk_range_get_value(widget_o)).value))
if LTsv_GUI == LTsv_GUI_Tkinter: widget_s=int(widget_o.get())
if widget_k == "spin":
if LTsv_GUI == LTsv_GUI_GTK2: widget_s=int(ctypes.c_int(LTsv_libgtk.gtk_spin_button_get_value_as_int(widget_o)).value)
if LTsv_GUI == LTsv_GUI_Tkinter: widget_s=LTsv_intstr0x(widget_o.get())
if widget_k == "combobox":
if LTsv_GUI == | |
+ noise
pickNum = real_bikes
realbikes = 0
return True, endtime, dropNum, pickNum, realbikes, returnLost, realbikes
else:
minBikes = min(serviceLevel)
maxBikes = max(serviceLevel)
endtime = t_arrive
if minBikes <= real_bikes <= maxBikes:
endtime = t_arrive + noise
if selectedSta == '127':
print('dropNum:' + str(dropNum))
print('pickNum:' + str(pickNum))
realbikes = real_bikes
return False, endtime, dropNum, pickNum, rentalLost, returnLost, realbikes
else:
if real_bikes < minBikes:
dropNum = minBikes - real_bikes
endtime = t_arrive + dropNum * 0.3 + noise
if real_bikes > maxBikes:
pickNum = real_bikes - maxBikes
endtime = t_arrive + pickNum * 0.3 + noise
if selectedSta == '127':
print('dropNum:' + str(dropNum))
print('pickNum:' + str(pickNum))
if pickNum != 0:
realbikes = maxBikes
elif dropNum != 0:
realbikes = minBikes
return True, endtime, dropNum, pickNum, rentalLost, returnLost, realbikes
def getServiceLevel(selectedSta, t_interval, rateData, station_status, totalDocksDict, day):
    """Simulate one station forward and compute its feasible service band.

    Phase 1 replays net demand (rental rate - return rate) for t_interval
    slots starting at the current hour's offset, yielding the station's
    present bikes/docks and the demand lost so far.  Phase 2 tries every
    possible bikes/docks split of the station and looks ahead up to 24
    further slots (capped at index 48), keeping the splits that never come
    within one bike of empty or one dock of full.

    Returns (serviceLevel, realBikes, realDocks, rentalLost, returnLost),
    where serviceLevel is the list of bike counts surviving the lookahead.
    """
    # mon,day,hour = getMonthDayAndHour()
    mon = 8
    hour = 7
    rateDict = rateData[str(selectedSta)]
    # Hours 7/8/9 map to slot offsets 0/12/24, i.e. 12 slots per hour
    # (5-minute slots) -- TODO confirm against the rate data resolution.
    t_intervalFlag = 0
    if hour == 7:
        t_intervalFlag = 0
    elif hour == 8:
        t_intervalFlag = 12
    elif hour == 9:
        t_intervalFlag = 24
    month = str(mon) if int(mon) >= 10 else '0' + str(mon)
    day1 = str(day) if int(day) >= 10 else '0' + str(day)
    date = '2017-' + str(month) + '-' + str(day1)
    date = datetime.datetime.strptime(date, '%Y-%m-%d')
    # Weekday rate profile (suffix _0) vs weekend profile (suffix _1).
    if date.weekday() < 5:
        rental_rate_0 = rateDict['rental_rate_0']
        return_rate_0 = rateDict['return_rate_0']
    elif date.weekday() < 7:
        rental_rate_0 = rateDict['rental_rate_1']
        return_rate_0 = rateDict['return_rate_1']
    iniBikes = station_status[str(day)][str(selectedSta)]['availableBikes']
    iniDocks = station_status[str(day)][str(selectedSta)]['availableDocks']
    totalDocks = totalDocksDict[str(selectedSta)]
    serviceLevel = []
    availableBikes = iniBikes
    availableDocks = iniDocks
    if selectedSta == '127':
        # Debug tracing hard-wired to one station.
        print('iniBikes:' + str(availableBikes))
        print('iniDocks:' + str(availableDocks))
        print('t_interval:' + str(t_interval))
        print(totalDocks)
    rentalLost = 0
    returnLost = 0
    for i in np.arange(int(t_intervalFlag), int(t_interval) + int(t_intervalFlag)): # real-time bikes docks
        deltaNum = 0
        deltaNum = rental_rate_0[i] - return_rate_0[i]
        # Demand arriving at an (almost) empty/full station is counted lost.
        if float(availableBikes) < 1.0 and deltaNum > 0:
            rentalLost += deltaNum
            pass # rental_lost += deltNum
        if float(availableDocks) < 1.0 and deltaNum < 0:
            returnLost += abs(deltaNum)
            pass # return_lost += deltNum
        if deltaNum > 0:
            # Net rentals: bikes leave, docks free up (clamped to capacity).
            availableBikes = float(availableBikes) - deltaNum
            if availableBikes < 0:
                availableBikes = 0
            availableDocks = float(availableDocks) + deltaNum
            if availableDocks > float(totalDocks):
                availableBikes = 0
                availableDocks = float(totalDocks)
        else:
            # Net returns: docks fill, bikes accumulate (clamped to capacity).
            availableDocks = float(availableDocks) - abs(deltaNum)
            if availableDocks < 0:
                availableDocks = 0
            availableBikes = float(availableBikes) + abs(deltaNum)
            if availableBikes > float(totalDocks):
                availableDocks = 0
                availableBikes = float(totalDocks)
    if selectedSta == '127':
        print('realBikes:' + str(availableBikes))
        print('realDocks:' + str(availableDocks))
    realBikes = availableBikes
    realDocks = availableDocks
    # Phase 2: test each split of the capacity into bikes vs empty docks.
    for docks in range(1, int(totalDocks)):
        availableBikes = int(totalDocks) - docks
        availableDocks = docks
        flag = 0
        for j in np.arange(int(t_intervalFlag) + int(t_interval), int(t_interval) + int(t_intervalFlag) + 24):
            deltaNum = 0
            if j >= 48:
                # Rate arrays only cover 48 slots.
                break
            else:
                deltaNum = rental_rate_0[j] - return_rate_0[j]
            if deltaNum > 0:
                availableBikes = float(availableBikes) - deltaNum
                if availableBikes <= 1:
                    flag = 1
                    # print('availableBikes:'+str(availableBikes))
                    break
                availableDocks = float(availableDocks) + deltaNum
                if availableDocks >= float(totalDocks) - 1:
                    flag = 1
                    # print('availableDocks:'+str(availableDocks))
                    break
            else:
                availableDocks = float(availableDocks) - abs(deltaNum)
                if availableDocks <= 1:
                    # print('availableDocks:'+str(availableDocks))
                    flag = 1
                    break
                availableBikes = float(availableBikes) + abs(deltaNum)
                if availableBikes >= float(totalDocks) - 1:
                    # print('availableBikes:'+str(availableBikes))
                    flag = 1
                    break
        if flag == 0:
            # This split survives the whole lookahead: record its bike count.
            serviceLevel.append(int(totalDocks) - int(docks))
    if selectedSta == '127':
        print(serviceLevel)
    return serviceLevel, math.floor(float(realBikes)), math.floor(float(realDocks)), rentalLost, returnLost
def mctsAlgorithm():
    """Run the two-truck repositioning experiment for every August weekday.

    For each day, two worker processes (one per truck, each with a fixed
    start station) run start() concurrently, cooperating through
    multiprocessing.Manager proxies (shared station pool, shared real-time
    station state, per-truck result containers).  After both workers join,
    the proxies are copied into plain dicts/lists and dumped to a JSON file
    under the experiment output directory.
    """
    # month, day, hour = getMonthDayAndHour()
    month = 8
    hour = 7
    day1 = [i for i in range(1, 32)]
    day2 = [5, 6, 12, 13, 19, 20, 26, 27]  # The weekends of August
    days = [i for i in day1 if i not in day2]
    for day in days:
        position, stations_id = getPositionAndStations_id()
        # Shared state: workers coordinate through Manager proxies.
        availStations = multiprocessing.Manager().list(stations_id)
        realtimeBikes = multiprocessing.Manager().dict()
        lostNums1 = multiprocessing.Manager().dict()
        visitedPath1 = multiprocessing.Manager().list()
        cumulativeDis1 = multiprocessing.Manager().list()
        balanceNum1 = multiprocessing.Manager().dict()
        lostNums2 = multiprocessing.Manager().dict()
        visitedPath2 = multiprocessing.Manager().list()
        cumulativeDis2 = multiprocessing.Manager().list()
        balanceNum2 = multiprocessing.Manager().dict()
        neighbor = getNeighbor(stations_id, position)
        olderNeighbor = getOlderNeighbor(stations_id, position)
        startStation1 = '237'
        startStation2 = '369'
        mutex = multiprocessing.Lock()
        p1 = multiprocessing.Process(target=start, args=(
            availStations, neighbor, lostNums1, visitedPath1, cumulativeDis1, startStation1, balanceNum1, mutex,
            realtimeBikes, day, olderNeighbor))
        p2 = multiprocessing.Process(target=start, args=(
            availStations, neighbor, lostNums2, visitedPath2, cumulativeDis2, startStation2, balanceNum2, mutex,
            realtimeBikes, day, olderNeighbor))
        p1.start()
        p2.start()
        p1.join()
        p2.join()
        print('customer loss:' + str(lostNums1))
        print('through station:' + str(visitedPath1))
        print('balanced number:' + str(balanceNum1))
        print('travel distance:' + str(cumulativeDis1))
        print('customer loss:' + str(lostNums2))
        print('through station:' + str(visitedPath2))
        print('balanced number:' + str(balanceNum2))
        print('travel distance:' + str(cumulativeDis2))
        print('pre-process:pid=%d' % os.getpid())
        print('real status of stations:' + str(realtimeBikes))
        filename = 'result_month_' + str(month) + '_day_' + str(day) + '_hour_' + str(hour) + '.json'
        # Copy Manager proxies into plain containers so json can serialize them.
        realtimeBikes1 = {str(sta): dicts for sta, dicts in realtimeBikes.items()}
        resultTruck1 = {
            'lostUsers': {str(sta): num for sta, num in lostNums1.items()},
            'visitedPath': list(visitedPath1),
            'balanceNum': {str(sta): num for sta, num in balanceNum1.items()},
            'travelDis': list(cumulativeDis1),
        }
        resultTruck2 = {
            'lostUsers': {str(sta): num for sta, num in lostNums2.items()},
            'visitedPath': list(visitedPath2),
            'balanceNum': {str(sta): num for sta, num in balanceNum2.items()},
            'travelDis': list(cumulativeDis2),
        }
        experimentResult = {
            'truck1': resultTruck1,
            'truck2': resultTruck2,
            'afterBalanceRealBikes': realtimeBikes1,
        }
        # NOTE: the earlier unused assignment of experiment_path (without the
        # epsilon_0 suffix) was dead code and has been removed.
        experiment_path = './bike_sharing_data/mydata/experiment_result2/epsilon_0'
        if not os.path.exists(experiment_path):
            os.makedirs(experiment_path)
        with open(os.path.join(experiment_path, filename), 'w') as f:
            json.dump(experimentResult, f)
        print('day' + str(day) + 'finished!')
def noRepositionStart(lostNums):
    """Simulate every station over a full day with no repositioning and
    record the lost demand.

    For each day of August and each station, the net flow per slot
    (rental rate - return rate, 48 slots) is applied to the station's
    initial state; rentals that exceed the available bikes and returns that
    exceed the available docks are accumulated as lost demand.

    Results are written into lostNums keyed by day (as a string), each value
    being {station_id: lost, ..., 'totalLost': day_total}.
    """
    position, stations_id = getPositionAndStations_id()
    rateData = getRateData()
    station_status, totalDocksDict = getStation_status()
    # mon, day2, hour = getMonthDayAndHour()
    mon = 8
    for day in range(1, 32):
        totalLost = 0
        lost = {}
        for station_id in stations_id:
            rateDict = rateData[str(station_id)]
            month = str(mon) if int(mon) >= 10 else '0' + str(mon)
            day1 = str(day) if int(day) >= 10 else '0' + str(day)
            date = '2017-' + str(month) + '-' + str(day1)
            date = datetime.datetime.strptime(date, '%Y-%m-%d')
            # Weekday profile (suffix _0) vs weekend profile (suffix _1).
            # weekday() is always in 0..6, so 'else' replaces the original
            # 'elif weekday() < 7' without changing behavior.
            if date.weekday() < 5:
                rental_rate_0 = rateDict['rental_rate_0']
                return_rate_0 = rateDict['return_rate_0']
            else:
                rental_rate_0 = rateDict['rental_rate_1']
                return_rate_0 = rateDict['return_rate_1']
            iniBikes = station_status[str(day)][str(station_id)]['availableBikes']
            iniDocks = station_status[str(day)][str(station_id)]['availableDocks']
            totalDocks = totalDocksDict[str(station_id)]
            availableBikes = iniBikes
            availableDocks = iniDocks
            rentalLost = 0
            returnLost = 0
            for i in np.arange(0, 48):
                deltaNum = rental_rate_0[i] - return_rate_0[i]
                # Demand beyond the current stock/space is lost.
                if deltaNum > 0 and (deltaNum - float(availableBikes)) > 0:
                    rentalLost += (deltaNum - float(availableBikes))
                if deltaNum < 0 and (abs(deltaNum) - float(availableDocks)) > 0:
                    returnLost += (abs(deltaNum) - float(availableDocks))
                if deltaNum > 0:
                    # Net rentals: bikes leave, docks free up (clamped).
                    availableBikes = float(availableBikes) - deltaNum
                    if availableBikes < 0:
                        availableBikes = 0
                    availableDocks = float(availableDocks) + deltaNum
                    if availableDocks > float(totalDocks):
                        availableBikes = 0
                        availableDocks = float(totalDocks)
                else:
                    # Net returns: docks fill, bikes accumulate (clamped).
                    availableDocks = float(availableDocks) - abs(deltaNum)
                    if availableDocks < 0:
                        availableDocks = 0
                    availableBikes = float(availableBikes) + abs(deltaNum)
                    if availableBikes > float(totalDocks):
                        availableDocks = 0
                        availableBikes = float(totalDocks)
            lost[str(station_id)] = rentalLost + returnLost
            totalLost += lost[str(station_id)]
        lost['totalLost'] = totalLost
        print(totalLost)
        lostNums[str(day)] = lost
def noReposition():
    """Run the no-repositioning baseline and dump the lost demand to JSON."""
    # month, day, hour = getMonthDayAndHour()
    month = 8
    hour = 7
    out_dir = './bike_sharing_data/mydata/noReposition'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    lostNums = {}
    noRepositionStart(lostNums)
    print(lostNums)
    filename = 'noRepositionLost_month_' + str(month) + '_hour_' + str(78910) + '.json'
    with open(os.path.join(out_dir, filename), 'w') as f:
        json.dump(lostNums, f)
def staticRepositionStart(lostNums):
position, stations_id = getPositionAndStations_id()
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
mon, day, hour = getMonthDayAndHour()
for day in range(1, 32):
totalLost = 0
lost = {}
for station_id in stations_id:
rateDict = rateData[str(station_id)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
totalDocks = totalDocksDict[str(station_id)]
serviceLevel = []
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(0, 19):
deltaNum = 0
deltaNum = rental_rate_0[j] - return_rate_0[j]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
# print('availableBikes:'+str(availableBikes))
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
| |
"reg-name", expander = exp_regs)],
type = ["Registers", "Inspecting Simulated State"],
short = "read a register",
namespace_copy = ("processor", obj_read_reg_cmd),
see_also = ['%', 'write-reg', 'pregs', 'pselect'],
doc = """
This command reads a CPU register. For example, to read the
<tt>eax</tt> register in an x86 processor called <obj>cpu0</obj>,
write <cmd>read-reg cpu0 eax</cmd>. You can also use the method
variant: <cmd>cpu0.read-reg eax</cmd>, or the more convenient variant
<cmd>%eax</cmd> that reads a register from the selected frontend CPU.
If no <param>cpu-name</param> is supplied, the current frontend
processor is used.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1952")
# Register the global '%' command (e.g. '%pc'): evaluates to the named
# register of the current frontend processor, so register values can be used
# directly inside CLI expressions.  High priority, repeatable.
new_command("%", read_default_reg_cmd,
            [arg(str_t, doc = "reg-name", expander = exp_regs)],
            pri = 1000,
            check_args = 0,
            type = ["Registers", "Inspecting Simulated State"],
            short = "read register by name",
            repeat = read_default_reg_cmd,
            see_also = ["read-reg", "write-reg", "pregs"],
            doc ="""
Returns the value of the register <arg>reg-name</arg> for the current
processor. This is a convenient way to use register values in expressions like
<cmd>disassemble (%pc <math>-</math> 4*3) 10</cmd>.
Use <cmd>pselect</cmd> to select the current processor.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1969")
#
# -------------------- write-reg --------------------
#
def obj_write_reg_cmd(cpu, reg_name, value):
    """Write value to the register reg_name of processor cpu.

    The value is first normalised to an unsigned 64-bit integer.  The
    target-specific local_write_int_register() is tried first; if it fails
    for any reason, fall back to the generic SIM_write_register().
    """
    value = uint64_t([("int", value)])[0]
    try:
        local_write_int_register(cpu, reg_name, value)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # no longer swallowed by the fallback path.
        SIM_write_register(cpu, SIM_get_register_number(cpu, reg_name), value)
def write_reg_cmd(cpu, reg_name, value):
    """CLI entry for write-reg: default to the frontend CPU, then delegate."""
    if not cpu:
        cpu, _ = get_cpu()
    obj_write_reg_cmd(cpu, reg_name, value)
# Register the global and namespaced 'write-reg' commands; the work is done by
# write_reg_cmd / obj_write_reg_cmd defined above.
new_command("write-reg", write_reg_cmd,
            [arg(obj_t('processor', 'processor'), "cpu-name", "?"),
             arg(str_t, "reg-name", expander = exp_regs),
             arg(integer_t, "value")],
            type = ["Registers", "Changing Simulated State"],
            short = "write to register",
            namespace_copy = ("processor", obj_write_reg_cmd),
            see_also = ['%', 'read-reg', 'pregs', 'pselect'],
            doc = """
Use this command to set the value of a CPU register. For example, to
set the <tt>eax</tt> register on the x86 processor <obj>cpu0</obj> to
3, write <cmd>write-reg cpu0 eax 3</cmd>. You can also use the method
variant: <cmd>cpu0.write-reg eax 3</cmd>.
This function may or may not have the correct side-effects, depending
on target and register. If no <param>cpu-name</param> is given, the
current frontend processor is used.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2000")
#
# -------------------- trace-cr, break-cr --------------------
#
class base_cr_tracker(tracker):
    """Tracker backing the trace-cr / break-cr commands (Python 2 code).

    Hooks the Core_Control_Register_Write hap either for every catchable
    register of a processor (self.catchall) or per register number
    (self.map).  'stop' selects breakpoint semantics; otherwise writes are
    only traced.
    """
    # NOTE(review): mutable default argument 'see_also = []' -- safe only as
    # long as no caller mutates it.
    def __init__(self, stop, cmd, short, doc, type, see_also = []):
        tracker.__init__(self, stop, cmd, "register", self.expander, short, doc,
                         namespace = "processor",
                         see_also = see_also,
                         group = type,
                         expander_cpu = self.expander_cpu)
        self.hap = "Core_Control_Register_Write"
        # cpu object -> {register number: hap callback id}
        self.map = {}
        # cpu object -> hap callback id covering all registers
        self.catchall = {}
    def expander(self, comp, cpu):
        # Complete over the names of all catchable registers of 'cpu'.
        iface = cpu.iface.int_register
        regs = [ SIM_get_register_name(cpu, r)
                 for r in SIM_get_all_registers(cpu)
                 if iface.register_info(cpu, r, Sim_RegInfo_Catchable) ]
        return get_completions(comp, regs)
    def expander_cpu(self, comp):
        # Same completion, but for the current frontend processor.
        return self.expander(comp, SIM_current_processor())
    # These two are here so that they can be overridden
    def get_register_number(self, obj, regname):
        return SIM_get_register_number(obj, regname)
    def get_register_name(self, obj, reg):
        return SIM_get_register_name(obj, reg)
    def filter(self, *args):
        # Presumably suppresses events triggered while the simulation is not
        # running (e.g. CLI-initiated writes) -- confirm against the tracker
        # base class.
        return SIM_simics_is_running()
    def show(self, regname, obj, regno, value):
        # Print one register write; look up the name if only a number is given.
        if not regname:
            try:
                regname = self.get_register_name(obj, regno)
            except:
                regname = "[%s] Unknown register %d" % (obj.name, regno)
        if value < 0:
            # Display negative values as unsigned 64-bit.
            value += 1 << 64
        print "[%s] %s <- %s" % (obj.name, regname, number_str(value, 16))
    def list(self, obj):
        # List which registers of 'obj' are currently tracked.
        if obj in self.catchall.keys():
            print "[%s] %s enabled for all control registers" % (obj.name, iff(self.stop, "breaking", "tracing"))
        else:
            print "[%s] %s enabled for these control registers:" % (obj.name, iff(self.stop, "breaking", "tracing"))
            if obj in self.map.keys():
                for reg in self.map[obj].keys():
                    print "    %s" % self.get_register_name(obj, reg)
    def resolve_target(self, obj, regname):
        return (regname, self.get_register_number(obj, regname))
    def is_tracked(self, obj, target):
        regname, regno = target
        return ((obj in self.catchall.keys())
                or (obj in self.map.keys() and self.map[obj].has_key(regno)))
    def track_all(self, obj):
        # Replace any per-register callbacks with one catchall callback.
        if self.catchall.has_key(obj):
            return
        if not (obj in self.map.keys()):
            self.map[obj] = {}
        for regno,hdl in self.map[obj].items():
            SIM_hap_delete_callback_obj_id("Core_Control_Register_Write",
                                           obj, hdl)
            del self.map[obj][regno]
        self.catchall[obj] = SIM_hap_add_callback_obj(
            "Core_Control_Register_Write", # hap
            obj,                           # trigger object
            0,                             # flags
            self.callback,                 # callback
            None)                          # user value
    def track_none(self, obj):
        # Remove the catchall callback, or else every per-register callback.
        if (obj in self.catchall.keys()):
            SIM_hap_delete_callback_obj_id("Core_Control_Register_Write",
                                           obj,
                                           self.catchall[obj])
            del self.catchall[obj]
        else:
            if not (obj in self.map.keys()):
                self.map[obj] = {}
            for regno,hdl in self.map[obj].items():
                SIM_hap_delete_callback_obj_id("Core_Control_Register_Write",
                                               obj, hdl)
                del self.map[obj][regno]
    def track_on(self, obj, target):
        # Install a per-register callback, unless already covered.
        regname, regno = target
        if obj in self.catchall.keys():
            print "[%s] Already %s all control registers" % (obj.name, iff(self.stop, "breaking on", "tracing"))
            return
        if self.is_tracked(obj, target):
            print "[%s] Already %s %s" % (obj.name, iff(self.stop, "breaking on", "tracing"), regname)
            return
        if not obj.iface.int_register.register_info(obj, regno, Sim_RegInfo_Catchable):
            print "[%s] Cannot %s on %s" % (obj.name, iff(self.stop, "break", "trace"), regname)
            return
        if not (obj in self.map.keys()):
            self.map[obj] = {}
        self.map[obj][regno] = SIM_hap_add_callback_obj_index(
            "Core_Control_Register_Write", # hap
            obj,                           # trigger object
            0,                             # flags
            self.callback,                 # callback
            regname,                       # user value
            regno)                         # index
    def track_off(self, obj, target):
        regname, regno = target
        if obj in self.catchall.keys():
            # All tracked, remove all
            self.track_none(obj)
            # Reinstall all catchable registers, except the one removed
            iface = obj.iface.int_register
            for r in SIM_get_all_registers(obj):
                if r != regno:
                    if iface.register_info(obj, r, Sim_RegInfo_Catchable):
                        regname = SIM_get_register_name(obj, r)
                        self.track_on(obj, (regname, r))
            return
        if not self.is_tracked(obj, target):
            print "[%s] Not %s %s" % (obj.name, iff(self.stop, "breaking on", "tracing"), regname)
            return
        SIM_hap_delete_callback_obj_id("Core_Control_Register_Write", obj,
                                       self.map[obj][regno])
        del self.map[obj][regno]
# Default implementation; a target-specific module may rebind cr_tracker to a
# subclass before this registration runs.
cr_tracker = base_cr_tracker

# NOTE(review): this guard is always true here since cr_tracker was just
# assigned above -- presumably defensive against the alias being removed.
if "cr_tracker" in dir():
    # Register the trace-cr (stop=0) and break-cr (stop=1) commands.
    trace_cr_cmds = cr_tracker(0, "trace-cr",
                               short = "trace control register updates",
                               type = "inspect/change",
                               see_also = [ "break-cr" ],
                               doc = """
Enables and disables tracing of control register updates.  When this
is enabled, every time the specified control register is updated
during simulation a message is printed.  The message will name the
register being updated, and the new value.  The new value will be
printed even if it is identical to the previous value.

The <i>reg-name</i> parameter specifies which control register should
be traced.  The available control registers depends on the simulated
target.

Instead of a register name, the <tt>-all</tt> flag may be given.  This
will enable or disable tracing of all control register.
""")

    break_cr_cmds = cr_tracker(1, "break-cr",
                               short = "break on control register updates",
                               type = "breakpoint",
                               see_also = [ "trace-cr", "<breakpoint>.break" ],
                               doc = """
Enables and disables breaking simulation on control register updates.
When this is enabled, every time the specified control register is
updated during simulation a message is printed.  The message will name
the register being updated, and the new value.  The new value will be
printed even if it is identical to the previous value.

The <i>reg-name</i> parameter specifies which control register should
be traced.  The available control registers depends on the simulated
target.

Instead of a register name, the <tt>-all</tt> flag may be given.  This
will enable or disable tracing of all control register.
""")
#
# -------------------- trace-exception, break-exception --------------------
#
class exception_tracker(tracker):
def __init__(self, stop, cmd, short, doc, type, see_also = []):
tracker.__init__(self, stop, cmd, ((int_t, str_t), ("number", "name")),
(0, self.expander), short, doc,
group = type, see_also = see_also)
self.hap = "Core_Exception"
self.map = {}
self.catchall = 0
self.names = {}
def expander(self, comp):
try:
cpu = current_processor()
except:
return []
if self.names.has_key(cpu):
names = self.names[cpu]
else:
iface = cpu.iface.exception
names = [ iface.get_name(cpu, exc).replace(' ', '_')
for exc in iface.all_exceptions(cpu) ]
self.names[cpu] = names
return get_completions(comp, names)
# These two are here so that they can be overridden
def get_exception_number(self, excname, cpu = None):
if not cpu:
cpu = current_processor()
return cpu.iface.exception.get_number(cpu, excname)
def get_exception_name(self, exc, cpu = None):
if not cpu:
cpu = current_processor()
return cpu.iface.exception.get_name(cpu, exc)
def show(self, excname, cpu, excno):
if not excname:
excname = self.get_exception_name(excno, cpu)
print ("[%s] (@ cycle %s) Exception %d: %s"
% (cpu.name, number_str(SIM_cycle_count(cpu), 10),
excno, excname))
def list(self):
if self.catchall:
print "%s enabled for all exceptions" % iff(self.stop, "breaking", "tracing")
else:
print "%s enabled for these exceptions:" % iff(self.stop, "breaking", "tracing")
l = self.map.keys()
l.sort()
for exc in l:
print " %3d %s" % (exc, self.get_exception_name(exc))
def resolve_target(self, exc):
if type(exc) == type("hej"):
try:
name = exc
num = self.get_exception_number(exc)
except:
# some targets have spaces in the exception name
name = exc.replace('_', ' ')
num = self.get_exception_number(name)
else:
name = self.get_exception_name(exc)
num = exc
return (name, num)
def is_tracked(self, target):
excname, excno = target
return self.catchall or self.map.has_key(excno)
def track_all(self):
if self.catchall:
return
for key,hdl in self.map.items():
SIM_hap_delete_callback_id(self.hap, hdl)
del self.map[key]
self.catchall = SIM_hap_add_callback(self.hap,
self.callback, None)
def track_none(self):
if self.catchall:
SIM_hap_delete_callback_id(self.hap, self.catchall)
self.catchall = 0
else:
for key,hdl in self.map.items():
SIM_hap_delete_callback_id(self.hap, hdl)
del self.map[key]
def track_on(self, target):
excname, excno = target
if self.catchall:
print "Already %s all exceptions" % iff(self.stop, "breaking", "tracing")
return
if self.is_tracked(target):
print "Already %s %s" % (iff(self.stop, "breaking", "tracing"), excname)
return
self.map[excno] = SIM_hap_add_callback_index(self.hap,
self.callback, excname,
excno)
def track_off(self, target):
excname, excno = target
if self.catchall:
# | |
# Copyright (c) 2017 <NAME>
# Copyright (c) 2018 <NAME>
# Copyright (c) 2018-2019 <NAME>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import re
import inspect
import numpy as np
import ast
import phylanx.execution_tree
from phylanx import PhylanxSession
from .physl_db import db
# find name of file that imported this file
# Module scope: remember which user source file imported this module.
# Skipped when the module itself is run as a script.
_name_of_importing_file = None
if __name__ != '__main__':
    if _name_of_importing_file is None:
        # Walk outward through the call stack, skipping synthetic frames
        # (filenames starting with '<').  Note there is no 'break': the last
        # assignment wins, so this ends up holding the OUTERMOST real file on
        # the stack at import time.
        for frame in inspect.stack()[1:]:
            if frame.filename[0] != '<':
                _name_of_importing_file = frame.filename
def physl_zip(loop):
    """Lower a zip()-style for-loop into a PhySL fmap expression.

    Accepts either an ast.For node (targets/args are read from the AST) or an
    already-lowered [targets, [_, args]] list.  Returns a pair: the fmap
    expression and a tuple of 'define' bindings that slice each target out of
    __physl_iterator.
    """
    if isinstance(loop, ast.For):
        targets = [t.id for t in loop.target.elts]
        args = [a.id for a in loop.iter.args]
    elif isinstance(loop, list):
        head = loop[0]
        targets = head[1] if isinstance(head, list) else head
        args = loop[1][1]
    body = ['lambda', (*targets, ['list', tuple(targets)])]
    mapped = ['fmap', (body, *args)]
    bindings = tuple(
        ['define', (name, ['slice', ('__physl_iterator', str(pos))])]
        for pos, name in enumerate(targets))
    return (mapped, bindings)
# Python/NumPy callable names whose PhySL primitive is spelled differently.
mapped_methods = {
    "add": "__add",
    "array": "hstack",
    "det": "determinant",
    "diagonal": "diag",
    "divide": "__div",
    "matmul": "__mul",
    "multiply": "__mul",
    "negative": "__minus",
    "print": "cout",
    "subtract": "__sub",
    "len": "__len"
}

# NumPy constant spellings mapped to their PhySL names.
numpy_constants = {
    "inf": 'inf',
    "Inf": 'inf',
    "Infinity": 'inf',
    "PINF": 'inf',
    "infty": 'inf',
    "NINF": 'ninf',
    "nan": 'nan',
    "NaN": 'nan',
    "NAN": 'nan',
    "PZERO": 'PZERO',
    "NZERO": 'NZERO',
    "e": 'euler',
    "euler_gamma": 'euler_gamma',
    "pi": 'pi',
    "float": 'float',
    "int": 'int',
    "bool": 'bool'
}

# PhySL primitives that accept a trailing dtype argument.
methods_supporting_dtype = [
    'linearmatrix',
    'linspace',
    'power'
]
def create_array(array_tree, kwargs):
    """Build the PhySL hstack/vstack/dstack expression for an array literal.

    array_tree is the nested representation produced by the transducer:
    tuples hold element data, while a list marks a nested array() call whose
    first entry carries a '$line$col' symbol tag.  kwargs (e.g. a dtype
    argument), when given, is appended to every stack call's argument tuple.

    Returns the bare [hstack, args] expression for flat literals without
    nested array() calls, otherwise a 1-tuple wrapping the stacked
    expression.  Raises ValueError for data with more than 3 dimensions.
    """
    symbol_info = []
    hstack_symbol = 'hstack'
    vstack_symbol = 'vstack'
    dstack_symbol = 'dstack'

    def extract_data(arr):
        # Recursively flatten the tree; side effect: collect the '$line$col'
        # tags of nested array() calls into symbol_info.
        if isinstance(arr, tuple):
            if not arr:
                return []
            elif isinstance(arr[0], str):
                return [i for i in arr]
            else:
                current_dim = []
                for entry in arr:
                    current_dim.append(extract_data(entry))
                return current_dim
        elif isinstance(arr, list):
            symbol_info.append('$' + arr[0].split('$', 1)[1])
            return extract_data(arr[1])

    data = extract_data(array_tree)
    if not symbol_info:
        # Flat literal without nested array() calls: one hstack suffices.
        if kwargs:
            args = (['list', tuple(data)], kwargs)
        else:
            args = (['list', tuple(data)], )
        return [hstack_symbol, args]
    # NOTE(review): extract_data is invoked a second time here, so every tag
    # is now in symbol_info twice -- presumably intentional, since the pops
    # below consume the duplicated entries; verify against the callers.
    data = np.array(*extract_data(array_tree))
    num_dim = len(data.shape)
    if 3 == num_dim:
        # Slice the last axis into 2-D planes; each plane becomes a vstack of
        # hstacks, and the planes are combined with a dstack.
        columns = [data[:, :, i] for i in range(data.shape[-1])]
        dstacks = []
        for i, column in enumerate(columns):
            dstacks.append([])
            if kwargs:
                [dstacks[i].append((['list', tuple(data)], kwargs)) for data in column]
            else:
                [dstacks[i].append((['list', tuple(data)], )) for data in column]
        outer_symbol = '' if not symbol_info else symbol_info.pop(0)
        arr = []
        for d in dstacks:
            vstack = []
            for hstacks in d:
                vstack.append([hstack_symbol + symbol_info.pop(0), hstacks])
            sym_info = '' if not symbol_info else symbol_info.pop(0)
            if kwargs:
                args = (['list', tuple(vstack)], kwargs)
            else:
                args = (['list', tuple(vstack)], )
            vstack = [vstack_symbol + sym_info, args]
            arr.append(vstack)
        if kwargs:
            args = (['list', tuple(arr)], kwargs, )
        else:
            args = (['list', tuple(arr)], )
        arr = [dstack_symbol + outer_symbol, args]
    elif 2 == num_dim:
        # One hstack per row, combined with a vstack.
        arr = []
        for hstacks in data:
            sym_info = '' if not symbol_info else symbol_info.pop(0)
            if kwargs:
                args = (['list', tuple(hstacks)], kwargs)
            else:
                args = (['list', tuple(hstacks)], )
            arr.append([hstack_symbol + sym_info, args])
        sym_info = '' if not symbol_info else symbol_info.pop(0)
        if kwargs:
            args = (['list', tuple(arr)], kwargs)
        else:
            args = (['list', tuple(arr)], )
        arr = [vstack_symbol + sym_info, args]
    elif 1 == num_dim:
        sym_info = '' if not symbol_info else symbol_info.pop(0)
        if kwargs:
            args = (['list', tuple(data)], kwargs)
        else:
            args = (['list', tuple(data)], )
        arr = [hstack_symbol + sym_info, args]
    else:
        # BUG FIX: the original built the ValueError without raising it, which
        # left 'arr' unbound and crashed with UnboundLocalError at the return.
        raise ValueError("Phylanx supports arrays with 3 dimensions or less.")
    return (arr,)
def primitive_name(method_name):
    """Given a method_name, returns the corresponding Phylanx primitive.

    This is primarily used for mapping NumPy mapped_methods to Phylanx
    primitives, but other Python builtins also map to differently-named
    primitives, e.g. `print` maps to `cout`.  Unknown names are returned
    unchanged.
    """
    for table in (mapped_methods, numpy_constants):
        mapped = table.get(method_name)
        if mapped:
            return mapped
    return method_name
def print_physl_src(src, with_symbol_info=False, tag=4):
    """Pretty print PhySL source code to stdout."""
    # Strip the '$lineno' tags before printing.
    src = re.sub(r'\$\d+', '', src)
    if with_symbol_info:
        print(src)
        return
    # Tokenize into, in priority order:
    #   1: a quoted string, with possible \" or \\ embedded
    #   2: a set of balanced parentheses
    #   3: a single character
    token_pat = re.compile(r'"(?:\\.|[^"\\])*"|\([^()]*\)|.')
    depth = 0
    width = 4
    for tok in re.findall(token_pat, src):
        if tok in " \t\r\b\n":
            continue
        if tok == '(':
            print(tok)
            depth += 1
            print(" " * depth * width, end="")
        elif tok == ')':
            depth -= 1
            print("", sep="")
            print(" " * depth * width, end="")
            print(tok, end="")
        elif tok == ',':
            print(tok)
            print(" " * depth * width, end="")
        else:
            print(tok, end="", sep="")
    print("", sep="")
def get_symbol_info(symbol, name):
    """Attach '$line$col' position info to *name*.

    Recognized NumPy constants are returned untagged.
    """
    if name in numpy_constants:
        return name
    return '%s$%d$%d' % (name, symbol.lineno, symbol.col_offset)
def remove_line(a):
    """Return *a* with everything from the first '$' tag onward removed.

    Note: '.*' does not cross newlines, so only the tagged tail of the first
    line is stripped -- text after a newline is preserved.
    """
    cleaned = re.sub(r'\$.*', '', a)
    return cleaned
def is_fun(func, ir):
    """
    Check that the intermediate representation (ir) describes
    a function with name func (a regex; matched at a word boundary).
    Returns a falsy value otherwise.
    """
    # Deliberately exact type checks (not isinstance), matching the
    # original semantics for subclasses of list/str.
    if type(ir) != list:
        return False
    head = ir[0]
    if type(head) != str:
        return False
    return re.match(func + r'\b', head)
def check_noreturn(ir):
    """
    Verify that the intermediate representation (ir) contains no
    'return' statement; raise NotImplementedError when one is found.
    """
    if type(ir) not in (list, tuple) or len(ir) == 0:
        return
    if len(ir) == 1:
        check_noreturn(ir[0])
    elif is_fun('define', ir):
        # a nested function definition is allowed to contain returns
        check_hasreturn(ir)
    elif is_fun('return', ir):
        message = "Illegal return"
        position = re.match(r'.*\$(\d+)\$(\d+)$', str(ir[0]))
        if position:
            message += ": line=%s, col=%s" % (
                position.group(1), position.group(2))
        raise NotImplementedError(message)
    elif is_fun('.*', ir):
        # any other named primitive: only its argument list can hold returns
        check_noreturn(ir[1])
    else:
        for element in ir:
            check_noreturn(element)
def check_hasreturn(ir):
    """
    Process the intermediate representation (ir) passed
    and ensure that if it has a return statement, it is
    at the end.

    Raises NotImplementedError (via check_noreturn) when a return is
    found anywhere but the tail position.
    """
    if type(ir) not in [list, tuple]:
        return
    if len(ir) == 0:
        return
    elif len(ir) == 1:
        check_hasreturn(ir[0])
    elif is_fun('for_each', ir):
        # loop bodies may not contain returns
        check_noreturn(ir[1])
    elif is_fun('while', ir):
        check_noreturn(ir[1])
    elif is_fun('if', ir):
        # either branch of a conditional may end in a return
        for k in ir[1][1:]:
            check_hasreturn(k)
    elif is_fun('.*', ir):
        check_hasreturn(ir[1])
    else:
        # A statement sequence: only the last element may contain a
        # return.  (The redundant ``if len(ir) == 0`` guard that was here
        # is unreachable -- the empty case already returned above.)
        check_noreturn(ir[:-1])
        check_hasreturn([ir[-1]])
def check_return(ir):
    """
    Walk the intermediate representation (ir) and verify that return
    statements are only used in positions where they are allowed.
    """
    if type(ir) not in (list, tuple) or len(ir) == 0:
        return
    if len(ir) == 1:
        check_return(ir[0])
    elif is_fun('block', ir):
        # a block may end in a return
        check_hasreturn(ir[1])
    elif is_fun('while', ir):
        # loop bodies may not contain returns
        check_noreturn(ir[1])
    elif is_fun('if', ir):
        # either branch of a conditional may end in a return
        for branch in ir[1][1:]:
            check_hasreturn(branch)
    elif is_fun('.*', ir):
        check_return(ir[1])
    else:
        for element in ir:
            check_return(element)
class PhySLFunction:
    """Registry of PhySL wrappers whose compilation has been deferred.

    Wrappers queued in ``functions`` are compiled in one batch by the
    static ``compile`` method, after which the queue is cleared.
    """

    # queue of PhySLFunction instances awaiting compilation
    functions = []

    def __init__(self, physl):
        self.physl = physl

    def compile_function(self):
        """Compile the wrapped PhySL object, if not compiled already."""
        self.physl._ensure_is_compiled()

    @staticmethod
    def compile():
        """Compile every queued function and empty the queue."""
        pending = PhySLFunction.functions
        if pending:
            for entry in pending:
                entry.compile_function()
            PhySLFunction.functions = []
class PhySL:
    """Python AST to PhySL Transducer."""
    # Class-wide compiler state shared by all PhySL instances; created
    # lazily in _ensure_compiler_state (taken from kwargs when supplied,
    # otherwise built via phylanx.execution_tree.global_compiler_state).
    compiler_state = None
def _ensure_compiler_state(self):
"""Ensure the compiler state object has been created"""
if PhySL.compiler_state is None:
if "compiler_state" in self.kwargs:
PhySL.compiler_state = self.kwargs['compiler_state']
else:
# the static method compiler_state is constructed only once
PhySL.compiler_state = \
phylanx.execution_tree.global_compiler_state(
self.wrapped_function.__name__, self.file_name)
def _print_progress(self, msg):
if self.kwargs.get("print_progress"):
msg += ' %s(%s)'
print(msg % (self.wrapped_function.__name__, self.file_name))
def _compile_or_load(self):
"""Compile or load this function from database"""
physl_db = None
try:
self._print_progress('physl: compiling')
# create/open database representing the function in this file
physl_db = db(self.file_name) # _name_of_importing_file)
# check whether this Phylanx function is already in database
self.__src__, self.__ast__ = physl_db.select(
self.wrapped_function.__name__)
if self.__src__ is None:
self._print_progress('physl: not found in db')
# this function is not in database, generate physl
self.ir = self._apply_rule(self.python_tree.body[0])
check_return(self.ir)
if self.doc_src is None:
self.__src__ = self._generate_physl(self.ir)
else:
self.__src__ = self.doc_src
self.__ast__ = phylanx.ast.generate_ast(self.__src__)
# now store the PhySL string and AST for this function
physl_db.insert(
self.wrapped_function.__name__, self.__src__, self.__ast__)
physl_db.close()
except Exception as e:
# close database, if needed
if physl_db is not None:
physl_db.close()
# assume something went wrong while handling the database, simply
# compile things withoput db support
self.ir = self._apply_rule(self.python_tree.body[0])
check_return(self.ir)
if self.doc_src is not None:
if type(e) == RuntimeError and "Incomplete parse" in str(e):
# simply re-raise the exception assuming the PhySL provided
# by the doc string was invalid
raise e
self.__src__ = self.doc_src
else:
self.__src__ = self._generate_physl(self.ir)
self.__ast__ = phylanx.ast.generate_ast(self.__src__)
# now, print generated PhySL if required
if self.kwargs.get("debug"):
print_physl_src(self.__src__)
print(end="", flush="")
self._print_progress('physl: compiled')
def _ensure_global_state(self):
"""Ensure global PhySL session has been initialized"""
if not PhylanxSession.is_initialized:
PhylanxSession.init(1)
if not self.is_compiled:
# compile all functions that have so far been collected without an
# initialized session object
PhySLFunction.compile()
def _ensure_is_compiled(self):
"""Ensure this function has been compiled, also compile all functions
that have been collected | |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import logging
import indra.statements as ist
# Module-level logger used to report unhandled statement types.
logger = logging.getLogger('english_assembler')
class EnglishAssembler(object):
    """This assembler generates English sentences from INDRA Statements.

    Parameters
    ----------
    stmts : Optional[list[indra.statements.Statement]]
        A list of INDRA Statements to be added to the assembler.

    Attributes
    ----------
    statements : list[indra.statements.Statement]
        A list of INDRA Statements to assemble.
    model : str
        The assembled sentences as a single string.
    """
    def __init__(self, stmts=None):
        if stmts is None:
            self.statements = []
        else:
            self.statements = stmts
        self.model = None

    def add_statements(self, stmts):
        """Add INDRA Statements to the assembler's list of statements.

        Parameters
        ----------
        stmts : list[indra.statements.Statement]
            A list of :py:class:`indra.statements.Statement`
            to be added to the statement list of the assembler.
        """
        self.statements += stmts

    def make_model(self):
        """Assemble text from the set of collected INDRA Statements.

        The assembled text is also stored in the ``model`` attribute.

        Returns
        -------
        model : str
            Return the assembled text as unicode string. By default, the text
            is a single string consisting of one or more sentences with
            periods at the end.
        """
        stmt_strs = []
        # Dispatch each statement to the assembly helper for its type.
        for stmt in self.statements:
            if isinstance(stmt, ist.Modification):
                stmt_strs.append(_assemble_modification(stmt))
            elif isinstance(stmt, ist.Autophosphorylation):
                stmt_strs.append(_assemble_autophosphorylation(stmt))
            elif isinstance(stmt, ist.Complex):
                stmt_strs.append(_assemble_complex(stmt))
            elif isinstance(stmt, ist.RegulateActivity):
                stmt_strs.append(_assemble_regulate_activity(stmt))
            elif isinstance(stmt, ist.RegulateAmount):
                stmt_strs.append(_assemble_regulate_amount(stmt))
            elif isinstance(stmt, ist.ActiveForm):
                stmt_strs.append(_assemble_activeform(stmt))
            elif isinstance(stmt, ist.Translocation):
                stmt_strs.append(_assemble_translocation(stmt))
            elif isinstance(stmt, ist.Gef):
                stmt_strs.append(_assemble_gef(stmt))
            elif isinstance(stmt, ist.Gap):
                stmt_strs.append(_assemble_gap(stmt))
            else:
                logger.warning('Unhandled statement type: %s.' % type(stmt))
        # Bug fix: the class docstring documents a ``model`` attribute, but
        # the previous implementation returned the text without ever
        # assigning it.
        self.model = ' '.join(stmt_strs) if stmt_strs else ''
        return self.model
def _assemble_agent_str(agent):
    """Assemble an Agent object to text.

    The description is built in stages: mutations, subcellular location,
    bound/unbound partners, modification state and an active/inactive
    prefix.  Stages whose conditions are absent on the Agent are skipped.
    """
    agent_str = agent.name
    # Handle mutation conditions
    if agent.mutations:
        mut_strs = []
        for mut in agent.mutations:
            # Missing mutation parts render as empty strings, so the
            # output may be e.g. 'V600E', 'V600' or '600E'.
            res_to = mut.residue_to if mut.residue_to else ''
            res_from = mut.residue_from if mut.residue_from else ''
            pos = mut.position if mut.position else ''
            mut_str = '%s%s%s' % (res_from, pos, res_to)
            mut_strs.append(mut_str)
        mut_strs = '/'.join(mut_strs)
        agent_str = '%s-%s' % (agent_str, mut_strs)
    # Handle location
    if agent.location is not None:
        agent_str += ' in the ' + agent.location
    # Nothing else to describe: return early.
    if not agent.mods and not agent.bound_conditions and not agent.activity:
        return agent_str
    # Handle bound conditions
    bound_to = [bc.agent.name for bc in
                agent.bound_conditions if bc.is_bound]
    not_bound_to = [bc.agent.name for bc in
                    agent.bound_conditions if not bc.is_bound]
    if bound_to:
        agent_str += ' bound to ' + _join_list(bound_to)
        if not_bound_to:
            agent_str += ' and not bound to ' +\
                _join_list(not_bound_to)
    else:
        if not_bound_to:
            agent_str += ' not bound to ' +\
                _join_list(not_bound_to)
    # Handle modification conditions
    if agent.mods:
        # Special case: a single modification without a position becomes
        # a prefix (e.g. 'phosphorylated X') rather than a suffix clause.
        if len(agent.mods) == 1 and agent.mods[0].position is None:
            prefix = _mod_state_str(agent.mods[0].mod_type)
            if agent.mods[0].residue is not None:
                residue_str =\
                    ist.amino_acids[agent.mods[0].residue]['full_name']
                prefix = residue_str + '-' + prefix
            agent_str = prefix + ' ' + agent_str
        else:
            if agent.bound_conditions:
                agent_str += ' and'
            # NOTE(review): the state verb is taken from the first
            # modification only, even when the mods differ in type.
            agent_str += ' %s on ' % _mod_state_str(agent.mods[0].mod_type)
            mod_lst = []
            for m in agent.mods:
                if m.position is None:
                    if m.residue is not None:
                        residue_str =\
                            ist.amino_acids[m.residue]['full_name']
                        mod_lst.append(residue_str)
                    else:
                        mod_lst.append('an unknown residue')
                elif m.position is not None and m.residue is None:
                    mod_lst.append('amino acid %s' % m.position)
                else:
                    mod_lst.append(m.residue + m.position)
            agent_str += _join_list(mod_lst)
    # Handle activity conditions
    if agent.activity is not None:
        # TODO: handle activity types
        if agent.activity.is_active:
            prefix = 'active'
        else:
            prefix = 'inactive'
        agent_str = prefix + ' ' + agent_str
    return agent_str
def _join_list(lst):
"""Join a list of words in a gramatically correct way."""
if len(lst) > 2:
s = ', '.join(lst[:-1])
s += ' and ' + lst[-1]
elif len(lst) == 2:
s = lst[0] + ' and ' + lst[1]
elif len(lst) == 1:
s = lst[0]
else:
s = ''
return s
def _assemble_activeform(stmt):
    """Assemble ActiveForm statements into text.

    Falls back to a generic phrasing for activity types without a
    dedicated wording (the previous implementation left ``stmt_str``
    unbound and raised NameError for unknown activity types).
    """
    subj_str = _assemble_agent_str(stmt.agent)
    if stmt.is_active:
        is_active_str = 'active'
    else:
        is_active_str = 'inactive'
    if stmt.activity == 'activity':
        stmt_str = subj_str + ' is ' + is_active_str
    elif stmt.activity == 'kinase':
        stmt_str = subj_str + ' is kinase-' + is_active_str
    elif stmt.activity == 'phosphatase':
        stmt_str = subj_str + ' is phosphatase-' + is_active_str
    elif stmt.activity == 'catalytic':
        stmt_str = subj_str + ' is catalytically ' + is_active_str
    elif stmt.activity == 'transcription':
        stmt_str = subj_str + ' is transcriptionally ' + is_active_str
    elif stmt.activity == 'gtpbound':
        stmt_str = subj_str + ' is GTP-bound ' + is_active_str
    else:
        # Unrecognized activity type: use the generic phrasing instead of
        # crashing on an undefined variable.
        stmt_str = subj_str + ' is ' + is_active_str
    return _make_sentence(stmt_str)
def _assemble_modification(stmt):
    """Assemble Modification statements into text."""
    sub_str = _assemble_agent_str(stmt.sub)
    if stmt.enz is None:
        # No enzyme: describe the substrate's state passively.
        stmt_str = sub_str + ' is ' + _mod_state_stmt(stmt)
    else:
        enz_str = _assemble_agent_str(stmt.enz)
        if _get_is_direct(stmt):
            link = ' ' + _mod_process_verb(stmt) + ' '
        else:
            link = ' leads to the ' + _mod_process_noun(stmt) + ' of '
        stmt_str = enz_str + link + sub_str
    # Optional residue/position suffix.
    if stmt.residue is None:
        mod_str = ''
    elif stmt.position is None:
        mod_str = 'on ' + ist.amino_acids[stmt.residue]['full_name']
    else:
        mod_str = 'on ' + stmt.residue + stmt.position
    return _make_sentence(stmt_str + ' ' + mod_str)
def _assemble_complex(stmt):
    """Assemble Complex statements into text."""
    members = [_assemble_agent_str(member) for member in stmt.members]
    # The first member is the subject; the rest are joined as objects.
    sentence = '%s binds %s' % (members[0], _join_list(members[1:]))
    return _make_sentence(sentence)
def _assemble_autophosphorylation(stmt):
    """Assemble Autophosphorylation statements into text."""
    stmt_str = _assemble_agent_str(stmt.enz) + ' phosphorylates itself'
    # Optional residue/position suffix.
    if stmt.residue is None:
        mod_str = ''
    elif stmt.position is None:
        mod_str = 'on ' + ist.amino_acids[stmt.residue]['full_name']
    else:
        mod_str = 'on ' + stmt.residue + stmt.position
    return _make_sentence(stmt_str + ' ' + mod_str)
def _assemble_regulate_activity(stmt):
    """Assemble RegulateActivity statements into text."""
    subj_str = _assemble_agent_str(stmt.subj)
    obj_str = _assemble_agent_str(stmt.obj)
    verb = ' activates ' if stmt.is_activation else ' inhibits '
    return _make_sentence(subj_str + verb + obj_str)
def _assemble_regulate_amount(stmt):
    """Assemble RegulateAmount statements into text."""
    obj_str = _assemble_agent_str(stmt.obj)
    increases = isinstance(stmt, ist.IncreaseAmount)
    decreases = isinstance(stmt, ist.DecreaseAmount)
    if stmt.subj is not None:
        subj_str = _assemble_agent_str(stmt.subj)
        if increases:
            rel_str = ' increases the amount of '
        elif decreases:
            rel_str = ' decreases the amount of '
        stmt_str = subj_str + rel_str + obj_str
    else:
        # Without a subject, use the passive form.
        if increases:
            stmt_str = obj_str + ' is produced'
        elif decreases:
            stmt_str = obj_str + ' is degraded'
    return _make_sentence(stmt_str)
def _assemble_translocation(stmt):
    """Assemble Translocation statements into text."""
    parts = [_assemble_agent_str(stmt.agent), ' translocates']
    if stmt.from_location is not None:
        parts.append(' from the ' + stmt.from_location)
    if stmt.to_location is not None:
        parts.append(' to the ' + stmt.to_location)
    return _make_sentence(''.join(parts))
def _assemble_gap(stmt):
    """Assemble Gap statements into text."""
    sentence = '%s is a GAP for %s' % (_assemble_agent_str(stmt.gap),
                                       _assemble_agent_str(stmt.ras))
    return _make_sentence(sentence)
def _assemble_gef(stmt):
    """Assemble Gef statements into text."""
    sentence = '%s is a GEF for %s' % (_assemble_agent_str(stmt.gef),
                                       _assemble_agent_str(stmt.ras))
    return _make_sentence(sentence)
def _make_sentence(txt):
"""Make a sentence from a piece of text."""
#Make sure first letter is capitalized
txt = txt.strip(' ')
txt = txt[0].upper() + txt[1:] + '.'
return txt
def _get_is_direct(stmt):
'''Returns true if there is evidence that the statement is a direct
interaction. If any of the evidences associated with the statement
indicates a direct interatcion then we assume the interaction
is direct. If there is no evidence for the interaction being indirect
then we default to direct.'''
any_indirect = False
for ev in stmt.evidence:
if ev.epistemics.get('direct') is True:
return True
elif ev.epistemics.get('direct') is False:
# This guarantees that we have seen at least
# some evidence that the statement is indirect
any_indirect = True
if any_indirect:
return False
return True
def _get_is_hypothesis(stmt):
'''Returns true if there is evidence that the statement is only
hypothetical. If all of the evidences associated with the statement
indicate a hypothetical interaction then we assume the interaction
is hypothetical.'''
for ev in stmt.evidence:
if not ev.epistemics.get('hypothesis') is True:
return True
return False
def _get_is_hypothesis_adverb(stmt):
    """Return the adverb (with surrounding spaces) for hypothetical
    statements, or the empty string otherwise."""
    return ' hypothetically ' if _get_is_hypothesis(stmt) else ''
def _mod_process_verb(stmt):
    """Return the present-tense verb for a modification statement."""
    key = stmt.__class__.__name__.lower()
    return mod_process_prefix.get(key)
def _mod_process_noun(stmt):
mod_name = stmt.__class__.__name__.lower()
return mod_name
def _mod_state_stmt(stmt):
    """Return the past participle describing a modification statement."""
    key = stmt.__class__.__name__.lower()
    return mod_state_prefix.get(key)
def _mod_state_str(s):
    """Map a modification-type name to its past-participle form."""
    return mod_state_prefix.get(s)
# Maps a modification statement's lower-cased class name (or mod_type
# string) to the English past participle describing the modified state,
# e.g. 'phosphorylation' -> 'phosphorylated'.
mod_state_prefix = {
    'phosphorylation': 'phosphorylated',
    'dephosphorylation': 'dephosphorylated',
    'ubiquitination': 'ubiquitinated',
    'deubiquitination': 'deubiquitinated',
    'acetylation': 'acetylated',
    'deacetylation': 'deacetylated',
    'hydroxylation': 'hydroxylated',
    'dehydroxylation': 'dehydroxylated',
    'sumoylation': 'sumoylated',
    'desumoylation': 'desumoylated',
    'farnesylation': 'farnesylated',
    'defarnesylation': 'defarnesylated',
    'glycosylation': 'glycosylated',
    'deglycosylation': 'deglycosylated',
    'ribosylation': 'ribosylated',
    'deribosylation': 'deribosylated',
    'modification': 'modified',
    }
mod_process_prefix = {
'phosphorylation': 'phosphorylates',
'dephosphorylation': 'dephosphorylates',
'ubiquitination': 'ubiquitinates',
'deubiquitination': 'deubiquitinates',
'acetylation': 'acetylates',
'deacetylation': 'deacetylates',
'hydroxylation': 'hydroxylates',
'dehydroxylation': 'dehydroxylates',
| |
import datetime
from unittest import mock
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, Permission
from django.core.cache import cache
from django.db import IntegrityError
from django.test import TestCase, override_settings
from django.utils import timezone
from legacysms import models as legacymodels
from mediaplatform_jwp.models import CachedResource
from .. import models
# The user model configured for this Django project.
User = get_user_model()
class ModelTestCase(TestCase):
    """Shared base for model permission tests.

    Subclasses set ``model`` to the model class under test.  The
    ``assert_user_*`` helpers verify access both through the queryset
    filtering API (``viewable_by_user``/``editable_by_user``) and the
    annotation API (``annotate_viewable``/``annotate_editable``), so one
    assertion exercises both code paths.  The four public helpers were
    near-duplicates and now share a single implementation.
    """
    fixtures = ['mediaplatform/tests/fixtures/test_data.yaml']

    def setUp(self):
        self.user = User.objects.get(username='testuser')
        # Patch the lookup helper so each test controls the group and
        # institution memberships reported for the user.
        self.lookup_groupids_and_instids_for_user_patcher = mock.patch(
            'mediaplatform.models._lookup_groupids_and_instids_for_user')
        self.lookup_groupids_and_instids_for_user = (
            self.lookup_groupids_and_instids_for_user_patcher.start())
        self.lookup_groupids_and_instids_for_user.return_value = ([], [])
        self.addCleanup(self.lookup_groupids_and_instids_for_user_patcher.stop)

    def _resolve_item(self, item_or_id):
        """Return a model instance given either an instance or an id."""
        if isinstance(item_or_id, str):
            return self.model.objects_including_deleted.get(id=item_or_id)
        return item_or_id

    def _assert_access(self, user, item_or_id, verb, expected):
        """Assert *user*'s access to an item through both query APIs.

        ``verb`` is 'viewable' or 'editable'.  Positive checks go through
        the default manager (excluding deleted objects) so a deleted item
        never counts as accessible; negative checks include deleted
        objects so the absence of access is total.
        """
        item = self._resolve_item(item_or_id)
        check = self.assertTrue if expected else self.assertFalse
        manager = (self.model.objects if expected
                   else self.model.objects_including_deleted)
        filtered = getattr(manager.all(), verb + '_by_user')(user)
        check(filtered.filter(id=item.id).exists())
        annotation = 'TEST_' + verb
        annotated = getattr(
            self.model.objects_including_deleted.all(),
            'annotate_' + verb)(user, name=annotation)
        check(getattr(annotated.get(id=item.id), annotation))

    def assert_user_cannot_view(self, user, item_or_id):
        self._assert_access(user, item_or_id, 'viewable', False)

    def assert_user_can_view(self, user, item_or_id):
        self._assert_access(user, item_or_id, 'viewable', True)

    def assert_user_cannot_edit(self, user, item_or_id):
        self._assert_access(user, item_or_id, 'editable', False)

    def assert_user_can_edit(self, user, item_or_id):
        self._assert_access(user, item_or_id, 'editable', True)
class MediaItemTest(ModelTestCase):
model = models.MediaItem
def test_creation(self):
"""A MediaItem object should be creatable with no field values."""
models.MediaItem.objects.create()
def test_no_deleted_in_objects(self):
"""The default queryset used by MediaItem.objects contains no deleted items."""
self.assertEqual(models.MediaItem.objects.filter(deleted_at__isnull=False).count(), 0)
def test_deleted_in_objects_including_deleted(self):
"""If we explicitly ask for deleted objects, we get them."""
self.assertGreater(
models.MediaItem.objects_including_deleted.filter(deleted_at__isnull=False).count(), 0)
def test_public_item_viewable_by_anon(self):
"""The public video is viewable by anonymous."""
self.assert_user_can_view(AnonymousUser(), 'public')
def test_signed_in_item_not_viewable_by_anon(self):
"""The signed in video is not viewable by anonymous."""
self.assert_user_cannot_view(AnonymousUser(), 'signedin')
def test_user_of_none_is_treated_as_anon(self):
"""
If a user of "None" is passed to viewable_by_user(), it is treated as the anonymous user.
"""
self.assert_user_can_view(None, 'public')
self.assert_user_cannot_view(None, 'signedin')
def test_signed_in_item_viewable_by_signed_in(self):
self.assert_user_can_view(self.user, 'signedin')
def test_public_item_viewable_by_signed_in(self):
self.assert_user_can_view(self.user, 'public')
def test_item_with_no_perms_not_viewable(self):
"""An item with empty permissions is not viewable by the anonymous or signed in user."""
self.assert_user_cannot_view(AnonymousUser(), 'emptyperm')
self.assert_user_cannot_view(self.user, 'emptyperm')
def test_item_with_matching_crsid_viewable(self):
item = models.MediaItem.objects.get(id='emptyperm')
self.assert_user_cannot_view(self.user, item)
item.view_permission.crsids.extend(['spqr1', self.user.username, 'abcd1'])
item.view_permission.save()
self.assert_user_can_view(self.user, item)
def test_item_with_matching_lookup_groups_viewable(self):
"""
A user who has at least one lookup group which is in the set of lookup groups for a media
item can view it.
"""
self.lookup_groupids_and_instids_for_user.return_value = ['A', 'B', 'C'], []
item = models.MediaItem.objects.get(id='emptyperm')
self.assert_user_cannot_view(self.user, item)
item.view_permission.lookup_groups.extend(['X', 'Y', 'A', 'B', 'Z'])
item.view_permission.save()
self.assert_user_can_view(self.user, item)
def test_item_with_matching_lookup_insts_viewable(self):
"""
A user who has at least one lookup institution which is in the set of lookup institutions
for a media item can view it.
"""
self.lookup_groupids_and_instids_for_user.return_value = [], ['A', 'B', 'C']
item = models.MediaItem.objects.get(id='emptyperm')
self.assert_user_cannot_view(self.user, item)
item.view_permission.lookup_insts.extend(['X', 'Y', 'A', 'B', 'Z'])
item.view_permission.save()
self.assert_user_can_view(self.user, item)
def test_public_item_editable_by_anon(self):
"""An item with public editable permissions is still not editable by anonymous."""
item = models.MediaItem.objects.get(id='emptyperm')
self.assert_user_cannot_edit(AnonymousUser(), item)
self.assert_user_cannot_edit(None, item)
item.channel.edit_permission.is_public = True
item.channel.edit_permission.save()
self.assert_user_cannot_edit(AnonymousUser(), item)
self.assert_user_cannot_edit(None, item)
def test_signed_in_edit_permissions(self):
"""An item with signed in edit permissions is not editable by anonymous."""
item = models.MediaItem.objects.get(id='emptyperm')
self.assert_user_cannot_edit(AnonymousUser(), item)
self.assert_user_cannot_edit(None, item)
self.assert_user_cannot_edit(self.user, item)
item.channel.edit_permission.is_signed_in = True
item.channel.edit_permission.save()
self.assert_user_cannot_edit(AnonymousUser(), item)
self.assert_user_cannot_edit(None, item)
self.assert_user_can_edit(self.user, item)
def test_item_with_no_perms_not_editable(self):
"""An item with empty permissions is not editable by the anonymous or signed in user."""
self.assert_user_cannot_edit(AnonymousUser(), 'emptyperm')
self.assert_user_cannot_edit(self.user, 'emptyperm')
def test_item_with_matching_crsid_editable(self):
item = models.MediaItem.objects.get(id='emptyperm')
self.assert_user_cannot_edit(self.user, item)
item.channel.edit_permission.crsids.extend(['spqr1', self.user.username, 'abcd1'])
item.channel.edit_permission.save()
self.assert_user_can_edit(self.user, item)
def test_item_with_matching_lookup_groups_editable(self):
"""
A user who has at least one lookup group which is in the set of lookup groups for a media
item can edit it.
"""
self.lookup_groupids_and_instids_for_user.return_value = ['A', 'B', 'C'], []
item = models.MediaItem.objects.get(id='emptyperm')
self.assert_user_cannot_edit(self.user, item)
item.channel.edit_permission.lookup_groups.extend(['X', 'Y', 'A', 'B', 'Z'])
item.channel.edit_permission.save()
self.assert_user_can_edit(self.user, item)
def test_item_with_matching_lookup_insts_editable(self):
"""
A user who has at least one lookup institution which is in the set of lookup institutions
for a media item can edit it.
"""
self.lookup_groupids_and_instids_for_user.return_value = [], ['A', 'B', 'C']
item = models.MediaItem.objects.get(id='emptyperm')
self.assert_user_cannot_edit(self.user, item)
item.channel.edit_permission.lookup_insts.extend(['X', 'Y', 'A', 'B', 'Z'])
item.channel.edit_permission.save()
self.assert_user_can_edit(self.user, item)
def test_view_permission_created(self):
"""A new MediaItem has a view permission created on save()."""
item = models.MediaItem.objects.create()
self.assertIsNotNone(models.MediaItem.objects.get(id=item.id).view_permission)
def test_view_permission_not_re_created(self):
"""The view_permission is not changes if a MediaItem is updated."""
item = models.MediaItem.objects.create()
permission_id_1 = models.MediaItem.objects.get(id=item.id).view_permission.id
item = models.MediaItem.objects.get(id=item.id)
item.title = 'changed'
item.save()
permission_id_2 = models.MediaItem.objects.get(id=item.id).view_permission.id
self.assertEquals(permission_id_1, permission_id_2)
def test_sms_item_not_editable(self):
"""An item with associated SMS media item or channel is not editable."""
item = models.MediaItem.objects.get(id='emptyperm')
item.channel.edit_permission.is_public = True
item.channel.edit_permission.save()
self.assert_user_can_edit(self.user, item)
# If there is a SMS media item, the editable permission goes away
sms = legacymodels.MediaItem.objects.create(id=12345, item=item)
self.assert_user_cannot_edit(self.user, item)
sms.delete()
self.assert_user_can_edit(self.user, item)
# If there is a SMS collection, the editable permission goes away
sms = legacymodels.Collection.objects.create(id=12345, channel=item.channel)
self.assert_user_cannot_edit(self.user, item)
sms.delete()
self.assert_user_can_edit(self.user, item)
def test_not_yet_published(self):
"""Check that an item with a future published_at is not visible"""
item = models.MediaItem.objects.get(id='public')
item.published_at = datetime.datetime.now() + datetime.timedelta(days=1)
item.save()
self.assert_user_cannot_view(self.user, item)
def test_has_been_published(self):
"""Check that an item with a past published_at is visible"""
item = models.MediaItem.objects.get(id='public')
item.published_at = datetime.datetime.now() - datetime.timedelta(days=1)
item.save()
self.assert_user_can_view(self.user, item)
def test_published_at_is_null(self):
"""Check that an item with a null published_at is visible"""
self.assert_user_can_view(self.user, 'public')
def test_visible_if_not_published_but_editable(self):
"""Check that an editable item with a future published_at is visible"""
item = models.MediaItem.objects.get(id='public')
item.published_at = datetime.datetime.now() + datetime.timedelta(days=1)
item.channel.edit_permission.crsids.append(self.user.username)
item.channel.edit_permission.save()
item.save()
self.assert_user_can_view(self.user, item)
def test_fetched_size_success(self):
""" check that a size is successfully fetched """
resource = models.MediaItem.objects.get(id='public').jwp.resource
resource.data['size'] = 54321
resource.save()
item = models.MediaItem.objects.get(id='public')
self.assertEqual(item.fetched_size, 54321)
def test_fetched_size_no_size(self):
""" check that fetched_size doesn't error when the resource.data doesn't have a size """
CachedResource.objects.create(key='jwpvidpublic', type='video', data={})
item = models.MediaItem.objects.get(id='public')
self.assertEqual(item.fetched_size, 0)
def test_fetched_size_no_resource(self):
""" check that fetched_size doesn't error when the Video doesn't have a resource """
item = models.MediaItem.objects.get(id='public')
self.assertEqual(item.fetched_size, 0)
def test_fetched_size_no_jwp(self):
""" check that fetched_size doesn't error when the item doesn't have a Video """
item = models.MediaItem.objects.get(id='signedin')
self.assertEqual(item.fetched_size, 0)
def test_super_viewer(self):
"""A user with mediaplatform.view_mediaitem permission can always view."""
new_user = get_user_model().objects.create(username='newuser')
item = models.MediaItem.objects.get(id='signedin')
item.view_permission.reset()
item.view_permission.save()
self.assert_user_cannot_view(new_user, item)
# add the permission to the user
view_permission = Permission.objects.get(
codename='view_mediaitem', content_type__app_label='mediaplatform')
new_user.user_permissions.add(view_permission)
new_user.save()
# we need to re-fetch the suer to avoid the permissions cache
new_user = get_user_model().objects.get(username=new_user.username)
self.assertTrue(new_user.has_perm('mediaplatform.view_mediaitem'))
# check that the user can now view the item
self.assert_user_can_view(new_user, item)
def test_super_downloaded(self):
"""A user with mediaplatform.download_mediaitem permission can always download."""
new_user = get_user_model().objects.create(username='newuser')
item = models.MediaItem.objects.get(id='signedin')
item.downloadable = False
self.assert_user_cannot_download(new_user, item)
# add the permission to the user
view_permission = Permission.objects.get(
codename='download_mediaitem', content_type__app_label='mediaplatform')
new_user.user_permissions.add(view_permission)
new_user.save()
# we need to re-fetch the suer to avoid the permissions cache
new_user = get_user_model().objects.get(username=new_user.username)
self.assertTrue(new_user.has_perm('mediaplatform.download_mediaitem'))
# check that the user can now view the item
self.assert_user_can_download(new_user, item)
def test_published_at_in_future_not_published(self):
"""An item with publication date in the future is not published."""
item = models.MediaItem.objects.get(id='public')
self.assert_user_can_view(self.user, item)
item.published_at = timezone.now() + datetime.timedelta(days=1)
item.save()
self.assert_user_cannot_view(self.user, item)
def test_no_jwp_video_required_for_publication(self):
"""An item does not need a JWP video to be published."""
item = models.MediaItem.objects.get(id='public')
self.assert_user_can_view(self.user, item)
item.jwp.delete()
self.assert_user_can_view(self.user, item)
def test_publications_requires_ready_jwp_video(self):
"""An item with a JWP video must have that video be "ready" to be published."""
item = models.MediaItem.objects.get(id='public')
self.assert_user_can_view(self.user, item)
item.jwp.resource.data['status'] = 'error'
item.jwp.resource.save()
self.assert_user_cannot_view(self.user, item)
def test_future_published_items_visible_to_editor(self):
"""An unpublished item can still be seen by someone who can edit it."""
item = models.MediaItem.objects.get(id='public')
item.channel.edit_permission.reset()
item.channel.edit_permission.crsids.append(self.user.username)
item.channel.edit_permission.save()
self.assert_user_can_view(None, item)
self.assert_user_can_view(self.user, item)
item.published_at = timezone.now() + datetime.timedelta(days=1)
item.save()
self.assert_user_cannot_view(None, item)
self.assert_user_can_view(self.user, item)
def test_items_with_errors_visible_to_editor(self):
"""An item with JWP error can still be seen by someone who can edit it."""
item = models.MediaItem.objects.get(id='public')
item.channel.edit_permission.reset()
item.channel.edit_permission.crsids.append(self.user.username)
item.channel.edit_permission.save()
self.assert_user_can_view(None, item)
self.assert_user_can_view(self.user, item)
item.jwp.resource.data['status'] = 'error'
item.jwp.resource.save()
item.save()
self.assert_user_cannot_view(None, item)
self.assert_user_can_view(self.user, item)
def assert_user_cannot_view(self, user, item_or_id):
if isinstance(item_or_id, str):
item_or_id = models.MediaItem.objects_including_deleted.get(id=item_or_id)
self.assertFalse(
models.MediaItem.objects_including_deleted.all()
.filter(id=item_or_id.id)
.viewable_by_user(user)
.exists()
)
self.assertFalse(
models.MediaItem.objects_including_deleted.all()
.annotate_viewable(user, name='TEST_viewable')
.get(id=item_or_id.id)
.TEST_viewable
)
def assert_user_can_view(self, user, item_or_id):
if isinstance(item_or_id, str):
item_or_id = models.MediaItem.objects_including_deleted.get(id=item_or_id)
self.assertTrue(
models.MediaItem.objects.all().viewable_by_user(user).filter(id=item_or_id.id).exists()
)
self.assertTrue(
models.MediaItem.objects_including_deleted.all()
.annotate_viewable(user, name='TEST_viewable')
.get(id=item_or_id.id)
.TEST_viewable
)
def assert_user_cannot_edit(self, user, item_or_id):
    """Assert *user* cannot edit the item via either the filter or the annotation API."""
    if isinstance(item_or_id, str):
        item_or_id = models.MediaItem.objects_including_deleted.get(id=item_or_id)
    everything = models.MediaItem.objects_including_deleted.all()
    # The editable_by_user() filter must exclude the item.
    self.assertFalse(
        everything.filter(id=item_or_id.id).editable_by_user(user).exists()
    )
    # The annotation must mark the item as not editable.
    annotated_item = everything.annotate_editable(user, name='TEST_editable').get(id=item_or_id.id)
    self.assertFalse(annotated_item.TEST_editable)
def assert_user_can_edit(self, user, item_or_id):
    """Assert *user* can edit the item via both the filter and the annotation API."""
    if isinstance(item_or_id, str):
        item_or_id = models.MediaItem.objects_including_deleted.get(id=item_or_id)
    # The editable_by_user() filter on the default manager must include the item.
    editable = models.MediaItem.objects.all().editable_by_user(user)
    self.assertTrue(editable.filter(id=item_or_id.id).exists())
    # The annotation must mark the item as editable.
    annotated_item = (
        models.MediaItem.objects_including_deleted.all()
        .annotate_editable(user, name='TEST_editable')
        .get(id=item_or_id.id)
    )
    self.assertTrue(annotated_item.TEST_editable)
def assert_user_cannot_download(self, user, item_or_id):
    """Assert *user* cannot download the item via either the filter or the annotation API."""
    if isinstance(item_or_id, str):
        item_or_id = models.MediaItem.objects_including_deleted.get(id=item_or_id)
    everything = models.MediaItem.objects_including_deleted.all()
    # The downloadable_by_user() filter must exclude the item.
    self.assertFalse(
        everything.filter(id=item_or_id.id).downloadable_by_user(user).exists()
    )
    # The annotation must mark the item as not downloadable.
    annotated_item = everything.annotate_downloadable(user, name='TEST_downloadable').get(id=item_or_id.id)
    self.assertFalse(annotated_item.TEST_downloadable)
def assert_user_can_download(self, user, item_or_id):
    """Assert *user* can download the item via both the filter and the annotation API."""
    if isinstance(item_or_id, str):
        item_or_id = models.MediaItem.objects_including_deleted.get(id=item_or_id)
    # The downloadable_by_user() filter on the default manager must include the item.
    downloadable = models.MediaItem.objects.all().downloadable_by_user(user)
    self.assertTrue(downloadable.filter(id=item_or_id.id).exists())
    # The annotation must mark the item as downloadable.
    annotated_item = (
        models.MediaItem.objects_including_deleted.all()
        .annotate_downloadable(user, name='TEST_downloadable')
        .get(id=item_or_id.id)
    )
    self.assertTrue(annotated_item.TEST_downloadable)
class PermissionTest(TestCase):
    def test_creation(self):
        """It must be possible to create a Permission without supplying any field values."""
        # Success is simply that no exception is raised.
        models.Permission.objects.create()
class LookupTest(TestCase):
PERSON_FIXTURE = {
'groups': [
{'groupid': '0123'},
| |
<gh_stars>1-10
# © 2022 <NAME> <<EMAIL>>
# MIT-licensed
import time
import machine
import struct
import uctypes
from micropython import const
# This driver was written from scratch using datasheets and looking at other drivers listed here.
# Required copyright notices of those drivers are included below as necessary.
# This is Pimoroni driver, with Adafruit header (MIT, notice included below):
# https://github.com/pimoroni/st7789-python/blob/master/library/ST7789/__init__.py
# This is c++ Adafruit driver (MIT, notice included below):
# https://github.com/adafruit/Adafruit-ST7735-Library/blob/master/Adafruit_ST7789.cpp
# independent (?) micropython implementation (license unspecified):
# https://techatronic.com/st7789-display-pi-pico/
# st77xx c driver (for uPy), with simplified init sequence (MIT, notice included below):
# https://github.com/szampardi/st77xx_mpy
#
# This is a library for several Adafruit displays based on ST77* drivers.
#
# Works with the Adafruit 1.8" TFT Breakout w/SD card
# ----> http://www.adafruit.com/products/358
# The 1.8" TFT shield
# ----> https://www.adafruit.com/product/802
# The 1.44" TFT breakout
# ----> https://www.adafruit.com/product/2088
# as well as Adafruit raw 1.8" TFT display
# ----> http://www.adafruit.com/products/618
#
# Check out the links above for our tutorials and wiring diagrams.
# These displays use SPI to communicate, 4 or 5 pins are required to
# interface (RST is optional).
#
# Adafruit invests time and resources providing this open source code,
# please support Adafruit and open-source hardware by purchasing
# products from Adafruit!
#
# Written by <NAME>/Ladyada for Adafruit Industries.
# MIT license, all text above must be included in any redistribution.
#
#
# Copyright (c) 2019 <NAME>
#
#
# Copyright (c) 2014 Adafruit Industries
# Author: <NAME>
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# --- ST77xx command opcodes shared by the ST7735/ST7789 controllers --------
ST77XX_NOP = const(0x00)
ST77XX_SWRESET = const(0x01)
ST77XX_RDDID = const(0x04)
ST77XX_RDDST = const(0x09)
ST77XX_SLPIN = const(0x10)
ST77XX_SLPOUT = const(0x11)
ST77XX_PTLON = const(0x12)
ST77XX_NORON = const(0x13)
ST77XX_INVOFF = const(0x20)
ST77XX_INVON = const(0x21)
ST77XX_DISPOFF = const(0x28)
ST77XX_DISPON = const(0x29)
ST77XX_CASET = const(0x2A)  # column address set
ST77XX_RASET = const(0x2B)  # row address set
ST77XX_RAMWR = const(0x2C)  # frame memory write
ST77XX_RAMRD = const(0x2E)  # frame memory read
ST77XX_PTLAR = const(0x30)
ST77XX_MADCTL = const(0x36)  # memory data access control (rotation/mirroring/color order)
ST77XX_COLMOD = const(0x3A)  # interface pixel format
ST7789_WRCACE = const(0x55)
# --- Chip-specific registers; several have both ST7735 and ST7789 names ----
ST77XX_FRMCTR1 = const(0xB1)
ST77XX_FRMCTR2 = ST7789_PORCTRL = const(0xB2)
ST77XX_FRMCTR3 = const(0xB3)
ST77XX_INVCTR = const(0xB4)
ST7789_DISSET5 = const(0xB6)
ST7789_GCTRL = const(0xB7)
ST7789_GTADJ = const(0xB8)
ST7789_VCOMS = const(0xBB)
ST7735_PWCTR1 = ST7789_LCMCTRL = const(0xC0)
ST7735_PWCTR2 = ST7789_IDSET = const(0xC1)
ST7735_PWCTR3 = ST7789_VDVVRHEN = const(0xC2)
ST7735_PWCTR4 = ST7789_VRHS = const(0xC3)
ST7735_PWCTR5 = ST7789_VDVS = const(0xC4)
ST7735_VMCTR1 = ST7789_VMCTR1 = const(0xC5)
ST7789_FRCTRL2 = const(0xC6)
ST7789_CABCCTRL = const(0xC7)
ST7789_PWCTRL1 = const(0xD0)
ST77XX_RDID1 = const(0xDA)
ST77XX_RDID2 = const(0xDB)
ST77XX_RDID3 = const(0xDC)
ST77XX_RDID4 = const(0xDD)
ST7789_GMCTRP1 = ST7789_PVGAMCTRL = const(0xE0)  # positive gamma control
ST7789_GMCTRN1 = ST7789_NVGAMCTRL = const(0xE1)  # negative gamma control
ST7735_PWCTR6 = ST7789_PWCTR6 = const(0xFC)
# --- MADCTL bit flags ------------------------------------------------------
ST77XX_MADCTL_MY = const(0x80)  # page address order (0: top to bottom; 1: bottom to top)
ST77XX_MADCTL_MX = const(0x40)  # column address order (0: left to right; 1: right to left)
ST77XX_MADCTL_MV = const(0x20)  # page/column order (0: normal mode 1; reverse mode)
ST77XX_MADCTL_ML = const(0x10)  # line address order (0: refresh top to bottom; 1: refresh bottom to top)
ST77XX_MADCTL_BGR = const(0x08)  # colors are BGR (not RGB)
ST77XX_MADCTL_RTL = const(0x04)  # refresh right to left
# MADCTL values for the four display orientations, indexed by rotation 0-3.
ST77XX_MADCTL_ROTS=(
    const(0x00), # 0 = portrait
    const(ST77XX_MADCTL_MX | ST77XX_MADCTL_MV), # 1 = landscape
    const(ST77XX_MADCTL_MY | ST77XX_MADCTL_MX), # 2 = inverted portrait
    const(ST77XX_MADCTL_MY | ST77XX_MADCTL_MV), # 3 = inverted landscape
)
# --- COLMOD pixel-format values --------------------------------------------
ST77XX_COLOR_MODE_65K = const(0x50)
ST77XX_COLOR_MODE_262K = const(0x60)
ST77XX_COLOR_MODE_12BIT = const(0x03)
ST77XX_COLOR_MODE_16BIT = const(0x05)
ST77XX_COLOR_MODE_18BIT = const(0x06)
ST77XX_COLOR_MODE_16M = const(0x07)
# Per-panel (width, height, model) -> [(col_offset, row_offset)] for each of
# the four rotations; compensates for panels whose visible area does not start
# at RAM address (0,0).
ST77XX_COL_ROW_MODEL_START_ROTMAP={
    # ST7789
    (240,320,None):[(0,0),(0,0),(0,0),(0,0)],
    (240,240,None):[(0,0),(0,0),(0,80),(80,0)],
    (135,240,None):[(52,40),(40,53),(53,40),(40,52)],
    # ST7735
    (128,160,'blacktab'):[(0,0),(0,0),(0,0),(0,0)],
    (128,160,'redtab'):[(2,1),(1,2),(2,1),(1,2)],
}
# --- Rotation constants for the *rot* constructor parameter ----------------
ST77XX_PORTRAIT = const(0)
ST77XX_LANDSCAPE = const(1)
ST77XX_INV_PORTRAIT = const(2)
ST77XX_INV_LANDSCAPE = const(3)
class St77xx_hw(object):
    def __init__(self, *, cs, dc, spi, res, suppRes, bl=None, model=None, suppModel=(), rst=None, rot=ST77XX_LANDSCAPE, bgr=False, rp2_dma=None):
        '''
        This is an abstract low-level driver for the ST77xx controllers, not to be instantiated directly.
        Derived classes implement chip-specific bits. The following parameters are recognized:

        * *cs*: chip select pin (= slave select, SS)
        * *dc*: data/command pin
        * *bl*: backlight PWM pin (optional)
        * *model*: display model, to account for variations in products
        * *rst*: optional reset pin
        * *res*: resolution tuple; (width,height) with zero rotation
        * *rot*: display orientation (0: portrait, 1: landscape, 2: inverted portrait, 3: inverted landscape); the constants ST77XX_PORTRAIT, ST77XX_LANDSCAPE, ST77XX_INV_PORTRAIT, ST77XX_INV_LANDSCAPE may be used.
        * *bgr*: color order if BGR (not RGB)
        * *rp2_dma*: optional DMA object for the rp2 port

        Subclass constructors (implementing concrete chip) set in addition the following, not to be used directly:

        * *suppModel*: models supported by the hardware driver
        * *suppRes*: resolutions supported by the hardware driver, as list of (width,height) tuples
        '''
        # Small reusable scratch buffers so register writes don't allocate.
        self.buf1 = bytearray(1)
        self.buf2 = bytearray(2)
        self.buf4 = bytearray(4)
        # Accept either raw pin numbers or pre-constructed Pin objects.
        self.cs, self.dc, self.rst = [(machine.Pin(p, machine.Pin.OUT) if isinstance(p, int) else p) for p in (cs, dc, rst)]
        self.bl = bl
        if isinstance(self.bl, int):
            self.bl = machine.PWM(machine.Pin(self.bl, machine.Pin.OUT))
        elif isinstance(self.bl, machine.Pin):
            self.bl = machine.PWM(self.bl)
        assert isinstance(self.bl, (machine.PWM, type(None)))
        self.set_backlight(10)  # set some backlight
        self.rot = rot
        self.bgr = bgr
        self.width, self.height = (0, 0)  # set later in hard_reset->config->apply_rotation
        if res not in suppRes:
            raise ValueError('Unsupported resolution %s; the driver supports: %s.' % (str(res), ', '.join(str(r) for r in suppRes)))
        if suppModel and model not in suppModel:
            raise ValueError('Unsupported model %s; the driver supports: %s.' % (str(model), ', '.join(str(r) for r in suppModel)))
        self.res = res
        self.model = model
        self.rp2_dma = rp2_dma
        self.spi = spi
        self.hard_reset()

    def off(self):
        '''Turn the backlight off (display contents are preserved).'''
        self.set_backlight(0)

    def hard_reset(self):
        '''Pulse the reset pin (if present) and re-run the controller configuration.'''
        if self.rst:
            for v in (1, 0, 1):
                self.rst.value(v)
                time.sleep(.2)
        time.sleep(.2)
        self.config()

    def config(self):
        self.config_hw()  # defined in child classes
        self.apply_rotation(self.rot)

    def set_backlight(self, percent):
        '''Set backlight brightness in percent (0-100); no-op if no backlight pin was given.'''
        if self.bl is None:
            return
        # 655 = 65535 (full duty range of duty_u16) / 100.
        self.bl.duty_u16(percent * 655)

    def set_window(self, x, y, w, h):
        '''Set the controller drawing window, applying the per-model/rotation RAM offsets.'''
        c0, r0 = ST77XX_COL_ROW_MODEL_START_ROTMAP[self.res[0], self.res[1], self.model][self.rot % 4]
        struct.pack_into('>hh', self.buf4, 0, c0 + x, c0 + x + w - 1)
        self.write_register(ST77XX_CASET, self.buf4)
        struct.pack_into('>hh', self.buf4, 0, r0 + y, r0 + y + h - 1)
        self.write_register(ST77XX_RASET, self.buf4)

    def apply_rotation(self, rot):
        '''Set orientation *rot* (0-3), updating width/height and MADCTL accordingly.'''
        self.rot = rot
        # Odd rotations are landscape: swap the native width/height.
        if (self.rot % 2) == 0:
            self.width, self.height = self.res
        else:
            self.height, self.width = self.res
        self.write_register(ST77XX_MADCTL, bytes([(ST77XX_MADCTL_BGR if self.bgr else 0) | ST77XX_MADCTL_ROTS[self.rot % 4]]))

    def blit(self, x, y, w, h, buf, is_blocking=True):
        '''Copy the RGB565 pixel buffer *buf* into the (x, y, w, h) window, via DMA if available.'''
        self.set_window(x, y, w, h)
        if self.rp2_dma:
            self._rp2_write_register_dma(ST77XX_RAMWR, buf, is_blocking)
        else:
            self.write_register(ST77XX_RAMWR, buf)

    def clear(self, color):
        '''Fill the whole screen with a single RGB565 *color*.'''
        bs = 128  # write pixels in chunks; makes the fill much faster
        # BUGFIX: pack as unsigned ('>H'); colors with the top bit set (e.g.
        # 0xF800 pure red) exceed the signed short range and previously made
        # struct.pack_into('>h', ...) raise struct.error.
        struct.pack_into('>H', self.buf2, 0, color)
        buf = bs * bytes(self.buf2)
        npx = self.width * self.height
        self.set_window(0, 0, self.width, self.height)
        self.write_register(ST77XX_RAMWR, None)
        self.cs.value(0)
        self.dc.value(1)
        for _ in range(npx // bs):
            self.spi.write(buf)
        for _ in range(npx % bs):
            self.spi.write(self.buf2)
        self.cs.value(1)

    def write_register(self, reg, buf=None):
        '''Send command byte *reg*, optionally followed by data bytes *buf*.'''
        struct.pack_into('B', self.buf1, 0, reg)
        self.cs.value(0)
        self.dc.value(0)  # command mode
        self.spi.write(self.buf1)
        if buf is not None:
            self.dc.value(1)  # data mode
            self.spi.write(buf)
        self.cs.value(1)

    def _rp2_write_register_dma(self, reg, buf, is_blocking=True):
        '''Send command *reg* then stream *buf* via DMA. If *is_blocking* is False, the caller must call rp2_wait_dma explicitly.'''
        SPI1_BASE = 0x40040000  # FIXME: will be different for another SPI bus?
        SSPDR = 0x008
        self.rp2_dma.config(
            src_addr=uctypes.addressof(buf),
            dst_addr=SPI1_BASE + SSPDR,
            count=len(buf),
            src_inc=True,
            dst_inc=False,
            trig_dreq=self.rp2_dma.DREQ_SPI1_TX
        )
        struct.pack_into('B', self.buf1, 0, reg)
        self.cs.value(0)
        self.dc.value(0)
        self.spi.write(self.buf1)
        self.dc.value(1)
        self.rp2_dma.enable()
        if is_blocking:
            self.rp2_wait_dma()

    def rp2_wait_dma(self):
        '''
        Wait for rp2-port DMA transfer to finish; no-op unless self.rp2_dma is defined.
        Can be used as callback before accessing shared SPI bus e.g. with the xpt2046 driver.
        '''
        if self.rp2_dma is None:
            return
        while self.rp2_dma.is_busy():
            pass
        self.rp2_dma.disable()
        # wait to send last byte. It should take < 1uS @ 10MHz
        time.sleep_us(1)
        self.cs.value(1)

    def _run_seq(self, seq):
        '''
        Run sequence of (initialization) commands; those are given as list of tuples, which are either
        `(command,data)` or `(command,data,delay_ms)`
        '''
        for i, cmd in enumerate(seq):
            if len(cmd) == 2:
                (reg, data), delay = cmd, 0
            elif len(cmd) == 3:
                reg, data, delay = cmd
            else:
                raise ValueError('Command #%d has %d items (must be 2 or 3)' % (i, len(cmd)))
            self.write_register(reg, data)
            if delay > 0:
                time.sleep_ms(delay)
class St7735_hw(St77xx_hw):
'''There are several ST7735-based LCD models, we only tested the blacktab model really.'''
def __init__(self,res,model='greentab',**kw):
super().__init__(res=res,suppRes=[(128,160),],model=model,suppModel=['greentab','redtab','blacktab'],**kw)
def config_hw(self):
# mostly from here
# https://github.com/stechiez/raspberrypi-pico/blob/main/pico_st7735/st7735/ST7735.py
init7735r=[
# see here for explanations: https://github.com/adafruit/Adafruit-ST7735-Library/blob/master/Adafruit_ST7735.cpp
(ST77XX_SWRESET,None, 50),
(ST77XX_SLPOUT, None, 100),
(ST77XX_FRMCTR1,b'\x01\x2c\x2d'),
(ST77XX_FRMCTR2,b'\x01\x2c\x2d'),
(ST77XX_FRMCTR3,b'\x01\x2c\x2d\x01\x2c\x2d'),
(ST77XX_INVCTR,b'\x07'),
(ST7735_PWCTR1,b'\xa2\x02\xb4'),
(ST7735_PWCTR2,b'\xc5'),
(ST7735_PWCTR3,b'\x0a\x00'),
(ST7735_PWCTR4,b'\x8a\x2a'),
(ST7735_PWCTR5,b'\x8a\xee'),
(ST7735_VMCTR1,b'\x0e'),
(ST77XX_INVOFF,None),
# ST77XX_MADCTL: do later, depending on rotation
(ST77XX_COLMOD,bytes([ST77XX_COLOR_MODE_65K | ST77XX_COLOR_MODE_16BIT])),
(ST77XX_CASET,bytes([0x00,0x00,0x00,0x7f])),
(ST77XX_RASET,bytes([0x00,0x00,0x00,0x9f])),
# gamma adjustment: Waveshare values
(ST7789_GMCTRP1,b'\x0f\x1a\x0f\x18\x2f\x28\x20\x22\x1f\x1b\x23\x37\x00\x07\x02\x10'),
(ST7789_GMCTRN1,b'\x0f\x1b\x0f\x17\x33\x2c\x29\x2e\x30\x30\x39\x3f\x00\x07\x03\x10'),
(ST77XX_NORON, None, 10),
(ST77XX_DISPON, None,100),
]
# the "blue version" only (not tested)
init7735=[
# swreset
(ST77XX_SWRESET, None, 50),
# out of sleep mode
(ST77XX_SLPOUT, None, 100),
# RGB565
(ST77XX_COLMOD,bytes([ST77XX_COLOR_MODE_65K | ST77XX_COLOR_MODE_16BIT])),
# fast refresh (??)
(ST77XX_FRMCTR1,bytes([0x00,0x06,0x03])),
(ST77XX_MADCTL,bytes([0x03])),
(ST77XX_INVCTR,b'\x00'),
(ST7735_PWCTR1,b'\x02\x70'),
(ST7735_PWCTR2,b'\x05'),
(ST7735_PWCTR3,b'\x01\x02'),
(ST7735_VMCTR1,b'\x3c\x38'),
(ST7735_PWCTR6,b'\b11\b15'),
# (ST77XX_GMCTRP1,b'\
## memory access direction
# (ST77XX_MADCTL, bytes([ST77XX_MADCTL_ROTS[self.rot%4]]), 0),
# inverted on (?)
#(ST77XX_INVON, None, 10),
# normal | |
**kwargs) for t in times] for tr in traces
}
# Return a DataFrame where each column is a trace and time is the index.
return pd.DataFrame(trace_data, index=times)
def traces_to_table_data(*traces, **kwargs):
    """
    Create table of sample times and values for a set of traces.

    Args:
        *traces: A list of traces with samples. Can also contain non-Traces
            which will be ignored.

    Keywords Args:
        start_time: The earliest (left-most) time bound for the traces.
        stop_time: The latest (right-most) time bound for the traces.
        step: Set the time increment for filling in between sample times.
            If 0, then don't fill in between sample times.

    Returns:
        Table data and a list of headers for table columns.
    """
    # Keep only genuine Trace objects; anything else is silently dropped.
    actual_traces = [obj for obj in traces if isinstance(obj, Trace)]

    # The sample times shared by every row of the table.
    sample_times = _get_sample_times(*actual_traces, **kwargs)

    # Each row is [time, value-of-trace-0, value-of-trace-1, ...].
    table_data = [
        [t] + [trc.get_disp_value(t, **kwargs) for trc in actual_traces]
        for t in sample_times
    ]
    headers = ["Time"] + [trc.name for trc in actual_traces]
    return table_data, headers
def traces_to_table(*traces, **kwargs):
    """Render traces as a table string; the 'format' keyword selects the tabulate style."""
    tbl_fmt = kwargs.get("format", "simple")
    rows, hdrs = traces_to_table_data(*traces, **kwargs)
    return tabulate(tabular_data=rows, headers=hdrs, tablefmt=tbl_fmt)
def traces_to_text_table(*traces, **kwargs):
    """Print traces as a plain-text table (defaults to the 'simple' format)."""
    kwargs.setdefault("format", "simple")
    print(traces_to_table(*traces, **kwargs))
def traces_to_html_table(*traces, **kwargs):
    """Display traces as an HTML table in a Jupyter notebook."""
    kwargs["format"] = "html"
    html_markup = traces_to_table(*traces, **kwargs)
    # Hand the generated markup to IPython for rendering.
    DISP.display_html(DISP.HTML(html_markup))
def _interpolate_traces(*traces, times):
    """Insert interpolated sample points at each time in *times* into every given trace."""
    for trc in traces:
        trc.interpolate(times)
def traces_to_matplotlib(*traces, **kwargs):
    """
    Display waveforms stored in peekers in Jupyter notebook using matplotlib.

    Args:
        *traces: A list of traces to convert into matplotlib for display.
            Can also contain None which will create a blank trace.

    Keywords Args:
        start_time: The earliest (left-most) time bound for the waveform display.
        stop_time: The latest (right-most) time bound for the waveform display.
        title: String containing the title placed across the top of the display.
        title_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
        caption: String containing the title placed across the bottom of the display.
        caption_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
        tick: If true, times are shown at the tick marks of the display.
        tock: If true, times are shown between the tick marks of the display.
        grid_fmt (dict): https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D
        time_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
        width: The width of the waveform display in inches.
        height: The height of the waveform display in inches.

    Returns:
        Figure and axes created by matplotlib.pyplot.subplots.
    """
    num_traces = len(traces)
    trace_hgt = 0.5  # Default trace height in inches.
    cycle_wid = 0.5  # Default unit cycle width in inches.

    # Handle keyword args explicitly for Python 2 compatibility.
    # NOTE(review): these min()/max() defaults are evaluated eagerly, so the
    # call needs at least one real Trace even when both bounds are supplied.
    start_time = kwargs.pop(
        "start_time",
        min([trace.start_time() for trace in traces if isinstance(trace, Trace)]),
    )
    stop_time = kwargs.pop(
        "stop_time",
        max([trace.stop_time() for trace in traces if isinstance(trace, Trace)]),
    )
    title = kwargs.pop("title", "")
    # Caller-supplied formatting dicts are merged over these defaults.
    title_fmt = {"fontweight": "bold"}
    title_fmt.update(kwargs.pop("title_fmt", {}))
    caption = kwargs.pop("caption", "")
    caption_fmt = {"fontstyle": "oblique"}
    caption_fmt.update(kwargs.pop("caption_fmt", {}))
    tick = kwargs.pop("tick", False)
    tock = kwargs.pop("tock", False)
    grid_fmt = {"color": "C1", "alpha": 1.0}
    grid_fmt.update(kwargs.pop("grid_fmt", {}))
    time_fmt = {}
    time_fmt.update(kwargs.pop("time_fmt", {}))
    # Default size: one cycle_wid per unit time wide, one trace_hgt per trace tall.
    width = kwargs.pop("width", (stop_time - start_time) / Trace.unit_time * cycle_wid)
    height = kwargs.pop("height", num_traces * trace_hgt)

    # Create separate plot traces for each selected waveform.
    trace_hgt_pctg = 1.0 / num_traces
    fig, axes = plt.subplots(
        nrows=num_traces,
        sharex=True,
        squeeze=False,
        subplot_kw=None,
        gridspec_kw=None,
        figsize=(width, height),
    )
    axes = axes[:, 0]  # Collapse 2D matrix of subplots into a 1D list.

    # Set the caption on the X-axis label on the bottom-most trace.
    axes[-1].set_xlabel(caption, **caption_fmt)

    # Set the title for the collection of traces on the top-most trace.
    axes[0].set_title(title, **title_fmt)

    # Set X-axis ticks at the bottom of the stack of traces.
    start = math.floor(start_time / Trace.unit_time)
    stop = math.ceil(stop_time / Trace.unit_time)
    axes[-1].tick_params(axis="x", length=0, which="both")  # No tick marks.

    # Set positions of tick marks so grid lines will work.
    axes[-1].set_xticks(
        [x * Trace.unit_time for x in range(start, stop + 1)], minor=False
    )
    axes[-1].set_xticks(
        [(x + 0.5) * Trace.unit_time for x in range(start, stop)], minor=True
    )

    # Place cycle times at tick marks or between them.
    if not tick:
        axes[-1].set_xticklabels([], minor=False, **time_fmt)
    if tock:
        axes[-1].set_xticklabels(
            [str(x) for x in range(start, stop)], minor=True, **time_fmt
        )

    # Adjust the limits of the X axis so the grid doesn't get chopped-off and
    # produce artifacts if a grid line is at the right or left edge.
    bbox = axes[-1].get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    width_in_pixels = bbox.width * fig.dpi
    time_per_pixel = (stop_time - start_time) / width_in_pixels
    xlim = (start_time - time_per_pixel, stop_time + time_per_pixel)

    # Plot each trace waveform. Falsy entries (e.g. None) leave a blank slot.
    for i, (trace, axis) in enumerate(zip(traces, axes), 1):

        # Set position of trace within stacked traces.
        axis.set_position([0.1, (num_traces - i) * trace_hgt_pctg, 0.8, trace_hgt_pctg])

        # Place grid on X axis.
        axis.grid(axis="x", **grid_fmt)

        if not trace:
            # Leave a blank space for non-traces.

            # Remove ticks from Y axis.
            axis.set_yticks([])
            axis.tick_params(axis="y", length=0, which="both")

            # Remove the box around the subplot.
            axis.spines["left"].set_visible(False)
            axis.spines["right"].set_visible(False)
            axis.spines["top"].set_visible(False)
            axis.spines["bottom"].set_visible(False)
        else:
            # Remaining kwargs are forwarded to the trace's own renderer.
            trace.to_matplotlib(axis, start_time, stop_time, xlim, **kwargs)

    # Return figure and axes for possible further processing.
    return fig, axes
def wavejson_to_wavedrom(wavejson, width=None, skin="default"):
    """
    Create WaveDrom display from WaveJSON data.

    This code is from https://github.com/witchard/ipython-wavedrom.

    Args:
        wavejson: Dictionary of WaveJSON data describing the waveforms.
        width: Width of the display window in pixels. If left as None, the entire
            waveform will be squashed into the width of the page. To prevent
            this, set width to a large value. The display will then become scrollable.
        skin: Selects the set of graphic elements used to draw the waveforms.
            Allowable values are 'default' and 'narrow'.
    """
    # Set the width of the waveform display.
    style = ""
    if width is not None:  # idiom fix: compare against None with 'is not', not '!='
        style = ' style="width: {w}px"'.format(w=str(int(width)))

    # Generate the HTML from the JSON.
    htmldata = '<div{style}><script type="WaveDrom">{json}</script></div>'.format(
        style=style, json=json.dumps(wavejson)
    )
    DISP.display_html(DISP.HTML(htmldata))

    # Trigger the WaveDrom Javascript that creates the graphical display.
    DISP.display_javascript(
        DISP.Javascript(
            data="WaveDrom.ProcessAll();",
            lib=[
                "https://wavedrom.com/wavedrom.min.js",
                "https://wavedrom.com/skins/{skin}.js".format(skin=skin),
            ],
        )
    )

    # The following allows the display of WaveDROM in the HTML files generated by nbconvert.
    # It's disabled because it makes Github's nbconvert freak out.
    setup = """
<script src="https://wavedrom.com/skins/{skin}.js" type="text/javascript"></script>
<script src="https://wavedrom.com/wavedrom.min.js" type="text/javascript"></script>
<body onload="WaveDrom.ProcessAll()">
    """.format(
        skin=skin
    )
    # DISP.display_html(DISP.HTML(setup))
def traces_to_wavejson(*traces, **kwargs):
    """
    Convert traces into a WaveJSON data structure.

    Args:
        *traces: A list of traces to convert into WaveJSON for display.
            Can also contain None which will create a blank trace.

    Keywords Args:
        start_time: The earliest (left-most) time bound for the waveform display.
        stop_time: The latest (right-most) time bound for the waveform display.
        title: String containing the title placed across the top of the display.
        caption: String containing the title placed across the bottom of the display.
        tick: If true, times are shown at the tick marks of the display.
        tock: If true, times are shown between the tick marks of the display.

    Returns:
        A dictionary with the JSON data for the waveforms.
    """
    # Pull display options out of the keyword arguments (Python 2 compatible).
    tock = kwargs.get("tock", False)
    tick = kwargs.get("tick", False)
    caption = kwargs.get("caption")
    title = kwargs.get("title")
    stop_time = kwargs.get(
        "stop_time",
        max([trc.stop_time() for trc in traces if isinstance(trc, Trace)]),
    )
    start_time = kwargs.get(
        "start_time",
        min([trc.start_time() for trc in traces if isinstance(trc, Trace)]),
    )

    # One entry per trace; a non-Trace becomes an empty dict, i.e. a blank line.
    signal = []
    for trc in traces:
        if isinstance(trc, Trace):
            signal.append(trc.to_wavejson(start_time, stop_time))
        else:
            signal.append(dict())
    wavejson = {"signal": signal}

    # Integer start time for calculating tick/tock values.
    int_start_time = round(start_time / Trace.unit_time)

    # Optional header block above the waveforms.
    if title or tick or tock:
        head = {}
        if title:
            head["text"] = [
                "tspan",
                [
                    "tspan",
                    {"fill": "blue", "font-size": "16", "font-weight": "bold"},
                    title,
                ],
            ]
        if tick:
            head["tick"] = int_start_time
        if tock:
            head["tock"] = int_start_time
        wavejson["head"] = head

    # Optional footer block below the waveforms.
    if caption or tick or tock:
        foot = {}
        if caption:
            foot["text"] = ["tspan", ["tspan", {"font-style": "italic"}, caption]]
        if tick:
            foot["tick"] = int_start_time
        if tock:
            foot["tock"] = int_start_time
        wavejson["foot"] = foot

    return wavejson
def traces_to_wavedrom(*traces, **kwargs):
| |
import threading
import time
from meerk40t.tools.zinglplotter import ZinglPlotter
from ...core.drivers import Driver
from ...core.plotplanner import grouped
from ...kernel import (
STATE_ACTIVE,
STATE_BUSY,
STATE_END,
STATE_IDLE,
STATE_INITIALIZE,
STATE_PAUSE,
STATE_SUSPEND,
STATE_TERMINATE,
STATE_UNKNOWN,
STATE_WAIT,
)
from ..basedevice import (
DRIVER_STATE_FINISH,
DRIVER_STATE_MODECHANGE,
DRIVER_STATE_PROGRAM,
DRIVER_STATE_RAPID,
DRIVER_STATE_RASTER,
PLOT_AXIS,
PLOT_DIRECTION,
PLOT_FINISH,
PLOT_JOG,
PLOT_LEFT_UPPER,
PLOT_RAPID,
PLOT_RIGHT_LOWER,
PLOT_SETTING,
PLOT_START,
)
from ..lasercommandconstants import *
from .laserspeed import LaserSpeed
from .lhystudiosemulator import EgvLoader, LhystudiosEmulator
from ...svgelements import Length
MILS_IN_MM = 39.3701  # mils (thousandths of an inch) per millimetre
def plugin(kernel, lifecycle=None):
if lifecycle == "register":
_ = kernel.translation
kernel.register("driver/lhystudios", LhystudiosDriver)
kernel.register("output/lhystudios", LhystudiosController)
kernel.register("emulator/lhystudios", LhystudiosEmulator)
kernel.register("load/EgvLoader", EgvLoader)
context = kernel.root
@context.console_option(
"idonotlovemyhouse",
type=bool,
action="store_true",
help=_("override one second laser fire pulse duration"),
)
@context.console_argument(
"time", type=float, help=_("laser fire pulse duration")
)
@context.console_command(
"pulse",
input_type="lhystudios",
help=_("pulse <time>: Pulse the laser in place."),
)
def pulse(
command, channel, _, time=None, idonotlovemyhouse=False, data=None, **kwargs
):
spooler, driver, output = data
if time is None:
channel(_("Must specify a pulse time in milliseconds."))
return
value = time / 1000.0
if value > 1.0:
channel(
_('"%s" exceeds 1 second limit to fire a standing laser.') % value
)
try:
if not idonotlovemyhouse:
return
except IndexError:
return
def timed_fire():
yield COMMAND_WAIT_FINISH
yield COMMAND_LASER_ON
yield COMMAND_WAIT, value
yield COMMAND_LASER_OFF
if spooler.job_if_idle(timed_fire):
channel(_("Pulse laser for %f milliseconds") % (value * 1000.0))
else:
channel(_("Pulse laser failed: Busy"))
return
@context.console_argument("speed", type=float, help=_("Set the movement speed"))
@context.console_argument("dx", type=Length, help=_("change in x"))
@context.console_argument("dy", type=Length, help=_("change in y"))
@context.console_command(
"move_at_speed",
input_type="lhystudios",
help=_("move_at_speed <speed> <dx> <dy>"),
)
def move_speed(channel, _, speed, dx, dy, data=None, **kwgs):
spooler, driver, output = data
dx = Length(dx).value(
ppi=1000.0
)
dy = Length(dy).value(
ppi=1000.0
)
def move_at_speed():
yield COMMAND_SET_SPEED, speed
yield COMMAND_MODE_PROGRAM
x = driver.current_x
y = driver.current_y
yield COMMAND_MOVE, x + dx, y + dy
yield COMMAND_MODE_RAPID
if not spooler.job_if_idle(move_at_speed):
channel(_("Busy"))
return
@context.console_option(
"difference",
"d",
type=bool,
action="store_true",
help=_("Change speed by this amount."),
)
@context.console_argument("speed", type=str, help=_("Set the driver speed."))
@context.console_command(
"speed", input_type="lhystudios", help=_("Set current speed of driver.")
)
def speed(
command,
channel,
_,
data=None,
speed=None,
increment=False,
decrement=False,
**kwargs
):
spooler, driver, output = data
if speed is None or (increment and decrement):
channel(_("Speed set at: %f mm/s") % driver.speed)
return
if speed.endswith("%"):
speed = speed[:-1]
percent = True
else:
percent = False
try:
s = float(speed)
except ValueError:
channel(_("Not a valid speed or percent."))
return
if percent and increment:
s = driver.speed + driver.speed * (s / 100.0)
elif increment:
s += driver.speed
elif percent:
s = driver.speed * (s / 100.0)
driver.set_speed(s)
channel(_("Speed set at: %f mm/s") % driver.speed)
@context.console_argument("ppi", type=int, help=_("pulses per inch [0-1000]"))
@context.console_command(
"power", input_type="lhystudios", help=_("Set Driver Power")
)
def power(command, channel, _, data, ppi=None, **kwargs):
spooler, driver, output = data
if ppi is None:
channel(_("Power set at: %d pulses per inch") % driver.power)
else:
try:
driver.set_power(ppi)
except ValueError:
pass
@context.console_argument(
"accel", type=int, help=_("Acceleration amount [1-4]")
)
@context.console_command(
"acceleration",
input_type="lhystudios",
help=_("Set Driver Acceleration [1-4]"),
)
def acceleration(channel, _, data=None, accel=None, **kwargs):
"""
Lhymicro-gl speedcodes have a single character of either 1,2,3,4 which indicates
the acceleration value of the laser. This is typically 1 below 25.4, 2 below 60,
3 below 127, and 4 at any value greater than that. Manually setting this on the
fly can be used to check the various properties of that mode.
"""
spooler, driver, output = data
if accel is None:
if driver.acceleration is None:
channel(_("Acceleration is set to default."))
else:
channel(_("Acceleration: %d") % driver.acceleration)
else:
try:
v = accel
if v not in (1, 2, 3, 4):
driver.set_acceleration(None)
channel(_("Acceleration is set to default."))
return
driver.set_acceleration(v)
channel(_("Acceleration: %d") % driver.acceleration)
except ValueError:
channel(_("Invalid Acceleration [1-4]."))
return
@context.console_command(
"code_update",
input_type="lhystudios",
help=_("update movement codes for the drivers"),
)
def realtime_pause(data=None, **kwargs):
spooler, driver, output = data
driver.update_codes()
@context.console_command(
"status",
input_type="lhystudios",
help=_("abort waiting process on the controller."),
)
def realtime_pause(channel, _, data=None, **kwargs):
spooler, driver, output = data
try:
output.update_status()
except ConnectionError:
channel(_("Could not check status, usb not connected."))
@context.console_command(
"continue",
input_type="lhystudios",
help=_("abort waiting process on the controller."),
)
def realtime_pause(data=None, **kwargs):
spooler, driver, output = data
output.abort_waiting = True
@context.console_command(
"pause",
input_type="lhystudios",
help=_("realtime pause/resume of the machine"),
)
def realtime_pause(data=None, **kwargs):
spooler, driver, output = data
if driver.is_paused:
driver.resume()
else:
driver.pause()
@context.console_command(
("estop", "abort"), input_type="lhystudios", help=_("Abort Job")
)
def pipe_abort(channel, _, data=None, **kwargs):
spooler, driver, output = data
driver.reset()
channel(_("Lhystudios Channel Aborted."))
@context.console_argument(
"rapid_x", type=float, help=_("limit x speed for rapid.")
)
@context.console_argument(
"rapid_y", type=float, help=_("limit y speed for rapid.")
)
@context.console_command(
"rapid_override",
input_type="lhystudios",
help=_("limit speed of typical rapid moves."),
)
def rapid_override(channel, _, data=None, rapid_x=None, rapid_y=None, **kwargs):
spooler, driver, output = data
if rapid_x is not None:
if rapid_y is None:
rapid_y = rapid_x
driver.rapid_override = True
driver.rapid_override_speed_x = rapid_x
driver.rapid_override_speed_y = rapid_y
channel(
_("Rapid Limit: %f, %f")
% (driver.rapid_override_speed_x, driver.rapid_override_speed_y)
)
else:
driver.rapid_override = False
channel(_("Rapid Limit Off"))
@context.console_argument("filename", type=str)
@context.console_command(
"egv_import",
input_type="lhystudios",
help=_("Lhystudios Engrave Buffer Import. egv_import <egv_file>"),
)
def egv_import(filename, data=None, **kwargs):
spooler, driver, output = data
if filename is None:
raise SyntaxError
def skip(read, byte, count):
"""Skips forward in the file until we find <count> instances of <byte>"""
pos = read.tell()
while count > 0:
char = read.read(1)
if char == byte:
count -= 1
if char is None or len(char) == 0:
read.seek(pos, 0)
# If we didn't skip the right stuff, reset the position.
break
def skip_header(file):
skip(file, "\n", 3)
skip(file, "%", 5)
with open(filename, "r") as f:
skip_header(f)
while True:
data = f.read(1024)
if not data:
break
buffer = bytes(data, "utf8")
output.write(buffer)
output.write(b"\n")
@context.console_argument("filename", type=str)
@context.console_command(
"egv_export",
input_type="lhystudios",
help=_("Lhystudios Engrave Buffer Export. egv_export <egv_file>"),
)
def egv_export(channel, _, filename, data=None, **kwargs):
spooler, driver, output = data
if filename is None:
raise SyntaxError
try:
with open(filename, "w") as f:
f.write("Document type : LHYMICRO-GL file\n")
f.write("File version: 1.0.01\n")
f.write("Copyright: Unknown\n")
f.write(
"Creator-Software: %s v%s\n"
% (output.context.kernel.name, output.context.kernel.version)
)
f.write("\n")
f.write("%0%0%0%0%\n")
buffer = bytes(output._buffer)
buffer += bytes(output._queue)
f.write(buffer.decode("utf-8"))
except (PermissionError, IOError):
channel(_("Could not save: %s" % filename))
@context.console_command(
"egv",
input_type="lhystudios",
help=_("Lhystudios Engrave Code Sender. egv <lhymicro-gl>"),
)
def egv(command, channel, _, data=None, remainder=None, **kwargs):
spooler, driver, output = data
if len(remainder) == 0:
channel("Lhystudios Engrave Code Sender. egv <lhymicro-gl>")
else:
output.write(
bytes(remainder.replace("$", "\n").replace(" ", "\n"), "utf8")
)
@context.console_command(
"start", input_type="lhystudios", help=_("Start Pipe to Controller")
)
def pipe_start(command, channel, _, data=None, **kwargs):
spooler, driver, output = data
output.update_state(STATE_ACTIVE)
output.start()
channel(_("Lhystudios Channel Started."))
@context.console_command(
"hold", input_type="lhystudios", help=_("Hold Controller")
)
def pipe_pause(command, channel, _, data=None, **kwargs):
spooler, driver, output = data
output.update_state(STATE_PAUSE)
output.pause()
channel("Lhystudios Channel Paused.")
@context.console_command(
"resume", input_type="lhystudios", help=_("Resume Controller")
)
def pipe_resume(command, channel, _, data=None, **kwargs):
spooler, driver, output = data
output.update_state(STATE_ACTIVE)
output.start()
channel(_("Lhystudios Channel Resumed."))
@context.console_command(
"usb_connect", input_type="lhystudios", help=_("Connects USB")
)
def usb_connect(command, channel, _, data=None, **kwargs):
spooler, driver, output = data
output.open()
channel(_("CH341 Opened."))
@context.console_command(
"usb_disconnect", input_type="lhystudios", help=_("Disconnects USB")
)
def usb_disconnect(command, channel, _, data=None, **kwargs):
spooler, driver, output = data
output.close()
channel(_("CH341 Closed."))
@context.console_command(
"usb_reset", input_type="lhystudios", help=_("Reset USB device")
)
def usb_reset(command, channel, _, data=None, **kwargs):
spooler, driver, output = data
output.usb_reset()
@context.console_command(
"usb_release", input_type="lhystudios", help=_("Release USB device")
)
def usb_release(command, channel, _, data=None, **kwargs):
spooler, driver, output = data
output.usb_release()
@context.console_command(
"usb_abort", input_type="lhystudios", help=_("Stops USB retries")
)
def usb_abort(command, channel, _, data=None, **kwargs):
spooler, driver, output = data
output.abort_retry()
@context.console_command(
"usb_continue", input_type="lhystudios", help=_("Continues USB retries")
)
def usb_continue(command, channel, _, data=None, **kwargs):
spooler, driver, output = data
output.continue_retry()
@kernel.console_option(
"port", "p", type=int, default=23, help=_("port to listen on.")
)
@kernel.console_option(
"silent",
"s",
type=bool,
action="store_true",
help=_("do not watch server channels"),
)
@kernel.console_option(
"watch", "w", type=bool, action="store_true", help=_("watch send/recv data")
)
@kernel.console_option(
"quit",
"q",
type=bool,
action="store_true",
help=_("shutdown current lhyserver"),
)
@kernel.console_command("lhyserver", help=_("activate the lhyserver."))
def lhyserver(
channel, _, port=23, silent=False, watch=False, quit=False, **kwargs
):
root = kernel.root
try:
spooler, input_driver, output = root.registered[
"device/%s" % root.active
]
if output is None:
channel(
_(
"Output for device %s does not exist. Lhyserver cannot attach."
)
% root.active
)
return
server = root.open_as("module/TCPServer", "lhyserver", port=port)
if quit:
root.close("lhyserver")
return
channel(_("TCP Server for Lhystudios on port: %d" % port))
if not silent:
console = kernel.channel("console")
server.events_channel.watch(console)
if watch:
server.data_channel.watch(console)
channel(_("Watching Channel: %s") % "server")
root.channel("lhyserver/recv").watch(output.write)
| |
<gh_stars>1-10
import math
import time
from random import *
from random import shuffle
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from tqdm import tqdm, tqdm_notebook
from util import data_utils
def prepare_parameters(model, optim_args):
    """Split *model*'s parameters into two optimizer groups.

    - Pretrained parameters (the ``local_feats.`` backbone minus its
      batch-norm weights, plus everything under ``context.``), returned as a
      ready-made param-group dict with the learning rate scaled by 0.1.
    - The remaining, newly initialized parameters, returned as a plain list
      to be trained at the base learning rate.

    As a side effect, every parameter whose name contains ``.bn`` has
    ``requires_grad`` set to False (frozen batch-norm scale/bias).

    Args:
        model: a ``torch.nn.Module`` with named parameters.
        optim_args: optimizer kwargs; must contain ``"lr"``. Not mutated
            (a copy is taken for the pretrained group).

    Returns:
        (pretrained_param_group, new_parameters)
    """
    pretrained_parameters = []
    new_parameters = []
    # Single pass over named_parameters keeps all grouping rules in one place
    # (the original iterated the model three times).
    for name, param in model.named_parameters():
        in_backbone = name.startswith("local_feats.") or name.startswith("context.")
        if (name.startswith("local_feats.") and ".bn" not in name) or name.startswith(
            "context."
        ):
            pretrained_parameters.append(param)
        elif not in_backbone:
            new_parameters.append(param)
        # Freeze the scale (weight) and bias params of BN layers.
        if ".bn" in name:
            param.requires_grad = False

    pretrained_param_group = optim_args.copy()
    pretrained_param_group["lr"] *= 1e-1  # pretrained layers learn 10x slower
    pretrained_param_group["params"] = pretrained_parameters
    return pretrained_param_group, new_parameters
def get_time_format(time_in_seconds):
    """Convert a duration in seconds to zero-padded string components.

    Args:
        time_in_seconds: non-negative duration (int or float); fractional
            seconds are truncated.

    Returns:
        Tuple of strings ``(hours, minutes, seconds)``, each zero-padded to
        at least two digits (hours may exceed two digits for long runs).
    """
    hours, remainder = divmod(int(time_in_seconds), 3600)
    minutes, seconds = divmod(remainder, 60)
    # %02d pads single digits with a leading zero but never truncates.
    return "%02d" % hours, "%02d" % minutes, "%02d" % seconds
class Solver(object):
default_adam_args = {
"lr": 1e-4,
"betas": (0.9, 0.999),
"eps": 1e-8,
"weight_decay": 0.0,
}
default_SGD_args = {"lr": 1e-2, "weight_decay": 0.0005, "momentum": 0.9}
def __init__(
self,
optim=torch.optim.Adam,
optim_args={},
loss_func=torch.nn.KLDivLoss(),
location="ncc",
):
if optim == torch.optim.Adam:
optim_args_merged = self.default_adam_args.copy()
else:
optim_args_merged = self.default_SGD_args.copy()
optim_args_merged.update(optim_args)
self.optim_args = optim_args_merged
self.optim = optim
self.loss_func = loss_func
self._reset_histories()
self.location = location
def _reset_histories(self):
"""
Resets train and val histories for the accuracy and the loss.
"""
self.train_loss_history = []
self.val_loss_history = []
self.best_val_loss = 1
def train(
self,
model,
train_loader,
val_loader,
num_epochs=10,
num_minibatches=1,
log_nth=0,
filename_args={},
backprop_frames=1,
):
"""
Train a given model with the provided data.
Inputs:
- model: model object initialized from a torch.nn.Module
- train_loader: train data, list of torch.utils.data.DataLoader objects
- val_loader: validation data, list of torch.utils.data.DataLoader objects
- num_epochs: total number of training epochs
- num_minibatches: the number of minibatches per bath
- log_nth: log training accuracy and loss every nth iteration
- filename_args: parameters for naming the checkpoint file
- backprop_frames: the number of frames to backpropagate through if the model
is temporal
"""
### Prepare optimiser ###
# Move model to cuda first, if available, so optimiser is initialized properly
if torch.cuda.is_available():
model.cuda()
# Reducing lr of pretrained parameters and freeze batch-norm weights
pretrained_parameters, new_parameters = prepare_parameters(
model, self.optim_args
)
optim = self.optim(new_parameters, **self.optim_args)
optim.add_param_group(pretrained_parameters)
self._reset_histories()
# Create the scheduler to allow lr adjustment
scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=1, gamma=0.4)
### Training ###
# Sum up the length of each loader in train_loader
iter_per_epoch = int(
math.ceil(sum([len(loader) for loader in train_loader]) / num_minibatches)
) # Count an iter as a full batch, not a minibatch
val_iter_per_epoch = int(
math.ceil(sum([len(loader) for loader in val_loader]) / num_minibatches)
) # Count an iter as a full batch, not a minibatch
tqdm.write("START TRAIN.")
nIterations = num_epochs * iter_per_epoch
tqdm.write("")
tqdm.write("Number of epochs: {}".format(num_epochs))
tqdm.write(
"Approx. train frames per epoch: {}".format(
iter_per_epoch * filename_args["batchsize"]
)
)
tqdm.write(
"Approx. val frames per epoch: {}".format(
val_iter_per_epoch * filename_args["batchsize"]
)
)
tqdm.write("Frames per batch: {}".format(filename_args["batchsize"]))
tqdm.write(
"Number of iterations/batches per (train) epoch: {}".format(iter_per_epoch)
)
tqdm.write("Train accuracy recorded every {} iterations".format(log_nth))
tqdm.write("")
epoch_loop = range(num_epochs)
if self.location != "ncc":
if self.location == "jupyter":
epoch_loop = tqdm_notebook(epoch_loop, desc="Epochs")
else:
epoch_loop = tqdm(epoch_loop, desc="Epochs")
# Iteration counter of batches (NOT minibatches)
it = 0
total_time = 0
# Epoch
for j in epoch_loop:
start_time = time.time()
train_loss_logs = 0
# Downscale the learning rate by a factor of 2.5 every epoch
scheduler.step()
# Set the model to training mode
model.train()
if self.location == "ncc":
outer_train_loop = enumerate(train_loader, 0)
elif self.location == "jupyter":
outer_train_loop = enumerate(
tqdm_notebook(train_loader, desc="Videos (train)"), 0
)
else:
outer_train_loop = enumerate(
tqdm(train_loader, desc="Videos (train)"), 0
)
counter = 0 # counter for minibatches
it = j * iter_per_epoch
# Repeat training for each loader in the train_loader
for k, loader in outer_train_loop:
if self.location == "ncc":
inner_train_loop = enumerate(loader, 0)
elif self.location == "jupyter":
inner_train_loop = enumerate(
tqdm_notebook(loader, desc="Minibatches"), 0
)
else:
inner_train_loop = enumerate(tqdm(loader, desc="Minibatches"), 0)
# If the model is temporal, reset its temporal state
# at the start of each video
if model.temporal:
model.clear_temporal_state()
# Repeat training for each batch in the loader
for i, data in inner_train_loop:
# Batch of items in training set
# If the model is temporal, detach its temporal state
# every 'backprop_frames' batches (so it doesn't backpropagate beyond
# the last 'backprop_frames' frames - to reduce memory consumption)
if model.temporal and i % backprop_frames == 0:
model.detach_temporal_state()
# Count the number of minibatches performed since last backprop
counter += 1
# Load the items and labels in this batch from the train_loader
inputs, labels = data
# Unsqueeze labels so they're shaped as [10, 96, 128, 1]
labels = labels.unsqueeze(3)
# Convert these to cuda if cuda is available
if torch.cuda.is_available():
inputs, labels = inputs.cuda(), labels.cuda()
inputs = Variable(inputs)
labels = Variable(labels)
# train the model (forward propagation) on the inputs
outputs = model(inputs)
# permute the outputs so it's in the order [N, H, W, C]
# instead of [N, C, H, W]
outputs = outputs.permute(0, 2, 3, 1)
loss = self.loss_func(outputs, labels)
# Keep the backprop graph if model is temporal and we are not about
# to detach its temporal state
if model.temporal and i % backprop_frames != -1:
loss.backward(retain_graph=True)
else:
loss.backward()
# Only step and zero the gradients every num_minibatches steps
if counter == num_minibatches:
counter = 0 # Reset the minibatch counter
optim.step()
optim.zero_grad()
# Print results every log_nth batches,
# or if this is the last batch of the last loader
if it % log_nth == 0 or (
(i == len(loader) - 1) and (k == len(train_loader) - 1)
):
tqdm.write(
"[Iteration %i/%i] TRAIN loss: %f"
% (it, nIterations, loss)
)
self.train_loss_history.append(loss.item())
train_loss_logs += 1
it += 1 # iteration (batch) number
# Free up memory
del inputs, outputs, labels, loss
# Free up memory
del outer_train_loop, inner_train_loop, data, loader
### Validation ###
model.eval()
if self.location == "ncc":
outer_val_loop = enumerate(val_loader, 0)
elif self.location == "jupyter":
outer_val_loop = enumerate(
tqdm_notebook(val_loader, desc="Videos (val)"), 0
)
else:
outer_val_loop = enumerate(tqdm(val_loader, desc="Videos (val)"), 0)
val_loss = 0
# Repeat validation for each loader in outer_val_loop
for kk, loader in outer_val_loop:
if self.location == "ncc":
inner_val_loop = enumerate(loader, 0)
elif self.location == "jupyter":
inner_val_loop = enumerate(
tqdm_notebook(loader, desc="Minibatches"), 0
)
else:
inner_val_loop = enumerate(tqdm(loader, desc="Minibatches"), 0)
# If the model is temporal, reset its temporal state
# at the start of each video
if model.temporal:
model.clear_temporal_state()
for ii, data in inner_val_loop:
# If model is temporal, deatch the state (precaution)
if model.temporal:
model.clear_temporal_state()
inputs, labels = data
# Unsqueeze labels so they're shaped as [batch_size, H, W, 1]
labels = labels.unsqueeze(3)
if torch.cuda.is_available():
inputs, labels = inputs.cuda(), labels.cuda()
inputs_val = Variable(inputs)
labels_val = Variable(labels)
outputs_val = model(inputs_val)
# permute the outputs so it's in the order [N, H, W, C]
# instead of [N, C, H, W]
outputs_val = outputs_val.permute(0, 2, 3, 1)
val_loss += self.loss_func(outputs_val, labels_val).item()
# Free up memory
del inputs_val, outputs_val, labels_val, inputs, labels
# Free up memory
del outer_val_loop, inner_val_loop, data, loader
# Compute avg loss
val_loss /= sum([len(vloader) for vloader in val_loader])
self.val_loss_history.append(val_loss)
# Check if this is the best validation loss so far.
# If so, save the current model state
if val_loss < self.best_val_loss:
if len(filename_args) < 2:
filename = "trained_models/model_state_dict_best_loss_{:6f}.pth".format(
val_loss
)
else:
filename = "trained_models/best_model_{}_{}_batch{}_epoch{}.pth".format(
type(model).__name__,
self.loss_func.__name__,
filename_args["batchsize"],
filename_args["epoch_number"],
)
self.best_val_loss = val_loss
model.cpu()
torch.save(
{
"epoch": j + 1,
"state_dict": model.state_dict(),
"best_accuracy": val_loss,
},
filename,
| |
self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'use\'')
if address1 is not FAILURE:
address2 = FAILURE
address2 = self._read__()
if address2 is not FAILURE:
elements0.append(address2)
address3 = FAILURE
address3 = self._read_expression()
if address3 is not FAILURE:
elements0.append(address3)
address4 = FAILURE
address4 = self._read__()
if address4 is not FAILURE:
elements0.append(address4)
address5 = FAILURE
chunk1, max1 = None, self._offset + 2
if max1 <= self._input_size:
chunk1 = self._input[self._offset:max1]
if chunk1 == 'if':
address5 = TreeNode(self._input[self._offset:self._offset + 2], self._offset, [])
self._offset = self._offset + 2
else:
address5 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'if\'')
if address5 is not FAILURE:
address6 = FAILURE
address6 = self._read__()
if address6 is not FAILURE:
elements0.append(address6)
address7 = FAILURE
address7 = self._read_anor()
if address7 is not FAILURE:
elements0.append(address7)
address8 = FAILURE
address8 = self._read__()
if address8 is not FAILURE:
elements0.append(address8)
address9 = FAILURE
chunk2, max2 = None, self._offset + 9
if max2 <= self._input_size:
chunk2 = self._input[self._offset:max2]
if chunk2 == 'otherwise':
address9 = TreeNode(self._input[self._offset:self._offset + 9], self._offset, [])
self._offset = self._offset + 9
else:
address9 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'otherwise\'')
if address9 is not FAILURE:
address10 = FAILURE
address10 = self._read__()
if address10 is not FAILURE:
elements0.append(address10)
address11 = FAILURE
address11 = self._read_expression()
if address11 is not FAILURE:
elements0.append(address11)
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
if elements0 is None:
address0 = FAILURE
else:
address0 = self._actions.use_if(self._input, index2, self._offset, elements0)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index3, elements1 = self._offset, []
address12 = FAILURE
chunk3, max3 = None, self._offset + 3
if max3 <= self._input_size:
chunk3 = self._input[self._offset:max3]
if chunk3 == 'not':
address12 = TreeNode(self._input[self._offset:self._offset + 3], self._offset, [])
self._offset = self._offset + 3
else:
address12 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'not\'')
if address12 is not FAILURE:
address13 = FAILURE
address13 = self._read__()
if address13 is not FAILURE:
elements1.append(address13)
address14 = FAILURE
address14 = self._read_anor()
if address14 is not FAILURE:
elements1.append(address14)
else:
elements1 = None
self._offset = index3
else:
elements1 = None
self._offset = index3
else:
elements1 = None
self._offset = index3
if elements1 is None:
address0 = FAILURE
else:
address0 = self._actions.negate(self._input, index3, self._offset, elements1)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
address0 = self._read_anor()
if address0 is FAILURE:
self._offset = index1
self._cache['use_if'][index0] = (address0, self._offset)
return address0
    def _read_anor(self):
        """Parse the `anor` rule: ``comparison _ anor_op _ anor`` (right-
        recursive), falling back to a bare ``comparison``.

        Generated PEG-parser method (canopy style): results are memoized per
        input offset in ``self._cache['anor']``; FAILURE marks a non-match
        and ``self._offset`` is rewound when backtracking.
        """
        address0, index0 = FAILURE, self._offset
        # Return the memoized result for this offset, if any.
        cached = self._cache['anor'].get(index0)
        if cached:
            self._offset = cached[1]
            return cached[0]
        index1 = self._offset
        index2, elements0 = self._offset, []
        address1 = FAILURE
        address1 = self._read_comparison()
        if address1 is not FAILURE:
            elements0.append(address1)
            address2 = FAILURE
            address2 = self._read__()
            if address2 is not FAILURE:
                elements0.append(address2)
                address3 = FAILURE
                address3 = self._read_anor_op()
                if address3 is not FAILURE:
                    elements0.append(address3)
                    address4 = FAILURE
                    address4 = self._read__()
                    if address4 is not FAILURE:
                        elements0.append(address4)
                        address5 = FAILURE
                        address5 = self._read_anor()
                        if address5 is not FAILURE:
                            elements0.append(address5)
                        else:
                            elements0 = None
                            self._offset = index2
                    else:
                        elements0 = None
                        self._offset = index2
                else:
                    elements0 = None
                    self._offset = index2
            else:
                elements0 = None
                self._offset = index2
        else:
            elements0 = None
            self._offset = index2
        if elements0 is None:
            address0 = FAILURE
        else:
            # Full sequence matched: build the AST node via the actions hook.
            address0 = self._actions.binary_op(self._input, index2, self._offset, elements0)
            self._offset = self._offset
        if address0 is FAILURE:
            # Alternative: a plain comparison with no anor_op tail.
            self._offset = index1
            address0 = self._read_comparison()
            if address0 is FAILURE:
                self._offset = index1
        self._cache['anor'][index0] = (address0, self._offset)
        return address0
    def _read_comparison(self):
        """Parse the `comparison` rule: ``roll_math _ comp_op _ comparison``
        (right-recursive), falling back to a bare ``roll_math``.

        Generated PEG-parser method (canopy style): memoized per offset in
        ``self._cache['comparison']``; FAILURE marks a non-match and
        ``self._offset`` is rewound when backtracking.
        """
        address0, index0 = FAILURE, self._offset
        # Return the memoized result for this offset, if any.
        cached = self._cache['comparison'].get(index0)
        if cached:
            self._offset = cached[1]
            return cached[0]
        index1 = self._offset
        index2, elements0 = self._offset, []
        address1 = FAILURE
        address1 = self._read_roll_math()
        if address1 is not FAILURE:
            elements0.append(address1)
            address2 = FAILURE
            address2 = self._read__()
            if address2 is not FAILURE:
                elements0.append(address2)
                address3 = FAILURE
                address3 = self._read_comp_op()
                if address3 is not FAILURE:
                    elements0.append(address3)
                    address4 = FAILURE
                    address4 = self._read__()
                    if address4 is not FAILURE:
                        elements0.append(address4)
                        address5 = FAILURE
                        address5 = self._read_comparison()
                        if address5 is not FAILURE:
                            elements0.append(address5)
                        else:
                            elements0 = None
                            self._offset = index2
                    else:
                        elements0 = None
                        self._offset = index2
                else:
                    elements0 = None
                    self._offset = index2
            else:
                elements0 = None
                self._offset = index2
        else:
            elements0 = None
            self._offset = index2
        if elements0 is None:
            address0 = FAILURE
        else:
            # Full sequence matched: build the AST node via the actions hook.
            address0 = self._actions.binary_op(self._input, index2, self._offset, elements0)
            self._offset = self._offset
        if address0 is FAILURE:
            # Alternative: a plain roll_math with no comparison-operator tail.
            self._offset = index1
            address0 = self._read_roll_math()
            if address0 is FAILURE:
                self._offset = index1
        self._cache['comparison'][index0] = (address0, self._offset)
        return address0
    def _read_roll_math(self):
        """Parse the `roll_math` rule: ``add_math _ '&' !'=' _ roll_math``
        (right-recursive), falling back to a bare ``add_math``.

        The ``!'='`` negative lookahead distinguishes the '&' operator from a
        '&=' token. Generated PEG-parser method (canopy style): memoized per
        offset in ``self._cache['roll_math']``; FAILURE marks a non-match and
        ``self._offset`` is rewound when backtracking.
        """
        address0, index0 = FAILURE, self._offset
        # Return the memoized result for this offset, if any.
        cached = self._cache['roll_math'].get(index0)
        if cached:
            self._offset = cached[1]
            return cached[0]
        index1 = self._offset
        index2, elements0 = self._offset, []
        address1 = FAILURE
        address1 = self._read_add_math()
        if address1 is not FAILURE:
            elements0.append(address1)
            address2 = FAILURE
            address2 = self._read__()
            if address2 is not FAILURE:
                elements0.append(address2)
                address3 = FAILURE
                # Literal '&' operator.
                chunk0, max0 = None, self._offset + 1
                if max0 <= self._input_size:
                    chunk0 = self._input[self._offset:max0]
                if chunk0 == '&':
                    address3 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
                    self._offset = self._offset + 1
                else:
                    address3 = FAILURE
                    if self._offset > self._failure:
                        self._failure = self._offset
                        self._expected = []
                    if self._offset == self._failure:
                        self._expected.append('\'&\'')
                if address3 is not FAILURE:
                    elements0.append(address3)
                    address4 = FAILURE
                    # Negative lookahead: fail if the next char is '='.
                    index3 = self._offset
                    chunk1, max1 = None, self._offset + 1
                    if max1 <= self._input_size:
                        chunk1 = self._input[self._offset:max1]
                    if chunk1 == '=':
                        address4 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
                        self._offset = self._offset + 1
                    else:
                        address4 = FAILURE
                        if self._offset > self._failure:
                            self._failure = self._offset
                            self._expected = []
                        if self._offset == self._failure:
                            self._expected.append('\'=\'')
                    self._offset = index3
                    if address4 is FAILURE:
                        # Lookahead succeeded (no '='): match the empty string.
                        address4 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
                        self._offset = self._offset
                    else:
                        address4 = FAILURE
                    if address4 is not FAILURE:
                        elements0.append(address4)
                        address5 = FAILURE
                        address5 = self._read__()
                        if address5 is not FAILURE:
                            elements0.append(address5)
                            address6 = FAILURE
                            address6 = self._read_roll_math()
                            if address6 is not FAILURE:
                                elements0.append(address6)
                            else:
                                elements0 = None
                                self._offset = index2
                        else:
                            elements0 = None
                            self._offset = index2
                    else:
                        elements0 = None
                        self._offset = index2
                else:
                    elements0 = None
                    self._offset = index2
            else:
                elements0 = None
                self._offset = index2
        else:
            elements0 = None
            self._offset = index2
        if elements0 is None:
            address0 = FAILURE
        else:
            # Full sequence matched: build the AST node via the actions hook.
            address0 = self._actions.binary_op(self._input, index2, self._offset, elements0)
            self._offset = self._offset
        if address0 is FAILURE:
            # Alternative: a plain add_math with no '&' tail.
            self._offset = index1
            address0 = self._read_add_math()
            if address0 is FAILURE:
                self._offset = index1
        self._cache['roll_math'][index0] = (address0, self._offset)
        return address0
def _read_add_math(self):
address0, index0 = FAILURE, self._offset
cached = self._cache['add_math'].get(index0)
if cached:
self._offset = cached[1]
return cached[0]
index1 = self._offset
index2, elements0 = self._offset, []
address1 = FAILURE
address1 = self._read_mult_math()
if address1 is not FAILURE:
elements0.append(address1)
address2 = FAILURE
address2 = self._read__()
if address2 is not FAILURE:
elements0.append(address2)
address3 = FAILURE
address3 = self._read_add_op()
if address3 is not FAILURE:
elements0.append(address3)
address4 = FAILURE
index3 = self._offset
chunk0, max0 = None, self._offset + 1
if max0 <= self._input_size:
chunk0 = self._input[self._offset:max0]
if chunk0 == '=':
address4 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address4 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'=\'')
self._offset = index3
if address4 is FAILURE:
address4 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address4 = FAILURE
if address4 is not FAILURE:
elements0.append(address4)
address5 = FAILURE
address5 = self._read__()
if address5 is not FAILURE:
elements0.append(address5)
address6 = FAILURE
address6 = self._read_add_math()
if address6 is not FAILURE:
elements0.append(address6)
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = | |
grid_points) / 7, vac_pareto*n_vacc,
np.repeat(0, len(vac_pareto))*n_vacc, color=col_pareto, alpha = 0.3)
ax.plot(np.linspace(0, total_length, grid_points) / 7,
np.repeat(0.5, len(vac_pareto))*n_vacc,
color="black", linestyle="dashed", label="Population \nallocation")
time = np.linspace(0, total_length, grid_points) / 7
area = trapz(vac_pareto*n_vacc, dx=(time[1] - time[0]))
ax.text(x_total, y_total, f"Total doses: \n{np.round(area, 2)}",
horizontalalignment="center",
verticalalignment="center",
bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1'))
if plot == "optimal" or plot is None:
ax.plot(
np.linspace(0, total_length, grid_points) / 7,
vac_unconstrained*n_vacc,
color=col_unconstrained,
linewidth=linewidth,
label=label_unconstrained,
)
ax.plot(np.linspace(0, total_length, grid_points) / 7,
np.repeat(0.5, len(vac_unconstrained))*n_vacc,
color="black", linestyle="dashed", label="Population \nallocation")
time = np.linspace(0, total_length, grid_points-1) / 7
area = trapz(vac_unconstrained*n_vacc, dx=(time[1] - time[0]))
ax.fill_between(np.linspace(0, total_length, grid_points) / 7, vac_unconstrained*n_vacc,
np.repeat(0, len(vac_unconstrained))*n_vacc, color=col_unconstrained,
alpha = 0.3)
ax.text(x_total, y_total, f"Total doses: \n{np.round(area, 2)}",
horizontalalignment="center",
verticalalignment="center",
bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1'))
#ax.scatter(x_scatter, scatter_vac_pareto, s=s_scatter, label=label_scatter,
# color=col_pareto)
#ax.scatter(x_scatter, scatter_vac_unconstr, s=s_scatter, label=label_scatter,
# color=col_pareto)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_ylim([-0.05, 1.05])
return ax
def plot_distance_curves(
    dict_use,
    max_index=None,
    linewidth=1,
    color_optimal="C0",
    color_pareto="C1",
    color_pop="C2",
    label_optimal="Optimal",
    label_pareto="pareto",
    label_pop="pop",
    var="fval",
    relative=True,
    ax=None,
    x_label="Distance parameter",
    y_label="% deaths compared to the Population based strategy",
    title="",
    vline=None,
    vline_color="C3",
    vline_width=4,
    vline_label="Previous parameter",
    v_ymin=0,
    v_ymax=-10,
    ylim=None,
):
    """Plot each strategy's outcome (column ``var``) against the distance parameter.

    ``dict_use`` maps "optimal", "pareto" and "pop_based" to DataFrames with
    columns "distance", "fval", "countryA", "countryB". Each series is sorted
    by "distance" and restricted to rows where both the optimal and pareto
    runs succeeded (all of fval/countryA/countryB positive); the population
    series additionally requires its own run to have succeeded -- this
    mirrors the original filter combinations exactly.

    With ``relative=True`` the optimal/pareto series are drawn as percent
    change versus the population strategy; with ``relative=False`` all three
    raw series are drawn. The x-axis is ``1 - distance``. Optionally adds a
    dashed vertical line and fixed y-limits.

    Returns:
        The matplotlib axis drawn on (created if ``ax`` is None).
    """
    if ax is None:
        fig, ax = plt.subplots()

    def _valid(df):
        # Rows where the optimization produced a usable (positive) result.
        return (df["fval"] > 0) & (df["countryA"] > 0) & (df["countryB"] > 0)

    # The original repeated these boolean filters inline four times; factor
    # them into two masks (behavior unchanged -- AND is commutative).
    mask_shared = _valid(dict_use["optimal"]) & _valid(dict_use["pareto"])
    mask_all = mask_shared & _valid(dict_use["pop_based"])

    optimal = (
        dict_use["optimal"]
        .sort_values(by=["distance"])[var][mask_shared]
        .reset_index(drop=True)
    )
    pareto = (
        dict_use["pareto"]
        .sort_values(by=["distance"])[var][mask_shared]
        .reset_index(drop=True)
    )
    pop = (
        dict_use["pop_based"]
        .sort_values(by=["distance"])[var][mask_all]
        .reset_index(drop=True)
    )
    # x-axis: similarity = 1 - distance, aligned with the filtered rows.
    distance = 1 - (
        dict_use["optimal"]
        .sort_values(by=["distance"])["distance"][mask_shared]
        .reset_index(drop=True)
    )
    if max_index is None:
        max_index = len(pop)
    # Fractional change of each strategy versus the population baseline.
    a = (optimal[0:max_index] - pop[0:max_index]) / pop[0:max_index]
    b = (pareto[0:max_index] - pop[0:max_index]) / pop[0:max_index]
    if relative is True:
        ax.plot(
            distance[0:max_index],
            a * 100,
            color=color_optimal,
            linewidth=linewidth,
            label=label_optimal,
        )
        ax.plot(
            distance[0:max_index],
            b * 100,
            color=color_pareto,
            linewidth=linewidth,
            label=label_pareto,
        )
    elif relative is False:
        ax.plot(
            distance[0:max_index],
            optimal[0:max_index],
            color=color_optimal,
            linewidth=linewidth,
            label=label_optimal,
        )
        ax.plot(
            distance[0:max_index],
            pareto[0:max_index],
            color=color_pareto,
            linewidth=linewidth,
            label=label_pareto,
        )
        ax.plot(
            distance[0:max_index],
            pop[0:max_index],
            color=color_pop,
            linewidth=linewidth,
            label=label_pop,
        )
    ax.set_title(title)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    if vline is not None:
        ax.vlines(
            vline,
            ymin=v_ymin,
            ymax=v_ymax,
            color=vline_color,
            linewidth=vline_width,
            linestyles="dashed",
            label=vline_label,
        )
    if ylim is not None:
        ax.set_ylim(ylim)
    return ax
# ---------------------------------------------------------------------------------------------------------------------
def plot_bars_multiple(
    ax,
    unconstr_deaths,
    pop_deaths,
    constr_deaths,
    label_optimal="Optimal strategy",
    label_Pareto="Pareto strategy",
    X=["Total"] + ["Belgium", "France", "Germany", "United \nKingdom"],
    xlabel="Countries",
    ylabel="Difference in %",
    title="Number of deaths per strategy and country compared to population strategy",
    color_good="seagreen",
    color_bad="firebrick",
    alpha=0.3,
    label_good="Improvement",
    label_bad="Deterioration",
    xlim=None,
):
    """Grouped bar chart: percent difference in deaths vs the population strategy.

    Args:
        ax: matplotlib axis to draw on.
        unconstr_deaths / constr_deaths: deaths under the optimal / Pareto
            strategy (array-like, one entry per label in ``X``).
        pop_deaths: deaths under the population-based baseline.
        X: category labels; ``xlim`` optionally clips the x-axis.

    The ``color_good``/``color_bad``/``alpha``/``label_*`` parameters are
    retained for signature compatibility with the removed shaded-region
    display (previously commented out).
    """
    # Percent deviation from the baseline (negative = fewer deaths).
    unrestricted = (unconstr_deaths / pop_deaths - 1) * 100
    pareto = (constr_deaths / pop_deaths - 1) * 100
    # y-range spanning both series with 5% headroom.
    minimum = np.min([pareto, unrestricted]) * 1.05
    maximum = np.max([pareto, unrestricted]) * 1.05
    X_axis = np.arange(len(X))
    ax.bar(X_axis - 0.3, unrestricted, 0.3, label=label_optimal, edgecolor="grey")
    ax.bar(X_axis, pareto, 0.3, label=label_Pareto, edgecolor="grey")
    ax.set_xticks(X_axis - 0.1)
    ax.set_xticklabels(X)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    # The original called set_xlim twice and computed an unused fill bound;
    # both redundancies removed.
    if xlim is not None:
        ax.set_xlim(xlim)
    ax.set_ylim([minimum, maximum])
    ax.legend()
def plot_trajectories_aggregated(
    ax,
    length,
    pop_trajectory,
    unconstr_trajectory,
    constr_trajectory,
    labels=["Population", "Optimal", "Pareto"],
    colors=["C0", "C1", "C2"],
    alphas=[0.6, 0.4, 0.2],
    xlabel="Weeks",
    ylabel="Infected individuals \nin millions",
    title="Total number of infected individuals",
    target="infectious",
    scale=10 ** 6,
    fill_between=False,
    plot_legend=True,
):
    """Plot the aggregated number of infected individuals over time.

    ``target`` selects which state columns to sum: one substring, or two
    substrings that must both occur in a column name.  It may be given as a
    plain string or as a list of one/two strings.

    NOTE(review): the trajectories are iterated as (unconstrained,
    constrained, population) while the default labels are ("Population",
    "Optimal", "Pareto") — confirm callers pass labels in the order
    matching the trajectory arguments.
    """
    # Fix: the default target="infectious" is a 10-character string, so the
    # original len(target) == 1/2 checks matched neither branch and the code
    # crashed with NameError.  Normalise a bare string to a one-element list.
    if isinstance(target, str):
        target = [target]
    index_axis = np.array(list(pop_trajectory.reset_index(drop=True).index))
    # Map the sample index onto weeks along the simulated horizon.
    x_axis = index_axis / index_axis[-1] * length / 7
    trajectories = [unconstr_trajectory, constr_trajectory, pop_trajectory]
    for index in range(len(trajectories)):
        df = trajectories[index]
        if len(target) == 1:
            states_infectious = [x for x in df.columns if target[0] in x]
        elif len(target) == 2:
            states_infectious = [
                x for x in df.columns if (target[0] in x) and (target[1] in x)
            ]
        else:
            raise ValueError("target must contain one or two substrings")
        sum_infectious = df[states_infectious].sum(axis=1)
        # The population trajectory is dashed and slightly transparent so
        # the optimised strategies stand out.
        if labels[index] == "Population":
            linestyle = "dashed"
            alpha = 0.8
        else:
            linestyle = "solid"
            alpha = 1
        ax.plot(
            x_axis,
            sum_infectious / scale,
            label=labels[index],
            color=colors[index],
            linestyle=linestyle,
            alpha=alpha,
        )
        if fill_between is True:
            ax.fill_between(
                x_axis,
                sum_infectious / scale,
                np.repeat(0, len(x_axis)),
                step="pre",
                alpha=alphas[index],
                color=colors[index],
            )
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    if plot_legend is True:
        ax.legend()
def plot_trajectories_aggregated_vac(
    ax,
    length,
    pop_trajectory,
    unconstr_trajectory,
    constr_trajectory,
    labels=["Population", "Optimal", "Pareto"],
    colors=["C0", "C1", "C2"],
    alphas=[0.6, 0.4, 0.2],
    xlabel="Weeks",
    ylabel="",
    title="",
    scale=10 ** 6,
    fill_between=False,
    plot_legend=True,
):
    """Plot the share of the living population that is vaccinated/recovered.

    A compartment counts as vaccinated when its column name contains
    "vac1", "vac2" or "recoverede" and does not contain "dead"; the
    proportion is taken over all non-dead compartments.

    ``scale`` is kept for interface compatibility but is no longer used
    (proportions are already in [0, 1]).
    """
    index_axis = np.array(list(pop_trajectory.reset_index(drop=True).index))
    # Map the sample index onto weeks along the simulated horizon.
    x_axis = index_axis / index_axis[-1] * length / 7
    trajectories = [unconstr_trajectory, constr_trajectory, pop_trajectory]
    for index in range(len(trajectories)):
        df = trajectories[index]
        states_vaccinated = [
            x
            for x in df.columns
            if (("vac1" in x) or ("vac2" in x) or ("recoverede" in x))
            and not ("dead" in x)
        ]
        states_alive = [x for x in df.columns if not ("dead" in x)]
        sum_vaccinated = df[states_vaccinated].sum(axis=1)
        sum_alive = df[states_alive].sum(axis=1)
        prop_vac = sum_vaccinated / sum_alive
        ax.plot(
            x_axis,
            prop_vac,
            label=labels[index],
            color=colors[index],
        )
        if fill_between is True:
            # Fix: the original filled with sum_infectious / scale, a leftover
            # from the non-vaccination variant of this function; here
            # sum_infectious was an empty dict, so fill_between=True crashed
            # with a TypeError.  Fill under the plotted proportion instead.
            ax.fill_between(
                x_axis,
                prop_vac,
                np.repeat(0, len(x_axis)),
                step="pre",
                alpha=alphas[index],
                color=colors[index],
            )
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    if plot_legend is True:
        ax.legend()
def plot_vac_allocated(
    ax,
    colors,
    time,
    dict_out,
    index_vac,
    index_areas,
    areas,
    scale,
    countries,
    col_vac1="C7",
    col_vac2="C8",
    label_vac1="mRNA",
    label_vac2="Vector",
    vac=["vac1", "vac2"],
    types=["unconstrained", "constrained", "pop"],
    ylabel="% received",
    xlabel="Weeks",
    labels=["Optimal", "Pareto", "Population"],
    alphas=[0.1, 0.1, 0.1],
    axvline_x=40,
    ylim=[-0.05, 0.9],
    title="Vaccine received in ",
    total=True,
    spline_xx=None,
    numb_xx=4,
    s=5,
):
    """Plot the vaccine doses allocated to one area/vaccine combination.

    One curve is drawn per optimisation type in ``types``; the population
    ("pop") curve is dashed.  When ``total`` is True the absolute allocated
    amount (scaled by ``scale``) is plotted, otherwise the allocation
    proportion.  When ``spline_xx`` is given, the spline knot positions are
    marked with scatter points on the optimised curves.

    NOTE(review): assumes ``dict_out`` has keys "vaccine" (per-vaccine
    availability series) and "allocated_best" (keyed by
    "<type>_<area>_<vaccine>") — confirm against the producer of dict_out.
    Several parameters (col_vac1/2, label_vac1/2, alphas, axvline_x, ylim)
    are currently unused or only feed commented-out code.
    """
    for index_type in range(len(types)):
        vac_available = dict_out["vaccine"][vac[index_vac]]
        name = f"{types[index_type]}_{areas[index_areas]}_{vac[index_vac]}"
        vac_prop = dict_out["allocated_best"][name]
        # Absolute doses = available supply times the allocated proportion.
        vac_allocated = vac_available * vac_prop
        if total is True:
            y = vac_allocated / scale
        else:
            y = vac_prop
        # Population strategy drawn dashed/transparent, optimised ones solid.
        if types[index_type] == "pop":
            alpha = 0.8
            linestyle = "dashed"
        else:
            alpha = 1
            linestyle = "solid"
        ax.plot(
            time / 7,
            y,
            color=colors[index_type],
            label=labels[index_type],
            linestyle=linestyle,
            alpha=alpha,
        )
        if not (spline_xx is None):
            if types[index_type] != "pop":
                # Mark the spline optimisation knots on the curve.
                xx = np.array(list(spline_xx.values()))
                y_index = xx / xx[-1] * (len(y) - 1)
                ax.scatter(
                    xx[0:numb_xx] / 7,
                    y.loc[np.round(y_index[0:numb_xx])],
                    s=s,
                )
    # y_lim = [ax.get_yticks()[0], ax.get_yticks()[-1]]
    # ax.fill_between([mini, axvline_x], y_lim,
    #                 color="grey", step="pre", alpha=0.5)
    # ax.fill_between([mini, 60], y_lim,
    #                 color="grey", step="pre", alpha=0.2)
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_title(title + f"{countries[index_areas]}")
    # ax.axvline(axvline_x ,0, 1, color = "firebrick",
    #            linestyle = "dashed", label = "Last optimitaion \npoint", linewidth = 0.7)
    # Only the first panel of the figure grid gets a legend.
    if [index_vac, index_areas] == [0, 0]:
        ax.legend()
# ----------------------------------------------------------------------------------------------------
def plot_four_country_overview(
spline_xx,
vaccine_inflow,
number_yy,
length,
interventionA,
interventionB,
end_data,
start_population,
infectious_t0,
recovered_t0,
delta,
omega,
countries,
areas,
par_R,
number_xx_R,
total_grid,
df_inf_true,
df_infected,
grid_data,
grid_sim,
scale,
text_x,
text_y,
ylim,
text_str,
text_lockdown_x,
text_lockdown_y=0.02,
text_lockdown_str="Constant \nNPIs",
color_prop=["C4", "C5", "C6", "C7"],
label_vac1="mRNA",
label_vac2="vector",
color_vac1="C7",
color_vac2="C8",
title_vac="Available vaccines",
title_setup="Set-up",
position_start_vac=[0, 0.5],
height_start_vac=0.3,
letter_size=16,
letter_y=1.06,
size=(18, 16),
):
linspace = []
for j in range(len(spline_xx.values()) - 1):
new = list(np.linspace(0, list(spline_xx.values())[j + 1], 1000))
linspace += new
vaccine_available = pd.DataFrame(
{
"vac1": np.repeat(
np.array(list(vaccine_inflow.values())[0 : (number_yy - 1)]), 1000
),
"vac2": np.repeat(
np.array(
list(vaccine_inflow.values())[(number_yy - 1) : (2 * number_yy)]
),
1000,
),
"t": np.linspace(0, length, len(linspace)),
}
)
fig = plt.figure(constrained_layout=True, figsize=size)
gs = GridSpec(3, 4, figure=fig)
count_plot = 97
ax = fig.add_subplot(gs[0, :1])
ax.set_xlim([0, 60])
ax.set_ylim([0, 1])
ax.get_yaxis().set_visible(False)
# ax.spines['left'].set_visible(False)
# ax.spines['bottom'].set_position('center')
ax.text(
-0.05,
letter_y,
chr(count_plot),
horizontalalignment="center",
verticalalignment="center",
transform=ax.transAxes,
weight="bold",
size=letter_size,
)
color_tl = "seagreen"
ax.fill_between(
[0, length / 7], [1, 1], [0.8, 0.8], step="pre", alpha=0.6, color=color_tl
)
ax.text(length / 7 / 2, 0.9, "Alpha variant", ha="center", va="center")
ax.fill_between(
[interventionA["t"] / 7, length / 7],
[0.8, 0.8],
[0.6, 0.6],
step="pre",
alpha=0.5,
color=color_tl,
)
ax.text(
((length - interventionA["t"]) / 2 + interventionA["t"]) / 7,
0.7,
"Delta variant",
ha="center",
va="center",
)
for i in range(3):
key1 = f"xx{i}"
key2 = f"<KEY>
ax.fill_between(
[spline_xx[key1] / 7, spline_xx[key2] / 7],
[0.6, 0.6],
[0.4, 0.4],
step="pre",
alpha=0.4,
color=color_tl,
)
ax.text(
((spline_xx[key2] - spline_xx[key1]) / 2 + spline_xx[key1]) / 7,
0.5,
f"Spline {i+1}",
ha="center",
va="center",
)
ax.fill_between(
[0, end_data / 7], [0.4, 0.4], [0.2, 0.2], step="pre", alpha=0.3, color=color_tl
)
ax.text(end_data / 7 / 2, 0.3, "Optimize vaccinations", ha="center", va="center")
ax.fill_between(
[end_data / 7, length / 7],
[0.4, 0.4],
[0.2, 0.2],
step="pre",
alpha=0.3,
color=color_tl,
)
ax.text(
((length - end_data) / 2 + | |
# models/resnet/tensorflow/train_imagenet_resnet_hvd.py
#!/usr/bin/env python
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
try:
from builtins import range
except ImportError:
pass
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import data_flow_ops
from tensorflow.contrib.data.python.ops import interleave_ops
from tensorflow.contrib.data.python.ops import batching
import horovod.tensorflow as hvd
import os
import sys
import time
import argparse
import random
import shutil
import logging
import re
from glob import glob
from operator import itemgetter
from tensorflow.python.util import nest
# uncomment to suppress TF info messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
def rank0log(logger, *args, **kwargs):
    """Log (or print) a message only on the Horovod root rank."""
    if hvd.rank() != 0:
        return
    if logger:
        logger.info(''.join(str(arg) for arg in args))
    else:
        print(*args, **kwargs)
class LayerBuilder(object):
    """Builds conv/pool/dense layers with a shared configuration.

    Centralises the data format, activation, batch-norm settings and weight
    initializer so the ResNet definition code below stays compact.
    """
    def __init__(self, activation=None, data_format='channels_last',
                 training=False, use_batch_norm=False, batch_norm_config=None,
                 conv_initializer=None, adv_bn_init=False):
        # activation: callable applied after conv/dense layers (e.g. tf.nn.relu)
        # training: forwarded to batch norm so moving statistics update
        # adv_bn_init: zero-init the last BN gamma in each residual block
        self.activation = activation
        self.data_format = data_format
        self.training = training
        self.use_batch_norm = use_batch_norm
        self.batch_norm_config = batch_norm_config
        self.conv_initializer = conv_initializer
        self.adv_bn_init = adv_bn_init
        if self.batch_norm_config is None:
            self.batch_norm_config = {
                'decay': 0.9,
                'epsilon': 1e-4,
                'scale': True,
                'zero_debias_moving_mean': False,
            }
    def _conv2d(self, inputs, activation, *args, **kwargs):
        """Conv2D, then (optionally) batch norm followed by the activation."""
        x = tf.layers.conv2d(
            inputs, data_format=self.data_format,
            use_bias=not self.use_batch_norm,
            kernel_initializer=self.conv_initializer,
            activation=None if self.use_batch_norm else activation,
            *args, **kwargs)
        if self.use_batch_norm:
            # With BN the conv is linear; the activation is applied after BN.
            x = self.batch_norm(x)
            x = activation(x) if activation is not None else x
        return x
    def conv2d_linear_last_bn(self, inputs, *args, **kwargs):
        """Linear conv + BN for the LAST conv of a residual block.

        With adv_bn_init the BN gamma starts at zero so each residual block
        initially acts as an identity mapping.
        """
        x = tf.layers.conv2d(
            inputs, data_format=self.data_format,
            use_bias=False,
            kernel_initializer=self.conv_initializer,
            activation=None, *args, **kwargs)
        param_initializers = {
            'moving_mean': tf.zeros_initializer(),
            'moving_variance': tf.ones_initializer(),
            'beta': tf.zeros_initializer(),
        }
        if self.adv_bn_init:
            param_initializers['gamma'] = tf.zeros_initializer()
        else:
            param_initializers['gamma'] = tf.ones_initializer()
        x = self.batch_norm(x, param_initializers=param_initializers)
        return x
    def conv2d_linear(self, inputs, *args, **kwargs):
        """Conv2D with no activation (BN still applied if enabled)."""
        return self._conv2d(inputs, None, *args, **kwargs)
    def conv2d(self, inputs, *args, **kwargs):
        """Conv2D with the builder's default activation."""
        return self._conv2d(inputs, self.activation, *args, **kwargs)
    def pad2d(self, inputs, begin, end=None):
        """Zero-pad the spatial dims; begin/end may be scalars or pairs."""
        if end is None:
            end = begin
        try:
            _ = begin[1]
        except TypeError:
            begin = [begin, begin]
        try:
            _ = end[1]
        except TypeError:
            end = [end, end]
        if self.data_format == 'channels_last':
            padding = [[0, 0], [begin[0], end[0]], [begin[1], end[1]], [0, 0]]
        else:
            padding = [[0, 0], [0, 0], [begin[0], end[0]], [begin[1], end[1]]]
        return tf.pad(inputs, padding)
    def max_pooling2d(self, inputs, *args, **kwargs):
        return tf.layers.max_pooling2d(
            inputs, data_format=self.data_format, *args, **kwargs)
    def average_pooling2d(self, inputs, *args, **kwargs):
        return tf.layers.average_pooling2d(
            inputs, data_format=self.data_format, *args, **kwargs)
    def dense_linear(self, inputs, units, **kwargs):
        # NOTE(review): extra kwargs are accepted but silently ignored here.
        return tf.layers.dense(inputs, units, activation=None)
    def dense(self, inputs, units, **kwargs):
        # NOTE(review): extra kwargs are accepted but silently ignored here.
        return tf.layers.dense(inputs, units, activation=self.activation)
    def activate(self, inputs, activation=None):
        """Apply the given (or default) activation; identity if neither set."""
        activation = activation or self.activation
        return activation(inputs) if activation is not None else inputs
    def batch_norm(self, inputs, **kwargs):
        """Fused batch norm using the builder's config, overridable per call."""
        all_kwargs = dict(self.batch_norm_config)
        all_kwargs.update(kwargs)
        data_format = 'NHWC' if self.data_format == 'channels_last' else 'NCHW'
        return tf.contrib.layers.batch_norm(
            inputs, is_training=self.training, data_format=data_format,
            fused=True, **all_kwargs)
    def spatial_average2d(self, inputs):
        """Global spatial average pool, returning a [batch, channels] tensor."""
        shape = inputs.get_shape().as_list()
        if self.data_format == 'channels_last':
            n, h, w, c = shape
        else:
            n, c, h, w = shape
        n = -1 if n is None else n
        x = tf.layers.average_pooling2d(inputs, (h, w), (1, 1),
                                        data_format=self.data_format)
        return tf.reshape(x, [n, c])
    def flatten2d(self, inputs):
        """Flatten spatial+channel dims to [batch, features]."""
        x = inputs
        # NOTE(review): 'channel_last' (sic) never equals the
        # 'channels_last' value used everywhere else, so this transpose
        # always runs regardless of data format — confirm intent before use.
        if self.data_format != 'channel_last':
            # Note: This ensures the output order matches that of NHWC networks
            x = tf.transpose(x, [0, 2, 3, 1])
        input_shape = x.get_shape().as_list()
        num_inputs = 1
        for dim in input_shape[1:]:
            num_inputs *= dim
        return tf.reshape(x, [-1, num_inputs], name='flatten')
    def residual2d(self, inputs, network, units=None, scale=1.0, activate=False):
        """Wrap `network` with a residual connection, projecting if needed."""
        outputs = network(inputs)
        c_axis = -1 if self.data_format == 'channels_last' else 1
        h_axis = 1 if self.data_format == 'channels_last' else 2
        w_axis = h_axis + 1
        ishape, oshape = [y.get_shape().as_list() for y in [inputs, outputs]]
        ichans, ochans = ishape[c_axis], oshape[c_axis]
        # Infer the stride from the input/output spatial ratio.
        strides = ((ishape[h_axis] - 1) // oshape[h_axis] + 1,
                   (ishape[w_axis] - 1) // oshape[w_axis] + 1)
        with tf.name_scope('residual'):
            # Project the shortcut when channels or spatial size change.
            if (ochans != ichans or strides[0] != 1 or strides[1] != 1):
                inputs = self.conv2d_linear(inputs, units, 1, strides, 'SAME')
            x = inputs + scale * outputs
            if activate:
                x = self.activate(x)
        return x
def resnet_bottleneck_v1(builder, inputs, depth, depth_bottleneck, stride,
                         basic=False):
    """One ResNet-v1 residual block.

    basic=True builds the two-conv 3x3 block (ResNet-18/34); otherwise the
    1x1 -> 3x3 -> 1x1 bottleneck (ResNet-50/101/152).
    """
    # NOTE(review): index 1 is the channel axis only for channels_first
    # (NCHW) data; with channels_last it would read the height — confirm
    # the builder is constructed with the matching data_format.
    num_inputs = inputs.get_shape().as_list()[1]
    x = inputs
    with tf.name_scope('resnet_v1'):
        # Shortcut: identity when the shape is unchanged, max-pool for pure
        # striding, 1x1 conv projection when the channel count changes.
        if depth == num_inputs:
            if stride == 1:
                shortcut = x
            else:
                shortcut = builder.max_pooling2d(x, 1, stride)
        else:
            shortcut = builder.conv2d_linear(x, depth, 1, stride, 'SAME')
        if basic:
            x = builder.pad2d(x, 1)
            x = builder.conv2d(x, depth_bottleneck, 3, stride, 'VALID')
            x = builder.conv2d_linear(x, depth, 3, 1, 'SAME')
        else:
            x = builder.conv2d(x, depth_bottleneck, 1, 1, 'SAME')
            x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
            # x = builder.conv2d_linear(x, depth, 1, 1, 'SAME')
            # Last conv of the block gets the special BN gamma initialization.
            x = builder.conv2d_linear_last_bn(x, depth, 1, 1, 'SAME')
        x = tf.nn.relu(x + shortcut)
        return x
def inference_resnet_v1_impl(builder, inputs, layer_counts, basic=False):
    """Build the ResNet-v1 trunk: 7x7 stem, four residual stages, global pool."""
    x = builder.pad2d(inputs, 3)
    x = builder.conv2d(x, 64, 7, 2, 'VALID')
    x = builder.max_pooling2d(x, 3, 2, 'SAME')
    # (output depth, bottleneck depth, number of blocks) for the four stages.
    stage_specs = ((256, 64, layer_counts[0]),
                   (512, 128, layer_counts[1]),
                   (1024, 256, layer_counts[2]),
                   (2048, 512, layer_counts[3]))
    for stage, (depth, bottleneck, num_blocks) in enumerate(stage_specs):
        for block in range(num_blocks):
            # Stages after the first downsample on their first block.
            stride = 2 if (stage > 0 and block == 0) else 1
            x = resnet_bottleneck_v1(builder, x, depth, bottleneck, stride, basic)
    return builder.spatial_average2d(x)
def inference_resnet_v1(inputs, nlayer, data_format='channels_last',
                        training=False, conv_initializer=None, adv_bn_init=False):
    """Deep Residual Networks family of models
    https://arxiv.org/abs/1512.03385
    """
    # Stage sizes per depth; the 18/34 variants use the basic two-conv block.
    configs = {
        18: ([2, 2, 2, 2], True),
        34: ([3, 4, 6, 3], True),
        50: ([3, 4, 6, 3], False),
        101: ([3, 4, 23, 3], False),
        152: ([3, 8, 36, 3], False),
    }
    if nlayer not in configs:
        raise ValueError("Invalid nlayer (%i); must be one of: 18,34,50,101,152" %
                         nlayer)
    layer_counts, basic = configs[nlayer]
    builder = LayerBuilder(tf.nn.relu, data_format, training, use_batch_norm=True,
                           conv_initializer=conv_initializer, adv_bn_init=adv_bn_init)
    return inference_resnet_v1_impl(builder, inputs, layer_counts, basic=basic)
def get_model_func(model_name):
    """Map a model name such as 'resnet50' to a network-builder callable."""
    prefix = 'resnet'
    if not model_name.startswith(prefix):
        raise ValueError("Invalid model type: %s" % model_name)
    nlayer = int(model_name[len(prefix):])
    def model_func(images, *args, **kwargs):
        return inference_resnet_v1(images, nlayer, *args, **kwargs)
    return model_func
def deserialize_image_record(record):
    """Parse one serialized ImageNet TFRecord example.

    Returns (jpeg_bytes, label, bbox, class_text); bbox is shaped
    [1, num_boxes, 4] in (ymin, xmin, ymax, xmax) order.
    """
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], tf.string, ''),
        'image/class/label': tf.FixedLenFeature([1], tf.int64, -1),
        'image/class/text': tf.FixedLenFeature([], tf.string, ''),
        'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32)
    }
    with tf.name_scope('deserialize_image_record'):
        obj = tf.parse_single_example(record, feature_map)
        imgdata = obj['image/encoded']
        label = tf.cast(obj['image/class/label'], tf.int32)
        # Stack the sparse coordinates into [4, num_boxes]...
        bbox = tf.stack([obj['image/object/bbox/%s' % x].values
                         for x in ['ymin', 'xmin', 'ymax', 'xmax']])
        # ...then reorder to [1, num_boxes, 4].
        bbox = tf.transpose(tf.expand_dims(bbox, 0), [0, 2, 1])
        text = obj['image/class/text']
        return imgdata, label, bbox, text
def decode_jpeg(imgdata, channels=3):
    """Decode a JPEG byte string into a uint8 image tensor.

    Fancy upscaling is disabled and the INTEGER_FAST DCT method used;
    both trade a little decode fidelity for throughput.
    """
    return tf.image.decode_jpeg(imgdata, channels=channels,
                                fancy_upscaling=False,
                                dct_method='INTEGER_FAST')
def crop_and_resize_image(image, original_bbox, height, width, distort=False):
    """Crop (randomly when distorting, centrally otherwise) and resize.

    Returns a uint8 image of size [height, width].  original_bbox is
    currently unused: the distorted crop is sampled over the whole frame.
    """
    with tf.name_scope('crop_and_resize'):
        # Evaluation is done on a center-crop of this ratio
        eval_crop_ratio = 0.8
        if distort:
            # Training path: Inception-style random crop sampled over a
            # frame slightly larger than the final size.
            initial_shape = [int(round(height / eval_crop_ratio)),
                             int(round(width / eval_crop_ratio)),
                             3]
            bbox_begin, bbox_size, bbox = \
                tf.image.sample_distorted_bounding_box(
                    initial_shape,
                    bounding_boxes=tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]),
                    # tf.zeros(shape=[1,0,4]), # No bounding boxes
                    min_object_covered=0.1,
                    aspect_ratio_range=[3. / 4., 4. / 3.],
                    area_range=[0.08, 1.0],
                    max_attempts=100,
                    seed=11 * hvd.rank(),  # Need to set for deterministic results
                    use_image_if_no_bounding_boxes=True)
            bbox = bbox[0, 0]  # Remove batch, box_idx dims
        else:
            # Central crop
            ratio_y = ratio_x = eval_crop_ratio
            bbox = tf.constant([0.5 * (1 - ratio_y), 0.5 * (1 - ratio_x),
                                0.5 * (1 + ratio_y), 0.5 * (1 + ratio_x)])
        image = tf.image.crop_and_resize(
            image[None, :, :, :], bbox[None, :], [0], [height, width])[0]
        # crop_and_resize yields floats; clamp and convert back to uint8.
        image = tf.clip_by_value(image, 0., 255.)
        image = tf.cast(image, tf.uint8)
        return image
def parse_and_preprocess_image_record(record, | |
import PySimpleGUI as sg
import pandas as pd
from functools import reduce
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from math import pi
from pylab import *
def user_input_GUI():
    """Show the main input window and run the analysis loop.

    Collects the portfolio tickers, share counts, timeline and index
    weights from the user, then computes the scores, shows the radar chart
    and the market-liquidity popup.  Sets the module-level
    stock_share_hash, index_hash and chart_num used by the other functions.
    """
    global stock_share_hash, index_hash, chart_num
    layout = [
        [sg.Text('Please enter Portfolio and its individual stock share', font=("Helvetica bold", 20))],
        [sg.Text('Portfolio', size=(7, 1), font=("Helvetica", 16)),
         sg.InputText('', key='stock', do_not_clear=True, font=("Helvetica", 16))],
        [sg.Text('Share', size=(7, 1), font=("Helvetica", 16)),
         sg.InputText('', key='share', do_not_clear=True, font=("Helvetica", 16))],
        [sg.Text('Data Timeline:', font=("Helvetica bold", 16))],
        [sg.InputCombo(('Most Recent Week', 'Most Recent Month', 'All Data'), key='time', font=("Helvetica", 16), size=(16, 1))],
        [sg.Text('Number of Radar Chart (max 8):', font=("Helvetica bold", 16))],
        [sg.InputText('3', key='chart', do_not_clear=True, size=(3, 1), font=("Helvetica", 16))],
        [sg.Text('Indices Weight (0 - 1)', font=("Helvetica bold", 16))],
        [sg.Text('SPI:', size=(4, 1), font=("Helvetica", 16)),
         sg.InputText('1', key='spi_weight', do_not_clear=True, size=(3, 1), font=("Helvetica", 16)),
         sg.Text('TPI:', size=(4, 1), font=("Helvetica", 16)),
         sg.InputText('1', key='tpi_weight', do_not_clear=True, size=(3, 1), font=("Helvetica", 16)),
         sg.Text('SLI:', size=(4, 1), font=("Helvetica", 16)),
         sg.InputText('1', key='sli_weight', do_not_clear=True, size=(3, 1), font=("Helvetica", 16)),
         sg.Text('PRI:', size=(4, 1), font=("Helvetica", 16)),
         sg.InputText('1', key='pri_weight', do_not_clear=True, size=(3, 1), font=("Helvetica", 16)),
         sg.Text('ATSI:', size=(4, 1), font=("Helvetica", 16)),
         sg.InputText('1', key='atsi_weight', do_not_clear=True, size=(3, 1), font=("Helvetica", 16))],
        [sg.Submit('Analyze', font=("Helvetica", 16)), sg.Exit(font=("Helvetica", 16))]
    ]
    window = sg.Window('Client Tool for Finding Optimal ATS', location=(800, 50)).Layout(layout)
    while True:
        event, stock_share_hash_old = window.Read()
        if event is None or event == 'Exit':
            break
        else:
            # Split every comma-separated field into a list of strings.
            for key, value in stock_share_hash_old.items():
                stock_share_hash_old.update({key: value.split(',')})
            # Convert the share counts to integers.
            newlist = []
            for value in stock_share_hash_old['share']:
                newlist.append(int(value))
            stock_share_hash_old.update({'share': newlist})
            # Build the upper-cased ticker -> share-count mapping.
            stock_share_hash = {}
            for index in range(len(stock_share_hash_old['stock'])):
                stock_share_hash[stock_share_hash_old['stock'][index].upper()] = stock_share_hash_old['share'][index]
            chart_num = int(stock_share_hash_old['chart'][0])
            time = stock_share_hash_old['time'][0]
            # Collect the five index weights entered in the GUI.
            index_hash = {}
            index_hash.update({'spi_weight': stock_share_hash_old['spi_weight']})
            # stock_share_hash.pop('spi_weight')
            index_hash.update({'tpi_weight': stock_share_hash_old['tpi_weight']})
            # stock_share_hash.pop('tpi_weight')
            index_hash.update({'sli_weight': stock_share_hash_old['sli_weight']})
            # stock_share_hash.pop('sli_weight')
            index_hash.update({'pri_weight': stock_share_hash_old['pri_weight']})
            # stock_share_hash.pop('pri_weight')
            index_hash.update({'atsi_weight': stock_share_hash_old['atsi_weight']})
            # stock_share_hash.pop('atsi_weight')
            # Remove spaces in key
            stock_share_hash = {k.replace(' ', ''): v for k, v in stock_share_hash.items()}
            # Run the full analysis pipeline and present the results.
            finra = subset_data(choice=time, finra_data=finra_data)
            overall_score(input=stock_share_hash, finra_data=finra, sector_data=sector_data)
            market_liquidity_ratio(stock_share_hash=stock_share_hash, finra_data=finra, ratio_data=ratio_data)
            sg.Popup('Most Optimal ATS for Routing this Portfolio:',
                     stock_share_hash, score_sorted, '\n'.join(list_mlr), font=("Helvetica", 16), location=(800, 450))
    window.Close()
    return
def subset_data(choice, finra_data):
    """Return the slice of the FINRA data matching the requested timeline.

    Also sets the module-level ``week`` (number of trading days covered),
    which the participation-rate and trade-size indices divide by.
    """
    global week
    finra_data['Week'] = pd.to_datetime(finra_data['Week'])
    if choice == 'Most Recent Week':
        week = 1 * 5
        # Fix: "most recent" means the LATEST week; the original used min(),
        # which selected the oldest one (market_liquidity_ratio already uses
        # max() for the same notion).
        data = finra_data[finra_data['Week'] == max(finra_data.Week.unique())]
    elif choice == 'Most Recent Month':
        week = 4 * 5
        # Fix: take the latest four weeks rather than the earliest four.
        data = finra_data[finra_data['Week'].isin(sorted(finra_data.Week.unique())[-4:])]
    else:
        data = finra_data
        week = len(data.Week.unique()) * 5
    return data
def portfolio_share_prop_index(portfolio, data):
    """Share-proportion index (SPI).

    For each ATS, sums over the portfolio the proportion of each stock's
    share volume executed on that ATS, then min-max normalizes the result
    to [0, 1].  Returns a pandas Series indexed by ATS_MPID.
    """
    portfolio_data = data[data['Symbol'].isin(portfolio)]
    ats_list = data.ATS_MPID.unique()
    hash_portfolio = {stock: [] for stock in portfolio}
    for stock in portfolio:
        each_stock = portfolio_data[portfolio_data['Symbol'] == stock]
        stock_sum_by_ats = each_stock.groupby(['ATS_MPID'])['Shares'].sum()
        # Proportion of this stock's volume executed on each ATS.
        model = stock_sum_by_ats / sum(stock_sum_by_ats)
        # Fix: Series.append was removed in pandas 2.0; pad the ATSs this
        # stock never traded on with zeros in a single concat instead.
        missing = [ats for ats in ats_list if ats not in model.index]
        if missing:
            model = pd.concat([model, pd.Series(0.0, index=missing)])
        hash_portfolio[stock] = model.sort_values(ascending=False)
    # Accumulate per-stock proportions into one per-ATS index.
    worthfullness_index = pd.Series(0.0, index=ats_list)
    for stock in portfolio:
        worthfullness_index += hash_portfolio[stock]
    worthfullness_index_normalized = \
        (worthfullness_index - min(worthfullness_index)) / (max(worthfullness_index) - min(worthfullness_index))
    return worthfullness_index_normalized
def portfolio_trade_prop_index(portfolio, data):
    """Trade-proportion index (TPI).

    Same construction as portfolio_share_prop_index but over trade counts:
    for each ATS, sums the proportion of each stock's trades executed on
    that ATS, then min-max normalizes to [0, 1].
    """
    portfolio_data = data[data['Symbol'].isin(portfolio)]
    ats_list = data.ATS_MPID.unique()
    hash_portfolio = {stock: [] for stock in portfolio}
    for stock in portfolio:
        each_stock = portfolio_data[portfolio_data['Symbol'] == stock]
        stock_sum_by_ats = each_stock.groupby(['ATS_MPID'])['Trades'].sum()
        # Proportion of this stock's trades executed on each ATS.
        model = stock_sum_by_ats / sum(stock_sum_by_ats)
        # Fix: Series.append was removed in pandas 2.0; pad the ATSs this
        # stock never traded on with zeros in a single concat instead.
        missing = [ats for ats in ats_list if ats not in model.index]
        if missing:
            model = pd.concat([model, pd.Series(0.0, index=missing)])
        hash_portfolio[stock] = model.sort_values(ascending=False)
    # Accumulate per-stock proportions into one per-ATS index.
    worthfullness_index = pd.Series(0.0, index=ats_list)
    for stock in portfolio:
        worthfullness_index += hash_portfolio[stock]
    worthfullness_index_normalized = \
        (worthfullness_index - min(worthfullness_index)) / (max(worthfullness_index) - min(worthfullness_index))
    return worthfullness_index_normalized
# test_portfolio = ['A', 'AA']
# data = pd.read_csv("/Users/TonY/Desktop/capstone/finra.csv")
# portfolio_share_prop_index(test_portfolio, data)
# a = portfolio_share_prop_index(test_portfolio, data) + portfolio_trade_prop_index(portfolio, data)
def sector_liquidity_index(portfolio, data, sector_data):
    """Sector-liquidity index (SLI).

    For every sector represented in the portfolio, computes each ATS's
    share of that sector's total volume, sums across sectors and min-max
    normalizes to [0, 1].  Returns a pandas Series indexed by ATS_MPID.
    """
    ats_list = data.ATS_MPID.unique()
    # Sectors represented in the portfolio (one lookup per stock).
    sector_list = []
    for stock in portfolio:
        sector_list.append(sector_data.loc[sector_data['Symbol'] == stock, 'sector'].iloc[0])
    sector_list = set(sector_list)
    # All symbols belonging to each of those sectors.
    sector_stock_hash = {}
    for sector in sector_list:
        sector_stock_hash.update(
            {sector: sector_data.loc[sector_data['sector'] == sector, 'Symbol'].values[:].tolist()})
    hash_index = {}
    for sector in sector_stock_hash:
        portfolio_data = data[data['Symbol'].isin(sector_stock_hash[sector])]
        sector_sum_by_ats = portfolio_data.groupby(['ATS_MPID'])['Shares'].sum()
        # Proportion of the sector's volume executed on each ATS.
        model = sector_sum_by_ats / sum(sector_sum_by_ats)
        # Fix: Series.append was removed in pandas 2.0; pad the ATSs with no
        # volume in this sector with zeros in a single concat instead.
        missing = [ats for ats in ats_list if ats not in model.index]
        if missing:
            model = pd.concat([model, pd.Series(0.0, index=missing)])
        hash_index[sector] = model
    # Accumulate per-sector proportions into one per-ATS index.
    sl_index = pd.Series(0.0, index=ats_list)
    for sector in sector_list:
        sl_index += hash_index[sector]
    sl_index_normalized = (sl_index - min(sl_index)) / (max(sl_index) - min(sl_index))
    return sl_index_normalized
# data = pd.read_csv("/Users/TonY/Desktop/capstone/finra.csv")
# sector_data = pd.read_csv('/Users/TonY/Desktop/capstone/market_cap_sector_mktcapcategory_by_symbol.csv', encoding='utf-8')
# test_portfolio = ['A', 'AA', 'ADRO', 'AABA']
# b = sector_liquidity_index(test_portfolio, data, sector_data)
# len(b)
def participation_rate_index(hash_portfolio_share, data):
    """Participation-rate index (PRI).

    For each ATS, sums each order's size relative to the ATS's average
    daily volume in that stock, min-max normalizes, then inverts the
    non-zero entries so that a LOWER participation rate scores higher.
    Relies on the module-level ``week`` set by subset_data().
    """
    hash_par_rate_index = {}
    ats_list = data.ATS_MPID.unique()
    for stock in hash_portfolio_share:
        data_selected = data.loc[data['Symbol'] == stock]
        # Average daily volume per ATS over the selected window.
        result = data_selected.groupby('ATS_MPID')['Shares'].sum() / week
        # Our order size relative to the venue's daily volume.
        model = hash_portfolio_share[stock] / result
        # Fix: Series.append was removed in pandas 2.0; pad the ATSs this
        # stock never traded on with zeros in a single concat instead.
        missing = [ats for ats in ats_list if ats not in model.index]
        if missing:
            model = pd.concat([model, pd.Series(0.0, index=missing)])
        hash_par_rate_index[stock] = model
    # Accumulate the per-stock ratios into one per-ATS index.
    pr_index = pd.Series(0.0, index=ats_list)
    for stock in hash_portfolio_share:
        pr_index += hash_par_rate_index[stock]
    pr_index_normalized = (pr_index - min(pr_index)) / (max(pr_index) - min(pr_index))
    # Invert non-zero entries so smaller participation is better.
    # (.iloc replaces the deprecated positional Series[int] lookup.)
    for i in range(len(pr_index_normalized)):
        if pr_index_normalized.iloc[i] != 0:
            pr_index_normalized.iloc[i] = 1 - pr_index_normalized.iloc[i]
    return pr_index_normalized
# data = pd.read_csv("/Users/TonY/Desktop/capstone/finra.csv")
#
# hash_portfolio_share = {'A': 100, "AA": 200}
# participation_rate_index(hash_portfolio_share, data)
def avg_trade_size_index(hash_portfolio_share, data):
    """Average-trade-size index (ATSI).

    For each ATS, sums each order's size relative to the venue's average
    daily trade size in that stock, then min-max normalizes to [0, 1].
    Relies on the module-level ``week`` set by subset_data().
    """
    hash_ats_index = {}
    ats_list = data.ATS_MPID.unique()
    for stock in hash_portfolio_share:
        data_selected = data.loc[data['Symbol'] == stock]
        share_sum = data_selected.groupby('ATS_MPID')['Shares'].sum()
        trade_sum = data_selected.groupby('ATS_MPID')['Trades'].sum()
        # Order size relative to the venue's average daily trade size.
        model = hash_portfolio_share[stock] / ((share_sum / trade_sum) / week)
        # Fix: Series.append was removed in pandas 2.0; pad the ATSs this
        # stock never traded on with zeros in a single concat instead.
        missing = [ats for ats in ats_list if ats not in model.index]
        if missing:
            model = pd.concat([model, pd.Series(0.0, index=missing)])
        hash_ats_index[stock] = model
    # Accumulate the per-stock ratios into one per-ATS index.
    ats_index = pd.Series(0.0, index=ats_list)
    for stock in hash_portfolio_share:
        ats_index += hash_ats_index[stock]
    ats_index_normalized = (ats_index - min(ats_index)) / (max(ats_index) - min(ats_index))
    return ats_index_normalized
def overall_score(input, finra_data, sector_data):
    """Combine the five indices into one weighted per-ATS score.

    Reads the GUI weights from the module-level index_hash, stores the five
    index Series in globals for index_to_dataframe()/radar_chart(), leaves
    the sorted scores in score_sorted, and finally draws the radar chart.
    ``input`` maps ticker -> share count (see user_input_GUI).
    """
    # input = user_input_GUI()
    global spi, tpi, sli, pri, atsi, score_sorted
    spi = portfolio_share_prop_index(portfolio=input.keys(), data=finra_data)
    tpi = portfolio_trade_prop_index(portfolio=input.keys(), data=finra_data)
    sli = sector_liquidity_index(portfolio=input.keys(), data=finra_data, sector_data=sector_data)
    pri = participation_rate_index(hash_portfolio_share=input, data=finra_data)
    atsi = avg_trade_size_index(hash_portfolio_share=input, data=finra_data)
    # Weighted sum of the five normalized indices.
    score = float(index_hash['spi_weight'][0]) * spi + float(index_hash['tpi_weight'][0]) * tpi + \
            float(index_hash['sli_weight'][0]) * sli + float(index_hash['pri_weight'][0]) * pri + \
            float(index_hash['atsi_weight'][0]) * atsi
    weight_list = [float(index_hash['spi_weight'][0]), float(index_hash['tpi_weight'][0]),
                   float(index_hash['sli_weight'][0]), float(index_hash['pri_weight'][0]),
                   float(index_hash['atsi_weight'][0])]
    # Average over the indices that actually carry weight.
    count_non_zero = 0
    for weight in weight_list:
        if weight != 0:
            count_non_zero += 1
    score /= count_non_zero
    # Keep a few more entries than charts so the popup can show extras.
    score_sorted = round(score.sort_values(ascending=False), 3)[0:chart_num+3]
    # print(stock_share_hash, '\n', score_sorted[0:5])
    return radar_chart()
def index_to_dataframe():
    """Merge the five global index Series into one DataFrame keyed by ATS."""
    named_series = ((spi, 'SPI'), (tpi, 'TPI'), (sli, 'SLI'),
                    (pri, 'PRI'), (atsi, 'ATSI'))
    frames = []
    for series, column in named_series:
        frame = pd.DataFrame(series, columns=[column])
        frame.index.name = 'ATS'
        frames.append(frame)
    return reduce(lambda left, right: pd.merge(left, right, on=['ATS'], how='outer'), frames)
def radar_chart():
    """Draw one radar (spider) chart overlaying the top-scoring ATSs.

    Uses the module-level score_sorted (ranking), chart_num (number of
    ATSs to draw) and stock_share_hash (title); one polygon per ATS over
    the five index axes.
    """
    plt.close('all')
    df = index_to_dataframe()
    # number of variable
    categories = list(df)[0:]
    N = len(categories)
    # What will be the angle of each axis in the plot? (we divide the plot / number of variable)
    angles = [n / float(N) * 2 * pi for n in range(N)]
    # Repeat the first angle to close the polygon.
    angles += angles[:1]
    # Initialise the spider plot
    ax = plt.subplot(111, polar=True)
    # If you want the first axis to be on top:
    ax.set_theta_offset(pi / 2)
    ax.set_theta_direction(-1)
    # Draw one axis per variable and add the category labels.
    plt.xticks(angles[:-1], categories)
    # Draw ylabels
    ax.set_rlabel_position(0)
    plt.yticks([0.2, 0.4, 0.6, 0.8], ["0.2", "0.4", "0.6", '0.8'], color="grey", size=7)
    plt.ylim(0, 1)
    # ------- PART 2: Add plots
    # Plot each individual = each line of the data
    # I don't do a loop, because plotting more than 3 groups makes the chart unreadable
    top_ats = score_sorted
    color = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
    for chart in range(chart_num):
        values = df.loc[df.index == top_ats.index[chart]].values.flatten().tolist()
        # Close the polygon by repeating the first value.
        values += values[:1]
        ax.plot(angles, values, linewidth=1, linestyle='solid', label=top_ats.index[chart])
        ax.fill(angles, values, color[chart], alpha=0.1)
    # Add legend
    plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))
    plt.title(stock_share_hash, y=1.08)
    # Position the Tk window next to the main GUI window.
    thismanager = get_current_fig_manager()
    thismanager.window.wm_geometry("+100+150")
    plt.show(block=False)
def market_liquidity_ratio(stock_share_hash, finra_data, ratio_data):
global list_mlr
# finra_data['Week'] = pd.to_datetime(finra_data['Week'])
last_week = finra_data['Week'].max()
last_week_data = finra_data[finra_data['Week'] == last_week]
lastweek_shares = last_week_data.groupby(['Symbol'])['Shares'].sum()
lastweek_shares = pd.DataFrame(lastweek_shares)
ratio_data_merged = pd.merge(left=lastweek_shares, right=ratio_data, left_on="Symbol", right_on="symbol", how="left")
ratio_data_merged['Total_Volume'] = ratio_data_merged['volume'] / | |
from iocbuilder import AutoSubstitution, Device
from iocbuilder.arginfo import makeArgInfo, Simple, Ident, Choice
from iocbuilder.iocinit import IocDataStream
from iocbuilder.modules.asyn import AsynPort
from iocbuilder.modules.ADCore import ADCore, ADBaseTemplate, makeTemplateInstance
from iocbuilder.modules.restClient import restClient
from iocbuilder.modules.calc import Calc
from util import debug_print, OdinPaths, data_file_path, expand_template_file, \
create_batch_entry, create_config_entry
# ~~~~~~~~ #
# OdinData #
# ~~~~~~~~ #
# Report the resolved OdinData installation prefix at import time (verbosity 1)
debug_print("OdinData: {}".format(OdinPaths.ODIN_DATA), 1)
class _OdinDataTemplate(AutoSubstitution):
    """Substitution of the OdinData EPICS database template."""
    TemplateFile = "OdinData.template"
class _OdinData(Device):
    """Store configuration for an OdinData process.

    One instance represents a FrameReceiver/FrameProcessor pair: it records
    the host, channel ports and plugin chain needed to generate the JSON
    configuration files and batch/startup entries for that process.
    """
    INDEX = 1  # Unique index for each OdinData instance
    RANK = None  # Process rank; assigned by _OdinDataServer.configure_processes
    FP_ENDPOINT = ""  # FrameProcessor ctrl endpoint; set in create_od_startup_scripts
    FR_ENDPOINT = ""  # FrameReceiver ctrl endpoint; set in create_od_startup_scripts
    # Device attributes
    AutoInstantiate = True

    def __init__(self, server, READY, RELEASE, META, PLUGINS):
        """Record connection details for one OdinData process.

        server: owning _OdinDataServer (provides the host IP)
        READY, RELEASE, META: port numbers of the frame-ready, frame-release
            and meta-data channels
        PLUGINS: optional _PluginConfig describing the FP plugin chain
        """
        self.__super.__init__()
        # Update attributes with parameters
        self.__dict__.update(locals())
        self.IP = server.IP
        self.plugins = PLUGINS
        # Create unique R MACRO for template file - OD1, OD2 etc.
        self.R = ":OD{}:".format(self.INDEX)
        self.index = _OdinData.INDEX
        _OdinData.INDEX += 1

    def create_config_file(self, prefix, template, extra_macros=None):
        """Expand *template* into "<prefix><rank+1>.json" for this process.

        If a plugin chain is configured, LOAD/CONNECT/CONFIG macro entries are
        generated from it, plus one stored "disconnect all then reconnect"
        configuration per named mode.
        """
        macros = dict(
            IP=self.server.IP, ODIN_DATA=OdinPaths.ODIN_DATA,
            RD_PORT=self.READY, RL_PORT=self.RELEASE, META_PORT=self.META
        )
        if extra_macros is not None:
            macros.update(extra_macros)
        if self.plugins is not None:
            load_entries = []
            connect_entries = []
            config_entries = []
            for plugin in self.plugins:
                load_entries.append(plugin.create_config_load_entry())
                connect_entries.append(create_config_entry(plugin.create_config_connect_entry()))
                # NOTE: self.TOTAL is assigned externally by
                # _OdinDataServer.configure_processes before this runs
                config_entries += plugin.create_extra_config_entries(self.RANK, self.TOTAL)
            # Each mode becomes a stored config that first disconnects every
            # plugin, then connects only those valid for that mode
            for mode in self.plugins.modes:
                valid_entries = False
                mode_config_dict = {'store': {'index': mode, 'value': [{'plugin': {'disconnect': 'all'}}]}}
                for plugin in self.plugins:
                    entry = plugin.create_config_connect_entry(mode)
                    if entry is not None:
                        valid_entries = True
                        mode_config_dict['store']['value'].append(entry)
                if valid_entries:
                    connect_entries.append(create_config_entry(mode_config_dict))
            custom_plugin_config_macros = dict(
                LOAD_ENTRIES=",\n ".join(load_entries),
                CONNECT_ENTRIES=",\n ".join(connect_entries),
                CONFIG_ENTRIES=",\n ".join(config_entries)
            )
            macros.update(custom_plugin_config_macros)
        expand_template_file(template, macros, "{}{}.json".format(prefix, self.RANK + 1))

    def create_config_files(self, index, total):
        """Generate FR/FP config files; must be provided by detector subclasses."""
        raise NotImplementedError("Method must be implemented by child classes")

    def add_batch_entries(self, entries, beamline, number):
        """Append FrameReceiver/FrameProcessor batch entries for this process.

        Returns the next free batch *number* (two entries are consumed).
        """
        entries.append(
            create_batch_entry(beamline, number, "FrameReceiver{}".format(self.RANK + 1))
        )
        number += 1
        entries.append(
            create_batch_entry(beamline, number, "FrameProcessor{}".format(self.RANK + 1))
        )
        return number + 1
class _FrameProcessorPlugin(Device):
    """Base class for FrameProcessor plugin definitions.

    Subclasses set NAME/CLASS_NAME (and optionally LIBRARY_NAME, LIBRARY_PATH
    and TEMPLATE); instances are chained via _PluginConfig and serialised to
    FrameProcessor JSON configuration entries.
    """
    NAME = None  # Plugin index used in config entries
    CLASS_NAME = None  # C++ class name registered with the FrameProcessor
    LIBRARY_NAME = None  # Shared-library stem; defaults to CLASS_NAME when None
    LIBRARY_PATH = OdinPaths.ODIN_DATA  # Install prefix containing prefix/lib
    TEMPLATE = None  # Optional AutoSubstitution template for EPICS records
    TEMPLATE_INSTANTIATED = False

    def __init__(self, source=None):
        """source: upstream plugin frames come from; frame receiver if None."""
        self.connections = {}
        if source is not None:
            self.source = source.NAME
        else:
            self.source = "frame_receiver"

    def add_mode(self, mode, source=None):
        """Register an alternative upstream connection used in *mode*."""
        if source is not None:
            self.connections[mode] = source.NAME
        else:
            self.connections[mode] = "frame_receiver"

    def create_config_load_entry(self):
        """Return the JSON config entry that loads this plugin's library."""
        library_name = self.LIBRARY_NAME if self.LIBRARY_NAME is not None else self.CLASS_NAME
        entry = {
            "plugin": {
                "load": {
                    "index": self.NAME,
                    "name": self.CLASS_NAME,
                    "library": "{}/prefix/lib/lib{}.so".format(self.LIBRARY_PATH, library_name)
                }
            }
        }
        return create_config_entry(entry)

    def create_config_connect_entry(self, mode=None):
        """Return a connect-entry dict for *mode* (default chain when None).

        Returns None when this plugin has no connection in that mode.
        """
        cnxn = None
        if mode is None:
            cnxn = self.source
        elif mode in self.connections:
            cnxn = self.connections[mode]
        entry = None
        if cnxn is not None:
            entry = {
                "plugin": {
                    "connect": {
                        "index": self.NAME,
                        "connection": cnxn,
                    }
                }
            }
        return entry

    def create_extra_config_entries(self, rank, total):
        """Hook for subclasses to contribute extra per-process config entries."""
        return []

    def create_template(self, template_args):
        """Instantiate this plugin's EPICS template at most once.

        NOTE(review): the assignment below creates an instance attribute that
        shadows the class-level TEMPLATE_INSTANTIATED, so the once-only guard
        is per-instance rather than per-class -- confirm that is intended.
        """
        if self.TEMPLATE is not None and not self.TEMPLATE_INSTANTIATED:
            makeTemplateInstance(self.TEMPLATE, locals(), template_args)
            self.TEMPLATE_INSTANTIATED = True
# ArgInfo is attached after the class body so the Ident argument can refer to
# _FrameProcessorPlugin itself (the name is not bound during class creation)
_FrameProcessorPlugin.ArgInfo = makeArgInfo(_FrameProcessorPlugin.__init__,
    source=Ident("Plugin to connect to", _FrameProcessorPlugin)
)
class _PluginConfig(Device):
    """An ordered chain of up to eight FrameProcessor plugins."""

    def __init__(self, PLUGIN_1=None, PLUGIN_2=None, PLUGIN_3=None, PLUGIN_4=None, PLUGIN_5=None,
                 PLUGIN_6=None, PLUGIN_7=None, PLUGIN_8=None):
        # Keep only the plugins actually supplied, preserving their order
        self.plugins = [plugin for plugin in
                        [PLUGIN_1, PLUGIN_2, PLUGIN_3, PLUGIN_4,
                         PLUGIN_5, PLUGIN_6, PLUGIN_7, PLUGIN_8]
                        if plugin is not None]
        # Named connection modes; left empty here and filled in by
        # detector-specific configuration (see detector_setup)
        self.modes = []

    ArgInfo = makeArgInfo(__init__,
        PLUGIN_1=Ident("Plugin 1", _FrameProcessorPlugin),
        PLUGIN_2=Ident("Plugin 2", _FrameProcessorPlugin),
        PLUGIN_3=Ident("Plugin 3", _FrameProcessorPlugin),
        PLUGIN_4=Ident("Plugin 4", _FrameProcessorPlugin),
        PLUGIN_5=Ident("Plugin 5", _FrameProcessorPlugin),
        PLUGIN_6=Ident("Plugin 6", _FrameProcessorPlugin),
        PLUGIN_7=Ident("Plugin 7", _FrameProcessorPlugin),
        PLUGIN_8=Ident("Plugin 8", _FrameProcessorPlugin)
    )

    def detector_setup(self, od_args):
        """No op, should be overridden by specific detector."""
        pass

    def __iter__(self):
        # Iterating a _PluginConfig yields its plugins in chain order
        for plugin in self.plugins:
            yield plugin
class _OdinDataServer(Device):
    """Store configuration for an OdinDataServer.

    Represents one host machine running PROCESSES OdinData process pairs.
    Each process is allocated a block of 10 ports starting at PORT_BASE:
    base+1 = frame ready, base+2 = frame release, base+8 = meta channel
    (base+0 = FR control and base+4 = FP control, assigned in
    create_od_startup_scripts).
    """
    PORT_BASE = 5000
    PROCESS_COUNT = 0
    # Device attributes
    AutoInstantiate = True

    def __init__(self, IP, PROCESSES, SHARED_MEM_SIZE, PLUGIN_CONFIG=None,
                 IO_THREADS=1, TOTAL_NUMA_NODES=0):
        """Create the OdinData process objects for this server."""
        self.__super.__init__()
        # Update attributes with parameters
        self.__dict__.update(locals())
        self.plugins = PLUGIN_CONFIG
        self.processes = []
        for _ in range(PROCESSES):
            # Ports: base+1 ready, base+2 release, base+8 meta (see class doc)
            self.processes.append(
                self.create_odin_data_process(
                    self, self.PORT_BASE + 1, self.PORT_BASE + 2, self.PORT_BASE + 8, PLUGIN_CONFIG)
            )
            # Next process on this server gets the next block of 10 ports;
            # this instance attribute shadows the class-level PORT_BASE
            self.PORT_BASE += 10
        self.instantiated = False  # Make sure instances are only used once

    ArgInfo = makeArgInfo(__init__,
        IP=Simple("IP address of server hosting OdinData processes", str),
        PROCESSES=Simple("Number of OdinData processes on this server", int),
        SHARED_MEM_SIZE=Simple("Size of shared memory buffers in bytes", int),
        PLUGIN_CONFIG=Ident("Define a custom set of plugins", _PluginConfig),
        IO_THREADS=Simple("Number of FR Ipc Channel IO threads to use", int),
        TOTAL_NUMA_NODES=Simple("Total number of numa nodes available to distribute processes over"
                                " - Optional for performance tuning", int)
    )

    def create_odin_data_process(self, server, ready, release, meta, plugin_config):
        """Build one _OdinData; must be provided by detector subclasses."""
        raise NotImplementedError("Method must be implemented by child classes")

    def configure_processes(self, server_rank, total_servers, total_processes):
        """Assign global RANK/TOTAL to each process.

        Ranks are interleaved across servers: server N owns ranks
        N, N+total_servers, N+2*total_servers, ...
        """
        rank = server_rank
        for idx, process in enumerate(self.processes):
            process.RANK = rank
            process.TOTAL = total_processes
            rank += total_servers

    def create_od_startup_scripts(self):
        """Write stFrameReceiverN.sh / stFrameProcessorN.sh for each process."""
        for idx, process in enumerate(self.processes):
            # Control ports mirror the per-process port blocks used in __init__
            fp_port_number = 5004 + (10 * idx)
            fr_port_number = 5000 + (10 * idx)
            ready_port_number = 5001 + (10 * idx)
            release_port_number = 5002 + (10 * idx)
            # If TOTAL_NUMA_NODES was set, we enable the NUMA call macro instantitation
            if self.TOTAL_NUMA_NODES > 0:
                # Round-robin processes over the available NUMA nodes
                numa_node = idx % int(self.TOTAL_NUMA_NODES)
                numa_call = "numactl --membind={node} --cpunodebind={node} ".format(node=numa_node)
            else:
                numa_call = ""
            # Store server designation on OdinData object
            process.FP_ENDPOINT = "{}:{}".format(self.IP, fp_port_number)
            process.FR_ENDPOINT = "{}:{}".format(self.IP, fr_port_number)
            output_file = "stFrameReceiver{}.sh".format(process.RANK + 1)
            macros = dict(
                NUMBER=process.RANK + 1,
                ODIN_DATA=OdinPaths.ODIN_DATA,
                BUFFER_IDX=idx + 1, SHARED_MEMORY=self.SHARED_MEM_SIZE,
                CTRL_PORT=fr_port_number, IO_THREADS=self.IO_THREADS,
                READY_PORT=ready_port_number, RELEASE_PORT=release_port_number,
                LOG_CONFIG=data_file_path("log4cxx.xml"),
                NUMA=numa_call)
            expand_template_file("fr_startup", macros, output_file, executable=True)
            output_file = "stFrameProcessor{}.sh".format(process.RANK + 1)
            macros = dict(
                NUMBER=process.RANK + 1,
                ODIN_DATA=OdinPaths.ODIN_DATA,
                HDF5_FILTERS=OdinPaths.HDF5_FILTERS,
                CTRL_PORT=fp_port_number,
                READY_PORT=ready_port_number, RELEASE_PORT=release_port_number,
                LOG_CONFIG=data_file_path("log4cxx.xml"),
                NUMA=numa_call)
            expand_template_file("fp_startup", macros, output_file, executable=True)
class OdinLogConfig(Device):
    """Create logging configuration file.

    Expands the log4cxx XML template with the beamline and detector names
    at instantiation time, producing log4cxx.xml in the IOC data directory.
    """
    # Device attributes
    AutoInstantiate = True

    def __init__(self, BEAMLINE, DETECTOR):
        self.__super.__init__()
        # Update attributes with parameters
        self.__dict__.update(locals())
        self.create_config_file(BEAMLINE, DETECTOR)

    def create_config_file(self, BEAMLINE, DETECTOR):
        """Expand the log4cxx template into log4cxx.xml."""
        macros = dict(BEAMLINE=BEAMLINE, DETECTOR=DETECTOR)
        expand_template_file("log4cxx_template.xml", macros, "log4cxx.xml")

    # __init__ arguments
    ArgInfo = makeArgInfo(__init__,
        BEAMLINE=Simple("Beamline name, e.g. b21, i02-2", str),
        DETECTOR=Choice("Detector type", ["Excalibur1M", "Excalibur3M", "Eiger4M", "Eiger9M", "Eiger16M"])
    )
# ~~~~~~~~~~~ #
# OdinControl #
# ~~~~~~~~~~~ #
class _OdinControlServer(Device):
    """Store configuration for an OdinControlServer.

    Aggregates up to ten _OdinDataServer instances and generates the odin
    control server startup script and (via create_config_file) its config
    file, exposing every OdinData process through the fp/fr adapters.
    """
    ODIN_SERVER = None  # Path to the odin server executable; set by subclasses
    ADAPTERS = ["fp", "fr"]
    # Device attributes
    AutoInstantiate = True

    def __init__(self, IP, PORT=8888,
                 ODIN_DATA_SERVER_1=None, ODIN_DATA_SERVER_2=None,
                 ODIN_DATA_SERVER_3=None, ODIN_DATA_SERVER_4=None,
                 ODIN_DATA_SERVER_5=None, ODIN_DATA_SERVER_6=None,
                 ODIN_DATA_SERVER_7=None, ODIN_DATA_SERVER_8=None,
                 ODIN_DATA_SERVER_9=None, ODIN_DATA_SERVER_10=None):
        self.__super.__init__()
        # Update attributes with parameters
        self.__dict__.update(locals())
        # Bug fix: previously only servers 1-4 were collected here, so
        # ODIN_DATA_SERVER_5..10 were accepted (and advertised in ArgInfo)
        # but silently ignored. Collect all ten.
        self.odin_data_servers = [
            server for server in [
                ODIN_DATA_SERVER_1, ODIN_DATA_SERVER_2, ODIN_DATA_SERVER_3,
                ODIN_DATA_SERVER_4, ODIN_DATA_SERVER_5, ODIN_DATA_SERVER_6,
                ODIN_DATA_SERVER_7, ODIN_DATA_SERVER_8, ODIN_DATA_SERVER_9,
                ODIN_DATA_SERVER_10
            ] if server is not None
        ]
        if not self.odin_data_servers:
            raise ValueError("Received no control endpoints for Odin Server")
        # Flatten every server's processes into a single list
        self.odin_data_processes = []
        for server in self.odin_data_servers:
            self.odin_data_processes += server.processes
        self.create_startup_script()

    ArgInfo = makeArgInfo(__init__,
        IP=Simple("IP address of control server", str),
        PORT=Simple("Port of control server", int),
        ODIN_DATA_SERVER_1=Ident("OdinDataServer 1 configuration", _OdinDataServer),
        ODIN_DATA_SERVER_2=Ident("OdinDataServer 2 configuration", _OdinDataServer),
        ODIN_DATA_SERVER_3=Ident("OdinDataServer 3 configuration", _OdinDataServer),
        ODIN_DATA_SERVER_4=Ident("OdinDataServer 4 configuration", _OdinDataServer),
        ODIN_DATA_SERVER_5=Ident("OdinDataServer 5 configuration", _OdinDataServer),
        ODIN_DATA_SERVER_6=Ident("OdinDataServer 6 configuration", _OdinDataServer),
        ODIN_DATA_SERVER_7=Ident("OdinDataServer 7 configuration", _OdinDataServer),
        ODIN_DATA_SERVER_8=Ident("OdinDataServer 8 configuration", _OdinDataServer),
        ODIN_DATA_SERVER_9=Ident("OdinDataServer 9 configuration", _OdinDataServer),
        ODIN_DATA_SERVER_10=Ident("OdinDataServer 10 configuration", _OdinDataServer)
    )

    def get_extra_startup_macro(self):
        """Hook for subclasses to append extra command-line parameters."""
        return ""

    def create_startup_script(self):
        """Write stOdinServer.sh from the odin_server_startup template."""
        macros = dict(ODIN_SERVER=self.ODIN_SERVER, CONFIG="odin_server.cfg", EXTRA_PARAMS=self.get_extra_startup_macro())
        expand_template_file("odin_server_startup", macros, "stOdinServer.sh", executable=True)

    def create_config_file(self):
        """Write odin_server.cfg with the adapter configuration entries."""
        macros = dict(PORT=self.PORT,
                      ADAPTERS=", ".join(self.ADAPTERS),
                      ADAPTER_CONFIG="\n\n".join(self.create_odin_server_config_entries()),
                      STATIC_PATH=self.create_odin_server_static_path())
        expand_template_file("odin_server.ini", macros, "odin_server.cfg")

    def create_odin_server_static_path(self):
        """Path served as static content by the odin server."""
        return "./static"

    def create_odin_server_config_entries(self):
        """Return adapter config sections; must be provided by subclasses."""
        raise NotImplementedError("Method must be implemented by child classes")

    def _create_odin_data_config_entry(self):
        """Build the [adapter.fp] / [adapter.fr] sections, endpoints rank-ordered."""
        fp_endpoints = []
        fr_endpoints = []
        for process in sorted(self.odin_data_processes, key=lambda x: x.RANK):
            fp_endpoints.append(process.FP_ENDPOINT)
            fr_endpoints.append(process.FR_ENDPOINT)
        return "[adapter.fp]\n" \
               "module = odin_data.frame_processor_adapter.FrameProcessorAdapter\n" \
               "endpoints = {}\n" \
               "update_interval = 0.2\n\n" \
               "[adapter.fr]\n" \
               "module = odin_data.frame_receiver_adapter.FrameReceiverAdapter\n" \
               "endpoints = {}\n" \
               "update_interval = 0.2".format(", ".join(fp_endpoints), ", ".join(fr_endpoints))

    def add_batch_entry(self, entries, beamline, number):
        """Append the OdinServer batch entry; returns the next free number."""
        entries.append(create_batch_entry(beamline, number, "OdinServer"))
        return number + 1
# ~~~~~~~~~~~~ #
# AreaDetector #
# ~~~~~~~~~~~~ #
class _OdinDetectorTemplate(AutoSubstitution):
    """Substitution of the OdinDetector EPICS database template."""
    TemplateFile = "OdinDetector.template"
class _OdinDetector(AsynPort):
    """Create an odin detector.

    Base AsynPort for Odin areaDetector drivers; Initialise() emits the
    odinDetectorConfig() call into the IOC startup script (Python 2 file,
    hence the print statements).
    """
    Dependencies = (ADCore, restClient)
    # This tells xmlbuilder to use PORT instead of name as the row ID
    UniqueName = "PORT"

    def __init__(self, PORT, ODIN_CONTROL_SERVER, DETECTOR, BUFFERS = 0, MEMORY = 0, **args):
        # Init the superclass (AsynPort)
        self.__super.__init__(PORT)
        # Update the attributes of self from the commandline args
        self.__dict__.update(locals())
        # Define Macros for Initialise substitutions
        self.CONTROL_SERVER_IP = ODIN_CONTROL_SERVER.IP
        self.CONTROL_SERVER_PORT = ODIN_CONTROL_SERVER.PORT

    # __init__ arguments
    ArgInfo = ADBaseTemplate.ArgInfo + makeArgInfo(__init__,
        PORT=Simple("Port name for the detector", str),
        ODIN_CONTROL_SERVER=Ident("Odin control server", _OdinControlServer),
        DETECTOR=Simple("Name of detector", str),
        BUFFERS=Simple("Maximum number of NDArray buffers to be created for plugin callbacks", int),
        MEMORY=Simple("Max memory to allocate, should be maxw*maxh*nbuffer for driver and all "
                      "attached plugins", int)
    )
    # Device attributes
    LibFileList = ["OdinDetector"]
    DbdFileList = ["OdinDetectorSupport"]

    def Initialise(self):
        # Emit the driver configuration call (plus a signature comment) into
        # the generated IOC startup script
        print "# odinDetectorConfig(const char * portName, const char * serverPort, " \
              "int odinServerPort, const char * detectorName, " \
              "int maxBuffers, size_t maxMemory, int priority, int stackSize)"
        print "odinDetectorConfig(\"%(PORT)s\", \"%(CONTROL_SERVER_IP)s\", " \
              "%(CONTROL_SERVER_PORT)d, \"%(DETECTOR)s\", " \
              "%(BUFFERS)d, %(MEMORY)d)" % self.__dict__
class _OdinDataDriverTemplate(AutoSubstitution):
TemplateFile = | |
"""
MIT License
Copyright (c) 2021 TheHamkerCat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import os
import sys
from html import escape
from re import sub as re_sub
from sys import version as pyver
from time import ctime, time
from fuzzysearch import find_near_matches
from motor import version as mongover
from pykeyboard import InlineKeyboard
from pyrogram import __version__ as pyrover
from pyrogram import filters
from pyrogram.raw.functions import Ping
from pyrogram.types import (CallbackQuery, InlineKeyboardButton,
InlineQueryResultArticle, InlineQueryResultPhoto,
InputTextMessageContent)
from search_engine_parser import GoogleSearch
from wbb import (BOT_USERNAME, MESSAGE_DUMP_CHAT, SUDOERS, USERBOT_ID,
USERBOT_NAME, USERBOT_USERNAME, app, app2, arq)
from wbb.core.keyboard import ikb
from wbb.core.tasks import _get_tasks_text, all_tasks, rm_task
from wbb.core.types import InlineQueryResultCachedDocument
from wbb.modules.info import get_chat_info, get_user_info
from wbb.modules.music import download_youtube_audio
from wbb.utils.functions import test_speedtest
from wbb.utils.pastebin import paste
# Inline-query keywords the bot responds to; each becomes a help button in
# inline_help_func and is matched against the start of the inline query
keywords_list = [
    "image",
    "wall",
    "tmdb",
    "lyrics",
    "exec",
    "speedtest",
    "search",
    "ping",
    "tr",
    "ud",
    "yt",
    "info",
    "google",
    "torrent",
    "wiki",
    "music",
    "ytmusic",
]
async def inline_help_func(__HELP__):
    """Build the inline-mode help article.

    Shows one switch-inline button per supported keyword, then appends the
    'Alive' status article via alive_function.
    """
    keyboard = InlineKeyboard(row_width=4)
    keyword_buttons = []
    for keyword in keywords_list:
        keyword_buttons.append(
            InlineKeyboardButton(
                text=keyword, switch_inline_query_current_chat=keyword
            )
        )
    keyboard.add(*keyword_buttons)
    results = [
        InlineQueryResultArticle(
            title="Inline Commands",
            description="Help Related To Inline Usage.",
            input_message_content=InputTextMessageContent(
                "Click A Button To Get Started."
            ),
            thumb_url="https://telegra.ph/file/1d976a1e12866bbfb1ac5.jpg",
            reply_markup=keyboard,
        ),
    ]
    return await alive_function(results)
async def alive_function(answers):
    """Append the 'Alive' status article (bot/userbot state, versions) to *answers*."""
    keyboard = InlineKeyboard(row_width=2)
    # get_me() returns the account object when the session is usable
    bot_state = "Dead" if not await app.get_me() else "Alive"
    ubot_state = "Dead" if not await app2.get_me() else "Alive"
    keyboard.add(
        InlineKeyboardButton("Stats", callback_data="stats_callback"),
        InlineKeyboardButton(
            "Go Inline!", switch_inline_query_current_chat=""
        ),
    )
    msg = f"""
**HyLix:**
**MainBot:** `{bot_state}`
**UserBot:** `{ubot_state}`
**Python:** `{pyver.split()[0]}`
**Pyrogram:** `{pyrover}`
**MongoDB:** `{mongover}`
**Platform:** `{sys.platform}`
**Profiles:** [BOT](t.me/{BOT_USERNAME}) | [UBOT](t.me/{USERBOT_USERNAME})
"""
    status_article = InlineQueryResultArticle(
        title="Alive",
        description="Check Bot's Stats",
        thumb_url="https://telegra.ph/file/31f163c37a58736e1cd3b.jpg",
        input_message_content=InputTextMessageContent(
            msg, disable_web_page_preview=True
        ),
        reply_markup=keyboard,
    )
    answers.append(status_article)
    return answers
async def translate_func(answers, lang, tex):
    """Translate *tex* into *lang* via ARQ and append the result articles."""
    response = await arq.translate(tex, lang)
    if not response.ok:
        # Surface the API error as a single article
        answers.append(
            InlineQueryResultArticle(
                title="Error",
                description=response.result,
                input_message_content=InputTextMessageContent(response.result),
            )
        )
        return answers
    result = response.result
    msg = f"""
__**Translated From {result.src} To {result.dest}**__
**INPUT:**
{tex}
**OUTPUT:**
{result.translatedText}"""
    # Two articles: a detailed one and a bare translated-text one
    answers.append(
        InlineQueryResultArticle(
            title=f"Translated From {result.src} To {result.dest}.",
            description=result.translatedText,
            input_message_content=InputTextMessageContent(msg),
        )
    )
    answers.append(
        InlineQueryResultArticle(
            title=result.translatedText,
            input_message_content=InputTextMessageContent(
                result.translatedText
            ),
        )
    )
    return answers
async def urban_func(answers, text):
    """Look *text* up on Urban Dictionary via ARQ and append result articles.

    On API failure a single error article is appended instead.
    Fix: the bracket-stripping helper was a lambda assigned (PEP 8 E731) and
    re-created on every loop iteration; it is now a def hoisted out of the loop.
    """
    results = await arq.urbandict(text)
    if not results.ok:
        answers.append(
            InlineQueryResultArticle(
                title="Error",
                description=results.result,
                input_message_content=InputTextMessageContent(results.result),
            )
        )
        return answers

    def clean(x):
        # Urban Dictionary wraps cross-references in [brackets]; strip them
        return re_sub(r"[\[\]]", "", x)

    # Telegram caps inline answers around 50; keep at most 48 results
    results = results.result[0:48]
    for i in results:
        msg = f"""
**Query:** {text}
**Definition:** __{clean(i.definition)}__
**Example:** __{clean(i.example)}__"""
        answers.append(
            InlineQueryResultArticle(
                title=i.word,
                description=clean(i.definition),
                input_message_content=InputTextMessageContent(msg),
            )
        )
    return answers
async def google_search_func(answers, text):
    """Run a Google search for *text* and append up to 49 result articles.

    Entries missing titles/links/descriptions are skipped but still count
    towards the cap (same behaviour as before).
    """
    search_results = await GoogleSearch().async_search(text)
    for position, entry in enumerate(search_results):
        # Telegram allows at most ~50 inline results; stop after 49 entries
        if position > 48:
            break
        try:
            msg = f"""
[{entry['titles']}]({entry['links']})
{entry['descriptions']}"""
            answers.append(
                InlineQueryResultArticle(
                    title=entry["titles"],
                    description=entry["descriptions"],
                    input_message_content=InputTextMessageContent(
                        msg, disable_web_page_preview=True
                    ),
                )
            )
        except KeyError:
            # Malformed result entry; ignore it
            pass
    return answers
async def wall_func(answers, text):
    """Search wallpapers via ARQ and append up to 48 photo results."""
    results = await arq.wall(text)
    if not results.ok:
        # Surface the API error as a single article
        answers.append(
            InlineQueryResultArticle(
                title="Error",
                description=results.result,
                input_message_content=InputTextMessageContent(results.result),
            )
        )
        return answers
    for wallpaper in results.result[0:48]:
        answers.append(
            InlineQueryResultPhoto(
                photo_url=wallpaper.url_image,
                thumb_url=wallpaper.url_thumb,
                caption=f"[Source]({wallpaper.url_image})",
            )
        )
    return answers
async def torrent_func(answers, text):
    """Search torrents via ARQ and append one article per result.

    On API failure a single error article is appended instead.
    Fix: removed a dead `pass` statement at the end of the loop body and the
    redundant local aliases of the result fields.
    """
    results = await arq.torrent(text)
    if not results.ok:
        answers.append(
            InlineQueryResultArticle(
                title="Error",
                description=results.result,
                input_message_content=InputTextMessageContent(results.result),
            )
        )
        return answers
    # Telegram caps inline answers around 50; keep at most 48 results
    for i in results.result[0:48]:
        caption = f"""
**Title:** __{i.name}__
**Size:** __{i.size}__
**Seeds:** __{i.seeds}__
**Leechs:** __{i.leechs}__
**Uploaded:** __{i.uploaded}__
**Magnet:** `{i.magnet}`"""
        description = f"{i.size} | {i.uploaded} | Seeds: {i.seeds}"
        answers.append(
            InlineQueryResultArticle(
                title=i.name,
                description=description,
                input_message_content=InputTextMessageContent(
                    caption, disable_web_page_preview=True
                ),
            )
        )
    return answers
async def youtube_func(answers, text):
    """Search YouTube via ARQ and append one article per video with a Watch button."""
    results = await arq.youtube(text)
    if not results.ok:
        # Surface the API error as a single article
        answers.append(
            InlineQueryResultArticle(
                title="Error",
                description=results.result,
                input_message_content=InputTextMessageContent(results.result),
            )
        )
        return answers
    for video in results.result[0:48]:
        watch_url = f"https://youtube.com{video.url_suffix}"
        keyboard = InlineKeyboard(row_width=1)
        keyboard.add(InlineKeyboardButton("Watch", url=watch_url))
        caption = f"""
**Title:** {video.title}
**Views:** {video.views}
**Channel:** {video.channel}
**Duration:** {video.duration}
**Uploaded:** {video.publish_time}
**Description:** {video.long_desc}"""
        description = f"{video.views} | {video.channel} | {video.duration} | {video.publish_time}"
        answers.append(
            InlineQueryResultArticle(
                title=video.title,
                thumb_url=video.thumbnails[0],
                description=description,
                input_message_content=InputTextMessageContent(
                    caption, disable_web_page_preview=True
                ),
                reply_markup=keyboard,
            )
        )
    return answers
async def lyrics_func(answers, text):
    """Fetch lyrics for *text* via ARQ and append them as one article."""
    song = await arq.lyrics(text)
    if not song.ok:
        # Surface the API error as a single article
        answers.append(
            InlineQueryResultArticle(
                title="Error",
                description=song.result,
                input_message_content=InputTextMessageContent(song.result),
            )
        )
        return answers
    lyrics = song.result
    # First two lines of the ARQ payload are the song name and the artist
    header_lines = lyrics.splitlines()
    song_name = header_lines[0]
    artist = header_lines[1]
    if len(lyrics) > 4095:
        # Telegram message limit is 4096 characters; paste long lyrics instead
        paste_url = await paste(lyrics)
        lyrics = f"**LYRICS_TOO_LONG:** [URL]({paste_url})"
    answers.append(
        InlineQueryResultArticle(
            title=song_name,
            description=artist,
            input_message_content=InputTextMessageContent(f"__{lyrics}__"),
        )
    )
    return answers
async def tg_search_func(answers, text, user_id):
    """Search Telegram globally through the userbot session (SUDOERS only).

    The query must end with ':' to trigger the search; the colon is stripped
    before searching. Appends up to 49 result articles.
    Fix: the trailing-colon check used str(text)[-1], which raised IndexError
    for an empty query; str.endswith handles the empty string safely.
    """
    if user_id not in SUDOERS:
        msg = "**ERROR**\n__THIS FEATURE IS ONLY FOR SUDO USERS__"
        answers.append(
            InlineQueryResultArticle(
                title="ERROR",
                description="THIS FEATURE IS ONLY FOR SUDO USERS",
                input_message_content=InputTextMessageContent(msg),
            )
        )
        return answers
    if not str(text).endswith(":"):
        msg = "**ERROR**\n__Put A ':' After The Text To Search__"
        answers.append(
            InlineQueryResultArticle(
                title="ERROR",
                description="Put A ':' After The Text To Search",
                input_message_content=InputTextMessageContent(msg),
            )
        )
        return answers
    # Drop the trailing ':' marker before searching
    text = text[0:-1]
    async for message in app2.search_global(text, limit=49):
        buttons = InlineKeyboard(row_width=2)
        buttons.add(
            InlineKeyboardButton(
                text="Origin",
                url=message.link if message.link else "https://t.me/telegram",
            ),
            InlineKeyboardButton(
                text="Search again",
                switch_inline_query_current_chat="search",
            ),
        )
        name = (
            message.from_user.first_name
            if message.from_user.first_name
            else "<NAME>"
        )
        caption = f"""
**Query:** {text}
**Name:** {str(name)} [`{message.from_user.id}`]
**Chat:** {str(message.chat.title)} [`{message.chat.id}`]
**Date:** {ctime(message.date)}
**Text:** >>
{message.text.markdown if message.text else message.caption if message.caption else '[NO_TEXT]'}
"""
        result = InlineQueryResultArticle(
            title=name,
            description=message.text if message.text else "[NO_TEXT]",
            reply_markup=buttons,
            input_message_content=InputTextMessageContent(
                caption, disable_web_page_preview=True
            ),
        )
        answers.append(result)
    return answers
async def music_inline_func(answers, query):
    """Return cached audio results for *query* from the music dump chat.

    Searches the hard-coded dump chat through the userbot, de-duplicates the
    hits, and returns cached-document results re-fetched through the bot
    session (so the bot can send them inline). Note: on success this ignores
    the incoming *answers* list and returns a fresh list; *answers* is only
    used on the error path.
    """
    # Music dump chat searched for cached audio; both bot and userbot must be members
    chat_id = -1001445180719
    group_invite = "https://t.me/joinchat/vSDE2DuGK4Y4Nzll"
    try:
        messages = [
            m
            async for m in app2.search_messages(
                chat_id, query, filter="audio", limit=100
            )
        ]
    except Exception as e:
        # Most likely the userbot is not a member of the dump chat
        print(e)
        msg = f"You Need To Join Here With Your Bot And Userbot To Get Cached Music.\n{group_invite}"
        answers.append(
            InlineQueryResultArticle(
                title="ERROR",
                description="Click Here To Know More.",
                input_message_content=InputTextMessageContent(
                    msg, disable_web_page_preview=True
                ),
            )
        )
        return answers
    messages_ids_and_duration = []
    for f_ in messages:
        messages_ids_and_duration.append(
            {
                "message_id": f_.message_id,
                "duration": f_.audio.duration if f_.audio.duration else 0,
            }
        )
    # De-duplicate by audio duration: tracks with equal duration are treated
    # as the same file, keeping the last hit (NOTE(review): this also drops
    # distinct tracks that happen to share a duration -- confirm intended)
    messages = list(
        {v["duration"]: v for v in messages_ids_and_duration}.values()
    )
    messages_ids = [ff_["message_id"] for ff_ in messages]
    # Re-fetch via the bot session so the file_ids are usable by the bot
    messages = await app.get_messages(chat_id, messages_ids[0:48])
    return [
        InlineQueryResultCachedDocument(
            file_id=message_.audio.file_id,
            title=message_.audio.title,
        )
        for message_ in messages
    ]
async def wiki_func(answers, text):
    """Query Wikipedia via ARQ and append the answer as one article."""
    data = await arq.wiki(text)
    if not data.ok:
        # Surface the API error as a single article
        answers.append(
            InlineQueryResultArticle(
                title="Error",
                description=data.result,
                input_message_content=InputTextMessageContent(data.result),
            )
        )
        return answers
    data = data.result
    msg = f"""
**QUERY:**
{data.title}
**ANSWER:**
__{data.answer}__"""
    article = InlineQueryResultArticle(
        title=data.title,
        description=data.answer,
        input_message_content=InputTextMessageContent(msg),
    )
    answers.append(article)
    return answers
async def speedtest_init(query):
    """Return the speedtest launcher article (SUDOERS only).

    The actual test runs when the user presses the button, handled by the
    'test_speedtest' callback.
    """
    user_id = query.from_user.id
    if user_id not in SUDOERS:
        msg = "**ERROR**\n__THIS FEATURE IS ONLY FOR SUDO USERS__"
        return [
            InlineQueryResultArticle(
                title="ERROR",
                description="THIS FEATURE IS ONLY FOR SUDO USERS",
                input_message_content=InputTextMessageContent(msg),
            )
        ]
    keyboard = InlineKeyboard(row_width=1)
    keyboard.add(
        InlineKeyboardButton(text="Test", callback_data="test_speedtest")
    )
    msg = "**Click The Button Below To Perform A Speedtest**"
    return [
        InlineQueryResultArticle(
            title="Click Here",
            input_message_content=InputTextMessageContent(msg),
            reply_markup=keyboard,
        )
    ]
# CallbackQuery for the function above
@app.on_callback_query(filters.regex("test_speedtest"))
async def test_speedtest_cq(_, cq):
    """Run the blocking speedtest off the event loop and edit the inline message.

    Only SUDOERS may trigger it; everyone else gets a toast answer.
    """
    if cq.from_user.id not in SUDOERS:
        return await cq.answer("This Isn't For You!")
    inline_message_id = cq.inline_message_id
    await app.edit_inline_text(inline_message_id, "**Testing**")
    # test_speedtest is synchronous/blocking, so run it in the default executor
    loop = asyncio.get_running_loop()
    download, upload, info = await loop.run_in_executor(None, test_speedtest)
    msg = f"""
**Download:** `{download}`
**Upload:** `{upload}`
**Latency:** `{info['latency']} ms`
**Country:** `{info['country']} [{info['cc']}]`
**Latitude:** `{info['lat']}`
**Longitude:** `{info['lon']}`
"""
    await app.edit_inline_text(inline_message_id, msg)
async def pmpermit_func(answers, user_id, victim):
    """Append the PM-permit warning article with approve/block buttons.

    Only produced for the userbot account itself; for anyone else this
    returns early. NOTE(review): the early path returns None rather than
    *answers* -- confirm the caller tolerates that.
    """
    if user_id != USERBOT_ID:
        return
    caption = f"🎭 **Hi This Is {USERBOT_NAME} PM Protection** 🎭\n➰ Pls Wait Till I Approve You To PM\n➰ Don't Send More Than 5 Msg Cause,\n➰ You'll Get Blocked & Reported !"
    buttons = InlineKeyboard(row_width=2)
    # First three buttons are shown to the visitor; the last two
    # (approve/block, parameterised with the victim id) act on the sender
    buttons.add(
        InlineKeyboardButton(
            text="To Scam You", callback_data="pmpermit to_scam_you a"
        ),
        InlineKeyboardButton(
            text="For Promotion",
            callback_data="pmpermit to_scam_you a",
        ),
        InlineKeyboardButton(
            text="Approve Me", callback_data="pmpermit approve_me a"
        ),
        InlineKeyboardButton(
            text="Approve", callback_data=f"pmpermit approve {victim}"
        ),
        InlineKeyboardButton(
            text="Block & Delete",
            callback_data=f"pmpermit block {victim}",
        ),
    )
    answers.append(
        InlineQueryResultArticle(
            title="do_not_click_here",
            reply_markup=buttons,
            input_message_content=InputTextMessageContent(caption),
        )
    )
    return answers
async def ping_func(answers):
    """Measure the MTProto round-trip time and append it as an article."""
    request = Ping(ping_id=app.rnd_id())
    started = time()
    await app.send(request)
    finished = time()
    # Round-trip in milliseconds, two decimal places
    ping = f"{str(round((finished - started) * 1000, 2))} ms"
    answers.append(
        InlineQueryResultArticle(
            title=ping,
            input_message_content=InputTextMessageContent(f"__**{ping}**__"),
        )
    )
    return answers
async def yt_music_func(answers, url):
arq_resp = await arq.youtube(url)
loop = asyncio.get_running_loop()
music = await loop.run_in_executor(None, download_youtube_audio, arq_resp)
if not music:
msg = "**ERROR**\n__MUSIC TOO LONG__"
answers.append(
InlineQueryResultArticle(
title="ERROR",
description="MUSIC TOO | |
import json
import pickle
from copy import deepcopy
from pathlib import Path
import subprocess
import fire
import numpy as np
from second.data import kitti_common as kitti
from second.data.dataset import Dataset, register_dataset
from second.utils.eval import get_coco_eval_result, get_official_eval_result
from second.utils.progress_bar import progress_bar_iter as prog_bar
@register_dataset
class NuScenesDataset(Dataset):
    """nuScenes dataset wrapper for the `second` detection pipeline.

    Loads pre-generated info pickles, accumulates lidar sweeps per sample,
    and exposes KITTI-compatible ground-truth annotations for evaluation.
    (Class continues below; evaluation methods follow the accessors.)
    """
    # xyz + time-lag channel; kept at 4 so KITTI-pretrained models
    # (which expect 4 point features) can be reused
    NumPointFeatures = 4  # xyz, timestamp. set 4 to use kitti pretrain
    # Raw nuScenes category names -> the 10 detection-challenge class names
    NameMapping = {
        'movable_object.barrier': 'barrier',
        'vehicle.bicycle': 'bicycle',
        'vehicle.bus.bendy': 'bus',
        'vehicle.bus.rigid': 'bus',
        'vehicle.car': 'car',
        'vehicle.construction': 'construction_vehicle',
        'vehicle.motorcycle': 'motorcycle',
        'human.pedestrian.adult': 'pedestrian',
        'human.pedestrian.child': 'pedestrian',
        'human.pedestrian.construction_worker': 'pedestrian',
        'human.pedestrian.police_officer': 'pedestrian',
        'movable_object.trafficcone': 'traffic_cone',
        'vehicle.trailer': 'trailer',
        'vehicle.truck': 'truck'
    }
    # Attribute submitted for a detection when none is predicted; empty
    # string for classes that carry no attribute in the nuScenes schema
    DefaultAttribute = {
        "car": "vehicle.parked",
        "pedestrian": "pedestrian.moving",
        "trailer": "vehicle.parked",
        "truck": "vehicle.parked",
        "bus": "vehicle.parked",
        "motorcycle": "cycle.without_rider",
        "construction_vehicle": "vehicle.parked",
        "bicycle": "cycle.without_rider",
        "barrier": "",
        "traffic_cone": "",
    }
    def __init__(self,
                 root_path,
                 info_path,
                 class_names=None,
                 prep_func=None,
                 num_point_features=None):
        """Load the info pickle and store preprocessing configuration.

        root_path: dataset root directory
        info_path: pickle with {"infos": [...], "metadata": {...}}
        class_names: detection classes of interest
        prep_func: callable applied to each raw sample in __getitem__
        num_point_features: accepted for interface compatibility; unused here
        """
        self._root_path = Path(root_path)
        with open(info_path, 'rb') as f:
            data = pickle.load(f)
        self._nusc_infos = data["infos"]
        # Sort samples chronologically for deterministic ordering
        self._nusc_infos = list(
            sorted(self._nusc_infos, key=lambda e: e["timestamp"]))
        self._metadata = data["metadata"]
        self._class_names = class_names
        self._prep_func = prep_func
        # kitti map: nusc det name -> kitti eval name
        self._kitti_name_mapping = {
            "car": "car",
            "pedestrian": "pedestrian",
        }  # we only eval these classes in kitti
        self.version = self._metadata["version"]
        self.eval_version = "cvpr_2019"
        # Velocity regression disabled; gt_boxes stay 7-dim unless enabled
        self._with_velocity = False
    def __len__(self):
        """Number of samples (info records) in the dataset."""
        return len(self._nusc_infos)
    @property
    def ground_truth_annotations(self):
        """Ground truth converted to KITTI-style annotation dicts.

        Returns None when the infos carry no gt_boxes (test split).
        Boxes are filtered to: at least one lidar point, a class present in
        _kitti_name_mapping (car/pedestrian), and center within the
        per-class evaluation range. Difficulty (easy/moderate/hard) is
        encoded through the 'occluded' field based on lidar point count.
        """
        if "gt_boxes" not in self._nusc_infos[0]:
            return None
        from nuscenes.eval.detection.config import eval_detection_configs
        # Per-class max evaluation radius from the official eval config
        cls_range_map = eval_detection_configs[self.
                                               eval_version]["class_range"]
        gt_annos = []
        for info in self._nusc_infos:
            gt_names = info["gt_names"]
            gt_boxes = info["gt_boxes"]
            num_lidar_pts = info["num_lidar_pts"]
            # Drop boxes containing no lidar points
            mask = num_lidar_pts > 0
            gt_names = gt_names[mask]
            gt_boxes = gt_boxes[mask]
            num_lidar_pts = num_lidar_pts[mask]
            # Keep only classes that have a KITTI equivalent
            mask = np.array([n in self._kitti_name_mapping for n in gt_names],
                            dtype=np.bool_)
            gt_names = gt_names[mask]
            gt_boxes = gt_boxes[mask]
            num_lidar_pts = num_lidar_pts[mask]
            gt_names_mapped = [self._kitti_name_mapping[n] for n in gt_names]
            det_range = np.array([cls_range_map[n] for n in gt_names_mapped])
            # Build a [-r, -r, r, r] xy window per box from the class radius
            det_range = det_range[..., np.newaxis] @ np.array([[-1, -1, 1, 1]])
            mask = (gt_boxes[:, :2] >= det_range[:, :2]).all(1)
            mask &= (gt_boxes[:, :2] <= det_range[:, 2:]).all(1)
            gt_names = gt_names[mask]
            gt_boxes = gt_boxes[mask]
            num_lidar_pts = num_lidar_pts[mask]
            # use occluded to control easy/moderate/hard in kitti
            easy_mask = num_lidar_pts > 15
            moderate_mask = num_lidar_pts > 7
            occluded = np.zeros([num_lidar_pts.shape[0]])
            occluded[:] = 2
            occluded[moderate_mask] = 1
            occluded[easy_mask] = 0
            N = len(gt_boxes)
            # bbox/alpha/truncated are dummies: nuScenes has no 2D boxes
            gt_annos.append({
                "bbox":
                np.tile(np.array([[0, 0, 50, 50]]), [N, 1]),
                "alpha":
                np.full(N, -10),
                "occluded":
                occluded,
                "truncated":
                np.zeros(N),
                "name":
                gt_names,
                "location":
                gt_boxes[:, :3],
                "dimensions":
                gt_boxes[:, 3:6],
                "rotation_y":
                gt_boxes[:, 6],
            })
        return gt_annos
def __getitem__(self, idx):
input_dict = self.get_sensor_data(idx)
example = self._prep_func(input_dict=input_dict)
example["metadata"] = input_dict["metadata"]
if "anchors_mask" in example:
example["anchors_mask"] = example["anchors_mask"].astype(np.uint8)
return example
    def get_sensor_data(self, query):
        """Load lidar points (multi-sweep aggregated) and annotations.

        ``query`` is either an index into the infos or a dict of the form
        ``{"lidar": {"idx": i}, ...}``; a "cam" key in the dict requests
        the raw front-camera image bytes as well.
        """
        idx = query
        read_test_image = False
        if isinstance(query, dict):
            assert "lidar" in query
            idx = query["lidar"]["idx"]
            read_test_image = "cam" in query
        info = self._nusc_infos[idx]
        res = {
            "lidar": {
                "type": "lidar",
                "points": None,
            },
            "metadata": {
                "token": info["token"]
            },
        }
        lidar_path = Path(info['lidar_path'])
        # raw record has 5 float32 columns; col 3 (scaled by 1/255) is
        # presumably intensity — TODO confirm; col 4 is reused as time lag
        points = np.fromfile(
            str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])
        points[:, 3] /= 255
        points[:, 4] = 0
        sweep_points_list = [points]
        ts = info["timestamp"] / 1e6
        for sweep in info["sweeps"]:
            points_sweep = np.fromfile(
                str(sweep["lidar_path"]), dtype=np.float32,
                count=-1).reshape([-1, 5])
            sweep_ts = sweep["timestamp"] / 1e6
            points_sweep[:, 3] /= 255
            # transform sweep points into the keyframe lidar frame
            points_sweep[:, :3] = points_sweep[:, :3] @ sweep[
                "sweep2lidar_rotation"].T
            points_sweep[:, :3] += sweep["sweep2lidar_translation"]
            # col 4 := time lag (seconds) relative to the keyframe
            points_sweep[:, 4] = ts - sweep_ts
            sweep_points_list.append(points_sweep)
        # keep x, y, z and time lag; drop column 3
        points = np.concatenate(sweep_points_list, axis=0)[:, [0, 1, 2, 4]]
        if read_test_image:
            if Path(info["cam_front_path"]).exists():
                with open(str(info["cam_front_path"]), 'rb') as f:
                    image_str = f.read()
            else:
                image_str = None
            res["cam"] = {
                "type": "camera",
                "data": image_str,
                "datatype": Path(info["cam_front_path"]).suffix[1:],
            }
        res["lidar"]["points"] = points
        if 'gt_boxes' in info:
            # keep only boxes containing at least one lidar point
            mask = info["num_lidar_pts"] > 0
            gt_boxes = info["gt_boxes"][mask]
            if self._with_velocity:
                # append (vx, vy); NaN velocities are zeroed
                gt_velocity = info["gt_velocity"][mask]
                nan_mask = np.isnan(gt_velocity[:, 0])
                gt_velocity[nan_mask] = [0.0, 0.0]
                gt_boxes = np.concatenate([gt_boxes, gt_velocity], axis=-1)
            res["lidar"]["annotations"] = {
                'boxes': gt_boxes,
                'names': info["gt_names"][mask],
            }
        return res
    def evaluation_kitti(self, detections, output_dir):
        """eval by kitti evaluation tool.
        I use num_lidar_pts to set easy, mod, hard.
        easy: num>15, mod: num>7, hard: num>0.

        Returns a {"results": ..., "detail": ...} dict (official + coco
        variants), or None when no ground truth is available.
        """
        print("++++++++NuScenes KITTI unofficial Evaluation:")
        print(
            "++++++++easy: num_lidar_pts>15, mod: num_lidar_pts>7, hard: num_lidar_pts>0"
        )
        print("++++++++The bbox AP is invalid. Don't forget to ignore it.")
        class_names = self._class_names
        gt_annos = self.ground_truth_annotations
        if gt_annos is None:
            return None
        # deep-copy: the name remapping below mutates the annotation dicts
        gt_annos = deepcopy(gt_annos)
        detections = deepcopy(detections)
        dt_annos = []
        for det in detections:
            final_box_preds = det["box3d_lidar"].detach().cpu().numpy()
            label_preds = det["label_preds"].detach().cpu().numpy()
            scores = det["scores"].detach().cpu().numpy()
            anno = kitti.get_start_result_anno()
            num_example = 0
            box3d_lidar = final_box_preds
            for j in range(box3d_lidar.shape[0]):
                # dummy 2D bbox/alpha: no camera-plane predictions here
                anno["bbox"].append(np.array([0, 0, 50, 50]))
                anno["alpha"].append(-10)
                anno["dimensions"].append(box3d_lidar[j, 3:6])
                anno["location"].append(box3d_lidar[j, :3])
                anno["rotation_y"].append(box3d_lidar[j, 6])
                anno["name"].append(class_names[int(label_preds[j])])
                anno["truncated"].append(0.0)
                anno["occluded"].append(0)
                anno["score"].append(scores[j])
                num_example += 1
            if num_example != 0:
                anno = {n: np.stack(v) for n, v in anno.items()}
                dt_annos.append(anno)
            else:
                dt_annos.append(kitti.empty_result_anno())
            num_example = dt_annos[-1]["name"].shape[0]
            dt_annos[-1]["metadata"] = det["metadata"]
        # remap nuScenes class names into the kitti vocabulary (GT side)
        for anno in gt_annos:
            names = anno["name"].tolist()
            mapped_names = []
            for n in names:
                if n in self.NameMapping:
                    mapped_names.append(self.NameMapping[n])
                else:
                    mapped_names.append(n)
            anno["name"] = np.array(mapped_names)
        # remap detection class names the same way
        for anno in dt_annos:
            names = anno["name"].tolist()
            mapped_names = []
            for n in names:
                if n in self.NameMapping:
                    mapped_names.append(self.NameMapping[n])
                else:
                    mapped_names.append(n)
            anno["name"] = np.array(mapped_names)
        mapped_class_names = []
        for n in self._class_names:
            if n in self.NameMapping:
                mapped_class_names.append(self.NameMapping[n])
            else:
                mapped_class_names.append(n)
        z_axis = 2
        z_center = 0.5
        # for regular raw lidar data, z_axis = 2, z_center = 0.5.
        result_official_dict = get_official_eval_result(
            gt_annos,
            dt_annos,
            mapped_class_names,
            z_axis=z_axis,
            z_center=z_center)
        result_coco = get_coco_eval_result(
            gt_annos,
            dt_annos,
            mapped_class_names,
            z_axis=z_axis,
            z_center=z_center)
        return {
            "results": {
                "official": result_official_dict["result"],
                "coco": result_coco["result"],
            },
            "detail": {
                "official": result_official_dict["detail"],
                "coco": result_coco["detail"],
            },
        }
def evaluation_nusc(self, detections, output_dir):
version = self.version
eval_set_map = {
"v1.0-mini": "mini_train",
"v1.0-trainval": "val",
}
gt_annos = self.ground_truth_annotations
if gt_annos is None:
return None
nusc_annos = {}
mapped_class_names = self._class_names
token2info = {}
for info in self._nusc_infos:
token2info[info["token"]] = info
for det in detections:
annos = []
boxes = _second_det_to_nusc_box(det)
for i, box in enumerate(boxes):
name = mapped_class_names[box.label]
velocity = box.velocity[:2].tolist()
if len(token2info[det["metadata"]["token"]]["sweeps"]) == 0:
velocity = (np.nan, np.nan)
box.velocity = np.array([*velocity, 0.0])
boxes = _lidar_nusc_box_to_global(
token2info[det["metadata"]["token"]], boxes,
mapped_class_names, "cvpr_2019")
for i, box in enumerate(boxes):
name = mapped_class_names[box.label]
velocity = box.velocity[:2].tolist()
nusc_anno = {
"sample_token": det["metadata"]["token"],
"translation": box.center.tolist(),
"size": box.wlh.tolist(),
"rotation": box.orientation.elements.tolist(),
"velocity": velocity,
"detection_name": name,
"detection_score": box.score,
"attribute_name": NuScenesDataset.DefaultAttribute[name],
}
annos.append(nusc_anno)
nusc_annos[det["metadata"]["token"]] = annos
nusc_submissions = {
"meta": {
"use_camera": False,
"use_lidar": False,
"use_radar": False,
"use_map": False,
"use_external": False,
},
"results": nusc_annos,
}
res_path = Path(output_dir) / "results_nusc.json"
with open(res_path, "w") as f:
json.dump(nusc_submissions, f)
eval_main_file = Path(__file__).resolve().parent / "nusc_eval.py"
# why add \"{}\"? to support path with spaces.
cmd = f"python {str(eval_main_file)} --root_path=\"{str(self._root_path)}\""
cmd += f" --version={self.version} --eval_version={self.eval_version}"
cmd += f" --res_path=\"{str(res_path)}\" --eval_set={eval_set_map[self.version]}"
cmd += f" --output_dir=\"{output_dir}\""
# use subprocess can release all nusc memory after evaluation
subprocess.check_output(cmd, shell=True)
with open(Path(output_dir) / "metrics_summary.json", "r") as f:
metrics = json.load(f)
detail = {}
res_path.unlink() # delete results_nusc.json since it's very large
result = f"Nusc {version} Evaluation\n"
for name in mapped_class_names:
detail[name] = {}
for k, v in metrics["label_aps"][name].items():
detail[name][f"dist@{k}"] = v
tp_errs = []
tp_names = []
for k, v in metrics["label_tp_errors"][name].items():
detail[name][k] = v
tp_errs.append(f"{v:.4f}")
tp_names.append(k)
threshs = ', '.join(list(metrics["label_aps"][name].keys()))
scores = list(metrics["label_aps"][name].values())
scores = ', '.join([f"{s * 100:.2f}" for s in scores])
result += f"{name} Nusc dist AP@{threshs} and TP errors\n"
result += scores
result += "\n"
result += ', '.join(tp_names) + ": " + ', '.join(tp_errs)
result += "\n"
return {
"results": {
"nusc": result
},
"detail": {
"nusc": detail
},
}
def evaluation(self, detections, output_dir):
"""kitti evaluation is very slow, remove it.
"""
# res_kitti = self.evaluation_kitti(detections, output_dir)
res_nusc = self.evaluation_nusc(detections, output_dir)
res = {
"results": {
"nusc": res_nusc["results"]["nusc"],
# "kitti.official": res_kitti["results"]["official"],
# "kitti.coco": res_kitti["results"]["coco"],
},
"detail": {
"eval.nusc": res_nusc["detail"]["nusc"],
# "eval.kitti": {
# "official": res_kitti["detail"]["official"],
# "coco": res_kitti["detail"]["coco"],
# },
},
}
return res
@register_dataset
class NuScenesDatasetD8(NuScenesDataset):
    """Nuscenes mini train set. only contains ~3500 samples.
    recommend to use this to develop, train full set once before submit.
    """

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Subsample only the full train split; smaller splits are kept.
        if len(self._nusc_infos) > 28000:
            ordered = sorted(self._nusc_infos, key=lambda rec: rec["timestamp"])
            self._nusc_infos = ordered[::8]
@register_dataset
class NuScenesDatasetD8Velo(NuScenesDatasetD8):
    """Nuscenes mini train set with velocity.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # must run after super().__init__, which resets _with_velocity
        self._with_velocity = True
@register_dataset
class NuScenesDatasetVelo(NuScenesDataset):
    """Full nuScenes dataset with velocity targets enabled."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # must run after super().__init__, which resets _with_velocity
        self._with_velocity = True
@register_dataset
class NuScenesDatasetD7(NuScenesDataset):
    """Nuscenes train set subsampled to every 7th sample."""

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Subsample only the full train split; smaller splits are kept.
        if len(self._nusc_infos) > 28000:
            ordered = sorted(self._nusc_infos, key=lambda rec: rec["timestamp"])
            self._nusc_infos = ordered[::7]
@register_dataset
class NuScenesDatasetD6(NuScenesDataset):
    """Nuscenes train set subsampled to every 6th sample (by timestamp)."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # subsample only the full train split (~28k+ samples)
        if len(self._nusc_infos) > 28000:
            self._nusc_infos = list(
                sorted(self._nusc_infos, key=lambda e: e["timestamp"]))
            self._nusc_infos = self._nusc_infos[::6]
@register_dataset
class NuScenesDatasetD5(NuScenesDataset):
def __init__(self, *args, **kw):
| |
    def placeholders(self):
        """Return the comparator's placeholders as a (copied) set."""
        return set(self._placeholders)
    @property
    def roots(self):
        """The root predicate paths referenced by this comparator."""
        return self._roots
@property
def executable(self):
for ph in self._placeholders:
if not ph.has_default: return False
return True
def negate(self):
neg = not self._negative
return FunctionComparator(self._func,self._pathsig,neg,
assignment=self._assignment)
    def dealias(self):
        """Return a copy with alias paths replaced by their dealiased form.

        Returns self unchanged when dealiasing makes no difference.
        """
        newpathsig = [path(hp).meta.dealiased for hp in self._pathsig]
        if _hashables(newpathsig) == _hashables(self._pathsig): return self
        return FunctionComparator(self._func, newpathsig, self._negative,
                                  assignment=self._assignment)
    def fixed(self):
        """Equivalent to ground() with no arguments."""
        return self.ground()
    def ground(self, *args, **kwargs):
        """Return a grounded copy with placeholder values assigned.

        Positional args are matched against the function signature in
        order, then keyword args are merged (a duplicate raises
        ValueError), and finally named placeholders fall back to their
        defaults.  Raises ValueError when a named placeholder is left
        without a value.  Returns self when already grounded.
        """
        if self._assignment is not None: return self
        assignment = {}
        # Assign any positional arguments first then add the keyword arguments
        # and make sure there is no repeats. Finally, assign any placeholders
        # with defaults. Note: funcsig is an orderedDict
        for idx,(k,_) in enumerate(self._funcsig.items()):
            if idx >= len(args): break
            assignment[k] = args[idx]
        for k,v in kwargs.items():
            if k in assignment:
                raise ValueError(("Both positional and keyword values given "
                                  "for the argument '{}'").format(k))
            assignment[k] = v
        for ph in self._placeholders:
            if isinstance(ph, NamedPlaceholder) and ph.name not in assignment:
                if ph.has_default:
                    assignment[ph.name] = ph.default
                else:
                    raise ValueError(("Missing named placeholder argument '{}' "
                                      "when grounding '{}' with arguments: "
                                      "{}").format(ph.name,self,kwargs))
        return FunctionComparator(self._func,self._pathsig,
                                  self._negative,assignment)
    def make_callable(self, root_signature):
        """Build a ComparisonCallable for this grounded comparator.

        ``root_signature`` describes the fact-tuple layout the callable
        will receive.  Raises RuntimeError if the comparator has not been
        grounded (no assignment).
        """
        if self._assignment is None:
            raise RuntimeError(("Internal bug: make_callable called on a "
                                "ungrounded object: {}").format(self))
        # from the function signature and the assignment generate the fixed
        # values for the non-path items
        funcsigparam = [ self._assignment[k] for k,_ in self._funcsig.items() ]
        outputsig = tuple(list(self._pathsig) + funcsigparam)
        alignfunc = make_input_alignment_functor(root_signature,outputsig)
        # wrap the function to invert its result when negated
        op = self._func if not self._negative else lambda *args : not self._func(*args)
        return ComparisonCallable(op,alignfunc)
def __eq__(self, other):
if not isinstance(other, FunctionComparator): return NotImplemented
if self._func != other._func: return False
if self._pathsig != other._pathsig: return False
if self._negative != other._negative: return False
if self._assignment != other._assignment: return False
return True
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented: return NotImplemented
return not result
    def __hash__(self):
        """Hash consistent with __eq__ (assignment folded in via its tuple form)."""
        return hash((self._func,) + self._pathsig + self._assignment_tuple)
def __str__(self):
assignstr = ": {}".format(self._assignment) if self._assignment else ""
funcstr = "func({}{}, {})".format(self._pathsig,assignstr,self._func,)
if not self._negative: return funcstr
return "not_({})".format(funcstr)
    def __repr__(self):
        """repr matches str for readable debug output."""
        return self.__str__()
# ------------------------------------------------------------------------------
# Comparators (Standard and Function) have a comparison function and input of
# some form; eg "F.anum == 3" has operator.eq_ and input (F.anum,3) where F.anum
# is a path and will be replaced by some fact sub-field value.
#
# We need to extract the field input from facts and then call the comparison
# function with the appropriate input. But it is not straight forward to get the
# field input. If the query search is on a single predicate type then the input
# will be a singleton tuple. However, if there is a join in the query there will
# be multiple elements to the tuple. Furthermore, the order of facts will be
# determined by the query optimiser as it may be more efficient to join X with Y
# rather than Y with X.
#
# With this complication we need a way to remap a search input fact-tuple into
# the expected form for each query condition component.
#
# make_input_alignment_functor() returns a function that takes a tuple
# of facts as given by the input signature and returns a tuple of values as
# given by the output signature.
# ------------------------------------------------------------------------------
def make_input_alignment_functor(input_root_signature, output_signature):
    """Return a functor mapping a fact tuple to an output value tuple.

    ``input_root_signature`` is a sequence of root predicate paths giving
    the order of facts in the input tuple.  ``output_signature`` is a
    sequence of sub-field paths and/or static values (placeholders are
    rejected).  The returned function takes a tuple of facts matching the
    input signature and returns the extracted field values / statics in
    output-signature order.  Raises TypeError for malformed signatures or
    mismatched input tuples.
    """
    # Input signature are paths that must correspond to predicate types
    def validate_input_signature():
        if not input_root_signature:
            raise TypeError("Empty input predicate path signature")
        inputs=[]
        try:
            for p in input_root_signature:
                pp = path(p)
                if not pp.meta.is_root:
                    raise ValueError("path '{}' is not a predicate root".format(pp))
                inputs.append(pp)
        except Exception as e:
            raise TypeError(("Invalid input predicate path signature {}: "
                             "{}").format(input_root_signature,e)) from None
        return inputs
    # Output signature are field paths or statics (but not placeholders)
    def validate_output_signature():
        if not output_signature: raise TypeError("Empty output path signature")
        outputs=[]
        for a in output_signature:
            p = path(a,exception=False)
            outputs.append(p if p else a)
            if p: continue
            if isinstance(a, Placeholder):
                raise TypeError(("Output signature '{}' contains a placeholder "
                                 "'{}'").format(output_signature,a))
        return outputs
    insig = validate_input_signature()
    outsig = validate_output_signature()
    # build a list of lambdas one for each output item that chooses the
    # appropriate item from the input.
    pp2idx = { hashable_path(pp) : idx for idx,pp in enumerate(insig) }
    getters = []
    for out in outsig:
        if isinstance(out,PredicatePath):
            idx = pp2idx.get(hashable_path(out.meta.root),None)
            if idx is None:
                raise TypeError(("Invalid signature match between {} and {}: "
                                 "missing input predicate path for "
                                 "{}").format(input_root_signature,
                                              output_signature,out))
            ag=out.meta.attrgetter
            # bind ag/idx as defaults to avoid the late-binding closure trap
            getters.append(lambda facts, ag=ag, idx=idx: ag(facts[idx]))
        else:
            # static value: returned as-is regardless of the input facts
            getters.append(lambda facts, out=out: out)
    getters = tuple(getters)
    # Create the getter
    def func(facts):
        try:
            return tuple(getter(facts) for getter in getters)
        except IndexError as e:
            raise TypeError(("Invalid input to getter function: expecting "
                             "a tuple with {} elements and got a tuple with "
                             "{}").format(len(insig),len(facts))) from None
        except TypeError as e:
            raise TypeError(("Invalid input to getter function: "
                             "{}").format(e)) from None
        except AttributeError as e:
            raise TypeError(("Invalid input to getter function: "
                             "{}").format(e)) from None
    return func
# ------------------------------------------------------------------------------
# ComparisonCallable is a functional object that wraps a comparison operator and
# ensures the comparison operator gets the correct input. The input to a
# ComparisonCallable is a tuple of facts (the form of which is determined by a
# signature) and returns whether the facts satisfy some condition.
# ------------------------------------------------------------------------------
class ComparisonCallable(object):
    """Pairs a comparison operator with an input-alignment functor.

    Calling the instance with a tuple of facts extracts the operator's
    arguments via the getter map and applies the operator to them.
    """

    def __init__(self, operator, getter_map):
        self._operator = operator
        self._getter_map = getter_map

    def __call__(self, facts):
        return self._operator(*self._getter_map(facts))
# ------------------------------------------------------------------------------
# 'Where' query clauses handling.
#
# The goal is to turn the where clause into a CNF clausal normal form. So
# functions to validate the 'where' clause and then turn it into NNF, then CNF,
# and then a pure clausal form.
# ------------------------------------------------------------------------------
# The QCondition operators that represent boolean connectives.
g_bool_operators = {
    operator.and_ : True, operator.or_ : True, operator.not_ : True }

def is_boolean_qcondition(cond):
    """True iff cond is a QCondition built from a boolean connective."""
    if isinstance(cond, FuncInputSpec):
        return False
    if not isinstance(cond, QCondition):
        return False
    return g_bool_operators.get(cond.operator, False)
def is_comparison_qcondition(cond):
    """True iff cond is a QCondition whose operator has a comparator spec."""
    if not isinstance(cond, QCondition):
        return False
    return bool(StandardComparator.operators.get(cond.operator, None))
# ------------------------------------------------------------------------------
# Validates and turns non-boolean QCondition objects into the appropriate
# comparator (functors are wrapped in a FunctionComparator object - and
# FuncInputSpec are also turned into FunctionComparator objects). Also
# simplifies any static conditions (conditions that can be evaluated without a
# fact) which are replaced with their boolean evaluation.
#
# The where expression is validated with respect to a sequence of predicate root
# paths that indicate the valid predicates (and aliases) that are being
# reference in the query.
# ------------------------------------------------------------------------------
def validate_where_expression(qcond, roots=[]):
# Make sure we have a set of hashable paths
try:
roots = set([ hashable_path(r) for r in roots ])
except Exception as e:
raise ValueError(("Invalid predicate paths signature {}: "
"{}").format(roots,e)) from None
for pp in roots:
if not pp.path.meta.is_root:
raise ValueError(("Invalid roots element {} does not refer to "
"the root of a predicate path ").format(pp))
# Check that the path is a sub-path of one of the roots
def check_path(path):
if hashable_path(path.meta.root) not in roots:
raise ValueError(("Invalid 'where' expression '{}' contains a path "
"'{}' that is not a sub-path of one of the "
"roots '{}'").format(qcond, path, roots))
# Check if a condition is static - to be called after validating the
# sub-parts of the conidition.
def is_static_condition(cond):
if isinstance(cond,Comparator): return False
if isinstance(cond,QCondition): return False
if callable(cond):
raise TYpeError(("Internal bug: invalid static test "
"with callable: {}").format(cond))
return True
# Check callable - construct a FunctionComparator
def validate_callable(func):
if len(roots) != 1:
raise ValueError(("Incompatible usage between raw functor {} and "
"non-singleton predicates {}").format(func,roots))
return FunctionComparator.from_specification(roots,func)
# Check boolean condition - simplifying if it is a static condition
def validate_bool_condition(bcond):
if bcond.operator == operator.not_:
newsubcond = validate_condition(bcond.args[0])
if is_static_condition(newsubcond): return bcond.operator(newsubcond)
if newsubcond == bcond.args[0]: return bcond
return QCondition(bcond.operator,newsubcond)
newargs = [validate_condition(a) for a in bcond.args]
if is_static_condition(newargs[0]) and is_static_condition(newargs[1]):
return bcond.operator(newargs[0],newargs[1])
if bcond.operator == operator.and_:
if is_static_condition(newargs[0]):
return False if not newargs[0] else newargs[1]
if is_static_condition(newargs[1]):
return False if not newargs[1] else newargs[0]
if bcond.operator == operator.or_:
if is_static_condition(newargs[0]):
return True if newargs[0] else newargs[1]
if is_static_condition(newargs[1]):
return True if newargs[1] else newargs[0]
if bcond.args == newargs: return bcond
return QCondition(bcond.operator,*newargs)
# Check comparison condition - at least | |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# region header
'''
This module provides classes for dealing with python's way to transport \
strings to any output stream.
'''
# # python3.5
# # pass
from __future__ import absolute_import, division, print_function, \
unicode_literals
# #
'''
For conventions see "boostnode/__init__.py" on \
https://github.com/thaibault/boostnode
'''
__author__ = '<NAME>'
__copyright__ = 'see boostnode/__init__.py'
__credits__ = '<NAME>',
__license__ = 'see boostnode/__init__.py'
__maintainer__ = '<NAME>'
__maintainer_email__ = 'info["~at~"]torben.website'
__status__ = 'stable'
__version__ = '1.0'
# # python3.5 import builtins
import __builtin__ as builtins
from copy import copy
import inspect
import logging
# # python3.5 from logging import getLoggerClass, getLogger, LogRecord
from logging import getLoggerClass, getLogger
from logging import StreamHandler as LoggingStreamHandler
from logging import Formatter as LoggingFormatter
import multiprocessing
import os
import sys
import threading
# # python3.5 import queue as native_queue
import Queue as native_queue
'''Make boostnode packages and modules importable via relative paths.'''
sys.path.append(os.path.abspath(sys.path[0] + 2 * (os.sep + '..')))
# # python3.5 pass
from boostnode import convert_to_string, convert_to_unicode
from boostnode.extension.file import Handler as FileHandler
from boostnode.extension.native import Module
# # python3.5 from boostnode.extension.type import Self, SelfClass
pass
from boostnode.paradigm.aspectOrientation import JointPoint
from boostnode.paradigm.objectOrientation import Class
# endregion
# region constants
# ANSI SGR (Select Graphic Rendition) escape-sequence template; "%d" takes
# one of the attribute codes below.
SET_ATTRIBUTE_MODE = '\033[%dm'
# Text attribute codes (SGR 0-29).
RESET_ATTRIBUTE_MODE = 0
BOLD = 1
DIM = 2
ITALIC = 3
UNDERLINE = 4
BLINK = 5
BLINK_RAPID = 6
REVERSE = 7
HIDDEN = 8
CROSSED_OUT = 9
DEFAULT_FONT = 10
FONT_1 = 11
FONT_2 = 12
FONT_3 = 13
FONT_4 = 14
FONT_5 = 15
FONT_6 = 16
FONT_7 = 17
FRAKTUR_HARDLY = 20
BOLD_OFF = 21
BOLD_INTENSITY_OFF = 22
ITALIC_OFF = 23
UNDERLINE_OFF = 24
BLINK_OFF = 25
# NOTE(review): name keeps its historical misspelling ("RESERVERD") for
# backward compatibility.
RESERVERD_1 = 26
REVERSE_OFF = 27
REVEAL_OFF = 28
CROSSED_OUT_OFF = 29
# Standard color palette (SGR codes 30-49).
COLOR = {
    'foreground': {
        'black': 30,
        'red': 31,
        'green': 32,
        'yellow': 33,
        'blue': 34,
        'magenta': 35,
        'cyan': 36,
        'white': 37,
        'extended': 38,
        'fallback': 39
    }, 'background': {
        'black': 40,
        'red': 41,
        'green': 42,
        'yellow': 43,
        'blue': 44,
        'magenta': 45,
        'cyan': 46,
        'white': 47,
        'extended': 48,
        'fallback': 49
    }
}
# High-intensity ("bright") color palette (SGR codes 90-107).
HIGH_COLOR = {
    'foreground': {
        'black': 90,
        'red': 91,
        'green': 92,
        'yellow': 93,
        'blue': 94,
        'magenta': 95,
        'cyan': 96,
        'white': 97
    }, 'background': {
        'black': 100,
        'red': 101,
        'green': 102,
        'yellow': 103,
        'blue': 104,
        'magenta': 105,
        'cyan': 106,
        'white': 107
    }
}
# Remaining attribute codes (SGR 50-65).
RESERVED_2 = 50
FRAMED = 51
ENCIRCLED = 52
OVERLINED = 53
FRAMED_ENCIRCLED_OFF = 54
OVERLINED_OFF = 55
RESERVED_3 = 56
RESERVED_4 = 57
RESERVED_5 = 58
RESERVED_6 = 59
IDEOGRAM_UNDERLINE = 60
IDEOGRAM_DOUBLE_UNDERLINE = 61
IDEOGRAM_OVERLINE = 62
IDEOGRAM_DOUBLE_OVERLINE = 63
IDEOGRAM_STRESS_MARKING = 64
IDEOGRAM_OFF = 65
# endregion
# region classes
class Buffer(Class, LoggingStreamHandler):

    '''
    This class represents a layer for writing and reading to an output \
    buffer realized as file, queue or variable.

    **file**                    - a file path or file handler to use as \
                                  buffer

    **queue**                   - a queue object to use as buffer

    **support_multiprocessing** - indicates whether buffer read and write \
                                  requests should be multiprocessing save

    Examples:

    >>> buffer = Buffer(file=__test_folder__.path + 'Buffer')
    >>> buffer.clear() # doctest: +ELLIPSIS
    '...'
    >>> print('hans', file=buffer, end='+')
    >>> buffer.content
    'hans+'
    '''

    # region dynamic methods

    # # region public

    # # # region special

    @JointPoint
    # # python3.5
    # # def __init__(
    # #     self: Self, file=None, queue=None, support_multiprocessing=False
    # # ) -> None:
    def __init__(
        self, file=None, queue=None, support_multiprocessing=False,
        force_string=False
    ):
    # #
        '''
        Saves the file path in the current instance. If "file" is "None" \
        an instance variable is used as buffer.

        Examples:

        >>> Buffer(
        ...     file=__test_folder__.path + '__init__'
        ... ).file # doctest: +ELLIPSIS
        Object of "Handler" with path "...__init__" ...

        >>> Buffer(
        ...     queue=True, support_multiprocessing=True
        ... ).queue # doctest: +ELLIPSIS
        <multiprocessing.queues.Queue object at ...>
        '''
        # # python3.5 pass
        self.force_string = force_string
        '''Saves the last written input.'''
        self.last_written = ''
        if support_multiprocessing:
            self._lock = multiprocessing.Lock()
        '''Saves the file handler instance for writing content into.'''
        self.file = None
        '''Saves the queue instance for writing content into.'''
        self.queue = None
        if queue is not None:
            self.queue = native_queue.Queue()
            if support_multiprocessing:
                self.queue = multiprocessing.Queue()
            if(builtins.isinstance(queue, native_queue.Queue) or
               support_multiprocessing and
               builtins.isinstance(queue, multiprocessing.queues.Queue)):
                self.queue = queue
        elif file is not None:
            self.file = FileHandler(location=file)
        '''
        A lock object to guarantee that no other thread read from buffer \
        during truncating or writing.
        '''
        if not support_multiprocessing:
            # BUGFIX: this assignment used to be unconditional and silently
            # replaced the multiprocessing lock created above with a plain
            # threading lock, so "support_multiprocessing=True" never
            # actually provided cross-process locking.
            self._lock = threading.Lock()
        '''Saves the current buffer content.'''
        # # python3.5
        # # self._content = ''
        self._content = builtins.str() if self.force_string else ''
        # #

    @JointPoint
    # # python3.5 def __repr__(self: Self) -> builtins.str:
    def __repr__(self):
        '''
        Invokes if this object should describe itself by a string.

        Examples:

        >>> repr(Buffer())
        'Object of "Buffer" (memory buffered) with content "".'

        >>> buffer = Buffer(file=__test_folder__.path + '__repr__')
        >>> buffer.write('hans') # doctest: +ELLIPSIS
        Object of "Buffer" (file buffered with "...__repr__" (type: file...

        >>> repr(Buffer(queue=True))
        'Object of "Buffer" (queue buffered) with content "".'

        >>> repr(Buffer(queue=native_queue.Queue()))
        'Object of "Buffer" (queue buffered) with content "".'
        '''
        buffer_type = 'memory'
        type_addition = ''
        if self.file:
            buffer_type = 'file'
            type_addition = ' with "%s"' % builtins.repr(self.file)
        elif self.queue:
            buffer_type = 'queue'
        # # python3.5
        # # pass
        if self.force_string:
            return (
                'Object of "{class_name}" ({type} buffered{type_addition})'
                ' with content "{content}".'.format(
                    class_name=self.__class__.__name__,
                    type=buffer_type, type_addition=type_addition,
                    content=convert_to_unicode(self.content)))
        # #
        return (
            'Object of "{class_name}" ({type} buffered{type_addition}) '
            'with content "{content}".'.format(
                class_name=self.__class__.__name__, type=buffer_type,
                type_addition=type_addition, content=self.content))

    @JointPoint
    # # python3.5 def __str__(self: Self) -> builtins.str:
    def __str__(self):
        '''
        Invokes if this object is tried to interpreted as string.

        Examples:

        >>> str(Buffer().write('test'))
        'test'
        '''
        return self.content

    @JointPoint
    # # python3.5 def __bool__(self: Self) -> builtins.bool:
    def __nonzero__(self):
        '''
        Invokes if this object is tried to interpreted as boolean.

        Examples:

        >>> bool(Buffer().write('test'))
        True

        >>> bool(Buffer())
        False
        '''
        return builtins.bool(self.content)

    # # # endregion

    # # endregion

    # # region getter

    @JointPoint
    # # python3.5 def get_content(self: Self) -> builtins.str:
    def get_content(self):
        '''
        Getter for the current content.

        Examples:

        >>> Buffer().write('test').content
        'test'

        >>> Buffer(queue=True).write('test').content
        'test'
        '''
        with self._lock:
            if self.file is not None:
                self._content = self.file.content
            elif self.queue:
                self._content = ''
                temp_buffer = []
                while not self.queue.empty():
                    # # python3.5
                    # # temp_buffer.append(self.queue.get())
                    temp_buffer.append(convert_to_unicode(
                        self.queue.get()))
                    # #
                    self._content += temp_buffer[-1]
                # put everything back so reading does not drain the queue
                for content in temp_buffer:
                    self.queue.put(content)
            # # python3.5
            # # pass
            if self.force_string and builtins.isinstance(
                self._content, builtins.unicode
            ):
                self._content = convert_to_string(self._content)
            # #
        return self._content

    # # endregion

    @JointPoint
    # # python3.5 def write(self: Self, content: builtins.str) -> Self:
    def write(self, content):
        '''
        Writes content to the current output buffer file. If the current \
        given file "Buffer.file" doesn't exists it will be created.

        **content** - content to write into current buffer instance

        Examples:

        >>> buffer = Buffer(file=__test_folder__.path + 'write')
        >>> buffer.clear() # doctest: +ELLIPSIS
        '...'
        >>> buffer.write('hans') # doctest: +ELLIPSIS
        Object of "Buffer" (file buffered with "...write...nt "hans".
        >>> buffer.content
        'hans'

        >>> buffer = Buffer()
        >>> buffer.write('hans')
        Object of "Buffer" (memory buffered) with content "hans".
        >>> buffer.content
        'hans'
        '''
        # # python3.5
        # # pass
        if self.force_string and builtins.isinstance(
            content, builtins.unicode
        ):
            content = convert_to_string(content)
        # #
        with self._lock:
            self.last_written = content
            if self.file is not None:
                self.file.content += self.last_written
            elif self.queue:
                self.queue.put(self.last_written)
            else:
                self._content += self.last_written
        return self

    @JointPoint
    # # python3.5 def flush(self: Self) -> Self:
    def flush(self):
        '''
        Flush methods usually called to guarantee that all objects passed \
        to "write()" are materialized on their provided media. This \
        implementation exists only for compatibility reasons.

        Examples:

        >>> Buffer().flush()
        Object of "Buffer" (memory buffered) with content "".
        '''
        return self

    @JointPoint
    # # python3.5 def clear(self: Self, delete=True) -> builtins.str:
    def clear(self, delete=True):
        '''
        Removes the current output buffer content.

        **delete** - indicates whether a file buffer should be deleted or \
                     truncated

        Examples:

        >>> buffer = Buffer(file=__test_folder__.path + 'clear')
        >>> buffer.clear() # doctest: +ELLIPSIS
        '...'
        >>> buffer.write('hans') # doctest: +ELLIPSIS
        Objec...(file buffered with "...clear...with content "hans".
        >>> buffer.clear(False)
        'hans'
        >>> buffer.content
        ''

        >>> buffer = Buffer()
        >>> buffer.write('hans')
        Object of "Buffer" (memory buffered) with content "hans".
        >>> buffer.clear()
        'hans'
        >>> buffer.content
        ''

        >>> buffer = Buffer(queue=True)
        >>> buffer.clear()
        ''
        >>> buffer.write('hans')
        Object of "Buffer" (queue buffered) with content "hans".
        >>> buffer.write('hans')
        Object of "Buffer" (queue buffered) with content "hanshans".
        >>> buffer.clear()
        'hanshans'
        >>> buffer.content
        ''
        '''
        with self._lock:
            if self.file is not None:
                content = self.file.content
                if delete:
                    self.file.remove_file()
                else:
                    self.file.content = ''
            elif self.queue:
                content = ''
                while not self.queue.empty():
                    content += self.queue.get()
            else:
                content = self._content
                self._content = ''
            # # python3.5
            # # pass
            if self.force_string:
                self._content = builtins.str()
                content = convert_to_string(content)
            # #
        return content
# endregion
class Print(Class):
'''
Provides a high level printing class on top of pythons native print \
function.
**output** - are the strings which should be printed or saved.
| |
# This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
"""The Console widget implements a vim-like console"""
import curses
import re
from collections import deque
from ranger.gui.widgets import Widget
from ranger.ext.direction import Direction
from ranger.ext.widestring import uwid, WideString
from ranger.container.history import History, HistoryEmptyException
import ranger
class Console(Widget):
visible = False
last_cursor_mode = None
history_search_pattern = None
prompt = ':'
copy = ''
tab_deque = None
original_line = None
history = None
history_backup = None
override = None
allow_close = False
historypath = None
wait_for_command_input = False
unicode_buffer = ""
def __init__(self, win):
Widget.__init__(self, win)
self.clear()
self.history = History(self.settings.max_console_history_size)
# load history from files
if not ranger.arg.clean:
self.historypath = self.fm.confpath('history')
try:
f = open(self.historypath, 'r')
except Exception:
pass
else:
for line in f:
self.history.add(line[:-1])
f.close()
self.line = ""
self.history_backup = History(self.history)
# NOTE: the console is considered in the "question mode" when the
# question_queue is non-empty. In that case, the console will draw the
# question instead of the regular console, and the input you give is
# used to answer the question instead of typing in commands.
#
# A question is a tuple of (question_string, callback_func,
# tuple_of_choices). callback_func is a function that is called when
# the question is answered which gets the answer as an argument.
# tuple_of_choices looks like ('y', 'n'). Only one-letter-answers are
# currently supported. Pressing enter uses the first choice whereas
# pressing ESC uses the second choice.
self.question_queue = []
def destroy(self):
# save history to files
if ranger.arg.clean or not self.settings.save_console_history:
return
if self.historypath:
try:
f = open(self.historypath, 'w')
except Exception:
pass
else:
for entry in self.history_backup:
try:
f.write(entry + '\n')
except UnicodeEncodeError:
pass
f.close()
Widget.destroy(self)
def draw(self):
self.win.erase()
if self.question_queue:
assert isinstance(self.question_queue[0], tuple)
assert len(self.question_queue[0]) == 3
self.addstr(0, 0, self.question_queue[0][0])
return
self.addstr(0, 0, self.prompt)
line = WideString(self.line)
overflow = -self.wid + len(self.prompt) + len(line) + 1
if overflow > 0:
self.addstr(0, len(self.prompt), str(line[overflow:]))
else:
self.addstr(0, len(self.prompt), self.line)
    def finalize(self):
        """Place the terminal cursor after the question or at the input pos."""
        move = self.fm.ui.win.move
        if self.question_queue:
            # Question mode: park the cursor right after the question text.
            try:
                move(self.y, len(self.question_queue[0][0]))
            except Exception:
                # curses raises when moving off-screen; ignore it.
                pass
        else:
            try:
                # uwid() measures display width, so wide (CJK) characters
                # count as two cells; clamp to the widget width.
                pos = uwid(self.line[0:self.pos]) + len(self.prompt)
                move(self.y, self.x + min(self.wid - 1, pos))
            except Exception:
                pass
    def open(self, string='', prompt=None, position=None):
        """Open the console with *string* as the initial input line.

        prompt   -- optional prompt to use instead of the class default.
        position -- optional cursor position (clamped to len(string)).
        Returns True so callers treat the action as handled.
        """
        if prompt is not None:
            assert isinstance(prompt, str)
            # Shadow the class attribute with an instance attribute.
            self.prompt = prompt
        elif 'prompt' in self.__dict__:
            # Drop a leftover instance-level prompt so the class default
            # shows again.
            del self.prompt
        if self.last_cursor_mode is None:
            try:
                # Make the cursor visible and remember the previous mode so
                # _close_command_prompt() can restore it later.
                self.last_cursor_mode = curses.curs_set(1)
            except Exception:
                pass
        self.allow_close = False
        self.tab_deque = None
        self.unicode_buffer = ""
        self.line = string
        # The initial text doubles as the history search pattern.
        self.history_search_pattern = self.line
        self.pos = len(string)
        if position is not None:
            self.pos = min(self.pos, position)
        # Work on a copy of the history; the backup is only modified when a
        # line is committed via add_to_history().
        self.history_backup.fast_forward()
        self.history = History(self.history_backup)
        self.history.add('')
        self.wait_for_command_input = True
        return True
def close(self, trigger_cancel_function=True):
if self.question_queue:
question = self.question_queue[0]
answers = question[2]
if len(answers) >= 2:
self._answer_question(answers[1])
else:
self._close_command_prompt(trigger_cancel_function)
    def _close_command_prompt(self, trigger_cancel_function=True):
        """Leave command-input mode, restoring cursor state and history."""
        if trigger_cancel_function:
            cmd = self._get_cmd(quiet=True)
            if cmd:
                # Give the active command a chance to clean up after itself.
                try:
                    cmd.cancel()
                except Exception as error:
                    self.fm.notify(error)
        if self.last_cursor_mode is not None:
            try:
                # Restore the cursor visibility saved in open().
                curses.curs_set(self.last_cursor_mode)
            except Exception:
                pass
            self.last_cursor_mode = None
        self.fm.hide_console_info()
        self.add_to_history()
        self.tab_deque = None
        self.clear()
        # Reset to the base class in case a specialized console subclass
        # had been swapped in for this instance.
        self.__class__ = Console
        self.wait_for_command_input = False
def clear(self):
self.pos = 0
self.line = ''
def press(self, key):
self.fm.ui.keymaps.use_keymap('console')
if not self.fm.ui.press(key):
self.type_key(key)
def _answer_question(self, answer):
if not self.question_queue:
return False
question = self.question_queue[0]
text, callback, answers = question
if answer in answers:
self.question_queue.pop(0)
callback(answer)
return True
return False
    def type_key(self, key):
        """Insert *key* into the line, or use it to answer a question."""
        self.tab_deque = None
        # In question mode we type into an empty scratch line; the decoded
        # character becomes the answer instead of being inserted.
        line = "" if self.question_queue else self.line
        result = self._add_character(key, self.unicode_buffer, line, self.pos)
        if result[1] == line:
            # line didn't change, so we don't need to do anything, just update
            # the unicode _buffer.
            self.unicode_buffer = result[0]
            return
        if self.question_queue:
            self.unicode_buffer, answer, self.pos = result
            self._answer_question(answer)
        else:
            self.unicode_buffer, self.line, self.pos = result
            self.on_line_change()
    def _add_character(self, key, unicode_buffer, line, pos):
        # Takes the pressed key, a string "unicode_buffer" containing a
        # potentially incomplete unicode character, the current line and the
        # position of the cursor inside the line.
        # This function returns the new unicode buffer, the modified line and
        # position.
        if isinstance(key, int):
            try:
                key = chr(key)
            except ValueError:
                # Key codes outside the valid chr() range are ignored.
                return unicode_buffer, line, pos
        if self.fm.py3:
            if len(unicode_buffer) >= 4:
                # A UTF-8 sequence is at most 4 bytes; discard stale bytes.
                unicode_buffer = ""
            if ord(key) in range(0, 256):
                # curses delivers UTF-8 one byte at a time; accumulate bytes
                # until they decode to a complete character.
                unicode_buffer += key
                try:
                    decoded = unicode_buffer.encode("latin-1").decode("utf-8")
                except UnicodeDecodeError:
                    # Incomplete multi-byte sequence; wait for more input.
                    return unicode_buffer, line, pos
                except UnicodeEncodeError:
                    return unicode_buffer, line, pos
                else:
                    unicode_buffer = ""
                    if pos == len(line):
                        line += decoded
                    else:
                        line = line[:pos] + decoded + line[pos:]
                    pos += len(decoded)
        else:
            # Python 2: the line is a byte string; insert the key as-is.
            if pos == len(line):
                line += key
            else:
                line = line[:pos] + key + line[pos:]
            pos += len(key)
        return unicode_buffer, line, pos
    def history_move(self, n):
        """Move *n* steps through history, searching if a pattern is set."""
        try:
            current = self.history.current()
        except HistoryEmptyException:
            pass
        else:
            # Save the edited line into the working history before moving
            # away, unless it already equals the current or newest entry.
            if self.line != current and self.line != self.history.top():
                self.history.modify(self.line)
            if self.history_search_pattern:
                # Only visit entries matching the text typed at open().
                self.history.search(self.history_search_pattern, n)
            else:
                self.history.move(n)
            current = self.history.current()
            if self.line != current:
                self.line = self.history.current()
                self.pos = len(self.line)
def add_to_history(self):
self.history_backup.fast_forward()
self.history_backup.add(self.line)
self.history = History(self.history_backup)
    def move(self, **keywords):
        """Move the cursor character-wise per the Direction in *keywords*."""
        direction = Direction(keywords)
        if direction.horizontal():
            # Ensure that the pointer is moved utf-char-wise
            if self.fm.py3:
                self.pos = direction.move(
                    direction=direction.right(),
                    minimum=0,
                    maximum=len(self.line) + 1,
                    current=self.pos)
            else:
                if self.fm.py3:
                    # NOTE(review): dead branch -- this path only runs when
                    # self.fm.py3 is false, so this condition can never hold.
                    uc = list(self.line)
                    upos = len(self.line[:self.pos])
                else:
                    # Python 2: operate on decoded characters so the cursor
                    # never lands inside a multi-byte sequence.
                    uc = list(self.line.decode('utf-8', 'ignore'))
                    upos = len(self.line[:self.pos].decode('utf-8', 'ignore'))
                newupos = direction.move(
                    direction=direction.right(),
                    minimum=0,
                    maximum=len(uc) + 1,
                    current=upos)
                self.pos = len(''.join(uc[:newupos]).encode('utf-8', 'ignore'))
def move_word(self, **keywords):
direction = Direction(keywords)
if direction.horizontal():
self.pos = self.move_by_word(self.line, self.pos, direction.right())
self.on_line_change()
@staticmethod
def move_by_word(line, position, direction):
"""
Returns a new position by moving word-wise in the line
>>> import sys
>>> if sys.version_info < (3, ):
... # Didn't get the unicode test to work on python2, even though
... # it works fine in ranger, even with unicode input...
... line = "ohai world, this is dog"
... else:
... line = "\u30AA\u30CF\u30E8\u30A6 world, this is dog"
>>> Console.move_by_word(line, 0, -1)
0
>>> Console.move_by_word(line, 0, 1)
5
>>> Console.move_by_word(line, 2, -1)
0
>>> Console.move_by_word(line, 2, 1)
5
>>> Console.move_by_word(line, 15, -2)
5
>>> Console.move_by_word(line, 15, 2)
21
>>> Console.move_by_word(line, 24, -1)
21
>>> Console.move_by_word(line, 24, 1)
24
"""
word_beginnings = []
seen_whitespace = True
current_word = None
cursor_inside_word = False
# Scan the line for word boundaries and determine position of cursor
for i, char in enumerate(line):
if i == position:
current_word = len(word_beginnings)
if not seen_whitespace:
cursor_inside_word = True
if char == " ":
seen_whitespace = True
elif seen_whitespace:
seen_whitespace = False
word_beginnings.append(i)
word_beginnings.append(len(line))
# Handle corner cases:
if current_word is None:
current_word = len(word_beginnings)
if direction > 0 and cursor_inside_word:
current_word -= 1
if direction < 0 and position == len(line):
current_word -= 1
new_word = current_word + direction
new_word = max(0, min(len(word_beginnings) - 1, new_word))
return word_beginnings[new_word]
def delete_rest(self, direction):
self.tab_deque = None
if direction > 0:
self.copy = self.line[self.pos:]
self.line = self.line[:self.pos]
else:
self.copy = self.line[:self.pos]
self.line = self.line[self.pos:]
self.pos = 0
self.on_line_change()
def paste(self):
if self.pos == len(self.line):
self.line += self.copy
else:
self.line = self.line[:self.pos] + self.copy + self.line[self.pos:]
self.pos += len(self.copy)
self.on_line_change()
    def delete_word(self, backward=True):
        """Delete the word next to the cursor, saving it in self.copy.

        backward -- delete the word left of the cursor, else the one right.
        """
        if self.line:
            self.tab_deque = None
            if backward:
                right_part = self.line[self.pos:]
                # Scan left over word characters ([\w\d], unicode-aware);
                # start at pos-2 so the character just left of the cursor is
                # always removed.
                i = self.pos - 2
                while i >= 0 and re.match(r'[\w\d]', self.line[i], re.U):
                    i -= 1
                self.copy = self.line[i + 1:self.pos]
                self.line = self.line[:i + 1] + right_part
                self.pos = i + 1
            else:
                left_part = self.line[:self.pos]
                # Scan right over word characters.
                i = self.pos + 1
                while i < len(self.line) and re.match(r'[\w\d]', self.line[i], re.U):
                    i += 1
                self.copy = self.line[self.pos:i]
                if i >= len(self.line):
                    self.line = left_part
                    self.pos = len(self.line)
                else:
                    self.line = left_part + self.line[i:]
                    self.pos = len(left_part)
            self.on_line_change()
    def delete(self, mod):
        """Delete one character; mod=-1 acts as backspace, mod=0 as delete.

        Backspacing at the start of an empty line closes the console.
        """
        self.tab_deque = None
        if mod == -1 and self.pos == 0:
            if not self.line:
                self.close(trigger_cancel_function=False)
            return
        # Delete utf-char-wise
        if self.fm.py3:
            left_part = self.line[:self.pos + mod]
            self.pos = len(left_part)
            self.line = left_part + self.line[self.pos + 1:]
        else:
            # Python 2: operate on decoded characters, then re-encode, so a
            # multi-byte character is removed as a unit.
            uc = list(self.line.decode('utf-8', 'ignore'))
            upos = len(self.line[:self.pos].decode('utf-8', 'ignore')) + mod
            left_part = ''.join(uc[:upos]).encode('utf-8', 'ignore')
            self.pos = len(left_part)
            self.line = left_part + ''.join(uc[upos + 1:]).encode('utf-8', 'ignore')
        self.on_line_change()
def execute(self, cmd=None):
if self.question_queue and cmd is None:
question = self.question_queue[0]
answers = question[2]
if len(answers) >= 1:
self._answer_question(answers[0])
else:
self.question_queue.pop(0)
return
self.allow_close = True
if cmd:
cmd.execute()
| |
the resources for which the user does not have
admin permissions and delete the remaining resources
"""
allowed = []
skipped = []
if not is_truthy(self.request.query_params.get('skip_uneditable', False)):
return None
for resource in resource_object_list:
if resource.has_permission(user, ADMIN):
allowed.append(resource)
else:
skipped.append({'id': resource._id, 'type': object_type})
return {'skipped': skipped, 'allowed': allowed}
# Overrides BulkDestroyJSONAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
try:
instance.remove_node(auth=auth)
except NodeStateError as err:
raise ValidationError(err.message)
instance.save()
class NodeDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, NodeMixin, WaterButlerMixin):
    """Details about a given node (project or component). *Writeable*.
    A registration or withdrawn registration cannot be accessed through this endpoint. See Registration Detail endpoint.
    On the front end, nodes are considered 'projects' or 'components'. The difference between a project and a component
    is that a project is the top-level node, and components are children of the project. There is also a [category
    field](/v2/#osf-node-categories) that includes 'project' as an option. The categorization essentially determines
    which icon is displayed by the node in the front-end UI and helps with search organization. Top-level nodes may have
    a category other than project, and children nodes may have a category of project.
    ###Permissions
    Nodes that are made public will give read-only access to everyone. Private nodes require explicit read
    permission. Write and admin access are the same for public and private nodes. Administrators on a parent node have
    implicit read permissions for all child nodes.
    ##Attributes
    OSF Node entities have the "nodes" `type`.
    name type description
    =================================================================================
    title string title of project or component
    description string description of the node
    category string node category, must be one of the allowed values
    date_created iso8601 timestamp timestamp that the node was created
    date_modified iso8601 timestamp timestamp when the node was last updated
    tags array of strings list of tags that describe the node
    current_user_can_comment boolean Whether the current user is allowed to post comments
    current_user_permissions array of strings list of strings representing the permissions for the current user on this node
    registration boolean is this a registration? (always false - may be deprecated in future versions)
    fork boolean is this node a fork of another node?
    public boolean has this node been made publicly-visible?
    collection boolean is this a collection? (always false - may be deprecated in future versions)
    node_license object details of the license applied to the node
    year string date range of the license
    copyright_holders array of strings holders of the applied license
    ##Relationships
    ###Children
    List of nodes that are children of this node. New child nodes may be added through this endpoint.
    ###Comments
    List of comments on this node. New comments can be left on the node through this endpoint.
    ###Contributors
    List of users who are contributors to this node. Contributors may have "read", "write", or "admin" permissions.
    A node must always have at least one "admin" contributor. Contributors may be added via this endpoint.
    ###Draft Registrations
    List of draft registrations of the current node.
    ###Files
    List of top-level folders (actually cloud-storage providers) associated with this node. This is the starting point
    for accessing the actual files stored within this node.
    ###Forked From
    If this node was forked from another node, the canonical endpoint of the node that was forked from will be
    available in the `/forked_from/links/related/href` key. Otherwise, it will be null.
    ###Logs
    List of read-only log actions pertaining to the node.
    ###Node Links
    List of links (pointers) to other nodes on the GakuNin RDM. Node links can be added through this endpoint.
    ###Parent
    If this node is a child node of another node, the parent's canonical endpoint will be available in the
    `/parent/links/related/href` key. Otherwise, it will be null.
    ###Registrations
    List of registrations of the current node.
    ###Root
    Returns the top-level node associated with the current node. If the current node is the top-level node, the root is
    the current node.
    ### Linked Nodes
    List of nodes linked to the current node.
    ### Linked Registrations
    List of registrations linked to the current node.
    ##Links
    self: the canonical api endpoint of this node
    html: this node's page on the GakuNin RDM website
    ##Actions
    ###Update
    Method: PUT / PATCH
    URL: /links/self
    Query Params: <none>
    Body (JSON): {
    "data": {
    "type": "nodes", # required
    "id": {node_id}, # required
    "attributes": {
    "title": {title}, # mandatory
    "category": {category}, # mandatory
    "description": {description}, # optional
    "tags": [{tag1}, {tag2}], # optional
    "public": true|false # optional
    }
    }
    }
    Success: 200 OK + node representation
    To update a node, issue either a PUT or a PATCH request against the `/links/self` URL. The `title` and `category`
    fields are mandatory if you PUT and optional if you PATCH. The `tags` parameter must be an array of strings.
    Non-string values will be accepted and stringified, but we make no promises about the stringification output. So
    don't do that.
    ###Delete
    Method: DELETE
    URL: /links/self
    Params: <none>
    Success: 204 No Content
    To delete a node, issue a DELETE request against `/links/self`. A successful delete will return a 204 No Content
    response. Attempting to delete a node you do not own will result in a 403 Forbidden.
    ##Query Params
    + `view_only=<Str>` -- Allow users with limited access keys to access this node. Note that some keys are anonymous, so using the view_only key will cause user-related information to no longer serialize. This includes blank ids for users and contributors and missing serializer fields and relationships.
    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        ContributorOrPublic,
        ReadOnlyIfRegistration,
        base_permissions.TokenHasScope,
        ExcludeWithdrawals,
    )
    required_read_scopes = [CoreScopes.NODE_BASE_READ]
    required_write_scopes = [CoreScopes.NODE_BASE_WRITE]
    parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
    serializer_class = NodeDetailSerializer
    view_category = 'nodes'
    view_name = 'node-detail'
    # overrides RetrieveUpdateDestroyAPIView
    def get_object(self):
        return self.get_node()
    # overrides RetrieveUpdateDestroyAPIView
    def perform_destroy(self, instance):
        """Delete the node, translating node-state errors into HTTP 400."""
        auth = get_user_auth(self.request)
        node = self.get_object()
        try:
            node.remove_node(auth=auth)
        except NodeStateError as err:
            # str(err) works on both Python 2 and 3; Exception.message was
            # deprecated in 2.6 and removed in Python 3.
            raise ValidationError(str(err))
        node.save()
class NodeContributorsList(BaseContributorList, bulk_views.BulkUpdateJSONAPIView, bulk_views.BulkDestroyJSONAPIView, bulk_views.ListBulkCreateJSONAPIView, NodeMixin):
"""Contributors (users) for a node.
Contributors are users who can make changes to the node or, in the case of private nodes,
have read access to the node. Contributors are divided between 'bibliographic' and 'non-bibliographic'
contributors. From a permissions standpoint, both are the same, but bibliographic contributors
are included in citations, while non-bibliographic contributors are not included in citations.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed and the id for
the contributor will be an empty string.
##Node Contributor Attributes
<!--- Copied Attributes from NodeContributorDetail -->
`type` is "contributors"
name type description
======================================================================================================
bibliographic boolean Whether the user will be included in citations for this node. Default is true.
permission string User permission level. Must be "read", "write", or "admin". Default is "write".
unregistered_contributor string Contributor's assigned name if contributor hasn't yet claimed account
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Relationships
###Users
This endpoint shows the contributor user detail and is automatically embedded.
##Actions
###Adding Contributors
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "contributors", # required
"attributes": {
"bibliographic": true|false, # optional
"permission": "read"|"write"|"admin" # optional
},
"relationships": {
"users": {
"data": {
"type": "users", # required
"id": "{user_id}" # required
}
}
}
}
}
Success: 201 CREATED + node contributor representation
Add a contributor to a node by issuing a POST request to this endpoint. This effectively creates a relationship
between the node and the user. Besides the top-level type, there are optional "attributes" which describe the
relationship between the node and the user. `bibliographic` is a boolean and defaults to `true`. `permission` must
be a [valid OSF permission key](/v2/#osf-node-permission-keys) and defaults to `"write"`. A relationship object
with a "data" member, containing the user `type` and user `id` must be included. The id must be a valid user id.
All other fields not listed above will be ignored. If the request is successful the API will return
a 201 response with the representation of the new node contributor in the body. For the new node contributor's
canonical URL, see | |
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains implementation of data model for kube manager
"""
from builtins import str
from builtins import range
import json
from cfgm_common.vnc_db import DBBase
from bitstring import BitArray
from vnc_api.vnc_api import (KeyValuePair)
from kube_manager.vnc.vnc_kubernetes_config import VncKubernetesConfig as vnc_kube_config
from kube_manager.sandesh.kube_introspect import ttypes as introspect
INVALID_VLAN_ID = 4096
MAX_VLAN_ID = 4095
MIN_VLAN_ID = 1
class DBBaseKM(DBBase):
    # Base class for all kube-manager DB model objects; adds annotation-based
    # fq-name bookkeeping on top of cfgm_common's DBBase.
    obj_type = __name__
    # Whether kube-manager runs in nested mode; toggled via set_nested().
    _nested_mode = False
    # Infra annotations that will be added on objects with custom annotations.
    ann_fq_name_infra_key = ["project", "cluster", "owner"]
    def __init__(self, uuid, obj_dict=None):
        # By default there are no annotations added on an object.
        # NOTE(review): DBBase.__init__ is not invoked and the uuid is not
        # stored here; subclasses are expected to set self.uuid themselves.
        self.ann_fq_name = None
    @staticmethod
    def get_infra_annotations():
        """Get infra annotations."""
        annotations = {}
        annotations['owner'] = vnc_kube_config.cluster_owner()
        annotations['cluster'] = vnc_kube_config.cluster_name()
        # "project" annotations, though infrastructural, are namespace specific.
        # So "project" annotations are added when callee adds annotations on
        # objects.
        return annotations
    @classmethod
    def _get_annotations(cls, vnc_caller, namespace, name, k8s_type,
                         **custom_ann_kwargs):
        """Get all annotations.
        Annotations are aggregated from multiple sources like infra info,
        input params and custom annotations. This method is meant to be an
        aggregator of all possible annotations.
        """
        # Get annotations declared on the caller.
        annotations = dict(vnc_caller.get_annotations())
        # Update annotations with infra specific annotations.
        infra_anns = cls.get_infra_annotations()
        infra_anns['project'] = vnc_kube_config.cluster_project_name(namespace)
        annotations.update(infra_anns)
        # Update annotations based on explicit input params.
        input_anns = {}
        input_anns['namespace'] = namespace
        input_anns['name'] = name
        if k8s_type:
            input_anns['kind'] = k8s_type
        annotations.update(input_anns)
        # Append other custom annotations.
        annotations.update(custom_ann_kwargs)
        return annotations
    @classmethod
    def add_annotations(cls, vnc_caller, obj, namespace, name, k8s_type=None,
                        **custom_ann_kwargs):
        """Add annotations on the input object.
        Given an object, this method will add all required and specified
        annotations on that object.
        """
        # Construct annotations to be added on the object.
        annotations = cls._get_annotations(vnc_caller, namespace, name,
                                           k8s_type, **custom_ann_kwargs)
        # Validate that annotations have all the info to construct
        # the annotations-based-fq-name as required by the object's db.
        if hasattr(cls, 'ann_fq_name_key'):
            if not set(cls.ann_fq_name_key).issubset(annotations):
                err_msg = "Annotations required to contruct kube_fq_name for"+\
                    " object (%s:%s) was not found in input keyword args." %\
                    (namespace, name)
                raise Exception(err_msg)
        # Annotate the object.
        for ann_key, ann_value in annotations.items():
            obj.add_annotations(KeyValuePair(key=ann_key, value=ann_value))
    @classmethod
    def _update_fq_name_to_uuid(cls, uuid, obj_dict):
        # Record the vnc fq-name -> uuid mapping in the class-level table.
        cls._fq_name_to_uuid[tuple(obj_dict['fq_name'])] = uuid
    @classmethod
    def get_fq_name_to_uuid(cls, fq_name):
        """Return the uuid for a vnc fq-name, or None if unknown."""
        return cls._fq_name_to_uuid.get(tuple(fq_name))
    @classmethod
    def _get_ann_fq_name_from_obj(cls, obj_dict):
        """Get the annotated fully qualified name from the object.
        Annotated-fq-names are constructed from annotations found on the
        object. The format of the fq-name is specified in the object's db
        class. This method will construct the annotated-fq-name of the input
        object.
        """
        fq_name = None
        if hasattr(cls, 'ann_fq_name_key'):
            fq_name = []
            fq_name_key = cls.ann_fq_name_infra_key + cls.ann_fq_name_key
            if obj_dict.get('annotations') and\
               obj_dict['annotations'].get('key_value_pair'):
                kvps = obj_dict['annotations']['key_value_pair']
                for elem in fq_name_key:
                    for kvp in kvps:
                        if kvp.get("key") != elem:
                            continue
                        fq_name.append(kvp.get("value"))
                        break
        return fq_name
    @classmethod
    def _get_ann_fq_name_from_params(cls, **kwargs):
        """Construct annotated fully qualified name using input params."""
        fq_name = []
        fq_name_key = cls.ann_fq_name_infra_key + cls.ann_fq_name_key
        for elem in fq_name_key:
            for key, value in kwargs.items():
                if key != elem:
                    continue
                fq_name.append(value)
                break
        return fq_name
    @classmethod
    def get_ann_fq_name_to_uuid(cls, vnc_caller, namespace, name,
                                k8s_type=None, **kwargs):
        """Get vnc object uuid corresponding to an annotated-fq-name.
        The annotated-fq-name is constructed from the input params given
        by the caller.
        """
        # Construct annotations based on input params.
        annotations = cls._get_annotations(vnc_caller, namespace, name,
                                           k8s_type, **kwargs)
        # Validate that annotations has all info required for construction
        # of annotated-fq-name.
        if hasattr(cls, 'ann_fq_name_key'):
            if not set(cls.ann_fq_name_key).issubset(annotations):
                err_msg = "Annotations required to contruct kube_fq_name for"+\
                    " object (%s:%s) was not found in input keyword args." %\
                    (namespace, name)
                raise Exception(err_msg)
        # Lookup annotated-fq-name in annotated-fq-name to uuid table.
        return cls._ann_fq_name_to_uuid.get(
            tuple(cls._get_ann_fq_name_from_params(**annotations)))
    @classmethod
    def _update_ann_fq_name_to_uuid(cls, uuid, ann_fq_name):
        # Record the annotated-fq-name -> uuid mapping.
        cls._ann_fq_name_to_uuid[tuple(ann_fq_name)] = uuid
    def build_fq_name_to_uuid(self, uuid, obj_dict):
        """Populate uuid in all tables tracking uuid."""
        if not obj_dict:
            return
        # Update annotated-fq-name to uuid table.
        self.ann_fq_name = self._get_ann_fq_name_from_obj(obj_dict)
        if self.ann_fq_name:
            self._update_ann_fq_name_to_uuid(uuid, self.ann_fq_name)
        # Update vnc fq-name to uuid table.
        self._update_fq_name_to_uuid(uuid, obj_dict)
    @classmethod
    def delete(cls, uuid):
        """Remove the object's entries from both fq-name lookup tables."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        if obj.ann_fq_name:
            if tuple(obj.ann_fq_name) in cls._ann_fq_name_to_uuid:
                del cls._ann_fq_name_to_uuid[tuple(obj.ann_fq_name)]
        if tuple(obj.fq_name) in cls._fq_name_to_uuid:
            del cls._fq_name_to_uuid[tuple(obj.fq_name)]
    def evaluate(self):
        # Implement in the derived class
        pass
    @staticmethod
    def is_nested():
        """Return nested mode enable/disable config value."""
        return DBBaseKM._nested_mode
    @staticmethod
    def set_nested(val):
        """Configured nested mode value.
        True : Enable nested mode.
        False : Disable nested mode.
        """
        DBBaseKM._nested_mode = val
    @classmethod
    def objects(cls):
        # Get all vnc objects of this class.
        return list(cls._dict.values())
    @staticmethod
    def _build_annotation_dict(annotation_dict):
        # Flatten a vnc KeyValuePairs dict into a plain {key: value} dict.
        return {str(annot['key']): str(annot['value'])
                for annot
                in annotation_dict['key_value_pair']} \
            if annotation_dict and annotation_dict.get('key_value_pair') \
            else {}
    @staticmethod
    def _build_string_dict(src_dict):
        # Stringify all keys and values of src_dict (None yields {}).
        dst_dict = {}
        if src_dict:
            for key, value in src_dict.items():
                dst_dict[str(key)] = str(value)
        return dst_dict
    @staticmethod
    def _build_cls_uuid_list(cls, collection):
        # Wrap each uuid in *collection* with the given introspect class.
        return [cls(str(list(collection)[i]))
                for i in range(len(collection))] \
            if collection else []
class LoadbalancerKM(DBBaseKM):
    # DB model for vnc Loadbalancer objects created for k8s services/ingresses.
    _dict = {}
    obj_type = 'loadbalancer'
    ann_fq_name_key = ["kind", "name"]
    _ann_fq_name_to_uuid = {}
    _fq_name_to_uuid = {}
    def __init__(self, uuid, obj_dict=None):
        super(LoadbalancerKM, self).__init__(uuid, obj_dict)
        self.uuid = uuid
        self.virtual_machine_interfaces = set()
        self.loadbalancer_listeners = set()
        self.selectors = None
        self.annotations = None
        self.external_ip = None
        # Name/namespace of the owning k8s service (set from annotations).
        self.service_name = None
        self.service_namespace = None
        self.firewall_rule_uuids = set()
        obj_dict = self.update(obj_dict)
    def update(self, obj=None):
        """Refresh cached fields from the vnc object dict (read if None)."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.parent_uuid = obj['parent_uuid']
        self.annotations = obj.get('annotations', None)
        self.build_fq_name_to_uuid(self.uuid, obj)
        self.update_multiple_refs('virtual_machine_interface', obj)
        self.update_multiple_refs('loadbalancer_listener', obj)
        name = None
        namespace = None
        owner = None
        # Extract k8s metadata stashed in the object's annotations.
        if self.annotations:
            for kvp in self.annotations['key_value_pair'] or []:
                if kvp['key'] == 'externalIP':
                    self.external_ip = kvp['value']
                elif kvp['key'] == 'name':
                    name = kvp['value']
                elif kvp['key'] == 'namespace':
                    namespace = kvp['value']
                elif kvp['key'] == 'owner':
                    owner = kvp['value']
            # Only k8s-owned loadbalancers map back to a service.
            if owner == 'k8s':
                self.service_name = name
                self.service_namespace = namespace
        return obj
    def add_firewall_rule(self, fw_uuid):
        # Track a firewall rule associated with this loadbalancer.
        if fw_uuid:
            self.firewall_rule_uuids.add(fw_uuid)
    def remove_firewall_rule(self, fw_uuid):
        if fw_uuid in self.firewall_rule_uuids:
            self.firewall_rule_uuids.remove(fw_uuid)
    def get_firewall_rules(self):
        return self.firewall_rule_uuids
    @classmethod
    def delete(cls, uuid):
        """Drop refs and remove the object from all lookup tables."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_multiple_refs('virtual_machine_interface', {})
        obj.update_multiple_refs('loadbalancer_listener', {})
        super(LoadbalancerKM, cls).delete(uuid)
        del cls._dict[uuid]
    @classmethod
    def sandesh_handle_db_list_request(cls, req):
        """ Reply to Loadbalancer DB lookup/introspect request. """
        lb_resp = introspect.LoadbalancerDatabaseListResp(lbs=[])
        # Iterate through all elements of Loadbalancer DB.
        for lb in LoadbalancerKM.objects():
            # If the request is for a specific entry, then locate the entry.
            if req.lb_uuid and req.lb_uuid != lb.uuid:
                continue
            lb_annotations = cls._build_annotation_dict(lb.annotations)
            lb_listeners = cls._build_cls_uuid_list(
                introspect.LbListenerUuid, lb.loadbalancer_listeners)
            vmis = cls._build_cls_uuid_list(
                introspect.VMIUuid, lb.virtual_machine_interfaces)
            # Construct response for an element.
            # Ingress-backed loadbalancers report under a different field.
            if 'Ingress' in lb.ann_fq_name:
                lb_instance = introspect.LoadbalancerInstance(
                    uuid_to_ingress=lb.uuid,
                    name=lb.fq_name[-1],
                    fq_name=lb.fq_name,
                    annotations=lb_annotations,
                    external_ip=str(lb.external_ip),
                    lb_listeners=lb_listeners,
                    selectors=None,
                    vm_interfaces=vmis)
            else:
                lb_instance = introspect.LoadbalancerInstance(
                    uuid_to_service=lb.uuid,
                    name=lb.fq_name[-1],
                    fq_name=lb.fq_name,
                    annotations=lb_annotations,
                    external_ip=str(lb.external_ip),
                    lb_listeners=lb_listeners,
                    selectors=None,
                    vm_interfaces=vmis)
            # Append the constructed element info to the response.
            lb_resp.lbs.append(lb_instance)
        # Send the reply out.
        lb_resp.response(req.context())
class LoadbalancerListenerKM(DBBaseKM):
    # DB model for vnc LoadbalancerListener objects (one per service port).
    _dict = {}
    obj_type = 'loadbalancer_listener'
    _ann_fq_name_to_uuid = {}
    _fq_name_to_uuid = {}
    def __init__(self, uuid, obj_dict=None):
        super(LoadbalancerListenerKM, self).__init__(uuid, obj_dict)
        self.uuid = uuid
        self.loadbalancer = None
        self.loadbalancer_pool = None
        self.port_name = None
        self.update(obj_dict)
    # end __init__
    def update(self, obj=None):
        """Refresh cached fields from the vnc object dict (read if None)."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.display_name = obj.get('display_name', None)
        self.parent_uuid = obj['parent_uuid']
        self.id_perms = obj.get('id_perms', None)
        self.params = obj.get('loadbalancer_listener_properties', None)
        self.update_single_ref('loadbalancer', obj)
        self.update_single_ref('loadbalancer_pool', obj)
        self.annotations = obj.get('annotations', None)
        self.build_fq_name_to_uuid(self.uuid, obj)
        # The k8s service port name is stashed in the annotations.
        if self.annotations:
            for kvp in self.annotations['key_value_pair'] or []:
                if kvp['key'] == 'portName':
                    self.port_name = kvp['value']
                    break
    # end update
    @classmethod
    def delete(cls, uuid):
        """Drop refs and remove the listener from the object table."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_single_ref('loadbalancer', {})
        obj.update_single_ref('loadbalancer_pool', {})
        # NOTE(review): unlike LoadbalancerKM.delete, this does not call
        # super().delete(uuid), so the fq-name lookup tables are not cleaned
        # up here -- confirm whether that is intentional.
        del cls._dict[uuid]
    # end delete
    @classmethod
    def sandesh_handle_db_list_request(cls, req):
        """ Reply to LoadbalancerListener DB lookup/introspect request. """
        lbl_resp = introspect.LoadbalancerListenerDatabaseListResp(lbls=[])
        # Iterate through all elements of LoadbalancerListener DB.
        for lbl in LoadbalancerListenerKM.objects():
            # If the request is for a specific entry, then locate the entry.
            if req.lbl_uuid and req.lbl_uuid != lbl.uuid:
                continue
            lbl_annotations = cls._build_annotation_dict(lbl.annotations)
            id_perms = cls._build_string_dict(lbl.id_perms)
            # Construct response for an element.
            lbl_instance = introspect.LoadbalancerListenerInstance(
                uuid=lbl.uuid,
                name=lbl.display_name,
                fq_name=[lbl.display_name],
                annotations=lbl_annotations,
                id_perms=id_perms,
                loadbalancer=lbl.loadbalancer,
                loadbalancer_pool=lbl.loadbalancer_pool,
                port_name=lbl.port_name,
                parent_uuid=lbl.parent_uuid)
            # Append the constructed element info to the response.
            lbl_resp.lbls.append(lbl_instance)
        # Send the reply out.
        lbl_resp.response(req.context())
# end class LoadbalancerListenerKM
class LoadbalancerPoolKM(DBBaseKM):
_dict = {}
obj_type = 'loadbalancer_pool'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
    def __init__(self, uuid, obj_dict=None):
        """Build the cached loadbalancer-pool state for *uuid*.

        If obj_dict is None, update() re-reads the object from the API
        server via read_obj().
        """
        super(LoadbalancerPoolKM, self).__init__(uuid, obj_dict)
        self.uuid = uuid
        # uuids of the member objects belonging to this pool.
        self.members = set()
        # Associated loadbalancer-listener, if any.
        self.loadbalancer_listener = None
        self.custom_attributes = []
        # Populate the fields above from obj_dict (or a fresh read).
        self.update(obj_dict)
    # end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
| |
6 7 8 9 10
11
0.21658738559361795 0.79306527522919623 -0.21658738559361673
-3.4945260899414864e-017 0.79306527522919623 -0.30630081814542448
-0.21658738559361776 0.79306527522919623 -0.21658738559361687
-0.30630081814542537 0.79306527522919623 7.9942010564300995e-016
-0.21658738559361781 0.79306527522919623 0.21658738559361859
-9.2294414310233125e-017 0.79306527522919623 0.30630081814542631
0.21658738559361762 0.79306527522919623 0.21658738559361868
0.30630081814542537 0.79306527522919623 1.0526931866375961e-015
0.21658738559361795 0.79306527522919623 -0.21658738559361673
-3.4945260899414864e-017 0.79306527522919623 -0.30630081814542448
-0.21658738559361776 0.79306527522919623 -0.21658738559361687
;
createNode transform -n "tongueLeftB_ctlGp" -p "tongueCenterBRoot_ctl";
setAttr ".t" -type "double3" 1.2170171250357078 -2.1090275680535342e-005 -7.0950295123495266e-006 ;
setAttr ".r" -type "double3" -5.3550608275535737e-012 4.9437415062718009e-005 -3.0794304942787886e-021 ;
setAttr ".s" -type "double3" 0.99999987719358707 0.99999999999999989 1 ;
createNode transform -n "tongueLeftB_ctl" -p "tongueLeftB_ctlGp";
setAttr ".ove" yes;
setAttr ".ovc" 13;
createNode nurbsCurve -n "tongueLeftB_ctlShape" -p "tongueLeftB_ctl";
setAttr -k off ".v";
setAttr ".cc" -type "nurbsCurve"
3 8 2 no 3
13 -2 -1 0 1 2 3 4 5 6 7 8 9 10
11
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
-0.30630081814542537 0.60610587390425508 -8.8758314057115255e-017
-0.21658738559361781 0.60610587390425508 0.2165873855936177
-9.2294414310233125e-017 0.60610587390425508 0.30630081814542542
0.21658738559361762 0.60610587390425508 0.21658738559361779
0.30630081814542537 0.60610587390425508 1.6451476693747092e-016
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
;
createNode transform -n "tongueRightB_ctlGp" -p "tongueCenterBRoot_ctl";
setAttr ".t" -type "double3" -1.2170609287098897 -2.1090275737378761e-005 -4.9947958116192126e-006 ;
setAttr ".r" -type "double3" -5.3550608275535737e-012 4.9437415062718009e-005 -3.0794304942787886e-021 ;
setAttr ".s" -type "double3" 0.99999987719358707 0.99999999999999989 1 ;
createNode transform -n "tongueRightB_ctl" -p "tongueRightB_ctlGp";
setAttr ".ove" yes;
setAttr ".ovc" 13;
createNode nurbsCurve -n "tongueRightB_ctlShape" -p "tongueRightB_ctl";
setAttr -k off ".v";
setAttr ".cc" -type "nurbsCurve"
3 8 2 no 3
13 -2 -1 0 1 2 3 4 5 6 7 8 9 10
11
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
-0.30630081814542537 0.60610587390425508 -8.8758314057115255e-017
-0.21658738559361781 0.60610587390425508 0.2165873855936177
-9.2294414310233125e-017 0.60610587390425508 0.30630081814542542
0.21658738559361762 0.60610587390425508 0.21658738559361779
0.30630081814542537 0.60610587390425508 1.6451476693747092e-016
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
;
createNode transform -n "tongueCenterCRoot_ctlGp" -p "tongue_ctl_offset_Gp";
setAttr ".t" -type "double3" 2.4804080377294042 -0.62740505718511486 5.4964670452021982e-006 ;
setAttr ".r" -type "double3" -5.3614208018684395e-012 89.999950562590996 0 ;
setAttr ".s" -type "double3" 1.0000001228064284 1.0000000000000002 1 ;
createNode transform -n "tongueCenterCRoot_ctl" -p "tongueCenterCRoot_ctlGp";
setAttr ".ove" yes;
setAttr ".ovc" 17;
setAttr ".rp" -type "double3" 0 0 -8.8817841970012523e-016 ;
setAttr ".sp" -type "double3" 0 0 -8.8817841970012523e-016 ;
createNode nurbsCurve -n "tongueCenterCRoot_ctlShape" -p "tongueCenterCRoot_ctl";
setAttr -k off ".v";
setAttr ".cc" -type "nurbsCurve"
3 8 2 no 3
13 -2 -1 0 1 2 3 4 5 6 7 8 9 10
11
1.3770070964908447 0.52373561260812318 1.2361919986268345e-016
-2.2217301397924177e-016 0.7406740064481897 1.7482394901551732e-016
-1.3770070964908434 0.5237356126081234 1.2361919986268355e-016
-1.9473821113413483 2.1462879686810942e-016 5.065960667482514e-032
-1.3770070964908439 -0.52373561260812329 -1.2361919986268347e-016
-5.8678423548689843e-016 -0.74067400644818981 -1.7482394901551735e-016
1.3770070964908425 -0.52373561260812351 -1.2361919986268357e-016
1.9473821113413483 -3.9781745372163516e-016 -9.3898284051337207e-032
1.3770070964908447 0.52373561260812318 1.2361919986268345e-016
-2.2217301397924177e-016 0.7406740064481897 1.7482394901551732e-016
-1.3770070964908434 0.5237356126081234 1.2361919986268355e-016
;
createNode transform -n "tongueCenterC_ctlGp" -p "tongueCenterCRoot_ctl";
setAttr ".t" -type "double3" 8.0785465294863938e-007 -2.109027579422218e-005 -6.0449322498712377e-006 ;
setAttr ".r" -type "double3" -5.3550608275535809e-012 4.9437415062718022e-005 -9.1458578625526729e-021 ;
setAttr ".s" -type "double3" 0.99999987719358674 0.99999999999999967 0.99999999999999978 ;
createNode transform -n "tongueCenterC_ctl" -p "tongueCenterC_ctlGp";
setAttr ".ove" yes;
setAttr ".ovc" 17;
createNode nurbsCurve -n "tongueCenterC_ctlShape" -p "tongueCenterC_ctl";
setAttr -k off ".v";
setAttr ".cc" -type "nurbsCurve"
3 8 2 no 3
13 -2 -1 0 1 2 3 4 5 6 7 8 9 10
11
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
-0.30630081814542537 0.60610587390425508 -8.8758314057115255e-017
-0.21658738559361781 0.60610587390425508 0.2165873855936177
-9.2294414310233125e-017 0.60610587390425508 0.30630081814542542
0.21658738559361762 0.60610587390425508 0.21658738559361779
0.30630081814542537 0.60610587390425508 1.6451476693747092e-016
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
;
createNode transform -n "tongueLeftC_ctlGp" -p "tongueCenterCRoot_ctl";
setAttr ".t" -type "double3" 1.0687414676999385 -2.109027579422218e-005 -6.967090497411732e-006 ;
setAttr ".r" -type "double3" -5.3550608275535809e-012 4.9437415062718022e-005 -9.1458578625526729e-021 ;
setAttr ".s" -type "double3" 0.99999987719358674 0.99999999999999967 0.99999999999999978 ;
createNode transform -n "tongueLeftC_ctl" -p "tongueLeftC_ctlGp";
setAttr ".ove" yes;
setAttr ".ovc" 13;
createNode nurbsCurve -n "tongueLeftC_ctlShape" -p "tongueLeftC_ctl";
setAttr -k off ".v";
setAttr ".cc" -type "nurbsCurve"
3 8 2 no 3
13 -2 -1 0 1 2 3 4 5 6 7 8 9 10
11
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
-0.30630081814542537 0.60610587390425508 -8.8758314057115255e-017
-0.21658738559361781 0.60610587390425508 0.2165873855936177
-9.2294414310233125e-017 0.60610587390425508 0.30630081814542542
0.21658738559361762 0.60610587390425508 0.21658738559361779
0.30630081814542537 0.60610587390425508 1.6451476693747092e-016
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
;
createNode transform -n "tongueRightC_ctlGp" -p "tongueCenterCRoot_ctl";
setAttr ".t" -type "double3" -1.0687858298653841 -2.109027579422218e-005 -5.122734330953449e-006 ;
setAttr ".r" -type "double3" -5.3550608275535809e-012 4.9437415062718022e-005 -9.1458578625526729e-021 ;
setAttr ".s" -type "double3" 0.99999987719358674 0.99999999999999967 0.99999999999999978 ;
createNode transform -n "tongueRightC_ctl" -p "tongueRightC_ctlGp";
setAttr ".ove" yes;
setAttr ".ovc" 13;
createNode nurbsCurve -n "tongueRightC_ctlShape" -p "tongueRightC_ctl";
setAttr -k off ".v";
setAttr ".cc" -type "nurbsCurve"
3 8 2 no 3
13 -2 -1 0 1 2 3 4 5 6 7 8 9 10
11
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
-0.30630081814542537 0.60610587390425508 -8.8758314057115255e-017
-0.21658738559361781 0.60610587390425508 0.2165873855936177
-9.2294414310233125e-017 0.60610587390425508 0.30630081814542542
0.21658738559361762 0.60610587390425508 0.21658738559361779
0.30630081814542537 0.60610587390425508 1.6451476693747092e-016
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
;
createNode transform -n "tongueCenterDRoot_ctlGp" -p "tongue_ctl_offset_Gp";
setAttr ".t" -type "double3" 3.6366313348485377 -0.77649868511463183 5.7503347156583247e-006 ;
setAttr ".r" -type "double3" -5.3614208018684395e-012 89.999950562590996 0 ;
setAttr ".s" -type "double3" 1.0000001228064284 1.0000000000000002 1 ;
createNode transform -n "tongueCenterDRoot_ctl" -p "tongueCenterDRoot_ctlGp";
setAttr ".ove" yes;
setAttr ".ovc" 17;
setAttr ".rp" -type "double3" 2.7105054312137611e-020 0 -1.7763568394002505e-015 ;
setAttr ".sp" -type "double3" 2.7105054312137611e-020 0 -1.7763568394002505e-015 ;
createNode nurbsCurve -n "tongueCenterDRoot_ctlShape" -p "tongueCenterDRoot_ctl";
setAttr -k off ".v";
setAttr ".cc" -type "nurbsCurve"
3 8 2 no 3
13 -2 -1 0 1 2 3 4 5 6 7 8 9 10
11
1.0971490830928077 0.45831509323914349 1.3814547323995889e-016
-1.7701936264270462e-016 0.64815542069908683 1.953672018363995e-016
-1.0971490830928066 0.45831509323914371 1.3814547323995899e-016
-1.5516031132550532 1.8781922535028139e-016 5.6612527390710642e-032
-1.097149083092807 -0.45831509323914366 -1.3814547323995891e-016
-4.6752829929373208e-016 -0.64815542069908694 -1.9536720183639953e-016
1.0971490830928059 -0.45831509323914382 -1.3814547323995904e-016
1.5516031132550532 -3.4812554083659805e-016 -1.0493210521586435e-031
1.0971490830928077 0.45831509323914349 1.3814547323995889e-016
-1.7701936264270462e-016 0.64815542069908683 1.953672018363995e-016
-1.0971490830928066 0.45831509323914371 1.3814547323995899e-016
;
createNode transform -n "tongueCenterD_ctlGp" -p "tongueCenterDRoot_ctl";
setAttr ".t" -type "double3" 1.0617222921795264e-006 -2.1090275993174146e-005 -6.04493246214588e-006 ;
setAttr ".r" -type "double3" -5.3550608275535745e-012 4.9437415062718029e-005 -3.0794304942799964e-021 ;
setAttr ".s" -type "double3" 0.99999987719358685 0.99999999999999989 1.0000000000000002 ;
createNode transform -n "tongueCenterD_ctl" -p "tongueCenterD_ctlGp";
setAttr ".ove" yes;
setAttr ".ovc" 17;
createNode nurbsCurve -n "tongueCenterD_ctlShape" -p "tongueCenterD_ctl";
setAttr -k off ".v";
setAttr ".cc" -type "nurbsCurve"
3 8 2 no 3
13 -2 -1 0 1 2 3 4 5 6 7 8 9 10
11
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
-0.30630081814542537 0.60610587390425508 -8.8758314057115255e-017
-0.21658738559361781 0.60610587390425508 0.2165873855936177
-9.2294414310233125e-017 0.60610587390425508 0.30630081814542542
0.21658738559361762 0.60610587390425508 0.21658738559361779
0.30630081814542537 0.60610587390425508 1.6451476693747092e-016
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
;
createNode transform -n "tongueLeftD_ctlGp" -p "tongueCenterDRoot_ctl";
setAttr ".t" -type "double3" 0.82193818750374648 0.026994595759163076 -0.11806686613690509 ;
setAttr ".r" -type "double3" -5.3550608275535745e-012 4.9437415062718029e-005 -3.0794304942799964e-021 ;
setAttr ".s" -type "double3" 0.99999987719358685 0.99999999999999989 1.0000000000000002 ;
createNode transform -n "tongueLeftD_ctl" -p "tongueLeftD_ctlGp";
setAttr ".ove" yes;
setAttr ".ovc" 13;
createNode nurbsCurve -n "tongueLeftD_ctlShape" -p "tongueLeftD_ctl";
setAttr -k off ".v";
setAttr ".cc" -type "nurbsCurve"
3 8 2 no 3
13 -2 -1 0 1 2 3 4 5 6 7 8 9 10
11
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
-0.30630081814542537 0.60610587390425508 -8.8758314057115255e-017
-0.21658738559361781 0.60610587390425508 0.2165873855936177
-9.2294414310233125e-017 0.60610587390425508 0.30630081814542542
0.21658738559361762 0.60610587390425508 0.21658738559361779
0.30630081814542537 0.60610587390425508 1.6451476693747092e-016
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
;
createNode transform -n "tongueRightD_ctlGp" -p "tongueCenterDRoot_ctl";
setAttr ".t" -type "double3" -0.82198170817458127 0.026994595759191498 -0.11806544768774607 ;
setAttr ".r" -type "double3" -5.3550608275535745e-012 4.9437415062718029e-005 -3.0794304942799964e-021 ;
setAttr ".s" -type "double3" 0.99999987719358685 0.99999999999999989 1.0000000000000002 ;
createNode transform -n "tongueRightD_ctl" -p "tongueRightD_ctlGp";
setAttr ".ove" yes;
setAttr ".ovc" 13;
createNode nurbsCurve -n "tongueRightD_ctlShape" -p "tongueRightD_ctl";
setAttr -k off ".v";
setAttr ".cc" -type "nurbsCurve"
3 8 2 no 3
13 -2 -1 0 1 2 3 4 5 6 7 8 9 10
11
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
-0.30630081814542537 0.60610587390425508 -8.8758314057115255e-017
-0.21658738559361781 0.60610587390425508 0.2165873855936177
-9.2294414310233125e-017 0.60610587390425508 0.30630081814542542
0.21658738559361762 0.60610587390425508 0.21658738559361779
0.30630081814542537 0.60610587390425508 1.6451476693747092e-016
0.21658738559361795 0.60610587390425508 -0.21658738559361762
-3.4945260899414864e-017 0.60610587390425508 -0.30630081814542537
-0.21658738559361776 0.60610587390425508 -0.21658738559361776
;
'''
facial_bs_Panel = '''
createNode transform -n "facial_panelGp" -p "ctlGp";
setAttr ".t" -type "double3" 25.301668360178851 191.88146002926575 9.7810824328803392e-008 ;
setAttr ".r" -type "double3" 1.5902773407317584e-014 6.3611093629270304e-015 2.5444437451708134e-014 ;
setAttr ".s" -type "double3" 1.0000002420157659 1.0000001192092984 1.0000001192093109 ;
createNode transform -n "facial_panel" -p "facial_panelGp";
setAttr ".ove" yes;
setAttr ".ovc" 17;
createNode nurbsCurve -n "facial_panelShape" -p "facial_panel";
setAttr -k off ".v";
setAttr ".cc" -type "nurbsCurve"
1 4 0 no 3
5 0 1 2 3 4
5
3.9505976766568374 -8.0466389120191746 1.277598371340804e-015
-3.9505976766568374 -8.0466389120191746 1.277598371340804e-015
-3.9505976766568374 5.2174868562978434 -1.2775983713408044e-015
3.9505976766568374 5.2174868562978434 -1.2775983713408044e-015
3.9505976766568374 -8.0466389120191746 1.277598371340804e-015
;
createNode transform -n "facial_ctlGp" -p "facial_panel";
setAttr ".t" -type "double3" 0 0.59392984596704679 0 ;
setAttr ".rp" -type "double3" 0 5.3453686137034424 0 ;
setAttr ".sp" -type "double3" 0 5.3453686137034424 0 ;
createNode transform -n "facial_ctl" -p "facial_ctlGp";
addAttr -is true -ci true -h true -k true -sn "MaxHandle" -ln "MaxHandle" -smn
0 -smx 0 -at "long";
setAttr -l on -k off ".v";
setAttr ".ove" yes;
setAttr ".ovc" 17;
setAttr -l on | |
"""
return _Volume.CVolume8_copy(self, *args)
    # NOTE(review): SWIG-generated proxy methods for the 8-bit volume class;
    # every method forwards straight to the compiled extension module
    # `_Volume`. Generated code -- regenerate from the SWIG interface
    # rather than editing by hand.
    def makeRef(self, *args):
        """
        makeRef(self, Volume) -> CVolume8
        makeRef(self, Volume, x, y, z, XSize, YSize, ZSize) -> CVolume8
        makeRef(self, Volume) -> CVolume8
        """
        return _Volume.CVolume8_makeRef(self, *args)
    # --- Size / offset / margin accessors ---
    def getSize(self, *args):
        """
        getSize(self) -> CSize3_int
        getSize(self) -> CSize3_int
        """
        return _Volume.CVolume8_getSize(self, *args)
    def getXSize(self):
        """getXSize(self) -> vpl::tSize"""
        return _Volume.CVolume8_getXSize(self)
    def getYSize(self):
        """getYSize(self) -> vpl::tSize"""
        return _Volume.CVolume8_getYSize(self)
    def getZSize(self):
        """getZSize(self) -> vpl::tSize"""
        return _Volume.CVolume8_getZSize(self)
    def width(self):
        """width(self) -> vpl::tSize"""
        return _Volume.CVolume8_width(self)
    def height(self):
        """height(self) -> vpl::tSize"""
        return _Volume.CVolume8_height(self)
    def depth(self):
        """depth(self) -> vpl::tSize"""
        return _Volume.CVolume8_depth(self)
    def getXOffset(self):
        """getXOffset(self) -> vpl::tSize"""
        return _Volume.CVolume8_getXOffset(self)
    def getYOffset(self):
        """getYOffset(self) -> vpl::tSize"""
        return _Volume.CVolume8_getYOffset(self)
    def getZOffset(self):
        """getZOffset(self) -> vpl::tSize"""
        return _Volume.CVolume8_getZOffset(self)
    def getMargin(self):
        """getMargin(self) -> vpl::tSize"""
        return _Volume.CVolume8_getMargin(self)
    def getIdx(self, x, y, z):
        """getIdx(self, x, y, z) -> vpl::tSize"""
        return _Volume.CVolume8_getIdx(self, x, y, z)
    # --- Voxel access ---
    def __call__(self, *args):
        """
        __call__(self, x, y, z) -> unsigned __int8
        __call__(self, x, y, z) -> unsigned __int8 const
        __call__(self, i) -> unsigned __int8
        __call__(self, i) -> unsigned __int8 const &
        """
        return _Volume.CVolume8___call__(self, *args)
    def at(self, *args):
        """
        at(self, x, y, z) -> unsigned __int8 const
        at(self, i) -> unsigned __int8 const &
        """
        return _Volume.CVolume8_at(self, *args)
    def set(self, *args):
        """
        set(self, x, y, z, Value) -> CVolume8
        set(self, i, Value) -> CVolume8
        """
        return _Volume.CVolume8_set(self, *args)
    def getPtr(self, *args):
        """
        getPtr(self) -> unsigned __int8
        getPtr(self) -> unsigned __int8 const
        getPtr(self, x, y, z) -> unsigned __int8
        getPtr(self, x, y, z) -> unsigned __int8 const *
        """
        return _Volume.CVolume8_getPtr(self, *args)
    def getRowPtr(self, *args):
        """
        getRowPtr(self, y, z) -> unsigned __int8
        getRowPtr(self, y, z) -> unsigned __int8 const *
        """
        return _Volume.CVolume8_getRowPtr(self, *args)
    def rect(self, *args):
        """
        rect(self, Position, Size) -> vpl::img::CVolume< unsigned __int8,vpl::base::CPartedData >::tRect
        rect(self, Position, Size) -> vpl::img::CVolume< unsigned __int8,vpl::base::CPartedData >::tRect const
        rect(self, XRange, YRange, ZRange) -> vpl::img::CVolume< unsigned __int8,vpl::base::CPartedData >::tRect
        rect(self, XRange, YRange, ZRange) -> vpl::img::CVolume< unsigned __int8,vpl::base::CPartedData >::tRect const
        """
        return _Volume.CVolume8_rect(self, *args)
    def row(self, *args):
        """
        row(self, y, z) -> vpl::img::CVolume< unsigned __int8,vpl::base::CPartedData >::tRow
        row(self, y, z) -> vpl::img::CVolume< unsigned __int8,vpl::base::CPartedData >::tRow const
        """
        return _Volume.CVolume8_row(self, *args)
    # --- Bulk fill / transform operations ---
    def fill(self, c):
        """fill(self, c) -> CVolume8"""
        return _Volume.CVolume8_fill(self, c)
    def fillEntire(self, c):
        """fillEntire(self, c) -> CVolume8"""
        return _Volume.CVolume8_fillEntire(self, c)
    def fillMargin(self, c):
        """fillMargin(self, c) -> CVolume8"""
        return _Volume.CVolume8_fillMargin(self, c)
    def mirrorMargin(self):
        """mirrorMargin(self) -> CVolume8"""
        return _Volume.CVolume8_mirrorMargin(self)
    def replace(self, Value, NewValue):
        """replace(self, Value, NewValue) -> CVolume8"""
        return _Volume.CVolume8_replace(self, Value, NewValue)
    def abs(self):
        """abs(self) -> CVolume8"""
        return _Volume.CVolume8_abs(self)
    def limit(self, Lower, Upper):
        """limit(self, Lower, Upper) -> CVolume8"""
        return _Volume.CVolume8_limit(self, Lower, Upper)
    def cut(self, Lower, Upper):
        """cut(self, Lower, Upper) -> CVolume8"""
        return _Volume.CVolume8_cut(self, Lower, Upper)
    def subSample(self, Volume, l=2, m=2, n=2):
        """
        subSample(self, Volume, l=2, m=2, n=2) -> CVolume8
        subSample(self, Volume, l=2, m=2) -> CVolume8
        subSample(self, Volume, l=2) -> CVolume8
        subSample(self, Volume) -> CVolume8
        """
        return _Volume.CVolume8_subSample(self, Volume, l, m, n)
    def interpolate(self, Point):
        """interpolate(self, Point) -> unsigned __int8"""
        return _Volume.CVolume8_interpolate(self, Point)
    def color2Voxel(self, Color):
        """color2Voxel(self, Color) -> unsigned __int8"""
        return _Volume.CVolume8_color2Voxel(self, Color)
    def checkPosition(self, x, y, z):
        """checkPosition(self, x, y, z) -> bool"""
        return _Volume.CVolume8_checkPosition(self, x, y, z)
    # --- Orthogonal plane extraction / insertion ---
    def getPlaneXY(self, z, Plane):
        """getPlaneXY(self, z, Plane) -> bool"""
        return _Volume.CVolume8_getPlaneXY(self, z, Plane)
    def getPlaneXZ(self, y, Plane):
        """getPlaneXZ(self, y, Plane) -> bool"""
        return _Volume.CVolume8_getPlaneXZ(self, y, Plane)
    def getPlaneYZ(self, x, Plane):
        """getPlaneYZ(self, x, Plane) -> bool"""
        return _Volume.CVolume8_getPlaneYZ(self, x, Plane)
    def setPlaneXY(self, z, Plane):
        """setPlaneXY(self, z, Plane) -> bool"""
        return _Volume.CVolume8_setPlaneXY(self, z, Plane)
    def setPlaneXZ(self, y, Plane):
        """setPlaneXZ(self, y, Plane) -> bool"""
        return _Volume.CVolume8_setPlaneXZ(self, y, Plane)
    def setPlaneYZ(self, x, Plane):
        """setPlaneYZ(self, x, Plane) -> bool"""
        return _Volume.CVolume8_setPlaneYZ(self, x, Plane)
    def enableDummyMode(self, Enable):
        """enableDummyMode(self, Enable) -> CVolume8"""
        return _Volume.CVolume8_enableDummyMode(self, Enable)
    def isDummy(self):
        """isDummy(self) -> bool"""
        return _Volume.CVolume8_isDummy(self)
    def __disown__(self):
        # Transfer C++ ownership to the extension and hand back a weak proxy.
        self.this.disown()
        _Volume.disown_CVolume8(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime type system so wrapped
# C++ objects of this type are presented as CVolume8 instances.
CVolume8_swigregister = _Volume.CVolume8_swigregister
CVolume8_swigregister(CVolume8)
class CVolume16(VPLSwig.Core.Core.CObject, swig_base_Volume16, VPLSwig.Image.Image.CSerializable):
"""Proxy of C++ vpl::img::CVolume<(vpl::img::tPixel16,vpl::base::CPartedData)> class."""
__swig_setmethods__ = {}
for _s in [VPLSwig.Core.Core.CObject, swig_base_Volume16, VPLSwig.Image.Image.CSerializable]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, CVolume16, name, value)
__swig_getmethods__ = {}
for _s in [VPLSwig.Core.Core.CObject, swig_base_Volume16, VPLSwig.Image.Image.CSerializable]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, CVolume16, name)
__repr__ = _swig_repr
CLASS_VOLUME = _Volume.CVolume16_CLASS_VOLUME
ITERATOR_DECLARED = _Volume.CVolume16_ITERATOR_DECLARED
    # NOTE(review): SWIG-generated proxy methods for the 16-bit volume class;
    # identical shape to CVolume8 above, forwarding to `_Volume`. Generated
    # code -- regenerate from the SWIG interface rather than editing by hand.
    def __init__(self, *args):
        """
        __init__(self) -> CVolume16
        __init__(self, XSize, YSize, ZSize, Margin=0) -> CVolume16
        __init__(self, XSize, YSize, ZSize) -> CVolume16
        __init__(self, Size, Margin=0) -> CVolume16
        __init__(self, Size) -> CVolume16
        __init__(self, Volume, x, y, z, XSize, YSize, ZSize) -> CVolume16
        __init__(self, Volume, x, y, z, XSize, YSize, ZSize, arg9) -> CVolume16
        __init__(self, Volume) -> CVolume16
        __init__(self, Volume, arg3) -> CVolume16
        __init__(self, Volume) -> CVolume16
        __init__(self, Volume, arg3) -> CVolume16
        """
        # SWIG director wiring: forward self only for Python subclasses.
        if self.__class__ == CVolume16:
            _self = None
        else:
            _self = self
        this = _Volume.new_CVolume16(_self, *args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _Volume.delete_CVolume16
    __del__ = lambda self: None
    def resize(self, *args):
        """
        resize(self, XSize, YSize, ZSize, Margin=0) -> CVolume16
        resize(self, XSize, YSize, ZSize) -> CVolume16
        resize(self, Size, Margin=0) -> CVolume16
        resize(self, Size) -> CVolume16
        """
        return _Volume.CVolume16_resize(self, *args)
    def copy(self, *args):
        """
        copy(self, Volume, Margin=-1) -> CVolume16
        copy(self, Volume) -> CVolume16
        copy(self, Volume, x, y, z, XSize, YSize, ZSize, Margin=-1) -> CVolume16
        copy(self, Volume, x, y, z, XSize, YSize, ZSize) -> CVolume16
        copy(self, Volume, Margin=-1) -> CVolume16
        copy(self, Volume) -> CVolume16
        """
        return _Volume.CVolume16_copy(self, *args)
    def makeRef(self, *args):
        """
        makeRef(self, Volume) -> CVolume16
        makeRef(self, Volume, x, y, z, XSize, YSize, ZSize) -> CVolume16
        makeRef(self, Volume) -> CVolume16
        """
        return _Volume.CVolume16_makeRef(self, *args)
    # --- Size / offset / margin accessors ---
    def getSize(self, *args):
        """
        getSize(self) -> CSize3_int
        getSize(self) -> CSize3_int
        """
        return _Volume.CVolume16_getSize(self, *args)
    def getXSize(self):
        """getXSize(self) -> vpl::tSize"""
        return _Volume.CVolume16_getXSize(self)
    def getYSize(self):
        """getYSize(self) -> vpl::tSize"""
        return _Volume.CVolume16_getYSize(self)
    def getZSize(self):
        """getZSize(self) -> vpl::tSize"""
        return _Volume.CVolume16_getZSize(self)
    def width(self):
        """width(self) -> vpl::tSize"""
        return _Volume.CVolume16_width(self)
    def height(self):
        """height(self) -> vpl::tSize"""
        return _Volume.CVolume16_height(self)
    def depth(self):
        """depth(self) -> vpl::tSize"""
        return _Volume.CVolume16_depth(self)
    def getXOffset(self):
        """getXOffset(self) -> vpl::tSize"""
        return _Volume.CVolume16_getXOffset(self)
    def getYOffset(self):
        """getYOffset(self) -> vpl::tSize"""
        return _Volume.CVolume16_getYOffset(self)
    def getZOffset(self):
        """getZOffset(self) -> vpl::tSize"""
        return _Volume.CVolume16_getZOffset(self)
    def getMargin(self):
        """getMargin(self) -> vpl::tSize"""
        return _Volume.CVolume16_getMargin(self)
    def getIdx(self, x, y, z):
        """getIdx(self, x, y, z) -> vpl::tSize"""
        return _Volume.CVolume16_getIdx(self, x, y, z)
    # --- Voxel access ---
    def __call__(self, *args):
        """
        __call__(self, x, y, z) -> unsigned __int16
        __call__(self, x, y, z) -> unsigned __int16 const
        __call__(self, i) -> unsigned __int16
        __call__(self, i) -> unsigned __int16 const &
        """
        return _Volume.CVolume16___call__(self, *args)
    def at(self, *args):
        """
        at(self, x, y, z) -> unsigned __int16 const
        at(self, i) -> unsigned __int16 const &
        """
        return _Volume.CVolume16_at(self, *args)
    def set(self, *args):
        """
        set(self, x, y, z, Value) -> CVolume16
        set(self, i, Value) -> CVolume16
        """
        return _Volume.CVolume16_set(self, *args)
    def getPtr(self, *args):
        """
        getPtr(self) -> unsigned __int16
        getPtr(self) -> unsigned __int16 const
        getPtr(self, x, y, z) -> unsigned __int16
        getPtr(self, x, y, z) -> unsigned __int16 const *
        """
        return _Volume.CVolume16_getPtr(self, *args)
    def getRowPtr(self, *args):
        """
        getRowPtr(self, y, z) -> unsigned __int16
        getRowPtr(self, y, z) -> unsigned __int16 const *
        """
        return _Volume.CVolume16_getRowPtr(self, *args)
    def rect(self, *args):
        """
        rect(self, Position, Size) -> vpl::img::CVolume< unsigned __int16,vpl::base::CPartedData >::tRect
        rect(self, Position, Size) -> vpl::img::CVolume< unsigned __int16,vpl::base::CPartedData >::tRect const
        rect(self, XRange, YRange, ZRange) -> vpl::img::CVolume< unsigned __int16,vpl::base::CPartedData >::tRect
        rect(self, XRange, YRange, ZRange) -> vpl::img::CVolume< unsigned __int16,vpl::base::CPartedData >::tRect const
        """
        return _Volume.CVolume16_rect(self, *args)
    def row(self, *args):
        """
        row(self, y, z) -> vpl::img::CVolume< unsigned __int16,vpl::base::CPartedData >::tRow
        row(self, y, z) -> vpl::img::CVolume< unsigned __int16,vpl::base::CPartedData >::tRow const
        """
        return _Volume.CVolume16_row(self, *args)
    # --- Bulk fill / transform operations ---
    def fill(self, c):
        """fill(self, c) -> CVolume16"""
        return _Volume.CVolume16_fill(self, c)
    def fillEntire(self, c):
        """fillEntire(self, c) -> CVolume16"""
        return _Volume.CVolume16_fillEntire(self, c)
    def fillMargin(self, c):
        """fillMargin(self, c) -> CVolume16"""
        return _Volume.CVolume16_fillMargin(self, c)
    def mirrorMargin(self):
        """mirrorMargin(self) -> CVolume16"""
        return _Volume.CVolume16_mirrorMargin(self)
    def replace(self, Value, NewValue):
        """replace(self, Value, NewValue) -> CVolume16"""
        return _Volume.CVolume16_replace(self, Value, NewValue)
    def abs(self):
        """abs(self) -> CVolume16"""
        return _Volume.CVolume16_abs(self)
    def limit(self, Lower, Upper):
        """limit(self, Lower, Upper) -> CVolume16"""
        return _Volume.CVolume16_limit(self, Lower, Upper)
    def cut(self, Lower, Upper):
        """cut(self, Lower, Upper) -> CVolume16"""
        return _Volume.CVolume16_cut(self, Lower, Upper)
    def subSample(self, Volume, l=2, m=2, n=2):
        """
        subSample(self, Volume, l=2, m=2, n=2) -> CVolume16
        subSample(self, Volume, l=2, m=2) -> CVolume16
        subSample(self, Volume, l=2) -> CVolume16
        subSample(self, Volume) -> CVolume16
        """
        return _Volume.CVolume16_subSample(self, Volume, l, m, n)
def interpolate(self, Point):
"""interpolate(self, Point) -> | |
use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a,
act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b,
act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a,
act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b,
act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a,
act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b,
act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a,
act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b,
act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a,
act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b,
act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a,
act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b,
act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a,
act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b,
act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a,
act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b,
act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a,
act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b,
act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a,
act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b,
act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a,
act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b,
act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a,
act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b,
act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a,
act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b,
act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a,
act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b,
act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, | |
# <gh_stars>1-10
from collections import deque
from contextlib import contextmanager
from decimal import Decimal
from io import BytesIO
from unittest import TestCase
import json
import os
import sys
import tempfile
import unittest
from twisted.trial.unittest import SynchronousTestCase
import attr
from jsonschema import FormatChecker, TypeChecker, exceptions, validators
from jsonschema.compat import PY3, pathname2url
import jsonschema
def startswith(validator, startswith, instance, schema):
    """A toy validator callback: yield an error unless *instance* begins with the given prefix."""
    has_prefix = instance.startswith(startswith)
    if not has_prefix:
        yield exceptions.ValidationError(u"Whoops!")
class TestCreateAndExtend(SynchronousTestCase):
    """Tests for ``validators.create`` and ``validators.extend``."""
    def setUp(self):
        # Snapshot the global meta-schema registry now and verify at teardown
        # that the test left it unchanged (dict(...) copies the current state).
        self.addCleanup(
            self.assertEqual,
            validators.meta_schemas,
            dict(validators.meta_schemas),
        )
        # A minimal validator class built from one custom keyword
        # (the module-level ``startswith`` callback) and a bare TypeChecker.
        self.meta_schema = {u"$id": "some://meta/schema"}
        self.validators = {u"startswith": startswith}
        self.type_checker = TypeChecker()
        self.Validator = validators.create(
            meta_schema=self.meta_schema,
            validators=self.validators,
            type_checker=self.type_checker,
        )
    def test_attrs(self):
        # The class attributes exposed by create() are exactly what was passed in.
        self.assertEqual(
            (
                self.Validator.VALIDATORS,
                self.Validator.META_SCHEMA,
                self.Validator.TYPE_CHECKER,
            ), (
                self.validators,
                self.meta_schema,
                self.type_checker,
            ),
        )
    def test_init(self):
        # Instantiating a validator stores the given schema verbatim.
        schema = {u"startswith": u"foo"}
        self.assertEqual(self.Validator(schema).schema, schema)
    def test_iter_errors(self):
        # A conforming instance yields no errors; a non-conforming one yields
        # exactly the error our custom ``startswith`` callback produces.
        schema = {u"startswith": u"hel"}
        iter_errors = self.Validator(schema).iter_errors
        errors = list(iter_errors(u"hello"))
        self.assertEqual(errors, [])
        expected_error = exceptions.ValidationError(
            u"Whoops!",
            instance=u"goodbye",
            schema=schema,
            validator=u"startswith",
            validator_value=u"hel",
            schema_path=deque([u"startswith"]),
        )
        errors = list(iter_errors(u"goodbye"))
        self.assertEqual(len(errors), 1)
        # Compare full error contents, not just messages.
        self.assertEqual(errors[0]._contents(), expected_error._contents())
    def test_if_a_version_is_provided_it_is_registered(self):
        # Passing version= registers the class in the global registry and
        # derives the class name from the version string.
        Validator = validators.create(
            meta_schema={u"$id": "something"},
            version="my version",
        )
        self.addCleanup(validators.meta_schemas.pop, "something")
        self.assertEqual(Validator.__name__, "MyVersionValidator")
    def test_if_a_version_is_not_provided_it_is_not_registered(self):
        # Without version=, the global registry must stay untouched.
        original = dict(validators.meta_schemas)
        validators.create(meta_schema={u"id": "id"})
        self.assertEqual(validators.meta_schemas, original)
    def test_validates_registers_meta_schema_id(self):
        # Registration keys off the ID returned by the custom id_of callable
        # (here the draft-3/4 style "id" member).
        meta_schema_key = "meta schema id"
        my_meta_schema = {u"id": meta_schema_key}
        validators.create(
            meta_schema=my_meta_schema,
            version="my version",
            id_of=lambda s: s.get("id", ""),
        )
        self.addCleanup(validators.meta_schemas.pop, meta_schema_key)
        self.assertIn(meta_schema_key, validators.meta_schemas)
    def test_validates_registers_meta_schema_draft6_id(self):
        # Default id_of understands the draft-6+ "$id" member.
        meta_schema_key = "meta schema $id"
        my_meta_schema = {u"$id": meta_schema_key}
        validators.create(
            meta_schema=my_meta_schema,
            version="my version",
        )
        self.addCleanup(validators.meta_schemas.pop, meta_schema_key)
        self.assertIn(meta_schema_key, validators.meta_schemas)
    def test_create_default_types(self):
        # With no type_checker given, the default checker recognizes all
        # seven core JSON types.
        Validator = validators.create(meta_schema={}, validators=())
        self.assertTrue(
            all(
                Validator({}).is_type(instance=instance, type=type)
                for type, instance in [
                    (u"array", []),
                    (u"boolean", True),
                    (u"integer", 12),
                    (u"null", None),
                    (u"number", 12.0),
                    (u"object", {}),
                    (u"string", u"foo"),
                ]
            ),
        )
    def test_extend(self):
        # extend() merges new keyword callbacks into a copy of VALIDATORS,
        # keeps META_SCHEMA/TYPE_CHECKER, and leaves the parent class intact.
        original = dict(self.Validator.VALIDATORS)
        new = object()
        Extended = validators.extend(
            self.Validator,
            validators={u"new": new},
        )
        self.assertEqual(
            (
                Extended.VALIDATORS,
                Extended.META_SCHEMA,
                Extended.TYPE_CHECKER,
                self.Validator.VALIDATORS,
            ), (
                dict(original, new=new),
                self.Validator.META_SCHEMA,
                self.Validator.TYPE_CHECKER,
                original,
            ),
        )
    def test_extend_idof(self):
        """
        Extending a validator preserves its notion of schema IDs.
        """
        def id_of(schema):
            # Prefer a test-only "__test__" member, falling back to the
            # parent validator's ID_OF.
            return schema.get(u"__test__", self.Validator.ID_OF(schema))
        correct_id = "the://correct/id/"
        meta_schema = {
            u"$id": "the://wrong/id/",
            u"__test__": correct_id,
        }
        Original = validators.create(
            meta_schema=meta_schema,
            validators=self.validators,
            type_checker=self.type_checker,
            id_of=id_of,
        )
        self.assertEqual(Original.ID_OF(Original.META_SCHEMA), correct_id)
        # The derived class must inherit the custom ID_OF behavior.
        Derived = validators.extend(Original)
        self.assertEqual(Derived.ID_OF(Derived.META_SCHEMA), correct_id)
class TestLegacyTypeChecking(SynchronousTestCase):
    """Tests for the deprecated default_types/DEFAULT_TYPES/types machinery.

    Many tests here deliberately trigger DeprecationWarnings and then consume
    them via flushWarnings(); the call order and call count are significant.
    """
    def test_create_default_types(self):
        # The legacy DEFAULT_TYPES mapping covers the seven core JSON types.
        Validator = validators.create(meta_schema={}, validators=())
        self.assertEqual(
            set(Validator.DEFAULT_TYPES), {
                u"array",
                u"boolean",
                u"integer",
                u"null",
                u"number",
                u"object", u"string",
            },
        )
        # Discard the deprecation warning raised by accessing DEFAULT_TYPES.
        self.flushWarnings()
    def test_extend(self):
        Validator = validators.create(meta_schema={}, validators=())
        original = dict(Validator.VALIDATORS)
        new = object()
        Extended = validators.extend(
            Validator,
            validators={u"new": new},
        )
        # NOTE: each flushWarnings() call below consumes the warnings raised
        # by the DEFAULT_TYPES accesses evaluated just before it in its tuple.
        self.assertEqual(
            (
                Extended.VALIDATORS,
                Extended.META_SCHEMA,
                Extended.TYPE_CHECKER,
                Validator.VALIDATORS,
                Extended.DEFAULT_TYPES,
                Extended({}).DEFAULT_TYPES,
                self.flushWarnings()[0]["message"],
            ), (
                dict(original, new=new),
                Validator.META_SCHEMA,
                Validator.TYPE_CHECKER,
                original,
                Validator.DEFAULT_TYPES,
                Validator.DEFAULT_TYPES,
                self.flushWarnings()[0]["message"],
            ),
        )
    def test_types_redefines_the_validators_type_checker(self):
        # The deprecated types= argument overrides the validator's notion of
        # which Python types satisfy a JSON "string".
        schema = {"type": "string"}
        self.assertFalse(validators.Draft7Validator(schema).is_valid(12))
        validator = validators.Draft7Validator(
            schema,
            types={"string": (str, int)},
        )
        self.assertTrue(validator.is_valid(12))
        self.flushWarnings()
    def test_providing_default_types_warns(self):
        # create(default_types=...) must emit the documented DeprecationWarning.
        self.assertWarns(
            category=DeprecationWarning,
            message=(
                "The default_types argument is deprecated. "
                "Use the type_checker argument instead."
            ),
            # https://tm.tl/9363 :'(
            filename=sys.modules[self.assertWarns.__module__].__file__,
            f=validators.create,
            meta_schema={},
            validators={},
            default_types={"foo": object},
        )
    def test_cannot_ask_for_default_types_with_non_default_type_checker(self):
        """
        We raise an error when you ask a validator with non-default
        type checker for its DEFAULT_TYPES.
        The type checker argument is new, so no one but this library
        itself should be trying to use it, and doing so while then
        asking for DEFAULT_TYPES makes no sense (not to mention is
        deprecated), since type checkers are not strictly about Python
        type.
        """
        Validator = validators.create(
            meta_schema={},
            validators={},
            type_checker=TypeChecker(),
        )
        with self.assertRaises(validators._DontDoThat) as e:
            Validator.DEFAULT_TYPES
        self.assertIn(
            "DEFAULT_TYPES cannot be used on Validators using TypeCheckers",
            str(e.exception),
        )
        # The same error applies on instances, not just the class.
        with self.assertRaises(validators._DontDoThat):
            Validator({}).DEFAULT_TYPES
        # And no deprecation warning should have been emitted.
        self.assertFalse(self.flushWarnings())
    def test_providing_explicit_type_checker_does_not_warn(self):
        Validator = validators.create(
            meta_schema={},
            validators={},
            type_checker=TypeChecker(),
        )
        self.assertFalse(self.flushWarnings())
        Validator({})
        self.assertFalse(self.flushWarnings())
    def test_providing_neither_does_not_warn(self):
        # Omitting both type_checker and default_types is the normal path.
        Validator = validators.create(meta_schema={}, validators={})
        self.assertFalse(self.flushWarnings())
        Validator({})
        self.assertFalse(self.flushWarnings())
    def test_providing_default_types_with_type_checker_errors(self):
        # The two mechanisms are mutually exclusive; combining them is a
        # hard TypeError, not merely a warning.
        with self.assertRaises(TypeError) as e:
            validators.create(
                meta_schema={},
                validators={},
                default_types={"foo": object},
                type_checker=TypeChecker(),
            )
        self.assertIn(
            "Do not specify default_types when providing a type checker",
            str(e.exception),
        )
        self.assertFalse(self.flushWarnings())
    def test_extending_a_legacy_validator_with_a_type_checker_errors(self):
        # A validator created with default_types cannot later be extended
        # with a type_checker; the combination is rejected outright.
        Validator = validators.create(
            meta_schema={},
            validators={},
            default_types={u"array": list}
        )
        with self.assertRaises(TypeError) as e:
            validators.extend(
                Validator,
                validators={},
                type_checker=TypeChecker(),
            )
        self.assertIn(
            (
                "Cannot extend a validator created with default_types "
                "with a type_checker. Update the validator to use a "
                "type_checker when created."
            ),
            str(e.exception),
        )
        self.flushWarnings()
    def test_extending_a_legacy_validator_does_not_rewarn(self):
        # The deprecation warning fires at create() time only; plain
        # extend() afterwards stays silent.
        Validator = validators.create(meta_schema={}, default_types={})
        self.assertTrue(self.flushWarnings())
        validators.extend(Validator)
        self.assertFalse(self.flushWarnings())
    def test_accessing_default_types_warns(self):
        Validator = validators.create(meta_schema={}, validators={})
        self.assertFalse(self.flushWarnings())
        self.assertWarns(
            DeprecationWarning,
            (
                "The DEFAULT_TYPES attribute is deprecated. "
                "See the type checker attached to this validator instead."
            ),
            # https://tm.tl/9363 :'(
            sys.modules[self.assertWarns.__module__].__file__,
            getattr,
            Validator,
            "DEFAULT_TYPES",
        )
    def test_accessing_default_types_on_the_instance_warns(self):
        # Same warning as above, but via an instance attribute lookup.
        Validator = validators.create(meta_schema={}, validators={})
        self.assertFalse(self.flushWarnings())
        self.assertWarns(
            DeprecationWarning,
            (
                "The DEFAULT_TYPES attribute is deprecated. "
                "See the type checker attached to this validator instead."
            ),
            # https://tm.tl/9363 :'(
            sys.modules[self.assertWarns.__module__].__file__,
            getattr,
            Validator({}),
            "DEFAULT_TYPES",
        )
    def test_providing_types_to_init_warns(self):
        # Passing types= when instantiating a validator also warns.
        Validator = validators.create(meta_schema={}, validators={})
        self.assertFalse(self.flushWarnings())
        self.assertWarns(
            category=DeprecationWarning,
            message=(
                "The types argument is deprecated. "
                "Provide a type_checker to jsonschema.validators.extend "
                "instead."
            ),
            # https://tm.tl/9363 :'(
            filename=sys.modules[self.assertWarns.__module__].__file__,
            f=Validator,
            schema={},
            types={"bar": object},
        )
class TestIterErrors(TestCase):
    """Exercise Draft3Validator.iter_errors with an explicit per-call schema."""

    def setUp(self):
        # One validator shared by all tests; the schema argument passed to
        # iter_errors takes the place of the empty {} given here.
        self.validator = validators.Draft3Validator({})

    def test_iter_errors(self):
        sample = [1, 2]
        schema = {
            u"disallow": u"array",
            u"enum": [["a", "b", "c"], ["d", "e", "f"]],
            u"minItems": 3,
        }
        expected = [
            "%r is disallowed for [1, 2]" % (schema["disallow"],),
            "[1, 2] is too short",
            "[1, 2] is not one of %r" % (schema["enum"],),
        ]
        messages = [
            error.message
            for error in self.validator.iter_errors(sample, schema)
        ]
        # Order of reported errors is unspecified, so compare sorted.
        self.assertEqual(sorted(messages), sorted(expected))

    def test_iter_errors_multiple_failures_one_validator(self):
        sample = {"foo": 2, "bar": [1], "baz": 15, "quux": "spam"}
        schema = {
            u"properties": {
                "foo": {u"type": "string"},
                "bar": {u"minItems": 2},
                "baz": {u"maximum": 10, u"enum": [2, 4, 6, 8]},
            },
        }
        # "baz" fails two keywords, so four errors in total are expected.
        found = list(self.validator.iter_errors(sample, schema))
        self.assertEqual(len(found), 4)
class TestValidationErrorMessages(TestCase):
    """Tests pinning the exact human-readable message text produced when
    validation fails for each schema keyword."""
    def message_for(self, instance, schema, *args, **kwargs):
        """Validate *instance* against *schema* (Draft3 unless ``cls`` is
        given) and return the message of the ValidationError it must raise."""
        kwargs.setdefault("cls", validators.Draft3Validator)
        with self.assertRaises(exceptions.ValidationError) as e:
            validators.validate(instance, schema, *args, **kwargs)
        return e.exception.message
    def test_single_type_failure(self):
        message = self.message_for(instance=1, schema={u"type": u"string"})
        self.assertEqual(message, "1 is not of type %r" % u"string")
    def test_single_type_list_failure(self):
        # A one-element type list is reported the same as a bare type name.
        message = self.message_for(instance=1, schema={u"type": [u"string"]})
        self.assertEqual(message, "1 is not of type %r" % u"string")
    def test_multiple_type_failure(self):
        types = u"string", u"object"
        message = self.message_for(instance=1, schema={u"type": list(types)})
        self.assertEqual(message, "1 is not of type %r, %r" % types)
    def test_object_without_title_type_failure(self):
        # Draft3 allows schema objects inside a "type" list; the nested
        # error's message is surfaced directly. (Local name deliberately
        # shadows the builtin `type` to mirror the keyword under test.)
        type = {u"type": [{u"minimum": 3}]}
        message = self.message_for(instance=1, schema={u"type": [type]})
        self.assertEqual(message, "1 is less than the minimum of 3")
    def test_object_with_named_type_failure(self):
        schema = {u"type": [{u"name": "Foo", u"minimum": 3}]}
        message = self.message_for(instance=1, schema=schema)
        self.assertEqual(message, "1 is less than the minimum of 3")
    def test_minimum(self):
        message = self.message_for(instance=1, schema={"minimum": 2})
        self.assertEqual(message, "1 is less than the minimum of 2")
    def test_maximum(self):
        message = self.message_for(instance=1, schema={"maximum": 0})
        self.assertEqual(message, "1 is greater than the maximum of 0")
    def test_dependencies_single_element(self):
        # Draft3 allows a bare string (single property) as a dependency.
        depend, on = "bar", "foo"
        schema = {u"dependencies": {depend: on}}
        message = self.message_for(
            instance={"bar": 2},
            schema=schema,
            cls=validators.Draft3Validator,
        )
        self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
    def test_dependencies_list_draft3(self):
        depend, on = "bar", "foo"
        schema = {u"dependencies": {depend: [on]}}
        message = self.message_for(
            instance={"bar": 2},
            schema=schema,
            cls=validators.Draft3Validator,
        )
        self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
    def test_dependencies_list_draft7(self):
        # Same message shape is expected from the Draft7 validator.
        depend, on = "bar", "foo"
        schema = {u"dependencies": {depend: [on]}}
        message = self.message_for(
            instance={"bar": 2},
            schema=schema,
            cls=validators.Draft7Validator,
        )
        self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
    def test_additionalItems_single_failure(self):
        message = self.message_for(
            instance=[2],
            schema={u"items": [], u"additionalItems": False},
        )
        self.assertIn("(2 was unexpected)", message)
    def test_additionalItems_multiple_failures(self):
        # Multiple extras switch the message to plural phrasing.
        message = self.message_for(
            instance=[1, 2, 3],
            schema={u"items": [], u"additionalItems": False}
        )
        self.assertIn("(1, 2, 3 were unexpected)", message)
    def test_additionalProperties_single_failure(self):
        additional = "foo"
        schema = {u"additionalProperties": False}
        message = self.message_for(instance={additional: 2}, schema=schema)
        self.assertIn("(%r was unexpected)" % (additional,), message)
    def test_additionalProperties_multiple_failures(self):
        # Property ordering in the message is not guaranteed, so only
        # membership of each name (and the plural suffix) is asserted.
        schema = {u"additionalProperties": False}
        message = self.message_for(
            instance=dict.fromkeys(["foo", "bar"]),
            schema=schema,
        )
        self.assertIn(repr("foo"), message)
        self.assertIn(repr("bar"), message)
        self.assertIn("were unexpected)", message)
    def test_const(self):
        schema = {u"const": 12}
        message = self.message_for(
            instance={"foo": "bar"},
            schema=schema,
            cls=validators.Draft6Validator,
        )
        self.assertIn("12 was expected", message)
    def test_contains(self):
        schema = {u"contains": {u"const": 12}}
        message = self.message_for(
            instance=[2, {}, []],
            schema=schema,
            cls=validators.Draft6Validator,
        )
        self.assertIn(
            "None of [2, {}, []] are valid under the given schema",
            message,
        )
    def test_invalid_format_default_message(self):
        # Register a format check that always fails so the generic
        # "<instance> is not a <format>" message is produced.
        checker = FormatChecker(formats=())
        checker.checks(u"thing")(lambda value: False)
        schema = {u"format": u"thing"}
        message = self.message_for(
            instance="bla",
            schema=schema,
            format_checker=checker,
        )
        self.assertIn(repr("bla"), message)
        self.assertIn(repr("thing"), message)
        self.assertIn("is not a", message)
    def test_additionalProperties_false_patternProperties(self):
        schema = {u"type": u"object",
                  u"additionalProperties": False,
                  u"patternProperties": {
                      u"^abc$": {u"type": u"string"},
                      u"^def$": {u"type": u"string"},
                  }}
        # Single unmatched property: singular phrasing.
        message = self.message_for(
            instance={u"zebra": 123},
            schema=schema,
            cls=validators.Draft4Validator,
        )
        self.assertEqual(
            message,
            "{} does not match any of the regexes: {}, {}".format(
                repr(u"zebra"), repr(u"^abc$"), repr(u"^def$"),
            ),
        )
        # Multiple unmatched properties: plural phrasing, sorted names.
        message = self.message_for(
            instance={u"zebra": 123, u"fish": 456},
            schema=schema,
            cls=validators.Draft4Validator,
        )
        self.assertEqual(
            message,
            "{}, {} do not match any of the regexes: {}, {}".format(
                repr(u"fish"), repr(u"zebra"), repr(u"^abc$"), repr(u"^def$")
            ),
        )
    def test_False_schema(self):
        message = self.message_for(
            instance="something",
            schema=False,
            cls=validators.Draft7Validator,
        )
        self.assertIn("False schema does not allow 'something'", message)
class TestValidationErrorDetails(TestCase):
# TODO: These really need unit tests for each | |
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azext_iot.common.shared import EntityStatusType, AttestationType, AllocationType, ReprovisionType
from azext_iot.common.utility import generate_key
from azext_iot.tests.dps import (
API_VERSION,
CERT_PATH,
DATAPLANE_AUTH_TYPES,
WEBHOOK_URL,
IoTDPSLiveScenarioTest
)
# Sample TPM endorsement key used by the enrollment tests.
# NOTE(review): the real value was redacted to "<KEY>" placeholders and the
# final fragment was left as an unterminated string literal (a syntax error
# that prevents the module from importing). The literal is closed here;
# restore the real base64 key material before running live.
test_endorsement_key = (
    "<KEY>"
    "<KEY>"
    "<KEY>"
    "<KEY>"
    "<KEY>"
)
class TestDPSEnrollments(IoTDPSLiveScenarioTest):
def __init__(self, test_method):
super(TestDPSEnrollments, self).__init__(test_method)
def test_dps_compute_device_key(self):
offline_device_key = self.cmd(
'az iot dps compute-device-key --key "{}" '
"--registration-id myarbitrarydeviceId".format(test_endorsement_key)
).output
offline_device_key = offline_device_key.strip("\"'\n")
assert offline_device_key == "<KEY>
    def test_dps_enrollment_tpm_lifecycle(self):
        """End-to-end create/list/show/update/delete of a TPM-attested
        individual enrollment, repeated once per data-plane auth type."""
        attestation_type = AttestationType.tpm.value
        for auth_phase in DATAPLANE_AUTH_TYPES:
            enrollment_id = self.generate_enrollment_names()[0]
            device_id = self.generate_device_names()[0]
            enrollment = self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment create --enrollment-id {} --attestation-type {}"
                    " -g {} --dps-name {} --endorsement-key {}"
                    " --provisioning-status {} --device-id {} --initial-twin-tags {}"
                    " --initial-twin-properties {} --device-information {} "
                    "--allocation-policy {} --iot-hubs {}".format(
                        enrollment_id,
                        attestation_type,
                        self.entity_rg,
                        self.entity_dps_name,
                        test_endorsement_key,
                        EntityStatusType.enabled.value,
                        device_id,
                        '"{generic_dict}"',
                        '"{generic_dict}"',
                        '"{generic_dict}"',
                        AllocationType.static.value,
                        self.hub_host_name,
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("attestation.type", attestation_type),
                    self.check("registrationId", enrollment_id),
                    self.check("provisioningStatus", EntityStatusType.enabled.value),
                    self.check("deviceId", device_id),
                    self.check("allocationPolicy", AllocationType.static.value),
                    self.check("iotHubs", self.hub_host_name.split()),
                    self.check("initialTwin.tags", self.kwargs["generic_dict"]),
                    self.check("optionalDeviceInformation", self.kwargs["generic_dict"]),
                    self.check(
                        "initialTwin.properties.desired", self.kwargs["generic_dict"]
                    ),
                    self.exists("reprovisionPolicy"),
                    self.check("reprovisionPolicy.migrateDeviceData", True),
                    self.check("reprovisionPolicy.updateHubAssignment", True),
                ],
            ).get_output_in_json()
            # The etag from create is required by the optimistic-concurrency
            # update further down.
            etag = enrollment["etag"]
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment list -g {} --dps-name {}".format(
                        self.entity_rg, self.entity_dps_name
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("length(@)", 1),
                    self.check("[0].registrationId", enrollment_id),
                ],
            )
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment show -g {} --dps-name {} --enrollment-id {}".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id
                    ),
                    auth_type=auth_phase
                ),
                checks=[self.check("registrationId", enrollment_id)],
            )
            # --show-keys should surface the attestation mechanism's payload.
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment show -g {} --dps-name {} --enrollment-id {} --show-keys".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("registrationId", enrollment_id),
                    self.check("attestation.type", attestation_type),
                    self.exists("attestation.{}".format(attestation_type)),
                ],
            )
            # Disable the enrollment and clear device info ('""').
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment update -g {} --dps-name {} --enrollment-id {}"
                    " --provisioning-status {} --etag {} --info {}".format(
                        self.entity_rg,
                        self.entity_dps_name,
                        enrollment_id,
                        EntityStatusType.disabled.value,
                        etag,
                        '""'
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("attestation.type", attestation_type),
                    self.check("registrationId", enrollment_id),
                    self.check("provisioningStatus", EntityStatusType.disabled.value),
                    self.check("deviceId", device_id),
                    self.check("allocationPolicy", AllocationType.static.value),
                    self.check("iotHubs", self.hub_host_name.split()),
                    self.exists("initialTwin.tags"),
                    self.exists("initialTwin.properties.desired"),
                    self.exists("optionalDeviceInformation"),
                ],
            )
            # Clean up so the next auth phase starts from an empty DPS.
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment delete -g {} --dps-name {} --enrollment-id {}".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id
                    ),
                    auth_type=auth_phase
                ),
            )
    def test_dps_enrollment_x509_lifecycle(self):
        """End-to-end create/list/show/update/delete of an x509-attested
        individual enrollment, repeated once per data-plane auth type."""
        attestation_type = AttestationType.x509.value
        for auth_phase in DATAPLANE_AUTH_TYPES:
            enrollment_id = self.generate_enrollment_names()[0]
            device_id = self.generate_device_names()[0]
            # Keep the etag from create for the concurrency-checked update.
            etag = self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment create --enrollment-id {} --attestation-type {}"
                    " -g {} --dps-name {} --cp {} --scp {}"
                    " --provisioning-status {} --device-id {}"
                    " --initial-twin-tags {} --initial-twin-properties {}"
                    " --allocation-policy {} --iot-hubs {}".format(
                        enrollment_id,
                        attestation_type,
                        self.entity_rg,
                        self.entity_dps_name,
                        CERT_PATH,
                        CERT_PATH,
                        EntityStatusType.enabled.value,
                        device_id,
                        '"{generic_dict}"',
                        '"{generic_dict}"',
                        AllocationType.hashed.value,
                        self.hub_host_name,
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("attestation.type", attestation_type),
                    self.check("registrationId", enrollment_id),
                    self.check("provisioningStatus", EntityStatusType.enabled.value),
                    self.check("deviceId", device_id),
                    self.check("allocationPolicy", AllocationType.hashed.value),
                    self.check("iotHubs", self.hub_host_name.split()),
                    self.check("initialTwin.tags", self.kwargs["generic_dict"]),
                    self.check(
                        "initialTwin.properties.desired", self.kwargs["generic_dict"]
                    ),
                    self.exists("reprovisionPolicy"),
                    self.check("reprovisionPolicy.migrateDeviceData", True),
                    self.check("reprovisionPolicy.updateHubAssignment", True),
                ],
            ).get_output_in_json()["etag"]
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment list -g {} --dps-name {}".format(self.entity_rg, self.entity_dps_name),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("length(@)", 1),
                    self.check("[0].registrationId", enrollment_id),
                ],
            )
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment show -g {} --dps-name {} --enrollment-id {}".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id
                    ),
                    auth_type=auth_phase
                ),
                checks=[self.check("registrationId", enrollment_id)],
            )
            # Disable, set device info, and remove the primary client
            # certificate (--rc) in a single etag-guarded update.
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment update -g {} --dps-name {} --enrollment-id {}"
                    " --provisioning-status {} --etag {} --info {} --rc".format(
                        self.entity_rg,
                        self.entity_dps_name,
                        enrollment_id,
                        EntityStatusType.disabled.value,
                        etag,
                        '"{generic_dict}"',
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("attestation.type", attestation_type),
                    self.check("registrationId", enrollment_id),
                    self.check("provisioningStatus", EntityStatusType.disabled.value),
                    self.check("deviceId", device_id),
                    self.check("allocationPolicy", AllocationType.hashed.value),
                    self.check("iotHubs", self.hub_host_name.split()),
                    self.exists("initialTwin.tags"),
                    self.exists("initialTwin.properties.desired"),
                    self.check("optionalDeviceInformation", self.kwargs["generic_dict"]),
                    # NOTE(review): the JMESPath "attestation.type.x509..."
                    # resolves to None, so comparing against None passes
                    # vacuously; "attestation.x509.clientCertificates.primary"
                    # was probably intended -- confirm before relying on it.
                    self.check("attestation.type.x509.clientCertificates.primary", None),
                ],
            )
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment delete -g {} --dps-name {} --enrollment-id {}".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id
                    ),
                    auth_type=auth_phase
                ),
            )
    def test_dps_enrollment_symmetrickey_lifecycle(self):
        """Lifecycle for symmetric-key individual enrollments: one created
        with caller-supplied keys, one with service-generated keys, once per
        data-plane auth type."""
        attestation_type = AttestationType.symmetricKey.value
        for auth_phase in DATAPLANE_AUTH_TYPES:
            enrollment_id, enrollment_id2 = self.generate_enrollment_names(count=2)
            primary_key = generate_key()
            secondary_key = generate_key()
            device_id = self.generate_enrollment_names()[0]
            # Use provided keys
            etag = self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment create --enrollment-id {} --attestation-type {}"
                    " -g {} --dps-name {} --pk {} --sk {}"
                    " --provisioning-status {} --device-id {}"
                    " --initial-twin-tags {} --initial-twin-properties {} --device-information {}"
                    " --allocation-policy {} --rp {} --iot-hubs {} --edge-enabled".format(
                        enrollment_id,
                        attestation_type,
                        self.entity_rg,
                        self.entity_dps_name,
                        primary_key,
                        secondary_key,
                        EntityStatusType.enabled.value,
                        device_id,
                        '"{generic_dict}"',
                        '"{generic_dict}"',
                        '"{generic_dict}"',
                        AllocationType.geolatency.value.lower(),
                        ReprovisionType.reprovisionandresetdata.value,
                        self.hub_host_name,
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("attestation.type", attestation_type),
                    self.check("registrationId", enrollment_id),
                    self.check("provisioningStatus", EntityStatusType.enabled.value),
                    self.check("deviceId", device_id),
                    self.check("allocationPolicy", AllocationType.geolatency.value),
                    self.check("iotHubs", self.hub_host_name.split()),
                    self.check("initialTwin.tags", self.kwargs["generic_dict"]),
                    self.check("optionalDeviceInformation", self.kwargs["generic_dict"]),
                    self.check(
                        "initialTwin.properties.desired", self.kwargs["generic_dict"]
                    ),
                    self.exists("reprovisionPolicy"),
                    self.check("reprovisionPolicy.migrateDeviceData", False),
                    self.check("reprovisionPolicy.updateHubAssignment", True),
                    self.check("capabilities.iotEdge", True),
                ],
            ).get_output_in_json()["etag"]
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment list -g {} --dps-name {}".format(self.entity_rg, self.entity_dps_name),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("length(@)", 1),
                    self.check("[0].registrationId", enrollment_id),
                ],
            )
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment show -g {} --dps-name {} --enrollment-id {}".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id
                    ),
                    auth_type=auth_phase
                ),
                checks=[self.check("registrationId", enrollment_id)],
            )
            # Disable, drop edge capability, and switch allocation to a
            # custom webhook; iotHubs should be cleared by the switch.
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment update -g {} --dps-name {} --enrollment-id {}"
                    " --provisioning-status {} --etag {} --edge-enabled False"
                    " --allocation-policy {} --webhook-url {} --api-version {}".format(
                        self.entity_rg,
                        self.entity_dps_name,
                        enrollment_id,
                        EntityStatusType.disabled.value,
                        etag,
                        AllocationType.custom.value,
                        WEBHOOK_URL,
                        API_VERSION,
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("attestation.type", attestation_type),
                    self.check("registrationId", enrollment_id),
                    self.check("provisioningStatus", EntityStatusType.disabled.value),
                    self.check("deviceId", device_id),
                    self.check("allocationPolicy", "custom"),
                    self.check("customAllocationDefinition.webhookUrl", WEBHOOK_URL),
                    self.check("customAllocationDefinition.apiVersion", API_VERSION),
                    self.check("iotHubs", None),
                    self.exists("initialTwin.tags"),
                    self.exists("initialTwin.properties.desired"),
                    self.check("attestation.symmetricKey.primaryKey", primary_key),
                    self.check("capabilities.iotEdge", False),
                ],
            )
            # Use service generated keys
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment create --enrollment-id {} --attestation-type {}"
                    " -g {} --dps-name {} --allocation-policy {} --webhook-url {} --api-version {}".format(
                        enrollment_id2,
                        attestation_type,
                        self.entity_rg,
                        self.entity_dps_name,
                        AllocationType.custom.value,
                        WEBHOOK_URL,
                        API_VERSION,
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("attestation.type", attestation_type),
                    self.check("registrationId", enrollment_id2),
                    self.check("allocationPolicy", "custom"),
                    self.check("customAllocationDefinition.webhookUrl", WEBHOOK_URL),
                    self.check("customAllocationDefinition.apiVersion", API_VERSION),
                ],
            )
            # Remove both enrollments before the next auth phase iteration.
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment delete -g {} --dps-name {} --enrollment-id {}".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id
                    ),
                    auth_type=auth_phase
                ),
            )
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment delete -g {} --dps-name {} --enrollment-id {}".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id2
                    ),
                    auth_type=auth_phase
                ),
            )
    def test_dps_enrollment_group_x509_lifecycle(self):
        """Lifecycle for an x509 enrollment group: certificate-path
        attestation, update via --rsc, registration listing, and switching
        to a named DPS CA certificate (--cn); once per auth type."""
        for auth_phase in DATAPLANE_AUTH_TYPES:
            enrollment_id = self.generate_enrollment_names(group=True)[0]
            # Keep the etag from create for the etag-guarded update below.
            etag = self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment-group create --enrollment-id {} -g {} --dps-name {}"
                    " --cp {} --scp {} --provisioning-status {} --allocation-policy {}"
                    " --iot-hubs {} --edge-enabled".format(
                        enrollment_id,
                        self.entity_rg,
                        self.entity_dps_name,
                        CERT_PATH,
                        CERT_PATH,
                        EntityStatusType.enabled.value,
                        AllocationType.geolatency.value,
                        self.hub_host_name,
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("enrollmentGroupId", enrollment_id),
                    self.check("provisioningStatus", EntityStatusType.enabled.value),
                    self.exists("reprovisionPolicy"),
                    self.check("allocationPolicy", AllocationType.geolatency.value),
                    self.check("iotHubs", self.hub_host_name.split()),
                    self.check("reprovisionPolicy.migrateDeviceData", True),
                    self.check("reprovisionPolicy.updateHubAssignment", True),
                    self.check("capabilities.iotEdge", True),
                ],
            ).get_output_in_json()["etag"]
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment-group list -g {} --dps-name {}".format(self.entity_rg, self.entity_dps_name),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("length(@)", 1),
                    self.check("[0].enrollmentGroupId", enrollment_id),
                ],
            )
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment-group show -g {} --dps-name {} --enrollment-id {}".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id
                    ),
                    auth_type=auth_phase
                ),
                checks=[self.check("enrollmentGroupId", enrollment_id)],
            )
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment-group show -g {} --dps-name {} --enrollment-id {} --show-keys".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("enrollmentGroupId", enrollment_id),
                    self.exists("attestation.x509"),
                ],
            )
            # Compute Device Key only works for symmetric key enrollment groups
            self.cmd(
                self.set_cmd_auth_type(
                    'az iot dps compute-device-key -g {} --dps-name {} --enrollment-id {} '
                    "--registration-id myarbitrarydeviceId".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id
                    ),
                    auth_type=auth_phase
                ),
                expect_failure=True
            )
            # Etag-guarded update: disable, clear the secondary client cert
            # (--rsc) while supplying a new one (--scp), change reprovision
            # policy and allocation, and drop edge capability.
            etag = self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment-group update -g {} --dps-name {} --enrollment-id {}"
                    " --provisioning-status {} --rsc --etag {} --rp {} --allocation-policy {}"
                    " --edge-enabled False --scp {}".format(
                        self.entity_rg,
                        self.entity_dps_name,
                        enrollment_id,
                        EntityStatusType.disabled.value,
                        etag,
                        ReprovisionType.never.value,
                        AllocationType.hashed.value,
                        CERT_PATH,
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("attestation.type", AttestationType.x509.value),
                    self.check("enrollmentGroupId", enrollment_id),
                    self.check("provisioningStatus", EntityStatusType.disabled.value),
                    # NOTE(review): the JMESPath "attestation.type.x509..."
                    # resolves to None, so this check passes vacuously;
                    # "attestation.x509.clientCertificates.secondary" was
                    # probably intended -- confirm before relying on it.
                    self.check("attestation.type.x509.clientCertificates.secondary", None),
                    self.exists("reprovisionPolicy"),
                    self.check("allocationPolicy", AllocationType.hashed.value),
                    self.check("reprovisionPolicy.migrateDeviceData", False),
                    self.check("reprovisionPolicy.updateHubAssignment", False),
                    self.check("capabilities.iotEdge", False),
                ],
            ).get_output_in_json()["etag"]
            # No devices have registered through this group yet.
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps registration list -g {} --dps-name {} --enrollment-id {}".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id
                    ),
                    auth_type=auth_phase
                ),
                checks=[self.check("length(@)", 0)],
            )
            # Upload a DPS certificate so the group can reference it by name.
            cert_name = self.create_random_name("certificate-for-test", length=48)
            cert_etag = self.cmd(
                "iot dps certificate create -g {} --dps-name {} --name {} --p {}".format(
                    self.entity_rg, self.entity_dps_name, cert_name, CERT_PATH
                ),
                checks=[self.check("name", cert_name)],
            ).get_output_in_json()["etag"]
            # Switch attestation to the named CA certificate (--cn) and to
            # custom webhook allocation.
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment-group update -g {} --dps-name {} --enrollment-id {}"
                    " --cn {} --etag {} --allocation-policy {} --webhook-url {} --api-version {}".format(
                        self.entity_rg,
                        self.entity_dps_name,
                        enrollment_id,
                        cert_name,
                        etag,
                        AllocationType.custom.value,
                        WEBHOOK_URL,
                        API_VERSION,
                    ),
                    auth_type=auth_phase
                ),
                checks=[
                    self.check("attestation.type", AttestationType.x509.value),
                    self.check("enrollmentGroupId", enrollment_id),
                    self.check("allocationPolicy", "custom"),
                    self.check("customAllocationDefinition.webhookUrl", WEBHOOK_URL),
                    self.check("customAllocationDefinition.apiVersion", API_VERSION),
                    self.check("attestation.x509.caReferences.primary", cert_name),
                    self.check("attestation.x509.caReferences.secondary", None),
                ],
            )
            # Delete the group first, then the certificate it references.
            self.cmd(
                self.set_cmd_auth_type(
                    "iot dps enrollment-group delete -g {} --dps-name {} --enrollment-id {}".format(
                        self.entity_rg, self.entity_dps_name, enrollment_id
                    ),
                    auth_type=auth_phase
                ),
            )
            self.cmd(
                "iot dps certificate delete -g {} --dps-name {} --name {} --etag {}".format(
                    self.entity_rg, self.entity_dps_name, cert_name, cert_etag
                ),
            )
def test_dps_enrollment_group_symmetrickey_lifecycle(self):
attestation_type = AttestationType.symmetricKey.value
for auth_phase in DATAPLANE_AUTH_TYPES:
enrollment_id, enrollment_id2 = self.generate_enrollment_names(count=2, group=True)
primary_key = generate_key()
secondary_key = generate_key()
etag = self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group create --enrollment-id {}"
" -g {} --dps-name {} --pk {} --sk {} --provisioning-status {}"
" --initial-twin-tags {} --initial-twin-properties {}"
" --allocation-policy {} --rp {} --iot-hubs {} --edge-enabled".format(
enrollment_id,
self.entity_rg,
self.entity_dps_name,
primary_key,
secondary_key,
EntityStatusType.enabled.value,
'"{generic_dict}"',
'"{generic_dict}"',
AllocationType.geolatency.value,
ReprovisionType.reprovisionandresetdata.value,
self.hub_host_name,
),
auth_type=auth_phase
),
checks=[
self.check("enrollmentGroupId", enrollment_id),
self.check("provisioningStatus", EntityStatusType.enabled.value),
self.check("allocationPolicy", AllocationType.geolatency.value),
self.check("iotHubs", self.hub_host_name.split()),
self.check("initialTwin.tags", self.kwargs["generic_dict"]),
self.check(
"initialTwin.properties.desired", self.kwargs["generic_dict"]
),
self.exists("reprovisionPolicy"),
self.check("reprovisionPolicy.migrateDeviceData", False),
self.check("reprovisionPolicy.updateHubAssignment", True),
self.check("capabilities.iotEdge", True),
],
).get_output_in_json()["etag"]
self.cmd(
self.set_cmd_auth_type(
"iot dps enrollment-group list -g {} --dps-name {}".format(self.entity_rg, self.entity_dps_name),
| |
# Copyright 2011 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from array import array
from collections import defaultdict
from whoosh.compat import string_type
from whoosh.compat import iteritems, izip, xrange
# Faceting objects
class FacetType(object):
    """Abstract parent of facet objects: aspects of the documents that
    results can be sorted or grouped ("faceted") by.
    """

    # Facet-map class or ready-made object consulted by map(); None means
    # fall back to the caller's default.
    maptype = None

    def categorizer(self, global_searcher):
        """Return a :class:`Categorizer` corresponding to this facet.

        :param global_searcher: a parent searcher, available in case the
            categorizer needs global document ID references.
        """
        raise NotImplementedError

    def map(self, default=None):
        """Return the facet map to use: ``self.maptype`` if set, otherwise
        *default*, otherwise a fresh ``OrderedList``. A class is
        instantiated; anything else is returned as-is."""
        chosen = self.maptype if self.maptype is not None else default
        if chosen is None:
            return OrderedList()
        if type(chosen) is type:
            return chosen()
        return chosen

    def default_name(self):
        """Name used for this facet when the caller doesn't supply one."""
        return "facet"
class Categorizer(object):
    """Base class for categorizer objects, which compute a sort/facet key
    for a document.

    Instances are produced by :meth:`FacetType.categorizer`. Although the
    searcher given to that method may be a composite one (wrapping a
    multi-reader), categorizers always operate **per-segment** with
    segment-relative document numbers; the collector calls
    ``set_searcher`` each time it moves to a new segment so the
    categorizer can prepare whatever segment-specific state it needs.

    ``allow_overlap`` should be ``True`` when the caller may use
    ``keys_for`` (potentially overlapping groups) instead of ``key_for``;
    the default is ``False``.

    ``needs_current`` should be ``True`` only when a subclass must inspect
    the live matcher inside ``key_for``/``keys_for``; with the default
    ``False`` the matcher argument may be inconsistent with
    ``segment_docnum`` and must be ignored. Setting it to ``True``
    prevents caller optimizations that could leave the matcher in an
    inconsistent state.
    """

    allow_overlap = False
    needs_current = False

    def set_searcher(self, segment_searcher, docoffset):
        """Called when the collector moves to a new segment.

        ``segment_searcher`` is atomic. ``docoffset`` is the offset of this
        segment's document numbers within the whole index; adding it to a
        segment-relative docnum yields the absolute docnum.
        """
        pass

    def key_for(self, matcher, segment_docnum):
        """Return the key for the current match.

        Do not touch ``matcher`` unless ``needs_current`` is ``True`` --
        rely on ``segment_docnum`` instead.
        """
        # Dispatch to the legacy subclass hooks when they exist
        if hasattr(self, "key_for_id"):
            return self.key_for_id(segment_docnum)
        if hasattr(self, "key_for_matcher"):
            return self.key_for_matcher(matcher)
        raise NotImplementedError(self.__class__)

    def keys_for(self, matcher, segment_docnum):
        """Yield every key for the current match; used in place of
        ``key_for`` when ``allow_overlap`` is ``True``.

        The same caveat about ``matcher`` applies as for ``key_for``.
        """
        # Dispatch to the legacy subclass hook when it exists
        if hasattr(self, "keys_for_id"):
            return self.keys_for_id(segment_docnum)
        raise NotImplementedError(self.__class__)

    def key_to_name(self, key):
        """Translate a raw sort key into a value suitable as a grouping
        dictionary key (e.g. a date field's big-integer key back into a
        ``datetime``). The default is the identity."""
        return key
# General field facet
class FieldFacet(FacetType):
    """Sorts/facets by the contents of a field.

    For example, to sort by the contents of the "path" field in reverse
    order, and facet by the contents of the "tag" field::

        paths = FieldFacet("path", reverse=True)
        tags = FieldFacet("tag")
        results = searcher.search(myquery, sortedby=paths, groupedby=tags)

    This facet returns different categorizers based on the field type.
    """

    def __init__(self, fieldname, reverse=False, allow_overlap=False,
                 maptype=None):
        """
        :param fieldname: the name of the field to sort/facet on.
        :param reverse: if True, when sorting, reverse the sort order of
            this facet.
        :param allow_overlap: if True, when grouping, allow documents to
            appear in multiple groups when they have multiple terms in the
            field.
        """
        self.fieldname = fieldname
        self.reverse = reverse
        self.allow_overlap = allow_overlap
        self.maptype = maptype

    def default_name(self):
        return self.fieldname

    def categorizer(self, global_searcher):
        # Although the searcher received here may wrap a multireader, the
        # key functions always run per-segment after set_searcher().
        name = self.fieldname
        fieldobj = global_searcher.schema[name]
        # Overlapping grouping is only supported by OverlappingCategorizer
        if self.allow_overlap:
            return OverlappingCategorizer(global_searcher, name)
        if not global_searcher.reader().has_column(name):
            return PostingCategorizer(global_searcher, name, self.reverse)
        # Column-backed field: use the reversing wrapper only when a
        # reversed sort is wanted but the column can't be read in reverse
        if self.reverse and not fieldobj.column_type.reversible:
            return ReversedColumnCategorizer(global_searcher, name)
        return ColumnCategorizer(global_searcher, name, self.reverse)
class ColumnCategorizer(Categorizer):
    """Categorizer that takes sort keys from a field's per-segment column
    reader."""

    def __init__(self, global_searcher, fieldname, reverse=False):
        self._fieldname = fieldname
        self._fieldobj = global_searcher.schema[self._fieldname]
        self._column_type = self._fieldobj.column_type
        self._reverse = reverse
        # Assigned per-segment in set_searcher()
        self._creader = None

    def __repr__(self):
        return "%s(%r, %r, reverse=%r)" % (type(self).__name__,
                                           self._fieldobj, self._fieldname,
                                           self._reverse)

    def set_searcher(self, segment_searcher, docoffset):
        reader = segment_searcher.reader()
        self._creader = reader.column_reader(self._fieldname,
                                             reverse=self._reverse,
                                             translate=False)

    def key_for(self, matcher, segment_docnum):
        return self._creader.sort_key(segment_docnum)

    def key_to_name(self, key):
        return self._fieldobj.from_column_value(key)
class ReversedColumnCategorizer(ColumnCategorizer):
    """Categorizer that reverses column values for columns that aren't
    naturally reversible.
    """

    def __init__(self, global_searcher, fieldname):
        ColumnCategorizer.__init__(self, global_searcher, fieldname)
        reader = global_searcher.reader()
        self._doccount = reader.doc_count_all()
        # Global, sorted list of distinct values; a value's position in it
        # is its natural sort order.
        global_creader = reader.column_reader(fieldname, translate=False)
        self._values = sorted(set(global_creader))

    def key_for(self, matcher, segment_docnum):
        order = self._values.index(self._creader[segment_docnum])
        # Negate the natural order to invert the sort
        return -order

    def key_to_name(self, key):
        # Undo the negation to recover the index into _values
        return ColumnCategorizer.key_to_name(self, self._values[-key])
class OverlappingCategorizer(Categorizer):
    """Categorizer for fields where a document may have multiple terms, so
    a document can fall into multiple (overlapping) groups.

    Keys are sourced from, in order of preference: the field's term
    vector, a column that stores lists, or an in-memory per-document term
    cache built from the inverted index in ``set_searcher``.
    """
    allow_overlap = True
    def __init__(self, global_searcher, fieldname):
        self._fieldname = fieldname
        self._fieldobj = global_searcher.schema[fieldname]
        field = global_searcher.schema[fieldname]
        reader = global_searcher.reader()
        # Decide the key source once, up front (vectors win over columns)
        self._use_vectors = bool(field.vector)
        self._use_column = (reader.has_column(fieldname)
                            and field.column_type.stores_lists())
        # These are set in set_searcher() as we iterate over the sub-searchers
        self._segment_searcher = None
        self._creader = None
        self._lists = None
    def set_searcher(self, segment_searcher, docoffset):
        """Prepare segment-specific state; builds the per-document term
        cache when neither vectors nor a list column are available."""
        fieldname = self._fieldname
        self._segment_searcher = segment_searcher
        reader = segment_searcher.reader()
        if self._use_vectors:
            pass
        elif self._use_column:
            self._creader = reader.column_reader(fieldname, translate=False)
        else:
            # Otherwise, cache the values in each document in a huge list
            # of lists
            dc = segment_searcher.doc_count_all()
            field = segment_searcher.schema[fieldname]
            from_bytes = field.from_bytes
            self._lists = [[] for _ in xrange(dc)]
            # Invert the index: for each term, append it to the list of
            # every document it occurs in
            for btext in field.sortable_terms(reader, fieldname):
                text = from_bytes(btext)
                postings = reader.postings(fieldname, btext)
                for docid in postings.all_ids():
                    self._lists[docid].append(text)
    def keys_for(self, matcher, docid):
        """Return all keys (terms) for the given segment-relative docid."""
        if self._use_vectors:
            try:
                v = self._segment_searcher.vector(docid, self._fieldname)
                return list(v.all_ids())
            except KeyError:
                # No vector stored for this document
                return []
        elif self._use_column:
            return self._creader[docid]
        else:
            # [None] marks documents with no terms in this field
            return self._lists[docid] or [None]
    def key_for(self, matcher, docid):
        """Return a single key for the docid (the first/lowest term), or
        None when the document has no terms in this field."""
        if self._use_vectors:
            try:
                v = self._segment_searcher.vector(docid, self._fieldname)
                return v.id()
            except KeyError:
                return None
        elif self._use_column:
            return self._creader.sort_key(docid)
        else:
            ls = self._lists[docid]
            if ls:
                return ls[0]
            else:
                return None
class PostingCategorizer(Categorizer):
"""
Categorizer for | |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.precise import Precise
class bitopro(Exchange):
    def describe(self):
        """Return the static exchange description that drives ccxt's unified
        API: capability flags, rate limit, endpoint routing tables, fee
        tiers, network aliases and error-message-to-exception mappings.
        Merged over the base Exchange description via deep_extend.
        """
        return self.deep_extend(super(bitopro, self).describe(), {
            'id': 'bitopro',
            'name': 'BitoPro',
            'countries': ['TW'],  # Taiwan
            'version': 'v3',
            'rateLimit': 100,
            # capability flags for the unified ccxt methods
            'has': {
                'CORS': None,
                'spot': True,
                'margin': False,
                'swap': False,
                'future': False,
                'option': False,
                'cancelAllOrders': True,
                'cancelOrder': True,
                'createOrder': True,
                'editOrder': False,
                'fetchBalance': True,
                'fetchBorrowRate': False,
                'fetchBorrowRateHistories': False,
                'fetchBorrowRateHistory': False,
                'fetchBorrowRates': False,
                'fetchClosedOrders': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': False,
                'fetchDeposits': True,
                'fetchFundingFees': False,
                'fetchFundingHistory': False,
                'fetchFundingRate': False,
                'fetchFundingRateHistory': False,
                'fetchFundingRates': False,
                'fetchIndexOHLCV': False,
                'fetchMarkets': True,
                'fetchMarkOHLCV': False,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrders': False,
                'fetchOrderTrades': False,
                'fetchPositions': False,
                'fetchPremiumIndexOHLCV': False,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTime': False,
                'fetchTrades': True,
                'fetchTradingFees': True,
                'fetchTransactions': False,
                'fetchWithdrawal': True,
                'fetchWithdrawals': True,
                'setLeverage': False,
                'setMarginMode': False,
                'withdraw': True,
            },
            # unified timeframe -> exchange interval string (identity here)
            'timeframes': {
                '1m': '1m',
                '5m': '5m',
                '15m': '15m',
                '30m': '30m',
                '1h': '1h',
                '3h': '3h',
                '6h': '6h',
                '12h': '12h',
                '1d': '1d',
                '1w': '1w',
                '1M': '1M',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/158227251-3a92a220-9222-453c-9277-977c6677fe71.jpg',
                'api': 'https://api.bitopro.com/v3',
                'www': 'https://www.bitopro.com',
                'doc': [
                    'https://github.com/bitoex/bitopro-offical-api-docs/blob/master/v3-1/rest-1/rest.md',
                ],
                'fees': 'https://www.bitopro.com/fees',
                'referral': '',
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
            },
            # REST endpoint routing: path templates grouped by access and verb;
            # ccxt turns each entry into an implicit method (e.g. publicGetTickers)
            'api': {
                'public': {
                    'get': [
                        'order-book/{pair}',
                        'tickers',
                        'tickers/{pair}',
                        'trades/{pair}',
                        'provisioning/currencies',
                        'provisioning/trading-pairs',
                        'provisioning/limitations-and-fees',
                        'trading-history/{pair}',
                    ],
                },
                'private': {
                    'get': [
                        'accounts/balance',
                        'orders/history',
                        'orders/all/{pair}',
                        'orders/trades/{pair}',
                        'orders/{pair}/{orderId}',
                        'wallet/withdraw/{currency}/{serial}',
                        'wallet/withdraw/{currency}/id/{id}',
                        'wallet/depositHistory/{currency}',
                        'wallet/withdrawHistory/{currency}',
                    ],
                    'post': [
                        'orders/{pair}',
                        'orders/batch',
                        'wallet/withdraw/{currency}',
                    ],
                    'put': [
                        'orders',
                    ],
                    'delete': [
                        'orders/{pair}/{id}',
                        'orders/all',
                        'orders/{pair}',
                    ],
                },
            },
            # volume-tiered maker/taker fees; tiers are [volume-floor, rate] pairs
            'fees': {
                'trading': {
                    'tierBased': True,
                    'percentage': True,
                    'maker': self.parse_number('0.001'),
                    'taker': self.parse_number('0.002'),
                    'tiers': {
                        'taker': [
                            [self.parse_number('0'), self.parse_number('0.002')],
                            [self.parse_number('3000000'), self.parse_number('0.00194')],
                            [self.parse_number('5000000'), self.parse_number('0.0015')],
                            [self.parse_number('30000000'), self.parse_number('0.0014')],
                            [self.parse_number('300000000'), self.parse_number('0.0013')],
                            [self.parse_number('550000000'), self.parse_number('0.0012')],
                            [self.parse_number('1300000000'), self.parse_number('0.0011')],
                        ],
                        'maker': [
                            [self.parse_number('0'), self.parse_number('0.001')],
                            [self.parse_number('3000000'), self.parse_number('0.00097')],
                            [self.parse_number('5000000'), self.parse_number('0.0007')],
                            [self.parse_number('30000000'), self.parse_number('0.0006')],
                            [self.parse_number('300000000'), self.parse_number('0.0005')],
                            [self.parse_number('550000000'), self.parse_number('0.0004')],
                            [self.parse_number('1300000000'), self.parse_number('0.0003')],
                        ],
                    },
                },
            },
            'options': {
                # aliases from user-supplied network names to exchange network ids
                'networks': {
                    'ERC20': 'ERC20',
                    'ETH': 'ERC20',
                    'TRX': 'TRX',
                    'TRC20': 'TRX',
                },
            },
            # error-message -> exception mappings ('exact' matches whole message,
            # 'broad' matches a substring)
            'exceptions': {
                'exact': {
                    'Unsupported currency.': BadRequest,  # {"error":"Unsupported currency."}
                    'Unsupported order type': BadRequest,  # {"error":"Unsupported order type"}
                    'Invalid body': BadRequest,  # {"error":"Invalid body"}
                    'Invalid Signature': AuthenticationError,  # {"error":"Invalid Signature"}
                    'Address not in whitelist.': BadRequest,
                },
                'broad': {
                    'Invalid amount': InvalidOrder,  # {"error":"Invalid amount 0.0000000001, decimal limit is 8."}
                    'Balance for ': InsufficientFunds,  # {"error":"Balance for eth not enough, only has 0, but ordered 0.01."}
                    'Invalid ': BadRequest,  # {"error":"Invalid price -1."}
                    'Wrong parameter': BadRequest,  # {"error":"Wrong parameter: from"}
                },
            },
            'commonCurrencies': {
            },
        })
async def fetch_currencies(self, params={}):
response = await self.publicGetProvisioningCurrencies(params)
currencies = self.safe_value(response, 'data', [])
#
# {
# "data":[
# {
# "currency":"eth",
# "withdrawFee":"0.007",
# "minWithdraw":"0.001",
# "maxWithdraw":"1000",
# "maxDailyWithdraw":"2000",
# "withdraw":true,
# "deposit":true,
# "depositConfirmation":"12"
# }
# ]
# }
#
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
currencyId = self.safe_string(currency, 'currency')
code = self.safe_currency_code(currencyId)
deposit = self.safe_value(currency, 'deposit')
withdraw = self.safe_value(currency, 'withdraw')
fee = self.safe_number(currency, 'withdrawFee')
withdrawMin = self.safe_number(currency, 'minWithdraw')
withdrawMax = self.safe_number(currency, 'maxWithdraw')
limits = {
'withdraw': {
'min': withdrawMin,
'max': withdrawMax,
},
'amount': {
'min': None,
'max': None,
},
}
result[code] = {
'id': currencyId,
'code': code,
'info': currency,
'type': None,
'name': None,
'active': deposit and withdraw,
'deposit': deposit,
'withdraw': withdraw,
'fee': fee,
'precision': None,
'limits': limits,
}
return result
async def fetch_markets(self, params={}):
response = await self.publicGetProvisioningTradingPairs()
markets = self.safe_value(response, 'data', [])
#
# {
# "data":[
# {
# "pair":"shib_twd",
# "base":"shib",
# "quote":"twd",
# "basePrecision":"8",
# "quotePrecision":"6",
# "minLimitBaseAmount":"100000",
# "maxLimitBaseAmount":"5500000000",
# "minMarketBuyQuoteAmount":"1000",
# "orderOpenLimit":"200",
# "maintain":false,
# "orderBookQuotePrecision":"6",
# "orderBookQuoteScaleLevel":"5"
# }
# ]
# }
#
result = []
for i in range(0, len(markets)):
market = markets[i]
active = not self.safe_value(market, 'maintain')
pair = self.safe_string(market, 'pair')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
id = pair
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'price': self.safe_integer(market, 'quotePrecision'),
'amount': self.safe_integer(market, 'basePrecision'),
}
limits = {
'amount': {
'min': self.safe_number(market, 'minLimitBaseAmount'),
'max': self.safe_number(market, 'maxLimitBaseAmount'),
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'leverage': {
'min': None,
'max': None,
},
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': base,
'quoteId': quote,
'settle': None,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'derivative': False,
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'limits': limits,
'precision': precision,
'active': active,
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# {
# "pair":"btc_twd",
# "lastPrice":"1182449.00000000",
# "isBuyer":false,
# "priceChange24hr":"-1.99",
# "volume24hr":"9.13089740",
# "high24hr":"1226097.00000000",
# "low24hr":"1181000.00000000"
# }
#
marketId = self.safe_string(ticker, 'pair')
market = self.safe_market(marketId, market)
symbol = self.safe_string(market, 'symbol')
return self.safe_ticker({
'symbol': symbol,
'timestamp': None,
'datetime': None,
'high': self.safe_string(ticker, 'high24hr'),
'low': self.safe_string(ticker, 'low24hr'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': self.safe_string(ticker, 'lastPrice'),
'last': self.safe_string(ticker, 'lastPrice'),
'previousClose': None,
'change': None,
'percentage': self.safe_string(ticker, 'priceChange24hr'),
'average': None,
'baseVolume': self.safe_string(ticker, 'volume24hr'),
'quoteVolume': None,
'info': ticker,
}, market, False)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = await self.publicGetTickersPair(self.extend(request, params))
ticker = self.safe_value(response, 'data', {})
#
# {
# "data":{
# "pair":"btc_twd",
# "lastPrice":"1182449.00000000",
# "isBuyer":false,
# "priceChange24hr":"-1.99",
# "volume24hr":"9.13089740",
# "high24hr":"1226097.00000000",
# "low24hr":"1181000.00000000"
# }
# }
#
return self.parse_ticker(ticker, market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTickers()
tickers = self.safe_value(response, 'data', [])
#
# {
# "data":[
# {
# "pair":"xrp_twd",
# "lastPrice":"21.26110000",
# "isBuyer":false,
# "priceChange24hr":"-6.53",
# "volume24hr":"102846.47084802",
# "high24hr":"23.24460000",
# "low24hr":"21.13730000"
# }
# ]
# }
#
return self.parse_tickers(tickers, symbols)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'pair': self.market_id(symbol),
}
if limit is not None:
request['limit'] = limit
response = await self.publicGetOrderBookPair(self.extend(request, params))
#
# {
# "bids":[
# {
# "price":"1175271",
# "amount":"0.00022804",
# "count":1,
# "total":"0.00022804"
# }
# ],
# "asks":[
# {
# "price":"1176906",
# "amount":"0.0496",
# "count":1,
# "total":"0.0496"
# }
# ]
# }
#
return self.parse_order_book(response, symbol, None, 'bids', 'asks', 'price', 'amount')
def parse_trade(self, trade, market):
#
# fetchTrades
# {
# "timestamp":1644651458,
# "price":"1180785.00000000",
# "amount":"0.00020000",
# "isBuyer":false
# }
#
# fetchMyTrades
# {
# "tradeId":"5685030251",
# "orderId":"9669168142",
# "price":"11821.8",
# "action":"SELL",
# "baseAmount":"0.01",
# "quoteAmount":"118.218",
# "fee":"0.236436",
# "feeSymbol":"BNB",
# "isTaker":true,
# "timestamp":1644905714862,
# "createdTimestamp":1644905714862
# }
#
id = self.safe_string(trade, 'tradeId')
orderId = self.safe_string(trade, 'orderId')
timestamp = None
if id is None:
timestamp = self.safe_timestamp(trade, 'timestamp')
else:
timestamp = self.safe_integer(trade, 'timestamp')
marketId = self.safe_string(trade, 'pair')
market = self.safe_market(marketId, market)
symbol = self.safe_string(market, 'symbol')
price = self.safe_string(trade, 'price')
type = self.safe_string_lower(trade, 'type')
side = self.safe_string_lower(trade, 'action')
if side is None:
isBuyer = self.safe_value(trade, 'isBuyer')
if isBuyer:
side = 'buy'
else:
side = 'sell'
amount = self.safe_string(trade, 'amount')
if amount is None:
amount = self.safe_string(trade, 'baseAmount')
fee = None
feeAmount = self.safe_string(trade, 'fee')
feeSymbol = self.safe_currency_code(self.safe_string(trade, 'feeSymbol'))
if feeAmount is not None:
fee = {
'cost': feeAmount,
'currency': feeSymbol,
'rate': None,
}
isTaker = self.safe_value(trade, 'isTaker')
takerOrMaker = None
if isTaker is not None:
if isTaker:
takerOrMaker = 'taker'
else:
takerOrMaker = 'maker'
return self.safe_trade({
'id': id,
'info': trade,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'takerOrMaker': takerOrMaker,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': None,
'fee': fee,
}, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = await self.publicGetTradesPair(self.extend(request, params))
trades = self.safe_value(response, 'data', [])
#
# {
# "data":[
# {
# "timestamp":1644651458,
# "price":"1180785.00000000",
# "amount":"0.00020000",
# "isBuyer":false
# }
# ]
# }
#
return self.parse_trades(trades, market, since, limit)
async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.publicGetProvisioningLimitationsAndFees(params)
tradingFeeRate = self.safe_value(response, 'tradingFeeRate', {})
first = self.safe_value(tradingFeeRate, 0)
#
# {
# "tradingFeeRate":[
# {
# "rank":0,
| |
# source: perellonieto/background_check -- cwc/evaluation/rgp.py (1-10 stars)
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import auc
import warnings
class RGP:
    """This class represents a Recall_1-Gain_2-Precision_1 (RGP) curve.

    An object of class RGP is built based on the result of two models:
    1- The first one is a training data vs reject data classifier; its
    recall and precision values at various thresholds are used to build the
    curve.
    2- The second (binary) classifier was trained to separate both classes of
    the original training data. Its gain values for all recall values of the
    first classifier are multiplied by the corresponding precision values and
    used to build the curve.

    Args:
        step1_reject_scores ([float]): Positive scores for the reject data,
            obtained from the training data vs reject data classifier.
        step1_training_scores ([float]): Positive scores for the training
            data, obtained from the same classifier. An instance "x" is
            accepted when "S_1(x) >= theta" for threshold "theta".
        step2_training_scores ([float]): Positive scores for the training
            data, obtained from the second classifier.
        training_labels ([int]): Labels of the training data. 1 for the
            positive class and 0 for the negative class.
        gain (str): Which type of gain is used to evaluate the second
            classifier.
        step2_threshold (float): Threshold used to calculate the gain of the
            second classifier.

    Attributes:
        thresholds ([float]): Thresholds corresponding to the recall and
            precision values.
        recalls ([float]): Recalls of the first classifier.
        precisions ([float]): Precisions of the first classifier.
        gains ([float]): Gain values of the second classifier at each
            operating point.
        gain_type (str): Which type of gain is used for the second classifier.
        positive_proportion (float): proportion of positives (true training
            data) scored by the first classifier.
    """

    def __init__(self, step1_reject_scores, step1_training_scores,
                 step2_training_scores, training_labels, gain="accuracy",
                 step2_threshold=0.5):
        # Candidate thresholds: every distinct score, descending, preceded by
        # +inf so that the first operating point accepts nothing.
        # NOTE: np.alen() was removed from NumPy; len() is equivalent here
        # because every argument is a 1-d sequence.
        pos_scores = np.append(np.inf, np.unique(np.append(
            step1_training_scores, step1_reject_scores))[::-1])
        self.thresholds = np.ones(len(pos_scores)) * -1.0  # -1.0 marks unused rows
        self.recalls = np.empty(len(pos_scores))
        self.precisions = np.empty(len(pos_scores))
        self.gains = np.empty(len(pos_scores))
        self.gain_type = gain
        self.positive_proportion = len(step1_training_scores)/\
            (len(step1_training_scores) + len(step1_reject_scores))
        for i, threshold in enumerate(pos_scores):
            n_accepted_rejects = np.sum(step1_reject_scores >= threshold)
            accepted_training = step1_training_scores >= threshold
            new_recall = np.sum(accepted_training) / len(training_labels)
            # only record operating points where the recall actually changes
            if i == 0 or new_recall != self.recalls[i-1]:
                self.thresholds[i] = threshold
                self.recalls[i] = new_recall
                if (np.sum(accepted_training) + n_accepted_rejects) == 0.0:
                    # nothing accepted: precision is undefined
                    self.precisions[i] = np.nan
                else:
                    self.precisions[i] = np.sum(accepted_training) / (np.sum(accepted_training) + n_accepted_rejects)
                accepted_scores = step2_training_scores[accepted_training]
                accepted_labels = training_labels[accepted_training]
                self.gains[i] = calculate_gain(accepted_scores, accepted_labels,
                                               gain=gain, threshold=step2_threshold)
        # drop the rows that were never filled in (duplicate recalls)
        self.recalls = self.recalls[self.thresholds > -1.0]
        self.gains = self.gains[self.thresholds > -1.0]
        self.precisions = self.precisions[self.thresholds > -1.0]
        self.thresholds = self.thresholds[self.thresholds > -1.0]
        pi = np.sum(training_labels == 1) / len(training_labels)
        self.f_betas = calculate_f_betas(self.recalls, self.precisions, self.gains, pi=pi, min_beta=0.5)
        self.values = calculate_values(self.recalls, self.precisions, self.gains)

    def plot(self, fig=None, baseline=True, accuracy=False, precision=False):
        """Plot the RGP curve: Recall_1 on the x-axis and, by default, the
        gains of the second classifier multiplied by the corresponding
        precisions of the first classifier on the y-axis.

        Args:
            fig (object): An object of a Matplotlib figure
                (as obtained by using Matplotlib's figure() function).
            baseline (bool): True means that the baseline will be drawn.
            accuracy (bool): plot raw Accuracy_2 instead of the product.
            precision (bool): plot raw Precision_1 instead of the product.

        Returns:
            Nothing.
        """
        # Ignore warnings from matplotlib
        warnings.filterwarnings("ignore")
        if fig is None:
            fig = plt.figure()
        if accuracy:
            plt.plot(self.recalls, self.gains, 'k.-')
            plt.ylabel("$\mathrm{Accuracy}_2$")
        elif precision:
            plt.plot(self.recalls, self.precisions, 'k.-')
            plt.ylabel("$\mathrm{Precision}_1$")
        else:
            plt.plot(self.recalls, self.gains * self.precisions, 'k.-')
            plt.ylabel("$\mathrm{Accuracy'}_2$")
        # plt.plot(self.recalls, self.gains * self.positive_proportion, 'k--')
        # index = np.argmax(self.f_betas)
        # plt.scatter(self.recalls[index], self.gains[index] *
        #             self.precisions[index], s=300, c='w', marker='o')
        # index = np.argmax(self.values)
        # plt.scatter(self.recalls[index], self.gains[index] *
        #             self.precisions[index], s=70, c='r', marker='o')
        plt.xlabel("$\mathrm{Recall}_1$")
        axes = plt.gca()
        axes.set_xlim([0.0, 1.01])
        axes.set_ylim([0.0, 1.0])
        axes.spines['top'].set_visible(False)
        axes.spines['right'].set_visible(False)
        axes.get_xaxis().tick_bottom()
        axes.get_yaxis().tick_left()
        # plt.legend(['RGP curve', 'Baseline'])
        plt.show()

    def get_optimal_step1_threshold(self):
        """Return the threshold with the highest f_beta on the RGP curve.

        Returns:
            float: The threshold with the highest f_beta on the RGP curve.
        """
        return self.thresholds[np.argmax(self.f_betas)]

    def calculate_area(self):
        """Calculate the area under the RGP curve with the trapezoid rule.

        The curve points are sorted by recall explicitly because the
        'reorder' argument of sklearn's auc() was removed.

        Returns:
            float: The area under the RGP curve.
        """
        order = np.argsort(self.recalls)
        return auc(self.recalls[order], (self.gains * self.precisions)[order])
def calculate_gain(accepted_scores, accepted_labels, gain="accuracy", threshold=0.5):
    """Calculate the gain of the second classifier on the true training
    instances accepted by the first classifier.

    Fix: np.alen() was removed from NumPy; len() is used instead (equivalent
    for the 1-d arrays passed here).

    Args:
        accepted_scores ([float]): Positive scores obtained from the second
            classifier for the true training data accepted by the first
            classifier. An instance is labelled positive when its score is
            >= *threshold*.
        accepted_labels ([int]): Labels of the accepted true training data.
            1 for the positive class and 0 for the negative class.
        gain (str): Which type of gain is used to evaluate the second
            classifier. Only "accuracy" is implemented; any other value
            returns None.
        threshold (float): Decision threshold of the second classifier.

    Returns:
        float: The gain (accuracy) of the second classifier on the accepted
        instances, or NaN when nothing was accepted.
    """
    if gain == "accuracy":
        if len(accepted_labels) == 0:
            # nothing accepted: accuracy is undefined
            return np.nan
        else:
            # correct when (score >= threshold) agrees with (label == 1)
            n_correct_instances = np.sum(np.logical_not(
                np.logical_xor(accepted_scores >= threshold, accepted_labels == 1)))
            return n_correct_instances / len(accepted_labels)
def calculate_f_betas(recalls, precisions, gains, pi=0.5, min_beta=0.5):
    """Calculate f-beta values from the given recall and precision values.

    beta is derived from the second classifier's gain at recall_1 = 1 (the
    point where the first classifier accepts all true training data), by
    linear interpolation between two extremes:

    1- gain_2 = 1.0 at recall_1 = 1: the second classifier is perfect on the
    full training set, so only precision_1 matters; beta = min_beta (lower
    beta weights precision more).
    2- gain_2 = pi (worst possible accuracy, pi = proportion of positives):
    recall_1 and precision_1 are equally (un)important; beta = 1.

    Args:
        recalls ([float]): Recalls of the first classifier.
        precisions ([float]): Precisions of the first classifier.
        gains ([float]): Gains of the second classifier.
        pi (float): Proportion of positives in the true training data.
        min_beta (float): Minimum value of beta.

    Returns:
        [float]: The calculated f_betas (NaN entries are mapped to 0.0).
    """
    warnings.filterwarnings("ignore")
    # line through (gain=1, beta=min_beta) and (gain=pi, beta=1)
    slope = (1.0 - min_beta) / (pi - 1.0)
    gain_at_full_recall = gains[np.argmax(recalls)]
    beta = slope * gain_at_full_recall + min_beta - slope
    beta_squared = beta ** 2.0
    f_betas = (1 + beta_squared) * ((precisions * recalls) /
                                    (beta_squared * precisions + recalls))
    # 0/0 operating points are undefined; score them as 0
    f_betas[np.isnan(f_betas)] = 0.0
    return f_betas
def calculate_values(recalls, precisions, gains):
"""This function calculates the optimization value corresponding
to each operating point of the aggregated classifiers.
Args:
recalls ([float]): Recalls | |
*1.5
D_height = c_height // 16 # Altura de disco
D_distancia = (D_max_width - D_min_width) // (2 * max(1, master - 1)) # Distancia entre discos
# Coordenadas de disco (rectangulo) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
x1, y1 = ((c_distancia - D_max_width) // 2), (y2 - D_height // 3)
x2, y2 = (x1 + D_max_width), (y1 + D_height)
# Generador de discos en las columnas - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#self.columnitas = [ [], [], [] ]
self.discos = {}
colores = ['slateBlue', 'cyan2', 'forest green', 'chartreuse', 'yellow', 'orange red', 'red', ]
r = 0 # contador para la posicion de colores
for i in range(master, 0, -1): # Genera los discos desde el disco n (el mas pequeño) hasta 0
disc = c.create_rectangle(x1, y1, x2, y2, fill = colores[r % len(colores)], tags = 'token')
self.discos[i] = disc # Agrega las figuras de los discos a 'discos'
#self.columnitas[0].append(i) # A REEMPLAZAR.REVERSE() # Añade los discos a la primer columna (elemento 0 en columnitas)
# [ [n, n-1, n-2, ..., 1], [], [] ]
x1, x2 = (x1 + D_distancia), (x2 - D_distancia) # Modifica las dimensiones para los siguientes discos
y1, y2 = (y1 - D_height - 2), (y2 - D_height - 2)
r += 1 # contador
self.tk.update()
self.tk.after(25)
def on_token_press(self, event):
c = self.canvas
'''Begining drag of an object'''
# record the item and its location
self._drag_data["item"] = c.find_closest(event.x, event.y)[0]
self._drag_data["x"] = event.x
self._drag_data["y"] = event.y
def on_token_release(self, event):
ax1, ay1, ax2, ay2 = c.bbox(self.columnas[0])
bx1, by1, bx2, by2 = c.bbox(self.columnas[1])
cx1, cy1, cx2, cy2 = c.bbox(self.columnas[2])
'''End drag of an object'''
# reset the drag information
self._drag_data["item"] = None
self._drag_data["x"] = 0
self._drag_data["y"] = 0
def on_token_motion(self, event):
c = self.canvas
'''Handle dragging of an object'''
# compute how much the mouse has moved
delta_x = event.x - self._drag_data["x"]
delta_y = event.y - self._drag_data["y"]
# move the object the appropriate amount
c.move(self._drag_data["item"], delta_x, delta_y)
# record the new position
self._drag_data["x"] = event.x
self._drag_data["y"] = event.y
    def ViajeDiscos(self, viajero, a, b, game = None):
        """Animate moving disc *viajero* from column *a* to column *b* and
        update the logical column state (self.columnitas) accordingly.

        The animation has three phases: lift the disc above column *a*, slide
        it horizontally to column *b*, then lower it onto the top of that
        column's stack. When *game* is not None the horizontal/vertical
        auto-movement is suppressed (interactive play mode).
        """
        if self.columnitas[a][0] != viajero: raise RuntimeError # Assertion: disc must be on top of column a
        if self.columnitas[a] != []: del self.columnitas[a][0]
        disc = self.discos[viajero]
        c = self.canvas
        # Phase 1: lift the disc until it is above the top of column 'a'
        ax1, ay1, ax2, ay2 = c.bbox(self.columnas[a]) # bounding box of column 'a'
        while 1:
            x1, y1, x2, y2 = c.bbox(disc)
            if y2 < ay1: break
            c.move(disc, 0, -1)
            self.tk.update()
            time.sleep(0.002)
        # Phase 2: slide horizontally until centered over column 'b'
        bx1, by1, bx2, by2 = c.bbox(self.columnas[b]) # bounding box of column 'b'
        newcenter = (bx1+bx2)//2
        while 1:
            x1, y1, x2, y2 = c.bbox(disc)
            center = (x1 + x2) // 2
            if center == newcenter: break
            # NOTE(review): when game is not None and the disc is off-center
            # this loop never moves the disc and cannot terminate — confirm
            # the intended interactive behaviour
            if game is None:
                if center > newcenter: c.move(disc, -1, 0)
                else: c.move(disc, 1, 0)
                self.tk.update()
                time.sleep(0.0045)
        c.addtag_all("all")
        # Phase 3: lower the disc onto the top disc of column 'b'
        D_height = y2-y1
        newbottom = by2 - D_height*len(self.columnitas[b]) - 2
        while 1:
            x1, y1, x2, y2 = c.bbox(disc)
            if y2 >= newbottom: break
            if (self._drag_data["item"] != None) or (game is None):
                c.move(disc, 0, 1)
                self.tk.update()
                time.sleep(0.002)
            else: break
        if b == 0:
            # small extra drop so discs rest correctly on the first column
            c.move(disc, 0, 9)
            self.tk.update()
        # Record the new logical position: disc goes on top of column 'b'
        if self.columnitas[b] != []:
            self.columnitas[b].insert(0, viajero)
        else:
            self.columnitas[b].append(viajero)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# GUI for animated towers-of-Hanoi-game with upto 10 discs:
'''
def displayMove(self, move):
"""method to be passed to the Hanoi-engine as a callback
to report move-count"""
self.moveCntLbl.configure(text = "move:\n%d" % move)
def adjust_nr_of_discs(self, e):
"""callback function for nr-of-discs-scale-widget"""
self.hEngine.nrOfDiscs = self.discs.get()
self.reset()
def adjust_speed(self, e):
"""callback function for speeds-scale-widget"""
self.hEngine.speed = self.tempo.get()
def setState(self, STATE):
"""most simple representation of a finite state machine"""
self.state = STATE
try:
if STATE == "START":
self.resetBtn.configure(state=DISABLED) # reset(self): self.algo.reset() self.setState("START") restores START state for a new game
self.startBtn.configure(text="start", state=NORMAL)
self.stepBtn.configure(state=NORMAL)
elif STATE == "RUNNING":
self.resetBtn.configure(state=DISABLED)
self.startBtn.configure(text="pause", state=NORMAL)
self.stepBtn.configure(state=DISABLED)
elif STATE == "PAUSE":
self.resetBtn.configure(state=NORMAL)
self.startBtn.configure(text="resume", state=NORMAL)
self.stepBtn.configure(state=NORMAL)
elif STATE == "DONE":
self.resetBtn.configure(state=NORMAL)
self.startBtn.configure(text="start", state=DISABLED)
self.stepBtn.configure(state=DISABLED)
elif STATE == "TIMEOUT":
self.resetBtn.configure(state=DISABLED)
self.startBtn.configure(state=DISABLED)
self.stepBtn.configure(state=DISABLED)
except TclError: pass
def start(self):
# callback function for start button, which also serves as pause button. Makes hEngine running until done or interrupted
if self.state in ["START", "PAUSE"]:
self.setState("RUNNING")
if self.hEngine.run():
self.setState("DONE")
else:
self.setState("PAUSE")
elif self.state == "RUNNING":
self.setState("TIMEOUT")
self.hEngine.stop()
def step(self):
# callback function for step button. makes hEngine perform a single step
self.setState("TIMEOUT")
if self.hEngine.step():
self.setState("DONE")
else:
self.setState("PAUSE")
global pts
ndiv = slider.get()
'''
# _________________________________________________________________________________________________________
# MAIN
# _________________________________________________________________________________________________________
def main():
# -----------------------------------------------------------------------------
# MAIN FUNCTIONS
# -----------------------------------------------------------------------------
def SiGui(i,a,b):
if ConGui is 'y' or ConGui is 'Y':
if (Autoresovlver.lower() in {'y', ''}):
clase.ViajeDiscos(i, a, b)
else:
clase.ViajeDiscos(i, a, b, game = 'y')
# Movimientos de los discos antes mencionados
def Movimientos(a,b,c):
if cantidad%2 == 0:
if Elemento%2 == 0:
SiGui(columnitas[c][0], c, a)
#columnitas[a].insert(0, columnitas[c].pop(0))
else:
SiGui(columnitas[c][0], c, b)
#columnitas[b].insert(0, columnitas[c].pop(0))
else:
if Elemento%2 == 0:
SiGui(columnitas[c][0], c, b)
#columnitas[b].insert(0, columnitas[c].pop(0))
else:
SiGui(columnitas[c][0], c, a)
#columnitas[a].insert(0, columnitas[c].pop(0))
# Imprime la serie de pasos a seguir
def imprimir():
a.reverse()
b.reverse()
c.reverse()
print("columna a :", a)
print("columna b :", b)
print("columna c :", c)
a.reverse()
b.reverse()
c.reverse()
print("---------------------------------------------------------------------"*2)
# condiciones que deben cumplir los casos en particular cuando la pieza a evaluar se encontraba en la lista EncontradoEn
def condiciones(EncontradoEn,b,c):
return EncontradoEn != [] and EncontradoEn[0] == Elemento and (b == [] or b[0] > EncontradoEn[0] or c == [] or c[0] > EncontradoEn[0])
'''def fcantidad(value = None):
if value is None: confirmar = 'n' # centinela
else: confirmar = value
print(value)
cantidad = DesinfectanteDeTipo("Cantidad de discos a pasar: ", tipo = int, minimo = 0)
if confirmar.lower() not in {'y', ''}:
if cantidad == 'break':
confirmar = whl('¿Seguro de que quere salir? (Y/N) ')
if confirmar.lower() not in {'y',''}: fcantidad(value = 'y')
if (value is None) and (confirmar.lower() in {'y', ''}): return confirmar
else: return cantidad'''
# _________________________________________________________________________________________________________
# MAIN BODY
# _________________________________________________________________________________________________________
CR()
CONDICION = whl("¿Encontrar pasos para resolver una Torre de Hanoi? (Y/N) ", 'y')
CR()
Proseguir, confirmar, ConGui = 'y', 'n', 0
while (CONDICION is "Y" or CONDICION is "y"):
cantidad = DesinfectanteDeTipo("Cantidad de discos a pasar: ", tipo = int, minimo = 0)
if cantidad == 'break':
confirmar = whl('¿Seguro de que quere salir? (Y/N) ')
if confirmar.lower() not in {'y', ''}: cantidad = DesinfectanteDeTipo (
"Cantidad de discos a pasar: ", tipo = int, minimo = 0
)
if cantidad == 'break': break
if confirmar.lower() in {'y', ''}: break
a, b, c, D = [], [], [], []
pasos=0
for l in range(cantidad):
a.append(l+1)
D.append(l+1)
pasos = 2*pasos+1
# Cantidad de pasos por hacer, a = [1,2,3,4,...,n] donde cada numero respresenta el nivel desde arriba hacia abajo
CR()
Proseguir = whl("Seran {} pasos. ¿Desea continuar? (Y/N) ".format(pasos), 'y')
if Proseguir.lower() not in {'y'}:
confirmar = whl('¿Seguro de que quere salir? (Y/N) ', 'y')
if confirmar.lower() in {'y'}: break
ConGui = whl('¿Visualizar con Interfaz Grafica? (Y/N) ', 'y')
if ConGui.lower() in {'y'}:
columnitas = [a,b,c]
master = cantidad
Autoresovlver = whl('¿Resolver de manera automatica? (Y/N) ')
clase = graficos(master, columnitas)
imprimir()
i = 0
while c != D:
columnitas = [a,b,c]
Elemento = D[i%cantidad]
i += 1
if condiciones(a,b,c): # condiciones a, b, c
Movimientos(2,1,0) # Movimientos c, b, a # columnitas[2], columnitas[1], | |
# source: treetopper/timber.py
from treetopper.log import Log
from treetopper._constants import (
math,
TAPER_EQ_COEF,
TAPER_EQ
)
class TimberQuick(object):
    """TimberQuick is a class that will virtually cruise a tree based on it's
    species, DBH, total height and plot factor. For fixed-area plots use the negative inverse of the plot size (1/30th ac = -30),
    for variable-area plots use the Basal Area Factor (BAF) (40 BAF = 40).
    Preferred Log Length and Minimum Log Length are needed
    but set at the default industry standard of 40 foot preferred and 16 foot minimum.
    TimberQuick uses stem-taper equations from Czaplewski, Kozak, or Wensel
    (depending on species) to calculate the DIB (diameter inside bark) at any stem height.
    TimberQuick will instantiate the tree with common individual and per acre metrics based on the input args.
    To cruise the tree, first TimberQuick determines the merchantable DIB of the tree, this is calculated from
    40% of the DIB at a stem height of 17 feet (the FORM height). This is industry standard.
    TimberQuick then correlates that Merch DIB to a merchantable height. The tree is then split up into logs,
    based on this Merch Height with priority given to the preferred log length and if preferred log length
    cannot be achieved, then if the remaining length up to Merch Height is greater than or equal to the minimum log length,
    that final log is added.
    Log metrics are sent to the Log Class, to which their volumes in Board Feet
    (using Scribner Coefficients based on Log Length and top DIB) and Cubic Feet (based on the Two-End Conic Cubic Foot Rule).
    Log grades are determined by species, minimum log lengths and minimum top DIBs set forth by the
    Official Rules for the Log Scaling and Grading Bureaus. Log defect is always 0%
    For inventories, this class is meant to be added to the Plot Class using the Plot Class method of add_tree"""
    def __init__(self, plot_factor: float, species: str, dbh: float, total_height: int,
                 preferred_log_length: int = 40, minimum_log_length: int = 16):
        self.plot_factor = float(plot_factor)
        self.species = str(species).upper()
        self.dbh = float(dbh)
        self.height = int(total_height)
        self.pref_log = int(preferred_log_length)
        self.min_log = int(minimum_log_length)
        # Height-to-diameter ratio; DBH is in inches, so /12 converts it to feet.
        self.hdr = self.height / (self.dbh / 12)
        # Basal area in square feet; 0.005454 is the standard forestry constant.
        self.ba = self.dbh ** 2 * 0.005454
        # Relative density (Curtis RD): basal area over the square root of DBH.
        self.rd = self.ba / math.sqrt(self.dbh)
        self.merch_dib = self._get_merch_dib()
        self.merch_height = self._get_merch_height()
        # Per-acre expansions; filled in from the plot factor just below.
        self.tpa, self.ba_ac, self.rd_ac = 0, 0, 0
        self._get_tpa_ba_ac_rd_ac()
        # Volume metrics (board feet, cubic feet, per-acre values and VBAR)
        # are populated as a side effect of _get_volume_and_logs().
        self.bf = 0
        self.cf = 0
        self.bf_ac = 0
        self.cf_ac = 0
        self.vbar = 0
        self.logs = self._get_volume_and_logs()
    def __getitem__(self, item):
        # Dict-style access to any instance attribute, e.g. tree['dbh'].
        return self.__dict__[item]
    def get_any_dib(self, stem_height):
        """Returns the diameter inside bark (DIB) at any given stem height"""
        # The species code selects both the taper equation and its
        # coefficients; the result is rounded down to a whole inch.
        return math.floor(TAPER_EQ[self.species](self.dbh, self.height, stem_height, *TAPER_EQ_COEF[self.species]))
    def _get_tpa_ba_ac_rd_ac(self):
        """Calculates the Trees per Acre, Basal Area per Acre and Relative Density per Acre
        based on the plot factor"""
        if self.plot_factor == 0:
            # Zero plot factor: leave all per-acre metrics at their 0 defaults.
            return
        elif self.plot_factor > 0:
            # Positive plot factor -> variable-radius plot (BAF cruising).
            self.tpa = self.plot_factor / self.ba
            self.ba_ac = self.plot_factor
            self.rd_ac = self.tpa * self.rd
        else:
            # Negative plot factor -> fixed-area plot; |factor| is the
            # per-acre expansion (e.g. -30 for a 1/30th-acre plot).
            self.tpa = abs(self.plot_factor)
            self.ba_ac = abs(self.plot_factor) * self.ba
            self.rd_ac = self.tpa * self.rd
    def _get_merch_dib(self):
        """Form Percent is the percent of DIB at the Form Height feet above ground,
        this percent will be rounded down for the merch DIB in inches.
        Industry standards are 40% and 17 feet"""
        return math.floor(0.40 * self.get_any_dib(17))
    def _get_merch_height(self):
        """Merch Height is calculated by a Divide and Conquer Algorithm with the starting check height
        at 75% of the total height. The starting merch height is found when the check DIB equals the Merch DIB.
        All DIBs are rounded down to their floor values, so there may be multiple stem heights with the same DIB integer.
        The final merch height will be the top extent of this stem height range"""
        notcheck = True
        floor = 0
        ceiling = self.height
        # Start the search at 75% of total height.
        chkhgt = (ceiling - floor) // 4 * 3
        while notcheck:
            chkdib = self.get_any_dib(chkhgt)
            if chkdib == self.merch_dib:
                # Walk upward (at most 20 ft) to find the top of the
                # stem-height range that shares this floored DIB value.
                for i in range(1, 21):
                    chkhgt += 1
                    chkdib_after = self.get_any_dib(chkhgt)
                    if chkdib_after != chkdib:
                        notcheck = False
                        break
            elif chkdib > self.merch_dib:
                # Stem still thicker than merch DIB: merch height is higher up.
                floor = chkhgt
                chkhgt = ceiling - ((ceiling - floor) // 2)
            else:
                # Stem thinner than merch DIB: merch height is lower down.
                ceiling = chkhgt
                chkhgt = ceiling - ((ceiling - floor) // 2)
        # The walk-up loop overshoots by one foot; step back to the top extent.
        return chkhgt - 1
    def _get_volume_and_logs(self):
        """Method for cruising the tree, this will determine the stem heights and lengths of the logs, which are sent to
        the Log Class for volume calculations, return a dictionary of the logs by log number"""
        stem_heights = self._calc_stem_heights()
        # Pairwise differences between successive stem heights give each
        # log's scaled length.
        lengths = [self._calc_log_length(stem_heights[i-1], stem_heights[i]) for i in range(1, len(stem_heights))]
        stem_heights.pop(0)
        logs = {}
        bf = 0
        cf = 0
        # Logs are numbered from 1 (butt log) upward.
        for i, (stem_height, length) in enumerate(zip(stem_heights, lengths)):
            log = Log(self, stem_height, length)
            bf += log.bf
            cf += log.cf
            logs[i+1] = log
        self.bf = bf
        self.cf = cf
        self.bf_ac = self.bf * self.tpa
        self.cf_ac = self.cf * self.tpa
        # VBAR: board-foot volume per square foot of basal area.
        self.vbar = self.bf / self.ba
        return logs
    def _calc_stem_heights(self):
        """Starting at stem height of 1 (stump height), master is updated with the log stem height calculated from
        self._calc_log_stem, if self._calc_log_stem returns None, all logs have been found and iteration is complete"""
        master = [1]
        # 401 is simply a safe upper bound on the number of logs in a stem.
        for i in range(401):
            # NOTE(review): _calc_log_stem is evaluated twice per iteration;
            # redundant but harmless since it is side-effect free.
            if not self._calc_log_stem(master[i]):
                break
            else:
                master.append(self._calc_log_stem(master[i]))
        return master
    def _calc_log_stem(self, previous_log_stem_height):
        """Using the previous_log_stem_height arg, it will check if the minimum log length added to previous stem height plus
        1 foot of in-between is greater than the merch height, if it is, it will return None and no more logs can be added. If not
        it will check if the preferred log length added to the previous stem height plus 1 foot of in-between is less than
        or equal to the merch height, if it is then the new stem height is returned and a 40 foot (or user defined preferred length)
        log will added. If not then the merch height is the returned and a final log is added with a length determined by the difference
        between the merch height and previous stem height"""
        # +1 accounts for the foot of trim between logs.
        min_log_check = previous_log_stem_height + self.min_log + 1
        if min_log_check > self.merch_height - 2:
            # Not even a minimum-length log fits below merch height.
            return None
        else:
            if previous_log_stem_height + 1 + self.pref_log <= self.merch_height:
                # A full preferred-length log fits.
                return previous_log_stem_height + self.pref_log + 1
            else:
                # Final, shorter log runs up to the merch height.
                return self.merch_height
    @staticmethod
    def _calc_log_length(previous_log_stem_height, current_log_stem_height):
        """Returns a log length in multiples of 2 (24, 26, 28... feet)"""
        # Subtract the 1-foot trim, then round down to an even length.
        return (current_log_stem_height - previous_log_stem_height - 1) // 2 * 2
class TimberFull(object):
"""TimberFull is a class that will cruise a tree based on it's based on the user-cruised logs. These logs can be manually
added to the class using the add_log method. Required arguments for add_log are stem height (int), log length (int),
log grade (str), and log defect (int). Log defect should be the whole number value of the estimated percent defect 10% = 10.
Like TimberQuick, TimberFull uses stem-taper equations from Czaplewski, Kozak, or Wensel
(depending on species) to calculate the DIB (diameter inside bark) at any stem height.
TimberFull will instantiate the tree with common individual and per acre metrics based on the input args.
When the user adds a log using the add_log method, the log metrics are sent to the Log Class,
to which their volumes in Board Feet (using Scribner Coefficients based on Log Length and top DIB)
and Cubic Feet (based on the Two-End Conic Cubic Foot Rule) are calculated.
For inventories, this class is meant to be added to the Plot Class using the Plot Class method of add_tree"""
def __init__(self, plot_factor: float, species: str, dbh: float, total_height: int):
self.plot_factor = float(plot_factor)
self.species = str(species).upper()
self.dbh = float(dbh)
self.height = int(total_height)
self.hdr = self.height / (self.dbh / 12)
self.ba = self.dbh ** 2 * 0.005454
self.rd = self.ba / math.sqrt(self.dbh)
self.tpa, self.ba_ac, self.rd_ac = 0, 0, 0
self._get_tpa_ba_ac_rd_ac()
self.bf = 0
self.cf = 0
self.bf_ac = 0
self.cf_ac = 0
self.vbar = 0
self.logs = {}
def __getitem__(self, item):
return self.__dict__[item]
def add_log(self, stem_height, length, grade, defect):
"""Adds Log Class to the logs dictionary of TimberFull and recalculates the tree's volumes and
volume-related metrics"""
if not self.logs:
self.logs[1] | |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import re
def get_str(cls):
    """Build a one-line summary of a residual-style block, e.g.
    ``ResBlk[shortcut[Conv2d]-residual[InstanceNorm2d-LeakyReLU0.2-Conv2d]]``.

    ``cls`` is a block instance exposing ``actv``, ``conv1``, ``upsample``
    and ``learned_sc`` (plus, optionally, ``norm1``, ``downsample`` and
    ``conv1x1``).
    """
    act = cls.actv.__class__.__name__
    if act == 'LeakyReLU':
        # Include the slope so different LeakyReLUs are distinguishable.
        act = '{}{}'.format(act, cls.actv.negative_slope)
    # A function-style norm has __name__; a module falls back to its class.
    if hasattr(cls, 'norm1'):
        norm = '{}'.format(getattr(cls.norm1, '__name__',
                                   cls.norm1.__class__.__name__))
    else:
        norm = ''
    parts = []
    if norm:
        parts.append(norm)
    parts.append(act)
    if cls.upsample:
        parts.append('UpNearest')
    parts.append(cls.conv1.__class__.__name__)
    inner = '-'.join(parts)
    if getattr(cls, 'downsample', False):
        inner += '-AvgPool2d'
    if cls.learned_sc:
        body = 'shortcut[{}]-residual[{}]'.format(
            cls.conv1x1.__class__.__name__, inner)
    else:
        body = 'residual[{}]'.format(inner)
    return '{}[{}]'.format(cls.__class__.__name__, body)
# ==================================================================#
# ==================================================================#
class ResBlk(nn.Module):
    """Pre-activation residual block with optional instance normalization,
    nearest-neighbor upsampling and average-pool downsampling.

    When the channel width changes (and a shortcut is in use) a 1x1 conv
    projects the skip path. The summed output is divided by sqrt(2) to keep
    unit variance; with ``no_shortcut=True`` only the residual branch is
    returned.
    """
    def __init__(self,
                 dim_in,
                 dim_out,
                 actv=nn.LeakyReLU(0.2),
                 normalize=False,
                 downsample=False,
                 upsample=False,
                 no_shortcut=False):
        super().__init__()
        self.actv = actv
        self.normalize = normalize
        self.upsample = upsample
        self.downsample = downsample
        self.no_shortcut = no_shortcut
        # A learned 1x1 projection is only needed when widths differ.
        self.learned_sc = dim_in != dim_out and not no_shortcut
        self._build_weights(dim_in, dim_out)
    def _build_weights(self, dim_in, dim_out):
        # Module registration order (conv1, conv2, norms, 1x1) is kept
        # stable so parameter initialization is reproducible.
        self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        if self.normalize:
            self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
            self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
        if self.learned_sc:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
    def _shortcut(self, x):
        out = x
        if self.upsample:
            out = F.interpolate(out, scale_factor=2, mode='nearest')
        if self.learned_sc:
            out = self.conv1x1(out)
        if self.downsample:
            out = F.avg_pool2d(out, 2)
        return out
    def _residual(self, x):
        out = self.norm1(x) if self.normalize else x
        out = self.actv(out)
        if self.upsample:
            out = F.interpolate(out, scale_factor=2, mode='nearest')
        out = self.conv1(out)
        if self.downsample:
            out = F.avg_pool2d(out, 2)
        if self.normalize:
            out = self.norm2(out)
        return self.conv2(self.actv(out))
    def __str__(self):
        return get_str(self)
    def forward(self, x):
        if self.no_shortcut:
            return self._residual(x)
        out = self._shortcut(x) + self._residual(x)
        return out / math.sqrt(2)  # rescale the sum back to unit variance
# ==================================================================#
# ==================================================================#
class AdaIN(nn.Module):
    """Adaptive Instance Normalization: instance-normalize the feature map,
    then rescale/shift it per channel with (gamma, beta) produced from a
    style code ``s`` by a single linear layer.
    """
    def __init__(self, style_dim, num_features):
        super().__init__()
        self.norm = nn.InstanceNorm2d(num_features, affine=False)
        # One linear layer emits both gamma and beta (hence the *2).
        self.fc = nn.Linear(style_dim, num_features * 2)
    def forward(self, x, s):
        normalized = self.norm(x)
        style = self.fc(s.view(s.size(0), -1))
        # Reshape to (b, 2*C, 1, 1) so gamma/beta broadcast over H and W.
        style = style.view(style.size(0), style.size(1), 1, 1)
        gamma, beta = style.chunk(2, dim=1)
        return (1 + gamma) * normalized + beta
# ==================================================================#
# ==================================================================#
class AdainResBlk(nn.Module):
    """Residual block whose normalization layers are AdaIN, i.e. conditioned
    on a style code ``s``; optionally upsamples (nearest) before each conv.

    When ``w_hpf == 0`` the residual is averaged with a (possibly projected)
    shortcut and rescaled to unit variance; otherwise only the residual
    branch is returned.
    """
    def __init__(self,
                 dim_in,
                 dim_out,
                 style_dim=64,
                 w_hpf=0,
                 actv=nn.LeakyReLU(0.2),
                 upsample=False):
        super().__init__()
        self.w_hpf = w_hpf
        self.actv = actv
        self.upsample = upsample
        # 1x1 projection whenever the channel width changes.
        self.learned_sc = dim_in != dim_out  # and w_hpf == 0
        self._build_weights(dim_in, dim_out, style_dim)
    def _build_weights(self, dim_in, dim_out, style_dim=64):
        # Registration order (conv1, conv2, norms, 1x1) kept stable for
        # reproducible initialization.
        self.conv1 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim_out, dim_out, 3, 1, 1)
        self.norm1 = AdaIN(style_dim, dim_in)
        self.norm2 = AdaIN(style_dim, dim_out)
        if self.learned_sc:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
    def _shortcut(self, x):
        out = x
        if self.upsample:
            out = F.interpolate(out, scale_factor=2, mode='nearest')
        if self.learned_sc:
            out = self.conv1x1(out)
        return out
    def _residual(self, x, s):
        out = self.actv(self.norm1(x, s))
        if self.upsample:
            out = F.interpolate(out, scale_factor=2, mode='nearest')
        out = self.conv1(out)
        out = self.actv(self.norm2(out, s))
        return self.conv2(out)
    def __str__(self):
        return get_str(self)
    def forward(self, x, s):
        res = self._residual(x, s)
        if self.w_hpf != 0:
            return res
        return (res + self._shortcut(x)) / math.sqrt(2)
# ==================================================================#
# ==================================================================#
class MODResBlk(nn.Module):
    """Residual-style block built from modulated convolutions (Conv2DMod)
    with noise injection after each conv, conditioned on a style code ``s``.

    When ``w_hpf == 0`` the residual is averaged with a (possibly projected)
    shortcut and rescaled to unit variance; otherwise only the residual
    branch is returned. Extra ``mod_config`` kwargs are forwarded to the
    Conv2DMod layers.
    """
    def __init__(self,
                 dim_in,
                 dim_out,
                 style_dim=64,
                 w_hpf=0,
                 actv=nn.LeakyReLU(0.2),
                 upsample=False,
                 **mod_config):
        super().__init__()
        self.w_hpf = w_hpf
        self.actv = actv
        self.upsample = upsample
        self.learned_sc = dim_in != dim_out and w_hpf == 0
        self._build_weights(dim_in, dim_out, style_dim, **mod_config)
    def _build_weights(self, dim_in, dim_out, style_dim=64, **mod_config):
        # Registration order (noise, conv1, conv2, 1x1) kept stable for
        # reproducible initialization. The single NoiseInjection module is
        # shared by both conv outputs.
        self.noise = NoiseInjection()
        self.conv1 = Conv2DMod(dim_in, dim_out, style_dim, 3, **mod_config)
        self.conv2 = Conv2DMod(dim_out, dim_out, style_dim, 3, **mod_config)
        if self.learned_sc:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
    def _shortcut(self, x):
        out = x
        if self.upsample:
            out = F.interpolate(out, scale_factor=2, mode='nearest')
        if self.learned_sc:
            out = self.conv1x1(out)
        return out
    def _residual(self, x, s, noise=None):
        out = self.actv(x)
        if self.upsample:
            out = F.interpolate(out, scale_factor=2, mode='nearest')
        out = self.noise(self.conv1(out, s), noise=noise)
        out = self.actv(out)
        out = self.noise(self.conv2(out, s), noise=noise)
        return out
    def __str__(self):
        return get_str(self)
    def forward(self, x, s, noise=None):
        res = self._residual(x, s, noise=noise)
        if self.w_hpf != 0:
            return res
        return (res + self._shortcut(x)) / math.sqrt(2)
# ==================================================================#
# ==================================================================#
class NoiseInjection(nn.Module):
    """Add per-pixel noise scaled by a single learnable scalar.

    The scalar starts at zero, so the module is an identity at the start of
    training. If ``noise`` is not supplied, a fresh (b, 1, h, w) standard
    normal map is drawn for every call.
    """
    def __init__(self):
        super().__init__()
        # Scalar gate on the noise contribution; initialized to zero.
        self.weight = nn.Parameter(torch.zeros(1))
    def forward(self, x, noise=None):
        if noise is None:
            b, _, h, w = x.shape
            if not self.training:
                # NOTE(review): reseeds the *global* torch RNG on every eval
                # forward to make evaluation noise repeatable -- this side
                # effect touches all other RNG consumers; confirm intended.
                torch.manual_seed(0)
            noise = x.new_empty(b, 1, h, w).normal_()
        return x + self.weight * noise
# ==================================================================#
# ==================================================================#
class Conv2DMod(nn.Module):
    """StyleGAN2-style modulated convolution.

    The per-sample style vector ``s`` is mapped through an affine layer to
    per-input-channel scales that modulate a shared convolution weight; with
    ``demod=True`` each modulated filter is re-normalized (demodulated) so
    the output keeps roughly unit variance. The batch dimension is folded
    into the channel dimension so one grouped conv applies a different
    weight to every sample.

    Args:
        in_dim: number of input channels.
        out_dim: number of output channels (stored as ``self.filters``).
        w_dim: dimension of the style/latent vector.
        kernel: square kernel size.
        demod: apply weight demodulation (default True).
        stride, dilation: used only in the same-padding computation; the
            conv itself always runs with stride 1.
        **kwargs: accepted for config compatibility; ignored.
    """
    def __init__(self,
                 in_dim,
                 out_dim,
                 w_dim,
                 kernel,
                 demod=True,
                 stride=1,
                 dilation=1,
                 **kwargs):
        super().__init__()
        self.num_features = in_dim
        self.filters = out_dim
        self.demod = demod
        self.kernel = kernel
        self.stride = stride
        self.dilation = dilation
        self.w_dim = w_dim
        # Affine map from latent w to one multiplicative scale per input channel.
        self.affine = nn.Linear(w_dim, in_dim)
        self.EPS = 1e-8
        self.weight = nn.Parameter(
            torch.randn((out_dim, in_dim, kernel, kernel)))
        nn.init.kaiming_normal_(self.weight,
                                a=0,
                                mode='fan_in',
                                nonlinearity='leaky_relu')
    def _get_same_padding(self, size, kernel, dilation, stride):
        """Padding that preserves the spatial size for the given conv params."""
        return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2
    def forward(self, x, s):
        """Apply the style-modulated convolution.

        ``x``: (b, in_dim, h, w) feature map; ``s``: (b, w_dim) style code.
        Returns a (b, out_dim, h, w) tensor.
        """
        # Fix: removed the redundant function-level
        # `import torch.nn.functional as F` (F is imported at module scope).
        b, _, h, w = x.shape
        base_weight = self.weight[None, :, :, :, :]  # (1, out, in, k, k)
        # NOTE(review): padding is derived from the height only, so
        # non-square inputs assume one padding fits both dims -- confirm.
        padding = self._get_same_padding(h, self.kernel, self.dilation,
                                         self.stride)
        latent_w = self.affine(s.view(b, -1))     # (b, in_dim)
        scale = latent_w[:, None, :, None, None]  # broadcast over out/k/k
        weights = base_weight * (scale + 1)
        if self.demod:
            # Demodulate: normalize each per-sample output filter to ~unit
            # norm. `keepdim` is the documented kwarg name (was `keepdims`).
            d = torch.rsqrt((weights**2).sum(dim=(2, 3, 4), keepdim=True) +
                            self.EPS)
            weights = weights * d
        # Fold the batch into the channel dim and run one grouped conv so
        # each sample is convolved with its own modulated weight.
        x = x.reshape(1, -1, h, w)
        _, _, *ws = weights.shape
        weights = weights.reshape(b * self.filters, *ws)
        x = F.conv2d(x, weights, padding=padding, groups=b)
        return x.reshape(-1, self.filters, h, w)
    def __repr__(self):
        name = self.__class__.__name__
        return (f'{name}[{self.num_features}, {self.filters}, {self.kernel}]')
########################################################################
########################################################################
########################################################################
# Returns a function that creates a normalization function
# that does not condition on semantic map
def get_nonspade_norm_layer(opt, norm_type='instance'):
    """Return a function that wraps a layer with a parameter-free
    normalization layer (and optional spectral norm).

    ``norm_type`` is one of 'instance', 'batch', 'sync_batch', 'none', each
    optionally prefixed with 'spectral' (e.g. 'spectralinstance'). ``opt``
    is accepted for API compatibility and is unused here.
    """
    # Helper: # output channels of the previous layer.
    def get_out_channel(layer):
        if hasattr(layer, 'out_channels'):
            return getattr(layer, 'out_channels')
        # Fall back to the leading dim of the weight (e.g. nn.Linear).
        return layer.weight.size(0)
    # This function will be returned.
    def add_norm_layer(layer):
        nonlocal norm_type
        # Bug fix: subnorm_type was previously assigned only inside the
        # 'spectral' branch, so plain 'instance'/'batch'/... norm types
        # raised UnboundLocalError below.
        subnorm_type = norm_type
        if norm_type.startswith('spectral'):
            layer = spectral_norm(layer)
            subnorm_type = norm_type[len('spectral'):]
        if subnorm_type == 'none' or len(subnorm_type) == 0:
            return layer
        # Remove bias in the previous layer, which is meaningless
        # since it has no effect after normalization.
        if getattr(layer, 'bias', None) is not None:
            delattr(layer, 'bias')
            layer.register_parameter('bias', None)
        if subnorm_type == 'batch':
            norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
        elif subnorm_type == 'sync_batch':
            norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer),
                                                 affine=True)
        elif subnorm_type == 'instance':
            norm_layer = nn.InstanceNorm2d(get_out_channel(layer),
                                           affine=False)
        else:
            raise ValueError('normalization layer %s is not recognized' %
                             subnorm_type)
        return nn.Sequential(layer, norm_layer)
    return add_norm_layer
# Creates SPADE normalization layer based on the given configuration
# SPADE consists of two steps. First, it normalizes the activations using
# your favorite normalization method, such as Batch Norm or Instance Norm.
# Second, it applies scale and bias to the normalized output, conditioned on
# the segmentation map.
# The format of |config_text| is spade(norm)(ks), where
# (norm) specifies the type of parameter-free normalization.
# (e.g. syncbatch, batch, instance)
# (ks) specifies the size of kernel in the SPADE module (e.g. 3x3)
# Example |config_text| will be spadesyncbatch3x3, or spadeinstance5x5.
# Also, the other arguments are
# |norm_nc|: the #channels of the normalized activations, hence the output dim of SPADE
# |label_nc|: the #channels of the input semantic map, hence the input dim of SPADE
class ACE(nn.Module):
def __init__(self,
config_text,
norm_nc,
label_nc,
ACE_Name=None,
w_dim=64,
status='train',
spade_params=None,
use_rgb=True):
super().__init__()
self.ACE_Name = ACE_Name
self.status = status
self.save_npy = True
self.Spade = SPADE(*spade_params)
self.use_rgb = use_rgb
self.style_length = w_dim
self.blending_gamma = nn.Parameter(torch.zeros(1), requires_grad=True)
self.blending_beta = nn.Parameter(torch.zeros(1), requires_grad=True)
self.noise_var = nn.Parameter(torch.zeros(norm_nc), requires_grad=True)
assert config_text.startswith('spade')
parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
pw = ks // 2
if param_free_norm_type == 'instance':
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'syncbatch':
self.param_free_norm = SynchronizedBatchNorm2d(norm_nc,
affine=False)
elif param_free_norm_type == 'batch':
self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
else:
raise ValueError(
'%s is not a recognized param-free norm type in SPADE' %
param_free_norm_type)
# The dimension of the intermediate embedding space. Yes, hardcoded.
if self.use_rgb:
self.create_gamma_beta_fc_layers()
self.conv_gamma = nn.Conv2d(self.style_length,
norm_nc,
kernel_size=ks,
padding=pw)
self.conv_beta = nn.Conv2d(self.style_length,
norm_nc,
kernel_size=ks,
padding=pw)
def forward(self, x, segmap, style_codes=None, obj_dic=None, noise=None):
# Part 1. generate parameter-free normalized activations
# if noise is None:
# noise = (torch.randn(x.shape[0], x.shape[3], x.shape[2], 1).to(x.device)
# added_noise = (noise * self.noise_var).transpose(1, 3)
# normalized = self.param_free_norm(x + added_noise)
if self.status != 'test':
noise = torch.randn(x.shape[0], x.shape[3], x.shape[2],
1).to(x.device)
added_noise |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.