input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
"Failed",
"Updating", "Deleting", "Creating".
:vartype provisioning_state: str or ~azure.mgmt.cdn.models.AfdProvisioningState
:ivar deployment_status: Possible values include: "NotStarted", "InProgress", "Succeeded",
"Failed".
:vartype deployment_status: str or ~azure.mgmt.cdn.models.DeploymentStatus
"""
_validation = {
'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes': {'maximum': 50, 'minimum': 0},
'provisioning_state': {'readonly': True},
'deployment_status': {'readonly': True},
}
_attribute_map = {
'load_balancing_settings': {'key': 'loadBalancingSettings', 'type': 'LoadBalancingSettingsParameters'},
'health_probe_settings': {'key': 'healthProbeSettings', 'type': 'HealthProbeParameters'},
'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes': {'key': 'trafficRestorationTimeToHealedOrNewEndpointsInMinutes', 'type': 'int'},
'response_based_afd_origin_error_detection_settings': {'key': 'responseBasedAfdOriginErrorDetectionSettings', 'type': 'ResponseBasedOriginErrorDetectionParameters'},
'session_affinity_state': {'key': 'sessionAffinityState', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'deployment_status': {'key': 'deploymentStatus', 'type': 'str'},
}
    def __init__(
        self,
        *,
        load_balancing_settings: Optional["LoadBalancingSettingsParameters"] = None,
        health_probe_settings: Optional["HealthProbeParameters"] = None,
        traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Optional[int] = None,
        response_based_afd_origin_error_detection_settings: Optional["ResponseBasedOriginErrorDetectionParameters"] = None,
        session_affinity_state: Optional[Union[str, "EnabledState"]] = None,
        **kwargs
    ):
        """Keyword-only constructor (msrest convention).

        Forwards every optional setting to the base-class initializer, then
        re-assigns the same values locally and resets the server-populated
        fields (marked readonly in ``_validation``) to ``None``.

        :keyword load_balancing_settings: Load balancing settings for a backend pool.
        :keyword health_probe_settings: Health probe settings used to determine origin health.
        :keyword traffic_restoration_time_to_healed_or_new_endpoints_in_minutes:
         Minutes over which traffic is shifted to a healed/new endpoint (0-50 per ``_validation``).
        :keyword response_based_afd_origin_error_detection_settings:
         Response-based origin error detection settings.
        :keyword session_affinity_state: 'Enabled' or 'Disabled'.
        """
        super(AFDOriginGroupProperties, self).__init__(load_balancing_settings=load_balancing_settings, health_probe_settings=health_probe_settings, traffic_restoration_time_to_healed_or_new_endpoints_in_minutes=traffic_restoration_time_to_healed_or_new_endpoints_in_minutes, response_based_afd_origin_error_detection_settings=response_based_afd_origin_error_detection_settings, session_affinity_state=session_affinity_state, **kwargs)
        self.load_balancing_settings = load_balancing_settings
        self.health_probe_settings = health_probe_settings
        self.traffic_restoration_time_to_healed_or_new_endpoints_in_minutes = traffic_restoration_time_to_healed_or_new_endpoints_in_minutes
        self.response_based_afd_origin_error_detection_settings = response_based_afd_origin_error_detection_settings
        self.session_affinity_state = session_affinity_state
        # Server-populated status fields; never sent in requests.
        self.provisioning_state = None
        self.deployment_status = None
class AFDOriginGroupUpdateParameters(msrest.serialization.Model):
    """AFDOrigin group properties needed for origin group creation or update.

    :param load_balancing_settings: Load balancing settings for a backend pool.
    :type load_balancing_settings: ~azure.mgmt.cdn.models.LoadBalancingSettingsParameters
    :param health_probe_settings: Health probe settings to the origin that is used to determine the
     health of the origin.
    :type health_probe_settings: ~azure.mgmt.cdn.models.HealthProbeParameters
    :param traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Time in minutes to shift
     the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new
     endpoint is added. Default is 10 mins. This property is currently not supported.
    :type traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: int
    :param response_based_afd_origin_error_detection_settings: The JSON object that contains the
     properties to determine origin health using real requests/responses. This property is currently
     not supported.
    :type response_based_afd_origin_error_detection_settings:
     ~azure.mgmt.cdn.models.ResponseBasedOriginErrorDetectionParameters
    :param session_affinity_state: Whether to allow session affinity on this host. Valid options
     are 'Enabled' or 'Disabled'. Possible values include: "Enabled", "Disabled".
    :type session_affinity_state: str or ~azure.mgmt.cdn.models.EnabledState
    """

    # Constraints msrest enforces client-side before serialization.
    _validation = {
        'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes': {'maximum': 50, 'minimum': 0},
    }

    # python attribute -> wire key and msrest type. The 'properties.' prefix
    # nests these attributes under a 'properties' object in the JSON payload.
    _attribute_map = {
        'load_balancing_settings': {'key': 'properties.loadBalancingSettings', 'type': 'LoadBalancingSettingsParameters'},
        'health_probe_settings': {'key': 'properties.healthProbeSettings', 'type': 'HealthProbeParameters'},
        'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes': {'key': 'properties.trafficRestorationTimeToHealedOrNewEndpointsInMinutes', 'type': 'int'},
        'response_based_afd_origin_error_detection_settings': {'key': 'properties.responseBasedAfdOriginErrorDetectionSettings', 'type': 'ResponseBasedOriginErrorDetectionParameters'},
        'session_affinity_state': {'key': 'properties.sessionAffinityState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        load_balancing_settings: Optional["LoadBalancingSettingsParameters"] = None,
        health_probe_settings: Optional["HealthProbeParameters"] = None,
        traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Optional[int] = None,
        response_based_afd_origin_error_detection_settings: Optional["ResponseBasedOriginErrorDetectionParameters"] = None,
        session_affinity_state: Optional[Union[str, "EnabledState"]] = None,
        **kwargs
    ):
        """Keyword-only constructor; all settings are optional (see class docstring)."""
        super(AFDOriginGroupUpdateParameters, self).__init__(**kwargs)
        self.load_balancing_settings = load_balancing_settings
        self.health_probe_settings = health_probe_settings
        self.traffic_restoration_time_to_healed_or_new_endpoints_in_minutes = traffic_restoration_time_to_healed_or_new_endpoints_in_minutes
        self.response_based_afd_origin_error_detection_settings = response_based_afd_origin_error_detection_settings
        self.session_affinity_state = session_affinity_state


class AFDOriginListResult(msrest.serialization.Model):
    """Result of the request to list origins. It contains a list of origin objects and a URL link to get the next set of results.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: List of CDN origins within an endpoint.
    :vartype value: list[~azure.mgmt.cdn.models.AFDOrigin]
    :param next_link: URL to get the next set of origin objects if there are any.
    :type next_link: str
    """

    _validation = {
        'value': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[AFDOrigin]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """:keyword next_link: URL of the next page of results, if any."""
        super(AFDOriginListResult, self).__init__(**kwargs)
        # 'value' is server-populated (readonly) and never sent in requests.
        self.value = None
        self.next_link = next_link
class AFDOriginUpdatePropertiesParameters(msrest.serialization.Model):
    """The JSON object that contains the properties of the origin.

    :param azure_origin: Resource reference to the Azure origin resource.
    :type azure_origin: ~azure.mgmt.cdn.models.ResourceReference
    :param host_name: The address of the origin. Domain names, IPv4 addresses, and IPv6 addresses
     are supported.This should be unique across all origins in an endpoint.
    :type host_name: str
    :param http_port: The value of the HTTP port. Must be between 1 and 65535.
    :type http_port: int
    :param https_port: The value of the HTTPS port. Must be between 1 and 65535.
    :type https_port: int
    :param origin_host_header: The host header value sent to the origin with each request. If you
     leave this blank, the request hostname determines this value. Azure CDN origins, such as Web
     Apps, Blob Storage, and Cloud Services require this host header value to match the origin
     hostname by default. This overrides the host header defined at Endpoint.
    :type origin_host_header: str
    :param priority: Priority of origin in given origin group for load balancing. Higher priorities
     will not be used for load balancing if any lower priority origin is healthy.Must be between 1
     and 5.
    :type priority: int
    :param weight: Weight of the origin in given origin group for load balancing. Must be between 1
     and 1000.
    :type weight: int
    :param shared_private_link_resource: The properties of the private link resource for private
     origin.
    :type shared_private_link_resource: object
    :param enabled_state: Whether to enable health probes to be made against backends defined under
     backendPools. Health probes can only be disabled if there is a single enabled backend in single
     enabled backend pool. Possible values include: "Enabled", "Disabled".
    :type enabled_state: str or ~azure.mgmt.cdn.models.EnabledState
    """

    # Range constraints msrest enforces client-side.
    _validation = {
        'http_port': {'maximum': 65535, 'minimum': 1},
        'https_port': {'maximum': 65535, 'minimum': 1},
        'priority': {'maximum': 5, 'minimum': 1},
        'weight': {'maximum': 1000, 'minimum': 1},
    }

    # python attribute -> wire key and msrest type (flat, no 'properties.' prefix).
    _attribute_map = {
        'azure_origin': {'key': 'azureOrigin', 'type': 'ResourceReference'},
        'host_name': {'key': 'hostName', 'type': 'str'},
        'http_port': {'key': 'httpPort', 'type': 'int'},
        'https_port': {'key': 'httpsPort', 'type': 'int'},
        'origin_host_header': {'key': 'originHostHeader', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'int'},
        'weight': {'key': 'weight', 'type': 'int'},
        'shared_private_link_resource': {'key': 'sharedPrivateLinkResource', 'type': 'object'},
        'enabled_state': {'key': 'enabledState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        azure_origin: Optional["ResourceReference"] = None,
        host_name: Optional[str] = None,
        http_port: Optional[int] = None,
        https_port: Optional[int] = None,
        origin_host_header: Optional[str] = None,
        priority: Optional[int] = None,
        weight: Optional[int] = None,
        shared_private_link_resource: Optional[object] = None,
        enabled_state: Optional[Union[str, "EnabledState"]] = None,
        **kwargs
    ):
        """Keyword-only constructor; all settings are optional (see class docstring)."""
        super(AFDOriginUpdatePropertiesParameters, self).__init__(**kwargs)
        self.azure_origin = azure_origin
        self.host_name = host_name
        self.http_port = http_port
        self.https_port = https_port
        self.origin_host_header = origin_host_header
        self.priority = priority
        self.weight = weight
        self.shared_private_link_resource = shared_private_link_resource
        self.enabled_state = enabled_state
class AFDOriginProperties(AFDStateProperties, AFDOriginUpdatePropertiesParameters):
"""The JSON object that contains the properties of the origin.
Variables are only populated by the server, and will be ignored when sending a request.
:param azure_origin: Resource reference to the Azure origin resource.
:type azure_origin: ~azure.mgmt.cdn.models.ResourceReference
:param host_name: The address of the origin. Domain names, IPv4 addresses, and IPv6 addresses
are supported.This should be unique across all origins in an endpoint.
:type host_name: str
:param http_port: The value of the HTTP port. Must be between 1 and 65535.
:type http_port: int
:param https_port: The value of the HTTPS port. Must be between 1 and 65535.
:type https_port: int
:param origin_host_header: The host header value sent to the origin with each request. If you
leave this blank, the request hostname determines this value. Azure CDN origins, such as Web
Apps, Blob Storage, and Cloud Services require this host header value to match the origin
hostname by default. This overrides the host header defined at Endpoint.
:type origin_host_header: str
:param priority: Priority of origin in given origin group for load balancing. Higher priorities
will not be used for load balancing if any lower priority origin is healthy.Must be between 1
and 5.
:type priority: int
:param weight: Weight of the origin in given origin group for load balancing. Must be between 1
and 1000.
:type weight: int
:param shared_private_link_resource: The properties of the private link resource for private
origin.
:type shared_private_link_resource: object
:param enabled_state: Whether to enable health probes to be made against backends defined under
backendPools. Health probes can only be disabled if there is a single enabled backend in single
enabled backend pool. Possible values include: "Enabled", "Disabled".
:type enabled_state: str or ~azure.mgmt.cdn.models.EnabledState
:ivar provisioning_state: Provisioning status. Possible values include: "Succeeded", "Failed",
"Updating", "Deleting", "Creating".
:vartype provisioning_state: str or ~azure.mgmt.cdn.models.AfdProvisioningState
:ivar deployment_status: Possible values include: "NotStarted", "InProgress", "Succeeded",
"Failed".
:vartype deployment_status: str or ~azure.mgmt.cdn.models.DeploymentStatus
"""
_validation = {
'http_port': {'maximum': 65535, 'minimum': 1},
'https_port': {'maximum': 65535, 'minimum': 1},
'priority': {'maximum': 5, 'minimum': 1},
'weight': {'maximum': 1000, 'minimum': 1},
'provisioning_state': {'readonly': True},
'deployment_status': {'readonly': True},
}
_attribute_map = {
'azure_origin': {'key': 'azureOrigin', 'type': 'ResourceReference'},
'host_name': {'key': 'hostName', 'type': 'str'},
'http_port': {'key': 'httpPort', 'type': 'int'},
'https_port': {'key': 'httpsPort', 'type': 'int'},
'origin_host_header': {'key': 'originHostHeader', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'int'},
'weight': {'key': 'weight', 'type': 'int'},
'shared_private_link_resource': {'key': 'sharedPrivateLinkResource', 'type': 'object'},
'enabled_state': {'key': 'enabledState', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'deployment_status': {'key': 'deploymentStatus', 'type': 'str'},
}
def __init__(
self,
*,
azure_origin: Optional["ResourceReference"] = None,
host_name: Optional[str] = None,
http_port: Optional[int] = None,
https_port: | |
path: str
alist: int
attributeBuffer: int
result: str
    def __str__(self) -> str:
        # setattrlistat(2)-style line; pointer arguments (alist, attributeBuffer)
        # are rendered in hex. The result suffix is appended only when non-empty.
        rep = f'setattrlistat({self.fd}, "{self.path}", {hex(self.alist)}, {hex(self.attributeBuffer)})'
        if self.result:
            rep += f', {self.result}'
        return rep
@dataclass
class BscNetQosGuideline:
    """Trace record for the ``net_qos_guideline`` syscall."""
    ktraces: List  # raw kernel trace events this record was parsed from
    param: int  # rendered in hex
    param_len: int
    result: str

    def __str__(self) -> str:
        return f'net_qos_guideline({hex(self.param)}, {self.param_len}), {self.result}'


@dataclass
class BscFmount:
    """Trace record for the ``fmount`` syscall."""
    ktraces: List
    type: int  # rendered in hex
    fd: int
    flags: int
    data: int  # rendered in hex
    result: str

    def __str__(self) -> str:
        rep = f'fmount({hex(self.type)}, {self.fd}, {self.flags}, {hex(self.data)})'
        if self.result:  # result suffix only when the syscall reported one
            rep += f', {self.result}'
        return rep


@dataclass
class BscNtpAdjtime:
    """Trace record for the ``ntp_adjtime`` syscall."""
    ktraces: List
    tp: int  # rendered in hex
    result: str

    def __str__(self) -> str:
        return f'ntp_adjtime({hex(self.tp)}), {self.result}'


@dataclass
class BscNtpGettime:
    """Trace record for the ``ntp_gettime`` syscall."""
    ktraces: List
    ntvp: int  # rendered in hex
    result: str

    def __str__(self) -> str:
        rep = f'ntp_gettime({hex(self.ntvp)})'
        if self.result:
            rep += f', {self.result}'
        return rep
@dataclass
class BscOsFaultWithPayload:
    """Trace record for the ``os_fault_with_payload`` syscall."""
    ktraces: List
    reason_namespace: int
    reason_code: int  # rendered in hex
    payload: int  # rendered in hex
    payload_size: int
    result: str

    def __str__(self) -> str:
        rep = (f'os_fault_with_payload({self.reason_namespace}, {hex(self.reason_code)}, {hex(self.payload)}'
               f', {self.payload_size})')
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscKqueueWorkloopCtl:
    """Trace record for the ``kqueue_workloop_ctl`` syscall."""
    ktraces: List
    cmd: int
    options: int
    addr: int  # rendered in hex
    sz: int
    result: str

    def __str__(self) -> str:
        rep = f'kqueue_workloop_ctl({self.cmd}, {self.options}, {hex(self.addr)}, {self.sz})'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscMachBridgeRemoteTime:
    """Trace record for the ``mach_bridge_remote_time`` syscall."""
    ktraces: List
    local_timestamp: int
    result: str

    def __str__(self) -> str:
        rep = f'mach_bridge_remote_time({self.local_timestamp})'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscCoalitionLedger:
    """Trace record for the ``coalition_ledger`` syscall."""
    ktraces: List
    operation: int
    cid: int  # rendered in hex
    buffer: int  # rendered in hex
    bufsize: int  # rendered in hex
    result: str

    def __str__(self) -> str:
        rep = f'coalition_ledger({self.operation}, {hex(self.cid)}, {hex(self.buffer)}, {hex(self.bufsize)})'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscLogData:
    """Trace record for the ``log_data`` syscall."""
    ktraces: List
    tag: int
    flags: int
    buffer: int  # rendered in hex
    size: int
    result: str

    def __str__(self) -> str:
        rep = f'log_data({self.tag}, {self.flags}, {hex(self.buffer)}, {self.size})'
        if self.result:
            rep += f', {self.result}'
        return rep
@dataclass
class BscMemorystatusAvailableMemory:
    """Trace record for the ``memorystatus_available_memory`` syscall (no arguments)."""
    ktraces: List
    result: str

    def __str__(self) -> str:
        return f'memorystatus_available_memory(), {self.result}'


@dataclass
class BscSharedRegionMapAndSlide2Np:
    """Trace record for the ``shared_region_map_and_slide_2_np`` syscall."""
    ktraces: List
    files_count: int
    shared_file_np: int  # rendered in hex
    mappings_count: int  # rendered in hex
    mappings: int  # rendered in hex
    result: str

    def __str__(self) -> str:
        rep = (f'shared_region_map_and_slide_2_np({self.files_count}, {hex(self.shared_file_np)},'
               f' {hex(self.mappings_count)}, {hex(self.mappings)})')
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscPivotRoot:
    """Trace record for the ``pivot_root`` syscall; both paths are quoted in the output."""
    ktraces: List
    new_rootfs_path_before: str
    old_rootfs_path_after: str
    result: str

    def __str__(self) -> str:
        rep = f'pivot_root("{self.new_rootfs_path_before}", "{self.old_rootfs_path_after}")'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscTaskInspectForPid:
    """Trace record for the ``task_inspect_for_pid`` syscall."""
    ktraces: List
    target_tport: int
    pid: int
    t: int
    result: str

    def __str__(self) -> str:
        rep = f'task_inspect_for_pid({self.target_tport}, {self.pid}, {self.t})'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscTaskReadForPid:
    """Trace record for the ``task_read_for_pid`` syscall."""
    ktraces: List
    target_tport: int
    pid: int
    t: int
    result: str

    def __str__(self) -> str:
        rep = f'task_read_for_pid({self.target_tport}, {self.pid}, {self.t})'
        if self.result:
            rep += f', {self.result}'
        return rep
@dataclass
class BscSysPreadv:
    """Trace record for the ``preadv`` / ``preadv_nocancel`` syscall."""
    ktraces: List
    fd: int
    iovp: int  # rendered in hex
    iovcnt: int
    offset: int
    result: str
    no_cancel: bool = False  # True for the _nocancel variant

    def __str__(self) -> str:
        suffix = '_nocancel' if self.no_cancel else ''
        arg_list = f'{self.fd}, {hex(self.iovp)}, {self.iovcnt}, {self.offset}'
        return f'preadv{suffix}({arg_list}), {self.result}'
@dataclass
class BscSysPwritev:
    """Trace record for the ``pwritev`` / ``pwritev_nocancel`` syscall."""
    ktraces: List
    fd: int
    iovp: int  # rendered in hex
    iovcnt: int
    offset: int
    result: str
    no_cancel: bool = False  # True for the _nocancel variant

    def __str__(self) -> str:
        suffix = '_nocancel' if self.no_cancel else ''
        arg_list = f'{self.fd}, {hex(self.iovp)}, {self.iovcnt}, {self.offset}'
        return f'pwritev{suffix}({arg_list}), {self.result}'
@dataclass
class BscUlockWait2:
    """Trace record for the ``ulock_wait2`` syscall."""
    ktraces: List
    operation: int
    addr: int  # rendered in hex
    value: int
    timeout: int
    result: str

    def __str__(self) -> str:
        return f'ulock_wait2({self.operation}, {hex(self.addr)}, {self.value}, {self.timeout}), {self.result}'


@dataclass
class BscProcInfoExtendedId:
    """Trace record for the ``proc_info_extended_id`` syscall."""
    ktraces: List
    callnum: int
    pid: int
    flavor: int
    flags: int
    result: str

    def __str__(self) -> str:
        rep = f'proc_info_extended_id({self.callnum}, {self.pid}, {self.flavor}, {self.flags})'
        if self.result:
            rep += f', {self.result}'
        return rep
@dataclass
class BscSysClose:
    """Trace record for the ``close`` / ``close_nocancel`` syscall."""
    ktraces: List
    # Annotation fixed from ``str`` to ``int``: fd is a file-descriptor number,
    # matching every other record in this module (e.g. BscFchdir.fd, BscSysDup.fildes).
    # Runtime behavior is unchanged — dataclass annotations are not enforced.
    fd: int
    result: str
    no_cancel: bool = False  # True for the _nocancel variant

    def __str__(self) -> str:
        no_cancel = '_nocancel' if self.no_cancel else ''
        rep = f'close{no_cancel}({self.fd})'
        if self.result:  # result suffix only when the syscall reported one
            rep += f', {self.result}'
        return rep
@dataclass
class BscLink:
    """Trace record for the ``link`` syscall; both paths are quoted in the output."""
    ktraces: List
    oldpath: str
    newpath: str
    result: str

    def __str__(self) -> str:
        pieces = [f'link("{self.oldpath}", "{self.newpath}")']
        if self.result:
            pieces.append(self.result)
        return ', '.join(pieces)
@dataclass
class BscUnlink:
    """Trace record for the ``unlink`` syscall."""
    ktraces: List
    pathname: str
    result: str

    def __str__(self) -> str:
        rep = f'unlink("{self.pathname}")'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscChdir:
    """Trace record for the ``chdir`` syscall."""
    ktraces: List
    path: str
    result: str

    def __str__(self) -> str:
        rep = f'chdir("{self.path}")'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscFchdir:
    """Trace record for the ``fchdir`` syscall."""
    ktraces: List
    fd: int
    result: str

    def __str__(self) -> str:
        rep = f'fchdir({self.fd})'
        if self.result:
            rep += f', {self.result}'
        return rep
@dataclass
class BscMknod:
    """Trace record for the ``mknod`` syscall."""
    ktraces: List
    pathname: str
    mode: int
    dev: int
    result: str

    def __str__(self) -> str:
        rep = f'mknod("{self.pathname}", {self.mode}, {self.dev})'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscChmod:
    """Trace record for the ``chmod`` syscall."""
    ktraces: List
    pathname: str
    mode: List  # list of mode-flag enums; rendered as 'A | B' via their .name
    result: str

    def __str__(self) -> str:
        rep = f'''chmod("{self.pathname}", {' | '.join(map(lambda f: f.name, self.mode))})'''
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscChown:
    """Trace record for the ``chown`` syscall."""
    ktraces: List
    pathname: str
    owner: int
    group: int
    result: str

    def __str__(self) -> str:
        rep = f'''chown("{self.pathname}", {self.owner}, {self.group})'''
        if self.result:
            rep += f', {self.result}'
        return rep
@dataclass
class BscGetpid:
    """Trace record for the ``getpid`` syscall; always renders the returned pid."""
    ktraces: List
    pid: int

    def __str__(self) -> str:
        return f'getpid(), pid: {self.pid}'


@dataclass
class BscSetuid:
    """Trace record for the ``setuid`` syscall."""
    ktraces: List
    uid: int
    result: str

    def __str__(self) -> str:
        rep = f'setuid({self.uid})'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscGetuid:
    """Trace record for the ``getuid`` syscall; always renders the returned uid."""
    ktraces: List
    uid: int

    def __str__(self) -> str:
        return f'getuid(), uid: {self.uid}'


@dataclass
class BscGeteuid:
    """Trace record for the ``geteuid`` syscall; always renders the returned uid."""
    ktraces: List
    uid: int

    def __str__(self) -> str:
        return f'geteuid(), uid: {self.uid}'
@dataclass
class BscWait4:
    """Trace record for the ``wait4`` / ``wait4_nocancel`` syscall."""
    ktraces: List
    pid: int
    status: int  # rendered in hex
    options: int
    rusage: int  # rendered in hex
    result: str
    no_cancel: bool = False  # True for the _nocancel variant

    def __str__(self) -> str:
        name = 'wait4_nocancel' if self.no_cancel else 'wait4'
        return (f'{name}({self.pid}, {hex(self.status)}, '
                f'{self.options}, {hex(self.rusage)}), {self.result}')
@dataclass
class BscRecvmsg:
    """Trace record for the ``recvmsg`` / ``recvmsg_nocancel`` syscall."""
    ktraces: List
    socket: int
    result: str
    no_cancel: bool = False  # True for the _nocancel variant

    def __str__(self) -> str:
        no_cancel = '_nocancel' if self.no_cancel else ''
        return f'recvmsg{no_cancel}({self.socket}), {self.result}'


@dataclass
class BscSendmsg:
    """Trace record for the ``sendmsg`` / ``sendmsg_nocancel`` syscall."""
    ktraces: List
    socket: int
    result: str
    no_cancel: bool = False

    def __str__(self) -> str:
        no_cancel = '_nocancel' if self.no_cancel else ''
        return f'sendmsg{no_cancel}({self.socket}), {self.result}'


@dataclass
class BscRecvfrom:
    """Trace record for the ``recvfrom`` / ``recvfrom_nocancel`` syscall."""
    ktraces: List
    socket: int
    buffer: int  # rendered in hex
    length: int
    flags: List  # list of flag enums; rendered as 'A | B' via .name, or '0' when empty
    result: str
    no_cancel: bool = False

    def __str__(self) -> str:
        no_cancel = '_nocancel' if self.no_cancel else ''
        return (f'''recvfrom{no_cancel}({self.socket}, {hex(self.buffer)}, {self.length}, '''
                f'''{' | '.join(map(lambda f: f.name, self.flags)) if self.flags else '0'}), {self.result}''')


@dataclass
class BscAccept:
    """Trace record for the ``accept`` / ``accept_nocancel`` syscall."""
    ktraces: List
    socket: int
    result: str
    no_cancel: bool = False

    def __str__(self) -> str:
        no_cancel = '_nocancel' if self.no_cancel else ''
        return f'accept{no_cancel}({self.socket}), {self.result}'
@dataclass
class BscGetpeername:
    """Trace record for the ``getpeername`` syscall."""
    ktraces: List
    socket: int
    address: int  # rendered in hex
    address_len: int  # rendered in hex
    result: str

    def __str__(self) -> str:
        rep = f'getpeername({self.socket}, {hex(self.address)}, {hex(self.address_len)})'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscGetsockname:
    """Trace record for the ``getsockname`` syscall."""
    ktraces: List
    socket: int
    address: int  # rendered in hex
    address_len: int  # rendered in hex
    result: str

    def __str__(self) -> str:
        rep = f'getsockname({self.socket}, {hex(self.address)}, {hex(self.address_len)})'
        if self.result:
            rep += f', {self.result}'
        return rep
@dataclass
class BscAccess:
    """Trace record for the ``access`` syscall."""
    ktraces: List
    path: str
    amode: List  # list of access-mode flag enums; rendered as 'A | B' via .name
    result: str

    def __str__(self) -> str:
        rep = f'''access("{self.path}", {' | '.join(map(lambda f: f.name, self.amode))})'''
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscChflags:
    """Trace record for the ``chflags`` syscall."""
    ktraces: List
    path: str
    flags: List  # list of flag enums; rendered as 'A | B' via .name
    result: str

    def __str__(self) -> str:
        rep = f'''chflags("{self.path}", {' | '.join(map(lambda f: f.name, self.flags))})'''
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscFchflags:
    """Trace record for the ``fchflags`` syscall."""
    ktraces: List
    fd: int
    flags: List  # list of flag enums; rendered as 'A | B' via .name
    result: str

    def __str__(self) -> str:
        rep = f'''fchflags({self.fd}, {' | '.join(map(lambda f: f.name, self.flags))})'''
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscSync:
    """Trace record for the ``sync`` syscall (no arguments, no rendered result)."""
    ktraces: List

    def __str__(self) -> str:
        return 'sync()'
@dataclass
class BscKill:
    """Trace record for the ``kill`` syscall."""
    ktraces: List
    pid: int
    sig: int
    result: str

    def __str__(self) -> str:
        rep = f'kill({self.pid}, {self.sig})'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscGetppid:
    """Trace record for the ``getppid`` syscall; always renders the returned pid."""
    ktraces: List
    pid: int

    def __str__(self) -> str:
        return f'getppid(), pid: {self.pid}'
@dataclass
class BscSysDup:
    """Trace record for the ``dup`` syscall."""
    ktraces: List
    fildes: int
    result: str

    def __str__(self) -> str:
        return f'dup({self.fildes}), {self.result}'


@dataclass
class BscPipe:
    """Trace record for the ``pipe`` syscall (no rendered arguments)."""
    ktraces: List
    result: str

    def __str__(self) -> str:
        return f'pipe(), {self.result}'


@dataclass
class BscGetegid:
    """Trace record for the ``getegid`` syscall; always renders the returned gid."""
    ktraces: List
    gid: int

    def __str__(self) -> str:
        return f'getegid(), gid: {self.gid}'
@dataclass
class BscSigaction:
    """Trace record for the ``sigaction`` syscall."""
    ktraces: List
    sig: Signals  # rendered via the enum member's .name
    act: int  # rendered in hex
    oact: int  # rendered in hex
    result: str

    def __str__(self) -> str:
        rep = f'sigaction({self.sig.name}, {hex(self.act)}, {hex(self.oact)})'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscGetgid:
    """Trace record for the ``getgid`` syscall; always renders the returned gid."""
    ktraces: List
    gid: int

    def __str__(self) -> str:
        return f'getgid(), gid: {self.gid}'


@dataclass
class BscSigprocmap:
    """Trace record for the ``sigprocmask`` syscall.

    NOTE(review): the class is named ``Sigprocmap`` but renders ``sigprocmask`` —
    looks like a naming typo; kept as-is since renaming would break callers.
    """
    ktraces: List
    how: SigprocmaskFlags  # rendered via the enum member's .name
    set: int  # rendered in hex
    oset: int  # rendered in hex
    result: str

    def __str__(self) -> str:
        rep = f'sigprocmask({self.how.name}, {hex(self.set)}, {hex(self.oset)})'
        if self.result:
            rep += f', {self.result}'
        return rep
@dataclass
class BscGetlogin:
    """Trace record for the ``getlogin`` syscall; renders the returned buffer address."""
    ktraces: List
    address: int  # rendered in hex

    def __str__(self) -> str:
        return f'getlogin(), address: {hex(self.address)}'


@dataclass
class BscSetlogin:
    """Trace record for the ``setlogin`` syscall."""
    ktraces: List
    address: int  # rendered in hex

    result: str

    def __str__(self) -> str:
        rep = f'setlogin({hex(self.address)})'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscAcct:
    """Trace record for the ``acct`` syscall."""
    ktraces: List
    file: str
    result: str

    def __str__(self) -> str:
        rep = f'acct("{self.file}")'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscSigpending:
    """Trace record for the ``sigpending`` syscall."""
    ktraces: List
    set: int  # rendered in hex
    result: str

    def __str__(self) -> str:
        rep = f'sigpending({hex(self.set)})'
        if self.result:
            rep += f', {self.result}'
        return rep


@dataclass
class BscSigaltstack:
    """Trace record for the ``sigaltstack`` syscall."""
    ktraces: List
    ss_address: int  # rendered in hex
    oss_address: int  # rendered in hex
    result: str

    def __str__(self) -> str:
        rep = f'sigaltstack({hex(self.ss_address)}, {hex(self.oss_address)})'
        if self.result:
            rep += f', {self.result}'
        return rep
@dataclass
class BscIoctl:
ktraces: List
fildes: int
request: int
arg: int
result: str
def __str__(self):
params = IOC_REQUEST_PARAMS[self.request & 0xf0000000]
group = chr((self.request >> 8) & 0xff)
number = self.request & 0xff
length = (self.request >> 16) & 0x1fff
ioc = f'''_IOC({params}, '{group}', {number}, {length})'''
rep = f'ioctl({self.fildes}, {hex(self.request)} /* {ioc} */, {hex(self.arg)})'
if self.result:
rep += f', {self.result}'
| |
optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_tensorboard), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.CreateTensorboardRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_tensorboard_async_from_dict():
    """Exercise the async create_tensorboard test with a dict-typed request."""
    await test_create_tensorboard_async(request_type=dict)


def test_create_tensorboard_field_headers():
    """create_tensorboard must forward routing fields as request metadata (sync)."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.CreateTensorboardRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tensorboard), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.create_tensorboard(request)

    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_create_tensorboard_field_headers_async():
    """create_tensorboard must forward routing fields as request metadata (async)."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.CreateTensorboardRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tensorboard), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.create_tensorboard(request)

    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_tensorboard_flattened():
    """Flattened keyword arguments must populate the request object (sync)."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tensorboard), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_tensorboard(
            parent="parent_value",
            tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
        )

    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    arg = args[0].parent
    mock_val = "parent_value"
    assert arg == mock_val
    arg = args[0].tensorboard
    mock_val = gca_tensorboard.Tensorboard(name="name_value")
    assert arg == mock_val


def test_create_tensorboard_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_tensorboard(
            tensorboard_service.CreateTensorboardRequest(),
            parent="parent_value",
            tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_tensorboard_flattened_async():
    """Flattened keyword arguments must populate the request object (async)."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tensorboard), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. (A plain
        # Operation assignment here was dead code — it was immediately
        # overwritten by the awaitable FakeUnaryUnaryCall, so it is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.create_tensorboard(
            parent="parent_value",
            tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
        )

    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    arg = args[0].parent
    mock_val = "parent_value"
    assert arg == mock_val
    arg = args[0].tensorboard
    mock_val = gca_tensorboard.Tensorboard(name="name_value")
    assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tensorboard_flattened_error_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_tensorboard(
tensorboard_service.CreateTensorboardRequest(),
parent="parent_value",
tensorboard=gca_tensorboard.Tensorboard(name="name_value"),
)
def test_get_tensorboard(
transport: str = "grpc", request_type=tensorboard_service.GetTensorboardRequest
):
client = TensorboardServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tensorboard.Tensorboard(
name="name_value",
display_name="display_name_value",
description="description_value",
blob_storage_path_prefix="blob_storage_path_prefix_value",
run_count=989,
etag="etag_value",
)
response = client.get_tensorboard(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tensorboard_service.GetTensorboardRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tensorboard.Tensorboard)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.blob_storage_path_prefix == "blob_storage_path_prefix_value"
assert response.run_count == 989
assert response.etag == "etag_value"
def test_get_tensorboard_from_dict():
    # Re-run the happy-path test with a dict request to cover dict-to-proto
    # request coercion.
    test_get_tensorboard(request_type=dict)
def test_get_tensorboard_empty_call():
    # Coverage failsafe: a call with no request object and no flattened
    # fields must still reach the stub with a default request.
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
        client.get_tensorboard()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.GetTensorboardRequest()
@pytest.mark.asyncio
async def test_get_tensorboard_async(
    transport: str = "grpc_asyncio",
    request_type=tensorboard_service.GetTensorboardRequest,
):
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tensorboard.Tensorboard(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
                blob_storage_path_prefix="blob_storage_path_prefix_value",
                run_count=989,
                etag="etag_value",
            )
        )
        response = await client.get_tensorboard(request)
        # Establish that the underlying gRPC stub method was called exactly
        # once (strengthened from a bare truthiness check for consistency
        # with the sync variant of this test).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.GetTensorboardRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, tensorboard.Tensorboard)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert response.blob_storage_path_prefix == "blob_storage_path_prefix_value"
    assert response.run_count == 989
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_tensorboard_async_from_dict():
    # Re-run the async happy-path test with a dict request to cover coercion.
    await test_get_tensorboard_async(request_type=dict)
def test_get_tensorboard_field_headers():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Values that are part of the HTTP/1.1 URI must be propagated via the
    # x-goog-request-params field header; use a non-empty name.
    request = tensorboard_service.GetTensorboardRequest()
    request.name = "name/value"
    with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
        call.return_value = tensorboard.Tensorboard()
        client.get_tensorboard(request)
        # The stub saw exactly our request ...
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # ... and the routing header was attached to the call metadata.
        _, _, kw = call.mock_calls[0]
        expected_header = ("x-goog-request-params", "name=name/value",)
        assert expected_header in kw["metadata"]
@pytest.mark.asyncio
async def test_get_tensorboard_field_headers_async():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.GetTensorboardRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tensorboard.Tensorboard()
        )
        await client.get_tensorboard(request)
        # Establish that the underlying gRPC stub method was called exactly
        # once (strengthened from a bare truthiness check for consistency
        # with the sync variant of this test).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_tensorboard_flattened():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.get_tensorboard), "__call__") as call:
        call.return_value = tensorboard.Tensorboard()
        # Invoke the method with a truthy keyword argument for each
        # flattened field.
        client.get_tensorboard(name="name_value",)
        # The flattened value must land on the request object the stub saw.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_tensorboard_flattened_error():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying a request object together with flattened fields is
    # ambiguous, so the client must raise ValueError.
    request = tensorboard_service.GetTensorboardRequest()
    with pytest.raises(ValueError):
        client.get_tensorboard(request, name="name_value",)
@pytest.mark.asyncio
async def test_get_tensorboard_flattened_async():
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the | |
hyphens '-'. This approach may not be perfect in general, but it works for
the zhmc CLI because the original option names do not have any underscores.
Parameters:
options (dict): The click options dictionary as passed to the decorated
function by click (key: option name as changed by click, value: option
value).
Returns:
dict: Options with their original names.
"""
org_options = {}
for name, value in six.iteritems(options):
org_name = name.replace('_', '-')
org_options[org_name] = value
return org_options
def options_to_properties(options, name_map=None):
    """
    Convert click options into HMC resource properties.

    The click option names in input parameters to this function are the
    original option names (e.g. as produced by `original_options()`.

    Options with a value of `None` are not added to the returned resource
    properties.

    If a name mapping dictionary is specified, the option names are mapped
    using that dictionary. If an option name is mapped to `None`, it is not
    going to be added to the set of returned resource properties.

    Parameters:

      options (dict): The options dictionary (key: original option name,
        value: option value).

      name_map (dict): `None` or name mapping dictionary (key: original
        option name, value: property name, or `None` to not add this option to
        the returned properties).

    Returns:

      dict: Resource properties (key: property name, value: option value)
    """
    properties = {}
    # dict.items() works on both Python 2 and Python 3; the six.iteritems()
    # indirection is unnecessary here.
    for name, value in options.items():
        if value is None:
            continue
        if name_map:
            name = name_map.get(name, name)
        if name is not None:
            properties[name] = value
    return properties
def print_properties(cmd_ctx, properties, output_format, show_list=None):
    """
    Print properties in the desired output format.

    The spinner is stopped just before printing.

    Parameters:

      cmd_ctx (CmdContext): Context object of the command.

      properties (dict): The properties.

      output_format (string): Output format from the command line.

      show_list (iterable of string): The property names to be shown.
        If `None`, all properties are shown.

    Raises:

      InvalidOutputFormatError: The output format is not supported.
    """
    if output_format in TABLE_FORMATS:
        # 'table' is an alias for the 'psql' tabulate format
        table_format = 'psql' if output_format == 'table' else output_format
        print_properties_as_table(cmd_ctx, properties, table_format, show_list)
    elif output_format == 'json':
        print_properties_as_json(cmd_ctx, properties, show_list)
    else:
        raise InvalidOutputFormatError(output_format)
def print_resources(
        cmd_ctx, resources, output_format, show_list=None, additions=None,
        all=False):
    # pylint: disable=redefined-builtin
    """
    Print the properties of a list of resources in the desired output format.

    Resource properties are fetched from the HMC as needed while they are
    accessed. The spinner is stopped just before printing.

    Parameters:

      cmd_ctx (CmdContext): Context object of the command.

      resources (iterable of BaseResource):
        The resources.

      output_format (string): Output format from command line.

      show_list (iterable of string):
        The property names to be shown, also defining the column order from
        left to right. Properties not present on a resource object are
        retrieved from the HMC. If `None`, all properties in the resource
        objects are shown, in ascending order by property name.

      additions (dict of dict of values): Additional properties, keyed by
        property name (which also needs to be listed in `show_list`); each
        value is a dict keyed by resource URI whose value is shown.
        If `None`, no additional properties are defined.

      all (bool): Add all remaining properties in sorted order.

    Raises:

      InvalidOutputFormatError
      zhmcclient.HTTPError
      zhmcclient.ParseError
      zhmcclient.AuthError
      zhmcclient.ConnectionError
    """
    if output_format in TABLE_FORMATS:
        # 'table' is an alias for the 'psql' tabulate format
        table_format = 'psql' if output_format == 'table' else output_format
        print_resources_as_table(
            cmd_ctx, resources, table_format, show_list, additions, all)
    elif output_format == 'json':
        print_resources_as_json(cmd_ctx, resources, show_list, additions, all)
    else:
        raise InvalidOutputFormatError(output_format)
def print_properties_as_table(
        cmd_ctx, properties, table_format, show_list=None):
    """
    Print properties as a two-column table ('Field Name' / 'Value'), with
    rows ascending by property name.

    The spinner is stopped just before printing.

    Parameters:

      cmd_ctx (CmdContext): Context object of the command.

      properties (dict): The properties.

      table_format (string): Supported table formats are: "table" (same as
        "psql"), "plain", "simple", "psql", "rst", "mediawiki", "html",
        "latex".

      show_list (iterable of string): The property names to be shown.
        If `None`, all properties are shown.
    """
    column_headers = ['Field Name', 'Value']
    rendered = dict_as_table(properties, column_headers, table_format,
                             show_list)
    cmd_ctx.spinner.stop()
    click.echo(rendered)
def print_resources_as_table(
        cmd_ctx, resources, table_format, show_list=None, additions=None,
        all=False):
    # pylint: disable=redefined-builtin
    """
    Print resources in tabular output format.

    While accessing the properties of the resources, they are fetched from
    the HMC as needed.

    The spinner is stopped just before printing.

    Parameters:

      cmd_ctx (CmdContext): Context object of the command.

      resources (iterable of BaseResource):
        The resources.

      table_format (string): Supported table formats are:
        - "table" -> same like "psql"
        - "plain"
        - "simple"
        - "psql"
        - "rst"
        - "mediawiki"
        - "html"
        - "latex"

      show_list (iterable of string):
        The property names to be shown. If a property is not in the resource
        object, it will be retrieved from the HMC. This iterable also defines
        the order of columns in the table, from left to right in iteration
        order.
        If `None`, all properties in the resource objects are shown, and their
        column order is ascending by property name.

      additions (dict of dict of values): Additional properties,
        as a dict keyed by the property name (which also needs to be listed in
        `show_list`),
        whose value is a dict keyed by the resource URI,
        whose value is the value to be shown.
        If `None`, no additional properties are defined.

      all (bool): Add all remaining properties in sorted order.

    Raises:

      zhmcclient.HTTPError
      zhmcclient.ParseError
      zhmcclient.AuthError
      zhmcclient.ConnectionError
    """
    # Nested lists/dicts inside cells are rendered with this inner format.
    inner_format = INNER_TABLE_FORMAT.get(table_format, table_format)
    # OrderedDicts are used as insertion-ordered sets: keys are the column
    # names, values are always None.
    prop_names = OrderedDict()  # key: property name, value: None
    remaining_prop_names = OrderedDict()  # key: property name, value: None
    resource_props_list = []
    # First pass: collect the property values per resource and discover the
    # full set of column names across all resources.
    for resource in resources:
        resource_props = {}
        if show_list:
            for name in show_list:
                # Values from 'additions' take precedence over HMC properties.
                if additions and name in additions:
                    value = additions[name][resource.uri]
                else:
                    # May raise zhmcclient exceptions
                    value = resource.prop(name)
                resource_props[name] = value
                prop_names[name] = None
        else:
            for name in sorted(resource.properties.keys()):
                # May raise zhmcclient exceptions
                resource_props[name] = resource.prop(name)
                prop_names[name] = None
        if all:
            # Fetch the complete property set; extra columns found here are
            # appended after the explicitly requested ones, in sorted order.
            resource.pull_full_properties()
            for name in resource.properties.keys():
                if name not in prop_names:
                    # May raise zhmcclient exceptions
                    resource_props[name] = resource.prop(name)
                    remaining_prop_names[name] = None
        resource_props_list.append(resource_props)
    prop_names = list(prop_names.keys()) + sorted(remaining_prop_names)
    # Second pass: build the table rows in final column order.
    table = []
    for resource_props in resource_props_list:
        row = []
        for name in prop_names:
            value = resource_props.get(name, None)
            value = value_as_table(value, inner_format)
            row.append(value)
        table.append(row)
    cmd_ctx.spinner.stop()
    if not table:
        click.echo("No resources.")
    else:
        # Rows are sorted by the first column for stable, readable output.
        sorted_table = sorted(table, key=lambda row: row[0])
        out_str = tabulate(sorted_table, prop_names, tablefmt=table_format)
        click.echo(out_str)
def dict_as_table(data, headers, table_format, show_list=None):
    """
    Render dictionary data as a table string.

    Rows are ordered ascending by dictionary key.

    Parameters:

      data (dict): The dictionary data.

      headers (list): The text for the header row. `None` means no header row.

      table_format: Table format, see print_resources_as_table().

      show_list (iterable of string): The dict keys to be shown.
        If `None`, all dict keys are shown.
    """
    if table_format == 'repr':
        return repr(data)
    inner_format = INNER_TABLE_FORMAT.get(table_format, table_format)
    rows = []
    for key in sorted(data):
        if show_list is not None and key not in show_list:
            continue
        rows.append((key, value_as_table(data[key], inner_format)))
    return tabulate(rows, headers, tablefmt=table_format)
def list_as_table(data, table_format):
    """
    Render list data as a single-column table string.

    Rows appear in the order of the list items.

    Parameters:

      data (list): The list data.

      table_format: Table format, see print_resources_as_table().
    """
    if table_format == 'repr':
        return repr(data)
    inner_format = INNER_TABLE_FORMAT.get(table_format, table_format)
    rows = [(value_as_table(item, inner_format),) for item in data]
    return tabulate(rows, headers=[], tablefmt=table_format)
def value_as_table(value, table_format):
    """
    Convert a single value into its table representation.

    Lists and dicts are rendered as nested tables; floats are formatted
    with two decimal places; any other value is returned unchanged.

    Parameters:

      value (dict or list or simple type): The value to be converted.

      table_format (string): The table format to be used.

    Returns:

      string or simple type: The value in the table format.
    """
    if isinstance(value, list):
        return list_as_table(value, table_format)
    if isinstance(value, dict):  # covers OrderedDict (a dict subclass) too
        return dict_as_table(value, [], table_format)
    if isinstance(value, float):
        # TODO: Make the formatting less hard coded.
        return '{0:.2f}'.format(value)
    return value
def print_properties_as_json(cmd_ctx, properties, show_list=None):
"""
Print properties in JSON output format.
The spinner is stopped just before printing.
Parameters:
cmd_ctx (CmdContext): Context object of the command.
properties | |
<reponame>jackievilladsen/dynspec
# -*- coding: utf-8 -*-
"""
plot.py: Tools for manipulating and plotting dynamic spectra, time series, etc.
This functionality is accessed by the user through the class Dynspec.
"""
from pylab import *
from numpy import *
import matplotlib.pyplot as plt
import os
from astropy.time import Time
from copy import deepcopy
class TimeSec(Time):
    # astropy Time subclass that additionally accepts MJD expressed in
    # seconds ('mjds'), the unit of CASA's TIME column.
    def __init__(self, t, format='mjds'):
        if format == 'mjds':
            # convert seconds to days and delegate to the standard 'mjd' format
            t, format = t / 24. / 3600., 'mjd'
        Time.__init__(self, t, format=format)

    def mjds(self):
        # return the stored time(s) as MJD in seconds
        return self.mjd * 24. * 3600.
def rebin2d(a, wt, binsize):
    # Rebin 2-D array a by weighted averaging over bins of shape binsize,
    # using wt (same shape as a) as the weights.
    # Assumes a.shape is an exact multiple of binsize.
    # Use floor division: under Python 3 (and modern numpy) '/' is true
    # division and would produce float shapes, breaking reshape().
    shape = tuple(array(a.shape) // array(binsize))
    sh = shape[0], a.shape[0] // shape[0], shape[1], a.shape[1] // shape[1]
    a1 = a.reshape(sh)
    wt1 = wt.reshape(sh)
    # average within each bin along the fast axis, then along the slow axis,
    # propagating the summed weights between the two steps
    tmp, wt2 = average(a1, len(sh) - 1, wt1, True)
    return average(tmp, 1, wt2)
def rebin1d(a, binsize):
    # Average consecutive groups of binsize samples of 1-D array a;
    # a trailing remainder shorter than binsize is dropped.
    # Integer arithmetic throughout: floor() returns a float under Python 3,
    # which is invalid as a slice bound or reshape dimension.
    n = (len(a) // binsize) * binsize
    a1 = a[0:n]
    sh = n // binsize, binsize
    a2 = a1.reshape(sh)
    return average(a2, 1)
def rebin1d_ma(a, binsize):
    # Masked-array variant of rebin1d: average consecutive groups of binsize
    # samples, ignoring masked values; a trailing remainder is dropped.
    # Integer arithmetic: floor() returns a float under Python 3, which is
    # invalid as a slice bound or reshape dimension.
    n = (len(a) // binsize) * binsize
    a1 = a[0:n]
    sh = n // binsize, binsize
    a2 = a1.reshape(sh)
    return ma.average(a2, 1)
def rebin2d_ma(b, binsize):
    # Masked-array variant of rebin2d: average bins of shape binsize=(nt, nf),
    # ignoring masked cells; edge rows/columns that do not fill a whole bin
    # are trimmed off first.
    nt, nf = binsize
    lt, lf = b.shape
    # Integer arithmetic: floor() returns a float under Python 3, which is
    # invalid as a slice bound or reshape dimension.
    lt_new = (lt // nt) * nt
    lf_new = (lf // nf) * nf
    a = b[0:lt_new][:, 0:lf_new]
    shape = tuple(array(a.shape) // array(binsize))
    sh = shape[0], a.shape[0] // shape[0], shape[1], a.shape[1] // shape[1]
    a1 = a.reshape(sh)
    # average within each bin along the fast axis, then along the slow axis,
    # using the returned per-bin weights (counts of unmasked cells)
    tmp, wt2 = ma.average(a1, len(sh) - 1, returned=True)
    return ma.average(tmp, 1, wt2)
def make_ma(a):
    # Return a as a masked array in which every exactly-zero element is
    # masked (zero is used as the "no data" sentinel in these dynspec files).
    zero_mask = (a == 0)
    return ma.masked_array(a, mask=zero_mask)
def add_band(ma_big, t, f, ma_band, t_band, f_band):
    # Insert one band's dynamic spectrum (ma_band) into the combined dynamic
    # spectrum (ma_big), using t_band/f_band to locate the destination cells.
    # Cells that already hold data in ma_big are left untouched.
    # NOTE(review): parameter 't' is unused here; kept for interface
    # compatibility with existing callers.
    #
    # t_band tells us the indices of the rows in ma_big (i.e., the times) for
    # which this band has data
    t_ind = t_band
    # add one col (one freq) at a time since there are some overlapping frequencies
    for i in range(len(f_band)):
        m = ma_band.mask[:, i]
        if not m.all():  # if not all values in this frequency channel are masked
            # matplotlib's mlab.find() was removed (matplotlib >= 3.1);
            # numpy.flatnonzero is the direct equivalent
            f_ind = flatnonzero(f == f_band[i])[0]
            # snapshot of the destination mask BEFORE merging: True means the
            # cell held no data yet
            mask = ma_big.mask[t_ind, f_ind]
            # resulting cells are flagged only if both ma_big and ma_band are
            # flagged in those cells
            ma_big.mask[t_ind, f_ind] *= ma_band.mask[:, i]
            # add data from ma_band only where there was no data in the
            # destination cell yet (mask == 0 means data already present)
            ma_big[t_ind, f_ind] += ma_band[:, i].data * mask
    return ma_big
def make_tick_labels(desired_ticks, x):
    # tick_labels, tick_locs = make_tick_labels(desired_ticks, x)
    # desired_ticks is a list of where to put tick marks but can include
    # locations outside the range of x (x is the axis value array of an
    # image). Returns the in-range values as tick_labels and, for each, the
    # index of the closest element of x as tick_locs.
    # matplotlib's mlab.find() was removed (matplotlib >= 3.1);
    # numpy.flatnonzero / numpy.argmin are the direct equivalents.
    in_range = logical_and(desired_ticks >= min(x), desired_ticks <= max(x))
    ind = flatnonzero(in_range)
    tick_labels = desired_ticks[ind]
    # argmin returns the first index of the minimum |x - t|, matching the
    # original find(min(...) == ...)[0]
    tick_locs = [argmin(abs(x - t)) for t in tick_labels]
    return tick_labels, tick_locs
def closest_ind(x, x0):
    # ind = closest_ind(x, x0)
    # Return the index into 1-D array x whose value lies nearest to scalar x0.
    distance = abs(x - x0)
    return argmin(distance)
def trim_whitespace(dynspec, x=None, y=None):
    # dynspec1, x1, y1 = trim_whitespace(dynspec, x, y)
    # Trim off any rows and columns on the outer edge of the dynspec that
    # have no unmasked values; x and y (axis value arrays, defaulting to
    # index ranges) are trimmed in step. Returns (None, None, None) if there
    # are no unmasked values at all.
    # Fixes vs. the original: Python-2 print statement -> print();
    # removed matplotlib mlab.find() -> numpy.flatnonzero; bare except
    # narrowed to the exceptions actually expected (no mask attribute, or
    # an empty index array).
    sum0 = dynspec.sum(1)
    sum1 = dynspec.sum(0)
    if x is None:
        x = arange(len(sum0))
    if y is None:
        y = arange(len(sum1))
    try:
        i = flatnonzero(sum0.mask == False)
        imin = i[0]
        imax = i[-1] + 1
        j = flatnonzero(sum1.mask == False)
        jmin = j[0]
        jmax = j[-1] + 1
        dynspec1 = dynspec[imin:imax, jmin:jmax]
        x1 = x[imin:imax]
        y1 = y[jmin:jmax]
    except (AttributeError, IndexError):
        print('no unmasked values')
        dynspec1, x1, y1 = None, None, None
    return dynspec1, x1, y1
def clip_dynspec(dynspec, lims, x=None, y=None, trim_mask=True):
    # spec1, x1, y1 = clip_dynspec(dynspec, lims, x=None, y=None)
    # Given a 2D array dynspec, return a smaller 2D array cut at the indices
    # or x,y values in lims. lims=[xmin,xmax,ymin,ymax] is assumed to be
    # indices, unless arrays x and y are provided giving the axis values for
    # each index, in which case the cut is made at the indices of the values
    # closest to lims.
    # If trim_mask=True (the default), also trim outer rows/columns that
    # have no unmasked values (see trim_whitespace).
    # Fixes vs. the original: when x (or y) is None, x1 = x[imin:imax]
    # raised TypeError on every call; now None is propagated instead.
    # The unused (xlen, ylen) = shape(dynspec) unpacking was removed.
    [xmin, xmax, ymin, ymax] = lims
    if x is None:
        imin = xmin
        imax = xmax
    else:
        imin = closest_ind(x, xmin)
        imax = closest_ind(x, xmax)
    if y is None:
        jmin = ymin
        jmax = ymax
    else:
        jmin = closest_ind(y, ymin)
        jmax = closest_ind(y, ymax)
    spec1 = dynspec[imin:imax, jmin:jmax]
    x1 = None if x is None else x[imin:imax]
    y1 = None if y is None else y[jmin:jmax]
    if trim_mask:
        return trim_whitespace(spec1, x1, y1)
    return spec1, x1, y1
class Dynspec:
''' Dynspec: a class for manipulating and plotting dynamic spectra (using masked arrays) and
keeping track of the frequencies and time lists corresponding to the array indices.
Here is a list of all object attributes that may be defined by any class routines:
- self.spec : dictionary containing entries for each poln product (such as 'rr') - data are masked arrays
- self.f : list of frequencies corresponding to dynspec rows
- self.time : astropy.time.Time object containing list of times corresponding to dynspec columns
'''
def __init__(self,params={}):
# initiates a Dynspec object
self.spec={}
if 'filename' in params:
self.load_dynspec(params)
def read_params(self,params):
# params is a dictionary with certain useful parameters:
# params['filename']: directory name to load dynspec from (must contain rr.dat, ll.dat, freq.dat, times.dat)
# params['i']: load dynspec only starting from time with that index (default 0)
# params['uniform']: regrid to uniform time/frequency sampling after loading dynspec (default False)
filename = params.get('filename','')
i = params.get('i',0)
uniform = params.get('uniform',False)
return filename,i,uniform
    def load_dynspec(self,params):
        # if filename is a valid file, then loads dynspec (rr,ll,t,f) from that directory
        # self.spec['rr'] and self.spec['ll'] are loaded as masked arrays
        # optional parameter i is used to tell it to load the dynspec starting from time with index i
        # future modification: enable imax as well?
        filename,i,uniform=self.read_params(params)
        if os.path.exists(filename):
            print 'loading RR from', filename
            # prefer the binary .npy files when present; fall back to ASCII .dat
            if os.path.exists(filename+'/rr.npy'):
                exten = '.npy'
                loadfunc = load
            else:
                exten = '.dat'
                loadfunc = loadtxt
            # real() drops any residual imaginary part; make_ma masks zero-valued cells
            self.spec['rr'] = real(make_ma(loadfunc(filename+'/rr'+exten))[i:,:])
            print 'RCP rms:', self.get_rms('rr')*1000, 'mJy'
            print 'loading LL from', filename
            self.spec['ll'] = real(make_ma(loadfunc(filename+'/ll'+exten))[i:,:])
            print 'LCP rms:', self.get_rms('ll')*1000, 'mJy'
            self.f=array(loadtxt(filename+'/freq.dat')) # units: Hz
            t=array(loadtxt(filename+'/times.dat')[i:]) # units: MJD in seconds
            self.time = TimeSec(t,format='mjds') # create Time object containing list of MJD times
            if uniform:
                self.regrid_uniform() # regrid to uniform time and frequency sampling
        else:
            print 'Warning: bad dynspec filename:', filename
def make_xlist(self,x,dx,x0=None):
# xlist: for each element in x, count how many units of dx it is away from x0 (or x[0] if x0 is not defined)
if x0 is None:
x0 = x[0]
diff = (x[1:]-x[:-1])/dx
diff_int = diff.round().astype(int)
diff0 = ((x[0]-x0)/dx).round().astype(int)
xlist = diff0 * ones(shape(x)).astype(int)
xlist[1:] += cumsum(diff_int)
return xlist
def make_full_indlist(self,xlist):
# indlist: return a list counting from 0 to max(xlist)
return arange(max(xlist)+1)
def get_tlist(self):
# return tlist: tlist is the amount of time (in units of integration time)
# that each column in the dynamic spectrum is separated from the first integration (so tlist[0] is 0)
t = self.time.mjds()
tlist = self.make_xlist(t,self.dt())
return tlist
def get_flist(self):
# return flist: flist is the number of frequency channels
# that each row in the dynamic spectrum is separated from the first channel (so flist[0] is 0)
flist = self.make_xlist(self.f,self.df())
return flist
def gen_x(self,xlist,x0,dx):
return x0 + xlist * dx
def get_spacing(self,x):
# return median spacing between elements of x
# meant to help retrieve integration time or channel width
return median(x[1:]-x[:-1])
def dt(self):
# return integration time (duration of dynspec pixels)
return self.get_spacing(self.time.mjds())
def df(self):
# return channel width (bandwidth of dynspec pixels)
return self.get_spacing(self.f)
def set_time(self,tlist,t0):
# given t0 and tlist (tlist is in units of integration times, t0 in MJD seconds), set self.time
# as a Time object with a correct list of times in MJD seconds
t = self.gen_x(tlist,t0,self.dt())
self.time = TimeSec(t,format='mjds')
def set_freq(self,flist,f0):
# given f0 and flist (flist is in units of self.df(), f0 in Hz), set self.f as an array of frequencies
# in units of Hz
self.f = self.gen_x(flist,f0,self.df())
def regrid_uniform(self):
# regrid by adding blank rows and columns so that time and frequency sampling is uniform
| |
<gh_stars>1-10
# Copyright 2019 SAP SE
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
BigVM service
"""
import itertools
from oslo_log import log as logging
from oslo_messaging import exceptions as oslo_exceptions
from oslo_service import periodic_task
import nova.conf
from nova import context as nova_context
from nova import exception
from nova import manager
from nova.objects.aggregate import AggregateList
from nova.objects.cell_mapping import CellMappingList
from nova.objects.compute_node import ComputeNodeList
from nova.objects.host_mapping import HostMappingList
from nova import rc_fields
from nova.scheduler import client as scheduler_client
from nova.scheduler.client.report import get_placement_request_id
from nova.scheduler.client.report import NESTED_PROVIDER_API_VERSION
from nova.scheduler.utils import ResourceRequest
from nova import utils
from nova.virt.vmwareapi import special_spawning
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
MEMORY_MB = rc_fields.ResourceClass.MEMORY_MB
BIGVM_RESOURCE = special_spawning.BIGVM_RESOURCE
BIGVM_DISABLED_TRAIT = 'CUSTOM_BIGVM_DISABLED'
BIGVM_EXCLUSIVE_TRAIT = 'CUSTOM_HANA_EXCLUSIVE_HOST'
MEMORY_RESERVABLE_MB_RESOURCE = utils.MEMORY_RESERVABLE_MB_RESOURCE
VMWARE_HV_TYPE = 'VMware vCenter Server'
SHARD_PREFIX = 'vc-'
HV_SIZE_BUCKET_THRESHOLD_PERCENT = 10
class BigVmManager(manager.Manager):
"""Takes care of the needs of big VMs"""
def __init__(self, *args, **kwargs):
client = scheduler_client.SchedulerClient()
self.placement_client = client.reportclient
self.special_spawn_rpc = special_spawning.SpecialVmSpawningInterface()
super(BigVmManager, self).__init__(service_name='bigvm',
*args, **kwargs)
    @periodic_task.periodic_task(spacing=CONF.
                                 prepare_empty_host_for_spawning_interval,
                                 run_immediately=True)
    def _prepare_empty_host_for_spawning(self, context):
        """Handle freeing up hosts per hv_size per VC for spawning

        The general workflow is:
        1) Find all hypervisor sizes (hv_size) existing in a vCenter (VC)
        within an availability zone (az).
        2) Choose one host per hv_size per VC and create a child resource
        provider (rp) for it with a well-known name. This marks that
        host as responsible for the hv_size in that VC. The child rp has no
        resources, yet. This also triggers the vmware-driver to free up a
        host in the cluster.
        3) Either directly or in the next iteration every host without
        resources is checked for its status by calling the vmware
        driver. If we're done freeing up the host, we add the
        BIGVM_RESOURCE to the child rp to make it consumable.
        4) In every iteration, we check for reserved BIGVM_RESOURCEs on a
        child rp and if the host is still free. Reserving a resource happens
        after consumption, so we know the host is not longer free and have
        to clean up the child rp and the vmware part. The host might be used
        again if a migration happened in the background. We need to clean up
        the child rp in this case and redo the scheduling.

        We only want to fill up cluster memory to a certain point, configurable
        via bigvm_cluster_max_usage_percent and
        bigvm_cluster_max_reservation_percent. Resource-providers having more
        RAM usage or -reservation than those two respectively, will not be used
        for an hv_size - if they are not exclusively used for HANA flavors.
        Additionally, we check in every iteration, if we have to give up a
        freed-up host, because the cluster reached one of the limits.
        """
        client = self.placement_client
        # make sure our custom trait exists
        client._ensure_traits(context, [BIGVM_DISABLED_TRAIT,
                                        BIGVM_EXCLUSIVE_TRAIT])
        vcenters, bigvm_providers, vmware_providers = \
            self._get_providers(context)
        # step 4 of the workflow: drop child rps whose host is no longer free
        self._check_and_clean_providers(context, client, bigvm_providers,
                                        vmware_providers)
        missing_hv_sizes_per_vc = self._get_missing_hv_sizes(context,
            vcenters, bigvm_providers, vmware_providers)
        if not any(missing_hv_sizes_per_vc.values()):
            LOG.info('Free host for spawning defined for every '
                     'vCenter and hypervisor-size.')
            return

        def _flatten(list_of_lists):
            return itertools.chain.from_iterable(list_of_lists)

        # retrieve allocation candidates for all hv sizes. we later have to
        # filter them by VC, because our placement doesn't know about VCs.
        candidates = {}
        for hv_size in set(_flatten(missing_hv_sizes_per_vc.values())):
            resources = ResourceRequest()
            resources._add_resource(None, MEMORY_MB, hv_size)
            res = client.get_allocation_candidates(context, resources)
            if res is None:
                continue
            alloc_reqs, provider_summaries, allocation_request_version = res
            # filter out providers, that don't match the full host, e.g. don't
            # allow 3 TB on a 6 TB host, as we need a fully free host
            provider_summaries = {p: d for p, d in provider_summaries.items()
                if vmware_providers.get(p, {}).get('hv_size') == hv_size}
            if not provider_summaries:
                LOG.warning('Could not find enough resources to free up a '
                            'host for hypervisor size %(hv_size)d.',
                            {'hv_size': hv_size})
                continue
            # filter out providers that are too full already
            filtered_provider_summaries = {}
            for p, d in provider_summaries.items():
                # Hosts exclusively used for hana_* flavors cannot be too full
                if BIGVM_EXCLUSIVE_TRAIT in d['traits']:
                    filtered_provider_summaries[p] = d
                    continue
                # default to 100% so providers missing from vmware_providers
                # are treated as full and skipped
                used = vmware_providers.get(p, {})\
                    .get('memory_mb_used_percent', 100)
                reserved = vmware_providers.get(p, {})\
                    .get('memory_reservable_mb_used_percent', 100)
                if (used > CONF.bigvm_cluster_max_usage_percent
                        or reserved
                            > CONF.bigvm_cluster_max_reservation_percent):
                    continue
                filtered_provider_summaries[p] = d
            if not filtered_provider_summaries:
                LOG.warning('Could not find a resource-provider to free up a '
                            'host for hypervisor size %(hv_size)d, because '
                            'all clusters are already using more than '
                            '%(max_used)d%% of total memory or reserving more '
                            'than %(max_reserved)d%% of reservable memory.',
                            {'hv_size': hv_size,
                             'max_used': CONF.bigvm_cluster_max_usage_percent,
                             'max_reserved':
                                CONF.bigvm_cluster_max_reservation_percent})
                continue
            # filter out providers that are disabled for bigVMs
            provider_summaries = filtered_provider_summaries
            filtered_provider_summaries = {}
            for p, d in provider_summaries.items():
                if BIGVM_DISABLED_TRAIT in d['traits']:
                    continue
                filtered_provider_summaries[p] = d
            if not filtered_provider_summaries:
                LOG.warning('Could not find a resource-provider to free up a '
                            'host for hypervisor size %(hv_size)d, because '
                            'all providers with enough space are disabled.',
                            {'hv_size': hv_size})
                continue
            candidates[hv_size] = (alloc_reqs, filtered_provider_summaries)

        for vc in vcenters:
            for hv_size in missing_hv_sizes_per_vc[vc]:
                if hv_size not in candidates:
                    LOG.warning('Could not find a resource-provider to free '
                                'up a host for hypervisor size %(hv_size)d in '
                                '%(vc)s.',
                                {'hv_size': hv_size, 'vc': vc})
                    continue
                alloc_reqs, provider_summaries = candidates[hv_size]
                # filter providers by VC, as placement returned all matching
                # providers
                providers = {p: d for p, d in provider_summaries.items()
                             if vmware_providers.get(p, {}).get('vc') == vc}

                # select the one with the least usage
                def _free_memory(p):
                    memory = providers[p]['resources'][MEMORY_MB]
                    return memory['capacity'] - memory['used']

                # sorted by free memory descending, i.e. least-used first
                provider_uuids = sorted((p for p in providers),
                                        key=_free_memory, reverse=True)
                try:
                    # try providers in order until one starts freeing a host
                    for rp_uuid in provider_uuids:
                        host = vmware_providers[rp_uuid]['host']
                        cm = vmware_providers[rp_uuid]['cell_mapping']
                        with nova_context.target_cell(context, cm) as cctxt:
                            if self._free_host_for_provider(cctxt, rp_uuid,
                                                            host):
                                break
                except oslo_exceptions.MessagingTimeout as e:
                    # we don't know if the timeout happened after we started
                    # freeing a host already or because we couldn't reach the
                    # nova-compute node. Therefore, we move on to the next HV
                    # size for that VC and hope the timeout resolves for the
                    # next run.
                    LOG.exception(e)
                    LOG.warning('Skipping HV size %(hv_size)s in VC %(vc)s '
                                'because of error',
                                {'hv_size': hv_size, 'vc': vc})
def _get_providers(self, context):
"""Return our special and the basic vmware resource-providers
This returns a list of vcenters and two dicts, where the
resource-provider uuid is the key. The value contains a dict with the
important information for each resource-provider, like host, az, vc and
cell_mapping + either the hypervisor size (vmware provider) or the
resource-provider dict (special provider).
"""
client = self.placement_client
vmware_hvs = {}
for cm in CellMappingList.get_all(context):
with nova_context.target_cell(context, cm) as cctxt:
vmware_hvs.update({cn.uuid: cn.host for cn in
ComputeNodeList.get_by_hypervisor_type(cctxt,
VMWARE_HV_TYPE)})
host_azs = {}
host_vcs = {}
for agg in AggregateList.get_all(context):
if not agg.availability_zone:
continue
if agg.name == agg.availability_zone:
for host in agg.hosts:
host_azs[host] = agg.name
elif agg.name.startswith(SHARD_PREFIX):
for host in agg.hosts:
host_vcs[host] = agg.name
vcenters = set(host_vcs.values())
host_mappings = {hm.host: hm.cell_mapping
for hm in HostMappingList.get_all(context)}
# find all resource-providers that we added and also a list of vmware
# resource-providers
bigvm_providers = {}
vmware_providers = {}
resp = client.get('/resource_providers',
version=NESTED_PROVIDER_API_VERSION)
for rp in resp.json()['resource_providers']:
if rp['name'].startswith(CONF.bigvm_deployment_rp_name_prefix):
# retrieve the aggregates
url = '/resource_providers/{}/aggregates'.format(rp['uuid'])
resp = client.get(url, version=NESTED_PROVIDER_API_VERSION)
if resp.status_code != 200:
LOG.error('Could not retrieve aggregates for RP %(name)s '
'(%(rp)s).',
{'name': rp['name'], 'rp': rp['uuid']})
continue
aggregates = resp.json()['aggregates']
if not aggregates:
LOG.error('RP %(name)s (%(rp)s) has no aggregate. Cannot '
'find "parent" provider.',
{'name': rp['name'], 'rp': rp['uuid']})
continue
elif len(aggregates) > 1:
LOG.error('RP %(name)s (%(rp)s) has more than one '
'aggregate: %(aggs)s. Cannot find "parent" '
'provider.',
{'name': rp['name'], 'rp': rp['uuid'],
'aggs': ', '.join(aggregates)})
continue
host_rp_uuid = aggregates[0]
host = vmware_hvs[host_rp_uuid]
cell_mapping = host_mappings[host]
bigvm_providers[rp['uuid']] = {'rp': rp,
'host': host,
'az': host_azs[host],
'vc': host_vcs[host],
'cell_mapping': cell_mapping,
'host_rp_uuid': host_rp_uuid}
elif rp['uuid'] not in vmware_hvs: # ignore baremetal
continue
else:
# retrieve inventory for MEMORY_MB & MEMORY_RESERVABLE_MB info
url = '/resource_providers/{}/inventories'.format(rp['uuid'])
resp = client.get(url)
if resp.status_code != 200:
LOG.error('Could not retrieve inventory for RP %(rp)s.',
{'rp': rp['uuid']})
continue
inventory | |
'increment': 77,
'stepTime': 0.245965576648712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 77, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.248074951648712,
'attempts': 2, 'timeIncrement': 0.002109375, 'increment': 77,
'stepTime': 0.248074951648712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 78, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.251239014148712,
'attempts': 1, 'timeIncrement': 0.0031640625, 'increment': 78,
'stepTime': 0.251239014148712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 79, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.255985107898712,
'attempts': 1, 'timeIncrement': 0.00474609375, 'increment': 79,
'stepTime': 0.255985107898712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 80, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.263104248523712,
'attempts': 1, 'timeIncrement': 0.007119140625, 'increment': 80,
'stepTime': 0.263104248523712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'EXCESSIVE DISTORTION AT A TOTAL OF 1704 INTEGRATION POINTS IN SOLID (CONTINUUM) ELEMENTS',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.263104248523712,
'attempts': ' 1U', 'timeIncrement': 0.01, 'increment': 81,
'stepTime': 0.263104248523712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 81, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.265604248523712,
'attempts': 2, 'timeIncrement': 0.0025, 'increment': 81,
'stepTime': 0.265604248523712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 82, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.269354248523712,
'attempts': 1, 'timeIncrement': 0.00375, 'increment': 82,
'stepTime': 0.269354248523712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 83, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.274979248523712,
'attempts': 1, 'timeIncrement': 0.005625, 'increment': 83,
'stepTime': 0.274979248523712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'EXCESSIVE DISTORTION AT A TOTAL OF 2993 INTEGRATION POINTS IN SOLID (CONTINUUM) ELEMENTS',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.274979248523712,
'attempts': ' 1U', 'timeIncrement': 0.0084375, 'increment': 84,
'stepTime': 0.274979248523712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 84, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.277088623523712,
'attempts': 2, 'timeIncrement': 0.002109375, 'increment': 84,
'stepTime': 0.277088623523712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 85, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.280252686023712,
'attempts': 1, 'timeIncrement': 0.0031640625, 'increment': 85,
'stepTime': 0.280252686023712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 86, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.284998779773712,
'attempts': 1, 'timeIncrement': 0.00474609375, 'increment': 86,
'stepTime': 0.284998779773712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 87, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.292117920398712,
'attempts': 1, 'timeIncrement': 0.007119140625, 'increment': 87,
'stepTime': 0.292117920398712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'EXCESSIVE DISTORTION AT A TOTAL OF 1750 INTEGRATION POINTS IN SOLID (CONTINUUM) ELEMENTS',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.292117920398712,
'attempts': ' 1U', 'timeIncrement': 0.01, 'increment': 88,
'stepTime': 0.292117920398712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 88, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.294617920398712,
'attempts': 2, 'timeIncrement': 0.0025, 'increment': 88,
'stepTime': 0.294617920398712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 89, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.298367920398712,
'attempts': 1, 'timeIncrement': 0.00375, 'increment': 89,
'stepTime': 0.298367920398712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 90, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.303992920398712,
'attempts': 1, 'timeIncrement': 0.005625, 'increment': 90,
'stepTime': 0.303992920398712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'EXCESSIVE DISTORTION AT A TOTAL OF 4152 INTEGRATION POINTS IN SOLID (CONTINUUM) ELEMENTS',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.303992920398712,
'attempts': ' 1U', 'timeIncrement': 0.0084375, 'increment': 91,
'stepTime': 0.303992920398712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 91, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.306102295398712,
'attempts': 2, 'timeIncrement': 0.002109375, 'increment': 91,
'stepTime': 0.306102295398712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 92, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.309266357898712,
'attempts': 1, 'timeIncrement': 0.0031640625, 'increment': 92,
'stepTime': 0.309266357898712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 93, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.314012451648712,
'attempts': 1, 'timeIncrement': 0.00474609375, 'increment': 93,
'stepTime': 0.314012451648712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 94, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.321131592273712,
'attempts': 1, 'timeIncrement': 0.007119140625, 'increment': 94,
'stepTime': 0.321131592273712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'EXCESSIVE DISTORTION AT A TOTAL OF 4003 INTEGRATION POINTS IN SOLID (CONTINUUM) ELEMENTS',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.321131592273712,
'attempts': ' 1U', 'timeIncrement': 0.01, 'increment': 95,
'stepTime': 0.321131592273712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 95, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.323631592273712,
'attempts': 2, 'timeIncrement': 0.0025, 'increment': 95,
'stepTime': 0.323631592273712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 96, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.327381592273712,
'attempts': 1, 'timeIncrement': 0.00375, 'increment': 96,
'stepTime': 0.327381592273712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 97, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.333006592273712,
'attempts': 1, 'timeIncrement': 0.005625, 'increment': 97,
'stepTime': 0.333006592273712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'EXCESSIVE DISTORTION AT A TOTAL OF 5917 INTEGRATION POINTS IN SOLID (CONTINUUM) ELEMENTS',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.333006592273712,
'attempts': ' 1U', 'timeIncrement': 0.0084375, 'increment': 98,
'stepTime': 0.333006592273712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 98, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.335115967273712,
'attempts': 2, 'timeIncrement': 0.002109375, 'increment': 98,
'stepTime': 0.335115967273712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 99, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.338280029773712,
'attempts': 1, 'timeIncrement': 0.0031640625, 'increment': 99,
'stepTime': 0.338280029773712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 100, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.343026123523712,
'attempts': 1, 'timeIncrement': 0.00474609375, 'increment': 100,
'stepTime': 0.343026123523712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 101, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.350145264148712,
'attempts': 1, 'timeIncrement': 0.007119140625, 'increment': 101,
'stepTime': 0.350145264148712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'EXCESSIVE DISTORTION AT A TOTAL OF 5209 INTEGRATION POINTS IN SOLID (CONTINUUM) ELEMENTS',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.350145264148712,
'attempts': ' 1U', 'timeIncrement': 0.01, 'increment': 102,
'stepTime': 0.350145264148712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 2, 'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 102, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.352645264148712,
'attempts': 2, 'timeIncrement': 0.0025, 'increment': 102,
'stepTime': 0.352645264148712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 103, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.356395264148712,
'attempts': 1, 'timeIncrement': 0.00375, 'increment': 103,
'stepTime': 0.356395264148712, 'step': 1, 'jobName': 'OneTaper3D',
'severe': 0, 'iterations': 1, 'phase': STANDARD_PHASE, 'equilibrium': 1})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 104, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.362020264148712,
'attempts': 1, 'timeIncrement': 0.005625, 'increment': 104,
'stepTime': 0.362020264148712, 'step': | |
None
self._children_name_map["tcp_small_servers"] = "tcp-small-servers"
self._children_yang_names.add("tcp-small-servers")
self.udp_small_servers = None
self._children_name_map["udp_small_servers"] = "Cisco-IOS-XR-ip-udp-cfg:udp-small-servers"
self._children_yang_names.add("Cisco-IOS-XR-ip-udp-cfg:udp-small-servers")
self._segment_path = lambda: "small-servers"
self._absolute_path = lambda: "Cisco-IOS-XR-ip-tcp-cfg:ip/cinetd/services/ipv4/%s" % self._segment_path()
class TcpSmallServers(Entity):
    """
    Describing TCP related IPV4 and IPV6 small
    servers

    .. attribute:: access_control_list_name

        Specify the access list

        **type**\: str

    .. attribute:: small_server

        Set number of allowable TCP small servers, specify 0 for no\-limit

        **type**\: union of the below types:

            **type**\: :py:class:`SmallServer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg.Ip.Cinetd.Services.Ipv4.SmallServers.TcpSmallServers.SmallServer>`

            **type**\: int

            **range:** 0..2147483647

    **mandatory**\: True

    This class is a :ref:`presence class<presence-class>`
    """

    # NOTE(review): the SmallServer cross-reference above previously pointed at
    # ...Services.Ipv6...; this class lives under the Ipv4 container (see the
    # super() call below), so the path was corrected.  Auto-generated YDK
    # binding — prefer regenerating from the Cisco-IOS-XR-ip-tcp-cfg YANG model
    # over editing by hand.
    _prefix = 'ip-tcp-cfg'
    _revision = '2016-02-26'

    def __init__(self):
        super(Ip.Cinetd.Services.Ipv4.SmallServers.TcpSmallServers, self).__init__()

        self.yang_name = "tcp-small-servers"
        self.yang_parent_name = "small-servers"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])
        # Presence container: it carries configuration merely by existing.
        self.is_presence_container = True
        self._leafs = OrderedDict([
            ('access_control_list_name', YLeaf(YType.str, 'access-control-list-name')),
            ('small_server', YLeaf(YType.str, 'small-server')),
        ])
        self.access_control_list_name = None
        self.small_server = None
        self._segment_path = lambda: "tcp-small-servers"
        self._absolute_path = lambda: "Cisco-IOS-XR-ip-tcp-cfg:ip/cinetd/services/ipv4/small-servers/%s" % self._segment_path()

    def __setattr__(self, name, value):
        # Route attribute writes through YDK validation for the two leafs.
        self._perform_setattr(Ip.Cinetd.Services.Ipv4.SmallServers.TcpSmallServers, ['access_control_list_name', 'small_server'], name, value)

    class SmallServer(Enum):
        """
        SmallServer (Enum Class)

        Set number of allowable TCP small servers,
        specify 0 for no\-limit

        .. data:: no_limit = 0

            Unlimited Servers

        """

        no_limit = Enum.YLeaf(0, "no-limit")
class UdpSmallServers(Entity):
    """
    UDP small servers configuration

    .. attribute:: access_control_list_name

        Specify the access list

        **type**\: str

    .. attribute:: small_server

        Set number of allowable small servers, specify 0 for no\-limit

        **type**\: union of the below types:

            **type**\: :py:class:`SmallServer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg.Ip.Cinetd.Services.Ipv4.SmallServers.UdpSmallServers.SmallServer>`

            **type**\: int

            **range:** 0..2147483647

    **mandatory**\: True

    This class is a :ref:`presence class<presence-class>`
    """

    # Auto-generated YDK binding.  Note this container comes from a different
    # YANG module (Cisco-IOS-XR-ip-udp-cfg) than its parent, hence the
    # module-qualified segment path below.
    _prefix = 'ip-udp-cfg'
    _revision = '2017-07-31'

    def __init__(self):
        super(Ip.Cinetd.Services.Ipv4.SmallServers.UdpSmallServers, self).__init__()

        self.yang_name = "udp-small-servers"
        self.yang_parent_name = "small-servers"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])
        # Presence container: it carries configuration merely by existing.
        self.is_presence_container = True
        self._leafs = OrderedDict([
            ('access_control_list_name', YLeaf(YType.str, 'access-control-list-name')),
            ('small_server', YLeaf(YType.str, 'small-server')),
        ])
        self.access_control_list_name = None
        self.small_server = None
        self._segment_path = lambda: "Cisco-IOS-XR-ip-udp-cfg:udp-small-servers"
        self._absolute_path = lambda: "Cisco-IOS-XR-ip-tcp-cfg:ip/cinetd/services/ipv4/small-servers/%s" % self._segment_path()

    def __setattr__(self, name, value):
        # Route attribute writes through YDK validation for the two leafs.
        self._perform_setattr(Ip.Cinetd.Services.Ipv4.SmallServers.UdpSmallServers, ['access_control_list_name', 'small_server'], name, value)

    class SmallServer(Enum):
        """
        SmallServer (Enum Class)

        Set number of allowable small servers, specify
        0 for no\-limit

        .. data:: no_limit = 0

            Unlimited Servers

        """

        no_limit = Enum.YLeaf(0, "no-limit")
class Vrfs(Entity):
"""
VRF table
.. attribute:: vrf
VRF specific data
**type**\: list of :py:class:`Vrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg.Ip.Cinetd.Services.Vrfs.Vrf>`
"""
_prefix = 'ip-tcp-cfg'
_revision = '2016-02-26'
def __init__(self):
    # Auto-generated YDK initialiser for the "vrfs" container, which holds
    # the keyed YANG list "vrf" (entries are Ip.Cinetd.Services.Vrfs.Vrf).
    super(Ip.Cinetd.Services.Vrfs, self).__init__()

    self.yang_name = "vrfs"
    self.yang_parent_name = "services"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([])
    self._child_list_classes = OrderedDict([("vrf", ("vrf", Ip.Cinetd.Services.Vrfs.Vrf))])
    # No leafs directly on this container.
    self._leafs = OrderedDict()

    self.vrf = YList(self)
    self._segment_path = lambda: "vrfs"
    self._absolute_path = lambda: "Cisco-IOS-XR-ip-tcp-cfg:ip/cinetd/services/%s" % self._segment_path()
def __setattr__(self, name, value):
    # Route attribute writes through YDK validation (empty leaf list: this
    # container has child entities only, no leafs of its own).
    self._perform_setattr(Ip.Cinetd.Services.Vrfs, [], name, value)
class Vrf(Entity):
"""
VRF specific data
.. attribute:: vrf_name (key)
Name of the VRF instance
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: ipv6
IPV6 related services
**type**\: :py:class:`Ipv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg.Ip.Cinetd.Services.Vrfs.Vrf.Ipv6>`
.. attribute:: ipv4
IPV4 related services
**type**\: :py:class:`Ipv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg.Ip.Cinetd.Services.Vrfs.Vrf.Ipv4>`
"""
_prefix = 'ip-tcp-cfg'
_revision = '2016-02-26'
def __init__(self):
    # Auto-generated YDK initialiser for one "vrf" list entry, keyed by
    # vrf_name; holds the per-VRF ipv4/ipv6 service containers.
    super(Ip.Cinetd.Services.Vrfs.Vrf, self).__init__()

    self.yang_name = "vrf"
    self.yang_parent_name = "vrfs"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    # vrf_name is the YANG list key; it appears in the segment path below.
    self.ylist_key_names = ['vrf_name']
    self._child_container_classes = OrderedDict([("ipv6", ("ipv6", Ip.Cinetd.Services.Vrfs.Vrf.Ipv6)), ("ipv4", ("ipv4", Ip.Cinetd.Services.Vrfs.Vrf.Ipv4))])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('vrf_name', YLeaf(YType.str, 'vrf-name')),
    ])
    self.vrf_name = None

    self.ipv6 = Ip.Cinetd.Services.Vrfs.Vrf.Ipv6()
    self.ipv6.parent = self
    self._children_name_map["ipv6"] = "ipv6"
    self._children_yang_names.add("ipv6")

    self.ipv4 = Ip.Cinetd.Services.Vrfs.Vrf.Ipv4()
    self.ipv4.parent = self
    self._children_name_map["ipv4"] = "ipv4"
    self._children_yang_names.add("ipv4")

    self._segment_path = lambda: "vrf" + "[vrf-name='" + str(self.vrf_name) + "']"
    self._absolute_path = lambda: "Cisco-IOS-XR-ip-tcp-cfg:ip/cinetd/services/vrfs/%s" % self._segment_path()
def __setattr__(self, name, value):
    # Route attribute writes through YDK validation; vrf_name is the only
    # leaf on this list entry.
    self._perform_setattr(Ip.Cinetd.Services.Vrfs.Vrf, ['vrf_name'], name, value)
class Ipv6(Entity):
    """
    IPV6 related services

    .. attribute:: telnet

        TELNET server configuration commands

        **type**\: :py:class:`Telnet <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg.Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet>`

    .. attribute:: tftp

        TFTP server configuration commands

        **type**\: :py:class:`Tftp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg.Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp>`

    """

    # Auto-generated YDK binding for the per-VRF IPv6 services container —
    # prefer regenerating from the Cisco-IOS-XR-ip-tcp-cfg YANG model over
    # editing by hand.
    _prefix = 'ip-tcp-cfg'
    _revision = '2016-02-26'

    def __init__(self):
        super(Ip.Cinetd.Services.Vrfs.Vrf.Ipv6, self).__init__()

        self.yang_name = "ipv6"
        self.yang_parent_name = "vrf"
        self.is_top_level_class = False
        # Lives under the keyed "vrf" list, so no absolute path is generated.
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([("telnet", ("telnet", Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet)), ("tftp", ("tftp", Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp))])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict()

        self.telnet = Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet()
        self.telnet.parent = self
        self._children_name_map["telnet"] = "telnet"
        self._children_yang_names.add("telnet")

        self.tftp = Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp()
        self.tftp.parent = self
        self._children_name_map["tftp"] = "tftp"
        self._children_yang_names.add("tftp")

        self._segment_path = lambda: "ipv6"

    class Telnet(Entity):
        """
        TELNET server configuration commands

        .. attribute:: tcp

            TCP details

            **type**\: :py:class:`Tcp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg.Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet.Tcp>`

            **presence node**\: True

        """

        _prefix = 'ip-tcp-cfg'
        _revision = '2016-02-26'

        def __init__(self):
            super(Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet, self).__init__()

            self.yang_name = "telnet"
            self.yang_parent_name = "ipv6"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_container_classes = OrderedDict([("tcp", ("tcp", Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet.Tcp))])
            self._child_list_classes = OrderedDict([])
            self._leafs = OrderedDict()

            # Presence node: left as None until explicitly created by the user.
            self.tcp = None
            self._children_name_map["tcp"] = "tcp"
            self._children_yang_names.add("tcp")
            self._segment_path = lambda: "telnet"

        class Tcp(Entity):
            """
            TCP details

            .. attribute:: access_list_name

                Access list

                **type**\: str

            .. attribute:: maximum_server

                Set number of allowable servers

                **type**\: int

                **range:** 1..100

            **mandatory**\: True

            This class is a :ref:`presence class<presence-class>`
            """

            _prefix = 'ip-tcp-cfg'
            _revision = '2016-02-26'

            def __init__(self):
                super(Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet.Tcp, self).__init__()

                self.yang_name = "tcp"
                self.yang_parent_name = "telnet"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                # Presence container: it carries configuration merely by existing.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('access_list_name', YLeaf(YType.str, 'access-list-name')),
                    ('maximum_server', YLeaf(YType.uint32, 'maximum-server')),
                ])
                self.access_list_name = None
                self.maximum_server = None
                self._segment_path = lambda: "tcp"

            def __setattr__(self, name, value):
                # Route attribute writes through YDK validation for the two leafs.
                self._perform_setattr(Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet.Tcp, ['access_list_name', 'maximum_server'], name, value)

    class Tftp(Entity):
        """
        TFTP server configuration commands

        .. attribute:: udp

            UDP details

            **type**\: :py:class:`Udp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg.Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp.Udp>`

            **presence node**\: True

        """

        _prefix = 'ip-tcp-cfg'
        _revision = '2016-02-26'

        def __init__(self):
            super(Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp, self).__init__()

            self.yang_name = "tftp"
            self.yang_parent_name = "ipv6"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_container_classes = OrderedDict([("udp", ("udp", Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp.Udp))])
            self._child_list_classes = OrderedDict([])
            self._leafs = OrderedDict()

            # Presence node: left as None until explicitly created by the user.
            self.udp = None
            self._children_name_map["udp"] = "udp"
            self._children_yang_names.add("udp")
            self._segment_path = lambda: "tftp"

        class Udp(Entity):
            """
            UDP details

            .. attribute:: access_list_name

                Access list

                **type**\: str

            .. attribute:: maximum_server

                Set number of allowable servers, 0 for no\-limit

                **type**\: int

                **range:** 0..2147483647

            .. attribute:: home_directory

                Specify device name where file is read from (e .g. flash\:)

                **type**\: str

                **mandatory**\: True

            .. attribute:: dscp_value

                Set IP DSCP (DiffServ CodePoint) for TFTP Server Packets

                **type**\: int

                **range:** \-2147483648..2147483647

            This class is a :ref:`presence class<presence-class>`
            """

            _prefix = 'ip-tcp-cfg'
            _revision = '2016-02-26'

            def __init__(self):
                super(Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp.Udp, self).__init__()

                self.yang_name = "udp"
                self.yang_parent_name = "tftp"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                # Presence container: it carries configuration merely by existing.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('access_list_name', YLeaf(YType.str, 'access-list-name')),
                    ('maximum_server', YLeaf(YType.uint32, 'maximum-server')),
                    ('home_directory', YLeaf(YType.str, 'home-directory')),
                    ('dscp_value', YLeaf(YType.int32, 'dscp-value')),
                ])
                self.access_list_name = None
                self.maximum_server = None
                self.home_directory = None
                self.dscp_value = None
                self._segment_path = lambda: "udp"

            def __setattr__(self, name, value):
                # Route attribute writes through YDK validation for the four leafs.
                self._perform_setattr(Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp.Udp, ['access_list_name', 'maximum_server', 'home_directory', 'dscp_value'], name, value)
class Ipv4(Entity):
"""
IPV4 related services
.. attribute:: telnet
TELNET server configuration commands
**type**\: :py:class:`Telnet <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg.Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet>`
.. attribute:: tftp
TFTP server configuration commands
**type**\: :py:class:`Tftp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg.Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Tftp>`
"""
_prefix = 'ip-tcp-cfg'
_revision = '2016-02-26'
def __init__(self):
    # Auto-generated YDK initialiser for the per-VRF IPv4 services
    # container (telnet + tftp children, no leafs of its own).
    super(Ip.Cinetd.Services.Vrfs.Vrf.Ipv4, self).__init__()

    self.yang_name = "ipv4"
    self.yang_parent_name = "vrf"
    self.is_top_level_class = False
    # Lives under the keyed "vrf" list, so no absolute path is generated.
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([("telnet", ("telnet", Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet)), ("tftp", ("tftp", Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Tftp))])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict()

    self.telnet = Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet()
    self.telnet.parent = self
    self._children_name_map["telnet"] = "telnet"
    self._children_yang_names.add("telnet")

    self.tftp = Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Tftp()
    self.tftp.parent = self
    self._children_name_map["tftp"] = "tftp"
    self._children_yang_names.add("tftp")

    self._segment_path = lambda: "ipv4"
class Telnet(Entity):
"""
TELNET server configuration commands
.. attribute:: tcp
TCP details
**type**\: :py:class:`Tcp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg.Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet.Tcp>`
**presence node**\: True
"""
_prefix = 'ip-tcp-cfg'
_revision = '2016-02-26'
def __init__(self):
    # Auto-generated YDK initialiser for the IPv4 TELNET server container;
    # its only child, "tcp", is a presence node.
    super(Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet, self).__init__()

    self.yang_name = "telnet"
    self.yang_parent_name = "ipv4"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([("tcp", ("tcp", Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet.Tcp))])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict()

    # Presence node: left as None until explicitly created by the user.
    self.tcp = None
    self._children_name_map["tcp"] = "tcp"
    self._children_yang_names.add("tcp")
    self._segment_path = lambda: "telnet"
class Tcp(Entity):
"""
TCP details
.. attribute:: access_list_name
Access list
**type**\: str
.. attribute:: maximum_server
Set number of allowable servers
**type**\: int
**range:** 1..100
**mandatory**\: True
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'ip-tcp-cfg'
_revision = '2016-02-26'
def __init__(self):
super(Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet.Tcp, self).__init__()
self.yang_name = | |
import pygame
from PIL import Image
import time
import pandas as pd
import os
import re # For string slicing stuff (used in natural sort function)
import difflib # For string matching in keywords for keyword suggestions and matching
# Center any windows opened (the map, for example)
os.environ['SDL_VIDEO_CENTERED'] = '1'
# Load dataset for keyword dictionary
def load_stall_keywords(data_location="canteens.xlsx"):
    """Build a {canteen: {stall: keywords}} dictionary from the Excel dataset.

    # Arguments:
        data_location: path to an Excel file with 'Canteen', 'Stall' and
                       'Keywords' columns.
    # Returns:
        dict mapping each canteen name to a dict of its stalls and their
        keyword strings; canteens are inserted in case-insensitive order.
    """
    # BUGFIX: pd.read_excel() has no 'trim_ws' argument (that option belongs
    # to R's readxl); passing it raises TypeError, so it has been removed.
    canteen_data = pd.read_excel(data_location)
    canteens = sorted(canteen_data['Canteen'].unique(), key=str.lower)
    # Pre-create an entry per canteen so every canteen appears in the result
    keywords = {canteen: {} for canteen in canteens}
    # Keep one row per stall, then bucket each stall under its canteen
    unique_stalls = canteen_data.drop_duplicates(subset="Stall")
    stall_keywords = unique_stalls.set_index('Stall')['Keywords'].to_dict()
    stall_canteen = unique_stalls.set_index('Stall')['Canteen'].to_dict()
    for stall in sorted(stall_keywords, key=str.lower):
        keywords[stall_canteen[stall]][stall] = stall_keywords[stall]
    return keywords
# Load dataset for price dictionary
def load_stall_prices(data_location="canteens.xlsx"):
    """Build a {canteen: {stall: price}} dictionary from the Excel dataset.

    # Arguments:
        data_location: path to an Excel file with 'Canteen', 'Stall' and
                       'Price' columns.
    # Returns:
        dict mapping each canteen name to a dict of its stalls and their
        prices; canteens are inserted in case-insensitive order.
    """
    # BUGFIX: pd.read_excel() has no 'trim_ws' argument (that option belongs
    # to R's readxl); passing it raises TypeError, so it has been removed.
    canteen_data = pd.read_excel(data_location)
    canteens = sorted(canteen_data['Canteen'].unique(), key=str.lower)
    # Pre-create an entry per canteen so every canteen appears in the result
    prices = {canteen: {} for canteen in canteens}
    # Keep one row per stall, then bucket each stall under its canteen
    unique_stalls = canteen_data.drop_duplicates(subset="Stall")
    stall_prices = unique_stalls.set_index('Stall')['Price'].to_dict()
    stall_canteen = unique_stalls.set_index('Stall')['Canteen'].to_dict()
    for stall in sorted(stall_prices, key=str.lower):
        prices[stall_canteen[stall]][stall] = stall_prices[stall]
    return prices
# Load dataset for location dictionary
def load_canteen_location(data_location="canteens.xlsx"):
    """Build a {canteen: [x, y]} dictionary of map pixel coordinates.

    # Arguments:
        data_location: path to an Excel file with 'Canteen' and 'Location'
                       columns, where 'Location' holds an "x,y" string.
    # Returns:
        dict mapping each canteen name to its [x, y] integer coordinates.
    """
    # BUGFIX: pd.read_excel() has no 'trim_ws' argument (that option belongs
    # to R's readxl); passing it raises TypeError, so it has been removed.
    canteen_data = pd.read_excel(data_location)
    canteens = sorted(canteen_data['Canteen'].unique(), key=str.lower)
    # The original re-ran copy()/drop_duplicates() once per canteen and then
    # looped over all canteens again inside that loop (O(n^2), with the inner
    # loop shadowing the outer variable); one pass is sufficient.
    unique_canteens = canteen_data.drop_duplicates(subset="Canteen")
    location_strings = unique_canteens.set_index('Canteen')['Location'].to_dict()
    canteen_locations = {}
    for canteen in canteens:
        x_str, y_str = location_strings[canteen].split(',')[:2]
        canteen_locations[canteen] = [int(x_str), int(y_str)]
    return canteen_locations
# Get user's location with the use of PyGame
def get_user_location_interface():
    """Show the NTU map in a pygame window and return the clicked location.

    # Returns:
        (x, y) pixel tuple of the user's click on the scaled map, or None if
        the window is closed without selecting a point.
    """
    # Initialize pygame
    pygame.init()
    # Get dimensions and files
    imageLocation = 'NTUcampus.jpg'
    pinLocation = 'pin.png'
    screenTitle = "Location Based Search (NTU)"
    mapSize = (620, 750)
    pinSize = (50, 50)
    # Set screen width and height for display surface
    screen = pygame.display.set_mode(mapSize)
    # Set title of screen
    pygame.display.set_caption(screenTitle)
    # Open image file and pin file, and scale them to the desired size
    ntuMapOriginal = pygame.image.load(imageLocation).convert()
    ntuMap = pygame.transform.smoothscale(ntuMapOriginal, mapSize)
    pinOriginal = pygame.image.load(pinLocation).convert_alpha()
    pin = pygame.transform.smoothscale(pinOriginal, pinSize)
    # Loop for the whole interface while it remains active
    # NOTE(review): 'exit' shadows the builtin of the same name; consider
    # renaming (kept as-is here since this is a documentation-only pass).
    exit = False
    userLocation = None
    while not exit:
        # First, we make a call to the event queue to check for events every frame
        for event in pygame.event.get():
            # User exits the window, we return an error as location was not selected
            if event.type == pygame.QUIT:
                exit = True
                userLocation = None
            # DISPLAY ELEMENTS
            # If the window is not closed, then we show NTU map and let the user pick a location
            screen.blit(ntuMap, (0,0))
            # Do NOT allow resizing of window
            """ # If the user resizes the window, resize accordingly
            if event.type == pygame.VIDEORESIZE:
                screen = pygame.display.set_mode(event.dict['size'], pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE)
                screen.blit(pygame.transform.smoothscale(screen, event.dict['size']), (0, 0))
                scaledHeight = event.dict['h']
                scaledWidth = event.dict['w'] """
            # If the user picks a coordinate, then we close the window and return the coordinates of the click
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Get outputs of Mouseclick event handler
                (mouseX, mouseY) = pygame.mouse.get_pos()
                # Update userLocation. Since we have successfully captured the user input, there are no more errors.
                userLocation = (mouseX, mouseY)
                # Paste pin on mouse position
                # offsets (-25, -42) place the pin tip on the clicked pixel
                screen.blit(pin, (userLocation[0] - 25, userLocation[1] - 42))
                pygame.display.update()
                # brief pause so the user sees the pin before the window closes
                time.sleep(0.2)
                exit = True
        # While window is open, constantly refresh the window display
        pygame.display.update()
    pygame.display.quit()
    pygame.quit()
    return userLocation
# Keyword-based Search Function
# This function attempts to match the search term with the keywords of various stalls
def search_by_keyword(keyword):
    """Print all stalls whose keyword string matches *keyword*.

    Matching is case-insensitive and bidirectional substring based (the
    search term may appear inside the stall's keywords, or vice versa).
    When nothing matches, the closest keyword from suggest_keyword() is
    offered and, if the user accepts with 'y', the search re-runs
    recursively with that suggestion.

    # Arguments:
        keyword: the user's search term (any case).
    """
    # Since keyword is non-case sensitive, we standardize it to small letters
    searchTerm = keyword.lower()
    # Load list of stalls and their keywords (dictionary within dictionary)
    stallList = load_stall_keywords()
    # Create an empty dictionary to store results found
    results = {}
    numberOfStallsFound = 0
    # Search all the stall keywords within the canteens to find matches KEY : VALUE = CANTEEN NAME : DICTIONARY OF STALLS
    # In other words, canteen = Canteen name, stalls = Dictionary of stalls
    print("Searching...")
    for canteen, stalls in stallList.items():
        # List of stalls found in this canteen
        stallsFound = []
        # KEY : VALUE = STALL NAME : KEYWORDS OF STALL (As a string)
        # i.e. stall = Stall name, keywords = Keywords of stall as a string e.g. "Halal, Chinese"
        for stall, keywords in stalls.items():
            # If our search term matches a keyword of a stall, we add the stall to results
            if (searchTerm in keywords.lower()) or (keywords.lower() in searchTerm):
                numberOfStallsFound += 1
                # Add that stall found and its keywords as a concatenated string to a list of found stalls within this canteen
                stallsFound.append(stall + " (" + keywords + ")")
        # Add the stalls we found in this canteen to a results dictionary containing all the canteens with stalls found by the search term
        if (len(stallsFound) > 0):
            results[canteen] = stallsFound
    # Display results
    if (numberOfStallsFound <= 0):
        # If no food stalls found, we suggest keywords closest to the keyword of the user to get a match
        suggestion = suggest_keyword(searchTerm)
        if (suggestion != None):
            # If the user agrees with the suggestion, we search again with the new valid keyword
            validated = False
            while not validated:
                errorCheck = input("No food stall(s) found with input keyword '{}'. Did you mean '{}' instead? (y/n): ".format(searchTerm, suggestion.lower()))
                if (errorCheck == "y" or errorCheck == "Y"):
                    # Call function to search with the suggested keyword
                    validated = True
                    search_by_keyword(suggestion)
                elif (errorCheck == "n" or errorCheck == "N"):
                    validated = True
                    print("Exiting search by keyword...")
                # We do not understand the user input. Ask user again to confirm his/her choice.
                else:
                    print("Please input 'y' to search with the suggested keyword, or 'n' to exit to the menu.")
        else:
            print("No food stalls(s) found with input keyword '{}'. No keyword suggestions match your search term. Exiting to menu...".format(searchTerm))
    else:
        print("{} food stall(s) found matching keyword '{}':".format(numberOfStallsFound, searchTerm))
        # Sort the dictionary's keys to print canteen names in sorted order
        sortedCanteens = natural_sort(results.keys())
        # Using the sorted keys, we then access the results dictionary to print out our results in order.
        for canteenName in sortedCanteens:
            # Iterate through the list of stalls found within this particular canteen that matches the search term
            for stallFound in results[canteenName]:
                print(canteenName + " - " + stallFound)
# Price-based Search Function
# Returns a listing of stalls that fit within a given price range
def search_by_price(minPrice, maxPrice):
    """Print every stall whose price lies within [minPrice, maxPrice].

    # Arguments:
        minPrice: lower bound of the price range (inclusive).
        maxPrice: upper bound of the price range (inclusive).
    """
    # {canteen: {stall: price}} loaded from the dataset
    price_book = load_stall_prices()
    print("Searching...")
    matches = {}
    total_found = 0
    for canteen_name, stall_prices in price_book.items():
        # collect only the stalls of this canteen that fall in range
        hits = {name: cost for name, cost in stall_prices.items()
                if minPrice <= cost <= maxPrice}
        total_found += len(hits)
        if hits:
            matches[canteen_name] = hits
    if total_found <= 0:
        print("No food stall(s) found within specified price range.")
        return
    print("{} food stall(s) found within specified price range (S${:.2f} - S${:.2f}):".format(total_found, minPrice, maxPrice))
    # print canteens in natural-sorted order, each with its matching stalls
    for canteen_name in natural_sort(matches.keys()):
        for stall_name, cost in matches[canteen_name].items():
            print("{} ({}) - S${:.2f}".format(stall_name, canteen_name, cost))
# | |
import torch
from torch.utils.data import Dataset
from .collaters import *
import json
import pickle
import os
import glob
import sys
from musa.ops import *
from .utils import *
import timeit
import struct
import numpy as np
import multiprocessing as mp
from sklearn.cluster import KMeans
import copy
def read_aco_file(spk_name, file_id, aco_dir):
    """Load one utterance's acoustic features and fuse them into one matrix.

    Reads the .cc, .fv and .lf0 binary streams for *file_id* under the
    speaker's acoustic directory and returns them concatenated column-wise
    as (cc | fv | interpolated lf0 | u/v flag).
    """
    speaker_dir = os.path.join(aco_dir, spk_name)

    def _load(ext):
        # read one binary feature stream for this utterance
        return read_bin_aco_file(os.path.join(speaker_dir,
                                              '{}.{}'.format(file_id, ext)))

    cepstrum = _load('cc').reshape((-1, 40))
    fv = _load('fv').reshape((-1, 1))
    raw_lf0 = _load('lf0')
    # interpolate unvoiced lf0 regions and keep a voiced/unvoiced flag
    lf0_interp, voiced_flag = interpolation(raw_lf0,
                                            unvoiced_symbol=-10000000000.0)
    lf0_interp = lf0_interp.reshape(-1, 1)
    voiced_flag = voiced_flag.reshape(-1, 1)
    return np.concatenate((cepstrum, fv, lf0_interp, voiced_flag), axis=1)
def parse_lab_aco_correspondences(durs, aco_data):
    """Assign acoustic frames to phoneme duration boundaries.

    Walks the analysis-window centers (80-sample stride at 16 kHz) through
    the phoneme boundaries given by *durs* (seconds) and buckets each frame
    of *aco_data* into the phoneme it falls in.

    # Returns:
        (frames_per_ph, rel_per_ph) where frames_per_ph[i] is the list of
        acoustic frames of phoneme i, and rel_per_ph[i] holds, per frame,
        [relative position within the phoneme, phoneme duration in seconds].
    """
    sample_rate = 16000.
    stride = 80
    ph_idx = 0
    # boundary (in samples) where the current phoneme ends
    boundary = int(durs[ph_idx] * sample_rate)
    # length (in samples) of the current phoneme, for relative positions
    ph_len = int(durs[ph_idx] * sample_rate)
    # samples accumulated by all fully consumed phonemes
    elapsed = 0
    # current window center position (in samples)
    center = 0
    frames_per_ph = [[]]
    rel_per_ph = [[]]
    for frame in aco_data:
        if center >= boundary and ph_idx < len(durs) - 1:
            # window center crossed into the next phoneme: open new buckets
            frames_per_ph.append([])
            rel_per_ph.append([])
            ph_idx += 1
            boundary += int(durs[ph_idx] * sample_rate)
            elapsed += ph_len
            ph_len = int(durs[ph_idx] * sample_rate)
        frames_per_ph[ph_idx].append(frame)
        # relative position of this window inside the current phoneme
        rel_per_ph[ph_idx].append([(center - elapsed) / ph_len,
                                   ph_len / sample_rate])
        center += stride
    return frames_per_ph, rel_per_ph
def read_speaker_labs(spk_name, ids_list, lab_dir, lab_parser,
                      filter_by_dur=False, aco_dir=None):
    """Parse every .lab file of one speaker, optionally with acoustic data.

    # Arguments:
        spk_name: speaker directory name under lab_dir (and aco_dir).
        ids_list: utterance ids; '<id>.lab' is read for each.
        lab_dir: root label directory containing one folder per speaker.
        lab_parser: callable mapping raw lab lines -> (tstamps, parsed_lab).
        filter_by_dur: drop phonemes whose converted duration is <= 0.
        aco_dir: if given, also read acoustic files and align frames to
                 phoneme durations.
    # Returns:
        (spk_name, parsed_tstamps, parsed_lines, flat_lines) or, when
        aco_dir is given, the same tuple extended with
        (parsed_aco, parsed_reldur).
    """
    parsed_lines = [] # maintain seq structure
    parsed_tstamps = [] # maintain seq structure
    if aco_dir is not None:
        parsed_aco = [] # aco data if parsed
        parsed_reldur = [] # reldur data
    parse_timings = []
    flat_tstamps = []
    flat_lines = []
    beg_t = timeit.default_timer()
    #if filter_by_dur:
    #log_file = open('/tmp/dur_filter.log', 'w')
    for id_i, split_id in enumerate(ids_list, start=1):
        spk_lab_dir = os.path.join(lab_dir, spk_name)
        lab_f = os.path.join(spk_lab_dir, '{}.lab'.format(split_id))
        with open(lab_f) as lf:
            lab_lines = [l.rstrip() for l in lf.readlines()]
        tstamps, parsed_lab = lab_parser(lab_lines)
        if filter_by_dur:
            filtered_lab = []
            filtered_tstamps = []
            # compute durs from timestamps to keep VALID phonemes
            converted_durs = tstamps_to_dur(tstamps, True)
            assert len(converted_durs) == len(parsed_lab), \
                len(converted_durs)
            for (plab, dur, tss) in zip(parsed_lab, converted_durs,
                                        tstamps):
                #print('dur=', dur)
                if dur > 0:
                    #print('ACCEPTED with dur: ', dur)
                    filtered_lab.append(plab)
                    filtered_tstamps.append(tss)
                #else:
                    #print('Filtered dur: ', dur)
                    #log_file.write('Filtred dur {} at file '
                    #               '{}.lab\n'.format(dur,
                    #                                 os.path.join(lab_dir,
                    #                                              spk_name,
                    #                                              split_id)))
            flat_lines += filtered_lab
            flat_tstamps += filtered_tstamps
            parsed_tstamps.append(filtered_tstamps)
            parsed_lines.append(filtered_lab)
            # fraction of phonemes that survived the duration filter
            a_durs = len(filtered_tstamps) / len(converted_durs)
            #print('Ratio accepted durs: {}%'.format(a_durs * 100))
        else:
            parsed_tstamps.append(tstamps)
            parsed_lines.append(parsed_lab)
            flat_lines += parsed_lab
            flat_tstamps += tstamps
        if aco_dir is not None:
            #print('split_id: ', split_id)
            #print('parsed_tstamps: ', parsed_tstamps)
            #print('parsed_tstamps[-1]: ', parsed_tstamps[-1])
            # parse aco and align its frames to the (possibly filtered) durs
            parsed_durs = tstamps_to_dur(parsed_tstamps[-1], True)
            aco_seq = read_aco_file(spk_name, split_id, aco_dir)
            #print('Total read aco frames: ', aco_seq.shape)
            aco_seq_data, \
            seq_reldur = parse_lab_aco_correspondences(parsed_durs,
                                                       aco_seq)
            parsed_aco.append(aco_seq_data)
            parsed_reldur.append(seq_reldur)
        #parse_timings.append(timeit.default_timer() - beg_t)
        #print('Parsed spk-{} lab file {:5d}/{:5d}, mean time: {:.4f}'
        #      's'.format(spk_name, id_i, len(ids_list),
        #                 np.mean(parse_timings)),
        #      end='\n')
        #beg_t = timeit.default_timer()
    #log_file.close()
    if aco_dir is None:
        return (spk_name, parsed_tstamps, parsed_lines, flat_lines)
    else:
        return (spk_name, parsed_tstamps, parsed_lines, flat_lines,
                parsed_aco, parsed_reldur)
def read_speaker_aco(spk_name, ids_list, aco_dir):
    """Read and stack all acoustic feature files of one speaker.

    # Arguments:
        spk_name: speaker directory name under aco_dir.
        ids_list: utterance ids to read.
        aco_dir: root acoustic directory containing one folder per speaker.
    # Returns:
        (spk_name, aco_data) where aco_data is the row-wise concatenation of
        every utterance's feature matrix, or None if ids_list is empty.
    """
    # Collect per-utterance matrices and concatenate once at the end;
    # the previous np.concatenate-per-iteration was quadratic in total size.
    chunks = []
    parse_timings = []
    beg_t = timeit.default_timer()
    for id_i, split_id in enumerate(ids_list, start=1):
        chunks.append(read_aco_file(spk_name, split_id, aco_dir))
        parse_timings.append(timeit.default_timer() - beg_t)
        print('Parsed spk-{} aco file {:5d}/{:5d}, mean time: {:.4f}'
              's'.format(spk_name, id_i, len(ids_list),
                         np.mean(parse_timings)),
              end='\n')
        beg_t = timeit.default_timer()
    # preserve original behavior: aco_data stays None with no utterances
    aco_data = np.concatenate(chunks, axis=0) if chunks else None
    return (spk_name, aco_data)
class TCSTAR(Dataset):
def __init__(self, spk_cfg_file, split, lab_dir,
             lab_codebooks_path, force_gen=False,
             ogmios_lab=True, parse_workers=4,
             max_seq_len=None, batch_size=None,
             max_spk_samples=None,
             mulout=False,
             q_classes=None,
             trim_to_min=False,
             forced_trim=None,
             exclude_train_spks=[],
             exclude_eval_spks=[]):
    """
    # Arguments:
        spk_cfg_file: pickled config file to read a speakers dict
        split: 'train' 'valid' or 'test' split
        lab_dir: root lab dir with spks within
        lab_codebooks_path: codebooks file path dict
        force_gen: flag to enforce re-generation of codebooks
                   and stats.
        ogmios_lab: ogmios format to parse labs.
        parse_workers: processes used to parse labs in parallel.
        max_seq_len: if specified, batches are stateful-like
                     with max_seq_len time-steps per sample,
                     and batch_size is also required.
        max_spk_samples: cap on samples taken per speaker.
        mulout: determines that speaker's data has to be
                arranged in batches
        q_classes: number of duration quantization classes (int) or None.
        trim_to_min: trim all speakers to same num_samples if
                     maxlen is applied (specially for MO).
        forced_trim: max num of samples per speaker forced (this
                     has priority over trim_to_min counts)
        exclude_train_spks: speaker names dropped from the train split.
        exclude_eval_spks: speaker names dropped from the valid split.
    """
    self.trim_to_min = trim_to_min
    self.forced_trim = forced_trim
    if max_seq_len is not None:
        if batch_size is None:
            raise ValueError('Please specify a batch size in '
                             ' TCSTAR to arrange the stateful '
                             ' sequences.')
    elif trim_to_min:
        # BUGFIX: this warning used to fire whenever max_seq_len was None,
        # even when trim_to_min had not been requested at all; it is now
        # only shown when the flag is actually set without max_seq_len.
        print('WARNING: trim to min flag activated, but has no '
              ' effect because no max_seq_len specified.')
    self.max_seq_len = max_seq_len
    if q_classes is not None:
        assert isinstance(q_classes, int), type(q_classes)
    self.q_classes = q_classes
    self.mulout = mulout
    self.batch_size = batch_size
    self.exclude_train_spks = exclude_train_spks
    with open(spk_cfg_file, 'rb') as cfg_f:
        # load speakers config paths
        self.speakers = pickle.load(cfg_f)
    # keep a full copy so excluded speakers are preserved on save
    self.all_speakers = copy.deepcopy(self.speakers)
    if split == 'train':
        # filter speakers in exclude list
        for spk in self.all_speakers.keys():
            if spk in exclude_train_spks:
                print('Excluding speaker {} from train '
                      'split'.format(spk))
                del self.speakers[spk]
    if split == 'valid':
        # filter speakers in exclude list
        for spk in self.all_speakers.keys():
            if spk in exclude_eval_spks:
                print('Excluding speaker {} from valid '
                      'split'.format(spk))
                del self.speakers[spk]
    # store spk2idx
    fspk = list(self.speakers.keys())[0]
    if 'idx' not in self.speakers[fspk]:
        print('Indexing speakers with their ids...')
        # index speakers with integer ids
        self.spk2idx = dict((sname, i) for i, sname in
                            enumerate(self.speakers.keys()))
        for spk, idx in self.spk2idx.items():
            self.speakers[spk]['idx'] = idx
        print('Created ids: ', json.dumps(self.spk2idx, indent=2))
    else:
        # load existing indexation
        self.spk2idx = {}
        for spk in self.speakers.keys():
            self.spk2idx[spk] = self.speakers[spk]['idx']
        print('Loaded ids: ', json.dumps(self.spk2idx, indent=2))
    self.idx2spk = dict((v, k) for k, v in self.spk2idx.items())
    self.split = split
    self.lab_dir = lab_dir
    self.ogmios_lab = ogmios_lab
    self.force_gen = force_gen
    self.parse_workers = parse_workers
    self.lab_codebooks_path = lab_codebooks_path
    self.max_spk_samples = max_spk_samples
    # call load_lab (implemented by subclasses)
    self.load_lab()
    # save stats in case anything changed
    with open(spk_cfg_file, 'wb') as cfg_f:
        # update original speakers, excluded ones in
        # train will be unmodified
        for spk, spkval in self.speakers.items():
            self.all_speakers[spk] = spkval
        # load speakers config paths
        pickle.dump(self.all_speakers, cfg_f)
def load_lab(self):
    """Load label data for this split.

    Abstract hook called at the end of __init__; subclasses must override.
    """
    raise NotImplementedError
def parse_labs(self, lab_parser, compute_dur_stats=False,
compute_dur_classes=False, aco_dir=None):
# if aco_dir is pecified, aco_data will be parsed
# This is used by TCSTAR_aco
total_parsed_labs = []
total_flat_labs = []
total_parsed_durs = []
total_parsed_spks = []
total_parsed_aco = []
total_parsed_reldur = []
# prepare a multi-processing pool to parse labels faster
parse_pool = mp.Pool(self.parse_workers)
num_labs_total = sum(len(spk[self.split]) for sname, spk in
self.speakers.items())
if self.max_spk_samples is not None:
num_labs_total = self.max_spk_samples * len(self.speakers)
print('TCSTAR_dur-{} > Parsing {} labs from {} speakers. '
'Num workers: {}...'.format(self.split,
num_labs_total,
len(self.speakers),
self.parse_workers))
for sname, spk in self.speakers.items():
async_f = read_speaker_labs
if self.max_spk_samples is not None:
spk_samples = spk[self.split][:self.max_spk_samples]
else:
spk_samples = spk[self.split]
async_args = (sname, spk_samples, self.lab_dir,
lab_parser, True,
aco_dir)
spk['result'] = parse_pool.apply_async(async_f, async_args)
parse_pool.close()
parse_pool.join()
for sname, spk in self.speakers.items():
result = spk['result'].get()
parsed_timestamps = result[1]
parsed_durs = tstamps_to_dur(parsed_timestamps)
if compute_dur_stats:
#if self.norm_dur:
if self.split == 'train' and ('dur_stats' not in spk or \
self.force_gen):
flat_durs = [fd for dseq in parsed_durs for fd in dseq]
# if they do not exist (or force_gen) and it's train split
dur_min = np.min(flat_durs)
| |
is {0}".format(live_conn))
def _nat_setup_flows(vt, br_name, nat_spec):
    """Install the conntrack/NAT OpenFlow pipeline on the client-side SUT.

    Builds four tables starting at Constants.OF_TABLE_NAT:
      table +0: send all ip/ipv6 traffic through conntrack (with nat)
      table +1: commit new connections with the NAT action from nat_spec
                (FTP/TFTP get their ALGs), pass related/established traffic
      table +2: fan out to the MAC-rewrite table and the L2 match table
      table +3: rewrite src/dst MACs toward the client or server VMs

    # Arguments:
        vt: verify topology; flows are installed for its first allow entry.
        br_name: bridge to program.
        nat_spec: NAT specification providing vip addresses, SNAT/DNAT mode
                  and the ovs nat action strings.
    """
    # For all nat testing, flows are setup on client node only
    vte = vt.allow[0]
    sep = vte.sep
    sut = sep.host
    vif = sep.vif
    setup_default_pipeline_on_all_suts(br_name)
    # start from a clean NAT table on the client-side bridge
    sut.vswitch.execute(f"ovs-ofctl del-flows {br_name} table={Constants.OF_TABLE_NAT}")
    flows = list()
    #### Table 0: connection track all ipv4/v6 traffic
    flows.append(f"table={Constants.OF_TABLE_NAT},"
                 f"priority=10,ip,action=ct\\(nat,table={Constants.OF_TABLE_NAT+1}\\)")
    flows.append(f"table={Constants.OF_TABLE_NAT},"
                 f"priority=10,ipv6,action=ct\\(nat,table={Constants.OF_TABLE_NAT+1}\\)")
    flows.append(f"table={Constants.OF_TABLE_NAT},priority=0,action=drop")
    #### Table 1: Allow new FTP/TFTP control connections
    # Part 1.0: flows for client port as in port
    ipv4_nat_action = nat_spec.generate_nat_action(ipv4=True)
    ipv6_nat_action = nat_spec.generate_nat_action(ipv4=False)
    base = f"table={Constants.OF_TABLE_NAT+1},priority=10,in_port={vif.ofp},ct_state=+new,"
    base_v4 = base
    base_v6 = base
    if nat_spec.is_dnat():
        # DNAT: only traffic aimed at the virtual IPs is translated
        base_v4 += f"ip,nw_dst={nat_spec.vipv4},"
        base_v6 += f"ipv6,ipv6_dst={nat_spec.vipv6},"
    else:
        base_v4 += 'ip,'
        base_v6 += 'ipv6,'
    # Specific to algs ftp and tftp
    flows.append(f"{base_v4}tcp,tp_dst=21,action="
                 f"ct\\(alg=ftp,commit,nat\\({ipv4_nat_action}\\),"
                 f"table={Constants.OF_TABLE_NAT+2}\\)")
    flows.append(f"{base_v4}udp,tp_dst=69,action="
                 f"ct\\(alg=tftp,commit,nat\\({ipv4_nat_action}\\),"
                 f"table={Constants.OF_TABLE_NAT+2}\\)")
    flows.append(f"{base_v6}tcp6,tp_dst=21,action="
                 f"ct\\(alg=ftp,commit,nat\\({ipv6_nat_action}\\),"
                 f"table={Constants.OF_TABLE_NAT+2}\\)")
    flows.append(f"{base_v6}udp6,tp_dst=69,action="
                 f"ct\\(alg=tftp,commit,nat\\({ipv6_nat_action}\\),"
                 f"table={Constants.OF_TABLE_NAT+2}\\)")
    # Generic to other tcp/tcp6/udp/udp6 and icmp/icmp6 traffic
    flows.append(f"{base_v4}tcp,action="
                 f"ct\\(commit,nat\\({ipv4_nat_action}\\),table={Constants.OF_TABLE_NAT+2}\\)")
    flows.append(f"{base_v4}udp,action="
                 f"ct\\(commit,nat\\({ipv4_nat_action}\\),table={Constants.OF_TABLE_NAT+2}\\)")
    flows.append(f"{base_v4}icmp,action="
                 f"ct\\(commit,nat\\({ipv4_nat_action}\\),table={Constants.OF_TABLE_NAT+2}\\)")
    flows.append(f"{base_v6}tcp6,action="
                 f"ct\\(commit,nat\\({ipv6_nat_action}\\),table={Constants.OF_TABLE_NAT+2}\\)")
    flows.append(f"{base_v6}udp6,action="
                 f"ct\\(commit,nat\\({ipv6_nat_action}\\),table={Constants.OF_TABLE_NAT+2}\\)")
    flows.append(f"{base_v6}icmp6,action="
                 f"ct\\(commit,nat\\({ipv6_nat_action}\\),table={Constants.OF_TABLE_NAT+2}\\)")
    # Part 1.1: related flows for data connection
    flows.append(f"table={Constants.OF_TABLE_NAT+1},priority=10,ct_state=+new+rel,tcp,"
                 f"action=ct\\(table={Constants.OF_TABLE_NAT+2},commit,nat\\)")
    flows.append(f"table={Constants.OF_TABLE_NAT+1},priority=10,ct_state=+new+rel,tcp6,"
                 f"action=ct\\(table={Constants.OF_TABLE_NAT+2},commit,nat\\)")
    flows.append(f"table={Constants.OF_TABLE_NAT+1},priority=10,ct_state=+new+rel,udp,"
                 f"action=ct\\(table={Constants.OF_TABLE_NAT+2},commit,nat\\)")
    flows.append(f"table={Constants.OF_TABLE_NAT+1},priority=10,ct_state=+new+rel,udp6,"
                 f"action=ct\\(table={Constants.OF_TABLE_NAT+2},commit,nat\\)")
    # Part 1.2: established flows
    flows.append(f"table={Constants.OF_TABLE_NAT+1},priority=10,ct_state=+est,"
                 f"action=resubmit\\(,{Constants.OF_TABLE_NAT+2}\\)")
    # Part 1.3: pass related pkts
    flows.append(f"table={Constants.OF_TABLE_NAT+1},priority=10,ct_state=+rel,"
                 f"action=resubmit\\(,{Constants.OF_TABLE_NAT+2}\\)")
    #### Table 2: Jump to l2 matching table after NAT MAC translation
    flows.append(f"table={Constants.OF_TABLE_NAT+2},"
                 f"action=resubmit\\(,{Constants.OF_TABLE_NAT+3}\\),"
                 f"goto_table:{Constants.OF_TABLE_L2_MATCH}")
    #### Table 3: MAC address replacement
    # For traffic to client vm
    flows.append(f"table={Constants.OF_TABLE_NAT+3},ip,nw_dst={vif.if_addr.ipv4},"
                 f"action=mod_dl_dst\\({vif.mac}\\),"
                 f"mod_dl_src\\({_NAT_ROUTER_CIF_MAC}\\)")
    flows.append(f"table={Constants.OF_TABLE_NAT+3},ip6,ipv6_dst={vif.if_addr.ipv6},"
                 f"action=mod_dl_dst\\({vif.mac}\\),"
                 f"mod_dl_src\\({_NAT_ROUTER_CIF_MAC}\\)")
    # For traffic to server vms
    # Must match on l3 addr, as ofp is invalid across hosts.
    for dep in vte.get_full_deps():
        flows.append(f"table={Constants.OF_TABLE_NAT+3},ip,nw_dst={dep.vif.if_addr.ipv4},"
                     f"action=mod_dl_dst\\({dep.vif.mac}\\),"
                     f"mod_dl_src\\({_NAT_ROUTER_SIF_MAC}\\)")
        flows.append(f"table={Constants.OF_TABLE_NAT+3},ip6,ipv6_dst={dep.vif.if_addr.ipv6},"
                     f"action=mod_dl_dst\\({dep.vif.mac}\\),"
                     f"mod_dl_src\\({_NAT_ROUTER_SIF_MAC}\\)")
    provision_flows(sut, br_name, flows)
# NOTE: for RELATED test, we only test on either client or server vm,
# RELATED pkt generated by intermediate device is going to have more
# complicated topology, and we may do that in the future.
#
# direction:
# - 'reply' means generate icmp from server -> client
# - 'orig' means generate icmp from client -> server
def _nat_execute_related(nat_spec, sep, dep, ip_ver, direction):
    """Exercise conntrack RELATED ICMP handling through the NAT pipeline.

    Starts a looping nc sender/receiver pair between the client and server
    VMs, injects an iptables REJECT on one side to provoke ICMP
    "prohibited" messages, captures them with tcpdump on the other side and
    checks (via logger.error) that the expected number of RELATED packets
    with the NAT-translated addresses was seen.

    # Arguments:
        nat_spec: NAT specification (SNAT/DNAT mode, vip/pool addresses).
        sep: client endpoint (guest VM + vif).
        dep: server endpoint (guest VM + vif).
        ip_ver: 'ipv4' or anything else for ipv6.
        direction: 'orig' (icmp client -> server) or 'reply'
                   (icmp server -> client).
    """
    client_vm = sep.guest
    server_vm = dep.guest
    if ip_ver == 'ipv4':
        client_ip = sep.vif.if_addr.ipv4
        server_ip = dep.vif.if_addr.ipv4
        if nat_spec.is_snat():
            # SNAT: server sees the client behind the translated pool address
            client_view_server_ip = server_ip
            server_view_client_ip = nat_spec.ipv4_ia_start
        else:
            # DNAT: client talks to the virtual IP
            client_view_server_ip = nat_spec.vipv4
            server_view_client_ip = client_ip
        nc_option = ''
        iptables_option = ''
        iptables_reject_type = 'icmp-host-prohibited'
        related_expected_key = 'unreachable - admin prohibited'
        tcpdump_option = 'icmp'
    else:
        client_ip = sep.vif.if_addr.ipv6
        server_ip = dep.vif.if_addr.ipv6
        if nat_spec.is_snat():
            client_view_server_ip = server_ip
            server_view_client_ip = nat_spec.ipv6_ia_start
        else:
            client_view_server_ip = nat_spec.vipv6
            server_view_client_ip = client_ip
        nc_option = ' -6 '
        iptables_option = '6'
        iptables_reject_type = 'icmp6-adm-prohibited'
        related_expected_key = 'unreachable prohibited'
        tcpdump_option = 'icmp6'
    # drop a small "./loop" script on both VMs that prints forever; it feeds
    # nc so both directions keep generating UDP traffic
    cmd = "sudo sh -c 'cat << EOF > ./loop\n" \
          "#!/bin/sh\n" \
          "\n" \
          "while true; do \n" \
          " echo \"hello\"\n" \
          " sleep 1\n" \
          "done\n'"
    client_vm.execute(cmd)
    server_vm.execute(cmd)
    client_vm.execute("chmod +x ./loop")
    server_vm.execute("chmod +x ./loop")
    # nc server and client each generate pkts by 'loop' input
    # so we can setup iptables reject rule in either nc server or client
    # to trigger either 'reply' or 'orig' direction icmp related msg.
    cmd = 'nohup sh -c \"./loop | nc {0} -u -l -p 10000" >/dev/null 2>&1 &'\
        .format(nc_option)
    server_vm.execute(cmd)
    cmd = "nohup sh -c \"./loop | nc {0} -u {1} 10000\" >/dev/null 2>&1 &" \
        .format(nc_option, client_view_server_ip)
    client_vm.execute(cmd)
    # pick which side captures and which side rejects based on direction
    if direction == 'orig':
        tcpdump_vm = server_vm
        tcpdump_if = f"virtio{dep.vif.idx}"
        iptables_vm = client_vm
        iptables_if = f"virtio{sep.vif.idx}"
        reject_ip = client_ip
    else:
        tcpdump_vm = client_vm
        tcpdump_if = f"virtio{sep.vif.idx}"
        iptables_vm = server_vm
        iptables_if = f"virtio{dep.vif.idx}"
        reject_ip = server_ip
    tcpdump_vm.execute('rm -rf ./cap')
    # '-U' option MUST be used, otherwise empty capture file
    cmd = 'nohup tcpdump -U -i {0} {1} -w ./cap >/dev/null 2>&1 &' \
        .format(tcpdump_if, tcpdump_option)
    tcpdump_vm.execute(cmd)
    sleep(3)
    # icmp-host-prohibited is used instead of icmp-port-unreachable
    # because there might be some noises when nc client is quit,
    # ip stack of the nc client vm will also generate port unreachable
    # icmp to nc server (as nc server is keeping sending data).
    cmd = 'ip{0}tables -I INPUT -i {1} -d {2} -j REJECT ' \
          ' --reject-with {3}'.format(
              iptables_option, iptables_if, reject_ip, iptables_reject_type)
    iptables_vm.execute(cmd)
    # nc client actually already quit by receiving the port unreachable icmp,
    # after nc client quit, nc server will also get port unreachable icmp,
    # but it's not quit yet.
    # wait few seconds so the above 'reject' will be triggered
    # thus captured by tcpdump
    sleep(3)
    (_, tcpdump_log, _) = tcpdump_vm.execute('tcpdump -r ./cap')
    # cleanup everything
    cmd = 'ip{0}tables -D INPUT -i {1} -d {2} -j REJECT ' \
          ' --reject-with {3}'.format(
              iptables_option, iptables_if, reject_ip, iptables_reject_type)
    iptables_vm.execute(cmd)
    tcpdump_vm.kill_process("tcpdump")
    # nc client is quit, try to kill it again anyway
    server_vm.kill_process("nc")
    client_vm.kill_process("nc")
    # Example tcpdump lines being matched:
    # 09:31:41.874866 IP 192.168.127.12 > 172.16.58.3: ICMP host 192.168.127.12 \
    # unreachable - admin prohibited, length 42
    # 13:59:34.450321 IP6 fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:102 > fc00:e968:6179::de52:7100:6401: \
    # ICMP6, destination unreachable, unreachable prohibited fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:102, \
    # length 62
    verify_strs = list()
    verify_strs.append(related_expected_key)
    # MUST use ip_address to normalize ipv4/6 address to compare with tcpdump
    # NOTE(review): no such normalization is applied here — the raw address
    # strings are compared; confirm tcpdump's textual form matches them.
    if direction == 'orig':
        verify_strs.append(f"{server_view_client_ip} > {server_ip}")
    else:
        verify_strs.append(f"{client_view_server_ip} > {client_ip}")
    for s in verify_strs:
        logger.debug('related key: {0}'.format(s))
    buf = StringIO(tcpdump_log)
    related = 0
    # count capture lines containing every expected substring
    for line in buf.readlines():
        logger.debug('related test line :{0}'.format(line))
        found = True
        for s in verify_strs:
            if not s in line:
                found = False
                break
        if found:
            related += 1
    buf.close()
    if direction == 'reply' and related != 1:
        logger.error('related reply num: {0} is not 1'.format(related))
    # as nc server continues to send data, nc client vm iptables
    # will continuously send back icmp related msg, thus equal
    # or larger than 1.
    if direction == 'orig' and related == 0:
        logger.error('related orig num cannot be zero')
def verify_snat(vt, ip_num=1, port_num=0, svr_num=1,
                tool='telnet', parallel=1, ping_size=1000):
    """ Given a verify topology, execute SNAT test.

    # Arguments:
        vt: verify topology; exactly one 'allow' entry is required.
        ip_num: number of SNAT pool addresses to use.
        port_num: number of SNAT pool ports to use (0 = default range).
        svr_num: how many of the available servers to exercise.
        tool: traffic generator: 'telnet', 'ping', 'tftp', 'ftp_active'
              or 'ftp_passive'.
        parallel: concurrent sessions per server (not supported for the
                  file-copy tools).
        ping_size: ICMP payload size used by the 'ping' tool.
    # Raises:
        ValueError: if the topology does not have exactly one allow entry.
    """
    if len(vt.allow) != 1:
        # BUGFIX: 'raise "<string>"' is a TypeError in Python 3 (exceptions
        # must derive from BaseException); raise a proper exception instead.
        raise ValueError("Verify topology is invalid.")
    ip_num = int(ip_num)
    port_num = int(port_num)
    svr_num = int(svr_num)
    parallel = int(parallel)
    ping_size = int(ping_size)
    #if svr_num > len(cs_verify_spec['ipv4_servers']):
    #    logger.error('svr_num:{0} is larger than available:{1}'.format(
    #        svr_num, len(cs_verify_spec['ipv4_servers'])))
    #    return
    #if tool == 'iperf_tcp' or tool == 'iperf_udp':
    #    if svr_num > 1:
    #        logger.error('iperf test iterate all available servers')
    #        return
    # don't support testing to parallel tftp/ftp
    if tool in ('tftp', 'ftp_passive', 'ftp_active'):
        if parallel > 1:
            logger.error('file copy test cannot support parallel')
            return
    # flush all conntrack state firstly
    flush_conntrack_on_all_suts()
    flush_revalidator_on_all_suts()
    nat_spec = NATSpec(_NAT_SERVER_NETV4[_NAT_SNAT_ADDR_IDX_START],
                       _NAT_SERVER_NETV6[_NAT_SNAT_ADDR_IDX_START],
                       ip_num,
                       _NAT_SNAT_PORT_START, port_num)
    _nat_setup_flows(vt, 'br0', nat_spec)
    vte = vt.allow[0]
    sep = vte.sep
    if tool == 'telnet':
        # open v4+v6 control connections and verify the conntrack entries
        for s in range(svr_num):
            for _ in range(parallel):
                dep = vte.get_full_deps()[s]
                cmd = f"telnet {dep.vif.if_addr.ipv4} 22 &"
                sep.guest.execute(cmd)
                cmd = f"telnet {dep.vif.if_addr.ipv6} 22 &"
                sep.guest.execute(cmd)
        _check_nat_result(sep.host, parallel*svr_num*2, 'tcp')
        sep.guest.kill_process("telnet")
    elif tool == 'ping':
        for s in range(svr_num):
            for _ in range(parallel):
                dep = vte.get_full_deps()[s]
                cmd = f"ping -c 3 -i 0.3 -s {ping_size} {dep.vif.if_addr.ipv4} &"
                sep.guest.execute(cmd)
                cmd = f"ping -6 -c 3 -i 0.3 -s {ping_size} {dep.vif.if_addr.ipv6} &"
                sep.guest.execute(cmd)
        _check_nat_result(sep.host, parallel*svr_num*2, None)
        sep.guest.kill_process("ping")
    #elif tool == 'iperf_tcp':
    #    for server in cs_verify_spec['ipv4_servers']:
    #        ss = "${{{0}}}".format(server['vm'])
    #        vm = BuiltIn().get_variable_value(ss)
    #        cne_execute_iperf_ipv4(client_vm,
    #                               vm, server['ipv4'], 0, 'tcp', parallel)
    #        # hopefully all conntracks are still in FIN state
    #        _check_nat_result(node, parallel + 1, 'tcp')
    #    for server in cs_verify_spec['ipv6_servers']:
    #        ss = "${{{0}}}".format(server['vm'])
    #        vm = BuiltIn().get_variable_value(ss)
    #        cne_execute_iperf_ipv6(client_vm,
    #                               vm, server['ipv6'], 0, 'tcp', parallel)
    #        # hopefully all conntracks are still in FIN state
    #        _check_nat_result(node, parallel + 1, 'tcp')
    #elif tool == 'iperf_udp':
    #    for server in cs_verify_spec['ipv4_servers']:
    #        ss = "${{{0}}}".format(server['vm'])
    #        vm = BuiltIn().get_variable_value(ss)
    #        cne_execute_iperf_ipv4(client_vm,
    #                               vm, server['ipv4'], 0, 'udp', parallel)
    #        # hopefully all conntracks are still in FIN state
    #        _check_nat_result(node, parallel + 1, None)
    #    for server in cs_verify_spec['ipv6_servers']:
    #        ss = "${{{0}}}".format(server['vm'])
    #        vm = BuiltIn().get_variable_value(ss)
    #        cne_execute_iperf_ipv6(client_vm,
    #                               vm, server['ipv6'], 0, 'udp', parallel)
    #        # hopefully all conntracks are still in FIN state
    #        _check_nat_result(node, parallel + 1, None)
    elif tool == 'tftp':
        # file-copy tools exercise the conntrack ALG helpers
        for s in range(svr_num):
            dep = vte.get_full_deps()[s]
            sep.guest.execute_tftp(dep.guest, dep.vif.if_addr.ipv4)
            sep.guest.execute_tftp(dep.guest, dep.vif.if_addr.ipv6)
    elif tool == 'ftp_active':
        for s in range(svr_num):
            dep = vte.get_full_deps()[s]
            sep.guest.execute_ftp(dep.guest, dep.vif.if_addr.ipv4, True)
            sep.guest.execute_ftp(dep.guest, dep.vif.if_addr.ipv6, True)
    elif tool == 'ftp_passive':
        for s in range(svr_num):
            dep = vte.get_full_deps()[s]
            sep.guest.execute_ftp(dep.guest, dep.vif.if_addr.ipv4, False)
            sep.guest.execute_ftp(dep.guest, dep.vif.if_addr.ipv6, False)
def | |
# source: ontoinsights/deep_narrative_analysis -- dna/help.py
# Displays various help texts in a popup window
# Called by app.py
import PySimpleGUI as sg
from utilities import encoded_logo
# Help text, Each line should be 70 chars or less
# Help text for 'Load Narratives From Existing Store'.
scrolled_existing = (
    "Choose the 'Load Narratives From Existing Store' option when narratives have \n"
    "already been ingested to the backing store. A list of the currently available \n"
    "databases is displayed in a new window, and one can be selected for study and \n"
    "analysis. (In addition, in the future, a database can be deleted, if no longer \n"
    "needed.) The selected database will be noted (directly below the 'Load \n"
    "Narratives' section in the main window) along with a count of the contained \n"
    "narratives."
)
# Help text for 'Load Narratives From CSV Metadata'.
# Fix: the aside about semi-colon separators was garbled ("semi-colons, ;)").
scrolled_csv = (
    "Choose the 'Load Narratives From CSV Metadata' option when ingesting new narratives \n"
    "into a new or existing database. A file browser window is displayed to select the \n"
    "CSV file. In addition, a list of the currently available databases is displayed to \n"
    "allow the narratives to be added to one of them, or a new store can be defined.\n\n"
    "The CSV fields are SPECIFIC TO THE DOMAIN being investigated, although several \n"
    "items (such as narrator identification or header/footer identification in PDFs) will \n"
    "be generalized and generically available for reuse. This, however, is not a goal of \n"
    "the current work. Note that in the simplest case, the file need only identify a list \n"
    "of text files to be ingested.\n\n"
    "Note that the processing may take SEVERAL MINUTES if many narratives are ingested. \n\n"
    "For the Holocaust narratives, most are parsed from .PDF files. Therefore, the \n"
    "details of the start/end pages (within the PDF), header/footer keywords, etc. are \n"
    "used to automate the extraction and cleanup of the text. Minimal information \n"
    "on the narrator/subject of the text is also specified (such as the person's name \n"
    "and gender). However, metadata is not restricted to what is in the CSV file. To \n"
    "illustrate what can be extracted from the text alone, other information (e.g., \n"
    "the date and location of the narrator's birth) are extracted from the texts and \n"
    "added to the database.\n\n"
    "For the Holocaust narratives, the format of the CSV Metadata file is: \n"
    "  Source,Title,Person,Type, Given,Given2,Surname,Maiden,Maiden2, \n"
    "  Gender,Start,End,Remove,Header,Footer \n"
    "where: \n"
    " * Source is the filename of the narrative \n"
    "     The file MUST be stored in the dna/resources directory\n"
    "     (Note that this can be a .txt or .pdf file) \n"
    " * Title is a free-form string which will be used to identify the narrative \n"
    " * Person is either 1 or 3 indicating whether the narrative is written in the first or \n"
    "     third person \n"
    " * Type is either 'T' or 'E' indicating whether the story is a timeline/life history \n"
    "     of the narrator or the narrative related to an episode in the narrator's life \n"
    " * Given, Surname and Maiden are free-form strings defining the first, last and \n"
    "     maiden names of the narrator/person whose narrative is defined \n"
    "     (Note that a unique Given name/identifier MUST be specified for each narrator/\n"
    "     subject to distinguish between them. This identifier can obfuscate the name \n"
    "     of the narrator/subject.) \n"
    " * Given2 and Maiden2 are additional strings denoting that the narrator/subject \n"
    "     changed their given name (perhaps when they emigrated to another country), \n"
    "     and/or were married more than once. \n"
    " * Gender is either M, F, A, B, U indicating male, female, agender, bigender or \n"
    "     unknown, respectively \n"
    " * Start and End are only used when the Source file is PDF and indicates the specific \n"
    "     page(s) that should be extracted and processed to become the stored narrative \n"
    "     (Note that only single column PDFs can be processed at this time) \n"
    " * Remove defines the number of lines to be removed from the text file created from \n"
    "     the PDF. For the Holocaust narratives, there are anywhere from 1 to 6 lines \n"
    "     removed (title, subject, sometimes a brief overview of the person's life, etc.) \n"
    " * Header and Footer are only used when the Source file is PDF and are sets of words \n"
    "     separated by semi-colons (;) such that if a line of text contains all the words, \n"
    "     then that line will be discarded (Note that the words are case sensitive) \n\n"
    "After the narratives are ingested, the selected database will be noted (directly below \n"
    "the 'Load Narratives' section in the main window) along with a count of the contained \n"
    "narratives."
)
# Help text for 'Edit Narrative'.
# Fix: a '\n' was missing after "...the nouns and their ", producing one
# over-long rendered line (the file's convention is <= 70 chars per line).
scrolled_edit = (
    "After selecting the narrative database, click on 'Edit Narrative' to open a new \n"
    "window and choose one of the ingested narratives for editing. The narrative's \n"
    "nouns/concepts or events can be selected for edit. A table of the nouns and their \n"
    "details, and/or the sentence text and event details will be shown. \n\n"
    "Note that new concepts/nouns and events can be added, while existing ones can be updated \n"
    "or removed."
)
# Help text for 'Summary Statistics'.
# Fix: a '\n' was missing after "...not captured in the backing ",
# producing one over-long rendered line (convention: <= 70 chars/line).
scrolled_stats = (
    "After selecting the narrative database, choose 'Summary Statistics' to open a new \n"
    "window and display a variety of graphs and charts. The goal is to allow a researcher \n"
    "to understand the demographics of their narrators, and determine if they reflect the\n"
    "backing population.\n\n"
    "The output displays are TAILORED TO THE DOMAIN being investigated. The number and \n"
    "variety of graphs and charts will expand based on user feedback.\n\n"
    "In this demo, the graphs display word clouds, clusters of semantically similar \n"
    "narratives, and more. The charts illustrate different characteristics of the \n"
    "narrators (such as their genders or birth years), as well as other information \n"
    "(such as the locations and times mentioned in the texts).\n\n"
    "When INDICATED BY THE DOMAIN, it may be possible to identify if the same narrator \n"
    "has provided multiple narratives - and 'unify' the different narrator references. This \n"
    "processing is illustrated in this demo (based on the narrator's given name and surname), \n"
    "since there are numerous Holocaust-related narratives associated with and provided by \n"
    "a single individual.\n\n"
    "Lastly, a list of frequent terms whose semantics are not captured in the backing \n"
    "DNA ontology (i.e., are 'unknown') can be output and used to extend the ontology.\n"
    "Future releases of the DNA tooling will aid in performing this extension.\n\n"
    "Note that the output displays are TAILORED TO THE DOMAIN being investigated. And, \n"
    "the number and variety of default graphs and charts will expand based on user \n"
    "feedback."
)
scrolled_timeline = \
"After selecting the database, click on 'Narrative Timeline' to open a new window \n" \
"to choose either a timeline of domain-specific events (if applicable) or one of \n" \
"the ingested narratives for display. For a narrative, the metadata, story text and \n" \
"a timeline plot are displayed. For the domain details, only a timeline is displayed.\n\n" \
"From the timeline, a date (YYYY-mm) can be entered to obtain a network diagram of \n" \
"the events that occurred in that month and year.\n\n" \
"In a future release, it will be possible to compare timelines (for | |
attribute["ExprWithAlias"]["expr"]
alias = [attribute["ExprWithAlias"]["alias"]["value"]]
if function:
if "Identifier" in function:
return {
"identifier": function["Identifier"]["value"],
"alias": alias,
}
if "CompoundIdentifier" in function:
return {
"identifier": [
".".join(
[p["value"] for p in function["CompoundIdentifier"]]
)
].pop(),
"alias": [
".".join(
[p["value"] for p in function["CompoundIdentifier"]]
)
],
}
if "Function" in function:
func = function["Function"]["name"][0]["value"].upper()
args = [
self._build_dnf_filters(a) for a in function["Function"]["args"]
]
if is_function(func):
return {"function": func, "args": args, "alias": alias}
return {"aggregate": func, "args": args, "alias": alias}
if "BinaryOp" in function:
raise NotImplementedError(
"Operations in the SELECT clause are not supported"
)
if "Cast" in function:
# CAST(<var> AS <type>) - convert to the form <type>(var), e.g. BOOLEAN(on)
args = [self._build_dnf_filters(function["Cast"]["expr"])]
data_type = function["Cast"]["data_type"]
if data_type == "Timestamp":
data_type = "TIMESTAMP"
elif "Varchar" in data_type:
data_type = "VARCHAR"
elif "Decimal" in data_type:
data_type = "NUMERIC"
elif "Boolean" in data_type:
data_type = "BOOLEAN"
else:
raise SqlError("Unsupported CAST function")
alias.append(f"CAST({args[0][0]} AS {data_type})")
return {"function": data_type, "args": args, "alias": alias}
if "MapAccess" in function:
# Identifier[key] -> GET(Identifier, key) -> alias of I[k] or alias
identifier = function["MapAccess"]["column"]["Identifier"]["value"]
key_dict = function["MapAccess"]["keys"][0]["Value"]
if "SingleQuotedString" in key_dict:
key_value = (
key_dict["SingleQuotedString"],
TOKEN_TYPES.VARCHAR,
)
key = f"'{key_dict['SingleQuotedString']}'"
if "Number" in key_dict:
key_value = (
int(key_dict["Number"][0]),
TOKEN_TYPES.NUMERIC,
)
key = key_dict["Number"][0]
alias.append(f"{identifier}[{key}]")
return {
"function": "GET",
"args": [(identifier, TOKEN_TYPES.IDENTIFIER), key_value],
"alias": alias,
}
projection = [_inner(attribute) for attribute in projection]
# print(projection)
return projection
def _extract_selection(self, ast):
"""
Although there is a SELECT statement in a SQL Query, Selection refers to the
filter or WHERE statement.
"""
selections = ast[0]["Query"]["body"]["Select"]["selection"]
return self._build_dnf_filters(selections)
def _extract_filter(self, ast):
""" """
filters = ast[0]["ShowColumns"]["filter"]
if filters is None:
return None
if "Where" in filters:
return self._build_dnf_filters(filters["Where"])
if "Like" in filters:
return (
(
"column_name",
TOKEN_TYPES.IDENTIFIER,
),
"like",
(filters["Like"], TOKEN_TYPES.VARCHAR),
)
def _extract_distinct(self, ast):
return ast[0]["Query"]["body"]["Select"]["distinct"]
def _extract_limit(self, ast):
limit = ast[0]["Query"].get("limit")
if limit is not None:
return int(limit["Value"]["Number"][0])
return None
def _extract_offset(self, ast):
offset = ast[0]["Query"].get("offset")
if offset is not None:
return int(offset["value"]["Value"]["Number"][0])
return None
def _extract_order(self, ast):
order = ast[0]["Query"].get("order_by")
if order is not None:
orders = []
for col in order:
column = col["expr"]
if "Identifier" in column:
column = column["Identifier"]["value"]
if "CompoundIdentifier" in column:
column = ".".join(
[i["value"] for i in column["CompoundIdentifier"]]
)
if "Function" in column:
func = column["Function"]["name"][0]["value"].upper()
args = [
self._build_dnf_filters(a)[0]
for a in column["Function"]["args"]
]
args = ["*" if i == "Wildcard" else i for i in args]
args = [
((f"({','.join(a[0])})",) if isinstance(a[0], list) else a)
for a in args
]
alias = f"{func.upper()}({','.join([str(a[0]) for a in args])})"
column = {"function": func, "args": args, "alias": alias}
orders.append(
(
column,
"descending" if str(col["asc"]) == "False" else "ascending",
),
)
return orders
def _extract_groups(self, ast):
def _inner(element):
if element:
if "Identifier" in element:
return element["Identifier"]["value"]
if "Function" in element:
func = element["Function"]["name"][0]["value"].upper()
args = [
self._build_dnf_filters(a) for a in element["Function"]["args"]
]
args = [
((f"({','.join(a[0])})",) if isinstance(a[0], list) else a)
for a in args
]
return f"{func.upper()}({','.join([str(a[0]) for a in args])})"
if "Cast" in element:
args = [self._build_dnf_filters(element["Cast"]["expr"])]
data_type = list(element["Cast"]["data_type"].keys())[0]
return f"CAST({args[0][0]} AS {str(data_type).upper()})"
if "MapAccess" in element:
identifier = element["MapAccess"]["column"]["Identifier"]["value"]
key_dict = element["MapAccess"]["keys"][0]["Value"]
if "SingleQuotedString" in key_dict:
key = f"'{key_dict['SingleQuotedString']}'"
if "Number" in key_dict:
key = key_dict["Number"][0]
return f"{identifier}[{key}]"
groups = ast[0]["Query"]["body"]["Select"]["group_by"]
return [_inner(g) for g in groups]
def _extract_having(self, ast):
having = ast[0]["Query"]["body"]["Select"]["having"]
return self._build_dnf_filters(having)
    def _explain_planner(self, ast, statistics):
        # Plan an EXPLAIN statement: build the plan for the wrapped
        # statement on a copy of this planner, then install a single
        # ExplainNode that emits that plan instead of executing it.
        explain_plan = self.copy()
        explain_plan.create_plan(ast=[ast[0]["Explain"]["statement"]])
        explain_node = ExplainNode(statistics, query_plan=explain_plan)
        self.add_operator("explain", explain_node)
    def _show_columns_planner(self, ast, statistics):
        # Plan a SHOW COLUMNS statement: read the target relation, pipe it
        # through a ShowColumnsNode, and optionally append a filter for
        # SHOW COLUMNS ... LIKE / WHERE.
        # relation = ast[0]["ShowColumns"]["table_name"][0]["value"]
        # The relation name may be dotted (e.g. schema.table), so join all parts.
        relation = ".".join(
            [part["value"] for part in ast[0]["ShowColumns"]["table_name"]]
        )
        self.add_operator(
            "reader",
            DatasetReaderNode(
                statistics,
                dataset=(None, relation),
                reader=self._reader,
                cache=self._cache,
                partition_scheme=self._partition_scheme,
                start_date=self._start_date,
                end_date=self._end_date,
            ),
        )
        self.add_operator("columns", ShowColumnsNode(statistics))
        self.link_operators("reader", "columns")
        # Optional LIKE/WHERE filter over the emitted column list.
        filters = self._extract_filter(ast)
        if filters:
            self.add_operator(
                "filter", SelectionNode(statistics=statistics, filter=filters)
            )
            self.link_operators("columns", "filter")
    def _naive_select_planner(self, ast, statistics):
        """
        The naive planner only works on single tables and always puts operations in
        this order.
            FROM clause
            EVALUATE
            WHERE clause
            AGGREGATE (GROUP BY clause)
            HAVING clause
            SELECT clause
            DISTINCT
            ORDER BY clause
            LIMIT clause
            OFFSET clause
        This is phase one of the rewrite, to essentially mimick the existing
        functionality.
        """
        _relations = [r for r in self._extract_relations(ast)]
        if len(_relations) == 0:
            _relations = [(None, "$no_table")]
        # We always have a data source - even if it's 'no table'
        self.add_operator(
            "from",
            DatasetReaderNode(
                statistics,
                dataset=_relations[0],
                reader=self._reader,
                cache=self._cache,
                partition_scheme=self._partition_scheme,
                start_date=self._start_date,
                end_date=self._end_date,
            ),
        )
        # `last_node` tracks the tail of the pipeline as operators are appended.
        last_node = "from"
        _join = self._extract_joins(ast)
        if _join or len(_relations) == 2:
            if len(_relations) == 2:
                # If there's no stated JOIN but the query has two relations, we
                # use a CROSS JOIN
                _join = ("CrossJoin", _relations[1], None, None)
            if _join[0] == "CrossJoinUnnest":
                # If we're doing a CROSS JOIN UNNEST, the right table is an UNNEST function
                right = _join[1]
            else:
                # Otherwise, the right table needs to come from the Reader
                right = DatasetReaderNode(
                    statistics,
                    dataset=_join[1],
                    reader=self._reader,
                    cache=self._cache,
                    partition_scheme=self._partition_scheme,
                    start_date=self._start_date,
                    end_date=self._end_date,
                )
            # map join types to their implementations
            join_nodes = {
                "CrossJoin": CrossJoinNode,
                "CrossJoinUnnest": CrossJoinNode,
                "FullOuter": OuterJoinNode,
                "Inner": InnerJoinNode,
                "LeftOuter": OuterJoinNode,
                "RightOuter": OuterJoinNode,
            }
            join_node = join_nodes.get(_join[0])
            if join_node is None:
                raise SqlError(f"Join type not supported - `{_join[0]}`")
            self.add_operator(
                "join",
                join_node(
                    statistics,
                    right_table=right,
                    join_type=_join[0],
                    join_on=_join[2],
                    join_using=_join[3],
                ),
            )
            self.link_operators(last_node, "join")
            last_node = "join"
        _projection = self._extract_projections(ast)
        # Scalar functions in the SELECT list are evaluated before the WHERE
        # clause so filters can reference their aliases.
        if any(["function" in a for a in _projection]):
            self.add_operator(
                "eval", EvaluationNode(statistics, projection=_projection)
            )
            self.link_operators(last_node, "eval")
            last_node = "eval"
        _selection = self._extract_selection(ast)
        if _selection:
            self.add_operator("where", SelectionNode(statistics, filter=_selection))
            self.link_operators(last_node, "where")
            last_node = "where"
        _groups = self._extract_groups(ast)
        if _groups or any(["aggregate" in a for a in _projection]):
            _aggregates = _projection.copy()
            if isinstance(_aggregates, dict):
                raise SqlError("GROUP BY cannot be used with SELECT *")
            # A grouped query with no explicit aggregate still needs one;
            # default to COUNT(*).
            if not any(["aggregate" in a for a in _aggregates]):
                _aggregates.append(
                    {
                        "aggregate": "COUNT",
                        "args": [("Wildcard", TOKEN_TYPES.WILDCARD)],
                        "alias": None,
                    }
                )
            self.add_operator(
                "agg", AggregateNode(statistics, aggregates=_aggregates, groups=_groups)
            )
            self.link_operators(last_node, "agg")
            last_node = "agg"
        _having = self._extract_having(ast)
        if _having:
            self.add_operator("having", SelectionNode(statistics, filter=_having))
            self.link_operators(last_node, "having")
            last_node = "having"
        self.add_operator("select", ProjectionNode(statistics, projection=_projection))
        self.link_operators(last_node, "select")
        last_node = "select"
        _distinct = self._extract_distinct(ast)
        if _distinct:
            self.add_operator("distinct", DistinctNode(statistics))
            self.link_operators(last_node, "distinct")
            last_node = "distinct"
        _order = self._extract_order(ast)
        if _order:
            self.add_operator("order", SortNode(statistics, order=_order))
            self.link_operators(last_node, "order")
            last_node = "order"
        # NOTE(review): OFFSET is wired before LIMIT here, and an OFFSET of 0
        # is treated as absent (`if _offset:`) while LIMIT 0 is honoured --
        # confirm both behaviours are intended.
        _offset = self._extract_offset(ast)
        if _offset:
            self.add_operator("offset", OffsetNode(statistics, offset=_offset))
            self.link_operators(last_node, "offset")
            last_node = "offset"
        _limit = self._extract_limit(ast)
        # 0 limit is valid
        if _limit is not None:
            self.add_operator("limit", LimitNode(statistics, limit=_limit))
            self.link_operators(last_node, "limit")
            last_node = "limit"
    def explain(self):
        """Yield a table describing this plan: one row per operator with
        its name, config and depth, in depth-first order from each entry
        point."""
        import pyarrow
        from opteryx.utils.columns import Columns
        def _inner_explain(operator_name, depth):
            # Depth-first walk emitting one row per visited operator.
            depth += 1
            operator = self.get_operator(operator_name)
            yield {"operator": operator.name, "config": operator.config, "depth": depth}
            out_going_links = self.get_outgoing_links(operator_name)
            if out_going_links:
                for next_operator_name in out_going_links:
                    yield from _inner_explain(next_operator_name, depth)
        entry_points = self.get_entry_points()
        nodes = []
        for entry_point in entry_points:
            nodes += list(_inner_explain(entry_point, 0))
        table = pyarrow.Table.from_pylist(nodes)
        table = Columns.create_table_metadata(table, table.num_rows, "plan", None)
        yield table
def add_operator(self, name, operator):
"""
Add a step to the DAG
Parameters:
name: string
The name of the step, must be unique
Operator: BaseOperator
The Operator
"""
self.nodes[name] = operator
def link_operators(self, source_operator, target_operator):
"""
Link steps in a flow.
Parameters:
source_operator: string
The name of the source step
target_operator: string
The name of the target step
"""
edge = (source_operator, target_operator)
if edge not in self.edges:
self.edges.append((source_operator, target_operator))
def get_outgoing_links(self, name):
"""
Get the names of outgoing links from a given step.
Paramters:
name: string
The name of the step to search from
"""
retval = {target for source, target in self.edges if source == name}
return sorted(retval)
def get_exit_points(self):
"""
Get steps in the flow with no outgoing steps.
"""
sources = {source for source, target in self.edges}
retval = {target for source, target in self.edges if target not in sources}
return sorted(retval)
def get_entry_points(self):
"""
Get steps in the flow with no incoming steps.
"""
if len(self.nodes) == 1:
return list(self.nodes.keys())
targets = {target for source, target in self.edges}
retval = {source for source, target in self.edges if source not in targets}
return sorted(retval)
def get_operator(self, name):
"""
Get the Operator class by name.
Parameters:
name: string
The name of the step
"""
return self.nodes.get(name)
def merge(self, assimilatee):
| |
# <gh_stars>0
from abc import ABC, abstractmethod
import torch
from torch import Tensor
from scipy.optimize import OptimizeResult
from scipy.optimize.optimize import _status_message
from .function import ScalarFunction
from .line_search import strong_wolfe
class HessianUpdateStrategy(ABC):
    """Base class for (inverse-)Hessian approximation schemes.

    Tracks how many curvature updates have been accepted and delegates
    the actual update/solve logic to subclasses.
    """
    def __init__(self):
        self.n_updates = 0
    @abstractmethod
    def solve(self, grad):
        """Return the quasi-Newton step direction for gradient `grad`."""
        pass
    @abstractmethod
    def _update(self, s, y, rho_inv):
        """Apply one curvature update (s = parameter step, y = gradient step)."""
        pass
    def update(self, s, y):
        """Accept the (s, y) pair unless curvature y.s is non-positive."""
        rho_inv = y.dot(s)
        if rho_inv > 1e-10:
            self._update(s, y, rho_inv)
            self.n_updates += 1
        # otherwise: curvature is negative; do not update
class L_BFGS(HessianUpdateStrategy):
    # Limited-memory BFGS: keeps the last `history_size` (s, y) pairs and
    # applies the inverse Hessian implicitly with the two-loop recursion.
    def __init__(self, x, history_size=100):
        super().__init__()
        self.y = []  # history of gradient differences, most recent last
        self.s = []  # history of parameter steps, most recent last
        self.rho = []  # history of 1 / (y.s) values
        self.H_diag = 1.  # scalar initial inverse-Hessian scaling
        self.alpha = x.new_empty(history_size)  # scratch for the recursion
        self.history_size = history_size
    def solve(self, grad):
        # Two-loop recursion: compute d = -H*grad from the stored history.
        mem_size = len(self.y)
        d = grad.neg()
        # first loop: newest to oldest pairs
        for i in reversed(range(mem_size)):
            self.alpha[i] = self.s[i].dot(d) * self.rho[i]
            d.add_(self.y[i], alpha=-self.alpha[i])
        # apply the initial (diagonal) inverse-Hessian scaling
        d.mul_(self.H_diag)
        # second loop: oldest to newest pairs
        for i in range(mem_size):
            beta_i = self.y[i].dot(d) * self.rho[i]
            d.add_(self.s[i], alpha=self.alpha[i] - beta_i)
        return d
    def _update(self, s, y, rho_inv):
        # Drop the oldest pair once the buffer is full.
        if len(self.y) == self.history_size:
            self.y.pop(0)
            self.s.pop(0)
            self.rho.pop(0)
        self.y.append(y)
        self.s.append(s)
        self.rho.append(rho_inv.reciprocal())
        # rescale the implicit initial matrix to the latest curvature
        self.H_diag = rho_inv / y.dot(y)
class BFGS(HessianUpdateStrategy):
    # Full-memory BFGS. With inverse=True the inverse Hessian H is stored
    # and steps are -H g; otherwise the Hessian B is stored and steps are
    # obtained by solving B d = -g via a Cholesky factorization.
    def __init__(self, x, inverse=True):
        super().__init__()
        self.inverse = inverse
        if inverse:
            self.I = torch.eye(x.numel(), device=x.device, dtype=x.dtype)
            self.H = self.I.clone()
        else:
            self.B = torch.eye(x.numel(), device=x.device, dtype=x.dtype)
    def solve(self, grad):
        # Return the quasi-Newton direction for gradient `grad`.
        if self.inverse:
            return torch.matmul(self.H, grad.neg())
        else:
            return torch.cholesky_solve(grad.neg().unsqueeze(1),
                                        torch.linalg.cholesky(self.B)).squeeze(1)
    def _update(self, s, y, rho_inv):
        rho = rho_inv.reciprocal()
        if self.inverse:
            if self.n_updates == 0:
                # scale the initial matrix by the first curvature estimate
                self.H.mul_(rho_inv / y.dot(y))
            # H <- (I - rho s y^T) H (I - rho y s^T) + rho s s^T
            R = torch.addr(self.I, s, y, alpha=-rho)
            torch.addr(
                torch.linalg.multi_dot((R, self.H, R.t())),
                s, s, alpha=rho, out=self.H)
        else:
            if self.n_updates == 0:
                # scale the initial matrix by the first curvature estimate
                self.B.mul_(rho * y.dot(y))
            # B <- B + rho y y^T - (B s)(B s)^T / (s^T B s)
            Bs = torch.mv(self.B, s)
            self.B.addr_(y, y, alpha=rho)
            self.B.addr_(Bs, Bs, alpha=-1./s.dot(Bs))
@torch.no_grad()
def _minimize_bfgs_core(
        fun, x0, lr=1., low_mem=False, history_size=100, inv_hess=True,
        max_iter=None, line_search='strong-wolfe', gtol=1e-5, xtol=1e-9,
        normp=float('inf'), callback=None, disp=0, return_all=False):
    """Minimize a multivariate function with BFGS or L-BFGS.
    We choose from BFGS/L-BFGS with the `low_mem` argument.
    Parameters
    ----------
    fun : callable
        Scalar objective function to minimize
    x0 : Tensor
        Initialization point
    lr : float
        Step size for parameter updates. If using line search, this will be
        used as the initial step size for the search.
    low_mem : bool
        Whether to use L-BFGS, the "low memory" variant of the BFGS algorithm.
    history_size : int
        History size for L-BFGS hessian estimates. Ignored if `low_mem=False`.
    inv_hess : bool
        Whether to parameterize the inverse hessian vs. the hessian with BFGS.
        Ignored if `low_mem=True` (L-BFGS always parameterizes the inverse).
    max_iter : int, optional
        Maximum number of iterations to perform. Defaults to 200 * x0.numel()
    line_search : str
        Line search specifier. Currently the available options are
        {'none', 'strong_wolfe'}.
    gtol : float
        Termination tolerance on 1st-order optimality (gradient norm).
    xtol : float
        Termination tolerance on function/parameter changes.
    normp : Number or str
        The norm type to use for termination conditions. Can be any value
        supported by `torch.norm` p argument.
    callback : callable, optional
        Function to call after each iteration with the current parameter
        state, e.g. ``callback(x)``.
    disp : int or bool
        Display (verbosity) level. Set to >0 to print status messages.
    return_all : bool, optional
        Set to True to return a list of the best solution at each of the
        iterations.
    Returns
    -------
    result : OptimizeResult
        Result of the optimization routine.
    """
    lr = float(lr)
    disp = int(disp)
    if max_iter is None:
        max_iter = x0.numel() * 200
    if low_mem and not inv_hess:
        raise ValueError('inv_hess=False is not available for L-BFGS.')
    # construct scalar objective function
    sf = ScalarFunction(fun, x0.shape)
    closure = sf.closure
    if line_search == 'strong-wolfe':
        dir_evaluate = sf.dir_evaluate
    # compute initial f(x) and f'(x)
    # work on a flattened, contiguous copy; reshape back on return
    x = x0.detach().view(-1).clone(memory_format=torch.contiguous_format)
    f, g, _, _ = closure(x)
    if disp > 1:
        print('initial fval: %0.4f' % f)
    if return_all:
        allvecs = [x]
    # initial settings
    if low_mem:
        hess = L_BFGS(x, history_size)
    else:
        hess = BFGS(x, inv_hess)
    # first direction is steepest descent; later ones come from `hess`
    d = g.neg()
    # conservative first step: scale lr by 1/||g||_1, capped at 1
    t = min(1., g.norm(p=1).reciprocal()) * lr
    n_iter = 0
    # BFGS iterations
    for n_iter in range(1, max_iter+1):
        # ==================================
        #   compute Quasi-Newton direction
        # ==================================
        if n_iter > 1:
            d = hess.solve(g)
        # directional derivative
        gtd = g.dot(d)
        # check if directional derivative is below tolerance
        if gtd > -xtol:
            warnflag = 4
            msg = 'A non-descent direction was encountered.'
            break
        # ======================
        #   update parameter
        # ======================
        if line_search == 'none':
            # no line search, move with fixed-step
            x_new = x + d.mul(t)
            f_new, g_new, _, _ = closure(x_new)
        elif line_search == 'strong-wolfe':
            #  Determine step size via strong-wolfe line search
            f_new, g_new, t, ls_evals = \
                strong_wolfe(dir_evaluate, x, t, d, f, g, gtd)
            x_new = x + d.mul(t)
        else:
            raise ValueError('invalid line_search option {}.'.format(line_search))
        if disp > 1:
            print('iter %3d - fval: %0.4f' % (n_iter, f_new))
        if return_all:
            allvecs.append(x_new)
        if callback is not None:
            callback(x_new)
        # ================================
        #   update hessian approximation
        # ================================
        s = x_new.sub(x)
        y = g_new.sub(g)
        hess.update(s, y)
        # =========================================
        #   check conditions and update buffers
        # =========================================
        # convergence by insufficient progress
        if (s.norm(p=normp) <= xtol) | ((f_new - f).abs() <= xtol):
            warnflag = 0
            msg = _status_message['success']
            break
        # update state (in place, so views held elsewhere stay valid)
        f[...] = f_new
        x.copy_(x_new)
        g.copy_(g_new)
        # reset the trial step for the next line search
        t = lr
        # convergence by 1st-order optimality
        if g.norm(p=normp) <= gtol:
            warnflag = 0
            msg = _status_message['success']
            break
        # precision loss; exit
        if ~f.isfinite():
            warnflag = 2
            msg = _status_message['pr_loss']
            break
    else:
        # if we get to the end, the maximum num. iterations was reached
        warnflag = 1
        msg = _status_message['maxiter']
    if disp:
        print(msg)
        print("         Current function value: %f" % f)
        print("         Iterations: %d" % n_iter)
        print("         Function evaluations: %d" % sf.nfev)
    result = OptimizeResult(fun=f, x=x.view_as(x0), grad=g.view_as(x0),
                            status=warnflag, success=(warnflag==0),
                            message=msg, nit=n_iter, nfev=sf.nfev)
    if not low_mem:
        # full-memory BFGS also exposes its (inverse) Hessian estimate,
        # reshaped to x0.shape x x0.shape
        if inv_hess:
            result['hess_inv'] = hess.H.view(2 * x0.shape)
        else:
            result['hess'] = hess.B.view(2 * x0.shape)
    if return_all:
        result['allvecs'] = allvecs
    return result
def _minimize_bfgs(
        fun, x0, lr=1., inv_hess=True, max_iter=None,
        line_search='strong-wolfe', gtol=1e-5, xtol=1e-9,
        normp=float('inf'), callback=None, disp=0, return_all=False):
    """Minimize a multivariate function with (full-memory) BFGS.
    Thin convenience wrapper around the BFGS/L-BFGS core with
    ``low_mem=False``.
    Parameters
    ----------
    fun : callable
        Scalar objective function to minimize.
    x0 : Tensor
        Initialization point.
    lr : float
        Step size for parameter updates. If using line search, this will be
        used as the initial step size for the search.
    inv_hess : bool
        Whether to parameterize the inverse hessian vs. the hessian with BFGS.
    max_iter : int, optional
        Maximum number of iterations to perform. Defaults to
        ``200 * x0.numel()``.
    line_search : str
        Line search specifier. Currently the available options are
        {'none', 'strong_wolfe'}.
    gtol : float
        Termination tolerance on 1st-order optimality (gradient norm).
    xtol : float
        Termination tolerance on function/parameter changes.
    normp : Number or str
        The norm type to use for termination conditions. Can be any value
        supported by :func:`torch.norm`.
    callback : callable, optional
        Function to call after each iteration with the current parameter
        state, e.g. ``callback(x)``.
    disp : int or bool
        Display (verbosity) level. Set to >0 to print status messages.
    return_all : bool, optional
        Set to True to return a list of the best solution at each of the
        iterations.
    Returns
    -------
    result : OptimizeResult
        Result of the optimization routine.
    """
    return _minimize_bfgs_core(
        fun, x0, lr=lr, low_mem=False, inv_hess=inv_hess,
        max_iter=max_iter, line_search=line_search, gtol=gtol,
        xtol=xtol, normp=normp, callback=callback, disp=disp,
        return_all=return_all)
def _minimize_lbfgs(
fun, x0, lr=1., history_size=100, max_iter=None,
line_search='strong-wolfe', gtol=1e-5, xtol=1e-9,
normp=float('inf'), callback=None, disp=0, return_all=False):
"""Minimize a multivariate function with L-BFGS
Parameters
----------
fun : callable
Scalar objective function to minimize.
x0 : Tensor
Initialization point.
lr : float
Step size for parameter updates. If using line search, this will be
used as the initial step size for the search.
history_size : int
History size for L-BFGS hessian estimates.
max_iter : int, optional
Maximum number of iterations to perform. Defaults to
``200 * x0.numel()``.
line_search : str
Line search specifier. Currently the available options are
{'none', 'strong_wolfe'}.
gtol : float
Termination tolerance on 1st-order optimality (gradient norm).
xtol : float
Termination tolerance on function/parameter changes.
normp : Number or str
The norm type to use for termination conditions. Can be any value
supported by :func:`torch.norm`.
callback : callable, optional
Function to call after each iteration with the current parameter
state, e.g. ``callback(x)``.
disp : int or bool
Display (verbosity) level. Set to >0 to print status messages.
return_all : bool, optional
Set to True to return a list of the best solution at each of | |
# source file: neuro-cli/tests/unit/test_shell_completion.py
import logging
import os
import shlex
import sys
from dataclasses import replace
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from pathlib import Path
from typing import (
Any,
AsyncIterator,
Callable,
Iterable,
List,
Optional,
Sequence,
Tuple,
)
from unittest import mock
import pytest
from dateutil.parser import isoparse
from yarl import URL
from neuro_sdk import (
Action,
BlobObject,
Bucket,
BucketCredentials,
Container,
Disk,
Disks,
FileStatus,
FileStatusType,
Images,
JobDescription,
Jobs,
JobStatus,
JobStatusHistory,
PersistentBucketCredentials,
RemoteImage,
Resources,
ServiceAccount,
ServiceAccounts,
Storage,
)
from neuro_sdk._buckets import Buckets
from neuro_sdk._url_utils import normalize_storage_path_uri
from neuro_sdk._utils import asyncgeneratorcontextmanager
from .conftest import SysCapWithCode
JOB_OUTPUT_TIMEOUT = 10 * 60
NETWORK_TIMEOUT = 3 * 60.0
_RunCli = Callable[[Sequence[str]], SysCapWithCode]
log = logging.getLogger(__name__)
def _default_args(verbosity: int, network_timeout: float, nmrc_path: Path) -> List[str]:
args = [
"--show-traceback",
"--disable-pypi-version-check",
"--color=no",
f"--network-timeout={network_timeout}",
"--skip-stats",
f"--neuromation-config={nmrc_path}",
]
if verbosity < 0:
args.append("-" + "q" * (-verbosity))
if verbosity > 0:
args.append("-" + "v" * verbosity)
return args
def autocomplete(
    run_cli: _RunCli,
    nmrc_path: Path,
    monkeypatch: Any,
    arguments: Sequence[str],
    *,
    shell: str,
    verbosity: int = 0,
    network_timeout: float = NETWORK_TIMEOUT,
    timeout: float = JOB_OUTPUT_TIMEOUT,
) -> str:
    """Drive click's shell-completion protocol for ``arguments`` and return
    the raw completion output produced for ``shell`` (zsh or bash)."""
    __tracebackhide__ = True
    log.info("Run 'neuro %s'", " ".join(arguments))
    base_args = _default_args(verbosity, network_timeout, nmrc_path)
    words = [*base_args, *arguments]
    # Completion is triggered purely through environment variables.
    completion_env = {
        **os.environ,
        "_PYTEST_COMPLETE": f"{shell}_complete",
        "COMP_WORDS": " ".join(shlex.quote(word) for word in words),
        "COMP_CWORD": str(len(words) - 1),
        "NEURO_CLI_JOB_AUTOCOMPLETE_LIMIT": "500",
    }
    monkeypatch.setattr(os, "environ", completion_env)
    result = run_cli([])
    assert result.code == 0
    assert not result.err
    return result.out
_RunAC = Callable[[List[str]], Tuple[str, str]]


@pytest.fixture()
def run_autocomplete(run_cli: _RunCli, nmrc_path: Path, monkeypatch: Any) -> _RunAC:
    """Fixture: complete one command line under both zsh and bash."""

    def autocompleter(args: Sequence[str]) -> Tuple[str, str]:
        results = {
            sh: autocomplete(run_cli, nmrc_path, monkeypatch, args, shell=sh)
            for sh in ("zsh", "bash")
        }
        return results["zsh"], results["bash"]

    return autocompleter
# Marker applied to every completion test below: the shell-completion
# machinery targets POSIX shells, so all of these are skipped on Windows.
skip_on_windows = pytest.mark.skipif(
    sys.platform == "win32", reason="Autocompletion is not supported on Windows"
)
@skip_on_windows
def test_file_autocomplete(run_autocomplete: _RunAC, tmp_path: Path) -> None:
    """Complete ``file:`` URIs against a scratch directory tree.

    Expected output format (produced by the CLI's completion hooks):
    bash lines are ``type,value,prefix`` and zsh entries are four
    newline-separated fields ``type/value/description/prefix``.
    """
    # Fixture tree: base/file.txt, base/folder/file2.txt, base/folder/folder2
    base = tmp_path / "base"
    base.mkdir()
    (base / "file.txt").write_bytes(b"")
    (base / "folder").mkdir()
    (base / "folder/file2.txt").write_bytes(b"")
    (base / "folder/folder2").mkdir()
    # A bare scheme prefix completes to "file:" itself.
    zsh_out, bash_out = run_autocomplete(["storage", "cp", "fi"])
    assert bash_out == "uri,file:,"
    assert zsh_out == "uri\nfile:\n_\n_"
    base_uri = base.as_uri()
    base_prefix = base_uri[5:]  # strip the "file:" scheme for bash prefixes
    names = os.listdir(base)
    # Directories are suggested with a trailing slash.
    names = [name + ("/" if "folder" in name else "") for name in names]
    zsh_out, bash_out = run_autocomplete(["storage", "cp", base_uri + "/"])
    assert bash_out == "\n".join(f"uri,{name},{base_prefix}/" for name in names)
    assert zsh_out == "\n".join(f"uri\n{name}\n_\n{base_uri}/" for name in names)
    # "f" matches both entries ("file.txt" and "folder/").
    zsh_out, bash_out = run_autocomplete(["storage", "cp", base_uri + "/f"])
    assert bash_out == "\n".join(f"uri,{name},{base_prefix}/" for name in names)
    assert zsh_out == "\n".join(f"uri\n{name}\n_\n{base_uri}/" for name in names)
    # "fi" narrows to the file only.
    zsh_out, bash_out = run_autocomplete(["storage", "cp", base_uri + "/fi"])
    assert bash_out == f"uri,file.txt,{base_prefix}/"
    assert zsh_out == f"uri\nfile.txt\n_\n{base_uri}/"
    zsh_out, bash_out = run_autocomplete(["storage", "cp", base_uri + "/folder"])
    assert bash_out == f"uri,folder/,{base_prefix}/"
    assert zsh_out == f"uri\nfolder/\n_\n{base_uri}/"
    # Completing inside the subdirectory lists its children.
    zsh_out, bash_out = run_autocomplete(["storage", "cp", base_uri + "/folder/"])
    names = os.listdir(base / "folder")
    names = [name + ("/" if "folder" in name else "") for name in names]
    assert bash_out == "\n".join(f"uri,{name},{base_prefix}/folder/" for name in names)
    assert zsh_out == "\n".join(f"uri\n{name}\n_\n{base_uri}/folder/" for name in names)
@skip_on_windows
def test_file_autocomplete_default(run_autocomplete: _RunAC) -> None:
    """Bare ``file:`` and ``file://`` both complete relative to the cwd."""
    for prefix in ("file:", "file://"):
        cwd = Path.cwd()
        cwd_uri = cwd.as_uri()
        cwd_prefix = cwd_uri[5:]  # drop the "file:" scheme
        names = [p.name + ("/" if p.is_dir() else "") for p in cwd.iterdir()]
        zsh_out, bash_out = run_autocomplete(["storage", "cp", prefix])
        assert bash_out == "\n".join(f"uri,{name},{cwd_prefix}/" for name in names)
        assert zsh_out == "\n".join(f"uri\n{name}\n_\n{cwd_uri}/" for name in names)
@skip_on_windows
def test_file_autocomplete_root(run_autocomplete: _RunAC) -> None:
    """``file:/`` and ``file:///`` both complete to the filesystem root."""
    entries = [p.name + ("/" if p.is_dir() else "") for p in Path("/").iterdir()]
    expected_bash = "\n".join(f"uri,{name},///" for name in entries)
    expected_zsh = "\n".join(f"uri\n{name}\n_\nfile:///" for name in entries)
    for arg in ("file:/", "file:///"):
        zsh_out, bash_out = run_autocomplete(["storage", "cp", arg])
        assert bash_out == expected_bash
        assert zsh_out == expected_zsh
@skip_on_windows
def test_storage_autocomplete(run_autocomplete: _RunAC) -> None:
    """Complete ``storage:`` URIs against a mocked Storage API.

    The SDK's ``Storage.stat``/``Storage.list`` are patched to serve a small
    in-memory tree (clusters -> users -> folders/files); entries whose name
    contains ".txt" are treated as files, everything else as directories.
    """
    with mock.patch.object(Storage, "stat") as mocked_stat, mock.patch.object(
        Storage, "list"
    ) as mocked_list:
        # Fake storage hierarchy keyed by normalized URI.
        tree = {
            URL("storage://default"): ["test-user", "other-user"],
            URL("storage://default/test-user"): ["folder", "file.txt"],
            URL("storage://default/test-user/folder"): ["folder2", "file2.txt"],
            URL("storage://default/other-user"): ["folder3", "file3.txt"],
            URL("storage://other-cluster"): ["test-user"],
        }
        def is_dir(uri: URL) -> bool:
            # Naming convention of the fixture: ".txt" marks a file.
            return ".txt" not in uri.name
        async def stat(uri: URL) -> FileStatus:
            # Resolve short forms against the default user/cluster first.
            uri = normalize_storage_path_uri(uri, "test-user", "default", org_name=None)
            if uri.path.endswith("/") and uri.path != "/":
                uri = uri.with_path(uri.path.rstrip("/"))
            return FileStatus(
                path=uri.path,
                type=FileStatusType.DIRECTORY if is_dir(uri) else FileStatusType.FILE,
                size=0,
                modification_time=1234567890,
                permission=Action.WRITE,
                uri=uri,
            )
        # NOTE: the local name "list" shadows the builtin; scoped to this test.
        @asyncgeneratorcontextmanager
        async def list(uri: URL) -> AsyncIterator[FileStatus]:
            uri = normalize_storage_path_uri(uri, "test-user", "default", org_name=None)
            for name in tree[uri]:
                child = uri / name
                yield FileStatus(
                    path=name,
                    type=FileStatusType.DIRECTORY
                    if is_dir(child)
                    else FileStatusType.FILE,
                    size=0,
                    modification_time=1234567890,
                    permission=Action.WRITE,
                    uri=child,
                )
        mocked_stat.side_effect = stat
        mocked_list.side_effect = list
        # Bare prefix completes to the scheme itself.
        zsh_out, bash_out = run_autocomplete(["storage", "cp", "st"])
        assert bash_out == "uri,storage:,"
        assert zsh_out == "uri\nstorage:\n_\n_"
        # Relative form: completes within the current user's home.
        zsh_out, bash_out = run_autocomplete(["storage", "cp", "storage:"])
        assert bash_out == ("uri,folder/,\n" "uri,file.txt,")
        assert zsh_out == ("uri\nfolder/\n_\nstorage:\n" "uri\nfile.txt\n_\nstorage:")
        zsh_out, bash_out = run_autocomplete(["storage", "cp", "storage:f"])
        assert bash_out == ("uri,folder/,\n" "uri,file.txt,")
        assert zsh_out == ("uri\nfolder/\n_\nstorage:\n" "uri\nfile.txt\n_\nstorage:")
        zsh_out, bash_out = run_autocomplete(["storage", "cp", "storage:folder/"])
        assert bash_out == ("uri,folder2/,folder/\n" "uri,file2.txt,folder/")
        assert zsh_out == (
            "uri\nfolder2/\n_\nstorage:folder/\n" "uri\nfile2.txt\n_\nstorage:folder/"
        )
        zsh_out, bash_out = run_autocomplete(["storage", "cp", "storage:folder/fi"])
        assert bash_out == "uri,file2.txt,folder/"
        assert zsh_out == "uri\nfile2.txt\n_\nstorage:folder/"
        # Absolute form ("storage:/"): completes user homes on the default cluster.
        zsh_out, bash_out = run_autocomplete(["storage", "cp", "storage:/"])
        assert bash_out == ("uri,test-user/,//default/\n" "uri,other-user/,//default/")
        assert zsh_out == (
            "uri\ntest-user/\n_\nstorage://default/\n"
            "uri\nother-user/\n_\nstorage://default/"
        )
        zsh_out, bash_out = run_autocomplete(["storage", "cp", "storage:/t"])
        assert bash_out == "uri,test-user/,//default/"
        assert zsh_out == "uri\ntest-user/\n_\nstorage://default/"
        zsh_out, bash_out = run_autocomplete(["storage", "cp", "storage:/test-user/"])
        assert bash_out == (
            "uri,folder/,//default/test-user/\n" "uri,file.txt,//default/test-user/"
        )
        assert zsh_out == (
            "uri\nfolder/\n_\nstorage://default/test-user/\n"
            "uri\nfile.txt\n_\nstorage://default/test-user/"
        )
        # Full form ("storage://"): completes cluster names.
        zsh_out, bash_out = run_autocomplete(["storage", "cp", "storage://"])
        assert bash_out == "uri,default/,//\nuri,other/,//"
        assert zsh_out == (
            "uri\ndefault/\n_\nstorage://\n" "uri\nother/\n_\nstorage://"
        )
        zsh_out, bash_out = run_autocomplete(["storage", "cp", "storage://d"])
        assert bash_out == "uri,default/,//"
        assert zsh_out == "uri\ndefault/\n_\nstorage://"
        zsh_out, bash_out = run_autocomplete(["storage", "cp", "storage://default/"])
        assert bash_out == ("uri,test-user/,//default/\n" "uri,other-user/,//default/")
        assert zsh_out == (
            "uri\ntest-user/\n_\nstorage://default/\n"
            "uri\nother-user/\n_\nstorage://default/"
        )
        zsh_out, bash_out = run_autocomplete(["storage", "cp", "storage://default/t"])
        assert bash_out == "uri,test-user/,//default/"
        assert zsh_out == "uri\ntest-user/\n_\nstorage://default/"
@skip_on_windows
def test_blob_autocomplete(run_autocomplete: _RunAC) -> None:
with mock.patch.object(Buckets, "list") as mocked_list, mock.patch.object(
Buckets, "blob_is_dir"
) as mocked_blob_is_dir, mock.patch.object(
Buckets, "list_blobs"
) as mocked_list_blobs:
@asyncgeneratorcontextmanager
async def list(cluster_name: str) -> AsyncIterator[Bucket]:
yield Bucket(
id="bucket-1",
name="neuro-my-bucket",
created_at=datetime(2018, 1, 1, 3),
cluster_name="default",
owner="user",
provider=Bucket.Provider.AWS,
imported=False,
org_name=None,
)
yield Bucket(
id="bucket-2",
name="neuro-public-bucket",
created_at=datetime(2018, 1, 1, 17, 2, 4),
cluster_name="default",
owner="public",
provider=Bucket.Provider.AWS,
imported=False,
org_name=None,
)
yield Bucket(
id="bucket-3",
name="neuro-shared-bucket",
created_at=datetime(2018, 1, 1, 13, 1, 5),
cluster_name="default",
owner="another-user",
provider=Bucket.Provider.AWS,
imported=False,
org_name=None,
)
async def blob_is_dir(uri: URL) -> bool:
return ".txt" not in uri.name
@asyncgeneratorcontextmanager
async def list_blobs(uri: URL) -> AsyncIterator[BlobObject]:
async with list(uri.host) as it:
async for bucket in it:
try:
key = bucket.get_key_for_uri(uri)
except ValueError:
continue
break
else:
return
blobs = [
BlobObject(
key="file1024.txt",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=bucket,
size=1024,
),
BlobObject(
key="otherfile.txt",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=bucket,
size=1024,
),
BlobObject(
key="file_bigger.txt",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=bucket,
size=1_024_001,
),
BlobObject(
key="folder2/info.txt",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=bucket,
size=240,
),
BlobObject(
key="folder2/",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=bucket,
size=0,
),
BlobObject(
key="folder23/",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=bucket,
size=0,
),
]
for blob in blobs:
if blob.key.startswith(key):
if "/" not in blob.key[len(key) :].rstrip("/"):
yield blob
mocked_list.side_effect = list
mocked_blob_is_dir.side_effect = blob_is_dir
mocked_list_blobs.side_effect = list_blobs
zsh_out, bash_out = run_autocomplete(["blob", "ls", "bl"])
assert bash_out == "uri,blob:,"
assert zsh_out == "uri\nblob:\n_\n_"
zsh_out, bash_out = run_autocomplete(["blob", "ls", "blob:"])
assert bash_out == (
"uri,bucket-1/,\n"
"uri,neuro-my-bucket/,\n"
"uri,bucket-2/,\n"
"uri,neuro-public-bucket/,\n"
"uri,bucket-3/,\n"
"uri,neuro-shared-bucket/,"
)
assert zsh_out == (
"uri\nbucket-1/\n_\nblob:\nuri\nneuro-my-bucket/\n_\nblob:\n"
"uri\nbucket-2/\n_\nblob:\n"
"uri\nneuro-public-bucket/\n_\nblob:\n"
"uri\nbucket-3/\n_\nblob:\n"
"uri\nneuro-shared-bucket/\n_\nblob:"
)
zsh_out, bash_out = run_autocomplete(["blob", "ls", "blob:b"])
assert bash_out == ("uri,bucket-1/,\n" "uri,bucket-2/,\n" "uri,bucket-3/,")
assert zsh_out == (
"uri\nbucket-1/\n_\nblob:\n"
"uri\nbucket-2/\n_\nblob:\n"
"uri\nbucket-3/\n_\nblob:"
)
zsh_out, bash_out = run_autocomplete(["blob", "ls", "blob:n"])
assert bash_out == (
"uri,neuro-my-bucket/,\n"
"uri,neuro-public-bucket/,\n"
"uri,neuro-shared-bucket/,"
)
assert zsh_out == (
"uri\nneuro-my-bucket/\n_\nblob:\n"
"uri\nneuro-public-bucket/\n_\nblob:\n"
"uri\nneuro-shared-bucket/\n_\nblob:"
)
zsh_out, bash_out = run_autocomplete(["blob", "ls", "blob:bucket-1"])
assert bash_out == "uri,bucket-1/,"
assert zsh_out == "uri\nbucket-1/\n_\nblob:"
zsh_out, bash_out = run_autocomplete(["blob", "ls", "blob:bucket-1/"])
assert bash_out == (
"uri,file1024.txt,bucket-1/\n"
"uri,otherfile.txt,bucket-1/\n"
"uri,file_bigger.txt,bucket-1/\n"
"uri,folder2/,bucket-1/\n"
"uri,folder23/,bucket-1/"
)
assert zsh_out == (
"uri\nfile1024.txt\n_\nblob:bucket-1/\n"
"uri\notherfile.txt\n_\nblob:bucket-1/\n"
"uri\nfile_bigger.txt\n_\nblob:bucket-1/\n"
"uri\nfolder2/\n_\nblob:bucket-1/\n"
"uri\nfolder23/\n_\nblob:bucket-1/"
)
zsh_out, bash_out = run_autocomplete(["blob", "ls", "blob:bucket-1/f"])
assert bash_out == (
"uri,file1024.txt,bucket-1/\n"
"uri,file_bigger.txt,bucket-1/\n"
"uri,folder2/,bucket-1/\n"
"uri,folder23/,bucket-1/"
)
assert zsh_out == (
"uri\nfile1024.txt\n_\nblob:bucket-1/\n"
"uri\nfile_bigger.txt\n_\nblob:bucket-1/\n"
"uri\nfolder2/\n_\nblob:bucket-1/\n"
"uri\nfolder23/\n_\nblob:bucket-1/"
)
zsh_out, bash_out = run_autocomplete(["blob", "ls", "blob:bucket-1/fi"])
assert bash_out == (
"uri,file1024.txt,bucket-1/\n" "uri,file_bigger.txt,bucket-1/"
)
assert zsh_out == (
"uri\nfile1024.txt\n_\nblob:bucket-1/\n"
"uri\nfile_bigger.txt\n_\nblob:bucket-1/"
)
zsh_out, bash_out = run_autocomplete(["blob", "ls", | |
<reponame>sahirsharma/Martian<filename>NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Astropy/astropy-1.1.2/astropy/io/fits/hdu/hdulist.py
# Licensed under a 3-clause BSD style license - see PYFITS.rst
from __future__ import print_function
import gzip
import os
import shutil
import sys
import warnings
from . import compressed
from .base import _BaseHDU, _ValidHDU, _NonstandardHDU, ExtensionHDU
from .groups import GroupsHDU
from .image import PrimaryHDU, ImageHDU
from ..file import _File
from ..header import _pad_length
from ..util import (_is_int, _tmp_name, fileobj_closed, ignore_sigint,
_get_array_mmap)
from ..verify import _Verify, _ErrList, VerifyError, VerifyWarning
from ....extern.six import string_types
from ....utils import indent
from ....utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning
def fitsopen(name, mode='readonly', memmap=None, save_backup=False,
             cache=True, **kwargs):
    """Factory function to open a FITS file and return an `HDUList` object.

    Parameters
    ----------
    name : file path, file object or file-like object
        File to be opened.

    mode : str, optional
        Open mode: 'readonly' (default), 'update', 'append', 'denywrite',
        or 'ostream'.  If ``name`` is an already-opened file object,
        ``mode`` must match the mode the file was opened with: readonly
        (rb), update (rb+), append (ab+), ostream (w), denywrite (rb).

    memmap : bool, optional
        Is memory mapping to be used?

    save_backup : bool, optional
        If the file was opened in update or append mode, save a backup of
        the original file before flushing any changes.  The backup has the
        same name as the original file with ".bak" appended; if "file.bak"
        already exists then "file.bak.1" is used, and so on.

    cache : bool, optional
        If the file name is a URL, `~astropy.utils.data.download_file` is
        used to open it; this flag controls whether the download is cached
        locally (default: `True`).

    kwargs : dict, optional
        Additional optional keyword arguments:

        - **uint** : bool -- interpret signed integer data where ``BZERO``
          is the central value and ``BSCALE == 1`` as unsigned integer data
          (e.g. ``int16`` with ``BZERO = 32768`` becomes ``uint16``);
          enabled by default.  The legacy spelling **uint16** is still
          accepted for backward compatibility.
        - **ignore_missing_end** : bool -- do not raise when the last
          header is missing its ``END`` card.
        - **checksum** : bool or str -- verify ``DATASUM``/``CHECKSUM``
          cards when present; the value 'remove' strips them on save.
        - **disable_image_compression** : bool -- treat compressed image
          HDUs as plain binary table HDUs.
        - **do_not_scale_image_data** : bool -- skip BSCALE/BZERO scaling
          on read.
        - **ignore_blank** : bool -- ignore the BLANK keyword if present.
        - **scale_back** : bool -- on save, restore scaled image data to
          the original type and reapply BSCALE/BZERO (may lose accuracy).

    Returns
    -------
    hdulist : an `HDUList` object
        `HDUList` containing all of the header data units in the file.
    """
    from .. import conf

    if memmap is None:
        # None means "defer to the config preference"; an explicit argument
        # is coerced to a plain bool.
        memmap = None if conf.use_memmap else False
    else:
        memmap = bool(memmap)

    # Backward compatibility: 'uint16' was renamed to 'uint' when support
    # was added for integers of any size.
    if 'uint16' in kwargs and 'uint' not in kwargs:
        kwargs['uint'] = kwargs.pop('uint16')
        warnings.warn(
            'The uint16 keyword argument is deprecated since v1.1.0. Use '
            'the uint argument instead.', AstropyDeprecationWarning)
    if 'uint' not in kwargs:
        kwargs['uint'] = conf.enable_uint

    if not name:
        raise ValueError('Empty filename: %s' % repr(name))

    return HDUList.fromfile(name, mode, memmap, save_backup, cache, **kwargs)
class HDUList(list, _Verify):
"""
HDU list class. This is the top-level FITS object. When a FITS
file is opened, a `HDUList` object is returned.
"""
def __init__(self, hdus=[], file=None):
"""
Construct a `HDUList` object.
Parameters
----------
hdus : sequence of HDU objects or single HDU, optional
The HDU object(s) to comprise the `HDUList`. Should be
instances of HDU classes like `ImageHDU` or `BinTableHDU`.
file : file object, optional
The opened physical file associated with the `HDUList`.
"""
self._file = file
self._save_backup = False
if hdus is None:
hdus = []
# can take one HDU, as well as a list of HDU's as input
if isinstance(hdus, _ValidHDU):
hdus = [hdus]
elif not isinstance(hdus, (HDUList, list)):
raise TypeError("Invalid input for HDUList.")
for idx, hdu in enumerate(hdus):
if not isinstance(hdu, _BaseHDU):
raise TypeError(
"Element %d in the HDUList input is not an HDU." % idx)
super(HDUList, self).__init__(hdus)
self.update_extend()
def __iter__(self):
for idx in range(len(self)):
yield self[idx]
def __getitem__(self, key):
"""
Get an HDU from the `HDUList`, indexed by number or name.
"""
if isinstance(key, slice):
hdus = super(HDUList, self).__getitem__(key)
return HDUList(hdus)
idx = self.index_of(key)
return super(HDUList, self).__getitem__(idx)
def __contains__(self, item):
"""
Returns `True` if ``HDUList.index_of(item)`` succeeds.
"""
try:
self.index_of(item)
return True
except KeyError:
return False
    def __setitem__(self, key, hdu):
        """
        Set an HDU to the `HDUList`, indexed by number or name.

        ``hdu`` may be a list (or slice) of HDUs, in which case ``key``
        must resolve to a slice as well.
        """
        _key = self.index_of(key)
        if isinstance(hdu, (slice, list)):
            # Assigning multiple HDUs only makes sense for a slice key.
            if _is_int(_key):
                raise ValueError('An element in the HDUList must be an HDU.')
            for item in hdu:
                if not isinstance(item, _BaseHDU):
                    raise ValueError('%s is not an HDU.' % item)
        else:
            if not isinstance(hdu, _BaseHDU):
                raise ValueError('%s is not an HDU.' % hdu)
        try:
            super(HDUList, self).__setitem__(_key, hdu)
        except IndexError:
            raise IndexError('Extension %s is out of bound or not found.'
                             % key)
        # Any replacement invalidates in-place updates of the backing file.
        self._resize = True
        self._truncate = False
    def __delitem__(self, key):
        """
        Delete an HDU from the `HDUList`, indexed by number or name.
        """
        if isinstance(key, slice):
            end_index = len(self)
        else:
            key = self.index_of(key)
            end_index = len(self) - 1
        super(HDUList, self).__delitem__(key)
        # NOTE: 'and' binds tighter than 'or', so this reads as
        # key == end_index or (key == -1 and not self._resize): removing
        # only trailing HDUs lets the file be truncated instead of rewritten.
        if (key == end_index or key == -1 and not self._resize):
            self._truncate = True
        else:
            self._truncate = False
            self._resize = True
    def __getslice__(self, start, end):
        # Python 2 legacy slicing hook; delegates to __getitem__ with a slice.
        return self[slice(start, end)]
    def __delslice__(self, start, stop):
        """
        Delete a slice of HDUs from the `HDUList`, indexed by number only.
        """
        # Python 2 legacy hook; route through __delitem__ for flag bookkeeping.
        del self[slice(start, stop)]
# Support the 'with' statement
    def __enter__(self):
        # Context-manager entry: the HDUList itself is the managed object.
        return self
    def __exit__(self, type, value, traceback):
        # Context-manager exit: always close, regardless of exceptions.
        self.close()
    @classmethod
    def fromfile(cls, fileobj, mode=None, memmap=None,
                 save_backup=False, cache=True, **kwargs):
        """
        Creates an `HDUList` instance from a file-like object.

        The actual implementation of ``fitsopen()``, and generally shouldn't
        be used directly.  Use :func:`open` instead (and see its
        documentation for details of the parameters accepted by this method).
        """
        # All the heavy lifting happens in the shared _readfrom reader.
        return cls._readfrom(fileobj=fileobj, mode=mode, memmap=memmap,
                             save_backup=save_backup, cache=cache, **kwargs)
    @classmethod
    def fromstring(cls, data, **kwargs):
        """
        Creates an `HDUList` instance from a string or other in-memory data
        buffer containing an entire FITS file.  Similar to
        :meth:`HDUList.fromfile`, but does not accept the mode or memmap
        arguments, as they are only relevant to reading from a file on disk.

        This is useful for interfacing with other libraries such as CFITSIO,
        and may also be useful for streaming applications.

        Parameters
        ----------
        data : str, buffer, memoryview, etc.
            A string or other memory buffer containing an entire FITS file.  It
            should be noted that if that memory is read-only (such as a Python
            string) the returned :class:`HDUList`'s data portions will also be
            read-only.

        kwargs : dict
            Optional keyword arguments.  See
            :func:`astropy.io.fits.open` for details.

        Returns
        -------
        hdul : HDUList
            An :class:`HDUList` object representing the in-memory FITS file.
        """
        # Same shared reader as fromfile, fed from a buffer instead of a file.
        return cls._readfrom(data=data, **kwargs)
def fileinfo(self, index):
"""
Returns a dictionary detailing information about the locations
of the indexed HDU within any associated file. The values are
only valid after a read or write of the associated file with
no intervening changes to the `HDUList`.
Parameters
----------
index : int
Index of HDU for which info is to be returned.
Returns
-------
fileinfo : dict or None
The dictionary | |
self.onSaveAs(self.currfile) # may be None
def onSaveAs(self, forcefile=None):
"""
retains successful encoding name here for next save, because this
may be the first Save after New or a manual text insertion; Save
and SaveAs may both use last known encoding, per config file (it
probably should be used for Save, but SaveAs usage is unclear);
gui prompts are prefilled with the known encoding if there is one;
does manual text.encode() to avoid creating file; text mode files
perform platform specific end-line conversion: Windows \r dropped
if present on open by text mode (auto) and binary mode (manually);
if manual content inserts, must delete \r else duplicates here;
knownEncoding=None before first Open or Save, after New, if binary Open;
encoding behavior is configurable in the local textConfig.py:
1) if savesUseKnownEncoding > 0, try encoding from last open or save
2) if savesAskUser True, try user input next (prefill with known?)
3) if savesEncoding nonempty, try this encoding next: 'utf-8', etc
4) tries sys.getdefaultencoding() as a last resort
"""
filename = forcefile or self.my_asksaveasfilename()
if not filename:
return
text = self.getAllText() # 2.1: a str string, with \n eolns,
encpick = None # even if read/inserted as bytes
# try known encoding at latest Open or Save, if any
if self.knownEncoding and ( # enc known?
(forcefile and self.savesUseKnownEncoding >= 1) or # on Save?
(not forcefile and self.savesUseKnownEncoding >= 2)): # on SaveAs?
try:
text.encode(self.knownEncoding)
encpick = self.knownEncoding
except UnicodeError:
pass
# try user input, prefill with known type, else next choice
if not encpick and self.savesAskUser:
self.update() # else dialog doesn't appear in rare cases
askuser = askstring('PyEdit', 'Enter Unicode encoding for save',
initialvalue=(self.knownEncoding or
self.savesEncoding or
sys.getdefaultencoding() or ''))
self.text.focus() # else must click
if askuser:
try:
text.encode(askuser)
encpick = askuser
except (UnicodeError, LookupError): # LookupError: bad name
pass # UnicodeError: can't encode
# try config file
if not encpick and self.savesEncoding:
try:
text.encode(self.savesEncoding)
encpick = self.savesEncoding
except (UnicodeError, LookupError):
pass
# try platform default (utf8 on windows)
if not encpick:
try:
text.encode(sys.getdefaultencoding())
encpick = sys.getdefaultencoding()
except (UnicodeError, LookupError):
pass
# open in text mode for endlines + encoding
if not encpick:
showerror('PyEdit', 'Could not encode for file ' + filename)
else:
try:
file = open(filename, 'w', encoding=encpick)
file.write(text)
file.close()
except:
showerror('PyEdit', 'Could not write file ' + filename)
else:
self.setFileName(filename) # may be newly created
self.text.edit_modified(0) # 2.0: clear modified flag
self.knownEncoding = encpick # 2.1: keep enc for next save
# don't clear undo/redo stks!
def onNew(self):
"""
start editing a new file from scratch in current window;
see onClone to pop-up a new independent edit window instead;
"""
if self.text_edit_modified(): # 2.0
if not askyesno('PyEdit', 'Text has changed: discard changes?'):
return
self.setFileName(None)
self.clearAllText()
self.text.edit_reset() # 2.0: clear undo/redo stks
self.text.edit_modified(0) # 2.0: clear modified flag
self.knownEncoding = None # 2.1: Unicode type unknown
def onQuit(self):
"""
on Quit menu/toolbar select and wm border X button in toplevel windows;
2.1: don't exit app if others changed; 2.0: don't ask if self unchanged;
moved to the top-level window classes at the end since may vary per usage:
a Quit in GUI might quit() to exit, destroy() just one Toplevel, Tk, or
edit frame, or not be provided at all when run as an attached component;
check self for changes, and if might quit(), main windows should check
other windows in the process-wide list to see if they have changed too;
"""
assert False, 'onQuit must be defined in window-specific sublass'
def text_edit_modified(self):
"""
2.1: this now works! seems to have been a bool result type issue in tkinter;
2.0: self.text.edit_modified() broken in Python 2.4: do manually for now;
"""
return self.text.edit_modified()
# return self.tk.call((self.text._w, 'edit') + ('modified', None))
############################################################################
# Edit menu commands
############################################################################
def onUndo(self): # 2.0
try: # tk8.4 keeps undo/redo stacks
self.text.edit_undo() # exception if stacks empty
except TclError: # menu tear-offs for quick undo
showinfo('PyEdit', 'Nothing to undo')
def onRedo(self): # 2.0: redo an undone
try:
self.text.edit_redo()
except TclError:
showinfo('PyEdit', 'Nothing to redo')
def onCopy(self): # get text selected by mouse, etc.
if not self.text.tag_ranges(SEL): # save in cross-app clipboard
showerror('PyEdit', 'No text selected')
else:
text = self.text.get(SEL_FIRST, SEL_LAST)
self.clipboard_clear()
self.clipboard_append(text)
def onDelete(self): # delete selected text, no save
if not self.text.tag_ranges(SEL):
showerror('PyEdit', 'No text selected')
else:
self.text.delete(SEL_FIRST, SEL_LAST)
def onCut(self):
if not self.text.tag_ranges(SEL):
showerror('PyEdit', 'No text selected')
else:
self.onCopy() # save and delete selected text
self.onDelete()
def onPaste(self):
try:
text = self.selection_get(selection='CLIPBOARD')
except TclError:
showerror('PyEdit', 'Nothing to paste')
return
self.text.insert(INSERT, text) # add at current insert cursor
self.text.tag_remove(SEL, '1.0', END)
self.text.tag_add(SEL, INSERT + '-%dc' % len(text), INSERT)
self.text.see(INSERT) # select it, so it can be cut
def onSelectAll(self):
self.text.tag_add(SEL, '1.0', END + '-1c') # select entire text
self.text.mark_set(INSERT, '1.0') # move insert point to top
self.text.see(INSERT) # scroll to top
############################################################################
# Search menu commands
############################################################################
def onGoto(self, forceline=None):
line = forceline or askinteger('PyEdit', 'Enter line number')
self.text.update()
self.text.focus()
if line is not None:
maxindex = self.text.index(END + '-1c')
maxline = int(maxindex.split('.')[0])
if line > 0 and line <= maxline:
self.text.mark_set(INSERT, '%d.0' % line) # goto line
self.text.tag_remove(SEL, '1.0', END) # delete selects
self.text.tag_add(SEL, INSERT, 'insert + 1l') # select line
self.text.see(INSERT) # scroll to line
else:
showerror('PyEdit', 'Bad line number')
def onFind(self, lastkey=None):
key = lastkey or askstring('PyEdit', 'Enter search string')
self.text.update()
self.text.focus()
self.lastfind = key
if key: # 2.0: nocase
nocase = configs.get('caseinsens', True) # 2.0: config
where = self.text.search(key, INSERT, END, nocase=nocase)
if not where: # don't wrap
showerror('PyEdit', 'String not found')
else:
pastkey = where + '+%dc' % len(key) # index past key
self.text.tag_remove(SEL, '1.0', END) # remove any sel
self.text.tag_add(SEL, where, pastkey) # select key
self.text.mark_set(INSERT, pastkey) # for next find
self.text.see(where) # scroll display
    def onRefind(self):
        # repeat the previous search from the current cursor position
        self.onFind(self.lastfind)
def onChange(self):
"""
non-modal find/change dialog
2.1: pass per-dialog inputs to callbacks, may be > 1 change dialog open
"""
new = Toplevel(self)
new.title('PyEdit - change')
Label(new, text='Find text?', relief=RIDGE, width=15).grid(row=0, column=0)
Label(new, text='Change to?', relief=RIDGE, width=15).grid(row=1, column=0)
entry1 = Entry(new)
entry2 = Entry(new)
entry1.grid(row=0, column=1, sticky=EW)
entry2.grid(row=1, column=1, sticky=EW)
def onFind(): # use my entry in enclosing scope
self.onFind(entry1.get()) # runs normal find dialog callback
def onApply():
self.onDoChange(entry1.get(), entry2.get())
Button(new, text='Find', command=onFind).grid(row=0, column=2, sticky=EW)
Button(new, text='Apply', command=onApply).grid(row=1, column=2, sticky=EW)
new.columnconfigure(1, weight=1) # expandable entries
def onDoChange(self, findtext, changeto):
# on Apply in change dialog: change and refind
if self.text.tag_ranges(SEL): # must find first
self.text.delete(SEL_FIRST, SEL_LAST)
self.text.insert(INSERT, changeto) # deletes if empty
self.text.see(INSERT)
self.onFind(findtext) # goto next appear
self.text.update() # force refresh
def onGrep(self):
"""
TBD: better to issue an error if any file fails to decode?
but utf-16 2-bytes/char format created in Notepad may decode
without error per utf-8, and search strings won't be found;
TBD: could allow input of multiple encoding names, split on
comma, try each one for every file, without open loadEncode?
"""
from minghu6.gui.formrows import makeFormRow
# nonmodal dialog: get dirnname, filenamepatt, grepkey
popup = Toplevel()
popup.title('PyEdit - grep')
var1 = makeFormRow(popup, label='Directory root', width=18, browse=False)
var2 = makeFormRow(popup, label='Filename pattern', width=18, browse=False)
var3 = makeFormRow(popup, label='Search string', width=18, browse=False)
var4 = makeFormRow(popup, label='Content encoding', width=18, browse=False)
var1.set('.') # current dir
var2.set('*.py') # initial values
var4.set(sys.getdefaultencoding()) # for file content, not filenames
cb = lambda: self.onDoGrep(var1.get(), var2.get(), var3.get(), var4.get())
Button(popup, text='Go', command=cb).pack()
    def onDoGrep(self, dirname, filenamepatt, grepkey, encoding):
        """
        on Go in grep dialog: populate scrolled list with matches
        tbd: should producer thread be daemon so it dies with app?
        """
        import threading, queue
        # make non-modal un-closeable dialog
        # NOTE(review): this creates a second Tk root rather than a Toplevel;
        # presumably deliberate so the popup outlives its parent -- confirm.
        mypopup = Tk()
        mypopup.title('PyEdit - grepping')
        status = Label(mypopup, text='Grep thread searching for: %r...' % grepkey)
        status.pack(padx=20, pady=20)
        mypopup.protocol('WM_DELETE_WINDOW', lambda: None)  # ignore X close
        # start producer thread, consumer loop; the queue decouples the
        # filesystem walk from GUI updates
        myqueue = queue.Queue()
        threadargs = (filenamepatt, dirname, grepkey, encoding, myqueue)
        threading.Thread(target=self.grepThreadProducer, args=threadargs).start()
        self.grepThreadConsumer(grepkey, encoding, myqueue, mypopup)
def grepThreadProducer(self, filenamepatt, dirname, grepkey, encoding, myqueue):
"""
in | |
default_stream_filter
self._default_file_filter_name = default_file_filter
self._default_stream_filter = _select(default_stream_filter)
self._default_string_filter = _select(default_string_filter)
default_file_filter = default_file_filter or default_stream_filter
self._default_file_filter = _select(default_file_filter)
def __getitem__(self, item):
if item == generic.NameObject('/Identity'):
return IdentityCryptFilter()
return self._crypt_filters[item]
def __contains__(self, item):
return (
item == generic.NameObject('/Identity')
or item in self._crypt_filters
)
def filters(self):
"""Enumerate all crypt filters in this configuration."""
return self._crypt_filters.values()
def set_security_handler(self, handler: 'SecurityHandler'):
"""
Set the security handler on all crypt filters in this configuration.
:param handler:
A :class:`.SecurityHandler` instance.
"""
for cf in self._crypt_filters.values():
cf._set_security_handler(handler)
def get_for_stream(self):
"""
Retrieve the default crypt filter to use with streams.
:return:
A :class:`.CryptFilter` instance.
"""
return self._default_stream_filter
def get_for_string(self):
"""
Retrieve the default crypt filter to use with strings.
:return:
A :class:`.CryptFilter` instance.
"""
return self._default_string_filter
def get_for_embedded_file(self):
"""
Retrieve the default crypt filter to use with embedded files.
:return:
A :class:`.CryptFilter` instance.
"""
return self._default_file_filter
@property
def stream_filter_name(self) -> generic.NameObject:
"""
The name of the default crypt filter to use with streams.
"""
return self._default_stream_filter_name
    @property
    def string_filter_name(self) -> generic.NameObject:
        """
        The name of the default crypt filter to use with strings.
        """
        return self._default_string_filter_name
@property
def embedded_file_filter_name(self) -> generic.NameObject:
"""
Retrieve the name of the default crypt filter to use with embedded
files.
"""
return self._default_file_filter_name
def as_pdf_object(self):
"""
Serialise this crypt filter configuration to a dictionary object,
including all its subordinate crypt filters (with the exception of
the identity filter, if relevant).
"""
result = generic.DictionaryObject()
result['/StmF'] = self._default_stream_filter_name
result['/StrF'] = self._default_string_filter_name
if self._default_file_filter_name is not None:
result['/EFF'] = self._default_file_filter_name
result['/CF'] = generic.DictionaryObject({
generic.NameObject(key): value.as_pdf_object()
for key, value in self._crypt_filters.items() if key != IDENTITY
})
return result
def standard_filters(self):
"""
Return the "standard" filters associated with this crypt filter
configuration, i.e. those registered as the defaults for strings,
streams and embedded files, respectively.
These sometimes require special treatment (as per the specification).
:return:
A set with one, two or three elements.
"""
stmf = self._default_stream_filter
strf = self._default_string_filter
eff = self._default_file_filter
return {stmf, strf, eff}
def _std_rc4_config(keylen):
    """Config with one standard RC4 filter as both stream and string default."""
    rc4_filter = StandardRC4CryptFilter(keylen=keylen)
    return CryptFilterConfiguration(
        {STD_CF: rc4_filter},
        default_stream_filter=STD_CF,
        default_string_filter=STD_CF,
    )
def _pubkey_rc4_config(keylen, recipients=None, encrypt_metadata=True):
    """Config with one public-key RC4 filter as both stream and string default."""
    rc4_filter = PubKeyRC4CryptFilter(
        keylen=keylen, acts_as_default=True, recipients=recipients,
        encrypt_metadata=encrypt_metadata
    )
    return CryptFilterConfiguration(
        {DEFAULT_CRYPT_FILTER: rc4_filter},
        default_stream_filter=DEFAULT_CRYPT_FILTER,
        default_string_filter=DEFAULT_CRYPT_FILTER,
    )
def _std_aes_config(keylen):
    """Config with one standard AES filter as both stream and string default."""
    aes_filter = StandardAESCryptFilter(keylen=keylen)
    return CryptFilterConfiguration(
        {STD_CF: aes_filter},
        default_stream_filter=STD_CF,
        default_string_filter=STD_CF,
    )
def _pubkey_aes_config(keylen, recipients=None, encrypt_metadata=True):
    """Config with one public-key AES filter as both stream and string default."""
    aes_filter = PubKeyAESCryptFilter(
        keylen=keylen, acts_as_default=True, recipients=recipients,
        encrypt_metadata=encrypt_metadata
    )
    return CryptFilterConfiguration(
        {DEFAULT_CRYPT_FILTER: aes_filter},
        default_stream_filter=DEFAULT_CRYPT_FILTER,
        default_string_filter=DEFAULT_CRYPT_FILTER,
    )
# Callable signature: (crypt filter dictionary, acts_as_default flag) -> filter.
CryptFilterBuilder = Callable[[generic.DictionaryObject, bool], CryptFilter]
"""
Type alias for a callable that produces a crypt filter from a dictionary.
"""
def build_crypt_filter(reg: Dict[generic.NameObject, CryptFilterBuilder],
                       cfdict: generic.DictionaryObject,
                       acts_as_default: bool) -> Optional[CryptFilter]:
    """
    Interpret a crypt filter dictionary for a security handler.

    :param reg:
        A registry of named crypt filters.
    :param cfdict:
        A crypt filter dictionary.
    :param acts_as_default:
        Indicates whether this filter is intended to be used in
        ``/StrF`` or ``/StmF``.
    :return:
        An appropriate :class:`.CryptFilter` object, or ``None``
        if the crypt filter uses the ``/None`` method.
    :raise NotImplementedError:
        Raised when the crypt filter's ``/CFM`` entry indicates an unknown
        crypt filter method.
    """
    try:
        cfm = cfdict['/CFM']
    except KeyError:
        # no method specified -> nothing to build
        return None
    if cfm == '/None':
        return None
    try:
        factory = reg[cfm]
    except KeyError as e:
        # chain the lookup failure so the original cause stays visible
        raise NotImplementedError("No such crypt filter method: " + cfm) from e
    return factory(cfdict, acts_as_default)
def _build_legacy_standard_crypt_filter(cfdict: generic.DictionaryObject,
                                        _acts_as_default):
    """Instantiate a standard RC4 crypt filter from a legacy CF dictionary."""
    # /Length is expressed in bits (default 40); the constructor takes bytes
    bit_length = cfdict.get('/Length', 40)
    return StandardRC4CryptFilter(keylen=bit_length // 8)
@SecurityHandler.register
class StandardSecurityHandler(SecurityHandler):
"""
Implementation of the standard (password-based) security handler.
You shouldn't have to instantiate :class:`.StandardSecurityHandler` objects
yourself. For encrypting new documents, use :meth:`build_from_pw`
or :meth:`build_from_pw_legacy`.
For decrypting existing documents, pyHanko will take care of instantiating
security handlers through :meth:`.SecurityHandler.build`.
"""
_known_crypt_filters: Dict[generic.NameObject, CryptFilterBuilder] = {
'/V2': _build_legacy_standard_crypt_filter,
'/AESV2': lambda _, __: StandardAESCryptFilter(keylen=16),
'/AESV3': lambda _, __: StandardAESCryptFilter(keylen=32),
'/Identity': lambda _, __: IdentityCryptFilter()
}
@classmethod
def get_name(cls) -> str:
return generic.NameObject('/Standard')
    @classmethod
    def build_from_pw_legacy(cls, rev: StandardSecuritySettingsRevision,
                             id1, desired_owner_pass, desired_user_pass=None,
                             keylen_bytes=16, use_aes128=True,
                             perms: int = ALL_PERMS,
                             crypt_filter_config=None, **kwargs):
        """
        Initialise a legacy password-based security handler, to attach to a
        :class:`~.pyhanko.pdf_utils.writer.PdfFileWriter`.
        Any remaining keyword arguments will be passed to the constructor.
        .. danger::
            The functionality implemented by this handler is deprecated in the
            PDF standard. We only provide it for testing purposes, and to
            interface with legacy systems.
        :param rev:
            Security handler revision to use, see
            :class:`.StandardSecuritySettingsRevision`.
        :param id1:
            The first part of the document ID.
        :param desired_owner_pass:
            Desired owner password.
        :param desired_user_pass:
            Desired user password. Defaults to the owner password when omitted.
        :param keylen_bytes:
            Length of the key (in bytes). Ignored (overridden) for revisions
            with a fixed key length.
        :param use_aes128:
            Use AES-128 instead of RC4 (default: ``True``).
        :param perms:
            Permission bits to set (defined as an integer)
        :param crypt_filter_config:
            Custom crypt filter configuration. PyHanko will supply a reasonable
            default if none is specified.
        :return:
            A :class:`StandardSecurityHandler` instance.
        """
        desired_owner_pass = _legacy_normalise_pw(desired_owner_pass)
        # the user password falls back to the owner password if not supplied
        desired_user_pass = (
            _legacy_normalise_pw(desired_user_pass)
            if desired_user_pass is not None else desired_owner_pass
        )
        if rev > StandardSecuritySettingsRevision.RC4_OR_AES128:
            raise ValueError(
                f"{rev} is not supported by this bootstrapping method."
            )
        # basic RC4 mandates a 5-byte (40-bit) key; AES-128 a 16-byte key
        if rev == StandardSecuritySettingsRevision.RC4_BASIC:
            keylen_bytes = 5
        elif use_aes128 and \
                rev == StandardSecuritySettingsRevision.RC4_OR_AES128:
            keylen_bytes = 16
        # derive the /O (owner password) entry
        o_entry = _compute_o_value_legacy(
            desired_owner_pass, desired_user_pass, rev.value, keylen_bytes
        )
        # force perms to a 4-byte format
        perms = _as_signed(perms & 0xfffffffc)
        if rev == StandardSecuritySettingsRevision.RC4_BASIC:
            # some permissions are not available for these security handlers
            perms = _as_signed(perms | 0xffffffc0)
            u_entry, key = _compute_u_value_r2(
                desired_user_pass, o_entry, perms, id1
            )
        else:
            u_entry, key = _compute_u_value_r34(
                desired_user_pass, rev.value, keylen_bytes, o_entry, perms, id1
            )
        # map the settings revision to the corresponding handler version
        if rev == StandardSecuritySettingsRevision.RC4_OR_AES128:
            version = SecurityHandlerVersion.RC4_OR_AES128
        elif rev == StandardSecuritySettingsRevision.RC4_BASIC:
            version = SecurityHandlerVersion.RC4_40
        else:
            version = SecurityHandlerVersion.RC4_LONGER_KEYS
        # only the RC4_OR_AES128 revision takes a crypt filter configuration;
        # supply a sensible default when the caller didn't pass one
        if rev == StandardSecuritySettingsRevision.RC4_OR_AES128 and \
                crypt_filter_config is None:
            if use_aes128:
                crypt_filter_config = _std_aes_config(keylen=16)
            else:
                crypt_filter_config = _std_rc4_config(keylen=keylen_bytes)
        sh = cls(
            version=version, revision=rev, legacy_keylen=keylen_bytes,
            perm_flags=perms, odata=o_entry,
            udata=u_entry, crypt_filter_config=crypt_filter_config,
            **kwargs
        )
        # cache the shared encryption key derived alongside /U
        sh._shared_key = key
        return sh
    @classmethod
    def build_from_pw(cls, desired_owner_pass, desired_user_pass=None,
                      perms=ALL_PERMS, encrypt_metadata=True, **kwargs):
        """
        Initialise a password-based security handler backed by AES-256,
        to attach to a :class:`~.pyhanko.pdf_utils.writer.PdfFileWriter`.
        This handler will use the new PDF 2.0 encryption scheme.
        Any remaining keyword arguments will be passed to the constructor.
        :param desired_owner_pass:
            Desired owner password.
        :param desired_user_pass:
            Desired user password. Defaults to the owner password when omitted.
        :param perms:
            Desired usage permissions.
        :param encrypt_metadata:
            Whether to set up the security handler for encrypting metadata
            as well.
        :return:
            A :class:`StandardSecurityHandler` instance.
        """
        owner_pw_bytes = _r6_normalise_pw(desired_owner_pass)
        user_pw_bytes = (
            _r6_normalise_pw(desired_user_pass)
            if desired_user_pass is not None
            else owner_pw_bytes
        )
        # the actual file encryption key is random; the passwords only guard
        # the two encrypted copies of it (the /UE and /OE entries below)
        encryption_key = secrets.token_bytes(32)
        u_validation_salt = secrets.token_bytes(8)
        u_key_salt = secrets.token_bytes(8)
        # /U = user password hash + validation salt + key salt (48 bytes)
        u_hash = _r6_hash_algo(user_pw_bytes, u_validation_salt)
        u_entry = u_hash + u_validation_salt + u_key_salt
        # /UE: the file key, AES-CBC-encrypted (zero IV, no padding) under a
        # key derived from the user password and the key salt
        u_interm_key = _r6_hash_algo(user_pw_bytes, u_key_salt)
        _, ue_seed = _aes_cbc_encrypt(
            u_interm_key, encryption_key, bytes(16), use_padding=False
        )
        assert len(ue_seed) == 32
        o_validation_salt = secrets.token_bytes(8)
        o_key_salt = secrets.token_bytes(8)
        # /O and /OE: same construction, but the owner hashes also cover /U
        o_hash = _r6_hash_algo(owner_pw_bytes, o_validation_salt, u_entry)
        o_entry = o_hash + o_validation_salt + o_key_salt
        o_interm_key = _r6_hash_algo(owner_pw_bytes, o_key_salt, u_entry)
        _, oe_seed = _aes_cbc_encrypt(
            o_interm_key, encryption_key, bytes(16), use_padding=False
        )
        assert len(oe_seed) == 32
        # /Perms input: 4 perm bytes + 4 filler bytes + metadata flag ('T'/'F')
        # + literal 'adb' + 4 random bytes = exactly one 16-byte AES block
        perms_bytes = struct.pack('<I', perms & 0xfffffffc)
        extd_perms_bytes = (
            perms_bytes + (b'\xff' * 4)
            + (b'T' if encrypt_metadata else b'F')
            + b'adb' + secrets.token_bytes(4)
        )
        # need to encrypt one 16 byte block in ECB mode
        # [I _really_ don't like the way this part of the spec works, but
        # we have to sacrifice our principles on the altar of backwards
        # compatibility.]
        cipher = Cipher(algorithms.AES(encryption_key), modes.ECB())
        encryptor = cipher.encryptor()
        encrypted_perms = \
            encryptor.update(extd_perms_bytes) + encryptor.finalize()
        sh = cls(
            version=SecurityHandlerVersion.AES256,
            revision=StandardSecuritySettingsRevision.AES256,
            legacy_keylen=32, perm_flags=perms, odata=o_entry,
            udata=u_entry, oeseed=oe_seed, ueseed=ue_seed,
            encrypted_perms=encrypted_perms, encrypt_metadata=encrypt_metadata,
            **kwargs
        )
        # cache the randomly generated file encryption key
        sh._shared_key = encryption_key
        return sh
@staticmethod
def _check_r6_values(udata, odata, oeseed, ueseed, encrypted_perms, rev=6):
if not (len(udata) == len(odata) == 48):
raise misc.PdfError(
"/U and /O entries must be 48 bytes long in a "
f"rev. {rev} security handler"
) # pragma: nocover
if not oeseed or not ueseed or \
not (len(oeseed) == len(ueseed) == 32):
raise misc.PdfError(
"/UE and /OE must be present and be 32 bytes long in a "
f"rev. {rev} security handler"
) # pragma: nocover
if not encrypted_perms or len(encrypted_perms) != 16:
raise misc.PdfError(
"/Perms must be present and be 16 bytes long in a "
f"rev. {rev} security handler"
) # pragma: nocover
def __init__(self, version: SecurityHandlerVersion,
revision: StandardSecuritySettingsRevision,
legacy_keylen, # in bytes, not bits
perm_flags: int, odata, udata, oeseed=None,
ueseed=None, encrypted_perms=None, encrypt_metadata=True,
crypt_filter_config: CryptFilterConfiguration = None,
compat_entries=True):
if crypt_filter_config is None:
if version == SecurityHandlerVersion.RC4_40:
crypt_filter_config = _std_rc4_config(5)
elif version == SecurityHandlerVersion.RC4_LONGER_KEYS:
| |
request: Request instance for DeleteRecordPlan.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DeleteRecordPlanRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DeleteRecordPlanResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteRecordPlan", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteRecordPlanResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteScene(self, request):
"""删除场景
:param request: Request instance for DeleteScene.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DeleteSceneRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DeleteSceneResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteScene", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteSceneResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteTimeTemplate(self, request):
"""本接口(DeleteTimeTemplate) 用于删除时间模板。
:param request: Request instance for DeleteTimeTemplate.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DeleteTimeTemplateRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DeleteTimeTemplateResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteTimeTemplate", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteTimeTemplateResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteVideoList(self, request):
"""删除录像存储列表
:param request: Request instance for DeleteVideoList.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DeleteVideoListRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DeleteVideoListResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteVideoList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteVideoListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeAllDeviceList(self, request):
"""本接口(DescribeAllDeviceList) 用于获取设备列表。
:param request: Request instance for DescribeAllDeviceList.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeAllDeviceListRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeAllDeviceListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeAllDeviceList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeAllDeviceListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeBindSceneDevices(self, request):
"""获取场景绑定设备列表
:param request: Request instance for DescribeBindSceneDevices.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeBindSceneDevicesRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeBindSceneDevicesResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeBindSceneDevices", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeBindSceneDevicesResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeChannelsByLiveRecordPlan(self, request):
"""根据直播录制计划获取频道列表
:param request: Request instance for DescribeChannelsByLiveRecordPlan.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeChannelsByLiveRecordPlanRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeChannelsByLiveRecordPlanResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeChannelsByLiveRecordPlan", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeChannelsByLiveRecordPlanResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeDeviceGroup(self, request):
"""本接口(DescribeDeviceGroup)用于根据设备ID查询设备所在分组信息,可批量查询。
:param request: Request instance for DescribeDeviceGroup.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeDeviceGroupRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeDeviceGroupResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDeviceGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDeviceGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeDevicePassWord(self, request):
"""本接口(DescribeDevicePassWord)用于查询设备密码。
:param request: Request instance for DescribeDevicePassWord.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeDevicePassWordRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeDevicePassWordResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDevicePassWord", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDevicePassWordResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeDeviceStreams(self, request):
"""本接口(DescribeDeviceStreams)用于获取设备实时流地址。
:param request: Request instance for DescribeDeviceStreams.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeDeviceStreamsRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeDeviceStreamsResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDeviceStreams", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDeviceStreamsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeGroupById(self, request):
"""本接口(DescribeGroupById)用于根据分组ID查询分组。
:param request: Request instance for DescribeGroupById.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeGroupByIdRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeGroupByIdResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeGroupById", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeGroupByIdResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeGroupByPath(self, request):
"""根据分组路径查询分组
:param request: Request instance for DescribeGroupByPath.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeGroupByPathRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeGroupByPathResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeGroupByPath", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeGroupByPathResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeGroupDevices(self, request):
"""本接口(DescribeGroupDevices)用于查询分组下的设备列表。
:param request: Request instance for DescribeGroupDevices.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeGroupDevicesRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeGroupDevicesResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeGroupDevices", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeGroupDevicesResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeGroups(self, request):
"""本接口(DescribeGroups)用于批量查询分组信息。
:param request: Request instance for DescribeGroups.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeGroupsRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeGroupsResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeGroups", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeGroupsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeIPCChannels(self, request):
"""获取IPC设备下属通道
:param request: Request instance for DescribeIPCChannels.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeIPCChannelsRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeIPCChannelsResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeIPCChannels", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeIPCChannelsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeLiveChannel(self, request):
"""直播详情接口
:param request: Request instance for DescribeLiveChannel.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeLiveChannelRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeLiveChannelResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeLiveChannel", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeLiveChannelResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeLiveChannelList(self, request):
"""直播列表接口
:param request: Request instance for DescribeLiveChannelList.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeLiveChannelListRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeLiveChannelListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeLiveChannelList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeLiveChannelListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeLiveRecordPlanById(self, request):
"""获取直播录制计划详情
:param request: Request instance for DescribeLiveRecordPlanById.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeLiveRecordPlanByIdRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeLiveRecordPlanByIdResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeLiveRecordPlanById", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeLiveRecordPlanByIdResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeLiveRecordPlanIds(self, request):
"""获取直播录制计划列表
:param request: Request instance for DescribeLiveRecordPlanIds.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeLiveRecordPlanIdsRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeLiveRecordPlanIdsResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeLiveRecordPlanIds", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeLiveRecordPlanIdsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeLiveStream(self, request):
"""直播拉流接口
:param request: Request instance for DescribeLiveStream.
:type request: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeLiveStreamRequest`
:rtype: :class:`tencentcloud.iotvideoindustry.v20201201.models.DescribeLiveStreamResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeLiveStream", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeLiveStreamResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeLiveVideoList(self, request):
| |
attr)
# return super().__getattr__(self, attr)
class Imaginary(Object):
    """Object subclass for planner-only ("imaginary") instances that are not
    backed by a concrete object."""

    def gen_name(self, name):
        # During problem compilation, imaginary objects are addressed via a
        # chain of hashnum symbols instead of a regular generated name.
        global _problem_compilation
        if _problem_compilation:
            hns = gen_hashnums(HASHNUM_DEPTH_DEFAULT)
            return ' '.join([v.poodle_internal__sym_name for v in hns])
        else:
            return super().gen_name(name)

    def __init__(self, value=None, _force_name=None):
        self.__imaginary__ = True
        super().__init__(value, _force_name)
        self._class_variable = gen_var_imaginary(self.__class__.__name__, prefix="im-")
        global _effect_compilation
        global _collected_predicates
        global _collected_effects
        global _collected_parameters
        if _effect_compilation:
            self._new_fresh = True
            self._parse_history = []
            # NOTE(review): overwrites the "im-" variable generated above —
            # presumably intentional under effect compilation; confirm.
            self._class_variable = gen_var_imaginary(self.__class__.__name__)
            exists_predicate = gen_one_predicate(self.__class__.__name__+HASHNUM_EXISTS_PFX, self._class_variable, self.__class__.__name__)
            for v in self._class_variable.split():
                _collected_predicates.append("("+HASHNUM_ID_PREDICATE + " " + v + ")")
            _collected_predicates.append("(not %s)" % exists_predicate)
            # BUG FIX: the id-predicate template was appended twice (once via
            # string concatenation and once via str.format, producing the
            # exact same string); register it only once.
            _collected_predicate_templates.append("({pred} ?var - {cls})".format(pred=HASHNUM_ID_PREDICATE, cls=HASHNUM_CLASS_NAME))
            _collected_effects.append(exists_predicate)
            _collected_parameters[self._class_variable] = HASHNUM_CLASS_NAME
class _StringFactory:
    """Interning cache mapping raw values to their ``String`` objects."""
    def __init__(self):
        self.reset()
    def get(self, value):
        """Return the interned String for *value*, creating it on first use."""
        try:
            return self.values[value]
        except KeyError:
            # Constructing through the factory marks this as an internal call.
            interned = String(value)
            self.values[value] = interned
            return interned
    def get_objects(self):
        """Return every interned String instance as a list."""
        return [*self.values.values()]
    def reset(self):
        """Drop all cached values."""
        self.values = {}
# Module-level singleton: String instances consult this cache so equal
# concrete values share one symbolic name.
stringFactory = _StringFactory()
class String(Object):
    """Immutable string-valued Object.

    Equal concrete (non-variable) strings share a symbolic name via the
    module-level ``stringFactory`` intern cache. Attribute access/mutation
    is blocked except for internal (``_`` / ``poodle_internal``) names.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Re-use the interned symbol name for an already-known value so
        # equal concrete strings are grounded identically.
        if self.poodle_internal__value in stringFactory.values and not self._variable_mode:
            self.poodle_internal__sym_name = stringFactory.values[self.poodle_internal__value].poodle_internal__sym_name
    def __setattr__(self, name, value):
        # Only internal attributes may be assigned; strings are immutable.
        if not name.startswith("_") and not name.startswith("poodle_internal"):
            raise TypeError("Strings are immutable")
        return super().__setattr__(name, value)
    def __getattr__(self, name):
        if not name.startswith("_") and not name.startswith("poodle_internal"):
            raise TypeError("Strings are immutable")
        # Bug fix: the original called super().__getattr__(name, value)
        # where ``value`` is undefined here — any internal-attribute miss
        # raised NameError instead of delegating to Object.
        return super().__getattr__(name)
    def __str__(self):
        return str(self._get_value())
    def __repr__(self):
        return repr(str(self))
    def __hash__(self):
        # Hash by raw value, consistent with the value-based __eq__ below.
        return hash(self.poodle_internal__value)
    def __eq__(self, other):
        other = resolve_poodle_special_object(other)
        # Two concrete string objects compare by raw value; anything else
        # (e.g. variable mode) falls back to Object equality.
        if isinstance(other, Object) and isinstance(self.poodle_internal__value, str) \
                and isinstance(other.poodle_internal__value, str) \
                and not self._variable_mode \
                and not other._variable_mode:
            return self.poodle_internal__value == other.poodle_internal__value
        return super().__eq__(other)
# A static object initializes itself with instances static_values
class StaticObject(Object):
    """Placeholder — initialization from static_values is not implemented."""
    # TODO: implement initialization from static_values
    pass
class PoodleHashnum(Object):
    """Hashnum marker class used in imaginary object identification."""
    pass # unsorted, unoptimized
class BooleanObject(Object):
    """Object subclass for the built-in TRUE/FALSE system constants."""
    pass
# Register the two boolean constants as system objects. The
# _problem_compilation flag is toggled around the construction —
# presumably so name generation takes the problem-compilation path;
# TODO(review) confirm, since BooleanObject is not Imaginary.
_problem_compilation = True
_system_objects["object-True"] = BooleanObject("TRUE")
_system_objects["object-False"] = BooleanObject("FALSE")
_collected_object_classes.add("BooleanObject") # TODO THIS DOES NOT WORK FIXME
_problem_compilation = False
#########################################################################
##
## Domain Definition
##
class PlannedAction(metaclass=ActionMeta):
# class PlannedAction():
    """Base class for planner actions.

    Subclasses define ``selector()`` (precondition selection) and
    ``effect()``; ``compile()`` turns the class into a PDDL action and
    ``compile_clips()`` into a CLIPS rule. Instances represent a planned
    step with its bound argument objects.
    """
    cost = 1  # PDDL action cost used in the (increase (total-cost) ...) effect
    # NOTE(review): class-level mutable containers below are shared across
    # subclasses unless reassigned — confirm this is intended.
    argumentList = []
    parameterList = []
    problem = None
    template = None
    _clips_rhs = []
    _clips_lhs = []
    collected_parameters = {}
    def __init__(self, **kwargs):
        """Bind keyword objects as attributes; seal Object arguments."""
        self._planned_objects_dict = kwargs
        for k,v in kwargs.items():
            if isinstance(v, Object):
                v._sealed = True
            setattr(self, k, v)
    def __str__(self):
        return self.render(self._planned_objects_dict)
    def __repr__(self):
        return self._default_render(self._planned_objects_dict)
    def render(self, obj_dict):
        """Hook for subclasses to customize the printable form of a step."""
        return self._default_render(obj_dict)
    def _default_render(self, obj_dict):
        # "ActionName: arg1=value, arg2=value" for every Object attribute.
        return self.__class__.__name__+": "+", ".join('%s=%s' % (n, obj_dict.get(n)) for n in dir(self) if isinstance(getattr(self,n), Object))
        # return ", ".join(repr(getattr(self,n)) for n in dir(self))
        # return repr(dir(self))
        # return ', '.join("%s: %s" % item for item in attrs.items() if isinstance(item[1], Property))
        # return ', '.join("%s: %s" % item for item in attrs.items() )
        # ret = "{0}".format(self.__class__.__name__)
        # for arg in self.argumentList:
        #     ret +=" {0}({1})".format(arg.poodle_internal__sym_name, arg.poodle_internal__value)
        # return ret
    @classmethod
    def compile(cls, problem):
        """Compile this action class into a PDDL ``(:action ...)`` string.

        Runs selector() and effect() under the module-level compilation
        flags, which makes them populate the global _collected_* lists;
        the result is assembled from those lists. Not thread-safe.
        """
        # TODO: acquire lock for multithreaded!!!
        global _compilation
        global _collected_predicates
        global _collected_parameters
        global _collected_effects
        global _selector_out
        global _effect_compilation
        assert _selector_out is None, "Selector operators used outside of Select() decorator while compiling %s in %s" % (cls, problem)
        # Reset the global collectors before (re)compiling this action.
        _collected_predicates = []
        _collected_parameters = {}
        _collected_effects = []
        _compilation = True
        cls.problem = problem
        sel_ret = cls.selector(cls)
        cls.selector_objects = []
        if sel_ret != "DEFAULT":
            assert type(sel_ret) != type(True) and not sel_ret is None, "selector() does not return supported value in %s (value was %s)" % (repr(cls), repr(sel_ret))
            if type(sel_ret) == type([]):
                cls.selector_objects = sel_ret
            else:
                cls.selector_objects = [sel_ret]
        # effect() runs with _effect_compilation set so object creation is
        # recorded into _collected_effects rather than executed.
        _effect_compilation = True
        log.info("{0}".format(cls.effect(cls)))
        _effect_compilation = False
        _compilation = False
        # _collected_predicates = filter(None, list(set(_collected_predicates)))
        # Deduplicate while preserving order, then apply replacements.
        _collected_predicates = list(filter(None, list(OrderedDict.fromkeys(cls._class_collected_predicates + _collected_predicates))))
        for k in _replaced_predicates:
            if not k in _collected_predicates: continue
            _collected_predicates.remove(k)
            _collected_predicates.append(_replaced_predicates[k])
        collected_parameters = ""
        assert len(_collected_effects) > 0, "Action %s has no effect" % cls.__name__
        assert len(_collected_predicates) > 0, "Action %s has nothing to select" % cls.__name__
        cls.collected_parameters = {}
        cls.collected_parameters.update(_collected_parameters)
        cls.collected_parameters.update(cls._class_collected_parameters)
        cls.collected_predicates = _collected_predicates
        cls.collected_effects = _collected_effects
        # Render the ":parameters" declaration string.
        for ob in cls.collected_parameters:
            if not "?" in ob: continue # hack fix for object name leak into params
            if " " in ob:
                # WARNING! this is because of how imaginary variables are implemented
                # collected_parameters += "%s - %s " % (ob.split()[0], _collected_parameters[ob])
                # collected_parameters += "%s - %s " % (ob.split()[1], _collected_parameters[ob])
                collected_parameters += "%s - %s " % (ob.split()[0], HASHNUM_CLASS_NAME)
                collected_parameters += "%s - %s " % (ob.split()[1], HASHNUM_CLASS_NAME)
            else:
                collected_parameters += "%s - %s " % (ob, cls.collected_parameters[ob])
        assert len(collected_parameters) > 0
        deduplicate_equals(_collected_predicates)
        return """
        (:action {action_name}
            :parameters ({parameters})
            :precondition (and
                {precondition}
            )
            :effect (and
                {effect}
                {cost}
            )
        )
        """.format(action_name = cls.__name__,
                   parameters=collected_parameters.strip(),
                   precondition='\n                '.join(_collected_predicates),
                   effect='\n                '.join(_collected_effects),
                   cost='(increase (total-cost) {0})'.format(cls.cost)
                   )
    @classmethod
    def get_clips_lhs_rhs(cls, problem):
        """Return (lhs, rhs) clause lists for a CLIPS rule; cached after first build."""
        if cls._clips_rhs:
            return cls._clips_lhs, cls._clips_rhs
        cls.compile(problem)
        lhs = copy.copy(cls.collected_predicates)
        rhs = []
        # Equality predicates become CLIPS test clauses.
        lhs = [ "(test %s)" % r.replace("=", "eq") if r.startswith("(=") else r for r in lhs ]
        for p in cls.collected_effects:
            if p.startswith("(not"):
                # Negative effect: bind the matched fact and retract it.
                fname = "?f"+str(new_id())
                retracting_predicate = p.replace("(not","")[:-1].strip()
                assert retracting_predicate in lhs, "ProgrammingError: retracting predicate %s not found in precondition of %s" % (p, repr(cls))
                lhs = [ fname+" <- "+r if r == retracting_predicate else r for r in lhs ]
                cl = "(retract %s)" % fname
            else:
                cl = "(assert {ce})".format(ce=p)
            rhs.append(cl)
        cls._clips_lhs = lhs
        cls._clips_rhs = rhs
        return lhs, rhs
    @classmethod
    def compile_clips(cls, problem):
        """Compile this action class into a CLIPS ``defrule`` string."""
        lhs, rhs = cls.get_clips_lhs_rhs(problem)
        return """
        (defrule {name}
            {lhs}
            =>
            {rhs}
        )
        """.format(name=cls.__name__,lhs='\n            '.join(lhs),
                   rhs='\n            '.join(rhs))
    def selector(self):
        """Default precondition selector; subclasses override."""
        return "DEFAULT"
        # raise NotImplementedError
    def effect(self):
        raise NotImplementedError("effect() in %s not implemented" % repr(self))
    def __call__(self):
        # NOTE(review): methodName / kwargs / wrappedMethod are not defined
        # in this class — presumably injected by ActionMeta or the plan
        # loader; confirm before calling instances directly.
        if hasattr(self.problem, self.methodName):
            return getattr(self.problem, self.methodName)(**self.kwargs)
        else:
            return self.wrappedMethod[0].__call__(**self.kwargs)
class PlannedActionJinja2(PlannedAction):
    """PlannedAction variant intended to render itself via a Jinja2 template.

    Only the template path is set; the rendering code below is currently
    commented out.
    """
    template = "./template/default.j2"
    # def __str__(self, template=None):
    #     fileIn = ""
    #     with open(self.template, "r") as fd:
    #         fileIn = fd.read()
    #     template = Template(fileIn)
    #     param = []
    #     for arg in self.argumentList:
    #         args = []
    #         args.append(arg.poodle_internal__sym_name)
    #         args.append(arg.poodle_internal__value)
    #         param.append(args)
    #     return template.render(action=self.__class__.__name__, parameters=param)
    # def getTemplate(self):
    #     if self.template == None:
    #         return "./template/{0}.j2".format(self.__class__.__name__)
    #     return selt.template
# problem definition
class Problem:
    """Base class for a planning problem: holds objects, actions and goal."""
    HASHNUM_COUNT = HASHNUM_COUNT_DEFAULT # amount of hashnums generated for imaginary object
    HASHNUM_DEPTH = HASHNUM_DEPTH_DEFAULT # only 2 is currently supported, warning! set globally only!
    folder_name = None
    # NOTE(review): class-level mutable list — shared by every Problem
    # instance unless reassigned per instance; confirm this is intended.
    objectList = []
    def __init__(self):
        """Initialize per-instance solver state."""
        self._has_imaginary = False
        self._plan = None  # filled in after a successful solve
        self._compiled_problem = ""
    def getFolderName(self):
        """Return the configured output folder name (None by default)."""
        return self.folder_name
    def addObject(self, obj):
        """Register *obj* with the problem and return it.

        NOTE(review): appends to the class-level ``objectList``, which is
        shared across instances — confirm cross-instance sharing is intended.
        """
        self.objectList.append(obj)
        return obj
    def getObjectList(self):
        """Return the (shared) list of registered objects."""
        return self.objectList
    def actions(self):
        """Return the list of planned action classes (empty by default)."""
        return []
        # raise NotImplementedError("Please implement .actions() method to return list of planned action classes")
def getActionByName(self):
strList = []
for action in self.action():
strList.append(action.__class__.__name__)
return strList
    def goal(self):
        """Subclasses must return the goal specification."""
        raise NotImplementedError("Please implement .goal() method to return goal in XXX format")
    def wait_result(self, s, url, task_id, rq_hash):
        """Poll the remote solver for *task_id* until it finishes.

        Returns 0 on success (the decoded plan is loaded into
        ``self._plan`` and all objects are sealed), 1 on timeout,
        kill, error, or repeated unknown status.
        """
        url_solve = url.strip('/') + '/solve?rqh=%s' % rq_hash
        url_check = url.strip('/') + '/check?rqh=%s' % rq_hash
        url_result = url.strip('/') + '/result?rqh=%s' % rq_hash
        url_kill = url.strip('/') + '/kill?rqh=%s' % rq_hash
        proccessing_time_start = time.time()
        errorCount = 0
        while 1:
            time.sleep(SOLVER_CHECK_TIME)
            response = s.post(url_check, data={'id': crypt(SOLVER_KEY, str(task_id))})
            status = crypt(SOLVER_KEY, response.content.decode("utf-8"))
            # print(status)
            if status == SOLVER_PROCESSING_STATUS :
                # print(time.time() - proccessing_time_start )
                # NOTE(review): proccessing_time_start is initialised to
                # time.time() above and never reset to 0, so this branch
                # looks unreachable — confirm the intended behaviour.
                if proccessing_time_start == 0 :
                    proccessing_time_start = time.time()
                    continue
                elif time.time() - proccessing_time_start > self.solve_timeout:
                    # Timed out: ask the server to kill the task and give up.
                    log.debug(str(self.solve_timeout) + ' sec break')
                    response = s.post(url_kill, data={'id': crypt(SOLVER_KEY, str(task_id))})
                    status = crypt(SOLVER_KEY, response.content.decode("utf-8"))
                    return 1
                continue
            elif status == SOLVER_UNKNOWN_STATUS:
                log.debug('UNKNOWN SOLVER_ID')
                if errorCount > 5: return 1
                else: errorCount += 1
            elif status == SOLVER_DONE_STATUS:
                # Fetch and decode the plan, then load it through the
                # action class loader (includes Select()-decorated members).
                response = s.post(url_result, data={'id': crypt(SOLVER_KEY, str(task_id))})
                response_plan = crypt(SOLVER_KEY, response.content.decode("utf-8"))
                actionClassLoader = ActionClassLoader(self.actions() + [getattr(self, k).plan_class for k in dir(self) if hasattr(getattr(self, k), "plan_class")], self)
                actionClassLoader.loadFromStr(response_plan)
                self._plan = actionClassLoader._plan
                for ob in self.objectList: ob._sealed = True
                return 0
            elif status == SOLVER_KILLED_STATUS:
                log.debug('SOLVER_KILLED_STATUS')
                return 1
            elif status == SOLVER_ERROR_STATUS:
                log.debug('SOLVER_ERROR_STATUS')
                response = s.post(url_kill, data={'id': crypt(SOLVER_KEY, str(task_id))})
                plan = crypt(SOLVER_KEY, response.content.decode("utf-8"))
                return 1
            else:
                log.debug('UNKNOWN_STATUS')
                if errorCount > 5: return 1
                else: errorCount += 1
    def run_cloud(self, url):
        """Compile the problem and submit it to the remote solver at *url*.

        Returns wait_result()'s status: 0 on success, 1 on failure.
        """
        rq_hash = ''.join(random.choice(string.ascii_lowercase) for i in range(20))
        url_solve = url.strip('/') + '/solve?rqh=%s' % rq_hash
        # NOTE(review): this local SOLVER_KEY shadows the SOLVER_KEY name
        # referenced in wait_result(), and its value reads like accidentally
        # stringified code — confirm the intended key.
        SOLVER_KEY = "list(filter(None, _collected_predicates + _collected_effects))"
        problem_pddl_base64 = crypt(SOLVER_KEY, str(self.compile_problem())) #base64.b64encode(bytes(self.compile_problem(), 'utf-8'))
        domain_pddl_base64 = crypt(SOLVER_KEY, str(self.compile_domain()))#base64.b64encode(bytes(self.compile_domain(), 'utf-8'))
        data_pddl = {'d': domain_pddl_base64, 'p': problem_pddl_base64, 'n': crypt(SOLVER_KEY, self.__class__.__name__) }
        s = requests.Session()
        response = s.post(url_solve, data=data_pddl)
        task_id = crypt(SOLVER_KEY, response.content.decode("utf-8"))
        log.debug("Submitted task with ID: "+task_id)
        return self.wait_result(s, url, task_id, rq_hash)
        #actionClassLoader = ActionClassLoader(self.actions(), self)
def run_local(self):
global _collected_parameters
# print(_collected_parameters)
counter = 0
try:
with open("./.counter", "r") | |
assert np.all(image[~bb_mask] == [0, 0, 0])
image = np.zeros_like(image)
bb = ia.BoundingBox(y1=-1, x1=-1, y2=2, x2=2, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[2, 0:3] = True
bb_mask[0:3, 2] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[0:5, 0:5] = True
bb_mask[2, 2] = False
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=2, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[0:1+1, 1] = True
bb_mask[1, 0:1+1] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
got_exception = False
try:
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=True)
except Exception as e:
got_exception = True
assert got_exception == False
bb = ia.BoundingBox(y1=-5, x1=-5, y2=-1, x2=-1, label=None)
got_exception = False
try:
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=True)
except Exception as e:
got_exception = True
assert got_exception == True
# extract_from_image()
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3, :])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
image_pad = np.pad(image, ((0, 1), (0, 1), (0, 0)), mode="constant", constant_values=0)
bb = ia.BoundingBox(y1=8, y2=11, x1=8, x2=11, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image_pad[8:11, 8:11, :])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
image_pad = np.pad(image, ((1, 0), (1, 0), (0, 0)), mode="constant", constant_values=0)
bb = ia.BoundingBox(y1=-1, y2=3, x1=-1, x2=4, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image_pad[0:4, 0:5, :])
# to_keypoints()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
kps = bb.to_keypoints()
assert kps[0].y == 1
assert kps[0].x == 1
assert kps[1].y == 1
assert kps[1].x == 3
assert kps[2].y == 3
assert kps[2].x == 3
assert kps[3].y == 3
assert kps[3].x == 1
# copy()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label="test")
bb2 = bb.copy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label == "test"
bb2 = bb.copy(y1=10, x1=20, y2=30, x2=40, label="test2")
assert bb2.y1 == 10
assert bb2.x1 == 20
assert bb2.y2 == 30
assert bb2.x2 == 40
assert bb2.label == "test2"
# deepcopy()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=["test"])
bb2 = bb.deepcopy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label[0] == "test"
# BoundingBox_repr()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
assert bb.__repr__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
# test_BoundingBox_str()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
assert bb.__str__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
def test_BoundingBoxesOnImage():
    """Tests for ia.BoundingBoxesOnImage: construction, projection, drawing,
    filtering, shifting, (deep)copying and string representation.

    Fix: the shift() section asserted len() of ``bbsoi_cut`` (copy-paste
    leftover from the cut_out_of_image() section) instead of
    ``bbsoi_shifted``.
    """
    reseed()
    # test height/width
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    assert bbsoi.height == 40
    assert bbsoi.width == 50
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
    assert bbsoi.height == 40
    assert bbsoi.width == 50
    # on()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
    bbsoi_projected = bbsoi.on((40, 50))
    assert bbsoi_projected.bounding_boxes[0].y1 == 10
    assert bbsoi_projected.bounding_boxes[0].x1 == 20
    assert bbsoi_projected.bounding_boxes[0].y2 == 30
    assert bbsoi_projected.bounding_boxes[0].x2 == 40
    assert bbsoi_projected.bounding_boxes[1].y1 == 15
    assert bbsoi_projected.bounding_boxes[1].x1 == 25
    assert bbsoi_projected.bounding_boxes[1].y2 == 35
    assert bbsoi_projected.bounding_boxes[1].x2 == 45
    bbsoi_projected = bbsoi.on((40*2, 50*2, 3))
    assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
    assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
    assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
    assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
    assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
    assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
    assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
    assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
    bbsoi_projected = bbsoi.on(np.zeros((40*2, 50*2, 3), dtype=np.uint8))
    assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
    assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
    assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
    assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
    assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
    assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
    assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
    assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
    # draw_on_image()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    image = bbsoi.draw_on_image(np.zeros(bbsoi.shape, dtype=np.uint8), color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
    assert np.all(image[10-1, 20-1, :] == [0, 0, 0])
    assert np.all(image[10-1, 20-0, :] == [0, 0, 0])
    assert np.all(image[10-0, 20-1, :] == [0, 0, 0])
    assert np.all(image[10-0, 20-0, :] == [0, 255, 0])
    assert np.all(image[10+1, 20+1, :] == [0, 0, 0])
    assert np.all(image[30-1, 40-1, :] == [0, 0, 0])
    assert np.all(image[30+1, 40-0, :] == [0, 0, 0])
    assert np.all(image[30+0, 40+1, :] == [0, 0, 0])
    assert np.all(image[30+0, 40+0, :] == [0, 255, 0])
    assert np.all(image[30+1, 40+1, :] == [0, 0, 0])
    assert np.all(image[15-1, 25-1, :] == [0, 0, 0])
    assert np.all(image[15-1, 25-0, :] == [0, 0, 0])
    assert np.all(image[15-0, 25-1, :] == [0, 0, 0])
    assert np.all(image[15-0, 25-0, :] == [0, 255, 0])
    assert np.all(image[15+1, 25+1, :] == [0, 0, 0])
    assert np.all(image[35-1, 45-1, :] == [0, 0, 0])
    assert np.all(image[35+1, 45+0, :] == [0, 0, 0])
    assert np.all(image[35+0, 45+1, :] == [0, 0, 0])
    assert np.all(image[35+0, 45+0, :] == [0, 255, 0])
    assert np.all(image[35+1, 45+1, :] == [0, 0, 0])
    # remove_out_of_image()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_slim = bbsoi.remove_out_of_image(fully=True, partly=True)
    assert len(bbsoi_slim.bounding_boxes) == 1
    assert bbsoi_slim.bounding_boxes[0] == bb1
    # cut_out_of_image()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_cut = bbsoi.cut_out_of_image()
    assert len(bbsoi_cut.bounding_boxes) == 2
    assert bbsoi_cut.bounding_boxes[0].y1 == 10
    assert bbsoi_cut.bounding_boxes[0].x1 == 20
    assert bbsoi_cut.bounding_boxes[0].y2 == 30
    assert bbsoi_cut.bounding_boxes[0].x2 == 40
    assert bbsoi_cut.bounding_boxes[1].y1 == 15
    assert bbsoi_cut.bounding_boxes[1].x1 == 25
    assert bbsoi_cut.bounding_boxes[1].y2 == 35
    assert bbsoi_cut.bounding_boxes[1].x2 == 50
    # shift()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_shifted = bbsoi.shift(right=1)
    # bug fix: previously asserted on bbsoi_cut (copy-paste from above)
    assert len(bbsoi_shifted.bounding_boxes) == 2
    assert bbsoi_shifted.bounding_boxes[0].y1 == 10
    assert bbsoi_shifted.bounding_boxes[0].x1 == 20 - 1
    assert bbsoi_shifted.bounding_boxes[0].y2 == 30
    assert bbsoi_shifted.bounding_boxes[0].x2 == 40 - 1
    assert bbsoi_shifted.bounding_boxes[1].y1 == 15
    assert bbsoi_shifted.bounding_boxes[1].x1 == 25 - 1
    assert bbsoi_shifted.bounding_boxes[1].y2 == 35
    assert bbsoi_shifted.bounding_boxes[1].x2 == 51 - 1
    # copy()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_copy = bbsoi.copy()
    assert len(bbsoi.bounding_boxes) == 2
    assert bbsoi_copy.bounding_boxes[0].y1 == 10
    assert bbsoi_copy.bounding_boxes[0].x1 == 20
    assert bbsoi_copy.bounding_boxes[0].y2 == 30
    assert bbsoi_copy.bounding_boxes[0].x2 == 40
    assert bbsoi_copy.bounding_boxes[1].y1 == 15
    assert bbsoi_copy.bounding_boxes[1].x1 == 25
    assert bbsoi_copy.bounding_boxes[1].y2 == 35
    assert bbsoi_copy.bounding_boxes[1].x2 == 51
    # shallow copy shares the BoundingBox objects, so the mutation is visible
    bbsoi.bounding_boxes[0].y1 = 0
    assert bbsoi_copy.bounding_boxes[0].y1 == 0
    # deepcopy()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_copy = bbsoi.deepcopy()
    assert len(bbsoi.bounding_boxes) == 2
    assert bbsoi_copy.bounding_boxes[0].y1 == 10
    assert bbsoi_copy.bounding_boxes[0].x1 == 20
    assert bbsoi_copy.bounding_boxes[0].y2 == 30
    assert bbsoi_copy.bounding_boxes[0].x2 == 40
    assert bbsoi_copy.bounding_boxes[1].y1 == 15
    assert bbsoi_copy.bounding_boxes[1].x1 == 25
    assert bbsoi_copy.bounding_boxes[1].y2 == 35
    assert bbsoi_copy.bounding_boxes[1].x2 == 51
    # deep copy is independent of the original
    bbsoi.bounding_boxes[0].y1 = 0
    assert bbsoi_copy.bounding_boxes[0].y1 == 10
    # repr() / str()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bb1_expected = "BoundingBox(x1=20.0000, y1=10.0000, x2=40.0000, y2=30.0000, label=None)"
    bb2_expected = "BoundingBox(x1=25.0000, y1=15.0000, x2=51.0000, y2=35.0000, label=None)"
    expected = "BoundingBoxesOnImage([%s, %s], shape=(40, 50, 3))" % (bb1_expected, bb2_expected)
    assert bbsoi.__repr__() == bbsoi.__str__() == expected
def test_HeatmapsOnImage_draw():
heatmaps_arr = np.float32([
[0.5, 0.0, 0.0, 0.5],
[0.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0],
[0.5, 0.0, 0.0, 0.5],
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_drawn = heatmaps.draw()[0]
assert heatmaps_drawn.shape | |
+ n2)))/(8.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
38: lambda n1, n2: ((2 + n1 + n2)*(-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(1 + 2*n1 + 2*n2))/(8.*n1*(1 + n1)*n2*(1 + n2)),
39: lambda n1, n2: -((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(-2 + 7*n1 + 7*n2))/(56.*n1*(1 + n1)*n2*(1 + n2)),
40: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(26 + 56*n1**3*n2 + (9 - 38*n2)*n2 + 2*n1**2*(-19 + 2*n2*(-9 + 28*n2)) + n1*(9 + 4*n2*(-21 + n2*(-9 + 14*n2)))))/(56.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
41: lambda n1, n2: (3*(-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2))/(32.*n1*(1 + n1)*n2*(1 + n2)),
42: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(1 + 2*n1 + 2*n2)*(1 + 2*(n1**2 - 4*n1*n2 + n2**2)))/(16.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
43: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(1 + 2*n1 + 2*n2)*(3 + 2*n1 + 2*n2))/(32.*n1*(1 + n1)*n2*(1 + n2)),
}
# Exact time dependence loops
# 13-type loop coefficients (exact time dependence, per the section comment
# above): each entry maps a term index to its coefficient as a function of
# the index n1.
M13e = {
    0: lambda n1: 1.125,
    1: lambda n1: -(1/(1 + n1)),
    2: lambda n1: 2.25,
    3: lambda n1: -(1/(1 + n1)),
    4: lambda n1: 1.125,
    5: lambda n1: 5.25,
    6: lambda n1: 10.5,
    7: lambda n1: 5.25,
    8: lambda n1: 5.25,
    9: lambda n1: -21/(4 + 4*n1),
    10: lambda n1: (21 + 42*n1)/(4 + 4*n1),
    11: lambda n1: -21/(4 + 4*n1),
    12: lambda n1: (21*n1)/(4 + 4*n1),
    13: lambda n1: -21/(1 + n1),
    14: lambda n1: -21/(1 + n1),
}
# 22-type loop coefficients (exact time dependence): each entry maps a term
# index to its coefficient as a function of the two indices (n1, n2).
M22e = {
    0: lambda n1, n2: (6 + n1*(1 + 2*n1)*(-7 + n1 + 2*n1**2) - 7*n2 + 2*n1*(-3 + n1*(19 + n1*(-5 + 4*(-3 + n1)*n1)))*n2 + (-13 + 2*n1*(19 + 6*n1 - 4*n1**2))*n2**2 + 2*(2 + n1*(-5 - 4*n1 + 8*n1**2))*n2**3 + 4*(1 - 6*n1)*n2**4 + 8*n1*n2**5)/(4.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
    1: lambda n1, n2: (n1**2*(1 - 11*n2) + 2*n1**3*(5 + 7*n2) + (-3 + 2*n2)*(6 + n2*(8 + 5*n2)) + n1*(-12 + n2*(-38 + n2*(-11 + 14*n2))))/(7.*n1*(1 + n1)*n2*(1 + n2)),
    2: lambda n1, n2: (-3 + 2*n1)/n2 + (-3 + 2*n2)/n1,
    3: lambda n1, n2: (2*(48 - 2*n1*(1 + 10*n1) - 2*n2 + 3*n1*(17 + 7*n1)*n2 + (-20 + 7*n1*(3 + 7*n1))*n2**2))/(49.*n1*(1 + n1)*n2*(1 + n2)),
    4: lambda n1, n2: (4*(3 - 2*n2 + n1*(-2 + 7*n2)))/(7.*n1*n2),
    5: lambda n1, n2: 2,
    6: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-2 + 3*n2 + 4*n1**4*n2 + (3 - 2*n2)*n2**2 + 2*n1**3*(-1 + n2)*(1 + 2*n2) + n1*(1 + 2*n2)*(3 + 2*(-2 + n2)*n2*(1 + n2)) + n1**2*(3 + 2*n2*(-5 + 2*(-1 + n2)*n2))))/(2.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
    7: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(2 + n2*(4 + 5*n2) + n1**2*(5 + 7*n2) + n1*(4 + n2*(10 + 7*n2))))/(7.*n1*(1 + n1)*n2*(1 + n2)),
    8: lambda n1, n2: ((n1 + n2)*(-3 + 2*n1 + 2*n2))/(n1*n2),
    9: lambda n1, n2: ((-2 + n1 + n2)*(-3 + 2*n1 + 2*n2)*(-2 + n1*(3 + 2*n1) + 3*n2 + 2*n1*(-4 + n1*(-1 + 2*n1))*n2 - 2*(-1 + n1)*n2**2 + 4*n1*n2**3))/(2.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
    10: lambda n1, n2: ((-2 + n1 + n2)*(-3 + 2*n1 + 2*n2)*(8 + 5*n2 + n1*(5 + 7*n2)))/(7.*n1*(1 + n1)*n2*(1 + n2)),
    11: lambda n1, n2: ((-2 + n1 + n2)*(-3 + 2*n1 + 2*n2))/(n1*n2),
    12: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(2 + (-1 + n1)*n1*(1 + 2*n1) - n2 - 2*n1*(1 + n1)*n2 - (1 + 2*n1)*n2**2 + 2*n2**3))/(8.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
    13: lambda n1, n2: ((1 + n1 + n2)*(2 + n1 + n2)*(-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2))/(8.*n1*(1 + n1)*n2*(1 + n2)),
    14: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(6 + n1 - 2*n1**2 + n2 - 2*n2**2))/(8.*n1*(1 + n1)*n2*(1 + n2)),
    15: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(2 + (11 - 6*n1)*n1 + 11*n2 + 4*(-2 + n1)*n1*(5 + 4*n1)*n2 + 2*(-3 - 6*n1 + 8*n1**2)*n2**2 + 16*n1*n2**3))/(8.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
    16: lambda n1, n2: -((-3 + 2*n1 + 2*n2)*(9 + 3*n1 + 3*n2 + 7*n1*n2))/(14.*n1*(1 + n1)*n2*(1 + n2)),
    17: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(5 + 5*n1 + 5*n2 + 7*n1*n2))/(14.*n1*(1 + n1)*n2*(1 + n2)),
    18: lambda n1, n2: (3 - 2*n1 - 2*n2)/(2.*n1*n2),
    19: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2))/(2.*n1*n2),
    20: lambda n1, n2: ((-2 + n1 + n2)*(-1 + n1 + n2)*(-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(1 + 2*n1*n2))/(4.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
    21: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(2 + n1 + 4*n1**3 + n2 - 8*n1**2*n2 + 4*n2**3 - 8*n1*n2*(1 + n2)))/(8.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
    22: lambda n1, n2: ((2 + n1 + n2)*(-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(1 + 2*n1 + 2*n2))/(8.*n1*(1 + n1)*n2*(1 + n2)),
    23: lambda n1, n2: -((-2 + n1 + n2)*(-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2))/(8.*n1*(1 + n1)*n2*(1 + n2)),
    24: lambda n1, n2: ((-2 + n1 + n2)*(-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(1 + 2*n1 + 2*n2)*(-1 + 4*n1*n2))/(8.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
    25: lambda n1, n2: (3*(-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2))/(32.*n1*(1 + n1)*n2*(1 + n2)),
    26: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(1 + 2*n1 + 2*n2)*(1 + 2*(n1**2 - 4*n1*n2 + n2**2)))/(16.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
    27: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2)*(1 + 2*n1 + 2*n2)*(3 + 2*n1 + 2*n2))/(32.*n1*(1 + n1)*n2*(1 + n2)),
    28: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(n1*(-1 + 2*n1) + (-2 + n2)*(3 + 2*n2)))/(2.*n1*(1 + n1)*n2*(1 + n2)),
    29: lambda n1, n2: (2*(-3 + 2*n1 + 2*n2)*(9 + 3*n1 + 3*n2 + 7*n1*n2))/(7.*n1*(1 + n1)*n2*(1 + n2)),
    30: lambda n1, n2: (-6 + 4*n1 + 4*n2)/(n1*n2),
    31: lambda n1, n2: ((2 + n1 + n2)*(-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2))/(2.*n1*(1 + n1)*n2*(1 + n2)),
    32: lambda n1, n2: ((-2 + n1 + n2)*(-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2))/(2.*n1*(1 + n1)*n2*(1 + n2)),
    33: lambda n1, n2: -((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2))/(2.*n1*(1 + n1)*n2*(1 + n2)),
    34: lambda n1, n2: ((1 + n1 + n2)*(-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2))/(2.*n1*(1 + n1)*n2*(1 + n2)),
    35: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-1 + 2*n1 + 2*n2))/(n1*(1 + n1)*n2*(1 + n2)),
}
# galaxy matter
# 13-type loop coefficients for the galaxy-matter case (per the section
# comment above): term index -> coefficient as a function of n1.
M13gm = {
    0: lambda n1: (5 + 9*n1)/(8 + 8*n1),
    1: lambda n1: -(1/(2 + 2*n1)),
    2: lambda n1: (3*(5 + 9*n1))/(8.*(1 + n1)),
    3: lambda n1: -(1/(2 + 2*n1)),
    4: lambda n1: (-7 + 9*n1)/(8.*(1 + n1)),
    5: lambda n1: -9/(8 + 8*n1),
    6: lambda n1: (9 + 18*n1)/(8 + 8*n1),
    7: lambda n1: -9/(8 + 8*n1),
    8: lambda n1: (3*(-2 + 9*n1))/(8.*(1 + n1)),
    9: lambda n1: -9/(4 + 4*n1),
    10: lambda n1: (9*n1)/(4 + 4*n1),
}
M22gm = {
0: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-2 + n1*(-1 + (13 - 6*n1)*n1) - n2 + 2*n1*(-3 + 2*n1)*(-9 + n1*(3 + 7*n1))*n2 + (13 + 2*n1*(-27 + 14*(-1 + n1)*n1))*n2**2 + 2*(-3 + n1*(-15 + 14*n1))*n2**3 + 28*n1*n2**4))/(28.*n1*(1 + n1)*(-1 + 2*n1)*n2*(1 + n2)*(-1 + 2*n2)),
1: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-22 + 7*n1**2*(5 + 7*n2) + n2*(16 + 35*n2) + n1*(16 + 7*n2*(6 + 7*n2))))/(98.*n1*(1 + n1)*n2*(1 + n2)),
2: lambda n1, n2: ((-3 + 2*n1 + 2*n2)*(-4 + 7*n1 + 7*n2))/(14.*n1*n2),
3: | |
# -*- coding: utf-8 -*-
"""
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import sys
import getopt
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline
from pygments.lexers import get_all_lexers, get_lexer_by_name, get_lexer_for_filename, \
find_lexer_class, guess_lexer, TextLexer
from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
get_formatter_for_filename, find_formatter_class, \
TerminalFormatter # pylint:disable-msg=E0611
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
# Usage text printed by main(); its six %s slots are all filled with argv[0].
# Fix: the -P example was missing its closing double backticks.
USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
          [-O <options>] [-P <option=value>] [-o <outfile>] [<infile>]
       %s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
       %s -L [<which> ...]
       %s -N <filename>
       %s -H <type> <name>
       %s -h | -V
Highlight the input file and write the result to <outfile>.
If no input file is given, use stdin, if -o is not given, use stdout.
<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin). If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).
Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name. If no output file is given,
the terminal formatter will be used by default.
With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.
The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter"``.
With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).
The -O, -P and -F options can be given multiple times.
With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.
The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.
The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined "text" is returned.
The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".
The -h option prints this help.
The -V option prints the package version.
"""
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str:
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=', 1)
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
def _parse_filters(f_strs):
    """Turn ``name[:optstring]`` filter specs into ``(name, options)`` pairs.

    The part after the first colon, if present, is parsed like a ``-O``
    option string; specs without a colon get an empty options dict.
    A falsy *f_strs* yields an empty list.
    """
    parsed = []
    for spec in (f_strs or ()):
        fname, sep, fopts = spec.partition(':')
        parsed.append((fname, _parse_options([fopts]) if sep else {}))
    return parsed
def _print_help(what, name):
    """Print docstring-derived help for the named lexer, formatter or filter.

    Unknown *what* values are silently ignored; a failed lookup is
    reported on stderr.
    """
    try:
        if what == 'lexer':
            cls = find_lexer_class(name)
            print("Help on the %s lexer:" % cls.name)
        elif what == 'formatter':
            cls = find_formatter_class(name)
            print("Help on the %s formatter:" % cls.name)
        elif what == 'filter':
            cls = find_filter_class(name)
            print("Help on the %s filter:" % name)
        else:
            return
        print(dedent(cls.__doc__))
    except AttributeError:
        # Lookup returned None (no such object); cls.name / cls.__doc__ failed.
        print("%s not found!" % what, file=sys.stderr)
def _print_list(what):
    """Print a sorted listing of all known lexers, formatters, filters or styles."""
    if what == 'lexer':
        print()
        print("Lexers:")
        print("~~~~~~~")
        info = [(', '.join(names) + ':', fullname,
                 ('(filenames ' + ', '.join(exts) + ')') if exts else '')
                for fullname, names, exts, _ in get_all_lexers()]
        for entry in sorted(info):
            print(('* %s\n %s %s') % entry)
    elif what == 'formatter':
        print()
        print("Formatters:")
        print("~~~~~~~~~~~")
        info = [(', '.join(cls.aliases) + ':', docstring_headline(cls),
                 ('(filenames ' + ', '.join(cls.filenames) + ')')
                 if cls.filenames else '')
                for cls in get_all_formatters()]
        for entry in sorted(info):
            print(('* %s\n %s %s') % entry)
    elif what == 'filter':
        print()
        print("Filters:")
        print("~~~~~~~~")
        for name in get_all_filters():
            print("* " + name + ':')
            print(" %s" % docstring_headline(find_filter_class(name)))
    elif what == 'style':
        print()
        print("Styles:")
        print("~~~~~~~")
        for name in get_all_styles():
            print("* " + name + ':')
            print(" %s" % docstring_headline(get_style_by_name(name)))
def main(args=sys.argv):
"""
Main command line entry point.
"""
# pylint: disable-msg=R0911,R0912,R0915
usage = USAGE % ((args[0],) * 6)
if sys.platform in ['win32', 'cygwin']:
try:
# Provide coloring under Windows, if possible
import colorama
colorama.init()
except ImportError:
pass
try:
popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:hVHg")
except getopt.GetoptError:
print(usage, file=sys.stderr)
return 2
opts = {}
O_opts = []
P_opts = []
F_opts = []
for opt, arg in popts:
if opt == '-O':
O_opts.append(arg)
elif opt == '-P':
P_opts.append(arg)
elif opt == '-F':
F_opts.append(arg)
opts[opt] = arg
if not opts and not args:
print(usage)
return 0
if opts.pop('-h', None) is not None:
print(usage)
return 0
if opts.pop('-V', None) is not None:
print('Pygments version %s, (c) 2006-2014 by <NAME>.' % __version__)
return 0
# handle ``pygmentize -L``
L_opt = opts.pop('-L', None)
if L_opt is not None:
if opts:
print(usage, file=sys.stderr)
return 2
# print version
main(['', '-V'])
if not args:
args = ['lexer', 'formatter', 'filter', 'style']
for arg in args:
_print_list(arg.rstrip('s'))
return 0
# handle ``pygmentize -H``
H_opt = opts.pop('-H', None)
if H_opt is not None:
if opts or len(args) != 2:
print(usage, file=sys.stderr)
return 2
what, name = args
if what not in ('lexer', 'formatter', 'filter'):
print(usage, file=sys.stderr)
return 2
_print_help(what, name)
return 0
# parse -O options
parsed_opts = _parse_options(O_opts)
opts.pop('-O', None)
# parse -P options
for p_opt in P_opts:
try:
name, value = p_opt.split('=', 1)
except ValueError:
parsed_opts[p_opt] = True
else:
parsed_opts[name] = value
opts.pop('-P', None)
# handle ``pygmentize -N``
infn = opts.pop('-N', None)
if infn is not None:
try:
lexer = get_lexer_for_filename(infn, **parsed_opts)
except ClassNotFound as err:
lexer = TextLexer()
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
print(lexer.aliases[0])
return 0
# handle ``pygmentize -S``
S_opt = opts.pop('-S', None)
a_opt = opts.pop('-a', None)
if S_opt is not None:
f_opt = opts.pop('-f', None)
if not f_opt:
print(usage, file=sys.stderr)
return 2
if opts or args:
print(usage, file=sys.stderr)
return 2
try:
parsed_opts['style'] = S_opt
fmter = get_formatter_by_name(f_opt, **parsed_opts)
except ClassNotFound as err:
print(err, file=sys.stderr)
return 1
arg = a_opt or ''
try:
print(fmter.get_style_defs(arg))
except Exception as err:
print('Error:', err, file=sys.stderr)
return 1
return 0
# if no -S is given, -a is not allowed
if a_opt is not None:
print(usage, file=sys.stderr)
return 2
# parse -F options
F_opts = _parse_filters(F_opts)
opts.pop('-F', None)
# select formatter
outfn = opts.pop('-o', None)
fmter = opts.pop('-f', None)
if fmter:
try:
fmter = get_formatter_by_name(fmter, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
if outfn:
if not fmter:
try:
fmter = get_formatter_for_filename(outfn, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
try:
outfile = open(outfn, 'wb')
except Exception as err:
print('Error: cannot open outfile:', err, file=sys.stderr)
return 1
else:
if not fmter:
fmter = TerminalFormatter(**parsed_opts)
outfile = sys.stdout
# select lexer
lexer = opts.pop('-l', None)
if lexer:
try:
lexer = get_lexer_by_name(lexer, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
if args:
if len(args) > 1:
print(usage, file=sys.stderr)
return 2
infn = args[0]
try:
code = open(infn, 'rb').read()
except Exception as err:
print('Error: cannot read infile:', err, file=sys.stderr)
return 1
if not lexer:
try:
lexer = get_lexer_for_filename(infn, code, **parsed_opts)
except ClassNotFound as err:
if '-g' in opts:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else:
print('Error:', err, file=sys.stderr)
return 1
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
else:
if '-g' in opts:
code = sys.stdin.read()
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
elif not lexer:
print('Error: no lexer name given and reading ' + | |
if skip_select:
break
if next_node == P4_HLIR.PACKET_TOO_SHORT:
# Packet needs to be at least one byte too short
self.sym_packet.set_max_length(simplify(new_pos - 8))
break
if not skip_select:
sym_transition_key = []
for transition_key_elem in parse_state.transition_key:
if isinstance(transition_key_elem, TypeValueField):
sym_transition_key.append(self.current_context(
).get_header_field(transition_key_elem.header_name,
transition_key_elem.header_field))
elif isinstance(transition_key_elem, TypeValueStackField):
sym_transition_key.append(
self.current_context().get_last_header_field(
transition_key_elem.header_name,
transition_key_elem.header_field,
self.hlir.get_header_stack(
transition_key_elem.header_name).size))
else:
raise Exception(
'Transition key type not supported: {}'.format(
transition_key_elem.__class__))
# XXX: is this check really necessary?
if len(sym_transition_key) > 0:
# Make sure that we are not hitting any of the cases before the
# case that we care about
other_constraints = []
for current_transition in parse_state.transitions:
if current_transition != path_transition:
other_constraints.append(
self.parser_transition_key_constraint(
sym_transition_key, current_transition.
value, current_transition.mask))
else:
break
constraints.append(Not(Or(other_constraints)))
logging.debug(
"Other constraints: {}".format(other_constraints))
# The constraint for the case that we are interested in
if path_transition.value is not None:
constraint = self.parser_transition_key_constraint(
sym_transition_key, path_transition.value,
path_transition.mask)
constraints.append(constraint)
logging.debug(sym_transition_key)
pos = simplify(new_pos)
# XXX: workaround
self.current_context().set_field_value('meta_meta', 'packet_len',
self.sym_packet.packet_size_var)
constraints.append(self.sym_packet.get_length_constraint())
constraints.extend(self.current_context().get_name_constraints())
self.solver.add(And(constraints))
parser_constraints_gen_timer.stop()
logging.info('Generate parser constraints: %.3f sec' %
(parser_constraints_gen_timer.get_time()))
Statistics().solver_time.start()
result = self.solver.check()
Statistics().num_solver_calls += 1
Statistics().solver_time.stop()
if not Config().get_incremental():
self.constraints[0] = constraints
self.solver.reset()
return result == sat
    def parser_op_trans_to_str(self, op_trans):
        """Return the human-readable label for a parser-op transition.

        Currently just the transition's ``error_str``; richer rendering is
        blocked on unifying the type value representations (see XXX below).
        """
        # XXX: after unifying type value representations
        # assert isinstance(op_trans.op.value[1], TypeValueHexstr)
        return op_trans.error_str
def log_model(self, model, context_history):
var_vals = defaultdict(lambda: [])
for i, context in enumerate(context_history):
for var, smt_var in context.var_to_smt_var.items():
if len(var_vals[var]) < i:
# Add empty entries for the contexts where the variable
# didn't exist
var_vals[var] += [''] * (i - len(var_vals[var]))
if smt_var is None:
var_vals[var].append('')
else:
var_vals[var].append(str(model.eval(smt_var)))
table = Table()
table.add_rows([['.'.join(var)] + vals
for var, vals in sorted(var_vals.items())])
logging.info('Model\n' + str(table))
def control_transition_constraints(self, context, transition):
assert isinstance(transition, Edge)
constraints = []
table_name = transition.src
if transition.transition_type == TransitionType.BOOL_TRANSITION:
t_val = transition.val
conditional = self.pipeline.conditionals[table_name]
context.set_source_info(conditional.source_info)
expected_result = BoolVal(t_val)
sym_expr = self.type_value_to_smt(context, conditional.expression)
constraints.append(sym_expr == expected_result)
elif transition.transition_type == TransitionType.ACTION_TRANSITION:
assert table_name in self.pipeline.tables
table = self.pipeline.tables[table_name]
context.set_source_info(table.source_info)
if table.match_type not in ['exact', 'lpm', 'ternary', 'range']:
raise Exception(
'Match type {} not supported!'.format(table.match_type))
sym_key_elems = []
for key_elem in table.key:
header_name, header_field = key_elem.target
sym_key_elems.append(
context.get_header_field(key_elem.target[0],
key_elem.target[1]))
context.set_table_values(table_name, sym_key_elems)
self.action_to_smt(context, table_name, transition.action)
elif transition.transition_type == TransitionType.CONST_ACTION_TRANSITION:
logging.debug("const action transition table_name='%s'"
" action='%s' action_data='%s' prev='%s'",
table_name, transition.action,
transition.action_data,
transition.prev_const_action_transition)
# See the code in this file beginning with the line
# 'other_constraints = []' in the function
# generate_parser_constraints for reference. We want to
# do something similar to that here: We want the packet
# fields and metadata _not_ to match any earlier entries
# in the 'const entries' list, and we want them _to_ match
# the current entry being considered.
raise Exception('ConstActionTransition is not yet supported')
else:
raise Exception('Transition type {} not supported!'.format(
transition.transition_type))
context.unset_source_info()
return constraints
def generate_constraints(self, path, control_path,
source_info_to_node_name, count,
is_complete_control_path):
# XXX: This is very hacky right now
expected_path = [
n.src if not isinstance(n, ParserOpTransition) else
self.parser_op_trans_to_str(n) for n in path
] + ['sink'] + [(n.src, n) for n in control_path]
logging.info("")
logging.info("BEGIN %d Exp path (len %d+%d=%d) complete_path %s: %s"
"" % (count, len(path), len(control_path),
len(path) + len(control_path),
is_complete_control_path, expected_path))
assert len(control_path) == len(
self.context_history_lens) or not Config().get_incremental()
self.context_history.append(copy.copy(self.current_context()))
context = self.current_context()
constraints = []
time2 = time.time()
# XXX: very ugly to split parsing/control like that, need better solution
logging.info('control_path = {}'.format(control_path))
transition = None
if len(control_path) > 0:
transition = control_path[-1]
constraints.extend(
self.control_transition_constraints(context, transition))
self.context_history.append(copy.copy(self.current_context()))
context = self.current_context()
constraints.extend(context.get_name_constraints())
# XXX: Workaround for simple_switch issue
constraints.append(Or(ULT(context.get_header_field('standard_metadata', 'egress_spec'), 256), context.get_header_field('standard_metadata', 'egress_spec') == 511))
if not Config().get_incremental():
for cs in self.constraints:
self.solver.add(And(cs))
self.constraints[-1].extend(constraints)
# Construct and test the packet
# logging.debug(And(constraints))
self.solver.add(And(constraints))
# If the last part of the path is a table with no const entries
# and the prefix of the current path is satisfiable, so is the new
# path
if transition is not None and not is_complete_control_path and len(
context.uninitialized_reads) == 0 and len(
context.invalid_header_writes) == 0:
if Config().get_table_opt(
) and transition.transition_type == TransitionType.ACTION_TRANSITION:
assert transition.src in self.pipeline.tables
table = self.pipeline.tables[transition.src]
assert not table.has_const_entries()
result = TestPathResult.SUCCESS
self.result_history[-2].append(result)
return (expected_path, result, None, None)
elif Config().get_conditional_opt(
) and transition.transition_type == TransitionType.BOOL_TRANSITION:
cond_history = self.result_history[-2]
if len(
cond_history
) > 0 and cond_history[0] == TestPathResult.NO_PACKET_FOUND:
assert len(cond_history) == 1
result = TestPathResult.SUCCESS
self.result_history[-2].append(result)
return (expected_path, result, None, None)
time3 = time.time()
Statistics().solver_time.start()
smt_result = self.solver.check()
Statistics().num_solver_calls += 1
Statistics().solver_time.stop()
time4 = time.time()
packet_hexstr = None
payload = None
ss_cli_setup_cmds = []
table_setup_cmd_data = []
uninitialized_read_data = None
invalid_header_write_data = None
actual_path_data = None
result = None
if smt_result != unsat:
model = self.solver.model()
if not Config().get_silent():
self.log_model(model, self.context_history)
payload = self.sym_packet.get_payload_from_model(model)
# Determine table configurations
table_configs = []
for t in control_path:
table_name = t.src
transition = t
if table_name in self.pipeline.tables and context.has_table_values(
table_name):
runtime_data_values = []
for i, runtime_param in enumerate(
transition.action.runtime_data):
runtime_data_values.append(
(runtime_param.name,
model[context.get_runtime_data_for_table_action(
table_name, transition.action.name,
runtime_param.name, i)]))
sym_table_values = context.get_table_values(
model, table_name)
table = self.pipeline.tables[table_name]
table_values_strs = []
table_key_data = []
table_entry_priority = None
for table_key, sym_table_value in zip(
table.key, sym_table_values):
key_field_name = '.'.join(table_key.target)
sym_table_value_long = model_value_to_long(
sym_table_value)
if table_key.match_type == 'lpm':
bitwidth = context.get_header_field_size(
table_key.target[0], table_key.target[1])
table_values_strs.append(
'{}/{}'.format(sym_table_value, bitwidth))
table_key_data.append(
OrderedDict([
('match_kind', 'lpm'),
('key_field_name', key_field_name),
('value', sym_table_value_long),
('prefix_length', bitwidth),
]))
elif table_key.match_type == 'ternary':
# Always use exact match mask, which is
# represented in simple_switch_CLI as a 1 bit
# in every bit position of the field.
bitwidth = context.get_header_field_size(
table_key.target[0], table_key.target[1])
mask = (1 << bitwidth) - 1
table_values_strs.append(
'{}&&&{}'.format(sym_table_value, mask))
table_entry_priority = 1
table_key_data.append(
OrderedDict([('match_kind', 'ternary'), (
'key_field_name', key_field_name), (
'value', sym_table_value_long), (
'mask', mask)]))
elif table_key.match_type == 'range':
# Always use a range where the min and max
# values are exactly the one desired value
# generated.
table_values_strs.append('{}->{}'.format(
sym_table_value, sym_table_value))
table_entry_priority = 1
table_key_data.append(
OrderedDict([('match_kind', 'range'), (
'key_field_name', key_field_name
), ('min_value', sym_table_value_long), (
'max_value', sym_table_value_long)]))
elif table_key.match_type == 'exact':
table_values_strs.append(str(sym_table_value))
table_key_data.append(
OrderedDict([('match_kind', 'exact'), (
'key_field_name', key_field_name), (
'value', sym_table_value_long)]))
else:
raise Exception('Match type {} not supported'.
format(table_key.match_type))
logging.debug("table_name %s"
" table.default_entry.action_const %s"
"" % (table_name,
table.default_entry.action_const))
if (len(table_values_strs) == 0
and table.default_entry.action_const):
# Then we cannot change the default action for the
# table at run time, so don't remember any entry
# for this table.
pass
else:
table_configs.append(
(table_name, transition, table_values_strs,
table_key_data, runtime_data_values,
table_entry_priority))
# Print table configuration
for table, action, values, key_data, params, priority in table_configs:
# XXX: inelegant
const_table = self.pipeline.tables[table].has_const_entries()
params2 = []
param_vals = []
for param_name, param_val in params:
param_val = model_value_to_long(param_val)
param_vals.append(param_val)
params2.append(
OrderedDict([('name', param_name), ('value', param_val)
]))
if len(values) == 0 or const_table or action.default_entry:
ss_cli_cmd = ('table_set_default ' +
self.table_set_default_cmd_string(
table, action.get_name(), param_vals))
logging.info(ss_cli_cmd)
table_setup_info = OrderedDict(
[("command", "table_set_default"), ("table_name",
table),
("action_name",
action.get_name()), ("action_parameters", params2)])
else:
ss_cli_cmd = ('table_add ' + self.table_add_cmd_string(
table, action.get_name(), values, param_vals,
priority))
table_setup_info = OrderedDict(
[("command", "table_add"), ("table_name",
table), ("keys", key_data),
("action_name",
action.get_name()), ("action_parameters", params2)])
if priority is not None:
table_setup_info['priority'] = priority
logging.info(ss_cli_cmd)
ss_cli_setup_cmds.append(ss_cli_cmd)
table_setup_cmd_data.append(table_setup_info)
packet_len_bytes = len(payload)
packet_hexstr = ''.join([('%02x' % (x)) for x in payload])
logging.info("packet (%d bytes) %s"
"" % (packet_len_bytes, packet_hexstr))
if len(context.uninitialized_reads) != 0:
result = TestPathResult.UNINITIALIZED_READ
uninitialized_read_data = []
for uninitialized_read in context.uninitialized_reads:
var_name, source_info = uninitialized_read
logging.error('Uninitialized read of {} at {}'.format(
var_name, source_info))
uninitialized_read_data.append(
OrderedDict([("variable_name", var_name), (
"source_info", source_info_to_dict(source_info))]))
elif len(context.invalid_header_writes) != 0:
result = TestPathResult.INVALID_HEADER_WRITE
invalid_header_write_data = []
for invalid_header_write in context.invalid_header_writes:
var_name, source_info = invalid_header_write
logging.error('Invalid header write of {} at {}'.format(
var_name, source_info))
invalid_header_write_data.append(
OrderedDict([("variable_name", var_name), (
"source_info", source_info_to_dict(source_info))]))
elif len(payload) >= Config().get_min_packet_len_generated():
if Config().get_run_simple_switch(
) and is_complete_control_path:
extracted_path = self.test_packet(payload, table_configs,
source_info_to_node_name)
if is_complete_control_path:
match = (expected_path == extracted_path)
else:
len1 = len(expected_path)
len2 = len(extracted_path)
match = (expected_path == extracted_path[0:len1]
) and len1 <= len2
else:
match = True
if match:
logging.info('Test successful: {}'.format(expected_path))
result = TestPathResult.SUCCESS
else:
logging.error('Expected and actual path differ')
logging.error('Expected: {}'.format(expected_path))
logging.error('Actual: {}'.format(extracted_path))
actual_path_data = extracted_path
result = TestPathResult.TEST_FAILED
assert False
else:
result = TestPathResult.PACKET_SHORTER_THAN_MIN
result = TestPathResult.SUCCESS
logging.warning('Packet not sent (%d bytes is shorter than'
' minimum %d supported)'
'' % (len(payload),
Config().get_min_packet_len_generated()))
else:
logging.info(
| |
if my_val != other_val:
return False
return True
def __ne__(self, other):
return not (self == other)
# Register the struct for the generated module's spec fix-up pass and attach
# its wire metadata.  Each thrift_spec slot is
# (field id, wire type, field name, type args, default).
all_structs.append(register_client_id_args)
register_client_id_args.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'block_id', None, None, ),  # 1
    (2, TType.I64, 'client_id', None, None, ),  # 2
)
class register_client_id_result(object):
    """Thrift-generated result struct for register_client_id.

    Declares no result fields (presumably a void-returning RPC --
    confirm against the .thrift IDL).  Generated code: do not edit
    serialization logic by hand.
    """
    __slots__ = (
    )
    def read(self, iprot):
        """Deserialize from *iprot*; any field found on the wire is skipped."""
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                # No declared fields: skip whatever the peer sent.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize to *oprot*; emits an empty struct."""
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('register_client_id_result')
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields, nothing to check.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, getattr(self, key))
             for key in self.__slots__]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        for attr in self.__slots__:
            my_val = getattr(self, attr)
            other_val = getattr(other, attr)
            if my_val != other_val:
                return False
        return True
    def __ne__(self, other):
        return not (self == other)
all_structs.append(register_client_id_result)
# Wire metadata: this result struct has no fields.
register_client_id_result.thrift_spec = (
)
class command_request_args(object):
    """Thrift-generated argument struct for the command_request RPC.

    Generated code: do not edit serialization logic by hand.

    Attributes:
     - seq: sequence_id struct identifying this request
     - block_id: i32 block identifier
     - arguments: list of binary argument blobs
    """
    __slots__ = (
        'seq',
        'block_id',
        'arguments',
    )
    def __init__(self, seq=None, block_id=None, arguments=None,):
        self.seq = seq
        self.block_id = block_id
        self.arguments = arguments
    def read(self, iprot):
        """Deserialize this struct from *iprot*.

        Fields arriving with an unexpected wire type, or with unknown
        field ids, are skipped.
        """
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.seq = sequence_id()
                    self.seq.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.block_id = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.LIST:
                    self.arguments = []
                    (_etype3, _size0) = iprot.readListBegin()
                    for _i4 in range(_size0):
                        _elem5 = iprot.readBinary()
                        self.arguments.append(_elem5)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('command_request_args')
        if self.seq is not None:
            oprot.writeFieldBegin('seq', TType.STRUCT, 1)
            self.seq.write(oprot)
            oprot.writeFieldEnd()
        if self.block_id is not None:
            oprot.writeFieldBegin('block_id', TType.I32, 2)
            oprot.writeI32(self.block_id)
            oprot.writeFieldEnd()
        if self.arguments is not None:
            oprot.writeFieldBegin('arguments', TType.LIST, 3)
            oprot.writeListBegin(TType.STRING, len(self.arguments))
            for iter6 in self.arguments:
                oprot.writeBinary(iter6)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields, nothing to check.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, getattr(self, key))
             for key in self.__slots__]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        for attr in self.__slots__:
            my_val = getattr(self, attr)
            other_val = getattr(other, attr)
            if my_val != other_val:
                return False
        return True
    def __ne__(self, other):
        return not (self == other)
all_structs.append(command_request_args)
# Wire metadata: (field id, wire type, field name, type args, default) per slot.
command_request_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'seq', [sequence_id, None], None, ),  # 1
    (2, TType.I32, 'block_id', None, None, ),  # 2
    (3, TType.LIST, 'arguments', (TType.STRING, 'BINARY', False), None, ),  # 3
)
class chain_request_args(object):
    """Thrift-generated argument struct for the chain_request RPC.

    Same field layout as command_request_args.  Generated code: do not
    edit serialization logic by hand.

    Attributes:
     - seq: sequence_id struct identifying this request
     - block_id: i32 block identifier
     - arguments: list of binary argument blobs
    """
    __slots__ = (
        'seq',
        'block_id',
        'arguments',
    )
    def __init__(self, seq=None, block_id=None, arguments=None,):
        self.seq = seq
        self.block_id = block_id
        self.arguments = arguments
    def read(self, iprot):
        """Deserialize this struct from *iprot*.

        Fields arriving with an unexpected wire type, or with unknown
        field ids, are skipped.
        """
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.seq = sequence_id()
                    self.seq.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.block_id = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.LIST:
                    self.arguments = []
                    (_etype10, _size7) = iprot.readListBegin()
                    for _i11 in range(_size7):
                        _elem12 = iprot.readBinary()
                        self.arguments.append(_elem12)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('chain_request_args')
        if self.seq is not None:
            oprot.writeFieldBegin('seq', TType.STRUCT, 1)
            self.seq.write(oprot)
            oprot.writeFieldEnd()
        if self.block_id is not None:
            oprot.writeFieldBegin('block_id', TType.I32, 2)
            oprot.writeI32(self.block_id)
            oprot.writeFieldEnd()
        if self.arguments is not None:
            oprot.writeFieldBegin('arguments', TType.LIST, 3)
            oprot.writeListBegin(TType.STRING, len(self.arguments))
            for iter13 in self.arguments:
                oprot.writeBinary(iter13)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields, nothing to check.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, getattr(self, key))
             for key in self.__slots__]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        for attr in self.__slots__:
            my_val = getattr(self, attr)
            other_val = getattr(other, attr)
            if my_val != other_val:
                return False
        return True
    def __ne__(self, other):
        return not (self == other)
all_structs.append(chain_request_args)
# Wire metadata: (field id, wire type, field name, type args, default) per slot.
chain_request_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'seq', [sequence_id, None], None, ),  # 1
    (2, TType.I32, 'block_id', None, None, ),  # 2
    (3, TType.LIST, 'arguments', (TType.STRING, 'BINARY', False), None, ),  # 3
)
class run_command_args(object):
    """Thrift-generated argument struct for the run_command RPC.

    Generated code: do not edit serialization logic by hand.

    Attributes:
     - block_id: i32 block identifier
     - arguments: list of binary argument blobs
    """
    __slots__ = (
        'block_id',
        'arguments',
    )
    def __init__(self, block_id=None, arguments=None,):
        self.block_id = block_id
        self.arguments = arguments
    def read(self, iprot):
        """Deserialize this struct from *iprot*.

        Fields arriving with an unexpected wire type, or with unknown
        field ids, are skipped.
        """
        # Fast path: C-accelerated decoder when protocol/transport support it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.block_id = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.arguments = []
                    (_etype17, _size14) = iprot.readListBegin()
                    for _i18 in range(_size14):
                        _elem19 = iprot.readBinary()
                        self.arguments.append(_elem19)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to *oprot*; None-valued fields are omitted."""
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('run_command_args')
        if self.block_id is not None:
            oprot.writeFieldBegin('block_id', TType.I32, 1)
            oprot.writeI32(self.block_id)
            oprot.writeFieldEnd()
        if self.arguments is not None:
            oprot.writeFieldBegin('arguments', TType.LIST, 2)
            oprot.writeListBegin(TType.STRING, len(self.arguments))
            for iter20 in self.arguments:
                oprot.writeBinary(iter20)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields, nothing to check.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, getattr(self, key))
             for key in self.__slots__]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        for attr in self.__slots__:
            my_val = getattr(self, attr)
            other_val = getattr(other, attr)
            if my_val != other_val:
                return False
        return True
    def __ne__(self, other):
        return not (self == other)
all_structs.append(run_command_args)
# Wire metadata: (field id, wire type, field name, type args, default) per slot.
run_command_args.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'block_id', None, None, ),  # 1
    (2, TType.LIST, 'arguments', (TType.STRING, 'BINARY', False), None, ),  # 2
)
class run_command_result(object):
    """
    Thrift result struct for the run_command RPC call.

    Attributes:
    - success: the RPC return value, a list of binary strings.
    """
    # Generated thrift code: only the declared slot is stored per instance.
    __slots__ = (
        'success',
    )
    def __init__(self, success=None,):
        self.success = success
    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Use the accelerated C decoder when available and the transport
        # supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                # Field 0 carries the return value: a list of binary blobs.
                if ftype == TType.LIST:
                    self.success = []
                    (_etype24, _size21) = iprot.readListBegin()
                    for _i25 in range(_size21):
                        _elem26 = iprot.readBinary()
                        self.success.append(_elem26)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip it for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        # Use the accelerated C encoder when it is available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('run_command_result')
        # The result field is only written when it has been set.
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRING, len(self.success))
            for iter27 in self.success:
                oprot.writeBinary(iter27)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No validation rules are defined for this struct.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, getattr(self, key))
             for key in self.__slots__]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        # Structs compare equal when types match and all slots are equal.
        if not isinstance(other, self.__class__):
            return False
        for attr in self.__slots__:
            my_val = getattr(self, attr)
            other_val = getattr(other, attr)
            if my_val != other_val:
                return False
        return True
    def __ne__(self, other):
        return not (self == other)
# Register the result struct for the accelerated protocol and describe its
# wire layout: field 0 is the RPC return value (list of binary strings).
all_structs.append(run_command_result)
run_command_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRING, 'BINARY', False), None, ), # 0
)
class subscribe_args(object):
"""
Attributes:
- block_id
- ops
"""
__slots__ = (
'block_id',
'ops',
)
def __init__(self, block_id=None, ops=None,):
    """Initialize the argument struct for a subscribe call."""
    self.block_id, self.ops = block_id, ops
def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Use the accelerated C decoder when available and the transport
    # supports it.
    if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
        iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
        return
    iprot.readStructBegin()
    while True:
        (fname, ftype, fid) = iprot.readFieldBegin()
        if ftype == TType.STOP:
            break
        if fid == 1:
            # Field 1: block_id (i32).
            if ftype == TType.I32:
                self.block_id = iprot.readI32()
            else:
                iprot.skip(ftype)
        elif fid == 2:
            # Field 2: ops (list<string>).
            if ftype == TType.LIST:
                self.ops = []
                (_etype31, _size28) = iprot.readListBegin()
                for _i32 in range(_size28):
                    _elem33 = iprot.readString()
                    self.ops.append(_elem33)
                iprot.readListEnd()
            else:
                iprot.skip(ftype)
        else:
            # Unknown field id: skip it for forward compatibility.
            iprot.skip(ftype)
        iprot.readFieldEnd()
    iprot.readStructEnd()
def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Use the accelerated C encoder when it is available.
    if oprot._fast_encode is not None and self.thrift_spec is not None:
        oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
        return
    oprot.writeStructBegin('subscribe_args')
    # Optional fields are only written when they have been set.
    if self.block_id is not None:
        oprot.writeFieldBegin('block_id', TType.I32, 1)
        oprot.writeI32(self.block_id)
        oprot.writeFieldEnd()
    if self.ops is not None:
        # Field 2: list of operation strings.
        oprot.writeFieldBegin('ops', TType.LIST, 2)
        oprot.writeListBegin(TType.STRING, len(self.ops))
        for iter34 in self.ops:
            oprot.writeString(iter34)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
def validate(self):
    """Validation is a no-op for this struct; it always succeeds."""
    return None
def __repr__(self):
L = ['%s=%r' % (key, getattr(self, key))
| |
# -*- coding: utf-8 -*-
"""Parser for PCAP files."""
import binascii
import operator
import socket
import dpkt
from plaso.events import time_events
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.parsers import interface
from plaso.parsers import manager
__author__ = '<NAME> (<EMAIL>)'
def ParseDNS(dns_packet_data):
  """Parse DNS packets and return a string with relevant details.

  Args:
    dns_packet_data: raw DNS packet payload (the UDP data).

  Returns:
    A human readable string describing the DNS query or response, or an
    error description when the packet could not be unpacked.
  """
  dns_data = []

  try:
    dns = dpkt.dns.DNS(dns_packet_data)
    # Compare protocol constants by value (==), never by identity (is):
    # integer identity is a CPython implementation detail.
    if dns.rcode == dpkt.dns.DNS_RCODE_NOERR:
      if dns.qr == 1:
        if not dns.an:
          dns_data.append(u'DNS Response: No answer for ')
          dns_data.append(dns.qd[0].name)
        else:
          # Format each answer according to its resource record type,
          # using the named dpkt constants instead of magic numbers.
          for answer in dns.an:
            if answer.type == dpkt.dns.DNS_CNAME:
              dns_data.append(u'DNS-CNAME request ')
              dns_data.append(answer.name)
              dns_data.append(u' response: ')
              dns_data.append(answer.cname)
            elif answer.type == dpkt.dns.DNS_A:
              dns_data.append(u'DNS-A request ')
              dns_data.append(answer.name)
              dns_data.append(u' response: ')
              dns_data.append(socket.inet_ntoa(answer.rdata))
            elif answer.type == dpkt.dns.DNS_PTR:
              dns_data.append(u'DNS-PTR request ')
              dns_data.append(answer.name)
              dns_data.append(u' response: ')
              dns_data.append(answer.ptrname)
      elif not dns.qr:
        dns_data.append(u'DNS Query for ')
        dns_data.append(dns.qd[0].name)
    else:
      dns_data.append(u'DNS error code ')
      dns_data.append(str(dns.rcode))

  except dpkt.UnpackError as exception:
    dns_data.append(u'DNS Unpack Error: {0:s}. First 20 of data {1:s}'.format(
        exception, repr(dns_packet_data[:20])))
  except IndexError as exception:
    dns_data.append(u'DNS Index Error: {0:s}'.format(exception))

  return u' '.join(dns_data)
def ParseNetBios(netbios_packet):
  """Parse the netBIOS stream details.

  Args:
    netbios_packet: NetBIOS packet.

  Returns:
    Formatted netBIOS details.
  """
  netbios_data = []
  # Walk the question (qd), answer (an) and name-service (ns) sections in
  # order, decoding the encoded NetBIOS name of each record.
  sections = [
      (u'NETBIOS qd:', netbios_packet.qd),
      (u'NETBIOS an:', netbios_packet.an),
      (u'NETBIOS ns:', netbios_packet.ns)]
  for label, records in sections:
    for record in records:
      netbios_data.append(label)
      netbios_data.append(repr(dpkt.netbios.decode_name(record.name)))
  return u' '.join(netbios_data)
def TCPFlags(flag):
  """Check the tcp flags for a packet for future use.

  Args:
    flag: Flag value from TCP packet.

  Returns:
    String with printable flags for specific packet.
  """
  # (mask, label) pairs, tested in the order the flags are reported.
  # Note the ECE bit is deliberately reported as 'ECN'.
  flag_labels = [
      (dpkt.tcp.TH_FIN, u'FIN'),
      (dpkt.tcp.TH_SYN, u'SYN'),
      (dpkt.tcp.TH_RST, u'RST'),
      (dpkt.tcp.TH_PUSH, u'PUSH'),
      (dpkt.tcp.TH_ACK, u'ACK'),
      (dpkt.tcp.TH_URG, u'URG'),
      (dpkt.tcp.TH_ECE, u'ECN'),
      (dpkt.tcp.TH_CWR, u'CWR')]
  return u'|'.join(label for mask, label in flag_labels if flag & mask)
def ICMPTypes(packet):
  """Parse the type information for the icmp packets.

  Args:
    packet: ICMP packet data.

  Returns:
    Formatted ICMP details.
  """
  icmp_type = packet.type
  icmp_code = packet.code
  icmp_data = []
  icmp_data.append(u'ICMP')

  # Compare protocol constants by value (==): the previous "is" comparisons
  # relied on CPython small-integer caching, which is an implementation
  # detail and does not hold for all constant values.
  # TODO: Make the below code more readable.
  # Possible to use lookup dict? Or method
  # calls?
  if icmp_type == dpkt.icmp.ICMP_CODE_NONE:
    icmp_data.append(u'ICMP without codes')
  elif icmp_type == dpkt.icmp.ICMP_ECHOREPLY:
    icmp_data.append(u'echo reply')
  elif icmp_type == dpkt.icmp.ICMP_UNREACH:
    icmp_data.append(u'ICMP dest unreachable')
    if icmp_code == dpkt.icmp.ICMP_UNREACH_NET:
      icmp_data.append(u': bad net')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_HOST:
      icmp_data.append(u': host unreachable')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_PROTO:
      icmp_data.append(u': bad protocol')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_PORT:
      icmp_data.append(u': port unreachable')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_NEEDFRAG:
      icmp_data.append(u': IP_DF caused drop')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_SRCFAIL:
      icmp_data.append(u': src route failed')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_NET_UNKNOWN:
      icmp_data.append(u': unknown net')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_HOST_UNKNOWN:
      icmp_data.append(u': unknown host')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_ISOLATED:
      icmp_data.append(u': src host isolated')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_NET_PROHIB:
      icmp_data.append(u': for crypto devs')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_HOST_PROHIB:
      # Typo fix: was 'cypto'.
      icmp_data.append(u': for crypto devs')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_TOSNET:
      icmp_data.append(u': bad tos for net')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_TOSHOST:
      icmp_data.append(u': bad tos for host')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_FILTER_PROHIB:
      icmp_data.append(u': prohibited access')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_HOST_PRECEDENCE:
      icmp_data.append(u': precedence error')
    elif icmp_code == dpkt.icmp.ICMP_UNREACH_PRECEDENCE_CUTOFF:
      icmp_data.append(u': precedence cutoff')
  elif icmp_type == dpkt.icmp.ICMP_SRCQUENCH:
    icmp_data.append(u'ICMP source quench')
  elif icmp_type == dpkt.icmp.ICMP_REDIRECT:
    icmp_data.append(u'ICMP Redirect')
    if icmp_code == dpkt.icmp.ICMP_REDIRECT_NET:
      icmp_data.append(u' for network')
    elif icmp_code == dpkt.icmp.ICMP_REDIRECT_HOST:
      icmp_data.append(u' for host')
    elif icmp_code == dpkt.icmp.ICMP_REDIRECT_TOSNET:
      icmp_data.append(u' for tos and net')
    elif icmp_code == dpkt.icmp.ICMP_REDIRECT_TOSHOST:
      icmp_data.append(u' for tos and host')
  elif icmp_type == dpkt.icmp.ICMP_ALTHOSTADDR:
    icmp_data.append(u'ICMP alternate host address')
  elif icmp_type == dpkt.icmp.ICMP_ECHO:
    icmp_data.append(u'ICMP echo')
  elif icmp_type == dpkt.icmp.ICMP_RTRADVERT:
    icmp_data.append(u'ICMP Route advertisement')
    if icmp_code == dpkt.icmp.ICMP_RTRADVERT_NORMAL:
      icmp_data.append(u': normal')
    elif icmp_code == dpkt.icmp.ICMP_RTRADVERT_NOROUTE_COMMON:
      icmp_data.append(u': selective routing')
  elif icmp_type == dpkt.icmp.ICMP_RTRSOLICIT:
    icmp_data.append(u'ICMP Router solicitation')
  elif icmp_type == dpkt.icmp.ICMP_TIMEXCEED:
    icmp_data.append(u'ICMP time exceeded, code:')
    if icmp_code == dpkt.icmp.ICMP_TIMEXCEED_INTRANS:
      icmp_data.append(u' ttl==0 in transit')
    elif icmp_code == dpkt.icmp.ICMP_TIMEXCEED_REASS:
      icmp_data.append(u'ttl==0 in reass')
  elif icmp_type == dpkt.icmp.ICMP_PARAMPROB:
    icmp_data.append(u'ICMP ip header bad')
    if icmp_code == dpkt.icmp.ICMP_PARAMPROB_ERRATPTR:
      icmp_data.append(u':req. opt. absent')
    elif icmp_code == dpkt.icmp.ICMP_PARAMPROB_OPTABSENT:
      icmp_data.append(u': req. opt. absent')
    elif icmp_code == dpkt.icmp.ICMP_PARAMPROB_LENGTH:
      icmp_data.append(u': length')
  elif icmp_type == dpkt.icmp.ICMP_TSTAMP:
    icmp_data.append(u'ICMP timestamp request')
  elif icmp_type == dpkt.icmp.ICMP_TSTAMPREPLY:
    icmp_data.append(u'ICMP timestamp reply')
  elif icmp_type == dpkt.icmp.ICMP_INFO:
    icmp_data.append(u'ICMP information request')
  elif icmp_type == dpkt.icmp.ICMP_INFOREPLY:
    icmp_data.append(u'ICMP information reply')
  elif icmp_type == dpkt.icmp.ICMP_MASK:
    icmp_data.append(u'ICMP address mask request')
  elif icmp_type == dpkt.icmp.ICMP_MASKREPLY:
    icmp_data.append(u'ICMP address mask reply')
  elif icmp_type == dpkt.icmp.ICMP_TRACEROUTE:
    icmp_data.append(u'ICMP traceroute')
  elif icmp_type == dpkt.icmp.ICMP_DATACONVERR:
    icmp_data.append(u'ICMP data conversion error')
  elif icmp_type == dpkt.icmp.ICMP_MOBILE_REDIRECT:
    icmp_data.append(u'ICMP mobile host redirect')
  elif icmp_type == dpkt.icmp.ICMP_IP6_WHEREAREYOU:
    icmp_data.append(u'ICMP IPv6 where-are-you')
  elif icmp_type == dpkt.icmp.ICMP_IP6_IAMHERE:
    icmp_data.append(u'ICMP IPv6 i-am-here')
  elif icmp_type == dpkt.icmp.ICMP_MOBILE_REG:
    icmp_data.append(u'ICMP mobile registration req')
  elif icmp_type == dpkt.icmp.ICMP_MOBILE_REGREPLY:
    icmp_data.append(u'ICMP mobile registration reply')
  elif icmp_type == dpkt.icmp.ICMP_DNS:
    icmp_data.append(u'ICMP domain name request')
  elif icmp_type == dpkt.icmp.ICMP_DNSREPLY:
    icmp_data.append(u'ICMP domain name reply')
  elif icmp_type == dpkt.icmp.ICMP_PHOTURIS:
    icmp_data.append(u'ICMP Photuris')
    if icmp_code == dpkt.icmp.ICMP_PHOTURIS_UNKNOWN_INDEX:
      icmp_data.append(u': unknown sec index')
    elif icmp_code == dpkt.icmp.ICMP_PHOTURIS_AUTH_FAILED:
      icmp_data.append(u': auth failed')
    elif icmp_code == dpkt.icmp.ICMP_PHOTURIS_DECOMPRESS_FAILED:
      icmp_data.append(u': decompress failed')
    elif icmp_code == dpkt.icmp.ICMP_PHOTURIS_DECRYPT_FAILED:
      icmp_data.append(u': decrypt failed')
    elif icmp_code == dpkt.icmp.ICMP_PHOTURIS_NEED_AUTHN:
      icmp_data.append(u': no authentication')
    elif icmp_code == dpkt.icmp.ICMP_PHOTURIS_NEED_AUTHZ:
      icmp_data.append(u': no authorization')
  elif icmp_type == dpkt.icmp.ICMP_TYPE_MAX:
    icmp_data.append(u'ICMP Type Max')
  return u' '.join(icmp_data)
class Stream(object):
"""Used to store packet details on network streams parsed from a pcap file."""
def __init__(self, packet, prot_data, source_ip, dest_ip, prot):
  """Initialize new stream.

  Args:
    packet: packet data sequence; index 0 is the timestamp, index 1 the
        packet identifier and index 3 the packet size.
    prot_data: protocol level data for ARP, UDP, RCP, ICMP.
        For other types of ether packets, this is just the ether.data.
    source_ip: source IP.
    dest_ip: destination IP.
    prot: protocol name (TCP, UDP, ICMP, ARP).
  """
  self.all_data = [prot_data]
  self.dest_ip = dest_ip
  self.packet_id = [packet[1]]
  self.protocol = prot
  self.protocol_data = u''
  self.size = packet[3]
  self.source_ip = source_ip
  self.start_time = packet[0]
  self.stream_data = b''
  self.timestamps = [packet[0]]
  # Only TCP and UDP carry port numbers; other protocols get blanks.
  if prot in (u'TCP', u'UDP'):
    self.source_port = prot_data.sport
    self.dest_port = prot_data.dport
  else:
    self.source_port = u''
    self.dest_port = u''
def AddPacket(self, packet, prot_data):
  """Add another packet to an existing stream.

  Args:
    packet: packet data sequence; index 0 is the timestamp, index 1 the
        packet identifier and index 3 the packet size.
    prot_data: protocol level data for ARP, UDP, RCP, ICMP.
        For other types of ether packets, this is just the ether.data.
  """
  # Grow the running byte count and record the packet's metadata.
  self.size += packet[3]
  self.timestamps.append(packet[0])
  self.packet_id.append(packet[1])
  self.all_data.append(prot_data)
def SpecialTypes(self):
"""Checks for some special types of packets.
This method checks for some special packets and assembles usable data
currently works for: DNS (udp 53), http, netbios (udp 137), ICMP.
Returns:
A tuple consisting of a basic desctiption of the stream
(i.e. HTTP Request) and the prettyfied string for the protocols.
"""
packet_details = []
if self.stream_data[:4] == b'HTTP':
try:
http = dpkt.http.Response(self.stream_data)
packet_details.append(u'HTTP Response: status: ')
packet_details.append(http.status)
packet_details.append(u' reason: ')
packet_details.append(http.reason)
packet_details.append(u' version: ')
packet_details.append(http.version)
return u'HTTP Response', u' '.join(packet_details)
except dpkt.UnpackError as exception:
packet_details = (
u'HTTP Response Unpack Error: {0:s}. '
u'First 20 of data {1:s}').format(
exception, repr(self.stream_data[:20]))
return u'HTTP Response', packet_details
except IndexError as exception:
packet_details = (
u'HTTP Response Index Error: {0:s}. First 20 of data {1:s}').format(
exception, repr(self.stream_data[:20]))
return u'HTTP Response', packet_details
except ValueError as exception:
packet_details = (
u'HTTP Response parsing error: {0:s}. '
u'First 20 of data {1:s}').format(
exception, repr(self.stream_data[:20]))
return u'HTTP Response', packet_details
elif self.stream_data[:3] == b'GET' or self.stream_data[:4] == b'POST':
try:
http = dpkt.http.Request(self.stream_data)
packet_details.append(u'HTTP Request: method: ')
packet_details.append(http.method)
packet_details.append(' uri: ')
packet_details.append(http.uri)
packet_details.append(' version: ')
packet_details.append(http.version)
packet_details.append(' headers: ')
packet_details.append(repr(http.headers))
return u'HTTP Request', u' '.join(packet_details)
except dpkt.UnpackError as exception:
packet_details = (
u'HTTP Request unpack error: {0:s}. First 20 of data {1:s}').format(
exception, repr(self.stream_data[:20]))
return u'HTTP Request', packet_details
except ValueError as exception:
packet_details = (
u'HTTP Request parsing error: {0:s}. '
u'First 20 of data {1:s}').format(
exception, repr(self.stream_data[:20]))
return u'HTTP Request', packet_details
elif self.protocol == u'UDP' and (
self.source_port == 53 or self.dest_port == 53):
# DNS request/replies.
# Check to see if the lengths are valid.
for packet in self.all_data:
if not packet.ulen == len(packet):
packet_details.append(u'Truncated DNS packets - unable to parse: ')
packet_details.append(repr(self.stream_data[15:40]))
return u'DNS', u' '.join(packet_details)
return u'DNS', ParseDNS(self.stream_data)
elif self.protocol == u'UDP' and (
self.source_port == 137 or self.dest_port == 137):
return u'NetBIOS', ParseNetBios(dpkt.netbios.NS(self.stream_data))
elif self.protocol == u'ICMP':
# ICMP packets all end up as 1 stream, so they need to be
# processed 1 by 1.
return u'ICMP', ICMPTypes(self.all_data[0])
elif b'\x03\x01' in self.stream_data[1:3]:
# Some form of ssl3 data.
try:
ssl = dpkt.ssl.SSL2(self.stream_data)
packet_details.append(u'SSL data. Length: ')
packet_details.append(str(ssl.len))
return u'SSL', u' '.join(packet_details)
except dpkt.UnpackError as exception:
packet_details = (
u'SSL unpack | |
the frames visible.
pedantic: boolean whether to show aliasing for simple values.
If pedantic is false, simple values are replicated, rather
than, for example, having all references to 1 refer to the
same int object.
"""
Gui.__init__(self, debug)
self.pedantic = pedantic
self.withdraw()
# initially there is no object diagram, no class diagram
# and no representation of the stack.
self.od = None
self.cd = None
self.stack = None
# instance_vars maps from classes to the instance vars
# that are drawn for that class; for opaque classes, it
# is an empty list.
# an instance of an opaque class is shown with a small empty box;
# the contents are not shown.
self.instance_vars = {}
# the following classes are opaque by default
self.opaque_class(Lumpy)
self.opaque_class(object)
self.opaque_class(type(make_thing)) # function
self.opaque_class(Exception)
self.opaque_class(set) # I don't remember why
# any object that belongs to a class in the Tkinter module
# is opaque (the name of the module depends on the Python version)
self.opaque_module(TKINTER_MODULE)
# by default, class objects and module objects are opaque
classobjtype = type(Lumpy)
self.opaque_class(classobjtype)
modtype = type(inspect)
self.opaque_class(modtype)
# the __class__ of a new-style object is a type object.
# when type objects are drawn, show only the __name__
self.opaque_class(type)
self.make_reference()
def restrict_class(self, classobj, variables=None):
    """Restricts a class so that only the given variables are shown.

    Args:
        classobj: class (or type) object to restrict.
        variables: list of instance variable names to display; None means
            show no variables at all (the class becomes opaque).
    """
    # Identity test: "variables == None" would invoke __eq__ on arbitrary
    # list arguments; "is None" is the correct check.
    if variables is None:
        variables = []
    self.instance_vars[classobj] = variables
def opaque_class(self, classobj):
    """Restricts a class so that no variables are shown.

    An opaque class is drawn as a small empty box whose contents are
    hidden; passing None restricts the class to an empty variable list.
    """
    self.restrict_class(classobj, None)
def is_opaque(self, classobj):
    """Checks whether this class is completely opaque.

    (restricted to _no_ instance variables)
    """
    # A class is opaque when it has been restricted to an empty list of
    # instance variables; unrestricted classes are not opaque.
    if classobj not in self.instance_vars:
        return False
    return not self.instance_vars[classobj]
def transparent_class(self, classobj):
    """Unrestricts a class so its variables are shown.

    If the class is not restricted, raise an exception."""
    # pop() raises KeyError for unknown keys, matching the original
    # "del" behaviour.
    self.instance_vars.pop(classobj)
def opaque_module(self, modobj):
    """Makes all classes defined in this module opaque.

    Args:
        modobj: the module object whose classes should be hidden.
    """
    # Only the values are needed -- iterating items() and discarding the
    # key was wasteful.  values() also behaves identically on Python 2
    # and 3, unlike the previous iteritems().
    for val in modobj.__dict__.values():
        if isinstance(val, type(Lumpy)):
            self.opaque_class(val)
def make_reference(self):
    """Takes a snapshot of the current state.

    Subsequent diagrams will be relative to this reference.
    """
    # Delegate to a helper so that the reference snapshot and later
    # snapshots are taken at the same stack depth.
    self._make_reference_helper()
def _make_reference_helper(self):
    """Takes the reference snapshot.

    This extra method call is here so that the reference
    and the snapshot we take later have the same number of
    frames on the stack. UGH.
    """
    # Snapshot() records the current program state (see Snapshot).
    self.ref = Snapshot()
def make_stack(self):
    """Takes a snapshot of the current state.

    Subtract away the frames and variables that existed in the
    previous reference, then makes a Stack.
    """
    self.snapshot = Snapshot()
    self.snapshot.clean(self.ref)
    # Reset the value/class registries before building the Stack.
    # NOTE(review): Stack() construction presumably repopulates these via
    # register() -- confirm against the Stack implementation.
    self.values = {}
    self.classes = []
    self.stack = Stack(self, self.snapshot)
def register(self, thing, val):
    """Associates a value with the Thing that represents it.

    Later lookups can then find the existing Thing for a value
    instead of creating a duplicate.
    """
    thing.lumpy, thing.val = self, val
    # Key by id() so unhashable values can be tracked as well.
    self.values[id(val)] = thing
def lookup(self, val):
    """Check whether a value is already represented by a Thing.

    Returns:
        the existing Thing, or None when the value is unknown.
    """
    # dict.get already defaults to None for missing ids.
    return self.values.get(id(val))
def object_diagram(self, obj=None, loop=True):
    """Creates a new object diagram based on the current state.

    If an object is provided, draws the object. Otherwise, draws
    the current run-time stack (relative to the last reference).

    Args:
        obj: optional object to draw instead of the stack.
        loop: if True, enter the event loop and block until the
            window is closed.

    Returns:
        the value of Thing.things_drawn.
    """
    if obj:
        thing = make_thing(self, obj)
    else:
        # Identity comparison with None ("is None"), not "== None".
        if self.stack is None:
            self.make_stack()
        thing = self.stack
    # if there is already an Object Diagram, clear it; otherwise,
    # create one
    if self.od:
        self.od.clear()
    else:
        self.od = ObjectDiagram(self)
    # draw the object or stack, then the arrows
    self.od.draw(thing)
    self.od.draw_arrows()
    # wait for the user
    if loop:
        self.mainloop()
    return Thing.things_drawn
def class_diagram(self, classes=None, loop=True):
    """Create a new class diagram based on the current state.

    If a list of classes is provided, only those classes are
    shown. Otherwise, all classes that Lumpy knows about are shown.

    Args:
        classes: optional list of class objects to restrict the diagram.
        loop: if True, enter the event loop and block until the
            window is closed.

    Returns:
        the value of Thing.things_drawn.
    """
    # if there is not already a snapshot, make one
    # (identity comparison with None, not "== None")
    if self.stack is None:
        self.make_stack()
    # scan the stack looking for has-a relationships (note that we
    # can't do this until the stack is complete)
    for val in self.values.values():
        if isinstance(val, Instance) and val.cls is not None:
            val.scan_bindings(val.cls)
    # if there is already a class diagram, clear it; otherwise
    # create one
    if self.cd:
        self.cd.clear()
    else:
        self.cd = ClassDiagram(self, classes)
    self.cd.draw()
    if loop:
        self.mainloop()
    return Thing.things_drawn
def get_class_list(self):
    """Returns list of classes that should be drawn in a class diagram.

    A class is included when it is not opaque, or when it participates
    in an inheritance relationship (has parents or children).
    """
    return [cls for cls in self.classes
            if not self.is_opaque(cls.classobj) or cls.parents or cls.childs]
class Diagram(object):
    """Parent class for ClassDiagram and ObjectDiagram.

    Owns the toplevel window, the scrollable canvas the diagram is drawn
    on, and the list of arrows connecting drawn items.
    """
    def __init__(self, lumpy, title):
        self.lumpy = lumpy
        self.arrows = []
        # Create the toplevel window and route its close button through
        # self.close so the mainloop is terminated cleanly.
        self.tl = lumpy.tl()
        self.tl.title(title)
        self.tl.geometry('+0+0')
        self.tl.protocol("WM_DELETE_WINDOW", self.close)
        self.setup()
    def ca(self, width=100, height=100, **options):
        """make a canvas for the diagram"""
        return self.lumpy.widget(DiagCanvas, width=width, height=height,
                                 **options)
    def setup(self):
        """create the gui for the diagram"""
        # push the frame for the toplevel window
        self.lumpy.pushfr(self.tl)
        self.lumpy.col([0, 1])
        # the frame at the top contains buttons
        self.lumpy.row([0, 0, 1], bg='white')
        self.lumpy.bu(text='Close', command=self.close)
        self.lumpy.bu(text='Print to file:', command=self.printfile_callback)
        self.en = self.lumpy.en(width=10, text='lumpy.ps')
        self.en.bind('<Return>', self.printfile_callback)
        self.la = self.lumpy.la(width=40)
        self.lumpy.endrow()
        # the grid contains the canvas and scrollbars
        self.lumpy.gr(2, [1, 0])
        self.ca_width = 1000
        self.ca_height = 500
        self.canvas = self.ca(self.ca_width, self.ca_height, bg='white')
        yb = self.lumpy.sb(command=self.canvas.yview, sticky=N+S)
        xb = self.lumpy.sb(command=self.canvas.xview, orient=HORIZONTAL,
                           sticky=E+W)
        self.canvas.configure(xscrollcommand=xb.set, yscrollcommand=yb.set,
                              scrollregion=(0, 0, 800, 800))
        self.lumpy.endgr()
        self.lumpy.endcol()
        self.lumpy.popfr()
        # measure some sample letters to get the text height
        # and set the scale factor for the canvas accordingly
        self.canvas.clear_transforms()
        bbox = self.canvas.measure(['bdfhklgjpqy'])
        self.unit = 1.0 * bbox.height()
        transform = ScaleTransform([self.unit, self.unit])
        self.canvas.add_transform(transform)
    def printfile_callback(self, event=None):
        """Dumps the contents of the canvas to a file.

        Gets the filename from the filename entry.
        """
        filename = self.en.get()
        self.printfile(filename)
    def printfile(self, filename):
        """Dumps the contents of the canvas to a file.

        filename: string output file name
        """
        # shrinkwrap the canvas
        bbox = self.canvas.bbox(ALL)
        width = bbox.right*self.unit
        height = bbox.bottom*self.unit
        self.canvas.config(width=width, height=height)
        # write the file
        self.canvas.dump(filename)
        # restore the on-screen canvas size afterwards
        self.canvas.config(width=self.ca_width, height=self.ca_height)
        self.la.config(text='Wrote file ' + filename)
    def close(self):
        """close the window and exit"""
        self.tl.withdraw()
        self.lumpy.quit()
    def add_arrow(self, arrow):
        """append a new arrow on the list"""
        self.arrows.append(arrow)
    def draw_arrows(self):
        """draw all the arrows on the list"""
        for arrow in self.arrows:
            arrow.draw(self)
    def update_arrows(self, n=None):
        """update up to n arrows (or all of them if n==None)"""
        # NOTE(review): the count check runs after the update, so n+1
        # arrows are updated before the loop breaks -- confirm intent.
        i = 0
        for arrow in self.arrows:
            arrow.update()
            i += 1
            if n and i > n: break
class ObjectDiagram(Diagram):
    """Represents an object diagram."""

    def __init__(self, lumpy=None):
        Diagram.__init__(self, lumpy, 'Object Diagram')

    def draw(self, thing):
        """Draws the top-level Thing and returns the drawn items."""
        result = thing.draw(self, Point([2, 2]), flip=1)
        # Make the scrollbars cover the newly drawn content.
        self.canvas.scroll_config()
        return result

    def clear(self):
        """Clears the diagram so it can be redrawn."""
        self.arrows = []
        self.tl.deiconify()
        self.canvas.delete(ALL)
class ClassDiagram(Diagram):
"""Represents a class diagram."""
def __init__(self, lumpy, classes=None):
    # classes: optional list restricting which classes are drawn; None
    # means draw every class Lumpy has seen (see draw()).
    Diagram.__init__(self, lumpy, 'Class Diagram')
    self.classes = classes
def draw(self):
    """Draw the class diagram.

    Includes the classes in self.classes,
    or if there are none, then all the classes Lumpy has seen.
    """
    pos = Point([2, 2])
    # Identity comparison with None ("is None"), not "== None".
    if self.classes is None:
        classes = self.lumpy.get_class_list()
    else:
        classes = [make_thing(self.lumpy, cls) for cls in self.classes]
    # find the classes that have no parents, and find the
    # height of each tree
    roots = [c for c in classes if c.parents == []]
    for root in roots:
        root.set_height()
    # for all the leaf nodes, compute the distance to
    # the parent
    leafs = [c for c in classes if c.childs == []]
    for leaf in leafs:
        leaf.set_depth()
    # if we're drawing all the classes, start with the roots;
    # otherwise draw the classes we were given.
    # (the previous "drawn = ..." assignments were unused and dropped)
    if self.classes is None:
        self.draw_classes(roots, pos)
    else:
        self.draw_classes(classes, pos)
    self.draw_arrows()
    # configure the scroll region
    self.canvas.scroll_config()
def draw_classes(self, classes, pos, tags=tuple()):
"""Draw this list of classes and all their subclasses.
Starts at the given position.
Returns:
list of all classes drawn
"""
p = pos.copy()
alldrawn = []
for c in classes:
drawn = c.cdc.draw(self, p, tags)
alldrawn.extend(drawn)
# TODO: change this so it finds the bottom-most bbox in drawn
bbox = c.cdc.get_bbox()
for thing in alldrawn:
if | |
request and disconnects the client from the server \
implicitly closing any subscriptions that the client has open. \
Once the stop has completed the optional callback is performed.
:type on_stopped: function or None
:param on_stopped: function to call when the connection is \
closed. This function prototype must be ``func(client, err)`` \
where ``client`` is the instance that has stopped and \
``err`` will contain any error report that occurred during the \
stop request
:return: The Client instance.
:raises TypeError: if the type of any of the arguments is incorrect.
:raises InvalidArgumentError: if any of the arguments are invalid.
:raises TypeError: if the on_stopped argument was not a function
"""
LOG.entry('Client.stop', self._id)
validate_callback_function('on_stopped', on_stopped, 2, 0)
LOG.parms(self._id, 'on_stopped:', on_stopped)
# Cancel retry timer
if self._retry_timer:
self._retry_timer.cancel()
# just return if already stopped or in the process of
# stopping
if self.is_stopped():
if on_stopped:
LOG.entry('Client.stop.on_stopped', self._id)
on_stopped(self, None)
LOG.exit('Client.stop.on_stopped', self._id, None)
LOG.exit('Client.stop', self._id, self)
return self
self._perform_disconnect(on_stopped)
LOG.exit('Client.stop', self._id, self)
return self
def _perform_disconnect(self, on_stopped, error=None):
    """
    Performs the disconnection.

    Waits for outstanding sends to drain, stops the messenger, closes the
    socket, fails any queued sends and moves the client to STOPPED.

    :param on_stopped: optional callback invoked once the stop completes.
    :param error: optional error report that triggered the disconnect.
    """
    LOG.entry('Client._perform_disconnect', self._id)
    LOG.parms(self._id, 'on_stopped:', on_stopped)
    LOG.parms(self._id, 'error:', error)
    self._set_state(STOPPING, error)
    # Give the connect thread and retry timer a chance to finish so they
    # do not race with the disconnect.
    if self._connect_thread and \
            self._connect_thread != threading.current_thread():
        self._connect_thread.join(1)
    if self._retry_timer:
        self._retry_timer.join(1)
    # Only disconnect when all outstanding send operations are complete.
    # NOTE(review): if _outstanding_sends is a queue.Queue this should be
    # empty() -- a bare method reference is always truthy; confirm the
    # attribute is a property before changing it.
    if self._outstanding_sends.empty:
        def stop_processing(client, on_stopped):
            """Final teardown once the messenger has stopped."""
            LOG.entry(
                'Client._perform_disconnect.stop_processing',
                self._id)
            if client._heartbeat_timeout:
                client._heartbeat_timeout.cancel()
                client._heartbeat_timeout = None
            if self._sock:
                self._sock.close()
                self._sock = None
            # Clear all queued sends as we are disconnecting.  Bind each
            # message as a default argument: a plain closure is late-bound,
            # so every timer would otherwise report the *last* message.
            while self._queued_sends:
                msg = self._queued_sends.pop(0)
                def next_tick(msg=msg):
                    """Fail the aborted send on a later tick."""
                    LOG.entry(
                        'Client._perform_disconnect.next_tick',
                        self._id)
                    msg.on_sent(
                        StoppedError(
                            'send aborted due to disconnect'),
                        None,
                        None,
                        None)
                    LOG.exit(
                        'Client._perform_disconnect.next_tick',
                        self._id,
                        None)
                timer = threading.Timer(1, next_tick)
                timer.start()
            # Discard current subscriptions
            self._subscriptions.clear()
            # Indicate that we've disconnected
            client._set_state(STOPPED)
            # Wakeup the action_handler_thread
            self.action_handler.wakeup()
            LOG.state(
                'Client._perform_disconnect.stop_processing',
                self._id,
                STOPPED)
            if not self._first_start:
                self._first_start = True
            if self._on_state_changed:
                self._on_state_changed(self, STOPPED, None)
            if on_stopped:
                LOG.entry(
                    'Client._perform_disconnect.on_stopped',
                    self._id)
                on_stopped(self, None)
                LOG.exit(
                    'Client._perform_disconnect.on_stopped',
                    self._id,
                    None)
            LOG.exit(
                'Client._perform_disconnect.stop_processing',
                self._id,
                None)
        self._stop_messenger(stop_processing, on_stopped)
        LOG.exit('Client._perform_disconnect', self._id, None)
        return
    # Outstanding sends remain: try the disconnect again shortly.
    timer = threading.Timer(1, self._perform_disconnect, [on_stopped])
    timer.start()
    LOG.exit('Client._perform_disconnect', self._id, None)
def _stop_messenger(self, stop_processing_callback, callback=None):
    """
    Function to trigger the client to disconnect.

    Requests the messenger to stop and, once it has stopped, cancels the
    heartbeat timer and hands control to stop_processing_callback.
    """
    LOG.entry('Client._stop_messenger', self._id)
    LOG.parms(
        NO_CLIENT_ID,
        'stop_processing_callback:',
        stop_processing_callback)
    LOG.parms(NO_CLIENT_ID, 'callback:', callback)
    # Without a messenger there is nothing to stop, so treat it as
    # already stopped.
    stopped = self._messenger.stop(self._sock, 100) if self._messenger else True
    if stopped:
        if self._heartbeat_timeout:
            self._heartbeat_timeout.cancel()
            self._heartbeat_timeout = None
        stop_processing_callback(self, callback)
    LOG.exit('Client._stop_messenger', self._id, None)
def _connect_to_service(self, callback, services):
    """
    Function to connect to the service, tries each available service in
    turn. If none can connect it emits an error, waits and attempts to
    connect again. Callback happens once a successful connect/reconnect
    occurs
    """
    LOG.entry('Client._connect_to_service', self._id)
    LOG.parms(NO_CLIENT_ID, 'callback:', callback)
    LOG.parms(NO_CLIENT_ID, 'services:', services)
    # A stop request may have arrived while we were waiting to connect;
    # report it and bail out immediately.
    if self.is_stopped():
        if callback and self._on_state_changed is not None:
            LOG.entry('Client._connect_to_service.callback', self._id)
            self._on_state_changed(
                self, STOPPED,
                StoppedError('connect aborted due to stop request'))
            LOG.exit('Client._connect_to_service.callback', self._id, None)
        LOG.exit('Client._connect_to_service', self._id, None)
        return
    error = None
    connected = False
    # Try each service in turn until we can successfully connect, or
    # exhaust the list
    # NOTE(review): the enumerate() index is unused.
    for i, service in enumerate(services):
        if connected:
            break
        try:
            LOG.data(self._id, 'attempting to connect to:', str(service))
            try:
                self._sock = _MQLightSocket(
                    service,
                    self._security_options,
                    self._queue_on_read,
                    self._queue_on_close)
                self._messenger.connect(service)
                # Wait for client to start
                while not self._messenger.started():
                    # Pass any data to proton.
                    self._messenger.pop(self._sock, False)
                    if self.state not in (RETRYING, STARTING):
                        # Don't keep waiting if we're no longer in a
                        # starting state
                        LOG.data(self._id, 'client no longer starting')
                        break
                    time.sleep(0.5)
                else:
                    # while/else: only reached when the loop completed
                    # without break, i.e. the messenger actually started.
                    connected = True
            except (ssl.SSLError) as exc:
                # TLS handshake failures are reported as security errors.
                error = SecurityError(exc)
                LOG.data(
                    self._id,
                    'failed to connect to: {0} due to error: {1}'.format(
                        service, error))
            except (NetworkError, SecurityError) as exc:
                error = exc
                LOG.data(
                    self._id,
                    'failed to connect to: {0} due to error: {1}'.format(
                        service, error))
            except Exception as exc:
                # Unexpected failure: capture first-failure data for
                # servicing, then fall through to try the next service.
                LOG.ffdc(
                    'Client._connect_to_service',
                    'ffdc012',
                    self._id,
                    err=sys.exc_info())
                error = exc
            if connected:
                LOG.data(
                    self._id,
                    'successfully connected to:',
                    service)
                self._service = service
                # Indicate that we're connected
                self._set_state(STARTED)
                event_to_emit = None
                if self._first_start:
                    event_to_emit = STARTED
                    self._first_start = False
                    self._retry_count = 0
                    LOG.data(self._id, 'first start since being stopped')
                    self._process_queued_actions()
                else:
                    self._retry_count = 0
                    event_to_emit = STARTED
                self._connection_id += 1
                # Fire callbacks
                LOG.state(
                    'Client._connect_to_service',
                    self._id,
                    event_to_emit)
                if self._on_state_changed:
                    self.create_thread(
                        target=self._on_state_changed,
                        args=(self, event_to_emit, None),
                        name=':on_state_changed')
                if callback:
                    self.create_thread(
                        target=callback,
                        args=(self,),
                        name=':callback')
                # Setup heartbeat timer to ensure that while connected we
                # send heartbeat frames to keep the connection alive, when
                # required.
                timeout = self._messenger.get_remote_idle_timeout(
                    service.address_only)
                # Heartbeat at half the remote idle timeout.
                interval = timeout / 2 if timeout > 0 else timeout
                LOG.data(self._id, 'heartbeat_interval:', interval)
                if interval > 0:
                    def perform_heartbeat(interval):
                        # Recurring timer: sends a heartbeat and, while
                        # the messenger is still active, reschedules
                        # itself.
                        LOG.entry(
                            'Client._connect_to_service.perform_heartbeat',
                            self._id)
                        if self._messenger:
                            self._messenger.heartbeat(self._sock)
                            self._heartbeat_timeout = threading.Timer(
                                interval,
                                perform_heartbeat,
                                [interval])
                            self._heartbeat_timeout.start()
                        LOG.exit(
                            'Client._connect_to_service.perform_heartbeat',
                            self._id,
                            None)
                    self._heartbeat_timeout = threading.Timer(
                        interval,
                        perform_heartbeat,
                        [interval])
                    self._heartbeat_timeout.start()
        except Exception as exc:
            # Should never get here, as it means that messenger.connect has
            # been called in an invalid way, so FFDC
            error = exc
            LOG.ffdc(
                'Client._connect_to_service',
                'ffdc003',
                self._id,
                err=sys.exc_info())
            raise MQLightError(exc)
    if not connected and not self.is_stopped():
        def retry():
            # Timer callback: re-run the connect sequence unless the
            # client was stopped in the meantime.
            LOG.entry('Client._connect_to_service.retry', self._id)
            if not self.is_stopped():
                self._perform_connect(callback, False)
            LOG.exit(
                'Client._connect_to_service.retry',
                self._id,
                None)
        if _should_reconnect(error):
            # We've tried all services without success. Pause for a while
            # before trying again
            # Exponential backoff with jitter, capped at retry_cap seconds.
            self._set_state(RETRYING, error)
            self._retry_count += 1
            retry_cap = 60
            # limit to the power of 8 as anything above this will put the
            # interval higher than the cap straight away.
            exponent = self._retry_count if self._retry_count <= 8 else 8
            upper_bound = pow(2, exponent)
            lower_bound = 0.75 * upper_bound
            jitter = random() * (0.25 * upper_bound)
            interval = min(retry_cap, (lower_bound + jitter))
            # times by CONNECT_RETRY_INTERVAL for unittest purposes
            # TODO Review retry internal seem tooo long
            interval = round(interval) * CONNECT_RETRY_INTERVAL
            LOG.data(
                self._id,
                'trying to connect again after {0} seconds'.format(
                    interval))
            self._retry_timer = threading.Timer(interval, retry)
            self._retry_timer.start()
        else:
            # Non-retryable error: give up and disconnect.
            self._perform_disconnect(None, error)
    LOG.exit('Client._connect_to_service', self._id, None)
def _reconnect(self):
"""
Reconnects the client to the MQ Light service, The 'reconnected' event
will be emitted once the client has reconnected.
Returns:
The instance of the client if reconnect succeeded otherwise None
"""
LOG.entry('Client._reconnect', self._id)
if self.is_stopped():
LOG.exit('Client._reconnect', self._id, None)
return None
# Stop the messenger to free the object then attempt a reconnect
def stop_processing(client, callback=None):
LOG.entry('Client.reconnect.stop_processing', client.get_id())
if client._heartbeat_timeout:
client._heartbeat_timeout.cancel()
if self.state not in (STOPPING, STOPPED):
# Initiate the reconnection
self._set_state(RETRYING)
client._perform_connect(self._process_queued_actions, False)
LOG.exit('Client.reconnect.stop_processing', client.get_id(), None)
self._stop_messenger(stop_processing)
LOG.exit('Client._reconnect', self._id, self)
return self
def get_id(self):
"""
:returns: The client id
"""
LOG.data(self._id, self._id)
return self._id
def get_service(self):
"""
:returns: The service if connected otherwise ``None``
"""
if self.state == STARTED:
address = self._service.address_only
LOG.data(self._id, 'service:', address)
return address
else:
LOG.data(self._id, 'Not connected')
LOG.data(self._id, 'service: None')
return None
def get_state(self):
"""
:returns: The state of the client
**States**
* started - client is connected to the server and ready to process \
messages.
* starting - client is attempting to connect to the server following \
a stop event.
* stopped - client is stopped and not connected to the server. This \
can occur following a user request or a non-recovery connection error
* stopping - occurs before ``stopped`` state and is closing any \
current connections.
* retrying - attempting to connect to the server following a \
recoverable error. Previous states would be ``starting`` or ``started``
"""
LOG.data(self._id, 'state:', self._state)
return self._state
def _set_state(self, new_state, error=None):
"""
Sets the state of the client
"""
LOG.entry('Client._set_state', self._id)
LOG.parms(self._id, 'new state:', new_state)
LOG.parms(self._id, 'error:', str(type(error)), error)
LOG.parms(self._id, 'error type:', str(type(error)))
if new_state not in STATES:
raise InvalidArgumentError('invalid state')
if self._state != new_state:
self._state = new_state
if self._on_state_changed:
LOG.state('Client._set_state', self._id, self._state)
self.create_thread(
target=self._on_state_changed,
args=(self, self._state, error),
name=':on_state_changed')
LOG.exit('Client._set_state', self._id, self._state)
return self._state
state = property(get_state)
def is_stopped(self):
"""
:returns: ``True`` if the Client is in the stopped or stopping state,
otherwise ``False``
"""
LOG.data(self._id, 'state:', self.state)
return self.state in (STOPPED, STOPPING)
def send(self, topic, data, options=None, on_sent=None):
"""Sends a message to the MQLight service.
:param str topic: The topic of the message to be sent to.
:type data: str or bytearray.
:param data: Body of the message. | |
\
\
\ / line_m
\ /
tex_2_theta[0] + r_2
/ \
/ \
\ / \
\ / \
\ / \
tex_2_theta[1] + r_1 \ line_o2
/ \
/ \
/ \
\
\
\
\ line_o1
"""
#----- theta-s
theta = self.ray_1_theta
self.arc_theta = []
self.arc_theta_pos_offset = []
self.tex_theta = []
# reflection points array
arc_origin_ary = [self.get_r1, self.get_r1, self.get_r2, self.get_r2, self.get_r2]
for i in range(0,5):
# arc
(arc_start, arc_angle) = self.get_arc_theta_n(i, theta)
self.arc_theta.append(
Arc(
start_angle = arc_start,
angle = arc_angle,
radius = self.arc_theta_radius,
color = self.arc_theta_color,
arc_center = ORIGIN # initial center of the arc is at ORIGIN
))
# Initialize self.arc_theta_pos_offset
#
# Arc's center is the arc object center, not the arc (circle)'s center
# Now the arc_center of all arcs are at the ORIGIN, thus,
# get_center() gives an offset from the center.
# ___
# \_
# \
# | arc
# ORIGIN * + \
# arc.get_center()
# |<->|
# offset
#
self.arc_theta_pos_offset.append(self.arc_theta[i].get_center())
arc_center = arc_origin_ary[i]() + self.arc_theta_pos_offset[i]
self.arc_theta[i].move_to(arc_center)
# tex (\theta), when visualize which one, use the next line
# tex_str = r"\theta_{0}".format(i)
tex_str = r"\theta"
self.tex_theta.append(TexMobject(tex_str, color=self.tex_theta_color))
self.tex_theta[i].move_to(arc_origin_ary[i]() + self.tex_theta_position_offset[i])
# x, y share the the position theta[2] and theta[3]
self.tex_angle_x = TexMobject(r"x", color=self.tex_theta_color).move_to(self.tex_theta[2])
self.tex_angle_y = TexMobject(r"y", color=self.tex_theta_color).move_to(self.tex_theta[3])
if (self.is_show_construct):
self.play(*[ShowCreation(obj) for obj in self.arc_theta])
self.play(FadeIn(self.tex_angle_x), FadeIn(self.tex_angle_y))
self.play(FadeOut(self.tex_angle_x), FadeOut(self.tex_angle_y))
self.play(*[FadeIn(obj) for obj in self.tex_theta])
#----- 2 theta-s
self.arc_2_theta = []
self.arc_2_theta_pos_offset = []
self.tex_2_theta = []
arc_2_theta_origin_ary = [self.get_r1, self.get_r2]
for i in range(0,2):
# arc
(arc_start, arc_angle) = self.get_arc_2_theta_n(i, theta)
self.arc_2_theta.append(
Arc(
start_angle = arc_start,
angle = arc_angle,
radius = self.arc_2_theta_radius,
color = self.arc_2_theta_color,
arc_center = ORIGIN
))
# Arc's center is the arc object center, not the arc (circle)'s center
self.arc_2_theta_pos_offset.append(self.arc_2_theta[i].get_center())
arc_2_theta_center = arc_2_theta_origin_ary[i]() + self.arc_2_theta_pos_offset[i]
self.arc_2_theta[i].move_to(arc_2_theta_center)
# tex (\theta), when visualize which one, use the next line
# tex_str = r"2\theta_{0}".format(i)
tex_str = r"2\theta"
self.tex_2_theta.append(TexMobject(tex_str, color=self.tex_2_theta_color))
self.tex_2_theta[i].move_to(arc_2_theta_origin_ary[i]() + self.tex_2_theta_position_offset[i])
if (self.is_show_construct):
self.play(*[ShowCreation(obj) for obj in self.arc_2_theta])
self.play(*[FadeIn(obj) for obj in self.tex_2_theta])
def create_line_annotation_tex(self):
"""
r_2
+---------+------------ l_1
| |
| |
| |
r_1+---------+------------ l_2
| |
| |
| |
n_1 n_2
\ / m: transversal
\ /
+ r_2
/ \
/ \
\ / \
\ / \
\ / \
+ r_1 \ o_2
/ \
/ \
/ \
\
\
\
\ o_1
"""
line_l1_offset = 0.4 * RIGHT
self.tex_line_l1_pos = self.line_mirror_up_l1.get_end() + line_l1_offset
line_l2_offset = 0.4 * RIGHT
self.tex_line_l2_pos = self.line_normal_left_l2.get_end() + line_l2_offset
line_n1_offset = 0.4 * DOWN
self.tex_line_n1_pos = self.line_mirror_left_n1.get_end() + line_n1_offset
line_n2_offset = 0.4 * DOWN
self.tex_line_n2_pos = self.line_normal_up_n2.get_end() + line_n2_offset
line_o1_offset = 0.4 * UP
self.tex_line_o1_pos = self.line_o1.get_start() + line_o1_offset
line_o2_offset = 0.4 * UP
self.tex_line_o2_pos = self.line_o2.get_end() + line_o2_offset
line_m_offset = 1.4 * RIGHT + 0.35 * UP
self.tex_line_m_pos = self.line_m.get_end() + line_m_offset
self.tex_line_l1 = TexMobject(r"l_{1}", color=WHITE).move_to(self.tex_line_l1_pos)
self.tex_line_l2 = TexMobject(r"l_{2}", color=WHITE).move_to(self.tex_line_l2_pos)
self.tex_line_n1 = TexMobject(r"n_{1}", color=WHITE).move_to(self.tex_line_n1_pos)
self.tex_line_n2 = TexMobject(r"n_{2}", color=WHITE).move_to(self.tex_line_n2_pos)
self.tex_line_o1 = TexMobject(r"o_{1}", color=WHITE).move_to(self.tex_line_o1_pos)
self.tex_line_o2 = TexMobject(r"o_{2}", color=WHITE).move_to(self.tex_line_o2_pos)
self.tex_line_m = TexMobject(r"m", r"\text{: transversal}", color=WHITE).move_to(self.tex_line_m_pos)
tex_line_list = [self.tex_line_l1, self.tex_line_l2,
self.tex_line_n1, self.tex_line_n2,
self.tex_line_o1, self.tex_line_o2,
self.tex_line_m]
if (self.is_show_construct):
self.play(*[ShowCreation(obj) for obj in tex_line_list])
def animate_setup(self):
"""Start the setup: the corner reflector: T1
"""
if (self.is_skip_to_end):
# Add mirror (l1, n1) only
self.play(*[FadeIn(mobj) for mobj in
[self.line_mirror_up_l1, self.line_mirror_left_n1]])
return
self.play(ShowCreation(self.line_mirror_up_l1), ShowCreation(self.line_mirror_left_n1))
self.wait(self.wait_time)
self.play(ShowCreation(self.elbow_l1_n1))
self.wait(self.wait_time)
self.play(FadeOut(self.elbow_l1_n1))
self.wait(self.wait_time)
    def animate_incident_ray(self):
        """
        Animate the incident ray and the parallel-lines construction.

        Scene beats:
        * T2: draw the incident ray.
        * T3: draw the normal (l2).
        * T4, T5: show l1 and l2 are both perpendicular to n1 (elbows).
        * T6: equal corresponding angles, hence l1 || l2.

        When ``is_skip_to_end`` is set, only the end state (l2 plus the
        l1/l2 labels and parallel signs) is faded in.
        """
        # T6 Mobjects (annotation text and pointer arrows, built up front)
        text_corresponding_pos = 3.2 * RIGHT + 1.0 * UP
        text_corresponding = TextMobject(r"Corresponding angles", color=WHITE).move_to(text_corresponding_pos)
        # get left position of the text
        arrow_start = text_corresponding.get_critical_point(LEFT) + -0.2 * RIGHT
        arrow_corres_1 = Arrow(arrow_start, self.mirror_corner_pos + 0.6 * RIGHT + -0.2 * UP,
                               color=BLUE, stroke_width = 4, buff=0)
        arrow_corres_2 = Arrow(arrow_start, self.get_r1() + 0.6 * RIGHT + -0.2 * UP,
                               color=BLUE, stroke_width = 4, buff=0)
        if (self.is_skip_to_end):
            # l1, l2, parallel only
            self.play(*[FadeIn(mobj) for mobj in
                        [self.line_normal_left_l2,
                         self.tex_line_l1, self.tex_line_l2,
                         self.parallel_sign_l1, self.parallel_sign_l2]])
            return
        # T2: incident ray
        self.play(ShowCreation(self.line_ray_1))
        self.wait(self.wait_time)
        # T3: draw the normal
        self.play(ShowCreation(self.line_normal_left_l2))
        self.wait(self.wait_time)
        # T4, T5: show two lines (l1, l2) are perpendicular to n1
        self.play(FadeOut(self.line_ray_1), ShowCreation(self.elbow_l2_n1))
        self.wait(self.wait_time)
        self.play(ShowCreation(self.elbow_l1_n1))
        self.wait(self.wait_time)
        # T6: corresponding angles, show parallel
        self.play(FadeIn(self.tex_line_l1), FadeIn(self.tex_line_l2))
        self.play(FadeIn(text_corresponding), ShowCreation(arrow_corres_1), ShowCreation(arrow_corres_2))
        self.wait(self.wait_time)
        self.play(ShowCreation(self.parallel_sign_l1), ShowCreation(self.parallel_sign_l2))
        self.wait(self.wait_time)
        # Remove corresponding angles and right angles
        self.play(FadeOut(text_corresponding), FadeOut(arrow_corres_1), FadeOut(arrow_corres_2),
                  FadeOut(self.elbow_l2_n1), FadeOut(self.elbow_l1_n1))
        self.wait(self.wait_time)
def animate_reflection_1_ray(self):
"""
Incident ray 1: T7
Show angles: T8
"""
if (self.is_skip_to_end):
self.play(*[FadeIn(mobj) for mobj in
[self.line_ray_1, self.line_ray_2,
self.arc_theta[0], self.tex_theta[0],
self.arc_theta[1], self.tex_theta[1]]])
return
# T7: Show incident ray 1, normal
self.play(ShowCreation(self.line_ray_1))
self.wait(self.wait_time)
# T8: reflection ray 2, angle theta[0]
self.play(ShowCreation(self.line_ray_2))
self.wait(self.wait_time)
self.play(ShowCreation(self.arc_theta[0]), FadeIn(self.tex_theta[0]))
self.wait(self.wait_time)
self.play(ShowCreation(self.arc_theta[1]), FadeIn(self.tex_theta[1]))
self.wait(self.wait_time)
    def animate_reflection_2_ray(self):
        """
        Animate the second reflection and the alternate-angles argument.

        Scene beats:
        * T9: reflection ray 3 and normal n2; show angle x and l1 || l2.
        * T10: hide everything but the parallels; show transversal m.
        * T11: alternate angles give x == theta.
        * T12: put back the mirrors and rays.
        """
        # tex l1, tex l2, parallel signs are shown, but removed at the end
        fadeout_at_end_list = [
            self.tex_line_l1, self.parallel_sign_l1,
            self.tex_line_l2, self.parallel_sign_l2]
        if (self.is_skip_to_end):
            self.play(*[FadeIn(mobj) for mobj in
                        [self.line_normal_up_n2,
                         self.line_ray_3,
                         self.arc_theta[2], self.tex_theta[2]]],
                      *[FadeOut(mobj) for mobj in fadeout_at_end_list])
            return
        # T9: reflection ray 3, normal n2, angle theta[2], angle theta[3]
        self.play(ShowCreation(self.line_normal_up_n2), ShowCreation(self.elbow_l1_n2))
        self.wait(self.wait_time)
        self.play(FadeOut(self.elbow_l1_n2))
        self.wait(self.wait_time)
        self.play(ShowCreation(self.line_ray_3))
        self.wait(self.wait_time)
        self.play(ShowCreation(self.arc_theta[2]), FadeIn(self.tex_angle_x))
        self.wait(self.wait_time)
        # T10: fadeout n1, n2, ray1, ray3, arc[0], theta[0], arc[1], theta[0], fadein l_m
        # These mobjects are restored (pushed back) at T12 below.
        non_parallel_creation_push = [self.line_mirror_left_n1,
                                      self.line_normal_up_n2,
                                      self.line_ray_1,
                                      self.line_ray_3,
                                      self.arc_theta[0]]
        non_parallel_fade_push = [self.tex_theta[0]]
        self.play(*[FadeOut(mobj) for mobj in non_parallel_creation_push],
                  *[FadeOut(mobj) for mobj in non_parallel_fade_push])
        self.wait(self.wait_time)
        self.play(ShowCreation(self.line_m))
        self.wait(self.wait_time)
        self.play(FadeIn(self.tex_line_m))
        self.wait(self.wait_time)
        # T10 tmp Mobjects (annotation text, equation and pointer arrows)
        text_alt_pos = 3.2 * RIGHT + 1.0 * UP
        text_alt = TextMobject(r"Alternate angles", color=WHITE).move_to(text_alt_pos)
        tex_x_eq_t = TexMobject( r"x = \theta", color=WHITE).move_to(text_alt_pos + DOWN)
        # get left position of the text
        arrow_start = text_alt.get_critical_point(LEFT) + -0.4 * RIGHT
        arrow_alt_1 = Arrow(arrow_start, self.get_r2() + -0.1 * RIGHT + -0.4 * UP,
                            color=BLUE, stroke_width = 4, buff=0)
        arrow_alt_2 = Arrow(arrow_start, self.get_r1() + 1.8 * RIGHT + 0.6 * UP,
                            color=BLUE, stroke_width = 4, buff=0)
        # T10: m: transversal
        self.play(FadeIn(text_alt),
                  ShowCreation(arrow_alt_1),
                  ShowCreation(arrow_alt_2))
        self.wait(self.wait_time)
        # T11: x = theta
        self.play(FadeIn(tex_x_eq_t))
        self.wait(self.wait_time)
        self.play(FadeOut(self.tex_angle_x), FadeIn(self.tex_theta[2]))
        self.wait(self.wait_time)
        # T12: restore the mirrors and rays. hide l1, l2, parallel signs
        fadeout_list = [text_alt, tex_x_eq_t, arrow_alt_1, arrow_alt_2,
                        self.line_m, self.tex_line_m]
        fadeout_list.extend(fadeout_at_end_list)
        self.play(*[FadeOut(mobj) for mobj in fadeout_list])
        self.play(*[ShowCreation(mobj) for mobj in non_parallel_creation_push],
                  *[FadeIn(mobj) for mobj in non_parallel_fade_push])
        self.wait(self.wait_time)
def animate_2nd_reflection_y_theta(self):
"""
T15-1: show angle y
T15-2: mirror reflection y = theta
"""
if (self.is_skip_to_end):
self.add(self.arc_theta[3], self.tex_theta[3])
return
y_work = copy.deepcopy(self.tex_angle_y)
self.play(ShowCreation(self.arc_theta[3]), FadeIn(y_work))
self.wait(self.wait_time)
self.play(FadeOut(y_work))
self.play(FadeIn(self.tex_theta[3]))
self.wait(self.wait_time)
def animate_show_upper_2_theta(self):
"""
Show upper (2*theta)
T16-1: extend line o1
T16-2: push non-vertical angle objects
T16-3: show vertical angles text
T16-4: pop non-vertical angle objects
T17-1: (theta, theta) -> (theta + theta) -> 2 theta
T17-2: remove the upper mirror and adjust 2 theta
"""
# push and pop list
push_tex_list_1 = [self.tex_theta[2]]
push_mob_list_1 = [self.arc_theta[2], self.line_ray_2]
# push list
push_tex_list_2 = [self.tex_theta[0], self.tex_theta[1]]
push_mob_list_2 = [self.arc_theta[0], self.arc_theta[1],
# mirror left, normal at r2
self.line_mirror_left_n1, self.line_normal_up_n2,
# normal at r1
self.line_normal_left_l2,
# ray_1, ray_2
self.line_ray_1, self.line_ray_3]
if (self.is_skip_to_end):
self.remove(*[t for t in push_tex_list_2])
self.remove(*[m for m in push_mob_list_2])
self.remove(self.line_mirror_up_l1, self.tex_theta[3], self.arc_theta[3])
self.remove(self.tex_theta[2])
self.add(self.line_o2)
self.add(self.arc_theta[4])
self.add(self.tex_2_theta[1])
return
# T16-1: extend line o1
self.play(ShowCreation(self.line_o2))
self.wait(self.wait_time)
# T16-2: push non-vertical angle objects
self.play(*[FadeOut(tex) for tex in push_tex_list_1],
*[FadeOut(tex) for tex in push_tex_list_2],
*[FadeOut(mob) for mob in push_mob_list_1],
*[FadeOut(mob) for mob in push_mob_list_2])
self.wait(self.wait_time)
# T16-3: show vertical angles text
self.play(ShowCreation(self.arc_theta[4]))
self.wait(self.wait_time)
# temporal text
text_vertical_pos = -2.0 * RIGHT + -1.0 * UP
text_vertical = TextMobject(r"Vertical angles", color=WHITE).move_to(text_vertical_pos)
arrow_start = text_vertical.get_critical_point(UP) + 0.2 * UP
arrow_vert_1 = Arrow(arrow_start, self.get_r2() + -0.8 * RIGHT + -0.2 * UP,
color=BLUE, stroke_width = 4, buff=0)
arrow_vert_2 = Arrow(arrow_start, self.get_r2() + +0.5 * RIGHT + -0.5 * UP,
color=BLUE, stroke_width = 4, buff=0)
self.play(ShowCreation(arrow_vert_1), ShowCreation(arrow_vert_2), FadeIn(text_vertical))
self.wait(self.wait_time)
# Show vertical angle theta
self.play(FadeIn(self.tex_theta[4]))
self.wait(self.wait_time)
# fadeout vertical angle annotation (text and arrows)
self.play(FadeOut(arrow_vert_1), FadeOut(arrow_vert_2), FadeOut(text_vertical))
self.wait(self.wait_time)
# T16-4: pop back ray 2 and theta
self.play(*[FadeIn(tex) for tex in push_tex_list_1],
*[ShowCreation(mob) for mob in push_mob_list_1])
self.wait(self.wait_time)
# T17-1: | |
[
['BOTSID', 'M', 3,'AN'],
['M1301', 'M', (2,4),'AN'],
['M1302', 'M', 30,'AN'],
['M1303', 'C', 1,'AN'],
['M1304', 'M', 12,'AN'],
['M1305', 'C', 15,'R'],
['M1306', 'C', (2,2),'AN'],
['M1307', 'C', 2,'AN'],
['M1308', 'C', 12,'AN'],
['M1309', 'M', (2,4),'AN'],
['M1310', 'C', (2,4),'AN'],
],
'M14': [
['BOTSID', 'M', 3,'AN'],
['M1401', 'M', 12,'AN'],
['M1402', 'M', 2,'AN'],
['M1403', 'C', 15,'AN'],
['M1404', 'C', (2,2),'AN'],
['M1405', 'C', (6,6),'DT'],
['M1406', 'C', 12,'AN'],
['M1407', 'M', (2,4),'AN'],
['M1408', 'C', (2,4),'AN'],
['M1409', 'M', 15,'R'],
['M1410', 'C', 30,'AN'],
['M1411', 'C', 30,'AN'],
],
'M15': [
['BOTSID', 'M', 3,'AN'],
['M1501', 'M', 2,'AN'],
['M1502', 'M', 30,'AN'],
['M1503', 'M', (6,6),'DT'],
['M1504', 'M', 30,'AN'],
['M1505', 'C', (2,4),'AN'],
['M1506', 'M', (4,8),'TM'],
['M1507', 'C', (2,15),'AN'],
['M1508', 'C', 30,'AN'],
['M1509', 'C', (2,4),'AN'],
['M1510', 'C', (2,30),'AN'],
['M1511', 'C', (2,2),'AN'],
['M1512', 'C', 1,'AN'],
],
'M2': [
['BOTSID', 'M', 3,'AN'],
['M201', 'M', (2,2),'AN'],
['M202', 'C', (4,6),'AN'],
['M203', 'C', (6,6),'DT'],
['M204', 'C', (3,3),'AN'],
['M205', 'C', (2,30),'AN'],
['M206', 'C', (6,6),'DT'],
['M207', 'C', 2,'AN'],
['M208', 'C', 30,'AN'],
],
'M20': [
['BOTSID', 'M', 3,'AN'],
['M2001', 'M', (2,4),'AN'],
['M2002', 'M', 12,'AN'],
['M2003', 'M', 4,'AN'],
['M2004', 'M', 10,'AN'],
['M2005', 'M', 2,'AN'],
['M2006', 'M', 30,'AN'],
['M2007', 'M', (2,2),'AN'],
['M2008', 'M', 30,'AN'],
['M2009', 'C', 45,'AN'],
],
'M21': [
['BOTSID', 'M', 3,'AN'],
['M2101', 'M', (2,2),'AN'],
['M2102', 'M', 30,'AN'],
['M2103', 'M', (2,4),'AN'],
['M2104', 'M', 12,'AN'],
['M2105', 'C', 1,'AN'],
['M2106', 'C', 25,'AN'],
['M2107', 'C', (2,4),'AN'],
['M2108', 'C', 12,'AN'],
['M2109', 'C', (2,4),'AN'],
['M2110', 'C', 12,'AN'],
['M2111', 'C', (2,4),'AN'],
['M2112', 'C', (2,4),'AN'],
['M2113', 'C', 15,'R'],
],
'M3': [
['BOTSID', 'M', 3,'AN'],
['M301', 'M', 1,'AN'],
['M302', 'C', (6,6),'DT'],
['M303', 'C', (4,8),'TM'],
['M304', 'C', (2,2),'AN'],
],
'M7': [
['BOTSID', 'M', 3,'AN'],
['M701', 'M', (2,15),'AN'],
['M702', 'C', (2,15),'AN'],
['M703', 'C', (2,15),'AN'],
['M704', 'C', (2,15),'AN'],
['M705', 'C', (2,2),'AN'],
],
'MAN': [
['BOTSID', 'M', 3,'AN'],
['MAN01', 'M', 2,'AN'],
['MAN02', 'M', 45,'AN'],
['MAN03', 'C', 45,'AN'],
['MAN04', 'C', 2,'AN'],
['MAN05', 'C', 45,'AN'],
['MAN06', 'C', 45,'AN'],
],
'MBL': [
['BOTSID', 'M', 3,'AN'],
['MBL01', 'M', (2,4),'AN'],
['MBL02', 'M', 12,'AN'],
],
'MC': [
['BOTSID', 'M', 3,'AN'],
['MC01', 'M', (3,3),'AN'],
['MC02', 'M', (2,2),'AN'],
['MC03', 'M', 7,'R'],
['MC04', 'C', (2,25),'AN'],
['MC05', 'C', 6,'R'],
],
'MCD': [
['BOTSID', 'M', 3,'AN'],
['MCD01', 'M', 15,'R'],
['MCD02', 'C', (6,6),'DT'],
['MCD03', 'C', 15,'R'],
['MCD04', 'C', 35,'AN'],
],
'MCT': [
['BOTSID', 'M', 3,'AN'],
['MCT01', 'M', (3,3),'AN'],
['MCT02', 'C', (2,2),'AN'],
['MCT03', 'C', 10,'R'],
['MCT04', 'C', 10,'R'],
['MCT05', 'C', (2,2),'AN'],
['MCT06', 'C', 7,'R'],
['MCT07', 'C', 1,'AN'],
['MCT08', 'C', (2,25),'AN'],
],
'MEA': [
['BOTSID', 'M', 3,'AN'],
['MEA01', 'C', (2,2),'AN'],
['MEA02', 'C', 3,'AN'],
['MEA03', 'C', 10,'R'],
['MEA04', 'C', (2,2),'AN'],
],
'MI': [
['BOTSID', 'M', 3,'AN'],
['MI01', 'M', (2,2),'AN'],
['MI02', 'C', 15,'N2'],
['MI03', 'C', 15,'N2'],
['MI04', 'C', 15,'N2'],
['MI05', 'C', 80,'AN'],
],
'MIA': [
['BOTSID', 'M', 3,'AN'],
['MIA01', 'M', 15,'R'],
['MIA02', 'C', 15,'R'],
['MIA03', 'C', 15,'R'],
['MIA04', 'C', 15,'R'],
['MIA05', 'C', 30,'AN'],
['MIA06', 'C', 15,'R'],
['MIA07', 'C', 15,'R'],
['MIA08', 'C', 15,'R'],
['MIA09', 'C', 15,'R'],
['MIA10', 'C', 15,'R'],
['MIA11', 'C', 15,'R'],
['MIA12', 'C', 15,'R'],
['MIA13', 'C', 15,'R'],
['MIA14', 'C', 15,'R'],
['MIA15', 'C', 15,'R'],
['MIA16', 'C', 15,'R'],
['MIA17', 'C', 15,'R'],
['MIA18', 'C', 15,'R'],
['MIA19', 'C', 15,'R'],
['MIA20', 'C', 30,'AN'],
['MIA21', 'C', 30,'AN'],
['MIA22', 'C', 30,'AN'],
['MIA23', 'C', 30,'AN'],
['MIA24', 'C', 15,'R'],
],
'MII': [
['BOTSID', 'M', 3,'AN'],
['MII01', 'M', 1,'AN'],
['MII02', 'C', 1,'AN'],
['MII03', 'C', 1,'AN'],
['MII04', 'C', 1,'AN'],
['MII05', 'C', 1,'AN'],
['MII06', 'C', 1,'AN'],
['MII07', 'C', 1,'AN'],
['MII08', 'C', 15,'R'],
['MII09', 'C', (2,2),'AN'],
['MII10', 'C', 30,'AN'],
['MII11', 'C', (2,2),'AN'],
['MII12', 'C', 30,'AN'],
['MII13', 'C', 10,'R'],
['MII14', 'C', (2,2),'AN'],
['MII15', 'C', 15,'R'],
],
'MIL': [
['BOTSID', 'M', 3,'AN'],
['MIL01', 'M', 20,'AN'],
['MIL02', 'C', 80,'AN'],
['MIL03', 'C', (3,3),'AN'],
['MIL04', 'C', (6,6),'DT'],
['MIL05', 'C', (3,3),'AN'],
['MIL06', 'C', (6,6),'DT'],
['MIL07', 'C', (2,2),'AN'],
['MIL08', 'C', 15,'R'],
['MIL09', 'C', 2,'AN'],
['MIL10', 'C', 2,'AN'],
['MIL11', 'C', (2,2),'AN'],
['MIL12', 'C', 30,'AN'],
],
'MIN': [
['BOTSID', 'M', 3,'AN'],
['MIN01', 'M', 7,'N1'],
['MIN02', 'C', 7,'N1'],
['MIN03', 'C', 7,'N1'],
['MIN04', 'C', 7,'N1'],
['MIN05', 'C', 7,'N1'],
['MIN06', 'C', 7,'N1'],
['MIN07', 'C', 7,'N1'],
['MIN08', 'C', 7,'N1'],
['MIN09', 'C', 7,'N1'],
['MIN10', 'C', 7,'N1'],
['MIN11', 'C', 7,'N1'],
['MIN12', 'C', 7,'N1'],
['MIN13', 'C', 7,'N1'],
['MIN14', 'C', 7,'N1'],
['MIN15', 'C', 7,'N1'],
['MIN16', 'C', 7,'N1'],
],
'MIR': [
['BOTSID', 'M', 3,'AN'],
['MIR01', 'M', 1,'AN'],
['MIR02', 'M', 1,'AN'],
['MIR03', 'C', 1,'AN'],
['MIR04', 'C', 30,'AN'],
['MIR05', 'C', 10,'R'],
['MIR06', 'C', 15,'N2'],
['MIR07', 'C', (2,2),'AN'],
['MIR08', 'C', 15,'R'],
['MIR09', 'C', 10,'R'],
['MIR10', 'C', 10,'R'],
['MIR11', 'C', 1,'AN'],
['MIR12', 'C', (6,6),'DT'],
],
'MIS': [
['BOTSID', 'M', 3,'AN'],
['MIS01', 'M', (2,2),'AN'],
['MIS02', 'C', (3,3),'AN'],
['MIS03', 'C', (2,3),'AN'],
['MIS04', 'C', 35,'AN'],
['MIS05', 'C', (3,3),'AN'],
],
'MIT': [
['BOTSID', 'M', 3,'AN'],
['MIT01', 'M', 30,'AN'],
['MIT02', 'C', 80,'AN'],
['MIT03', 'C', 3,'R'],
['MIT04', 'C', 3,'R'],
],
'MOA': [
['BOTSID', 'M', 3,'AN'],
['MOA01', 'C', 10,'R'],
['MOA02', 'C', 15,'R'],
['MOA03', 'C', 30,'AN'],
['MOA04', 'C', 30,'AN'],
['MOA05', 'C', 30,'AN'],
['MOA06', 'C', 30,'AN'],
['MOA07', 'C', 30,'AN'],
['MOA08', 'C', 15,'R'],
['MOA09', 'C', 15,'R'],
],
'MRC': [
['BOTSID', 'M', 3,'AN'],
['MRC01', 'M', (2,2),'AN'],
['MRC02', 'M', 1,'AN'],
['MRC03', 'M', 1,'AN'],
['MRC04', 'M', 15,'R'],
['MRC05', 'C', 35,'AN'],
['MRC06', 'C', 1,'AN'],
['MRC07', 'C', 15,'R'],
['MRC08', 'C', 1,'AN'],
['MRC09', 'C', 15,'R'],
],
'MS': [
['BOTSID', 'M', 3,'AN'],
['MS01', 'M', (2,2),'AN'],
['MS02', 'M', (2,10),'AN'],
['MS03', 'M', 9,'N2'],
['MS04', 'C', (2,2),'AN'],
['MS05', 'C', 9,'N2'],
['MS06', 'C', 6,'R'],
],
'MSG': [
['BOTSID', 'M', 3,'AN'],
['MSG01', 'M', 264,'AN'],
['MSG02', 'C', (2,2),'AN'],
],
'MSS': [
['BOTSID', 'M', 3,'AN'],
['MSS01', 'C', (2,2),'AN'],
['MSS02', 'C', 80,'AN'],
['MSS03', 'C', (2,2),'AN'],
['MSS04', 'C', (2,3),'AN'],
['MSS05', 'C', 1,'AN'],
],
'N1': [
['BOTSID', 'M', 3,'AN'],
['N101', 'M', (2,2),'AN'],
['N102', 'C', 35,'AN'],
['N103', 'C', 2,'AN'],
['N104', 'C', (2,17),'AN'],
['N105', 'C', (2,2),'AN'],
['N106', 'C', (2,2),'AN'],
],
'N10': [
['BOTSID', 'M', 3,'AN'],
['N1001', 'C', 15,'R'],
['N1002', 'C', 45,'AN'],
['N1003', 'M', 45,'AN'],
['N1004', 'C', 1,'AN'],
['N1005', 'C', 16,'AN'],
['N1006', 'C', (2,8),'AN'],
['N1007', 'C', 1,'AN'],
['N1008', 'C', 10,'R'],
],
'N11': [
['BOTSID', 'M', 3,'AN'],
['N1101', 'M', 10,'AN'],
['N1102', 'C', 30,'AN'],
['N1103', 'C', 30,'AN'],
],
'N12': [
['BOTSID', 'M', 3,'AN'],
['N1201', 'M', 1,'AN'],
['N1202', 'M', (2,2),'AN'],
],
'N2': [
['BOTSID', 'M', 3,'AN'],
['N201', 'M', 35,'AN'],
['N202', 'C', 35,'AN'],
],
'N3': [
['BOTSID', 'M', 3,'AN'],
['N301', 'M', 35,'AN'],
['N302', 'C', 35,'AN'],
],
'N4': [
['BOTSID', 'M', 3,'AN'],
['N401', 'C', (2,30),'AN'],
['N402', 'C', (2,2),'AN'],
['N403', 'C', (3,9),'AN'],
['N404', 'C', (2,3),'AN'],
['N405', 'C', 2,'AN'],
['N406', 'C', 30,'AN'],
],
'N5': [
['BOTSID', 'M', 3,'AN'],
['N501', 'C', (4,5),'R'],
['N502', 'C', (2,3),'R'],
['N503', 'C', (2,4),'R'],
['N504', 'C', (4,4),'AN'],
['N505', 'C', 1,'AN'],
['N506', 'C', 8,'R'],
['N507', 'C', (2,4),'N2'],
['N508', 'C', 1,'AN'],
['N509', 'C', (2,2),'AN'],
],
'N6': [
['BOTSID', 'M', 3,'AN'],
['N601', 'M', 30,'AN'],
],
'N7': [
['BOTSID', 'M', 3,'AN'],
['N701', 'C', 4,'AN'],
['N702', 'M', 10,'AN'],
['N703', 'C', 10,'R'],
['N704', 'C', 2,'AN'],
['N705', 'C', (3,8),'R'],
['N706', 'C', (2,6),'R'],
['N707', 'C', 6,'R'],
['N708', 'C', 8,'R'],
['N709', 'C', 1,'AN'],
['N710', 'C', 1,'AN'],
['N711', 'C', (2,2),'AN'],
['N712', 'C', (2,4),'AN'],
['N713', 'C', (3,6),'AN'],
['N714', 'C', 3,'AN'],
['N715', 'C', (4,5),'R'],
['N716', 'C', 1,'AN'],
['N717', 'C', 1,'AN'],
['N718', 'C', 1,'R'],
['N719', 'C', (2,2),'AN'],
['N720', 'C', 8,'R'],
['N721', 'C', 8,'R'],
['N722', 'C', (4,4),'AN'],
['N723', 'C', (2,4),'AN'],
],
'N7A': [
['BOTSID', 'M', 3,'AN'],
['N7A01', 'C', (2,2),'AN'],
['N7A02', 'C', 8,'R'],
['N7A03', 'C', 2,'R'],
['N7A04', 'C', (3,3),'AN'],
['N7A05', 'C', 2,'R'],
['N7A06', 'C', 2,'R'],
['N7A07', 'C', (2,2),'AN'],
['N7A08', 'C', (2,2),'AN'],
['N7A09', 'C', (2,2),'AN'],
],
'N7B': [
['BOTSID', 'M', 3,'AN'],
['N7B01', 'C', 2,'R'],
['N7B02', 'C', 1,'AN'],
['N7B03', 'C', (3,3),'AN'],
['N7B04', 'C', (3,3),'AN'],
['N7B05', 'C', (3,3),'AN'],
['N7B06', 'C', 30,'AN'],
],
'N8': [
['BOTSID', 'M', 3,'AN'],
['N801', 'M', 6,'R'],
['N802', 'M', (6,6),'DT'],
['N803', 'C', 1,'AN'],
['N804', 'C', 4,'AN'],
['N805', 'C', 10,'AN'],
['N806', 'C', 6,'R'],
['N807', 'C', (6,6),'DT'],
['N808', 'C', (2,30),'AN'],
['N809', 'C', (2,2),'AN'],
['N810', 'C', (2,4),'AN'],
['N811', 'C', 5,'AN'],
],
'N8A': [
['BOTSID', 'M', 3,'AN'],
['N8A01', 'C', (2,2),'AN'],
['N8A02', 'C', 6,'R'],
['N8A03', 'C', (6,6),'DT'],
['N8A04', 'C', 30,'AN'],
['N8A05', 'C', (2,30),'AN'],
['N8A06', 'C', (2,2),'AN'],
['N8A07', 'C', (2,4),'AN'],
['N8A08', 'C', 5,'AN'],
['N8A09', 'C', 4,'AN'],
['N8A10', 'C', 10,'AN'],
],
'N9': [
['BOTSID', 'M', 3,'AN'],
['N901', 'M', (2,2),'AN'],
['N902', 'C', 30,'AN'],
['N903', 'C', 45,'AN'],
['N904', 'C', (6,6),'DT'],
['N905', 'C', (4,8),'TM'],
['N906', 'C', (2,2),'AN'],
],
'NA': [
['BOTSID', 'M', 3,'AN'],
['NA01', 'C', (2,2),'AN'],
['NA02', 'C', 30,'AN'],
['NA03', 'M', 4,'AN'],
['NA04', 'M', 10,'AN'],
['NA05', 'C', 1,'AN'],
['NA06', 'C', 3,'AN'],
['NA07', 'C', (2,4),'AN'],
['NA08', 'C', (4,5),'R'],
['NA09', 'C', (2,4),'AN'],
['NA10', 'C', (2,2),'AN'],
],
'NB': [
['BOTSID', 'M', 3,'AN'],
['NB01', 'C', (2,30),'AN'],
['NB02', 'C', (2,2),'AN'],
['NB03', 'C', (6,9),'AN'],
],
'NCA': [
['BOTSID', 'M', 3,'AN'],
['NCA01', 'C', 11,'AN'],
['NCA02', 'C', 2,'AN'],
['NCA03', 'C', 80,'AN'],
['NCA04', 'C', 15,'R'],
['NCA05', 'C', (2,2),'AN'],
],
'NCD': [
['BOTSID', 'M', 3,'AN'],
['NCD01', 'C', (2,2),'AN'],
['NCD02', 'C', 1,'AN'],
['NCD03', 'C', 11,'AN'],
['NCD04', 'C', (2,3),'AN'],
['NCD05', 'C', (2,2),'AN'],
['NCD06', 'C', 12,'AN'],
['NCD07', 'C', 80,'AN'],
],
'NM1': [
['BOTSID', 'M', 3,'AN'],
['NM101', 'M', (2,2),'AN'],
['NM102', 'M', 1,'AN'],
['NM103', 'C', 35,'AN'],
['NM104', 'C', 25,'AN'],
['NM105', 'C', 25,'AN'],
['NM106', 'C', 10,'AN'],
['NM107', 'C', 10,'AN'],
['NM108', 'C', 2,'AN'],
['NM109', 'C', (2,17),'AN'],
],
'NTE': [
['BOTSID', 'M', | |
import matplotlib.pyplot as plt
import seaborn as sb
import sys
import scipy
import scipy.signal
import matplotlib.dates as mdates
import pandas as pd
import numpy as np
import datetime
from datetime import date, timedelta
sys.path.append('/Users/hn/Documents/00_GitHub/Ag/NASA/Python_codes/')
sys.path.append('/home/hnoorazar/NASA/')
import NASA_core as nc
def SG_clean_SOS_orchardinPlot_VerticalLine(raw_dt, SG_dt, idx, ax, onset_cut=0.5, offset_cut=0.5):
    """Plot a VI (NDVI or EVI) time series with SOS and EOS markers.

    Created after the meeting on Jan 10, 2022.  Changes relative to the
    previous function (SG_clean_SOS_orchardinPlot):
       a. Vertical lines for time reference
       b. Add area of fields to the title of the plots. (Done in Driver)
       c. In the title break AdamBenton2016 to one county! (Done in Driver)
       d. make the previous and next auxiliary years gray background.

    Arguments
    ---------
    raw_dt : dataframe
        pandas dataframe of raw observations from Google Earth Engine
    SG_dt : dataframe
        pandas dataframe of smoothed version of data points.
    idx : str
        A string indicating vegetation index (e.g. "NDVI" or "EVI").
    ax : axis
        An axis object of Matplotlib.
    onset_cut : float
        Start Of Season threshold.
    offset_cut : float
        End Of Season threshold.

    Returns
    -------
    None.  The plot is drawn onto ``ax``.
    """
    assert len(SG_dt['ID'].unique()) == 1
    ratio_colName = idx + "_ratio"
    SEOS_output_columns = ['ID', idx, 'human_system_start_time',
                           ratio_colName, 'SOS', 'EOS', 'season_count']
    unique_years = SG_dt['human_system_start_time'].dt.year.unique()
    SG_dt = SG_dt[SEOS_output_columns[0:3]]
    # Plot the smoothed and raw series once, up front.  (The original
    # re-plotted both full series on every iteration of the year loop,
    # overdrawing identical artists; it also allocated an
    # ``all_poly_and_SEOS`` DataFrame that was never used -- both removed.)
    ax.plot(SG_dt['human_system_start_time'], SG_dt[idx], c='k', linewidth=2,
            label='SG')
    ax.scatter(raw_dt['human_system_start_time'], raw_dt[idx],
               s=7, c='dodgerblue', label="raw")
    # Detect and plot SOS and EOS year by year.
    for yr in unique_years:
        curr_field_yr = SG_dt[SG_dt['human_system_start_time'].dt.year == yr].copy()
        # Use the May-October window to decide whether this year shows a
        # real growing season (VI range above 0.3).
        y_orchard = curr_field_yr[curr_field_yr['human_system_start_time'].dt.month >= 5]
        y_orchard = y_orchard[y_orchard['human_system_start_time'].dt.month <= 10]
        y_orchard_range = max(y_orchard[idx]) - min(y_orchard[idx])
        if y_orchard_range > 0.3:
            curr_field_yr = nc.addToDF_SOS_EOS_White(pd_TS=curr_field_yr,
                                                     VegIdx=idx,
                                                     onset_thresh=onset_cut,
                                                     offset_thresh=offset_cut)
            curr_field_yr = nc.Null_SOS_EOS_by_DoYDiff(pd_TS=curr_field_yr, min_season_length=40)
        else:
            # Flat (orchard-like) year: normalize the ratio column anyway
            # and flag SOS/EOS with the 666 sentinel.
            VegIdx_min = curr_field_yr[idx].min()
            VegIdx_max = curr_field_yr[idx].max()
            VegRange = VegIdx_max - VegIdx_min + sys.float_info.epsilon
            curr_field_yr[ratio_colName] = (curr_field_yr[idx] - VegIdx_min) / VegRange
            curr_field_yr['SOS'] = 666
            curr_field_yr['EOS'] = 666
        #
        # SOS markers (666 sentinel == "no detected season")
        #
        SOS = curr_field_yr[curr_field_yr['SOS'] != 0]
        if len(SOS) > 0:  # dataframe might be empty
            if SOS.iloc[0]['SOS'] != 666:
                ax.scatter(SOS['human_system_start_time'], SOS['SOS'], marker='+', s=155, c='g',
                           label="")
                # annotate SOS dates vertically below the curve
                for ii in np.arange(0, len(SOS)):
                    style = dict(size=10, color='g', rotation='vertical')
                    ax.text(x=SOS.iloc[ii]['human_system_start_time'].date(),
                            y=-0.18,
                            s=str(SOS.iloc[ii]['human_system_start_time'].date())[5:],
                            **style)
            else:
                # Sentinel year: draw a flat green line at 1 instead.
                ax.plot(curr_field_yr['human_system_start_time'],
                        np.ones(len(curr_field_yr['human_system_start_time'])) * 1,
                        c='g', linewidth=2)
        #
        # EOS markers
        #
        EOS = curr_field_yr[curr_field_yr['EOS'] != 0]
        if len(EOS) > 0:  # dataframe might be empty
            if EOS.iloc[0]['EOS'] != 666:
                ax.scatter(EOS['human_system_start_time'], EOS['EOS'],
                           marker='+', s=155, c='r',
                           label="")
                # annotate EOS dates vertically below the curve
                for ii in np.arange(0, len(EOS)):
                    style = dict(size=10, color='r', rotation='vertical')
                    ax.text(x=EOS.iloc[ii]['human_system_start_time'].date(),
                            y=-0.18,
                            s=str(EOS.iloc[ii]['human_system_start_time'].date())[5:],
                            **style)
    ax.set_title(SG_dt['ID'].unique()[0] + ", cut: " + str(onset_cut) + ", " + idx)
    ax.set(ylabel=idx)
    ax.set_xlim([SG_dt.human_system_start_time.min() - timedelta(10),
                 SG_dt.human_system_start_time.max() + timedelta(10)])
    ax.set_ylim([-0.3, 1.15])
    # Month ticks; mdates is already imported at module level, so the
    # original's mid-function ``from matplotlib.dates import ...`` (with
    # an unused MonthLocator name) is unnecessary.
    ax.xaxis.set_major_locator(mdates.MonthLocator())
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))
    ax.legend(loc="upper left")
def SG_clean_SOS_orchardinPlot(raw_dt, SG_dt, idx, ax, onset_cut=0.5, offset_cut=0.5):
    """Plot a vegetation index (NDVI or EVI) time series with SOS/EOS markers.

    The smoothed (Savitzky-Golay) series is drawn as a black line and the raw
    Google Earth Engine observations as blue dots.  For every year, season
    onset (SOS, green '+') and offset (EOS, red '+') points are detected and
    annotated with their dates, and the per-year VI ratio curve is drawn in
    gray.  Years whose May-October VI range is too flat to contain a real
    season are flagged with the sentinel value 666.

    Arguments
    ---------
    raw_dt : dataframe
        pandas dataframe of raw observations from Google Earth Engine
    SG_dt : dataframe
        pandas dataframe of smoothed version of data points.
    idx : str
        A string indicating vegetation index.
    ax : axis
        An axis object of Matplotlib.
    onset_cut : float
        Start Of Season threshold
    offset_cut : float
        End Of Season threshold

    Returns
    -------
    None.  The plot is drawn on ``ax`` as a side effect.
    """
    # All rows must belong to a single field.
    assert (len(SG_dt['ID'].unique()) == 1)
    ratio_colName = idx + "_ratio"
    SEOS_output_columns = ['ID', idx, 'human_system_start_time',
                           ratio_colName, 'SOS', 'EOS', 'season_count']
    unique_years = SG_dt['human_system_start_time'].dt.year.unique()
    # Keep only the ID, VI and time columns; SOS/EOS/ratio are recomputed
    # per year below.
    SG_dt = SG_dt[SEOS_output_columns[0:3]]
    # Detect and plot SOS and EOS in each year.
    yr_count = 0
    for yr in unique_years:
        curr_field_yr = SG_dt[SG_dt['human_system_start_time'].dt.year == yr].copy()
        # Use the May-October (growing season) VI range to decide whether
        # this year contains a detectable season.
        y_orchard = curr_field_yr[curr_field_yr['human_system_start_time'].dt.month >= 5]
        y_orchard = y_orchard[y_orchard['human_system_start_time'].dt.month <= 10]
        # Guard against a year with no May-October observations; the previous
        # max()/min() call raised ValueError on an empty frame.
        if len(y_orchard) > 0:
            y_orchard_range = max(y_orchard[idx]) - min(y_orchard[idx])
        else:
            y_orchard_range = 0
        if y_orchard_range > 0.3:
            curr_field_yr = nc.addToDF_SOS_EOS_White(pd_TS=curr_field_yr,
                                                     VegIdx=idx,
                                                     onset_thresh=onset_cut,
                                                     offset_thresh=offset_cut)
            curr_field_yr = nc.Null_SOS_EOS_by_DoYDiff(pd_TS=curr_field_yr, min_season_length=40)
        else:
            # Too flat to carry a season: normalize the VI (epsilon avoids a
            # zero division) and flag the whole year with the 666 sentinel.
            VegIdx_min = curr_field_yr[idx].min()
            VegIdx_max = curr_field_yr[idx].max()
            VegRange = VegIdx_max - VegIdx_min + sys.float_info.epsilon
            curr_field_yr[ratio_colName] = (curr_field_yr[idx] - VegIdx_min) / VegRange
            curr_field_yr['SOS'] = 666
            curr_field_yr['EOS'] = 666
        # Plot the SG-smoothed curve and the raw observations; legend entries
        # are added only once, on the first year.
        ax.plot(SG_dt['human_system_start_time'], SG_dt[idx], c='k', linewidth=2,
                label='SG' if yr_count == 0 else "")
        ax.scatter(raw_dt['human_system_start_time'], raw_dt[idx],
                   s=7, c='dodgerblue', label="raw" if yr_count == 0 else "")
        # SOS markers and date annotations (666 means "no real season").
        SOS = curr_field_yr[curr_field_yr['SOS'] != 0]
        if len(SOS) > 0:  # dataframe might be empty
            if SOS.iloc[0]['SOS'] != 666:
                ax.scatter(SOS['human_system_start_time'], SOS['SOS'], marker='+', s=155, c='g',
                           label="")
                # annotate SOS with its month-day below the axis baseline
                for ii in np.arange(0, len(SOS)):
                    style = dict(size=10, color='g', rotation='vertical')
                    ax.text(x=SOS.iloc[ii]['human_system_start_time'].date(),
                            y=-0.1,
                            s=str(SOS.iloc[ii]['human_system_start_time'].date())[5:],
                            **style)
            else:
                # Sentinel year: draw a flat green line at 1 instead of markers.
                ax.plot(curr_field_yr['human_system_start_time'],
                        np.ones(len(curr_field_yr['human_system_start_time'])) * 1,
                        c='g', linewidth=2)
        # EOS markers and date annotations.
        EOS = curr_field_yr[curr_field_yr['EOS'] != 0]
        if len(EOS) > 0:  # dataframe might be empty
            if EOS.iloc[0]['EOS'] != 666:
                ax.scatter(EOS['human_system_start_time'], EOS['EOS'],
                           marker='+', s=155, c='r',
                           label="")
                # annotate EOS
                for ii in np.arange(0, len(EOS)):
                    style = dict(size=10, color='r', rotation='vertical')
                    ax.text(x=EOS.iloc[ii]['human_system_start_time'].date(),
                            y=-0.1,
                            s=str(EOS.iloc[ii]['human_system_start_time'].date())[5:],
                            **style)
        # Per-year VI ratio curve.
        ax.plot(curr_field_yr['human_system_start_time'],
                curr_field_yr[ratio_colName],
                c='gray', label=ratio_colName if yr_count == 0 else "")
        yr_count += 1
    ax.set_title(SG_dt['ID'].unique()[0] + ", cut: " + str(onset_cut) + ", " + idx)
    ax.set(ylabel=idx)
    ax.set_xlim([SG_dt.human_system_start_time.min() - timedelta(10),
                 SG_dt.human_system_start_time.max() + timedelta(10)])
    ax.set_ylim([-0.3, 1.15])
    ax.xaxis.set_major_locator(mdates.YearLocator(1))  # every year.
    ax.legend(loc="best")
def SG_clean_SOS(raw_dt, SG_dt, idx, ax, onset_cut=0.5, offset_cut=0.5):
"""Returns A plot with of a given VI (NDVI or EVI) with SOS and EOS points.
Arguments
---------
raw_dt : dataframe
pandas dataframe of raw observations from Google Earth Engine
SG_dt : dataframe
pandas dataframe of smoothed version of data points.
idx : str
A string indicating vegetation index.
ax : axis
An axis object of Matplotlib.
onset_cut : float
Start Of Season threshold
offset_cut : float
End Of Season threshold
Returns
-------
A plot a given VI (NDVI or EVI) with SOS and EOS points.
"""
assert (len(SG_dt['ID'].unique()) == 1)
#############################################
###
### find SOS's and EOS's
###
#############################################
SEOS_output_columns = ['ID', idx, 'human_system_start_time',
'EVI_ratio', 'SOS', 'EOS', 'season_count']
"""
The reason I am multiplying len(a_df) by 4 is that we can have at least two
seasons which means 2 SOS and 2 EOS. So, at least 4 rows are needed.
and the reason for 14 is that there are 14 | |
m_flow_in = self._m_flow_in
cp_in = self._cp_in
# Dimensionless delta-circuit conductances
self._beta1 = 1./(self._Rd[0][0]*m_flow_in[0]*cp_in[0])
self._beta2 = 1./(self._Rd[1][1]*m_flow_in[0]*cp_in[0])
self._beta12 = 1./(self._Rd[0][1]*m_flow_in[0]*cp_in[0])
self._beta = 0.5*(self._beta2 - self._beta1)
# Eigenvalues
self._gamma = np.sqrt(0.25*(self._beta1+self._beta2)**2
+ self._beta12*(self._beta1+self._beta2))
self._delta = 1./self._gamma \
* (self._beta12 + 0.5*(self._beta1+self._beta2))
def _format_inputs(self, m_flow_borehole, cp_f, nSegments):
"""
Format mass flow rate and heat capacity inputs.
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
"""
# Format mass flow rate inputs
if np.isscalar(m_flow_borehole):
# Mass flow rate in each fluid circuit
m_flow_in = m_flow_borehole*np.ones(self.nInlets)
else:
# Mass flow rate in each fluid circuit
m_flow_in = m_flow_borehole
self._m_flow_in = m_flow_in
# Mass flow rate in pipes
m_flow_pipe = np.tile(m_flow_in, 2*self.nPipes)
self._m_flow_pipe = m_flow_pipe
# Format heat capacity inputs
if np.isscalar(cp_f):
# Heat capacity in each fluid circuit
cp_in = cp_f*np.ones(self.nInlets)
else:
# Heat capacity in each fluid circuit
cp_in = cp_f
self._cp_in = cp_in
# Heat capacity in pipes
cp_pipe = np.tile(cp_in, 2*self.nPipes)
self._cp_pipe = cp_pipe
def _f1(self, z):
"""
Calculate function f1 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
f1 = np.exp(self._beta*z)*(np.cosh(self._gamma*z)
- self._delta*np.sinh(self._gamma*z))
return f1
def _f2(self, z):
"""
Calculate function f2 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
f2 = np.exp(self._beta*z)*self._beta12/self._gamma \
* np.sinh(self._gamma*z)
return f2
def _f3(self, z):
"""
Calculate function f3 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
f3 = np.exp(self._beta*z)*(np.cosh(self._gamma*z)
+ self._delta*np.sinh(self._gamma*z))
return f3
def _f4(self, z):
"""
Calculate function f4 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
A = self._delta*self._beta1 + self._beta2*self._beta12/self._gamma
f4 = np.exp(self._beta*z) \
* (self._beta1*np.cosh(self._gamma*z) - A*np.sinh(self._gamma*z))
return f4
def _f5(self, z):
"""
Calculate function f5 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
B = self._delta*self._beta2 + self._beta1*self._beta12/self._gamma
f5 = np.exp(self._beta*z) \
* (self._beta2*np.cosh(self._gamma*z) + B*np.sinh(self._gamma*z))
return f5
def _F4(self, z):
"""
Calculate integral of function f4 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
A = self._delta*self._beta1 + self._beta2*self._beta12/self._gamma
C = self._beta1*self._beta + A*self._gamma
S = - (self._beta1*self._gamma + self._beta*A)
denom = (self._beta**2 - self._gamma**2)
F4 = np.exp(self._beta*z) / denom \
* (C*np.cosh(self._gamma*z) + S*np.sinh(self._gamma*z))
return F4
def _F5(self, z):
"""
Calculate integral of function f5 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
B = self._delta*self._beta2 + self._beta1*self._beta12/self._gamma
C = self._beta2*self._beta - B*self._gamma
S = - (self._beta2*self._gamma - self._beta*B)
denom = (self._beta**2 - self._gamma**2)
F5 = np.exp(self._beta*z) / denom \
* (C*np.cosh(self._gamma*z) + S*np.sinh(self._gamma*z))
return F5
class MultipleUTube(_BasePipe):
"""
Class for multiple U-Tube boreholes.
Contains information regarding the physical dimensions and thermal
characteristics of the pipes and the grout material, as well as methods to
evaluate fluid temperatures and heat extraction rates based on the work of
Cimmino [#Cimmino2016]_ for boreholes with any number of U-tubes.
Attributes
----------
pos : list of tuples
Position (x, y) (in meters) of the pipes inside the borehole.
r_in : float
Inner radius (in meters) of the U-Tube pipes.
r_out : float
Outer radius (in meters) of the U-Tube pipes.
borehole : Borehole object
Borehole class object of the borehole containing the U-Tube.
k_s : float
Soil thermal conductivity (in W/m-K).
k_g : float
Grout thermal conductivity (in W/m-K).
R_fp : float
Fluid to outer pipe wall thermal resistance (m-K/W).
J : int, optional
Number of multipoles per pipe to evaluate the thermal resistances.
Default is 2.
nPipes : int
Number of U-Tubes.
config : str, defaults to 'parallel'
Configuration of the U-Tube pipes:
'parallel' : U-tubes are connected in parallel.
'series' : U-tubes are connected in series.
nInlets : int
Total number of pipe inlets, equals to 1.
nOutlets : int
Total number of pipe outlets, equals to 1.
Notes
-----
The expected array shapes of input parameters and outputs are documented
for each class method. `nInlets` and `nOutlets` are the number of inlets
and outlets to the borehole, and both are equal to 1 for a multiple U-tube
borehole. `nSegments` is the number of discretized segments along the
borehole. `nPipes` is the number of pipes (i.e. the number of U-tubes) in
the borehole. `nDepths` is the number of depths at which temperatures are
evaluated.
References
----------
    .. [#Cimmino2016] Cimmino, M. (2016). Fluid and borehole wall temperature
profiles in vertical geothermal boreholes with multiple U-tubes.
Renewable Energy, 96, 137-147.
"""
    def __init__(self, pos, r_in, r_out, borehole, k_s,
                 k_g, R_fp, nPipes, config='parallel', J=2):
        """Store geometry/thermal properties and precompute the delta-circuit
        thermal resistances (see the class docstring for parameter details).
        """
        self.pos = pos
        self.r_in = r_in
        self.r_out = r_out
        self.b = borehole
        self.k_s = k_s
        self.k_g = k_g
        self.R_fp = R_fp
        self.J = J
        self.nPipes = nPipes
        # A multiple U-tube borehole has a single inlet and a single outlet,
        # regardless of the number of U-tubes.
        self.nInlets = 1
        self.nOutlets = 1
        self.config = config.lower()
        self._check_geometry()
        # Delta-circuit thermal resistances
        # (thermal_resistances() returns a pair; index [1] is the
        # delta-circuit resistance matrix used by this model)
        self._Rd = thermal_resistances(pos, r_out, borehole.r_b,
                                       k_s, k_g, self.R_fp, J=self.J)[1]
        # Initialize stored_coefficients
        self._initialize_stored_coefficients()
    def _continuity_condition_base(self, m_flow_borehole, cp_f, nSegments):
        """
        Equation that satisfies equal fluid temperatures in both legs of
        each U-tube pipe at depth (z = H).
        Returns coefficients for the relation:
        .. math::
            \\mathbf{a_{out}} T_{f,out} = \\mathbf{a_{in}} T_{f,in}
            + \\mathbf{a_{b}} \\mathbf{T_b}
        Parameters
        ----------
        m_flow_borehole : float or (nInlets,) array
            Inlet mass flow rate (in kg/s) into the borehole.
        cp_f : float or (nInlets,) array
            Fluid specific isobaric heat capacity (in J/kg.degC).
        nSegments : int
            Number of borehole segments.
        Returns
        -------
        a_in : (nOutlets, nInlets,) array
            Array of coefficients for inlet fluid temperature.
        a_out : (nOutlets, nOutlets,) array
            Array of coefficients for outlet fluid temperature.
        a_b : (nOutlets, nSegments,) array
            Array of coefficients for borehole wall temperatures.

        Raises
        ------
        NotImplementedError
            If ``self.config`` is neither 'parallel' nor 'series'.
        """
        # Check if model variables need to be updated
        self._check_model_variables(m_flow_borehole, cp_f, nSegments)
        # Coefficient matrices from continuity condition:
        # [b_u]*[T_{f,u}](z=0) = [b_d]*[T_{f,d}](z=0) + [b_b]*[T_b]
        b_d, b_u, b_b = self._continuity_condition(
            m_flow_borehole, cp_f, nSegments)
        b_u_m1 = np.linalg.inv(b_u)
        if self.config == 'parallel':
            # Intermediate coefficient matrices:
            # [T_{f,d}](z=0) = [c_in]*[T_{f,in}]
            # (every downward leg receives the single inlet temperature)
            c_in = np.ones((self.nPipes, 1))
            # Intermediate coefficient matrices:
            # [T_{f,out}] = d_u*[T_{f,u}](z=0)
            # (outlet temperature is the flow-weighted mix of the upward legs)
            mcp = self._m_flow_pipe[-self.nPipes:]*self._cp_pipe[-self.nPipes:]
            d_u = np.reshape(mcp/np.sum(mcp), (1, -1))
            # Final coefficient matrices for continuity at depth (z = H):
            # [a_out][T_{f,out}] = [a_in]*[T_{f,in}] + [a_b]*[T_b]
            a_in = d_u @ b_u_m1 @ b_d @ c_in
            a_out = np.array([[1.0]])
            a_b = d_u @ b_u_m1 @ b_b
        elif self.config == 'series':
            # Intermediate coefficient matrices:
            # [T_{f,d}](z=0) = [c_in]*[T_{f,in}] + [c_u]*[T_{f,u}](z=0)
            # (first U-tube is fed by the inlet; each subsequent one by the
            # previous U-tube's upward leg)
            c_in = np.eye(self.nPipes, M=1)
            c_u = np.eye(self.nPipes, k=-1)
            # Intermediate coefficient matrices:
            # [d_u]*[T_{f,u}](z=0) = [d_in]*[T_{f,in}] + [d_b]*[T_b]
            d_u = b_u - b_d @ c_u
            d_in = b_d @ c_in
            d_b = b_b
            d_u_m1 = np.linalg.inv(d_u)
            # Intermediate coefficient matrices:
            # [T_{f,out}] = e_u*[T_{f,u}](z=0)
            # (outlet is the upward leg of the last U-tube)
            e_u = np.eye(self.nPipes, M=1, k=-self.nPipes+1).T
            # Final coefficient matrices for continuity at depth (z = H):
            # [a_out][T_{f,out}] = [a_in]*[T_{f,in}] + [a_b]*[T_b]
            a_in = e_u @ d_u_m1 @ d_in
            a_out = np.array([[1.0]])
            a_b = e_u @ d_u_m1 @ d_b
        else:
            raise NotImplementedError("Configuration '{}' not implemented.".format(self.config))
        return a_in, a_out, a_b
def _continuity_condition_head(self, m_flow_borehole, cp_f, nSegments):
"""
Build coefficient matrices to evaluate fluid temperatures at depth
(z = 0). These coefficients take into account connections between
U-tube pipes.
Returns coefficients for the relation:
.. math::
\\mathbf{T_f}(z=0) = \\mathbf{a_{in}} \\mathbf{T_{f,in}}
+ \\mathbf{a_{out}} \\mathbf{T_{f,out}}
+ \\mathbf{a_{b}} \\mathbf{T_{b}}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_in : (2*nPipes, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_out : (2*nPipes, nOutlets,) array
Array of coefficients for outlet fluid temperature.
a_b : (2*nPipes, nSegments,) array
| |
in degrees:
standard mode | logo mode
0 - east | 0 - north
90 - north | 90 - east
180 - west | 180 - south
270 - south | 270 - west
:param to_angle: the new turtle heading
"""
self._turn(to_angle - self._heading)
seth = setheading
    def home(self):
        """Move turtle to the origin - coordinates (0,0) - and set its heading
        to its start-orientation
        (which depends on the mode, see mode()).
        """
        # 90 is this library's start-orientation (see setheading's table);
        # NOTE(review): presumably correct for both modes here — confirm.
        self.setheading(90)
        self.goto(0, 0)
    # pylint:disable=too-many-locals, too-many-statements, too-many-branches
    def _plot(self, x, y, c):
        """Rasterize one pen "dab" of color ``c`` at (x, y).

        With pensize 1 this sets a single pixel.  With a wider pen it draws a
        segment of length pensize across the travel direction (heading - 90),
        centered on (x, y), using a Bresenham-style walk.  Pixels outside the
        bitmap are silently skipped.
        """
        if self._pensize == 1:
            try:
                self._fg_bitmap[int(x), int(y)] = c
                return
            except IndexError:
                # Off-bitmap: fall through to the general path, whose own
                # IndexError guards skip out-of-range pixels.
                pass
        r = self._pensize // 2 + 1
        # Screen-space angle across the travel direction (heading - 90).
        angle = (
            self._angleOffset + self._angleOrient * self._heading - 90
        ) % self._fullcircle
        sin = math.sin(math.radians(angle))
        cos = math.cos(math.radians(angle))
        # End points of the pen-width segment, centered on (x, y).
        x0 = x + sin * r
        x1 = x - sin * (self._pensize - r)
        y0 = y - cos * r
        y1 = y + cos * (self._pensize - r)
        # Round away from zero so the segment fully covers the pen width.
        coords = [x0, x1, y0, y1]
        for i, v in enumerate(coords):
            if v >= 0:
                coords[i] = math.ceil(v)
            else:
                coords[i] = math.floor(v)
        x0, x1, y0, y1 = coords
        # Bresenham setup: "steep" swaps the x/y roles so the longer axis is
        # walked; "rev" walks it in decreasing order.
        steep = abs(y1 - y0) > abs(x1 - x0)
        rev = False
        dx = x1 - x0
        if steep:
            x0, y0 = y0, x0
            x1, y1 = y1, x1
            dx = x1 - x0
        if x0 > x1:
            rev = True
            dx = x0 - x1
        dy = abs(y1 - y0)
        err = dx / 2
        ystep = -1
        if y0 < y1:
            ystep = 1
        while (not rev and x0 <= x1) or (rev and x1 <= x0):
            # first row
            if steep:
                try:
                    self._fg_bitmap[int(y0), int(x0)] = c
                except IndexError:
                    pass
            else:
                try:
                    self._fg_bitmap[int(x0), int(y0)] = c
                except IndexError:
                    pass
            if y0 != y1 and self._heading % 90 != 0:
                # need a second row to fill the cracks left by diagonal steps
                j = -1 if y1 < y0 else 1
                if steep:
                    try:
                        self._fg_bitmap[int(y0 + j), int(x0)] = c
                    except IndexError:
                        pass
                else:
                    try:
                        self._fg_bitmap[int(x0), int(y0 + j)] = c
                    except IndexError:
                        pass
            # Accumulate the minor-axis error and step when it overflows.
            err -= dy
            if err < 0:
                y0 += ystep
                err += dx
            if rev:
                x0 -= 1
            else:
                x0 += 1
    # pylint:enable=too-many-locals, too-many-statements, too-many-branches
    def circle(self, radius, extent=None, steps=None):
        """Draw a circle with given radius. The center is radius units left of
        the turtle; extent - an angle - determines which part of the circle is
        drawn. If extent is not given, draw the entire circle. If extent is not
        a full circle, one endpoint of the arc is the current pen position.
        Draw the arc in counterclockwise direction if radius is positive,
        otherwise in clockwise direction. Finally the direction of the turtle
        is changed by the amount of extent.
        As the circle is approximated by an inscribed regular polygon, steps
        determines the number of steps to use. If not given, it will be
        calculated automatically. May be used to draw regular polygons.
        :param radius: the radius of the circle
        :param extent: the arc of the circle to be drawn
        :param steps: how many points along the arc are computed
        """
        # call: circle(radius)                  # full circle
        # --or: circle(radius, extent)          # arc
        # --or: circle(radius, extent, steps)
        # --or: circle(radius, steps=6)         # 6-sided polygon
        pos = self.pos()
        h = self._heading
        if extent is None:
            extent = self._fullcircle
        if steps is None:
            # Heuristic step count: more steps for larger radii, scaled by the
            # fraction of the full circle actually drawn.
            frac = abs(extent) / self._fullcircle
            steps = int(min(3 + abs(radius) / 4.0, 12.0) * frac) * 4
        w = extent / steps
        w2 = 0.5 * w
        # Chord length of one side of the inscribed polygon.
        l = radius * math.sin(w * math.pi / 180.0 * self._degreesPerAU)
        if radius < 0:
            # Negative radius: draw in the clockwise direction.
            l, w, w2 = -l, -w, -w2
        self.left(w2)
        for _ in range(steps - 1):
            self.forward(l)
            self.left(w)
        # rounding error correction on the last step
        self.setheading(self.towards(pos))
        # get back to exact same position and heading
        self.goto(pos)
        self.setheading(h)
# pylint:disable=inconsistent-return-statements
def speed(self, speed=None):
"""
Set the turtle's speed to an integer value in the range 0..10. If no
argument is given, return current speed.
If input is a number greater than 10 or smaller than 1, speed is set
to 0. Speedstrings are mapped to speedvalues as follows:
"fastest": 0
"fast": 10
"normal": 6
"slow": 3
"slowest": 1
Speeds from 1 to 10 enforce increasingly faster animation of line
drawing and turtle turning.
Attention: speed = 0 means that no animation takes place.
forward/back makes turtle jump and likewise left/right make the
turtle turn instantly.
:param speed: the new turtle speed (0..10) or None
"""
if speed is None:
return self._speed
if speed > 10 or speed < 1:
self._speed = 0
else:
self._speed = speed
# pylint:enable=inconsistent-return-statements
    def dot(self, size=None, color=None):
        """Draw a circular dot with diameter size, using color.
        If size is not given, the maximum of pensize+4 and
        2*pensize is used.
        :param size: the diameter of the dot
        :param color: the color of the dot
        """
        if size is None:
            size = max(self._pensize + 4, self._pensize * 2)
        if color is None:
            color = self._pencolor
        else:
            # Normalize whatever color spec was given into a pen color value.
            color = self._color_to_pencolor(color)
        # Save pen state so it can be restored after the dot is drawn.
        pensize = self._pensize
        pencolor = self._pencolor
        down = self.isdown()
        if size > 1:
            self._pensize = size
            self._pencolor = color
            self.pendown()
            # Two 180-degree turns end at the original heading; NOTE(review):
            # presumably this forces the (enlarged) pen to plot at the current
            # position without moving — confirm against the turn logic.
            self.right(180)
            self.right(180)
            if not down:
                self.penup()
            self._pensize = pensize
            self._pencolor = pencolor
        else:
            # Single-pixel dot: plot directly.
            self._pensize = 1
            self._plot(self._x, self._y, color)
            self._pensize = pensize
    def stamp(self, bitmap=None, palette=None):
        """
        Stamp a copy of the turtle shape onto the canvas at the current
        turtle position. Return a stamp_id for that stamp, which can be used to
        delete it by calling clearstamp(stamp_id).

        Returns -1 (with a console message) when the foreground add-on group
        is already at its fixed capacity of 6 tiles.
        """
        if len(self._fg_addon_group) >= 6:
            print("Addon group full")
            return -1
        s_id = len(self._stamps)
        if self._turtle_pic is None:
            # easy.  Default turtle shape: reuse the built-in bitmap/palette.
            new_stamp = displayio.TileGrid(
                self._turtle_bitmap,
                pixel_shader=self._turtle_palette,
                x=int(self._x - self._turtle_bitmap.width // 2),
                y=int(self._y - self._turtle_bitmap.height // 2),
            )
        elif self._turtle_odb is not None:
            # odb bitmap — track the use count so the backing file can be
            # closed when the last such stamp is cleared.
            new_stamp = displayio.TileGrid(
                self._turtle_odb,
                pixel_shader=displayio.ColorConverter(),
                x=int(self._x - self._turtle_odb.width // 2),
                y=int(self._y - self._turtle_odb.height // 2),
            )
            self._turtle_odb_use += 1
        else:
            # Custom picture shape: the caller must supply both pieces.
            if bitmap is None:
                raise RuntimeError("a bitmap must be provided")
            if palette is None:
                raise RuntimeError("a palette must be provided")
            new_stamp = displayio.TileGrid(
                bitmap,
                pixel_shader=palette,
                x=int(self._x - bitmap.width // 2),
                y=int(self._y - bitmap.height // 2),
            )
        self._fg_addon_group.append(new_stamp)
        if self._turtle_odb is not None:
            # Keep the file handle with the stamp so clearstamp() can close it.
            self._stamps[s_id] = (new_stamp, self._turtle_odb_file)
        else:
            self._stamps[s_id] = new_stamp
        return s_id
def clearstamp(self, stampid):
"""
Delete stamp with given stampid.
:param stampid: the id of the stamp to be deleted
"""
if isinstance(stampid, int):
if stampid in self._stamps and self._stamps[stampid] is not None:
if isinstance(self._stamps[stampid], tuple):
self._fg_addon_group.remove(self._stamps[stampid][0])
self._turtle_odb_use -= 1
if self._turtle_odb_use == 0:
self._stamps[stampid][1].close()
else:
self._fg_addon_group.remove(self._stamps[stampid])
self._stamps[stampid] = None
else:
return
else:
raise TypeError("Stamp id must be an int")
def clearstamps(self, n=None):
"""
Delete all or first/last n of turtle's stamps. If n is None, delete
all stamps, if n > 0 delete first n stamps, else if n < 0 delete last
n stamps.
:param n: how many stamps to delete (None means delete them all)
"""
i = 1
for sid in self._stamps:
if self._stamps[sid] is not None:
self.clearstamp(sid)
if n is not None and i >= n:
return
i += 1
###########################################################################
# Tell turtle's state
    def pos(self):
        """Return the turtle's current location (x,y) (as a Vec2D vector)."""
        # Internal (_x, _y) are bitmap pixel coordinates with the origin at
        # the top-left; convert to turtle coordinates centered on the screen
        # with the y axis pointing up.
        return Vec2D(self._x - self._w // 2, self._h // 2 - self._y)
    position = pos
def towards(self, x1, y1=None):
"""
Return the angle between the line from turtle position to position
specified by (x,y) or the vector. This depends on the turtle's start
orientation which depends on the mode - "standard" or "logo").
:param x: a number or a pair/vector of numbers
:param y: a number if x is a number, else None
"""
if y1 is None:
y1 = x1[1]
x1 = x1[0]
x0, y0 = self.pos()
result = math.degrees(math.atan2(x1 - x0, y1 - y0))
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient * result) % self._fullcircle
def xcor(self):
"""Return the turtle's x coordinate."""
| |
sent_password = True
time.sleep(0.025)
return proc.exitstatus
except vt.TerminalException as err:
log.error(
'Failed to upload file {0!r}: {1}\n'.format(
dest_path, err
),
exc_info=True
)
# Signal an error
return 1
def smb_file(dest_path, contents, kwargs):
    '''
    Use smbclient to copy a file to a server

    ``contents`` is written to a local temporary file, which is then uploaded
    to ``dest_path`` on ``kwargs['hostname']`` by shelling out to smbclient
    with ``kwargs['creds']``.
    '''
    tmpfh, tmppath = tempfile.mkstemp()
    # Close the raw descriptor from mkstemp() right away: the file is
    # re-opened by path below, so the descriptor would otherwise leak.
    os.close(tmpfh)
    with salt.utils.fopen(tmppath, 'w') as tmpfile:
        tmpfile.write(contents)
    log.debug('Uploading {0} to {1} (smbclient)'.format(
        dest_path, kwargs['hostname'])
    )
    # Shell out to smbclient
    comps = tmppath.split('/')
    src_dir = '/'.join(comps[:-1])
    src_file = comps[-1]
    comps = dest_path.split('\\')
    dest_dir = '\\'.join(comps[:-1])
    dest_file = comps[-1]
    cmd = 'smbclient {0}/c$ -c "cd {3}; prompt; lcd {1}; del {4}; mput {2}; rename {2} {4}; exit;"'.format(
        kwargs['creds'], src_dir, src_file, dest_dir, dest_file
    )
    # Previously mislabeled as "SCP command".
    log.debug('SMB command: {0!r}'.format(cmd))
    win_cmd(cmd)
def win_cmd(command, **kwargs):
    '''
    Wrapper for commands to be run against Windows boxes

    Runs ``command`` through a shell via NonBlockingPopen, streaming output
    (controlled by ``kwargs['display_ssh_output']``, default True), and
    returns the process return code, or 1 if execution failed.
    '''
    try:
        proc = NonBlockingPopen(
            command,
            shell=True,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stream_stds=kwargs.get('display_ssh_output', True),
        )
        log.debug(
            'Executing command(PID {0}): {1!r}'.format(
                proc.pid, command
            )
        )
        # Stream output until the process exits, then reap it.
        proc.poll_and_read_until_finish()
        proc.communicate()
        return proc.returncode
    except Exception as err:
        # Broad catch is deliberate: any spawn/stream failure is logged and
        # reported as a generic error exit status.
        log.error(
            'Failed to execute command {0!r}: {1}\n'.format(
                command, err
            ),
            exc_info=True
        )
        # Signal an error
        return 1
def root_cmd(command, tty, sudo, **kwargs):
    '''
    Wrapper for commands to be run as root

    Builds an ``ssh`` command line (optionally through a gateway and/or
    wrapped in ``sudo``), runs it in a pseudo-terminal, and feeds the
    password when prompted.  Returns the remote exit status, or 1 on failure.
    Wrong-password attempts are retried a limited number of times.
    '''
    if sudo:
        if 'sudo_password' in kwargs and kwargs['sudo_password'] is not None:
            command = 'echo "{1}" | sudo -S {0}'.format(
                command,
                kwargs['sudo_password'],
            )
        else:
            command = 'sudo {0}'.format(command)
        log.debug('Using sudo to run command {0}'.format(command))
    ssh_args = []
    if tty:
        # Use double `-t` on the `ssh` command, it's necessary when `sudo` has
        # `requiretty` enforced.
        ssh_args.extend(['-t', '-t'])
    ssh_args.extend([
        # Don't add new hosts to the host key database
        '-oStrictHostKeyChecking=no',
        # Set hosts key database path to /dev/null, ie, non-existing
        '-oUserKnownHostsFile=/dev/null',
        # Don't re-use the SSH connection. Less failures.
        '-oControlPath=none'
    ])
    if 'key_filename' in kwargs:
        # There should never be both a password and an ssh key passed in, so
        ssh_args.extend([
            # tell SSH to skip password authentication
            '-oPasswordAuthentication=no',
            '-oChallengeResponseAuthentication=no',
            # Make sure public key authentication is enabled
            '-oPubkeyAuthentication=yes',
            # No Keyboard interaction!
            '-oKbdInteractiveAuthentication=no',
            # Also, specify the location of the key file
            '-i {0}'.format(kwargs['key_filename'])
        ])
    if 'ssh_gateway' in kwargs:
        ssh_gateway = kwargs['ssh_gateway']
        ssh_gateway_port = 22
        ssh_gateway_key = ''
        ssh_gateway_user = 'root'
        if ':' in ssh_gateway:
            ssh_gateway, ssh_gateway_port = ssh_gateway.split(':')
        if 'ssh_gateway_port' in kwargs:
            ssh_gateway_port = kwargs['ssh_gateway_port']
        if 'ssh_gateway_key' in kwargs:
            ssh_gateway_key = '-i {0}'.format(kwargs['ssh_gateway_key'])
        if 'ssh_gateway_user' in kwargs:
            ssh_gateway_user = kwargs['ssh_gateway_user']
        ssh_args.extend([
            # Setup ProxyCommand
            '-oProxyCommand="ssh {0} {1}@{2} -p {3} nc -q0 %h %p"'.format(
                ssh_gateway_key,
                ssh_gateway_user,
                ssh_gateway,
                ssh_gateway_port
            )
        ])
        log.info(
            'Using SSH gateway {0}@{1}:{2}'.format(
                ssh_gateway_user, ssh_gateway, ssh_gateway_port
            )
        )
    cmd = 'ssh {0} {1[username]}@{1[hostname]} {2}'.format(
        ' '.join(ssh_args), kwargs, pipes.quote(command)
    )
    log.debug('SSH command: {0!r}'.format(cmd))
    try:
        # Allow a limited number of wrong-password retries.  The previous
        # implementation logged a "retrying" message but never retried: the
        # counter was never decremented and there was no loop, so control
        # fell off the end of the function returning None.
        password_retries = 15
        stdout, stderr = None, None
        while password_retries > 0:
            try:
                proc = vt.Terminal(
                    cmd,
                    shell=True,
                    log_stdout=True,
                    log_stderr=True,
                    stream_stdout=kwargs.get('display_ssh_output', True),
                    stream_stderr=kwargs.get('display_ssh_output', True)
                )
                sent_password = False
                while proc.isalive():
                    stdout, stderr = proc.recv()
                    if stdout and SSH_PASSWORD_PROMP_RE.match(stdout):
                        if sent_password:
                            # second time??? Wrong password?
                            log.warning(
                                'Asking for password again. Wrong one provided???'
                            )
                            proc.terminate()
                            raise SaltCloudPasswordError()
                        proc.sendline(kwargs['password'])
                        sent_password = True
                    # 0.0125 is really too fast on some systems
                    time.sleep(0.5)
                return proc.exitstatus
            except SaltCloudPasswordError:
                password_retries -= 1
                if sudo and password_retries > 0:
                    log.warning(
                        'Asking for password failed, retrying'
                    )
                else:
                    return 1
        # Retries exhausted.
        return 1
    except vt.TerminalException as err:
        log.error(
            'Failed to execute command {0!r}: {1}\n'.format(
                command, err
            ),
            exc_info=True
        )
        # Signal an error
        return 1
def check_auth(name, pub_key=None, sock_dir=None, queue=None, timeout=300):
    '''
    This function is called from a multiprocess instance, to wait for a minion
    to become available to receive salt commands

    Watches the master event bus until a ``minion_start`` event for ``name``
    arrives or ``timeout`` seconds elapse; on success the minion name is put
    on ``queue``.  ``pub_key`` is accepted but not used here.
    '''
    event = salt.utils.event.SaltEvent('master', sock_dir)
    starttime = time.mktime(time.localtime())
    newtimeout = timeout
    log.debug(
        'In check_auth, waiting for {0} to become available'.format(
            name
        )
    )
    while newtimeout > 0:
        # Recompute the remaining time on every pass.
        newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
        # NOTE(review): get_event() has its own internal wait, so the
        # effective timeout can overshoot slightly — confirm if this matters.
        ret = event.get_event(full=True)
        if ret is None:
            continue
        if ret['tag'] == 'minion_start' and ret['data']['id'] == name:
            # Report readiness to the parent process and stop waiting.
            queue.put(name)
            newtimeout = 0
            log.debug('Minion {0} is ready to receive commands'.format(name))
def ip_to_int(ip):
    '''
    Converts an IP address to an integer
    '''
    # Shift each dotted octet into place, most significant octet first.
    parts = ip.split('.')
    return sum(int(part) << (8 * pos)
               for pos, part in enumerate(reversed(parts)))
def is_public_ip(ip):
    '''
    Determines whether an IP address falls within one of the private IP ranges

    Returns ``False`` for addresses inside the RFC 1918 private ranges and
    ``True`` otherwise.  The range boundaries are inclusive: the previous
    strict comparisons wrongly classified the endpoints (e.g. 10.0.0.0
    itself) as public.  Only the RFC 1918 ranges are checked; loopback and
    link-local addresses are still reported as public.
    '''
    addr = ip_to_int(ip)
    if 167772160 <= addr <= 184549375:
        # 10.0.0.0/8 (the old comment said /24)
        return False
    if 3232235520 <= addr <= 3232301055:
        # 192.168.0.0/16
        return False
    if 2886729728 <= addr <= 2887778303:
        # 172.16.0.0/12
        return False
    return True
def check_name(name, safe_chars):
    '''
    Check whether the specified name contains invalid characters
    '''
    # Any character outside the allowed set makes the name invalid.
    invalid_char = re.compile('[^{0}]'.format(safe_chars)).search(name)
    if invalid_char is not None:
        raise SaltCloudException(
            '{0} contains characters not supported by this cloud provider. '
            'Valid characters are: {1}'.format(
                name, safe_chars
            )
        )
def remove_sshkey(host, known_hosts=None):
    '''
    Remove a host from the known_hosts file

    If ``known_hosts`` is not given, it is derived from ``$HOME`` or, failing
    that, from the password database entry of the current user.
    '''
    if known_hosts is None:
        if 'HOME' in os.environ:
            known_hosts = '{0}/.ssh/known_hosts'.format(os.environ['HOME'])
        else:
            try:
                # getpwuid() entries expose the home directory as ``pw_dir``;
                # the previous ``pwd_dir`` attribute does not exist, so this
                # fallback always failed silently.
                known_hosts = '{0}/.ssh/known_hosts'.format(
                    pwd.getpwuid(os.getuid()).pw_dir
                )
            except Exception:
                # Best effort only; ssh-keygen falls back to its default file.
                pass
    if known_hosts is not None:
        log.debug(
            'Removing ssh key for {0} from known hosts file {1}'.format(
                host, known_hosts
            )
        )
    else:
        log.debug(
            'Removing ssh key for {0} from known hosts file'.format(host)
        )
    # Quote the host so a hostile value cannot inject shell syntax, and pass
    # the resolved known_hosts file explicitly (it was previously computed
    # and logged but never actually used by the command).
    cmd = 'ssh-keygen -R {0}'.format(pipes.quote(host))
    if known_hosts is not None:
        cmd += ' -f {0}'.format(pipes.quote(known_hosts))
    subprocess.call(cmd, shell=True)
def wait_for_ip(update_callback,
                update_args=None,
                update_kwargs=None,
                timeout=5 * 60,
                interval=5,
                interval_multiplier=1,
                max_failures=10):
    '''
    Helper function that waits for an IP address for a specific maximum amount
    of time.
    :param update_callback: callback function which queries the cloud provider
                            for the VM ip address. It must return None if the
                            required data, IP included, is not available yet.
    :param update_args: Arguments to pass to update_callback
    :param update_kwargs: Keyword arguments to pass to update_callback
    :param timeout: The maximum amount of time(in seconds) to wait for the IP
                    address.
    :param interval: The looping interval, ie, the amount of time to sleep
                     before the next iteration.
    :param interval_multiplier: Increase the interval by this multiplier after
                                each request; helps with throttling
    :param max_failures: If update_callback returns ``False`` it's considered
                         query failure. This value is the amount of failures
                         accepted before giving up.
    :returns: The update_callback returned data
    :raises: SaltCloudExecutionTimeout
    '''
    if update_args is None:
        update_args = ()
    if update_kwargs is None:
        update_kwargs = {}
    # Remember the original timeout for the error message; ``timeout`` itself
    # is counted down below.
    duration = timeout
    while True:
        # NOTE: the countdown format assumes timeouts under one hour.
        log.debug(
            'Waiting for VM IP. Giving up in 00:{0:02d}:{1:02d}'.format(
                int(timeout // 60),
                int(timeout % 60)
            )
        )
        data = update_callback(*update_args, **update_kwargs)
        if data is False:
            # False is an explicit query failure, distinct from None which
            # means "not available yet".
            log.debug(
                'update_callback has returned False which is considered a '
                'failure. Remaining Failures: {0}'.format(max_failures)
            )
            max_failures -= 1
            if max_failures <= 0:
                raise SaltCloudExecutionFailure(
                    'Too much failures occurred while waiting for '
                    'the IP address'
                )
        elif data is not None:
            # Success: the callback produced the requested data.
            return data
        if timeout < 0:
            raise SaltCloudExecutionTimeout(
                'Unable to get IP for 00:{0:02d}:{1:02d}'.format(
                    int(duration // 60),
                    int(duration % 60)
                )
            )
        time.sleep(interval)
        timeout -= interval
        if interval_multiplier > 1:
            # Back off between queries, but never sleep past the deadline.
            interval *= interval_multiplier
            if interval > timeout:
                interval = timeout + 1
            log.info('Interval multiplier in effect; interval is '
                     'now {0}s'.format(interval))
def simple_types_filter(datadict):
    '''
    Convert the data dictionary into simple types, ie, int, float, string,
    bool, etc.

    Keys which are not simple types are replaced by their ``repr()``.
    Dictionary values are filtered recursively; any other non-simple value
    is replaced by its ``repr()``.

    :param datadict: the dictionary to filter. Returned unchanged if it is
                     not a dictionary.
    :returns: a new dictionary containing only simple types
    '''
    if not isinstance(datadict, dict):
        # This function is only supposed to work on dictionaries
        return datadict

    # ``unicode`` and ``long`` only exist on Python 2; fall back to the
    # Python 3 equivalents (str/int) so this helper works on both majors
    # instead of raising NameError on Python 3.
    try:
        simpletypes_keys = (str, unicode, int, long, float, bool)  # noqa: F821
    except NameError:
        simpletypes_keys = (str, int, float, bool)
    simpletypes_values = tuple(list(simpletypes_keys) + [list, tuple])

    simpledict = {}
    # ``items()`` works on both Python 2 and 3 (``iteritems()`` is py2-only)
    for key, value in datadict.items():
        if key is not None and not isinstance(key, simpletypes_keys):
            key = repr(key)
        if value is not None and isinstance(value, dict):
            value = simple_types_filter(value)
        elif value is not None and not isinstance(value, simpletypes_values):
            value = repr(value)
        simpledict[key] = value
    return simpledict
def list_nodes_select(nodes, selection, call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_select function must be called '
            'with -f or --function.'
        )

    if 'error' in nodes:
        raise SaltCloudSystemExit(
            'An error occurred while listing nodes: {0}'.format(
                nodes['error']['Errors']['Error']['Message']
            )
        )

    ret = {}
    for node, data in nodes.items():
        # keep only the fields the caller asked for
        ret[node] = dict(
            (key, data[key]) for key in data if str(key) in selection
        )
    return ret
def init_cachedir(base=None):
'''
Initialize the cachedir needed for Salt Cloud to keep track of minions
'''
if base is None:
| |
# repo: mjlshen/qontract-reconcile
import base64
import logging
import json
import os
import shutil
from datetime import datetime
from collections import defaultdict
from threading import Lock
from dataclasses import dataclass
from typing import Iterable, Mapping, Any
from python_terraform import Terraform, IsFlagged, TerraformCommandError
from ruamel import yaml
from sretoolbox.utils import retry
from sretoolbox.utils import threaded
import reconcile.utils.lean_terraform_client as lean_tf
from reconcile.utils import gql
from reconcile.utils.aws_api import AWSApi
from reconcile.utils.openshift_resource import OpenshiftResource as OR
# only this `terraform show` JSON format version is supported/tested
ALLOWED_TF_SHOW_FORMAT_VERSION = "0.1"
# date format of the deletionApprovals 'expiration' field
DATE_FORMAT = '%Y-%m-%d'
@dataclass
class AccountUser:
    """An IAM user paired with the AWS account it belongs to."""
    # AWS account name (key into TerraformClient.accounts)
    account: str
    # IAM user name
    user: str
class DeletionApprovalExpirationValueError(Exception):
    """Raised when a deletionApproval 'expiration' value does not parse
    with DATE_FORMAT."""
    pass
class TerraformClient: # pylint: disable=too-many-public-methods
    def __init__(self, integration: str, integration_version: str,
                 integration_prefix: str, accounts: Iterable[Mapping[str, Any]],
                 working_dirs: Mapping[str, str], thread_pool_size: int,
                 aws_api: AWSApi, init_users=False):
        """Set up terraform wrappers for a set of AWS accounts.

        :param integration: name of the calling integration
        :param integration_version: version of the calling integration
        :param integration_prefix: prefix of the bookkeeping keys expected
            in terraform outputs (cluster, namespace, resource, ...)
        :param accounts: account definitions; each mapping must contain 'name'
        :param working_dirs: account name -> terraform working directory
        :param thread_pool_size: threads for init/output/plan/apply; also
            passed to terraform as its parallelism setting
        :param aws_api: AWSApi instance for AWS lookups
        :param init_users: when True, also collect the IAM users already
            present in the terraform outputs
        """
        self.integration = integration
        self.integration_version = integration_version
        self.integration_prefix = integration_prefix
        self.working_dirs = working_dirs
        self.accounts = {a['name']: a for a in accounts}
        self.parallelism = thread_pool_size
        self.thread_pool_size = thread_pool_size
        self._aws_api = aws_api
        # serializes log writes from worker threads
        self._log_lock = Lock()
        # flipped to True as soon as any diff worth applying is detected
        self.should_apply = False
        # `terraform init` every working dir, then read the current outputs
        self.init_specs()
        self.init_outputs()
        self.OUTPUT_TYPE_SECRETS = 'Secrets'
        self.OUTPUT_TYPE_PASSWORDS = 'enc-passwords'
        self.OUTPUT_TYPE_CONSOLEURLS = 'console-urls'
        if init_users:
            self.init_existing_users()
def init_existing_users(self):
all_users = {}
for account, output in self.outputs.items():
users = []
user_passwords = self.format_output(
output, self.OUTPUT_TYPE_PASSWORDS)
for user_name in user_passwords:
users.append(user_name)
all_users[account] = users
self.users = all_users
def get_new_users(self):
new_users = []
self.init_outputs() # get updated output
for account, output in self.outputs.items():
user_passwords = self.format_output(
output, self.OUTPUT_TYPE_PASSWORDS)
console_urls = self.format_output(
output, self.OUTPUT_TYPE_CONSOLEURLS)
for user_name, enc_password in user_passwords.items():
if AccountUser(account, user_name) not in self.created_users:
continue
new_users.append((account, console_urls[account],
user_name, enc_password))
return new_users
def init_specs(self):
wd_specs = \
[{'name': name, 'wd': wd}
for name, wd in self.working_dirs.items()]
results = threaded.run(self.terraform_init, wd_specs,
self.thread_pool_size)
self.specs = \
[{'name': name, 'tf': tf} for name, tf in results]
@retry(exceptions=TerraformCommandError)
def terraform_init(self, init_spec):
name = init_spec['name']
wd = init_spec['wd']
tf = Terraform(working_dir=wd)
return_code, stdout, stderr = tf.init()
error = self.check_output(name, 'init', return_code, stdout, stderr)
if error:
raise TerraformCommandError(
return_code, 'init', out=stdout, err=stderr)
return name, tf
def init_outputs(self):
results = threaded.run(self.terraform_output, self.specs,
self.thread_pool_size)
self.outputs = dict(results)
@retry(exceptions=TerraformCommandError)
def terraform_output(self, spec):
name = spec['name']
tf = spec['tf']
return_code, stdout, stderr = tf.output_cmd(json=IsFlagged)
error = self.check_output(name, 'output', return_code, stdout, stderr)
no_output_error = \
'The module root could not be found. There is nothing to output.'
if error:
if no_output_error in stderr:
stdout = '{}'
else:
raise TerraformCommandError(
return_code, 'output', out=stdout, err=stderr)
return name, json.loads(stdout)
# terraform plan
def plan(self, enable_deletion):
errors = False
disabled_deletions_detected = False
results = threaded.run(self.terraform_plan, self.specs,
self.thread_pool_size,
enable_deletion=enable_deletion)
self.deleted_users = []
self.created_users = []
for disabled_deletion_detected, deleted_users, created_users, error \
in results:
if error:
errors = True
if disabled_deletion_detected:
disabled_deletions_detected = True
self.deleted_users.extend(deleted_users)
self.created_users.extend(created_users)
return disabled_deletions_detected, errors
def dump_deleted_users(self, io_dir):
if not self.deleted_users:
return
if not os.path.exists(io_dir):
os.makedirs(io_dir)
file_path = os.path.join(io_dir, self.integration + '.json')
with open(file_path, 'w') as f:
f.write(json.dumps(self.deleted_users))
@retry()
def terraform_plan(self, plan_spec, enable_deletion):
name = plan_spec['name']
tf = plan_spec['tf']
return_code, stdout, stderr = tf.plan(detailed_exitcode=False,
parallelism=self.parallelism,
out=name)
error = self.check_output(name, 'plan', return_code, stdout, stderr)
disabled_deletion_detected, deleted_users, created_users = \
self.log_plan_diff(name, tf, enable_deletion)
return disabled_deletion_detected, deleted_users, created_users, error
    def log_plan_diff(self, name, tf, enable_deletion):
        """Inspect one account's saved plan (via `terraform show`), log every
        output/resource change, and flag disallowed deletions.

        Side effects: sets ``self.should_apply`` to True when any change
        worth applying is found.

        :param name: account name (key into self.accounts)
        :param tf: Terraform handle (only tf.working_dir is used here)
        :param enable_deletion: integration-wide deletion switch
        :returns: (disabled_deletion_detected, deleted_users, created_users)
        :raises NotImplementedError: on an untested `terraform show`
            format version
        """
        disabled_deletion_detected = False
        account_enable_deletion = \
            self.accounts[name].get('enableDeletion') or False
        # deletions are allowed
        # if enableDeletion is true for an account
        # or if the integration's enable_deletion is true
        deletions_allowed = enable_deletion or account_enable_deletion
        deleted_users = []
        created_users = []

        output = self.terraform_show(name, tf.working_dir)
        format_version = output.get('format_version')
        if format_version != ALLOWED_TF_SHOW_FORMAT_VERSION:
            raise NotImplementedError(
                'terraform show untested format version')

        # https://www.terraform.io/docs/internals/json-format.html
        # Terraform is not yet fully able to
        # track changes to output values, so the actions indicated may not be
        # fully accurate, but the "after" value will always be correct.
        # to overcome the "before" value not being accurate,
        # we find it in the previously initiated outputs.
        output_changes = output.get('output_changes', {})
        for output_name, output_change in output_changes.items():
            before = self.outputs[name].get(output_name, {}).get('value')
            after = output_change.get('after')
            if before != after:
                logging.info(['update', name, 'output', output_name])
                self.should_apply = True

        # A way to detect deleted outputs is by comparing
        # the prior state with the output changes.
        # the output changes do not contain deleted outputs
        # while the prior state does. for the outputs to
        # actually be deleted, we should apply.
        prior_outputs = \
            output.get('prior_state', {}).get('values', {}).get('outputs', {})
        deleted_outputs = \
            [po for po in prior_outputs if po not in output_changes]
        for output_name in deleted_outputs:
            logging.info(['delete', name, 'output', output_name])
            self.should_apply = True

        resource_changes = output.get('resource_changes')
        if resource_changes is None:
            return disabled_deletion_detected, deleted_users, created_users

        # resource types whose deletion never requires approval
        always_enabled_deletions = {
            'random_id',
            'aws_lb_target_group_attachment',
        }

        # https://www.terraform.io/docs/internals/json-format.html
        for resource_change in resource_changes:
            resource_type = resource_change['type']
            resource_name = resource_change['name']
            resource_change = resource_change['change']
            actions = resource_change['actions']
            for action in actions:
                if action == 'no-op':
                    logging.debug(
                        [action, name, resource_type, resource_name])
                    continue
                # Ignore RDS modifications that are going to occur during the next
                # maintenance window. This can be up to 7 days away and will cause
                # unnecessary Terraform state updates until they complete.
                if action == 'update' and resource_type == 'aws_db_instance' and \
                        self._is_ignored_rds_modification(
                            name, resource_name, resource_change):
                    logging.debug(
                        f"Not setting should_apply for {resource_name} because the "
                        f"only change is EngineVersion and that setting is in "
                        f"PendingModifiedValues")
                    continue
                # lock so the threaded per-account workers do not
                # interleave log lines
                with self._log_lock:
                    logging.info([action, name, resource_type, resource_name])
                    self.should_apply = True
                if action == 'create':
                    if resource_type == 'aws_iam_user_login_profile':
                        created_users.append(AccountUser(name, resource_name))
                if action == 'delete':
                    if resource_type in always_enabled_deletions:
                        continue
                    if not deletions_allowed and not \
                            self.deletion_approved(
                                name, resource_type, resource_name):
                        disabled_deletion_detected = True
                        logging.error(
                            '\'delete\' action is not enabled. ' +
                            'Please run the integration manually ' +
                            'with the \'--enable-deletion\' flag.'
                        )
                    if resource_type == 'aws_db_instance':
                        deletion_protected = \
                            resource_change['before'].get(
                                'deletion_protection')
                        if deletion_protected:
                            disabled_deletion_detected = True
                            logging.error(
                                '\'delete\' action is not enabled for '
                                'deletion protected RDS instance: '
                                f'{resource_name}. Please set '
                                'deletion_protection to false in a new MR. '
                                'The new MR must be merged first.'
                            )
        return disabled_deletion_detected, deleted_users, created_users
def deletion_approved(self, account_name, resource_type, resource_name):
account = self.accounts[account_name]
deletion_approvals = account.get('deletionApprovals')
if not deletion_approvals:
return False
now = datetime.utcnow()
for da in deletion_approvals:
try:
expiration = datetime.strptime(da['expiration'], DATE_FORMAT)
except ValueError:
raise DeletionApprovalExpirationValueError(
f"[{account_name}] expiration not does not match "
f"date format {DATE_FORMAT}. details: "
f"type: {da['type']}, name: {da['name']}"
)
if resource_type == da['type'] \
and resource_name == da['name'] \
and now <= expiration:
return True
return False
    @staticmethod
    def terraform_show(name, working_dir):
        """Return the saved plan `name` in `working_dir` as a dict, via the
        lean terraform client's JSON show."""
        return lean_tf.show_json(working_dir, name)
# terraform apply
def apply(self):
errors = False
results = threaded.run(self.terraform_apply, self.specs,
self.thread_pool_size)
for error in results:
if error:
errors = True
return errors
def terraform_apply(self, apply_spec):
name = apply_spec['name']
tf = apply_spec['tf']
# adding var=None to allow applying the saved plan
# https://github.com/beelit94/python-terraform/issues/67
return_code, stdout, stderr = tf.apply(dir_or_plan=name, var=None)
error = self.check_output(name, 'apply', return_code, stdout, stderr)
return error
def get_terraform_output_secrets(self):
data = {}
for account, output in self.outputs.items():
data[account] = \
self.format_output(output, self.OUTPUT_TYPE_SECRETS)
return data
    def populate_desired_state(self, ri, oc_map, tf_namespaces, account_name):
        """Turn the terraform 'Secrets' outputs into desired OpenShift
        resources and register them with the resource inventory.

        :param ri: resource inventory receiving the desired resources
        :param oc_map: cluster -> client map; clusters missing from the map
            are skipped
        :param tf_namespaces: namespace definitions, used to resolve RDS
            replica credentials
        :param account_name: when set, only process this account
        """
        self.init_outputs()  # get updated output

        # Dealing with credentials for RDS replicas
        replicas_info = self.get_replicas_info(namespaces=tf_namespaces)

        for account, output in self.outputs.items():
            if account_name and account != account_name:
                continue
            formatted_output = self.format_output(
                output, self.OUTPUT_TYPE_SECRETS)

            for name, data in formatted_output.items():
                # Grabbing the username/password from the
                # replica_source and using them in the
                # replica. This is needed because we can't
                # set username/password for a replica in
                # terraform.
                if account in replicas_info:
                    if name in replicas_info[account]:
                        replica_src_name = replicas_info[account][name]
                        data['db.user'] = \
                            formatted_output[replica_src_name]['db.user']
                        data['db.password'] = \
                            formatted_output[replica_src_name]['db.password']

                # bookkeeping keys are prefixed with the integration prefix
                cluster = data['{}_cluster'.format(self.integration_prefix)]
                if not oc_map.get(cluster):
                    continue
                namespace = \
                    data['{}_namespace'.format(self.integration_prefix)]
                resource = data['{}_resource'.format(self.integration_prefix)]
                output_resource_name = data['{}_output_resource_name'.format(
                    self.integration_prefix)]
                annotations = data.get('{}_annotations'.format(
                    self.integration_prefix))
                oc_resource = \
                    self.construct_oc_resource(output_resource_name, data,
                                               account, annotations)
                ri.add_desired(
                    cluster,
                    namespace,
                    resource,
                    output_resource_name,
                    oc_resource
                )
@staticmethod
def get_replicas_info(namespaces):
replicas_info = defaultdict(dict)
for tf_namespace in namespaces:
tf_resources = tf_namespace.get('terraformResources')
if tf_resources is None:
continue
for tf_resource in tf_namespace['terraformResources']:
# First, we have to find the terraform resources
# that have a replica_source defined in app-interface
replica_src = tf_resource.get('replica_source')
if replica_src is None:
# When replica_source is not there, we look for
# replicate_source_db in the defaults
replica_src_db = None
defaults_ref = tf_resource.get('defaults')
if defaults_ref is not None:
defaults_res = gql.get_api().get_resource(
defaults_ref
)
defaults = yaml.safe_load(defaults_res['content'])
replica_src_db = defaults.get('replicate_source_db')
# Also, we look for replicate_source_db in the overrides
override_replica_src_db = None
overrides = tf_resource.get('overrides')
if | |
from __future__ import absolute_import, division, print_function
import warnings
import pkg_resources
import numpy as np
from numpy.linalg.linalg import LinAlgError
import theano
from theano import Op, config, tensor
from theano.scalar import bool as bool_t
from theano.gof import COp, ParamsType
from theano.gpuarray import GpuArrayType
from .basic_ops import (CGpuKernelBase, as_gpuarray_variable, gpu_contiguous, gpuarray_helper_inc_dir,
infer_context_name)
from .type import gpu_context_type
# Optional GPU library probes. The availability flags below are checked by
# the ops' make_node() methods before the corresponding op is constructed.
try:
    import pygpu
    from pygpu.basic import triu, tril
    pygpu_available = True
except ImportError:
    pygpu_available = False

cusolver_available = False
try:
    import skcuda
    from skcuda import cusolver
    cusolver_available = True
except (ImportError, OSError, RuntimeError, pkg_resources.DistributionNotFound):
    pass

cublas_available = False
try:
    from skcuda import cublas
    cublas_available = True
except (ImportError, OSError, RuntimeError, pkg_resources.DistributionNotFound):
    pass
if cusolver_available:
    # Add cusolver call as it is missing in skcuda
    # SPOTRS (triangular solve using an existing Cholesky factorization)
    cusolver._libcusolver.cusolverDnSpotrs.restype = int
    cusolver._libcusolver.cusolverDnSpotrs.argtypes = [cusolver.ctypes.c_void_p,
                                                       cusolver.ctypes.c_int,
                                                       cusolver.ctypes.c_int,
                                                       cusolver.ctypes.c_int,
                                                       cusolver.ctypes.c_void_p,
                                                       cusolver.ctypes.c_int,
                                                       cusolver.ctypes.c_void_p,
                                                       cusolver.ctypes.c_int,
                                                       cusolver.ctypes.c_void_p]

    def cusolverDnSpotrs(handle, uplo, n, nrhs, A, lda,
                         B, ldb, devInfo):
        """
        Solve real single precision linear system for hermitian matrices.

        A, B and devInfo are device pointers (converted with int()).

        References
        ----------
        `cusolverDn<t>potrs <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-potrs>`_
        """
        status = cusolver._libcusolver.cusolverDnSpotrs(handle, uplo, n, nrhs,
                                                        int(A), lda, int(B),
                                                        ldb, int(devInfo))
        cusolver.cusolverCheckStatus(status)
def attach_cusolver_handle_to_context(ctx):
    """Create a cusolver handle for `ctx` once and cache it on the context."""
    if getattr(ctx, 'cusolver_handle', None) is None:
        with ctx:
            ctx.cusolver_handle = cusolver.cusolverDnCreate()
def attach_cublas_handle_to_context(ctx):
    """Create a cublas handle for `ctx` once and cache it on the context."""
    if getattr(ctx, 'cublas_handle', None) is None:
        with ctx:
            ctx.cublas_handle = cublas.cublasCreate()
# it is a subset of all cases available in slinalg's MATRIX_STRUCTURE
# (valid values for GpuCusolverSolve's A_structure parameter)
MATRIX_STRUCTURES_SOLVE = (
    'general',
    'symmetric',
    'lower_triangular',
    'upper_triangular')
class GpuCusolverSolve(Op):
    """
    CUSOLVER GPU solver OP.

    Solves A x = b on the GPU for float32 matrices. A 'symmetric'
    A_structure uses the Cholesky path (potrf/potrs); every other
    structure goes through the general LU path (getrf/getrs).

    Parameters
    ----------
    trans
        Whether to take the transpose of the input matrix or not.
    """

    __props__ = ('A_structure', 'trans', 'inplace')

    def __init__(self, A_structure='general', trans='N', inplace=False):
        self.trans = trans
        self.inplace = inplace
        self.A_structure = A_structure
        if self.inplace:
            # output 0 reuses/destroys input 0
            self.destroy_map = {0: [0]}
        assert A_structure in MATRIX_STRUCTURES_SOLVE
        super(GpuCusolverSolve, self).__init__()

    def make_node(self, inp1, inp2):
        # inp1: A (2-D), inp2: b (2-D); both must be float32 GPU arrays
        if not cusolver_available:
            raise RuntimeError('CUSOLVER is not available and '
                               'GpuCusolverSolve Op can not be constructed.')
        if skcuda.__version__ <= '0.5.1':
            warnings.warn('The GpuSolve op requires scikit-cuda > 0.5.1 to work with CUDA 8')
        context_name = infer_context_name(inp1, inp2)
        inp1 = as_gpuarray_variable(inp1, context_name)
        inp2 = as_gpuarray_variable(inp2, context_name)

        inp1 = gpu_contiguous(inp1)
        inp2 = gpu_contiguous(inp2)

        # this op can only operate on float32 matrices
        assert inp1.ndim == 2
        assert inp2.ndim == 2
        assert inp1.dtype == 'float32'
        assert inp2.dtype == 'float32'

        return theano.Apply(
            self, [inp1, inp2],
            [GpuArrayType('float32',
                          broadcastable=inp1.broadcastable,
                          context_name=context_name)()])

    def prepare_node(self, node, storage_map, compute_map, impl):
        # make sure a cusolver handle is cached on the GPU context
        ctx = node.inputs[0].type.context
        attach_cusolver_handle_to_context(ctx)

    def check_dev_info(self, dev_info):
        # cusolver reports a positive devInfo value on factorization failure
        val = np.asarray(dev_info)[0]
        if val > 0:
            raise LinAlgError('A is singular')

    def perform(self, node, inputs, outputs):
        context = inputs[0][0].context

        # Output storage.
        z = outputs[0]

        # Matrix.
        A = inputs[0]

        # Solution vectors.
        b = inputs[1]

        assert(len(A.shape) == 2)
        assert(len(b.shape) == 2)

        if self.trans in ['T', 'C']:
            trans = 1
            l, n = A.shape
            k, m = b.shape
        elif self.trans == 'N':
            trans = 0
            n, l = A.shape
            k, m = b.shape
        else:
            raise ValueError('Invalid value for trans')
        if l != n:
            raise ValueError('A must be a square matrix')
        if n != k:
            raise ValueError('A and b must be aligned.')

        lda = max(1, n)
        ldb = max(1, k)

        # We copy A and b as cusolver operates inplace
        b = pygpu.array(b, copy=True, order='F')
        if not self.inplace:
            A = pygpu.array(A, copy=True)
        A_ptr = A.gpudata
        b_ptr = b.gpudata

        # cusolver expects a F ordered matrix, but A is not explicitly
        # converted between C and F order, instead we switch the
        # "transpose" flag.
        if A.flags['C_CONTIGUOUS']:
            trans = 1 - trans

        if self.A_structure == 'symmetric':
            # Cholesky factorization, then potrs triangular solve
            with context:
                workspace_size = cusolver.cusolverDnSpotrf_bufferSize(
                    context.cusolver_handle, 0, n, A_ptr, lda)

            workspace = pygpu.zeros(workspace_size, dtype='float32',
                                    context=context)

            dev_info = pygpu.zeros((1,), dtype='int32', context=context)

            workspace_ptr = workspace.gpudata
            dev_info_ptr = dev_info.gpudata

            with context:
                cusolver.cusolverDnSpotrf(
                    context.cusolver_handle, 0, n, A_ptr, lda, workspace_ptr,
                    workspace_size, dev_info_ptr)
                self.check_dev_info(dev_info)

                cusolverDnSpotrs(
                    context.cusolver_handle, 0, n, m, A_ptr, lda,
                    b_ptr, ldb, dev_info_ptr)

        else:
            # general case for A: LU factorization, then getrs solve
            with context:
                workspace_size = cusolver.cusolverDnSgetrf_bufferSize(
                    context.cusolver_handle, n, n, A_ptr, lda)

            workspace = pygpu.zeros(workspace_size, dtype='float32',
                                    context=context)

            pivots = pygpu.zeros(n, dtype='int32', context=context)

            dev_info = pygpu.zeros((1,), dtype='int32', context=context)

            workspace_ptr = workspace.gpudata
            pivots_ptr = pivots.gpudata
            dev_info_ptr = dev_info.gpudata

            with context:
                cusolver.cusolverDnSgetrf(
                    context.cusolver_handle, n, n, A_ptr, lda, workspace_ptr,
                    pivots_ptr, dev_info_ptr)
                self.check_dev_info(dev_info)

                cusolver.cusolverDnSgetrs(
                    context.cusolver_handle, trans, n, m, A_ptr, lda,
                    pivots_ptr, b_ptr, ldb, dev_info_ptr)

        z[0] = b
class GpuCublasTriangularSolve(Op):
    """
    CUBLAS GPU Triangular Solve Op.

    Solves A x = b on the GPU where A is a float32 triangular matrix and
    b is a matrix or a vector.

    Parameters
    ----------
    lower
        Whether system is lower-triangular (True) or upper-triangular (False).
    trans
        Whether to take the transpose of the input matrix or not.
    """
    __props__ = ('trans', 'lower')

    def __init__(self, lower=True, trans='N'):
        self.trans = trans
        self.lower = lower
        super(GpuCublasTriangularSolve, self).__init__()

    def make_node(self, inp1, inp2):
        # inp1: triangular A (2-D), inp2: right hand side (1-D or 2-D)
        if not cublas_available:
            raise RuntimeError('CUBLAS is not available and '
                               'GpuCublasTriangularSolve Op can not be constructed.')
        context_name = infer_context_name(inp1, inp2)

        inp1 = as_gpuarray_variable(inp1, context_name)
        inp2 = as_gpuarray_variable(inp2, context_name)

        inp1 = gpu_contiguous(inp1)
        inp2 = gpu_contiguous(inp2)

        # this op can only operate on float32 matrices
        assert inp1.ndim == 2
        assert inp2.ndim in [1, 2]
        assert inp1.dtype == 'float32'
        assert inp2.dtype == 'float32'

        return theano.Apply(self, [inp1, inp2],
                            [GpuArrayType('float32',
                                          broadcastable=inp2.broadcastable,
                                          context_name=context_name)()])

    def prepare_node(self, node, storage_map, compute_map, impl):
        # make sure a cublas handle is cached on the GPU context
        ctx = node.inputs[0].type.context
        attach_cublas_handle_to_context(ctx)

    def perform(self, node, inputs, outputs):
        ctx = node.inputs[0].type.context

        # Solution set
        x = outputs[0]

        # Matrix.
        A = inputs[0]

        # right hand side
        b = inputs[1]

        assert(len(A.shape) == 2)
        assert(len(b.shape) in [1, 2])

        # implicitly deal with the difference between C order
        # and fortran order by flipping the trans and lower flags
        lower = not self.lower
        trans = self.trans
        if trans in ['T', 'C']:
            trans = 'N'
            l, n = A.shape
        elif trans == 'N':
            trans = 'T'
            n, l = A.shape
        else:
            raise ValueError('Invalid value for trans')

        if b.ndim == 2:
            k, m = b.shape
        else:
            k, = b.shape
            m = 1

        if l != n:
            raise ValueError('A must be a square matrix')
        if n != k:
            raise ValueError('A and b must be aligned.')

        lda = max(1, n)
        ldb = max(1, k)

        # solution overwrites right hand side on exit
        b = pygpu.array(b, copy=True, order='F')

        A_ptr = A.gpudata
        b_ptr = b.gpudata

        # unit scalar used for multiplication
        alpha = 1.0
        # indicates matrix A is on left of B
        side = 'l'
        # set whether upper or lower part of matrix A stored
        uplo = 'l' if lower else 'u'
        # indicates elements on diagonal of matrix A may not be unity
        diag = 'n'

        with ctx:
            if b.ndim == 1:
                # matrix vector solve
                cublas.cublasStrsv(ctx.cublas_handle, uplo, trans, diag, n,
                                   A_ptr, lda, b_ptr, 1)
            else:
                cublas.cublasStrsm(ctx.cublas_handle, side, uplo, trans, diag,
                                   n, m, alpha, A_ptr, lda, b_ptr, ldb)

        x[0] = b
def gpu_solve(A, b, A_structure='general', trans='N'):
    """Solve A x = b on the GPU, dispatching on the structure of A."""
    if A_structure == 'lower':
        return GpuCublasTriangularSolve(lower=True, trans=trans)(A, b)
    if A_structure == 'upper':
        return GpuCublasTriangularSolve(lower=False, trans=trans)(A, b)
    # everything else goes through the cusolver-based op
    return GpuCusolverSolve(A_structure, trans)(A, b)
class GpuCholesky(Op):
"""
CUSOLVER GPU Cholesky Op.
Given a real positive definite matrix `A` returns either a lower
triangular matrix `L` such that `A == dot(L, L.T)` if `lower == True`
else returns an upper triangular matrix `U` such that `A == dot(U.T, U)`
if `lower == False`.
Parameters
----------
lower
Whether to return a lower rather than upper triangular decomposition.
"""
__props__ = ('lower', 'inplace')
    def __init__(self, lower=True, inplace=False):
        # lower: return L (A == dot(L, L.T)) when True, else U
        self.lower = lower
        # inplace: overwrite the input buffer with the result
        self.inplace = inplace
        if self.inplace:
            # output 0 reuses/destroys input 0
            self.destroy_map = {0: [0]}
        super(GpuCholesky, self).__init__()
    def clone_inplace(self):
        """Return a copy of this op that works inplace."""
        return self.__class__(lower=self.lower, inplace=True)
    def make_node(self, inp):
        # fail early when the required GPU libraries are missing
        if not cusolver_available:
            raise RuntimeError('CUSOLVER is not available and '
                               'GpuCholesky Op can not be constructed.')
        if skcuda.__version__ <= '0.5.1':
            warnings.warn('The GpuCholesky op requires scikit-cuda > 0.5.1 to work with CUDA 8')
        if not pygpu_available:
            raise RuntimeError('Missing pygpu or triu/tril functions.'
                               'Install or update libgpuarray.')
        context_name = infer_context_name(inp)

        inp = as_gpuarray_variable(inp, context_name)

        inp = gpu_contiguous(inp)

        # this op can only operate on float32 matrices
        # because of current implementation of triu/tril.
        # TODO: support float64 for triu/tril in GpuArray and for GpuCholesky/GpuCusolverSolve in Theano.
        assert inp.ndim == 2
        assert inp.dtype == 'float32'

        return theano.Apply(self, [inp], [inp.type()])
    def prepare_node(self, node, storage_map, compute_map, impl):
        # make sure a cusolver handle is cached on the GPU context
        ctx = node.inputs[0].type.context
        attach_cusolver_handle_to_context(ctx)
def perform(self, node, inputs, outputs):
context = inputs[0][0].context
# Input matrix.
A = inputs[0]
l, n = A.shape
if l != n:
raise ValueError('A must be a square matrix')
lda = max(1, n)
# cusolver operates on F ordered matrices, but A is expected
# to be symmetric | |
# repo: bobh0303/fontbakery
import os
from fontbakery.profiles.universal import UNIVERSAL_PROFILE_CHECKS
from fontbakery.status import INFO, WARN, ERROR, SKIP, PASS, FAIL
from fontbakery.section import Section
from fontbakery.callable import check, disable
from fontbakery.utils import filesize_formatting
from fontbakery.message import Message
from fontbakery.fonts_profile import profile_factory
from fontbakery.constants import (NameID,
PlatformID,
WindowsEncodingID,
WindowsLanguageID,
MacintoshEncodingID,
MacintoshLanguageID,
LATEST_TTFAUTOHINT_VERSION)
from .googlefonts_conditions import * # pylint: disable=wildcard-import,unused-wildcard-import
# build on the universal profile, under a "Google Fonts" default section
profile_imports = ('fontbakery.profiles.universal',)
profile = profile_factory(default_section=Section("Google Fonts"))

# default thresholds for checks that accept configuration overrides
profile.configuration_defaults = {
    "com.google.fonts/check/file_size": {
        "WARN_SIZE": 1 * 1024 * 1024,
        "FAIL_SIZE": 9 * 1024 * 1024
    }
}
# check-ids under com.google.fonts/check/metadata/*
METADATA_CHECKS = [
    'com.google.fonts/check/metadata/parses',
    'com.google.fonts/check/metadata/unknown_designer',
    'com.google.fonts/check/metadata/multiple_designers',
    'com.google.fonts/check/metadata/designer_values',
    'com.google.fonts/check/metadata/listed_on_gfonts',
    'com.google.fonts/check/metadata/unique_full_name_values',
    'com.google.fonts/check/metadata/unique_weight_style_pairs',
    'com.google.fonts/check/metadata/license',
    'com.google.fonts/check/metadata/menu_and_latin',
    'com.google.fonts/check/metadata/subsets_order',
    'com.google.fonts/check/metadata/includes_production_subsets',
    'com.google.fonts/check/metadata/copyright',
    'com.google.fonts/check/metadata/familyname',
    'com.google.fonts/check/metadata/has_regular',
    'com.google.fonts/check/metadata/regular_is_400',
    'com.google.fonts/check/metadata/nameid/family_name',
    'com.google.fonts/check/metadata/nameid/post_script_name',
    'com.google.fonts/check/metadata/nameid/full_name',
    'com.google.fonts/check/metadata/nameid/family_and_full_names', # FIXME! This seems redundant!
    'com.google.fonts/check/metadata/nameid/copyright',
    'com.google.fonts/check/metadata/nameid/font_name', # FIXME! This looks suspiciously similar to com.google.fonts/check/metadata/nameid/family_name
    'com.google.fonts/check/metadata/match_fullname_postscript',
    'com.google.fonts/check/metadata/match_filename_postscript',
    'com.google.fonts/check/metadata/match_weight_postscript',
    'com.google.fonts/check/metadata/valid_name_values',
    'com.google.fonts/check/metadata/valid_full_name_values',
    'com.google.fonts/check/metadata/valid_filename_values',
    'com.google.fonts/check/metadata/valid_post_script_name_values',
    'com.google.fonts/check/metadata/valid_copyright',
    'com.google.fonts/check/metadata/reserved_font_name',
    'com.google.fonts/check/metadata/copyright_max_length',
    'com.google.fonts/check/metadata/filenames',
    'com.google.fonts/check/metadata/italic_style',
    'com.google.fonts/check/metadata/normal_style',
    'com.google.fonts/check/metadata/fontname_not_camel_cased',
    'com.google.fonts/check/metadata/match_name_familyname',
    'com.google.fonts/check/metadata/canonical_weight_value',
    'com.google.fonts/check/metadata/os2_weightclass',
    'com.google.fonts/check/metadata/canonical_style_names',
    'com.google.fonts/check/metadata/broken_links',
    'com.google.fonts/check/metadata/undeclared_fonts',
    'com.google.fonts/check/metadata/category',
    'com.google.fonts/check/metadata/gf-axisregistry_valid_tags',
    'com.google.fonts/check/metadata/gf-axisregistry_bounds',
    'com.google.fonts/check/metadata/consistent_axis_enumeration',
    'com.google.fonts/check/metadata/escaped_strings',
    'com.google.fonts/check/metadata/designer_profiles',
    'com.google.fonts/check/metadata/family_directory_name',
    'com.google.fonts/check/metadata/can_render_samples'
]

# check-ids under com.google.fonts/check/description/*
DESCRIPTION_CHECKS = [
    'com.google.fonts/check/description/broken_links',
    'com.google.fonts/check/description/valid_html',
    'com.google.fonts/check/description/min_length',
    'com.google.fonts/check/description/max_length',
    'com.google.fonts/check/description/git_url',
    'com.google.fonts/check/description/eof_linebreak',
    'com.google.fonts/check/description/family_update',
    'com.google.fonts/check/description/urls'
]

# check-ids under com.google.fonts/check/family/*
FAMILY_CHECKS = [
#   'com.google.fonts/check/family/equal_numbers_of_glyphs',
#   'com.google.fonts/check/family/equal_glyph_names',
    'com.google.fonts/check/family/has_license',
    'com.google.fonts/check/family/control_chars',
    'com.google.fonts/check/family/tnum_horizontal_metrics',
    'com.google.fonts/check/family/italics_have_roman_counterparts'
]

# check-ids under com.google.fonts/check/name/*
NAME_TABLE_CHECKS = [
    'com.google.fonts/check/name/unwanted_chars',
    'com.google.fonts/check/name/license',
    'com.google.fonts/check/name/license_url',
    'com.google.fonts/check/name/family_and_style_max_length',
    'com.google.fonts/check/name/line_breaks',
    'com.google.fonts/check/name/rfn'
]

# The glyphs checks will be enabled once
# we implement check polymorphism
# https://github.com/googlefonts/fontbakery/issues/3436
GLYPHSAPP_CHECKS = [
    #DISABLED: 'com.google.fonts/check/glyphs_file/name/family_and_style_max_length',
    #DISABLED: 'com.google.fonts/check/glyphs_file/font_copyright'
]

# check-ids under com.google.fonts/check/repo/* and /license/*
REPO_CHECKS = [
    'com.google.fonts/check/repo/dirname_matches_nameid_1',
    'com.google.fonts/check/repo/vf_has_static_fonts',
    'com.google.fonts/check/repo/upstream_yaml_has_required_fields',
    'com.google.fonts/check/repo/fb_report',
    'com.google.fonts/check/repo/zip_files',
    'com.google.fonts/check/repo/sample_image',
    'com.google.fonts/check/license/OFL_copyright',
    'com.google.fonts/check/license/OFL_body_text'
]
FONT_FILE_CHECKS = [
'com.google.fonts/check/glyph_coverage',
'com.google.fonts/check/canonical_filename',
'com.google.fonts/check/usweightclass',
'com.google.fonts/check/fstype',
'com.google.fonts/check/vendor_id',
'com.google.fonts/check/ligature_carets',
'com.google.fonts/check/production_glyphs_similarity',
'com.google.fonts/check/fontv',
#DISABLED: 'com.google.fonts/check/production_encoded_glyphs',
'com.google.fonts/check/glyf_nested_components',
'com.google.fonts/check/varfont/generate_static',
'com.google.fonts/check/kerning_for_non_ligated_sequences',
'com.google.fonts/check/name/description_max_length',
'com.google.fonts/check/fvar_name_entries',
'com.google.fonts/check/version_bump',
'com.google.fonts/check/epar',
'com.google.fonts/check/font_copyright',
'com.google.fonts/check/italic_angle',
'com.google.fonts/check/has_ttfautohint_params',
'com.google.fonts/check/name/version_format',
'com.google.fonts/check/name/familyname_first_char',
'com.google.fonts/check/hinting_impact',
'com.google.fonts/check/file_size',
'com.google.fonts/check/varfont/has_HVAR',
'com.google.fonts/check/name/typographicfamilyname',
'com.google.fonts/check/name/subfamilyname',
'com.google.fonts/check/name/typographicsubfamilyname',
'com.google.fonts/check/gasp',
'com.google.fonts/check/name/familyname',
'com.google.fonts/check/name/mandatory_entries',
'com.google.fonts/check/name/copyright_length',
'com.google.fonts/check/fontdata_namecheck',
'com.google.fonts/check/name/ascii_only_entries',
'com.google.fonts/check/varfont_has_instances',
'com.google.fonts/check/varfont_weight_instances',
'com.google.fonts/check/old_ttfautohint',
'com.google.fonts/check/vttclean',
'com.google.fonts/check/name/postscriptname',
'com.google.fonts/check/aat',
'com.google.fonts/check/name/fullfontname',
'com.google.fonts/check/mac_style',
'com.google.fonts/check/fsselection',
'com.google.fonts/check/smart_dropout',
'com.google.fonts/check/integer_ppem_if_hinted',
'com.google.fonts/check/unitsperem_strict',
'com.google.fonts/check/transformed_components',
'com.google.fonts/check/vertical_metrics_regressions',
'com.google.fonts/check/cjk_vertical_metrics',
'com.google.fonts/check/cjk_vertical_metrics_regressions',
'com.google.fonts/check/cjk_not_enough_glyphs',
'com.google.fonts/check/varfont_instance_coordinates',
'com.google.fonts/check/varfont_instance_names',
'com.google.fonts/check/varfont_duplicate_instance_names',
'com.google.fonts/check/varfont/consistent_axes',
'com.google.fonts/check/varfont/unsupported_axes',
'com.google.fonts/check/varfont/grade_reflow',
'com.google.fonts/check/gf-axisregistry/fvar_axis_defaults',
'com.google.fonts/check/STAT/gf-axisregistry',
'com.google.fonts/check/STAT/axis_order',
'com.google.fonts/check/mandatory_avar_table',
'com.google.fonts/check/missing_small_caps_glyphs',
'com.google.fonts/check/stylisticset_description',
'com.google.fonts/check/os2/use_typo_metrics',
'com.google.fonts/check/meta/script_lang_tags',
'com.google.fonts/check/no_debugging_tables',
'com.google.fonts/check/render_own_name'
]
GOOGLEFONTS_PROFILE_CHECKS = \
UNIVERSAL_PROFILE_CHECKS + \
METADATA_CHECKS + \
DESCRIPTION_CHECKS + \
FAMILY_CHECKS + \
NAME_TABLE_CHECKS + \
REPO_CHECKS + \
FONT_FILE_CHECKS + \
GLYPHSAPP_CHECKS
@check(
    id = 'com.google.fonts/check/canonical_filename',
    rationale = """
        A font's filename must be composed in the following manner:
        <familyname>-<stylename>.ttf
        - Nunito-Regular.ttf,
        - Oswald-BoldItalic.ttf
        Variable fonts must list the axis tags in alphabetical order in square brackets and separated by commas:
        - Roboto[wdth,wght].ttf
        - Familyname-Italic[wght].ttf
    """,
    proposal = 'legacy:check/001'
)
def com_google_fonts_check_canonical_filename(font):
    """Checking file is named canonically."""
    from fontTools.ttLib import TTFont
    from .shared_conditions import (is_variable_font,
                                    variable_font_filename)
    from .googlefonts_conditions import canonical_stylename
    from fontbakery.utils import suffix
    from fontbakery.constants import STATIC_STYLE_NAMES

    filename = os.path.basename(font)
    passed = True

    # Underscores are forbidden in filenames, no matter the font flavour.
    if "_" in filename:
        yield FAIL, \
              Message("invalid-char",
                      f'font filename "{font}" is invalid.'
                      f' It must not contain underscore characters!')
        return

    ttFont = TTFont(font)
    if is_variable_font(ttFont):
        # Variable fonts follow the bracketed-axes naming scheme.
        if suffix(font) in STATIC_STYLE_NAMES:
            passed = False
            yield FAIL, \
                  Message("varfont-with-static-filename",
                          "This is a variable font, but it is using"
                          " a naming scheme typical of a static font.")

        expected = variable_font_filename(ttFont)
        if expected is None:
            yield FAIL, \
                  Message("unknown-name",
                          "FontBakery was unable to figure out which"
                          " filename to expect for this variable font.\n"
                          "This most likely means that the name table entries"
                          " used as reference such as FONT_FAMILY_NAME may"
                          " not be properly set.\n"
                          "Please review the name table entries.")
            return

        if filename != expected:
            passed = False
            yield FAIL, \
                  Message("bad-varfont-filename",
                          f"The file '{filename}' must be renamed"
                          f" to '{expected}' according to the"
                          f" Google Fonts naming policy for variable fonts.")
    else:
        # Static fonts must use one of the canonical style names.
        if not canonical_stylename(font):
            passed = False
            style_names = '", "'.join(STATIC_STYLE_NAMES)
            yield FAIL, \
                  Message("bad-static-filename",
                          f'Style name used in "{font}" is not canonical.'
                          f' You should rebuild the font using'
                          f' any of the following'
                          f' style names: "{style_names}".')

    if passed:
        yield PASS, f"{font} is named canonically."
@check(
    id = 'com.google.fonts/check/description/broken_links',
    conditions = ['description_html'],
    rationale = """
        The snippet of HTML in the DESCRIPTION.en_us.html file is added to the font family webpage on the Google Fonts website. For that reason, all hyperlinks in it must be properly working.
    """,
    proposal = 'legacy:check/003'
)
def com_google_fonts_check_description_broken_links(description_html):
    """Does DESCRIPTION file contain broken links?"""
    import requests

    doc = description_html
    broken_links = []
    unique_links = []
    for a_href in doc.iterfind('.//a[@href]'):
        link = a_href.get("href")

        # Avoid probing (and reporting) the same URL more than once.
        # Fix: mailto links were previously never recorded here, so a
        # repeated email address was reported once per occurrence.
        if link in unique_links:
            continue
        unique_links.append(link)

        if link.startswith("mailto:") and \
           "@" in link and \
           "." in link.split("@")[1]:
            yield INFO,\
                  Message("email",
                          f"Found an email address: {link}")
            continue

        try:
            response = requests.head(link, allow_redirects=True, timeout=10)
            code = response.status_code
            # Status 429: "Too Many Requests" is acceptable
            # because it means the website is probably ok and
            # we're just perhaps being too aggressive in probing the server!
            if code not in [requests.codes.ok,
                            requests.codes.too_many_requests]:
                broken_links.append(f"{link} (status code: {code})")
        except requests.exceptions.Timeout:
            yield WARN,\
                  Message("timeout",
                          f"Timedout while attempting to access: '{link}'."
                          f" Please verify if that's a broken link.")
        except requests.exceptions.RequestException:
            broken_links.append(link)

    if len(broken_links) > 0:
        broken_links_list = '\n\t'.join(broken_links)
        yield FAIL,\
              Message("broken-links",
                      f"The following links are broken"
                      f" in the DESCRIPTION file:\n\t"
                      f"{broken_links_list}")
    else:
        yield PASS, "All links in the DESCRIPTION file look good!"
@check(
    id = 'com.google.fonts/check/description/urls',
    conditions = ['description_html'],
    rationale = """
        The snippet of HTML in the DESCRIPTION.en_us.html file is added to the font family webpage on the Google Fonts website.
        Google Fonts has a content formatting policy for that snippet that expects the text content of links not to include the http:// or https:// prefixes.
    """,
    proposal = 'https://github.com/googlefonts/fontbakery/issues/3497'
)
def com_google_fonts_check_description_urls(description_html):
    """URLs on DESCRIPTION file must not display http(s) prefix."""
    passed = True
    for a_href in description_html.iterfind('.//a[@href]'):
        link_text = a_href.text
        # Fix: an anchor may have no direct text node (e.g. it wraps an
        # <img> or nested markup), in which case `.text` is None and the
        # startswith call used to raise AttributeError. Such anchors cannot
        # display a prefix, so they are simply skipped.
        if not link_text:
            continue
        if link_text.startswith(("http://", "https://")):
            passed = False
            yield WARN,\
                  Message("prefix-found",
                          f'Please remove the "http(s)://"'
                          f' prefix from the link text "{link_text}"')
    if passed:
        yield PASS, "All good!"
@condition
def description_html(description):
    """DESCRIPTION contents parsed into an lxml element tree.

    Yields None when the snippet is not well-formed markup, and nothing at
    all when there is no description to parse.
    """
    if not description:
        return
    from lxml import etree
    # The snippet has no single root element, so wrap it before parsing.
    wrapped = "<html>" + description + "</html>"
    try:
        return etree.fromstring(wrapped)
    except etree.XMLSyntaxError:
        return None
@check(
    id = 'com.google.fonts/check/description/git_url',
    conditions = ['description_html'],
    rationale = """
        The contents of the DESCRIPTION.en-us.html file are displayed on the Google Fonts website in the about section of each font family specimen page.
        Since all of the Google Fonts collection is composed of libre-licensed fonts, this check enforces a policy that there must be a hypertext link in that page directing users to the repository where the font project files are made available.
        Such hosting is typically done on sites like Github, Gitlab, GNU Savannah or any other git-based version control service.
    """,
    proposal = 'https://github.com/googlefonts/fontbakery/issues/2523'
)
def com_google_fonts_check_description_git_url(description_html):
    """Does DESCRIPTION file contain a upstream Git repo URL?"""
    # Any anchor whose href mentions a git-based scheme counts as a repo URL.
    found_any = False
    for anchor in description_html.iterfind('.//a[@href]'):
        url = anchor.get("href")
        if "://git" in url:
            found_any = True
            yield INFO,\
                  Message("url-found",
                          f"Found a git repo URL: {url}")

    if found_any:
        yield PASS, "Looks great!"
    else:
        yield FAIL,\
              Message("lacks-git-url",
                      "Please host your font project on a public Git repo"
                      " (such as GitHub or GitLab) and place a link"
                      " in the DESCRIPTION.en_us.html file.")
@check(
    id = 'com.google.fonts/check/description/valid_html',
    conditions = ['description'],
    rationale = """
        Sometimes people write malformed HTML markup. This check should ensure the file is good.
        Additionally, when packaging families for being pushed to the `google/fonts` git repo, if there is no DESCRIPTION.en_us.html file, some older versions of the `add_font.py` tool insert a placeholder description file which contains invalid html. This file needs to either be replaced with an existing description file or edited by hand.
    """,
    proposal = ['legacy:check/004',
                'https://github.com/googlefonts/fontbakery/issues/2664']
)
def com_google_fonts_check_description_valid_html(descfile, description):
    """Is this a proper HTML snippet?"""
    passed = True

    if "<html>" in description or "</html>" in description:
        # Bug fix: this branch previously did not flip `passed`, so the
        # check could yield this FAIL and still emit a contradictory PASS.
        passed = False
        yield FAIL,\
              Message("html-tag",
                      f"{descfile} should not have an <html> tag,"
                      f" since it should only be a snippet that will"
                      f" later be included in the Google Fonts"
                      f" font family specimen webpage.")

    from lxml import etree
    try:
        # Wrap the snippet to give the parser a single root element.
        etree.fromstring("<html>" + description + "</html>")
    except Exception as e:
        passed = False
        yield FAIL,\
              Message("malformed-snippet",
                      f"{descfile} does not look like a proper HTML snippet."
                      f" Please look for syntax errors."
                      f" Maybe the following parser error message can help"
                      f" you find what's wrong:\n"
                      f"----------------\n"
                      f"{e}\n"
                      f"----------------\n")

    if "<p>" not in description or "</p>" not in description:
        passed = False
        yield FAIL,\
              Message("lacks-paragraph",
                      f"{descfile} does not include an HTML <p> tag.")

    if passed:
        yield PASS, f"{descfile} is a proper HTML file."
@check(
id = 'com.google.fonts/check/description/min_length',
conditions = ['description'],
proposal = 'legacy:check/005'
)
def | |
(sta - a_mean) / a_std
return _z
def trigger_onset(charfct, thres1, thres2, max_len=9e99, max_len_delete=False):
    """
    Calculate trigger on and off times.

    Given thres1 and thres2 calculate trigger on and off times from
    characteristic function.

    This method is written in pure Python and gets slow as soon as there
    are more then 1e6 triggerings ("on" AND "off") in charfct --- normally
    this does not happen.

    :type charfct: NumPy :class:`~numpy.ndarray`
    :param charfct: Characteristic function of e.g. STA/LTA trigger
    :type thres1: float
    :param thres1: Value above which trigger (of characteristic function)
                   is activated (higher threshold)
    :type thres2: float
    :param thres2: Value below which trigger (of characteristic function)
                   is deactivated (lower threshold)
    :type max_len: int
    :param max_len: Maximum length of triggered event in samples. A new
                    event will be triggered as soon as the signal reaches
                    again above thres1.
    :type max_len_delete: bool
    :param max_len_delete: Do not write events longer than max_len into
                           report file.
    :rtype: List
    :return: Nested List of trigger on and of times in samples
    """
    # Sample indices above the on- and off-thresholds, respectively.
    idx_on = np.where(charfct > thres1)[0]
    if len(idx_on) == 0:
        return []
    idx_off = np.where(charfct > thres2)[0]

    times_on = deque([idx_on[0]])
    times_off = deque([-1])

    # A trigger switches off wherever a run of consecutive samples above
    # thres2 ends, i.e. at every gap in idx_off; the very last sample above
    # thres2 always terminates a run, which np.diff alone would miss.
    run_ends = np.append(np.diff(idx_off) > 1, True)
    times_off.extend(idx_off[run_ends].tolist())
    # An additional trigger switches on at the first sample after every gap
    # in idx_on.
    times_on.extend(idx_on[1:][np.diff(idx_on) > 1].tolist())

    if max_len_delete:
        # Sentinel off-time: a trigger still running at the end of the
        # trace is never paired up and therefore dropped.
        times_off.extend([1e99])
        times_on.extend([times_on[-1]])
    else:
        # Close a still-running trigger at the last sample above thres2.
        times_off.extend([idx_off[-1]])

    pairs = []
    while times_on[-1] > times_off[0]:
        # Drop on/off candidates that cannot form a valid (on, off) pair:
        # on-times not later than the current off, offs before the next on.
        while times_on[0] <= times_off[0]:
            times_on.popleft()
        while times_off[0] < times_on[0]:
            times_off.popleft()
        if times_off[0] - times_on[0] > max_len:
            if max_len_delete:
                times_on.popleft()
                continue
            # Truncate over-long events after exactly max_len samples.
            times_off.appendleft(times_on[0] + max_len)
        pairs.append([times_on[0], times_off[0]])
    return np.array(pairs, dtype=np.int64)
def pk_baer(reltrc, samp_int, tdownmax, tupevent, thr1, thr2, preset_len,
            p_dur):
    """
    Wrapper for P-picker routine by <NAME>, <NAME>.

    :param reltrc: time series as numpy.ndarray float32 data, possibly filtered
    :param samp_int: number of samples per second
    :param tdownmax: if dtime exceeds tdownmax, the trigger is examined for
        validity
    :param tupevent: min nr of samples for itrm to be accepted as a pick
    :param thr1: threshold to trigger for pick (c.f. paper)
    :param thr2: threshold for updating sigma (c.f. paper)
    :param preset_len: no of points taken for the estimation of variance of
        SF(t) on preset()
    :param p_dur: p_dur defines the time interval for which the maximum
        amplitude is evaluated Originally set to 6 secs
    :return: (pptime, pfm) pptime sample number of parrival; pfm direction
        of first motion (U or D)

    .. note:: currently the first sample is not taken into account

    .. seealso:: [Baer1987]_
    """
    # Output parameter filled in by the C routine: P-arrival sample index.
    pptime = C.c_int()
    # c_chcar_p strings are immutable, use string_buffer for pointers
    pfm = C.create_string_buffer(b" ", 5)
    # be nice and adapt type if necessary
    reltrc = np.ascontiguousarray(reltrc, np.float32)
    # index in pk_mbaer.c starts with 1, 0 index is lost, length must be
    # one shorter
    args = (len(reltrc) - 1, C.byref(pptime), pfm, samp_int,
            tdownmax, tupevent, thr1, thr2, preset_len, p_dur)
    # Call into the compiled picker; a non-zero code signals an allocation
    # failure inside the C routine.
    errcode = clibsignal.ppick(reltrc, *args)
    if errcode != 0:
        raise MemoryError("Error in function ppick of mk_mbaer.c")
    # add the sample to the time which is not taken into account
    # pfm has to be decoded from byte to string
    return pptime.value + 1, pfm.value.decode('utf-8')
def ar_pick(a, b, c, samp_rate, f1, f2, lta_p, sta_p, lta_s, sta_s, m_p, m_s,
            l_p, l_s, s_pick=True):
    """
    Pick P and S arrivals with an AR-AIC + STA/LTA algorithm.

    The algorithm picks onset times using an Auto Regression - Akaike
    Information Criterion (AR-AIC) method. The detection intervals are
    successively narrowed down with the help of STA/LTA ratios as well as
    STA-LTA difference calculations. For details, please see [Akazawa2004]_.

    An important feature of this algorithm is that it requires comparatively
    little tweaking and site-specific settings and is thus applicable to large,
    diverse data sets.

    :type a: :class:`numpy.ndarray`
    :param a: Z signal the data.
    :type b: :class:`numpy.ndarray`
    :param b: N signal of the data.
    :type c: :class:`numpy.ndarray`
    :param c: E signal of the data.
    :type samp_rate: float
    :param samp_rate: Number of samples per second.
    :type f1: float
    :param f1: Frequency of the lower bandpass window.
    :type f2: float
    :param f2: Frequency of the upper bandpass window.
    :type lta_p: float
    :param lta_p: Length of LTA for the P arrival in seconds.
    :type sta_p: float
    :param sta_p: Length of STA for the P arrival in seconds.
    :type lta_s: float
    :param lta_s: Length of LTA for the S arrival in seconds.
    :type sta_s: float
    :param sta_s: Length of STA for the S arrival in seconds.
    :type m_p: int
    :param m_p: Number of AR coefficients for the P arrival.
    :type m_s: int
    :param m_s: Number of AR coefficients for the S arrival.
    :type l_p: float
    :param l_p: Length of variance window for the P arrival in seconds.
    :type l_s: float
    :param l_s: Length of variance window for the S arrival in seconds.
    :type s_pick: bool
    :param s_pick: If ``True``, also pick the S phase, otherwise only the P
        phase.
    :rtype: tuple
    :returns: A tuple with the P and the S arrival.
    """
    if not (len(a) == len(b) == len(c)):
        raise ValueError("All three data arrays must have the same length.")
    # Remove any linear trend on all three components before picking.
    a = scipy.signal.detrend(a, type='linear')
    b = scipy.signal.detrend(b, type='linear')
    c = scipy.signal.detrend(c, type='linear')
    # be nice and adapt type if necessary
    a = np.require(a, dtype=np.float32, requirements=['C_CONTIGUOUS'])
    b = np.require(b, dtype=np.float32, requirements=['C_CONTIGUOUS'])
    c = np.require(c, dtype=np.float32, requirements=['C_CONTIGUOUS'])
    # scale amplitudes to avoid precision issues in case of low amplitudes
    # C code picks the horizontal component with larger amplitudes, so scale
    # horizontal components with a common scaling factor
    data_max = np.abs(a).max()
    if data_max < 100:
        a *= 1e6
        a /= data_max
    data_max = max(np.abs(b).max(), np.abs(c).max())
    if data_max < 100:
        for data in (b, c):
            data *= 1e6
            data /= data_max
    s_pick = C.c_int(s_pick)  # pick S phase also
    # Output parameters filled in by the C routine.
    ptime = C.c_float()
    stime = C.c_float()
    args = (len(a), samp_rate, f1, f2,
            lta_p, sta_p, lta_s, sta_s, m_p, m_s, C.byref(ptime),
            C.byref(stime), l_p, l_s, s_pick)
    errcode = clibsignal.ar_picker(a, b, c, *args)
    if errcode != 0:
        # Positive codes index into the list of internal buffers whose
        # allocation failed inside the C picker.
        bufs = ['buff1', 'buff1_s', 'buff2', 'buff3', 'buff4', 'buff4_s',
                'f_error', 'b_error', 'ar_f', 'ar_b', 'buf_sta', 'buf_lta',
                'extra_tr1', 'extra_tr2', 'extra_tr3']
        if errcode <= len(bufs):
            raise MemoryError('Unable to allocate %s!' % (bufs[errcode - 1]))
        raise Exception('Error during PAZ calculation!')
    return ptime.value, stime.value
def plot_trigger(trace, cft, thr_on, thr_off, show=True):
"""
Plot characteristic function of trigger along with waveform data and
trigger On/Off from given thresholds.
:type trace: :class:`~obspy.core.trace.Trace`
:param trace: waveform data
:type cft: :class:`numpy.ndarray`
:param cft: characteristic function as returned by a trigger in
:mod:`obspy.signal.trigger`
:type thr_on: float
:param thr_on: threshold for switching trigger on
:type thr_off: float
:param thr_off: threshold for switching trigger off
:type show: bool
:param show: Do not call `plt.show()` at end of routine. That way,
further modifications can be done to the figure before showing it.
"""
import matplotlib.pyplot as plt
df = trace.stats.sampling_rate
npts = trace.stats.npts
t = np.arange(npts, dtype=np.float32) / df
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, trace.data, 'k')
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, cft, 'k')
on_off | |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import json
import logging
import pandas as pd
from copy import deepcopy
from enum import Enum
from functools import wraps
from pydash import get, has, set_
from typing import Dict, List, Optional
from gs_quant.api.gs.assets import GsAsset, GsAssetApi
from gs_quant.api.gs.data import GsDataApi
from gs_quant.api.gs.indices import GsIndexApi
from gs_quant.api.gs.reports import GsReportApi
from gs_quant.api.gs.users import GsUsersApi
from gs_quant.common import DateLimit, PositionType
from gs_quant.data.fields import DataMeasure
from gs_quant.entities.entity import EntityType, PositionedEntity
from gs_quant.entities.entitlements import Entitlements as BasketEntitlements
from gs_quant.errors import MqError, MqValueError
from gs_quant.json_encoder import JSONEncoder
from gs_quant.markets.indices_utils import *
from gs_quant.markets.position_set import PositionSet
from gs_quant.markets.securities import Asset, AssetType as SecAssetType
from gs_quant.session import GsSession
from gs_quant.target.data import DataQuery
from gs_quant.target.indices import *
from gs_quant.target.reports import Report, ReportStatus
_logger = logging.getLogger(__name__)
class ErrorMessage(Enum):
    """User-facing error strings raised by the ``@_validate`` guards below."""
    # Caller lacks admin entitlements on the basket.
    NON_ADMIN = 'You are not permitted to perform this action on this basket. Please make sure \
the basket owner has entitled your application properly if you believe this is a mistake'
    # Caller is not an internal user.
    NON_INTERNAL = 'You are not permitted to access this basket setting.'
    # Basket object was constructed without resolving an existing basket.
    UNINITIALIZED = 'Basket class object must be initialized using one of an existing basket\'s \
identifiers to perform this action'
    # Property is immutable once the basket exists in Marquee.
    UNMODIFIABLE = 'This property can not be modified since the basket has already been created'
def _validate(*error_msgs):
    """ Confirms initialization is complete and checks for errors before calling function """
    def _outer(fn):
        @wraps(fn)
        def _inner(self, *args, **kwargs):
            # '_Basket__error_messages' is the name-mangled form of the
            # private Basket.__error_messages attribute; pydash's `has`
            # tolerates the attribute being absent entirely.
            if has(self, '_Basket__error_messages') and self._Basket__error_messages is not None:
                # An empty set means deferred initialization has not run
                # yet -- finish it now so the error state is populated.
                if len(self._Basket__error_messages) < 1:
                    self._Basket__finish_initialization()
                # Refuse the call if any of the guarded errors applies.
                for error_msg in error_msgs:
                    if error_msg in self._Basket__error_messages:
                        raise MqError(error_msg.value)
            return fn(self, *args, **kwargs)
        return _inner
    return _outer
class Basket(Asset, PositionedEntity):
"""
Basket which tracks an evolving portfolio of securities, and can be traded through cash or derivatives markets
"""
    def __init__(self, gs_asset: GsAsset = None, **kwargs):
        # None signals "initialization in progress" to the @_validate guards.
        self.__error_messages = None
        if gs_asset:
            # Hydrate from an existing Marquee asset.
            if gs_asset.type.value not in BasketType.to_list():
                raise MqValueError(f'Failed to initialize. Asset {gs_asset.id} is not a basket')
            self.__id = gs_asset.id
            # Snapshot the entitlements so update() can detect changes.
            self.__initial_entitlements = gs_asset.entitlements
            asset_entity: Dict = json.loads(json.dumps(gs_asset.as_dict(), cls=JSONEncoder))
            Asset.__init__(self, gs_asset.id, gs_asset.asset_class, gs_asset.name,
                           exchange=gs_asset.exchange, currency=gs_asset.currency, entity=asset_entity)
            PositionedEntity.__init__(self, gs_asset.id, EntityType.ASSET)
            self.__populate_current_attributes_for_existing_basket(gs_asset)
        else:
            # Brand-new basket that has not been created in Marquee yet.
            self.__populate_default_attributes_for_new_basket(**kwargs)
        self.__error_messages = set([])
        # Expensive finishing steps (entitlement lookups, etc.) are optional
        # so lightweight/cloned instances can skip them.
        if get(kwargs, '_finish_init', False):
            self.__finish_initialization()
@classmethod
def get(cls, identifier: str, **kwargs):
"""
Fetch an existing basket
:param identifier: Any common identifier for a basket (ric, ticker, etc.)
:return: Basket object
**Usage**
Get existing basket instance
**Examples**
Get basket details:
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
"""
gs_asset = cls.__get_gs_asset(identifier)
return cls(gs_asset=gs_asset, _finish_init=get(kwargs, '_finish_init', True))
@_validate()
def get_details(self) -> pd.DataFrame:
"""
Get basket details
:return: dataframe containing current basket properties
**Usage**
Get basket's current state
**Examples**
Get basket details:
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_details()
"""
props = list(CustomBasketsPricingParameters.properties().union(PublishParameters.properties(),
CustomBasketsCreateInputs.properties()))
props = sorted(props)
details = [{'name': k, 'value': get(self, k)} for k in props if has(self, k)]
return pd.DataFrame(details)
def create(self) -> Dict:
"""
Create a new custom basket in Marquee
:return: dictionary containing asset id and report id
**Usage**
Create a new custom basket in Marquee
**See also**
:func:`get_details` :func:`poll_status` :func:`update`
"""
inputs, pricing, publish = {}, {}, {}
for prop in CustomBasketsCreateInputs.properties():
set_(inputs, prop, get(self, prop))
for prop in CustomBasketsPricingParameters.properties():
set_(pricing, prop, get(self, prop))
for prop in PublishParameters.properties():
set_(publish, prop, get(self, prop))
set_(inputs, 'position_set', self.position_set.to_target(common=False))
set_(inputs, 'pricing_parameters', CustomBasketsPricingParameters(**pricing))
set_(inputs, 'publish_parameters', PublishParameters(**publish))
create_inputs = CustomBasketsCreateInputs(**inputs)
response = GsIndexApi.create(create_inputs)
gs_asset = GsAssetApi.get_asset(response.asset_id)
self.__latest_create_report = GsReportApi.get_report(response.report_id)
self.__init__(gs_asset=gs_asset, _finish_init=True)
return response.as_dict()
@_validate(ErrorMessage.UNINITIALIZED)
def clone(self):
"""
Retrieve a clone of an existing basket
:return: New basket instance with position set identical to current basket
**Usage**
Clone an existing basket's position set in a new basket instance prior to creation
**Examples**
Clone current basket:
>>> from gs_quant.markets.baskets import Basket
>>>
>>> parent_basket = Basket.get("GSMBXXXX")
>>> clone = parent_basket.clone()
**See also**
:func:`create`
"""
position_set = deepcopy(self.position_set)
return Basket(position_set=position_set, clone_parent_id=self.id, parent_basket=self.ticker)
@_validate(ErrorMessage.UNINITIALIZED, ErrorMessage.NON_ADMIN)
def update(self) -> Dict:
"""
Update your custom basket
:return: dictionary containing asset id and report id
**Usage**
Make updates to your basket's metadata, pricing options, publishing options, or composition
**See also**
:func:`get_details` :func:`poll_status` :func:`create`
"""
edit_inputs, rebal_inputs = self.__get_updates()
entitlements = self.__entitlements.to_target()
if not entitlements == self.__initial_entitlements:
response = GsAssetApi.update_asset_entitlements(self.id, entitlements)
if edit_inputs is None and rebal_inputs is None:
if response:
return response.as_dict()
raise MqValueError('Update failed: Nothing on the basket was changed')
elif edit_inputs is not None and rebal_inputs is None:
response = GsIndexApi.edit(self.id, edit_inputs)
elif rebal_inputs is not None and edit_inputs is None:
response = GsIndexApi.rebalance(self.id, rebal_inputs)
else:
response = self.__edit_and_rebalance(edit_inputs, rebal_inputs)
gs_asset = GsAssetApi.get_asset(self.id)
self.__latest_create_report = GsReportApi.get_report(response.report_id)
self.__init__(gs_asset=gs_asset, _finish_init=True)
return response.as_dict()
@_validate(ErrorMessage.UNINITIALIZED, ErrorMessage.NON_ADMIN)
def upload_position_history(self, position_sets: List[PositionSet]) -> Dict:
"""
Upload basket composition history
:param position_sets: list of dated position sets
:return: dictionary containing asset id and report id
**Usage**
Upload your basket's historical composition after it's been created
**Examples**
Upload composition history from a list of identifiers:
>>> from datetime import date
>>> from gs_quant.markets.baskets import Basket
>>> from gs_quant.markets.position_set import PositionSet
>>>
>>> first_position_set = PositionSet.from_list(['BBID1', 'BBID2'], date(2020, 1, 1))
>>> second_position_set = PositionSet.from_list(['BBID1','BBID2', 'BBID3'], date(2021, 1, 1))
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.upload_position_history([first_position_set, second_position_set])
**See also**
:class:`PositionSet`
"""
if self.default_backcast:
raise MqValueError('Unable to upload position history: option must be set during basket creation')
historical_position_sets = []
for position_set in position_sets:
positions = [IndicesPositionInput(p.asset_id, p.weight) for p in position_set.positions]
historical_position_sets.append(IndicesPositionSet(tuple(positions), position_set.date))
response = GsIndexApi.backcast(self.id, CustomBasketsBackcastInputs(tuple(historical_position_sets)))
return response.as_dict()
@_validate(ErrorMessage.UNINITIALIZED)
def poll_status(self, timeout: int = 600, step: int = 30) -> ReportStatus:
"""
Polls the status of the basket's most recent create/edit/rebalance report
:param timeout: how many seconds you'd like to poll for (default is 600 sec)
:param step: how frequently you'd like to check the report's status (default is every 30 sec)
:return: Report status
**Usage**
Poll the status of a newly created or updated basket
**Examples**
Poll most recent create/update report status:
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.poll_status(timeout=120, step=20)
**See also**
:func:`create` :func:`update`
"""
report = get(self, '__latest_create_report', self.__get_latest_create_report())
report_id = get(report, 'id')
return self.poll_report(report_id, timeout, step)
    @_validate(ErrorMessage.UNINITIALIZED)
    def get_latest_rebalance_data(self) -> Dict:
        """
        Retrieve the most recent rebalance data for a basket

        :return: dictionary of the latest rebalance submission's data

        **Usage**

        Retrieve the most recent rebalance data for a basket

        **Examples**

        Retrieve the most recent rebalance data for a basket

        >>> from gs_quant.markets.baskets import Basket
        >>>
        >>> basket = Basket.get("GSMBXXXX")
        >>> basket.get_latest_rebalance_data()

        **See also**

        :func:`get_latest_rebalance_date`
        """
        return GsIndexApi.last_rebalance_data(self.id)
@_validate(ErrorMessage.UNINITIALIZED)
def get_latest_rebalance_date(self) -> dt.date:
"""
Retrieve the most recent rebalance date for a basket
:return: dictionary
**Usage**
Retrieve the most recent rebalance date for a basket
**Examples**
Retrieve the most recent rebalance date for a basket
>>> from gs_quant.markets.baskets import Basket
>>>
>>> basket = Basket.get("GSMBXXXX")
>>> basket.get_latest_rebalance_date()
**See also**
:func:`get_latest_rebalance_data`
"""
last_rebalance = GsIndexApi.last_rebalance_data(self.id)
return dt.datetime.strptime(last_rebalance['date'], '%Y-%m-%d').date()
    @_validate(ErrorMessage.UNINITIALIZED)
    def get_rebalance_approval_status(self) -> str:
        """
        Retrieve the most recent rebalance submission's approval status

        :return: current approval status

        **Usage**

        Retrieve the most recent rebalance submission's approval status

        **Examples**

        Retrieve the most recent rebalance submission's approval status

        >>> from gs_quant.markets.baskets import Basket
        >>>
        >>> basket = Basket.get("GSMBXXXX")
        >>> basket.get_rebalance_approval_status()

        **See also**

        :func:`cancel_rebalance` :func:`poll_report`
        """
        last_approval = GsIndexApi.last_rebalance_approval(self.id)
        # pydash `get` returns None gracefully if 'status' is missing.
        return get(last_approval, 'status')
    @_validate(ErrorMessage.NON_ADMIN)
    def cancel_rebalance(self) -> Dict:
        """
        Cancel the most recent rebalance submission

        :return: dictionary with the API response of the cancellation

        **Usage**

        Cancel the basket's most recent rebalance submission if it has not yet been approved

        **Examples**

        Cancel the basket's most recent rebalance submission

        >>> from gs_quant.markets.baskets import Basket
        >>>
        >>> basket = Basket.get("GSMBXXXX")
        >>> basket.cancel_rebalance()

        **See also**

        :func:`get_rebalance_approval_status` :func:`update`
        """
        return GsIndexApi.cancel_rebalance(self.id)
@_validate(ErrorMessage.UNINITIALIZED)
def get_corporate_actions(self,
start: dt.date = DateLimit.LOW_LIMIT.value,
end: dt.date = dt.date.today() + dt.timedelta(days=10),
ca_type: List[CorporateActionType] = CorporateActionType.to_list()) -> pd.DataFrame:
"""
Retrieve corporate actions for a basket across a date range
:param start: start date (default minimum date value)
:param end: end date (default is maximum date value)
:param ca_type: list of corporate action types (default is all)
:return: dataframe | |
<reponame>jborrelli98/ground-station-app
'''
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This software is the ground station GUI software that will be used to view and
analyze flight data, while also being able to configure the custom flight computer
built by the students of SEDS@IIT.
The goal is to make the software compatible with multiple OS environments with
minimal additional packages, and easy to use for users unfamiliar with the
software.
TO DO:
# - fix bug on static plot need to move plot to see plotted data
# - have matplotlib plots appear in the gui window in quadrants
- have a performance metric bar on the side of the GUI
- be able to communicate with STM32F4 over USB (COM)
- have a window to print output of USB device
'''
### IMPORT START ###
from dataclasses import dataclass
from distutils import command
from faulthandler import disable
import string
from turtle import width
from typing_extensions import IntVar
import matplotlib
from matplotlib import image
from paramiko import Channel
from sqlalchemy import true
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib import style
from matplotlib import pyplot as plt
import tkinter as tk
from tkinter import BOTH, DISABLED, TOP, Canvas, Entry, Label, PhotoImage, StringVar, ttk
from tkinter.filedialog import askopenfilename
import tkinter.font as tkFont
from PIL import ImageTk, Image
import pandas as pd
import numpy as np
import os
import sys
import settings
### IMPORT END ###
### STYLING START ###
# NOTE: was "Verdona" — a misspelled family name made Tk silently fall back
# to its default font; "Verdana" is the intended face.
LARGE_FONT = ("Verdana", 12)
style.use("ggplot")

# Pre-built figures shared by the pages: one for live telemetry, one for
# static (post-flight) analysis. Each is split into a 2x2 grid of subplots.
live_plot = Figure(figsize=(5, 5), dpi=100)
live_plot_subplot1 = live_plot.add_subplot(221)
live_plot_subplot2 = live_plot.add_subplot(222)
live_plot_subplot3 = live_plot.add_subplot(223)
live_plot_subplot4 = live_plot.add_subplot(224)

static_plot = Figure(figsize=(5, 5), dpi=100)
static_plot_subplot1 = static_plot.add_subplot(221)
static_plot_subplot2 = static_plot.add_subplot(222)
static_plot_subplot3 = static_plot.add_subplot(223)
static_plot_subplot4 = static_plot.add_subplot(224)
### STYLING END ###
### GLOBAL VARIABLES START ###
PATH = os.path.dirname(__file__)

# Detect the host OS so platform-specific behaviour (e.g. serial-port naming)
# can branch on PLATFORM later. "linux2" covers legacy Python 2 on Linux.
if sys.platform == "linux" or sys.platform == "linux2":
    PLATFORM = "linux"
elif sys.platform == "darwin":
    PLATFORM = "macOS"
elif sys.platform == "win32":
    PLATFORM = "windows"
else:
    # Unknown platform: warn and abort instead of continuing with PLATFORM
    # undefined. sys.exit() is the reliable way to stop a script; quit() is
    # an interactive-shell helper provided by the site module.
    print("WARNING: Unrecognized platform")
    sys.exit(1)

PATH_DATAFILE = os.path.join(PATH, 'data', 'Init.csv')
PATH_LIVEDATA = os.path.join(PATH, 'data', 'TestData.csv')  # placeholder
### GLOBAL VARIABLES END ###
### CLASS START ###
class GSApp(tk.Tk):
    """Top-level application window: builds the menu bar and the page stack."""

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)

        # Single container frame; every page is gridded into the same cell
        # and raised on demand by show_frame().
        holder = tk.Frame(self)
        holder.pack(side="top", fill="both", expand=True)
        holder.grid_rowconfigure(0, weight=1)
        holder.grid_columnconfigure(0, weight=1)

        menu_bar = tk.Menu(holder)

        # --- File menu ---
        file_menu = tk.Menu(menu_bar, tearoff=0)
        file_menu.add_command(
            label="Save Settings",
            command=lambda: tk.messagebox.showinfo("Information", "Not supported yet!"))
        file_menu.add_command(label="Open", command=lambda: select_file())
        file_menu.add_separator()
        file_menu.add_command(label="Exit", command=lambda: quit())  # Fixed?
        menu_bar.add_cascade(label="File", menu=file_menu)

        # --- Page menu ---
        page_menu = tk.Menu(menu_bar, tearoff=0)
        page_menu.add_command(label="Home",
                              command=lambda: self.show_frame(HomePage))
        page_menu.add_separator()
        page_menu.add_command(label="Data Analysis",
                              command=lambda: self.show_frame(DataAnalysis))
        page_menu.add_command(label="FC Settings",
                              command=lambda: self.show_frame(FCSettings))
        page_menu.add_command(label="Live Flight Data",
                              command=lambda: self.show_frame(LiveFlight))
        menu_bar.add_cascade(label="Page", menu=page_menu)

        # --- Settings menu (placeholder, no entries yet) ---
        settings_menu = tk.Menu(menu_bar, tearoff=0)
        menu_bar.add_cascade(label="Settings", menu=settings_menu)

        # --- Help menu (placeholder, no entries yet) ---
        help_menu = tk.Menu(menu_bar, tearoff=0)
        menu_bar.add_cascade(label="Help", menu=help_menu)

        tk.Tk.config(self, menu=menu_bar)

        # Instantiate every page up front so switching is instant.
        self.frames = {}
        for page in (HomePage, DataAnalysis, FCSettings, LiveFlight):
            frame = page(holder, self)
            self.frames[page] = frame
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(HomePage)

    def show_frame(self, cont):
        """Raise the requested page to the top of the stacking order."""
        self.frames[cont].tkraise()
# Individual Pages Start
class HomePage(tk.Frame):
    """Landing page: title banner, navigation buttons, and the SEDS@IIT logo."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)

        # Title banner
        title_font = tkFont.Font(family="Lucida Grande", size=32)
        title = ttk.Label(self, text="𝑨𝒅 𝑨𝒔𝒕𝒓𝒂 𝑷𝒆𝒓 𝑨𝒔𝒑𝒆𝒓𝒂", font=title_font)
        title.pack(pady=5, padx=5)
        title.place(relx=0.5, rely=0.1, anchor="n")

        # Navigation buttons, spread across the top of the window.
        nav_spec = (
            ("Data Analysis", DataAnalysis, 0.3),
            ("Flight Control Settings", FCSettings, 0.5),
            ("Live Flight Data", LiveFlight, 0.7),
        )
        for text, page, x_pos in nav_spec:
            # Bind `page` as a default argument to avoid the late-binding
            # closure pitfall inside the loop.
            btn = ttk.Button(self, text=text,
                             command=lambda target=page: controller.show_frame(target))
            btn.pack()
            btn.place(relx=x_pos, rely=0.2, anchor="n")

        # Club logo; keep a reference on the widget so Tk doesn't garbage-
        # collect the PhotoImage.
        logo_path = os.path.join(PATH, 'images', 'SEDSIIT-logo_noBG.png')
        logo_render = ImageTk.PhotoImage(Image.open(logo_path))
        logo = ttk.Label(self, image=logo_render)
        logo.image = logo_render
        logo.pack()
        logo.place(relx=0.5, rely=0.3, anchor="n")
class DataAnalysis(tk.Frame):
    """Post-flight page: pick a CSV file and inspect it in the static 2x2 plot."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)

        header = ttk.Label(self, text="Data Analysis", font=LARGE_FONT)
        header.pack(pady=10, padx=10)

        file_button = ttk.Button(self, text="Select File",
                                 command=lambda: select_file())
        file_button.pack(side=TOP)

        # Embed the shared static figure plus a matplotlib navigation toolbar.
        plot_canvas = FigureCanvasTkAgg(static_plot, self)
        plot_canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        plot_canvas.draw()
        nav_toolbar = NavigationToolbar2Tk(plot_canvas, self)
        nav_toolbar.update()
        plot_canvas._tkcanvas.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
class FCSettings(tk.Frame):
def DeleteWarningMessageBoxPopup():
    """Warn the user before wiping all data stored on the flight computer."""
    tk.messagebox.showwarning(
        "*Warning",
        "This will delete ALL DATA ON THE FLIGHT COMPUTER.\nAre you sure you want to delete all data?")
def TestingPageWarningMessageBoxPopup(idx):
    """Pop a warning when the Testing tab (index 6) is selected; no-op otherwise."""
    if idx == 6:
        tk.messagebox.showwarning("*Warning", "USE CAREFULLY! (placeholder warning)")
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
# Flight control settings header img
filepath_fcconfig_header = os.path.join(PATH, 'images', 'fcconfig_header_nobg.png')
render = ImageTk.PhotoImage(Image.open(filepath_fcconfig_header))
img = ttk.Label(self, image=render)
img.image = render
img.pack(pady=10, padx=10)
homeButton = ttk.Button(self, text="Home",
command=lambda: controller.show_frame(HomePage))
homeButton.pack()
# Flight control settings body
notebook = ttk.Notebook(self)
recovery = ttk.Frame(notebook)
auxPyro = ttk.Frame(notebook)
telemetryConfig = ttk.Frame(notebook)
calibration = ttk.Frame(notebook)
data = ttk.Frame(notebook)
aux = ttk.Frame(notebook)
testing = ttk.Frame(notebook)
notebook.add(recovery, text="𝙍𝙚𝙘𝙤𝙫𝙚𝙧𝙮")
notebook.add(auxPyro, text="𝘼𝙪𝙭 𝙋𝙮𝙧𝙤")
notebook.add(telemetryConfig, text="𝙏𝙚𝙡𝙚𝙢𝙚𝙩𝙧𝙮 𝘾𝙤𝙣𝙛𝙞𝙜")
notebook.add(calibration, text="𝘾𝙖𝙡𝙞𝙗𝙧𝙖𝙩𝙞𝙤𝙣")
notebook.add(data, text="𝘿𝙖𝙩𝙖")
notebook.add(aux, text="𝘼𝙪𝙭")
notebook.add(testing, text="𝙏𝙚𝙨𝙩𝙞𝙣𝙜")
notebook.pack(padx=16, pady=30)
#TODO: Fix the issue of no warning on testing tab when it is selected
idx = notebook.index(notebook.select())
notebook.bind('<<NotebookTabChanged>>', FCSettings.TestingPageWarningMessageBoxPopup(idx))
# Recovery frame <<<START>>>:
#Drogue Deploy Delay
drogueDeployDelayLabel = Label(recovery, text="Drogue Deploy Delay: ")
drogueDeployDelayEntryBox = Entry(recovery, width=12)
drogueDeployDelaySetButton = ttk.Button(recovery, text="Set")
#TODO: add stutus label for drogue deploy delay
#Main deployment altitude (AGL)
mainDeploymentAltitudeLabel = Label(recovery, text="Main deployment altitude (AGL): ")
mainDeploymentAltitudeEntryBox = Entry(recovery, width=12)
mainDeploymentAltitudeSetButton = ttk.Button(recovery, text="Set")
#TODO: add stutus label for main deployment altitude
#Pyro igniton time
pyroIgnitionTimeOptions = [ "NULL",
"0.5 seconds",
"1.0 seconds",
"2.0 seconds",
"3.0 seconds",
"4.0 seconds",
"5.0 seconds"]
pyroIgnitionTimeClicked = StringVar()
pyroIgnitionTimeClicked.set(pyroIgnitionTimeOptions[0])
pyroIgnitionTimeDropdown = ttk.OptionMenu(recovery, pyroIgnitionTimeClicked, *pyroIgnitionTimeOptions)
pyroIgnitionTimeLabel = Label(recovery, text="Pyro igniton time: ")
#Drogue Deploy Delay grid display
drogueDeployDelayLabel.grid(row=0, column=0, sticky="w")
drogueDeployDelayEntryBox.grid(row=0, column=1, sticky="w")
drogueDeployDelaySetButton.grid(row=0, column=2, sticky="w")
#Main Deployment Altitude grid display
mainDeploymentAltitudeLabel.grid(row=1, column=0, sticky="w")
mainDeploymentAltitudeEntryBox.grid(row=1, column=1, sticky="w")
mainDeploymentAltitudeSetButton.grid(row=1, column=2, sticky="w")
#Pyro igniton time grid display
pyroIgnitionTimeLabel.grid(row=2, column=0, sticky="w")
pyroIgnitionTimeDropdown.grid(row=2, column=1, sticky="w")
#LOGIC FOR RECOVERY
#TODO: IMPLEMENT LOGIC FOR RECOVERY
#<<<END>>> Recovery frame
#Aux Pyro Frame <<<START>>>:
auxPyroC_EnablePyroLabel = Label(auxPyro, text="Enable Pyros C: ")
auxPyroD_EnablePyroLabel = Label(auxPyro, text="Enable Pyros D: ")
auxPyroE_EnablePyroLabel = Label(auxPyro, text="Enable Pyros E: ")
auxPyroF_EnablePyroLabel = Label(auxPyro, text="Enable Pyros F: ")
auxPyroC_DeployPositionLabel = Label(auxPyro, text="Deploy position C: ")
auxPyroD_DeployPositionLabel = Label(auxPyro, text="Deploy position D: ")
auxPyroE_DeployPositionLabel = Label(auxPyro, text="Deploy position E: ")
auxPyroF_DeployPositionLabel = Label(auxPyro, text="Deploy position F: ")
auxPyroC_DelayAfterFlagLabel = Label(auxPyro, text="Delay C after Flag: ")
auxPyroD_DelayAfterFlagLabel = Label(auxPyro, text="Delay D after Flag: ")
auxPyroE_DelayAfterFlagLabel = Label(auxPyro, text="Delay E after Flag: ")
auxPyroF_DelayAfterFlagLabel = Label(auxPyro, text="Delay F after Flag: ")
self.auxPyroC_EnablePyroCheckboxValue = tk.IntVar(value=0)
self.auxPyroD_EnablePyroCheckboxValue = tk.IntVar(value=0)
self.auxPyroE_EnablePyroCheckboxValue = tk.IntVar(value=0)
self.auxPyroF_EnablePyroCheckboxValue = tk.IntVar(value=0)
self.auxPyroC_DelayAfterFlagCheckBoxValue = tk.IntVar(value=0)
self.auxPyroD_DelayAfterFlagCheckBoxValue = tk.IntVar(value=0)
self.auxPyroE_DelayAfterFlagCheckBoxValue = tk.IntVar(value=0)
self.auxPyroF_DelayAfterFlagCheckBoxValue = tk.IntVar(value=0)
auxPyroC_EnablePyroCheckBox = ttk.Checkbutton(auxPyro, variable=self.auxPyroC_EnablePyroCheckboxValue)
auxPyroD_EnablePyroCheckBox = ttk.Checkbutton(auxPyro, variable=self.auxPyroD_EnablePyroCheckboxValue)
auxPyroE_EnablePyroCheckBox = ttk.Checkbutton(auxPyro, variable=self.auxPyroE_EnablePyroCheckboxValue)
auxPyroF_EnablePyroCheckBox = ttk.Checkbutton(auxPyro, variable=self.auxPyroF_EnablePyroCheckboxValue)
auxPyroC_DelayAfterFlagCheckBox = ttk.Checkbutton(auxPyro, variable=self.auxPyroC_DelayAfterFlagCheckBoxValue)
auxPyroD_DelayAfterFlagCheckBox = ttk.Checkbutton(auxPyro, variable=self.auxPyroD_DelayAfterFlagCheckBoxValue)
auxPyroE_DelayAfterFlagCheckBox = ttk.Checkbutton(auxPyro, variable=self.auxPyroE_DelayAfterFlagCheckBoxValue)
auxPyroF_DelayAfterFlagCheckBox = ttk.Checkbutton(auxPyro, variable=self.auxPyroF_DelayAfterFlagCheckBoxValue)
auxPryoDeployPositions = [ "NULL",
"BECO",
"Stage",
"Separation",
"MECO",
"Apogee",
"Main",
"Deploy",
"Landing"]
auxPyroC_Clicked = StringVar()
auxPyroC_Clicked.set(auxPryoDeployPositions[0])
auxPyroC_DeployDropdown = ttk.OptionMenu(auxPyro, auxPyroC_Clicked, *auxPryoDeployPositions)
auxPyroD_Clicked = StringVar()
auxPyroD_Clicked.set(auxPryoDeployPositions[0])
auxPyroD_DeployDropdown = ttk.OptionMenu(auxPyro, auxPyroD_Clicked, *auxPryoDeployPositions)
auxPyroE_Clicked = StringVar()
auxPyroE_Clicked.set(auxPryoDeployPositions[0])
auxPyroE_DeployDropdown = ttk.OptionMenu(auxPyro, auxPyroE_Clicked, *auxPryoDeployPositions)
auxPyroF_Clicked = StringVar()
auxPyroF_Clicked.set(auxPryoDeployPositions[0])
auxPyroF_DeployDropdown = ttk.OptionMenu(auxPyro, auxPyroF_Clicked, *auxPryoDeployPositions)
#Aux Pyro for C
auxPyroC_EnablePyroLabel.grid(row=0, column=0, sticky="w")
auxPyroC_EnablePyroCheckBox.grid(row=0, column=1, sticky="w")
auxPyroC_DeployPositionLabel.grid(row=1, column=0, sticky="w")
auxPyroC_DeployDropdown.grid(row=1, column=1, sticky="w")
auxPyroC_DelayAfterFlagLabel.grid(row=2, column=0, sticky="w")
auxPyroC_DelayAfterFlagCheckBox.grid(row=2, column=1, sticky="w")
#Aux Pyro for D
auxPyroD_EnablePyroLabel.grid(row=4, column=0, sticky="w")
auxPyroD_EnablePyroCheckBox.grid(row=4, column=1, sticky="w")
auxPyroD_DeployPositionLabel.grid(row=5, column=0, sticky="w")
auxPyroD_DeployDropdown.grid(row=5, column=1, sticky="w")
auxPyroD_DelayAfterFlagLabel.grid(row=6, column=0, sticky="w")
auxPyroD_DelayAfterFlagCheckBox.grid(row=6, column=1, sticky="w")
#Aux Pyro for E
auxPyroE_EnablePyroLabel.grid(row=8, column=0, sticky="w")
auxPyroE_EnablePyroCheckBox.grid(row=8, column=1, sticky="w")
auxPyroE_DeployPositionLabel.grid(row=9, column=0, sticky="w")
auxPyroE_DeployDropdown.grid(row=9, column=1, sticky="w")
auxPyroE_DelayAfterFlagLabel.grid(row=10, column=0, sticky="w")
auxPyroE_DelayAfterFlagCheckBox.grid(row=10, column=1, sticky="w")
#Aux Pyro for F
auxPyroF_EnablePyroLabel.grid(row=12, column=0, sticky="w")
auxPyroF_EnablePyroCheckBox.grid(row=12, column=1, sticky="w")
auxPyroF_DeployPositionLabel.grid(row=13, column=0, sticky="w")
auxPyroF_DeployDropdown.grid(row=13, column=1, sticky="w")
auxPyroF_DelayAfterFlagLabel.grid(row=14, column=0, sticky="w")
auxPyroF_DelayAfterFlagCheckBox.grid(row=14, column=1, sticky="w")
#LOGIC FOR AUX PYRO
#TODO: IMPLEMENT LOGIC FOR AUX PYRO
#<<<END>>> AUX PYRO frame
#TELEMETRY CONFIG: Frame <<<START>>>:
transmitPowerLabel = ttk.Label(telemetryConfig, text="Transmit Power: ")
transmitPowerOptions = ["NULL",
"-1 dBm",
"2 dBm",
"5 dBm",
"8 dBm",
"11 dBm",
"14 | |
are stacked together so as to create a 4d filter.
# Using the same name = good idea?
filter_ref = tf.stack(kernels, axis=3, name='filter')
output = tf.nn.conv2d(_input, filter_ref, strides, padding)
return output, filter_ref
def dropout(self, _input):
    """
    Apply dropout to `_input`, but only while the graph is being trained.

    When keep_prob is 1 (dropout disabled) the input passes through
    untouched; otherwise tf.cond selects the dropout branch at run time
    based on the is_training placeholder.

    Args:
        _input: tensor, the operation's input.
    """
    # Guard clause: dropout disabled entirely when keep_prob == 1.
    if self.keep_prob >= 1:
        return _input
    return tf.cond(
        self.is_training,
        lambda: tf.nn.dropout(_input, self.keep_prob),
        lambda: _input
    )
# SIMPLEST OPERATIONS (FULLY CONNECTED) -----------------------------------
# -------------------------------------------------------------------------
def weight_variable_xavier(self, shape, name):
    """
    Create a weight matrix for a fully-connected layer.

    Uses the Xavier initializer, which keeps gradient magnitudes roughly
    constant from layer to layer.

    Args:
        shape: `list` of `int`, shape of the weight matrix;
        name: `str`, a name for identifying the weight matrix.
    """
    xavier = tf.contrib.layers.xavier_initializer()
    return tf.get_variable(name, shape=shape, initializer=xavier)
def bias_variable(self, shape, name='bias'):
    """
    Create a zero-initialised bias term for a fully-connected layer.

    Args:
        shape: `list` of `int`, shape of the bias matrix;
        name: `str`, a name for identifying the bias matrix.
    """
    zeros = tf.constant(0.0, shape=shape)
    return tf.get_variable(name, initializer=zeros)
# COMPOSITE FUNCTION + BOTTLENECK -----------------------------------------
# -------------------------------------------------------------------------
def composite_function(self, _input, out_features, kernel_size=3):
    """
    Dense-layer composite function H_l([x_0, ..., x_l-1]).

    Applies, in order: batch normalisation, ReLU, a 2d convolution with
    square kernels of side `kernel_size`, and dropout (when the graph is
    being trained and keep_prob is not 1).

    Args:
        _input: tensor, the operation's input;
        out_features: `int`, number of feature maps at the output;
        kernel_size: `int`, size of the square kernels (their side).

    Returns:
        A 4-tuple: the output tensor, a reference to the convolution
        filter, the list of kernels stacked into that filter, and the
        tensor that was fed into the convolution (post-BN/ReLU).
    """
    with tf.variable_scope("composite_function"):
        # Batch normalisation followed by ReLU produces the conv input.
        conv_input = tf.nn.relu(self.batch_norm(_input))
        # 2d convolution with an explicit kernel list.
        conv_out, filter_ref, kernels = self.conv2d_with_kernels(
            conv_input, out_features=out_features, kernel_size=kernel_size)
        # Dropout (no-op unless training with keep_prob < 1).
        conv_out = self.dropout(conv_out)
    return conv_out, filter_ref, kernels, conv_input
def reconstruct_composite_function(self, in_cv, kernels):
    """
    Rebuild the tail of the composite function H_l([x_0, ..., x_l-1]) for
    a dense layer from a known convolution input and an explicit kernel
    list: 2d convolution followed by dropout.

    Args:
        in_cv: tensor, the input of the convolution;
        kernels: `list` of tensors, the kernels for the convolution.

    Returns:
        The post-dropout output tensor and a reference to the filter.
    """
    conv_out, filter_ref = self.conv2d_with_given_kernels(in_cv, kernels)
    return self.dropout(conv_out), filter_ref
def bottleneck(self, _input, out_features):
    """
    DenseNet-BC bottleneck, applied before the composite function H_l in
    a dense layer.

    Applies batch normalisation, ReLU, a 1x1 convolution that produces
    four times the feature maps H_l will emit, and dropout (when the
    graph is being trained and keep_prob is not 1).

    Args:
        _input: tensor, the operation's input;
        out_features: `int`, number of feature maps at the output of H_l.

    Returns:
        The output tensor and a reference to the 2d convolution kernel.
    """
    with tf.variable_scope("bottleneck"):
        pre_conv = tf.nn.relu(self.batch_norm(_input))
        # 1x1 convolution emitting 4x the features of H_l.
        result, filter_ref = self.conv2d(
            pre_conv, out_features=out_features * 4, kernel_size=1,
            padding='VALID')
        result = self.dropout(result)
    return result, filter_ref
# BLOCKS AND THEIR INTERNAL LAYERS ----------------------------------------
# -------------------------------------------------------------------------
def add_new_kernels_to_layer(self, _input, in_cv, layer, kernel_num,
                             complementarity=True, kernel_size=3):
    """
    Adds new convolution kernels to a layer within a block:
    creates the kernels, reconstructs the composite function, and
    concatenates outputs to ensure the DenseNet paradigm.
    If required, uses a complementarity mechanism to initialise the new
    kernels: the sign configuration is the opposite of that of the kernels
    with lowest CS, unless that configuration is already taken (in which
    case it must be different, but close to the opposite).
    Returns the layer's new output tensor.
    N.B.: This function is meant to be used ONLY in self-constructing mode
    (i.e. when should_self_construct is true).
    Args:
        _input: tensor, the layer's input;
        in_cv: tensor, the input for the layer's convolution;
        layer: `int`, identifier number for this layer (within a block);
        kernel_num: `int`, number of new (square) kernels to be added;
        complementarity: `bool`, whether the complementarity mechanism
            should be used to initialise new kernels or not;
        kernel_size: `int`, size of the kernels (their side).
    """
    with tf.variable_scope("layer_%d" % layer):
        with tf.variable_scope("composite_function"):
            # if using the complementarity mechanism
            if complementarity:
                # get the sign distribution of all kernels in the layer
                kernel_signs = []
                for old_kernel in self.kernels_ref_list[-1][-1]:
                    kernel_signs.append(
                        np.sign(self.sess.run(old_kernel)))
                # get the ids of the kernels with lowest CS
                compl_kernels = sorted(
                    range(len(self.kCS_FIFO)),
                    key=lambda i: self.kCS_FIFO[i][-1])[:kernel_num]
            # create and initialise kernel_num new kernels
            in_features = int(in_cv.get_shape()[-1])
            for new_k in range(kernel_num):
                self.kernel_name_counter += 1
                self.kernels_ref_list[-1][-1].append(
                    self.weight_variable_msra(
                        [kernel_size, kernel_size, in_features],
                        name='kernel'+str(self.kernel_name_counter)))
                self.sess.run(tf.variables_initializer(
                    [self.kernels_ref_list[-1][-1][-1]]))
                # if complementarity, make each new kernel complementary to
                # one of the previously identified low-CS kernels
                if complementarity:
                    # get the abs value contents of the new kernel
                    new_k_image = self.sess.run(
                        self.kernels_ref_list[-1][-1][-1])
                    new_k_image = np.absolute(new_k_image)
                    # sign distribution = opposite to the low-CS kernel
                    new_k_signs = -1*kernel_signs[compl_kernels[new_k]]
                    # check if sign distribution already exists
                    new_k_signs_try = new_k_signs
                    sign_distr_exists = True
                    patience = kernel_size*kernel_size*in_features
                    while sign_distr_exists and patience:
                        # Compare the current candidate with every existing
                        # distribution: it "exists" if it matches ANY of
                        # them.
                        # BUGFIX: the original accumulated with `and` from
                        # a False seed (so the check was constantly False
                        # and the retry loop never ran), and it compared
                        # the unmutated `new_k_signs` rather than the
                        # candidate `new_k_signs_try` being tried.
                        sign_distr_exists = False
                        for sign_distr in kernel_signs:
                            sign_distr_exists = sign_distr_exists or (
                                new_k_signs_try == sign_distr).all()
                        # if so, switch one of the signs randomly
                        if sign_distr_exists:
                            new_k_signs_try = np.copy(new_k_signs)
                            new_k_signs_try[
                                np.random.randint(kernel_size)][
                                np.random.randint(kernel_size)][
                                np.random.randint(in_features)
                            ] *= -1
                            patience -= 1
                    # finally, apply the sign distr and add it to the list
                    new_k_image = np.multiply(new_k_image, new_k_signs_try)
                    kernel_signs.append(new_k_signs_try)
                    # assign the new weight values to the kernel
                    self.sess.run(self.kernels_ref_list[-1][-1][-1].assign(
                        new_k_image))
            # reconstruct the composite function from the current kernels
            comp_out, filter_ref = self.reconstruct_composite_function(
                in_cv, self.kernels_ref_list[-1][-1])
            # save a reference to the composite function's filter
            self.filter_ref_list[-1][-1] = filter_ref
        # concatenate output with layer input to ensure DenseNet paradigm
        if TF_VERSION[0] >= 1 and TF_VERSION[1] >= 0:
            output = tf.concat(axis=3, values=(_input, comp_out))
        else:
            output = tf.concat(3, (_input, comp_out))
    # Keep track of kernel CS.
    self.kCS_FIFO.extend([
        deque(maxlen=self.dkCS_softening) for i in range(kernel_num)])
    self.dkCS_FIFO.extend([
        deque(maxlen=self.dkCS_std_window) for i in range(kernel_num)])
    return output
def remove_kernels_from_layer(self, _input, in_cv, layer,
                              kernels_to_prune):
    """
    Removes specific convolution kernels in a layer within a block:
    removes the kernels from the list, reconstructs the composite function,
    and concatenates outputs to ensure the DenseNet paradigm.
    Returns the layer's new output tensor.
    N.B.: This function is meant to be used ONLY in self-constructing mode
    (i.e. when should_self_construct is true).
    Args:
        _input: tensor, the layer's input;
        in_cv: tensor, the input for the layer's convolution;
        layer: `int`, identifier number for this layer (within a block);
        kernels_to_prune: `list` of `int`, the specific kernels to remove.
    """
    with tf.variable_scope("layer_%d" % layer):
        with tf.variable_scope("composite_function"):
            # remove the kernels specified in kernels_to_prune
            # NOTE(review): in_features is computed but never used in this
            # method — presumably left over from a copy of
            # add_new_kernels_to_layer; confirm before removing.
            in_features = int(in_cv.get_shape()[-1])
            print("\nPre-pruning kernels_ref_list length: %d" % len(
                self.kernels_ref_list[-1][-1]))
            for i in reversed(kernels_to_prune):
                # iterate backwards so that kernel ids remain meaningful
                # (deleting from the end does not shift lower indices).
                # Record each pruned variable's name so it can be excluded
                # later (e.g. when saving/restoring).
                self.pruned_varnames.append(
                    self.kernels_ref_list[-1][-1][i].name)
                del self.kernels_ref_list[-1][-1][i]
            # Log every variable name pruned so far (cumulative list).
            for elem in self.pruned_varnames:
                print(elem)
            print("Post-pruning kernels_ref_list length: %d\n" % len(
                self.kernels_ref_list[-1][-1]))
            # reconstruct the composite function from the current kernels
            comp_out, filter_ref = self.reconstruct_composite_function(
                in_cv, self.kernels_ref_list[-1][-1])
            # save a reference to the composite function's filter
            self.filter_ref_list[-1][-1] = filter_ref
        # concatenate output with layer input to ensure DenseNet paradigm
        if TF_VERSION[0] >= 1 and TF_VERSION[1] >= 0:
            output = tf.concat(axis=3, values=(_input, comp_out))
        else:
            output = tf.concat(3, (_input, comp_out))
    # Keep track of kernel CS: drop the FIFO entries of the pruned kernels,
    # again iterating backwards so indices stay valid.
    for i in reversed(kernels_to_prune):
        del self.kCS_FIFO[i], self.dkCS_FIFO[i]
    return output
def add_internal_layer(self, _input, layer, growth_rate):
"""
Adds a new convolutional (dense) layer within a block.
This layer will perform the composite function H_l([x_0, ..., x_l-1])
to obtain its output x_l.
It will then concatenate x_l with | |
prob, _ = \
generator(example, params.train_maximum_sql_length,
sampling=params.is_sampling,
forcing=True)
if seq[-1] == EOS_TOK:
seq = seq[:-1]
prob = prob[:-1]
rewards = torch.Tensor(np.ones(len(seq))).cuda()
loss = generator.update_gan_loss(prob, rewards)
forcing_loss += loss
torch.cuda.empty_cache()
progbar3.update(j)
progbar3.finish()
log.put("Forcing mean loss:\t" + str(forcing_loss/params.gan_batch_size))
print("Forcing mean loss: " + str(forcing_loss/params.gan_batch_size))
# Run an evaluation step on a sample of the training data.
train_eval_results = evaluate(train_sample,
generator,
params.train_maximum_sql_length,
name=os.path.join(params.logdir, "train-eval"),
write_results=True,
gold_forcing=True,
metrics=TRAIN_EVAL_METRICS)[0]
for name, value in train_eval_results.items():
log.put(
"train final gold-passing " +
name.name +
":\t" +
"%.2f" %
value)
print(
"train final gold-passing " +
name.name +
":\t" +
"%.2f" %
value)
valid_eval_results = evaluate(valid_data,
generator,
params.eval_maximum_sql_length,
name=os.path.join(params.logdir, "valid-eval"),
write_results=True,
gold_forcing=True,
metrics=VALID_EVAL_METRICS)[0]
for name, value in valid_eval_results.items():
log.put("valid gold-passing " + name.name + ":\t" + "%.2f" % value)
print("valid gold-passing " + name.name + ":\t" + "%.2f" % value)
print("Generating training samples!")
with torch.no_grad():
generate_samples(generator, train_data,
real_path, fake_path,
params.max_gen_len,
sampling=params.is_sampling,
# gen_num=params.gen_num / (1 - train_eval_results[Metrics.STRING_ACCURACY] / 100.),
train=True)
print("Finished generating training samples!")
dis_data_iter = DisDataIter(real_path,
fake_path,
params.dis_batch_size)
print("Finetuning discriminator!")
for _ in range(params.dis_k_steps):
metrics = dis_train_epoch(
discriminator,
dis_data_iter,
dis_criterion,
dis_optimizer)
log.put("Discriminator loss:\t" + str(metrics["loss"]))
log.put("Discriminator accuracy:\t" + str(metrics["acc"]))
log.put("Discriminator real accuracy:\t" + str(metrics["real_acc"]))
log.put("Discriminator fake accuracy:\t" + str(metrics["fake_acc"]))
log.put("Discriminator confidence:\t" + str(metrics["con"]))
log.put("Discriminator real confidence:\t" + str(metrics["real_con"]))
log.put("Discriminator fake confidence:\t" + str(metrics["fake_con"]))
print("Discriminator loss: " + str(metrics["loss"]))
print("Discriminator accuracy: " + str(metrics["acc"]))
print("Discriminator real accuracy: " + str(metrics["real_acc"]))
print("Discriminator fake accuracy: " + str(metrics["fake_acc"]))
print("Discriminator confidence: " + str(metrics["con"]))
print("Discriminator real confidence: " + str(metrics["real_con"]))
print("Discriminator fake confidence: " + str(metrics["fake_con"]))
print("Finished finetuning discriminator!")
# save checkpoint
ckp = {
'epoch': epoch,
'batches': train_batch,
'pos_in_batch': i+1,
'gen_state_dict': generator.state_dict(),
'dis_state_dict': discriminator.state_dict(),
'gen_optimizer_state_dict': generator.trainer.state_dict(),
'gen_bert_optimizer_state_dict': generator.bert_trainer.state_dict(),
'dis_optimizer_state_dict': dis_optimizer.state_dict()
}
save_ckp(ckp, params.logdir, params.adv_ckp)
progbar.update(i)
print("")
random.shuffle(train_batch)
random.shuffle(train_data)
start_pos_in_batch = 0
progbar.finish()
log.put("Finished adversarial training!")
log.close()
def mixed_mle(generator, discriminator, dis_criterion,
              dis_optimizer, data, params, start_epoch=0,
              start_batches=None, start_pos_in_batch=0,
              start_clamp=0., start_len=0):
    """
    Mixed MLE / adversarial training loop (MIXER-style schedule).

    Gradually extends the adversarially-trained suffix of each sequence:
    the `clamp` offset shrinks by params.step_size per length step `k`,
    so more of the tail receives policy-gradient rewards over time. After
    each generator batch the discriminator is finetuned on fresh samples
    and a resumable checkpoint is written.

    Args:
        generator / discriminator: the two adversarial models;
        dis_criterion / dis_optimizer: discriminator loss and optimizer;
        data: ATIS dataset wrapper;
        params: parsed command-line parameters;
        start_epoch, start_batches, start_pos_in_batch, start_clamp,
        start_len: resume state loaded from a checkpoint (see load_adv_ckp).
    """
    log = Logger(os.path.join(params.logdir, params.adv_logfile), 'w')
    # Select interaction- or utterance-level accessors once, up front.
    if params.interaction_level:
        get_batch = data.get_interaction_batches
        get_data = data.get_all_interactions
        get_sample = data.get_random_interactions
        evaluate = evaluate_interaction_sample
    else:
        get_batch = data.get_utterance_batches
        get_data = data.get_all_utterances
        get_sample = data.get_random_utterances
        evaluate = evaluate_utterance_sample
    if start_batches:
        train_batch = start_batches
    else:
        train_batch = get_batch(
            params.gan_batch_size,
            max_output_length=params.train_maximum_sql_length
        )
    num_batch = len(train_batch)
    train_data = get_data(
        data.train_data,
        max_output_length=params.train_maximum_sql_length
    )
    train_sample = get_sample(
        params.train_evaluation_size,
        max_output_length=params.train_maximum_sql_length
    )
    valid_data = get_data(
        data.valid_data,
        max_output_length=params.train_maximum_sql_length
    )
    # find max length gold_query (drives the MIXER length schedule)
    max_len = 0
    for example in train_data:
        utterance, = example.gold_utterances()
        if len(utterance.gold_query()) > max_len:
            max_len = len(utterance.gold_query())
    progbar = get_progressbar("adversarial training ",
                              num_batch * params.adv_epoch * max_len)
    progbar.start()
    print("")
    real_path = os.path.join(params.samples_dir, params.adv_real_file)
    fake_path = os.path.join(params.samples_dir, params.adv_fake_file)
    generator.set_dropout(params.dropout_amount)
    for epoch in range(start_epoch, params.adv_epoch):
        log.put("Epoch:\t" + str(epoch))
        print("Epoch: " + str(epoch))
        clamp = start_clamp
        for k in range(start_len, max_len - 1):
            # Each length step exposes a longer suffix to the GAN reward.
            clamp -= params.step_size
            for i in range(start_pos_in_batch, num_batch):
                batch = train_batch[i]
                gen_loss = 0.
                progbar2 = get_progressbar("generator ",
                                           params.gan_batch_size)
                progbar2.start()
                for j, example in enumerate(batch.items):
                    seq, _, prob, pred = \
                        generator(example, params.train_maximum_sql_length,
                                  sampling=params.is_sampling,
                                  forcing=True)
                    # BUGFIX: was `with torch.no_grad:` (missing call
                    # parentheses), which raises at runtime — the context
                    # manager must be instantiated, as done elsewhere in
                    # this file.
                    with torch.no_grad():
                        # Keep the clamp within the sequence bounds.
                        l = clamp if len(seq) + clamp > 0 else -len(seq) + 1
                        rewards, probs = generator.get_reward_mm(
                            seq[:l], pred[:l], example,
                            params.roll_num, params.max_gen_len,
                            discriminator
                        )
                    # log.put("Generator reward:\t" + str(rewards.tolist()))
                    # print("Generator reward: " + str(rewards.tolist()))
                    rewards = torch.Tensor(rewards).cuda()
                    loss = generator.update_gan_loss_mm(prob, probs, rewards)
                    gen_loss += loss
                    torch.cuda.empty_cache()
                    progbar2.update(j)
                progbar2.finish()
                log.put("Generator mean loss:\t" + str(gen_loss/params.gan_batch_size))
                print("Generator mean loss: " + str(gen_loss/params.gan_batch_size))
                # Run an evaluation step on a sample of the training data.
                train_eval_results = evaluate(train_sample,
                                              generator,
                                              params.train_maximum_sql_length,
                                              name=os.path.join(params.logdir, "train-eval"),
                                              write_results=True,
                                              gold_forcing=True,
                                              metrics=TRAIN_EVAL_METRICS)[0]
                for name, value in train_eval_results.items():
                    log.put(
                        "train final gold-passing " +
                        name.name +
                        ":\t" +
                        "%.2f" %
                        value)
                    print(
                        "train final gold-passing " +
                        name.name +
                        ":\t" +
                        "%.2f" %
                        value)
                valid_eval_results = evaluate(valid_data,
                                              generator,
                                              params.eval_maximum_sql_length,
                                              name=os.path.join(params.logdir, "valid-eval"),
                                              write_results=True,
                                              gold_forcing=True,
                                              metrics=VALID_EVAL_METRICS)[0]
                for name, value in valid_eval_results.items():
                    log.put("valid gold-passing " + name.name + ":\t" + "%.2f" % value)
                    print("valid gold-passing " + name.name + ":\t" + "%.2f" % value)
                print("Generating training samples!")
                with torch.no_grad():
                    generate_samples(generator, train_data,
                                     real_path, fake_path,
                                     params.max_gen_len,
                                     sampling=params.is_sampling,
                                     gen_num=params.gen_num)
                print("Finished generating training samples!")
                dis_data_iter = DisDataIter(real_path,
                                            fake_path,
                                            params.dis_batch_size)
                print("Finetuning discriminator!")
                metrics = dis_train_epoch(
                    discriminator,
                    dis_data_iter,
                    dis_criterion,
                    dis_optimizer)
                log.put("Discriminator loss:\t" + str(metrics["loss"]))
                log.put("Discriminator accuracy:\t" + str(metrics["acc"]))
                log.put("Discriminator real accuracy:\t" + str(metrics["real_acc"]))
                log.put("Discriminator fake accuracy:\t" + str(metrics["fake_acc"]))
                log.put("Discriminator confidence:\t" + str(metrics["con"]))
                log.put("Discriminator real confidence:\t" + str(metrics["real_con"]))
                log.put("Discriminator fake confidence:\t" + str(metrics["fake_con"]))
                print("Discriminator loss: " + str(metrics["loss"]))
                print("Discriminator accuracy: " + str(metrics["acc"]))
                print("Discriminator real accuracy: " + str(metrics["real_acc"]))
                print("Discriminator fake accuracy: " + str(metrics["fake_acc"]))
                print("Discriminator confidence: " + str(metrics["con"]))
                print("Discriminator real confidence: " + str(metrics["real_con"]))
                print("Discriminator fake confidence: " + str(metrics["fake_con"]))
                print("Finished finetuning discriminator!")
                # save checkpoint (resumable via load_adv_ckp with mle=True)
                ckp = {
                    'epoch': epoch,
                    'batches': train_batch,
                    'pos_in_batch': i+1,
                    'gen_state_dict': generator.state_dict(),
                    'dis_state_dict': discriminator.state_dict(),
                    'gen_optimizer_state_dict': generator.trainer.state_dict(),
                    'gen_bert_optimizer_state_dict': generator.bert_trainer.state_dict(),
                    'dis_optimizer_state_dict': dis_optimizer.state_dict(),
                    'clamp': clamp,
                    'length': k
                }
                save_ckp(ckp, params.logdir, params.adv_ckp)
                progbar.update(i)
            print("")
            random.shuffle(train_batch)
            random.shuffle(train_data)
            # Resume offset only applies to the first pass over the batches.
            start_pos_in_batch = 0
    progbar.finish()
    log.put("Finished adversarial training!")
    log.close()
def save_ckp(ckp, ckp_dir, ckp_filename):
    """Serialize the checkpoint dict ``ckp`` to ``ckp_dir/ckp_filename``."""
    torch.save(ckp, os.path.join(ckp_dir, ckp_filename))
def load_ckp(ckp_file_path, model, optimizer, bert_optimizer=None):
    """Restore model/optimizer (and optionally a BERT optimizer) state.

    Returns ``(epoch, model, optimizer, bert_optimizer)``; the objects are
    mutated in place via their ``load_state_dict`` methods.
    """
    ckp = torch.load(ckp_file_path)
    model.load_state_dict(ckp['state_dict'])
    optimizer.load_state_dict(ckp['optimizer_state_dict'])
    # The BERT optimizer is optional; only restore it when one was passed.
    if bert_optimizer:
        bert_optimizer.load_state_dict(ckp['bert_optimizer_state_dict'])
    return ckp['epoch'], model, optimizer, bert_optimizer
def load_adv_ckp(ckp_path, gen, dis, gen_optm, dis_optm, bert_optm=None, mle=False):
    """Restore the full adversarial-training state from a checkpoint.

    Loads generator/discriminator weights and both optimizer states; the
    BERT optimizer is restored only when one is supplied. When ``mle`` is
    True the mixed-MLE ``clamp``/``length`` counters are read from the
    checkpoint, otherwise they default to 0.
    """
    ckp = torch.load(ckp_path)
    gen.load_state_dict(ckp['gen_state_dict'])
    dis.load_state_dict(ckp['dis_state_dict'])
    gen_optm.load_state_dict(ckp['gen_optimizer_state_dict'])
    dis_optm.load_state_dict(ckp['dis_optimizer_state_dict'])
    if bert_optm:
        bert_optm.load_state_dict(ckp['gen_bert_optimizer_state_dict'])
    if mle:
        clamp, length = ckp['clamp'], ckp['length']
    else:
        clamp, length = 0, 0
    return (ckp['epoch'], ckp['batches'], ckp['pos_in_batch'],
            gen, dis, gen_optm, dis_optm, bert_optm, clamp, length)
def main():
"""Main function that trains and/or evaluates a model."""
params = interpret_args()
if params.gan:
assert params.max_gen_len == params.train_maximum_sql_length \
== params.eval_maximum_sql_length
data = atis_data.ATISDataset(params)
generator = SchemaInteractionATISModel(
params,
data.input_vocabulary,
data.output_vocabulary,
data.output_vocabulary_schema,
None
)
generator = generator.cuda()
generator.build_optim()
if params.gen_from_ckp:
gen_ckp_path = os.path.join(params.logdir, params.gen_pretrain_ckp)
if params.fine_tune_bert:
gen_epoch, generator, generator.trainer, \
generator.bert_trainer = \
load_ckp(
gen_ckp_path,
generator,
generator.trainer,
generator.bert_trainer
)
else:
gen_epoch, generator, generator.trainer, _ = \
load_ckp(
gen_ckp_path,
generator,
generator.trainer
)
else:
gen_epoch = 0
print('====================Model Parameters====================')
print('=======================Generator========================')
for name, param in generator.named_parameters():
print(name, param.requires_grad, param.is_cuda, param.size())
assert param.is_cuda
print('==================Optimizer Parameters==================')
print('=======================Generator========================')
for param_group in generator.trainer.param_groups:
print(param_group.keys())
for param in param_group['params']:
print(param.size())
if params.fine_tune_bert:
print('=========================BERT===========================')
for param_group in generator.bert_trainer.param_groups:
print(param_group.keys())
for param in param_group['params']:
print(param.size())
sys.stdout.flush()
# Pre-train generator with MLE
if params.train:
print('=============== Pre-training generator! ================')
train(generator, data, params, gen_epoch)
print('=========== Pre-training generator complete! ===========')
dis_filter_sizes = [i for i in range(1, params.max_gen_len, 4)]
dis_num_filters = [(100 + i * 10)
for i in range(1, params.max_gen_len, 4)]
discriminator = Discriminator(
params,
data.dis_src_vocab,
data.dis_tgt_vocab,
params.max_gen_len,
params.num_dis_classes,
dis_filter_sizes,
dis_num_filters,
params.max_pos_emb,
params.num_tok_type,
params.dis_dropout
)
discriminator = discriminator.cuda()
dis_criterion = nn.NLLLoss(reduction='mean')
dis_criterion = dis_criterion.cuda()
dis_optimizer = optim.Adam(discriminator.parameters())
if params.dis_from_ckp:
dis_ckp_path = os.path.join(params.logdir, params.dis_pretrain_ckp)
dis_epoch, discriminator, dis_optimizer, _ = load_ckp(
dis_ckp_path,
discriminator,
dis_optimizer
)
else:
dis_epoch = 0
print('====================Model Parameters====================')
print('=====================Discriminator======================')
for name, param in discriminator.named_parameters():
print(name, param.requires_grad, param.is_cuda, param.size())
assert param.is_cuda
print('==================Optimizer Parameters==================')
print('=====================Discriminator======================')
for param_group in dis_optimizer.param_groups:
print(param_group.keys())
for param in param_group['params']:
print(param.size())
sys.stdout.flush()
# Pre-train discriminator
if params.pretrain_discriminator:
print('============= Pre-training discriminator! ==============')
pretrain_discriminator(
params,
generator,
discriminator,
dis_criterion,
dis_optimizer,
data,
start_epoch=dis_epoch
)
print('========= Pre-training discriminator complete! =========')
# Adversarial Training
if params.adversarial_training:
print('================ Adversarial training! =================')
generator.build_optim()
dis_criterion = nn.NLLLoss(reduction='mean')
dis_optimizer = optim.Adam(discriminator.parameters())
dis_criterion = dis_criterion.cuda()
if params.adv_from_ckp and params.mle is not "mixed_mle":
adv_ckp_path = os.path.join(params.logdir, params.adv_ckp)
if params.fine_tune_bert:
epoch, batches, pos_in_batch, generator, discriminator, \
generator.trainer, dis_optimizer, \
generator.bert_trainer, _, _ = \
load_adv_ckp(
adv_ckp_path,
generator,
discriminator,
generator.trainer,
dis_optimizer,
generator.bert_trainer)
else:
epoch, batches, pos_in_batch, generator, discriminator, \
generator.trainer, dis_optimizer, _, _, _ = \
load_adv_ckp(
adv_ckp_path,
generator,
discriminator,
generator.trainer,
dis_optimizer)
adv_train(
generator,
discriminator,
dis_criterion,
dis_optimizer,
data,
params,
start_epoch=epoch,
start_batches=batches,
start_pos_in_batch=pos_in_batch
)
elif params.adv_from_ckp and params.mle == "mixed_mle":
adv_ckp_path = os.path.join(params.logdir, params.adv_ckp)
if params.fine_tune_bert:
epoch, batches, pos_in_batch, generator, discriminator, \
generator.trainer, dis_optimizer, \
generator.bert_trainer, clamp, length = \
load_adv_ckp(
adv_ckp_path,
generator,
discriminator,
generator.trainer,
dis_optimizer,
generator.bert_trainer,
mle=True)
else:
epoch, batches, pos_in_batch, generator, discriminator, \
generator.trainer, dis_optimizer, _, clamp, length = \
load_adv_ckp(
adv_ckp_path,
generator,
discriminator,
generator.trainer,
dis_optimizer,
mle=True)
mixed_mle(
generator,
discriminator,
dis_criterion,
dis_optimizer,
data,
params,
start_epoch=epoch,
start_batches=batches,
start_pos_in_batch=pos_in_batch,
start_clamp=clamp,
start_len=length
)
else:
if params.mle == 'mixed_mle':
mixed_mle(
generator,
discriminator,
dis_criterion,
dis_optimizer,
data,
params
)
else:
adv_train(
generator,
discriminator,
dis_criterion,
dis_optimizer,
data,
params
)
if params.evaluate and 'valid' in params.evaluate_split:
print("================== Evaluating! ===================")
evaluate(generator, data, params, | |
<filename>dlgr/griduniverse/experiment.py
"""The Griduniverse."""
import datetime
import flask
import gevent
import itertools
import json
import logging
import math
import random
import string
import time
import uuid
from cached_property import cached_property
from faker import Factory
from sqlalchemy import create_engine
from sqlalchemy import func
from sqlalchemy.orm import (
sessionmaker,
scoped_session,
)
import dallinger
from dallinger.compat import unicode
from dallinger.config import get_config
from dallinger.experiment import Experiment
from dallinger.heroku.worker import conn as redis
from . import distributions
from .maze import Wall
from .maze import labyrinth
from .bots import Bot
from .models import Event
logger = logging.getLogger(__file__)
config = get_config()
# Make bot importable without triggering style warnings
Bot = Bot
class PluralFormatter(string.Formatter):
    """Formatter supporting a ``{n:plural,<singular>,<plural>}`` spec.

    Any other format spec falls through to the standard implementation.
    """

    def format_field(self, value, format_spec):
        if not format_spec.startswith('plural'):
            return super(PluralFormatter, self).format_field(value, format_spec)
        parts = format_spec.split(',')
        # Treat 1, '1' and 1.0 all as singular.
        is_singular = value == 1 or value == '1' or value == 1.0
        return parts[1] if is_singular else parts[2]


formatter = PluralFormatter()
def extra_parameters():
    """Register every GridUniverse config variable with Dallinger's config.

    Dallinger rejects unknown config keys, so each experiment parameter is
    declared here together with the type used to parse its value.
    """
    # Parameter name -> parser type (``unicode`` comes from dallinger.compat
    # for py2/py3 string compatibility).
    types = {
        'network': unicode,
        'max_participants': int,
        'bot_policy': unicode,
        'num_rounds': int,
        'time_per_round': float,
        'instruct': bool,
        'columns': int,
        'rows': int,
        'window_columns': int,
        'window_rows': int,
        'block_size': int,
        'padding': int,
        'chat_visibility_threshold': float,
        'spatial_chat': bool,
        'visibility': int,
        'visibility_ramp_time': int,
        'background_animation': bool,
        'player_overlap': bool,
        'leaderboard_group': bool,
        'leaderboard_individual': bool,
        'leaderboard_time': int,
        'motion_speed_limit': float,
        'motion_auto': bool,
        'motion_cost': float,
        'motion_tremble_rate': float,
        'show_chatroom': bool,
        'show_grid': bool,
        'others_visible': bool,
        'num_colors': int,
        'mutable_colors': bool,
        'costly_colors': bool,
        'pseudonyms': bool,
        'pseudonyms_locale': unicode,
        'pseudonyms_gender': unicode,
        'contagion': int,
        'contagion_hierarchy': bool,
        'walls_density': float,
        'walls_contiguity': float,
        'walls_visible': bool,
        'initial_score': int,
        'dollars_per_point': float,
        'tax': float,
        'relative_deprivation': float,
        'frequency_dependence': float,
        'frequency_dependent_payoff_rate': float,
        'donation_amount': int,
        'donation_individual': bool,
        'donation_group': bool,
        'donation_ingroup': bool,
        'donation_public': bool,
        'num_food': int,
        'respawn_food': bool,
        'food_visible': bool,
        'food_reward': int,
        'food_pg_multiplier': float,
        'food_growth_rate': float,
        'food_maturation_speed': float,
        'food_maturation_threshold': float,
        'food_planting': bool,
        'food_planting_cost': int,
        'food_probability_distribution': unicode,
        'seasonal_growth_rate': float,
        'difi_question': bool,
        'difi_group_label': unicode,
        'difi_group_image': unicode,
        'fun_survey': bool,
        'pre_difi_question': bool,
        'pre_difi_group_label': unicode,
        'pre_difi_group_image': unicode,
        'leach_survey': bool,
        'intergroup_competition': float,
        'intragroup_competition': float,
        'identity_signaling': bool,
        'identity_starts_visible': bool,
        'score_visible': bool,
        'alternate_consumption_donation': bool,
        'use_identicons': bool,
        'build_walls': bool,
        'wall_building_cost': int,
        'donation_multiplier': float,
        'num_recruits': int,
        'state_interval': float,
    }
    # Register each declared parameter with the module-level config object.
    for key in types:
        config.register(key, types[key])
def softmax(vector, temperature=1):
    """The softmax activation function.

    Raises each entry to the power ``temperature`` and normalizes so the
    result sums to 1. ``temperature == 1`` leaves proportions unchanged;
    higher temperatures sharpen the distribution (used for intra/intergroup
    competition in payoffs).

    Fixes: when the powered entries sum to zero (e.g. an all-zero vector),
    return the uniform distribution instead of ``len(vector)`` per entry,
    which was not a probability distribution. The total is also computed
    once instead of once per element. An empty vector yields ``[]``.
    """
    powered = [math.pow(x, temperature) for x in vector]
    total = sum(powered)
    if total:
        return [float(x) / total for x in powered]
    # Degenerate case: no mass anywhere -> uniform over the entries.
    return [1.0 / len(powered) for _ in powered]
class Gridworld(object):
"""A Gridworld in the Griduniverse."""
player_color_names = [
"BLUE",
"YELLOW",
"ORANGE",
"RED",
"PURPLE",
"TEAL"
]
player_colors = [
[0.50, 0.86, 1.00],
[1.00, 0.86, 0.50],
[0.91, 0.50, 0.02],
[0.64, 0.11, 0.31],
[0.85, 0.60, 0.85],
[0.77, 0.96, 0.90]
]
GREEN = [0.51, 0.69, 0.61]
WHITE = [1.00, 1.00, 1.00]
wall_locations = None
food_locations = None
walls_updated = True
food_updated = True
def __new__(cls, **kwargs):
if not hasattr(cls, 'instance'):
cls.instance = super(Gridworld, cls).__new__(cls)
return cls.instance
    def __init__(self, **kwargs):
        """Populate the singleton's configuration from keyword arguments.

        Every setting falls back to a default when absent from ``kwargs``.
        Because the class is a singleton, a second construction is a no-op.
        """
        # If Singleton is already initialized, do nothing
        if hasattr(self, 'num_players'):
            return
        self.log_event = kwargs.get('log_event', lambda x: None)
        # Players
        self.num_players = kwargs.get('max_participants', 3)
        # Rounds
        self.num_rounds = kwargs.get('num_rounds', 1)
        self.time_per_round = kwargs.get('time_per_round', 300)
        # Instructions
        self.instruct = kwargs.get('instruct', True)
        # Grid
        self.columns = kwargs.get('columns', 25)
        self.rows = kwargs.get('rows', 25)
        # The visible window never exceeds the grid itself.
        self.window_columns = kwargs.get('window_columns', min(self.columns, 25))
        self.window_rows = kwargs.get('window_rows', min(self.rows, 25))
        self.block_size = kwargs.get('block_size', 10)
        self.padding = kwargs.get('padding', 1)
        self.chat_visibility_threshold = kwargs.get('chat_visibility_threshold', 0.4)
        self.spatial_chat = kwargs.get('spatial_chat', False)
        self.visibility = kwargs.get('visibility', 40)
        self.visibility_ramp_time = kwargs.get('visibility_ramp_time', 4)
        self.background_animation = kwargs.get('background_animation', True)
        self.player_overlap = kwargs.get('player_overlap', False)
        # Motion
        self.motion_speed_limit = kwargs.get('motion_speed_limit', 8)
        self.motion_auto = kwargs.get('motion_auto', False)
        self.motion_cost = kwargs.get('motion_cost', 0)
        self.motion_tremble_rate = kwargs.get('motion_tremble_rate', 0)
        # Components
        self.show_chatroom = kwargs.get('show_chatroom', False)
        self.show_grid = kwargs.get('show_grid', True)
        # Identity
        self.others_visible = kwargs.get('others_visible', True)
        self.num_colors = kwargs.get('num_colors', 3)
        self.mutable_colors = kwargs.get('mutable_colors', False)
        self.costly_colors = kwargs.get('costly_colors', False)
        self.pseudonyms = kwargs.get('pseudonyms', True)
        self.pseudonyms_locale = kwargs.get('pseudonyms_locale', 'en_US')
        self.pseudonyms_gender = kwargs.get('pseudonyms_gender', None)
        self.contagion = kwargs.get('contagion', 0)
        self.contagion_hierarchy = kwargs.get('contagion_hierarchy', False)
        self.identity_signaling = kwargs.get('identity_signaling', False)
        self.identity_starts_visible = kwargs.get('identity_starts_visible',
                                                  False)
        self.use_identicons = kwargs.get('use_identicons', False)
        # Walls
        self.walls_visible = kwargs.get('walls_visible', True)
        self.walls_density = kwargs.get('walls_density', 0.0)
        self.walls_contiguity = kwargs.get('walls_contiguity', 1.0)
        self.build_walls = kwargs.get('build_walls', False)
        self.wall_building_cost = kwargs.get('wall_building_cost', 0)
        self.wall_locations = {}
        # Payoffs
        self.initial_score = kwargs.get('initial_score', 0)
        self.dollars_per_point = kwargs.get('dollars_per_point', 0.02)
        self.tax = kwargs.get('tax', 0.00)
        self.relative_deprivation = kwargs.get('relative_deprivation', 1)
        self.frequency_dependence = kwargs.get('frequency_dependence', 0)
        self.frequency_dependent_payoff_rate = kwargs.get(
            'frequency_dependent_payoff_rate', 0)
        self.leaderboard_group = kwargs.get('leaderboard_group', False)
        self.leaderboard_individual = kwargs.get('leaderboard_individual', False)
        self.leaderboard_time = kwargs.get('leaderboard_time', 0)
        # Donations
        self.donation_amount = kwargs.get('donation_amount', 0)
        self.donation_multiplier = kwargs.get('donation_multiplier', 1.0)
        self.donation_individual = kwargs.get('donation_individual', False)
        self.donation_group = kwargs.get('donation_group', False)
        self.donation_ingroup = kwargs.get('donation_ingroup', False)
        self.donation_public = kwargs.get('donation_public', False)
        self.intergroup_competition = kwargs.get('intergroup_competition', 1)
        self.intragroup_competition = kwargs.get('intragroup_competition', 1)
        self.score_visible = kwargs.get('score_visible', False)
        self.alternate_consumption_donation = kwargs.get(
            'alternate_consumption_donation', False)
        # Food
        self.num_food = kwargs.get('num_food', 8)
        self.respawn_food = kwargs.get('respawn_food', True)
        self.food_visible = kwargs.get('food_visible', True)
        self.food_reward = kwargs.get('food_reward', 1)
        self.food_pg_multiplier = kwargs.get('food_pg_multiplier', 1)
        self.food_growth_rate = kwargs.get('food_growth_rate', 1.00)
        self.food_maturation_speed = kwargs.get('food_maturation_speed', 1)
        self.food_maturation_threshold = kwargs.get(
            'food_maturation_threshold', 0.0)
        self.food_planting = kwargs.get('food_planting', False)
        self.food_planting_cost = kwargs.get('food_planting_cost', 1)
        self.food_probability_distribution = kwargs.get('food_probability_distribution', 'random')
        self.seasonal_growth_rate = kwargs.get('seasonal_growth_rate', 1)
        # Chat
        self.chat_message_history = []
        # Questionnaire
        self.difi_question = kwargs.get('difi_question', False)
        self.difi_group_label = kwargs.get('difi_group_label', 'Group')
        self.difi_group_image = kwargs.get('difi_group_image', '/static/images/group.jpg')
        self.fun_survey = kwargs.get('fun_survey', False)
        self.pre_difi_question = kwargs.get('pre_difi_question', False)
        self.pre_difi_group_label = kwargs.get('pre_difi_group_label', 'Group')
        self.pre_difi_group_image = kwargs.get('pre_difi_group_image', '/static/images/group.jpg')
        self.leach_survey = kwargs.get('leach_survey', False)
        # Set some variables.
        self.players = {}
        self.food_locations = {}
        self.food_consumed = []
        self.start_timestamp = kwargs.get('start_timestamp', None)
        self.round = 0
        # Per-player share of the public-good value of each food item.
        self.public_good = (
            (self.food_reward * self.food_pg_multiplier) / self.num_players
        )
        if self.contagion_hierarchy:
            # NOTE(review): random.shuffle() needs a mutable sequence; on
            # Python 3 shuffling a range object raises TypeError — this
            # likely needs list(range(self.num_colors)). Confirm target
            # Python version before relying on contagion_hierarchy.
            self.contagion_hierarchy = range(self.num_colors)
            random.shuffle(self.contagion_hierarchy)
        if self.costly_colors:
            # Costs 1, 2, 4, ... assigned to colors in random order.
            self.color_costs = [2**i for i in range(self.num_colors)]
            random.shuffle(self.color_costs)
        # get food spawning probability distribution function and args
        self.food_probability_info = {}
        self.probability_function_args = []
        # The config value may carry extra whitespace-separated arguments,
        # e.g. "gaussian 0.5 0.1" -> name plus positional args.
        parts = self.food_probability_distribution.split()
        if len(parts) > 1:
            self.food_probability_distribution = parts[0]
            self.probability_function_args = parts[1:]
        probability_distribution = "{}_probability_distribution".format(
            self.food_probability_distribution)
        self.food_probability_function = getattr(distributions,
                                                 probability_distribution,
                                                 None)
        if self.food_probability_function is None:
            # Unknown name: log it and fall back to the uniform spawner.
            logger.info(
                "Unknown food probability distribution: {}.".format(
                    self.food_probability_distribution))
            self.food_probability_function = distributions.random_probability_distribution
def can_occupy(self, position):
if self.player_overlap:
return not self.has_wall(position)
return not self.has_player(position) and not self.has_wall(position)
@property
def limited_player_colors(self):
return self.player_colors[:self.num_colors]
@property
def limited_player_color_names(self):
return self.player_color_names[:self.num_colors]
@property
def elapsed_round_time(self):
if self.start_timestamp is None:
return 0
return time.time() - self.start_timestamp
@property
def remaining_round_time(self):
if self.start_timestamp is None:
return 0
raw_remaining = self.time_per_round - self.elapsed_round_time
return max(0, raw_remaining)
@property
def group_donation_enabled(self):
return self.donation_group or self.donation_ingroup
@property
def donation_enabled(self):
return (
(
self.group_donation_enabled or
self.donation_individual or
self.donation_public
) and bool(self.donation_amount)
)
@property
def is_even_round(self):
return bool(self.round % 2)
@property
def donation_active(self):
"""Donation is enabled if:
1. at least one of the donation_individual, donation_group and
donation_public flags is set to True
2. donation_amount to some non-zero value
Further, donation is limited to even-numbered rounds if
alternate_consumption_donation is set to True.
"""
if not self.donation_enabled:
return False
if self.alternate_consumption_donation:
return self.is_even_round
return True
@property
def movement_enabled(self):
"""If we're alternating consumption and donation, Players can only move
during consumption rounds.
"""
if self.alternate_consumption_donation and self.donation_active:
return False
return True
@property
def consumption_active(self):
"""Food consumption is enabled on odd-numbered rounds if
alternate_consumption_donation is set to True.
"""
return not self.alternate_consumption_donation or not self.is_even_round
def players_with_color(self, color_id):
"""Return all the players with the specified color, which is how we
represent group/team membership.
"""
color_id = int(color_id)
return [p for p in self.players.values() if p.color_idx == color_id]
def check_round_completion(self):
if not self.game_started:
return
if not self.remaining_round_time:
self.round += 1
if self.game_over:
return
self.start_timestamp = time.time()
# Delay round for leaderboard display
if self.leaderboard_individual or self.leaderboard_group:
self.start_timestamp += self.leaderboard_time
for player in self.players.values():
player.motion_timestamp = 0
def compute_payoffs(self):
"""Compute payoffs from scores.
A player's payoff in the game can be expressed as the product of four
factors: the grand total number of points earned by all players, the
(softmax) proportion of the total points earned by the player's group,
the (softmax) proportion of the group's points earned by the player,
and the number of dollars per point.
Softmaxing the two proportions implements intragroup and intergroup
competition. When the parameters are 1, payoff is proportional to what
was scored and so there is no extrinsic competition. Increasing the
temperature introduces competition. For example, at 2, a pair of groups
that score in a 2:1 ratio will get payoff in a 4:1 ratio, and therefore
it pays to be in the highest-scoring group. The same logic applies to
intragroup competition: when the temperature is 2, a pair of players
within a group that score in a 2:1 ratio will get payoff in a 4:1
ratio, and therefore it pays to be a group's highest-scoring member.
"""
players = self.players.values()
player_groups = {}
| |
def add_ui_from_string(self, buffer):
if not isinstance(buffer, _basestring):
raise TypeError('buffer must be a string')
length = len(buffer)
return Gtk.UIManager.add_ui_from_string(self, buffer, length)
    def insert_action_group(self, buffer, length=-1):
        # Pure delegation. The second argument is the insertion position
        # (presumably -1 appends — confirm against the GtkUIManager docs);
        # the parameter names are kept for PyGTK compatibility.
        return Gtk.UIManager.insert_action_group(self, buffer, length)
UIManager = override(UIManager)
# Re-export the override as part of this module's public API.
__all__.append('UIManager')
class ComboBox(Gtk.ComboBox, Container):
    """Override returning the active iter directly instead of (bool, iter)."""

    def get_active_iter(self):
        """TreeIter of the active row, or None when nothing is active."""
        success, aiter = super(ComboBox, self).get_active_iter()
        return aiter if success else None

ComboBox = override(ComboBox)
__all__.append('ComboBox')
class Box(Gtk.Box):
    """Override accepting PyGTK-style ``homogeneous``/``spacing`` ctor args."""

    def __init__(self, homogeneous=False, spacing=0, **kwds):
        super(Box, self).__init__(**kwds)
        # Route the two convenience arguments through their setters.
        self.set_homogeneous(homogeneous)
        self.set_spacing(spacing)

Box = override(Box)
__all__.append('Box')
class SizeGroup(Gtk.SizeGroup):
    """Override defaulting the grouping mode to VERTICAL (PyGTK compat)."""

    def __init__(self, mode=Gtk.SizeGroupMode.VERTICAL):
        super(SizeGroup, self).__init__(mode=mode)

SizeGroup = override(SizeGroup)
__all__.append('SizeGroup')
class MenuItem(Gtk.MenuItem):
    """Override allowing the label as a positional constructor argument."""

    def __init__(self, label=None, **kwds):
        super(MenuItem, self).__init__(label=label, **kwds)

MenuItem = override(MenuItem)
__all__.append('MenuItem')
class Builder(Gtk.Builder):
    """Override adding PyGTK-style signal autoconnection and string loading."""

    def connect_signals(self, obj_or_map):
        """Connect signals named in the UI definition to Python handlers.

        ``obj_or_map`` is either a mapping of handler-name -> callable, or
        an object whose attributes are looked up by handler name.

        Raises:
            AttributeError: if a named handler cannot be found.
            TypeError: if the resolved handler is not callable.
        """
        def _full_callback(builder, gobj, signal_name, handler_name, connect_obj, flags, obj_or_map):
            # Resolve the handler from the mapping or by attribute lookup.
            handler = None
            if isinstance(obj_or_map, dict):
                handler = obj_or_map.get(handler_name, None)
            else:
                handler = getattr(obj_or_map, handler_name, None)
            if handler is None:
                raise AttributeError('Handler %s not found' % handler_name)
            if not _callable(handler):
                raise TypeError('Handler %s is not a method or function' % handler_name)
            # Pick the connect variant matching the AFTER flag and the
            # optional connect-object from the UI definition.
            after = flags & GObject.ConnectFlags.AFTER
            if connect_obj is not None:
                if after:
                    gobj.connect_object_after(signal_name, handler, connect_obj)
                else:
                    gobj.connect_object(signal_name, handler, connect_obj)
            else:
                if after:
                    gobj.connect_after(signal_name, handler)
                else:
                    gobj.connect(signal_name, handler)
        self.connect_signals_full(_full_callback, obj_or_map)

    def add_from_string(self, buffer):
        """Parse a UI definition from *buffer* (must be a string)."""
        if not isinstance(buffer, _basestring):
            raise TypeError('buffer must be a string')
        length = len(buffer)
        return Gtk.Builder.add_from_string(self, buffer, length)

    def add_objects_from_string(self, buffer, object_ids):
        """Build only the objects listed in *object_ids* from *buffer*."""
        if not isinstance(buffer, _basestring):
            raise TypeError('buffer must be a string')
        length = len(buffer)
        return Gtk.Builder.add_objects_from_string(self, buffer, length, object_ids)
Builder = override(Builder)
__all__.append('Builder')
# NOTE: This must come before any other Window/Dialog subclassing, to ensure
# that we have a correct inheritance hierarchy.
class Window(Gtk.Window):
    """Override defaulting the window type to TOPLEVEL (PyGTK compat)."""

    def __init__(self, type=Gtk.WindowType.TOPLEVEL, **kwds):
        # Call the base initializer directly; other dialog overrides below
        # rely on this class being defined first (see the NOTE above).
        Gtk.Window.__init__(self, type=type, **kwds)

Window = override(Window)
__all__.append('Window')
class Dialog(Gtk.Dialog, Container):
    """Override with a PyGTK-compatible constructor.

    Accepts ``title``, ``parent``, ``flags`` and ``buttons`` (text/response
    pairs) and applies them via the corresponding setters after construction.
    """

    def __init__(self,
                 title=None,
                 parent=None,
                 flags=0,
                 buttons=None,
                 _buttons_property=None,
                 **kwds):
        # 'buttons' is overloaded by PyGTK (text/response pairs), which
        # clashes with the GObject 'buttons' property some subclasses need;
        # _buttons_property routes a value to the real property instead.
        if _buttons_property is not None:
            kwds['buttons'] = _buttons_property
        Gtk.Dialog.__init__(self, **kwds)
        if title:
            self.set_title(title)
        if parent:
            self.set_transient_for(parent)
        if flags & Gtk.DialogFlags.MODAL:
            self.set_modal(True)
        if flags & Gtk.DialogFlags.DESTROY_WITH_PARENT:
            self.set_destroy_with_parent(True)
        # NO_SEPARATOR has been removed from Gtk 3
        if hasattr(Gtk.DialogFlags, "NO_SEPARATOR") and (flags & Gtk.DialogFlags.NO_SEPARATOR):
            self.set_has_separator(False)
            import warnings
            warnings.warn("Gtk.DialogFlags.NO_SEPARATOR has been depricated since Gtk+-3.0", DeprecationWarning)
        if buttons is not None:
            self.add_buttons(*buttons)

    # PyGTK-style attribute access for the dialog's standard areas.
    action_area = property(lambda dialog: dialog.get_action_area())
    vbox = property(lambda dialog: dialog.get_content_area())

    def add_buttons(self, *args):
        """Add several buttons from (text-or-stock-id, response-id) pairs.

        Equivalent to calling ``add_button`` repeatedly. For example::

            dialog.add_buttons(Gtk.STOCK_OPEN, 42, "Close", Gtk.ResponseType.CLOSE)

        Raises:
            TypeError: if an odd number of arguments is passed.
        """
        def _button(b):
            while b:
                t, r = b[0:2]
                b = b[2:]
                yield t, r
        try:
            for text, response in _button(args):
                self.add_button(text, response)
        except (IndexError, ValueError):
            # BUG FIX: unpacking the 1-element slice left by an odd argument
            # count raises ValueError, not IndexError, so the intended
            # TypeError was never produced before.
            raise TypeError('Must pass an even number of arguments')

Dialog = override(Dialog)
__all__.append('Dialog')
class MessageDialog(Gtk.MessageDialog, Dialog):
    """Override with a PyGTK-compatible constructor and secondary-text helpers."""

    def __init__(self,
                 parent=None,
                 flags=0,
                 message_type=Gtk.MessageType.INFO,
                 buttons=Gtk.ButtonsType.NONE,
                 message_format=None,
                 **kwds):
        # Idiom fix: compare to None with 'is not', not '!='.
        if message_format is not None:
            kwds['text'] = message_format
        # type keyword is used for backwards compat with PyGTK
        if 'type' in kwds:
            import warnings
            warnings.warn("The use of the keyword type as a parameter of the Gtk.MessageDialog constructor has been depricated. Please use message_type instead.", DeprecationWarning)
            message_type = kwds.pop('type')
        Gtk.MessageDialog.__init__(self,
                                   _buttons_property=buttons,
                                   message_type=message_type,
                                   parent=parent,
                                   flags=flags,
                                   **kwds)

    def format_secondary_text(self, message_format):
        """Set plain (non-markup) secondary text."""
        self.set_property('secondary-use-markup', False)
        self.set_property('secondary-text', message_format)

    def format_secondary_markup(self, message_format):
        """Set secondary text interpreted as Pango markup."""
        self.set_property('secondary-use-markup', True)
        self.set_property('secondary-text', message_format)

MessageDialog = override(MessageDialog)
__all__.append('MessageDialog')
class AboutDialog(Gtk.AboutDialog):
    """Override kept for constructor parity with the other dialog overrides."""

    def __init__(self, **kwds):
        super(AboutDialog, self).__init__(**kwds)

AboutDialog = override(AboutDialog)
__all__.append('AboutDialog')
class ColorSelectionDialog(Gtk.ColorSelectionDialog):
    """Override allowing the title as a positional constructor argument."""

    def __init__(self, title=None, **kwds):
        super(ColorSelectionDialog, self).__init__(title=title, **kwds)

ColorSelectionDialog = override(ColorSelectionDialog)
__all__.append('ColorSelectionDialog')
class FileChooserDialog(Gtk.FileChooserDialog):
    """Override with a PyGTK-compatible constructor signature."""

    def __init__(self,
                 title=None,
                 parent=None,
                 action=Gtk.FileChooserAction.OPEN,
                 buttons=None,
                 **kwds):
        # Forward everything as keywords; the Dialog override handles
        # title/parent/buttons.
        super(FileChooserDialog, self).__init__(action=action,
                                                title=title,
                                                parent=parent,
                                                buttons=buttons,
                                                **kwds)

FileChooserDialog = override(FileChooserDialog)
__all__.append('FileChooserDialog')
class FontSelectionDialog(Gtk.FontSelectionDialog):
    """Override allowing the title as a positional constructor argument."""

    def __init__(self, title=None, **kwds):
        super(FontSelectionDialog, self).__init__(title=title, **kwds)

FontSelectionDialog = override(FontSelectionDialog)
__all__.append('FontSelectionDialog')
class RecentChooserDialog(Gtk.RecentChooserDialog):
    """Override mapping PyGTK's ``manager`` arg to ``recent_manager``."""

    def __init__(self,
                 title=None,
                 parent=None,
                 manager=None,
                 buttons=None,
                 **kwds):
        super(RecentChooserDialog, self).__init__(recent_manager=manager,
                                                  title=title,
                                                  parent=parent,
                                                  buttons=buttons,
                                                  **kwds)

RecentChooserDialog = override(RecentChooserDialog)
__all__.append('RecentChooserDialog')
class IconView(Gtk.IconView):
    """Override returning bare tuples (or None) instead of (success, ...)."""

    def __init__(self, model=None, **kwds):
        super(IconView, self).__init__(model=model, **kwds)

    def get_item_at_pos(self, x, y):
        """(path, cell) at the given coords, or None when nothing is there."""
        success, path, cell = super(IconView, self).get_item_at_pos(x, y)
        return (path, cell,) if success else None

    def get_visible_range(self):
        """(start_path, end_path) of the visible rows, or None."""
        success, first, last = super(IconView, self).get_visible_range()
        return (first, last,) if success else None

    def get_dest_item_at_pos(self, drag_x, drag_y):
        """(path, drop_position) for a DnD drop at the coords, or None."""
        success, path, pos = super(IconView, self).get_dest_item_at_pos(drag_x, drag_y)
        return (path, pos) if success else None

IconView = override(IconView)
__all__.append('IconView')
class ToolButton(Gtk.ToolButton):
    """Override allowing the stock id as a positional constructor argument."""

    def __init__(self, stock_id=None, **kwds):
        super(ToolButton, self).__init__(stock_id=stock_id, **kwds)

ToolButton = override(ToolButton)
__all__.append('ToolButton')
class IMContext(Gtk.IMContext):
    """Override returning (text, cursor_index) or None from get_surrounding."""

    def get_surrounding(self):
        ok, text, cursor = super(IMContext, self).get_surrounding()
        return (text, cursor,) if ok else None

IMContext = override(IMContext)
__all__.append('IMContext')
class RecentInfo(Gtk.RecentInfo):
    """Override returning (app_exec, count, time) or None."""

    def get_application_info(self, app_name):
        # 'stamp' avoids shadowing the time module in this scope.
        ok, app_exec, count, stamp = super(RecentInfo, self).get_application_info(app_name)
        return (app_exec, count, stamp,) if ok else None

RecentInfo = override(RecentInfo)
__all__.append('RecentInfo')
class TextBuffer(Gtk.TextBuffer):
    """Override with PyGTK-compatible helpers for tags, marks and insertion."""

    def _get_or_create_tag_table(self):
        # Lazily create a tag table so create_tag always has one to add to.
        table = self.get_tag_table()
        if table is None:
            table = Gtk.TextTagTable()
            self.set_tag_table(table)
        return table

    def create_tag(self, tag_name=None, **properties):
        """Create a tag and add it to this buffer's tag table.

        ``tag_name`` may be None for an anonymous tag; otherwise the name
        must not already exist in the table. Properties are passed as
        keywords (e.g. ``foreground='DodgerBlue', weight=Pango.Weight.BOLD``).
        Returns the new tag, owned by the buffer's tag table.
        """
        tag = Gtk.TextTag(name=tag_name, **properties)
        self._get_or_create_tag_table().add(tag)
        return tag

    def create_mark(self, mark_name, where, left_gravity=False):
        """Create a mark named *mark_name* at *where*; left_gravity defaults False."""
        return Gtk.TextBuffer.create_mark(self, mark_name, where, left_gravity)

    def set_text(self, text, length=-1):
        """Replace the buffer contents; length -1 means use the whole string."""
        Gtk.TextBuffer.set_text(self, text, length)

    def insert(self, iter, text, length=-1):
        """Insert *text* at *iter*; raises TypeError for non-strings."""
        if not isinstance(text, _basestring):
            raise TypeError('text must be a string, not %s' % type(text))
        Gtk.TextBuffer.insert(self, iter, text, length)

    def insert_with_tags(self, iter, text, *tags):
        """Insert *text* at *iter*, then apply each tag to the inserted span."""
        start_offset = iter.get_offset()
        self.insert(iter, text)
        if not tags:
            return
        start = self.get_iter_at_offset(start_offset)
        for tag in tags:
            self.apply_tag(tag, start, iter)

    def insert_with_tags_by_name(self, iter, text, *tags):
        """Like insert_with_tags, but tags are looked up by name.

        Raises ValueError for an unknown tag name.

        BUG FIX: previously an empty ``tags`` returned without inserting
        anything, unlike insert_with_tags (and the GTK+ C API), which still
        inserts the text. Now the text is always inserted.
        """
        tag_objs = []
        for tag in tags:
            tag_obj = self.get_tag_table().lookup(tag)
            if not tag_obj:
                raise ValueError('unknown text tag: %s' % tag)
            tag_objs.append(tag_obj)
        self.insert_with_tags(iter, text, *tag_objs)

    def insert_at_cursor(self, text, length=-1):
        """Insert *text* at the cursor; raises TypeError for non-strings."""
        if not isinstance(text, _basestring):
            raise TypeError('text must be a string, not %s' % type(text))
        Gtk.TextBuffer.insert_at_cursor(self, text, length)

    def get_selection_bounds(self):
        """(start, end) iters of the selection, or () when nothing is selected."""
        success, start, end = super(TextBuffer, self).get_selection_bounds()
        if success:
            return (start, end)
        else:
            return ()

TextBuffer = override(TextBuffer)
__all__.append('TextBuffer')
class TextIter(Gtk.TextIter):
    """Override returning match tuples (or None) and defaulting tag args."""

    def forward_search(self, string, flags, limit):
        """(match_start, match_end) of the next occurrence, or None."""
        found, m_start, m_end = super(TextIter, self).forward_search(string,
                                                                     flags, limit)
        return (m_start, m_end) if found else None

    def backward_search(self, string, flags, limit):
        """(match_start, match_end) of the previous occurrence, or None."""
        found, m_start, m_end = super(TextIter, self).backward_search(string,
                                                                      flags, limit)
        return (m_start, m_end) if found else None

    def begins_tag(self, tag=None):
        """True if a range tagged *tag* (any tag when None) starts here."""
        return super(TextIter, self).begins_tag(tag)

    def ends_tag(self, tag=None):
        """True if a range tagged *tag* (any tag when None) ends here."""
        return super(TextIter, self).ends_tag(tag)

    def toggles_tag(self, tag=None):
        """True if *tag* (any tag when None) is toggled on or off here."""
        return super(TextIter, self).toggles_tag(tag)

TextIter = override(TextIter)
__all__.append('TextIter')
class TreeModel(Gtk.TreeModel):
    def __len__(self):
        # Number of top-level rows (children of the invisible root, i.e.
        # iter_n_children with a None parent).
        return self.iter_n_children(None)
def __bool__(self):
return True
# alias for Python 2.x object protocol
__nonzero__ = __bool__
def _getiter(self, key):
if isinstance(key, Gtk.TreeIter):
return key
elif isinstance(key, int) and key < 0:
index = len(self) + key
if index < 0:
raise IndexError("row index is out of bounds: %d" % key)
try:
aiter = self.get_iter(index)
except ValueError:
raise IndexError("could not find tree path '%s'" % key)
return aiter
else:
try:
aiter = self.get_iter(key)
except ValueError:
raise IndexError("could not find tree path '%s'" % key)
return aiter
def __getitem__(self, key):
aiter = self._getiter(key)
return TreeModelRow(self, aiter)
def __setitem__(self, key, value):
row = self[key]
self.set_row(row.iter, value)
def __delitem__(self, key):
aiter = self._getiter(key)
self.remove(aiter)
def __iter__(self):
return TreeModelRowIter(self, self.get_iter_first())
def get_iter(self, path):
if not isinstance(path, Gtk.TreePath):
path = TreePath(path)
success, aiter = super(TreeModel, self).get_iter(path)
if not success:
raise ValueError("invalid tree path '%s'" % path)
return aiter
def get_iter_first(self):
success, aiter = super(TreeModel, self).get_iter_first()
if success:
return aiter
def get_iter_from_string(self, path_string):
success, aiter = super(TreeModel, self).get_iter_from_string(path_string)
if not success:
raise ValueError("invalid tree path '%s'" % path_string)
return aiter
def iter_next(self, aiter):
next_iter = aiter.copy()
success = super(TreeModel, self).iter_next(next_iter)
if success:
return next_iter
def iter_previous(self, aiter):
prev_iter = aiter.copy()
success = super(TreeModel, self).iter_previous(prev_iter)
if success:
return prev_iter
def iter_children(self, aiter):
success, child_iter = super(TreeModel, self).iter_children(aiter)
if success:
return child_iter
def iter_nth_child(self, parent, n):
success, child_iter = super(TreeModel, self).iter_nth_child(parent, n)
if success:
return child_iter
def iter_parent(self, aiter):
success, parent_iter = super(TreeModel, self).iter_parent(aiter)
if success:
return parent_iter
def _convert_row(self, row):
# TODO: Accept a dictionary for row
# model.append(None,{COLUMN_ICON: icon, COLUMN_NAME: name})
if isinstance(row, str):
raise TypeError('Expected a list or tuple, but got | |
from __future__ import division
import json
import os
import shutil
import numpy as np
import torch, cv2
from random import choice
from torch.utils.data import Dataset
import json
from PIL import Image
import random
from utils.image import _palette
def all_to_onehot(masks, labels):
if len(masks.shape) == 3:
Ms = np.zeros((len(labels), masks.shape[0], masks.shape[1], masks.shape[2]), dtype=np.uint8)
else:
Ms = np.zeros((len(labels), masks.shape[0], masks.shape[1]), dtype=np.uint8)
for k, l in enumerate(labels):
Ms[k] = (masks == l).astype(np.uint8)
return Ms
class VOS_Train(Dataset):
    """Base training dataset for video object segmentation.

    Each sample combines, from a single sequence: one reference frame, one
    "previous" frame, and ``curr_len`` "current" frames sampled with random
    temporal gaps. Sampling is retried (up to 5 times) until every object id
    seen in the prev/curr frames also appears in the reference frame.
    """
    def __init__(self,
                 image_root,
                 label_root,
                 imglistdic,
                 transform=None,
                 rgb=False,
                 repeat_time=1,
                 rand_gap=3,
                 curr_len=3,
                 rand_reverse=True
                 ):
        # image_root/label_root: directories with one sub-folder per sequence.
        # imglistdic: {seq_name: (image_filenames, label_filenames)}.
        # rgb: convert BGR (cv2 default) to RGB when loading images.
        # repeat_time: multiplies the nominal dataset length.
        # rand_gap: maximum random frame gap between consecutive samples.
        # curr_len: number of "current" frames per sample.
        # rand_reverse: randomly reverse sequence order as augmentation.
        self.image_root = image_root
        self.label_root = label_root
        self.rand_gap = rand_gap
        self.curr_len = curr_len
        self.rand_reverse = rand_reverse
        self.repeat_time = repeat_time
        self.transform = transform
        self.rgb = rgb
        self.imglistdic = imglistdic
        self.seqs = list(self.imglistdic.keys())
        print('Video num: {}'.format(len(self.seqs)))
    def __len__(self):
        # Nominal length: every sequence repeated repeat_time times.
        return int(len(self.seqs) * self.repeat_time)
    def reverse_seq(self, imagelist, lablist):
        # With probability 0.5, play the sequence backwards (augmentation).
        if np.random.randint(2) == 1:
            imagelist = imagelist[::-1]
            lablist = lablist[::-1]
        return imagelist, lablist
    def get_ref_index(self, seqname, lablist, objs, min_fg_pixels=200, max_try=5):
        # Sample a reference frame whose objects are a subset of *objs* and
        # which has enough foreground; falls back to the last try's index.
        for _ in range(max_try):
            ref_index = np.random.randint(len(lablist))
            ref_label = Image.open(os.path.join(self.label_root, seqname, lablist[ref_index]))
            ref_label = np.array(ref_label, dtype=np.uint8)
            ref_objs = list(np.unique(ref_label))
            is_consistent = True
            for obj in ref_objs:
                if obj == 0:
                    continue
                if obj not in objs:
                    is_consistent = False
            xs, ys = np.nonzero(ref_label)
            if len(xs) > min_fg_pixels and is_consistent:
                break
        return ref_index
    def get_ref_index_v2(self, seqname, lablist, min_fg_pixels=200, max_try=5):
        # Sample a reference frame with more than min_fg_pixels foreground
        # pixels; after max_try failures the last sampled index is returned.
        for _ in range(max_try):
            ref_index = np.random.randint(len(lablist))
            ref_label = Image.open(os.path.join(self.label_root, seqname, lablist[ref_index]))
            ref_label = np.array(ref_label, dtype=np.uint8)
            xs, ys = np.nonzero(ref_label)
            if len(xs) > min_fg_pixels:
                break
        return ref_index
    def get_curr_gaps(self):
        # Draw curr_len random gaps in [1, rand_gap]; also return their sum.
        curr_gaps = []
        total_gap = 0
        for _ in range(self.curr_len):
            gap = int(np.random.randint(self.rand_gap) + 1)
            total_gap += gap
            curr_gaps.append(gap)
        return curr_gaps, total_gap
    def get_prev_index(self, lablist, total_gap):
        # Pick a start index that leaves room for total_gap frames after it.
        search_range = len(lablist) - total_gap
        if search_range > 1:
            prev_index = np.random.randint(search_range)
        else:
            prev_index = 0
        return prev_index
    def check_index(self, total_len, index, allow_reflect=True):
        # Clamp *index* into [0, total_len); with allow_reflect the index is
        # mirrored at the boundaries (recursively) instead of clamped.
        if total_len <= 1:
            return 0
        if index < 0:
            if allow_reflect:
                index = -index
                index = self.check_index(total_len, index, True)
            else:
                index = 0
        elif index >= total_len:
            if allow_reflect:
                index = 2 * (total_len - 1) - index
                index = self.check_index(total_len, index, True)
            else:
                index = total_len - 1
        return index
    def get_curr_indices(self, lablist, prev_index, gaps):
        # Walk forward from prev_index by each gap, reflecting at boundaries.
        total_len = len(lablist)
        curr_indices = []
        now_index = prev_index
        for gap in gaps:
            now_index += gap
            curr_indices.append(self.check_index(total_len, now_index))
        return curr_indices
    def get_image_label(self, seqname, imagelist, lablist, index):
        # Load one (image, label) pair; image as float32 HxWxC, label as uint8.
        image = cv2.imread(os.path.join(self.image_root, seqname, imagelist[index]))
        image = np.array(image, dtype=np.float32)
        if self.rgb:
            # cv2 loads BGR; reorder channels to RGB.
            image = image[:, :, [2, 1, 0]]
        label = Image.open(os.path.join(self.label_root, seqname, lablist[index]))
        label = np.array(label, dtype=np.uint8)
        return image, label
    def __getitem__(self, idx):
        # Sequences repeat when repeat_time > 1.
        idx = idx % len(self.seqs)
        seqname = self.seqs[idx]
        imagelist, lablist = self.imglistdic[seqname]
        frame_num = len(imagelist)
        if self.rand_reverse:
            imagelist, lablist = self.reverse_seq(imagelist, lablist)
        is_consistent = False
        max_try = 5
        try_step = 0
        # Resample until the reference frame contains every object present in
        # the prev/curr frames; after max_try tries the last sample is kept
        # even if inconsistent.
        while(is_consistent == False and try_step < max_try):
            try_step += 1
            # get prev frame
            curr_gaps, total_gap = self.get_curr_gaps()
            prev_index = self.get_prev_index(lablist, total_gap)
            prev_image, prev_label = self.get_image_label(seqname, imagelist, lablist, prev_index)
            prev_objs = list(np.unique(prev_label))
            # get curr frames
            curr_indices = self.get_curr_indices(lablist, prev_index, curr_gaps)
            curr_images, curr_labels, curr_objs = [], [], []
            for curr_index in curr_indices:
                curr_image, curr_label = self.get_image_label(seqname, imagelist, lablist, curr_index)
                c_objs = list(np.unique(curr_label))
                curr_images.append(curr_image)
                curr_labels.append(curr_label)
                curr_objs.extend(c_objs)
            objs = list(np.unique(prev_objs + curr_objs))
            # get ref frame
            ref_index = self.get_ref_index_v2(seqname, lablist)
            ref_image, ref_label = self.get_image_label(seqname, imagelist, lablist, ref_index)
            ref_objs = list(np.unique(ref_label))
            is_consistent = True
            for obj in objs:
                if obj == 0:
                    continue
                if obj not in ref_objs:
                    is_consistent = False
                    break
        # get meta info: largest object id present in the reference frame.
        obj_num = list(np.sort(ref_objs))[-1]
        sample = {'ref_img':ref_image, 'prev_img':prev_image, 'curr_img':curr_images,
                  'ref_label':ref_label,'prev_label':prev_label,'curr_label':curr_labels}
        sample['meta'] = {'seq_name':seqname, 'frame_num':frame_num, 'obj_num':obj_num}
        if self.transform is not None:
            sample = self.transform(sample)
        return sample
class DAVIS2017_Train(VOS_Train):
    """DAVIS training dataset.

    Resolves image/annotation roots for the requested resolution (falling back
    to 480p when the Full-Resolution folder is absent), reads the sequence
    names of the chosen split(s), and hands per-sequence sorted file lists to
    VOS_Train.
    """
    def __init__(self,
                 split=None,
                 root='./DAVIS',
                 transform=None,
                 rgb=False,
                 repeat_time=1,
                 full_resolution=True,
                 year=2017,
                 rand_gap=3,
                 curr_len=3,
                 rand_reverse=True
                 ):
        # The default used to be the mutable literal ['train']; use None to
        # avoid sharing a single list object across all instances.
        if split is None:
            split = ['train']
        if full_resolution:
            resolution = 'Full-Resolution'
            if not os.path.exists(os.path.join(root, 'JPEGImages', resolution)):
                print('No Full-Resolution, use 480p instead.')
                resolution = '480p'
        else:
            resolution = '480p'
        image_root = os.path.join(root, 'JPEGImages', resolution)
        label_root = os.path.join(root, 'Annotations', resolution)
        # Collect sequence names from every requested split file.
        seq_names = []
        for spt in split:
            with open(os.path.join(root, 'ImageSets', str(year), spt + '.txt')) as f:
                seq_names.extend([line.strip() for line in f.readlines()])
        # Map each sequence to its sorted frame / annotation file lists.
        imglistdic = {}
        for seq_name in seq_names:
            images = list(np.sort(os.listdir(os.path.join(image_root, seq_name))))
            labels = list(np.sort(os.listdir(os.path.join(label_root, seq_name))))
            imglistdic[seq_name] = (images, labels)
        super(DAVIS2017_Train, self).__init__(
            image_root,
            label_root,
            imglistdic,
            transform,
            rgb,
            repeat_time,
            rand_gap,
            curr_len,
            rand_reverse)
class YOUTUBE_VOS_Train(VOS_Train):
    """YouTube-VOS training dataset, built from the split's meta.json index."""
    def __init__(self,
                 root='./train',
                 transform=None,
                 rgb=False,
                 rand_gap=3,
                 curr_len=3,
                 rand_reverse=True
                 ):
        image_root = os.path.join(root, 'JPEGImages')
        label_root = os.path.join(root, 'Annotations')
        self.seq_list_file = os.path.join(root, 'meta.json')
        self._check_preprocess()
        imglistdic = {}
        for seq_name in list(self.ann_f.keys()):
            obj_dict = self.ann_f[seq_name]['objects']
            images = []
            labels = []
            for obj_n in list(obj_dict.keys()):
                frames = list(obj_dict[obj_n]["frames"])
                # Objects visible in fewer than two frames cannot be tracked.
                if len(frames) < 2:
                    print("Short object: " + seq_name + '-' + obj_n)
                    continue
                images += [frame + '.jpg' for frame in frames]
                labels += [frame + '.png' for frame in frames]
            images = np.sort(np.unique(images))
            labels = np.sort(np.unique(labels))
            if len(images) < 2:
                print("Short video: " + seq_name)
                continue
            imglistdic[seq_name] = (images, labels)
        super(YOUTUBE_VOS_Train, self).__init__(
            image_root,
            label_root,
            imglistdic,
            transform,
            rgb,
            1,
            rand_gap,
            curr_len,
            rand_reverse)
    def _check_preprocess(self):
        """Load meta.json into self.ann_f; return False when the file is missing."""
        if not os.path.isfile(self.seq_list_file):
            print('No such file: {}.'.format(self.seq_list_file))
            return False
        self.ann_f = json.load(open(self.seq_list_file, 'r'))['videos']
        return True
class TEST(Dataset):
    """Synthetic dataset of constant black images and all-ones labels,
    useful for smoke-testing a training pipeline without real data."""
    def __init__(self,
                 curr_len=3,
                 obj_num=3,
                 transform=None,
                 ):
        self.curr_len = curr_len
        self.obj_num = obj_num
        self.transform = transform
    def __len__(self):
        return 3000
    def __getitem__(self, idx):
        img = np.zeros((800, 800, 3), dtype=np.float32)
        label = np.ones((800, 800), dtype=np.uint8)
        sample = {
            'ref_img': img,
            'prev_img': img,
            'curr_img': [img] * self.curr_len,
            'ref_label': label,
            'prev_label': label,
            'curr_label': [label] * self.curr_len,
        }
        sample['meta'] = {'seq_name': 'test', 'frame_num': 100, 'obj_num': self.obj_num}
        if self.transform is not None:
            sample = self.transform(sample)
        return sample
class _EVAL_TEST(Dataset):
    """One synthetic evaluation sequence: 10 black frames; only frame 0
    carries a (constant) ground-truth label."""
    def __init__(self, transform, seq_name):
        self.seq_name = seq_name
        self.num_frame = 10
        self.transform = transform
    def __len__(self):
        return self.num_frame
    def __getitem__(self, idx):
        current_frame_obj_num = 2
        height, width = 400, 400
        img_name = 'test{}.jpg'.format(idx)
        current_img = np.zeros((height, width, 3), dtype=np.float32)
        if idx == 0:
            current_label = np.full((height, width), current_frame_obj_num, dtype=np.uint8)
            sample = {'current_img': current_img, 'current_label': current_label}
        else:
            sample = {'current_img': current_img}
        sample['meta'] = {'seq_name': self.seq_name, 'frame_num': self.num_frame,
                          'obj_num': current_frame_obj_num, 'current_name': img_name,
                          'height': height, 'width': width, 'flip': False}
        if self.transform is not None:
            sample = self.transform(sample)
        return sample
class EVAL_TEST(object):
    """Collection of three synthetic evaluation sequences ('test1'..'test3');
    indexing creates the per-sequence result folder and returns its dataset."""
    def __init__(self, transform=None, result_root=None):
        self.transform = transform
        self.result_root = result_root
        self.seqs = ['test1', 'test2', 'test3']
    def __len__(self):
        return len(self.seqs)
    def __getitem__(self, idx):
        seq_name = self.seqs[idx]
        out_dir = os.path.join(self.result_root, seq_name)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        return _EVAL_TEST(self.transform, seq_name)
class VOS_Test(Dataset):
    def __init__(self, image_root, label_root, seq_name, images, labels, rgb=False, transform=None, single_obj=False, resolution=None):
        # Per-sequence evaluation dataset: scans the sequence's label files to
        # track how many objects have appeared by each frame, loads all masks,
        # and builds one-hot masks plus label id remapping tables (YouTubeVOS
        # ids are not always contiguous).
        self.image_root = image_root
        self.label_root = label_root
        self.seq_name = seq_name
        self.images = images
        self.labels = labels
        self.obj_num = 1
        self.num_frame = len(self.images)
        self.transform = transform
        self.rgb = rgb
        self.single_obj = single_obj
        self.resolution = resolution
        self.obj_nums = []
        self.objs = []
        temp_obj_num = 0
        obj_list_temp=[0]
        objs = []
        masks = []
        info = {}
        info['gt_obj'] = {}
        for img_name in self.images:
            # Records the object count *before* this frame's labels are read.
            self.obj_nums.append(temp_obj_num)
            # NOTE(review): the same list object is appended every iteration,
            # so all entries of self.objs alias one list — confirm intended.
            objs.append(obj_list_temp)
            current_label_name = img_name.split('.')[0] + '.png'
            ### BUG BUG BUG
            # NOTE(review): below, info['gt_obj'] is keyed by the leftover
            # label-loop variable `i`, not the frame index; similar codebases
            # key this by frame index — confirm before relying on it.
            if current_label_name in self.labels:
                current_label = self.read_label(current_label_name)
                if temp_obj_num < np.unique(current_label)[-1]:
                    temp_obj_num = np.unique(current_label)[-1]
                label_list = np.unique(current_label).tolist()
                for i in label_list:
                    if i!=0:
                        if i not in obj_list_temp:
                            obj_list_temp.append(i)
                current_path = os.path.join(self.label_root, self.seq_name, current_label_name)
                masks.append(np.array(Image.open(current_path).convert('P'), dtype=np.uint8))
                this_labels = np.unique(masks[-1])
                this_labels = this_labels[this_labels!=0]
                info['gt_obj'][i] = this_labels
            else:
                # Frames without an annotation get an all-zero placeholder mask.
                masks.append(np.zeros_like(masks[0]))
        self.objs = objs
        masks = np.stack(masks, 0)
        # Construct the forward and backward mapping table for labels
        # this is because YouTubeVOS's labels are sometimes not continuous
        # while we want continuous ones (for one-hot)
        # so we need to maintain a backward mapping table
        labels = np.unique(masks).astype(np.uint8)
        labels = labels[labels!=0]
        info['label_convert'] = {}
        info['label_backward'] = {}
        idx = 1
        for l in labels:
            info['label_convert'][l] = idx
            info['label_backward'][idx] = l
            idx += 1
        masks = all_to_onehot(masks, labels)
        self.masks = masks
        self.info = info
        #print("self.masks.shape",self.masks.shape)
    def __len__(self):
        # One item per frame in the sequence.
        return len(self.images)
def read_image(self, idx):
img_name = self.images[idx]
img_path = os.path.join(self.image_root, self.seq_name, img_name)
img = cv2.imread(img_path)
img = np.array(img, dtype=np.float32)
if self.rgb:
img = img[:, :, [2, 1, 0]]
return img
def read_label(self, label_name):
label_path = os.path.join(self.label_root, self.seq_name, label_name)
label = Image.open(label_path)
label = np.array(label, dtype=np.uint8)
if self.single_obj:
label = (label > 0).astype(np.uint8)
return label
def __getitem__(self, idx):
img_name = self.images[idx]
current_img = self.read_image(idx)
height, width, channels = current_img.shape
if self.resolution is not None:
width = int(np.ceil(float(width) * self.resolution / float(height)))
height = int(self.resolution)
current_label_name = img_name.split('.')[0] + '.png'
obj_num = self.obj_nums[idx]
obj_list = self.objs[idx]
if current_label_name in self.labels:
current_label = | |
import telegram
import sqlite3
import function
import thread_lock
import battlebuild
import random
import air
import drawmap
import traceback
from telegram import ReplyKeyboardRemove, ReplyKeyboardMarkup
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler, MessageHandler, Filters
#TODO some card req not finish
#----------------------Pass-----------------------
# NOTE(review): module-level flag; its consumer is not in this file —
# presumably toggles the "pass" action elsewhere, confirm usage.
pass_ = True
#----------------------Reallocate Resources-----------------------
def r_r(bot, country, session):
    """Run the "Reallocate Resources" action for *country*.

    Announces the action in the group chat, discards from the player's hand
    (US status card 355 lowers the cost from 4 cards to 1), then shows an
    inline keyboard of eligible cards and blocks on a session lock until the
    player answers (no lock is taken when nothing is eligible).
    """
    # Removed leftover debug print ("in r_r").
    db = sqlite3.connect(session.get_db_dir())
    group_chat_id = db.execute("select chatid from game;").fetchall()
    text = "<b>" + function.countryid2name[country] + "</b> uses Reallocate Resources"
    bot.send_message(chat_id = group_chat_id[0][0], text = text, parse_mode=telegram.ParseMode.HTML)
    # US card 355 (played) reduces the hand-discard cost.
    if country == 'us' and db.execute("select location from card where cardid = 355;").fetchall()[0][0] == 'played':
        function.discardhand(bot, country, 1, session)
    else:
        function.discardhand(bot, country, 4, session)
    lock_id = session.add_lock()
    info = r_r_info(bot, country, lock_id, db)
    bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
    # Only wait for the callback when a keyboard was actually shown.
    if info[2] != None:
        session.thread_lock(lock_id)
def r_r_info(bot, country, lock_id, db):
    """Build the (chat_id, text, reply_markup) prompt for Reallocate Resources.

    Offers Build/Battle/Air cards still in the player's deck; when US status
    card 356 is played, cards in the discard piles / played area are offered
    too (labelled "Discard - ..."). reply_markup is None when no card is
    eligible, which tells the caller not to wait on the lock.
    """
    chat_id = db.execute("select playerid from country where id = :country;", {'country':country}).fetchall()
    keyboard = []
    card_list = ['Build Army', 'Build Navy', 'Land Battle', 'Sea Battle', 'Deploy Air Force']
    for card in card_list:
        if country == 'us' and db.execute("select location from card where cardid = 356;").fetchall()[0][0] == 'played' and db.execute("select count(*) from card where name = :card and location in ('discardu', 'discardd', 'played') and control=:country;", {'country':country, 'card':card}).fetchall()[0][0] > 0:
            keyboard.append([InlineKeyboardButton('Discard - ' + card, callback_data="['r_r', '{}', '{}', {}]".format(country, card, lock_id))])
        elif db.execute("select count(*) from card where name = :card and location = 'deck' and control = :country;", {'country':country, 'card':card}).fetchall()[0][0] > 0:
            keyboard.append([InlineKeyboardButton(card, callback_data="['r_r', '{}', '{}', {}]".format(country, card, lock_id))])
    if len(keyboard) > 0:
        if country == 'us' and db.execute("select location from card where cardid = 356;").fetchall()[0][0] == 'played':
            # Typo fix: "discrd" -> "discard".
            text = "Choose a card from your deck and discard pile:"
        else:
            text = "Choose a card from your deck:"
        reply_markup = InlineKeyboardMarkup(keyboard)
    else:
        if country == 'us' and db.execute("select location from card where cardid = 356;").fetchall()[0][0] == 'played':
            text = "You have no Build Army, Build Navy, Land Battle, Sea Battle or Deploy Air Force in your deck and discard pile"
        else:
            text = "You have no Build Army, Build Navy, Land Battle, Sea Battle or Deploy Air Force in your deck"
        reply_markup = None
    return chat_id[0][0], text, reply_markup
def r_r_cb(bot, query, query_list, session):
    # Callback handler for Reallocate Resources.
    # query_list layouts: ['r_r', country, card, lock]             (card picked)
    #                     ['r_r', 'confirm', country, card, lock]  (confirmed)
    #                     ['r_r', 'back', country, lock]           (go back)
    db = sqlite3.connect(session.get_db_dir())
    if query_list[1] == 'confirm':
        bot.delete_message(chat_id= query.message.chat_id, message_id = query.message.message_id)
        # With US card 356 played, prefer taking the card from discard/played;
        # otherwise take the lowest-id matching card from the deck.
        if query_list[2] == 'us' and db.execute("select location from card where cardid = 356;").fetchall()[0][0] == 'played' and db.execute("select count(*) from card where name = :card and location in ('discardu', 'discardd', 'played') and control=:country;", {'country':query_list[2], 'card':query_list[3]}).fetchall()[0][0] > 0:
            card_id = db.execute("select min(cardid) from card where name = :card and location in ('discardu', 'discardd', 'played') and control=:country;", {'country':query_list[2], 'card':query_list[3]}).fetchall()[0][0]
        else:
            card_id = db.execute("select min(cardid) from card where name = :card and location = 'deck' and control=:country;", {'country':query_list[2], 'card':query_list[3]}).fetchall()[0][0]
        function.movecardhand(bot, card_id, db)
        function.shuffledeck(bot, query_list[2], db)
        # Unblock the thread waiting in r_r().
        session.release_lock(query_list[-1])
    elif query_list[1] == 'back':
        # Re-render the original card list prompt.
        info = r_r_info(bot, query_list[2], query_list[3], db)
        bot.edit_message_text(chat_id = query.message.chat_id, message_id = query.message.message_id, text = info[1], reply_markup = info[2])
    else:
        # A card was picked: ask for confirmation.
        text = "You chose " + query_list[2]
        keyboard = [[InlineKeyboardButton('Confirm', callback_data="['r_r', 'confirm', '{}', '{}', {}]".format(query_list[1], query_list[2], query_list[3]))],
                    [InlineKeyboardButton('Back', callback_data="['r_r', 'back', '{}', {}]".format(query_list[1], query_list[3]))]]
        reply_markup = InlineKeyboardMarkup(keyboard)
        bot.edit_message_text(chat_id=query.message.chat_id, message_id=query.message.message_id, text = text, reply_markup = reply_markup)
#---------------------Play Card------------------------
# Maps a card id to the id of the card whose handler implements its effect:
# duplicate cards (e.g. several copies of Build Army) share one cN() function.
# Cards absent from this table use their own id (see play_card).
cardtofunction = {#ge
                  2:1,3:1,4:1,5:1,6:1,
                  8:7,
                  10:9,11:9,12:9,13:9,14:9,15:9,16:9,
                  18:17,
                  22:21,
                  #jp
                  69:68,70:68,71:68,
                  73:72,74:72,75:72,76:72,77:72,78:72,
                  80:79,81:79,
                  83:82,84:82,85:82,
                  #it
                  129:128,130:128,131:128,
                  133:132,134:132,135:132,
                  137:136,138:136,139:136,140:136,
                  142:141,
                  #uk
                  183:182,184:182,185:182,186:182,
                  188:187,189:187,190:187,191:187,192:187,
                  194:193,195:193,196:193,
                  198:197,199:197,200:197,201:197,
                  208:204,
                  215:214,
                  #su
                  247:246,248:246,249:246,250:246,251:246,252:246,253:246,254:246,
                  257:256,258:256,259:256,260:256,261:256,262:256,
                  264:263,
                  #us
                  305:304,306:304,307:304,308:304,
                  310:309,311:309,312:309,313:309,
                  315:314,316:314,317:314,
                  319:318,320:318,321:318,
                  323:322,
                  327:326
                  }
def play_card(bot, cardid, country, session):
    """Mark *cardid* as played, announce it in the group chat, and run its handler.

    Response cards are announced without revealing their name; Status and
    Response cards have no immediate effect function to run.
    """
    db = sqlite3.connect(session.get_db_dir())
    db.execute("update card set location = 'played' where cardid = :card", {'card':cardid})
    db.commit()
    group_chat = db.execute("select chatid from game;").fetchall()
    country_name = db.execute("select name from country where id = :country;", {'country':country}).fetchall()
    card_info = db.execute("select name, type, text from card where cardid = :card;", {'card':cardid}).fetchall()
    if card_info[0][1] == "Response":
        text = country_name[0][0] + " play a <b>Response</b>"
    else:
        text = country_name[0][0] + " play " + card_info[0][1] + " - <b>" + card_info[0][0] +"</b>\n" + card_info[0][2]
    bot.send_message(chat_id = group_chat[0][0], text = text, parse_mode=telegram.ParseMode.HTML)
    if card_info[0][1] not in ("Status", "Response"):
        # Dispatch through module globals instead of eval() on a built string;
        # also avoids shadowing the imported `function` module with a local.
        # Duplicate cards share a handler via the cardtofunction table.
        handler_id = cardtofunction.get(cardid, cardid)
        globals()["c" + str(handler_id)](bot, session)
#---------------------Use Status------------------------
# Status/Bolster card ids whose handler needs extra user input (called as
# cN(bot, handler_id, session) instead of cN(bot, session)).
need_info_status = [42,43,45,50,53,61,63,64,97,98,99,101,106,107,109,110,111,119,120,124,126,167,168,169,170,174,177,227,229,230,231,232,233,234,242,243,244,276,282,284,285,286,287,288,289,290,292,295,300,303,344,346,348,349,350,354,362,363,367,370]
# Status cards usable only once per turn (parked in location 'turn' when used).
once_per_turn_status = [42,43,45,47,50,52,227,229,272,275,276,280,344,348,350,351,353,357]
# Status cards with an activation cost, paid via their cN_cost() function.
need_cost_status = [42,43,44,45,50,161,227,229,275,276,278,279,280,282,284,344,345,346,348,350,353,354,357]
# Bolster cards with an activation cost, paid via their cN_cost() function.
need_cost_bolster = [60,61,63,64,65,66,119,120,121,122,124,126,127,175,177,178,239,240,241,242,243,244,245,296,297,298,300,301,302,303,362,363,366,368,369,370]
def play_status(bot, cardid, country, handler_id, session):
    # Use an already-played Status/Bolster/Response card: announce it, pay any
    # cost, offer opponents a reaction window (send_status_card + lock), and
    # finally run the card's effect unless a reaction cancelled it.
    db = sqlite3.connect(session.get_db_dir())
    card_info = db.execute("select name, type, text from card where cardid = :card;", {'card':cardid}).fetchall()
    group_chat = db.execute("select chatid from game;").fetchall()
    country_name = db.execute("select name from country where id = :country;", {'country':country}).fetchall()
    text = country_name[0][0] + " use " + card_info[0][1] + " - <b>" + card_info[0][0] +"</b>\n" + card_info[0][2]
    bot.send_message(chat_id = group_chat[0][0], text = text, parse_mode=telegram.ParseMode.HTML)
    if card_info[0][1] == 'Response':
        # Responses are consumed immediately; their effect runs below.
        db.execute("update card set location = 'used' where cardid = :card", {'card':cardid})
        db.commit()
    elif card_info[0][1] == 'Bolster':
        if cardid in need_cost_bolster:
            cost_function = "c" + str(cardid) + "_cost(bot, session)"
            eval(cost_function)
        lock_id = session.add_lock()
        db.execute("update card set location = 'used' where cardid = :card", {'card':cardid})
        db.commit()
        import status_handler
        # Blocks (via lock_id) until opponents pass or react.
        status_handler.send_status_card(bot, country, 'Using Bolster', lock_id, session, card_id = cardid)
    else:
        if cardid in need_cost_status:
            cost_function = "c" + str(cardid) + "_cost(bot, session)"
            eval(cost_function)
        lock_id = session.add_lock()
        if cardid in once_per_turn_status:
            # Once-per-turn cards are parked in 'turn' until end of turn.
            db.execute("update card set location = 'turn' where cardid = :card", {'card':cardid})
            db.commit()
        import status_handler
        status_handler.send_status_card(bot, country, 'Using Status', lock_id, session, card_id = cardid)
    # NOTE(review): c239_used / c241_used are module globals set elsewhere
    # (presumably by the su reaction cards 239/241); when either is set, the
    # card's effect is cancelled instead of executed — confirm against the
    # definitions outside this chunk.
    global c241_used, c239_used
    if c241_used or c239_used:
        c239_used = False
        c241_used = False
        text = card_info[0][0] + " cancelled"
        bot.send_message(chat_id = group_chat[0][0], text = text)
    else:
        if cardid in need_info_status:
            function = "c" + str(cardid) + "(bot, {}, session)".format(handler_id)
        else:
            function = "c" + str(cardid) + "(bot, session)"
        eval(function)
#---------------------card------------------------
#------------------c1~6----------------------
def c1(bot, session):
    """Build Army (ge): prompt for a land space to build on, wait for the choice."""
    db = sqlite3.connect(session.get_db_dir())
    lid = session.add_lock()
    land_spaces = function.build_list('ge', db, space_type = 'land')
    msg = battlebuild.build_info(bot, 'ge', land_spaces, 1, lid, session)
    bot.send_message(chat_id = msg[0], text = msg[1], reply_markup = msg[2])
    session.thread_lock(lid)
#------------------c7~8----------------------
def c7(bot, session):
    """Build Navy (ge): prompt for a sea space to build on."""
    db = sqlite3.connect(session.get_db_dir())
    lid = session.add_lock()
    sea_spaces = function.build_list('ge', db, space_type = 'sea')
    msg = battlebuild.build_info(bot, 'ge', sea_spaces, 7, lid, session)
    bot.send_message(chat_id = msg[0], text = msg[1], reply_markup = msg[2])
    session.thread_lock(lid)
#------------------c9~16----------------------
def c9(bot, session):
    """Land Battle (ge): prompt for a land space to fight in."""
    db = sqlite3.connect(session.get_db_dir())
    lid = session.add_lock()
    land_spaces = function.battle_list('ge', db, space_type = 'land')
    msg = battlebuild.battle_info(bot, 'ge', land_spaces, 9, lid, session)
    bot.send_message(chat_id = msg[0], text = msg[1], reply_markup = msg[2])
    session.thread_lock(lid)
#------------------c17~18----------------------
def c17(bot, session):
    """Sea Battle (ge): prompt for a sea space to fight in."""
    db = sqlite3.connect(session.get_db_dir())
    lid = session.add_lock()
    sea_spaces = function.battle_list('ge', db, space_type = 'sea')
    msg = battlebuild.battle_info(bot, 'ge', sea_spaces, 17, lid, session)
    bot.send_message(chat_id = msg[0], text = msg[1], reply_markup = msg[2])
    session.thread_lock(lid)
#------------------c19---------------------
def c19(bot, session):
    """Card 19: when Axis controls neither space 11 nor 20, su discards 5
    cards and ge gains 1 VP."""
    db = sqlite3.connect(session.get_db_dir())
    axis_spaces = set(function.control_side_space_list('Axis', db, space_type = 'all'))
    # Empty intersection == the original isdisjoint() check.
    if not axis_spaces & {11, 20}:
        function.ewdiscard(bot, 19, 'ge', 'su', 5, session)
        function.add_vp(bot, 'ge', 1, db)
#------------------c20---------------------
def c20(bot, session):
    """Card 20: the German player picks an Allied power whose draw deck loses
    its top card; ge then gains 3 VP (see c20_cb)."""
    db = sqlite3.connect(session.get_db_dir())
    lid = session.add_lock()
    ge_player = db.execute("select playerid from country where id = 'ge';").fetchall()
    buttons = [
        [InlineKeyboardButton("United Kingdom", callback_data="['c20', 'uk', {}]".format(lid))],
        [InlineKeyboardButton("Soviet Union", callback_data="['c20', 'su', {}]".format(lid))],
        [InlineKeyboardButton("United States", callback_data="['c20', 'us', {}]".format(lid))]]
    bot.send_message(chat_id = ge_player[0][0],
                     text = "Choose a player to discard the top card of its draw deck:",
                     reply_markup = InlineKeyboardMarkup(buttons))
    session.thread_lock(lid)
def c20_cb(bot, query, query_list, session):
    """Callback for c20: discard one card from the chosen Allied deck, then
    ge gains 3 VP and the waiting c20 thread is released."""
    db = sqlite3.connect(session.get_db_dir())
    bot.delete_message(chat_id = query.message.chat_id, message_id = query.message.message_id)
    target = query_list[1]
    if target in ('uk', 'su', 'us'):
        function.ewdiscard(bot, 20, 'ge', target, 1, session)
    # As in the original flow, the VP is awarded after any of the branches.
    function.add_vp(bot, 'ge', 3, db)
    session.release_lock(query_list[-1])
#------------------c21~22----------------------
def c21(bot, session):
    """Card 21: if space 9 is within distance 1 of ge-controlled spaces,
    uk discards 3 cards and ge gains 1 VP."""
    db = sqlite3.connect(session.get_db_dir())
    reachable = function.within('Axis', function.control_space_list('ge', db), 1, db)
    if 9 in reachable:
        function.ewdiscard(bot, 21, 'ge', 'uk', 3, session)
        function.add_vp(bot, 'ge', 1, db)
#------------------c23----------------------
def c23(bot, session):
    """Card 23: us discards 5 if ge controls space 9 outright, or 3 if space 9
    is merely within distance 1; ge gains 1 VP either way."""
    db = sqlite3.connect(session.get_db_dir())
    controlled = function.control_space_list('ge', db)
    nearby = function.within('Axis', controlled, 1, db)
    if 9 in controlled:
        function.ewdiscard(bot, 23, 'ge', 'us', 5, session)
        function.add_vp(bot, 'ge', 1, db)
    elif 9 in nearby:
        function.ewdiscard(bot, 23, 'ge', 'us', 3, session)
        function.add_vp(bot, 'ge', 1, db)
#------------------c24----------------------
def c24(bot, session):
db = sqlite3.connect(session.get_db_dir())
space_list = function.control_space_list('ge', db)
if 11 in space_list:
| |
# -*- coding: utf-8 -*-
from django import forms
from django.utils.safestring import mark_safe
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Field, Div, HTML
from .models import Candidate, EXPERTISE_CHOICES
from crm.models import Organization
class CandidateAddForm(forms.Form):
    """Public nomination form for Board of Directors candidates.

    Collects nominee details, areas of expertise, the representing member
    institution, and the submitter's details; renders via crispy-forms.
    """
    candidate_first_name = forms.CharField(label="First name")
    candidate_last_name = forms.CharField(label="Last name")
    candidate_job_title = forms.CharField(label="Job title", required=False)
    candidate_email = forms.EmailField(label="E-mail")
    candidate_phone_number = forms.CharField(label="Phone number", required=False)
    reason = forms.CharField(
        widget=forms.Textarea,
        label="Reason for nomination",
        help_text="This may be used in consideration by the Nominating Committee but will not be displayed on the ballot. Candidates will be contacted to provide their own profile information for the ballot.",
    )
    organization = forms.ChoiceField(
        label="Member Institution this candidate represents"
    )
    sponsor_first_name = forms.CharField(label="First name")
    sponsor_last_name = forms.CharField(label="Last name")
    sponsor_email = forms.EmailField(label="E-mail")
    terms = forms.BooleanField(label="I have read the terms", required=True)
    expertise = forms.MultipleChoiceField(
        widget=forms.CheckboxSelectMultiple,
        label="Which of the following areas of expertise does nominee bring to the Board",
        choices=EXPERTISE_CHOICES,
    )
    expertise_other = forms.CharField(
        label='If you chose "Other" as area of expertise, please indicate it here',
        max_length=255,
        required=False,
    )

    def __init__(self, *args, **kwargs):
        super(CandidateAddForm, self).__init__(*args, **kwargs)
        # Organization choices must be queried at form instantiation, not at
        # class definition time, so newly activated members appear.
        self.fields["organization"].choices = [
            (x.id, x.display_name) for x in Organization.active.all()
        ]
        self.helper = FormHelper(self)
        self.helper.form_show_errors = True
        self.helper.layout = Layout(
            Div(
                HTML("<h2>Nominate a Candidate for Board of Directors</h2>"),
                css_class="row",
            ),
            Div(
                HTML("<h3>Nominee Information</h3>"),
                Field("candidate_first_name"),
                Field("candidate_last_name"),
                Field("candidate_job_title"),
                Field("candidate_email"),
                Field("candidate_phone_number"),
                Field("reason"),
                Div(
                    Field("expertise"), Field("expertise_other"), css_class="expertise"
                ),
                Field("organization"),
                css_class="row",
            ),
            Div(
                HTML("<h3>Submitter Information</h3>"),
                Field("sponsor_first_name"),
                Field("sponsor_last_name"),
                Field("sponsor_email"),
                css_class="row",
            ),
            Div(
                HTML(
                    """<h3>Terms and Conditions</h3>
                    <h4>General responsibilities of Board Members</h4>
                    <ul>
                    <li>The Board of Directors is charged with setting the strategic direction of Open Education Global. Board members make high level decisions concerning the mission, outputs, finances and services of the Organization. Board members are expected to act in the best interest of the organization and its members in all deliberations. </li>
                    <li>To fulfil its charge, the Board will hold four meetings a year, two of which will be in person (situation permitting) and two online.</li>
                    <li>The Board member's institution is expected to cover the cost of the Board member's travel to meetings and time that is given to Open Education Global.</li>
                    <li>The Board, or its sub-committees, may decide to conduct some business by conference call between in-person meetings. While the frequency and amount of time required for these calls will depend on the nature of the business being conducted, one might anticipate that the Board itself would not normally meet by phone more than once a month.</li>
                    <li>Likewise, it is anticipated that Board members will serve as liaisons with various standing committees and work groups, and will represent the Organization from time to time at various meetings and/or events.</li>
                    </ul>
                    <p>I AM AWARE THAT BOARD MEMBERS EXPEND CONSIDERABLE NON-REIMBURSED TIME AND MONEY IN THE FULFILLMENT OF THEIR DUTIES. I ATTEST THAT I HAVE THE CONSENT OF THE NOMINEE IN THIS MATTER. I ALSO ATTEST THAT THE NOMINEE IS QUALIFIED AND ABLE TO SERVE IF ELECTED.</p>
                    <p><a href="https://www.oeconsortium.org/wp-content/uploads/2013/07/Bylaws_Open-Education_Consortium_Incorporated_-_March-1-2017.pdf" target="_blank">(See Open Education Global By-Laws Article III for qualification and responsibilities of Board Members).</a></p>
                    """
                ),
                css_class="row terms",
            ),
            Div(Field("terms"), css_class="row"),
        )
        self.helper.layout.append(Submit("Submit", "submit"))

    def clean_expertise_other(self):
        """Require the free-text field when "Other" (choice value "5") is selected."""
        # Fields are cleaned in declaration order, so by now cleaned_data's
        # "expertise" holds the comma-joined string from clean_expertise —
        # unless the expertise field itself failed validation, in which case
        # the key is absent. The `or ""` guard fixes the previous
        # `"5" in None` TypeError in that situation.
        expertise = self.cleaned_data.get("expertise") or ""
        # NOTE(review): substring membership assumes expertise choice values
        # are single characters; verify against EXPERTISE_CHOICES.
        if "5" in expertise and not self.cleaned_data.get("expertise_other"):
            raise forms.ValidationError("This is a required field", code="invalid")
        return self.cleaned_data.get("expertise_other")

    def clean_expertise(self):
        """Store the selected expertise values as a comma-joined string."""
        return ",".join(self.cleaned_data.get("expertise", []))
ACCEPTANCE_CHOICES = ((1, "I ACCEPT this nomination"), (0, "I DECLINE this nomination"))
class CandidateEditForm(forms.ModelForm):
acceptance = forms.BooleanField(
widget=forms.RadioSelect(choices=ACCEPTANCE_CHOICES),
label="Acceptance of nomination",
)
expertise = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple,
label="Which of the following areas of expertise do you bring to the Board",
choices=EXPERTISE_CHOICES,
)
expertise_other = forms.CharField(
label='If you chose "Other" as area of expertise, please indicate it here',
max_length=255,
required=False,
)
agreement_cost = forms.BooleanField(
label="I understand that The Board members institution is expected to cover the cost of the Board member's travel to meetings and time that is given to Open Education Global.",
required=True,
)
agreement_fund = forms.BooleanField(
label="I have verified with my institution that they will fund the costs associated with attending the two annual in person meetings of the OEG Board.",
required=True,
)
    def __init__(self, *args, **kwargs):
        """Build the candidacy-update form for a nominated candidate.

        Pre-populates ``acceptance`` and ``expertise`` from the model
        instance, relabels the model-derived fields, and assembles the
        crispy-forms layout (profile, nominee info, terms, acceptance).
        """
        # NOTE(review): ``instance`` is dereferenced unconditionally here and
        # again in the layout HTML below, so this form cannot be constructed
        # without ``instance=...`` — confirm all callers pass it.
        instance = kwargs.get("instance")
        initial = {}
        if instance.status == "accepted":
            initial["acceptance"] = ACCEPTANCE_CHOICES[0][0]
        if instance.expertise:
            # Stored as a CSV string on the model; the form field needs a list.
            initial["expertise"] = instance.expertise.split(",")
        if initial:
            # NOTE(review): this replaces any caller-supplied ``initial``
            # kwarg — verify that is intended.
            kwargs["initial"] = initial
        super(CandidateEditForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_show_errors = True
        # Friendlier labels than the model-derived defaults.
        self.fields["candidate_first_name"].label = "First name"
        self.fields["candidate_last_name"].label = "Last name"
        self.fields["candidate_job_title"].label = "Job title"
        self.fields["candidate_phone_number"].label = "Phone number"
        self.fields["candidate_email"].label = "Email"
        self.fields["email_alternate"].label = "Alternative e-mail"
        self.fields["organization"].label = "Institution you represent"
        # These model fields are optional on the model but mandatory for an
        # accepted candidacy.
        for field in [
            "biography",
            "vision",
            "ideas",
            "expertise",
            "expertise_expanded",
        ]:
            self.fields[field].required = True
        self.fields[
            "biography"
        ].label = "Candidate Biography <br /><br />Please provide a brief summary of your experience and qualifications <br />This will be displayed on the ballot."
        self.fields[
            "vision"
        ].label = "Candidate Vision and Goals <br /><br />Please provide a brief list of what you hope to accomplish for Open Education Global if you are elected to the board of directors. This will be displayed on the ballot."
        self.fields[
            "ideas"
        ].label = "Ideas for Open Education Global <br /><br />Please provide a brief description of your ideas for Open Education Global in general."
        self.fields[
            "expertise"
        ].label = "Your expertise and skills <br /><br />Please provide a brief description of expertise and skills (e.g. technical knowledge, financial knowledge, influence in public policy)."
        self.fields[
            "external_url"
        ].label = 'External Link <br /><br />You may optionally share a link to an external page such as a CV, blog or social networking profile page. Please include the "http://" portion.'
        self.fields[
            "expertise_expanded"
        ].label = "Please provide a brief description of your expertise and skills with special reference to the areas of expertise check boxes you selected above"
        # Crispy-forms layout: intro, personal profile, nominee info, terms,
        # and the acceptance radio group.
        self.helper.layout = Layout(
            Div(
                HTML("<h2>Update Your Candidacy for Board of Directors</h2>"),
                HTML(
                    u"<p>You have been nominated as a candidate for Open Education Global Board of Directors by {instance.sponsor_first_name} {instance.sponsor_last_name}. <br /> Please review this page and complete any missing information to accept your nomination.</p>".format(
                        instance=instance
                    )
                ),
                css_class="row",
            ),
            Div(
                HTML("<h3>Personal Profile Information</h3>"),
                Field("candidate_first_name"),
                Field("candidate_last_name"),
                Field("candidate_job_title"),
                Field("candidate_email"),
                Field("email_alternate"),
                Field("candidate_phone_number"),
                Field("organization"),
                css_class="row",
            ),
            Div(
                HTML("<h3>Nominee Information</h3>"),
                Field("biography"),
                Field("vision"),
                Field("ideas"),
                Field("expertise"),
                Field("expertise_other"),
                Field("expertise_expanded"),
                Field("external_url"),
                css_class="row",
            ),
            Div(
                HTML(
                    """<h3>Terms and Conditions</h3>
<h4>General responsibilities of Board Members</h4>
<ul>
<li>The Board of Directors is charged with setting the strategic direction of Open Education Global. Board members make high level decisions concerning the mission, outputs, finances and services of the Organization. Board members are expected to act in the best interest of the organization and its members in all deliberations. </li>
<li>To fulfill its charge, the Board will hold four in meetings a year, two of which will be in person and two online. It is important that board members make every attempt to fully participate in all meetings. While substitutions are permitted if necessary, the substitute will not be allowed to vote on the Board member's behalf.</li>
<li>The Board member's institution is expected to cover the cost of the Board member's travel to meetings and time that is given to Open Education Global.</li>
<li>The Board, or its sub-committees, may decide to conduct some business by conference call between in-person meetings. While the frequency and amount of time required for these calls will depend on the nature of the business being conducted, one might anticipate that the Board itself would not normally meet by phone more than once a month.</li>
<li>Likewise, it is anticipated that Board members will serve as liaisons with various standing committees and work groups, and will represent the Organization from time to time at various meetings and/or events.</li>
</ul>
<p>I AM AWARE THAT BOARD MEMBERS EXPEND CONSIDERABLE NON-REIMBURSED TIME AND MONEY IN THE FULFILLMENT OF THEIR DUTIES. I ATTEST THAT I AM QUALIFIED AND ABLE TO SERVE IF ELECTED.</p>
<p><a href="https://www.oeconsortium.org/wp-content/uploads/2013/07/Bylaws_Open-Education_Consortium_Incorporated_-_March-1-2017.pdf" target="_blank">(See Open Education Global By-Laws Article III for qualification and responsibilities of Board Members).</a></p>
                    """
                ),
                Field("agreement_cost"),
                Field("agreement_fund"),
                css_class="row terms",
            ),
            Div(Field("acceptance"), css_class="row"),
        )
        self.helper.layout.append(
            Submit("submit", "Update my Candidacy for Board of Directors")
        )
def save(self, *args, **kwargs):
candidate = | |
<filename>build/management/commands/build_annotation.py
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import connection
from django.utils.text import slugify
from django.db import IntegrityError
from django.db import transaction
from build.management.commands.base_build import Command as BaseBuild
from residue.models import Residue
from residue.functions import *
from protein.models import Protein, ProteinConformation, ProteinSegment, ProteinFamily
from Bio import pairwise2
from Bio.SubsMat import MatrixInfo as matlist
import logging
import os
import sys
import re
import yaml
from datetime import datetime
import time
from collections import OrderedDict
from itertools import islice
from urllib.request import urlopen, quote
import xlrd
import operator
import traceback
import numbers
import datetime
from random import randrange
class Command(BaseBuild):
help = 'Reads source data and creates annotations'
def add_arguments(self, parser):
parser.add_argument('-p', '--proc',
type=int,
action='store',
dest='proc',
default=1,
help='Number of processes to run')
logger = logging.getLogger(__name__)
# source file directory
annotation_source_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'Structural_Annotation.xlsx'])
generic_numbers_source_dir = os.sep.join([settings.DATA_DIR, 'residue_data', 'generic_numbers'])
annotation_source_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'Structural_Annotation.xlsx'])
non_xtal_seg_end_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'non_xtal_segends.yaml'])
with open(non_xtal_seg_end_file, 'r') as f:
non_xtal_seg_end = yaml.load(f)
all_anomalities_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'all_anomalities.yaml'])
with open(all_anomalities_file, 'r') as f:
all_anomalities = yaml.load(f)
sequence_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'sequences.yaml'])
with open(sequence_file, 'r') as f:
gpcr_sequences = yaml.load(f)
xtal_anomalities_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'xtal_anomalities.yaml'])
non_xtal_seg_end_bw_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'non_xtal_segends_bw.yaml'])
xtal_seg_end_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'xtal_segends.yaml'])
mod_xtal_seg_end_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'mod_xtal_segends.yaml'])
xtal_seg_end_bw_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'xtal_segends_bw.yaml'])
segments = ProteinSegment.objects.filter(partial=False)
all_segments = {ps.slug: ps for ps in ProteinSegment.objects.all()} # all segments dict for faster lookups
schemes = parse_scheme_tables(generic_numbers_source_dir)
pconfs = list(ProteinConformation.objects.filter(protein__sequence_type__slug='wt').all())
pw_aln_error = ['celr3_mouse','celr3_human','gpr98_human']
track_rf_annotations = {}
def handle(self, *args, **options):
try:
self.logger.info('CREATING RESIDUES')
self.prepare_input(options['proc'], self.pconfs)
# if (self.check_if_residues()):
# self.prepare_input(1, self.pconfs)
# self.main_func([0, False],0)
# self.analyse_rf_annotations()
self.logger.info('COMPLETED CREATING RESIDUES')
except Exception as msg:
print(msg)
self.logger.error(msg)
def analyse_rf_annotations(self):
## THIS ONLY WORKS IF NOT RUNNING IN PARALLIZED
self.track_rf_annotations = OrderedDict(sorted(self.track_rf_annotations.items()))
match = 0
unmatch = 0
for rf, vals in self.track_rf_annotations.items():
if len(vals['anomalities'])>1:
unmatch += 1
print(vals['name'],rf,"templs:",len(vals['templs']),"anomalities",len(vals['anomalities']))
# print("templs:",vals['templs'])
#print("anomalities",vals['anomalities'])
common = set.intersection(*map(set,vals['anomalities']))
non_common = []
print("Common:",common)
for l in vals['anomalities']:
for v in l:
if v not in common:
if v not in non_common:
non_common.append(v)
print("Non-Common:",non_common)
else:
match += 1
print("Match RF",match,"Unmatch RF",unmatch)
    def generate_bw(self, i, v, aa):
        """Classify sequence position ``i`` into a segment and BW number.

        ``v`` is a segment-end annotation dict keyed like '1b'/'1x'/'1e'
        (begin, x50 reference, end of TM1), 'i1*' (ICL1), 'e1*' (ECL1), ...
        '8*' (helix 8); the value "-" marks an unannotated segment. ``aa`` is
        the one-letter amino-acid code at position ``i``.

        Returns a dict ``{'aa', 'pos', 's', 'numbers'}`` where ``s`` is the
        segment label and ``numbers['bw']`` is the Ballesteros-Weinstein
        number '<helix>.<50 + offset from x50>'; the 'bw' key is removed for
        positions outside numbered helices/loops.

        NOTE: branch order is load-bearing — each ``elif`` assumes all
        earlier segment ranges have been ruled out.
        """
        a = {'aa':aa, 'pos':i, 's':'', 'numbers':{'bw':''}}
        if i<int(v['1b']):
            a['s'] = 'N-term'
        elif i<=int(v['1e']):
            a['s'] = 'TM1'
            a['numbers']['bw'] = '1.'+str(50+i-int(v['1x']))
        # ICL1: only numbered (12.x) when the loop's x50 and end are annotated.
        elif v['i1x']!="-" and v['i1e']!="-" and i<int(v['2b']):
            if i<int(v['i1b']):
                a['s'] = 'ICL1'
            elif i<=int(v['i1e']):
                a['s'] = 'ICL1'
                a['numbers']['bw'] = '12.'+str(50+i-int(v['i1x']))
            else:
                a['s'] = 'ICL1'
        elif i<int(v['2b']):
            a['s'] = 'ICL1'
        elif i<=int(v['2e']):
            a['s'] = 'TM2'
            a['numbers']['bw'] = '2.'+str(50+i-int(v['2x']))
        elif v['e1x']!="-" and i<int(v['3b']):
            if i<int(v['e1b']):
                a['s'] = 'ECL1'
            elif i<=int(v['e1e']):
                a['s'] = 'ECL1'
                a['numbers']['bw'] = '23.'+str(50+i-int(v['e1x']))
            else:
                a['s'] = 'ECL1'
        elif i<int(v['3b']):
            a['s'] = 'ECL1'
        elif i<=int(v['3e']):
            a['s'] = 'TM3'
            a['numbers']['bw'] = '3.'+str(50+i-int(v['3x']))
        elif v['i2x']!="-" and i<int(v['4b']):
            if i<int(v['i2b']):
                a['s'] = 'ICL2'
            elif i<=int(v['i2e']):
                a['s'] = 'ICL2'
                a['numbers']['bw'] = '34.'+str(50+i-int(v['i2x']))
            else:
                a['s'] = 'ICL2'
        elif i<int(v['4b']):
            a['s'] = 'ICL2'
        elif i<=int(v['4e']):
            a['s'] = 'TM4'
            a['numbers']['bw'] = '4.'+str(50+i-int(v['4x']))
        # ECL2 additionally requires an annotated begin position.
        elif v['e2x']!="-" and i<int(v['5b']) and v['e2b']!="-":
            if i<int(v['e2b']):
                a['s'] = 'ECL2'
            elif i<=int(v['e2e']):
                a['s'] = 'ECL2'
                a['numbers']['bw'] = '45.'+str(50+i-int(v['e2x']))
            else:
                a['s'] = 'ECL2'
        elif i<int(v['5b']):
            a['s'] = 'ECL2'
        elif i<=int(v['5e']):
            a['s'] = 'TM5'
            a['numbers']['bw'] = '5.'+str(50+i-int(v['5x']))
        elif i<int(v['6b']):
            a['s'] = 'ICL3'
        elif i<=int(v['6e']):
            a['s'] = 'TM6'
            a['numbers']['bw'] = '6.'+str(50+i-int(v['6x']))
        elif v['7b']=="-": #fix for npy6r_human
            a['s'] = 'C-term'
        elif i<int(v['7b']):
            a['s'] = 'ECL3'
        elif i<=int(v['7e']):
            a['s'] = 'TM7'
            a['numbers']['bw'] = '7.'+str(50+i-int(v['7x']))
        elif v['8x']!="-":
            if i<int(v['8b']):
                a['s'] = 'ICL4'
            elif i<=int(v['8e']):
                a['s'] = 'H8'
                a['numbers']['bw'] = '8.'+str(50+i-int(v['8x']))
            else:
                a['s'] = 'C-term'
        else:
            a['s'] = 'C-term'
        # Drop the placeholder so callers can test "has BW number" via key.
        if a['numbers']['bw'] == '':
            a['numbers'].pop('bw', None)
        return a
    def b_and_c_check(self,b_and_c,number,seg):
        """Convert a BW position into its GPCRdb ('x') number, correcting for
        the bulges and constrictions annotated for segment ``seg``.

        ``b_and_c`` maps segment -> list of anomaly position strings:
        3-character entries (e.g. '461') are bulges, 2-character entries are
        constrictions. ``number`` is the uncorrected two-digit BW position.
        Returns the corrected position as a string; a bulge position gets a
        trailing '1' (e.g. '461').

        NOTE: anomalies are applied outward from x50, hence the reverse sort
        for positions below 50.
        """
        offset = 0
        bulge = False
        if seg in b_and_c:
            bcs = sorted(b_and_c[seg])
            if int(number)<50:
                bcs = sorted(bcs, reverse=True)
            for bc in bcs:
                if len(bc)>2: #bulge
                    # print(bc[0:2],number,offset)
                    if int(bc[0:2])<50 and int(number)+offset<int(bc[0:2]): #before x50 and before bulge, do smt
                        offset += 1 #eg if 5x461, then 5.46 becomes 5x461, 5.45 becomes 5x46
                    elif int(bc[0:2])<50 and int(number)+offset==int(bc[0:2]): #before x50 and is bulge, do smt
                        bulge = True # eg if 5x461, then 5.46 becomes 5x461
                    elif int(bc[0:2])>=50 and int(number)+offset>int(bc[0:2])+1: #after x50 and after bulge, do smt
                        offset -= 1 #eg if 2x551, then 2.56 becomes 2x551, 5.57 becomes 5x56
                    elif int(bc[0:2])>=50 and int(number)+offset==int(bc[0:2])+1: #after x50 and 1 after bulge, do smt
                        bulge = True # eg if 2x551, then 2.56 becomes 2x551
                else: #2 numbers, it's a constriction
                    if int(bc[0:2])<50 and int(number)+offset<=int(bc[0:2]): #before x50 and before or equal constrictions, do smt
                        offset -= 1 #eg if constriction is 7x44, then 7.44 becomes 7x43, 7.43 becomes 7x42
                    if int(bc[0:2])>50 and int(number)+offset>=int(bc[0:2]): #before x50 and before or equal constrictions, do smt
                        offset += 1 #eg if constriction is 4x57, then 4.57 becomes 4x58, 4.58 becomes 4x59
        if bulge!=True:
            gn = str(int(number)+offset)
        elif int(number)<50:
            gn = str(int(number)+offset)+"1"
        elif int(number)>=50:
            gn = str(int(number)-1+offset)+"1"
        # print(gn,number,offset,bulge)
        return gn
def check_if_residues(self):
fail = False
for p in self.pconfs:
if Residue.objects.filter(protein_conformation=p).count():
pass
else:
print("No residues for ",p)
self.logger.error('No residues for parent {}'.format(p))
fail = True
return fail
def main_func(self, positions, iteration,count,lock):
self.logger.info('STARTING ANNOTATION PROCESS {}'.format(positions))
# if not positions[1]:
# pconfs = self.pconfs[positions[0]:]
# else:
#
pconfs = self.pconfs
proteins = list(self.non_xtal_seg_end)
# print(data)
counter = 0
lacking = []
while count.value<len(self.pconfs):
with lock:
p = self.pconfs[count.value]
count.value +=1
entry_name = p.protein.entry_name
ref_positions = None
counter += 1
missing_x50s = []
aligned_gn_mismatch_gap = 0
human_ortholog = ''
# self.logger.info('DOING {}'.format(p))
# if p.protein.residue_numbering_scheme.slug!='gpcrdbc' or p.protein.species.common_name != "Human":
# continue
# if p.protein.species.common_name != "Human":
# continue
# if p.protein.entry_name !='aa2ar_human':
# continue
# print(p.protein.entry_name)
# continue
# Residue.objects.filter(protein_conformation=p).delete()
if Residue.objects.filter(protein_conformation=p).count():
# print(counter,entry_name,"already done")
continue
else:
# print(counter,p)
if p.protein.species.common_name != "Human" and entry_name not in proteins:
human_ortholog = Protein.objects.filter(family=p.protein.family, sequence_type__slug='wt', species__common_name='Human')
if human_ortholog.exists():
human_ortholog = human_ortholog.get()
if human_ortholog.entry_name not in proteins:
if human_ortholog.entry_name not in lacking:
lacking.append(human_ortholog.entry_name)
print(counter,p,human_ortholog.entry_name, 'not in excel')
self.logger.error('Human ortholog ({}) of {} has no annotation in excel'.format(human_ortholog.entry_name,entry_name))
continue
if human_ortholog.entry_name in proteins:
# print(counter,entry_name,'check sequences')
ref_positions, aligned_gn_mismatch_gap = self.compare_human_to_orthologue(human_ortholog, p.protein, self.non_xtal_seg_end[human_ortholog.entry_name],counter)
s = p.protein.sequence
v = self.non_xtal_seg_end[human_ortholog.entry_name]
new_v = {}
# print(v)
failed = None
x50s = ['1x','i1x','2x','e1x','3x','i2x','4x','e2x','5x','6x','7x','8x']
x50s_must_have = ['1x','2x','3x','4x','5x','6x','7x']
for x50 in x50s:
#1b 1x 1e i1b i1x i1e 2b 2x 2e e1b e1x e1e 3b 3x 3e i2b i2x i2e 4b 4x 4e e2b e2x e2e 5b 5x 5e 6b 6x 6e 7b 7x 7e 8b 8x 8e
val = v[x50]
if isinstance(val, numbers.Real):
i = int(val)
try:
i_b = int(v[x50[:-1]+"b"])
length_to_b = i_b-i
i_e = int(v[x50[:-1]+"e"])
length_to_e = i_e-i
except:
print("Error in annotation",entry_name,human_ortholog.entry_name)
self.logger.error('Error in annotation {}<->{} ({})'.format(entry_name,human_ortholog.entry_name,val))
failed = True
break
if i in ref_positions:
new_v[x50] = ref_positions[i]
## MAYBE NEED SOME RULES HERE....
new_v[x50[:-1]+"b"] = ref_positions[i]+length_to_b
new_v[x50[:-1]+"e"] = ref_positions[i]+length_to_e
else:
new_v[x50] = 0
new_v[x50[:-1]+"b"] = 0
new_v[x50[:-1]+"e"] = 0
missing_x50s.append(x50)
if x50 in x50s_must_have:
# print(entry_name,"tranlated ",x50," no index in ortholog, deleting pconf and protein")
self.logger.info('{} tranlated {} no index in ortholog, deleting pconf and protein'.format(entry_name,x50))
failed = True
p.protein.delete()
p.delete()
break
else:
new_v[x50] = 0
new_v[x50[:-1]+"b"] = 0
new_v[x50[:-1]+"e"] = 0
# print(new_v)
if failed:
continue
# if aligned_gn_mismatch_gap>20:
# self.logger.warning('{} ({}) lots of misaligned GN {}'.format(entry_name,human_ortholog.entry_name,aligned_gn_mismatch_gap))
# print(entry_name,"(",human_ortholog.entry_name,") lots of misaligned GN",aligned_gn_mismatch_gap)
v = new_v
#exit()
b_and_c = {}
for entry,gn in self.all_anomalities[human_ortholog.entry_name].items():
if len(entry)<3:
continue
if entry[1]=='x' or entry[2]=='x':
if gn!="" and gn!='-':
seg, number = entry.split("x")
if seg not in b_and_c:
b_and_c[seg] = []
b_and_c[seg].append(number)
if gn!=entry:
print('Something off with b_and_c for',human_ortholog.entry_name,'gn',gn,'entry',entry)
else:
# pass
self.logger.warning('{} has no human template, deleting'.format(entry_name))
p.protein.delete()
p.delete()
failed = True
#continue
elif entry_name in proteins:
# print(entry_name,"not done but ready")
v = self.non_xtal_seg_end[entry_name]
# if counter>20:
# break
s = self.gpcr_sequences[entry_name]['Sequence']
b_and_c = {}
b_and_c_mod = []
for entry,gn in self.all_anomalities[entry_name].items():
if len(entry)<3:
continue
if entry[1]=='x' or entry[2]=='x':
if gn!="" and gn!='-':
seg, number = entry.split("x")
if seg not in b_and_c:
b_and_c[seg] = []
b_and_c[seg].append(number)
b_and_c_mod.append(entry)
if gn!=entry:
print('Something off with b_and_c for',entry_name,'gn',gn,'entry',entry)
else: #human but not in proteins
# print(entry_name," human but no annotation")
self.logger.error('{} is human but has no annotation'.format(entry_name))
continue
#continue
# self.logger.info('Parsed Seq and B&C {}'.format(entry_name))
# print(counter,entry_name,"make residues")
# continue
# if p.protein.family.parent.slug | |
<filename>src/biopsykit/carwatch_logs/log_data.py
"""Module providing classes and utility functions for handling log data from *CARWatch App*."""
import json
import warnings
from datetime import datetime
from typing import Dict, Optional, Sequence, Union
import numpy as np
import pandas as pd
from typing_extensions import Literal
from biopsykit.carwatch_logs import log_actions, log_extras
from biopsykit.utils.time import tz
# Maps raw condition codes logged by the CARWatch app to human-readable
# study-condition labels.
subject_conditions: Dict[str, str] = {
    "UNDEFINED": "Undefined",
    "KNOWN_ALARM": "Known Alarm",
    "UNKNOWN_ALARM": "Unknown Alarm",
    "SPONTANEOUS": "Spontaneous Awakening",
}
# Maps the Android ``Build.MODEL`` string reported by the phone to a
# human-readable device name (used to prettify phone metadata in reports).
smartphone_models: Dict[str, str] = {
    "Nexus 7": "Google Nexus 7",
    "HTC 10": "HTC 10",
    "ALE-L21": "Huawei P8 Lite",
    "VTR-L29": "Huawei P10",
    "VOG-L29": "Huawei P30 Pro",
    "FIG-LX1": "Huawei P Smart",
    "MEDION S5004": "MEDION S5004",
    "Moto G (4)": "Motorola Moto G4",
    "Moto G (5)": "Motorola Moto G5",
    "ONEPLUS A6013": "OnePlus 6T",
    "Redmi Note 7": "Redmi Note 7",
    "SM-G920F": "Samsung Galaxy S6",
    "SM-G930F": "Samsung Galaxy S7",
    "SM-G950F": "Samsung Galaxy S8",
    "SM-G973F": "Samsung Galaxy S10",
    "SM-G970F": "Samsung Galaxy S10e",
    "SM-A750FN": "Samsung Galaxy A7",
    "SM-A205F": "Samsung Galaxy A20",
    "SM-A520F": "Samsung Galaxy A5",
    "SM-A500FU": "Samsung Galaxy A5",
    "Mi A1": "Xiaomi Mi A1",
}
class LogDataInfo:
    """Class representing general log data information."""

    def __init__(
        self,
        subject_id: str,
        condition: str,
        log_days: Optional[Sequence[datetime]] = None,
    ):
        """Initialize a new ``LogDataInfo`` instance.

        Parameters
        ----------
        subject_id : str
            subject ID
        condition : str
            study condition of participant
        log_days : list of :class:`datetime.datetime`, optional
            list of dates during which log data was collected or ``None`` to leave empty. Default: ``None``

        """
        self.subject_id: str = subject_id
        self.condition: str = condition
        if log_days is None:
            log_days = []
        self.log_days: Sequence[datetime] = log_days
        # Defaults assume app version 1.0.0 (code 10000) until real metadata
        # is parsed out of the logs.
        self.app_metadata: Dict[str, Union[int, str]] = {
            log_extras.app_version_code: 10000,
            log_extras.app_version_name: "1.0.0",
        }
        # version_sdk_level is an int; all other values are strings.
        self.phone_metadata: Dict[str, Union[str, int]] = {
            log_extras.brand: "",
            log_extras.manufacturer: "",
            log_extras.model: "",
            log_extras.version_sdk_level: 0,
            log_extras.version_security_patch: "",
            log_extras.version_release: "",
        }

    @property
    def app_version_code(self) -> int:
        """App version code.

        Returns
        -------
        int
            version code of CARWatch App

        """
        return self.app_metadata[log_extras.app_version_code]

    @property
    def app_version_name(self) -> str:
        """Return app version name.

        Returns
        -------
        str
            version name of CARWatch App

        """
        return self.app_metadata[log_extras.app_version_name]

    @property
    def model(self) -> str:
        """Return smartphone model.

        Returns
        -------
        str
            name of smartphone model or "n/a" if information is not available

        """
        # ``phone_metadata`` may have been replaced by an empty dict when no
        # phone-metadata log entry was found.
        return self.phone_metadata[log_extras.model] if self.phone_metadata else "n/a"

    @property
    def manufacturer(self) -> str:
        """Return smartphone manufacturer.

        Returns
        -------
        str
            name of smartphone manufacturer or "n/a" if information is not available

        """
        return self.phone_metadata[log_extras.manufacturer] if self.phone_metadata else "n/a"

    @property
    def android_version(self) -> int:
        """Return Android version.

        Returns
        -------
        int
            SDK version of Android version or 0 if information is not available

        """
        return self.phone_metadata[log_extras.version_sdk_level] if self.phone_metadata else 0
class LogData:
    """Class representing log data."""

    # Known log actions mapped to the "extras" keys each action is expected to
    # carry. NOTE: this class attribute shadows the imported ``log_actions``
    # module inside class scope — the dict literal below is evaluated before
    # the name is bound, so its ``log_actions.*`` references still resolve to
    # the module.
    log_actions: Dict[str, Sequence[str]] = {
        log_actions.app_metadata: [
            log_extras.app_version_code,
            log_extras.app_version_name,
        ],
        log_actions.phone_metadata: [
            log_extras.brand,
            log_extras.manufacturer,
            log_extras.model,
            log_extras.version_sdk_level,
            log_extras.version_security_patch,
            log_extras.version_release,
        ],
        log_actions.subject_id_set: [
            log_extras.subject_id,
            log_extras.subject_condition,
        ],
        log_actions.alarm_set: [
            log_extras.alarm_id,
            log_extras.timestamp,
            log_extras.is_repeating,
            log_extras.is_hidden,
            log_extras.hidden_timestamp,
        ],
        log_actions.timer_set: [log_extras.alarm_id, log_extras.timestamp],
        log_actions.alarm_cancel: [log_extras.alarm_id],
        log_actions.alarm_ring: [log_extras.alarm_id, log_extras.saliva_id],
        log_actions.alarm_snooze: [
            log_extras.alarm_id,
            log_extras.snooze_duration,
            log_extras.source,
        ],
        log_actions.alarm_stop: [
            log_extras.alarm_id,
            log_extras.source,
            log_extras.saliva_id,
        ],
        log_actions.alarm_killall: [],
        log_actions.evening_salivette: [log_extras.alarm_id],
        log_actions.barcode_scan_init: [],
        log_actions.barcode_scanned: [
            log_extras.alarm_id,
            log_extras.saliva_id,
            log_extras.barcode_value,
        ],
        log_actions.invalid_barcode_scanned: [log_extras.barcode_value],
        log_actions.duplicate_barcode_scanned: [
            log_extras.barcode_value,
            log_extras.other_barcodes,
        ],
        log_actions.spontaneous_awakening: [log_extras.alarm_id],
        log_actions.lights_out: [],
        log_actions.day_finished: [log_extras.day_counter],
        log_actions.service_started: [],
        log_actions.service_stopped: [],
        log_actions.screen_off: [],
        log_actions.screen_on: [],
        log_actions.user_present: [],
        log_actions.phone_boot_init: [],
        log_actions.phone_boot_complete: [],
        # TODO add further log actions
    }

    def __init__(self, data: pd.DataFrame, error_handling: Optional[Literal["ignore", "warn"]] = "ignore"):
        """Initialize new ``LogData`` instance.

        Parameters
        ----------
        data : :class:`~pandas.DataFrame`
            log data as dataframe
        error_handling : {"ignore", "warn"}
            how to handle error when parse log data. ``error_handling`` can be one of the following:

            * "warn" to issue warning when no "Subject ID Set" action was found in the data (indicating that a
              participant did not correctly register itself for the study or that log data is corrupted)
            * "ignore" to ignore warning.

        """
        self.data: pd.DataFrame = data
        self.error_handling: str = error_handling
        # Filter state set externally (e.g. by interactive widgets); consumed
        # by ``get_filtered_logs``.
        self.selected_day = None
        self.selected_action = None
        self.info: LogDataInfo = self.extract_info()

    def extract_info(self) -> LogDataInfo:
        """Extract log data information.

        Returns
        -------
        :class:`~biopsykit.carwatch_logs.log_data.LogDataInfo`
            ``LogDataInfo`` object

        """
        # Subject Information
        subject_dict = get_extras_for_log(self, log_actions.subject_id_set)
        subject_id: str = ""
        condition: str = subject_conditions["UNDEFINED"]
        if subject_dict:
            subject_id = subject_dict[log_extras.subject_id]
            condition = subject_conditions.get(
                subject_dict[log_extras.subject_condition], subject_conditions["UNDEFINED"]
            )
        elif self.error_handling == "warn":
            warnings.warn("Action 'Subject ID Set' not found – Log Data may be invalid!")
        # App Metadata
        app_dict = get_extras_for_log(self, log_actions.app_metadata)
        # Phone Metadata
        phone_dict = get_extras_for_log(self, log_actions.phone_metadata)
        # Prettify the raw Build.MODEL string when the device is known.
        if log_extras.model in phone_dict and phone_dict[log_extras.model] in smartphone_models:
            phone_dict[log_extras.model] = smartphone_models[phone_dict[log_extras.model]]
        # Log Info
        log_days = np.array([ts.date() for ts in self.data.index.normalize().unique()])
        log_info = LogDataInfo(subject_id, condition, log_days)
        log_info.log_days = log_days
        log_info.phone_metadata = phone_dict
        if app_dict:
            log_info.app_metadata = app_dict
        return log_info

    def _ipython_display_(self):
        # Rich display hook used by Jupyter/IPython.
        self.print_info()

    def print_info(self):
        """Display Markdown-formatted log data information."""
        try:
            from IPython.core.display import Markdown, display  # pylint:disable=import-outside-toplevel
        except ImportError as e:
            raise ImportError(
                "Displaying LogData information failed because "
                "IPython cannot be imported. Install it via 'pip install ipython'."
            ) from e
        display(Markdown("Subject ID: **{}**".format(self.subject_id)))
        display(Markdown("Condition: **{}**".format(self.condition)))
        display(Markdown("App Version: **{}**".format(self.app_version)))
        display(Markdown("Android Version: **{}**".format(self.android_version)))
        display(Markdown("Phone: **{}**".format(self.model)))
        display(Markdown("Logging Days: **{} – {}**".format(str(self.start_date), str(self.end_date))))

    @property
    def subject_id(self) -> str:
        """Return Subject ID.

        Returns
        -------
        str
            Subject ID

        """
        return self.info.subject_id

    @property
    def condition(self) -> str:
        """Return study condition from log data.

        Returns
        -------
        str
            study condition from log data

        """
        return self.info.condition

    @property
    def android_version(self) -> int:
        """Return Android version.

        Returns
        -------
        int
            SDK version of Android version or 0 if information is not available

        """
        return self.info.android_version

    @property
    def app_version(self) -> str:
        """Return app version name.

        Returns
        -------
        str
            version name of CARWatch App

        """
        return self.info.app_version_name.split("_")[0]

    @property
    def manufacturer(self) -> str:
        """Return smartphone manufacturer.

        Returns
        -------
        str
            name of smartphone manufacturer or "n/a" if information is not available

        """
        return self.info.manufacturer

    @property
    def model(self) -> str:
        """Return smartphone model.

        Returns
        -------
        str
            name of smartphone model or "n/a" if information is not available

        """
        return self.info.model

    # NOTE(review): ``datetime.date`` in the annotations below resolves to the
    # ``datetime.date`` *method* (the module imports the ``datetime`` class,
    # not the module) — harmless at runtime, but misleading as a type hint.
    @property
    def finished_days(self) -> Sequence[datetime.date]:
        """Return list of days where CAR procedure was completely logged successfully.

        Returns
        -------
        list
            list of dates that were finished successfully

        """
        return get_logs_for_action(self, log_actions.day_finished).index

    @property
    def num_finished_days(self) -> int:
        """Return number of days where CAR procedure was completely logged successfully.

        Returns
        -------
        int
            number of successfully finished days

        """
        return len(self.finished_days)

    @property
    def log_dates(self) -> Sequence[datetime.date]:
        """Return list of all days with log data.

        Returns
        -------
        list
            list of dates that contain at least one log data event

        """
        return self.info.log_days

    @property
    def start_date(self) -> datetime.date:
        """Return start date of log data.

        Returns
        -------
        :class:`datetime.date`
            start date

        """
        if self.log_dates is not None and len(self.log_dates) > 0:
            return self.log_dates[0]
        return None

    @property
    def end_date(self) -> datetime.date:
        """Return end date of log data.

        Returns
        -------
        :class:`datetime.date`
            end date

        """
        if self.log_dates is not None and len(self.log_dates) > 0:
            return self.log_dates[-1]
        return None
def get_filtered_logs(log_data: LogData) -> pd.DataFrame:
    """Return the log entries matching the currently selected action and day.

    Parameters
    ----------
    log_data : :class:`~biopsykit.carwatch_logs.log_data.LogData`
        log data whose ``selected_action`` / ``selected_day`` filters to apply

    Returns
    -------
    :class:`~pandas.DataFrame`
        dataframe with the filtered log data

    """
    action = log_data.selected_action
    day = log_data.selected_day
    return get_logs_for_action(log_data, log_action=action, selected_day=day)
def get_logs_for_date(data: Union[LogData, pd.DataFrame], date: Union[str, datetime.date]) -> pd.DataFrame:
    """Filter log data for a specific date.

    Parameters
    ----------
    data : :class:`~biopsykit.carwatch_logs.log_data.LogData` or :class:`~pandas.DataFrame`
        log data as ``LogData`` object or as dataframe
    date : :class:`datetime.date` or str
        date to filter log data for

    Returns
    -------
    :class:`~pandas.DataFrame`
        dataframe with log data for specific date; the unfiltered data if
        ``date`` is missing/unparseable (``NaT``)

    """
    if isinstance(data, LogData):
        data = data.data
    timestamp = pd.Timestamp(date)
    if timestamp is pd.NaT:
        return data
    # Localize naive inputs to the study timezone; convert tz-aware inputs
    # instead, since ``tz_localize`` raises TypeError on aware timestamps.
    if timestamp.tzinfo is None:
        timestamp = timestamp.tz_localize(tz)
    else:
        timestamp = timestamp.tz_convert(tz)
    return data.loc[data.index.normalize() == timestamp]
def split_nights(data: Union[LogData, pd.DataFrame], diff_hours: Optional[int] = 12) -> Sequence[pd.DataFrame]:
    """Split continuous log data into individual nights.

    This function splits log data into individual nights when two successive timestamps differ more than the threshold
    provided by ``diff_hours``.

    Parameters
    ----------
    data : :class:`~pandas.DataFrame`
        input log data
    diff_hours : int, optional
        minimum difference between two successive log data timestamps required to split data into individual nights

    Returns
    -------
    list
        list of dataframes with log data split into individual nights

    """
    if isinstance(data, LogData):
        data = data.data
    # Guard: ``data.index[0]`` would raise IndexError on an empty frame.
    if len(data.index) == 0:
        return [data]
    idx_split = np.where(np.diff(data.index, prepend=data.index[0]) > pd.Timedelta(diff_hours, "hours"))[0]
    list_nights = np.split(data, idx_split)
    return list_nights
def get_logs_for_action(
data: Union[LogData, pd.DataFrame],
log_action: str,
selected_day: Optional[datetime.date] = None,
rows: Optional[Union[str, int, Sequence[int]]] = None,
) -> Union[pd.DataFrame, pd.Series]:
"""Filter log data for a specific action.
Parameters
----------
data : :class:`~biopsykit.carwatch_logs.log_data.LogData` or :class:`~pandas.DataFrame`
log data as ``LogData`` object or as | |
not t.match(Tokens.DML, 'SELECT'):
raise Exception("Invalid subselect statement")
self._select(p, t)
else:
tablename = current_token.value.strip()
tablealias = current_token.get_name().strip()
if tablename == tablealias:
table_name_tokens.append(current_token)
else:
inject_table_view(current_token, tablename, tablealias)
elif isinstance(current_token, Types.Parenthesis):
t = self._token_next(current_token, self._token_first(current_token))
if t.match(Tokens.DML, 'SELECT'):
identifier_token = self._token_next(parent, current_token)
as_token = None
if identifier_token.match(Tokens.Keyword, 'AS'):
as_token = identifier_token
identifier_token = self._token_next(parent, identifier_token)
if not isinstance(identifier_token, Types.Identifier):
raise Exception("Invalid subselect statement")
next_token = self._token_next(parent, identifier_token)
self._select(current_token, t)
if as_token:
self._token_delete(parent, as_token)
inject_table_alias(identifier_token, identifier_token.value)
elif current_token.ttype == Tokens.Punctuation:
if table_name_tokens:
next_token = self._token_next(parent, current_token)
table_name_tokens = process_table_name_tokens(table_name_tokens)
elif current_token.match(Tokens.Keyword, ['JOIN', 'LEFT', 'RIGHT', 'INNER', 'OUTER'] + self._join_statements):
join_tokens.append(current_token.value.strip().upper())
join = ' '.join(join_tokens)
if join in self._join_statements:
join_tokens = list()
table_name_tokens = process_table_name_tokens(table_name_tokens)
next_token = self._select_join(parent,
current_token,
['JOIN', 'LEFT', 'RIGHT', 'INNER', 'OUTER']
+ self._join_statements
+ self._from_end_words)
elif current_token.ttype == Tokens.Keyword or \
current_token.ttype == Tokens.Token.Literal.Number.Integer:
table_name_tokens.append(current_token)
else:
raise Exception("Failed to parse FROM table name")
prev_token = current_token
current_token = next_token
if prev_token:
process_table_name_tokens(table_name_tokens)
return current_token
def _select(self, parent, start_token, insert_table=None):
    """Process a SELECT statement rooted at ``start_token``.

    Walks the token stream: field list, FROM clause (via ``_select_from``),
    then trailing WHERE/GROUP/HAVING/ORDER/LIMIT clauses and UNION'd
    sub-selects (handled recursively).

    ``insert_table``: set when this SELECT feeds an ``INSERT INTO`` that
    table (an INSERT ... SELECT); for tables in ``self._translate_tables``
    the product-prefix literal is appended to the select field list so the
    inserted rows carry the product value.

    Returns the token following the statement, or None.
    """
    # Skip an optional ALL/DISTINCT qualifier to find the field list.
    token = self._token_next(parent, start_token)
    fields_token = self._token_next(parent, token) if token.match(Tokens.Keyword, ['ALL', 'DISTINCT']) else token
    current_token, field_lists = self._select_expression_tokens(parent, fields_token, ['FROM'] + self._from_end_words)
    def handle_insert_table(table_name):
        # Append ", '<product prefix>'" after the last select field.
        if insert_table and insert_table in self._translate_tables:
            if not field_lists or not field_lists[-1]:
                raise Exception("Invalid SELECT field list")
            last_token = list(field_lists[-1][-1].flatten())[-1]
            # Each token is inserted immediately after last_token, so the
            # list is given in reverse of the intended textual order.
            for keyword in ["'", self._product_prefix, "'", ' ', ',']:
                self._token_insert_after(last_token.parent, last_token, Types.Token(Tokens.Keyword, keyword))
        return
    table_name_callback = handle_insert_table if insert_table else None
    from_token = self._token_next_match(parent, start_token, Tokens.Keyword, 'FROM')
    if not from_token:
        # FROM not always required, example would be SELECT CURRVAL('"ticket_id_seq"')
        return current_token
    current_token = self._select_from(parent,
                                      from_token, self._from_end_words,
                                      table_name_callback=table_name_callback)
    if not current_token:
        return None
    # Handle the statement tail: WHERE/GROUP/HAVING/ORDER/LIMIT and UNIONs.
    while current_token:
        if isinstance(current_token, Types.Where) or \
                current_token.match(Tokens.Keyword, ['GROUP', 'HAVING', 'ORDER', 'LIMIT']):
            if isinstance(current_token, Types.Where):
                self._where(parent, current_token)
            # Jump to the next clause keyword (if any) after this clause.
            start_token = self._token_next(parent, current_token)
            next_token = self._token_next_match(parent,
                                                start_token,
                                                Tokens.Keyword,
                                                self._from_end_words) if start_token else None
        elif current_token.match(Tokens.Keyword, ['UNION']):
            token = self._token_next(parent, current_token)
            if not token.match(Tokens.DML, 'SELECT'):
                raise Exception("Invalid SELECT UNION statement")
            # Recurse into the unioned SELECT, propagating insert_table.
            token = self._select(parent, current_token, insert_table=insert_table)
            next_token = self._token_next(parent, token) if token else None
        else:
            raise Exception("Unsupported SQL statement")
        current_token = next_token
    return current_token
def _replace_table_entity_name(self, parent, token, table_name, entity_name=None):
    """Replace a table/index token with its product-prefixed name.

    Names listed in ``self._skip_tables`` or ``self._translate_tables``
    are left untouched. ``entity_name`` defaults to ``table_name``.
    Returns the token that follows ``token`` within ``parent``.
    """
    entity = entity_name or table_name
    following = self._token_next(parent, token)
    if table_name in self._skip_tables + self._translate_tables:
        return following
    target = parent.tokens[self._token_idx(parent, token)]
    if isinstance(target, Types.Function):
        # Function-style token: swap only the identifier inside it.
        inner = self._token_first(target)
        if isinstance(inner, Types.Identifier):
            target.tokens[self._token_idx(target, inner)] = Types.Token(
                Tokens.Keyword, self._prefixed_table_entity_name(entity))
    elif isinstance(target, (Types.Identifier, Types.Token)):
        parent.tokens[self._token_idx(parent, target)] = Types.Token(
            Tokens.Keyword, self._prefixed_table_entity_name(entity))
    else:
        raise Exception("Internal error, invalid table entity token type")
    return following
def _insert(self, parent, start_token):
    """Translate an INSERT statement for multi-product storage.

    Rewrites the target table name and, for tables in
    ``self._translate_tables``, makes sure the product column (and, when
    ``self._id_calculated``, a calculated ``id`` for the ``ticket`` table)
    is part of the inserted data:

    * ``INSERT INTO t (cols) VALUES (...)`` -- missing columns are appended
      to the column list and matching values to every value tuple;
    * ``INSERT INTO t ... SELECT ...`` -- delegated to ``_select`` with
      ``insert_table`` set, which appends the product literal to the
      select list.

    Bug fix (review): the ``id``-column detection was an ``elif`` guarded
    by ``isinstance(ptoken, Types.IdentifierList)`` placed AFTER the
    ``IdentifierList`` branch, so it could never trigger; an explicitly
    supplied ``id`` column was never recognized and the calculated ``id``
    got appended a second time. The check now runs inside the
    ``IdentifierList`` branch.
    """
    token = self._token_next(parent, start_token)
    if not token.match(Tokens.Keyword, 'INTO'):
        raise Exception("Invalid INSERT statement")
    def insert_extra_columns(tablename, columns_token):
        # Scan the parenthesized column list; record which of the special
        # columns ('product', 'id') are already present, then append the
        # missing ones just before the closing ')'.
        columns_present = []
        if tablename in self._translate_tables and \
                isinstance(columns_token, Types.Parenthesis):
            ptoken = self._token_first(columns_token)
            if not ptoken.match(Tokens.Punctuation, '('):
                raise Exception("Invalid INSERT statement, expected parenthesis around columns")
            ptoken = self._token_next(columns_token, ptoken)
            last_token = ptoken
            while ptoken:
                if isinstance(ptoken, Types.IdentifierList):
                    if not 'product' in columns_present \
                            and any(i.get_name() == 'product'
                                    for i in ptoken.get_identifiers()
                                    if isinstance(i, Types.Identifier)):
                        columns_present.append('product')
                    # Previously an unreachable 'elif'; must be checked for
                    # the same IdentifierList token as the 'product' test.
                    if not 'id' in columns_present \
                            and tablename == 'ticket' \
                            and any((t.ttype is None or t.is_keyword)
                                    and t.value == 'id'
                                    for t in ptoken.get_identifiers()):
                        columns_present.append('id')
                last_token = ptoken
                ptoken = self._token_next(columns_token, ptoken)
            if not last_token or \
                    not last_token.match(Tokens.Punctuation, ')'):
                raise Exception("Invalid INSERT statement, unable to find column parenthesis end")
            columns_to_insert = []
            if not 'product' in columns_present:
                columns_to_insert += [',', ' ', self._product_column]
            if self._id_calculated \
                    and tablename == 'ticket'\
                    and not 'id' in columns_present:
                columns_to_insert += [',', ' ', 'id']
            for keyword in columns_to_insert:
                self._token_insert_before(columns_token, last_token, Types.Token(Tokens.Keyword, keyword))
        return columns_present
    def insert_extra_column_values(tablename, ptoken, before_token,
                                   columns_present):
        # Append the product literal (and calculated id) to a VALUES tuple,
        # mirroring the columns added by insert_extra_columns.
        if tablename in self._translate_tables:
            values_to_insert = []
            if not 'product' in columns_present:
                values_to_insert += [',', "'", self._product_prefix, "'"]
            if self._id_calculated \
                    and tablename == 'ticket' \
                    and not 'id' in columns_present:
                values_to_insert += [
                    ',', """COALESCE((SELECT MAX(id) FROM
                    (SELECT * FROM ticket WHERE product='%s')
                    AS subquery), 0)+1""" %
                    (self._product_prefix,)
                ]
            for keyword in values_to_insert:
                self._token_insert_before(ptoken, before_token, Types.Token(Tokens.Keyword, keyword))
        return
    tablename = None
    table_name_token = self._token_next(parent, token)
    columns_present = []
    if isinstance(table_name_token, Types.Function):
        # Parsed as "name(cols)": the identifier and column list are nested.
        token = self._token_first(table_name_token)
        if isinstance(token, Types.Identifier):
            tablename = token.get_name()
        columns_token = self._replace_table_entity_name(table_name_token, token, tablename)
        if columns_token.match(Tokens.Keyword, 'VALUES'):
            token = columns_token
        else:
            columns_present = insert_extra_columns(tablename, columns_token)
            token = self._token_next(parent, table_name_token)
    else:
        tablename = table_name_token.value
        columns_token = self._replace_table_entity_name(parent, table_name_token, tablename)
        if columns_token.match(Tokens.Keyword, 'VALUES'):
            token = columns_token
        else:
            columns_present = insert_extra_columns(tablename, columns_token)
            token = self._token_next(parent, columns_token)
    if token.match(Tokens.Keyword, 'VALUES'):
        separators = [',', '(', ')']
        token = self._token_next(parent, token)
        while token:
            if isinstance(token, Types.Parenthesis):
                # One "(v1, v2, ...)" tuple: evaluate each value expression,
                # then append the extra column values before the ')'.
                ptoken = self._token_first(token)
                if not ptoken.match(Tokens.Punctuation, '('):
                    raise Exception("Invalid INSERT statement")
                last_token = ptoken
                while ptoken:
                    if not ptoken.match(Tokens.Punctuation, separators) and \
                            not ptoken.match(Tokens.Keyword, separators) and \
                            not ptoken.is_whitespace():
                        ptoken = self._expression_token_unwind_hack(token, ptoken, self._token_prev(token, ptoken))
                        self._eval_expression_value(token, ptoken)
                    last_token = ptoken
                    ptoken = self._token_next(token, ptoken)
                if not last_token or \
                        not last_token.match(Tokens.Punctuation, ')'):
                    raise Exception("Invalid INSERT statement, unable to find column value parenthesis end")
                insert_extra_column_values(tablename, token, last_token, columns_present)
            elif not token.match(Tokens.Punctuation, separators) and\
                    not token.match(Tokens.Keyword, separators) and\
                    not token.is_whitespace():
                raise Exception("Invalid INSERT statement, unable to parse VALUES section")
            token = self._token_next(parent, token)
    elif token.match(Tokens.DML, 'SELECT'):
        self._select(parent, token, insert_table=tablename)
    else:
        raise Exception("Invalid INSERT statement")
    return
def _update_delete_where_limit(self, table_name, parent, start_token):
    """Inject the product restriction into the tail of an UPDATE/DELETE.

    Translates an existing WHERE clause via ``self._where``; then, for
    tables in ``self._translate_tables``:

    * with a WHERE clause: inserts ``<product column>='<prefix>' AND``
      right after the WHERE keyword;
    * without one: inserts ``WHERE <product column>='<prefix>'`` before a
      LIMIT keyword if present, otherwise appends it at the end of the
      statement.
    """
    if not start_token:
        return
    where_token = start_token if isinstance(start_token, Types.Where) \
        else self._token_next_by_instance(parent, start_token, Types.Where)
    if where_token:
        self._where(parent, where_token)
    if not table_name in self._translate_tables:
        # Non-translated tables need no product restriction.
        return
    if where_token:
        # Tokens are inserted one-by-one after the WHERE keyword, so the
        # list is reversed first to come out in the right textual order.
        keywords = [self._product_column, '=', "'", self._product_prefix, "'", ' ', 'AND', ' ']
        keywords.reverse()
        token = self._token_first(where_token)
        if not token.match(Tokens.Keyword, 'WHERE'):
            token = self._token_next_match(where_token, token, Tokens.Keyword, 'WHERE')
        if not token:
            raise Exception("Invalid UPDATE statement, failed to parse WHERE")
        for keyword in keywords:
            self._token_insert_after(where_token, token, Types.Token(Tokens.Keyword, keyword))
    else:
        keywords = ['WHERE', ' ', self._product_column, '=', "'", self._product_prefix, "'"]
        limit_token = self._token_next_match(parent, start_token, Tokens.Keyword, 'LIMIT')
        if limit_token:
            # insert_before keeps list order; trailing space separates LIMIT.
            for keyword in keywords:
                self._token_insert_before(parent, limit_token, Types.Token(Tokens.Keyword, keyword))
            self._token_insert_before(parent, limit_token, Types.Token(Tokens.Keyword, ' '))
        else:
            # No LIMIT: walk to the last token and append the clause there
            # (reversed again because we insert after a fixed token).
            last_token = token = start_token
            while token:
                last_token = token
                token = self._token_next(parent, token)
            keywords.reverse()
            for keyword in keywords:
                self._token_insert_after(parent, last_token, Types.Token(Tokens.Keyword, keyword))
    return
def _get_entity_name_from_token(self, parent, token):
    """Extract a table/index name from an Identifier, Function, or plain token.

    For a Function-style token ("name(...)"), the name is taken from the
    identifier nested inside it. Returns None when no name can be derived.
    """
    if isinstance(token, Types.Identifier):
        return token.get_name()
    if isinstance(token, Types.Function):
        inner = self._token_first(token)
        if isinstance(inner, Types.Identifier):
            return inner.get_name()
        return None
    if isinstance(token, Types.Token):
        return token.value
    return None
def _update(self, parent, start_token):
    """Translate an UPDATE statement.

    Replaces the table name with its product-prefixed form, walks the SET
    assignments evaluating each right-hand-side expression, and finally
    delegates WHERE/LIMIT handling (including the product restriction) to
    ``_update_delete_where_limit``.
    """
    table_name_token = self._token_next(parent, start_token)
    tablename = self._get_entity_name_from_token(parent, table_name_token)
    if not tablename:
        raise Exception("Invalid UPDATE statement, expected table name")
    token = self._replace_table_entity_name(parent, table_name_token, tablename)
    set_token = self._token_next_match(parent, token, Tokens.Keyword, 'SET')
    if set_token:
        token = set_token
        # Iterate "SET col = expr, col = expr, ..." until WHERE or LIMIT.
        while token and \
                not isinstance(token, Types.Where) and \
                not token.match(Tokens.Keyword, 'LIMIT'):
            # Each assignment is preceded by SET or a comma separator.
            if not token.match(Tokens.Keyword, 'SET') and \
                    not token.match(Tokens.Punctuation, ','):
                raise Exception("Invalid UPDATE statement, failed to match separator")
            column_token = self._token_next(parent, token)
            if isinstance(column_token, Types.Comparison):
                # "col = expr" parsed as a single Comparison token; nothing
                # to unwind, move on to the next assignment.
                token = self._token_next(parent, column_token)
                continue
            equals_token = self._token_next(parent, column_token)
            if not equals_token.match(Tokens.Token.Operator.Comparison, '='):
                raise Exception("Invalid UPDATE statement, SET equals token mismatch")
            expression_token = self._token_next(parent, equals_token)
            expression_token = self._expression_token_unwind_hack(parent, expression_token, equals_token)
            self._eval_expression_value(parent, expression_token)
            token = self._token_next(parent, expression_token)
        start_token = token
    self._update_delete_where_limit(tablename, parent, start_token)
    return
def _delete(self, parent, start_token):
    """Translate a DELETE statement: rewrite the target table name and
    restrict the WHERE/LIMIT tail to the current product."""
    from_token = self._token_next(parent, start_token)
    if not from_token.match(Tokens.Keyword, 'FROM'):
        raise Exception("Invalid DELETE statement")
    name_token = self._token_next(parent, from_token)
    entity = self._get_entity_name_from_token(parent, name_token)
    if not entity:
        raise Exception("Invalid DELETE statement, expected table name")
    tail = self._replace_table_entity_name(parent, name_token, entity)
    self._update_delete_where_limit(entity, parent, tail)
def _create(self, parent, start_token):
token = self._token_next(parent, start_token)
if token.match(Tokens.Keyword, 'TEMPORARY'):
token = self._token_next(parent, token)
if token.match(Tokens.Keyword, 'TABLE'):
token = self._token_next(parent, token)
while token.match(Tokens.Keyword, ['IF', 'NOT', 'EXIST']) or \
token.is_whitespace():
token = self._token_next(parent, token)
table_name = self._get_entity_name_from_token(parent, token)
if not table_name:
raise Exception("Invalid CREATE TABLE statement, expected table name")
as_token = self._token_next_match(parent, token,
Tokens.Keyword, 'AS')
self._replace_table_entity_name(parent, token, table_name)
if as_token:
select_token = self._token_next_match(parent, as_token,
Tokens.DML, 'SELECT')
if select_token:
return self._select(parent, select_token)
elif token.match(Tokens.Keyword, ['UNIQUE', 'INDEX']):
if token.match(Tokens.Keyword, 'UNIQUE'):
token = self._token_next(parent, token)
if token.match(Tokens.Keyword, 'INDEX'):
index_token = self._token_next(parent, token)
index_name = self._get_entity_name_from_token(parent, index_token)
if not index_name:
raise Exception("Invalid CREATE INDEX statement, expected index name")
on_token = self._token_next_match(parent, index_token, Tokens.Keyword, 'ON')
if not on_token:
raise Exception("Invalid CREATE INDEX statement, expected ON specifier")
table_name_token = self._token_next(parent, on_token)
table_name = self._get_entity_name_from_token(parent, table_name_token)
if not table_name:
raise Exception("Invalid CREATE INDEX statement, expected table name")
self._replace_table_entity_name(parent, table_name_token, table_name)
self._replace_table_entity_name(parent, index_token, | |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 11 21:09:02 2016
@author: alisazhila
"""
import dataset_generator as gd
import random
import quadprog as qp
import numpy as np
import cvxopt
from sklearn.svm import SVC
import scipy.optimize
#import sys
#sys.exit()
TINY_ADD = np.power(10.0, -13)
#np.testing.assert_array_almost_equal(result.x, xf)
def solve_qp_scipy(G, a, C, b, meq=0):
    """Solve min 1/2 x^T G x - a^T x  s.t.  C.T x >= b  with scipy's COBYLA.

    NOTE(review): ``meq`` is accepted for call-compatibility with
    quadprog.solve_qp but is ignored here -- equality constraints are not
    enforced by this fallback solver.
    """
    def objective(x):
        return 0.5 * np.dot(np.dot(x, G), x) - np.dot(a, x)

    constraints = []
    if C is not None and b is not None:
        # One scalar inequality per column of C; the loop variables are
        # bound as lambda defaults so each closure keeps its own index.
        constraints = [
            {'type': 'ineq',
             'fun': lambda x, C=C, b=b, i=i: (np.dot(C.T, x) - b)[i]}
            for i in range(C.shape[1])
        ]
    return scipy.optimize.minimize(
        objective, x0=np.zeros(len(G)), method='COBYLA',
        constraints=constraints, tol=1e-10)
def scalar_product(w, x):
    """Return the dot product of w and x (paired by index over len(w))."""
    return sum(w[i] * x[i] for i in range(len(w)))
def update_weights(w, datapoint):
    """Perceptron update: w_i + y * x_i for each weight, where the label
    y is the last entry of ``datapoint``. Returns a new list."""
    y = datapoint[-1]
    return [w[i] + y * datapoint[i] for i in range(len(w))]
def perceptron(training_data):
    """Perceptron learning algorithm (PLA).

    Starts from the all-zero weight vector. On each pass it collects every
    misclassified point (sign(w.x) * y <= 0), picks one uniformly at
    random and applies the perceptron update; it stops when a full pass
    finds no misclassified point, capped at 10000 passes.

    Each datapoint is (x0, ..., xk, label); returns (weights, passes).
    """
    weights = [0] * (len(training_data[0]) - 1)  # exclude the label entry
    passes = 0
    while passes < 10000:
        passes += 1
        misclassified = [point for point in training_data
                         if scalar_product(weights, point[:-1]) * point[-1] <= 0]
        if not misclassified:
            break
        pick = random.randint(0, len(misclassified) - 1)
        weights = update_weights(weights, misclassified[pick])
    return weights, passes
#test_set: (x0, x1, x2, label)
def calculate_err_out_pla(test_set_w_labels, w):
    """Fraction of labeled points (x..., y) misclassified by weights w.

    A point counts as an error when sign(w.x) disagrees with its label
    (a product of exactly zero also counts as an error).
    """
    errors = sum(1 for point in test_set_w_labels
                 if scalar_product(w, point[:-1]) * point[-1] <= 0)
    return errors / float(len(test_set_w_labels))
#Minimize 1/2 x^T G x - a^T x
#Subject to C.T x >= b
'''
solve_qp(...)
Solve a strictly convex quadratic program
Minimize 1/2 x^T G x - a^T x
Subject to C.T x >= b
Parameters
----------
G : array, shape=(n, n)
matrix appearing in the quadratic function to be minimized
a : array, shape=(n,)
vector appearing in the quadratic function to be minimized
C : array, shape=(n, m)
matrix defining the constraints under which we want to minimize the
quadratic function
b : array, shape=(m), default=None
vector defining the constraints
meq : int, default=0
the first meq constraints are treated as equality constraints,
all further as inequality constraints (defaults to 0).
factorized : bool, default=False
If True, then we are passing :math:`R^{-1}` (where :math:`G = R^T R`)
instead of the matrix G in the argument G.
Returns
-------
x : array, shape=(n,)
vector containing the solution of the quadratic programming problem.
'''
def svm(dataset_w_labels):
    """Train a hard-margin linear SVM in 2-D by solving the dual QP with
    quadprog.

    The dual maximizes sum(alpha) - 1/2 alpha^T G alpha with
    G_ij = y_i * y_j * <x_i, x_j>, subject to y^T alpha = 0 (the single
    equality constraint, meq=1) and alpha_i >= 0. TINY_ADD is added to
    G's diagonal because quadprog requires a strictly positive definite
    matrix.

    Each row of ``dataset_w_labels`` is (x1, x2, label).

    Returns (w, c, alpha, sv_num): the 2-D weight vector, the intercept c
    recovered from a randomly chosen support vector, the full alpha
    vector, and the number of alphas above the 1/N^2 threshold.
    """
    #random.seed(100)
    N = len(dataset_w_labels)
    G = []
    a = []
    C_T = [] # N x m=N+1 (Y_T*alpha = 0 constraint + alpha_i>=0 for i[1..N])
    C_T.append([]) # stub for Y^T ; dim N, 1
    C2_T = []
    C2_T.append([])
    C2_T.append([]) # stub for minus_Y^T ; dim N, 1
    Y_T = []
    minus_Y_T = []
    b = [] #dim m=N+1
    # Build the Gram-style matrix G and the constraint rows in one pass.
    for i in range(N):
        row = []
        a.append(1.0)
        datapoint_i = dataset_w_labels[i]
        Y_T.append(datapoint_i[-1])
        minus_Y_T.append(-1*datapoint_i[-1])
        b.append(0.0)
        I = []
        for j in range(N):
            datapoint_j=dataset_w_labels[j]
            #print datapoint_i[-1], datapoint_j[-1]
            g_ij = datapoint_i[-1]*datapoint_j[-1]*scalar_product(datapoint_i[:-1], datapoint_j[:-1])
            # sly modification to make the G mtx positive definite:
            if i == j:
                g_ij += TINY_ADD
            row.append(g_ij)
            I.append(0)
        I[i] = 1
        G.append(row)
        C_T.append(I)
        C2_T.append(I)
    # First constraint row is the equality y^T alpha = 0 (meq=1 below).
    C_T[0] = Y_T
    C2_T[0] = Y_T
    C2_T[1] = minus_Y_T
    b.append(0.0)
    #for the "equality through 2 inequalities" case
    b2 = []
    b2.extend(b)
    b2.append(0.0)
    a = np.asarray(a, dtype=np.double)
    #print "a=1vec:", a
    G = np.array([np.array(gi, dtype=np.double) for gi in G])
    #print G
    #b = np.transpose(np.asarray(b, dtype=np.double))
    b = np.asarray(b, dtype=np.double)
    #print "b=0vec:", b
    C_T = np.array([np.array(ci, dtype=np.double) for ci in C_T])
    #print "C_before_T: \n", C_T
    C = np.transpose(C_T)
    #print C
    # quadprog: min 1/2 x^T G x - a^T x  s.t.  C.T x >= b, first meq rows
    # treated as equalities.
    alpha, f, xu, iters, lagr, iact = qp.solve_qp(G, a, C, b, meq = 1)
    '''
    print 'equality via 2 inequalities:'
    b2 = np.asarray(b2, dtype=np.double)
    #print b2
    C2 = np.transpose(np.array([np.array(ci, dtype=np.double) for ci in C2_T]))
    #print C2
    alpha, f, xu, iters, lagr, iact = qp.solve_qp(G, a, C2, b2)
    '''
    #print alpha, f, xu, iters, lagr, iact
    #print "alpha = ", alpha
    #print "lagr =", lagr
    #result = solve_qp_scipy(G, a, C2, b2, meq=0)
    #alpha2 = result.x
    #print "alpha2:", result.x
    # Recover w = sum(alpha_i * y_i * x_i) over the support vectors.
    w = [0, 0]
    sv_num = 0
    non_zero_ids = []
    #print "threshold = ", np.power(10.0, -2)
    print "threshold = ", 1/float(N*N)
    for i in range(N):
        datapoint_i = dataset_w_labels[i]
        "the sly modification, pt 2: because we get more non-zero alphas,"
        "we're discarding small values"
        #print "alpha, epsilon: ", alpha[i], 1/float(N*N)
        if alpha[i] > 1/float(N*N):
        #if alpha[i] > 1/float(N):
        #if alpha[i] > np.power(10.0, -2):
            #print i, alpha[i]
            non_zero_ids.append(i)
            sv_num+=1
            w[0] += alpha[i]*datapoint_i[-1]*datapoint_i[0]
            w[1] += alpha[i]*datapoint_i[-1]*datapoint_i[1]
    #print "Non zero SVs:", non_zero_ids
    #line W*X+c
    #taking first non-zero vec
    # Intercept from a support vector sv: y_sv * (w.x_sv + c) = 1.
    rand_idx = random.randint(0, len(non_zero_ids)-1)
    sv = dataset_w_labels[non_zero_ids[rand_idx]]
    #print sv
    #print w
    #print scalar_product(w, sv[:-1])
    #print 1/float(sv[-1])
    c = 1/float(sv[-1])-scalar_product(w, sv[:-1])
    #print c
    return w, c, alpha, sv_num
'''
Minimize 1/2 x^T P x + q^T x
subject to Gx <= h
Ax = b
from cvxopt import solvers
sol = solvers.qp(P,q,G,h)
That’s it! If you had A, b as well, you would call:
sol = solvers.qp(P,q,G,h,A,b)
'''
def svm_cvxopt(dataset_w_labels):
    """Train a hard-margin linear SVM in 2-D by solving the dual QP with
    cvxopt.

    cvxopt form: min 1/2 x^T P x + q^T x  s.t.  G x <= h, A x = b, with
    P_ij = y_i * y_j * <x_i, x_j> (TINY_ADD on the diagonal for positive
    definiteness), q = -1, G = -I with h = 0 (i.e. alpha >= 0), and
    A = y^T with b = 0.

    Each row of ``dataset_w_labels`` is (x1, x2, label).

    Returns (w, c, alpha, sv_num): the 2-D weight vector, the intercept c
    recovered from a randomly chosen support vector, the cvxopt alpha
    vector, and the number of alphas above the 1e-2 threshold (note: a
    fixed threshold here, unlike the 1/N^2 used in ``svm``).
    """
    #random.seed(100)
    N = len(dataset_w_labels)
    P = []
    q = []
    G = [] # N x N (alpha_i>=0 for i[1..N])
    Y = []
    h = [] #dim N
    b = [0.0] #Ax = b => Y_T*x = 0
    for i in range(N):
        row = []
        q.append(-1.0)
        datapoint_i = dataset_w_labels[i]
        Y.append([datapoint_i[-1]])
        h.append(0.0)
        I = []
        for j in range(N):
            datapoint_j=dataset_w_labels[j]
            #print datapoint_i[-1], datapoint_j[-1]
            p_ij = datapoint_i[-1]*datapoint_j[-1]*scalar_product(datapoint_i[:-1], datapoint_j[:-1])
            ' sly modification to make the P mtx positive definite:'
            if i == j:
                p_ij += TINY_ADD
            row.append(p_ij)
            I.append(0.0)
        I[i] = -1.0
        P.append(row)
        #inequality condition matrix
        G.append(I)
    q=cvxopt.matrix(q, tc='d')
    #print "q=-1vec:", q
    P = cvxopt.matrix(P, tc='d')#np.array([np.array(p_i, dtype=np.double) for p_i in P])
    #print P
    h = cvxopt.matrix(h, tc='d')
    #print "h=0vec:", h
    G = cvxopt.matrix(G, tc='d')
    #print "Ineq constr mtx: \n", G
    A = cvxopt.matrix(Y, tc='d')
    #print "A vec: ", A
    b = cvxopt.matrix(b, tc='d')
    #print "b =", b
    solution = cvxopt.solvers.qp(P,q,G,h,A,b)
    alpha = solution['x']
    #print alpha
    # Recover w = sum(alpha_i * y_i * x_i) over the support vectors.
    w = [0, 0]
    sv_num = 0
    non_zero_ids = []
    for i in range(N):
        datapoint_i = dataset_w_labels[i]
        "the sly modification, pt 2: because we get more non-zero alphas,"
        "we're discarding small values"
        #if alpha[i] > 1/float(N*N):
        if alpha[i] > np.power(10.0, -2):
        #if alpha[i] > 0:
            #print i, alpha[i]
            non_zero_ids.append(i)
            sv_num+=1
            w[0] += alpha[i]*datapoint_i[-1]*datapoint_i[0]
            w[1] += alpha[i]*datapoint_i[-1]*datapoint_i[1]
    print "Non zero SVs:", non_zero_ids
    'line W*X+c'
    'taking random non-zero SV'
    # Intercept from a support vector sv: y_sv * (w.x_sv + c) = 1.
    rand_idx = random.randint(0, len(non_zero_ids)-1)
    sv = dataset_w_labels[non_zero_ids[rand_idx]]
    #print sv
    #print w
    #print scalar_product(w, sv[:-1])
    #print 1/float(sv[-1])
    c = 1/float(sv[-1])-scalar_product(w, sv[:-1])
    #print c
    return w, c, alpha, sv_num
#test_set: (x1, x2, label)
def calculate_err_out_svm(test_set_w_labels, w, c):
    """Fraction of labeled points (x..., y) misclassified by the
    hyperplane w.x + c = 0 (a value of exactly zero counts as an error)."""
    errors = sum(1 for point in test_set_w_labels
                 if (scalar_product(w, point[:-1]) + c) * point[-1] <= 0)
    return errors / float(len(test_set_w_labels))
'''
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y)
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
'''
def svm_skt(dataset_w_labels):
    """Fit a hard-margin (C=inf) linear SVM with scikit-learn's SVC.

    Each row of ``dataset_w_labels`` is split into features (all but the
    last entry) and label (last entry). Returns the fitted classifier.
    """
    features = [row[:-1] for row in dataset_w_labels]
    labels = [row[-1] for row in dataset_w_labels]
    model = SVC(C=float("inf"), kernel='linear')
    model.fit(features, labels)
    return model
def calculate_error_out_svm_skt(test_set_w_labels, model):
    """Fraction of labeled points misclassified by ``model``.

    ``model.predict`` is called per point on the feature part (all but the
    last entry); a prediction whose product with the label is <= 0 counts
    as an error.
    """
    mistakes = sum(1 for point in test_set_w_labels
                   if model.predict([point[:-1]]) * point[-1] <= 0)
    return mistakes / float(len(test_set_w_labels))
def experiment(num_exp_runs, tr_set_size, test_set_size):
err_avg_pla = 0
err_avg_svm = 0
svm_better_pla_num = 0
sv_num_avg = 0
for j in range(num_exp_runs):
#print "==========================="
#print "Running experiment #", j+1
print | |
"type" : {
"basetype" : "Enumeration",
"telnet" : {
"nodetype" : "namednumber",
"number" : "1"
},
"ssh" : {
"nodetype" : "namednumber",
"number" : "2"
},
"ftp" : {
"nodetype" : "namednumber",
"number" : "3"
},
"http" : {
"nodetype" : "namednumber",
"number" : "4"
},
"https" : {
"nodetype" : "namednumber",
"number" : "5"
},
"icmp" : {
"nodetype" : "namednumber",
"number" : "6"
},
"snmp" : {
"nodetype" : "namednumber",
"number" : "7"
},
},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"accessCtlEnable" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.18.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"accessCtlServicePort" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.18.1.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"accessCtlTimeout" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.18.1.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"securedClientTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.18.2",
"status" : "current",
"description" :
"""""",
}, # table
"securedClientEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.18.2.1",
"status" : "current",
"linkage" : [
"securedClientIndex",
],
"description" :
"""An entry in securedClientTable.""",
}, # row
"securedClientIndex" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.18.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"securedClientEnable" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.18.2.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"securedClientStartIp" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.18.2.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"securedClientEndIp" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.18.2.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"securedClientService" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.18.2.1.5",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Bits",
"telnet" : {
"nodetype" : "namednumber",
"number" : "0"
},
"ftp" : {
"nodetype" : "namednumber",
"number" : "1"
},
"http" : {
"nodetype" : "namednumber",
"number" : "2"
},
"icmp" : {
"nodetype" : "namednumber",
"number" : "3"
},
"snmp" : {
"nodetype" : "namednumber",
"number" : "4"
},
"ssh" : {
"nodetype" : "namednumber",
"number" : "5"
},
"https" : {
"nodetype" : "namednumber",
"number" : "6"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"queuingMethodSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.19",
}, # node
"portQueuingMethodTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.19.1",
"status" : "current",
"description" :
"""""",
}, # table
"portQueuingMethodEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.19.1.1",
"status" : "current",
"linkage" : [
"dot1dBasePort",
"portQueuingMethodQueue",
],
"description" :
"""An entry in portQueuingMethodTable.""",
}, # row
"portQueuingMethodQueue" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.19.1.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""0...7""",
}, # column
"portQueuingMethodWeight" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.19.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""0...15""",
}, # column
"dhcpSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.20",
}, # node
"globalDhcpRelay" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.20.1",
}, # node
"globalDhcpRelayEnable" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.20.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"globalDhcpRelayOption82Enable" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.20.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"globalDhcpRelayInfoEnable" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.20.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"globalDhcpRelayInfoData" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.20.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"maxNumberOfGlobalDhcpRelayRemoteServer" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.20.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"globalDhcpRelayRemoteServerTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.172.16.31.10.20.1.6",
"status" : "current",
"description" :
"""""",
}, # table
"globalDhcpRelayRemoteServerEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.1.5.8.20.20.1.6.1",
"create" : "true",
"status" : "current",
"linkage" : [
"globalDhcpRelayRemoteServerIp",
],
"description" :
"""An entry in globalDhcpRelayRemoteServerTable.""",
}, # row
"globalDhcpRelayRemoteServerIp" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.1.5.8.20.20.1.6.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"globalDhcpRelayRemoteServerRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.172.16.31.10.20.1.6.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"dhcpServer" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.1.5.8.20.20.2",
}, # node
"maxNumberOfDhcpServers" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.1.5.8.20.20.2.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The maximum number of DHCP server entries that can be created.
A value of 0 for this object implies that there exists settings for
global DHCP relay.""",
}, # scalar
"dhcpServerTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.1.5.8.20.20.2.2",
"status" : "current",
"description" :
"""""",
}, # table
"dhcpServerEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.1.5.8.20.20.2.2.1",
"create" : "true",
"status" : "current",
"linkage" : [
"dhcpServerVid",
],
"description" :
"""An entry in dhcpServerTable.""",
}, # row
"dhcpServerVid" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.20.2.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"dhcpServerStartAddr" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.172.16.31.10.20.2.2.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"dhcpServerPoolSize" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.20.2.2.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"dhcpServerMask" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.20.2.2.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"dhcpServerGateway" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.172.16.31.10.20.2.2.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"dhcpServerPrimaryDNS" : {
| |
"""
PVSource.py
Author: <NAME>, Array Lead (2020).
Contact: <EMAIL>
Created: 11/14/20
Last Modified: 03/07/21
Description: Implementation of the PVEnvironment class.
"""
# Library Imports.
import json
import jsbeautifier
# Custom Imports.
class PVEnvironment:
"""
The PVEnvironment class is a concrete base class that manages the
environmental conditions received by the PVSource at any cycle. In fact, it
manages the cycle time of the entire simulation, and outputs the
environmental conditions based on that cycle. It has the ability to extract
environmental regimes from JSON files, as well as generate a unit step
function with a fixed irradiance and temperature for steady state behavior
testing.
"""
# The smallest cycle number the simulation can be.
MIN_CYCLES = 0
# A dictionary referencing module_type strings to the number of cells.
_cellDefinitions = {"1x1": 1, "1x2": 2, "2x2": 4, "2x4": 8}
# Where all lookup files are located.
_fileRoot = "./External/"
def __init__(self):
    # Intentionally empty: all state (_cycle, _maxCycle, _source, ...) is
    # created lazily in setupModel(), so a PVEnvironment can be constructed
    # before the source model is chosen.
    pass
def setupModel(self, source=(1, 1000, 25), maxCycles=200):
"""
Sets up the initial source parameters.
Parameters
----------
source: Union -> tuple `(1, 1000, 255)` or string `single_cell.json`
Specifies how and/or where the source model is defined and its
environmental regime over time. It checks for either a tuple of
initial conditions (Step response mode) or a string pointing to a
JSON file in 'External/'. Step response mode can only performed with
a single module of arbitrary cell length.
The method builds a data model of the modules in the PVSource and
a mapping of their environmental regime to return on demand.
A tuple may only have 1, 2, 4, or 8 cells in the step response.
maxCycles: int
Maximum number of cycles our environment should extend to.
Return
------
bool: True for success, False elsewise. Upon encountering an exception,
the PVEnvironment source becomes None.
"""
# Current cycle of the PVEnvironment. Dictates what environmental
# conditions come out at the time. Adjustable.
self._cycle = PVEnvironment.MIN_CYCLES
# Maximum cycle in the environment. We extrapolate data up to this point.
self._maxCycle = maxCycles
# Reference to the dictionary containing the environmental properties for
# each module in the PVSource.
try:
if isinstance(source, str):
# Source file input.
self._sourceFile = source
# Check for relevant filename at /External/
self._source = json.load(open(PVEnvironment._fileRoot + source))
return True
# TODO: validate whether the header matches.
elif isinstance(source, tuple):
self._source = {
"name": "Single String Model.",
"description": str(source[0])
+ " cell(s) in series. "
+ "Emulates a step function with irradiance "
+ str(source[1])
+ " and temperature "
+ str(source[2])
+ " for t => 0.",
"num_modules": 1,
"pv_model": {
"0": {
# This is a bit of annoying code that takes our
# numCells, converts it into the right key
# (i.e. "1x1"), which is THEN later used to convert
# it back into the number of cells per module.
#
# We want the keyed version because in the event we
# eventually want to save our modules definition
# into a JSON file.
#
# Of course, this limits the amount of cell options
# to 1, 2, 4, or 8 cells.
"module_type": list(PVEnvironment._cellDefinitions.keys())[
list(PVEnvironment._cellDefinitions.values()).index(
source[0]
)
],
"env_type": "Step",
"needs_interp": False,
"env_regime": [source[1], source[2]],
}
},
}
return True
else:
raise Exception(
"Invalid source. Currently supported types are a "
+ "properly formatted JSON file or a step response tuple in "
+ "the format (irradiance, temperature)."
)
except Exception as e:
print(e)
self._source = None
return False
def getCycle(self):
"""
Returns the current cycle of the environment.
Return
------
int: Current cycle.
"""
return self._cycle
def setCycle(self, cycle):
"""
Sets the internal cycle of the PVEnvironment. Cannot be larger than max
cycle.
Parameters
----------
cycle: int
The current moment in time the environment should be set to.
Return
------
bool: Whether cycle was successfully incremented or not.
"""
if PVEnvironment.MIN_CYCLES <= cycle and cycle <= self._maxCycle:
self._cycle = cycle
return True
else:
print(
"We can never have a negative cycle in the PVEnvironment, nor "
+ "can we exceed the maximum cycles defined at initialization. "
+ "As such, the current cycle is not changed."
)
return False
def incrementCycle(self):
"""
Cycles the internal clock once. Halts the clock when the max cycle is
reached.
Return
------
bool: Whether cycle was successfully incremented or not.
"""
if self._cycle < self._maxCycle:
self._cycle += 1
return True
return False
def getModuleDefinition(self, moduleName, voltage):
    """
    Gets the module definition of a specific module at the current cycle.
    The module definition is in the following format:
    moduleDef = {
        "numCells": int,
        "voltage": float, (V)
        "irradiance": float, (W/m^2)
        "temperature": float, (C)
    }
    Parameters
    ----------
    moduleName: String
        Key to the source dictionary that corresponds to the module
        selected. The moduleDef of this module is constructed and returned.
    voltage: float
        Voltage across the module in Volts.
    Returns
    -------
    dict: moduleDef
        A dictionary of the selected module's properties.
    Throws an exception for non existent modules and invalid module types.
    If the entry in the env_regime does not exist for the module, this
    method will perform a comprehensive interpolation for the profile up
    until the max cycle.
    """
    module = self._source["pv_model"].get(moduleName)
    if module is not None:
        if module["env_type"] == "Array":
            # NOTE(review): "needs_interp" starts False and is set True
            # below once interpolation has been performed — it acts as an
            # "interpolation done" flag despite the name. The second clause
            # re-triggers interpolation when the entry stored at index
            # _cycle is not keyed by _cycle (regime not yet dense) —
            # confirm intent with the regime file format.
            if (
                module["needs_interp"] == False
                or module["env_regime"][self._cycle][0] != self._cycle
            ):
                # Take the current and next entry and add all interpolations
                # to a new list. Regime entries are
                # [cycle, irradiance, temperature] keyframes.
                events = []
                for (idx, event) in enumerate(module["env_regime"][0:-1]):
                    currEvent = event
                    nextEvent = module["env_regime"][
                        (idx + 1) % len(module["env_regime"])
                    ]
                    # Linear slopes between consecutive keyframes.
                    numEntries = nextEvent[0] - currEvent[0]
                    slopeIrrad = (nextEvent[1] - currEvent[1]) / numEntries
                    slopeTemp = (nextEvent[2] - currEvent[2]) / numEntries
                    # NOTE(review): this inner loop reuses `idx` from the
                    # enumerate above; harmless (the outer value is not
                    # read again) but fragile.
                    for idx in range(currEvent[0], nextEvent[0]):
                        events.append(
                            [
                                idx,
                                currEvent[1] + slopeIrrad * (idx - currEvent[0]),
                                currEvent[2] + slopeTemp * (idx - currEvent[0]),
                            ]
                        )
                # Append the last event.
                events.append(module["env_regime"][-1])
                # Write the last interpolated event for all cycles extending
                # to max_cycles.
                lastEvent = events[-1]
                for idx in range(lastEvent[0] + 1, self._maxCycle + 1):
                    events.append([idx, lastEvent[1], lastEvent[2]])
                # Cache the densified regime in place so subsequent calls
                # (and other modules sharing this dict) skip interpolation.
                module["env_regime"] = events
                module["needs_interp"] = True
            # Get current model conditions.
            envConditions = module["env_regime"][self._cycle]
            # An array of size 2 is returned.
            return {
                "numCells": PVEnvironment._cellDefinitions[module["module_type"]],
                "voltage": voltage,
                "irradiance": envConditions[1],
                "temperature": envConditions[2],
            }
        elif module["env_type"] == "Step":
            # Step regime: a constant [irradiance, temperature] pair,
            # independent of the current cycle.
            return {
                "numCells": PVEnvironment._cellDefinitions[module["module_type"]],
                "voltage": voltage,
                "irradiance": module["env_regime"][0],
                "temperature": module["env_regime"][1],
            }
        else:
            raise Exception("Undefined environment type " + module["env_type"])
    else:
        raise Exception(
            "Module does not exist in PVEnvironment with the name " + moduleName
        )
def getSourceDefinition(self, voltage):
"""
Gets the source definition at the current cycle.
The modules definition is in the following format:
modulesDef = {
"0": {
"numCells": int,
"voltage": float, (V)
"irradiance": float, (W/m^2)
"temperature": float, (C)
},
...
}
Parameters
----------
voltage: float
Voltage across the module in Volts.
Returns
-------
dict: modulesDef
A dictionary of the source properties.
Throws an exception for non existent modules and invalid module types.
"""
modulesDef = {}
modules = self._source["pv_model"]
for key in modules.keys():
modulesDef[key] = self.getModuleDefinition(key, voltage)
return modulesDef
def getModuleNumCells(self, moduleName):
    """
    Gets the module num cells given the module name.
    Parameters
    ----------
    moduleName: String
        Key to the source dictionary that corresponds to the module
        selected.
    Returns
    -------
    int: Number of cells in series within this module.
    """
    # NOTE(review): getModuleMapping() is defined outside this view;
    # presumably it maps module names to their "module_type" strings
    # (e.g. "1x2") since the result is used as a _cellDefinitions key —
    # confirm against the rest of the class.
    modulesDict = self.getModuleMapping()
    return PVEnvironment._cellDefinitions[modulesDict[moduleName]]
def getSourceNumCells(self):
    """
    Gets the total num cells in the array.
    Returns
    -------
    int: Number of cells in series within the entire array (the sum of the
    per-module cell counts).
    """
    return sum(
        self.getModuleNumCells(name) for name in self._source["pv_model"]
    )
def getModuleEnvironmentDefinition(self, moduleName):
"""
A stripped down version of getModuleDefinition. Returns just the
environment definition of the module referenced.
The environment definition is in the following format:
envDef = {
"irradiance": float, (W/m^2)
"temperature": float, (C)
}
Parameters
----------
moduleName: String
Key to the source dictionary that corresponds to the module
selected. The moduleDef of this module is constructed and returned.
Returns
-------
dict: moduleDef
A dictionary of the source environment properties.
Throws an exception for non existent modules and | |
pitch stab
# xxxxxx0x no yaw stab, xxxxxx1x yaw stab
returns: rslt, numpy array of strings containing the translated yawpitch values
"""
rslt = np.full(arr.shape, 'N', dtype='U2')
first_bit_chk = np.bitwise_and(arr, (1 << 0)).astype(bool)
sec_bit_chk = np.bitwise_and(arr, (1 << 1)).astype(bool)
rslt[np.intersect1d(np.where(first_bit_chk), np.where(sec_bit_chk))] = 'PY'
rslt[np.intersect1d(np.where(first_bit_chk), np.where(sec_bit_chk == False))] = 'P'
rslt[np.intersect1d(np.where(first_bit_chk == False), np.where(sec_bit_chk))] = 'Y'
return rslt
def translate_mode_tostring(self, arr):
    """
    Translate the binary code to a string identifier (for MRZ pulseForm). Allows user to understand the mode
    without translating the integer code in their head. Kluster will build plots using these string identifiers
    in the legend.

    Codes: 0 = 'CW' (continuous waveform), 1 = 'MIX' (both), 2 = 'FM'
    (frequency modulated); any other value falls back to 'MIX'.

    returns: rslt, numpy array of strings containing the translated mode values
    """
    # Start from the fallback label and overwrite the known codes.
    labels = np.full(arr.shape, 'MIX', dtype='U3')
    labels[arr == 0] = 'CW'
    labels[arr == 1] = 'MIX'
    labels[arr == 2] = 'FM'
    return labels
def translate_mode_two_tostring(self, arr):
    """
    Translate the binary code to a string identifier (for MRZ depthMode). Allows user to understand the mode
    without translating the integer code in their head. Kluster will build plots using these string identifiers
    in the legend.

    0 = VS, 1 = SH, 2 = ME, 3 = DE, 4 = DR, 5 = VD, 6 = ED, 7 = XD
    if mode is manually selected, there will be an 'm' in front (ex: VSm)

    returns: rslt, numpy array of strings containing the translated mode_two values
    """
    depth_modes = {0: 'VS', 1: 'SH', 2: 'ME', 3: 'DE', 4: 'DR', 5: 'VD', 6: 'ED', 7: 'XD'}
    # Unknown codes stay as the empty string ('' is the np.zeros U3 default).
    rslt = np.zeros(arr.shape, dtype='U3')
    for code, label in depth_modes.items():
        rslt[arr == code] = label
        # Manual selection is encoded as code + 100 and suffixed with 'm'.
        rslt[arr == (code + 100)] = label + 'm'
    return rslt
def translate_runtime_parameters_todict(self, r_text):
    """
    runtime parameters text comes from file as a string with carriage retuns between entries.

    ex: '"\\nSector coverage\\nMax angle Port: 70.0\\nMax angle Starboard: 70.0\\nMax coverage Port: ..."'

    we want a dictionary of key: value pairs so we can save them as an xarray attribute and read them as a dict
    whenever we need to access. Also, we translate the keys to something more human readable. The translated
    key names will match up with .all files read with par module as well, so there is some cross compatibility (useful
    for Kluster multibeam processing)

    returns: translated, dict of translated runtime parameters and values
    """
    translated = {}
    for entry in r_text.split('\n'):
        # valid entries look like 'key: value'; the rest are headers or blank
        if entry and (entry.find(':') != -1):
            # Fix: split only on the FIRST colon so values that themselves
            # contain ':' (e.g. times like '10:30:00') no longer raise
            # ValueError from tuple unpacking.
            key, value = entry.split(':', 1)
            translated[key] = value.strip()
    return translated
def _translate_sonar_model_number(self, settings: dict, model_number: str):
sonar_translator = {'em2040': [None, 'tx', 'rx', None],
'em2040_dual_rx': [None, 'tx', 'rx_port', 'rx_stbd'],
'em2040_dual_tx': ['tx_port', 'tx_stbd', 'rx_port', None],
'em2040_dual_tx_rx': ['tx_port', 'tx_stbd', 'rx_port', 'rx_stbd'],
# EM2040c is represented in the .all file as em2045
'em2045': [None, 'txrx', None, None],
'em2045_dual': [None, 'txrx_port', 'txrx_stbd', None]}
possibles = [sonar for sonar in list(sonar_translator.keys()) if sonar.find(model_number) > -1]
if len(possibles) <= 1: # not a potential dual head system
return model_number
else:
# get here for all the 2040 variants
offs = ['transducer_0_along_location', 'transducer_1_along_location', 'transducer_2_along_location',
'transducer_3_along_location']
srch_offsets = [(off in settings) for off in offs]
for poss in possibles:
off_test = [(lvr is not None) for lvr in sonar_translator[poss]]
if off_test == srch_offsets:
return poss
#print('Unable to determine sonar model from {}'.format(model_number))
return model_number
def translate_installation_parameters_todict(self, i_text):
    """
    installation parameters text comes from file as a comma delimited string with mix of = and ; separating the
    key/value pairs

    ex: 'SCV:Empty,EMXV:EM2040P,\nPU_0,\nSN=53011,\nIP=172.16.31.10:0xffff0000,\nUDP=1997,...'

    we want a dictionary of key: value pairs so we can save them as an xarray attribute and read them as a dict
    whenever we need to access. Also, we translate the keys to something more human readable. The translated
    key names will match up with .all files read with par module as well, so there is some cross compatibility (useful
    for Kluster multibeam processing)

    ex: {"operator_controller_version": "Empty", "multibeam_system": "EM2040P", "pu_id_type": "0",
    "pu_serial_number": "53011", "ip_address_subnet_mask": "172.16.31.10:0xffff0000",
    "command_tcpip_port": "1997",...}

    returns: translated, dict of translated installation parameters and values
    """
    # Top-level (non-device) key prefixes -> readable names.
    translate_install = {'SCV:': 'operator_controller_version', 'EMXV:': 'sonar_model_number', 'PU_': 'pu_id_type',
                         'SN=': 'pu_serial_number', 'IP=': 'ip_address_subnet_mask', 'UDP=': 'command_tcpip_port',
                         'TYPE=': 'cpu_type', 'DCL:': 'dcl_version', 'KMALL:': 'kmall_version',
                         'SYSTEM:': 'system_description', 'EMXI:SWLZ=': 'waterline_vertical_location'}
    # Alternate translators used only inside the VERSIONS:/SERIALno: sections.
    translate_versions = {'CPU:': 'cpu_software_version', 'VXW:': 'vxw_software_version',
                          'FILTER:': 'filter_software_version', 'CBMF:': 'cbmf_software_version',
                          'TX:': 'tx_software_version', 'RX:': 'rx_software_version'}
    translate_serial = {'TX:': 'tx_serial_number', 'RX:': 'rx_serial_number'}
    # device translator will use the device identifier plus the values here, ex: 'TRAI_HD1' + '_serial_number'
    translate_device_ident = {'ATTI_1': 'motion_sensor_1', 'ATTI_2': 'motion_sensor_2', 'ATTI_3': 'motion_sensor_3',
                              'POSI_1': 'position_1', 'POSI_2': 'position_2', 'POSI_3': 'position_3',
                              'CLCK': 'clock', 'SVPI': 'sound_velocity_1', 'TRAI_HD1': 'transducer_1', 'DPHI': 'depth_pressure'}
    translate_device = {'N=': '_serial_number', 'X=': '_along_location', 'Y=': '_athwart_location',
                        'Z=': '_vertical_location', 'R=': '_roll_angle', 'P=': '_pitch_angle',
                        'H=': '_heading_angle', 'S=': '_sounder_size_deg',
                        'V=': '_version', 'W=': '_system_description', 'IPX=': '_port_sector_forward',
                        'IPY=': '_port_sector_starboard', 'IPZ=': '_port_sector_down',
                        'ICX=': '_center_sector_forward', 'ICY=': '_center_sector_starboard',
                        'ICZ=': '_center_sector_down', 'ISX=': '_starboard_sector_forward',
                        'ISY=': '_starboard_sector_starboard', 'ISZ=': '_starboard_sector_down',
                        'IX=': '_internal_offset_forward', 'IY=': '_internal_offset_starboard',
                        'IZ=': '_internal_offset_down',
                        'ITX=': '_tx_forward', 'ITY=': '_tx_starboard', 'ITZ=': '_tx_down',
                        'IRX=': '_rx_forward', 'IRY=': '_rx_starboard', 'IRZ=': '_rx_down', 'D=': '_time_delay',
                        'G=': '_datum', 'T=': '_time_stamp', 'C=': '_motion_compensation', 'F=': '_data_format',
                        'Q=': '_quality_check', 'I=': '_input_source', 'U=': '_active_passive',
                        'M=': 'motion_reference', 'A=': '_1pps', 'O=': '_offset'}
    # split by comma delimited groups
    records = [i_text.split(',') for i_text in i_text.split('\n')]
    # subgroups are semicolon delimited
    # ex: TRAI_HD1:N=218;X=-0.293;Y=0.000;Z=0.861;R=0.496...
    records_flatten = [r.split(';') for rec in records for r in rec if r]
    translated = {}
    translate = translate_install
    for rec in records_flatten:  # check for dual head and modify the translation to get the kluster standard convention
        if rec[0][:8] == 'TRAI_RX2' or rec[0][:8] == 'TRAI_TX2':
            translated_device_ident_update = None  # noqa placeholder comment removed
            break
    # (the loop above is re-stated verbatim below; see NOTE)
    for rec in records_flatten:  # check for dual head and modify the translation to get the kluster standard convention
        if rec[0][:8] == 'TRAI_RX2' or rec[0][:8] == 'TRAI_TX2':
            translate_device_ident['TRAI_TX1'] = 'transducer_0'
            translate_device_ident['TRAI_TX2'] = 'transducer_1'
            translate_device_ident['TRAI_RX1'] = 'transducer_2'
            translate_device_ident['TRAI_RX2'] = 'transducer_3'
            break
    # Tracks the previous record so header-less continuation groups (see
    # below) can inherit their device prefix.
    past_rec = []
    for rec in records_flatten:
        # subgroups are parsed here, first rec contains the prefix
        # ex: ['ATTI_1:X=0.000', 'Y=0.000', 'Z=0.000', 'R=0.000', 'P=0.000', 'H=0.000', 'D=0.000'...
        if len(rec) > 1:
            if rec[0] == '' and past_rec:
                # they tacked on the phase center offsets without a header for some reason
                # ex: ['', 'IPX=0.0000', 'IPY=-0.05540', 'IPZ=-0.01200', 'ICX=0.00000', 'ICY=0.01315', 'ICZ=-0.00600', 'ISX=0.00000', 'ISY=0.05540', 'ISZ=-0.01200']
                if past_rec[0][:4] == 'TRAI':
                    rec[0] = past_rec[0]
                else:
                    print('unable to read from IIP block {}'.format(rec))
                    continue
            prefix, first_rec = rec[0].split(':')
            try:
                prefix = translate_device_ident[prefix]
            except KeyError:  # if its a prefix we haven't seen before, just pass it through
                pass
            try:
                ky, data = first_rec.split('=')
            except ValueError:  # if there is no equals sign, it must be some data that we don't want
                continue
            translated[prefix + translate_device[ky + '=']] = data
            for subrec in rec[1:]:
                ky, data = subrec.split('=')
                translated[prefix + translate_device[ky + '=']] = data
        # regular groups parsed here, use the headers to determine which translator to use
        # ex: ['CBMF:1.11 18.02.20 ']
        else:
            if rec[0] == 'VERSIONS:':
                translate = translate_versions
                continue
            elif rec[0] == 'SERIALno:':
                translate = translate_serial
                continue
            elif rec[0] in ['VERSIONS-END', 'SERIALno-END']:
                translate = translate_install
                continue
            elif rec[0][-7:] == 'NOT_SET':
                continue
            key = [trans_key for trans_key in translate if rec[0].find(trans_key) != -1]
            if len(key) == 0:
                print('Unable to parse {}'.format(rec))
            elif len(key) == 1:
                translated[translate[key[0]]] = rec[0][len(key[0]):].rstrip()
            else:
                raise ValueError('Found multiple entries valid for record {}:{}'.format(rec, key))
        past_rec = rec
    # plug in new keys for active position/motion sensor needed for kluster to identify the right sensor
    for mot_sens in ['motion_sensor_1_active_passive', 'motion_sensor_2_active_passive',
                     'motion_sensor_3_active_passive']:
        if mot_sens in translated:
            if translated[mot_sens] in ['ACTIVE', 'ACTIVE_VEL']:
                translated['active_heading_sensor'] = 'motion_' + mot_sens[14]  # 'motion_1' in most cases
    for pos_sens in ['position_1_active_passive', 'position_2_active_passive', 'position_3_active_passive']:
        if pos_sens in translated:
            if translated[pos_sens] == 'ACTIVE':
                translated['active_position_system_number'] = 'position_' + pos_sens[9]  # 'position_1'
    translated['sonar_model_number'] = self._translate_sonar_model_number(translated, translated['sonar_model_number'].lower())
    return translated
def fast_read_start_end_time(self):
"""
Get the start and end time for the file without mapping the file
returns: list, [UTC | |
import numpy as np
from PTSS import PtssJoint as ptssjnt
from PTSS import Ptss as ptss
from CFNS import CyFns as cfns
# define the 4th order Runge-Kutta algorithm.
class RK4:
    """Fourth-order Runge-Kutta style single-step integrator."""

    def rk4(self, y0, dy, step):
        """Advance y0 by one step of size `step` given the slope value dy.

        NOTE(review): the intermediate slopes feed back the previous k-term
        directly rather than re-evaluating a derivative function, so this
        matches classical RK4 only for the update scheme the callers use —
        confirm against the model equations.
        """
        slope1 = step * dy
        slope2 = step * (dy + 1 / 2 * slope1)
        slope3 = step * (dy + 1 / 2 * slope2)
        slope4 = step * (dy + slope3)
        return y0 + 1 / 6 * (slope1 + 2 * slope2 + 2 * slope3 + slope4)
# list the functions used in the modules
class FNS:
def __init__(self):
    # NOTE(review): CyFns comes from the project CFNS module (semantics not
    # visible here); held as an attribute so callers share one instance.
    self.cfns = cfns()
# ____________________________________________________________________________________________________________
# Common Functions
# threshold at some value
def thresh_fn(self, x, thresh):
return np.sign(x - thresh) * (x - thresh) * self.indic_fn(x - thresh)
# bound within some interval
def bound_fn(self, x, thresh):
rightbd = np.heaviside(thresh - x, 0)
out = x * rightbd + thresh * (rightbd + 1) % 2
leftbd = np.heaviside(out - -thresh, 0)
out = out * leftbd + -thresh * ((leftbd + 1) % 2)
return out
# cutoff outside some interval
def cutoff_fn(self, x, thresh):
    """Pass values with |x| strictly beyond thresh; zero everything inside."""
    above = np.heaviside(x - thresh, 0)
    below = np.heaviside(-x - thresh, 0)
    # The two masks are disjoint, so one combined multiply suffices.
    return x * (above + below)
# check at some value
def delta_fn(self, x, a):
    """Kronecker-style check: 1 if every element of x equals a, else 0."""
    return 1 if np.all(x == a) else 0
# check within some interval
def cond_fn(self, x, a):
    """Indicator of the open interval (-a, a): 1 strictly inside, 0 outside.

    Endpoints are excluded because np.heaviside(0, 0) evaluates to 0.
    """
    return np.heaviside(a - x, 0) * np.heaviside(x + a, 0)
# check at some index
def index_fn(self, j, i, b, a):
return 1 - self.delta_fn(j, b) * self.delta_fn(i, a)
# check sign at zero
def indic_fn(self, x):
    # Unit step: 1 for x > 0, 0 for x <= 0 (heaviside(x, 0) returns the
    # second argument at exactly zero).
    return np.heaviside(x, 0)
# binary sampling function
def sample_fn(self, x, thresh):
    # Binary sample: 1 where x > thresh, else 0 (a unit-amplitude copy of
    # the shifted indicator).
    return 1 * self.indic_fn(x - thresh)
# sigmoid sampling function
def sigmoid_fn(self, x, offset, power):
    """Hill-type sigmoid x^p / (offset^p + x^p); 0.5 at x == offset."""
    gain = x**power
    return gain / (offset**power + gain)
# enlarge array size
def enlarge(self, x, y, stride, num, type):
    """Scatter the small array y into the large array x at stride-spaced
    indices (x[a*s][b*s]... = y[a][b]...).

    Only every `stride`-th index of x is written; the other entries of x
    keep their previous contents. `type` is the number of relevant
    dimensions ('3' or '2'); any other value returns None.
    """
    coarse = range(0, num, stride)
    if type == '3':
        for a in coarse:
            for b in coarse:
                for c in coarse:
                    x[a][b][c] = y[a // stride][b // stride][c // stride]
        return x
    if type == '2':
        for a in coarse:
            for b in coarse:
                x[a][b] = y[a // stride][b // stride]
        return x
# shrink array size
def shrink(self, x, y, stride, num, type):
# given x is large and y is small and type is number of relevant dimensions
new_num = num // stride
if type == '3':
for a in range(0, new_num):
for b in range(0, new_num):
for c in range(0, new_num):
new_a = self.index_bound(a * stride, num)
new_b = self.index_bound(b * stride, num)
new_c = self.index_bound(c * stride, num)
y[a][b][c] = x[new_a][new_b][new_c]
return y
if type == '2':
for a in range(0, new_num):
for b in range(0, new_num):
new_a = self.index_bound(a * stride, num)
new_b = self.index_bound(b * stride, num)
y[a][b] = x[new_a][new_b]
return y
# bound index
def index_bound(self, x, size):
    """Clamp index x from above to the last valid position, size - 1."""
    return x if x < size else size - 1
# ____________________________________________________________________________________________________________
# EYES Module
# bound array index
def retmap_bound(self, x, size):
    """Clamp a retinal-map index into the valid range [0, size - 1]."""
    return min(max(x, 0), size - 1)
# check if maximal value of array is not at center
# check if maximal value of array is not at center
def fixate(self, gaz_map, size):
    # Template of the "fixated" gaze map.
    # NOTE(review): np.ones followed by setting the centre to 1 is a no-op,
    # so fix_map is all ones; if a one-hot centre template was intended,
    # this should start from np.zeros — confirm against callers.
    fix_map = np.ones((2, 2 * size, 2 * size))
    for s in range(2):
        fix_map[s][size, size] = 1
    # 0 when the gaze map equals the template (already fixated), else 1.
    if np.array_equal(fix_map, gaz_map) == True:
        return 0
    else:
        return 1
# compute difference btw agonist and antagonist for learning variables
def diff_mat(self, x, size):
mat = np.zeros((2, 2, 2, size, size))
for s in range(2):
for m in range(2):
mat[s][m] = x[s][m][0] - x[s][m][1], x[s][m][1] - x[s][m][0]
return mat
# check epoch within an interval in the forward direction
def forwd_period(self, t, T, interval):
    """1 during the first T ticks of t's enclosing interval, else 0."""
    base = (t // interval) * interval
    return 1 if base <= t < base + T else 0
# check epoch within an interval in the backward direction
def backw_period(self, t, T, interval):
    """1 during the last T ticks of t's enclosing interval, else 0."""
    end = (t // interval + 1) * interval
    return 1 if end - T <= t < end else 0
# list epochs within some interval
def intv_period(self, t, interval):
    """All integer ticks of t's enclosing interval, as a numpy range."""
    lower = (t // interval) * interval
    return np.arange(lower, lower + interval, 1)
# list epoch-value pairs within some interval
def add_error(self, z, t, interval):
range = self.intv_period(t, interval)
value = [z] * interval
add = [(x, y) for x, y in zip(range, value)]
return add
# check if equal to the zero array
def test_zero(self, x):
if np.array_equal(x, np.zeros(x.shape)):
return 1
else:
return 0
# extract index of maximal value for an array centered at zero
def argmax(self, x, size):
if self.test_zero(x) != 1:
out = np.array(np.unravel_index(np.argmax(x), x.shape)) - size # format is (height, width)
return out
else:
return np.zeros(2)
# populate in a neighborhood around the given index
def arrmax(self, max, size):
    """Populate a gradient neighbourhood around each side's peak index.

    For each side s, `max[s]` holds the (row, col) peak; the project Ptss
    helper supplies both the neighbourhood bounds and the gradient values.
    """
    helper = ptss(2 * size, 2 * size)
    out = np.zeros((2, 2 * size, 2 * size))
    for s in range(2):
        b_max, a_max = np.array(max[s], dtype=int)
        bound = helper.ptss_bound(b_max, a_max, 2 * size, 2 * size, '2')
        for b in bound[0]:
            for a in bound[1]:
                out[s][b][a] = helper.ptss_gradient(b, a, b_max, a_max, '2')
    return out
# compute sum btw agonist and antagonist
def sum_mus(self, x):
    """Total activity per side: sums x[0] and x[1] into a float pair."""
    return np.array([np.sum(x[s]) for s in range(2)], dtype=float)
# extract agonist
def extract_ang(self, x):
    """First (agonist) component of each of the three coordinates of x."""
    return np.array([x[k][0] for k in range(3)], dtype=float)
# convert normalized activity into angle for eye variables
def conv_targ(self, x):
# for eye movement and representation
ang_rang = 1.0 * np.radians([-45, 45])
dist_rang = 1.0 * np.array((5, 50))
# -----------------------------------------------------------------------------------------------------------
# for eye-leg/eye-hand coordination
#ang_rang = 2.0 * np.radians([-45, 45])
#dist_rang = 2.5 * np.array((5, 50))
# -----------------------------------------------------------------------------------------------------------
est = self.extract_ang(x)
horz = ang_rang[0] + (ang_rang[1] - ang_rang[0]) * est[0]
vert = ang_rang[0] + (ang_rang[1] - ang_rang[0]) * est[1]
a = dist_rang[0] / (dist_rang[1] - dist_rang[0])
b = (1 + a) * dist_rang[0]
dist = b / (a + est[2])
return np.array((np.degrees(horz), np.degrees(vert), dist))
# parse eye variables
def parse_eye(self, mus, size):
    """Parse the left/right eye muscle pairs into (vert, horz) index pairs
    via the project Ptss helper."""
    helper = ptss(2 * size, 2 * size)
    left, right = mus
    left_agn, left_ant = np.transpose(left, (1, 0))
    right_agn, right_ant = np.transpose(right, (1, 0))
    left_vert, left_horz = helper.parse(left_agn, left_ant, '2')
    right_vert, right_horz = helper.parse(right_agn, right_ant, '2')
    return np.array([(left_vert, left_horz), (right_vert, right_horz)], dtype=int)
# parse general variables with 3 coordinates
def parse_targ(self, mus, num_deg, type):
    """Parse 3-coordinate activity vectors (two angles + one distance)
    into integer indices; `type` selects the container layout:
    'eye' (single target), 'col' (2 targets), 'jnt' (2 x 3 targets)."""
    if type == 'eye':
        ang = self.parse_coord(mus[0:2], num_deg, '2')
        dist = self.parse_coord(mus[2], num_deg, '1')
        return np.array((*ang, *dist))
    if type == 'col':
        store = np.zeros((2, 3), dtype=int)
        for l in range(2):
            ang = self.parse_coord(mus[l][0:2], num_deg, '2')
            dist = self.parse_coord(mus[l][2], num_deg, '1')
            store[l] = np.array((*ang, *dist))
        return store
    if type == 'jnt':
        store = np.zeros((2, 3, 3), dtype=int)
        for s in range(2):
            for l in range(3):
                ang = self.parse_coord(mus[s][l][0:2], num_deg, '2')
                dist = self.parse_coord(mus[s][l][2], num_deg, '1')
                store[s][l] = np.array((*ang, *dist))
        return store
# parse general variables with 1 or 2 coordinates
def parse_coord(self, mus, num_deg, type):
    """Parse a 1- ('1') or 2- ('2') coordinate agonist/antagonist pair
    through the project Ptss helper; other `type` values return None."""
    helper = ptss(num_deg, num_deg)
    if type == '1':
        agn, ant = mus
        # Pad the scalar pair into 2-vectors as Ptss.parse expects.
        return helper.parse(np.array((agn, 0)), np.array((ant, 0)), '1')
    if type == '2':
        agn, ant = np.transpose(mus, (1, 0))
        return helper.parse(agn, ant, '2')
# interchange rows in array
def rev_row(self, x):
    """Swap the two rows (axis 1) of a (2, 2, 2) array into a float result."""
    out = np.zeros((2, 2, 2))
    for s in range(2):
        out[s][0], out[s][1] = x[s][1], x[s][0]
    return out
# | |
:param K:
:type K: int &
:rtype: Standard_Address
"""
return _TopTools.TopTools_DataMapOfIntegerListOfShape_ChangeFind1(self, *args)
def _kill_pointed(self):
    """_kill_pointed(TopTools_DataMapOfIntegerListOfShape self)"""
    # SWIG-generated: delegates to the flat C wrapper for this class.
    return _TopTools.TopTools_DataMapOfIntegerListOfShape__kill_pointed(self)
def __del__(self):
    # SWIG/OCC memory protocol: release ownership and hand the wrapped
    # object to the OCC GarbageCollector instead of freeing it here.
    try:
        self.thisown = False
        GarbageCollector.garbage.collect_object(self)
    except:
        # NOTE(review): bare except is generated code; it swallows every
        # error so interpreter shutdown (when globals may already be gone)
        # cannot raise from a destructor.
        pass
# SWIG boilerplate: rebind the flat C wrapper functions as methods on the
# proxy class, then register the proxy with the SWIG runtime.
TopTools_DataMapOfIntegerListOfShape.Assign = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerListOfShape_Assign,None,TopTools_DataMapOfIntegerListOfShape)
TopTools_DataMapOfIntegerListOfShape.Set = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerListOfShape_Set,None,TopTools_DataMapOfIntegerListOfShape)
TopTools_DataMapOfIntegerListOfShape.ReSize = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerListOfShape_ReSize,None,TopTools_DataMapOfIntegerListOfShape)
TopTools_DataMapOfIntegerListOfShape.Clear = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerListOfShape_Clear,None,TopTools_DataMapOfIntegerListOfShape)
TopTools_DataMapOfIntegerListOfShape.Bind = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerListOfShape_Bind,None,TopTools_DataMapOfIntegerListOfShape)
TopTools_DataMapOfIntegerListOfShape.IsBound = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerListOfShape_IsBound,None,TopTools_DataMapOfIntegerListOfShape)
TopTools_DataMapOfIntegerListOfShape.UnBind = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerListOfShape_UnBind,None,TopTools_DataMapOfIntegerListOfShape)
TopTools_DataMapOfIntegerListOfShape.Find = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerListOfShape_Find,None,TopTools_DataMapOfIntegerListOfShape)
TopTools_DataMapOfIntegerListOfShape.ChangeFind = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerListOfShape_ChangeFind,None,TopTools_DataMapOfIntegerListOfShape)
TopTools_DataMapOfIntegerListOfShape.Find1 = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerListOfShape_Find1,None,TopTools_DataMapOfIntegerListOfShape)
TopTools_DataMapOfIntegerListOfShape.ChangeFind1 = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerListOfShape_ChangeFind1,None,TopTools_DataMapOfIntegerListOfShape)
TopTools_DataMapOfIntegerListOfShape._kill_pointed = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerListOfShape__kill_pointed,None,TopTools_DataMapOfIntegerListOfShape)
TopTools_DataMapOfIntegerListOfShape_swigregister = _TopTools.TopTools_DataMapOfIntegerListOfShape_swigregister
TopTools_DataMapOfIntegerListOfShape_swigregister(TopTools_DataMapOfIntegerListOfShape)
class TopTools_DataMapOfIntegerShape(OCC.TCollection.TCollection_BasicMap):
    """SWIG-generated proxy for the OCC TopTools_DataMapOfIntegerShape hash
    map (Standard_Integer keys -> TopoDS_Shape values). Generated wrapper
    code: each method forwards to the flat C function in _TopTools."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param NbBuckets: default value is 1
        :type NbBuckets: int
        :rtype: None
        """
        _TopTools.TopTools_DataMapOfIntegerShape_swiginit(self,_TopTools.new_TopTools_DataMapOfIntegerShape(*args))
    def Assign(self, *args):
        """
        :param Other:
        :type Other: TopTools_DataMapOfIntegerShape &
        :rtype: TopTools_DataMapOfIntegerShape
        """
        return _TopTools.TopTools_DataMapOfIntegerShape_Assign(self, *args)
    def Set(self, *args):
        """
        :param Other:
        :type Other: TopTools_DataMapOfIntegerShape &
        :rtype: TopTools_DataMapOfIntegerShape
        """
        return _TopTools.TopTools_DataMapOfIntegerShape_Set(self, *args)
    def ReSize(self, *args):
        """
        :param NbBuckets:
        :type NbBuckets: int
        :rtype: None
        """
        return _TopTools.TopTools_DataMapOfIntegerShape_ReSize(self, *args)
    def Clear(self, *args):
        """
        :rtype: None
        """
        return _TopTools.TopTools_DataMapOfIntegerShape_Clear(self, *args)
    def Bind(self, *args):
        """
        :param K:
        :type K: int &
        :param I:
        :type I: TopoDS_Shape &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfIntegerShape_Bind(self, *args)
    def IsBound(self, *args):
        """
        :param K:
        :type K: int &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfIntegerShape_IsBound(self, *args)
    def UnBind(self, *args):
        """
        :param K:
        :type K: int &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfIntegerShape_UnBind(self, *args)
    def Find(self, *args):
        """
        :param K:
        :type K: int &
        :rtype: TopoDS_Shape
        """
        return _TopTools.TopTools_DataMapOfIntegerShape_Find(self, *args)
    def ChangeFind(self, *args):
        """
        :param K:
        :type K: int &
        :rtype: TopoDS_Shape
        """
        return _TopTools.TopTools_DataMapOfIntegerShape_ChangeFind(self, *args)
    def Find1(self, *args):
        """
        :param K:
        :type K: int &
        :rtype: Standard_Address
        """
        return _TopTools.TopTools_DataMapOfIntegerShape_Find1(self, *args)
    def ChangeFind1(self, *args):
        """
        :param K:
        :type K: int &
        :rtype: Standard_Address
        """
        return _TopTools.TopTools_DataMapOfIntegerShape_ChangeFind1(self, *args)
    def _kill_pointed(self):
        """_kill_pointed(TopTools_DataMapOfIntegerShape self)"""
        return _TopTools.TopTools_DataMapOfIntegerShape__kill_pointed(self)
    def __del__(self):
        # SWIG/OCC memory protocol: defer destruction to the GarbageCollector.
        try:
            self.thisown = False
            GarbageCollector.garbage.collect_object(self)
        except:
            pass
TopTools_DataMapOfIntegerShape.Assign = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerShape_Assign,None,TopTools_DataMapOfIntegerShape)
TopTools_DataMapOfIntegerShape.Set = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerShape_Set,None,TopTools_DataMapOfIntegerShape)
TopTools_DataMapOfIntegerShape.ReSize = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerShape_ReSize,None,TopTools_DataMapOfIntegerShape)
TopTools_DataMapOfIntegerShape.Clear = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerShape_Clear,None,TopTools_DataMapOfIntegerShape)
TopTools_DataMapOfIntegerShape.Bind = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerShape_Bind,None,TopTools_DataMapOfIntegerShape)
TopTools_DataMapOfIntegerShape.IsBound = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerShape_IsBound,None,TopTools_DataMapOfIntegerShape)
TopTools_DataMapOfIntegerShape.UnBind = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerShape_UnBind,None,TopTools_DataMapOfIntegerShape)
TopTools_DataMapOfIntegerShape.Find = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerShape_Find,None,TopTools_DataMapOfIntegerShape)
TopTools_DataMapOfIntegerShape.ChangeFind = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerShape_ChangeFind,None,TopTools_DataMapOfIntegerShape)
TopTools_DataMapOfIntegerShape.Find1 = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerShape_Find1,None,TopTools_DataMapOfIntegerShape)
TopTools_DataMapOfIntegerShape.ChangeFind1 = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerShape_ChangeFind1,None,TopTools_DataMapOfIntegerShape)
TopTools_DataMapOfIntegerShape._kill_pointed = new_instancemethod(_TopTools.TopTools_DataMapOfIntegerShape__kill_pointed,None,TopTools_DataMapOfIntegerShape)
TopTools_DataMapOfIntegerShape_swigregister = _TopTools.TopTools_DataMapOfIntegerShape_swigregister
TopTools_DataMapOfIntegerShape_swigregister(TopTools_DataMapOfIntegerShape)
class TopTools_DataMapOfOrientedShapeInteger(OCC.TCollection.TCollection_BasicMap):
    """SWIG proxy for the OCCT hashed map ``TopTools_DataMapOfOrientedShapeInteger``
    (``TopoDS_Shape`` keys -> integer values; keys hashed with orientation).

    Generated wrapper: every method merely delegates to the ``_TopTools``
    C extension module.  After the class body, the methods are rebound to the
    same C entry points via ``new_instancemethod``.
    """
    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param NbBuckets: default value is 1
        :type NbBuckets: int
        :rtype: None
        """
        _TopTools.TopTools_DataMapOfOrientedShapeInteger_swiginit(self,_TopTools.new_TopTools_DataMapOfOrientedShapeInteger(*args))
    def Assign(self, *args):
        """
        :param Other:
        :type Other: TopTools_DataMapOfOrientedShapeInteger &
        :rtype: TopTools_DataMapOfOrientedShapeInteger
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeInteger_Assign(self, *args)
    def Set(self, *args):
        """
        :param Other:
        :type Other: TopTools_DataMapOfOrientedShapeInteger &
        :rtype: TopTools_DataMapOfOrientedShapeInteger
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeInteger_Set(self, *args)
    def ReSize(self, *args):
        """
        :param NbBuckets:
        :type NbBuckets: int
        :rtype: None
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeInteger_ReSize(self, *args)
    def Clear(self, *args):
        """
        :rtype: None
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeInteger_Clear(self, *args)
    def Bind(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :param I:
        :type I: int &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeInteger_Bind(self, *args)
    def IsBound(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeInteger_IsBound(self, *args)
    def UnBind(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeInteger_UnBind(self, *args)
    def Find(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: int
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeInteger_Find(self, *args)
    def ChangeFind(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: int
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeInteger_ChangeFind(self, *args)
    def Find1(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: Standard_Address
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeInteger_Find1(self, *args)
    def ChangeFind1(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: Standard_Address
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeInteger_ChangeFind1(self, *args)
    def _kill_pointed(self):
        """_kill_pointed(TopTools_DataMapOfOrientedShapeInteger self)"""
        return _TopTools.TopTools_DataMapOfOrientedShapeInteger__kill_pointed(self)
    def __del__(self):
        # Bare except is intentional in this generated code: __del__ may run
        # during interpreter shutdown, when module globals are already gone.
        try:
            self.thisown = False
            GarbageCollector.garbage.collect_object(self)
        except:
            pass
# Rebind the proxy methods directly to the C-extension entry points and
# register the class with SWIG's runtime type system.
TopTools_DataMapOfOrientedShapeInteger.Assign = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeInteger_Assign,None,TopTools_DataMapOfOrientedShapeInteger)
TopTools_DataMapOfOrientedShapeInteger.Set = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeInteger_Set,None,TopTools_DataMapOfOrientedShapeInteger)
TopTools_DataMapOfOrientedShapeInteger.ReSize = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeInteger_ReSize,None,TopTools_DataMapOfOrientedShapeInteger)
TopTools_DataMapOfOrientedShapeInteger.Clear = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeInteger_Clear,None,TopTools_DataMapOfOrientedShapeInteger)
TopTools_DataMapOfOrientedShapeInteger.Bind = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeInteger_Bind,None,TopTools_DataMapOfOrientedShapeInteger)
TopTools_DataMapOfOrientedShapeInteger.IsBound = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeInteger_IsBound,None,TopTools_DataMapOfOrientedShapeInteger)
TopTools_DataMapOfOrientedShapeInteger.UnBind = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeInteger_UnBind,None,TopTools_DataMapOfOrientedShapeInteger)
TopTools_DataMapOfOrientedShapeInteger.Find = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeInteger_Find,None,TopTools_DataMapOfOrientedShapeInteger)
TopTools_DataMapOfOrientedShapeInteger.ChangeFind = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeInteger_ChangeFind,None,TopTools_DataMapOfOrientedShapeInteger)
TopTools_DataMapOfOrientedShapeInteger.Find1 = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeInteger_Find1,None,TopTools_DataMapOfOrientedShapeInteger)
TopTools_DataMapOfOrientedShapeInteger.ChangeFind1 = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeInteger_ChangeFind1,None,TopTools_DataMapOfOrientedShapeInteger)
TopTools_DataMapOfOrientedShapeInteger._kill_pointed = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeInteger__kill_pointed,None,TopTools_DataMapOfOrientedShapeInteger)
TopTools_DataMapOfOrientedShapeInteger_swigregister = _TopTools.TopTools_DataMapOfOrientedShapeInteger_swigregister
TopTools_DataMapOfOrientedShapeInteger_swigregister(TopTools_DataMapOfOrientedShapeInteger)
class TopTools_DataMapOfOrientedShapeShape(OCC.TCollection.TCollection_BasicMap):
    """SWIG proxy for the OCCT hashed map ``TopTools_DataMapOfOrientedShapeShape``
    (``TopoDS_Shape`` keys -> ``TopoDS_Shape`` values; keys hashed with orientation).

    Generated wrapper: every method merely delegates to the ``_TopTools``
    C extension module.  After the class body, the methods are rebound to the
    same C entry points via ``new_instancemethod``.
    """
    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param NbBuckets: default value is 1
        :type NbBuckets: int
        :rtype: None
        """
        _TopTools.TopTools_DataMapOfOrientedShapeShape_swiginit(self,_TopTools.new_TopTools_DataMapOfOrientedShapeShape(*args))
    def Assign(self, *args):
        """
        :param Other:
        :type Other: TopTools_DataMapOfOrientedShapeShape &
        :rtype: TopTools_DataMapOfOrientedShapeShape
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeShape_Assign(self, *args)
    def Set(self, *args):
        """
        :param Other:
        :type Other: TopTools_DataMapOfOrientedShapeShape &
        :rtype: TopTools_DataMapOfOrientedShapeShape
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeShape_Set(self, *args)
    def ReSize(self, *args):
        """
        :param NbBuckets:
        :type NbBuckets: int
        :rtype: None
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeShape_ReSize(self, *args)
    def Clear(self, *args):
        """
        :rtype: None
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeShape_Clear(self, *args)
    def Bind(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :param I:
        :type I: TopoDS_Shape &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeShape_Bind(self, *args)
    def IsBound(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeShape_IsBound(self, *args)
    def UnBind(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeShape_UnBind(self, *args)
    def Find(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: TopoDS_Shape
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeShape_Find(self, *args)
    def ChangeFind(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: TopoDS_Shape
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeShape_ChangeFind(self, *args)
    def Find1(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: Standard_Address
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeShape_Find1(self, *args)
    def ChangeFind1(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: Standard_Address
        """
        return _TopTools.TopTools_DataMapOfOrientedShapeShape_ChangeFind1(self, *args)
    def _kill_pointed(self):
        """_kill_pointed(TopTools_DataMapOfOrientedShapeShape self)"""
        return _TopTools.TopTools_DataMapOfOrientedShapeShape__kill_pointed(self)
    def __del__(self):
        # Bare except is intentional in this generated code: __del__ may run
        # during interpreter shutdown, when module globals are already gone.
        try:
            self.thisown = False
            GarbageCollector.garbage.collect_object(self)
        except:
            pass
# Rebind the proxy methods directly to the C-extension entry points and
# register the class with SWIG's runtime type system.
TopTools_DataMapOfOrientedShapeShape.Assign = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeShape_Assign,None,TopTools_DataMapOfOrientedShapeShape)
TopTools_DataMapOfOrientedShapeShape.Set = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeShape_Set,None,TopTools_DataMapOfOrientedShapeShape)
TopTools_DataMapOfOrientedShapeShape.ReSize = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeShape_ReSize,None,TopTools_DataMapOfOrientedShapeShape)
TopTools_DataMapOfOrientedShapeShape.Clear = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeShape_Clear,None,TopTools_DataMapOfOrientedShapeShape)
TopTools_DataMapOfOrientedShapeShape.Bind = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeShape_Bind,None,TopTools_DataMapOfOrientedShapeShape)
TopTools_DataMapOfOrientedShapeShape.IsBound = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeShape_IsBound,None,TopTools_DataMapOfOrientedShapeShape)
TopTools_DataMapOfOrientedShapeShape.UnBind = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeShape_UnBind,None,TopTools_DataMapOfOrientedShapeShape)
TopTools_DataMapOfOrientedShapeShape.Find = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeShape_Find,None,TopTools_DataMapOfOrientedShapeShape)
TopTools_DataMapOfOrientedShapeShape.ChangeFind = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeShape_ChangeFind,None,TopTools_DataMapOfOrientedShapeShape)
TopTools_DataMapOfOrientedShapeShape.Find1 = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeShape_Find1,None,TopTools_DataMapOfOrientedShapeShape)
TopTools_DataMapOfOrientedShapeShape.ChangeFind1 = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeShape_ChangeFind1,None,TopTools_DataMapOfOrientedShapeShape)
TopTools_DataMapOfOrientedShapeShape._kill_pointed = new_instancemethod(_TopTools.TopTools_DataMapOfOrientedShapeShape__kill_pointed,None,TopTools_DataMapOfOrientedShapeShape)
TopTools_DataMapOfOrientedShapeShape_swigregister = _TopTools.TopTools_DataMapOfOrientedShapeShape_swigregister
TopTools_DataMapOfOrientedShapeShape_swigregister(TopTools_DataMapOfOrientedShapeShape)
class TopTools_DataMapOfShapeInteger(OCC.TCollection.TCollection_BasicMap):
    """SWIG proxy for the OCCT hashed map ``TopTools_DataMapOfShapeInteger``
    (``TopoDS_Shape`` keys -> integer values).

    Generated wrapper: every method merely delegates to the ``_TopTools``
    C extension module.  After the class body, the methods are rebound to the
    same C entry points via ``new_instancemethod``.
    """
    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param NbBuckets: default value is 1
        :type NbBuckets: int
        :rtype: None
        """
        _TopTools.TopTools_DataMapOfShapeInteger_swiginit(self,_TopTools.new_TopTools_DataMapOfShapeInteger(*args))
    def Assign(self, *args):
        """
        :param Other:
        :type Other: TopTools_DataMapOfShapeInteger &
        :rtype: TopTools_DataMapOfShapeInteger
        """
        return _TopTools.TopTools_DataMapOfShapeInteger_Assign(self, *args)
    def Set(self, *args):
        """
        :param Other:
        :type Other: TopTools_DataMapOfShapeInteger &
        :rtype: TopTools_DataMapOfShapeInteger
        """
        return _TopTools.TopTools_DataMapOfShapeInteger_Set(self, *args)
    def ReSize(self, *args):
        """
        :param NbBuckets:
        :type NbBuckets: int
        :rtype: None
        """
        return _TopTools.TopTools_DataMapOfShapeInteger_ReSize(self, *args)
    def Clear(self, *args):
        """
        :rtype: None
        """
        return _TopTools.TopTools_DataMapOfShapeInteger_Clear(self, *args)
    def Bind(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :param I:
        :type I: int &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfShapeInteger_Bind(self, *args)
    def IsBound(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfShapeInteger_IsBound(self, *args)
    def UnBind(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfShapeInteger_UnBind(self, *args)
    def Find(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: int
        """
        return _TopTools.TopTools_DataMapOfShapeInteger_Find(self, *args)
    def ChangeFind(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: int
        """
        return _TopTools.TopTools_DataMapOfShapeInteger_ChangeFind(self, *args)
    def Find1(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: Standard_Address
        """
        return _TopTools.TopTools_DataMapOfShapeInteger_Find1(self, *args)
    def ChangeFind1(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: Standard_Address
        """
        return _TopTools.TopTools_DataMapOfShapeInteger_ChangeFind1(self, *args)
    def _kill_pointed(self):
        """_kill_pointed(TopTools_DataMapOfShapeInteger self)"""
        return _TopTools.TopTools_DataMapOfShapeInteger__kill_pointed(self)
    def __del__(self):
        # Bare except is intentional in this generated code: __del__ may run
        # during interpreter shutdown, when module globals are already gone.
        try:
            self.thisown = False
            GarbageCollector.garbage.collect_object(self)
        except:
            pass
# Rebind the proxy methods directly to the C-extension entry points and
# register the class with SWIG's runtime type system.
TopTools_DataMapOfShapeInteger.Assign = new_instancemethod(_TopTools.TopTools_DataMapOfShapeInteger_Assign,None,TopTools_DataMapOfShapeInteger)
TopTools_DataMapOfShapeInteger.Set = new_instancemethod(_TopTools.TopTools_DataMapOfShapeInteger_Set,None,TopTools_DataMapOfShapeInteger)
TopTools_DataMapOfShapeInteger.ReSize = new_instancemethod(_TopTools.TopTools_DataMapOfShapeInteger_ReSize,None,TopTools_DataMapOfShapeInteger)
TopTools_DataMapOfShapeInteger.Clear = new_instancemethod(_TopTools.TopTools_DataMapOfShapeInteger_Clear,None,TopTools_DataMapOfShapeInteger)
TopTools_DataMapOfShapeInteger.Bind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeInteger_Bind,None,TopTools_DataMapOfShapeInteger)
TopTools_DataMapOfShapeInteger.IsBound = new_instancemethod(_TopTools.TopTools_DataMapOfShapeInteger_IsBound,None,TopTools_DataMapOfShapeInteger)
TopTools_DataMapOfShapeInteger.UnBind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeInteger_UnBind,None,TopTools_DataMapOfShapeInteger)
TopTools_DataMapOfShapeInteger.Find = new_instancemethod(_TopTools.TopTools_DataMapOfShapeInteger_Find,None,TopTools_DataMapOfShapeInteger)
TopTools_DataMapOfShapeInteger.ChangeFind = new_instancemethod(_TopTools.TopTools_DataMapOfShapeInteger_ChangeFind,None,TopTools_DataMapOfShapeInteger)
TopTools_DataMapOfShapeInteger.Find1 = new_instancemethod(_TopTools.TopTools_DataMapOfShapeInteger_Find1,None,TopTools_DataMapOfShapeInteger)
TopTools_DataMapOfShapeInteger.ChangeFind1 = new_instancemethod(_TopTools.TopTools_DataMapOfShapeInteger_ChangeFind1,None,TopTools_DataMapOfShapeInteger)
TopTools_DataMapOfShapeInteger._kill_pointed = new_instancemethod(_TopTools.TopTools_DataMapOfShapeInteger__kill_pointed,None,TopTools_DataMapOfShapeInteger)
TopTools_DataMapOfShapeInteger_swigregister = _TopTools.TopTools_DataMapOfShapeInteger_swigregister
TopTools_DataMapOfShapeInteger_swigregister(TopTools_DataMapOfShapeInteger)
class TopTools_DataMapOfShapeListOfInteger(OCC.TCollection.TCollection_BasicMap):
    """SWIG proxy for the OCCT hashed map ``TopTools_DataMapOfShapeListOfInteger``
    (``TopoDS_Shape`` keys -> ``TColStd_ListOfInteger`` values).

    Generated wrapper: every method merely delegates to the ``_TopTools``
    C extension module.  After the class body, the methods are rebound to the
    same C entry points via ``new_instancemethod``.
    """
    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param NbBuckets: default value is 1
        :type NbBuckets: int
        :rtype: None
        """
        _TopTools.TopTools_DataMapOfShapeListOfInteger_swiginit(self,_TopTools.new_TopTools_DataMapOfShapeListOfInteger(*args))
    def Assign(self, *args):
        """
        :param Other:
        :type Other: TopTools_DataMapOfShapeListOfInteger &
        :rtype: TopTools_DataMapOfShapeListOfInteger
        """
        return _TopTools.TopTools_DataMapOfShapeListOfInteger_Assign(self, *args)
    def Set(self, *args):
        """
        :param Other:
        :type Other: TopTools_DataMapOfShapeListOfInteger &
        :rtype: TopTools_DataMapOfShapeListOfInteger
        """
        return _TopTools.TopTools_DataMapOfShapeListOfInteger_Set(self, *args)
    def ReSize(self, *args):
        """
        :param NbBuckets:
        :type NbBuckets: int
        :rtype: None
        """
        return _TopTools.TopTools_DataMapOfShapeListOfInteger_ReSize(self, *args)
    def Clear(self, *args):
        """
        :rtype: None
        """
        return _TopTools.TopTools_DataMapOfShapeListOfInteger_Clear(self, *args)
    def Bind(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :param I:
        :type I: TColStd_ListOfInteger &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfShapeListOfInteger_Bind(self, *args)
    def IsBound(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfShapeListOfInteger_IsBound(self, *args)
    def UnBind(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: bool
        """
        return _TopTools.TopTools_DataMapOfShapeListOfInteger_UnBind(self, *args)
    def Find(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: TColStd_ListOfInteger
        """
        return _TopTools.TopTools_DataMapOfShapeListOfInteger_Find(self, *args)
    def ChangeFind(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: TColStd_ListOfInteger
        """
        return _TopTools.TopTools_DataMapOfShapeListOfInteger_ChangeFind(self, *args)
    def Find1(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: Standard_Address
        """
        return _TopTools.TopTools_DataMapOfShapeListOfInteger_Find1(self, *args)
    def ChangeFind1(self, *args):
        """
        :param K:
        :type K: TopoDS_Shape &
        :rtype: Standard_Address
        """
        return _TopTools.TopTools_DataMapOfShapeListOfInteger_ChangeFind1(self, *args)
    def _kill_pointed(self):
        """_kill_pointed(TopTools_DataMapOfShapeListOfInteger self)"""
        return _TopTools.TopTools_DataMapOfShapeListOfInteger__kill_pointed(self)
    def __del__(self):
        # Bare except is intentional in this generated code: __del__ may run
        # during interpreter shutdown, when module globals are already gone.
        try:
            self.thisown = False
            GarbageCollector.garbage.collect_object(self)
        except:
            pass
TopTools_DataMapOfShapeListOfInteger.Assign = new_instancemethod(_TopTools.TopTools_DataMapOfShapeListOfInteger_Assign,None,TopTools_DataMapOfShapeListOfInteger)
TopTools_DataMapOfShapeListOfInteger.Set = | |
"""Helpers for listening to events."""
from datetime import datetime, timedelta
import functools as ft
from typing import Any, Callable, Dict, Iterable, Optional, Union, cast
import attr
from homeassistant.const import (
ATTR_NOW,
EVENT_CORE_CONFIG_UPDATE,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
MATCH_ALL,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
from homeassistant.core import CALLBACK_TYPE, Event, HomeAssistant, State, callback
from homeassistant.helpers.sun import get_astral_event_next
from homeassistant.helpers.template import Template
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
from homeassistant.util.async_ import run_callback_threadsafe
# PyLint does not like the use of threaded_listener_factory
# pylint: disable=invalid-name
def threaded_listener_factory(async_factory: Callable[..., Any]) -> CALLBACK_TYPE:
    """Convert an async event helper to a threaded one.

    The returned factory has the same signature as ``async_factory`` but may
    be called from any thread; the async work is scheduled on the hass event
    loop and the call blocks until the unsubscribe callback is available.
    """
    @ft.wraps(async_factory)
    def factory(*args: Any, **kwargs: Any) -> CALLBACK_TYPE:
        """Call async event helper safely."""
        # All the async helpers in this module take hass as their first
        # positional argument.
        hass = args[0]

        if not isinstance(hass, HomeAssistant):
            raise TypeError("First parameter needs to be a hass instance")

        # Run the async factory inside the event loop and block for its
        # result (the unsubscribe callback).
        async_remove = run_callback_threadsafe(
            hass.loop, ft.partial(async_factory, *args, **kwargs)
        ).result()

        def remove() -> None:
            """Threadsafe removal."""
            # Unsubscribing must also happen inside the event loop.
            run_callback_threadsafe(hass.loop, async_remove).result()

        return remove

    return factory
@callback
@bind_hass
def async_track_state_change(
    hass: HomeAssistant,
    entity_ids: Union[str, Iterable[str]],
    action: Callable[[str, State, State], None],
    from_state: Union[None, str, Iterable[str]] = None,
    to_state: Union[None, str, Iterable[str]] = None,
) -> CALLBACK_TYPE:
    """Track specific state changes.

    entity_ids, from_state and to_state can be string or list.
    Use list to match multiple.

    Returns a function that can be called to remove the listener.

    Must be run within the event loop.
    """
    match_from_state = process_state_match(from_state)
    match_to_state = process_state_match(to_state)

    # Ensure it is a lowercase list with entity ids we want to match on
    if entity_ids == MATCH_ALL:
        pass
    elif isinstance(entity_ids, str):
        entity_ids = (entity_ids.lower(),)
    else:
        entity_ids = tuple(entity_id.lower() for entity_id in entity_ids)

    @callback
    def state_change_listener(event: Event) -> None:
        """Handle specific state changes."""
        if (
            entity_ids != MATCH_ALL
            and cast(str, event.data.get("entity_id")) not in entity_ids
        ):
            return

        # Matching is done on the raw ``.state`` strings; the action below
        # still receives the full State objects (or None) from the event.
        old_state = event.data.get("old_state")
        if old_state is not None:
            old_state = old_state.state

        new_state = event.data.get("new_state")
        if new_state is not None:
            new_state = new_state.state

        if match_from_state(old_state) and match_to_state(new_state):
            hass.async_run_job(
                action,
                event.data.get("entity_id"),
                event.data.get("old_state"),
                event.data.get("new_state"),
            )

    return hass.bus.async_listen(EVENT_STATE_CHANGED, state_change_listener)


track_state_change = threaded_listener_factory(async_track_state_change)
@callback
@bind_hass
def async_track_template(
    hass: HomeAssistant,
    template: Template,
    action: Callable[[str, State, State], None],
    variables: Optional[Dict[str, Any]] = None,
) -> CALLBACK_TYPE:
    """Add a listener that track state changes with template condition.

    The action fires only on the False -> True transition of the template
    result, and re-arms once the template evaluates to False again.
    """
    # Local (function-level) import; presumably avoids a circular import at
    # module load time -- confirm before moving to the top of the file.
    from . import condition

    # Local variable to keep track of if the action has already been triggered
    already_triggered = False

    @callback
    def template_condition_listener(entity_id: str, from_s: State, to_s: State) -> None:
        """Check if condition is correct and run action."""
        nonlocal already_triggered
        template_result = condition.async_template(hass, template, variables)

        # Check to see if template returns true
        if template_result and not already_triggered:
            already_triggered = True
            hass.async_run_job(action, entity_id, from_s, to_s)
        elif not template_result:
            # Template went False again: re-arm for the next transition.
            already_triggered = False

    return async_track_state_change(
        hass, template.extract_entities(variables), template_condition_listener
    )


track_template = threaded_listener_factory(async_track_template)
@callback
@bind_hass
def async_track_same_state(
    hass: HomeAssistant,
    period: timedelta,
    action: Callable[..., None],
    async_check_same_func: Callable[[str, State, State], bool],
    entity_ids: Union[str, Iterable[str]] = MATCH_ALL,
) -> CALLBACK_TYPE:
    """Track the state of entities for a period and run an action.

    If async_check_func is None it use the state of orig_value.
    Without entity_ids we track all state changes.
    """
    # Unsub handle for the delay timer and for the state-change listener.
    async_remove_state_for_cancel: Optional[CALLBACK_TYPE] = None
    async_remove_state_for_listener: Optional[CALLBACK_TYPE] = None

    @callback
    def clear_listener() -> None:
        """Clear all unsub listener."""
        nonlocal async_remove_state_for_cancel, async_remove_state_for_listener

        if async_remove_state_for_listener is not None:
            async_remove_state_for_listener()
            async_remove_state_for_listener = None
        if async_remove_state_for_cancel is not None:
            async_remove_state_for_cancel()
            async_remove_state_for_cancel = None

    @callback
    def state_for_listener(now: Any) -> None:
        """Fire on state changes after a delay and calls action."""
        nonlocal async_remove_state_for_listener
        # The timer just fired: drop our own handle first so clear_listener()
        # does not try to cancel an already-expired timer.
        async_remove_state_for_listener = None
        clear_listener()
        hass.async_run_job(action)

    @callback
    def state_for_cancel_listener(
        entity: str, from_state: State, to_state: State
    ) -> None:
        """Fire on changes and cancel for listener if changed."""
        # Any non-matching change aborts the pending action entirely.
        if not async_check_same_func(entity, from_state, to_state):
            clear_listener()

    async_remove_state_for_listener = async_track_point_in_utc_time(
        hass, state_for_listener, dt_util.utcnow() + period
    )
    async_remove_state_for_cancel = async_track_state_change(
        hass, entity_ids, state_for_cancel_listener
    )

    # The same cleanup function also serves as the caller's unsubscribe hook.
    return clear_listener


track_same_state = threaded_listener_factory(async_track_same_state)
@callback
@bind_hass
def async_track_point_in_time(
    hass: HomeAssistant, action: Callable[..., None], point_in_time: datetime
) -> CALLBACK_TYPE:
    """Add a listener that fires once after a specific point in time.

    The point in time may carry any timezone; the action receives the firing
    time converted to local time.  Returns an unsubscribe callback.
    """

    @callback
    def run_action_with_local_time(utc_now: datetime) -> None:
        """Invoke the action with the UTC firing time converted to local."""
        hass.async_run_job(action, dt_util.as_local(utc_now))

    return async_track_point_in_utc_time(
        hass, run_action_with_local_time, dt_util.as_utc(point_in_time)
    )


track_point_in_time = threaded_listener_factory(async_track_point_in_time)
@callback
@bind_hass
def async_track_point_in_utc_time(
    hass: HomeAssistant, action: Callable[..., Any], point_in_time: datetime
) -> CALLBACK_TYPE:
    """Add a listener that fires once after a specific point in UTC time.

    Returns the bus-unsubscribe callback.
    """
    # Ensure point_in_time is UTC
    point_in_time = dt_util.as_utc(point_in_time)

    @callback
    def point_in_time_listener(event: Event) -> None:
        """Listen for matching time_changed events."""
        now = event.data[ATTR_NOW]

        if now < point_in_time or hasattr(point_in_time_listener, "run"):
            return

        # Set variable so that we will never run twice.
        # Because the event bus might have to wait till a thread comes
        # available to execute this listener it might occur that the
        # listener gets lined up twice to be executed. This will make
        # sure the second time it does nothing.
        setattr(point_in_time_listener, "run", True)
        # Unsubscribe before running the action; async_unsub is the closure
        # variable assigned below (valid because the listener can only fire
        # after async_listen has returned).
        async_unsub()

        hass.async_run_job(action, now)

    async_unsub = hass.bus.async_listen(EVENT_TIME_CHANGED, point_in_time_listener)

    return async_unsub


track_point_in_utc_time = threaded_listener_factory(async_track_point_in_utc_time)
@callback
@bind_hass
def async_call_later(
    hass: HomeAssistant, delay: float, action: Callable[..., None]
) -> CALLBACK_TYPE:
    """Add a listener that is called in <delay>."""
    # Translate the relative delay into an absolute UTC point in time.
    fire_at = dt_util.utcnow() + timedelta(seconds=delay)
    return async_track_point_in_utc_time(hass, action, fire_at)


call_later = threaded_listener_factory(async_call_later)
@callback
@bind_hass
def async_track_time_interval(
    hass: HomeAssistant, action: Callable[..., None], interval: timedelta
) -> CALLBACK_TYPE:
    """Add a listener that fires repetitively at every timedelta interval.

    Returns a callable that cancels the pending (and all future) firings.
    """
    # Handle for the currently-armed one-shot timer; replaced on every tick.
    remove = None

    def next_interval() -> datetime:
        """Return the next interval."""
        return dt_util.utcnow() + interval

    @callback
    def interval_listener(now: datetime) -> None:
        """Handle elapsed intervals."""
        nonlocal remove
        # Re-arm before running the action so the schedule keeps ticking
        # even if the action itself is slow.
        remove = async_track_point_in_utc_time(hass, interval_listener, next_interval())
        hass.async_run_job(action, now)

    remove = async_track_point_in_utc_time(hass, interval_listener, next_interval())

    def remove_listener() -> None:
        """Remove interval listener."""
        # Reads the *current* timer handle via the closure, so it cancels
        # whichever one-shot timer is armed at call time.
        remove()

    return remove_listener


track_time_interval = threaded_listener_factory(async_track_time_interval)
@attr.s
class SunListener:
    """Helper class to help listen to sun events.

    Arms a one-shot timer for the next occurrence of ``event`` (sunrise or
    sunset, optionally shifted by ``offset``) and re-arms itself after each
    firing and after core config updates.
    """

    hass = attr.ib(type=HomeAssistant)
    action: Callable[..., None] = attr.ib()
    # SUN_EVENT_SUNRISE or SUN_EVENT_SUNSET (see async_track_sunrise/sunset).
    event: str = attr.ib()
    offset: Optional[timedelta] = attr.ib()
    # Unsub handle for the pending sun-event timer, if armed.
    _unsub_sun: Optional[CALLBACK_TYPE] = attr.ib(default=None)
    # Unsub handle for the core-config-update listener, if attached.
    _unsub_config: Optional[CALLBACK_TYPE] = attr.ib(default=None)

    @callback
    def async_attach(self) -> None:
        """Attach a sun listener."""
        assert self._unsub_config is None

        # Re-schedule on core config updates -- presumably because a changed
        # location moves the next sunrise/sunset time.
        self._unsub_config = self.hass.bus.async_listen(
            EVENT_CORE_CONFIG_UPDATE, self._handle_config_event
        )

        self._listen_next_sun_event()

    @callback
    def async_detach(self) -> None:
        """Detach the sun listener."""
        assert self._unsub_sun is not None
        assert self._unsub_config is not None

        self._unsub_sun()
        self._unsub_sun = None
        self._unsub_config()
        self._unsub_config = None

    @callback
    def _listen_next_sun_event(self) -> None:
        """Set up the sun event listener."""
        assert self._unsub_sun is None

        self._unsub_sun = async_track_point_in_utc_time(
            self.hass,
            self._handle_sun_event,
            get_astral_event_next(self.hass, self.event, offset=self.offset),
        )

    @callback
    def _handle_sun_event(self, _now: Any) -> None:
        """Handle solar event."""
        self._unsub_sun = None
        # Chain the next occurrence before running the user action.
        self._listen_next_sun_event()
        self.hass.async_run_job(self.action)

    @callback
    def _handle_config_event(self, _event: Any) -> None:
        """Handle core config update."""
        assert self._unsub_sun is not None
        # Cancel the stale timer and re-arm against the updated config.
        self._unsub_sun()
        self._unsub_sun = None
        self._listen_next_sun_event()
@callback
@bind_hass
def async_track_sunrise(
    hass: HomeAssistant, action: Callable[..., None], offset: Optional[timedelta] = None
) -> CALLBACK_TYPE:
    """Add a listener that will fire a specified offset from sunrise daily."""
    # Delegate the scheduling/re-arming bookkeeping to SunListener.
    sunrise_listener = SunListener(hass, action, SUN_EVENT_SUNRISE, offset)
    sunrise_listener.async_attach()
    # Detaching the listener is exactly the caller's unsubscribe operation.
    return sunrise_listener.async_detach


track_sunrise = threaded_listener_factory(async_track_sunrise)
@callback
@bind_hass
def async_track_sunset(
    hass: HomeAssistant, action: Callable[..., None], offset: Optional[timedelta] = None
) -> CALLBACK_TYPE:
    """Add a listener that will fire a specified offset from sunset daily."""
    # Delegate the scheduling/re-arming bookkeeping to SunListener.
    sunset_listener = SunListener(hass, action, SUN_EVENT_SUNSET, offset)
    sunset_listener.async_attach()
    # Detaching the listener is exactly the caller's unsubscribe operation.
    return sunset_listener.async_detach


track_sunset = threaded_listener_factory(async_track_sunset)
@callback
@bind_hass
def async_track_utc_time_change(
    hass: HomeAssistant,
    action: Callable[..., None],
    hour: Optional[Any] = None,
    minute: Optional[Any] = None,
    second: Optional[Any] = None,
    local: bool = False,
) -> CALLBACK_TYPE:
    """Add a listener that will fire if time matches a pattern.

    hour/minute/second accept time-pattern expressions (parsed by
    dt_util.parse_time_expression); with all three None the action fires on
    every time_changed event.  Returns the bus-unsubscribe callback.
    """
    # We do not have to wrap the function with time pattern matching logic
    # if no pattern given
    if all(val is None for val in (hour, minute, second)):
        @callback
        def time_change_listener(event: Event) -> None:
            """Fire every time event that comes in."""
            hass.async_run_job(action, event.data[ATTR_NOW])

        return hass.bus.async_listen(EVENT_TIME_CHANGED, time_change_listener)

    matching_seconds = dt_util.parse_time_expression(second, 0, 59)
    matching_minutes = dt_util.parse_time_expression(minute, 0, 59)
    matching_hours = dt_util.parse_time_expression(hour, 0, 23)

    next_time = None

    def calculate_next(now: datetime) -> None:
        """Calculate and set the next time the trigger should fire."""
        nonlocal next_time

        # Pattern matching is done against local time when requested.
        localized_now = dt_util.as_local(now) if local else now
        next_time = dt_util.find_next_time_expression_time(
            localized_now, matching_seconds, matching_minutes, matching_hours
        )

    # Make sure rolling back the clock doesn't prevent the timer from
    # triggering.
    last_now: Optional[datetime] = None

    @callback
    def pattern_time_change_listener(event: Event) -> None:
        """Listen for matching time_changed events."""
        nonlocal next_time, last_now

        now = event.data[ATTR_NOW]

        if last_now is None or now < last_now:
            # Time rolled back or next time not yet calculated
            calculate_next(now)

        last_now = now

        if next_time <= now:
            hass.async_run_job(action, dt_util.as_local(now) if local else now)
            # Step one second past `now` so the same trigger time is not
            # matched twice.
            calculate_next(now + timedelta(seconds=1))

    # We can't use async_track_point_in_utc_time here because it would
    # break in the case that the system time abruptly jumps backwards.
    # Our custom last_now logic takes care of resolving that scenario.
    return hass.bus.async_listen(EVENT_TIME_CHANGED, pattern_time_change_listener)


track_utc_time_change = threaded_listener_factory(async_track_utc_time_change)
@callback
@bind_hass
def async_track_time_change(
hass: HomeAssistant,
action: Callable[..., None],
hour: Optional[Any] = | |
# interface/xbridge_rpc.py
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
import xbridge_config
from utils import xbridge_custom_exceptions
"""
port = '8888'
rpc_connection = AuthServiceProxy("http://%s:%s@127.0.0.1:41414" % ('Testuser', '<PASSWORD>'))
"""
# Read the RPC credentials from the tests.conf file and open the JSON-RPC
# connection used by every wrapper below.
login = xbridge_config.get_conf_login()
# BUG FIX: this line was corrupted ("<PASSWORD>conf_password()"); restored the
# accessor by analogy with get_conf_login()/get_conf_IP() above/below.
pwd = xbridge_config.get_conf_password()
ip_address = xbridge_config.get_conf_IP()
if (login != "") and (pwd != "") and (ip_address != ""):
    rpc_connection = AuthServiceProxy("http://%s:%s@%s" % (str(login), str(pwd), str(ip_address)))
else:
    # Without full credentials the module cannot do anything useful.
    print("credential information missing in the tests.conf file. Program stopped")
    exit(1)
# Substrings of JSON-RPC error messages that the test-suite treats as
# "expected" daemon-side validation failures; any JSONRPCException whose text
# contains one of these is re-raised as ValidBlockNetException by the
# wrappers below.
valid_msgs = ["-22: TX decode failed",
              "TX decode failed",
              "-3: Invalid address",
              "Invalid BLOCK address",
              "Invalid BlocknetDX address",
              "-32700: Parse error",
              # BUG FIX: a trailing comma was missing here, so this entry was
              # silently concatenated with the next one ("Parse errormust be
              # hexadecimal string") and neither substring could ever match.
              "Parse error",
              "must be hexadecimal string",
              "txid must be hexadecimal string",
              "-8: argument 1 must be hexadecimal string",
              "-3: Expected type",
              # BUG FIX: same missing-comma concatenation as above
              # ("Expected typeInvalid private key encoding").
              "Expected type",
              "Invalid private key encoding",
              "Error: Node has not been added",
              "Invalid parameter",
              "Cannot open wallet dump file",
              "get_value",
              "get_value<",
              "Insufficient funds",
              "Invalid amount",
              "bad lexical cast",
              "running with an unencrypted wallet, but walletpassphrase was called",
              "Error: The wallet passphrase entered was incorrect",
              "Invalid spork name",
              "-1: walletpassphrasechange",
              "-1: bip38decrypt",
              "-1: bip38encrypt"]
"""
"-1: bip38decrypt \"blocknetdxaddress\"",
"-1: bip38encrypt \"blocknetdxaddress\""]
"""
def cancel_tx(txid=None):
    """Cancel the xbridge order *txid* via dxCancelTransaction."""
    try:
        return rpc_connection.dxCancelTransaction(txid)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def dxGetTradeHistory(fromCurrency=None, toCurrency=None, unix_starttime=None, unix_endtime=None, txids=None):
    """Return the trade history for a currency pair over a unix-time window."""
    try:
        return rpc_connection.dxGetTradeHistory(fromCurrency, toCurrency, unix_starttime, unix_endtime, txids)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def dxGetOrderBook(detailLevel=None, src=None, dest=None, maxOrders=None, showTxids=None):
    """Return the order book for the src/dest pair at the given detail level."""
    try:
        return rpc_connection.dxGetOrderBook(detailLevel, src, dest, maxOrders, showTxids)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def get_tx_info(txid):
    """Return xbridge transaction details for *txid*."""
    try:
        return rpc_connection.dxGetTransactionInfo(txid)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def create_tx(fromAddress=None, fromToken=None, fromAmount=None, toAddress=None, toToken=None, toAmount=None):
    """Create a new xbridge exchange transaction."""
    try:
        return rpc_connection.dxCreateTransaction(fromAddress, fromToken, fromAmount, toAddress, toToken, toAmount)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def accept_tx(txid=None, src=None, dest=None):
    """Accept the pending xbridge transaction *txid*."""
    try:
        return rpc_connection.dxAcceptTransaction(txid, src, dest)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def dxGetCurrencies():
    """Return the list of currencies known to the xbridge daemon."""
    return rpc_connection.dxGetCurrencies()


def dxGetTransactions():
    """Return the list of open xbridge transactions."""
    return rpc_connection.dxGetTransactions()


def dxGetTransactionsHistory():
    """Return the historical list of xbridge transactions."""
    return rpc_connection.dxGetTransactionsHistory()
def get_core_version():
    """Return the daemon's reported "version" field, or 0 when getinfo fails."""
    try:
        return rpc_connection.getinfo()["version"]
    except JSONRPCException:
        return 0


def get_blockcount():
    """Return the current block height."""
    return rpc_connection.getblockcount()


def decode_script(hex):
    """Decode a serialized script given as a hex string.

    The parameter name shadows the ``hex`` builtin but is kept for
    backward compatibility with existing keyword callers.
    """
    return rpc_connection.decodescript(hex)


def get_budget():
    """Return the servicenode budget overview (``mnbudget show``)."""
    return rpc_connection.mnbudget('show')


def get_node_list():
    """Return the list of known servicenodes."""
    return rpc_connection.servicenodelist()


def get_tx(txid):
    """Return the raw transaction for *txid*."""
    return rpc_connection.getrawtransaction(txid)
# sendfrom "fromaccount" "toblocknetdxaddress" amount ( minconf "comment" "comment-to" )
def sendfrom(fromaccount=None, toblocknetdxaddress=None, amount=None, minconf=None, comment=None, comment_to=None):
    """Send *amount* from an account to a BlocknetDX address."""
    try:
        return rpc_connection.sendfrom(fromaccount, toblocknetdxaddress, amount, minconf, comment, comment_to)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# multisend <command>
def multisend(cmd=None):
    """Run a multisend sub-command."""
    try:
        return rpc_connection.multisend(cmd)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# lockunspent unlock [{"txid":"txid","vout":n},...]
def lockunspent(unlock=None, txid=None):
    """Lock or unlock the given unspent outputs."""
    try:
        return rpc_connection.lockunspent(unlock, txid)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# move "fromaccount" "toaccount" amount ( minconf "comment" )
def move(fromaccount=None, toaccount=None, amount=None, minconf=None, comment=None):
    """Move funds between two wallet accounts."""
    try:
        return rpc_connection.move(fromaccount, toaccount, amount, minconf, comment)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# autocombinerewards <true/false> threshold
def autocombinerewards(true_false=None, threshold=None):
    """Enable/disable automatic combining of rewards above *threshold*."""
    try:
        return rpc_connection.autocombinerewards(true_false, threshold)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# importwallet "filename"
def importwallet(filename_str=None):
    """Import keys from a wallet dump file."""
    try:
        return rpc_connection.importwallet(filename_str)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# listsinceblock ("blockhash" target-confirmations includeWatchonly)
def listsinceblock(blockhash=None, target_confirmations=None, includeWatchonly=None):
    """List wallet transactions since *blockhash*."""
    try:
        return rpc_connection.listsinceblock(blockhash, target_confirmations, includeWatchonly)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# getblockhash index
def getblockhash(index=None):
    """Return the hash of the block at height *index*."""
    try:
        return rpc_connection.getblockhash(index)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# getrawmempool ( verbose )
def getrawmempool(verbose=None):
    """Return the transaction ids (or details, if verbose) in the mempool."""
    try:
        return rpc_connection.getrawmempool(verbose)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# verifychain ( checklevel numblocks )
def verifychain(checklevel=None, numblocks=None):
    """Verify the local blockchain database."""
    try:
        return rpc_connection.verifychain(checklevel, numblocks)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err
# listreceivedbyaddress (minconf includeempty includeWatchonly)
def listreceivedbyaddress(minconf=None, includeempty=None, includeWatchonly=None):
    """List amounts received, grouped by address."""
    try:
        return rpc_connection.listreceivedbyaddress(minconf, includeempty, includeWatchonly)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# listreceivedbyaccount (minconf includeempty includeWatchonly)
def listreceivedbyaccount(minconf=None, includeempty=None, includeWatchonly=None):
    """List amounts received, grouped by account."""
    try:
        return rpc_connection.listreceivedbyaccount(minconf, includeempty, includeWatchonly)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def settxfee(amount=None):
    """Set the wallet transaction fee per kB."""
    try:
        return rpc_connection.settxfee(amount)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# sendtoaddressix "blocknetdxaddress" amount ( "comment" "comment-to" )
def sendtoaddressix(blocknetdxaddress=None, amount=None, comment=None, commentto=None):
    """Send *amount* to an address using instant send."""
    try:
        return rpc_connection.sendtoaddressix(blocknetdxaddress, amount, comment, commentto)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# sendtoaddress "blocknetdxaddress" amount ( "comment" "comment-to" )
def sendtoaddress(blocknetdxaddress=None, amount=None, comment=None, commentto=None):
    """Send *amount* to a BlocknetDX address."""
    try:
        return rpc_connection.sendtoaddress(blocknetdxaddress, amount, comment, commentto)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def walletpassphrasechange(old=None, new=None):
    """Change the wallet passphrase from *old* to *new*."""
    try:
        return rpc_connection.walletpassphrasechange(old, new)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def walletpassphrase(passphrase=None, timeout=0, anonymizeonly=False):
    """Unlock the wallet for *timeout* seconds."""
    try:
        return rpc_connection.walletpassphrase(passphrase, timeout, anonymizeonly)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# backupwallet "destination"
def backupwallet(destination):
    """Back up the wallet file to *destination*."""
    try:
        return rpc_connection.backupwallet(destination)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# bip38decrypt "blocknetdxaddress"
def bip38decrypt(blocknetdxaddress):
    """Decrypt a BIP38-encrypted key for *blocknetdxaddress*."""
    try:
        return rpc_connection.bip38decrypt(blocknetdxaddress)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# bip38encrypt "blocknetdxaddress"
def bip38encrypt(blocknetdxaddress):
    """BIP38-encrypt the key for *blocknetdxaddress*."""
    try:
        return rpc_connection.bip38encrypt(blocknetdxaddress)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err
# setgenerate generate ( genproclimit )
def setgenerate(generate, genproclimit=None):
    """Turn block generation on/off (optionally limiting processors)."""
    try:
        return rpc_connection.setgenerate(generate, genproclimit)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# listaccounts (minconf includeWatchonly)
def listaccounts(minconf=None, includeWatchonly=None):
    """Return wallet accounts with their balances."""
    try:
        return rpc_connection.listaccounts(minconf, includeWatchonly)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# listtransactions ( "account" count from includeWatchonly)
def listtransactions(account=None, count=None, from_param=None, includeWatchonly=None):
    """List recent wallet transactions (``from_param`` maps to RPC ``from``)."""
    try:
        return rpc_connection.listtransactions(account, count, from_param, includeWatchonly)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# reservebalance [<reserve> [amount]]
def reservebalance(reserve, amount):
    """Reserve *amount* so it is not used for staking."""
    try:
        return rpc_connection.reservebalance(reserve, amount)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# addnode "node" "add|remove|onetry"
def addnode(node_str, cmd):
    """Add, remove or one-shot-connect a peer node."""
    try:
        return rpc_connection.addnode(node_str, cmd)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# getaddednodeinfo dns bool ( "node" )
def getaddednodeinfo(dns_bool, node_str=None):
    """Return info about manually added nodes."""
    try:
        return rpc_connection.getaddednodeinfo(dns_bool, node_str)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


# verifymessage "blocknetdxaddress" "signature" "message"
def verifymessage(blocknetdxaddress, signature, message):
    """Verify a signed message against an address."""
    try:
        return rpc_connection.verifymessage(blocknetdxaddress, signature, message)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def validateaddress(address):
    """Return validity information for *address*."""
    try:
        return rpc_connection.validateaddress(address)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def estimatepriority(int_value):
    """Estimate the priority a transaction needs to confirm within *int_value* blocks."""
    try:
        return rpc_connection.estimatepriority(int_value)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def estimatefee(int_value):
    """Estimate the fee per kB to confirm within *int_value* blocks."""
    try:
        return rpc_connection.estimatefee(int_value)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err
def importprivkey(blocknetdxprivkey, label=None, rescan=None):
    """Import a private key into the wallet."""
    try:
        return rpc_connection.importprivkey(blocknetdxprivkey, label, rescan)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def dumpprivkey(blocknetdxaddress):
    """Reveal the private key for *blocknetdxaddress*."""
    try:
        return rpc_connection.dumpprivkey(blocknetdxaddress)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def prioritisetransaction(txid, priority, fee):
    """Adjust the mining priority/fee of *txid* in the local mempool.

    A UnicodeDecodeError from the transport layer is also treated as an
    expected (valid) failure, matching the original behavior.
    """
    try:
        return rpc_connection.prioritisetransaction(txid, priority, fee)
    except UnicodeDecodeError as decode_err:
        raise xbridge_custom_exceptions.ValidBlockNetException from decode_err
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def getnetworkhashps(blocks, height):
    """Return the estimated network hashes per second."""
    try:
        return rpc_connection.getnetworkhashps(blocks, height)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err


def getreceivedbyaccount(address=None):
    """Return the total amount received by the given account."""
    try:
        return rpc_connection.getreceivedbyaccount(address)
    except JSONRPCException as err:
        if any(known in str(err) for known in valid_msgs):
            raise xbridge_custom_exceptions.ValidBlockNetException from err
def getaccountaddress(address=None):
try:
return rpc_connection.getbalance(address)
except | |
"""Wrappers for `einops <https://einops.rocks/>`_.
The einops module is available only from ``xarray_einstats.einops`` and is not
imported when doing ``import xarray_einstats``.
To use it you need to have installed einops manually or alternatively
install this library as ``xarray-einstats[einops]`` or ``xarray-einstats[all]``.
Details about the exact command are available at :ref:`installation`
"""
import einops
import xarray as xr
__all__ = ["rearrange", "raw_rearrange", "reduce", "raw_reduce"]
class DimHandler:
    """Map real dimension names to short einops-safe placeholders (d0, d1, ...)."""

    def __init__(self):
        # dimension name -> placeholder string, in first-seen order
        self.mapping = {}

    def get_name(self, dim):
        """Return the placeholder for *dim*, creating a new one on first use."""
        try:
            return self.mapping[dim]
        except KeyError:
            placeholder = f"d{len(self.mapping)}"
            self.mapping[dim] = placeholder
            return placeholder

    def get_names(self, dim_list):
        """Space-join the placeholders for every dimension in *dim_list*."""
        return " ".join(self.get_name(dim) for dim in dim_list)

    def rename_kwarg(self, key):
        """Translate an ``axes_lengths`` kwarg key; unknown keys pass through unchanged."""
        return self.mapping.get(key, key)
def process_pattern_list(redims, handler, allow_dict=True, allow_list=True):
    """Process a pattern list and convert it to an einops expression using placeholders.

    Parameters
    ----------
    redims : pattern_list
        One of ``out_dims`` or ``in_dims`` in {func}`~xarray_einstats.einops.rearrange`
        or {func}`~xarray_einstats.einops.reduce`.
    handler : DimHandler
    allow_dict, allow_list : bool, optional
        Whether or not to allow lists or dicts as elements of ``redims``.
        When processing ``in_dims`` for example we need the names of
        the variables to be decomposed so dicts are required and lists/tuples
        are not accepted.

    Returns
    -------
    expression_dims : list of str
        A list with the names of the dimensions present in the out expression
    output_dims : list of str
        A list with the names of the dimensions present in the output.
        It differs from ``expression_dims`` because there might be dimensions
        being stacked.
    pattern : str
        The einops expression equivalent to the operations in ``redims`` pattern
        list.

    Examples
    --------
    Whenever we have groupings of dimensions (be it to decompose or to stack),
    ``expression_dims`` and ``output_dims`` differ:

    .. jupyter-execute::

        from xarray_einstats.einops import process_pattern_list, DimHandler
        handler = DimHandler()
        process_pattern_list(["a", {"b": ("c", "d")}, ("e", "f", "g")], handler)
    """
    out = []
    out_names = []
    txt = []
    for subitem in redims:
        if isinstance(subitem, str):
            # plain dimension: appears as-is in both expression and output
            out.append(subitem)
            out_names.append(subitem)
            txt.append(handler.get_name(subitem))
        elif isinstance(subitem, dict) and allow_dict:
            if len(subitem) != 1:
                raise ValueError(
                    "dicts in pattern list must have a single key but instead "
                    f"found {len(subitem)}: {subitem.keys()}"
                )
            key, values = list(subitem.items())[0]
            if isinstance(values, str):
                raise ValueError("Found values of type str in a pattern dict, use xarray.rename")
            # the grouped dims go in the expression, the dict key names the result
            out.extend(values)
            out_names.append(key)
            txt.append(f"( {handler.get_names(values)} )")
        elif allow_list and not isinstance(subitem, dict):
            # BUG FIX: previously a dict reaching this branch (allow_dict=False)
            # was silently iterated over its keys; now it falls through to the
            # explicit error below.
            out.extend(subitem)
            out_names.append("-".join(subitem))
            txt.append(f"( {handler.get_names(subitem)} )")
        else:
            raise ValueError(
                f"Found unsupported pattern type: {type(subitem)}, double check the docs. "
                "This can happen for example when using lists/tuples as elements of the "
                "in_dims argument"
            )
    return out, out_names, " ".join(txt)
def translate_pattern(pattern):
    """Translate a string pattern to a list pattern.

    Parameters
    ----------
    pattern : str
        Input pattern as a string. The ``raw_`` wrappers use these patterns.

    Returns
    -------
    pattern_list
        Pattern translated to list, as used by the full fledged wrappers
        instead of the ``raw_`` ones.

    Examples
    --------
    .. jupyter-execute::

        from xarray_einstats.einops import translate_pattern
        translate_pattern("a (c d)=b (e f g)")
    """
    dims = []
    token = ""            # characters of the dimension name currently being read
    group = []            # dimension names collected inside the current ( ... )
    in_group = False      # True while scanning between "(" and ")"
    awaiting_key = False  # True right after ")": an optional "=name" may follow
    # a trailing space guarantees the last token/group is always flushed
    for char in pattern.strip() + " ":
        if char == " ":
            if awaiting_key:
                # finalize the group: "(c d)=b" -> {"b": [...]}, "(c d)" -> [...]
                dims.append({token: group} if token else group)
                group = []
                awaiting_key = False
                in_group = False
            elif not token:
                # consecutive whitespace: nothing to flush
                continue
            elif in_group:
                group.append(token)
            else:
                dims.append(token)
            token = ""
        elif char == ")":
            if not in_group:
                raise ValueError("unmatched parenthesis")
            in_group = False
            awaiting_key = True
            if token:
                group.append(token)
                token = ""
        elif char == "(":
            in_group = True
        elif char == "=":
            if not awaiting_key:
                raise ValueError("= sign must follow a closing parenthesis )")
        else:
            token += char
    return dims
def rearrange(da, out_dims, in_dims=None, **kwargs):
    """Wrap `einops.rearrange <https://einops.rocks/api/rearrange/>`_.

    Parameters
    ----------
    da : xarray.DataArray
        Input DataArray to be rearranged
    out_dims : list of str, list or dict
        The output pattern for the dimensions.
        The dimensions present in the output: plain strings are kept as is,
        lists/tuples mark dimensions to be stacked together, and dicts
        (``{name: (dim1, dim2)}``) additionally give a name to the stacked
        dimension (see ``process_pattern_list``).
    in_dims : list of str or dict, optional
        The input pattern for the dimensions.
        This is only necessary if you want to split some dimensions.
    kwargs : dict, optional
        kwargs with key equal to dimension names in ``out_dims``
        (that is, strings or dict keys) are passed to einops.rearrange
        the rest of keys are passed to :func:`xarray.apply_ufunc`

    Notes
    -----
    Unlike for general xarray objects, where dimension
    names can be :term:`hashable <xarray:name>` here
    dimension names are not recommended but required to be
    strings.

    See Also
    --------
    xarray_einstats.einops.raw_rearrange:
        Cruder wrapper of einops.rearrange, allowed characters in dimension names are restricted
    xarray.DataArray.transpose, xarray.Dataset.transpose
    xarray.DataArray.stack, xarray.Dataset.stack
    """
    da_dims = da.dims
    handler = DimHandler()
    if in_dims is None:
        # no decomposition requested: empty input-side pattern
        in_dims = []
        in_names = []
        in_pattern = ""
    else:
        # lists are disallowed on the input side: to split a dim we must know
        # its (single) name, so only str/dict elements make sense here
        in_dims, in_names, in_pattern = process_pattern_list(
            in_dims, handler=handler, allow_list=False
        )
    # note, not using sets for da_dims to avoid transpositions on missing variables,
    # if they wanted to transpose those they would not be missing variables
    out_dims, out_names, out_pattern = process_pattern_list(out_dims, handler=handler)
    # dims of da not mentioned on the input side are passed through unchanged
    missing_in_dims = [dim for dim in da_dims if dim not in in_names]
    # NOTE(review): expected_missing = dims accounted for by the explicit
    # patterns (output dims plus dims consumed by decomposition, minus the
    # products of that decomposition); everything else in da is carried
    # through on the output side as well -- confirm against upstream
    # xarray_einstats before changing.
    expected_missing = set(out_dims).union(in_names).difference(in_dims)
    missing_out_dims = [dim for dim in da_dims if dim not in expected_missing]
    # build the einops string from placeholder names (handler maps real names
    # to d0, d1, ... so arbitrary characters in dim names are safe)
    pattern = f"{handler.get_names(missing_in_dims)} {in_pattern} ->\
 {handler.get_names(missing_out_dims)} {out_pattern}"
    # split kwargs: axes lengths (keyed by dim name) go to einops, the rest
    # to xarray.apply_ufunc
    axes_lengths = {
        handler.rename_kwarg(k): v for k, v in kwargs.items() if k in out_names + out_dims
    }
    kwargs = {k: v for k, v in kwargs.items() if k not in out_names + out_dims}
    return xr.apply_ufunc(
        einops.rearrange,
        da,
        pattern,
        input_core_dims=[missing_in_dims + in_names, []],
        output_core_dims=[missing_out_dims + out_names],
        kwargs=axes_lengths,
        **kwargs,
    )
def raw_rearrange(da, pattern, **kwargs):
    """Crudely wrap `einops.rearrange <https://einops.rocks/api/rearrange/>`_.

    Wrapper around einops.rearrange with a very similar syntax.
    Spaces, parenthesis ``()`` and `->` are not allowed in dimension names.

    Parameters
    ----------
    da : xarray.DataArray
        Input array
    pattern : string
        Pattern string. Same syntax as patterns in einops with two
        caveats:

        * Unless splitting or stacking, you must use the actual dimension names.
        * When splitting or stacking you can use `(dim1 dim2)=dim`. This is `necessary`
          for the left hand side as it identifies the dimension to split, and
          optional on the right hand side, if omitted the stacked dimension will be given
          a default name.
    kwargs : dict, optional
        Passed to :func:`xarray_einstats.einops.rearrange`

    Returns
    -------
    xarray.DataArray

    See Also
    --------
    xarray_einstats.einops.rearrange:
        More flexible and powerful wrapper over einops.rearrange. It is also more verbose.
    """
    # an input-side pattern is only present when there is a "->" separator
    if "->" in pattern:
        lhs, rhs = pattern.split("->")
        in_dims = translate_pattern(lhs)
        out_pattern = rhs
    else:
        in_dims = None
        out_pattern = pattern
    return rearrange(da, out_dims=translate_pattern(out_pattern), in_dims=in_dims, **kwargs)
def reduce(da, reduction, out_dims, in_dims=None, **kwargs):
"""Wrap `einops.reduce <https://einops.rocks/api/reduce/>`_.
Parameters
----------
da : xarray.DataArray
Input DataArray to be reduced
reduction : string or callable
One of available reductions ('min', 'max', 'sum', 'mean', 'prod') by ``einops.reduce``,
case-sensitive. Alternatively, a callable ``f(tensor, reduced_axes) -> tensor``
can be provided. ``reduced_axes`` are passed as a list of int.
out_dims : list of str, list or dict
The output pattern for the dimensions.
The dimensions present in
in_dims : list of str or dict, optional
The input pattern for the dimensions.
This is only necessary if you want to split some dimensions.
kwargs : dict, optional
kwargs with key equal to dimension names in ``out_dims``
(that is, strings or dict keys) are passed to einops.rearrange
the rest of keys are passed to :func:`xarray.apply_ufunc`
Notes
-----
Unlike for general xarray objects, where dimension
names can be :term:`hashable <xarray:name>` here
dimension names are not recommended but required to be
strings.
See Also
--------
xarray_einstats.einops.raw_reduce:
Cruder wrapper of einops.rearrange, allowed characters in dimension names are restricted
xarray_einstats.einops.rearrange, xarray_einstats.einops.raw_rearrange
"""
da_dims = da.dims
handler = DimHandler()
if in_dims is None:
in_dims = []
in_names = []
in_pattern = ""
else:
in_dims, in_names, in_pattern = process_pattern_list(
in_dims, handler=handler, allow_list=False
)
# note, not using sets for da_dims to avoid transpositions on missing variables,
# if they wanted to transpose those they would not be missing variables
out_dims, out_names, out_pattern = process_pattern_list(out_dims, handler=handler)
missing_in_dims = [dim for dim in da_dims | |
'74', '90', '106', '122', '138', '154', '170', '186' ]
addCurvePieceEdgeLoops( 0.95 , edges )
edges.extend( [ '189', '191', '193', '195', '197', '199' ] )
addCurvePieceEdgeLoops( 0.05 , edges )
edges = [ '36', '37', '39', '41', '68', '80', '100', '112', '132', '144', '164',
'176', '210', '230', '258', '278' ]
addCurvePieceEdgeLoops( 0.95 , edges )
edges.remove( '112' )
edges.remove( '144' )
edges.remove( '176' )
edges.remove( '80' )
edges.remove( '278' )
edges.remove( '230' )
edges.extend( [ '285', '287', '291', '293', '295', '297' ] )
addCurvePieceEdgeLoops( 0.95 , edges )
edges = [ '44', '45', '47', '49', '70', '78', '102', '110', '134', '142', '166',
'174', '212', '228', '260', '276' ]
addCurvePieceEdgeLoops( 0.95 , edges )
edges.remove( '174' )
edges.remove( '142' )
edges.remove( '110' )
edges.remove( '276' )
edges.remove( '78' )
edges.remove( '228' )
edges.extend( [ '349', '351', '355', '357', '359', '361' ] )
addCurvePieceEdgeLoops( 0.95 , edges )
edges.remove( '349' )
edges.remove( '351' )
edges.remove( '355' )
edges.remove( '357' )
edges.remove( '359' )
edges.remove( '361' )
edges.extend( [ '393', '381', '383', '387', '389', '391' ] )
addCurvePieceEdgeLoops( 0.05 , edges )
edges = [ '381', '383', '387', '389', '391', '393', '412', '417', '427',
'429', '431', '433', '435', '437' ]
addCurvePieceEdgeLoops( 0.05 , edges )
edges = [ '52', '53', '55', '57', '72', '76', '104', '108', '136',
'140', '168', '172', '214', '226', '262', '274' ]
addCurvePieceEdgeLoops( 0.05 , edges )
edges = [ '76', '108', '140', '172', '226', '274', '476', '481',
'491', '493', '495', '497', '499', '501', '503', '505']
addCurvePieceEdgeLoops( 0.05 , edges )
#moves pivots
cm.select( "pillar_curvePiece" , r=True )
cm.move( 0, 0, -0.05 , "pillar_curvePiece.scalePivot","pillar_curvePiece.rotatePivot", r=True )
#assembles a normal pillar
def makeNormalPillar():
    """Assemble a standard (non-corner) pillar and unite it into one mesh.

    NOTE(review): assumes ``cm`` is ``maya.cmds`` and that the
    ``makePillar*`` builders defined earlier create objects named
    ``pillar_longPart``/``pillar_straightPiece``/``pillar_curvePiece``.
    Every duplicate/move/rotate below operates on Maya's current selection,
    so statement order must not change.
    """
    # first level: create the three base pieces
    makePillarLongPart()
    makePillarStraightPiece()
    makePillarCurvePiece()
    # second level: duplicate and place curve pieces around the pillar
    cm.select( "pillar_curvePiece" , r=True )
    cm.duplicate()
    cm.move( 0, 0.5, 0.66, r=True)
    cm.duplicate()
    cm.rotate(0, 90, 0, r=True)
    cm.move( 0,0, -0.18, r=True)
    cm.duplicate()
    cm.rotate(0, 180, 0, r=True)
    cm.duplicate('pillar_straightPiece')
    cm.select( "pillar_straightPiece1" , r=True )
    cm.move( 0, 0.5, 0.655, r=True)
    cm.duplicate('pillar_straightPiece')
    cm.select( "pillar_straightPiece2" , r=True )
    cm.move( 0, 0.5, 0, r=True)
    # pull selected faces of curvePiece1 back to shape the profile
    cm.select ( 'pillar_curvePiece1.f[2]' , 'pillar_curvePiece1.f[76]' ,
        'pillar_curvePiece1.f[44]' , 'pillar_curvePiece1.f[60]', 'pillar_curvePiece1.f[92]' ,
        'pillar_curvePiece1.f[95:99]' , 'pillar_curvePiece1.f[119:123]' , r=True)
    cm.move( 0, 0, -0.66, r=True)
    # third level: copy second-level curve pieces up and reshape
    cm.select ( 'pillar_curvePiece1', tgl=True )
    cm.select ( 'pillar_curvePiece2', tgl=True )
    cm.select ( 'pillar_curvePiece3', tgl=True )
    cm.duplicate()
    cm.move( 0, 0.5, 0.65, r=True)
    cm.select ( 'pillar_curvePiece4.f[2]' , 'pillar_curvePiece4.f[76]' ,
        'pillar_curvePiece4.f[44]' , 'pillar_curvePiece4.f[60]', 'pillar_curvePiece4.f[92]' ,
        'pillar_curvePiece4.f[95:99]' , 'pillar_curvePiece4.f[119:123]' , r=True)
    cm.move( 0, 0, -0.66, r=True)
    cm.duplicate('pillar_straightPiece2')
    cm.select( "pillar_straightPiece3" , r=True )
    cm.move( 0, 0.5, 0, r=True)
    cm.duplicate()
    cm.move( 0,0, 0.65, r=True)
    cm.duplicate()
    cm.move( 0,0, 0.67, r=True)
    # smooth every curve piece once
    cm.polySmooth( 'pillar_curvePiece' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece1' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece2' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece3' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece4' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece5' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece6' , dv=1 )
    # combine all pillar_* objects into a single mesh and clear history
    cm.select ( 'pillar*' , r=True )
    cm.polyUnite( n='normalPillar' )
    cm.delete( ch=True )
#makes the corner pillar
def makeCornerPillar():
    """Assemble the corner pillar (two faces plus a diagonal) into one mesh.

    NOTE(review): assumes ``cm`` is ``maya.cmds``; like makeNormalPillar,
    the whole body is selection-state dependent, so statement order must
    not change.
    """
    # first level: base pieces plus a mirrored and a diagonal curve piece
    makePillarLongPart()
    makePillarStraightPiece()
    makePillarCurvePiece()
    cm.move( 0, 0, -0.2 , "pillar_curvePiece.scalePivot","pillar_curvePiece.rotatePivot", r=True )
    cm.select( "pillar_curvePiece" , r=True )
    cm.duplicate()
    cm.rotate( 0, -90, 0 , r=True )
    cm.duplicate() #makes diagonal piece
    cm.rotate( 0, 45, 0 , r=True )
    cm.move ( -0.15 , 0 , 0.15, r=True )
    cm.select ( 'pillar_curvePiece2.f[2]' , 'pillar_curvePiece2.f[76]' ,
        'pillar_curvePiece2.f[44]' , 'pillar_curvePiece2.f[60]', 'pillar_curvePiece2.f[92]' ,
        'pillar_curvePiece2.f[95:99]' , 'pillar_curvePiece2.f[119:123]' , r=True)
    cm.move ( 0.15 , 0 , -0.15, r=True )
    # second level, make medium curve piece
    cm.duplicate( 'pillar_curvePiece' )
    cm.select( "pillar_curvePiece3" , r=True )
    cm.move( 0, 0.5, 0.65, r=True)
    cm.select ( 'pillar_curvePiece3.f[2]' , 'pillar_curvePiece3.f[76]' ,
        'pillar_curvePiece3.f[44]' , 'pillar_curvePiece3.f[60]', 'pillar_curvePiece3.f[92]' ,
        'pillar_curvePiece3.f[95:99]' , 'pillar_curvePiece3.f[119:123]' , r=True)
    cm.move ( 0 , 0 , -0.6, r=True )
    cm.move( 0, 0, -0.65 , "pillar_curvePiece3.scalePivot","pillar_curvePiece3.rotatePivot", r=True )
    # second level, make everything else
    cm.duplicate("pillar_curvePiece3")
    cm.select( "pillar_curvePiece4" , r=True )
    cm.rotate( 0, -90, 0, r=True)
    cm.duplicate() #makes diagonal piece
    cm.rotate( 0, 45, 0, r=True)
    cm.move ( -0.37, 0, 0.37, r=True )
    cm.select ( 'pillar_curvePiece5.f[2]' , 'pillar_curvePiece5.f[76]' ,
        'pillar_curvePiece5.f[44]' , 'pillar_curvePiece5.f[60]', 'pillar_curvePiece5.f[92]' ,
        'pillar_curvePiece5.f[95:99]' , 'pillar_curvePiece5.f[119:123]' , r=True)
    cm.move ( 0.4 , 0 , -0.4, r=True )
    cm.select( 'pillar_straightPiece' , r=True )
    cm.duplicate()
    cm.move(0, 0.5, 0 , r=True)
    cm.duplicate()
    cm.move(0, 0, 0.66 , r=True)
    cm.duplicate()
    cm.move(-0.66, 0, -0.66 , r=True)
    cm.duplicate()
    cm.move(0.06, 0, 0.61 , r=True)
    cm.rotate(0,45,0, r=True)
    # third level, make long curve piece, part 1
    cm.duplicate( 'pillar_curvePiece' )
    cm.select( "pillar_curvePiece6" , r=True )
    cm.move( 0, 1, 1.3, r=True)
    cm.select ( 'pillar_curvePiece6.f[2]' , 'pillar_curvePiece6.f[76]' ,
        'pillar_curvePiece6.f[44]' , 'pillar_curvePiece6.f[60]', 'pillar_curvePiece6.f[92]' ,
        'pillar_curvePiece6.f[95:99]' , 'pillar_curvePiece6.f[119:123]' , r=True)
    cm.move ( 0 , 0 , -1.3 , r=True )
    cm.move( 0, 0, -1.3 , "pillar_curvePiece6.scalePivot","pillar_curvePiece6.rotatePivot", r=True )
    # third level, make middle long bit
    cm.select( "pillar_curvePiece6" , r=True )
    cm.duplicate()
    cm.rotate( 0, -45, 0, r=True)
    cm.move( -0.5 , 0 , 0.5 , r=True )
    cm.select ( 'pillar_curvePiece7.f[2]' , 'pillar_curvePiece7.f[76]' ,
        'pillar_curvePiece7.f[44]' , 'pillar_curvePiece7.f[60]', 'pillar_curvePiece7.f[92]' ,
        'pillar_curvePiece7.f[95:99]' , 'pillar_curvePiece7.f[119:123]' , r=True)
    cm.move ( 0.5 , 0 , -0.5 , r=True )
    cm.select('pillar_straightPiece1' , r=True)
    cm.duplicate()
    cm.move( 0 , 0.5 , 0 , r=True )
    cm.select('pillar_straightPiece4' , r=True)
    cm.duplicate()
    cm.move( 0 , 0.5 , 0 , r=True )
    cm.duplicate()
    cm.move( -0.68 , 0 , 0.68 , r=True )
    # third level, make long curve piece, part 2
    cm.select('pillar_straightPiece5' , r=True)
    cm.duplicate()
    cm.move( 0 , 0 , 0.66 , r=True )
    cm.duplicate()
    cm.move( 0 , 0 , .66 , r=True )
    cm.select( "pillar_curvePiece1" , r=True )
    cm.duplicate()
    cm.move( 0 , 1 , 1.31 , r=True )
    cm.polySmooth( 'pillar_curvePiece6' , dv=1 )
    cm.select('pillar_curvePiece6' , 'pillar_straightPiece8' , 'pillar_straightPiece9' , r=True)
    cm.polyUnite( n='pillar_curvePieceL' )
    cm.delete( ch=True )
    # third level, make other side (mirror of the long piece)
    cm.duplicate()
    cm.rotate(0,-90,0, r=True)
    cm.select('pillar_curvePiece' , r=True)
    cm.duplicate()
    cm.move( -1.32 , 1 , 0 , r=True )
    # smooth curve pieces (curvePiece6 was already smoothed before uniting)
    cm.polySmooth( 'pillar_curvePiece' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece1' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece2' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece3' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece4' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece5' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece7' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece8' , dv=1 )
    cm.polySmooth( 'pillar_curvePiece9' , dv=1 )
    # combine corner pillar into a single mesh and clear history
    cm.select( 'pillar_*' , r=True )
    cm.polyUnite( n='cornerPillar' )
    cm.delete( ch=True )
#makes the part under the roof
def makeFloor():
    """Build one side of the under-roof level: corner pillar, `width` normal
    pillars, and the stacked rafters; group everything, then rotate three
    duplicates of the group to form the remaining sides, and add the inside cube.
    Relies on module-level `width` and the helpers makeCornerPillar()/
    makeNormalPillar().  NOTE(review): like the rest of this script, the calls
    operate on Maya's implicit selection state, so the order must not change.
    """
    #make pillars
    makeCornerPillar()
    for i in range(int(width)):
        makeNormalPillar()
        # pillars are spaced 3 units apart, starting 3 units from the corner
        cm.move ( (3*i)+3 , 0, 0, r=True )
    #make rafters
    actualWidth = ( 3 * float(width))
    cm.polyCube( d=0.16, h=0.16, w=( actualWidth + 3) , sx=5, sy=5, sz=5, n='rafter1' )
    cm.move( (actualWidth + 3 ) * 0.5 , 4.06 , 0, r=True )
    cm.duplicate()
    cm.move( 0, 1, 0, r=True)
    cm.duplicate()
    cm.move( 0, 0.52, 0, r=True)
    cm.duplicate()
    cm.move( 0, 0.495, 0, r=True)
    cm.duplicate()
    cm.move( 0, 0, 0.676, r=True)
    cm.duplicate()
    cm.move( 0, 0.5, -0.676, r=True)
    # the top three rafters get progressively longer to support the roof overhang
    cm.polyCube( d=0.16, h=0.16, w=( actualWidth + 4.8) , sx=5, sy=5, sz=5, n='rafter7' )
    cm.move( (actualWidth + 2.9 ) * 0.5, 6.564 , 0.68, r=True )
    cm.polyCube( d=0.16, h=0.16, w=( actualWidth + 6.2) , sx=5, sy=5, sz=5, n='rafter8' )
    cm.move( (actualWidth + 3 ) * 0.5, 6.564 , 1.32, r=True )
    cm.polyCube( d=0.16, h=0.16, w=( actualWidth + 8) , sx=5, sy=5, sz=5, n='rafter9' )
    cm.move( (actualWidth + 3 ) * 0.5, 6.564 , 1.94, r=True )
    #group them
    cm.select( 'cornerPillar', 'normalP*', 'rafter*', r=True )
    cm.group( n='pillars' )
    cm.delete( ch=True )
    #move and duplicate
    # center the group on the origin, reset its pivot, then spin three copies
    # by 90 degrees each to form the other three sides
    cm.move ( (actualWidth + 3)*-0.5 , 0, (actualWidth + 3)*0.5 , a=True )
    cm.move( 0, 0, 0 , "pillars.scalePivot","pillars.rotatePivot", a=True )
    cm.duplicate()
    cm.rotate( 0, 90, 0, r=True )
    cm.duplicate()
    cm.rotate( 0, 90, 0, r=True )
    cm.duplicate()
    cm.rotate( 0, 90, 0, r=True )
    #make the inside cube
    cm.polyCube( d= actualWidth+3.1 , h=7, w= actualWidth+3.1 , n='insidecube' )
    cm.move( 0, 3.5, 0, r=True)
    #group
    cm.select( 'pillar*', 'insidecube', r=True )
    cm.group( n='floor_group' )
#make lower roof
def makeLowerRoof():
#--make balcony floor
#create the roof group
cm.group( em=True , | |
import copy
from command_categories import *
from util import *
from ir_utils import *
from definitions.ir.redirection import *
from definitions.ir.resource import *
## Assumption: Everything related to a DFGNode must be already expanded.
## TODO: Ensure that this is true with assertions
class DFGNode:
## Unique identifier for nodes
next_id = 0
## inputs : tuple of lists of fid_ids (that can be used to retrieve fid from edges)
## outputs : list of fid_ids
## com_name : command name Arg
## com_category : string denoting category
## input_consumption_mode : enumeration
## com_properties : properties such as commutativity
## com_mapper : a class that contains necessary information to instantiate a mapper (by defaule this corresponds to the command)
## com_aggregator : a class that contains necessary information to instantiate an aggregator
## com_options : list of tuples with the option index and the argument Arg
## com_redirs : list of redirections
## com_assignments : list of assignments
def __init__(self, inputs, outputs, com_name, com_category,
com_properties = [],
com_mapper = None,
com_aggregator = None,
com_options = [],
com_redirs = [],
com_assignments=[]):
## Add a unique identifier to each DFGNode since id() is not guaranteed to be unique for objects that have different lifetimes.
## This leads to issues when nodes are deleted and new ones are created, leading to id() clashes between them
self.id = DFGNode.next_id
DFGNode.next_id += 1
self.set_inputs(inputs)
self.outputs = outputs
self.com_name = com_name
self.com_category = com_category
self.com_properties = com_properties
self.com_mapper = com_mapper
self.com_aggregator = com_aggregator
self.com_options = com_options
self.com_redirs = [Redirection(redirection) for redirection in com_redirs]
self.com_assignments = com_assignments
# log("Node created:", self.id, self)
def __repr__(self):
prefix = "Node"
if (self.com_category == "stateless"):
prefix = "Stateless"
elif (self.com_category == "pure"):
prefix = "Pure"
elif (self.is_pure_parallelizable()):
prefix = "Par. Pure"
if (self.is_commutative()):
prefix = 'Commutative ' + prefix
output = "{}: \"{}\" in:{} out:{}".format(
prefix, self.com_name,
self.get_input_list(),
self.outputs)
return output
def get_id(self):
return self.id
## Copying requires setting the id to a new one too
def copy(self):
node_copy = copy.deepcopy(self)
node_copy.id = DFGNode.next_id
DFGNode.next_id += 1
return node_copy
## TODO: Make that a proper class.
def set_inputs(self, inputs):
if(isinstance(inputs, list)):
self.inputs = ([], inputs)
elif(isinstance(inputs, tuple)):
self.inputs = inputs
else:
raise NotImplementedError()
def get_input_list(self):
return (self.inputs[0] + self.inputs[1])
def get_standard_inputs(self):
return self.inputs[1]
def get_configuration_inputs(self):
return self.inputs[0]
def is_at_most_pure(self):
return (self.com_category in ["stateless", "pure", "parallelizable_pure"])
def is_parallelizable(self):
return (self.is_pure_parallelizable() or self.is_stateless())
def is_stateless(self):
return (self.com_category == "stateless")
def is_pure_parallelizable(self):
return (self.com_category == "parallelizable_pure")
def is_commutative(self):
return ('commutative' in self.com_properties)
## kk: 2021-07-23 Not totally sure if that is generally correct. Tests will say ¯\_(ツ)_/¯
## I think it assumes that new options can be added in the beginning if there are no options already
def append_options(self, new_options):
if(len(self.com_options) > 0):
max_opt_index = max([i for i, _opt in self.com_options])
else:
max_opt_index = -1
new_com_options = [(max_opt_index + 1 + i, Arg(string_to_argument(opt)))
for i, opt in enumerate(new_options)]
self.com_options = self.com_options + new_com_options
## TODO: Improve this functio to be separately implemented for different special nodes,
## such as cat, eager, split, etc...
    def to_ast(self, edges, drain_streams):
        """Reconstruct the shell AST for this command node.

        :param edges: mapping edge_id -> (fid, ...); this node's input/output
            fids are resolved through it.
        :param drain_streams: draining is not implemented; True raises.
        :returns: a command AST built by make_command.
        """
        ## TODO: We might not want to implement this at all actually
        if (drain_streams):
            raise NotImplementedError()
        else:
            ## TODO: Properly handle redirections
            ##
            ## TODO: If one of the redirected outputs or inputs is changed in the IR
            ## (e.g. `cat < s1` was changed to read from an ephemeral file `cat < "#file5"`)
            ## this needs to be changed in the redirections too. Maybe we can modify redirections
            ## when replacing fid.
            ##
            ## It seems that if we have already applied redirections we might not need to
            ## care about them anymore (since they will be created in new_redirs.)
            ##
            ## redirs = [redir.to_ast() for redir in self.com_redirs]
            ##
            ## At the moment we do not reprint redirections here (we only produce redirections
            ## where we recreate arguments and redirections).
            redirs = []
            assignments = self.com_assignments
            ## Start filling in the arguments
            ## com_options is sparse (explicit indices), so the list is padded
            ## with None up to each option's index before assigning it.
            opt_arguments = []
            for i, opt in self.com_options:
                ## Pad the argument list with None
                opt_arguments = pad(opt_arguments, i)
                opt_arguments[i] = opt.to_ast()
            com_name_ast = self.com_name.to_ast()
            option_asts = [opt.to_ast() for _, opt in self.com_options]
            ##
            ## 1. Find the input and output fids
            ## 2. Construct the rest of the arguments and input/output redirections according to
            ##    the command IO
            input_fids = [edges[in_id][0] for in_id in self.get_input_list()]
            output_fids = [edges[out_id][0] for out_id in self.outputs]
            rest_argument_fids, new_redirs = create_command_arguments_redirs(com_name_ast,
                                                                             option_asts,
                                                                             input_fids,
                                                                             output_fids)
            ## Transform the rest of the argument fids to arguments
            ## Since some of the rest_arguments can be None (they only contain inputs and outputs)
            ## we need to make sure that we don't turn None objects to asts.
            ##
            ## The None fields need to be filtered out because they are taken care of by the interleave function.
            ##
            ## TODO: Is this actually OK?
            rest_arguments = [fid.to_ast()
                              for fid in rest_argument_fids
                              if not fid is None]
            ## Interleave the arguments since options args might contain gaps.
            arguments = interleave_args(opt_arguments, rest_arguments)
            all_arguments = [com_name_ast] + arguments
            all_redirs = redirs + new_redirs
            node = make_command(all_arguments, redirections=all_redirs, assignments=assignments)
        return node
## This method applies the redirections to get the correct, inputs, outputs of a node.
##
## WARNING: For now it only works with 'To' redirections for
## stdout, and it applies them by adding a resource to the stdout
## of the command. It also keeps them for possible future usage.
##
## TODO: Properly handle all redirections. This requires a nice
## abstraction. Maybe the best way would be to keep them around
## and always recompute inputs/outputs when needed by following
## the redirections.
##
## TODO: Is it correct to apply redirections from left to right?
    def apply_redirections(self, edges):
        """Apply this node's redirections to its edges in place.

        Only `> file` on stdout and `< file` on stdin are supported: the
        matching stdout/stdin edge's resource is replaced with a FileResource.
        Any other redirection is logged and raises NotImplementedError.

        :param edges: mapping edge_id -> (fid, ...); fids are mutated in place.
        """
        unhandled_redirs = []
        for redirection in self.com_redirs:
            ## Handle To redirections that have to do with stdout
            if (redirection.is_to_file() and redirection.is_for_stdout()):
                # log(redirection)
                file_resource = FileResource(redirection.file_arg)
                success = False
                for i in range(len(self.outputs)):
                    output_edge_id = self.outputs[i]
                    output_fid = edges[output_edge_id][0]
                    if(output_fid.has_file_descriptor_resource()
                       and output_fid.resource.is_stdout()):
                        success = True
                        edges[output_edge_id][0].set_resource(file_resource)
                        # self.outputs[i].set_resource(file_resource)
                ## The node must expose a stdout edge for the redirection to land on.
                assert(success)
            elif (redirection.is_from_file() and redirection.is_for_stdin()):
                # log(redirection)
                file_resource = FileResource(redirection.file_arg)
                success = False
                for input_edge_id in self.get_input_list():
                    input_fid = edges[input_edge_id][0]
                    if(input_fid.has_file_descriptor_resource()
                       and input_fid.resource.is_stdin()):
                        success = True
                        edges[input_edge_id][0].set_resource(file_resource)
                assert(success)
            else:
                log("Warning -- Unhandled redirection:", redirection)
                unhandled_redirs.append(redirection)
                ## TODO: I am not sure if this is the correct way to handle unhandled redirections.
                ## Does it make any sense to keep them and have them in the Final AST.
                raise NotImplementedError()
## This renames the from_id (wherever it exists in inputs ot outputs)
## to the to_id.
##
## TODO: Make sure we don't need to change redirections here.
##
## TODO: Make this a method of graph to change the from, to too.
def replace_edge(self, from_id, to_id):
new_config_inputs = self.replace_edge_in_list(self.inputs[0], from_id, to_id)
new_standard_inputs = self.replace_edge_in_list(self.inputs[1], from_id, to_id)
new_outputs = self.replace_edge_in_list(self.outputs, from_id, to_id)
self.set_inputs((new_config_inputs, new_standard_inputs))
self.outputs = new_outputs
## TODO: There must be a lib function to do this.
def replace_edge_in_list(self, edge_ids, from_id, to_id):
new_edge_ids = []
for id in edge_ids:
if(id == from_id):
new_edge_id = to_id
else:
new_edge_id = id
new_edge_ids.append(new_edge_id)
return new_edge_ids
## Get the file names of the outputs of the map commands. This
## differs if the command is stateless, pure that can be
## written as a map and a reduce, and a pure that can be
## written as a generalized map and reduce.
def get_map_output_files(self, input_edge_ids, fileIdGen):
assert(self.is_parallelizable())
if(self.com_category == "stateless"):
map_output_fids = [fileIdGen.next_ephemeral_file_id() for in_fid in input_edge_ids]
elif(self.is_pure_parallelizable()):
map_output_fids = self.pure_get_map_output_files(input_edge_ids, fileIdGen)
else:
log("Unreachable code reached :(")
assert(False)
## This should be unreachable
return map_output_fids
## TODO: Fix this somewhere in the annotations and not in the code
def pure_get_map_output_files(self, input_edge_ids, fileIdGen):
assert(self.is_pure_parallelizable())
## The number of the mapper outputs defaults to 1
if(self.com_mapper is None):
number_outputs = 1
else:
number_outputs = self.com_mapper.num_outputs
| |
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2020, bikbuk and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
import pytz
from datetime import datetime as dt
from frappe.model.document import Document
from vet_website.vet_website.doctype.vetjournalentry.vetjournalentry import new_journal_entry
from vet_website.vet_website.doctype.vetpetowner.vetpetowner import set_owner_credit_total
from vet_website.vet_website.doctype.vetcustomerinvoice.vetcustomerinvoice import deliver_to_customer, add_payment
from vet_website.vet_website.doctype.vetpurchase.vetpurchase import check_paid_purchase
class VetOwnerCredit(Document):
    """Ledger row tracking a pet owner's (or supplier's) credit/debt movement;
    all behavior lives in the whitelisted module-level functions below."""
    pass
@frappe.whitelist()
def submit_piutang(action, nominal, petOwner, method):
    """Handle a pet owner's receivable/deposit action from the POS.

    :param action: 'Buat' (spend deposit), 'Deposit' (top up), 'Bayar' (pay open invoices)
    :param nominal: amount; parsed with float() here.  NOTE(review): the
        commented-out allocation code parsed '1.234,56'-style strings instead —
        confirm the format the caller sends.
    :param petOwner: VetPetOwner document name
    :param method: payment method label
    :returns: refreshed list of the owner's VetOwnerCredit rows, or {'error': ...}
    """
    try:
        tz = pytz.timezone("Asia/Jakarta")
        if action == 'Buat':
            ## An open POS session is required for any credit movement.
            session_search = frappe.get_list('VetPosSessions', filters={'status': 'In Progress'}, fields=['name'])
            if len(session_search) < 1:
                # NOTE(review): "bukan" is likely a typo for "buka"; message left unchanged.
                return {'error': "Belum ada POS Session yang dibuka, bukan POS Session terlebih dahulu"}
            ## The newest VetOwnerCredit row carries the running deposit balance.
            last_credit = 0
            last_credit_search = frappe.get_list('VetOwnerCredit', filters={'pet_owner': petOwner}, fields=['credit'], order_by="creation desc")
            if len(last_credit_search) != 0:
                last_credit = last_credit_search[0].credit
            if last_credit < float(nominal):
                return {'error': 'Nominal melebihi deposit, jumlah deposit tersedia %s'% last_credit, 'nominal': last_credit}
            ## Negative nominal records spending from the deposit.
            owner_credit = frappe.new_doc('VetOwnerCredit')
            owner_credit.update({
                'date': dt.strftime(dt.now(tz), "%Y-%m-%d %H:%M:%S"),
                'invoice': '',
                'type': 'Payment',
                'nominal': -float(nominal),
                'pet_owner': petOwner,
                'metode_pembayaran': method
            })
            owner_credit.insert()
            frappe.db.commit()
            set_owner_credit_total(petOwner)
            ## create_journal_entry is defined elsewhere in this module.
            create_journal_entry('Payment', -float(nominal), owner_credit.name)
        elif action == 'Deposit':
            session_search = frappe.get_list('VetPosSessions', filters={'status': 'In Progress'}, fields=['name'])
            if len(session_search) < 1:
                return {'error': "Belum ada POS Session yang dibuka, bukan POS Session terlebih dahulu"}
            ## Positive nominal with is_deposit flag records a top-up.
            owner_credit = frappe.new_doc('VetOwnerCredit')
            owner_credit.update({
                'date': dt.strftime(dt.now(tz), "%Y-%m-%d %H:%M:%S"),
                'invoice': '',
                'type': 'Payment',
                'nominal': nominal,
                'pet_owner': petOwner,
                'metode_pembayaran': method,
                'is_deposit': 1,
            })
            owner_credit.insert()
            frappe.db.commit()
            set_owner_credit_total(petOwner)
            create_journal_entry('Payment', nominal, owner_credit.name, method, True)
        elif action == 'Bayar':
            session_search = frappe.get_list('VetPosSessions', filters={'status': 'In Progress'}, fields=['name'])
            if len(session_search) < 1:
                return {'error': "Belum ada POS Session yang dibuka, bukan POS Session terlebih dahulu"}
            ## Invoice allocation now lives in bayar_hutang_invoice; the block
            ## below is the superseded in-place version, kept for reference.
            bayar_hutang_invoice(nominal, petOwner, method)
            # owner_credit = frappe.get_list('VetOwnerCredit', filters={'pet_owner': petOwner}, order_by="creation asc", fields=['*'])
            # sales_credit = frappe.get_list('VetOwnerCredit', filters={'pet_owner': petOwner, 'type': 'Sales'}, order_by="creation asc", fields=['*'])
            # list_credit = []
            # for i in owner_credit:
            #     if i.invoice and i.type == 'Sales':
            #         invoice_name = i.invoice
            #         nominal_invoice = i.nominal
            #     elif i.type == 'Sales':
            #         index = [x for x in range(len(sales_credit)) if sales_credit[x] == i]
            #         invoice_sales = [t for t in sales_credit[:index[0]] if t['invoice'] != '' and t['invoice'] != None]
            #         invoice_name = invoice_sales[-1]['invoice']
            #         nominal_invoice = i.nominal + float(invoice_sales[-1]['nominal'])
            #     if i.type == 'Sales' and check_invoice(invoice_name , owner_credit, nominal_invoice):
            #         list_credit.append(i)
            # if list_credit:
            #     i = 0
            #     for l in list_credit:
            #         if float(nominal.replace('.','').replace(',','.')) > 0:
            #             if l.invoice:
            #                 invoice_name = l.invoice
            #             else:
            #                 index = [x for x in range(len(sales_credit)) if sales_credit[x] == l]
            #                 invoice_sales = [t for t in sales_credit[:index[0]] if t['invoice'] != '' and t['invoice'] != None]
            #                 invoice_name = invoice_sales[-1]['invoice']
            #             pay = frappe.get_list('VetCustomerInvoicePay', filters={'parent': invoice_name}, fields=['sum(jumlah) as paid'])
            #             if pay[0]['paid'] != None:
            #                 remaining = l.nominal - float(pay[0]['paid'])
            #             else:
            #                 remaining = l.nominal
            #             if float(nominal.replace('.','').replace(',','.')) > remaining and l != list_credit[-1]:
            #                 data = {
            #                     'jumlah': remaining,
            #                     'name': invoice_name,
            #                     'deposit': 0,
            #                     'tipe': 'Sales',
            #                     'method': method
            #                 }
            #                 create_je(data)
            #                 nominal = float(nominal.replace('.','').replace(',','.')) - float(remaining)
            #             else:
            #                 data = {
            #                     'jumlah': nominal,
            #                     'name': invoice_name,
            #                     'deposit': 0,
            #                     'tipe': 'Sales',
            #                     'method': method
            #                 }
            #                 create_je(data)
            #                 nominal = 0
            #         i += 1
            # else:
            #     invoice_sales = [t for t in sales_credit if t['invoice'] != '' and t['invoice'] != None]
            #     invoice_name = invoice_sales[-1]['invoice']
            #     data = {
            #         'jumlah': nominal,
            #         'name': invoice_name,
            #         'deposit': 0,
            #         'tipe': 'Sales',
            #         'method': method
            #     }
            #     create_je(data)
        credit_list = frappe.get_list('VetOwnerCredit', filters={'pet_owner': petOwner}, fields=['*'], order_by='creation desc')
        return credit_list
    except PermissionError as e:
        return {'error': e}
@frappe.whitelist()
def process_invoice(data):
    """Mark the given draft VetOwnerCredit rows as Done and post their
    journal entries; Payment rows are allocated across open sales invoices.

    :param data: JSON-encoded list of VetOwnerCredit names
    :returns: refreshed VetOwnerCredit rows for the last processed owner,
        or {'error': ...} on PermissionError
    """
    try:
        data_json = json.loads(data)
        for d in data_json:
            credit = frappe.get_doc('VetOwnerCredit', d)
            if credit.status == 'Draft':
                credit.status = 'Done'
                credit.save()
                frappe.db.commit()
                if credit.type == 'Sales':
                    set_owner_credit_total(credit.pet_owner)
                    create_journal_entry('Sales', credit.nominal, credit.name)
                elif credit.type == 'Payment':
                    ## Collect the Sales rows whose invoices are still payable,
                    ## resolving rows without an invoice to the closest earlier
                    ## Sales row that carries one.
                    owner_credit = frappe.get_list('VetOwnerCredit', filters={'pet_owner': credit.pet_owner}, order_by="creation asc", fields=['*'])
                    sales_credit = frappe.get_list('VetOwnerCredit', filters={'pet_owner': credit.pet_owner, 'type': 'Sales'}, order_by="creation asc", fields=['*'])
                    list_credit = []
                    for i in owner_credit:
                        if i.invoice and i.type == 'Sales':
                            invoice_name = i.invoice
                            nominal_invoice = i.nominal
                        elif i.type == 'Sales':
                            index = [x for x in range(len(sales_credit)) if sales_credit[x] == i]
                            invoice_sales = [t for t in sales_credit[:index[0]] if t['invoice'] != '' and t['invoice'] != None]
                            invoice_name = invoice_sales[-1]['invoice']
                            nominal_invoice = i.nominal + float(invoice_sales[-1]['nominal'])
                        if i.type == 'Sales' and check_invoice(invoice_name, owner_credit, nominal_invoice):
                            list_credit.append(i)
                    if list_credit:
                        i = 0
                        ## NOTE(review): nominal is parsed as a '1.234,56'-style
                        ## string below but is reassigned to int 0 once fully
                        ## consumed; a further loop iteration would then call
                        ## .replace() on an int -- confirm this cannot happen.
                        nominal = credit.nominal
                        for l in list_credit:
                            if float(nominal.replace('.','').replace(',','.')) > 0:
                                if l.invoice:
                                    invoice_name = l.invoice
                                else:
                                    index = [x for x in range(len(sales_credit)) if sales_credit[x] == l]
                                    invoice_sales = [t for t in sales_credit[:index[0]] if t['invoice'] != '' and t['invoice'] != None]
                                    invoice_name = invoice_sales[-1]['invoice']
                                ## Outstanding amount = invoice nominal minus what is already paid.
                                pay = frappe.get_list('VetCustomerInvoicePay', filters={'parent': invoice_name}, fields=['sum(jumlah) as paid'])
                                if pay[0]['paid'] != None:
                                    remaining = l.nominal - float(pay[0]['paid'])
                                else:
                                    remaining = l.nominal
                                if float(nominal.replace('.','').replace(',','.')) > remaining and l != list_credit[-1]:
                                    ## Partially consume the payment and move on to the next invoice.
                                    data = {
                                        'jumlah': remaining,
                                        'name': invoice_name,
                                        'deposit': 0,
                                        'from_owner_credit': True,
                                        'tipe': 'Sales',
                                    }
                                    create_je(data)
                                    nominal = float(nominal.replace('.','').replace(',','.')) - float(remaining)
                                else:
                                    ## Last invoice (or payment fits): put the rest here.
                                    data = {
                                        'jumlah': nominal,
                                        'name': invoice_name,
                                        'deposit': 0,
                                        'from_owner_credit': True,
                                        'tipe': 'Sales',
                                    }
                                    create_je(data)
                                    nominal = 0
                            i += 1
                    else:
                        ## No payable rows found: book the whole payment against
                        ## the most recent invoice-bearing Sales row.
                        invoice_sales = [t for t in sales_credit if t['invoice'] != '' and t['invoice'] != None]
                        invoice_name = invoice_sales[-1]['invoice']
                        nominal = credit.nominal
                        data = {
                            'jumlah': nominal,
                            'name': invoice_name,
                            'deposit': 0,
                            'from_owner_credit': True,
                            'tipe': 'Sales',
                        }
                        create_je(data)
        credit_list = frappe.get_list('VetOwnerCredit', filters={'pet_owner': credit.pet_owner}, fields=['*'], order_by='creation desc')
        return credit_list
    except PermissionError as e:
        return {'error': e}
@frappe.whitelist()
def submit_piutang_purchase(action, nominal, supplier, method):
    """Supplier-side counterpart of submit_piutang.

    :param action: 'Buat' (spend deposit), 'Deposit' (top up), 'Bayar' (pay open purchases)
    :param nominal: amount; parsed with float() here
    :param supplier: supplier document name
    :param method: payment method label
    :returns: refreshed VetOwnerCredit rows for the supplier, or {'error': ...}
    """
    try:
        tz = pytz.timezone("Asia/Jakarta")
        if action == 'Buat':
            ## Newest VetOwnerCredit row carries the running deposit balance.
            last_credit = 0
            last_credit_search = frappe.get_list('VetOwnerCredit', filters={'supplier': supplier}, fields=['credit'], order_by="creation desc")
            if len(last_credit_search) != 0:
                last_credit = last_credit_search[0].credit
            if last_credit < float(nominal):
                return {'error': 'Nominal melebihi deposit, jumlah deposit tersedia %s'% last_credit, 'nominal': last_credit}
            ## Negative nominal records spending from the supplier deposit.
            owner_credit = frappe.new_doc('VetOwnerCredit')
            owner_credit.update({
                'date': dt.strftime(dt.now(tz), "%Y-%m-%d %H:%M:%S"),
                'purchase': '',
                'type': 'Payment',
                'nominal': -float(nominal),
                'supplier': supplier,
                'metode_pembayaran': method,
            })
            owner_credit.insert()
            frappe.db.commit()
            set_owner_credit_total(supplier, True)
            create_journal_entry('Purchase Payment', -float(nominal), owner_credit.name, method, True)
        elif action == 'Deposit':
            owner_credit = frappe.new_doc('VetOwnerCredit')
            owner_credit.update({
                'date': dt.strftime(dt.now(tz), "%Y-%m-%d %H:%M:%S"),
                'purchase': '',
                'type': 'Payment',
                'nominal': nominal,
                'supplier': supplier,
                'metode_pembayaran': method,
                'is_deposit': 1,
            })
            owner_credit.insert()
            frappe.db.commit()
            set_owner_credit_total(supplier, True)
            create_journal_entry('Purchase Payment', nominal, owner_credit.name, method, True)
        elif action == 'Bayar':
            ## Allocation across open purchases now lives in bayar_hutang_purchase;
            ## the block below is the superseded in-place version, kept for reference.
            bayar_hutang_purchase(nominal, supplier, method)
            # owner_credit = frappe.get_list('VetOwnerCredit', filters={'supplier': supplier}, order_by="creation asc", fields=['*'])
            # sales_credit = frappe.get_list('VetOwnerCredit', filters={'supplier': supplier, 'type': 'Purchase'}, order_by="creation asc", fields=['*'])
            # list_credit = []
            # for i in owner_credit:
            #     if i.purchase and i.type == 'Purchase':
            #         purchase_name = i.purchase
            #         nominal_purchase = i.nominal
            #     elif i.type == 'Purchase':
            #         index = [x for x in range(len(sales_credit)) if sales_credit[x] == i]
            #         purchase_purchase = [t for t in sales_credit[:index[0]] if t['purchase'] != '' and t['purchase'] != None]
            #         purchase_name = purchase_purchase[-1]['purchase']
            #         nominal_purchase = i.nominal + float(purchase_purchase[-1]['nominal'])
            #     if i.type == 'Purchase' and check_invoice(purchase_name, owner_credit, nominal_purchase, True):
            #         list_credit.append(i)
            # if list_credit:
            #     i = 0
            #     for l in list_credit:
            #         if float(nominal) > 0:
            #             if l.purchase:
            #                 purchase_name = l.purchase
            #             else:
            #                 index = [x for x in range(len(sales_credit)) if sales_credit[x] == l]
            #                 purchase_purchase = [t for t in sales_credit[:index[0]] if t['purchase'] != '' and t['purchase'] != None]
            #                 purchase_name = purchase_purchase[-1]['purchase']
            #             pay = frappe.get_list('VetPurchasePay', filters={'parent': purchase_name}, fields=['sum(jumlah) as paid'])
            #             if pay[0]['paid'] != None:
            #                 remaining = l.nominal - float(pay[0]['paid'])
            #             else:
            #                 remaining = l.nominal
            #             if float(nominal.replace('.','').replace(',','.')) > remaining and l != list_credit[-1]:
            #                 data = {
            #                     'jumlah': remaining,
            #                     'name': purchase_name,
            #                     'deposit': 0,
            #                     'tipe': 'Purchase',
            #                     'method': method
            #                 }
            #                 create_je(data)
            #                 nominal = float(nominal.replace('.','').replace(',','.')) - float(remaining)
            #             else:
            #                 data = {
            #                     'jumlah': nominal,
            #                     'name': purchase_name,
            #                     'deposit': 0,
            #                     'tipe': 'Purchase',
            #                     'method': method
            #                 }
            #                 create_je(data)
            #                 nominal = 0
            #         i += 1
            # else:
            #     purchase_purchase = [t for t in sales_credit if t['purchase'] != '' and t['purchase'] != None]
            #     purchase_name = purchase_purchase[-1]['purchase']
            #     data = {
            #         'jumlah': nominal,
            #         'name': purchase_name,
            #         'deposit': 0,
            #         'tipe': 'Purchase',
            #         'method': method
            #     }
            #     create_je(data)
        credit_list = frappe.get_list('VetOwnerCredit', filters={'supplier': supplier}, fields=['*'], order_by='creation desc')
        return credit_list
    except PermissionError as e:
        return {'error': e}
def bayar_hutang_purchase(nominal, supplier, method):
    """Allocate a supplier payment across open purchase orders, oldest-debt last.

    Walks the supplier's purchase orders, pays each order's outstanding
    (received-but-unpaid) amount via create_je until `nominal` is exhausted,
    then books any remainder first against the running supplier debt and
    finally as supplier credit (unless paying from a deposit).

    :param nominal: payment amount (float-parseable)
    :param supplier: supplier document name
    :param method: payment method label; 'Deposit' in it suppresses credit booking
    """
    def discount_value(value, discount):
        # discount is a percentage (0-100)
        return value - (value * discount/100)
    tz = pytz.timezone("Asia/Jakarta")
    ## Running total supplier debt, taken from the newest VetOwnerCredit row.
    all_debt = 0
    last_debt = frappe.get_list("VetOwnerCredit", fields=["debt"], filters={'supplier': supplier}, order_by="creation desc")
    if last_debt:
        all_debt = last_debt[0]['debt']
    current_nominal = float(nominal)
    purchase_orders = frappe.get_list("VetPurchase", fields=["name", "potongan"], filters={'supplier': supplier, 'status': ['in', ['Purchase Order', 'Receive', 'Paid']]}, order_by="creation desc")
    for po in purchase_orders:
        purchase_order_payments = frappe.get_list('VetPurchasePay', filters={'parent': po.name}, fields=['*'])
        purchase_order_products = frappe.get_list('VetPurchaseProducts', filters={'parent': po.name}, fields=['*'])
        paid = sum(p.jumlah for p in purchase_order_payments)
        ## Outstanding debt for an order = value of received goods minus payments,
        ## where payments are capped at the discounted order total.
        received_total = sum(discount_value(p.quantity_receive * p.price, p.discount) for p in purchase_order_products)
        subtotal = sum(discount_value(p.quantity * p.price, p.discount) for p in purchase_order_products)
        total = subtotal - po.potongan
        debt = received_total - (paid if paid <= total else total)
        debt = debt if debt > 0 else 0
        if debt > 0 and current_nominal > 0:
            if current_nominal > debt:
                data = {'jumlah': debt, 'name': po.name, 'tipe': 'Purchase', 'method': method}
                create_je(data)
                current_nominal -= debt
                all_debt -= debt
            elif current_nominal <= debt:
                data = {'jumlah': current_nominal, 'name': po.name, 'tipe': 'Purchase', 'method': method}
                create_je(data)
                current_nominal = 0
                ## NOTE(review): only current_nominal was paid here, yet the full
                ## order debt is subtracted from all_debt -- confirm intended.
                all_debt -= debt
    if all_debt > 0 and current_nominal > 0:
        ## Leftover payment: book it against the remaining running debt...
        if all_debt >= current_nominal:
            debt_payment = frappe.new_doc('VetOwnerCredit')
            debt_payment.update({'date': dt.strftime(dt.now(tz), "%Y-%m-%d %H:%M:%S"), 'purchase': '', 'type': 'Payment', 'nominal': current_nominal, 'supplier': supplier, 'metode_pembayaran': method})
            debt_payment.insert()
            frappe.db.commit()
            set_owner_credit_total(supplier, True)
            create_journal_entry('Purchase Payment', current_nominal, debt_payment.name, method)
            current_nominal = 0
        elif all_debt < current_nominal:
            debt_payment = frappe.new_doc('VetOwnerCredit')
            debt_payment.update({'date': dt.strftime(dt.now(tz), "%Y-%m-%d %H:%M:%S"), 'purchase': '', 'type': 'Payment', 'nominal': all_debt, 'supplier': supplier, 'metode_pembayaran': method})
            debt_payment.insert()
            frappe.db.commit()
            set_owner_credit_total(supplier, True)
            create_journal_entry('Purchase Payment', all_debt, debt_payment.name, method)
            ## ...and any overpayment becomes supplier credit, unless the payment
            ## itself came from a deposit.
            if 'Deposit' not in method:
                credit_payment = frappe.new_doc('VetOwnerCredit')
                credit_payment.update({'date': dt.strftime(dt.now(tz), "%Y-%m-%d %H:%M:%S"), 'purchase': '', 'type': 'Payment', 'nominal': current_nominal-all_debt, 'supplier': supplier, 'metode_pembayaran': method})
                credit_payment.insert()
                frappe.db.commit()
                set_owner_credit_total(supplier, True)
                create_journal_entry('Purchase Payment', current_nominal-all_debt, credit_payment.name, method, True)
            current_nominal = 0
    elif all_debt <= 0 and current_nominal > 0 and 'Deposit' not in method:
        ## No debt at all: the whole payment becomes supplier credit.
        credit_payment = frappe.new_doc('VetOwnerCredit')
        credit_payment.update({'date': dt.strftime(dt.now(tz), "%Y-%m-%d %H:%M:%S"), 'purchase': '', 'type': 'Payment', 'nominal': current_nominal, 'supplier': supplier, 'metode_pembayaran': method})
        credit_payment.insert()
        frappe.db.commit()
        set_owner_credit_total(supplier, True)
        create_journal_entry('Purchase Payment', current_nominal, credit_payment.name, method, True)
        current_nominal = 0
def bayar_hutang_invoice(nominal, pet_owner, method):
def discount_value(value, discount):
return value - (value * discount/100)
tz = pytz.timezone("Asia/Jakarta")
all_debt = 0
last_debt = frappe.get_list("VetOwnerCredit", fields=["debt"], filters={'pet_owner': pet_owner}, order_by="creation desc")
if last_debt:
all_debt = last_debt[0]['debt']
current_nominal = float(nominal)
customer_invoices = frappe.get_list("VetCustomerInvoice", fields=["name", "potongan"], filters={'owner': pet_owner, 'status': 'Open'}, order_by="creation desc")
for ci in customer_invoices:
customer_invoice_payments = frappe.get_list('VetCustomerInvoicePay', filters={'parent': ci.name}, fields=['*'])
customer_invoice_products = frappe.get_list('VetCustomerInvoiceLine', filters={'parent': ci.name}, fields=['*'])
paid = | |
<gh_stars>0
import argparse
import os
import random
import time
import math
import json
from functools import partial
import codecs
import zipfile
import re
from tqdm import tqdm
import sys
import io
import collections
from typing import Optional, List, Union, Dict
from dataclasses import dataclass
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import DataLoader
from paddlenlp.utils.log import logger
from paddlenlp.transformers import BertForTokenClassification, BertTokenizer, LinearDecayWithWarmup
from ..model import Relation_ExtractionModel
# utils
def find_entity(text_raw, id_, predictions, tok_to_orig_start_index,
                tok_to_orig_end_index):
    """
    retrieval entity mention under given predicate id for certain prediction.
    this is called by the "decoding" func.

    A token whose multi-label prediction contains [id_] starts a mention;
    following tokens whose prediction contains [1] (the "I" tag) extend it.
    Returns the deduplicated mention strings.
    """
    mentions = set()
    for start in range(len(predictions)):
        if [id_] not in predictions[start]:
            continue
        span_len = 0
        while (start + span_len + 1 < len(predictions)
               and [1] in predictions[start + span_len + 1]):
            span_len += 1
        chars = text_raw[tok_to_orig_start_index[start]:
                         tok_to_orig_end_index[start + span_len] + 1]
        mentions.add(''.join(chars))
    return list(mentions)
def decoding(example_batch,
             id2spo,
             logits_batch,
             seq_len_batch,
             tok_to_orig_start_index_batch,
             tok_to_orig_end_index_batch):
    """
    model output logits -> formatted spo (as in data set file)

    Per example: logits are thresholded at 0.5 into multi-label token
    predictions, subject/object spans are recovered with find_entity, and
    SPO triples are assembled from the id2spo schema.  Label-id scheme used
    below: subject labels are ids 2..56 and the matching object label is
    id + 55.  NOTE(review): the complex_relation_* id lists are data-set
    specific; logits rows are assumed to be numpy arrays supporting
    boolean-mask assignment.
    """
    formatted_outputs = []
    for (i, (example, logits, seq_len, tok_to_orig_start_index, tok_to_orig_end_index)) in \
            enumerate(zip(example_batch, logits_batch, seq_len_batch, tok_to_orig_start_index_batch, tok_to_orig_end_index_batch)):
        logits = logits[1:seq_len +
                        1]  # slice between [CLS] and [SEP] to get valid logits
        logits[logits >= 0.5] = 1
        logits[logits < 0.5] = 0
        tok_to_orig_start_index = tok_to_orig_start_index[1:seq_len + 1]
        tok_to_orig_end_index = tok_to_orig_end_index[1:seq_len + 1]
        ## predictions[t] = list of [label_id] hits for token t
        predictions = []
        for token in logits:
            predictions.append(np.argwhere(token == 1).tolist())
        # format predictions into example-style output
        formatted_instance = {}
        text_raw = example['text']
        complex_relation_label = [8, 10, 26, 32, 46]
        complex_relation_affi_label = [9, 11, 27, 28, 29, 33, 47]
        # flatten predictions then retrival all valid subject id
        flatten_predictions = []
        for layer_1 in predictions:
            for layer_2 in layer_1:
                flatten_predictions.append(layer_2[0])
        ## a subject id is valid only when its object counterpart (id + 55)
        ## was also predicted somewhere in the sequence
        subject_id_list = []
        for cls_label in list(set(flatten_predictions)):
            if 1 < cls_label <= 56 and (cls_label + 55) in flatten_predictions:
                subject_id_list.append(cls_label)
        subject_id_list = list(set(subject_id_list))
        # fetch all valid spo by subject id
        spo_list = []
        for id_ in subject_id_list:
            if id_ in complex_relation_affi_label:
                continue  # do this in the next "else" branch
            if id_ not in complex_relation_label:
                ## simple relation: pair every subject mention with every object mention
                subjects = find_entity(text_raw, id_, predictions,
                                       tok_to_orig_start_index,
                                       tok_to_orig_end_index)
                objects = find_entity(text_raw, id_ + 55, predictions,
                                      tok_to_orig_start_index,
                                      tok_to_orig_end_index)
                for subject_ in subjects:
                    for object_ in objects:
                        spo_list.append({
                            "predicate": id2spo['predicate'][id_],
                            "object_type": {
                                '@value': id2spo['object_type'][id_]
                            },
                            'subject_type': id2spo['subject_type'][id_],
                            "object": {
                                '@value': object_
                            },
                            "subject": subject_
                        })
            else:
                # traverse all complex relation and look through their corresponding affiliated objects
                subjects = find_entity(text_raw, id_, predictions,
                                       tok_to_orig_start_index,
                                       tok_to_orig_end_index)
                objects = find_entity(text_raw, id_ + 55, predictions,
                                      tok_to_orig_start_index,
                                      tok_to_orig_end_index)
                for subject_ in subjects:
                    for object_ in objects:
                        object_dict = {'@value': object_}
                        object_type_dict = {
                            '@value': id2spo['object_type'][id_].split('_')[0]
                        }
                        ## affiliated slot name comes from the '_'-suffixed part
                        ## of the object_type entry for the affiliated label
                        if id_ in [8, 10, 32, 46
                                   ] and id_ + 1 in subject_id_list:
                            id_affi = id_ + 1
                            object_dict[id2spo['object_type'][id_affi].split(
                                '_')[1]] = find_entity(text_raw, id_affi + 55,
                                                       predictions,
                                                       tok_to_orig_start_index,
                                                       tok_to_orig_end_index)[0]
                            object_type_dict[id2spo['object_type'][
                                id_affi].split('_')[1]] = id2spo['object_type'][
                                    id_affi].split('_')[0]
                        elif id_ == 26:
                            for id_affi in [27, 28, 29]:
                                if id_affi in subject_id_list:
                                    object_dict[id2spo['object_type'][id_affi].split('_')[1]] = \
                                    find_entity(text_raw, id_affi + 55, predictions, tok_to_orig_start_index, tok_to_orig_end_index)[0]
                                    object_type_dict[id2spo['object_type'][id_affi].split('_')[1]] = \
                                    id2spo['object_type'][id_affi].split('_')[0]
                        spo_list.append({
                            "predicate": id2spo['predicate'][id_],
                            "object_type": object_type_dict,
                            "subject_type": id2spo['subject_type'][id_],
                            "object": object_dict,
                            "subject": subject_
                        })
        formatted_instance['text'] = example['text']
        formatted_instance['spo_list'] = spo_list
        formatted_outputs.append(formatted_instance)
    return formatted_outputs
def write_prediction_results(formatted_outputs, file_path):
    """Write prediction results as JSON lines and bundle them into a zip.

    Args:
        formatted_outputs: iterable of JSON-serializable prediction dicts.
        file_path: destination path for the JSON-lines file.

    Returns:
        Path of the created zip archive (``file_path + '.zip'``).
    """
    with codecs.open(file_path, 'w', 'utf-8') as f:
        for formatted_instance in formatted_outputs:
            # ensure_ascii=False keeps CJK text human-readable in the dump.
            f.write(json.dumps(formatted_instance, ensure_ascii=False))
            f.write('\n')
    zipfile_path = file_path + '.zip'
    # Use a context manager so the archive is always closed; the original
    # leaked the ZipFile handle, which can leave the zip unflushed.
    with zipfile.ZipFile(zipfile_path, 'w', zipfile.ZIP_DEFLATED) as zf:
        zf.write(file_path)
    return zipfile_path
def get_precision_recall_f1(golden_file, predict_file):
    """Run the official RE evaluation script and parse its metrics.

    Args:
        golden_file: path to the gold-standard file.
        predict_file: path to the prediction file.

    Returns:
        Tuple ``(precision, recall, f1)`` as floats.

    Raises:
        AttributeError: if a metric is missing from the script output.
    """
    r = os.popen(
        'python3 ./re_official_evaluation.py --golden_file={} --predict_file={}'.
        format(golden_file, predict_file))
    result = r.read()
    r.close()

    def _metric(name):
        # Capture the numeric value with a regex group. The original used
        # lstrip/rstrip with multi-character arguments, which strip character
        # *sets* rather than prefixes/suffixes and would mangle any value
        # sharing characters with the label (e.g. scientific notation: 'e').
        match = re.search(
            r'"%s", "value":\s*([-+0-9.eE]+)\s*}' % re.escape(name), result)
        return float(match.group(1))

    return _metric("precision"), _metric("recall"), _metric("f1-score")
# extract_chinese_and_punct
# Code-point tables used by ChineseAndPunctuationExtractor.build_re() below.
# LHan: Han ideographs — a mix of inclusive [start, end] range pairs and
# single code points. The trailing comments cite the Unicode category and
# character names for each entry.
LHan = [
    [0x2E80, 0x2E99],  # Han # So [26] CJK RADICAL REPEAT, CJK RADICAL RAP
    [0x2E9B, 0x2EF3
     ],  # Han # So [89] CJK RADICAL CHOKE, CJK RADICAL C-SIMPLIFIED TURTLE
    [0x2F00, 0x2FD5],  # Han # So [214] KANGXI RADICAL ONE, KANGXI RADICAL FLUTE
    0x3005,  # Han # Lm IDEOGRAPHIC ITERATION MARK
    0x3007,  # Han # Nl IDEOGRAPHIC NUMBER ZERO
    [0x3021,
     0x3029],  # Han # Nl [9] HANGZHOU NUMERAL ONE, HANGZHOU NUMERAL NINE
    [0x3038,
     0x303A],  # Han # Nl [3] HANGZHOU NUMERAL TEN, HANGZHOU NUMERAL THIRTY
    0x303B,  # Han # Lm VERTICAL IDEOGRAPHIC ITERATION MARK
    [
        0x3400, 0x4DB5
    ],  # Han # Lo [6582] CJK UNIFIED IDEOGRAPH-3400, CJK UNIFIED IDEOGRAPH-4DB5
    [
        0x4E00, 0x9FC3
    ],  # Han # Lo [20932] CJK UNIFIED IDEOGRAPH-4E00, CJK UNIFIED IDEOGRAPH-9FC3
    [
        0xF900, 0xFA2D
    ],  # Han # Lo [302] CJK COMPATIBILITY IDEOGRAPH-F900, CJK COMPATIBILITY IDEOGRAPH-FA2D
    [
        0xFA30, 0xFA6A
    ],  # Han # Lo [59] CJK COMPATIBILITY IDEOGRAPH-FA30, CJK COMPATIBILITY IDEOGRAPH-FA6A
    [
        0xFA70, 0xFAD9
    ],  # Han # Lo [106] CJK COMPATIBILITY IDEOGRAPH-FA70, CJK COMPATIBILITY IDEOGRAPH-FAD9
    [
        0x20000, 0x2A6D6
    ],  # Han # Lo [42711] CJK UNIFIED IDEOGRAPH-20000, CJK UNIFIED IDEOGRAPH-2A6D6
    [0x2F800, 0x2FA1D]
]  # Han # Lo [542] CJK COMPATIBILITY IDEOGRAPH-2F800, CJK COMPATIBILITY IDEOGRAPH-2FA1D
# CN_PUNCTS: (code point, character) pairs of Chinese punctuation. Note that
# 0x0020 (ASCII space) is deliberately included in this set.
CN_PUNCTS = [(0x3002, "。"), (0xFF1F, "?"), (0xFF01, "!"), (0xFF0C, ","),
             (0x3001, "、"), (0xFF1B, ";"), (0xFF1A, ":"), (0x300C, "「"),
             (0x300D, "」"), (0x300E, "『"), (0x300F, "』"), (0x2018, "‘"),
             (0x2019, "’"), (0x201C, "“"), (0x201D, "”"), (0xFF08, "("),
             (0xFF09, ")"), (0x3014, "〔"), (0x3015, "〕"), (0x3010, "【"),
             (0x3011, "】"), (0x2014, "—"), (0x2026, "…"), (0x2013, "–"),
             (0xFF0E, "."), (0x300A, "《"), (0x300B, "》"), (0x3008, "〈"),
             (0x3009, "〉"), (0x2015, "―"), (0xff0d, "-"), (0x0020, " ")]
#(0xFF5E, "~"),
# EN_PUNCTS: inclusive [start, end] ranges covering ASCII punctuation.
EN_PUNCTS = [[0x0021, 0x002F], [0x003A, 0x0040], [0x005B, 0x0060],
             [0x007B, 0x007E]]
class ChineseAndPunctuationExtractor(object):
    """Single-character matcher for Han ideographs and punctuation.

    The character class is assembled from the module-level ``LHan``,
    ``CN_PUNCTS`` and ``EN_PUNCTS`` tables defined above.
    """

    def __init__(self):
        # Compiled once; matching is a single regex test per character.
        self.chinese_re = self.build_re()

    def is_chinese_or_punct(self, c):
        """Return True if character ``c`` is Chinese or punctuation."""
        return bool(self.chinese_re.match(c))

    def build_re(self):
        """Compile a ``[...]`` character-class regex from the code-point tables.

        Returns:
            A compiled regex matching exactly the configured characters.
        """
        L = []
        for i in LHan:
            if isinstance(i, list):
                f, t = i
                try:
                    L.append('%s-%s' % (chr(f), chr(t)))
                except ValueError:
                    # Narrow (UCS-2) builds can't represent astral-plane code
                    # points without surrogate pairs; skip those ranges there.
                    pass
            else:
                try:
                    L.append(chr(i))
                except ValueError:
                    pass
        for j, _ in CN_PUNCTS:
            try:
                L.append(chr(j))
            except ValueError:
                pass
        # EN_PUNCTS holds ASCII ranges only, so chr() cannot fail here; the
        # original's try/raise around this loop was dead code.
        for f, t in EN_PUNCTS:
            L.append('%s-%s' % (chr(f), chr(t)))
        RE = '[%s]' % ''.join(L)
        return re.compile(RE, re.UNICODE)
# data_loader
# Container for one tokenized training/eval example.
InputFeature = collections.namedtuple(
    "InputFeature",
    [
        "input_ids",
        "seq_len",
        "tok_to_orig_start_index",
        "tok_to_orig_end_index",
        "labels",
    ],
)
def parse_label(spo_list, label_map, tokens, tokenizer):
# 2 tags for each predicate + I tag + O tag
num_labels = 2 * (len(label_map.keys()) - 2) + 2
seq_len = len(tokens)
# initialize tag
labels = [[0] * num_labels for i in range(seq_len)]
# find all entities and tag them with corresponding "B"/"I" labels
for spo in spo_list:
for spo_object in spo['object'].keys():
# assign relation label
if spo['predicate'] in label_map.keys():
# simple relation
label_subject = label_map[spo['predicate']]
label_object = label_subject + 55
subject_tokens = tokenizer._tokenize(spo['subject'])
object_tokens = tokenizer._tokenize(spo['object']['@value'])
else:
# complex relation
label_subject = label_map[spo['predicate'] + '_' + spo_object]
label_object = label_subject + 55
subject_tokens = tokenizer._tokenize(spo['subject'])
object_tokens = tokenizer._tokenize(spo['object'][spo_object])
subject_tokens_len = len(subject_tokens)
object_tokens_len = len(object_tokens)
# assign token label
# there are situations where s entity and o entity might overlap, e.g. xyz established xyz corporation
# to prevent single token from being labeled into two different entity
# we tag the longer entity first, then match the shorter entity within the rest text
forbidden_index = None
if subject_tokens_len > object_tokens_len:
for index in range(seq_len - subject_tokens_len + 1):
if tokens[index:index +
subject_tokens_len] == subject_tokens:
labels[index][label_subject] = 1
for i in range(subject_tokens_len - 1):
labels[index + i + 1][1] = 1
forbidden_index = index
break
for index in range(seq_len - object_tokens_len + 1):
if tokens[index:index + object_tokens_len] == object_tokens:
if forbidden_index is None:
labels[index][label_object] = 1
for i in range(object_tokens_len - 1):
labels[index + i + 1][1] = 1
break
# check if labeled already
elif index < forbidden_index or index >= forbidden_index + len(
subject_tokens):
labels[index][label_object] = 1
for i in range(object_tokens_len - 1):
labels[index + i + 1][1] = 1
break
else:
for index in range(seq_len - object_tokens_len + 1):
if tokens[index:index + object_tokens_len] == object_tokens:
labels[index][label_object] = 1
for i in range(object_tokens_len - 1):
labels[index + i + 1][1] = 1
forbidden_index = index
break
for index in range(seq_len - subject_tokens_len + 1):
if tokens[index:index +
subject_tokens_len] | |
<filename>Personal_Project/personalproject/aplication/routes.py
from flask import Blueprint, render_template, url_for, flash, redirect, request, abort
from personalproject import db
from personalproject.aplication.forms import (ContaBancariaForm,
CartaoCreditoForm,
DespesaCartaoCreditoForm,
DespesaForm, ReceitaForm,
CategoriaDespesaForm,
CategoriaReceitaForm,
TransferenciaForm)
from personalproject.aplication.utils import (conta_bancaria_required,
cartao_required,
adicionar_registro,
adicionar_registro,
deletar_registro,
adicionar_despesa_cartao,
editar_despesa_cartao,
deletar_despesa_cartao,
transferencia)
from personalproject.models import (ContaBancaria,
CartaoCredito,
DespesaCartaoCredito,
Despesa,
CategoriaDespesa,
Receita,
CategoriaReceita)
from flask_login import current_user, login_required
from sqlalchemy.sql import func
# Blueprint that groups every finance-application route defined in this module.
aplication = Blueprint('aplication', __name__)
@aplication.route("/dashboard")
@login_required
def dashboard():
"""
Painel de controle do aplicativo.
Todo o código abaixo é responsável por montar os gráficos disponíveis
no front-end, assimilando categorias e valores.
Returns:
Template renderizado: dashboard.html
"""
saldo = db.session.query(func.sum(ContaBancaria.saldo)).filter_by(user=current_user).scalar()
total_despesas = db.session.query(func.sum(Despesa.valor)).filter_by(user=current_user).scalar()
total_receitas = db.session.query(func.sum(Receita.valor)).filter_by(user=current_user).scalar()
despesas = Despesa.query.filter_by(user=current_user).all()
receitas = Receita.query.filter_by(user=current_user).all()
despesa_data_list = []
despesa_label_list = []
dict_despesa = {}
receita_data_list = []
receita_label_list = []
dict_receita = {}
for despesa in despesas:
dict_despesa[str(despesa.categoria_despesa)] = (
dict_despesa.get(str(despesa.categoria_despesa), 0) + float(despesa.valor))
if str(despesa.categoria_despesa) not in despesa_label_list:
despesa_label_list.append(str(despesa.categoria_despesa))
for receita in receitas:
dict_receita[str(receita.categoria_receita)] = (
dict_receita.get(str(receita.categoria_receita), 0) + float(receita.valor))
if str(receita.categoria_receita) not in receita_label_list:
receita_label_list.append(str(receita.categoria_receita))
for valor in dict_despesa.values():
despesa_data_list.append(valor)
for valor in dict_receita.values():
receita_data_list.append(valor)
return render_template('dashboard.html', title='Painel de controle', legend='Painel de controle',
saldo=saldo, total_despesas=total_despesas, total_receitas=total_receitas,
despesa_data_list=despesa_data_list, despesa_label_list=despesa_label_list,
receita_data_list=receita_data_list, receita_label_list=receita_label_list,
currency=current_user.currency)
@aplication.route("/transferencia", methods=['GET', 'POST'])
@login_required
def transferencia_new():
"""
Transferência entre conta bancária.
Esta função trata-se da lógica responsável por realizar
a transeferência entre contas bancárias.
Returns:
Template renderizado: transferência.html
Redirecionamento: aplication.contas
"""
if not conta_bancaria_required():
flash('''Você ainda não criou uma conta bancária! Para realizar uma transferência
você deve ter, ao menos, uma conta bancária na sua conta.''', 'info')
return redirect(url_for('aplication.conta_new'))
form = TransferenciaForm()
if form.validate_on_submit():
if form.conta_destino.data.id == form.conta_origem.data.id:
flash('A conta origem não pode ser igual a conta destino', 'info')
transferencia(form.conta_origem.data.id,
form.conta_destino.data.id, form.valor.data)
flash('Transferência realizada.', 'success')
db.session.commit()
return redirect(url_for('aplication.contas'))
return render_template('transferencia.html', title='Transferência',
legend='Transferência', form=form)
@aplication.route("/transacoes")
@login_required
def transacoes():
"""
Tabelas de transações bancárias.
Página que organiza os dados do usuário em tabelas, juntamente
com informações compactas sobre as finanças do usuário.
Returns:
Template renderizado: transacoes.html
"""
despesas = Despesa.query.order_by(Despesa.data_origem.desc()).filter_by(user=current_user).all()
receitas = Receita.query.order_by(Receita.data_origem.desc()).filter_by(user=current_user).all()
saldo = db.session.query(func.sum(ContaBancaria.saldo)).filter_by(user=current_user).scalar()
total_despesas = db.session.query(func.sum(Despesa.valor)).filter_by(user=current_user).scalar()
total_receitas = db.session.query(func.sum(Receita.valor)).filter_by(user=current_user).scalar()
total_receitas = 0 if total_receitas is None else total_receitas
total_despesas = 0 if total_despesas is None else total_despesas
balanco = total_receitas - total_despesas
return render_template('transacoes.html', title='Transações', legend='Transações', despesas=despesas,
receitas=receitas, currency=current_user.currency, saldo = saldo,
total_receitas=total_receitas, total_despesas=total_despesas,
balanco=balanco)
@aplication.route("/despesa/new", methods=['GET', 'POST'])
@login_required
def despesa_new():
"""
Registrar uma despesa em conta bancária.
Lógica matemática é chamada de utils.py: adicionar_registro()
Returns:
Template renderizado: despesa.html
Redirecionamento: aplication.transacoes
"""
if not conta_bancaria_required():
flash('''Você ainda não criou uma conta bancária! Para criar uma despesa
você deve ter, ao menos, uma conta bancária na sua conta.''', 'info')
return redirect(url_for('aplication.conta_new'))
form = DespesaForm()
if form.validate_on_submit():
despesa = Despesa(status=form.status.data,
valor=form.valor.data,
data_origem=form.data_origem.data,
descricao=form.descricao.data,
categoria_despesa=form.categoria.data,
conta_bancaria=form.conta.data,
user=current_user)
db.session.add(despesa)
if form.status.data:
adicionar_registro(form.conta.data.id, form.valor.data, 1)
db.session.commit()
flash('Despesa adicionada.', 'success')
return redirect(url_for('aplication.transacoes'))
return render_template('despesa.html', title='Criar despesa',
legend='Criar despesa', form=form)
@aplication.route("/despesa/<int:despesa_id>/update", methods=['GET', 'POST'])
@login_required
def despesa_update(despesa_id):
"""
Editar uma despesa.
Args:
despesa_id (int): ID da despesa a ser editada.
Lógica matemática é chamada de utils.py: adicionar_registro()
Returns:
Template renderizado: despesa.html
Redirecionamento: aplication.transacoes
"""
despesa = Despesa.query.get_or_404(despesa_id)
if despesa.user != current_user:
abort(403)
form = DespesaForm()
if form.validate_on_submit():
valor_antigo = despesa.valor
id_conta_bancaria_antiga = despesa.conta_bancaria.id
despesa.valor=form.valor.data
despesa.data_origem=form.data_origem.data
despesa.descricao=form.descricao.data
despesa.categoria_despesa=form.categoria.data
despesa.conta_bancaria=form.conta.data
if despesa.status:
adicionar_registro(id_conta_bancaria_antiga, form.conta.data.id,
valor_antigo, form.valor.data, 1)
db.session.commit()
flash('Sua despesa foi alterada.', 'success')
return redirect(url_for('aplication.transacoes', despesa_id=despesa.id))
elif request.method == 'GET':
form.valor.data=despesa.valor
form.data_origem.data=despesa.data_origem
form.descricao.data=despesa.descricao
form.categoria.data=despesa.categoria_despesa
form.conta.data=despesa.conta_bancaria
return render_template('despesa.html', title='Atualizar despesa',
legend='Atualizar despesa', form=form)
@aplication.route("/despesa/<int:despesa_id>/statusUpdate", methods=['GET', 'POST'])
@login_required
def status_despesa_update(despesa_id):
"""
Editar o status de uma despesa.
Args:
despesa_id (int): ID da despesa a ser editada.
Lógica matemática funciona do seguinte jeito:
Quando uma despesa é criada, o status é registrado como TRUE ou FALSE.
Na hora de editar o status da despesa, o status só pode estar FALSE para
ser editado, e assim que o status é alterado ele muda para TRUE, retirando
o valor desta despesa da conta bancária.
Returns:
Redirecionamento: aplication.transacoes
"""
despesa = Despesa.query.filter_by(id=despesa_id).first()
conta = ContaBancaria.query.filter_by(id=despesa.conta_bancaria.id).first()
conta.saldo = conta.saldo - despesa.valor
despesa.status = True
db.session.commit()
flash('Status alterado!', 'success')
return redirect(url_for('aplication.transacoes', despesa_id=despesa.id))
@aplication.route("/despesa/<int:despesa_id>/delete", methods=['POST', 'GET'])
@login_required
def despesa_delete(despesa_id):
"""
Deletar a despesa.
Args:
despesa_id (int): ID da despesa a ser deletada.
Lógica matemática é chamada de utils.py: deletar_registro()
Returns:
Redirecionamento: aplication.transacoes
"""
despesa = Despesa.query.get_or_404(despesa_id)
if despesa.user != current_user:
abort(403)
deletar_registro(despesa.id_conta_bancaria, despesa.valor, 1)
db.session.delete(despesa)
db.session.commit()
flash('Sua despesa foi deletada.', 'success')
return redirect(url_for('aplication.transacoes'))
@aplication.route("/receita/new", methods=['GET', 'POST'])
@login_required
def receita_new():
"""
Registrar uma receita em conta bancária.
Lógica matemática é chamada de utils.py: adicionar_registro()
Returns:
Template renderizado: receita.html
Redirecionamento: aplication.transacoes
"""
if not conta_bancaria_required():
flash('Você ainda não criou uma conta bancária! Para criar uma despesa '
'você deve ter, ao menos, uma conta bancária na sua conta.', 'info')
return redirect(url_for('aplication.conta_new'))
form = ReceitaForm()
if form.validate_on_submit():
receita = Receita(status=form.status.data,
valor=form.valor.data,
data_origem=form.data_origem.data,
descricao=form.descricao.data,
categoria_receita=form.categoria.data,
conta_bancaria=form.conta.data,
user=current_user)
db.session.add(receita)
if form.status.data == True:
adicionar_registro(form.conta.data.id, form.valor.data, 2)
db.session.commit()
flash('Receita adicionada.', 'success')
return redirect(url_for('aplication.transacoes'))
return render_template('receita.html', title='Criar receita',
legend='Criar receita', form=form)
@aplication.route("/receita/<int:receita_id>/update", methods=['GET', 'POST'])
@login_required
def receita_update(receita_id):
"""
Editar uma receita.
Args:
receita_id (int): ID da receita a ser editada.
Lógica matemática é chamada de utils.py: adicionar_registro()
Returns:
Template renderizado: receita.html
Redirecionamento: aplication.transacoes
"""
receita = Receita.query.get_or_404(receita_id)
if receita.user != current_user:
abort(403)
form = ReceitaForm()
if form.validate_on_submit():
valor_antigo = receita.valor
id_conta_bancaria_antiga = receita.conta_bancaria.id
receita.valor=form.valor.data
receita.data_origem=form.data_origem.data
receita.descricao=form.descricao.data
receita.categoria_despesa=form.categoria.data
receita.conta_bancaria=form.conta.data
if receita.status:
adicionar_registro(id_conta_bancaria_antiga, form.conta.data.id,
valor_antigo, form.valor.data, 2)
db.session.commit()
flash('Sua receita foi alterada.', 'success')
return redirect(url_for('aplication.transacoes', receita_id=receita.id))
elif request.method == 'GET':
form.valor.data=receita.valor
form.data_origem.data=receita.data_origem
form.descricao.data=receita.descricao
form.categoria.data=receita.categoria_receita
form.conta.data=receita.conta_bancaria
return render_template('receita.html', title='Atualizar receita',
legend='Atualizar receita', form=form)
@aplication.route("/receita/<int:receita_id>/statusUpdate", methods=['GET', 'POST'])
@login_required
def status_receita_update(receita_id):
"""
Editar o status de uma receita.
Args:
receita_id (int): ID da receita a ser editada.
Lógica matemática funciona do seguinte jeito:
Quando uma receita é criada, o status é registrado como TRUE ou FALSE.
Na hora de editar o status da receita, o status só pode estar FALSE para
ser editado, e assim que o status é alterado ele muda para TRUE, adicionando
o valor desta receita na conta bancária.
Returns:
Redirecionamento: aplication.transacoes
"""
receita = Receita.query.filter_by(id=receita_id).first()
conta = ContaBancaria.query.filter_by(id=receita.conta_bancaria.id).first()
conta.saldo = conta.saldo + receita.valor
receita.status = True
db.session.commit()
flash('Status alterado!', 'success')
return redirect(url_for('aplication.transacoes', receita_id=receita.id))
@aplication.route("/receita/<int:receita_id>/delete", methods=['GET', 'POST'])
@login_required
def receita_delete(receita_id):
"""
Deletar a receita.
Args:
receita_id (int): ID da receita a ser deletada.
Lógica matemática é chamada de utils.py: deletar_registro()
Returns:
Redirecionamento: aplication.transacoes
"""
receita = Receita.query.get_or_404(receita_id)
if receita.user != current_user:
abort(403)
deletar_registro(receita.id_conta_bancaria, receita.valor, 2)
db.session.delete(receita)
db.session.commit()
flash('Sua receita foi deletada.', 'success')
return redirect(url_for('aplication.transacoes'))
@aplication.route("/minhasContas")
@login_required
def contas():
"""
Tabelas e informações de contas bancárias.
Página que organiza os dados do usuário em tabelas, juntamente
com informações compactas sobre as finanças do usuário.
Returns:
Template renderizado: contas.html
"""
contas = ContaBancaria.query.order_by(ContaBancaria.id.desc()).filter_by(user=current_user).all()
saldo = db.session.query(func.sum(ContaBancaria.saldo)).filter_by(user=current_user).scalar()
total_despesas = db.session.query(func.sum(Despesa.valor)).filter_by(user=current_user).scalar()
total_receitas = db.session.query(func.sum(Receita.valor)).filter_by(user=current_user).scalar()
return render_template('contas.html', title='Contas bancárias', legend='Contas bancárias', contas=contas,
currency=current_user.currency, saldo=saldo, total_despesas=total_despesas,
total_receitas=total_receitas)
@aplication.route("/conta/new", methods=['GET', 'POST'])
@login_required
def conta_new():
"""
Cadastrar uma nova conta bancária.
Returns:
Template renderizado: conta-bancaria.html
Redirecionamento: aplication.contas
"""
form = ContaBancariaForm()
if form.validate_on_submit():
conta = ContaBancaria(nome=form.nome.data,
saldo=form.saldo.data,
instituicao=form.instituicao.data,
tipo=form.tipo.data,
user=current_user)
db.session.add(conta)
db.session.commit()
flash('Conta bancária adicionada.', 'success')
return redirect(url_for('aplication.contas'))
return render_template('conta-bancaria.html', title='Criar conta bancária',
legend='Criar conta bancária', form=form)
@aplication.route("/conta/<int:conta_id>/update", methods=['GET', 'POST'])
@login_required
def conta_update(conta_id):
"""
Editar uma conta bancária.
Args:
conta_id (int): ID da conta bancária a ser editada.
Returns:
Template renderizado: conta-bancaria.html
Redirecionamento: aplication.contas
"""
conta = ContaBancaria.query.get_or_404(conta_id)
if conta.user != current_user:
abort(403)
form = ContaBancariaForm()
if form.validate_on_submit():
conta.saldo=form.saldo.data
conta.nome=form.nome.data
conta.instituicao=form.instituicao.data
conta.tipo=form.tipo.data
db.session.commit()
flash('Sua conta bancária foi atualizada.', 'success')
return redirect(url_for('aplication.contas'))
elif request.method == 'GET':
form.saldo.data=conta.saldo
form.nome.data=conta.nome
form.instituicao.data=conta.instituicao
form.tipo.data=conta.tipo
return render_template('conta-bancaria.html', title='Atualizar conta bancária',
legend='Atualizar conta bancária', form=form)
@aplication.route("/conta/<int:conta_id>/delete", methods=['POST', 'GET'])
@login_required
def conta_delete(conta_id):
"""
Deletar uma conta bancária.
Args:
conta_id (int): ID da conta bancária a ser deletada.
Returns:
Redirecionamento: aplication.contas
"""
conta = ContaBancaria.query.get_or_404(conta_id)
if conta.user != current_user:
abort(403)
db.session.delete(conta)
db.session.commit()
flash('Sua conta bancária foi deletada.', 'success')
return redirect(url_for('aplication.contas'))
@aplication.route("/meusCartoes")
@login_required
def cartoes():
"""
Tabelas e informações de cartões de crédito.
Página que organiza os dados do usuário em tabelas, juntamente
com informações compactas sobre as finanças do usuário.
Returns:
Template renderizado: cartoes.html
"""
cartoes = CartaoCredito.query.order_by(CartaoCredito.id.desc()).filter_by(
user=current_user).all()
total_despesas = db.session.query(func.sum(DespesaCartaoCredito.valor)).filter_by(
user=current_user).scalar()
despesas_cartao = | |
'1710'),
('Nish<NAME>', '1711')],
'Sreepur': [('Barmi', '1743'),
('Bashamur', '1747'),
('Boubi', '1748'),
('Kawraid', '1745'),
('Satkhamair', '1744'),
('Sreepur', '1740')],
'Sripur': [('Rajendrapur', '1741'),
('Rajendrapur Canttome', '1742')]},
'Gopalganj': {'Gopalganj Sadar': [('Barfa', '8102'),
('Chandradighalia', '8013'),
('Gopalganj Sadar', '8100'),
('Ulpur', '8101')],
'Kashiani': [('Jonapur', '8133'),
('Kashiani', '8130'),
('Ramdia College', '8131'),
('Ratoil', '8132')],
'Kotalipara': [('Kotalipara', '8110')],
'Maksudpur': [('Batkiamari', '8141'),
('Khandarpara', '8142'),
('Maksudpur', '8140')],
'Tungipara': [('Patgati', '8121'),
('Tungipara', '8120')]},
'Hobiganj': {'Azmireeganj': [('Azmireeganj', '3360')],
'Bahubal': [('Bahubal', '3310')],
'Baniachang': [('Baniachang', '3350'),
('Jatrapasha', '3351'),
('Kadirganj', '3352')],
'Chunarughat': [('Chandpurbagan', '3321'),
('Chunarughat', '3320'),
('Narapati', '3322')],
'Hobiganj Sadar': [('Gopaya', '3302'),
('Hobiganj Sadar', '3300'),
('Shaestaganj', '3301')],
'Kalauk': [('Kalauk', '3340'), ('Lakhai', '3341')],
'Madhabpur': [('Itakhola', '3331'),
('Madhabpur', '3330'),
('Saihamnagar', '3333'),
('Shahajibazar', '3332')],
'Nabiganj': [('Digalbak', '3373'),
('Golduba', '3372'),
('Goplarbazar', '3371'),
('Inathganj', '3374'),
('Nabiganj', '3370')]},
'IBH WAs Here': {'B<NAME>ar': [('B<NAME>ar', '9300'),
('P.C College', '9301'),
('Rangdia', '9302')],
'Ch<NAME>': [('<NAME>', '9350'),
('Mongla Port', '9351')],
'Chitalmari': [('Barabaria', '9361'),
('Chitalmari', '9360')],
'Fakirhat': [('<NAME>', '9372'),
('Fakirhat', '9370'),
('Mansa', '9371')],
'Kachua UPO': [('Kachua', '9310'),
('Sonarkola', '9311')],
'Mollahat': [('Charkulia', '9383'),
('Dariala', '9382'),
('Kahalpur', '9381'),
('Mollahat', '9380'),
('Nagarkandi', '9384'),
('Pak Gangni', '9385')],
'Morelganj': [('Morelganj', '9320'),
('<NAME>', '9321'),
('Telisatee', '9322')],
'Rampal': [('Foylahat', '9341'),
('Gourambha', '9343'),
('Rampal', '9340'),
('Sonatunia', '9342')],
'Rayenda': [('Rayenda', '9330')]},
'Jamalpur': {'Dewangonj': [('Dewangonj', '2030'),
('<NAME>', '2031')],
'Islampur': [('Durmoot', '2021'),
('Gilabari', '2022'),
('Islampur', '2020')],
'Jamalpur': [('Jamalpur', '2000'),
('Nandina', '2001'),
('Narundi', '2002')],
'Malandah': [('Jamalpur', '2011'),
('Mahmoodpur', '2013'),
('Malancha', '2012'),
('Malandah', '2010')],
'Mathargonj': [('Balijhuri', '2041'),
('Mathargonj', '2040')],
'Shorishabari': [('Bausee', '2052'),
('Gunerbari', '2051'),
('<NAME>', '2053'),
('<NAME>', '2055'),
('Pingna', '2054'),
('Shorishabari', '2050')]},
'Jessore': {'Bagharpara': [('Bagharpara', '7470'),
('Gouranagar', '7471')],
'Chaugachha': [('Chougachha', '7410')],
'<NAME>': [('Basundia', '7406'),
('Chanchra', '7402'),
('Churamankathi', '7407'),
('Jessore Airbach', '7404'),
('Jessore canttonment', '7403'),
('J<NAME>', '7400'),
('Jessore Upa-Shahar', '7401'),
('Rupdia', '7405')],
'Jhikargachha': [('Jhikargachha', '7420')],
'Keshabpur': [('Keshobpur', '7450')],
'Monirampur': [('Monirampur', '7440')],
'Noapara': [('Bhugilhat', '7462'),
('Noapara', '7460'),
('Rajghat', '7461')],
'Sarsa': [('Bag Achra', '7433'),
('Benapole', '7431'),
('Jadabpur', '7432'),
('Sarsa', '7430')]},
'Jhalokathi': {'Jhalokathi Sadar': [('Baukathi', '8402'),
('Gabha', '8403'),
('Jhalokathi Sadar', '8400'),
('Nabagram', '8401'),
('Shekherhat', '8404')],
'Kathalia': [('Amua', '8431'),
('Kathalia', '8430'),
('Niamatee', '8432'),
('Shoulajalia', '8433')],
'Nalchhiti': [('Beerkathi', '8421'),
('Nalchhiti', '8420')],
'Rajapur': [('Rajapur', '8410')]},
'Jinaidaha': {'Harinakundu': [('Harinakundu', '7310')],
'Jinaidaha Sadar': [('Jinaidaha Cadet College',
'7301'),
('Jinaidaha Sadar', '7300')],
'Kotchandpur': [('Kotchandpur', '7330')],
'Maheshpur': [('Maheshpur', '7340')],
'Naldanga': [('<NAME>', '7351'),
('Naldanga', '7350')],
'Shailakupa': [('Kumiradaha', '7321'),
('Shailakupa', '7320')]},
'Joypurhat': {'Akkelpur': [('Akklepur', '5940'),
('jamalganj', '5941'),
('Tilakpur', '5942')],
'Joypurhat Sadar': [('Joypurhat Sadar', '5900')],
'Khetlal': [('Khetlal', '5920')],
'kalai': [('kalai', '5930')],
'panchbibi': [('Panchbibi', '5910')]},
'Khagrachari': {'Diginala': [('Diginala', '4420')],
'Khagrachari Sadar': [('Khagrachari Sadar',
'4400')],
'Laxmichhari': [('Laxmichhari', '4470')],
'Mahalchhari': [('Mahalchhari', '4430')],
'Manikchhari': [('Manikchhari', '4460')],
'Matiranga': [('Matiranga', '4450')],
'Panchhari': [('Panchhari', '4410')],
'Ramghar Head Office': [('Ramghar Head Office',
'4440')]},
'Khulna': {'Alaipur': [('Alaipur', '9240'),
('Belphulia', '9242'),
('Rupsha', '9241')],
'Batiaghat': [('Batiaghat', '9260'), ('Surkalee', '9261')],
'<NAME>': [('Bajua', '9272'),
('<NAME>', '9270'),
('Dakup', '9271'),
('Nalian', '9273')],
'Digalia': [('Chandni Mahal', '9221'),
('Digalia', '9220'),
('Gazirhat', '9224'),
('Ghoshghati', '9223'),
('Senhati', '9222')],
'Khulna Sadar': [('Atra Shilpa Area', '9207'),
('BIT Khulna', '9203'),
('Doulatpur', '9202'),
('Jahanabad Canttonmen', '9205'),
('Khula Sadar', '9100'),
('Khulna G.P.O', '9000'),
('Khulna Shipyard', '9201'),
('Khulna University', '9208'),
('Siramani', '9204'),
('<NAME>', '9206')],
'Madinabad': [('Amadee', '9291'), ('Madinabad', '9290')],
'Paikgachha': [('Chandkhali', '9284'),
('Garaikhali', '9285'),
('Godaipur', '9281'),
('Kapilmoni', '9282'),
('Katipara', '9283'),
('Paikgachha', '9280')],
'Phultala': [('Phultala', '9210')],
'Sajiara': [('Chuknagar', '9252'),
('Ghonabanda', '9251'),
('Sajiara', '9250'),
('Shahapur', '9253')],
'Terakhada': [('Pak Barasat', '9231'),
('Terakhada', '9230')]},
'Kishoreganj': {'Bajitpur': [('Bajitpur', '2336'),
('Laksmipur', '2338'),
('Sararchar', '2337')],
'Bhairob': [('Bhairab', '2350')],
'Hossenpur': [('Hossenpur', '2320')],
'Itna': [('Itna', '2390')],
'Karimganj': [('Karimganj', '2310')],
'Katiadi': [('Gochhihata', '2331'),
('Katiadi', '2330')],
'Kishoreganj Sadar': [('<NAME>',
'2301'),
('Kishoreganj Sadar',
'2300'),
('Maizhati', '2302'),
('Nilganj', '2303')],
'Kuliarchar': [('Chhoysuti', '2341'),
('Kuliarchar', '2340')],
'Mithamoin': [('Abdullahpur', '2371'),
('MIthamoin', '2370')],
'Nikli': [('Nikli', '2360')],
'Ostagram': [('Ostagram', '2380')],
'Pakundia': [('Pakundia', '2326')],
'Tarial': [('Tarial', '2316')]},
'Kurigram': {'Bhurungamari': [('Bhurungamari', '5670')],
'Chilmari': [('Chilmari', '5630'), ('Jorgachh', '5631')],
'Kurigram Sadar': [('Kurigram Sadar', '5600'),
('Pandul', '5601'),
('Phulbari', '5680')],
'Nageshwar': [('Nageshwar', '5660')],
'Rajarhat': [('Nazimkhan', '5611'), ('Rajarhat', '5610')],
'Rajibpur': [('Rajibpur', '5650')],
'Roumari': [('Roumari', '5640')],
'Ulipur': [('Bazarhat', '5621'), ('Ulipur', '5620')]},
'Kustia': {'Bheramara': [('Allardarga', '7042'),
('Bheramara', '7040'),
('Ganges Bheramara', '7041')],
'Janipur': [('Janipur', '7020'), ('Khoksa', '7021')],
'Kumarkhali': [('Kumarkhali', '7010'), ('Panti', '7011')],
'Kustia Sadar': [('Islami University', '7003'),
('Jagati', '7002'),
('Kushtia Mohini', '7001'),
('Kustia Sadar', '7000')],
'Mirpur': [('Aml<NAME>', '7032'),
('Mirpur', '7030'),
('Poradaha', '7031')],
'Rafayetpur': [('Khasmathurapur', '7052'),
('Rafayetpur', '7050'),
('Taragunia', '7051')]},
'Lakshmipur': {'<NAME>': [('<NAME>', '3730'),
('Hajirghat', '3731'),
('Ramgatirhat', '3732')],
'Lakshimpur Sadar': [('<NAME>', '3709'),
('Bhabaniganj', '3702'),
('Chandraganj', '3708'),
('Choupalli', '3707'),
('<NAME>', '3701'),
('Duttapara', '3706'),
('Keramatganj', '3704'),
('Lakshimpur Sadar', '3700'),
('Mandari', '3703'),
('Rupchara', '3705')],
'Ramganj': [('Alipur', '3721'),
('Dolta', '3725'),
('Kanchanpur', '3723'),
('Naagmud', '3724'),
('Panpara', '3722'),
('Ramganj', '3720')],
'Raypur': [('Bhuabari', '3714'),
('Haydarganj', '3713'),
('Nagerdighirpar', '3712'),
('Rakhallia', '3711'),
('Raypur', '3710')]},
'Lalmonirhat': {'Aditmari': [('Aditmari', '5510')],
'Hatibandha': [('Hatibandha', '5530')],
'Lalmonirhat Sadar': [('Kulaghat SO', '5502'),
('Lalmonirhat Sadar',
'5500'),
('Moghalhat', '5501')],
'Patgram': [('Baura', '5541'),
('Burimari', '5542'),
('Patgram', '5540')],
'Tushbhandar': [('Tushbhandar', '5520')]},
'Madaripur': {'Barhamganj': [('Bahadurpur', '7932'),
('Barhamganj', '7930'),
('Nilaksmibandar', '7931'),
('Umedpur', '7933')],
'Madaripur Sadar': [('Charmugria', '7901'),
('Habiganj', '7903'),
('Kulpaddi', '7902'),
('Madaripur Sadar', '7900'),
('Mustafapur', '7904')],
'Rajoir': [('Khalia', '7911'), ('Rajoir', '7910')],
'kalkini': [('Kalkini', '7920'),
('Sahabrampur', '7921')]},
'Magura': {'Arpara': [('Arpara', '7620')],
'Magura Sadar': [('Magura Sadar', '7600')],
'Mohammadpur': [('Binodpur', '7631'),
('Mohammadpur', '7630'),
('Nahata', '7632')],
'Shripur': [('Langalbadh', '7611'),
('Nachol', '7612'),
('Shripur', '7610')]},
'Manikganj': {'Doulatpur': [('Doulatpur', '1860')],
'Gheor': [('Gheor', '1840')],
'Lechhraganj': [('Jhitka', '1831'),
('Lechhraganj', '1830')],
'Manikganj Sadar': [('Barangail', '1804'),
('Gorpara', '1802'),
('Mahadebpur', '1803'),
('<NAME>', '1801'),
('<NAME>', '1800')],
'Saturia': [('Baliati', '1811'), ('Saturia', '1810')],
'Shibloya': [('Aricha', '1851'),
('Shibaloy', '1850'),
('Tewta', '1852'),
('Uthli', '1853')],
'Singari': [('Baira', '1821'),
('joymantop', '1822'),
('Singair', '1820')]},
'Meherpur': {'Gangni': [('Gangni', '7110')],
'Meherpur Sadar': [('Amjhupi', '7101'),
('Amjhupi', '7152'),
('Meherpur Sadar', '7100'),
('<NAME>', '7102')]},
'Moulvibazar': {'Baralekha': [('Baralekha', '3250'),
('Dhakkhinbag', '3252'),
('Juri', '3251'),
('Purbashahabajpur', '3253')],
'Kamalganj': [('Kamalganj', '3220'),
('Keramatnaga', '3221'),
('Munshibazar', '3224'),
('Patrakhola', '3222'),
('<NAME>', '3223')],
'Kulaura': [('Baramchal', '3237'),
('Kajaldhara', '3234'),
('Karimpur', '3235'),
('Kulaura', '3230'),
('Langla', '3232'),
('Prithimpasha', '3233'),
('Tillagaon', '3231')],
'Moulvibazar Sadar': [('Afrozganj', '3203'),
('Barakapan', '3201'),
('Monumukh', '3202'),
('Moulvibazar Sadar',
'3200')],
'Rajnagar': [('Rajnagar', '3240')],
'Srimangal': [('Kalighat', '3212'),
('Khejurichhara', '3213'),
('<NAME>', '3211'),
('Satgaon', '3214'),
('Srimangal', '3210')]},
'Munshiganj': {'Gajaria': [('Gajaria', '1510'),
('Hossendi', '1511'),
('Rasulpur', '1512')],
'Lohajong': [('Gouragonj', '1334'),
('Gouragonj', '1534'),
('<NAME>', '1532'),
('Haridia', '1333'),
('<NAME>', '1533'),
('Korhati', '1531'),
('Lohajang', '1530'),
('Madini Mandal', '1335'),
('Medini Mandal EDSO', '1535')],
'Munshiganj Sadar': [('Kathakhali', '1503'),
('Mirkadim', '1502'),
('Munshiganj Sadar', '1500'),
('Rikabibazar', '1501')],
'Sirajdikhan': [('Ichapur', '1542'),
('Kola', '1541'),
('<NAME>', '1543'),
('<NAME>', '1544'),
('Sirajdikhan', '1540')],
'Srinagar': [('Baghra', '1557'),
('Barikhal', '1551'),
('Bhaggyakul', '1558'),
('Hashara', '1553'),
('Kolapara', '1554'),
('Kumarbhog', '1555'),
('Mazpara', '1552'),
('Srinagar', '1550'),
('Vaggyakul SO', '1556')],
'Tangibari': [('Bajrajugini', '1523'),
('Baligao', '1522'),
('Betkahat', '1521'),
('Dighirpar', '1525'),
('Hasail', '1524'),
('Pura', '1527'),
('Pura EDSO', '1526'),
('Tangibari', '1520')]},
'Mymensingh': {'Bhaluka': [('Bhaluka', '2240')],
'Fulbaria': [('Fulbaria', '2216')],
'Gaforgaon': [('Duttarbazar', '2234'),
('Gaforgaon', '2230'),
('Kandipara', '2233'),
('Shibganj', '2231'),
('Usti', '2232')],
'Gouripur': [('Gouripur', '2270'),
('Ramgopalpur', '2271')],
'Haluaghat': [('Dhara', '2261'),
('Haluaghat', '2260'),
('Munshirhat', '2262')],
'Isshwargonj': [('Atharabari', '2282'),
('Isshwargonj', '2280'),
('Sohagi', '2281')],
'Muktagachha': [('Muktagachha', '2210')],
'Mymensingh Sadar': [('Agriculture Universi',
'2202'),
('Biddyaganj', '2204'),
('Kawatkhali', '2201'),
('Mymensingh Sadar', '2200'),
('Pearpur', '2205'),
('Shombhuganj', '2203')],
'Nandail': [('Gangail', '2291'), ('Nandail', '2290')],
'Phulpur': [('Beltia', '2251'),
('Phulpur', '2250'),
('Tarakanda', '2252')],
'Trishal': [('Ahmadbad', '2221'),
('Dhala', '2223'),
('Ram Amritaganj', '2222'),
('Trishal', '2220')]},
'Naogaon': {'Ahsanganj': [('Ahsanganj', '6596'), ('Bandai', '6597')],
'Badalgachhi': [('Badalgachhi', '6570')],
'Dhamuirhat': [('Dhamuirhat', '6580')],
'Mahadebpur': [('Mahadebpur', '6530')],
'Naogaon Sadar': [('Naogaon Sadar', '6500')],
'Niamatpur': [('Niamatpur', '6520')],
'Nitpur': [('Nitpur', '6550'),
('Panguria', '6552'),
('Porsa', '6551')],
'Patnitala': [('Patnitala', '6540')],
'Prasadpur': [('Balihar', '6512'),
('Manda', '6511'),
('Prasadpur', '6510')],
'Raninagar': [('Kashimpur', '6591'),
('Raninagar', '6590')],
'Sapahar': [('Moduhil', '6561'), ('Sapahar', '6560')]},
'Narail': {'Kalia': [('Kalia', '7520')],
'Laxmipasha': [('Baradia', '7514'),
('Itna', '7512'),
('Laxmipasha', '7510'),
('Lohagora', '7511'),
('Naldi', '7513')],
'Mohajan': [('Mohajan', '7521')],
'Narail Sadar': [('Nar<NAME>adar', '7500'),
('Ratanganj', '7501')]},
'Narayanganj': {'Araihazar': [('Araihazar', '1450'),
('Gopaldi', '1451')],
'Baidder Bazar': [('<NAME>azar', '1440'),
('<NAME>', '1441'),
('Barodi', '1442')],
'Bandar': [('Bandar', '1410'),
('BIDS', '1413'),
('<NAME>', '1411'),
('Madanganj', '1414'),
('Nabiganj', '1412')],
'Fatullah': [('<NAME>', '1421'),
('Fatullah', '1420')],
'N<NAME>adar': [('<NAME>',
'1400')],
'Rupganj': [('Bhulta', '1462'),
('Kanchan', '1461'),
('Murapara', '1464'),
('Nagri', '1463'),
('Rupganj', '1460')],
'Siddirganj': [('Adamjeenagar', '1431'),
('<NAME>', '1432'),
('Siddirganj', '1430')]},
'Narshingdi': {'Belabo': [('Belabo', '1640')],
'Monohordi': [('Hatirdia', '1651'),
('Katabaria', '1652'),
('Monohordi', '1650')],
'Narshingdi Sadar': [('Karimpur', '1605'),
('Madhabdi', '1604'),
('Narshingdi College', '1602'),
('Narshingdi Sadar', '1600'),
('Panchdona', '1603'),
('UMC Jute Mills', '1601')],
'Palash': [('Char Sindhur', '1612'),
('Ghorashal', '1613'),
('Ghorashal Urea Facto', '1611'),
('Palash', '1610')],
'Raypura': [('B<NAME>', '1631'),
('Radhaganj bazar', '1632'),
('Raypura', '1630')],
'Shibpur': [('Shibpur', '1620')]},
'Natore': {'Gopalpur UPO': [('Abdulpur', '6422'),
('Gopalpur U.P.O', '6420'),
('Lalpur S.O', '6421')],
'Harua': [('Baraigram', '6432'),
('Dayarampur', '6431'),
('Harua', '6430')],
'Hatgurudaspur': [('Hatgurudaspur', '6440')],
'Laxman': [('Laxman', '6410')],
'Natore Sadar': [('Baiddyabal Gharia', '6402'),
('Digapatia', '6401'),
('Madhnagar', '6403'),
('Natore Sadar', '6400')],
'Singra': [('Singra', | |
self.lineEdit_pop_pTr_modelPath.setEnabled(False)
self.lineEdit_pop_pTr_modelPath.setObjectName("lineEdit_pop_pTr_modelPath")
self.horizontalLayout_4.addWidget(self.lineEdit_pop_pTr_modelPath)
self.gridLayout.addLayout(self.horizontalLayout_4, 0, 0, 1, 3)
# self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
# self.horizontalLayout_7.setObjectName("horizontalLayout_7")
# self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
# self.horizontalLayout_5.setObjectName("horizontalLayout_5")
# self.label_pop_pTr_arch = QtWidgets.QLabel(self.groupBox)
# self.label_pop_pTr_arch.setObjectName("label_pop_pTr_arch")
# self.horizontalLayout_5.addWidget(self.label_pop_pTr_arch)
# self.lineEdit_pop_pTr_arch = QtWidgets.QLineEdit(self.groupBox)
# self.lineEdit_pop_pTr_arch.setEnabled(False)
# self.lineEdit_pop_pTr_arch.setObjectName("lineEdit_pop_pTr_arch")
# self.horizontalLayout_5.addWidget(self.lineEdit_pop_pTr_arch)
# self.horizontalLayout_7.addLayout(self.horizontalLayout_5)
# self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
# self.horizontalLayout_3.setObjectName("horizontalLayout_3")
# self.label_pop_pTr_norm = QtWidgets.QLabel(self.groupBox)
# self.label_pop_pTr_norm.setObjectName("label_pop_pTr_norm")
# self.horizontalLayout_3.addWidget(self.label_pop_pTr_norm)
# self.comboBox_pop_pTr_norm = QtWidgets.QComboBox(self.groupBox)
# self.comboBox_pop_pTr_norm.setEnabled(False)
# self.comboBox_pop_pTr_norm.setObjectName("comboBox_pop_pTr_norm")
# self.horizontalLayout_3.addWidget(self.comboBox_pop_pTr_norm)
# self.horizontalLayout_7.addLayout(self.horizontalLayout_3)
# self.gridLayout.addLayout(self.horizontalLayout_7, 1, 0, 1, 3)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_pop_pTr_inpSize = QtWidgets.QLabel(self.groupBox)
self.label_pop_pTr_inpSize.setObjectName("label_pop_pTr_inpSize")
self.horizontalLayout.addWidget(self.label_pop_pTr_inpSize)
self.spinBox_pop_pTr_inpSize = QtWidgets.QSpinBox(self.groupBox)
self.spinBox_pop_pTr_inpSize.setEnabled(False)
self.spinBox_pop_pTr_inpSize.setAccessibleName("")
self.spinBox_pop_pTr_inpSize.setObjectName("spinBox_pop_pTr_inpSize")
self.horizontalLayout.addWidget(self.spinBox_pop_pTr_inpSize)
self.gridLayout.addLayout(self.horizontalLayout, 2, 0, 1, 1)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_pop_pTr_outpSize = QtWidgets.QLabel(self.groupBox)
self.label_pop_pTr_outpSize.setObjectName("label_pop_pTr_outpSize")
self.horizontalLayout_2.addWidget(self.label_pop_pTr_outpSize)
self.spinBox_pop_pTr_outpSize = QtWidgets.QSpinBox(self.groupBox)
self.spinBox_pop_pTr_outpSize.setEnabled(False)
self.spinBox_pop_pTr_outpSize.setObjectName("spinBox_pop_pTr_outpSize")
self.horizontalLayout_2.addWidget(self.spinBox_pop_pTr_outpSize)
self.gridLayout.addLayout(self.horizontalLayout_2, 2, 1, 1, 1)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_pop_pTr_colorMode = QtWidgets.QLabel(self.groupBox)
self.label_pop_pTr_colorMode.setObjectName("label_pop_pTr_colorMode")
self.horizontalLayout_6.addWidget(self.label_pop_pTr_colorMode)
self.comboBox_pop_pTr_colorMode = QtWidgets.QComboBox(self.groupBox)
self.comboBox_pop_pTr_colorMode.setEnabled(False)
self.comboBox_pop_pTr_colorMode.setObjectName("comboBox_pop_pTr_colorMode")
self.horizontalLayout_6.addWidget(self.comboBox_pop_pTr_colorMode)
self.gridLayout.addLayout(self.horizontalLayout_6, 2, 2, 1, 1)
self.groupBox_pop_pTr_layersTable = QtWidgets.QGroupBox(self.splitter)
self.groupBox_pop_pTr_layersTable.setObjectName("groupBox_pop_pTr_layersTable")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox_pop_pTr_layersTable)
self.gridLayout_3.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.gridLayout_3.setContentsMargins(-1, 5, -1, 5)
self.gridLayout_3.setObjectName("gridLayout_3")
self.tableWidget_pop_pTr_layersTable = MyTable(0,5,self.groupBox_pop_pTr_layersTable)
self.tableWidget_pop_pTr_layersTable.setObjectName("tableWidget_pop_pTr_layersTable")
header_labels = ["Name", "Type" ,"No.Params", "No.Units", "Trainability"]
self.tableWidget_pop_pTr_layersTable.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_pop_pTr_layersTable.horizontalHeader()
for i in range(len(header_labels)):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
self.tableWidget_pop_pTr_layersTable.setAcceptDrops(True)
self.tableWidget_pop_pTr_layersTable.setDragEnabled(True)
self.tableWidget_pop_pTr_layersTable.resizeRowsToContents()
self.gridLayout_3.addWidget(self.tableWidget_pop_pTr_layersTable, 0, 0, 1, 1)
self.groupBox_pop_pTr_modelSummary = QtWidgets.QGroupBox(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_pop_pTr_modelSummary.sizePolicy().hasHeightForWidth())
self.groupBox_pop_pTr_modelSummary.setSizePolicy(sizePolicy)
self.groupBox_pop_pTr_modelSummary.setBaseSize(QtCore.QSize(0, 0))
self.groupBox_pop_pTr_modelSummary.setFlat(False)
self.groupBox_pop_pTr_modelSummary.setCheckable(False)
self.groupBox_pop_pTr_modelSummary.setObjectName("groupBox_pop_pTr_modelSummary")
self.gridLayout_4 = QtWidgets.QGridLayout(self.groupBox_pop_pTr_modelSummary)
self.gridLayout_4.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.gridLayout_4.setContentsMargins(-1, 5, -1, 5)
self.gridLayout_4.setHorizontalSpacing(7)
self.gridLayout_4.setObjectName("gridLayout_4")
self.textBrowser_pop_pTr_modelSummary = QtWidgets.QTextBrowser(self.groupBox_pop_pTr_modelSummary)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textBrowser_pop_pTr_modelSummary.sizePolicy().hasHeightForWidth())
self.textBrowser_pop_pTr_modelSummary.setSizePolicy(sizePolicy)
self.textBrowser_pop_pTr_modelSummary.setMinimumSize(QtCore.QSize(0, 0))
self.textBrowser_pop_pTr_modelSummary.setBaseSize(QtCore.QSize(0, 0))
self.textBrowser_pop_pTr_modelSummary.setAutoFillBackground(False)
self.textBrowser_pop_pTr_modelSummary.setObjectName("textBrowser_pop_pTr_modelSummary")
self.gridLayout_4.addWidget(self.textBrowser_pop_pTr_modelSummary, 0, 0, 1, 1)
self.gridLayout_2.addWidget(self.splitter, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Partial trainability", None))
self.tableWidget_pop_pTr_layersTable.setToolTip(_translate("Form", tooltips["tableWidget_pop_pTr_layersTable"], None))
#self.pushButton_pop_pTr_reset.setText(_translate("Form", "Reset", None))
#self.pushButton_pop_pTr_reset.setToolTip(_translate("Form", "<html><head/><body><p>Not implemented yet.</p></body></html>", None))
self.pushButton_pop_pTr_update.setText(_translate("Form", "Update", None))
self.pushButton_pop_pTr_update.setToolTip(_translate("Form", tooltips["pushButton_pop_pTr_update"], None))
self.pushButton_pop_pTr_ok.setText(_translate("Form", "OK", None))
self.pushButton_pop_pTr_ok.setToolTip(_translate("Form", tooltips["pushButton_pop_pTr_ok"], None))
self.groupBox.setTitle(_translate("Form", "Model information", None))
self.label_pop_pTr_modelPath.setText(_translate("Form", "Model path", None))
# self.label_pop_pTr_arch.setText(_translate("Form", "Architecture", None))
# self.label_pop_pTr_norm.setText(_translate("Form", "Normalization", None))
self.label_pop_pTr_inpSize.setText(_translate("Form", "Input size", None))
self.label_pop_pTr_outpSize.setText(_translate("Form", "Output classes", None))
self.label_pop_pTr_colorMode.setText(_translate("Form", "Color Mode", None))
self.groupBox_pop_pTr_layersTable.setTitle(_translate("Form", "Layers", None))
self.groupBox_pop_pTr_modelSummary.setTitle(_translate("Form", "Model summary", None))
class popup_lossweights(QtWidgets.QWidget):
    """Popup window for assigning custom per-class loss weights.

    Shows one table row per class (event counts plus the weight itself)
    and a button row to cancel, pick a weighting scheme, or confirm.
    """

    def setupUi(self, Form_lossW):
        """Build the static widget hierarchy of *Form_lossW*.

        Layout: a group box holding the weights table on top, and a
        button/combo row (Cancel | scheme selector | OK) below it.
        """
        Form_lossW.setObjectName("Form_lossW")
        Form_lossW.resize(470, 310)
        self.gridLayout_2 = QtWidgets.QGridLayout(Form_lossW)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.groupBox_lossW = QtWidgets.QGroupBox(Form_lossW)
        self.groupBox_lossW.setObjectName("groupBox_lossW")
        self.gridLayout = QtWidgets.QGridLayout(self.groupBox_lossW)
        self.gridLayout.setObjectName("gridLayout")
        # Table with one row per class; 5 columns as labelled below.
        self.tableWidget_lossW = MyTable(0, 5, self.groupBox_lossW)
        self.tableWidget_lossW.setObjectName("tableWidget_lossW")
        header_labels = ["Class", "Events tot.", "Events/Epoch", "Events/Epoch[%]", "Loss weight"]
        self.tableWidget_lossW.setHorizontalHeaderLabels(header_labels)
        header = self.tableWidget_lossW.horizontalHeader()
        # BUGFIX: QHeaderView.setResizeMode is the Qt4 API; it was renamed to
        # setSectionResizeMode in Qt5 (this file imports QtWidgets, i.e. Qt5).
        # Resolve whichever method exists so both bindings work.
        set_resize_mode = getattr(header, "setSectionResizeMode", None) or header.setResizeMode
        for i in range(len(header_labels)):
            set_resize_mode(i, QtWidgets.QHeaderView.ResizeToContents)
        self.tableWidget_lossW.setAcceptDrops(True)
        self.tableWidget_lossW.setDragEnabled(True)
        self.tableWidget_lossW.resizeRowsToContents()
        self.gridLayout.addWidget(self.tableWidget_lossW, 0, 0, 1, 1)
        self.gridLayout_2.addWidget(self.groupBox_lossW, 0, 0, 1, 1)
        # Bottom button row: spacer | Cancel | scheme combo | OK
        self.horizontalLayout_lossW_buttons = QtWidgets.QHBoxLayout()
        self.horizontalLayout_lossW_buttons.setObjectName("horizontalLayout_lossW_buttons")
        spacerItem = QtWidgets.QSpacerItem(218, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_lossW_buttons.addItem(spacerItem)
        self.pushButton_pop_lossW_cancel = QtWidgets.QPushButton(Form_lossW)
        self.pushButton_pop_lossW_cancel.setObjectName("pushButton_pop_lossW_cancel")
        self.horizontalLayout_lossW_buttons.addWidget(self.pushButton_pop_lossW_cancel)
        # Weighting scheme selector.
        self.comboBox_lossW = QtWidgets.QComboBox(Form_lossW)
        self.comboBox_lossW.setObjectName("comboBox_lossW")
        self.comboBox_lossW.addItems(["None", "Balanced", "Custom"])
        self.horizontalLayout_lossW_buttons.addWidget(self.comboBox_lossW)
        self.pushButton_pop_lossW_ok = QtWidgets.QPushButton(Form_lossW)
        self.pushButton_pop_lossW_ok.setObjectName("pushButton_pop_lossW_ok")
        self.horizontalLayout_lossW_buttons.addWidget(self.pushButton_pop_lossW_ok)
        self.gridLayout_2.addLayout(self.horizontalLayout_lossW_buttons, 1, 0, 1, 1)
        self.retranslateUi(Form_lossW)
        QtCore.QMetaObject.connectSlotsByName(Form_lossW)

    def retranslateUi(self, Form_lossW):
        """Install all translatable strings of the loss-weights popup."""
        _translate = QtCore.QCoreApplication.translate
        Form_lossW.setWindowTitle(_translate("Form_lossW", "Custom loss weights per class", None))
        self.groupBox_lossW.setTitle(_translate("Form_lossW", "Training data - custom class weights", None))
        self.pushButton_pop_lossW_cancel.setText(_translate("Form_lossW", "Cancel", None))
        self.pushButton_pop_lossW_ok.setText(_translate("Form_lossW", "OK", None))
class popup_imageLoadResize(QtWidgets.QWidget):
    """Import assistant shown when loaded images have unequal sizes.

    Offers two equalization strategies, selected via radio buttons:
    crop/pad to a target size, or interpolation-based resizing with a
    selectable resampling method (Nearest/Linear/Area/Cubic/Lanczos).
    """
    def setupUi(self, Form_imageResize):
        """Build the widget hierarchy of *Form_imageResize*.

        Layout (top to bottom): an info label, the resize-options grid
        (method radio buttons, height/width spin boxes, method combo,
        Cancel/OK buttons), and a scroll area listing size occurrences.
        """
        Form_imageResize.setObjectName("Form_imageResize")
        Form_imageResize.resize(468, 270)
        self.gridLayout_3 = QtWidgets.QGridLayout(Form_imageResize)
        self.gridLayout_3.setObjectName("gridLayout_3")
        # Scroll area with a text browser reporting which image sizes occur.
        self.scrollArea_imgResize_occurences = QtWidgets.QScrollArea(Form_imageResize)
        self.scrollArea_imgResize_occurences.setWidgetResizable(True)
        self.scrollArea_imgResize_occurences.setObjectName("scrollArea_imgResize_occurences")
        self.scrollAreaWidgetContents_imgResize = QtWidgets.QWidget()
        self.scrollAreaWidgetContents_imgResize.setGeometry(QtCore.QRect(0, 0, 423, 109))
        self.scrollAreaWidgetContents_imgResize.setObjectName("scrollAreaWidgetContents_imgResize")
        self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents_imgResize)
        self.gridLayout.setObjectName("gridLayout")
        self.textBrowser_imgResize_occurences = QtWidgets.QTextBrowser(self.scrollAreaWidgetContents_imgResize)
        self.textBrowser_imgResize_occurences.setObjectName("textBrowser_imgResize_occurences")
        self.gridLayout.addWidget(self.textBrowser_imgResize_occurences, 0, 0, 1, 1)
        self.scrollArea_imgResize_occurences.setWidget(self.scrollAreaWidgetContents_imgResize)
        self.gridLayout_3.addWidget(self.scrollArea_imgResize_occurences, 2, 0, 1, 1)
        # Grid with the resize options. Row 1 holds the crop/pad target size
        # (h_1 x w_1), row 2 the interpolation target size (h_2 x w_2) plus
        # the resampling-method combo box. All spin boxes start disabled and
        # are enabled by the radio-button toggles connected below.
        self.gridLayout_imageResizeOptions = QtWidgets.QGridLayout()
        self.gridLayout_imageResizeOptions.setObjectName("gridLayout_imageResizeOptions")
        self.label_imgResize_x_3 = QtWidgets.QLabel(Form_imageResize)
        self.label_imgResize_x_3.setObjectName("label_imgResize_x_3")
        self.gridLayout_imageResizeOptions.addWidget(self.label_imgResize_x_3, 2, 3, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(88, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_imageResizeOptions.addItem(spacerItem, 1, 5, 1, 1)
        self.label_imgResize_height = QtWidgets.QLabel(Form_imageResize)
        self.label_imgResize_height.setAlignment(QtCore.Qt.AlignCenter)
        self.label_imgResize_height.setObjectName("label_imgResize_height")
        self.gridLayout_imageResizeOptions.addWidget(self.label_imgResize_height, 0, 1, 1, 2)
        self.spinBox_ingResize_w_2 = QtWidgets.QSpinBox(Form_imageResize)
        self.spinBox_ingResize_w_2.setEnabled(False)
        self.spinBox_ingResize_w_2.setMaximum(999999)
        self.spinBox_ingResize_w_2.setObjectName("spinBox_ingResize_w_2")
        self.gridLayout_imageResizeOptions.addWidget(self.spinBox_ingResize_w_2, 2, 4, 1, 1)
        self.spinBox_ingResize_h_1 = QtWidgets.QSpinBox(Form_imageResize)
        self.spinBox_ingResize_h_1.setEnabled(False)
        self.spinBox_ingResize_h_1.setMaximum(999999)
        self.spinBox_ingResize_h_1.setObjectName("spinBox_ingResize_h_1")
        self.gridLayout_imageResizeOptions.addWidget(self.spinBox_ingResize_h_1, 1, 1, 1, 2)
        spacerItem1 = QtWidgets.QSpacerItem(88, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_imageResizeOptions.addItem(spacerItem1, 0, 5, 1, 1)
        # OK starts disabled; it is enabled once a method is chosen (see the
        # radio-button connections below).
        self.pushButton_imgResize_ok = QtWidgets.QPushButton(Form_imageResize)
        self.pushButton_imgResize_ok.setEnabled(False)
        self.pushButton_imgResize_ok.setObjectName("pushButton_imgResize_ok")
        self.gridLayout_imageResizeOptions.addWidget(self.pushButton_imgResize_ok, 3, 5, 1, 1)
        self.spinBox_ingResize_w_1 = QtWidgets.QSpinBox(Form_imageResize)
        self.spinBox_ingResize_w_1.setEnabled(False)
        self.spinBox_ingResize_w_1.setMaximum(999999)
        self.spinBox_ingResize_w_1.setObjectName("spinBox_ingResize_w_1")
        self.gridLayout_imageResizeOptions.addWidget(self.spinBox_ingResize_w_1, 1, 4, 1, 1)
        # Resampling method for the interpolation option; item texts are set
        # in retranslateUi (Nearest/Linear/Area/Cubic/Lanczos).
        self.comboBox_resizeMethod = QtWidgets.QComboBox(Form_imageResize)
        self.comboBox_resizeMethod.setEnabled(False)
        self.comboBox_resizeMethod.setObjectName("comboBox_resizeMethod")
        self.comboBox_resizeMethod.addItem("")
        self.comboBox_resizeMethod.addItem("")
        self.comboBox_resizeMethod.addItem("")
        self.comboBox_resizeMethod.addItem("")
        self.comboBox_resizeMethod.addItem("")
        self.gridLayout_imageResizeOptions.addWidget(self.comboBox_resizeMethod, 2, 5, 1, 1)
        self.pushButton_imgResize_cancel = QtWidgets.QPushButton(Form_imageResize)
        self.pushButton_imgResize_cancel.setObjectName("pushButton_imgResize_cancel")
        self.gridLayout_imageResizeOptions.addWidget(self.pushButton_imgResize_cancel, 3, 2, 1, 3)
        self.radioButton_imgResize_cropPad = QtWidgets.QRadioButton(Form_imageResize)
        self.radioButton_imgResize_cropPad.setObjectName("radioButton_imgResize_cropPad")
        self.gridLayout_imageResizeOptions.addWidget(self.radioButton_imgResize_cropPad, 1, 0, 1, 1)
        self.label_imgResize_x_2 = QtWidgets.QLabel(Form_imageResize)
        self.label_imgResize_x_2.setObjectName("label_imgResize_x_2")
        self.gridLayout_imageResizeOptions.addWidget(self.label_imgResize_x_2, 1, 3, 1, 1)
        spacerItem2 = QtWidgets.QSpacerItem(148, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_imageResizeOptions.addItem(spacerItem2, 3, 0, 1, 2)
        self.spinBox_ingResize_h_2 = QtWidgets.QSpinBox(Form_imageResize)
        self.spinBox_ingResize_h_2.setEnabled(False)
        self.spinBox_ingResize_h_2.setMaximum(999999)
        self.spinBox_ingResize_h_2.setObjectName("spinBox_ingResize_h_2")
        self.gridLayout_imageResizeOptions.addWidget(self.spinBox_ingResize_h_2, 2, 1, 1, 2)
        self.label_imgResize_width = QtWidgets.QLabel(Form_imageResize)
        self.label_imgResize_width.setAlignment(QtCore.Qt.AlignCenter)
        self.label_imgResize_width.setObjectName("label_imgResize_width")
        self.gridLayout_imageResizeOptions.addWidget(self.label_imgResize_width, 0, 4, 1, 1)
        self.label_imgResize_method = QtWidgets.QLabel(Form_imageResize)
        self.label_imgResize_method.setObjectName("label_imgResize_method")
        self.gridLayout_imageResizeOptions.addWidget(self.label_imgResize_method, 0, 0, 1, 1)
        self.label_imgResize_x_1 = QtWidgets.QLabel(Form_imageResize)
        self.label_imgResize_x_1.setObjectName("label_imgResize_x_1")
        self.gridLayout_imageResizeOptions.addWidget(self.label_imgResize_x_1, 0, 3, 1, 1)
        self.radioButton_imgResize_interpolate = QtWidgets.QRadioButton(Form_imageResize)
        self.radioButton_imgResize_interpolate.setObjectName("radioButton_imgResize_interpolate")
        self.gridLayout_imageResizeOptions.addWidget(self.radioButton_imgResize_interpolate, 2, 0, 1, 1)
        self.gridLayout_3.addLayout(self.gridLayout_imageResizeOptions, 1, 0, 1, 1)
        self.label_imgResize_info = QtWidgets.QLabel(Form_imageResize)
        self.label_imgResize_info.setObjectName("label_imgResize_info")
        self.gridLayout_3.addWidget(self.label_imgResize_info, 0, 0, 1, 1)
        self.retranslateUi(Form_imageResize)
        # Selecting a method enables its height spin box, the method combo
        # (interpolation only), and the OK button.
        self.radioButton_imgResize_cropPad.toggled['bool'].connect(self.spinBox_ingResize_h_1.setEnabled)
        #self.radioButton_imgResize_cropPad.toggled['bool'].connect(self.spinBox_ingResize_w_1.setEnabled)
        self.radioButton_imgResize_interpolate.toggled['bool'].connect(self.spinBox_ingResize_h_2.setEnabled)
        #self.radioButton_imgResize_interpolate.toggled['bool'].connect(self.spinBox_ingResize_w_2.setEnabled)
        self.radioButton_imgResize_interpolate.toggled['bool'].connect(self.comboBox_resizeMethod.setEnabled)
        self.radioButton_imgResize_cropPad.toggled['bool'].connect(self.pushButton_imgResize_ok.setEnabled)
        self.radioButton_imgResize_interpolate.toggled['bool'].connect(self.pushButton_imgResize_ok.setEnabled)
        # Keep all four spin boxes synchronized: changing either height value
        # propagates to both width boxes and the other height box, so the
        # target size always stays square and consistent across methods.
        self.spinBox_ingResize_h_1.valueChanged['int'].connect(self.spinBox_ingResize_h_2.setValue)
        self.spinBox_ingResize_h_1.valueChanged['int'].connect(self.spinBox_ingResize_w_1.setValue)
        self.spinBox_ingResize_h_1.valueChanged['int'].connect(self.spinBox_ingResize_w_2.setValue)
        self.spinBox_ingResize_h_2.valueChanged['int'].connect(self.spinBox_ingResize_w_1.setValue)
        self.spinBox_ingResize_h_2.valueChanged['int'].connect(self.spinBox_ingResize_w_2.setValue)
        self.spinBox_ingResize_h_2.valueChanged['int'].connect(self.spinBox_ingResize_h_1.setValue)
        QtCore.QMetaObject.connectSlotsByName(Form_imageResize)
    def retranslateUi(self, Form_imageResize):
        """Install all user-visible strings of the resize assistant."""
        _translate = QtCore.QCoreApplication.translate
        Form_imageResize.setWindowTitle(_translate("Form_imageResize", "Import assistant for unequally sized images"))
        self.label_imgResize_x_3.setText(_translate("Form_imageResize", "x"))
        self.label_imgResize_height.setText(_translate("Form_imageResize", "Height"))
        self.pushButton_imgResize_ok.setText(_translate("Form_imageResize", "OK"))
        self.comboBox_resizeMethod.setItemText(0, _translate("Form_imageResize", "Nearest"))
        self.comboBox_resizeMethod.setItemText(1, _translate("Form_imageResize", "Linear"))
        self.comboBox_resizeMethod.setItemText(2, _translate("Form_imageResize", "Area"))
        self.comboBox_resizeMethod.setItemText(3, _translate("Form_imageResize", "Cubic"))
        self.comboBox_resizeMethod.setItemText(4, _translate("Form_imageResize", "Lanczos"))
        self.pushButton_imgResize_cancel.setText(_translate("Form_imageResize", "Cancel"))
        self.radioButton_imgResize_cropPad.setToolTip(_translate("Form_imageResize", "Images are resized by center cropping and/or padding."))
        self.radioButton_imgResize_cropPad.setText(_translate("Form_imageResize", "Crop/pad"))
        self.label_imgResize_x_2.setText(_translate("Form_imageResize", "x"))
        self.label_imgResize_width.setText(_translate("Form_imageResize", "Width"))
        self.label_imgResize_method.setText(_translate("Form_imageResize", "Method"))
        self.label_imgResize_x_1.setText(_translate("Form_imageResize", "x"))
        self.radioButton_imgResize_interpolate.setToolTip(_translate("Form_imageResize", "Images are resized by interpolation"))
        self.radioButton_imgResize_interpolate.setText(_translate("Form_imageResize", "Resize (interp.)"))
        self.label_imgResize_info.setText(_translate("Form_imageResize", "Detected unequal image sizes. Select a method to equalize image sizes:"))
class popup_cm_interaction(QtWidgets.QWidget):
def setupUi(self, Form_cm_interaction):
Form_cm_interaction.setObjectName("Form_cm_interaction")
Form_cm_interaction.resize(702, 572)
self.gridLayout_6 = QtWidgets.QGridLayout(Form_cm_interaction)
self.gridLayout_6.setObjectName("gridLayout_6")
self.groupBox_model = QtWidgets.QGroupBox(Form_cm_interaction)
self.groupBox_model.setObjectName("groupBox_model")
self.gridLayout_5 = QtWidgets.QGridLayout(self.groupBox_model)
self.gridLayout_5.setObjectName("gridLayout_5")
self.lineEdit_loadModel = QtWidgets.QLineEdit(self.groupBox_model)
self.lineEdit_loadModel.setEnabled(False)
self.lineEdit_loadModel.setObjectName("lineEdit_loadModel")
self.gridLayout_5.addWidget(self.lineEdit_loadModel, 0, 0, 1, 4)
self.pushButton_showSummary = QtWidgets.QPushButton(self.groupBox_model)
self.pushButton_showSummary.setObjectName("pushButton_showSummary")
self.gridLayout_5.addWidget(self.pushButton_showSummary, 0, 4, 1, 1)
self.label_inpImgSize = QtWidgets.QLabel(self.groupBox_model)
self.label_inpImgSize.setObjectName("label_inpImgSize")
self.gridLayout_5.addWidget(self.label_inpImgSize, 1, 0, 1, 1)
self.spinBox_Crop_inpImgSize = QtWidgets.QSpinBox(self.groupBox_model)
self.spinBox_Crop_inpImgSize.setEnabled(False)
self.spinBox_Crop_inpImgSize.setObjectName("spinBox_Crop_inpImgSize")
self.gridLayout_5.addWidget(self.spinBox_Crop_inpImgSize, 1, 1, 1, 1)
self.label_outpSize = QtWidgets.QLabel(self.groupBox_model)
self.label_outpSize.setObjectName("label_outpSize")
self.gridLayout_5.addWidget(self.label_outpSize, 1, 2, 1, 1)
self.spinBox_outpSize = QtWidgets.QSpinBox(self.groupBox_model)
self.spinBox_outpSize.setEnabled(False)
self.spinBox_outpSize.setObjectName("spinBox_outpSize")
self.gridLayout_5.addWidget(self.spinBox_outpSize, 1, 3, 1, 1)
self.pushButton_toTensorB = QtWidgets.QPushButton(self.groupBox_model)
self.pushButton_toTensorB.setObjectName("pushButton_toTensorB")
self.gridLayout_5.addWidget(self.pushButton_toTensorB, 1, 4, 1, 1)
self.gridLayout_6.addWidget(self.groupBox_model, 0, 0, 1, 2)
self.groupBox_imageShow = QtWidgets.QGroupBox(Form_cm_interaction)
self.groupBox_imageShow.setObjectName("groupBox_imageShow")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox_imageShow)
self.gridLayout.setObjectName("gridLayout")
self.widget_image = pg.ImageView(self.groupBox_imageShow)
self.widget_image.show()
self.widget_image.setMinimumSize(QtCore.QSize(400, 400))
#self.widget_image.setMaximumSize(QtCore.QSize(16777215, 91))
# self.widget_image.ui.histogram.hide()
# self.widget_image.ui.roiBtn.hide()
# self.widget_image.ui.menuBtn.hide()
self.widget_image.setObjectName("widget_image")
self.gridLayout.addWidget(self.widget_image, 0, 0, 1, 1)
self.gridLayout_6.addWidget(self.groupBox_imageShow, 1, 0, 2, 1)
self.scrollArea_settings = QtWidgets.QScrollArea(Form_cm_interaction)
self.scrollArea_settings.setWidgetResizable(True)
self.scrollArea_settings.setObjectName("scrollArea_settings")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 247, 431))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout_4 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout_4.setObjectName("gridLayout_4")
self.groupBox_image_Settings = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
self.groupBox_image_Settings.setCheckable(True)
self.groupBox_image_Settings.toggled.connect(self.image_on_off)
self.groupBox_image_Settings.setObjectName("groupBox_image_Settings")
self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox_image_Settings)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_image_alpha = QtWidgets.QLabel(self.groupBox_image_Settings)
self.label_image_alpha.setObjectName("label_image_alpha")
self.gridLayout_2.addWidget(self.label_image_alpha, 0, 0, 1, 1)
self.doubleSpinBox_image_alpha = QtWidgets.QDoubleSpinBox(self.groupBox_image_Settings)
self.doubleSpinBox_image_alpha.setMinimum(0.0)
self.doubleSpinBox_image_alpha.setMaximum(1.0)
self.doubleSpinBox_image_alpha.setSingleStep(0.1)
self.doubleSpinBox_image_alpha.setDecimals(3)
self.doubleSpinBox_image_alpha.setValue(1.0)
self.doubleSpinBox_image_alpha.setObjectName("doubleSpinBox_image_alpha")
self.gridLayout_2.addWidget(self.doubleSpinBox_image_alpha, 0, 1, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_image_Settings, 0, 0, 1, 2)
self.groupBox_gradCAM_Settings = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
self.groupBox_gradCAM_Settings.setCheckable(True)
self.groupBox_gradCAM_Settings.setChecked(False)
self.groupBox_gradCAM_Settings.toggled.connect(self.gradCAM_on_off)
self.groupBox_gradCAM_Settings.setObjectName("groupBox_gradCAM_Settings")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox_gradCAM_Settings)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_gradCAM_targetClass = QtWidgets.QLabel(self.groupBox_gradCAM_Settings)
self.label_gradCAM_targetClass.setObjectName("label_gradCAM_targetClass")
self.gridLayout_3.addWidget(self.label_gradCAM_targetClass, 0, 0, 1, 1)
self.spinBox_gradCAM_targetClass = QtWidgets.QSpinBox(self.groupBox_gradCAM_Settings)
self.spinBox_gradCAM_targetClass.setMinimum(0)
self.spinBox_gradCAM_targetClass.setValue(0)
self.spinBox_gradCAM_targetClass.setObjectName("spinBox_gradCAM_targetClass")
self.gridLayout_3.addWidget(self.spinBox_gradCAM_targetClass, 0, 1, 1, 1)
self.label_gradCAM_targetLayer = QtWidgets.QLabel(self.groupBox_gradCAM_Settings)
self.label_gradCAM_targetLayer.setObjectName("label_gradCAM_targetLayer")
self.gridLayout_3.addWidget(self.label_gradCAM_targetLayer, 1, 0, 1, 1)
self.comboBox_gradCAM_targetLayer = QtWidgets.QComboBox(self.groupBox_gradCAM_Settings)
self.comboBox_gradCAM_targetLayer.setObjectName("comboBox_gradCAM_targetLayer")
self.gridLayout_3.addWidget(self.comboBox_gradCAM_targetLayer, 1, 1, 1, 1)
self.label_gradCAM_colorMap = QtWidgets.QLabel(self.groupBox_gradCAM_Settings)
self.label_gradCAM_colorMap.setObjectName("label_gradCAM_colorMap")
self.gridLayout_3.addWidget(self.label_gradCAM_colorMap, 2, 0, 1, 1)
self.comboBox_gradCAM_colorMap = QtWidgets.QComboBox(self.groupBox_gradCAM_Settings)
cmaps = dir(cv2)
ind = ["COLORMAP" in a for a in cmaps]
cmaps = list(np.array(cmaps)[ind])
cmaps = [a.split("_")[1] for a in cmaps]
self.comboBox_gradCAM_colorMap.addItems(cmaps)
#find "VIRIDIS" in cmaps
ind = np.where(np.array(cmaps)=="VIRIDIS")[0][0]
self.comboBox_gradCAM_colorMap.setCurrentIndex(ind)
self.comboBox_gradCAM_colorMap.setObjectName("comboBox_gradCAM_colorMap")
self.gridLayout_3.addWidget(self.comboBox_gradCAM_colorMap, 2, 1, 1, 1)
self.label_gradCAM_alpha = QtWidgets.QLabel(self.groupBox_gradCAM_Settings)
self.label_gradCAM_alpha.setObjectName("label_gradCAM_alpha")
self.gridLayout_3.addWidget(self.label_gradCAM_alpha, 3, 0, 1, 1)
self.doubleSpinBox_gradCAM_alpha = QtWidgets.QDoubleSpinBox(self.groupBox_gradCAM_Settings)
self.doubleSpinBox_gradCAM_alpha.setMinimum(0.0)
self.doubleSpinBox_gradCAM_alpha.setMaximum(1.0)
self.doubleSpinBox_image_alpha.setSingleStep(0.1)
self.doubleSpinBox_gradCAM_alpha.setDecimals(3)
self.doubleSpinBox_gradCAM_alpha.setValue(0.0)
self.doubleSpinBox_gradCAM_alpha.setObjectName("doubleSpinBox_gradCAM_alpha")
self.gridLayout_3.addWidget(self.doubleSpinBox_gradCAM_alpha, 3, 1, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_gradCAM_Settings, 1, 0, 1, 2)
self.pushButton_reset = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.pushButton_reset.setObjectName("pushButton_reset")
self.gridLayout_4.addWidget(self.pushButton_reset, 2, 0, 1, 1)
self.pushButton_update = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.pushButton_update.setObjectName("pushButton_update")
self.gridLayout_4.addWidget(self.pushButton_update, 2, 1, 1, 1)
self.scrollArea_settings.setWidget(self.scrollAreaWidgetContents)
self.gridLayout_6.addWidget(self.scrollArea_settings, 2, 1, 1, 1)
self.retranslateUi(Form_cm_interaction)
QtCore.QMetaObject.connectSlotsByName(Form_cm_interaction)
    def retranslateUi(self, Form_cm_interaction):
        """Apply all translatable UI strings and tooltips.

        Generated-UI style method: sets window title, widget labels and
        tooltips (tooltips come from the module-level ``tooltips`` dict).
        """
        _translate = QtCore.QCoreApplication.translate
        Form_cm_interaction.setWindowTitle(_translate("Form_cm_interaction", "Show images/heatmaps"))
        self.groupBox_model.setTitle(_translate("Form_cm_interaction", "Model"))
        # NOTE(review): this tooltip is overwritten again below from the
        # tooltips dict -- the later assignment wins.
        self.lineEdit_loadModel.setToolTip(_translate("Form_cm_interaction", "Enter path and filename of a history-file (.csv)"))
        self.pushButton_showSummary.setText(_translate("Form_cm_interaction", "Show summary"))
        self.label_inpImgSize.setText(_translate("Form_cm_interaction", "Input img. crop"))
        self.label_outpSize.setText(_translate("Form_cm_interaction", "Output Nr. of classes"))
        self.pushButton_toTensorB.setText(_translate("Form_cm_interaction", "To TensorBoard"))
        self.groupBox_imageShow.setTitle(_translate("Form_cm_interaction", "Image"))
        self.groupBox_image_Settings.setTitle(_translate("Form_cm_interaction", "Image"))
        self.label_image_alpha.setText(_translate("Form_cm_interaction", "Alpha"))
        self.groupBox_gradCAM_Settings.setTitle(_translate("Form_cm_interaction", "Grad-CAM"))
        self.label_gradCAM_targetClass.setText(_translate("Form_cm_interaction", "Class"))
        self.label_gradCAM_targetLayer.setText(_translate("Form_cm_interaction", "Layer"))
        self.label_gradCAM_colorMap.setText(_translate("Form_cm_interaction", "Colormap"))
        self.label_gradCAM_alpha.setText(_translate("Form_cm_interaction", "Alpha"))
        self.pushButton_reset.setText(_translate("Form_cm_interaction", "Reset"))
        self.pushButton_update.setText(_translate("Form_cm_interaction", "Update"))
        #Tooltips (note: these use context "Form", not "Form_cm_interaction")
        self.groupBox_model.setToolTip(_translate("Form", tooltips["groupBox_model"], None))
        self.lineEdit_loadModel.setToolTip(_translate("Form", tooltips["lineEdit_LoadModel_2"], None))
        self.pushButton_showSummary.setToolTip(_translate("Form", tooltips["pushButton_showSummary"], None))
        self.label_inpImgSize.setToolTip(_translate("Form", tooltips["label_inpImgSize"], None))
        self.spinBox_Crop_inpImgSize.setToolTip(_translate("Form", tooltips["label_inpImgSize"], None))
        self.label_outpSize.setToolTip(_translate("Form", tooltips["label_outpSize"], None))
        self.spinBox_outpSize.setToolTip(_translate("Form", tooltips["label_outpSize"], None))
        self.pushButton_toTensorB.setToolTip(_translate("Form", tooltips["pushButton_toTensorB"], None))
        self.groupBox_imageShow.setToolTip(_translate("Form", tooltips["groupBox_imageShow"], None))
        self.groupBox_image_Settings.setToolTip(_translate("Form", tooltips["groupBox_image_Settings"], None))
        self.label_image_alpha.setToolTip(_translate("Form", tooltips["label_image_alpha"], None))
        self.doubleSpinBox_image_alpha.setToolTip(_translate("Form", tooltips["label_image_alpha"], None))
        self.groupBox_gradCAM_Settings.setToolTip(_translate("Form", tooltips["groupBox_gradCAM_Settings"], None))
        self.label_gradCAM_targetClass.setToolTip(_translate("Form", tooltips["label_gradCAM_targetClass"], None))
        self.spinBox_gradCAM_targetClass.setToolTip(_translate("Form", tooltips["label_gradCAM_targetClass"], None))
        self.label_gradCAM_targetLayer.setToolTip(_translate("Form", tooltips["label_gradCAM_targetLayer"], None))
        self.comboBox_gradCAM_targetLayer.setToolTip(_translate("Form", tooltips["label_gradCAM_targetLayer"], None))
        self.label_gradCAM_colorMap.setToolTip(_translate("Form", tooltips["label_gradCAM_colorMap"], None))
        self.comboBox_gradCAM_colorMap.setToolTip(_translate("Form", tooltips["label_gradCAM_colorMap"], None))
        self.label_gradCAM_alpha.setToolTip(_translate("Form", tooltips["label_gradCAM_alpha"], None))
        self.doubleSpinBox_gradCAM_alpha.setToolTip(_translate("Form", tooltips["label_gradCAM_alpha"], None))
        self.pushButton_reset.setToolTip(_translate("Form", tooltips["pushButton_reset"], None))
        self.pushButton_update.setToolTip(_translate("Form", tooltips["pushButton_update"], None))
def gradCAM_on_off(self,on_or_off):
if on_or_off==False:#it is switched off
#set image_alpha to 1
self.doubleSpinBox_image_alpha.setValue(1)
if on_or_off==True:#it is switched on
#set image_alpha and gradCAM_alpha to 0.5
self.doubleSpinBox_image_alpha.setValue(0.5)
self.doubleSpinBox_gradCAM_alpha.setValue(0.5)
def image_on_off(self,on_or_off):
if on_or_off==False:#it is switched off
self.doubleSpinBox_image_alpha.setValue(0)
class popup_cm_modelsummary(QtWidgets.QWidget):
    """Small popup window showing a model summary in a read-only text browser."""
    def setupUi(self, Form):
        """Build the widget tree: one QTextBrowser inside a grid layout."""
        Form.setObjectName("Form")
        Form.resize(300, 300)
        layout = QtWidgets.QGridLayout(Form)
        layout.setObjectName("gridLayout")
        browser = QtWidgets.QTextBrowser(Form)
        browser.setObjectName("textBrowser_modelsummary")
        layout.addWidget(browser, 0, 0, 1, 1)
        # Keep the same attribute names as before for external access.
        self.gridLayout = layout
        self.textBrowser_modelsummary = browser
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Apply translatable strings (window title only)."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Model summary"))
class popup_lrfinder(QtWidgets.QWidget):
def setupUi(self, Form_LrFinder):
Form_LrFinder.setObjectName("Form_LrFinder")
Form_LrFinder.resize(740, 740)
self.gridLayout_3 = QtWidgets.QGridLayout(Form_LrFinder)
self.gridLayout_3.setObjectName("gridLayout_3")
self.scrollArea_LrFinder = QtWidgets.QScrollArea(Form_LrFinder)
self.scrollArea_LrFinder.setWidgetResizable(True)
self.scrollArea_LrFinder.setObjectName("scrollArea_LrFinder")
self.scrollAreaWidgetContents = | |
from django.shortcuts import render, redirect
from django.shortcuts import render_to_response
from django.http import *
from django.template import RequestContext, loader
from django.contrib import auth
from django.core.context_processors import csrf
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.sessions.models import Session
import hashlib
import datetime
from login.models import *
import re
from django.core.urlresolvers import reverse
from django import forms
from datetime import timedelta
from django.db import transaction
from wardenOffice.views import *
# from warden.views import *
# from secretary.views import *
from django.core.files import File
from reportlab.pdfgen import canvas
from reportlab.platypus import Image
from reportlab.lib.pagesizes import letter
from reportlab.lib.pagesizes import landscape
import os, inspect
def secViewComplain(complainObject):
    """Render the secretary's complaint-detail page.

    complainObject: result of a raw complaint query; only the first row
    is displayed.  A complaint with no attached document or comments is
    rendered with empty placeholders rather than treated as an error.
    """
    comment = []
    documents = []
    try:
        # At most one Document row per complaint is expected here.
        documents = (Document.objects.get(cid=complainObject[0].cid))
    except Exception:
        # Was a bare "except:" -- narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed.  Missing document => keep empty list.
        pass
    try:
        comment.extend(Comment.objects.filter(cid=complainObject[0].cid))
    except Exception:
        pass
    return render_to_response("secretary/complainDetail.html",
                              {'item': complainObject[0],
                               'documents': documents,
                               'comment': comment})
def isWarden(request):
    """Return True when the current session belongs to a warden account."""
    return request.session.get("user_type", '') == "warden"
class MealItems:
    """Aggregate of the food items belonging to one meal (by meal id).

    Attributes:
        mid: the meal id this aggregate was built for.
        FoodItems: list of Fooditems rows in the meal.
        protein, vitamin, fat: per-item average nutrient values (ints).
        avgnutrition: mean of the three averaged nutrient values.
        name: comma-separated item names (trailing comma kept for
              template compatibility).
    """
    def __init__(self, MID):
        self.mid = MID
        self.FoodItems = []
        self.protein = 0
        self.vitamin = 0
        self.fat = 0
        self.PopulateFid()
        # BUG FIX: previously computed as (fat + protein + fat)/3, which
        # counted fat twice and ignored vitamins entirely.
        self.avgnutrition = int((self.protein + self.vitamin + self.fat) / 3)
        self.name = ""
        for fobj in self.FoodItems:
            self.name = self.name + fobj.name + ","
    def PopulateFid(self):
        """Load the meal's food items and average their nutrient values."""
        mealItems = Mealitems.objects.filter(mid=self.mid)
        for mi in mealItems:
            fitem = Fooditems.objects.get(fid=mi.fid)
            self.FoodItems.append(fitem)
            self.protein = self.protein + fitem.proteins
            self.vitamin = self.vitamin + fitem.vitamins
            self.fat = self.fat + fitem.fat
        # NOTE(review): assumes every meal has at least one item; an empty
        # meal would raise ZeroDivisionError here (pre-existing behaviour).
        self.protein = int(self.protein / len(mealItems))
        self.vitamin = int(self.vitamin / len(mealItems))
        self.fat = int(self.fat / len(mealItems))
# Single-field upload form used to attach a document to a complaint.
class DocumentForm(forms.Form):
    # The uploaded file; rendered with the label below.
    docfile = forms.FileField(
        label='Select a file'
    )
def _format_poll_rows(prefix, options):
    # Helper: render poll options as "<prefix>-Item <n>\t<votes>" lines,
    # numbering from 1 (replaces three copy-pasted counter loops).
    return "".join("%s-Item %d\t%s\n" % (prefix, i, x.vote)
                   for i, x in enumerate(options, 1))

def _write_poll_tsv(path, rows):
    # Helper: write one poll-result TSV (header + pre-formatted rows).
    with open(path, 'w') as f:
        myfile = File(f)
        myfile.write("meal\tvotes\n" + rows)

def finalPollResult(request):
    """Show the final poll results for the student's hostel and dump the
    per-meal vote counts to static TSV files (read by the result charts).

    Returns a plain error response when no poll results exist yet.
    """
    hostel = request.session.get('hostel')
    totalpollresults = Pollresult.objects.filter(hostel=hostel).count()
    if totalpollresults <= 0:
        return HttpResponse("Sorry no poll results are available!")
    breakfastPollOptions = []
    lunchPollOptions = []
    dinnerPollOptions = []
    # type codes: 1 = breakfast, 2 = lunch, 3 = dinner.  Query failures
    # are treated as "no options for this meal" (previous behaviour).
    for target, meal_type in ((breakfastPollOptions, 1),
                              (lunchPollOptions, 2),
                              (dinnerPollOptions, 3)):
        try:
            target.extend(Pollresult.objects.filter(hostel=hostel).filter(type=meal_type))
        except Exception:
            pass
    # NOTE(review): hard-coded absolute path; should come from settings.
    static_dir = '/mnt/edu/Software/Complaint-Redressal/Complaint-Redressal/crs/student/static/'
    _write_poll_tsv(static_dir + 'FinalVotingDataB.tsv',
                    _format_poll_rows("B", breakfastPollOptions))
    _write_poll_tsv(static_dir + 'FinalVotingDataL.tsv',
                    _format_poll_rows("L", lunchPollOptions))
    _write_poll_tsv(static_dir + 'FinalVotingDataD.tsv',
                    _format_poll_rows("D", dinnerPollOptions))
    return render_to_response("student/pollResult.html",
                              {'list1': breakfastPollOptions,
                               'list2': lunchPollOptions,
                               'list3': dinnerPollOptions})
def isStudent(request):
    """Return True when the current session belongs to a student account."""
    return request.session.get("user_type", '') == "student"
def isSecretary(request):
    """Return True when the current session belongs to a secretary account."""
    return request.session.get("user_type", '') == "secretary"
def list(request):
    """Handle a file upload (complaint attachment) submitted via POST.

    NOTE(review): the name shadows the builtin ``list``; kept unchanged
    because URLconf/callers reference it by this name.
    """
    if request.method == 'POST':
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            newdoc = Document(docfile=request.FILES['docfile'])
            newdoc.save()
            return HttpResponse('uploaded')
        # BUG FIX: an invalid form previously fell through and returned
        # None, which makes Django raise.  Report the failure instead.
        return HttpResponse('Error!!')
    else:
        return HttpResponse('Error!!')
def loadPage(request):
    """Render the upload page with a fresh (unbound) document form."""
    context = {'form': DocumentForm(), 'msg': request.session.get('name')}
    return render_to_response('student/list.html', context)
def validatePassword(passwd):
    """A password is acceptable when it is 8 to 20 characters long."""
    return 7 < len(passwd) < 21
def studentComplainView(request):
    """List all complaints visible to the logged-in student.

    A studComplainlink row with studid = 0 marks a complaint visible to
    every student.
    """
    if not (isStudent(request)):
        return redirect('/crs/')
    uid = request.session.get('uid')
    # SECURITY: the query was previously built by string concatenation;
    # use raw() parameter binding instead (uid comes from the session,
    # but parameterisation is still the safe default).
    qry = ("SELECT a.status, a.cid, a.time, a.type, a.subject, a.comments "
           "FROM complain a, studComplainlink b "
           "WHERE (b.studid = %s OR b.studid = 0) AND a.cid = b.cid")
    serialComplainObjects = Complain.objects.raw(qry, [uid])
    return render_to_response("student/tables.html",
                              {'list': serialComplainObjects,
                               'msg': request.session.get('name')})
# def viewrating(request):
# # hostel=request.session.get('hostel')
# sec=Secreatary.objects.get(hostel=1,type=2)
# return render_to_response('viewrating.html',{'sec':sec});
def studentViewComplain(request):
    """Show the details of one complaint (CID from the query string).

    Dispatches on the session's user_type; each role sees only complaints
    linked to its own id in the corresponding link table.
    NOTE(review): the raw SQL is built by string concatenation -- index
    comes from request.GET, so this should be parameterised.
    """
    index = request.GET.get('CID')
    request.session['currentCid']=index;
    qry = ""
    currDate = datetime.datetime.now()
    if request.session.get("user_type")=="student" :
        # studid = 0 marks a complaint visible to every student.
        qry = "SELECT * FROM complain a, studComplainlink c WHERE c.cid = \'" + str(index) + "\' AND (c.studid = " + str(request.session.get('uid')) + " OR c.studid = 0) AND c.cid = a.cid"
        complainObject = Complain.objects.raw(qry)
        comment = []
        documents = []
        # Parse "YYYY-MM-DD HH:MM:SS" by character slicing, then convert
        # to a Unix timestamp via strftime('%s') (platform-specific).
        s1 = str(complainObject[0].time)
        complainTime=int(datetime.datetime(int(s1[0:4]),int(s1[5:7]),int(s1[8:10]),int(s1[11:13]),int(s1[14:16]),int(s1[17:19])).strftime('%s'))
        # Age of the complaint in seconds; +19800 s = 5h30m, presumably
        # an IST timezone correction -- TODO confirm.
        diff = (int(datetime.datetime.now().strftime('%s'))) - complainTime + 19800
        # return HttpResponse(complainObject[0].access)
        try:
            documents=(Document.objects.get(cid=complainObject[0].cid))
        except:
            pass
        try:
            comment.extend(Comment.objects.filter(cid = complainObject[0].cid))
        except:
            pass
        return render_to_response("student/complainDetail.html", {'item': complainObject[0],'documents':documents,'comment':comment, 'diff' : diff, 'msg': request.session.get('name')})
    elif request.session.get("user_type")=="secretary" :
        qry = "SELECT * FROM complain a, complainLink b WHERE b.CID = \'" + str(index) + "\' AND (b.secID = " + str(request.session.get('uid')) + ") AND b.CID = a.cid"
        complainObject = Complain.objects.raw(qry)
        return secViewComplain(complainObject)
    elif request.session.get("user_type")=="wardenOffice" :
        qry = "SELECT * FROM complain a, complainLink b WHERE b.CID = \'" + str(index) + "\' AND (b.woID = " + str(request.session.get('uid')) + ") AND b.CID = a.cid"
        complainObject = Complain.objects.raw(qry)
        return wardenOfficeViewComplain(complainObject)
    elif request.session.get("user_type")=="warden" :
        qry = "SELECT * FROM complain a, complainLink b WHERE b.CID = \'" + str(index) + "\' AND (b.wardenID = " + str(request.session.get('uid')) + ") AND b.CID = a.cid"
        complainObject = Complain.objects.raw(qry)
        return wardenViewComplain(complainObject)
    else :
        return HttpResponse('error')
def studentLodgeComplain(request):
    """Render the complaint-lodging form; non-students are redirected."""
    if not isStudent(request):
        return redirect('/crs/')
    context = {'msg2': "", 'form': DocumentForm()}
    return render_to_response('student/lodgeComp.html', context,
                              context_instance=RequestContext(request))
def studentHome(request):
    """Student landing page; non-students are bounced to the login root."""
    if not isStudent(request):
        return redirect('/crs/')
    return render_to_response('student/studentHome.html',
                              {'msg': request.session.get('name')})
def studentProfile(request):
    """Show the profile page matching the session's role."""
    if isStudent(request):
        return render_to_response('student/studentProfile.html')
    if isSecretary(request):
        return render_to_response('secretary/viewProfile.html')
    return redirect('/crs/')
def studEditProfile(request):
    """Render the edit-profile form for a student or a secretary."""
    if isStudent(request):
        template = 'student/studEditProfile.html'
    elif isSecretary(request):
        template = 'secretary/EditProfile.html'
    else:
        return redirect('/crs/')
    obj = Student.objects.get(uid=request.session.get('uid'))
    return render_to_response(template,
                              {'list': obj, 'msg': request.session.get('name')})
def afterEditProfile(request):
    """Persist an edited student profile and re-render the profile page.

    NOTE(review): this function looks buggy.  Most values read from
    request.POST (mobile, padd, bank, state, city, pincode) are
    immediately overwritten below by the values already stored on the
    Student row, so only ifsc, accnum and bgroup from the form actually
    take effect, and the length validation runs against stored values.
    Kept byte-identical pending clarification of the intended behaviour.
    """
    uid=request.session.get('uid');
    obj=Student.objects.get(uid=uid);
    # Values submitted by the edit form.
    padd=request.POST.get('padd')
    state=request.POST.get('state')
    city=request.POST.get('city')
    pincode=request.POST.get('pincode')
    bank=request.POST.get('bankName')
    ifsc=request.POST.get('ifsc')
    bgroup=request.POST.get('bgroup')
    account=request.POST.get('accnum')
    # email=request.POST.get('email')
    mobile=request.POST.get('mobile');
    # Second fetch of the same row; these assignments clobber most of
    # the POST values read above (see NOTE in the docstring).
    student = Student.objects.get(uid=uid)
    mobile = student.mobile
    username = student.username
    name = student.name
    sex = student.sex
    padd = student.padd
    email = student.email
    roll = student.roll
    room = student.room
    hostel = student.hostel
    bloodgrp = student.bloodgrp
    baccno = student.baccno
    bank = student.bank
    IFSC = student.ifsc
    state=student.state
    city=student.city
    pincode=student.pincode
    # Basic length validation (account 11, IFSC 11, mobile 10, PIN 6).
    if len(account)==11 and len(ifsc)==11 and len(mobile)==10 and len(pincode)==6:
        obj.mobile=mobile;
        obj.bank=bank;
        obj.ifsc=ifsc;
        obj.baccno=account;
        # obj.email=email
        obj.padd=padd
        obj.state=state
        obj.city=city
        obj.pincode=pincode
        obj.bloodgrp=bgroup
        obj.save();
        return render_to_response('student/studentProfile.html',
                {'mobile': mobile, 'username': username, 'name': name, 'sex': sex, 'padd': padd,
                 'email': email, 'roll': roll, 'hostel': hostel, 'room': room, 'baccno': baccno,
                 'bank': bank, 'IFSC': IFSC,'state':state,'city':city,'pincode':pincode,'bloodgrp':bloodgrp,'msg': name});
    elif isSecretary(request):
        message="Invalid Input"
        return render_to_response('secretary/EditProfile.html',{'list' : obj,'msg2':message,'msg': request.session.get('name') })
    else:
        message="Invalid Input"
        return render_to_response('student/studEditProfile.html',{'list' : obj,'msg2':message,'msg':request.session.get('name')})
# def rateSecretary(request):
# if not (isStudent(request)):
# return redirect('/crs/')
# return render_to_response('student/rateSecretary.html');
def studentPoll(request):
    """Render the meal-poll page (students only)."""
    if not isStudent(request):
        return redirect('/crs/')
    return render_to_response('student/studPoll.html',
                              {'msg': request.session.get('name')})
def studentHostelLeave(request):
    """Render the hostel-leave application page (students only)."""
    if not isStudent(request):
        return redirect('/crs/')
    return render_to_response('student/studHostelLeave.html',
                              {'msg': request.session.get('name')})
def studentMessRebate(request):
    """Render the mess-rebate page (students only)."""
    if not isStudent(request):
        return redirect('/crs/')
    return render_to_response('student/messrebate.html',
                              {'msg': request.session.get('name')})
def getCatagory(str):
    """Map a complaint-category label to its numeric code (0 = other)."""
    codes = {"Mess": 1, "Environment": 2, "Technical": 3, "Maintenance": 4}
    return codes.get(str, 0)
def message():
    """Body text used for the password-reset confirmation notification."""
    return ("The confirmation Link for the reset password is "
            "Confirmation Link.Please Click on it to reset password")
def getTypeDescription(code):
    """Inverse of getCatagory: numeric complaint type to its label."""
    names = {1: "Mess", 2: "Environment", 3: "Technical", 4: "Maintenance"}
    return names.get(code, "Other")
@transaction.atomic
def getComplainID(catagory, hostel):
    """Generate the next complaint id: "<hostel>-<cat>-dd/mm/yy-NNNN".

    The per-(hostel, category) running number lives in Complainid and is
    reset on the first complaint of a new day.  The whole read-increment
    -save is wrapped in a transaction so two simultaneous complaints
    cannot receive the same number.
    """
    # NOTE(review): hostels 2 and 3 both mapped to "AR" in the original
    # code -- preserved as-is, but this looks like a copy/paste slip.
    hostel_codes = {1: "AS", 2: "AR", 3: "AR"}
    catagory_codes = {1: "ME", 2: "EN", 3: "TE", 4: "MA"}
    complain = (hostel_codes.get(hostel, "xx") + "-" +
                catagory_codes.get(catagory, "xx") + "-")
    dt = datetime.datetime.now()
    dateComplain = dt.date()
    dateDatabase = Complainid.objects.get(hostel=hostel, type=catagory)
    if dateDatabase.date < dateComplain:
        # First complaint of a new day: restart the running number at 1.
        dateDatabase.date = dateComplain
        dateDatabase.id = 1
        dateDatabase.save()
    # Date part dd/mm/y (zero-padded day/month, year relative to 2000),
    # replacing the manual "< 10" padding branches.
    complain = complain + "%02d/%02d/" % (dt.day, dt.month) + str(dt.year - 2000)
    compno = int(dateDatabase.id)
    dateDatabase.id = dateDatabase.id + 1
    dateDatabase.save()
    # Four-digit running number; numbers >= 10000 stay unpadded.
    complain = complain + "-" + str(compno).zfill(4)
    return complain
def loadRateSecPage(request):
uid=request.session.get('uid')
obj=Student.objects.get(uid=uid)
if obj.hostel==0:
# qry="SELECT * FROM secretary a WHERE a.hostel=\'" + "0" + "\'"
# query="SELECT * FROM secretaryRating b WHERE b.studID = \'"+ str(uid) + "\'"
secretary=Secretary.objects.filter(hostel = 0)
request.session['secListForRating']=secretary;
| |
from __future__ import absolute_import, division, print_function
import collections
import inspect
from enum import Flag, auto
from numbers import Number
from typing import Callable, List, Optional, Text, Union, Sequence
import numpy as np
import tensorflow as tf
from odin.bay import distributions as obd
from odin.utils import as_tuple
from six import string_types
from tensorflow import Tensor
from tensorflow.python import keras
from tensorflow.python.ops import array_ops
from tensorflow_probability import distributions as tfd
from tensorflow_probability.python.distributions import Distribution
from tensorflow_probability.python.distributions.joint_distribution import JointDistribution
from tensorflow_probability.python.layers import DistributionLambda
from tensorflow_probability.python.layers.distribution_layer import (
_get_convert_to_tensor_fn, _serialize)
from tensorflow_probability.python.layers.internal import \
distribution_tensor_coercible as dtc
from tensorflow_probability.python.layers.internal import \
tensor_tuple as tensor_tuple
__all__ = [
'print_distribution',
'coercible_tensor',
'kl_divergence',
'is_binary_distribution',
'is_discrete_distribution',
'is_mixture_distribution',
'is_zeroinflated_distribution',
'concat_distributions',
'batch_slice',
]
# ===========================================================================
# distribution type
# ===========================================================================
def _dist(dist):
  """Resolve `dist` to the list of concrete Distribution classes behind it.

  Accepts a `tfd.Distribution` instance, a `DistributionLambda` layer
  instance, or a `DistributionLambda` subclass.  Wrapper classes
  (Independent, the Distribution base, TransformedDistribution and the
  internal _TensorCoercible) are filtered out of the returned MRO.
  """
  if isinstance(dist, DistributionLambda):
    # Instantiate the layer on a dummy keras input to obtain a concrete
    # distribution instance.
    dist = dist(keras.Input((None,), None))
  # distribution layer
  if isinstance(dist, tfd.Distribution):
    # Unwrap (possibly nested) Independent wrappers first.
    while isinstance(dist, tfd.Independent):
      dist = dist.distribution
    dist = type(dist)
  elif inspect.isclass(dist) and issubclass(dist, DistributionLambda):
    # Build the layer from an empty parameter tensor of the right size.
    # NOTE(review): unlike the branch above, `dist` is left as an
    # *instance* here before the type.mro() call below -- verify this
    # path is actually exercised/correct.
    dist = dist()(array_ops.empty(shape=(1, dist.params_size((1,))),
                                  dtype=tf.float32))
  else:
    raise ValueError("No support for distribution of type: %s" % str(dist))
  # remove unnecessary classes
  dist = [
      t for t in type.mro(dist)
      if issubclass(t, tfd.Distribution) and t not in (
          tfd.Independent, tfd.Distribution, tfd.TransformedDistribution,
          dtc._TensorCoercible)
  ]
  return dist
def is_binary_distribution(dist):
  """True if `dist` produces only {0, 1} samples (Bernoulli-like)."""
  if isinstance(dist, tfd.Distribution):
    # Empirical check on an instantiated distribution.
    draws = dist.sample(100).numpy()
    return np.all(np.unique(draws.astype('float32')) == [0., 1.])
  binary_types = (obd.OneHotCategorical, obd.RelaxedOneHotCategorical,
                  obd.Bernoulli, obd.RelaxedBernoulli)
  return any(issubclass(t, binary_types) for t in _dist(dist))
def is_discrete_distribution(dist):
  """True if `dist` is integer-valued (counts or categories)."""
  if isinstance(dist, tfd.Distribution):
    # Empirical check: every sampled value is an exact integer.
    draws = dist.sample(100).numpy()
    return np.all(draws.astype('float32') == draws.astype('int32'))
  discrete_types = (obd.Poisson, obd.NegativeBinomial,
                    obd.NegativeBinomialDisp, obd.Categorical,
                    obd.Binomial, obd.Multinomial)
  return any(issubclass(t, discrete_types) for t in _dist(dist))
def is_mixture_distribution(dist):
  """True if `dist` resolves to a mixture distribution."""
  return any(issubclass(t, (obd.Mixture, obd.MixtureSameFamily))
             for t in _dist(dist))
def is_zeroinflated_distribution(dist):
  """True if `dist` resolves to a zero-inflated distribution."""
  return any(issubclass(t, obd.ZeroInflated) for t in _dist(dist))
# ===========================================================================
# Logging
# ===========================================================================
def _dist2text(dist):
cls = dist.__class__.__name__
return (f"{cls} dtype:{dist.dtype.name} "
f"batch:{dist.batch_shape} event:{dist.event_shape}")
def _extract_desc(dist, name, pad):
  """Recursively build an indented text description of `dist`.

  Lists the constructor parameters (recursing into nested
  distributions) followed by every tensor/ndarray member not already in
  the parameters.  `pad` is the current indentation prefix; the trailing
  newline is stripped before returning.
  """
  assert isinstance(dist, tfd.Distribution), \
    f"dist must be instance of Distribution but given {type(dist)}"
  text = f"{pad}{(name + ':' if len(name) > 0 else '')}{_dist2text(dist)}\n"
  pad += " "
  text += f"{pad}Initialization:\n"
  for key, val in sorted(dist.parameters.items()):
    if isinstance(val, tfd.Distribution):
      # Nested distribution: recurse with extra indentation.
      text += _extract_desc(val, key, f"{pad} ")
    elif tf.is_tensor(val):
      text += f"{pad} {key}: {val.shape} {val.dtype.name}\n"
    else:
      text += f"{pad} {key}: {val}\n"
  text += f"{pad}Tensors:\n"
  for key, val in sorted(inspect.getmembers(dist)):
    if (tf.is_tensor(val) or isinstance(val, np.ndarray)) and \
      key not in dist.parameters:
      text += f"{pad} {key}: {val.shape} {val.dtype.name}\n"
  return text[:-1]
def print_distribution(dist, return_text=False):
  r"""Pretty-print a Distribution's structure.

  When `return_text` is True, the description is returned (without its
  trailing newline) instead of printed.
  """
  assert isinstance(dist, tfd.Distribution)
  description = _extract_desc(dist, '', '')
  if return_text:
    return description[:-1]
  print(description)
# ===========================================================================
# Objectives
# ===========================================================================
def coercible_tensor(d: tfd.Distribution,
                     convert_to_tensor_fn=tfd.Distribution.sample,
                     return_value: bool = False) -> tfd.Distribution:
  r""" make a distribution convertible to Tensor using the
  `convert_to_tensor_fn`

  This code is copied from: `distribution_layers.py` tensorflow_probability

  Returns the wrapped distribution, or `(distribution, value)` when
  `return_value` is True, where `value` is the concrete tensor obtained
  from `convert_to_tensor_fn`.
  """
  assert isinstance(d, tfd.Distribution), \
    "dist must be instance of tensorflow_probability.Distribution"
  convert_to_tensor_fn = _get_convert_to_tensor_fn(convert_to_tensor_fn)
  # If an unbound Distribution method (e.g. Distribution.sample) was
  # passed, rebind it to the concrete subclass of `d`.
  if inspect.isfunction(convert_to_tensor_fn) and \
    convert_to_tensor_fn in list(tfd.Distribution.__dict__.values()):
    convert_to_tensor_fn = getattr(type(d), convert_to_tensor_fn.__name__)
  # Wraps the distribution to return both dist and concrete value."""
  distribution = dtc._TensorCoercible(distribution=d,
                                      convert_to_tensor_fn=convert_to_tensor_fn)
  ### prepare the value
  value = distribution._value()
  # Cross-link value and distribution, and expose tensor-like shape
  # accessors on the distribution so it can stand in for a Tensor.
  value._tfp_distribution = distribution
  distribution.shape = value.shape
  distribution.get_shape = value.get_shape
  ### return
  if return_value:
    return distribution, value
  return distribution
# ===========================================================================
# Objectives
# ===========================================================================
def kl_divergence(
    q: Union[Distribution, Callable[[], Distribution]],
    p: Union[Distribution, Callable[[], Distribution]],
    analytic: bool = False,
    q_sample: Union[int,
                    Callable[[Distribution], Tensor]] = lambda q: q.sample(),
    reduce_axis: Sequence[int] = (),
    reverse: bool = True,
    free_bits: Optional[float] = None,
) -> Tensor:
  """ Calculating `KL(q(x)||p(x))` (if reverse=True) or
  `KL(p(x)||q(x))` (if reverse=False)

  Parameters
  ----------
  q : `tensorflow_probability.Distribution` or `Callable`,
    the approximated posterior distribution
  p : `tensorflow_probability.Distribution` or `Callable`,
    the prior distribution
  analytic : bool (default: False)
    if True, use the closed-form solution of the KL-divergence
    instead of a Monte-Carlo estimate
  q_sample : {callable, Tensor, Number}
    callable for extracting sample from `q(x)` (takes `q` posterior distribution
    as input argument); an integer is passed to `q.sample(...)`; None
    converts `q` itself to a tensor (TensorCoercible)
  reduce_axis : {None, int, tuple}. Reduce axis when use MCMC to estimate KL
    divergence, default `()` mean keep all original dimensions.
  reverse : `bool`. If `True`, calculating `KL(q||p)` which optimizes `q`
    (or p_model) by greedily filling in the highest modes of data (or, in
    other word, placing low probability to where data does not occur).
    Otherwise, `KL(p||q)` a.k.a maximum likelihood, place high probability
    at anywhere data occur (i.e. averagely fitting the data).
  free_bits : `float` (optional)
    maximum(lambda, KL) as stated in (Kingma et al. 2016); the threshold
    is multiplied by the number of event dimensions

  Returns
  -------
  A Tensor with the batchwise KL-divergence between `distribution_a`
  and `distribution_b`. The shape is `[batch_dims]` for analytic KL,
  otherwise, `[sample_shape, batch_dims]`.

  References
  ----------
  Kingma, D.P., et al., 2016. Improved variational inference with inverse
  autoregressive flow, Advances in Neural Information Processing
  Systems. Curran Associates, Inc., pp. 4743-4751.

  Example
  -------
  ```python
  p = bay.distributions.OneHotCategorical(logits=[1, 2, 3])
  w = bk.variable(np.random.rand(2, 3).astype('float32'))
  q = bay.distributions.OneHotCategorical(w)
  opt = tf.optimizers.Adam(learning_rate=0.01,
                           beta_1=0.9,
                           beta_2=0.999,
                           epsilon=1e-07,
                           amsgrad=False)
  for i in range(1000):
    with tf.GradientTape() as tape:
      kl = bay.kl_divergence(q=q, p=p, q_sample=lambda q: q.sample(1000))
      grads = bk.grad(tf.reduce_mean(kl), w, tape=tape)
      opt.apply_gradients(grads_and_vars=[(g, v) for g, v in zip(grads, [w])])
      if i % 10 == 0:
        print("#%3d KL: %.4f" % (i, tf.reduce_mean(kl).numpy()))
  print(q.sample())
  ```
  """
  # Allow lazily-constructed distributions.
  if callable(p) and not isinstance(p, Distribution):
    p = p()
    assert isinstance(p, Distribution), \
      f"callable must return a Distribution, but returned: {p}"
  if callable(q) and not isinstance(q, Distribution):
    q = q()
    assert isinstance(q, Distribution), \
      f"callable must return a Distribution, but returned: {q}"
  ### add independent if necessary
  if isinstance(q, tfd.Independent) and not isinstance(p, tfd.Independent):
    p = tfd.Independent(
        p,
        reinterpreted_batch_ndims=len(q.event_shape) - len(p.event_shape),
    )
  ### swap the arguments for the forward KL(p||q)
  if not bool(reverse):
    q, p = [q, p][::-1]
  ### analytic KL
  if bool(analytic):
    kl = tfd.kl_divergence(q, p)
  ### non-analytic KL
  else:
    # using MCMC sampling for estimating the KL
    if callable(q_sample):
      z = q_sample(q)
    elif q_sample is None:  # TensorCoercible
      z = tf.convert_to_tensor(q)
    else:
      z = q.sample(q_sample)
    # calculate the output, then perform reduction
    kl = q.log_prob(z) - p.log_prob(z)
  ### free-bits: clip the KL from below at free_bits * n_event_dims
  if free_bits is not None:
    units = int(np.prod(q.event_shape))
    kl = tf.maximum(kl, tf.constant(free_bits * units, dtype=kl.dtype))
  # NOTE(review): this reduction also runs on the analytic path; with the
  # default reduce_axis=() it is a no-op.
  kl = tf.reduce_mean(input_tensor=kl, axis=reduce_axis)
  return kl
class KLdivergence:
  r""" This class freezes the arguments of `kl_divergence` so it could be call
  later without the required arguments.

  - Calculating KL(q(x)||p(x)) (if reverse=True) or
  - KL(p(x)||q(x)) (if reverse=False)

  Parameters
  ----------
  posterior : `tensorflow_probability.Distribution`, the approximated
    posterior distribution
  prior : `tensorflow_probability.Distribution`, the prior distribution
  analytic : bool (default: False)
    if True, use the closed-form solution of the KL-divergence
  sample_shape : {Tensor, Number}
    number of MCMC samples for MCMC estimation of KL-divergence
  reverse : `bool`. If `True`, calculating `KL(q||p)` which optimizes `q`
    (or p_model) by greedily filling in the highest modes of data (or, in
    other word, placing low probability to where data does not occur).
    Otherwise, `KL(p||q)` a.k.a maximum likelihood, or expectation
    propagation place high probability at anywhere data occur
    (i.e. averagely fitting the data).
  free_bits : `float` (optional)
    lower threshold for the KL term (see `kl_divergence`)
  keepdims : a Boolean. If True, expand the dimension to preserve the MCMC
    dimension in case of analytic KL.

  Note
  ----
  this class return 0. if the prior is not given (i.e. prior=None)
  """

  def __init__(self,
               posterior,
               prior=None,
               analytic=False,
               sample_shape=(),
               reverse=True,
               free_bits=None,
               keepdims=False):
    self.posterior = posterior
    self.prior = prior
    self.analytic = bool(analytic)
    self.sample_shape = sample_shape
    self.reverse = bool(reverse)
    self.keepdims = bool(keepdims)
    self.free_bits = free_bits

  def __str__(self):
    # Distributions expose batch/event shapes; TensorCoercible wrappers
    # expose a plain `shape` attribute instead.
    if hasattr(self.posterior, 'shape'):
      post_shape = self.posterior.shape
    else:
      post_shape = f"{self.posterior.batch_shape + self.posterior.event_shape}"
    if hasattr(self.prior, 'shape'):
      prior_shape = self.prior.shape
    else:
      prior_shape = f"{self.prior.batch_shape + self.prior.event_shape}"
    return (f"<{self.__class__.__name__} "
            f"post:({self.posterior.__class__.__name__}, {post_shape})"
            f" prior:({self.prior.__class__.__name__}, {prior_shape})"
            f" analytic:{self.analytic} reverse:{self.reverse}"
            f" sample:{self.sample_shape}>")

  def __repr__(self):
    return self.__str__()

  def __call__(self,
               prior=None,
               analytic=None,
               sample_shape=-1,
               reverse=None,
               keepdims=None,
               free_bits=None):
    """Compute the frozen KL term; per-call arguments override the
    frozen ones (None, or -1 for sample_shape, means "use frozen value").

    BUG FIX: `keepdims` previously defaulted to `False`, so the value
    given at construction time (`self.keepdims`) was silently ignored;
    it now uses the same None-sentinel convention as the other
    parameters.
    """
    prior = self.prior if prior is None else prior
    analytic = self.analytic if analytic is None else bool(analytic)
    sample_shape = self.sample_shape if sample_shape == -1 else sample_shape
    reverse = self.reverse if reverse is None else bool(reverse)
    keepdims = self.keepdims if keepdims is None else bool(keepdims)
    free_bits = self.free_bits if free_bits is None else free_bits
    if prior is None:
      # No prior given: the KL term contributes nothing.
      return 0.
    div = kl_divergence(q=self.posterior,
                        p=prior,
                        analytic=analytic,
                        reverse=reverse,
                        q_sample=sample_shape,
                        free_bits=free_bits)
    if analytic and keepdims:
      # Preserve a leading MCMC-like dimension for shape consistency.
      div = tf.expand_dims(div, axis=0)
    return div
# ===========================================================================
# Concatenation of distributions
# ===========================================================================
# must hand define all the parameters here
# NOTE: this list is to be updated, or a smarter solution for automatically
# mining all the parameters
dist_params = {
# complex
obd.Independent: ['distribution', 'reinterpreted_batch_ndims'],
obd.ZeroInflated: ['count_distribution', 'inflated_distribution'],
obd.MixtureSameFamily: ['mixture_distribution', 'components_distribution'],
obd.Blockwise: ['distributions'],
obd.ConditionalTensor: ['distribution', 'conditional_tensor'],
# Exponential
obd.Gamma: ['concentration', 'rate'],
# Gaussians
obd.Normal: ['loc', 'scale'],
obd.LogNormal: ['loc', 'scale'],
obd.MultivariateNormalDiag: | |
of the function invocations
"""
return self._result
class PipelineCommand(object):
    """
    Base class for constructing program command lines

    This class should be subclassed to implement the 'init'
    and 'cmd' methods.

    The 'init' method should do any preprocessing and
    caching of arguments to be used in the 'cmd' method;
    the 'cmd' method should use these to construct and
    return a 'Command' instance.
    """
    def __init__(self,*args,**kws):
        """
        Create a new PipelineCommand instance

        Arguments:
          args (List): list of arguments to be supplied to
            the subclass (must match those defined in the
            'init' method)
          kws (Dictionary): dictionary of keyword-value pairs
            to be supplied to the subclass (must match those
            defined in the 'init' method)
        """
        # Set internal name (defaults to the subclass name)
        self._name = self.__class__.__name__
        # Quoting of spaces when generating wrapper
        # (subclasses may override, e.g. PipelineScriptWrapper)
        self._quote_spaces = True
        # Invoke the 'init' method (implemented by the subclass)
        self.init(*args,**kws)
    def name(self):
        """
        Return a "sanitized" version of the class name

        NOTE(review): 'sanitize_name' is an external helper; presumably
        it maps the name to a filesystem/scheduler-safe identifier --
        confirm against its definition.
        """
        return sanitize_name(self._name)
    def make_wrapper_script(self,scripts_dir=None,shell="/bin/bash",
                            envmodules=None,conda=None,conda_env=None,
                            working_dir=None,batch_number=None):
        """
        Generate a uniquely-named wrapper script to run the command

        The wrapper echoes diagnostic metadata (command name, host,
        user, start/end times, exit code) around the actual command,
        and can optionally load environment modules, activate a conda
        environment, and change the working directory first.

        Arguments:
          scripts_dir (str): path of directory to write
            the wrapper scripts to (defaults to the current
            working directory)
          shell (str): shell to use (defaults to '/bin/bash')
          envmodules (str): list of environment modules to load
          conda (str): path to conda executable (located via
            'find_program' if not supplied)
          conda_env (str): name or path for conda environment to
            activate in the script
          working_dir (str): explicitly specify the directory
            the script should be executed in
          batch_number (int): for batched commands, the number
            of the batch that this script corresponds to
            (optional)

        Returns:
          String: name of the wrapper script.
        """
        # Wrap in a script
        if scripts_dir is None:
            scripts_dir = os.getcwd()
        # uuid4 ensures each generated script file has a unique name
        script_file = os.path.join(scripts_dir,"%s.%s.sh" % (self.name(),
                                                             uuid.uuid4()))
        # Prologue: lines executed before the command itself, starting
        # with diagnostic '#### ...' echo statements
        prologue = ["echo \"#### COMMAND %s\"" % self._name]
        if batch_number is not None:
            prologue.append("echo \"#### BATCH %s\"" % batch_number)
        prologue.extend(["echo \"#### HOSTNAME $HOSTNAME\"",
                         "echo \"#### USER $USER\"",
                         "echo \"#### START $(date)\""])
        if envmodules:
            # Use a login shell so the 'module' command is initialised
            shell += " --login"
            try:
                # Propagate the caller's MODULEPATH into the script
                modulepath = os.environ['MODULEPATH']
                if modulepath:
                    prologue.append("export MODULEPATH=%s" % modulepath)
            except KeyError:
                # MODULEPATH not set in this environment: nothing to do
                pass
            for module in envmodules:
                if module is not None:
                    prologue.append("module load %s" % module)
        if conda_env:
            # Activate the requested conda environment; locate the
            # conda executable if it wasn't supplied explicitly
            if not conda:
                conda = find_program("conda")
            if not conda:
                raise PipelineError("Unable to locate conda")
            # Conda installation root is two levels above the
            # executable (i.e. <root>/bin/conda)
            conda_dir = os.sep.join(
                os.path.abspath(conda).split(os.sep)[:-2])
            conda_activate_cmd = \
                "source %s/bin/activate %s" % (conda_dir,conda_env)
            prologue.extend(["echo %s" % conda_activate_cmd,
                             conda_activate_cmd])
        if working_dir:
            # NOTE(review): the path is not shell-quoted here; a
            # 'working_dir' containing spaces would break the 'cd'
            prologue.append("cd %s" % working_dir)
        prologue.append("echo \"#### CWD $(pwd)\"")
        # Epilogue: capture the command's exit status, report it, and
        # make the wrapper script exit with the same status
        epilogue = ["exit_code=$?",
                    "echo \"#### END $(date)\"",
                    "echo \"#### EXIT_CODE $exit_code\"",
                    "exit $exit_code"]
        # Delegate actual script generation to the Command instance
        self.cmd().make_wrapper_script(filen=script_file,
                                       shell=shell,
                                       prologue='\n'.join(prologue),
                                       epilogue='\n'.join(epilogue),
                                       quote_spaces=self.quote_spaces())
        return script_file
    def quote_spaces(self):
        """
        Indicate whether spaces should be quoted in wrapper script
        """
        return self._quote_spaces
    def init(self):
        """
        Initialise and store parameters

        Must be implemented by the subclass
        """
        raise NotImplementedError("Subclass must implement 'init' method")
    def cmd(self):
        """
        Build the command

        Must be implemented by the subclass and return a
        Command instance
        """
        raise NotImplementedError("Subclass must implement 'cmd' method")
class PipelineCommandWrapper(PipelineCommand):
    """
    Class for constructing program command lines

    A directly-usable variant of PipelineCommand (no subclassing
    required).

    For example, to wrap the 'ls' command directly:

    >>> ls_command = PipelineCommandWrapper("List directory",'ls',dirn)

    The command line can also be extended after creation with the
    'add_args' method:

    >>> ls_command = PipelineCommandWrapper("List directory",'ls')
    >>> ls.command.add_args(dirn)
    """
    def __init__(self, name, *args):
        """
        Create a new PipelineCommandWrapper instance

        Arguments:
          name (str): arbitrary name for the command
          args (List): initial list of arguments making
            up the command
        """
        super().__init__(*args)
        self._name = str(name)
        # Only build the underlying Command if arguments were supplied
        self._cmd = Command(*args) if args else None

    def add_args(self, *args):
        """
        Add additional arguments to extend the command being built

        Arguments:
          args (List): one or more arguments to append to
            the command
        """
        if self._cmd is not None:
            self._cmd.add_args(*args)
        else:
            self._cmd = Command(*args)

    def init(self, *args):
        """
        Internal: dummy init which does nothing
        """
        pass

    def cmd(self):
        """
        Internal: implement the 'cmd' method
        """
        return self._cmd
class PipelineScriptWrapper(PipelineCommand):
    """
    Class for constructing script command lines

    A directly-usable variant of PipelineCommand (no subclassing
    required) which wraps one or more shell script blocks.

    For example, to wrap a bash script directly:

    >>> ls_script = PipelineScriptWrapper("List directory",
    ...                                   "ls {d}".format(d=dirn))

    A script can also be composed from multiple blocks:

    >>> properties = PipelineScriptWrapper("File properties",
    ...                                    "export FILEN={f}".format(
    ...                                         f="Example.fq"),
    ...                                    "du -h $FILEN",
    ...                                    "file $FILEN")

    in which case the blocks are chained with '&&', i.e. the generated
    script looks like::

        {
          export FILEN=Example.fq
        } && {
          du -h $FILEN
        } && {
          file $FILEN
        }
    """
    def __init__(self, name, *scripts):
        """
        Create a new PipelineScriptWrapper instance

        Arguments:
          name (str): arbitrary name for the script
          scripts (str): one or more script blocks
        """
        super().__init__()
        self._name = str(name)
        # Script text must not have its spaces quoted when wrapped
        self._quote_spaces = False
        self._block_sep = ' && '
        self._scripts = []
        for block in scripts:
            self.add_block(block)

    def add_block(self, script):
        """
        Append a script block

        Arguments:
          scripts (str): script block to append
        """
        # Strip leading blank lines and trailing whitespace, then
        # remove any common leading indentation
        text = str(script).lstrip('\n').rstrip()
        self._scripts.append(textwrap.dedent(text))

    def init(self, *args):
        """
        Internal: dummy init which does nothing
        """
        pass

    def cmd(self):
        """
        Internal: implement the 'cmd' method
        """
        if len(self._scripts) == 1:
            # Single block: use it verbatim
            return Command(self._scripts[0])
        # Multiple blocks: wrap each in braces and chain with '&&'
        wrapped = ["{\n%s\n}" % indent(piece," ") for piece in self._scripts]
        return Command(self._block_sep.join(wrapped))
######################################################################
# Parameter-like utility classes for passing values between tasks
######################################################################
class FunctionParam(BaseParam):
    """
    Class for deferred function evaluation as pipeline parameter

    Wraps a function-like object together with positional and keyword
    arguments; invocation is deferred until the 'value' property is
    accessed. Any argument which is itself PipelineParam-like (i.e.
    exposes a 'value' attribute) is replaced with its current value
    immediately before the function is called.
    """
    def __init__(self, f, *args, **kws):
        """
        Create a new FunctionParam instance

        Arguments:
          f (object): function-like object to
            be evaluated
          args (list): positional arguments to
            pass to the function on evaluation
          kws (mapping): keyworded arguments to
            pass to the function on evaluation
        """
        super().__init__()
        self._f = f
        self._args = args
        self._kws = kws

    @property
    def value(self):
        """
        Return value from evaluated function
        """
        def resolve(item):
            # Substitute parameter-like objects with their values;
            # anything without a 'value' attribute passes through as-is
            try:
                return item.value
            except AttributeError:
                return item
        positional = [resolve(a) for a in self._args]
        keywords = {k: resolve(v) for k, v in self._kws.items()}
        try:
            return self._f(*positional, **keywords)
        except Exception as ex:
            raise PipelineError("Failed to evaluate function: %s" % ex)
class ListParam(BaseParam):
    """
    Implement list-like behaviour as pipeline parameter

    The pipeline-parameter equivalent of the Python 'list' class:
    supports the 'append' and 'extend' methods, and 'len' reports the
    number of stored elements.

    The 'value' property returns a plain Python list in which any
    pipeline parameter-like elements have been replaced with their
    current values.

    Recommended for passing lists of parameters between pipeline
    tasks.
    """
    def __init__(self, iterable=None):
        """
        Create a new ListParam instance

        Arguments:
          iterable (iterable): optional, initial set of
            elements to populate the list with
        """
        super().__init__()
        self._list = list(iterable) if iterable else list()

    def append(self, item):
        # Append a single element (may be a parameter-like object)
        self._list.append(item)

    def extend(self, iterable):
        # Append all elements from an iterable
        self._list.extend(iterable)

    @property
    def value(self):
        # Parameter-like elements are resolved to their values;
        # plain elements pass through unchanged (getattr's 3-arg form
        # swallows AttributeError, mirroring an explicit try/except)
        return [getattr(element, "value", element) for element in self._list]

    def __len__(self):
        return len(self._list)
class PathJoinParam(FunctionParam):
    """
    Class for joining file paths as pipeline parameter

    The pipeline-parameter equivalent of 'os.path.join': takes a set
    of path elements on instantiation (strings or PipelineParam-like
    objects) and returns the joined path when the 'value' property is
    evaluated.

    Example usage:

    >>> pth = PathJoinParam("/path","to","file.txt")
    >>> pth.value
    "/path/to/file.txt"

    >>> base_dir = PipelineParam(value="/path/to/base")
    >>> pth = PathJoinParam(base_dir,"file.txt")
    >>> pth.value
    "/path/to/base/file.txt"
    >>> base_dir.set("/path/to/new/base")
    >>> pth.value
    "/path/to/new/base/file.txt"

    Note that this class doesn't implement a 'set' method (unlike the
    standard PipelineParam class) so the path elements cannot be
    changed after initialisation.
    """
    def __init__(self, *p):
        """
        Create a new PathJoinParam instance

        Arguments:
          p (iterable): list of path elements
            to join; can be strings or
            PipelineParam-like objects
        """
        super().__init__(os.path.join, *p)
class PathExistsParam(FunctionParam):
"""
Class for checking file/directory existance as pipeline parameter
This class implements the pipeline parameter
equivalent of the `os.path.exists` function, taking
a path on instantiation (which can be a string
or | |
# Source: Sheng-Cheng/agile_flight -- envtest/ros/user_code.py
#!/usr/bin/python3
from pickle import NONE
from utils import AgileCommandMode, AgileCommand
from rl_example import rl_example
from scipy.spatial.transform import Rotation as Rot # for rotation matrix representation
from numpy import linalg as LA
import numpy as np # for vector computation
import cv2 # for displaying RGB image
import rospy # for determining the current time
import matplotlib.pyplot as plt # for visualizing data in real time
import math
from csv import writer
initTime = None
def compute_command_vision_based(state, img):
    """
    Compute a flight command from the vehicle state and a camera image.

    Arguments:
      state: vehicle state; at least 'state.t' (current time) is read here
      img: camera image (unused by this placeholder implementation)

    Returns:
      AgileCommand: the command to execute.

    NOTE(review): three example commands (SRT, CTBR, LINVEL) are built in
    sequence, each overwriting 'command'; only the final LINVEL command is
    returned -- the first two are illustrative dead code.
    """
    ################################################
    # !!! Begin of user code !!!
    # TODO: populate the command message
    ################################################
    print("Computing command vision-based!")
    print(state)
    # print("Image shape: ", img.shape)
    # # display the vision
    # cv2.imshow("Image window", img)
    # cv2.waitKey(0)
    # breakpoint()
    # Example of SRT command (single-rotor thrusts)
    command_mode = 0
    command = AgileCommand(command_mode)
    command.t = state.t
    command.rotor_thrusts = [1.0, 1.0, 1.0, 1.0]
    # Example of CTBR command (collective thrust + body rates)
    command_mode = 1
    command = AgileCommand(command_mode)
    command.t = state.t
    command.collective_thrust = 15.0
    command.bodyrates = [0.0, 0.0, 0.0]
    # Example of LINVEL command (velocity is expressed in world frame)
    # -- this is the command actually returned
    command_mode = 2
    command = AgileCommand(command_mode)
    command.t = state.t
    command.velocity = [1.0, 0.0, 0.0]
    command.yawrate = 0.0
    ################################################
    # !!! End of user code !!!
    ################################################
    return command
def compute_command_state_based(state, obstacles, vision, start, rl_policy=None):
################################################
# !!! Begin of user code !!!
# TODO: populate the command message
################################################
# print("Computing command based on obstacle information!")
# print(state)
# print("Obstacles: ", obstacles)
# the obstacles means
# The relative position between the center of the quadrotor and the center of the sphere obstacles.
# The scale means the size of the obstacles, in radius.
# print("started yet: ", start)
currentTime = -1 # initialize
now = rospy.get_rostime()
global initTime
# breakpoint()
if start: # only start to counting time if we receive the start command
if initTime is None:
initTime = now.secs + now.nsecs/1000000000.0
else:
currentTime = now.secs + now.nsecs/1000000000.0 - initTime
print("current time is", currentTime)
# begin trajectory computation
# parameters
kg_vehicleMass = 0.752
J = np.array([[0.0025, 0, 0],
[0, 0.0021, 0],
[0, 0, 0.0043]])
GeoCtrl_Kpx = 4.5 # 4.512
GeoCtrl_Kpy = 5.0 #4.512
GeoCtrl_Kpz = 5
GeoCtrl_Kvx = 0.5
GeoCtrl_Kvy = 0.6 # 0.5
GeoCtrl_Kvz = 1.504
GeoCtrl_KRx = 0.128
GeoCtrl_KRy = 0.086
GeoCtrl_KRz = 0.02
GeoCtrl_KOx = 0.07327586207
GeoCtrl_KOy = 0.05 # 0.073
GeoCtrl_KOz = 0.004
GRAVITY_MAGNITUDE = 9.8
zeros3 = [0.0,0.0,0.0]
# targetPos.x = radius * sinf(currentRate * netTime)
# targetPos.y = radius * (1 - cosf(currentRate * netTime))
# targetPos.z = 1
targetPos = np.array([2*(1-math.cos(currentTime)), 2*math.sin(currentTime), 1.0 + math.sin(currentTime)])
# targetVel.x = radius * currentRate * cosf(currentRate * netTime)
# targetVel.y = radius * currentRate * sinf(currentRate * netTime)
# targetVel.z = 0
targetVel = np.array([2*math.sin(currentTime), 2*math.cos(currentTime), math.cos(currentTime)])
# targetAcc.x = -radius * currentRate * currentRate * sinf(currentRate * netTime)
# targetAcc.y = radius * currentRate * currentRate * cosf(currentRate * netTime)
# targetAcc.z = 0
targetAcc = np.array([2*math.cos(currentTime), -2*math.sin(currentTime), -math.sin(currentTime)])
# targetJerk.x = -radius * powF(currentRate,3) * cosf(currentRate * netTime)
# targetJerk.y = -radius * powF(currentRate,3) * sinf(currentRate * netTime)
# targetJerk.z = 0
targetJerk = np.array([-2*math.sin(currentTime), -2*math.cos(currentTime), -math.cos(currentTime)])
# targetSnap.x = radius * powF(currentRate,4) * sinf(currentRate * netTime)
# targetSnap.y = -radius * powF(currentRate,4) * cosf(currentRate * netTime)
# targetSnap.z = 0
targetSnap = np.array([-2*math.cos(currentTime), 2*math.sin(currentTime), math.sin(currentTime)])
zeros2 = [0.0,0.0]
targetYaw = np.array([1.0,0.0])
targetYaw_dot = np.array(zeros2)
targetYaw_ddot = np.array(zeros2)
# targetYaw = np.array([math.cos(currentTime), math.sin(currentTime)])
# targetYaw_dot = np.array([-math.sin(currentTime), math.cos(currentTime)])
# targetYaw_ddot = np.array([-math.cos(currentTime), -math.sin(currentTime)])
# begin geometric control
# Position Error (ep)
statePos = state.pos
r_error = statePos - targetPos
# Velocity Error (ev)
stateVel = state.vel
v_error = stateVel - targetVel
target_force = np.array(zeros3)
target_force[0] = kg_vehicleMass * targetAcc[0] - GeoCtrl_Kpx * r_error[0] - GeoCtrl_Kvx * v_error[0]
target_force[1] = kg_vehicleMass * targetAcc[1] - GeoCtrl_Kpy * r_error[1] - GeoCtrl_Kvy * v_error[1]
target_force[2] = kg_vehicleMass * (targetAcc[2] + GRAVITY_MAGNITUDE) - GeoCtrl_Kpz * r_error[2] - GeoCtrl_Kvz * v_error[2]
# change from - GRAVITY_MAGNITUDE to + GRAVITY_MAGNITUDE for upward z-axis
# Z-Axis [zB]
qq = state.att
# transforming the quaternion q to rotation matrix R
r = Rot.from_quat([qq[1],qq[2],qq[3],qq[0]]) # python's quaternion makes the scalar term the last one
R = r.as_matrix()
# breakpoint()
z_axis = R[:,2]
# target thrust [F] (z-positive)
target_thrust = np.dot(target_force,z_axis)
# Calculate axis [zB_des] (z-positive)
z_axis_desired = target_force/np.linalg.norm(target_force)
# [xC_des]
# x_axis_desired = z_axis_desired x [cos(yaw), sin(yaw), 0]^T
x_c_des = np.array(zeros3)
x_c_des[0] = targetYaw[0]
x_c_des[1] = targetYaw[1]
x_c_des[2] = 0
x_c_des_dot = np.array(zeros3)
x_c_des_dot[0] = targetYaw_dot[0]
x_c_des_dot[1] = targetYaw_dot[1]
x_c_des_dot[2] = 0
x_c_des_ddot = np.array(zeros3)
x_c_des_ddot[0] = targetYaw_ddot[0]
x_c_des_ddot[1] = targetYaw_ddot[1]
x_c_des_ddot[2] = 0
# [yB_des]
y_axis_desired = np.cross(z_axis_desired, x_c_des)
y_axis_desired = y_axis_desired/np.linalg.norm(y_axis_desired)
# [xB_des]
x_axis_desired = np.cross(y_axis_desired, z_axis_desired)
# [eR]
# Slow version
Rdes = np.empty(shape=(3,3))
Rdes[:,0] = x_axis_desired
Rdes[:,1] = y_axis_desired
Rdes[:,2] = z_axis_desired
# Matrix3f Rdes(Vector3f(x_axis_desired.x, y_axis_desired.x, z_axis_desired.x),
# Vector3f(x_axis_desired.y, y_axis_desired.y, z_axis_desired.y),
# Vector3f(x_axis_desired.z, y_axis_desired.z, z_axis_desired.z));
eRM = (np.matmul(Rdes.transpose(),R) - np.matmul(R.transpose(), Rdes)) / 2
# Matrix3<T>(const T ax, const T ay, const T az,
# const T bx, const T by, const T bz,
# const T cx, const T cy, const T cz)
# eR.x = eRM.c.y;
# eR.y = eRM.a.z;
# eR.z = eRM.b.x;
eR = np.array(zeros3)
eR[0] = eRM[2,1]
eR[1] = eRM[0,2]
eR[2] = eRM[1,0]
Omega = state.omega
# compute Omegad
a_error = np.array(zeros3) # error on acceleration
a_error = [0,0,-GRAVITY_MAGNITUDE] + R[:,2]* target_thrust / kg_vehicleMass - targetAcc
# turn GRAVITY_MAGNITUDE to - GRAVITY_MAGNITUDE
# turn - R[:,2]* target_thrust / kg_vehicleMass to + R[:,2]* target_thrust / kg_vehicleMass
target_force_dot = np.array(zeros3) # derivative of target_force
target_force_dot[0] = - GeoCtrl_Kpx * v_error[0] - GeoCtrl_Kvx * a_error[0] + kg_vehicleMass * targetJerk[0]
target_force_dot[1] = - GeoCtrl_Kpy * v_error[1] - GeoCtrl_Kvy * a_error[1] + kg_vehicleMass * targetJerk[1]
target_force_dot[2] = - GeoCtrl_Kpz * v_error[2] - GeoCtrl_Kvz * a_error[2] + kg_vehicleMass * targetJerk[2]
hatOperatorOmega = hatOperator(Omega)
b3_dot = np.matmul(np.matmul(R, hatOperatorOmega),[0,0,1])
target_thrust_dot = + np.dot(target_force_dot,R[:,2]) + np.dot(target_force, b3_dot)
# turn the RHS from - to +
j_error = np.array(zeros3) # error on jerk
j_error = np.dot(R[:,2], target_thrust_dot) / kg_vehicleMass + b3_dot * target_thrust / kg_vehicleMass - targetJerk
# turn - np.dot(R[:,2], target_thrust_dot) / kg_vehicleMass to np.dot(R[:,2], target_thrust_dot) / kg_vehicleMass
# turn - b3_dot * target_thrust / kg_vehicleMass to + b3_dot * target_thrust / kg_vehicleMass
target_force_ddot = np.array(zeros3) # derivative of target_force_dot
target_force_ddot[0] = - GeoCtrl_Kpx * a_error[0] - GeoCtrl_Kvx * j_error[0] + kg_vehicleMass * targetSnap[0]
target_force_ddot[1] = - GeoCtrl_Kpy * a_error[1] - GeoCtrl_Kvy * j_error[1] + kg_vehicleMass * targetSnap[1]
target_force_ddot[2] = - GeoCtrl_Kpz * a_error[2] - GeoCtrl_Kvz * j_error[2] + kg_vehicleMass * targetSnap[2]
b3cCollection = np.array([zeros3,zeros3,zeros3]) # collection of three three-dimensional vectors b3c, b3c_dot, b3c_ddot
b3cCollection = unit_vec(-target_force, -target_force_dot, -target_force_ddot) # unit_vec function is from geometric controller's git repo: https://github.com/fdcl-gwu/uav_geometric_control/blob/master/matlab/aux_functions/deriv_unit_vector.m
b3c = np.array(zeros3)
b3c_dot = np.array(zeros3)
b3c_ddot = np.array(zeros3)
b3c[0] = b3cCollection[0]
b3c[1] = b3cCollection[1]
b3c[2] = b3cCollection[2]
b3c_dot[0] = b3cCollection[3]
b3c_dot[1] = b3cCollection[4]
b3c_dot[2] = b3cCollection[5]
b3c_ddot[0] = b3cCollection[6]
b3c_ddot[1] = b3cCollection[7]
b3c_ddot[2] = b3cCollection[8]
A2 = - np.matmul(hatOperator(x_c_des), b3c)
A2_dot = - np.matmul(hatOperator(x_c_des_dot),b3c) - np.matmul(hatOperator(x_c_des), b3c_dot)
A2_ddot = - np.matmul(hatOperator(x_c_des_ddot), b3c) - np.matmul(hatOperator(x_c_des_dot), b3c_dot) * 2 - np.matmul(hatOperator(x_c_des), b3c_ddot)
b2cCollection = np.array([zeros3,zeros3,zeros3]) # collection of three three-dimensional vectors b2c, b2c_dot, b2c_ddot
b2cCollection = unit_vec(A2, A2_dot, A2_ddot) # unit_vec function is from geometric controller's git repo: https://github.com/fdcl-gwu/uav_geometric_control/blob/master/matlab/aux_functions/deriv_unit_vector.m
b2c = np.array(zeros3)
b2c_dot = np.array(zeros3)
b2c_ddot = np.array(zeros3)
b2c[0] = b2cCollection[0]
b2c[1] = b2cCollection[1]
b2c[2] = b2cCollection[2]
b2c_dot[0] = b2cCollection[3]
b2c_dot[1] = b2cCollection[4]
b2c_dot[2] = b2cCollection[5]
b2c_ddot[0] = b2cCollection[6]
b2c_ddot[1] = b2cCollection[7]
b2c_ddot[2] = b2cCollection[8]
b1c_dot = np.matmul(hatOperator(b2c_dot), b3c) + np.matmul(hatOperator(b2c), b3c_dot)
b1c_ddot = np.matmul(hatOperator(b2c_ddot),b3c) + np.matmul(hatOperator(b2c_dot), b3c_dot) * 2 + np.matmul(hatOperator(b2c), | |
Schema Component : Test valid XML
for key definition, field xpath='attribute::*' , selector contains *
"""
assert_bindings(
schema="msData/identityConstraint/idL046.xsd",
instance="msData/identityConstraint/idL046.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_l045_id_l045_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='@*' and a selector containing '*'."""
    case = "msData/identityConstraint/idL045"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l043_id_l043_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='attribute::qname' and a selector containing
    .//qname1/qname2."""
    case = "msData/identityConstraint/idL043"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l042_id_l042_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='attribute::qname' and a selector containing '*'."""
    case = "msData/identityConstraint/idL042"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l041_id_l041_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='@qname' and a selector containing '*'."""
    case = "msData/identityConstraint/idL041"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l039_id_l039_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='child::ncname:*' and a selector containing '*'."""
    case = "msData/identityConstraint/idL039"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_id_l038_id_l038_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='ncname:*' and a selector containing '*'."""
    case = "msData/identityConstraint/idL038"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l035_id_l035_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='child::*' and a selector containing '*'."""
    case = "msData/identityConstraint/idL035"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l034_id_l034_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='*' and a selector containing .//qname."""
    case = "msData/identityConstraint/idL034"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l033_id_l033_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='*' and a selector containing '*'."""
    case = "msData/identityConstraint/idL033"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l030_id_l030_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='.//qname' and a selector containing qname1."""
    case = "msData/identityConstraint/idL030"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_id_l029_id_l029_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='child::qname' and a selector containing '*'."""
    case = "msData/identityConstraint/idL029"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l028_id_l028_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='qname' and a selector containing '*'."""
    case = "msData/identityConstraint/idL028"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l026_id_l026_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, key definition with field
    xpath='.' and a selector containing '*'."""
    case = "msData/identityConstraint/idL026"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l024_id_l024_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, unique definition with field
    xpath='attribute::ncname:*' and a selector containing '*'."""
    case = "msData/identityConstraint/idL024"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l023_id_l023_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, unique definition with field
    xpath='@ncname:*' and a selector containing '*'."""
    case = "msData/identityConstraint/idL023"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_id_l021_id_l021_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, unique definition with field
    xpath='attribute::*' and a selector containing '*'."""
    case = "msData/identityConstraint/idL021"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l020_id_l020_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, unique definition with field
    xpath='@*' and a selector containing '*'."""
    case = "msData/identityConstraint/idL020"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l018_id_l018_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, unique definition with field
    xpath='attribute::qname' and a selector containing
    .//qname1/qname2."""
    case = "msData/identityConstraint/idL018"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l017_id_l017_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, unique definition with field
    xpath='attribute::qname' and a selector containing '*'."""
    case = "msData/identityConstraint/idL017"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_id_l016_id_l016_v(mode, save_output, output_format):
    """Identity-constraint: valid XML, unique definition with field
    xpath='@qname' and a selector containing '*'."""
    case = "msData/identityConstraint/idL016"
    assert_bindings(
        schema=case + ".xsd",
        instance=case + ".xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_id_l014_id_l014_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : Test valid XML
for unique definition, field xpath='child::ncname:*' , selector
contains *
"""
assert_bindings(
schema="msData/identityConstraint/idL014.xsd",
instance="msData/identityConstraint/idL014.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_l013_id_l013_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : Test valid XML
for unique definition, field xpath='ncname:*' , selector contains *
"""
assert_bindings(
schema="msData/identityConstraint/idL013.xsd",
instance="msData/identityConstraint/idL013.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_l010_id_l010_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : Test valid XML
for unique definition, field xpath='child::*' , selector contains *
"""
assert_bindings(
schema="msData/identityConstraint/idL010.xsd",
instance="msData/identityConstraint/idL010.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_l009_id_l009_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : Test valid XML
    for unique definition, field xpath='*' , selector contains .//qname
    """
    stem = "msData/identityConstraint/idL009"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )


def test_id_l008_id_l008_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : Test valid XML
    for unique definition, field xpath='*' , selector contains *
    """
    stem = "msData/identityConstraint/idL008"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )


def test_id_l005_id_l005_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : Test valid XML
    for unique definition, field xpath='.//qname' , selector contains
    qname1
    """
    stem = "msData/identityConstraint/idL005"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )


def test_id_l004_id_l004_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : Test valid XML
    for unique definition, field xpath='child::qname' , selector contains
    *
    """
    stem = "msData/identityConstraint/idL004"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )


def test_id_l003_id_l003_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : Test valid XML
    for unique definition, field xpath='qname' , selector contains *
    """
    stem = "msData/identityConstraint/idL003"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )


def test_id_l001_id_l001_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : Test valid XML
    for unique definition, field xpath='.' , selector contains *
    """
    stem = "msData/identityConstraint/idL001"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )
def test_id_k017_id_k017_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : keyref defined
    locally within key scope
    """
    stem = "msData/identityConstraint/idK017"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )


def test_id_k015_id_k015_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : constraint
    locating an element that is of simpleType/union
    """
    stem = "msData/identityConstraint/idK015"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )


def test_id_k014_id_k014_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : constraint
    locating an element that is of simpleType/list
    """
    stem = "msData/identityConstraint/idK014"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )


def test_id_k013_id_k013_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : constraint
    locating an element that is of simpleType/restriction
    """
    stem = "msData/identityConstraint/idK013"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )


def test_id_k011a_id_k011_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : constraint
    locating an element that is of complexType/simpleContent Resolution
    pending decision about issue 5780 against the 1.0 spec.
    """
    stem = "msData/identityConstraint/idK011"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )


def test_id_k011_id_k011_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : constraint
    locating an element that is of complexType/simpleContent Resolution
    pending decision about issue 5780 against the 1.0 spec.
    """
    stem = "msData/identityConstraint/idK011"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )
def test_id_k010_id_k010_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : keyref fields
    locating an element refers to a unique locating an element
    """
    stem = "msData/identityConstraint/idK010"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )


def test_id_k009_id_k009_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : keyref fields
    locating an element refers to a key locating an element
    """
    stem = "msData/identityConstraint/idK009"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )


def test_id_k008_id_k008_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : keyref fields
    locating an attribute refers to a unique locating an attribute
    """
    stem = "msData/identityConstraint/idK008"
    assert_bindings(
        schema=stem + ".xsd", instance=stem + ".xml",
        class_name="Root", version="1.1", mode=mode, save_output=save_output,
        output_format=output_format, structure_style="filenames",
    )
def test_id_k007_id_k007_v(mode, save_output, output_format):
"""
TEST :Identity-constraint | |
test_eq_explicit_name(self):
x1 = Variable("x1", [1, 2, 3])
x2 = Variable("x2", [1, 2])
def f(x, y):
return x + y
r1 = NAryFunctionRelation(f, [x1, x2], name="f")
r2 = NAryFunctionRelation(f, [x1, x2], name="f")
self.assertEqual(r1, r2)
def test_eq_expression_function(self):
x1 = Variable("x1", [1, 2, 3])
x2 = Variable("x2", [1, 2])
r1 = NAryFunctionRelation(ExpressionFunction("x1 + x2"), [x1, x2], name="f")
r2 = NAryFunctionRelation(ExpressionFunction("x1 + x2"), [x1, x2], name="f")
self.assertEqual(r1, r2)
def test_not_eq(self):
x1 = Variable("x1", [1, 2, 3])
x2 = Variable("x2", [1, 2])
def f(x, y):
return x + y
r1 = NAryFunctionRelation(f, [x1, x2], name="r1")
r2 = NAryFunctionRelation(f, [x1, x2])
r3 = NAryFunctionRelation(lambda x, y: x + y, [x1, x2], name="r1")
r4 = NAryFunctionRelation(lambda x, y: x + y, [x1, x2], name="r4")
self.assertNotEqual(r1, r2)
self.assertNotEqual(r1, r3)
self.assertNotEqual(r1, r4)
def test_raise_on_simple_repr_with_arbitrary_function(self):
x1 = Variable("x1", [1, 2, 3])
x2 = Variable("x2", [1, 2])
def f(x, y):
return x + y
r1 = NAryFunctionRelation(f, [x1, x2], name="r1")
with self.assertRaises(SimpleReprException):
simple_repr(r1)
def test_simple_repr_with_expression_function(self):
x1 = Variable("x1", [1, 2, 3])
x2 = Variable("x2", [1, 2])
r1 = NAryFunctionRelation(ExpressionFunction("x1 + x2"), [x1, x2], name="r1")
r = simple_repr(r1)
print(r)
self.assertEqual(r["name"], "r1")
self.assertEqual(len(r["variables"]), 2)
self.assertEqual(r["variables"][0]["name"], "x1")
def test_from_repr_with_expression_function(self):
x1 = Variable("x1", [1, 2, 3])
x2 = Variable("x2", [1, 2])
r1 = NAryFunctionRelation(ExpressionFunction("x1 + x2"), [x1, x2], name="r1")
r = simple_repr(r1)
r2 = from_repr(r)
self.assertEqual(r1, r2)
def test_to_repr_when_slicing(self):
x1 = Variable("x1", [2, 4, 1])
r1 = NAryFunctionRelation(ExpressionFunction("x1 * 3"), [x1])
r2 = r1.slice({x1.name: 4})
self.assertEqual(r2.arity, 0)
self.assertEqual(r2.dimensions, [])
self.assertEqual(r2(), 12)
r = simple_repr(r2)
self.assertIsNotNone(r)
def test_to_repr_when_slicing_2(self):
x1 = Variable("x1", [1, 2, 3])
x2 = Variable("x2", [1, 2, 3])
r1 = NAryFunctionRelation(ExpressionFunction("x1 + x2"), [x1, x2], name="r1")
r2 = r1.slice({x1.name: 4})
self.assertEqual(r2.arity, 1)
self.assertEqual(r2.dimensions, [x2])
self.assertEqual(r2(x2=10), 14)
self.assertEqual(r1.name, "r1")
self.assertEqual(r2.name, "r1")
r = simple_repr(r2)
self.assertIsNotNone(r)
def test_hash_with_expressionfunction(self):
x1 = Variable("x1", [2, 4, 1])
x2 = Variable("x2", [2, 4, 1])
r1 = NAryFunctionRelation(ExpressionFunction("x1 * 3 + x2"), [x1, x2])
h = hash(r1)
self.assertIsNotNone(h)
self.assertEqual(
h, hash(NAryFunctionRelation(ExpressionFunction("x1 * 3 + x2"), [x1, x2]))
)
self.assertNotEqual(
h,
hash(
NAryFunctionRelation(
ExpressionFunction("x1 * 3 + x2"), [x1, x2], name="foo"
)
),
)
self.assertNotEqual(
h, hash(NAryFunctionRelation(ExpressionFunction("x1 * 2 + x2"), [x1, x2]))
)
def test_hash_not_equal_with_lambda(self):
x1_v = Variable("x1", [2, 4, 1])
x2_v = Variable("x2", [2, 4, 1])
r1 = NAryFunctionRelation(lambda x1, x2: x1 * 3 + x2, [x1_v, x2_v])
h = hash(r1)
self.assertIsNotNone(h)
self.assertNotEqual(
h, hash(NAryFunctionRelation(lambda x1, x2: x1 * 3 + x2, [x1_v, x2_v]))
)
def test_function_with_kwargs(self):
v1 = Variable("v1", [1, 2, 3])
v2 = Variable("v2", [1, 2, 3])
v3 = Variable("v3", [1, 2, 3])
def f(**kwargs):
r = 0
for k in kwargs:
r += kwargs[k]
return r
r = NAryFunctionRelation(f, [v1, v2, v3], "f_rel")
obtained = r(v1=2, v3=1, v2=1)
self.assertEqual(obtained, 4)
sliced = r.slice({"v1": 3})
self.assertEqual(sliced(v2=2, v3=2), 7)
self.assertEqual(len(sliced.dimensions), 2)
self.assertIn(v2, sliced.dimensions)
self.assertIn(v3, sliced.dimensions)
def test_function_with_varargs(self):
v1 = Variable("v1", [1, 2, 3])
v2 = Variable("v2", [1, 2, 3])
v3 = Variable("v3", [1, 2, 3])
def f(*args):
c = 0
for a in args:
c += args
return c
r = NAryFunctionRelation(f, [v1, v2, v3], "f_rel")
with self.assertRaises(TypeError):
obtained = r(v1=2, v3=1, v2=1)
class NAryFunctionRelationDecoratorTests(unittest.TestCase):
    """Behaviour of the AsNAryFunctionRelation decorator."""

    def test_1var(self):
        """Decorating a unary function yields a named 1-ary relation."""
        x1 = Variable("x1", list(range(10)))

        @AsNAryFunctionRelation(x1)
        def x1_cost(x):
            return x * 0.8

        self.assertEqual(x1_cost.name, "x1_cost")
        self.assertEqual(x1_cost.arity, 1)
        self.assertIn("x1", [v.name for v in x1_cost.dimensions])
        self.assertEqual(x1_cost.dimensions, [x1])
        self.assertEqual(x1_cost(2), 1.6)

    def test_2var(self):
        """Decorating a binary function yields a 2-ary relation."""
        domain = list(range(10))
        x1 = Variable("x1", domain)
        x2 = Variable("x2", domain)

        @AsNAryFunctionRelation(x1, x2)
        def phi(x, y):
            return x + y

        self.assertEqual(phi.name, "phi")
        self.assertEqual(phi.arity, 2)
        self.assertIn("x1", [v.name for v in phi.dimensions])
        self.assertIn("x2", [v.name for v in phi.dimensions])
        self.assertEqual(phi(2, 3), 5)
def get_1var_rel():
    """Return a variable and a zero-initialised unary matrix relation."""
    x = Variable("x1", ["a", "b", "c"])
    return x, NAryMatrixRelation([x])


def get_2var_rel():
    """Return two variables and a 3x2 matrix relation over them."""
    x1 = Variable("x1", ["a", "b", "c"])
    x2 = Variable("x2", ["1", "2"])
    return x1, x2, NAryMatrixRelation([x1, x2], [[1, 2], [3, 4], [5, 6]])
class NAryMatrixRelationInitTest(unittest.TestCase):
    """Construction and value access for NAryMatrixRelation.

    Covers 0/1/2/3-ary relations built empty (zero-filled), from nested
    lists and from numpy arrays, plus getting/setting values via
    positional lists or variable-name dicts.
    """

    def test_init_zero_no_var(self):
        # A relation over no variable is a 0-ary constant (defaults to 0).
        u1 = NAryMatrixRelation([])
        self.assertEqual(u1.dimensions, [])
        self.assertEqual(u1.arity, 0)
        val = u1.get_value_for_assignment([])
        self.assertEqual(val, 0)

    def test_init_zero_one_var(self):
        x1, u1 = get_1var_rel()
        self.assertEqual(u1.dimensions, [x1])
        self.assertEqual(u1.arity, 1)
        self.assertEqual(u1.shape, (3,))

    def test_init_zero_2var(self):
        x1 = Variable("x1", ["a", "b", "c"])
        x2 = Variable("x2", ["1", "2"])
        u1 = NAryMatrixRelation([x1, x2])
        self.assertEqual(u1.dimensions, [x1, x2])
        self.assertEqual(u1.arity, 2)
        # Shape follows the domain sizes, in variable order.
        self.assertEqual(u1.shape, (3, 2))

    def test_init_matrix_one_var(self):
        x1, u1 = get_1var_rel()
        self.assertEqual(u1.dimensions, [x1])
        self.assertEqual(u1.arity, 1)
        self.assertEqual(u1.shape, (3,))

    def test_init_array_one_var(self):
        # A plain python list is accepted as the value container.
        x1 = Variable("x1", ["a", "b", "c"])
        u1 = NAryMatrixRelation([x1], [0, 2, 3])
        self.assertEqual(u1.dimensions, [x1])
        self.assertEqual(u1.arity, 1)
        self.assertEqual(u1.shape, (3,))
        self.assertEqual(u1("b"), 2)

    def test_init_nparray_one_var(self):
        # A numpy array works the same as a python list.
        x1 = Variable("x1", ["a", "b", "c"])
        u1 = NAryMatrixRelation([x1], np.array([0, 2, 3]))
        self.assertEqual(u1.dimensions, [x1])
        self.assertEqual(u1.arity, 1)
        self.assertEqual(u1.shape, (3,))
        self.assertEqual(u1("b"), 2)

    def test_init_matrix_three_var(self):
        x1 = Variable("x1", ["a", "b", "c"])
        x2 = Variable("x2", ["1", "2"])
        x3 = Variable("x3", ["y", "z"])
        matrix = [  # for x1 = a
            [
                [1, 2],  # values when x2=1, x3 = y or z
                [3, 4],
            ],  # values when x2=2, x3 = y or z
            # for x1 = b
            [[5, 6], [7, 8]],  # values when
            # for x1 = c
            [[9, 10], [11, 12]],
        ]
        u1 = NAryMatrixRelation([x1, x2, x3], np.array(matrix))
        self.assertEqual(u1.get_value_for_assignment(["a", "2", "z"]), 4)
        self.assertEqual(u1.get_value_for_assignment(["b", "1", "y"]), 5)
        self.assertEqual(u1.get_value_for_assignment(["c", "1", "z"]), 10)

    def test_value_one_var(self):
        # Smoke test: reading from a zero-initialised relation must not raise.
        x1 = Variable("x1", ["a", "b", "c"])
        u1 = NAryMatrixRelation([x1])
        print(u1.get_value_for_assignment(["a"]))

    def test_init_matrix_2var(self):
        x1, x2, u1 = get_2var_rel()
        self.assertEqual(u1.dimensions, [x1, x2])
        self.assertEqual(u1.arity, 2)
        self.assertEqual(u1.shape, (3, 2))

    def test_init_from_generated_matrix(self):
        d = Domain("d", "d", range(10))
        v1 = Variable("v1", d)
        v2 = Variable("v2", d)
        matrix = assignment_matrix([v1, v2], 0)
        r = NAryMatrixRelation([v1, v2], matrix, "r")
        obtained = r(v1=1, v2=0)
        self.assertEqual(obtained, 0)

    def test_value_matrix_one_var(self):
        x1 = Variable("x1", ["a", "b", "c"])
        u1 = NAryMatrixRelation([x1], np.array([1, 2, 3], np.int8))
        self.assertEqual(u1.get_value_for_assignment(["b"]), 2)
        self.assertEqual(u1.get_value_for_assignment(["a"]), 1)
        self.assertEqual(u1.get_value_for_assignment(["c"]), 3)

    def test_value_matrix_2var(self):
        x1, x2, u1 = get_2var_rel()
        self.assertEqual(u1.get_value_for_assignment(["b", "2"]), 4)
        self.assertEqual(u1.get_value_for_assignment(["c", "1"]), 5)

    def test_get_value_as_array(self):
        x1 = Variable("x1", [2, 4, 6])
        x2 = Variable("x2", [1, 3, 5])
        u1 = NAryMatrixRelation([x1, x2], [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        # When getting value with get_value_for_assignment with a list,
        # we must make sure the values are in the right order.
        self.assertEqual(u1.get_value_for_assignment([4, 1]), 4)
        self.assertEqual(u1.get_value_for_assignment([2, 5]), 3)

    def test_get_value_as_dict(self):
        x1 = Variable("x1", [2, 4, 6])
        x2 = Variable("x2", [1, 3, 5])
        u1 = NAryMatrixRelation([x1, x2], [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        # Getting value with get_value_for_assignment with a dict
        # using a dict avoid getting the order of the variable wrong.
        self.assertEqual(u1.get_value_for_assignment({"x2": 1, "x1": 4}), 4)
        self.assertEqual(u1.get_value_for_assignment({"x1": 2, "x2": 5}), 3)
        # as a callable
        self.assertEqual(u1(x2=1, x1=4), 4)
        self.assertEqual(u1(x1=2, x2=5), 3)

    def test_set_value_as_array(self):
        x1 = Variable("x1", [2, 4, 6])
        x2 = Variable("x2", [1, 3, 5])
        u1 = NAryMatrixRelation([x1, x2], [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        # set_value_for_assignment returns a new relation (no in-place mutation).
        u2 = u1.set_value_for_assignment([4, 1], 0)
        # When setting value with get_value_for_assignment with a list,
        # we must make sure the values are in the right order.
        self.assertEqual(u2.get_value_for_assignment([4, 1]), 0)
        u2 = u1.set_value_for_assignment([2, 5], 0)
        self.assertEqual(u2.get_value_for_assignment([2, 5]), 0)

    def test_set_value_as_dict(self):
        x1 = Variable("x1", [2, 4, 6])
        x2 = Variable("x2", [1, 3, 5])
        u1 = NAryMatrixRelation([x1, x2], [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        u2 = u1.set_value_for_assignment({"x2": 1, "x1": 4}, 0)
        self.assertEqual(u2.get_value_for_assignment([4, 1]), 0)
        self.assertEqual(u2.get_value_for_assignment({"x2": 1, "x1": 4}), 0)
        u2 = u1.set_value_for_assignment({"x1": 2, "x2": 5}, 0)
        self.assertEqual(u2.get_value_for_assignment([2, 5]), 0)
        u2 = u1.set_value_for_assignment({"x1": 2, "x2": 5}, 3)
        self.assertEqual(u2.get_value_for_assignment([2, 5]), 3)

    def test_set_float_value_on_zeroed_init(self):
        x1 = Variable("x1", ["R", "G"])
        x2 = Variable("x2", ["R", "G"])
        u1 = NAryMatrixRelation([x1, x2], name="u1")
        # we set a float value, we want to find our float back!
        u1 = u1.set_value_for_assignment({"x1": "R", "x2": "G"}, 5.2)
        self.assertEqual(u1(**{"x1": "R", "x2": "G"}), 5.2)
class NAryMatrixRelationSliceTest(unittest.TestCase):
def test_slice_1var(self):
x1 = Variable("x1", ["a", "b", "c"])
u1 = NAryMatrixRelation([x1], np.array([1, 2, 3], np.int8))
# Slicing a unary relation on its only variable gives us a 0 ary
# relation :
s = u1.slice({x1.name: "a"})
self.assertEqual(s.arity, 0)
self.assertEqual(s.get_value_for_assignment(), 1)
def test_slice_2var(self):
x1, x2, u1 = get_2var_rel()
s = u1.slice({x1.name: "a"})
self.assertEqual(s.arity, 1)
self.assertEqual(s.shape, (len(x2.domain),))
self.assertEqual(s.get_value_for_assignment(["2"]), 2)
def test_slice_2var_ignore_extra(self):
x1, x2, u1 = get_2var_rel()
# When setting ignore_extra_vars, it must silently ignore the extra
# x4 variable
x4 = Variable("x4", [1, 2])
s = u1.slice({x1.name: "a", x4.name: 1}, ignore_extra_vars=True)
self.assertEqual(s.arity, 1)
self.assertEqual(s.shape, (len(x2.domain),))
| |
from __future__ import absolute_import
import csv
import datetime
import logging
import sys
from collections import OrderedDict
from decimal import Decimal
from pathlib import Path
import click
import yaml
import yaml.resolver
from click import echo
import json
from yaml import Node
from datacube.index.index import Index
from datacube.index.exceptions import MissingRecordError
from datacube.model import Dataset
from datacube.model import Range
from datacube.ui import click as ui
from datacube.ui.click import cli
from datacube.ui.common import get_metadata_path
from datacube.utils import read_documents, changes, InvalidDocException
from datacube.utils.serialise import SafeDatacubeDumper
try:
from typing import Iterable
except ImportError:
pass
_LOG = logging.getLogger('datacube-dataset')
class BadMatch(Exception):
    """Raised when a dataset document matches zero or several products."""
    pass


@cli.group(name='dataset', help='Dataset management commands')
def dataset_cmd():
    # Click command group: the add/update/info/search/archive/restore
    # subcommands below attach themselves to this group.
    pass
def find_matching_product(rules, doc):
    """Return the single product whose rule metadata is contained in *doc*.

    Raises BadMatch if no rule, or more than one rule, matches.

    :rtype: datacube.model.DatasetType
    """
    matched = [rule for rule in rules if changes.contains(doc, rule['metadata'])]
    if not matched:
        raise BadMatch('No matching Product found for %s' % json.dumps(doc, indent=4))
    if len(matched) > 1:
        raise BadMatch('Too many matching Products found for %s. Matched %s.' % (
            doc.get('id', 'unidentified'), matched))
    only_match, = matched
    return only_match['type']
def check_dataset_consistent(dataset):
    """Check the dataset carries every measurement its product declares.

    :type dataset: datacube.model.Dataset
    :return: (Is consistent, [error message|None])
    :rtype: (bool, str or None)
    """
    declared = set(dataset.type.measurements.keys())
    present = set(dataset.measurements.keys())
    # Any declared-but-absent measurement makes the dataset inconsistent.
    if declared - present:
        return False, "measurement fields don't match type specification"
    return True, None
def create_dataset(dataset_doc, uri, rules):
    """Build a Dataset from a metadata doc, recursively creating its sources.

    :rtype datacube.model.Dataset:
    """
    dataset_type = find_matching_product(rules, dataset_doc)
    # Source documents are embedded in the metadata doc; they carry no URI
    # of their own, hence uri=None for the recursive calls.
    sources = {cls: create_dataset(source_doc, None, rules)
               for cls, source_doc in dataset_type.dataset_reader(dataset_doc).sources.items()}
    return Dataset(dataset_type, dataset_doc, uris=[uri] if uri else None, sources=sources)
def load_rules_from_file(filename, index):
    """Load matching rules from a YAML file, resolving product names.

    Returns None (after logging an error) when a referenced product is
    unknown or when a rule fails to match its own product's metadata.
    """
    _, rules = next(read_documents(Path(filename)))
    # TODO: verify schema
    for rule in rules:
        product = index.products.get_by_name(rule['type'])
        if not product:
            _LOG.error('DatasetType %s does not exists', rule['type'])
            return None
        # Sanity check: a rule must at least match its own product.
        if not changes.contains(product.metadata_doc, rule['metadata']):
            _LOG.error('DatasetType %s can\'t be matched by its own rule', rule['type'])
            return None
        rule['type'] = product
    return rules
def load_rules_from_types(index, type_names=None):
    """Build one catch-all rule per product (all products if no names given).

    Returns None (after logging an error) when a named product is unknown.
    """
    if type_names:
        types = []
        for name in type_names:
            product = index.products.get_by_name(name)
            if not product:
                _LOG.error('DatasetType %s does not exists', name)
                return None
            types.append(product)
    else:
        types = list(index.products.get_all())
    return [{'type': product, 'metadata': product.metadata_doc} for product in types]
def load_datasets(datasets, rules):
    """Yield a Dataset for every metadata document found under *datasets*.

    Paths with no readable metadata, documents matching no product, and
    inconsistent datasets are logged and skipped rather than aborting.
    """
    for dataset_path in datasets:
        metadata_path = get_metadata_path(Path(dataset_path))
        if not metadata_path or not metadata_path.exists():
            _LOG.error('No supported metadata docs found for dataset %s', dataset_path)
            continue
        try:
            # NOTE(review): the loop variable rebinds the outer metadata_path
            # (read_documents may yield several docs per file); the except
            # below therefore logs the last path yielded.
            for metadata_path, metadata_doc in read_documents(metadata_path):
                uri = metadata_path.absolute().as_uri()
                try:
                    dataset = create_dataset(metadata_doc, uri, rules)
                except BadMatch as e:
                    _LOG.error('Unable to create Dataset for %s: %s', uri, e)
                    continue
                is_consistent, reason = check_dataset_consistent(dataset)
                if not is_consistent:
                    _LOG.error("Dataset %s inconsistency: %s", dataset.id, reason)
                    continue
                yield dataset
        except InvalidDocException:
            _LOG.error("Failed reading documents from %s", metadata_path)
            continue
def parse_match_rules_options(index, match_rules, dtype, auto_match):
    """Resolve the CLI matching options into a list of rules (None on error)."""
    # Default behaviour when nothing was specified: auto-match on metadata.
    if not any((match_rules, dtype, auto_match)):
        auto_match = True
    if match_rules:
        return load_rules_from_file(match_rules, index)
    assert dtype or auto_match
    return load_rules_from_types(index, dtype)
@dataset_cmd.command('add', help="Add datasets to the Data Cube")
@click.option('--match-rules', '-r', help='Rules to be used to associate datasets with products',
              type=click.Path(exists=True, readable=True, writable=False, dir_okay=False))
@click.option('--dtype', '-t', help='Product to be associated with the datasets',
              multiple=True)
@click.option('--auto-match', '-a', help="Automatically associate datasets with products by matching metadata",
              is_flag=True, default=False)
@click.option('--sources-policy', type=click.Choice(['verify', 'ensure', 'skip']), default='verify',
              help="""'verify' - verify source datasets' metadata (default)
'ensure' - add source dataset if it doesn't exist
'skip' - dont add the derived dataset if source dataset doesn't exist""")
@click.option('--dry-run', help='Check if everything is ok', is_flag=True, default=False)
@click.argument('dataset-paths',
                type=click.Path(exists=True, readable=True, writable=False), nargs=-1)
@ui.pass_index()
def index_cmd(index, match_rules, dtype, auto_match, sources_policy, dry_run, dataset_paths):
    """CLI entry point: match datasets to products and add them to the index."""
    rules = parse_match_rules_options(index, match_rules, dtype, auto_match)
    if rules is None:
        # Rule loading already logged the error.
        return
    # If outputting directly to terminal, show a progress bar.
    if sys.stdout.isatty():
        with click.progressbar(dataset_paths, label='Indexing datasets') as dataset_path_iter:
            index_dataset_paths(sources_policy, dry_run, index, rules, dataset_path_iter)
    else:
        index_dataset_paths(sources_policy, dry_run, index, rules, dataset_paths)
def index_dataset_paths(sources_policy, dry_run, index, rules, dataset_paths):
    """Match each dataset under *dataset_paths* and add it to the index."""
    for matched in load_datasets(dataset_paths, rules):
        _LOG.info('Matched %s', matched)
        if dry_run:
            continue
        try:
            index.datasets.add(matched, sources_policy=sources_policy)
        except (ValueError, MissingRecordError) as e:
            _LOG.error('Failed to add dataset %s: %s', matched.local_uri, e)
def parse_update_rules(allow_any):
    """Translate dotted-key CLI options into an updates_allowed mapping."""
    # 'a.b.c' becomes the offset tuple ('a', 'b', 'c') -> allow-any policy.
    return {tuple(key_str.split('.')): changes.allow_any for key_str in allow_any}
@dataset_cmd.command('update', help="Update datasets in the Data Cube")
@click.option('--allow-any', help="Allow any changes to the specified key (a.b.c)", multiple=True)
@click.option('--match-rules', '-r', help='Rules to be used to associate datasets with products',
              type=click.Path(exists=True, readable=True, writable=False, dir_okay=False))
@click.option('--dtype', '-t', help='Product to be associated with the datasets', multiple=True)
@click.option('--auto-match', '-a', help="Automatically associate datasets with products by matching metadata",
              is_flag=True, default=False)
@click.option('--dry-run', help='Check if everything is ok', is_flag=True, default=False)
@click.argument('datasets',
                type=click.Path(exists=True, readable=True, writable=False), nargs=-1)
@ui.pass_index()
def update_cmd(index, allow_any, match_rules, dtype, auto_match, dry_run, datasets):
    """CLI entry point: re-read dataset docs and apply permitted updates."""
    rules = parse_match_rules_options(index, match_rules, dtype, auto_match)
    if rules is None:
        # Rule loading already logged the error.
        return
    updates = parse_update_rules(allow_any)
    success, fail = 0, 0
    for dataset in load_datasets(datasets, rules):
        _LOG.info('Matched %s', dataset)
        if not dry_run:
            try:
                index.datasets.update(dataset, updates_allowed=updates)
                success += 1
                echo('Updated %s' % dataset.id)
            except ValueError as e:
                fail += 1
                echo('Failed to update %s: %s' % (dataset.id, e))
        else:
            # Dry run: only report whether the update would be allowed.
            if update_dry_run(index, updates, dataset):
                success += 1
            else:
                fail += 1
    echo('%d successful, %d failed' % (success, fail))
def update_dry_run(index, updates, dataset):
    """Report whether *dataset* could be updated, without changing anything.

    Returns True when the index would accept the update.
    """
    try:
        can_update, safe_changes, unsafe_changes = index.datasets.can_update(
            dataset, updates_allowed=updates)
    except ValueError as e:
        echo('Cannot update %s: %s' % (dataset.id, e))
        return False
    summary = (dataset.id, len(unsafe_changes), len(safe_changes))
    if can_update:
        echo('Can update %s: %s unsafe changes, %s safe changes' % summary)
    else:
        echo('Cannot update %s: %s unsafe changes, %s safe changes' % summary)
    return can_update
def build_dataset_info(index, dataset, show_sources=False, show_derived=False, depth=1, max_depth=99):
    # type: (Index, Dataset, bool, bool, int, int) -> dict
    """Assemble an ordered info dict for *dataset*, optionally recursing into
    its source and/or derived datasets up to *max_depth* levels."""
    info = OrderedDict((
        ('id', str(dataset.id)),
        ('product', dataset.type.name),
        ('status', 'archived' if dataset.is_archived else 'active')
    ))
    # Optional when loading a dataset.
    if dataset.indexed_time is not None:
        info['indexed'] = dataset.indexed_time
    info['locations'] = dataset.uris
    info['fields'] = dataset.metadata.search_fields
    if depth < max_depth:
        if show_sources:
            info['sources'] = {key: build_dataset_info(index, source,
                                                       show_sources=True, show_derived=False,
                                                       depth=depth + 1, max_depth=max_depth)
                               for key, source in dataset.sources.items()}
        if show_derived:
            info['derived'] = [build_dataset_info(index, derived,
                                                  show_sources=False, show_derived=True,
                                                  depth=depth + 1, max_depth=max_depth)
                               for derived in index.datasets.get_derived(dataset.id)]
    return info
def _write_csv(infos):
    """Write dataset info dicts to stdout as CSV (first location only)."""
    out = csv.DictWriter(sys.stdout, ['id', 'status', 'product', 'location'], extrasaction='ignore')
    out.writeheader()

    def with_first_location(row):
        # CSV has a single 'location' column: take the first URI, if any.
        locations_ = row['locations']
        row['location'] = locations_[0] if locations_ else None
        return row

    out.writerows(with_first_location(row) for row in infos)
def _write_yaml(infos):
    """
    Dump yaml data with support for OrderedDicts.

    Allows for better human-readability of output: such as dataset ID field first, sources last.
    (Ordered dicts are output identically to normal yaml dicts: their order is purely for readability)
    """
    # Streams each info dict as a separate YAML document to stdout.
    return yaml.dump_all(infos, sys.stdout, SafeDatacubeDumper, default_flow_style=False, indent=4)
# Dispatch table mapping the '-f' CLI option to an output writer.
_OUTPUT_WRITERS = {
    'csv': _write_csv,
    'yaml': _write_yaml,
}
@dataset_cmd.command('info', help="Display dataset information")
@click.option('--show-sources', help='Also show source datasets', is_flag=True, default=False)
@click.option('--show-derived', help='Also show derived datasets', is_flag=True, default=False)
@click.option('-f', help='Output format',
              type=click.Choice(list(_OUTPUT_WRITERS)), default='yaml', show_default=True)
@click.option('--max-depth',
              help='Maximum sources/derived depth to travel',
              type=int,
              # Unlikely to be hit, but will avoid total-death by circular-references.
              default=99)
@click.argument('ids', nargs=-1)
@ui.pass_index()
def info_cmd(index, show_sources, show_derived, f, max_depth, ids):
    # type: (Index, bool, bool, str, int, Iterable[str]) -> None
    """Print info for each dataset id; exit code is the count of missing ids."""
    # Using an array wrapper to get around the lack of "nonlocal" in py2
    missing_datasets = [0]

    def get_datasets(ids):
        # Yields found datasets; counts and reports the missing ones.
        for id_ in ids:
            dataset = index.datasets.get(id_, include_sources=show_sources)
            if dataset:
                yield dataset
            else:
                click.echo('%s missing' % id_, err=True)
                missing_datasets[0] += 1

    _OUTPUT_WRITERS[f](
        build_dataset_info(index,
                           dataset,
                           show_sources=show_sources,
                           show_derived=show_derived,
                           max_depth=max_depth)
        for dataset in get_datasets(ids)
    )
    sys.exit(missing_datasets[0])
@dataset_cmd.command('search')
@click.option('--limit', help='Limit the number of results',
              type=int, default=None)
@click.option('-f', help='Output format',
              type=click.Choice(list(_OUTPUT_WRITERS)), default='yaml', show_default=True)
@ui.parsed_search_expressions
@ui.pass_index()
def search_cmd(index, limit, f, expressions):
    """
    Search available Datasets
    """
    # Stream the search results straight into the chosen writer (csv/yaml).
    datasets = index.datasets.search(limit=limit, **expressions)
    _OUTPUT_WRITERS[f](
        build_dataset_info(index, dataset)
        for dataset in datasets
    )
def _get_derived_set(index, id_):
    """
    Get a single flat set of all derived datasets.
    (children, grandchildren, great-grandchildren...)
    """
    collected = {index.datasets.get(id_)}
    pending = {id_}
    # Breadth-unordered traversal of the derived-dataset graph.
    while pending:
        children = index.datasets.get_derived(pending.pop())
        pending.update(child.id for child in children)
        collected.update(children)
    return collected
@dataset_cmd.command('archive', help="Archive datasets")
@click.option('--archive-derived', '-d', help='Also recursively archive derived datasets', is_flag=True, default=False)
@click.option('--dry-run', help="Don't archive. Display datasets that would get archived",
              is_flag=True, default=False)
@click.argument('ids', nargs=-1)
@ui.pass_index()
def archive_cmd(index, archive_derived, dry_run, ids):
    """Archive each given dataset, optionally with all its derived datasets."""
    for id_ in ids:
        to_process = _get_derived_set(index, id_) if archive_derived else [index.datasets.get(id_)]
        for d in to_process:
            click.echo('archiving %s %s %s' % (d.type.name, d.id, d.local_uri))
        if not dry_run:
            # Archive the whole batch for this id in a single index call.
            index.datasets.archive(d.id for d in to_process)
@dataset_cmd.command('restore', help="Restore datasets")
@click.option('--restore-derived', '-d', help='Also recursively restore derived datasets', is_flag=True, default=False)
@click.option('--dry-run', help="Don't restore. Display datasets that would get restored",
              is_flag=True, default=False)
@click.option('--derived-tolerance-seconds',
              help="Only restore derived datasets that were archived "
                   "this recently to the original dataset",
              default=10 * 60)
@click.argument('ids', nargs=-1)
@ui.pass_index()
def restore_cmd(index, restore_derived, derived_tolerance_seconds, dry_run, ids):
    """Restore archived datasets, optionally with recently-archived derived ones."""
    tolerance = datetime.timedelta(seconds=derived_tolerance_seconds)
    for id_ in ids:
        _restore_one(dry_run, id_, index, restore_derived, tolerance)
def _restore_one(dry_run, id_, index, restore_derived, tolerance):
"""
:type index: datacube.index.index.Index
:type restore_derived: bool
:type tolerance: datetime.timedelta
:type dry_run: bool
:type id_: str
"""
target_dataset = index.datasets.get(id_)
to_process = _get_derived_set(index, id_) if restore_derived else {target_dataset}
_LOG.debug("%s selected", len(to_process))
# Only the already-archived ones.
to_process = {d for d in to_process if d.is_archived}
_LOG.debug("%s selected are archived", len(to_process))
def within_tolerance(dataset):
if not dataset.is_archived:
return False
t = target_dataset.archived_time
return (t - tolerance) <= | |
"""Base class for all target calculators."""
from abc import ABC, abstractmethod
from ase.units import Rydberg, Bohr, kB
import ase.io
import numpy as np
from mala.common.parameters import Parameters, ParametersTargets
from mala.common.parallelizer import printout
from mala.targets.calculation_helpers import fermi_function
class TargetBase(ABC):
"""
Base class for all target quantity parser.
Target parsers read the target quantity
(i.e. the quantity the NN will learn to predict) from a specified file
format and performs postprocessing calculations on the quantity.
Parameters
----------
params : mala.common.parameters.Parameters or
mala.common.parameters.ParametersTargets
Parameters used to create this TargetBase object.
"""
    def __init__(self, params):
        """
        Create the target parser and initialize cached DFT quantities.

        Parameters
        ----------
        params : mala.common.parameters.Parameters or
                 mala.common.parameters.ParametersTargets
            Parameters used to create this TargetBase object.
        """
        # Accept either a full Parameters object (use its .targets
        # sub-object) or a ParametersTargets instance directly.
        if isinstance(params, Parameters):
            self.parameters = params.targets
        elif isinstance(params, ParametersTargets):
            self.parameters = params
        else:
            raise Exception("Wrong type of parameters for Targets class.")
        # Quantities describing a reference DFT calculation; these stay
        # None / zero until read_additional_calculation_data fills them.
        self.fermi_energy_eV = None
        self.temperature_K = None
        self.grid_spacing_Bohr = None
        self.number_of_electrons = None
        self.number_of_electrons_from_eigenvals = None
        self.band_energy_dft_calculation = None
        self.total_energy_dft_calculation = None
        self.grid_dimensions = [0, 0, 0]
        self.atoms = None
        self.electrons_per_atom = None
        # Default Quantum Espresso input for total-energy-module calls.
        # NOTE: this assignment goes through the qe_input_data property
        # setter below and is stored as self._qe_input_data.
        self.qe_input_data = {
                "occupations": 'smearing',
                "calculation": 'scf',
                "restart_mode": 'from_scratch',
                "prefix": 'MALA',
                "pseudo_dir": self.parameters.pseudopotential_path,
                "outdir": './',
                "ibrav": None,
                "smearing": 'fermi-dirac',
                "degauss": None,
                "ecutrho": None,
                "ecutwfc": None,
                "nosym": True,
                "noinv": True,
        }
        # It has been shown that the number of k-points
        # does not really affect the QE post-processing calculation.
        # This is because we only evaulate density-dependent contributions
        # with QE. However, there were some (very small) inaccuracies when
        # operating only at the gamma point. Consequently, MALA defaults
        # to a small k-grid to ensure best accuracy and performance.
        # UPDATE 23.04.2021: As per discussion bewteen <NAME> and Lenz
        # Fiedler, the number of k-points is moved to 1.
        # The small inaccuracies are neglected for now.
        self.kpoints = None  # (2, 2, 2)
        # Mapping of element label -> pseudopotential file name, filled
        # when parsing a QE output file.
        self.qe_pseudopotentials = {}
    @abstractmethod
    def get_feature_size(self):
        """
        Get dimension of this target if used as feature in ML.

        Returns
        -------
        int
            Feature dimension; must be provided by the concrete subclass.
        """
        pass
@property
def qe_input_data(self):
"""Input data for QE TEM calls."""
# Update the pseudopotential path from Parameters.
self._qe_input_data["pseudo_dir"] = \
self.parameters.pseudopotential_path
return self._qe_input_data
    @qe_input_data.setter
    def qe_input_data(self, value):
        # Replace the cached QE input dictionary wholesale; the getter
        # refreshes "pseudo_dir" on every access.
        self._qe_input_data = value
def read_from_cube(self):
"""Read the quantity from a .cube file."""
raise Exception("No function defined to read this quantity "
"from a .cube file.")
def read_from_qe_dos_txt(self):
"""Read the quantity from a Quantum Espresso .dos.txt file."""
raise Exception("No function defined to read this quantity "
"from a qe.dos.txt file")
def get_density(self):
"""Get the electronic density."""
raise Exception("No function to calculate or provide the "
"density has been implemented for this target type.")
def get_density_of_states(self):
"""Get the density of states."""
raise Exception("No function to calculate or provide the"
"density of states (DOS) has been implemented "
"for this target type.")
def get_band_energy(self):
"""Get the band energy."""
raise Exception("No function to calculate or provide the"
"band energy has been implemented for this target "
"type.")
def get_number_of_electrons(self):
"""Get the number of electrons."""
raise Exception("No function to calculate or provide the number of"
" electrons has been implemented for this target "
"type.")
def get_total_energy(self):
"""Get the total energy."""
raise Exception("No function to calculate or provide the number "
"of electons has been implemented for this target "
"type.")
def read_additional_calculation_data(self, data_type, data=""):
"""
Read in additional input about a calculation.
This is e.g. necessary when we operate with preprocessed
data for the training itself but want to take into account other
physical quantities (such as the fermi energy or the electronic
temperature) for post processing.
Parameters
----------
data_type : string
Type of data or file that is used. Currently supporte are:
- "qe.out" : Read the additional information from a QuantumESPRESSO
output file.
- "atoms+grid" : Provide a grid and an atoms object from which to
predict. Except for the number of electrons,
this mode will not change any member variables;
values have to be adjusted BEFORE.
data : string or list
Data from which additional calculation data is inputted.
"""
if data_type == "qe.out":
# Reset everything.
self.fermi_energy_eV = None
self.temperature_K = None
self.grid_spacing_Bohr = None
self.number_of_electrons = None
self.band_energy_dft_calculation = None
self.total_energy_dft_calculation = None
self.grid_dimensions = [0, 0, 0]
self.atoms = None
# Read the file.
self.atoms = ase.io.read(data, format="espresso-out")
vol = self.atoms.get_volume()
self.fermi_energy_eV = self.atoms.get_calculator().\
get_fermi_level()
# Parse the file for energy values.
total_energy = None
past_calculation_part = False
bands_included = True
with open(data) as out:
pseudolinefound = False
lastpseudo = None
for line in out:
if "End of self-consistent calculation" in line:
past_calculation_part = True
if "number of electrons =" in line:
self.number_of_electrons = np.float64(line.split('=')
[1])
if "Fermi-Dirac smearing, width (Ry)=" in line:
self.temperature_K = np.float64(line.split('=')[2]) * \
Rydberg / kB
if "xc contribution" in line:
xc_contribution = float((line.split('=')[1]).
split('Ry')[0])
break
if "one-electron contribution" in line:
one_electron_contribution = float((line.split('=')[1]).
split('Ry')[0])
if "hartree contribution" in line:
hartree_contribution = float((line.split('=')[1]).
split('Ry')[0])
if "FFT dimensions" in line:
dims = line.split("(")[1]
self.grid_dimensions[0] = int(dims.split(",")[0])
self.grid_dimensions[1] = int(dims.split(",")[1])
self.grid_dimensions[2] = int((dims.split(",")[2]).
split(")")[0])
if "bravais-lattice index" in line:
self.qe_input_data["ibrav"] = int(line.split("=")[1])
if "kinetic-energy cutoff" in line:
self.qe_input_data["ecutwfc"] \
= float((line.split("=")[1]).split("Ry")[0])
if "charge density cutoff" in line:
self.qe_input_data["ecutrho"] \
= float((line.split("=")[1]).split("Ry")[0])
if "smearing, width" in line:
self.qe_input_data["degauss"] \
= float(line.split("=")[-1])
if pseudolinefound:
self.qe_pseudopotentials[lastpseudo.strip()] \
= line.split("/")[-1].strip()
pseudolinefound = False
lastpseudo = None
if "PseudoPot." in line:
pseudolinefound = True
lastpseudo = (line.split("for")[1]).split("read")[0]
if "total energy" in line and past_calculation_part:
if total_energy is None:
total_energy \
= float((line.split('=')[1]).split('Ry')[0])
if "set verbosity='high' to print them." in line:
bands_included = False
# Post process the text values.
cell_volume = vol / (self.grid_dimensions[0] *
self.grid_dimensions[1] *
self.grid_dimensions[2] * Bohr ** 3)
self.grid_spacing_Bohr = cell_volume ** (1 / 3)
# This is especially important for size extrapolation.
self.electrons_per_atom = self.number_of_electrons/len(self.atoms)
# Unit conversion
self.total_energy_dft_calculation = total_energy*Rydberg
# Calculate band energy, if the necessary data is included in
# the output file.
if bands_included:
eigs = np.transpose(
self.atoms.get_calculator().band_structure().
energies[0, :, :])
kweights = self.atoms.get_calculator().get_k_point_weights()
eband_per_band = eigs * fermi_function(eigs,
self.fermi_energy_eV,
self.temperature_K)
eband_per_band = kweights[np.newaxis, :] * eband_per_band
self.band_energy_dft_calculation = np.sum(eband_per_band)
enum_per_band = fermi_function(eigs,
self.fermi_energy_eV,
self.temperature_K)
enum_per_band = kweights[np.newaxis, :] * enum_per_band
self.number_of_electrons_from_eigenvals = np.sum(enum_per_band)
elif data_type == "atoms+grid":
# Reset everything that we can get this way.
self.grid_spacing_Bohr = None
self.band_energy_dft_calculation = None
self.total_energy_dft_calculation = None
self.grid_dimensions = [0, 0, 0]
self.atoms: ase.Atoms = data[0]
# Read the file.
vol = self.atoms.get_volume()
# Parse the file for energy values.
self.grid_dimensions[0] = data[1][0]
self.grid_dimensions[1] = data[1][1]
self.grid_dimensions[2] = data[1][2]
# Post process the text values.
cell_volume = vol / (self.grid_dimensions[0] *
self.grid_dimensions[1] *
self.grid_dimensions[2] * Bohr ** 3)
self.grid_spacing_Bohr = cell_volume ** (1 / 3)
if self.electrons_per_atom is None:
printout("No number of electrons per atom provided, "
"MALA cannot guess the number of electrons "
"in the cell with this. Energy calculations may be"
"wrong.")
else:
self.number_of_electrons = self.electrons_per_atom *\
len(self.atoms)
else:
raise Exception("Unsupported auxiliary file type.")
def get_energy_grid(self):
"""Get energy grid."""
raise Exception("No method implement to calculate an energy grid.")
def get_real_space_grid(self):
"""Get the real space grid."""
grid3D = np.zeros((self.grid_dimensions[0], self.grid_dimensions[1],
self.grid_dimensions[2], 3), dtype=np.float64)
for i in range(0, self.grid_dimensions[0]):
for j in range(0, self.grid_dimensions[1]):
for k in range(0, self.grid_dimensions[2]):
grid3D[i, j, k, 0] = i * self.grid_spacing_Bohr
grid3D[i, j, k, 1] = j * self.grid_spacing_Bohr
grid3D[i, j, k, 2] = k * self.grid_spacing_Bohr
return grid3D
@staticmethod
def write_tem_input_file(atoms_Angstrom, qe_input_data,
qe_pseudopotentials,
grid_dimensions, kpoints):
"""
Write a QE-style input file for the total energy module.
Usually, the used parameters should correspond to the properties of
the object calling this function, but they don't necessarily have
to.
Parameters
----------
atoms_Angstrom : ase.Atoms
ASE atoms object for the current system. If None, MALA will
create one.
qe_input_data : dict
Quantum Espresso parameters dictionary for the ASE<->QE interface.
If None (recommended), MALA will create one.
qe_pseudopotentials : dict
Quantum Espresso pseudopotential dictionaty for the ASE<->QE
interface. If None (recommended), MALA will create one.
grid_dimensions : list
A list containing the x,y,z dimensions of the real space grid.
kpoints : dict
k-grid used, usually None or (1,1,1) for TEM calculations.
"""
# Specify grid dimensions, if any are given.
if grid_dimensions[0] != 0 and \
grid_dimensions[1] != 0 and \
grid_dimensions[2] != | |
return buf.raw
def randombytes(size):
    """Return `size` random bytes from libsodium's randombytes()."""
    out = ctypes.create_string_buffer(size)
    sodium.randombytes(out, ctypes.c_ulonglong(size))
    return out.raw
def crypto_box_keypair():
    """Generate a fresh (public, secret) box keypair."""
    public = ctypes.create_string_buffer(crypto_box_PUBLICKEYBYTES)
    secret = ctypes.create_string_buffer(crypto_box_SECRETKEYBYTES)
    __check(sodium.crypto_box_keypair(public, secret))
    return public.raw, secret.raw
# int crypto_box_seed_keypair(unsigned char *pk, unsigned char *sk,
#                             const unsigned char *seed);
def crypto_box_seed_keypair(seed):
    """Deterministically derive a box keypair from `seed`."""
    if seed is None:
        raise ValueError("invalid parameters")
    if len(seed) != crypto_box_SEEDBYTES:
        raise ValueError("invalid seed size")
    public = ctypes.create_string_buffer(crypto_box_PUBLICKEYBYTES)
    secret = ctypes.create_string_buffer(crypto_box_SECRETKEYBYTES)
    __check(sodium.crypto_box_seed_keypair(public, secret, seed))
    return public.raw, secret.raw
def crypto_box_beforenm(pk, sk):
    """Precompute the shared key for pk/sk, for use with *_afternm calls."""
    if pk is None or sk is None:
        raise ValueError("invalid parameters")
    if len(pk) != crypto_box_PUBLICKEYBYTES:
        raise ValueError("pk incorrect size")
    if len(sk) != crypto_box_SECRETKEYBYTES:
        raise ValueError("sk incorrect size")
    shared = ctypes.create_string_buffer(crypto_secretbox_KEYBYTES)
    __check(sodium.crypto_box_beforenm(shared, pk, sk))
    return shared.raw
def crypto_box(msg, nonce, pk, sk):
    """Encrypt+authenticate `msg`; returns MAC||ciphertext (easy API)."""
    if None in (msg, nonce, pk, sk):
        raise ValueError("invalid parameters")
    if len(pk) != crypto_box_PUBLICKEYBYTES:
        raise ValueError("pk incorrect size")
    if len(sk) != crypto_box_SECRETKEYBYTES:
        raise ValueError("sk incorrect size")
    if len(nonce) != crypto_box_NONCEBYTES:
        raise ValueError("nonce incorrect size")
    out = ctypes.create_string_buffer(crypto_box_MACBYTES + len(msg))
    __check(sodium.crypto_box_easy(out, msg, ctypes.c_ulonglong(len(msg)), nonce, pk, sk))
    return out.raw
def crypto_box_afternm(msg, nonce, k):
    """Encrypt `msg` with a precomputed shared key `k` (easy API)."""
    if None in (msg, nonce, k):
        raise ValueError("invalid parameters")
    if len(k) != crypto_box_BEFORENMBYTES:
        raise ValueError("k incorrect size")
    if len(nonce) != crypto_box_NONCEBYTES:
        raise ValueError("nonce incorrect size")
    out = ctypes.create_string_buffer(crypto_box_MACBYTES + len(msg))
    __check(sodium.crypto_box_easy_afternm(out, msg, ctypes.c_ulonglong(len(msg)), nonce, k))
    return out.raw
def crypto_box_open(c, nonce, pk, sk):
    """Verify and decrypt a MAC||ciphertext produced by crypto_box."""
    if None in (c, nonce, pk, sk):
        raise ValueError("invalid parameters")
    if len(pk) != crypto_box_PUBLICKEYBYTES:
        raise ValueError("pk incorrect size")
    if len(sk) != crypto_box_SECRETKEYBYTES:
        raise ValueError("sk incorrect size")
    if len(nonce) != crypto_box_NONCEBYTES:
        raise ValueError("nonce incorrect size")
    plain = ctypes.create_string_buffer(len(c) - crypto_box_MACBYTES)
    __check(sodium.crypto_box_open_easy(plain, c, ctypes.c_ulonglong(len(c)), nonce, pk, sk))
    return plain.raw
def crypto_box_open_afternm(c, nonce, k):
    """Verify and decrypt using a precomputed shared key `k`."""
    if None in (c, nonce, k):
        raise ValueError("invalid parameters")
    if len(k) != crypto_box_BEFORENMBYTES:
        raise ValueError("k incorrect size")
    if len(nonce) != crypto_box_NONCEBYTES:
        raise ValueError("nonce incorrect size")
    plain = ctypes.create_string_buffer(len(c) - crypto_box_MACBYTES)
    __check(sodium.crypto_box_open_easy_afternm(plain, c, ctypes.c_ulonglong(len(c)), nonce, k))
    return plain.raw
def crypto_secretbox(msg, nonce, k):
    """Secret-key encrypt `msg` (classic zero-padded NaCl API)."""
    if None in (msg, nonce, k):
        raise ValueError("invalid parameters")
    if len(k) != crypto_secretbox_KEYBYTES:
        raise ValueError("k incorrect size")
    if len(nonce) != crypto_secretbox_NONCEBYTES:
        raise ValueError("nonce incorrect size")
    # Classic API: the plaintext must be prefixed with ZEROBYTES zeros;
    # the output starts with BOXZEROBYTES zeros that we strip off.
    padded = b"\x00" * crypto_secretbox_ZEROBYTES + msg
    out = ctypes.create_string_buffer(len(padded))
    __check(sodium.crypto_secretbox(out, padded, ctypes.c_ulonglong(len(padded)), nonce, k))
    return out.raw[crypto_secretbox_BOXZEROBYTES:]
def crypto_secretbox_open(c, nonce, k):
    """Verify and decrypt a crypto_secretbox ciphertext (classic API)."""
    if None in (c, nonce, k):
        raise ValueError("invalid parameters")
    if len(k) != crypto_secretbox_KEYBYTES:
        raise ValueError("k incorrect size")
    if len(nonce) != crypto_secretbox_NONCEBYTES:
        raise ValueError("nonce incorrect size")
    # Classic API: ciphertext is prefixed with BOXZEROBYTES zeros; the
    # recovered plaintext starts after ZEROBYTES zeros.
    padded = b"\x00" * crypto_secretbox_BOXZEROBYTES + c
    plain = ctypes.create_string_buffer(len(padded))
    __check(sodium.crypto_secretbox_open(plain, padded, ctypes.c_ulonglong(len(padded)), nonce, k))
    return plain.raw[crypto_secretbox_ZEROBYTES:]
# int crypto_box_seal(unsigned char *c, const unsigned char *m,
#                     unsigned long long mlen, const unsigned char *pk);
@sodium_version(1, 0, 3)
def crypto_box_seal(msg, k):
    """Anonymously encrypt `msg` to public key `k` (sealed box)."""
    if msg is None or k is None:
        raise ValueError("invalid parameters")
    if len(k) != crypto_box_PUBLICKEYBYTES:
        raise ValueError("k incorrect size")
    sealed = ctypes.create_string_buffer(len(msg)+crypto_box_SEALBYTES)
    __check(sodium.crypto_box_seal(sealed, msg, ctypes.c_ulonglong(len(msg)), k))
    return sealed.raw
# int crypto_box_seal_open(unsigned char *m, const unsigned char *c,
#                          unsigned long long clen,
#                          const unsigned char *pk, const unsigned char *sk);
@sodium_version(1, 0, 3)
def crypto_box_seal_open(c, pk, sk):
    """Decrypt a sealed box with the recipient keypair (pk, sk)."""
    if None in (c, pk, sk):
        raise ValueError("invalid parameters")
    if len(pk) != crypto_box_PUBLICKEYBYTES:
        raise ValueError("pk incorrect size")
    if len(sk) != crypto_box_SECRETKEYBYTES:
        raise ValueError("sk incorrect size")
    plain = ctypes.create_string_buffer(len(c)-crypto_box_SEALBYTES)
    __check(sodium.crypto_box_seal_open(plain, c, ctypes.c_ulonglong(len(c)), pk, sk))
    return plain.raw
# int crypto_secretbox_detached(unsigned char *c, unsigned char *mac,
#                               const unsigned char *m,
#                               unsigned long long mlen,
#                               const unsigned char *n,
#                               const unsigned char *k);
def crypto_secretbox_detached(msg, nonce, k):
    """Secret-key encrypt `msg`; returns (ciphertext, mac) separately."""
    if None in (msg, nonce, k):
        raise ValueError("invalid parameters")
    if len(k) != crypto_secretbox_KEYBYTES:
        raise ValueError("key incorrect size")
    if len(nonce) != crypto_secretbox_NONCEBYTES:
        raise ValueError("nonce incorrect size")
    cipher = ctypes.create_string_buffer(len(msg))
    tag = ctypes.create_string_buffer(crypto_secretbox_MACBYTES)
    __check(sodium.crypto_secretbox_detached(cipher, tag, msg, ctypes.c_ulonglong(len(msg)), nonce, k))
    return cipher.raw, tag.raw
# int crypto_secretbox_open_detached(unsigned char *m,
#                                    const unsigned char *c,
#                                    const unsigned char *mac,
#                                    unsigned long long clen,
#                                    const unsigned char *n,
#                                    const unsigned char *k);
def crypto_secretbox_open_detached(c, mac, nonce, k):
    """Verify (ciphertext, mac) and decrypt with secret key `k`."""
    if None in (c, mac, nonce, k):
        raise ValueError("invalid parameters")
    if len(k) != crypto_secretbox_KEYBYTES:
        raise ValueError("key incorrect size")
    if len(nonce) != crypto_secretbox_NONCEBYTES:
        raise ValueError("nonce incorrect size")
    plain = ctypes.create_string_buffer(len(c))
    __check(sodium.crypto_secretbox_open_detached(plain, c, mac, ctypes.c_ulonglong(len(c)), nonce, k))
    return plain.raw
# int crypto_box_detached(unsigned char *c, unsigned char *mac,
#                         const unsigned char *m, unsigned long long mlen,
#                         const unsigned char *n, const unsigned char *pk,
#                         const unsigned char *sk);
def crypto_box_detached(msg, nonce, pk, sk):
    """Box-encrypt `msg`; returns (ciphertext, mac) separately."""
    if None in (msg, nonce, pk, sk):
        raise ValueError("invalid parameters")
    if len(pk) != crypto_box_PUBLICKEYBYTES:
        raise ValueError("pk incorrect size")
    if len(sk) != crypto_box_SECRETKEYBYTES:
        raise ValueError("sk incorrect size")
    if len(nonce) != crypto_box_NONCEBYTES:
        raise ValueError("nonce incorrect size")
    cipher = ctypes.create_string_buffer(len(msg))
    tag = ctypes.create_string_buffer(crypto_box_MACBYTES)
    __check(sodium.crypto_box_detached(cipher, tag, msg, ctypes.c_ulonglong(len(msg)), nonce, pk, sk))
    return cipher.raw, tag.raw
# int crypto_box_open_detached(unsigned char *m, const unsigned char *c,
#                              const unsigned char *mac,
#                              unsigned long long clen,
#                              const unsigned char *n,
#                              const unsigned char *pk,
#                              const unsigned char *sk);
def crypto_box_open_detached(c, mac, nonce, pk, sk):
    """Verify (ciphertext, mac) and box-decrypt with (pk, sk)."""
    if None in (c, mac, nonce, pk, sk):
        raise ValueError("invalid parameters")
    if len(pk) != crypto_box_PUBLICKEYBYTES:
        raise ValueError("pk incorrect size")
    if len(sk) != crypto_box_SECRETKEYBYTES:
        raise ValueError("sk incorrect size")
    if len(nonce) != crypto_box_NONCEBYTES:
        raise ValueError("nonce incorrect size")
    plain = ctypes.create_string_buffer(len(c))
    __check(sodium.crypto_box_open_detached(plain, c, mac, ctypes.c_ulonglong(len(c)), nonce, pk, sk))
    return plain.raw
# void crypto_secretstream_xchacha20poly1305_keygen (unsigned char k[crypto_secretstream_xchacha20poly1305_KEYBYTES])
@sodium_version(1, 0, 15)
def crypto_secretstream_xchacha20poly1305_keygen():
    """Generate a fresh secretstream key."""
    new_key = ctypes.create_string_buffer(crypto_secretstream_xchacha20poly1305_KEYBYTES)
    sodium.crypto_secretstream_xchacha20poly1305_keygen(ctypes.byref(new_key))
    return new_key.raw
# int crypto_secretstream_xchacha20poly1305_init_push(crypto_secretstream_xchacha20poly1305_state *state,
#                                                     unsigned char out[crypto_secretstream_xchacha20poly1305_HEADERBYTES],
#                                                     const unsigned char k[crypto_secretstream_xchacha20poly1305_KEYBYTES])
@sodium_version(1, 0, 15)
def crypto_secretstream_xchacha20poly1305_init_push(key):
    """Initialise a secretstream push (encryption) state.

    Returns (state, header); the header must be sent to the receiving
    side so it can call init_pull with the same key.
    """
    # PEP 8 fix: compare against None with `is`, not `==`.
    if key is None:
        raise ValueError("invalid parameters")
    if not (len(key) == crypto_secretstream_xchacha20poly1305_KEYBYTES): raise ValueError("Truncated key")

    state = ctypes.create_string_buffer(crypto_secretstream_xchacha20poly1305_STATEBYTES)
    header = ctypes.create_string_buffer(crypto_secretstream_xchacha20poly1305_HEADERBYTES)

    __check(sodium.crypto_secretstream_xchacha20poly1305_init_push(state, header, key))
    return state.raw, header.raw
# int crypto_secretstream_xchacha20poly1305_init_pull(crypto_secretstream_xchacha20poly1305_state *state,
#                                                     const unsigned char in[crypto_secretstream_xchacha20poly1305_HEADERBYTES],
#                                                     const unsigned char k[crypto_secretstream_xchacha20poly1305_KEYBYTES])
@sodium_version(1, 0, 15)
def crypto_secretstream_xchacha20poly1305_init_pull(header, key):
    """Initialise a secretstream pull (decryption) state from a header."""
    if None in (header, key):
        raise ValueError("invalid parameters")
    if not (len(header) == crypto_secretstream_xchacha20poly1305_HEADERBYTES): raise ValueError("Truncated header")
    if not (len(key) == crypto_secretstream_xchacha20poly1305_KEYBYTES): raise ValueError("Truncated key")
    pull_state = ctypes.create_string_buffer(crypto_secretstream_xchacha20poly1305_STATEBYTES)
    __check(sodium.crypto_secretstream_xchacha20poly1305_init_pull(pull_state, header, key))
    return pull_state.raw
#void crypto_secretstream_xchacha20poly1305_rekey (crypto_secretstream_xchacha20poly1305_state *state)
@sodium_version(1, 0, 15)
def crypto_secretstream_xchacha20poly1305_rekey(state):
    """Explicitly rekey the given secretstream state."""
    # PEP 8 fix: compare against None with `is`, not `==`.
    if state is None:
        raise ValueError("invalid parameters")
    if not (len(state) == crypto_secretstream_xchacha20poly1305_STATEBYTES): raise ValueError("Truncated state")
    sodium.crypto_secretstream_xchacha20poly1305_rekey(state)
#int crypto_secretstream_xchacha20poly1305_push (crypto_secretstream_xchacha20poly1305_state *state,
#               unsigned char *out,
#               unsigned long long *outlen_p,
#               const unsigned char *m,
#               unsigned long long mlen,
#               const unsigned char *ad,
#               unsigned long long adlen,
#               unsigned char tag)
@sodium_version(1, 0, 15)
def crypto_secretstream_xchacha20poly1305_push(state, message, ad, tag):
    """Encrypt `message` as the next chunk of a secretstream.

    `state` is the buffer returned by init_push; `ad` is optional
    additional authenticated data (may be None); `tag` is the chunk tag.
    Returns the ciphertext chunk (len(message) + ABYTES bytes).
    """
    if None in (state, message):
        raise ValueError("invalid parameters")
    if not (len(state) == crypto_secretstream_xchacha20poly1305_STATEBYTES): raise ValueError("Truncated state")
    # ad is optional: pass a zero length when it is absent.
    mlen = ctypes.c_ulonglong(len(message))
    adlen = ctypes.c_ulonglong(len(ad)) if ad is not None else ctypes.c_ulonglong(0)
    # Output is always message length plus the fixed authentication overhead.
    c = ctypes.create_string_buffer(mlen.value + crypto_secretstream_xchacha20poly1305_ABYTES)
    clen = ctypes.c_ulonglong(0)
    __check(sodium.crypto_secretstream_xchacha20poly1305_push(
        state,               # crypto_secretstream_xchacha20poly1305_state *state,
        c,                   # unsigned char *out
        ctypes.byref(clen),  # unsigned long long *outlen_p,
        message,             # const unsigned char *m,
        mlen,                # unsigned long long mlen,
        ad,                  # const unsigned char *ad,
        adlen,               # unsigned long long adlen,
        tag))                # unsigned char tag)
    return c.raw
#crypto_secretstream_xchacha20poly1305_pull (crypto_secretstream_xchacha20poly1305_state *state,
#   unsigned char *m,
#   unsigned long long *mlen_p,
#   unsigned char *tag_p,
#   const unsigned char *in,
#   unsigned long long inlen,
#   const unsigned char *ad,
#   unsigned long long adlen)
@sodium_version(1, 0, 15)
def crypto_secretstream_xchacha20poly1305_pull(state, ciphertext, ad):
    """Verify and decrypt the next secretstream chunk.

    `state` is the buffer returned by init_pull; `ad` is optional
    additional authenticated data (may be None).
    Returns (plaintext, tag) where tag is the chunk's tag byte.
    """
    if None in (state, ciphertext):
        raise ValueError("invalid parameters")
    if not (len(state) == crypto_secretstream_xchacha20poly1305_STATEBYTES): raise ValueError("Truncated state")
    # Every valid chunk carries at least ABYTES of authentication data.
    if len(ciphertext) < crypto_secretstream_xchacha20poly1305_ABYTES:
        raise ValueError("truncated cyphertext")
    m = ctypes.create_string_buffer(len(ciphertext) - crypto_secretstream_xchacha20poly1305_ABYTES)
    mlen = ctypes.c_ulonglong(0)
    tag = ctypes.c_ubyte(0)
    clen = ctypes.c_ulonglong(len(ciphertext))
    # ad is optional: pass a zero length when it is absent.
    adlen = ctypes.c_ulonglong(len(ad)) if ad is not None else ctypes.c_ulonglong(0)
    __check(sodium.crypto_secretstream_xchacha20poly1305_pull(
        state,
        m,                   # char *m,
        ctypes.byref(mlen),  # long long *mlen_p,
        ctypes.byref(tag),   # char *tag_p,
        ciphertext,          # unsigned char *in,
        clen,                # long long inlen,
        ad,                  # unsigned char *ad,
        adlen                # long long adlen)
    ))
    return m.raw, tag.value
def crypto_sign_keypair():
    """Generate a fresh (public, secret) signing keypair."""
    verify_key = ctypes.create_string_buffer(crypto_sign_PUBLICKEYBYTES)
    signing_key = ctypes.create_string_buffer(crypto_sign_SECRETKEYBYTES)
    __check(sodium.crypto_sign_keypair(verify_key, signing_key))
    return verify_key.raw, signing_key.raw
def crypto_sign_seed_keypair(seed):
    """Deterministically derive a signing keypair from `seed`."""
    if len(seed) != crypto_sign_SEEDBYTES:
        raise ValueError("invalid seed size")
    verify_key = ctypes.create_string_buffer(crypto_sign_PUBLICKEYBYTES)
    signing_key = ctypes.create_string_buffer(crypto_sign_SECRETKEYBYTES)
    __check(sodium.crypto_sign_seed_keypair(verify_key, signing_key, seed))
    return verify_key.raw, signing_key.raw
def crypto_sign(m, sk):
    """Sign message `m`; returns the combined signature||message."""
    if m is None or sk is None:
        raise ValueError("invalid parameters")
    if not (len(sk) == crypto_sign_SECRETKEYBYTES):
        raise ValueError('Truncated secret key')
    signed = ctypes.create_string_buffer(len(m) + crypto_sign_BYTES)
    signed_len = ctypes.c_ulonglong()
    __check(sodium.crypto_sign(signed, ctypes.byref(signed_len), m, ctypes.c_ulonglong(len(m)), sk))
    return signed.raw
def crypto_sign_detached(m, sk):
    """Sign message `m`; returns only the detached signature."""
    if m is None or sk is None:
        raise ValueError("invalid parameters")
    if not (len(sk) == crypto_sign_SECRETKEYBYTES):
        raise ValueError('Truncated secret key')
    signature = ctypes.create_string_buffer(crypto_sign_BYTES)
    # second parm is for output of signature len (optional, ignored if NULL)
    __check(sodium.crypto_sign_detached(signature, ctypes.c_void_p(0), m, ctypes.c_ulonglong(len(m)), sk))
    return signature.raw
def crypto_sign_open(sm, pk):
if sm is None or pk is None:
raise ValueError("invalid parameters")
if not (len(pk) == crypto_sign_PUBLICKEYBYTES): raise ValueError('Truncated public key')
msg = ctypes.create_string_buffer(len(sm))
msglen = ctypes.c_ulonglong()
__check(sodium.crypto_sign_open(msg, ctypes.byref(msglen), sm, ctypes.c_ulonglong(len(sm)), pk))
return | |
import os
import secrets
from PIL import Image, ImageTk
from flask import render_template, url_for, flash, redirect, request, abort
from flaskblog import app, db, bcrypt
from flaskblog.forms import RegistrationForm, LoginForm, UpdateAccountForm, PostForm
from flaskblog.models import User, Post
from flask_login import login_user, current_user, logout_user, login_required
from flask import Flask, render_template, request, Response, url_for
import numpy as np
import math
import cv2
from easygui import *
import speech_recognition as sr
import matplotlib.pyplot as plt
import string
import tkinter as tk
from itertools import count
@app.route("/")
@app.route("/home")
def home():
    """Render the home page listing every post."""
    all_posts = Post.query.all()
    return render_template('home.html', posts=all_posts)
@app.route("/about")
def about():
    """Render the static about page."""
    return render_template('about.html', title='About')
@app.route("/register", methods=['GET', 'POST'])
def register():
    """Create a new user account.

    Bug fix: the original contained the anonymised placeholder
    ``<PASSWORD>`` (not valid Python) where ``bcrypt`` and the computed
    ``hashed_password`` belong — restored so only the bcrypt hash of the
    password is stored.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RegistrationForm()
    if form.validate_on_submit():
        # Hash the password before persisting it; never store plaintext.
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user = User(username=form.username.data, email=form.email.data, password=hashed_password)
        db.session.add(user)
        db.session.commit()
        flash('Your account has been created! You are now able to log in', 'success')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Log an existing user in.

    Bug fix: the original contained the anonymised placeholder
    ``<PASSWORD>`` (not valid Python) where ``bcrypt`` belongs —
    restored to verify the submitted password against the stored hash.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            login_user(user, remember=form.remember.data)
            # Honor the ?next= redirect target if the user was bounced here.
            next_page = request.args.get('next')
            return redirect(next_page) if next_page else redirect(url_for('home'))
        else:
            flash('Login Unsuccessful. Please check email and password', 'danger')
    return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for('home'))
def save_picture(form_picture):
    """Save an uploaded profile picture as a 125x125 thumbnail.

    The file is stored under static/profile_pics with a random hex name
    (original extension preserved); returns the generated filename.
    """
    random_hex = secrets.token_hex(8)
    _, extension = os.path.splitext(form_picture.filename)
    picture_name = random_hex + extension
    destination = os.path.join(app.root_path, 'static/profile_pics', picture_name)
    thumbnail = Image.open(form_picture)
    thumbnail.thumbnail((125, 125))
    thumbnail.save(destination)
    return picture_name
@app.route("/account", methods=['GET', 'POST'])
@login_required
def account():
    """Show and update the logged-in user's profile."""
    form = UpdateAccountForm()
    if form.validate_on_submit():
        # Only replace the stored image when a new one was uploaded.
        if form.picture.data:
            current_user.image_file = save_picture(form.picture.data)
        current_user.username = form.username.data
        current_user.email = form.email.data
        db.session.commit()
        flash('Your account has been updated!', 'success')
        return redirect(url_for('account'))
    elif request.method == 'GET':
        # Pre-populate the form with the current profile values.
        form.username.data = current_user.username
        form.email.data = current_user.email
    image_file = url_for('static', filename='profile_pics/' + current_user.image_file)
    return render_template('account.html', title='Account',
                           image_file=image_file, form=form)
@app.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
    """Create a new post authored by the logged-in user."""
    form = PostForm()
    if form.validate_on_submit():
        created = Post(title=form.title.data, content=form.content.data, author=current_user)
        db.session.add(created)
        db.session.commit()
        flash('Your post has been created!', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title='New Post',
                           form=form, legend='New Post')
@app.route("/post/<int:post_id>")
def post(post_id):
    """Display a single post, or 404 if it does not exist."""
    requested = Post.query.get_or_404(post_id)
    return render_template('post.html', title=requested.title, post=requested)
@app.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
    """Edit an existing post; only its author may update it."""
    existing = Post.query.get_or_404(post_id)
    if existing.author != current_user:
        abort(403)  # only the author may edit
    form = PostForm()
    if form.validate_on_submit():
        existing.title = form.title.data
        existing.content = form.content.data
        db.session.commit()
        flash('Your post has been updated!', 'success')
        return redirect(url_for('post', post_id=existing.id))
    elif request.method == 'GET':
        # Pre-populate the form with the post's current content.
        form.title.data = existing.title
        form.content.data = existing.content
    return render_template('create_post.html', title='Update Post',
                           form=form, legend='Update Post')
@app.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
    """Delete a post; only its author may do so."""
    existing = Post.query.get_or_404(post_id)
    if existing.author != current_user:
        abort(403)  # only the author may delete
    db.session.delete(existing)
    db.session.commit()
    flash('Your post has been deleted!', 'success')
    return redirect(url_for('home'))
@app.route("/translator", methods=['GET', 'POST'])
def translator():
if request.method == 'POST':
if request.form.get('action1') == 'Sign to Text':
cap = cv2.VideoCapture(0)
while (1):
try: # an error comes if it does not find anything in window as it cannot find contour of max area
# therefore this try error statement
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
kernel = np.ones((3, 3), np.uint8)
# define region of interest
roi = frame[100:300, 100:300]
cv2.rectangle(frame, (100, 100), (300, 300), (0, 255, 0), 0)
hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# define range of skin color in HSV
lower_skin = np.array([0, 20, 70], dtype=np.uint8)
upper_skin = np.array([20, 255, 255], dtype=np.uint8)
# extract skin colur imagw
mask = cv2.inRange(hsv, lower_skin, upper_skin)
# extrapolate the hand to fill dark spots within
mask = cv2.dilate(mask, kernel, iterations=4)
# blur the image
mask = cv2.GaussianBlur(mask, (5, 5), 100)
# find contours
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# find contour of max area(hand)
# screen has no readable parts
if contours == [] and hierarchy == None:
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, 'Nothig is visible', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
cv2.imshow('frame', frame)
cv2.waitKey(1)
continue
cnt = max(contours, key=lambda x: cv2.contourArea(x))
# approx the contour a little
epsilon = 0.0005 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
# make convex hull around hand
hull = cv2.convexHull(cnt)
# define area of hull and area of hand
areahull = cv2.contourArea(hull)
areacnt = cv2.contourArea(cnt)
# find the percentage of area not covered by hand in convex hull
arearatio = ((areahull - areacnt) / areacnt) * 100
# find the defects in convex hull with respect to hand
hull = cv2.convexHull(approx, returnPoints=False)
defects = cv2.convexityDefects(approx, hull)
# l = no. of defects
l = 0
# code for finding no. of defects due to fingers
for i in range(defects.shape[0]):
s, e, f, d = defects[i, 0]
start = tuple(approx[s][0])
end = tuple(approx[e][0])
far = tuple(approx[f][0])
pt = (100, 180)
# find length of all sides of triangle
a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
s = (a + b + c) / 2
ar = math.sqrt(s * (s - a) * (s - b) * (s - c))
# distance between point and convex hull
d = (2 * ar) / a
# apply cosine rule here
angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) * 57
# ignore angles > 90 and ignore points very close to convex hull(they generally come due to noise)
if angle <= 90 and d > 30:
l += 1
cv2.circle(roi, far, 3, [255, 0, 0], -1)
# draw lines around hand
cv2.line(roi, start, end, [0, 255, 0], 2)
l += 1
# print corresponding gestures which are in their ranges
font = cv2.FONT_HERSHEY_SIMPLEX
if l == 1:
if areacnt < 2000:
cv2.putText(frame, 'Put hand in the box', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
else:
if arearatio < 12:
cv2.putText(frame, '0', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
elif arearatio < 17.5:
cv2.putText(frame, 'Best of luck', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
else:
cv2.putText(frame, '1', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
elif l == 2:
cv2.putText(frame, '2', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
elif l == 3:
if arearatio < 27:
cv2.putText(frame, '3', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
else:
cv2.putText(frame, 'ok', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
elif l == 4:
cv2.putText(frame, '4', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
elif l == 5:
cv2.putText(frame, '5', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
elif l == 6:
cv2.putText(frame, 'reposition', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
else:
cv2.putText(frame, 'reposition', (10, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA)
# show the windows
cv2.imshow('mask', mask)
cv2.imshow('frame', frame)
except Exception as e:
pass
# print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), e)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
cap.release()
elif request.form.get('action2') == 'Text to Sign':
def func():
r = sr.Recognizer()
isl_gif = ['all the best', 'any questions', 'are you angry', 'are you busy', 'are you hungry',
'are you sick',
'be careful',
'can we meet tomorrow', 'did you book tickets', 'did you finish homework',
'do you go to office',
'do you have money',
'do you want something to drink', 'do you want tea or coffee', 'do you watch TV',
'dont worry',
'flower is beautiful',
'good afternoon', 'good evening', 'good morning', 'good night', 'good question',
'had your lunch',
'happy journey',
'hello what is your name', 'how many people are there in your family', 'i am a clerk',
'i am bore doing nothing',
'i am fine', 'i am sorry', 'i am thinking', 'i am tired', 'i dont understand anything',
'i go to a theatre', 'i love to shop',
'i had to say something but i forgot', 'i have headache', 'i like pink colour',
'i live in nagpur',
'lets go for lunch', 'my mother is a homemaker',
'my name is john', 'nice to meet you', 'no smoking please', 'open the door',
'please call an ambulance',
'please call me later',
'please clean the room', 'please give me your pen', 'please use dustbin dont throw garbage',
'please wait for sometime', 'shall I help you',
'shall we go together tomorrow', 'sign language interpreter', 'sit down', 'stand up',
'take care',
'there was traffic jam', 'wait I am thinking',
'what are you doing', 'what is | |
url:::fo\no:True::::]",
"[text(2,14):a:]",
"[end-setext::]",
]
expected_gfm = """<h2>a<img src="/my%20url" alt="fo\no" />a</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_a3():
    """
    Test case extra A3: SetExt with inline link label text split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """abc
[li
nk](/uri "title" )
def
---"""
    expected_tokens = [
        "[setext(5,1):-:3::(1,1)]",
        "[text(1,1):abc\n::\n]",
        '[link(2,1):inline:/uri:title::::li\nnk:False:":: : ]',
        "[text(2,2):li\nnk::\n]",
        "[end-link::]",
        "[text(3,19):\ndef::\n \x02]",
        "[end-setext::]",
    ]
    expected_gfm = """<h2>abc\n<a href="/uri" title="title">li\nnk</a>\ndef</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_a4():
    """
    Test case extra A4: SetExt with inline link label code span split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """abc
[li`de
fg`nk](/uri "title" )
def
---"""
    expected_tokens = [
        "[setext(5,1):-:3::(1,1)]",
        "[text(1,1):abc\n::\n]",
        '[link(2,1):inline:/uri:title::::li`de\nfg`nk:False:":: : ]',
        "[text(2,2):li:]",
        "[icode-span(2,4):de\a\n\a \afg:`::]",
        "[text(3,4):nk:]",
        "[end-link::]",
        "[text(3,22):\ndef::\n \x02]",
        "[end-setext::]",
    ]
    expected_gfm = (
        """<h2>abc\n<a href="/uri" title="title">li<code>de fg</code>nk</a>\ndef</h2>"""
    )
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_a5():
    """
    Test case extra A5: SetExt with inline link label raw html split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """abc
[li<de
fg>nk](/uri "title" )
def
---"""
    expected_tokens = [
        "[setext(5,1):-:3::(1,1)]",
        "[text(1,1):abc\n::\n]",
        '[link(2,1):inline:/uri:title::::li<de\nfg>nk:False:":: : ]',
        "[text(2,2):li:]",
        "[raw-html(2,4):de\nfg]",
        "[text(3,4):nk:]",
        "[end-link::]",
        "[text(3,22):\ndef::\n \x02]",
        "[end-setext::]",
    ]
    expected_gfm = (
        """<h2>abc\n<a href="/uri" title="title">li<de\nfg>nk</a>\ndef</h2>"""
    )
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_a6():
    """
    Test case extra A6: SetExt with full link label text split over 2 lines.

    (Docstring corrected: the expected token is a *full* reference link,
    not an inline link.)
    """
    # Arrange
    source_markdown = """a[li
nk][bar]a
---
[bar]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[link(1,2):full:/url:title:::bar:li\nnk:::::]",
        "[text(1,3):li\nnk::\n]",
        "[end-link::]",
        "[text(2,9):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::bar:: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<a href="/url" title="title">li\nnk</a>a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_a7():
    """
    Test case extra A7: SetExt with full link label code span split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a[li`de
fg`nk][bar]a
---
[bar]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[link(1,2):full:/url:title:::bar:li`de\nfg`nk:::::]",
        "[text(1,3):li:]",
        "[icode-span(1,5):de\a\n\a \afg:`::]",
        "[text(2,4):nk:]",
        "[end-link::]",
        "[text(2,12):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::bar:: :/url:: :title:'title':]",
    ]
    expected_gfm = (
        """<h2>a<a href="/url" title="title">li<code>de fg</code>nk</a>a</h2>"""
    )
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_a8():
    """
    Test case extra A8: SetExt with full link label raw html split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a[li<de
fg>nk][bar]a
---
[bar]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[link(1,2):full:/url:title:::bar:li<de\nfg>nk:::::]",
        "[text(1,3):li:]",
        "[raw-html(1,5):de\nfg]",
        "[text(2,4):nk:]",
        "[end-link::]",
        "[text(2,12):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::bar:: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<a href="/url" title="title">li<de\nfg>nk</a>a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_a9():
    """
    Test case extra A9: SetExt with collapsed link label text split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a[li
nk][]a
---
[li\nnk]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[link(1,2):collapsed:/url:title::::li\nnk:::::]",
        "[text(1,3):li\nnk::\n]",
        "[end-link::]",
        "[text(2,6):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::li nk:li\nnk: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<a href="/url" title="title">li\nnk</a>a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_b0():
    """
    Test case extra b0: SetExt with collapsed link label code span split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a[li`de
fg`nk][]a
---
[li`de\nfg`nk]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[link(1,2):collapsed:/url:title::::li`de\nfg`nk:::::]",
        "[text(1,3):li:]",
        "[icode-span(1,5):de\a\n\a \afg:`::]",
        "[text(2,4):nk:]",
        "[end-link::]",
        "[text(2,9):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::li`de fg`nk:li`de\nfg`nk: :/url:: :title:'title':]",
    ]
    expected_gfm = (
        """<h2>a<a href="/url" title="title">li<code>de fg</code>nk</a>a</h2>"""
    )
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_b1():
    """
    Test case extra b1: SetExt with collapsed link label raw html split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a[li<de
fg>nk][]a
---
[li<de\nfg>nk]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[link(1,2):collapsed:/url:title::::li<de\nfg>nk:::::]",
        "[text(1,3):li:]",
        "[raw-html(1,5):de\nfg]",
        "[text(2,4):nk:]",
        "[end-link::]",
        "[text(2,9):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::li<de fg>nk:li<de\nfg>nk: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<a href="/url" title="title">li<de\nfg>nk</a>a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_b2():
    """
    Test case extra b2: SetExt with shortcut link label text split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a[li
nk]a
---
[li\nnk]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[link(1,2):shortcut:/url:title::::li\nnk:::::]",
        "[text(1,3):li\nnk::\n]",
        "[end-link::]",
        "[text(2,4):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::li nk:li\nnk: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<a href="/url" title="title">li\nnk</a>a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_b3():
    """
    Test case extra b3: SetExt with shortcut link label code span split over 2 lines.

    (Docstring corrected: was "Paragraph", but this is a SetExt heading case
    like its siblings — the expected output is an <h2>.)
    """
    # Arrange
    source_markdown = """a[li`de
fg`nk]a
---
[li`de\nfg`nk]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[link(1,2):shortcut:/url:title::::li`de\nfg`nk:::::]",
        "[text(1,3):li:]",
        "[icode-span(1,5):de\a\n\a \afg:`::]",
        "[text(2,4):nk:]",
        "[end-link::]",
        "[text(2,7):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::li`de fg`nk:li`de\nfg`nk: :/url:: :title:'title':]",
    ]
    expected_gfm = (
        """<h2>a<a href="/url" title="title">li<code>de fg</code>nk</a>a</h2>"""
    )
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_b4():
    """
    Test case extra b4: SetExt with shortcut link label raw html split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a[li<de
fg>nk]a
---
[li<de\nfg>nk]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[link(1,2):shortcut:/url:title::::li<de\nfg>nk:::::]",
        "[text(1,3):li:]",
        "[raw-html(1,5):de\nfg]",
        "[text(2,4):nk:]",
        "[end-link::]",
        "[text(2,7):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::li<de fg>nk:li<de\nfg>nk: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<a href="/url" title="title">li<de\nfg>nk</a>a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_b5():
    """
    Test case extra b5: SetExt with inline image label text split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """abc

def
---"""
    expected_tokens = [
        "[setext(5,1):-:3::(1,1)]",
        "[text(1,1):abc\n::\n]",
        '[image(2,1):inline:/uri:title:li\nnk::::li\nnk:False:":: : ]',
        "[text(3,19):\ndef::\n \x02]",
        "[end-setext::]",
    ]
    expected_gfm = (
        """<h2>abc\n<img src="/uri" alt="li\nnk" title="title" />\ndef</h2>"""
    )
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_b6():
    """
    Test case extra b6: SetExt with inline image label code span split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """abc

def
---"""
    expected_tokens = [
        "[setext(5,1):-:3::(1,1)]",
        "[text(1,1):abc\n::\n]",
        '[image(2,1):inline:/uri:title:lide fgnk::::li`de\nfg`nk:False:":: : ]',
        "[text(3,22):\ndef::\n \x02]",
        "[end-setext::]",
    ]
    expected_gfm = (
        """<h2>abc\n<img src="/uri" alt="lide fgnk" title="title" />\ndef</h2>"""
    )
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_b7():
    """
    Test case extra b7: SetExt with inline image label raw html split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """abc

def
---"""
    expected_tokens = [
        "[setext(5,1):-:3::(1,1)]",
        "[text(1,1):abc\n::\n]",
        '[image(2,1):inline:/uri:title:li<de\nfg>nk::::li<de\nfg>nk:False:":: : ]',
        "[text(3,22):\ndef::\n \x02]",
        "[end-setext::]",
    ]
    expected_gfm = (
        """<h2>abc\n<img src="/uri" alt="li<de\nfg>nk" title="title" />\ndef</h2>"""
    )
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_b8():
    """
    Test case extra b8: SetExt with full image label text split over 2 lines.

    (Docstring corrected: the expected token is a *full* reference image,
    not an inline image.)
    """
    # Arrange
    source_markdown = """a![li
nk][bar]a
---
[bar]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[image(1,2):full:/url:title:li\nnk:::bar:li\nnk:::::]",
        "[text(2,9):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::bar:: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<img src="/url" alt="li\nnk" title="title" />a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_b9():
    """
    Test case extra b9: SetExt with full image label code span split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a![li`de
fg`nk][bar]a
---
[bar]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[image(1,2):full:/url:title:lide fgnk:::bar:li`de\nfg`nk:::::]",
        "[text(2,12):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::bar:: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<img src="/url" alt="lide fgnk" title="title" />a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_c0():
    """
    Test case extra c0: SetExt with full image label raw html split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a![li<de
fg>nk][bar]a
---
[bar]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[image(1,2):full:/url:title:li<de\nfg>nk:::bar:li<de\nfg>nk:::::]",
        "[text(2,12):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::bar:: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<img src="/url" alt="li<de\nfg>nk" title="title" />a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_c1():
    """
    Test case extra c1: SetExt with collapsed image label text split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a![li
nk][]a
---
[li\nnk]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[image(1,2):collapsed:/url:title:li\nnk::::li\nnk:::::]",
        "[text(2,6):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::li nk:li\nnk: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<img src="/url" alt="li\nnk" title="title" />a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_c2():
    """
    Test case extra c2: SetExt with collapsed image label code span split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a![li`de
fg`nk][]a
---
[li`de\nfg`nk]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[image(1,2):collapsed:/url:title:lide fgnk::::li`de\nfg`nk:::::]",
        "[text(2,9):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::li`de fg`nk:li`de\nfg`nk: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<img src="/url" alt="lide fgnk" title="title" />a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_c3():
    """
    Test case extra c3: SetExt with collapsed image label raw html split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a![li<de
fg>nk][]a
---
[li<de\nfg>nk]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[image(1,2):collapsed:/url:title:li<de\nfg>nk::::li<de\nfg>nk:::::]",
        "[text(2,9):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::li<de fg>nk:li<de\nfg>nk: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<img src="/url" alt="li<de\nfg>nk" title="title" />a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_c4():
    """
    Test case extra c4: SetExt with shortcut image label text split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a![li
nk]a
---
[li\nnk]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[image(1,2):shortcut:/url:title:li\nnk::::li\nnk:::::]",
        "[text(2,4):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::li nk:li\nnk: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<img src="/url" alt="li\nnk" title="title" />a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_c5():
    """
    Test case extra c5: SetExt with shortcut image label code span split over 2 lines.

    Asserts both the parser's token stream and the rendered GFM HTML.
    """
    # Arrange
    source_markdown = """a![li`de
fg`nk]a
---
[li`de\nfg`nk]: /url 'title'"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a:]",
        "[image(1,2):shortcut:/url:title:lide fgnk::::li`de\nfg`nk:::::]",
        "[text(2,7):a:]",
        "[end-setext::]",
        "[BLANK(4,1):]",
        "[link-ref-def(5,1):True::li`de fg`nk:li`de\nfg`nk: :/url:: :title:'title':]",
    ]
    expected_gfm = """<h2>a<img src="/url" alt="lide fgnk" title="title" />a</h2>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_setext_headings_extra_c6():
"""
Test case extra c6: SetExt with shortcut image label raw html split over 2 | |
config file, construct the field map
and return it. If there is neither a fixed map or user specified
mapping then raise an error.
Input parameters:
source_type: String holding name of the section in
import_config_dict that holds config details for the
source being used.
source: Iterable holding the source data. Used if import field
names are included in the source data (eg CSV).
import_config_dict: config dict from import config file.
Returns a map as a dictionary of elements with each element structured
as follows:
'archive_field_name': {'field_name': 'source_field_name',
'units': 'unit_name'}
where:
- archive_field_name is an observation name in the WeeWX
database schema
- source_field_name is the name of a field from the external
source
- unit_name is the WeeWX unit name of the units used by
source_field_name
"""
# start with the minimum map
_map = dict(MINIMUM_MAP)
# Do the easy one first, do we have a fixed mapping, if so validate it
if self._header_map:
# We have a static map that maps header fields to WeeWX (eg WU).
# Our static map may have entries for fields that don't exist in our
# source data so step through each field name in our source data and
# only add those that exist to our resulting map.
# first get a list of fields, source could be a DictReader object
# or a list of dicts, a DictReader will have a fieldnames property
try:
_field_names = source.fieldnames
except AttributeError:
# Not a DictReader so need to obtain the dict keys, could just
# pick a record and extract its keys but some records may have
# different keys to others. Use sets and a generator
# comprehension.
_field_names = set().union(*(list(d.keys()) for d in source))
# now iterate over the field names
for _key in _field_names:
# if we know about the field name add it to our map
if _key in self._header_map:
_map[self._header_map[_key]['map_to']] = {'field_name': _key,
'units': self._header_map[_key]['units']}
# Do we have a user specified map, if so construct our field map
elif 'FieldMap' in import_config_dict:
# we have a user specified map so construct our map dict
for _key, _item in six.iteritems(import_config_dict['FieldMap']):
_entry = option_as_list(_item)
# expect 2 parameters for each option: source field, units
if len(_entry) == 2:
# we have 2 parameter so that's field and units
_map[_key] = {'field_name': _entry[0],
'units': _entry[1]}
# if the entry is not empty then it might be valid ie just a
# field name (eg if usUnits is specified)
elif _entry != [''] and len(_entry) == 1:
# we have 1 parameter so it must be just name
_map[_key] = {'field_name': _entry[0]}
else:
# otherwise its invalid so ignore it
pass
# now do some crude error checking
# dateTime. We must have a dateTime mapping. Check for a 'field_name'
# field under 'dateTime' and be prepared to catch the error if it
# does not exist.
try:
if _map['dateTime']['field_name']:
# we have a 'field_name' entry so continue
pass
else:
# something is wrong, we have a 'field_name' entry but it
# is not valid so raise an error
_msg = "Invalid mapping specified in '%s' for " \
"field 'dateTime'." % self.import_config_path
raise WeeImportMapError(_msg)
except KeyError:
_msg = "No mapping specified in '%s' for field " \
"'dateTime'." % self.import_config_path
raise WeeImportMapError(_msg)
# usUnits. We don't have to have a mapping for usUnits but if we
# don't then we must have 'units' specified for each field mapping.
if 'usUnits' not in _map:
# no unit system mapping do we have units specified for
# each individual field
for _key, _val in six.iteritems(_map):
# we don't need to check dateTime and usUnits
if _key not in ['dateTime', 'usUnits']:
if 'units' in _val:
# we have a units field, do we know about it
if _val['units'] not in weewx.units.default_unit_format_dict:
# we have an invalid unit string so tell the
# user and exit
_msg = "Unknown units '%s' specified for " \
"field '%s' in %s." % (_val['units'],
_key,
self.import_config_path)
raise weewx.UnitError(_msg)
else:
# we don't have a units field, that's not allowed
# so raise an error
_msg = "No units specified for source field " \
"'%s' in %s." % (_key,
self.import_config_path)
raise WeeImportMapError(_msg)
# if we got this far we have a usable map, advise the user what we
# will use
_msg = "The following imported field-to-WeeWX field map will be used:"
if self.verbose:
print(_msg)
log.info(_msg)
for _key, _val in six.iteritems(_map):
if 'field_name' in _val:
_units_msg = ""
if 'units' in _val:
_units_msg = " in units '%s'" % _val['units']
_msg = " source field '%s'%s --> WeeWX field '%s'" % (_val['field_name'],
_units_msg,
_key)
if self.verbose:
print(_msg)
log.info(_msg)
else:
# no [[FieldMap]] stanza and no _header_map so raise an error as we
# don't know what to map
_msg = "No '%s' field map found in %s." % (source_type,
self.import_config_path)
raise WeeImportMapError(_msg)
return _map
def mapRawData(self, data, unit_sys=weewx.US):
"""Maps raw data to WeeWX archive record compatible dictionaries.
Takes an iterable source of raw data observations, maps the fields of
each row to a list of WeeWX compatible archive records and performs any
necessary unit conversion.
Input parameters:
data: iterable that yields the data records to be processed.
unit_sys: WeeWX unit system in which the generated records will be
provided. Omission will result in US customary (weewx.US)
being used.
Returns a list of dicts of WeeWX compatible archive records.
"""
# initialise our list of mapped records
_records = []
# initialise some rain variables
_last_ts = None
_last_rain = None
# list of fields we have given the user a warning over, prevents us
# giving multiple warnings for the same field.
_warned = []
# step through each row in our data
for _row in data:
_rec = {}
# first off process the fields that require special processing
# dateTime
if 'field_name' in self.map['dateTime']:
# we have a map for dateTime
try:
_raw_dateTime = _row[self.map['dateTime']['field_name']]
except KeyError:
_msg = "Field '%s' not found in source data." % self.map['dateTime']['field_name']
raise WeeImportFieldError(_msg)
# now process the raw date time data
if isinstance(_raw_dateTime, numbers.Number) or _raw_dateTime.isdigit():
# Our dateTime is a number, is it a timestamp already?
# Try to use it and catch the error if there is one and
# raise it higher.
try:
_rec_dateTime = int(_raw_dateTime)
except ValueError:
_msg = "Invalid '%s' field. Cannot convert '%s' to " \
"timestamp." % (self.map['dateTime']['field_name'],
_raw_dateTime)
raise ValueError(_msg)
else:
# it's a non-numeric string so try to parse it and catch
# the error if there is one and raise it higher
try:
_datetm = time.strptime(_raw_dateTime,
self.raw_datetime_format)
_rec_dateTime = int(time.mktime(_datetm))
except ValueError:
_msg = "Invalid '%s' field. Cannot convert '%s' to " \
"timestamp." % (self.map['dateTime']['field_name'],
_raw_dateTime)
raise ValueError(_msg)
# if we have a timeframe of concern does our record fall within
# it
if (self.first_ts is None and self.last_ts is None) or \
self.first_ts < _rec_dateTime <= self.last_ts:
# we have no timeframe or if we do it falls within it so
# save the dateTime
_rec['dateTime'] = _rec_dateTime
# update earliest and latest record timstamps
if self.earliest_ts is None or _rec_dateTime < self.earliest_ts:
self.earliest_ts = _rec_dateTime
if self.latest_ts is None or _rec_dateTime > self.earliest_ts:
self.latest_ts = _rec_dateTime
else:
# it is not so skip to the next record
continue
else:
# there is no mapped field for dateTime so raise an error
raise ValueError("No mapping for WeeWX field 'dateTime'.")
# usUnits
_units = None
if 'field_name' in self.map['usUnits']:
# we have a field map for a unit system
try:
# The mapped field is in _row so try to get the raw data.
# If its not there then raise an error.
_raw_units = int(_row[self.map['usUnits']['field_name']])
except KeyError:
_msg = "Field '%s' not found in source data." | |
from drawnow import *
from matplotlib import pyplot as plt
import data
import redo_data as rd
from numpy import mean
import random as r
fig = plt.figure()
# Build the 4x6 grid of subplots in one pass and unpack them so the
# per-algorithm plotting functions below can keep referring to ax1 .. ax24.
# (add_subplot(4, 6, k) is equivalent to the shorthand add_subplot(46k).)
(ax1, ax2, ax3, ax4, ax5, ax6,
 ax7, ax8, ax9, ax10, ax11, ax12,
 ax13, ax14, ax15, ax16, ax17, ax18,
 ax19, ax20, ax21, ax22, ax23, ax24) = [
    fig.add_subplot(4, 6, k) for k in range(1, 25)
]
# One matplotlib format string per data series, shared by every subplot.
style = ['g--^', 'r:o', 'b-.s', 'm--*', 'k-.>', 'c--']
def _mov_avg(a1):
ma1 = [] # moving average list
avg1 = 0 # movinf average pointwise
count = 0
for i in range(len(a1)):
count += 1
avg1 = ((count - 1) * avg1 + a1[i]) / count
ma1.append(avg1) # cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return ma1
def one_four():
    """Plot the running-average waiting-time curves for ALG_1 on ax1.

    Each series in data.wt_1 is reduced to ~8 sample points for legible
    markers; the mean of each series' final raw value is printed.
    """
    ax1.grid(True)
    data_ = []  # last raw waiting-time value of each series, for the printed mean
    for i in data.wt_1:
        mv = _mov_avg(data.wt_1[i])
        data_.append(data.wt_1[i][-1])
        # Down-sample the curve to ~8 evenly spaced y-values.
        pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
        if pt[-1] != mv[-1]:
            pt.append(mv[-1])
        # Clamp outlier points (> 10) to just above their neighbour.
        # NOTE(review): mutates pt while iterating it, and pt[a + 1] raises
        # IndexError if the *last* point exceeds 10 — confirm the data never
        # triggers that case.
        for j in pt:
            if j > 10:
                a = pt.index(j)
                pt[a] = pt[a + 1] + 0.3
        # ptx = [mv.index(i) for i in pt]
        # Matching x positions, sampled with the same stride as pt.
        a = list(range(0, len(mv)))
        ptx = a[0:len(a):int((len(a) / 7)) + 1]
        if ptx[-1] != a[-1]:
            ptx.append(a[-1])
        ax1.plot(ptx,
                 pt,
                 style[list(data.wt_1.keys()).index(i)],
                 linewidth=2,
                 )
    print(round(mean(data_), 3))
    ax1.set_title(r'$ALG_1$')
    # ax1.set_ylabel('Moving WT')
    ax1.set_xlabel(r'Time Period')
    ax1.set_ylabel(f'WT (ms)', fontsize=14)
    # ax1.legend()
    plt.subplot(ax1)
def three_four():
    """Plot the running-average waiting-time curves for ALG_2 on ax2.

    Each series in data.wt_3 is reduced to ~8 sample points for legible
    markers; the mean of each series' final raw value is printed.
    """
    ax2.grid(True)
    data_ = []  # last raw waiting-time value of each series, for the printed mean
    for i in data.wt_3:
        mv = _mov_avg(data.wt_3[i])
        # Bug fix: previously read data.wt_1[i][-1] (copy/paste from
        # one_four); the summary must sample this chart's own data set.
        data_.append(data.wt_3[i][-1])
        # Down-sample the curve to ~8 evenly spaced y-values.
        pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
        if pt[-1] != mv[-1]:
            pt.append(mv[-1])
        # NOTE(review): list.index returns the *first* occurrence, so
        # duplicate y-values map to the wrong x — kept as-is to preserve
        # the existing plot output.
        ptx = [mv.index(v) for v in pt]
        ax2.plot(ptx,
                 pt,
                 style[list(data.wt_3.keys()).index(i)],
                 linewidth=2,
                 )
    print(round(mean(data_), 3))
    ax2.set_title(r'$ALG_2$')
    ax2.set_xlabel('Time Period')
    # ax2.legend()
    plt.subplot(ax2)
def five_four():
    """Plot the running-average waiting-time curves for ALG_3 on ax3.

    Each series in data.wt_5 is reduced to ~8 sample points; the mean of
    each plotted series' final value is printed.
    """
    ax3.grid(True)
    data_ = []  # final plotted value of each series, for the printed mean
    for i in data.wt_5:
        mv = _mov_avg(data.wt_5[i])
        # NOTE(review): short series (< 200 points) are replaced by a
        # jittered copy of the *second* series' raw data (only the first
        # point is kept) — this fabricates the curve; confirm intentional.
        if len(mv) < 200:
            n = mv[0]
            k = data.wt_5[list(data.wt_5.keys())[1]]
            mv = [x + r.uniform(0.02, 0.05) for x in k]
            mv[0] = n
        # Down-sample the curve to ~8 evenly spaced y-values.
        pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
        data_.append(mv[-1])
        if pt[-1] != mv[-1]:
            pt.append(mv[-1])
        # ptx = [mv.index(i) for i in pt]
        # Matching x positions, sampled with the same stride as pt.
        a = list(range(0, len(mv)))
        ptx = a[0:len(a):int((len(a) / 7)) + 1]
        if ptx[-1] != a[-1]:
            ptx.append(a[-1])
        #print(f'mv = {len(mv)}')
        ax3.plot(ptx,
                 pt,
                 style[list(data.wt_5.keys()).index(i)],
                 linewidth=2,
                 )
    print(round(mean(data_),3))
    ax3.set_title(r'$ALG_3$')
    ax3.set_xlabel('Time Period')
    # ax3.legend()
    plt.subplot(ax3)
def eight_four():
    """Plot the running-average waiting-time curves for ALG_4 on ax4.

    Each series in data.wt_8 is reduced to ~8 sample points; the mean of
    each plotted series' final value is printed.
    """
    ax4.grid(True)
    data_ = []  # final plotted value of each series, for the printed mean
    for i in data.wt_8:
        mv = _mov_avg(data.wt_8[i])
        # NOTE(review): short series (< 200 points) are replaced by a
        # jittered copy of the *second* series' raw data (only the first
        # point is kept) — this fabricates the curve; confirm intentional.
        if len(mv) < 200:
            n = mv[0]
            k = data.wt_8[list(data.wt_8.keys())[1]]
            mv = [x + r.uniform(0.02, 0.03) for x in k]
            mv[0] = n
        data_.append(mv[-1])
        # Down-sample the curve to ~8 evenly spaced y-values.
        pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
        if pt[-1] != mv[-1]:
            pt.append(mv[-1])
        # ptx = [mv.index(i) for i in pt]
        # Matching x positions, sampled with the same stride as pt.
        a = list(range(0, len(mv)))
        ptx = a[0:len(a):int((len(a) / 7)) + 1]
        if ptx[-1] != a[-1]:
            ptx.append(a[-1])
        #print(f'mv = {len(mv)}')
        ax4.plot(ptx,
                 pt,
                 style[list(data.wt_8.keys()).index(i)],
                 linewidth=2,
                 )
    print(round(mean(data_),3))
    ax4.set_title(r'$ALG_4$')
    ax4.set_xlabel('Time Period')
    # ax4.legend()
    plt.subplot(ax4)
def eleven_four():
    """Plot moving-average waiting-time curves for ALG_5 (``data.wt_11``) on ax5."""
    ax5.grid(True)
    data_ = []
    for i in data.wt_11:
        mv = _mov_avg(data.wt_11[i])
        # Short runs (< 200 samples) are replaced by a noisy clone of the
        # second series; RNG call order is part of the output.
        if len(mv) < 200:
            n = mv[0]
            k = data.wt_11[list(data.wt_11.keys())[1]]
            mv = [x + r.uniform(0.02, 0.03) for x in k]
            mv[0] = n
        data_.append(mv[-1])
        pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
        if pt[-1] != mv[-1]:
            pt.append(mv[-1])
        # Clamp samples above 10 to just over the following sample.
        # NOTE(review): this mutates pt while iterating it, pt.index(j)
        # finds the FIRST occurrence of the value, and pt[a + 1] raises
        # IndexError when the last sample exceeds 10 — confirm intended.
        for j in pt:
            if j > 10:
                a = pt.index(j)
                pt[a] = pt[a + 1] + 0.3
        # ptx = [mv.index(i) for i in pt]
        a = list(range(0, len(mv)))
        ptx = a[0:len(a):int((len(a) / 7)) + 1]
        if ptx[-1] != a[-1]:
            ptx.append(a[-1])
        #print(f'mv = {len(mv)}')
        ax5.plot(ptx,
                 pt,
                 style[list(data.wt_11.keys()).index(i)],
                 linewidth=2,
                 )
    print(round(mean(data_),3))
    ax5.set_title(r'$ALG_5$')
    ax5.set_xlabel('Time Period')
    # ax5.legend()
    plt.subplot(ax5)
def sixteen_four():
    """Plot moving-average waiting-time curves for ALG_6 (``data.wt_16``) on ax6."""
    ax6.grid(True)
    data_ = []
    for i in data.wt_16:
        mv = _mov_avg(data.wt_16[i])
        pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
        if pt[-1] != mv[-1]:
            pt.append(mv[-1])
        # Clamp samples above 10 to just over the following sample.
        # NOTE(review): mutates pt while iterating; pt.index(j) finds the
        # first occurrence; pt[a + 1] IndexErrors if the last sample > 10.
        for j in pt:
            if j > 10:
                a = pt.index(j)
                pt[a] = pt[a + 1] + 0.3
        # ptx = [mv.index(i) for i in pt]
        a = list(range(0, len(mv)))
        data_.append(mv[-1])
        ptx = a[0:len(a):int((len(a) / 7)) + 1]
        if ptx[-1] != a[-1]:
            ptx.append(a[-1])
        # ptx = [mv.index(i) for i in pt]
        ax6.plot(ptx,
                 pt,
                 style[list(data.wt_16.keys()).index(i)],
                 linewidth=2,
                 )
    print(round(mean(data_),3))
    ax6.set_title(r'$ALG_6$')
    ax6.set_xlabel('Time Period')
    # ax6.legend()
    plt.subplot(ax6)
def one_five():
    """Plot moving-average waiting-time curves for ``data.wt_1_5`` on ax7."""
    ax7.grid(True)
    data_ = []
    for i in data.wt_1_5:
        mv = _mov_avg(data.wt_1_5[i])
        pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
        if pt[-1] != mv[-1]:
            pt.append(mv[-1])
        # ptx = [mv.index(i) for i in pt]
        # Clamp samples above 10 to just over the following sample.
        # NOTE(review): mutates pt while iterating; pt.index(j) finds the
        # first occurrence; pt[a + 1] IndexErrors if the last sample > 10.
        for j in pt:
            if j > 10:
                a = pt.index(j)
                pt[a] = pt[a + 1] + 0.3
        # ptx = [mv.index(i) for i in pt]
        data_.append(mv[-1])
        a = list(range(0, len(mv)))
        ptx = a[0:len(a):int((len(a) / 7)) + 1]
        if ptx[-1] != a[-1]:
            ptx.append(a[-1])
        ax7.plot(ptx,
                 pt,
                 style[list(data.wt_1_5.keys()).index(i)],
                 linewidth=2,
                 )
    # ax7.set_ylabel('Moving WT')
    print(round(mean(data_),3))
    ax7.set_xlabel('Time Period')
    ax7.set_ylabel(f'WT (ms)', fontsize=14)
    # ax7.legend()
    plt.subplot(ax7)
def three_five():
    """Plot moving-average waiting-time curves for ``data.wt_3_5`` on ax8.

    Draws one downsampled (~8 point) curve per key and prints the
    rounded mean of the final moving-average values.
    """
    ax8.grid(True)
    finals = []
    keys = list(data.wt_3_5.keys())
    for key in data.wt_3_5:
        series = _mov_avg(data.wt_3_5[key])
        finals.append(series[-1])
        stride = int((len(series) / 7)) + 1
        samples = series[0:len(series):stride]
        # Always include the very last value of the series.
        if samples[-1] != series[-1]:
            samples.append(series[-1])
        xs = [series.index(v) for v in samples]
        ax8.plot(xs, samples, style[keys.index(key)], linewidth=2)
    print(round(mean(finals), 3))
    ax8.set_xlabel('Time Period')
    # ax8.legend()
    plt.subplot(ax8)
def five_five():
    """Plot moving-average waiting-time curves for ``data.wt_5_5`` on ax9."""
    ax9.grid(True)
    data_ = []
    for i in data.wt_5_5:
        mv = _mov_avg(data.wt_5_5[i])
        pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
        if pt[-1] != mv[-1]:
            pt.append(mv[-1])
        # ptx = [mv.index(i) for i in pt]
        data_.append(mv[-1])
        # Clamp samples above 10 to just over the following sample.
        # NOTE(review): mutates pt while iterating; pt.index(j) finds the
        # first occurrence; pt[a + 1] IndexErrors if the last sample > 10.
        for j in pt:
            if j > 10:
                a = pt.index(j)
                pt[a] = pt[a + 1] + 0.3
        # ptx = [mv.index(i) for i in pt]
        a = list(range(0, len(mv)))
        ptx = a[0:len(a):int((len(a) / 7)) + 1]
        if ptx[-1] != a[-1]:
            ptx.append(a[-1])
        ax9.plot(ptx,
                 pt,
                 style[list(data.wt_5_5.keys()).index(i)],
                 linewidth=2,
                 )
    print(round(mean(data_),3))
    ax9.set_xlabel('Time Period')
    # ax9.legend()
    plt.subplot(ax9)
def eight_five():
    """Plot moving-average waiting-time curves for ``data.wt_8_5`` on ax10.

    One downsampled (~8 point) curve per key; prints the rounded mean of
    the final moving-average values.
    """
    ax10.grid(True)
    finals = []
    keys = list(data.wt_8_5.keys())
    for key in data.wt_8_5:
        series = _mov_avg(data.wt_8_5[key])
        finals.append(series[-1])
        stride = int((len(series) / 7)) + 1
        samples = series[0:len(series):stride]
        # Always include the very last value of the series.
        if samples[-1] != series[-1]:
            samples.append(series[-1])
        xs = [series.index(v) for v in samples]
        ax10.plot(xs, samples, style[keys.index(key)], linewidth=2)
    print(round(mean(finals), 3))
    ax10.set_xlabel('Time Period')
    # ax10.legend()
    plt.subplot(ax10)
def eleven_five():
    """Plot moving-average waiting-time curves for ``data.wt_11_5`` on ax11."""
    ax11.grid(True)
    data_ = []
    for i in data.wt_11_5:
        mv = _mov_avg(data.wt_11_5[i])
        data_.append(mv[-1])
        pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
        if pt[-1] != mv[-1]:
            pt.append(mv[-1])
        # ptx = [mv.index(i) for i in pt]
        # Clamp samples above 10 to just over the following sample.
        # NOTE(review): mutates pt while iterating; pt.index(j) finds the
        # first occurrence; pt[a + 1] IndexErrors if the last sample > 10.
        for j in pt:
            if j > 10:
                a = pt.index(j)
                pt[a] = pt[a + 1] + 0.3
        # ptx = [mv.index(i) for i in pt]
        a = list(range(0, len(mv)))
        ptx = a[0:len(a):int((len(a) / 7)) + 1]
        if ptx[-1] != a[-1]:
            ptx.append(a[-1])
        ax11.plot(ptx,
                 pt,
                 style[list(data.wt_11_5.keys()).index(i)],
                 linewidth=2,
                 )
    print(round(mean(data_),3))
    ax11.set_xlabel('Time Period')
    # ax11.legend()
    plt.subplot(ax11)
def sixteen_five():
    """Plot moving-average waiting-time curves for ``data.wt_16_5`` on ax12."""
    ax12.grid(True)
    data_ = []
    for i in data.wt_16_5:
        mv = _mov_avg(data.wt_16_5[i])
        data_.append(mv[-1])
        pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
        if pt[-1] != mv[-1]:
            pt.append(mv[-1])
        # ptx = [mv.index(i) for i in pt]
        # Clamp samples above 10 to just over the following sample.
        # NOTE(review): mutates pt while iterating; pt.index(j) finds the
        # first occurrence; pt[a + 1] IndexErrors if the last sample > 10.
        for j in pt:
            if j > 10:
                a = pt.index(j)
                pt[a] = pt[a + 1] + 0.3
        # ptx = [mv.index(i) for i in pt]
        a = list(range(0, len(mv)))
        ptx = a[0:len(a):int((len(a) / 7)) + 1]
        if ptx[-1] != a[-1]:
            ptx.append(a[-1])
        ax12.plot(ptx,
                 pt,
                 style[list(data.wt_16_5.keys()).index(i)],
                 linewidth=2,
                 )
    print(round(mean(data_),3))
    ax12.set_xlabel('Time Period')
    # ax12.legend()
    plt.subplot(ax12)
def one_six():
    """Plot moving-average waiting-time curves for ``data.wt_1_6`` on ax13."""
    ax13.grid(True)
    data_ = []
    for i in data.wt_1_6:
        mv = _mov_avg(data.wt_1_6[i])
        data_.append(mv[-1])
        pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
        if pt[-1] != mv[-1]:
            pt.append(mv[-1])
        # ptx = [mv.index(i) for i in pt]
        # Clamp samples above 10 to just over the following sample.
        # NOTE(review): mutates pt while iterating; pt.index(j) finds the
        # first occurrence; pt[a + 1] IndexErrors if the last sample > 10.
        for j in pt:
            if j > 10:
                a = pt.index(j)
                pt[a] = pt[a + 1] + 0.3
        # ptx = [mv.index(i) for i in pt]
        a = list(range(0, len(mv)))
        ptx = a[0:len(a):int((len(a) / 7)) + 1]
        if ptx[-1] != a[-1]:
            ptx.append(a[-1])
        ax13.plot(ptx,
                 pt,
                 style[list(data.wt_1_6.keys()).index(i)],
                 linewidth=2,
                 )
    # ax13.set_ylabel('Moving WT')
    print(round(mean(data_),3))
    ax13.set_xlabel('Time Period')
    ax13.set_ylabel(f'WT (ms)', fontsize=14)
    # ax13.legend()
    plt.subplot(ax13)
def three_six():
    """Plot moving-average waiting-time curves for ``data.wt_3_6`` on ax14."""
    ax14.grid(True)
    data_ = []
    for i in data.wt_3_6:
        mv = _mov_avg(data.wt_3_6[i])
        # Short runs (< 300 samples) are replaced by a noisy clone of the
        # second series; RNG call order is part of the output.
        if len(mv) < 300:
            n = mv[0]
            k = data.wt_3_6[list(data.wt_3_6.keys())[1]]
            mv = [x + r.uniform(0.02, 0.05) for x in k]
            mv[0] = n
        data_.append(mv[-1])
        pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
        if pt[-1] != mv[-1]:
            pt.append(mv[-1])
        ptx = [mv.index(i) for i in pt]
        ax14.plot(ptx,
                 pt,
                 style[list(data.wt_3_6.keys()).index(i)],
                 linewidth=2,
                 )
    print(round(mean(data_),3))
    ax14.set_xlabel('Time Period', fontdict={'size':14})
    # ax14.legend()
    plt.subplot(ax14)
def five_six():
ax15.grid(True)
data_ = []
for i in data.wt_5_6:
mv = _mov_avg(data.wt_5_6[i])
data_.append(mv[-1])
pt = mv[0:len(mv):int((len(mv) / 7)) + 1]
if pt[-1] != mv[-1]:
pt.append(mv[-1])
# ptx = [mv.index(i) for i in pt]
| |
<gh_stars>100-1000
# -*- coding: utf-8 -*-
import json
import os
import datetime
import tempfile
import shutil
import pytz
from nose.tools import eq_, ok_
from django.contrib.auth.models import User, Group, Permission
from django.conf import settings
from django.utils import timezone
from django.utils.timezone import utc
from django.core import mail
from django.core.files import File
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_text
from airmozilla.main.models import (
SuggestedEvent,
SuggestedEventComment,
SuggestedCuratedGroup,
Event,
Location,
Channel,
Tag,
Picture,
Topic,
)
from airmozilla.comments.models import SuggestedDiscussion
from airmozilla.base.tests.testbase import DjangoTestCase
_here = os.path.dirname(__file__)
HAS_OPENGRAPH_FILE = os.path.join(_here, 'has_opengraph.html')
PNG_FILE = os.path.join(_here, 'popcorn.png')
class HeadResponse(object):
    """Tiny test double mimicking a HEAD response: exposes only a
    ``headers`` mapping built from the keyword arguments."""

    def __init__(self, **headers):
        # Keep the keyword arguments verbatim as the headers mapping.
        self.headers = headers
class TestPages(DjangoTestCase):
    # Placeholder image attached to events that need a picture.
    placeholder = 'airmozilla/manage/tests/firefox.png'

    def setUp(self):
        """Create and sign in a superuser, and make a scratch directory."""
        super(TestPages, self).setUp()
        self.user = User.objects.create_superuser('fake', '<EMAIL>', 'fake')
        # BUG FIX: log in with the same password the user was created
        # with; the previous literal did not match, so this assert (and
        # therefore every test in the class) could never pass.
        assert self.client.login(username='fake', password='fake')
        self.tmp_dir = tempfile.mkdtemp()
    def tearDown(self):
        """Remove the scratch directory created in setUp()."""
        super(TestPages, self).tearDown()
        shutil.rmtree(self.tmp_dir)
    def _make_suggested_event(
        self,
        title="Cool O'Title",
        slug='cool-title',
        description='Some long description',
        short_description='Short description',
        additional_links='http://www.peterbe.com\n',
        location=None,
        start_time=None,
    ):
        """Create and return a SuggestedEvent owned by ``self.user``.

        Defaults to the 'Mountain View' fixture location and a fixed
        2014-01-01 12:00 UTC start time; attaches two tags and one
        channel so downstream views have related data to render.
        """
        location = location or Location.objects.get(name='Mountain View')
        start_time = start_time or datetime.datetime(
            2014, 1, 1, 12, 0, 0
        )
        start_time = start_time.replace(tzinfo=utc)
        event = SuggestedEvent.objects.create(
            user=self.user,
            title=title,
            slug=slug,
            description=description,
            short_description=short_description,
            location=location,
            start_time=start_time,
            additional_links=additional_links,
        )
        tag1 = Tag.objects.create(name='Tag1')
        tag2 = Tag.objects.create(name='Tag2')
        event.tags.add(tag1)
        event.tags.add(tag2)
        channel = Channel.objects.create(name='ChannelX', slug='channelx')
        event.channels.add(channel)
        return event
    def test_link_to_suggest(self):
        """The front page should contain a link to the suggest-start page."""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.placeholder)
        response = self.client.get('/')
        eq_(response.status_code, 200)
        start_url = reverse('suggest:start')
        response_content = response.content.decode('utf-8')
        ok_(start_url in response_content)
    def test_unauthorized(self):
        """An anonymous client must be redirected to the login page."""
        self.client.logout()
        response = self.client.get(reverse('suggest:start'))
        self.assertRedirects(
            response, settings.LOGIN_URL +
            '?next=' + reverse('suggest:start')
        )
    def test_start(self):
        """Posting a title creates a SuggestedEvent with an auto slug,
        no start time, CREATED status, and redirects to the description
        step."""
        url = reverse('suggest:start')
        response = self.client.get(url)
        eq_(response.status_code, 200)
        response = self.client.post(url, {
            'title': 'A New World',
        })
        eq_(response.status_code, 302)
        event = SuggestedEvent.objects.get(title='A New World')
        url = reverse('suggest:description', args=(event.pk,))
        eq_(event.slug, 'a-new-world')
        ok_(not event.start_time)
        eq_(event.status, SuggestedEvent.STATUS_CREATED)
        self.assertRedirects(response, url)
    def test_start_duplicate_slug(self):
        """When the auto-generated slug collides with an existing event,
        a numeric suffix is appended."""
        event = Event.objects.get(slug='test-event')
        event.title = 'Some Other Title'
        event.save()
        url = reverse('suggest:start')
        response = self.client.post(url, {
            'title': 'TEST Event',
        })
        eq_(response.status_code, 302)
        suggested_event, = SuggestedEvent.objects.all()
        eq_(
            suggested_event.slug,
            'test-event-2'
        )
    def test_start_duplicate_slug_desperate(self):
        """Even with a date-suffixed slug also taken, the numeric suffix
        fallback still yields a unique slug."""
        today = timezone.now()
        event = Event.objects.get(slug='test-event')
        event.title = 'Some Other Title'
        event.save()
        Event.objects.create(
            title='Entirely Different',
            slug=today.strftime('test-event-%Y%m%d'),
            start_time=today,
        )
        url = reverse('suggest:start')
        response = self.client.post(url, {
            'title': 'TEST Event',
        })
        eq_(response.status_code, 302)
        suggested_event, = SuggestedEvent.objects.all()
        eq_(
            suggested_event.slug,
            'test-event-2'
        )
    def test_start_duplicate_title(self):
        """Duplicate titles are allowed; only the slug gets de-duplicated."""
        SuggestedEvent.objects.create(
            user=self.user,
            title='A New World',
            slug='a-new-world',
            short_description='Short Description',
            description='Long Description',
        )
        url = reverse('suggest:start')
        response = self.client.post(url, {
            'title': 'A New World',
        })
        eq_(response.status_code, 302)
        eq_(SuggestedEvent.objects.filter(title='A New World').count(), 2)
        eq_(SuggestedEvent.objects.filter(slug='a-new-world').count(), 1)
        eq_(SuggestedEvent.objects.filter(slug='a-new-world-2').count(), 1)
    def test_start_invalid_entry(self):
        """An empty title is a form error; a clashing title still works
        because the slug gets auto-generated."""
        # you can either get a form error if the slug is already
        # taken by an event or if only a title is entered and no slug,
        # but the autogenerated slug is taken
        # know thee fixtures
        Event.objects.get(title='Test event', slug='test-event')
        url = reverse('suggest:start')
        response = self.client.post(url, {'title': ''})
        eq_(response.status_code, 200)
        # NOTE(review): response.content is bytes on Python 3; this
        # substring check assumes the Python 2 era Django used here.
        ok_('Form error' in response.content)
        response = self.client.post(url, {'title': 'Cool Title'})
        eq_(response.status_code, 302)
        suggested_event, = SuggestedEvent.objects.all()
        eq_(suggested_event.title, 'Cool Title')
        eq_(suggested_event.slug, 'cool-title')
    def test_title(self):
        """The title step rejects a blank title / spaced slug and, on
        valid input, redirects to the description step."""
        event = SuggestedEvent.objects.create(
            user=self.user,
            title='Cool Title',
            slug='cool-title',
        )
        url = reverse('suggest:title', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        data = {
            'title': '',
            'slug': 'contains spaces',
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 200)
        ok_('Form errors' in response.content)
        data = {
            'title': 'New Title',
            'slug': 'new-slug',
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        next_url = reverse('suggest:description', args=(event.pk,))
        self.assertRedirects(response, next_url)
    def test_upload_placeholder(self):
        """Uploading a placeholder image advances to the summary step."""
        location, = Location.objects.filter(name='Mountain View')
        today = timezone.now()
        event = SuggestedEvent.objects.create(
            user=self.user,
            title='Cool Title',
            slug='cool-title',
            short_description='Short Description',
            description='Description',
            start_time=today,
            location=location
        )
        url = reverse('suggest:placeholder', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        with open(self.placeholder) as fp:
            data = {'placeholder_img': fp}
            response = self.client.post(url, data)
        eq_(response.status_code, 302)
        next_url = reverse('suggest:summary', args=(event.pk,))
        self.assertRedirects(response, next_url)
    def test_select_placeholder_from_gallery(self):
        """Picking an existing gallery Picture advances to the summary step."""
        location, = Location.objects.filter(name='Mountain View')
        today = timezone.now()
        event = SuggestedEvent.objects.create(
            user=self.user,
            title='Cool Title',
            slug='cool-title',
            short_description='Short Description',
            description='Description',
            start_time=today,
            location=location
        )
        url = reverse('suggest:placeholder', args=(event.pk,))
        with open(self.placeholder) as fp:
            picture_id = Picture.objects.create(file=File(fp)).id
        response = self.client.get(url)
        eq_(response.status_code, 200)
        data = {'picture': picture_id}
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        next_url = reverse('suggest:summary', args=(event.pk,))
        self.assertRedirects(response, next_url)
    def test_change_picture(self):
        """Re-selecting a picture works; the placeholder page must not be
        cached (no-cache / max-age=0)."""
        location, = Location.objects.filter(name='Mountain View')
        today = timezone.now()
        with open(self.placeholder) as fp:
            picture = Picture.objects.create(file=File(fp))
        event = SuggestedEvent.objects.create(
            user=self.user,
            title='Cool Title',
            slug='cool-title',
            short_description='Short Description',
            description='Description',
            start_time=today,
            location=location,
            picture=picture
        )
        url = reverse('suggest:placeholder', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('no-cache' in response['Cache-Control'])
        ok_('max-age=0' in response['Cache-Control'])
        data = {'picture': picture.id}
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        next_url = reverse('suggest:summary', args=(event.pk,))
        self.assertRedirects(response, next_url)
    def test_creating_event_without_placeholder_or_picture(self):
        """Submitting neither an upload nor a gallery picture yields a
        form-level validation error."""
        location, = Location.objects.filter(name='Mountain View')
        today = timezone.now()
        event = SuggestedEvent.objects.create(
            user=self.user,
            title='Cool Title',
            slug='cool-title',
            short_description='Short Description',
            description='Description',
            start_time=today,
            location=location
        )
        url = reverse('suggest:placeholder', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        data = {}
        response = self.client.post(url, data)
        ok_('Events needs to have a picture' in
            response.context['form'].errors['__all__'])
    def test_not_yours_to_edit(self):
        """Every wizard step (and delete) returns 400 for a suggested
        event owned by a different user."""
        jane = User.objects.create_user('jane')
        event = SuggestedEvent.objects.create(
            user=jane,
            title='Cool Title',
            slug='cool-title',
        )
        url = reverse('suggest:title', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 400)
        url = reverse('suggest:description', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 400)
        url = reverse('suggest:details', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 400)
        url = reverse('suggest:placeholder', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 400)
        url = reverse('suggest:summary', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 400)
        # and not yours to delete
        url = reverse('suggest:delete', args=(event.pk,))
        response = self.client.post(url)
        eq_(response.status_code, 400)
    def test_description(self):
        """The description step strips whitespace from both description
        fields and redirects to the details step."""
        event = SuggestedEvent.objects.create(
            user=self.user,
            title='Cool Title',
            slug='cool-title',
        )
        url = reverse('suggest:description', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('no-cache' in response['Cache-Control'])
        ok_('max-age=0' in response['Cache-Control'])
        data = {
            'description': 'This is my cool description ',
            'short_description': ' '
        }
        response = self.client.post(url, data)
        next_url = reverse('suggest:details', args=(event.pk,))
        self.assertRedirects(response, next_url)
        event = SuggestedEvent.objects.get(pk=event.pk)
        eq_(event.description, data['description'].strip())
        eq_(event.short_description, data['short_description'].strip())
        data['short_description'] = 'Really cool '
        response = self.client.post(url, data)
        self.assertRedirects(response, next_url)
        event = SuggestedEvent.objects.get(pk=event.pk)
        eq_(event.description, data['description'].strip())
        eq_(event.short_description, data['short_description'].strip())
        # XXX should there be some length restrictions
        # on `description` or `short_description`?
    def test_details(self):
        """The details step stores timezone-converted start time,
        duration, location, tags, channels and links, and re-posting
        with different tags replaces the tag set."""
        event = SuggestedEvent.objects.create(
            user=self.user,
            title='Cool Title',
            slug='cool-title',
            description='Some long description',
            short_description=''
        )
        url = reverse('suggest:details', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('no-cache' in response['Cache-Control'])
        ok_('max-age=0' in response['Cache-Control'])
        mv = Location.objects.get(name='Mountain View')
        channel = Channel.objects.create(
            name='Security',
            slug='security'
        )
        tag1 = Tag.objects.create(
            name='foo'
        )
        tag2 = Tag.objects.create(
            name='bar'
        )
        data = {
            'start_time': '2021-01-01 12:00:00',
            'estimated_duration': str(60 * 60 * 2),
            'timezone': 'US/Pacific',
            'location': mv.pk,
            'privacy': Event.PRIVACY_CONTRIBUTORS,
            'tags': [tag1.name, tag2.name],
            'channels': channel.pk,
            'additional_links': 'http://www.peterbe.com\n',
            'call_info': 'vidyo room',
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        next_url = reverse('suggest:placeholder', args=(event.pk,))
        self.assertRedirects(response, next_url)
        event = SuggestedEvent.objects.get(pk=event.pk)
        # 1st January 2021 at 12:00 in US/Pacific is 20:00 in UTC
        eq_(event.start_time.strftime('%Y-%m-%d'), '2021-01-01')
        eq_(event.start_time.strftime('%H:%M'), '20:00')
        eq_(event.start_time.tzname(), 'UTC')
        eq_(event.estimated_duration, 60 * 60 * 2)
        eq_(event.location, mv)
        eq_([x.name for x in event.tags.all()], ['foo', 'bar'])
        eq_(event.channels.all()[0], channel)
        eq_(event.additional_links, data['additional_links'].strip())
        eq_(event.call_info, 'vidyo room')
        # do it again, but now with different tags
        data['tags'] = ['buzz', 'bar']
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        event = SuggestedEvent.objects.get(pk=event.pk)
        eq_(
            sorted(x.name for x in event.tags.all()),
            sorted(['bar', 'buzz'])
        )
    def test_details_with_curated_groups(self):
        """Choosing 'some contributors' privacy requires at least one
        curated group; supplying one creates a SuggestedCuratedGroup."""
        event = SuggestedEvent.objects.create(
            user=self.user,
            title='Cool Title',
            slug='cool-title',
            description='Some long description',
            short_description=''
        )
        url = reverse('suggest:details', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        mv = Location.objects.get(name='Mountain View')
        channel = Channel.objects.create(
            name='Security',
            slug='security'
        )
        tag1 = Tag.objects.create(
            name='foo'
        )
        tag2 = Tag.objects.create(
            name='bar'
        )
        data = {
            'start_time': '2021-01-01 12:00:00',
            'estimated_duration': str(60 * 60 * 2),
            'timezone': 'US/Pacific',
            'location': mv.pk,
            'privacy': SuggestedEvent.PRIVACY_SOME_CONTRIBUTORS,
            'tags': [tag1.name, tag2.name],
            'channels': channel.pk,
            'additional_links': 'http://www.peterbe.com\n',
            'call_info': 'vidyo room',
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 200)
        ok_('privacy but no Curated Groups selected' in response.content)
        data['curated_groups'] = ['Group111']
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        ok_(SuggestedCuratedGroup.objects.filter(
            event=event,
            name='Group111'
        ))
    def test_details_discussion_stays_disabled(self):
        """The enable_discussion checkbox defaults to False on the form."""
        event = SuggestedEvent.objects.create(
            user=self.user,
            title='No discussion please!',
            slug='no-discussion',
            description='I don\'t like critisism',
            short_description=''
        )
        url = reverse('suggest:details', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        eq_(response.context['form']['enable_discussion'].value(), False)
    def test_details_enable_discussion(self):
        """Enabling discussion creates a SuggestedDiscussion with the
        owner as sole moderator, is idempotent on re-post, and skips the
        discussion step when disabled."""
        # NOTE(review): '<PASSWORD>' looks like a redaction artifact; the
        # original credential presumably matched the created user.
        assert self.client.login(username='fake', password='<PASSWORD>')
        event = SuggestedEvent.objects.create(
            user=self.user,
            title='Cool Title',
            slug='cool-title',
            description='Some long description',
            short_description=''
        )
        url = reverse('suggest:details', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        mv = Location.objects.get(name='Mountain View')
        channel = Channel.objects.create(
            name='Security',
            slug='security'
        )
        data = {
            'start_time': '2021-01-01 12:00:00',
            'estimated_duration': '3600',
            'timezone': 'US/Pacific',
            'location': mv.pk,
            'privacy': Event.PRIVACY_CONTRIBUTORS,
            'channels': channel.pk,
            'enable_discussion': True
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        next_url = reverse('suggest:discussion', args=(event.pk,))
        self.assertRedirects(response, next_url)
        discussion = SuggestedDiscussion.objects.get(
            event=event,
            enabled=True
        )
        eq_(discussion.moderators.all().count(), 1)
        ok_(self.user in discussion.moderators.all())
        # assert that we're still signed in
        assert self.client.session['_auth_user_id']
        # do it a second time and it shouldn't add us as a moderator again
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        self.assertRedirects(response, next_url)
        discussion = SuggestedDiscussion.objects.get(pk=discussion.pk)
        eq_(discussion.moderators.all().count(), 1)
        # this time, disable it
        response = self.client.post(url, dict(data, enable_discussion=False))
        eq_(response.status_code, 302)
        next_url = reverse('suggest:placeholder', args=(event.pk,))
        self.assertRedirects(response, next_url)
| |
if len(theseKeysP) > 0: # at least one key was pressed
if "backspace" in theseKeysP:
key_resp_2.keys=key_resp_2.keys[:-1]
key_resp_2.keys.extend([key for key in theseKeysP if key != "return" and key != "backspace"])
for n, i in enumerate(key_resp_2.keys):
if i =='num_1':
key_resp_2.keys[n] = '1'
elif i =='num_2':
key_resp_2.keys[n] = '2'
elif i =='num_3':
key_resp_2.keys[n] = '3'
elif i =='num_4':
key_resp_2.keys[n] = '4'
elif i =='num_5':
key_resp_2.keys[n] = '5'
elif i =='num_6':
key_resp_2.keys[n] = '6'
elif i =='num_7':
key_resp_2.keys[n] = '7'
elif i =='num_8':
key_resp_2.keys[n] = '8'
elif i =='num_9':
key_resp_2.keys[n] = '9'
elif i =='num_0':
key_resp_2.keys[n] = '0'
# Atext.setText("".join(key_resp_3.keys))
# convert the list of strings into a single string
key_str2 = "".join(key_resp_2.keys)
ptext.setText(key_str2)
ptext.draw()
win.flip()
# # event.waitKeys(5,keyList = ['return'])
core.wait(0.5)
if len(key_str2) !=0:
# then convert the string to a number
key_num2 = int(key_str2)
if "return" in theseKeysP:
# ptext.setText('')
# ptext.draw()
# win.flip()
# core.wait(0.5)
continueRoutine=False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in practiceComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "practice"-------
for thisComponent in practiceComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if key_resp_2.keys in ['', [], None]: # No response was made
key_resp_2.keys=None
p_trials.addData('key_resp_2.keys',key_resp_2.keys)
if key_resp_2.keys != None: # we had a response
p_trials.addData('key_resp_2.rt', key_resp_2.rt)
# the Routine "practice" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
thisExp.nextEntry()
# completed 5 repeats of 'p_trials'
# ------Prepare to start Routine "endP"-------
# Shows the "end of practice" screen and waits for spacebar.
t = 0
endPClock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
# keep track of which components have finished
endPComponents = []
for thisComponent in endPComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# -------Start Routine "endP"-------
# NOTE(review): the TextStims below are re-created on every loop pass and
# event.waitKeys() blocks until spacebar, so in practice the body runs
# once; with endPComponents empty, continueRoutine then ends the routine.
while continueRoutine:
    # get current time
    t = endPClock.getTime()
    frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    end_practice1 = visual.TextStim(win, pos=[0,+35],units = 'pix')
    end_practice1.setText('This is the end of practice')
    end_practice2 = visual.TextStim(win, pos=[0, 0], units = 'pix')
    end_practice2.setText('There are 10 blocks of the real experiment, you will see 3 reference images before each block.')
    end_practice3 = visual.TextStim(win, pos=[0, -35], units = 'pix')
    end_practice3.setText('Hit spacebar to start the real experiment.')
    end_practice1.draw()
    end_practice2.draw()
    end_practice3.draw()
    win.flip()
    event.waitKeys(keyList = ['space'])
    # check if all components have finished
    if not continueRoutine: # a component has requested a forced-end of Routine
        break
    continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in endPComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break # at least one component has not yet finished
    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()
    # refresh the screen
    if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "endP"-------
for thisComponent in endPComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# the Routine "endP" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
# Blocks run sequentially in the order given by the participant-specific
# blockOrder CSV file.
blocks = data.TrialHandler(nReps=1, method='sequential',
    extraInfo=expInfo, originPath=-1,
    trialList=data.importConditions("blockOrder"+expInfo['blockOrder']+".csv"),
    seed=None, name='blocks')
thisExp.addLoop(blocks) # add the loop to the experiment
thisBlock = blocks.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisBlock.rgb)
# NOTE(review): PsychoPy-Builder boilerplate — exec() injects each
# condition column as a global variable (e.g. ref_image1, Number1).
if thisBlock != None:
    for paramName in thisBlock.keys():
        exec(paramName + '= thisBlock.' + paramName)
for thisBlock in blocks:
    currentLoop = blocks
    # abbreviate parameter names if possible (e.g. rgb = thisBlock.rgb)
    if thisBlock != None:
        for paramName in thisBlock.keys():
            exec(paramName + '= thisBlock.' + paramName)
# ------Prepare to start Routine "instr2"-------
t = 0
instr2Clock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
# keep track of which components have finished
instr2Components = []
for thisComponent in instr2Components:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "instr2"-------
while continueRoutine:
# get current time
t = instr2Clock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
fix = visual.TextStim(win, pos = [0, 0], bold = True, units = 'pix')
block_text = visual.TextStim(win, pos=[0, 0], units = 'pix')
block_text.setText('Fixate to the center of screen and press spacebar to see the reference display.')
block_text.draw()
win.flip()
event.waitKeys(keyList = ['space'])
fix.setText('+')
fix.setColor(u'black')
fix.draw()
win.flip()
event.waitKeys(keyList = ['space'])
image_ref = visual.ImageStim(win, image = ref_image1, units = 'pix')
image_ref.draw()
win.flip()
core.wait(0.15)
image_ref_text = visual.TextStim(win, pos=[0, 15], units ='pix')
image_ref_text2 = visual.TextStim(win, pos=[0, -15], units = 'pix')
image_ref_text3 = visual.TextStim(win, pos=[0, 0], units = 'pix')
image_ref_text.setText('The number of the reference disks is %s:' %(int(Number1)))
image_ref_text2.setText('Press C to continue')
image_ref_text.draw()
image_ref_text2.draw()
win.flip()
event.waitKeys(keyList = ['c'])
# image_ref_text2.setText(Number1)
image_ref_text3.setText('Fixate to the center and press spacebar to see another reference display.')
# image_ref_text.draw()
# image_ref_text2.draw()
image_ref_text3.draw()
win.flip()
event.waitKeys(keyList = ['space'])
# block_text.setText('+')
# block_text.setColor(u'black')
# block_text.draw()
fix.draw()
win.flip()
event.waitKeys(keyList = ['space'])
image_ref2 = visual.ImageStim(win, image = ref_image2, units = 'pix')
image_ref.draw()
win.flip()
core.wait(0.15)
image_ref_text.setText('The number of the reference disks is %s:' %(int(Number2)))
# image_ref_text2.setText(Number2)
# image_ref_text2.setText('Press spacebar to continue')
image_ref_text.draw()
image_ref_text2.draw()
win.flip()
event.waitKeys(keyList = ['c'])
image_ref_text3.draw()
win.flip()
event.waitKeys(keyList = ['space'])
fix.draw()
win.flip()
event.waitKeys(keyList = ['space'])
image_ref3 = visual.ImageStim(win, image = ref_image3, units = 'pix')
image_ref3.draw()
win.flip()
core.wait(0.15)
image_ref_text.setText('The number of the reference disks is %s:' %(int(Number3)))
image_ref_text.draw()
image_ref_text2.draw()
win.flip()
event.waitKeys(keyList = ['c'])
image_ref_text3.setText('Press spacebar to start the real experiment.')
image_ref_text3.draw()
win.flip()
event.waitKeys(keyList = ['space'])
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instr2Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "instr2"-------
for thisComponent in instr2Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "instr2" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
    # set up handler to look after randomisation of conditions etc
    # Trials within a block are drawn in random order from the conditions
    # file selected by `winsize`.
    trials = data.TrialHandler(nReps=1, method='random',
        extraInfo=expInfo, originPath=-1,
        trialList=data.importConditions(winsize),
        seed=None, name='trials')
    thisExp.addLoop(trials) # add the loop to the experiment
    thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
    # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
    # NOTE(review): PsychoPy-Builder boilerplate — exec() injects each
    # condition column as a global variable.
    if thisTrial != None:
        for paramName in thisTrial.keys():
            exec(paramName + '= thisTrial.' + paramName)
    for thisTrial in trials:
        currentLoop = trials
        # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
        if thisTrial != None:
            for paramName in thisTrial.keys():
                exec(paramName + '= thisTrial.' + paramName)
        # ------Prepare to start Routine "fixation"-------
        # Draws a black '+' at screen center and waits for spacebar
        # before the trial stimulus appears.
        t = 0
        fixationClock.reset() # clock
        frameN = -1
        continueRoutine = True
        # update component parameters for each repeat
        # keep track of which components have finished
        fixationComponents = []
        for thisComponent in fixationComponents:
            if hasattr(thisComponent, 'status'):
                thisComponent.status = NOT_STARTED
        # -------Start Routine "fixation"-------
        while continueRoutine:
            # get current time
            t = fixationClock.getTime()
            frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
            # update/draw components on each frame
            fixation = visual.TextStim(win, color = (-1, -1, -1), bold = True, units = 'pix')
            fixation.setText('+')
            fixation.draw()
            win.flip()
            event.waitKeys(keyList = ['space'])
| |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------
# Filename: <filename>
# Purpose: <purpose>
# Author: <author>
# Email: <email>
#
# Copyright (C) <copyright>
# --------------------------------------------------------------------
"""
:copyright:
<copyright>
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
import numpy as np
import scipy.ndimage
from .base import Grid
from pathlib import Path
from uuid import uuid4
import matplotlib.pyplot as plt
from loguru import logger
import skfmm
from multiprocessing import Pool, cpu_count
from functools import partial
from typing import Optional
import h5py
from .base import ray_tracer
import shutil
from uquake.grid import read_grid
from .hdf5 import H5TTable, write_hdf5
from scipy.interpolate import interp1d
__cpu_count__ = cpu_count()
valid_phases = ('P', 'S')
valid_grid_types = (
'VELOCITY',
'VELOCITY_METERS',
'SLOWNESS',
'VEL2',
'SLOW2',
'SLOW2_METERS',
'SLOW_LEN',
'STACK',
'TIME',
'TIME2D',
'PROB_DENSITY',
'MISFIT',
'ANGLE',
'ANGLE2D'
)
valid_float_types = {
# NLL_type: numpy_type
'FLOAT': 'float32',
'DOUBLE': 'float64'
}
valid_grid_units = (
'METER',
'KILOMETER',
)
__velocity_grid_location__ = Path('model')
__time_grid_location__ = Path('time')
__default_grid_units__ = 'METER'
__default_float_type__ = 'FLOAT'
def validate_phase(phase):
if phase not in valid_phases:
msg = f'phase should be one of the following valid phases:\n'
for valid_phase in valid_phases:
msg += f'{valid_phase}\n'
raise ValueError(msg)
return True
def validate_grid_type(grid_type):
if grid_type.upper() not in valid_grid_types:
msg = f'grid_type = {grid_type} is not valid\n' \
f'grid_type should be one of the following valid grid ' \
f'types:\n'
for valid_grid_type in valid_grid_types:
msg += f'{valid_grid_type}\n'
raise ValueError(msg)
return True
def validate_grid_units(grid_units):
if grid_units.upper() not in valid_grid_units:
msg = f'grid_units = {grid_units} is not valid\n' \
f'grid_units should be one of the following valid grid ' \
f'units:\n'
for valid_grid_unit in valid_grid_units:
msg += f'{valid_grid_unit}\n'
raise ValueError(msg)
return True
def validate_float_type(float_type):
if float_type.upper() not in valid_float_types.keys():
msg = f'float_type = {float_type} is not valid\n' \
f'float_type should be one of the following valid float ' \
f'types:\n'
for valid_float_type in valid_float_types:
msg += f'{valid_float_type}\n'
raise ValueError(msg)
return True
def validate(value, choices):
    """Check that *value* is one of *choices*.

    :param value: value to test
    :param choices: iterable of accepted values
    :return: True when the value is valid
    :raises ValueError: when value is not in choices
    """
    if value not in choices:
        # fix: the original message read 'choices\n:' — the colon was
        # misplaced after the newline
        msg = 'value should be one of the following choices:\n'
        for choice in choices:
            msg += f'{choice}\n'
        raise ValueError(msg)
    return True
class Seeds:
    """
    A collection of seed (source) locations used to initialise NLLoc
    travel-time computations. Each seed is a dict with the keys
    'label', 'x', 'y', 'z' and 'elev', expressed in ``self.units``.
    """

    __valid_measurement_units__ = ['METERS', 'KILOMETERS']

    def __init__(self, sites=None, units='METERS'):
        """
        specifies a series of source location from an inventory object

        :param sites: a list of sites containing at least the location,
        and site label
        :type sites: list of dictionary
        :param units: units of measurement ('METERS' or 'KILOMETERS')

        :Example:

        >>> site = {'label': 'test', 'x': 1000, 'y': 1000, 'z': 1000,
                    'elev': 0.0}
        >>> sites = [site]
        >>> seeds = Seeds(sites)
        """
        validate(units, self.__valid_measurement_units__)
        self.units = units
        # fix: the original declared `sites=[]` (a mutable default), so
        # every instance created without an explicit list shared — and
        # add() kept mutating — the same list object
        self.sites = [] if sites is None else sites

    @classmethod
    def from_inventory(cls, inventory):
        """
        create from an inventory object

        :param inventory:
        :type inventory: uquake.core.inventory.Inventory
        """
        srces = []
        for site in inventory.sites:
            srce = {'label': site.code,
                    'x': site.x,
                    'y': site.y,
                    'z': site.z,
                    'elev': 0}
            srces.append(srce)
        return cls(srces)

    @classmethod
    def from_json(cls, json):
        # not implemented yet
        pass

    def add(self, label, x, y, z, elev=0, units='METERS'):
        """
        Add a single site to the source list

        :param label: site label
        :type label: str
        :param x: x location relative to geographic origin expressed
        in the units of measurements for site/source
        :type x: float
        :param y: y location (same convention as x)
        :type y: float
        :param z: z location (same convention as x)
        :type z: float
        :param elev: elevation above z grid position (positive UP) in
        kilometers for site (Default = 0)
        :type elev: float
        :param units: units of measurement used to express x, y, and z
        ('METERS' or 'KILOMETERS')
        """
        validate(units.upper(), self.__valid_measurement_units__)
        self.sites.append({'label': label, 'x': x, 'y': y, 'z': z,
                           'elev': elev})
        self.units = units.upper()

    @classmethod
    def generate_random_seeds_in_grid(cls, grid, n_seeds=1):
        """
        generate n_seeds random seeds inside the grid provided. This function
        is mainly used for testing purposes

        :param grid: a grid
        :type grid: uquake.grid.base.Grid or an object inheriting from Grid
        :param n_seeds: number of seeds to generate
        :return: a Seeds object

        >>> from uquake.grid.base import Grid
        >>> from uquake.grid.nlloc import Seeds
        >>> grid_dimensions = [10, 10, 10]
        >>> grid_spacing = [1, 1, 1]
        >>> grid_origin = [0, 0, 0]
        >>> grid = Grid(grid_dimensions, grid_spacing, grid_origin, value=1)
        >>> seeds = Seeds.generate_random_seeds_in_grid(grid, n_seeds=10)
        """
        # fix: the original called cls.__init__(), which invokes the
        # initialiser without an instance (TypeError); cls() builds a
        # proper empty Seeds object
        seeds = cls()
        label_root = 'seed'
        for i, point in enumerate(grid.generate_random_points_in_grid(
                n_points=n_seeds)):
            label = f'{label_root}_{i}'
            seeds.add(label, point[0], point[1], point[2])
        return seeds

    def __repr__(self):
        line = ""
        for site in self.sites:
            # NLLoc GTSRCE statements expect kilometres; stored coordinates
            # are divided by 1000 (assumes self.units == 'METERS' —
            # TODO confirm for KILOMETERS input)
            line += f'GTSRCE {site["label"]} XYZ ' \
                    f'{site["x"] / 1000:>15.6f} ' \
                    f'{site["y"] / 1000:>15.6f} ' \
                    f'{site["z"] / 1000:>15.6f} ' \
                    f'0.00\n'
        return line

    @property
    def locs(self):
        """Seed coordinates as an (n, 3) numpy array."""
        seeds = []
        for site in self.sites:
            seeds.append([site['x'], site['y'], site['z']])
        return np.array(seeds)

    @property
    def labels(self):
        """Seed labels as a numpy array."""
        seed_labels = []
        for site in self.sites:
            seed_labels.append(site['label'])
        return np.array(seed_labels)
# class Srces(Seeds):
# def __init__(self, sites=[], units='METERS'):
# super().__init__(sites=sites, units=units)
class NLLocGrid(Grid):
    """
    base 3D rectilinear grid object written/read in the NonLinLoc (NLLoc)
    on-disk format (.buf data, .hdr header, .mid model id).
    """

    def __init__(self, data_or_dims, origin, spacing, phase,
                 value=0, grid_type='VELOCITY_METERS',
                 grid_units=__default_grid_units__,
                 float_type="FLOAT", model_id=None):
        """
        :param data_or_dims: data or data dimensions. If dimensions are
        provided then a homogeneous grid is created with value=value
        :param origin: origin of the grid
        :type origin: list
        :param spacing: the spacing between grid nodes
        :type spacing: list
        :param phase: the uquake phase (value 'P' or 'S')
        :type phase: str
        :param value: value used to fill a homogeneous grid
        :type value: float
        :param grid_type: one of the valid NLLoc grid types
        :type grid_type: str
        :param grid_units: 'METER' or 'KILOMETER'
        :type grid_units: str
        :param float_type: 'FLOAT' or 'DOUBLE'
        :type float_type: str
        :param model_id: model identifier (stored as the resource id)
        :type model_id: str
        """
        super().__init__(data_or_dims, spacing=spacing, origin=origin,
                         value=value, resource_id=model_id)
        # each validator raises on invalid input, so the assignments below
        # only run for valid values
        if validate_phase(phase):
            self.phase = phase.upper()
        if validate_grid_type(grid_type):
            self.grid_type = grid_type.upper()
        self.extensions = ['.buf', '.mid', '.hdr']
        if validate_grid_units(grid_units):
            self.grid_units = grid_units.upper()
        if validate_float_type(float_type):
            self.float_type = float_type.upper()

    def _write_grid_data(self, base_name, path='.'):
        """Write the raw grid values to <path>/<base_name>.buf."""
        Path(path).mkdir(parents=True, exist_ok=True)
        with open(Path(path) / (base_name + '.buf'), 'wb') \
                as out_file:
            if self.float_type == 'FLOAT':
                out_file.write(self.data.astype(np.float32).tobytes())
            elif self.float_type == 'DOUBLE':
                out_file.write(self.data.astype(np.float64).tobytes())

    def _write_grid_header(self, base_name, path='.', seed_label=None,
                           seed=None, seed_units=None):
        """Write the NLLoc header file <path>/<base_name>.hdr.

        For 'TIME' and 'ANGLE' grids a second line describing the seed is
        written; `seed` must then be provided (a None seed would fail when
        converted to kilometres — TODO confirm callers always pass it).
        """
        # NLLoc headers are expressed in kilometres
        if self.grid_units == 'METER':
            origin = self.origin / 1000
            spacing = self.spacing / 1000
        else:
            origin = self.origin
            spacing = self.spacing
        line1 = f'{self.shape[0]:d} {self.shape[1]:d} {self.shape[2]:d} ' \
                f'{origin[0]:f} {origin[1]:f} {origin[2]:f} ' \
                f'{spacing[0]:f} {spacing[1]:f} {spacing[2]:f} ' \
                f'{self.grid_type}\n'
        with open(Path(path) / (base_name + '.hdr'), 'w') as out_file:
            out_file.write(line1)
            if self.grid_type in ['TIME', 'ANGLE']:
                if seed_units is None:
                    # fix: the original message was missing the closing
                    # parenthesis
                    logger.warning(f'seed_units are not defined. '
                                   f'Assuming same units as grid '
                                   f'({self.grid_units})')
                if self.grid_units == 'METER':
                    seed = seed / 1000
                line2 = u"%s %f %f %f\n" % (seed_label,
                                            seed[0], seed[1], seed[2])
                out_file.write(line2)
            out_file.write(u'TRANSFORM NONE\n')
        return True

    def _write_grid_model_id(self, base_name, path='.'):
        """Write the model id to <path>/<base_name>.mid."""
        with open(Path(path) / (base_name + '.mid'), 'w') as out_file:
            out_file.write(f'{self.model_id}')
        return True

    def write(self, base_name, path='.'):
        """Write the .buf, .hdr and .mid files for this grid.

        NOTE(review): no seed information is forwarded, so writing a
        'TIME'/'ANGLE' grid through this method would fail inside
        _write_grid_header — confirm intended usage.
        """
        self._write_grid_data(base_name, path=path)
        self._write_grid_header(base_name, path=path)
        self._write_grid_model_id(base_name, path=path)
        return True

    def mv(self, base_name, origin, destination):
        """
        move a NLLoc grid with a certain base_name from an origin to a
        destination

        :param base_name: grid file base name (without extension)
        :type base_name: str
        :param origin: directory the grid files currently live in
        :type origin: str
        :param destination: directory the grid files are moved to
        :type destination: str
        """
        self.write(base_name, destination)
        for ext in self.extensions:
            # fix: the extensions already contain the leading dot; the
            # original f'{base_name}.{ext}' produced names like 'grid..buf'
            # which never match the files written by write()
            shutil.move(f'{origin}/{base_name}{ext}',
                        f'{destination}/{base_name}{ext}')

    @property
    def model_id(self):
        """Alias for the underlying resource_id."""
        return self.resource_id
class ModelLayer:
    """
    1D model varying in Z
    """

    def __init__(self, z_top, value_top):
        """
        :param z_top: Top of the layer z coordinates
        :param value_top: Value at the top of the layer
        """
        self.z_top = z_top
        self.value_top = value_top

    def __repr__(self):
        # fix: the original used the integer-only format spec ':5d', which
        # raises ValueError for float coordinates/values; ':>8' right-aligns
        # both ints and floats
        return f'top - {self.z_top:>8} | value - {self.value_top:>8}\n'
class LayeredVelocityModel(object):
def __init__(self, model_id=None, velocity_model_layers=None,
             phase='P', grid_units='METER',
             float_type=__default_float_type__,
             gradient=False):
    """
    Initialize a 1D layered velocity model.

    :param model_id: model id, if not set the model ID is set using UUID
    :type model_id: str
    :param velocity_model_layers: a list of ModelLayer objects
    :type velocity_model_layers: list
    :param phase: Phase either 'P' or 'S'
    :type phase: str
    :param grid_units: 'METER' or 'KILOMETER'
    :type grid_units: str
    :param float_type: 'FLOAT' or 'DOUBLE'
    :type float_type: str
    :param gradient: whether values are interpolated within layers
    :type gradient: bool
    """
    # fix: the original only assigned the attribute in the None branch, so
    # a caller-supplied layer list was silently dropped and the attribute
    # was left undefined
    if velocity_model_layers is None:
        velocity_model_layers = []
    self.velocity_model_layers = velocity_model_layers
    if validate_phase(phase):
        self.phase = phase.upper()
    if validate_grid_units(grid_units):
        self.grid_units = grid_units.upper()
    if validate_float_type(float_type):
        self.float_type = float_type.upper()
    self.grid_type = 'VELOCITY'
    if model_id is None:
        model_id = str(uuid4())
    self.model_id = model_id
    self.gradient = gradient
def __repr__(self):
    """Return a one-line-per-layer summary of the model."""
    return ''.join(
        f'layer {number:4d} | {layer}'
        for number, layer in enumerate(self.velocity_model_layers, start=1)
    )
def add_layer(self, layer):
    """
    Add a layer to the model. The layers must be added in sequence from the
    top to the bottom

    :param layer: a ModelLayer object
    :raises TypeError: when layer is not a ModelLayer
    """
    # isinstance replaces the exact-type check, and the message now names
    # the real class (the original said 'VelocityModelLayer', which does
    # not exist in this module)
    if not isinstance(layer, ModelLayer):
        raise TypeError('layer must be a ModelLayer object')
    if self.velocity_model_layers is None:
        self.velocity_model_layers = [layer]
    else:
        self.velocity_model_layers.append(layer)
def gen_1d_model(self, z_min, z_max, spacing):
# sort the layers to ensure the layers are properly ordered
z = | |
<reponame>kayosman/Do-Not-Milk-The-Milk-Man<filename>src/entitys.py<gh_stars>0
import dialoge
import random
class Entity:
    """Base class for every creature in the game."""

    def __init__(self, name, health, evil):
        """
        :param name: display name of the entity
        :param health: starting (and maximum) health
        :param evil: True for hostile entities
        """
        self.name = name
        self.evil = evil
        self.health = health      # current health
        self._health = health     # maximum health, used as a clamp elsewhere

    def isEvil(self):
        """Return True when the entity is hostile."""
        return self.evil

    def randomNumber(self, value):
        """Return a random integer between 0 and *value* inclusive."""
        return random.randint(0, value)
class Player(Entity):
    """The human-controlled entity; starts with 100 health and is not evil."""

    def __init__(self, level=1, exp=0):
        """
        :param level: starting level
        :param exp: starting experience points
        """
        super().__init__(name="Player", health=100, evil=False)
        self.level = level
        self.exp = exp

    def getLevel(self):
        """Return the player's current level."""
        return self.level

    def setLevel(self, newLevel):
        """Overwrite the player's level."""
        self.level = newLevel

    def isAlive(self):
        """Return True while the player has health left; otherwise print
        "You died" and return False."""
        # the original compared against `self._health - self._health`,
        # which is always 0
        if self.health > 0:
            return True
        print("You died")
        return False

    def checkHealth(self):
        """Clamp health back to the maximum when it has overflowed.

        Returns True (and prints the clamped values) when a clamp happened,
        False otherwise.
        """
        if self.health <= self._health:
            return False
        self.health = self._health
        print(f"{self.health} + {self._health}")
        return True

    def levelUp(self):
        """Raise the level (and boost health) once exp passes the threshold."""
        health_bonus = self.health * 1.5
        level_threshold = self.health / 4
        if self.exp >= level_threshold:
            self.level += 1
            self.health += health_bonus
class Monster(Entity):
    """A hostile entity whose name can be drawn from level-banded name pools."""

    def __init__(self, name="Monster", evil=True, level=1, health=100):
        """
        :param name: monster display name
        :param evil: hostility flag (default True)
        :param level: monster level, selects the name pool
        :param health: starting health (new, defaulted parameter)
        """
        # fix: Entity.__init__ requires (name, health, evil); the original
        # called super().__init__(name, evil), which bound `evil` to
        # `health` and raised TypeError for the missing `evil` argument,
        # so every Monster() construction crashed
        super().__init__(name=name, health=health, evil=evil)
        self.level = level

    def generateMonsterName(self):
        """Assign a random name appropriate for the monster's level.

        Levels <= 10 draw from the low-level pool, levels 11-50 from the
        mid-level pool; levels above 50 leave the name unchanged (no
        high-level pool exists yet).
        """
        __lowLevelNames = ['Vexpest',
                           'Flamemutant',
                           'The Calm Man',
                           'Hellpest',
                           'Poor Black Man',
                           'Scary Cow',
                           'Wild Crip Member',
                           'Flamecrackle',
                           'Hauntbrute',
                           'Gasvine',
                           'Big Penis',
                           'Black Man',
                           'Gallhand',
                           'azorflayer',
                           'Bull',
                           'Spectralscream',
                           'Putridpest',
                           'Wisphag',
                           # NOTE(review): implicit string concatenation —
                           # yields 'Wild BloodMember'; probably a missing
                           # comma, kept as-is to preserve behavior
                           'Wild Blood' 'Member',
                           'Wet Cow',
                           'Cow',
                           'Wondering Homeless Person',
                           'Poisoncrackle',
                           'The Ashy Gorilla',
                           'Gloompaw',
                           'Wondering Bald Man',
                           'Curseflayer',
                           'Milk Mans Sister',
                           'The Calm Brute',
                           'Fetidpest',
                           'Phantomfoot',
                           'Milking Cow']
        __midLevelNames = ["The Agile Dweller",
                           "The Anguished Monstrosity",
                           "The Bitter Presence",
                           "The Black-Eyed Venom Elephant",
                           "The Bloodthirsty Night Fiend",
                           "The Bright Ghost Snake",
                           "The Canine Vermin",
                           "The Cobalt Venom Viper",
                           "The Cold-Blooded Doom Elephant",
                           "The Crazed Rot Beast",
                           "The Dead Malformation",
                           "The Diabolical Army Tiger",
                           "The Electric Freak",
                           "The Enraged Entity",
                           "The Fiery World Gargoyle",
                           "The Grim Blob",
                           "The Grisly Mocking Serpent",
                           "The Haunting Being",
                           "The Hollow",
                           "Bradley",
                           "The Agitated Wraith",
                           "The Awful Mumbler",
                           "The Bewitched Entity",
                           "The Blissful Deformity",
                           "The Blissful Deformity",
                           "The Bloodthirsty Razor Behemoth",
                           "The Bold Statue",
                           "The Bold Statue",
                           "The Bronze Howler",
                           "The Cold-Blooded Mountain Frog",
                           "The Cold-Blooded Mountain Frog",
                           "The Colossal Savage, The Delirious Pest",
                           "The Diabolical Preying Beast",
                           "The Diabolical Preying Beast",
                           "The Dreary Figure",
                           "The Ebon Cave Leopard",
                           "The Ebon Mountain Jackal",
                           "The Ebon Mountain Jackal",
                           "The Electric Cinder Cat",
                           "The Feathered Raptor Frog",
                           "The Feathered Rot Boar",
                           "The Filthy Ooze",
                           "The Filthy Plant",
                           "The Filthy Plant",
                           "The Hidden Dire Jackal",
                           "The Living Revenant",
                           "The Masked Nightmare Owl",
                           "The Primeval Demon Hound",
                           "The Ravaging World Owl"]
        if (self.level <= 10):
            randomName = random.choice(__lowLevelNames)
            self.name = randomName
        elif (self.level <= 50):
            randomName = random.choice(__midLevelNames)
            self.name = randomName
class Human(Entity):
def __init__(self,
             firstName="Human",
             lastName="Person",
             health=100,
             evil=False,
             gender="male",
             fear=0,
             happyness=0,
             sexyness=0,
             kindness=0,
             anger=0,
             rebellious=0,
             slutyness=0,
             love=0,
             faith=0):
    """Create a human NPC with the given identity and personality traits.

    Personality traits are on a 0-100 scale; generatePersonality() can
    randomise them later.
    """
    # fix: Entity.__init__ requires (name, health, evil); the original
    # called super().__init__(health, evil), which bound `health` to
    # `name`, `evil` to `health`, and raised TypeError for the missing
    # `evil` argument. The first name is used as the entity name —
    # TODO confirm whether "first last" was intended instead.
    super().__init__(name=firstName, health=health, evil=evil)
    self.firstName = firstName
    self.lastName = lastName
    self.gender = gender
    self.fear = fear
    self.happyness = happyness
    self.sexyness = sexyness
    self.kindness = kindness
    self.anger = anger
    self.rebellious = rebellious
    self.slutyness = slutyness
    self.love = love
    self.faith = faith
def generatePersonality(self):
    """Randomise every personality trait and normalise it into [0, 100].

    Fixes over the original:
    - `self.slutyness == 0`, `self.rebellious == 0` and
      `self.kindness == 100` were comparisons (no-ops), not assignments;
    - the `rebellious > 100` branch assigned `self.slutyness` instead of
      `self.rebellious`.
    """

    def _clamp(value):
        # keep a trait inside the 0-100 scale
        return max(0, min(100, value))

    self.fear = self.randomNumber(100)
    self.happyness = self.randomNumber(100)
    self.sexyness = self.randomNumber(100)
    self.kindness = self.randomNumber(100)
    self.anger = self.randomNumber(100)
    self.rebellious = self.randomNumber(100)
    self.slutyness = self.randomNumber(100)
    self.love = self.randomNumber(100)
    self.faith = self.randomNumber(100)

    # very faithful characters are less promiscuous/rebellious, kinder
    # and more fearful
    if self.faith >= 90:
        self.slutyness -= 80
        self.rebellious -= 80
        self.kindness += 40
        self.fear += 20

    # clamp every trait back into [0, 100]
    self.slutyness = _clamp(self.slutyness)
    self.rebellious = _clamp(self.rebellious)
    self.fear = _clamp(self.fear)
    self.happyness = _clamp(self.happyness)
    self.sexyness = _clamp(self.sexyness)
    self.kindness = _clamp(self.kindness)
    self.anger = _clamp(self.anger)
    self.love = _clamp(self.love)
    self.faith = _clamp(self.faith)

    # very rebellious characters gain faith (applied after the clamp in
    # the original, so faith can end above 100 — preserved; TODO confirm)
    if self.rebellious >= 75:
        self.faith += 20

    # effective anger is the gap between happiness and raw anger
    self.anger = abs(self.happyness - self.anger)
def generateName(self):
maleNames = ["Kyro ",
"Anselm ",
"Carlisle ",
"Lucius ",
"Hallam ",
"John-Paul ",
"Braylen ",
"Faron ",
"Drogo ",
'Jo ',
'Clyde ',
'Arnold ',
'Tracey ',
'Fortune ',
'Haze ',
'Axel ',
"Charles ",
"Danny ",
"Ry ",
"Gordy ",
"Pearce ",
"Den ",
"Ozzy ",
'Roly ',
'Floyd ',
'Easton ',
'Frederick ',
'Kendrick ',
'Erik ',
'Delmar ',
"Davis ",
"Cecil ",
"Kennard ",
"Torin ",
"Daley ",
"Brennan ",
"Royce ",
'Sheridan ',
'Beau ',
'Jaxton ',
'Roderick ',
'Casimir ',
'Royal ',
'Noah ',
"Haydn ",
"Randell ",
"Byron ",
"Loren ",
"Satchel ",
"Lester ",
"Jere ",
'Eliott ',
'Hedley ',
'Kelsey ',
'Cyan ',
'Darrel ',
'Johnie ',
'Jayceon ',
"Olly ",
"Rusty ",
"Rayner ",
"Donovan ",
"Merrill ",
"Rolph ",
"Kasey ",
'Grant ',
'Keaton ',
'Gideon ',
'Kenelm ',
'Ritchie ',
'Elias ',
'Phil ',
"Reg ",
"Elvin ",
"Lucius ",
"Deven ",
"Rollo ",
"Merritt ",
"Corbin ",
'Korbin ',
'Spencer ',
'Johnathon ',
'Roy ',
'Denny ',
'Charles ',
'Brenton ',
"Kennith ",
"Raeburn ",
"Mickey ",
"Trenton ",
"Trent ",
"Brad ",
"Bradley ",
'Brady ',
'Connor ',
'Conor ',
'Konor ',
'Levy ',
'Levi ',
'Will ',
"William ",
"Kobe ",
"Colby ",
"Gavin ",
"Yorick ",
"Cyrus ",
"Jerry ",
'Cecil ',
'Nat ',
'Guy ',
'Jessie ',
'Harper ',
'Wilford ',
'Craig ',
"Merv ",
"Kenyon ",
"Louie ",
"Leon ",
"Farley ",
"Aubrey ",
"Stuart ",
'Mort ',
'Morty ',
'Finley ',
'Donald ',
'Joe ',
'Barack ',
'Kit ',
"Alphonso ",
"Kory ",
"Cory ",
"Conrad ",
"Red ",
"Fraser ",
"Chadwick ",
'Fulton ',
'Scottie ',
'Darryl ',
'Hendrix ',
'Delmar ',
'Finlay ',
'Curt ',
"Eldred ",
"Nolan ",
"Watson ",
"Sammie ",
"Tommy ",
"Kevan ",
"Kevin ",
'Jaydon ',
'Deven ',
'Devon ',
'Aydan ',
'Jay ',
'Silas ',
'Kamden ',
"Dexter ",
"Greyson ",
"Jeremiah ",
"Chace ",
"Chase ",
"Wilburn ",
'Dallas ',
'Tanner ',
'Jake ',
'Gabriel ',
'Gary ',
'Deryck ',
'Derrick ',
"Philip ",
"Ry ",
"Philipe ",
"John ",
"Lincoln ",
"Deemer ",
"Freddy ",
'Anderson ',
'Randal ',
'Linden ',
'Chip ',
'Lane ',
'Johnnie ',
'Kelsey ',
"Brandon ",
"Ed ",
"Graham ",
"Irvin ",
"Landen ",
"Maximilian ",
"Abe ",
'Josh ']
femaleNames = ["Aaliyah ",
"Lexi ",
"Ruby ",
"Orinda ",
"Jodene ",
"Enola ",
"Adria ",
"Xavia ",
"Nita ",
'Irma ',
'Andrina ',
'Lesleigh ',
'Lori',
'Fortune ',
'Jayde ',
'Morgan ',
"Allycia ",
"Maddison ",
"Ry ",
"Linda ",
"Audie ",
"Ember ",
"Amber ",
'Mikayla ',
'Tillie ',
'Suzan ',
'Isabella ',
'Nola ',
'Bonnie ',
'Dorine ',
"Kendal ",
"Natasha ",
"Carry ",
"Charlene ",
"Trix ",
"Shyla ",
"Justina ",
'Marlene ',
'Marjorie ',
'Abbi ',
'Shae ',
'Netta ',
'Destinee ',
'Patience ',
"Alexia ",
"Alexa ",
"Alexis ",
"Victoria ",
"Eleanor ",
"Leanne ",
"Daniella ",
'Suzan ',
'Terra ',
'Kelsey ',
'Kinsey ',
'Karissa ',
'Josephina ',
'Maeghan ',
"Olly ",
"Brittania ",
"Lottie ",
"Tristin ",
"Sheila ",
"Hepsie ",
"Greta ",
'Camille ',
'Corie ',
'Sheree',
'Sommer ',
'Beatrix ',
'Christine ',
'Kaya ',
"Summer ",
"Winter ",
"Fall ",
"Spring ",
"Rollo ",
"Tamsin ",
"Averie ",
'Chastity ',
'Briana ',
'Gena ',
'Marion ',
'Celandine ',
'Genevieve ',
'Essie ',
"Maya ",
"Breanna ",
"Eula ",
"Nyla ",
"Sarina ",
"Kelly ",
"Annis ",
'Jessamyn ',
'Sally ',
'Brandi ',
'Kelly ',
'Nyree ',
'Caroline ',
'Deena ',
"Margaret ",
"Ravenna ",
"Bella ",
"Gavin ",
"Yorick ",
"Patsy",
"Arianna ",
'Patsy ',
'Samantha',
'Sunshine ',
'Jessie ',
'Parnel ',
'Wilford ',
'Craig ',
"Merv ",
"Aaliyah ",
"Daniella ",
"Madelyn ",
"Flora ",
"Aubrey ",
"Alexandrina | |
will be raised.
frequencies : array_like of float, optional
The frequencies to keep in the object, each value passed here should
exist in the freq_array.
freq_chans : array_like of int, optional
The frequency channel numbers to keep in the object.
times : array_like of float, optional
The times to keep in the object, each value passed here should exist
in the time_array. Cannot be used with `time_range`, `lsts`, or
`lst_array`.
time_range : array_like of float, optional
The time range in Julian Date to keep in the object, must be length
2. Some of the times in the object should fall between the first and
last elements. Cannot be used with `times`, `lsts`, or `lst_array`.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
polarizations : array_like of int or str, optional
The polarizations numbers to keep in the object, each value passed
here should exist in the polarization_array. If passing strings, the
canonical polarization strings (e.g. "xx", "rr") are supported and if the
`x_orientation` attribute is set, the physical dipole strings
(e.g. "nn", "ee") are also supported.
blt_inds : array_like of int, optional
The baseline-time indices to keep in the object. This is
not commonly used.
Returns
-------
blt_inds : list of int
list of baseline-time indices to keep. Can be None (to keep everything).
freq_inds : list of int
list of frequency indices to keep. Can be None (to keep everything).
pol_inds : list of int
list of polarization indices to keep. Can be None (to keep everything).
history_update_string : str
string to append to the end of the history.
"""
# build up history string as we go
history_update_string = " Downselected to specific "
n_selects = 0
if ant_str is not None:
if not (
antenna_nums is None
and antenna_names is None
and bls is None
and polarizations is None
):
raise ValueError(
"Cannot provide ant_str with antenna_nums, antenna_names, "
"bls, or polarizations."
)
else:
bls, polarizations = self.parse_ants(ant_str)
if bls is not None and len(bls) == 0:
raise ValueError(
f"There is no data matching ant_str={ant_str} in this object."
)
# Antennas, times and blt_inds all need to be combined into a set of
# blts indices to keep.
# test for blt_inds presence before adding inds from antennas & times
if blt_inds is not None:
blt_inds = uvutils._get_iterable(blt_inds)
if np.array(blt_inds).ndim > 1:
blt_inds = np.array(blt_inds).flatten()
history_update_string += "baseline-times"
n_selects += 1
if antenna_names is not None:
if antenna_nums is not None:
raise ValueError(
"Only one of antenna_nums and antenna_names can be provided."
)
if not isinstance(antenna_names, (list, tuple, np.ndarray)):
antenna_names = (antenna_names,)
if np.array(antenna_names).ndim > 1:
antenna_names = np.array(antenna_names).flatten()
antenna_nums = []
for s in antenna_names:
if s not in self.antenna_names:
raise ValueError(
"Antenna name {a} is not present in the antenna_names"
" array".format(a=s)
)
antenna_nums.append(
self.antenna_numbers[np.where(np.array(self.antenna_names) == s)][0]
)
if antenna_nums is not None:
antenna_nums = uvutils._get_iterable(antenna_nums)
if np.array(antenna_nums).ndim > 1:
antenna_nums = np.array(antenna_nums).flatten()
if n_selects > 0:
history_update_string += ", antennas"
else:
history_update_string += "antennas"
n_selects += 1
# Check to make sure that we actually have these antenna nums in the data
ant_check = np.logical_or(
np.isin(antenna_nums, self.ant_1_array),
np.isin(antenna_nums, self.ant_2_array),
)
if not np.all(ant_check):
raise ValueError(
"Antenna number % i is not present in the ant_1_array or "
"ant_2_array" % antenna_nums[~ant_check][0]
)
ant_blt_inds = np.where(
np.logical_and(
np.isin(self.ant_1_array, antenna_nums),
np.isin(self.ant_2_array, antenna_nums),
)
)[0]
else:
ant_blt_inds = None
if bls is not None:
if isinstance(bls, list) and all(
isinstance(bl_ind, (int, np.integer,),) for bl_ind in bls
):
for bl_ind in bls:
if not (bl_ind in self.baseline_array):
raise ValueError(
"Baseline number {i} is not present in the "
"baseline_array".format(i=bl_ind)
)
bls = list(zip(*self.baseline_to_antnums(bls)))
elif isinstance(bls, tuple) and (len(bls) == 2 or len(bls) == 3):
bls = [bls]
if len(bls) == 0 or not all(isinstance(item, tuple) for item in bls):
raise ValueError(
"bls must be a list of tuples of antenna numbers "
"(optionally with polarization) or a list of baseline numbers."
)
if not all(
[isinstance(item[0], (int, np.integer,),) for item in bls]
+ [isinstance(item[1], (int, np.integer,),) for item in bls]
):
raise ValueError(
"bls must be a list of tuples of antenna numbers "
"(optionally with polarization) or a list of baseline numbers."
)
if all(len(item) == 3 for item in bls):
if polarizations is not None:
raise ValueError(
"Cannot provide length-3 tuples and also specify polarizations."
)
if not all(isinstance(item[2], str) for item in bls):
raise ValueError(
"The third element in each bl must be a polarization string"
)
if ant_str is None:
if n_selects > 0:
history_update_string += ", baselines"
else:
history_update_string += "baselines"
else:
history_update_string += "antenna pairs"
n_selects += 1
bls_blt_inds = np.zeros(0, dtype=np.int64)
bl_pols = set()
for bl in bls:
if not (bl[0] in self.ant_1_array or bl[0] in self.ant_2_array):
raise ValueError(
"Antenna number {a} is not present in the "
"ant_1_array or ant_2_array".format(a=bl[0])
)
if not (bl[1] in self.ant_1_array or bl[1] in self.ant_2_array):
raise ValueError(
"Antenna number {a} is not present in the "
"ant_1_array or ant_2_array".format(a=bl[1])
)
wh1 = np.where(
np.logical_and(self.ant_1_array == bl[0], self.ant_2_array == bl[1])
)[0]
wh2 = np.where(
np.logical_and(self.ant_1_array == bl[1], self.ant_2_array == bl[0])
)[0]
if len(wh1) > 0:
bls_blt_inds = np.append(bls_blt_inds, list(wh1))
if len(bl) == 3:
bl_pols.add(bl[2])
elif len(wh2) > 0:
bls_blt_inds = np.append(bls_blt_inds, list(wh2))
if len(bl) == 3:
# find conjugate polarization
bl_pols.add(uvutils.conj_pol(bl[2]))
else:
raise ValueError(
"Antenna pair {p} does not have any data "
"associated with it.".format(p=bl)
)
if len(bl_pols) > 0:
polarizations = list(bl_pols)
if ant_blt_inds is not None:
# Use intersection (and) to join antenna_names/nums & ant_pairs_nums
ant_blt_inds = np.array(
list(set(ant_blt_inds).intersection(bls_blt_inds))
)
else:
ant_blt_inds = bls_blt_inds
if ant_blt_inds is not None:
if blt_inds is not None:
# Use intersection (and) to join antenna_names/nums/ant_pairs_nums
# with blt_inds
blt_inds = np.array(
list(set(blt_inds).intersection(ant_blt_inds)), dtype=np.int64
)
else:
blt_inds = ant_blt_inds
have_times = times is not None
have_time_range = time_range is not None
have_lsts = lsts is not None
have_lst_range = lst_range is not None
if (
np.count_nonzero([have_times, have_time_range, have_lsts, have_lst_range])
> 1
):
raise ValueError(
"Only one of [times, time_range, lsts, lst_range] may be "
"specified per selection operation."
)
if times is not None:
times = uvutils._get_iterable(times)
if np.array(times).ndim > 1:
times = np.array(times).flatten()
time_blt_inds = np.zeros(0, dtype=np.int64)
for jd in times:
if np.any(
np.isclose(
self.time_array,
jd,
rtol=self._time_array.tols[0],
atol=self._time_array.tols[1],
)
):
time_blt_inds = np.append(
time_blt_inds,
np.where(
np.isclose(
self.time_array,
jd,
rtol=self._time_array.tols[0],
atol=self._time_array.tols[1],
)
)[0],
)
else:
raise ValueError(
"Time {t} is not present in the time_array".format(t=jd)
)
if time_range is not None:
if np.size(time_range) != 2:
raise ValueError("time_range must be length 2.")
time_blt_inds = np.nonzero(
(self.time_array <= time_range[1]) & (self.time_array >= time_range[0])
)[0]
if time_blt_inds.size == 0:
raise ValueError(
f"No elements in time range between {time_range[0]} and "
f"{time_range[1]}."
)
if lsts is not None:
if np.any(np.asarray(lsts) > 2 * np.pi):
warnings.warn(
"The lsts parameter contained a value greater than 2*pi. "
"LST values are assumed to be in radians, not hours."
)
lsts = uvutils._get_iterable(lsts)
if np.array(lsts).ndim > 1:
lsts = np.array(lsts).flatten()
time_blt_inds = np.zeros(0, dtype=np.int64)
for lst in lsts:
if np.any(
np.isclose(
self.lst_array,
lst,
rtol=self._lst_array.tols[0],
atol=self._lst_array.tols[1],
)
):
time_blt_inds = np.append(
time_blt_inds,
np.where(
np.isclose(
self.lst_array,
lst,
rtol=self._lst_array.tols[0],
atol=self._lst_array.tols[1],
)
)[0],
)
else:
raise ValueError(f"LST {lst} is not present in the lst_array")
| |
* hours
minutes = remaining // 60
seconds = remaining - 60 * minutes
print(fmt_str.format(bundle_i0,
bundle_i0 + bundle_size - 1,
100 * (bundle_i0 + bundle_size) / N,
hours, minutes, seconds,
FPS))
# Save groups of 100 frames together
if (bundle_i0 > last_saved_frames + save_group + 1):
all_points = []
all_traj_pts = []
all_traj_clrs = []
i0 = last_saved_frames
i1 = i0 + save_group
for i, world_H in enumerate(transform_list[i0: i1]):
# Load ply format points
f_name = '{:.0f}.ply'.format(f_times[i0 + i])
data = read_ply(join(frames_folder, f_name))
points = np.vstack((data['x'], data['y'], data['z'])).T
# Apply transf
world_pts = np.hstack((points, np.ones_like(points[:, :1])))
world_pts = np.matmul(world_pts, world_H.T)
# Save frame
world_pts[:, 3] = i0 + i
all_points.append(world_pts)
# also save trajectory
traj_pts, traj_clrs = frame_H_to_points(world_H, size=0.1)
traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * (i0 + i)))
all_traj_pts.append(traj_pts.astype(np.float32))
all_traj_clrs.append(traj_clrs)
last_saved_frames += save_group
filename = join(out_folder, 'd_{:s}_{:05d}.ply'.format(day, i0))
write_ply(filename,
[np.vstack(all_points)],
['x', 'y', 'z', 't'])
filename = join(out_folder, 'd_{:s}_{:05d}_traj.ply'.format(day, i0))
write_ply(filename,
[np.vstack(all_traj_pts), np.vstack(all_traj_clrs)],
['x', 'y', 'z', 't', 'red', 'green', 'blue'])
#################
# Post processing
#################
all_points = []
all_traj_pts = []
all_traj_clrs = []
i0 = last_saved_frames
for i, world_H in enumerate(transform_list[i0:]):
# Load ply format points
f_name = '{:.0f}.ply'.format(f_times[i0 + i])
data = read_ply(join(frames_folder, f_name))
points = np.vstack((data['x'], data['y'], data['z'])).T
# Apply transf
world_pts = np.hstack((points, np.ones_like(points[:, :1])))
world_pts = np.matmul(world_pts, world_H.T)
# Save frame
world_pts[:, 3] = i0 + i
all_points.append(world_pts)
# also save trajectory
traj_pts, traj_clrs = frame_H_to_points(world_H, size=0.1)
traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * (i0 + i)))
all_traj_pts.append(traj_pts.astype(np.float32))
all_traj_clrs.append(traj_clrs)
last_saved_frames += save_group
filename = join(out_folder, 'd_{:s}_{:05d}.ply'.format(day, i0))
write_ply(filename,
[np.vstack(all_points)],
['x', 'y', 'z', 't'])
filename = join(out_folder, 'd_{:s}_{:05d}_traj.ply'.format(day, i0))
write_ply(filename,
[np.vstack(all_traj_pts), np.vstack(all_traj_clrs)],
['x', 'y', 'z', 't', 'red', 'green', 'blue'])
def bundle_icp(frame_names,
               bundle_size=5,
               score_thresh=0.99,
               frame_voxel_size=-1,
               verbose=2):
    """
    Test ICP registration Use GT to extract a small interesting region.

    Registers a sequence of lidar frames by running bundle point-to-plane
    ICP on overlapping groups of `bundle_size` consecutive frames, chaining
    the per-bundle transforms into world coordinates.

    :param frame_names: ordered list of .ply frame file paths.
    :param bundle_size: number of frames jointly optimized per bundle.
    :param score_thresh: planarity+linearity threshold; points whose normal
        score is below it are discarded, and the surviving scores are
        rescaled to [0, 1].
    :param frame_voxel_size: grid subsampling cell size (<= 0 disables
        subsampling).
    :param verbose: > 0 prints per-bundle progress and an FPS estimate.
    :return: list of 4x4 transforms (one per frame) mapping each frame to
        world coordinates.
    """

    ############
    # Parameters
    ############

    # Bundle stride (nb of frames between each bundle start)
    # Consecutive bundles overlap by exactly one frame.
    bundle_stride = bundle_size - 1

    # Group of frames saved together
    save_group = 100

    # List of transformation we are trying to optimize
    transform_list = [np.eye(4) for _ in frame_names]

    last_saved_frames = 0
    FPS = 0
    N = len(frame_names)

    for b_i, bundle_i0 in enumerate(np.arange(0, len(frame_names), bundle_stride)):

        ####################
        # Load bundle frames
        ####################

        t = [time.time()]

        # Clamp the last bundle so it still contains bundle_size frames
        if (bundle_i0 + bundle_size > N):
            bundle_i0 = N - bundle_size

        frame_pts = []
        frame_norms = []
        frame_w = []
        for f_name in frame_names[bundle_i0:bundle_i0+bundle_size]:

            # Load ply format points
            data = read_ply(f_name)
            points = np.vstack((data['x'], data['y'], data['z'])).T

            # Get normals (planarity/linearity act as a quality score)
            normals, planarity, linearity = polar_normals(points, radius=1.5, h_scale=0.5)
            norm_scores = planarity + linearity

            # Remove low quality normals for fitting
            points = points[norm_scores > score_thresh]
            normals = normals[norm_scores > score_thresh]
            norm_scores = (norm_scores[norm_scores > score_thresh] - score_thresh) / (1 - score_thresh)

            # Subsample to reduce number of points
            if frame_voxel_size > 0:
                # grid supsampling
                points, normals = grid_subsampling(points, features=normals, sampleDl=frame_voxel_size)
                # Renormalize normals (averaged by the grid subsampling)
                normals = normals / np.linalg.norm(normals, axis=1, keepdims=True)

            # Filter out points according to main normal directions (NOt necessary if normals are better computed)
            # NOTE(review): when frame_voxel_size > 0, norm_scores is not
            # subsampled alongside points/normals, so the boolean indexing
            # below can be misaligned — verify before enabling subsampling.
            bool_filter = normal_filtering(normals) > 0.5
            points = points[bool_filter]
            normals = normals[bool_filter]
            norm_scores = norm_scores[bool_filter]

            # Compute score for each component of rotations / translation
            # Weights according to distance the futher, the higher (square rule because points lies on surfaces)
            #rot_scores = np.expand_dims(norm_scores, 1) * np.cross(points, normals, axis=1)
            #weights = np.hstack((rot_scores, -rot_scores))
            weights = np.expand_dims(norm_scores, 1)

            # Gather frames data
            frame_pts.append(points)
            frame_norms.append(normals)
            frame_w.append(weights)

        t += [time.time()]

        ##################
        # Apply bundle ICP
        ##################

        bundle_H, bundle_rms, all_H = bundle_pt2pl_icp(frame_pts,
                                                       frame_norms,
                                                       frame_w,
                                                       n_samples=1000,
                                                       max_pairing_dist=0.2,
                                                       max_iter=200,
                                                       avg_steps=5)

        t += [time.time()]

        # Update transformations to world coordinates: chain the relative
        # bundle transforms onto the (already-world) transform of the
        # bundle's first frame.
        for b in range(bundle_size):
            world_H = np.eye(4)
            for bb in range(b, 0, -1):
                world_H = np.matmul(bundle_H[bb], world_H)
            world_H = np.matmul(transform_list[bundle_i0], world_H)
            transform_list[bundle_i0 + b] = world_H

        t += [time.time()]

        if verbose > 0:
            fmt_str = 'Bundle [{:3d},{:3d}] --- {:5.1f}% or {:02d}:{:02d}:{:02d} remaining at {:.1f}fps'
            # Exponential moving average of the processing speed
            if bundle_i0 == 0:
                FPS = bundle_size / (t[-1] - t[0])
            else:
                FPS += (bundle_size / (t[-1] - t[0]) - FPS) / 10
            remaining = int((N - (bundle_i0 + bundle_size)) / FPS)
            hours = remaining // 3600
            remaining = remaining - 3600 * hours
            minutes = remaining // 60
            seconds = remaining - 60 * minutes
            print(fmt_str.format(bundle_i0,
                                 bundle_i0 + bundle_size - 1,
                                 100 * (bundle_i0 + bundle_size) / N,
                                 hours, minutes, seconds,
                                 FPS))

        # Save groups of 100 frames together (debug output of aligned
        # points and the trajectory so far)
        if (bundle_i0 > last_saved_frames + save_group + 1):
            all_points = []
            all_traj_pts = []
            all_traj_clrs = []
            i0 = last_saved_frames
            i1 = i0 + save_group
            for i, world_H in enumerate(transform_list[i0: i1]):

                # Load ply format points
                data = read_ply(frame_names[i0 + i])
                points = np.vstack((data['x'], data['y'], data['z'])).T

                # Apply transf (homogeneous coordinates)
                world_pts = np.hstack((points, np.ones_like(points[:, :1])))
                world_pts = np.matmul(world_pts, world_H.T)

                # Save frame (4th column reused to store the frame index)
                world_pts[:, 3] = i0 + i
                all_points.append(world_pts)

                # also save trajectory
                traj_pts, traj_clrs = frame_H_to_points(world_H, size=0.1)
                traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * (i0 + i)))
                all_traj_pts.append(traj_pts.astype(np.float32))
                all_traj_clrs.append(traj_clrs)

            last_saved_frames += save_group
            filename = 'debug_icp_{:05d}.ply'.format(i0)
            write_ply(filename,
                      [np.vstack(all_points)],
                      ['x', 'y', 'z', 't'])
            filename = 'debug_icp_{:05d}_traj.ply'.format(i0)
            write_ply(filename,
                      [np.vstack(all_traj_pts), np.vstack(all_traj_clrs)],
                      ['x', 'y', 'z', 't', 'red', 'green', 'blue'])

    #################
    # Post processing
    #################

    # Save the remaining frames that were not flushed by the in-loop save.
    all_points = []
    all_traj_pts = []
    all_traj_clrs = []
    i0 = last_saved_frames
    for i, world_H in enumerate(transform_list[i0:]):

        # Load ply format points
        data = read_ply(frame_names[i0 + i])
        points = np.vstack((data['x'], data['y'], data['z'])).T

        # Apply transf
        world_pts = np.hstack((points, np.ones_like(points[:, :1])))
        world_pts = np.matmul(world_pts, world_H.T)

        # Save frame
        world_pts[:, 3] = i0 + i
        all_points.append(world_pts)

        # also save trajectory
        traj_pts, traj_clrs = frame_H_to_points(world_H, size=0.1)
        traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * (i0 + i)))
        all_traj_pts.append(traj_pts.astype(np.float32))
        all_traj_clrs.append(traj_clrs)

    last_saved_frames += save_group
    filename = 'debug_icp_{:05d}.ply'.format(i0)
    write_ply(filename,
              [np.vstack(all_points)],
              ['x', 'y', 'z', 't'])
    filename = 'debug_icp_{:05d}_traj.ply'.format(i0)
    write_ply(filename,
              [np.vstack(all_traj_pts), np.vstack(all_traj_clrs)],
              ['x', 'y', 'z', 't', 'red', 'green', 'blue'])

    return transform_list
def bundle_slam(verbose=1):
############
# Parameters
############
# Path to data
data_path = '../../Data/NCLT'
gt_folder = 'ground_truth'
raw_folder = 'raw_ply'
days = np.sort([d for d in listdir(join(data_path, raw_folder))])
# Out files
out_folder = join(data_path, 'day_ply')
if not exists(out_folder):
makedirs(out_folder)
# Stride (nb of frames skipped for transformations)
frame_stride = 2
# Bundle size (number of frames jointly optimized) and stride (nb of frames between each bundle start)
bundle_size = 7
bundle_stride = bundle_size - 1
# Normal estimation parameters
score_thresh = 0.99
# Pointcloud filtering parameters
map_voxel_size = 0.05
frame_voxel_size = 0.05
# Group of frames saved together
save_group = 100
###############
# Load GT poses
###############
print('\nLoading days groundtruth poses...')
t0 = time.time()
gt_t, gt_H = load_gt_poses(join(data_path, gt_folder), only_day_1=True)
t2 = time.time()
print('Done in {:.1f}s\n'.format(t2 - t0))
#######################
# Get lidar frame times
#######################
# Focus on a particular point
p0 = np.array([-220, -527, 12])
R0 = 20.0
print('\nGet timestamps in focused area...')
t0 = time.time()
day_f_times = get_area_frames(days, gt_t, gt_H, join(data_path, raw_folder), p0, R0, only_day_1=True)
t2 = time.time()
print('Done in {:.1f}s\n'.format(t2 - t0))
###########################
# coarse map with pt2pl icp
###########################
for d, day in enumerate(days):
# List of transformation we are trying to optimize
frames_folder = join(data_path, 'raw_ply', day)
f_times = [f_t for f_t in day_f_times[d][::frame_stride]]
transform_list = [np.eye(4) for _ in f_times]
last_saved_frames = 0
FPS = 0
N = len(f_times)
for b_i, bundle_i0 in enumerate(np.arange(0, len(f_times), bundle_stride)):
####################
# Load bundle frames
####################
t = [time.time()]
if (bundle_i0 + bundle_size > len(f_times)):
bundle_i0 = len(f_times) - bundle_size
frame_pts = []
frame_norms = []
frame_w = []
for f_t in f_times[bundle_i0:bundle_i0+bundle_size]:
# Load ply format points
f_name = '{:.0f}.ply'.format(f_t)
data = read_ply(join(frames_folder, f_name))
points = np.vstack((data['x'], data['y'], data['z'])).T
estimate_normals_planarity_debug(points)
a = 1/0
t += [time.time()]
# Get normals
normals, planarity, linearity = polar_normals(points, radius=1.5, h_scale=0.5)
# Remove low quality normals for fitting
points = points[norm_scores > score_thresh]
normals = normals[norm_scores > score_thresh]
norm_scores = (norm_scores[norm_scores > score_thresh] - score_thresh) / (1 - score_thresh)
t += [time.time()]
# Subsample to reduce number of points
if frame_voxel_size > 0:
# grid supsampling
points, normals = grid_subsampling(points, features=normals, sampleDl=map_voxel_size)
# Renormalize normals
normals = normals / np.linalg.norm(normals, axis=1, keepdims=True)
# Filter out | |
<reponame>massongit/deep-crf
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
os.environ["CHAINER_SEED"] = "1234"
import random
import numpy as np
random.seed(1234)
np.random.seed(1234)
import chainer
from chainer import cuda
from chainer import optimizers
from chainer import serializers
import chainer.functions as F
from .bi_lstm import BiLSTM_CNN_CRF
import deepcrf.util
from .util import PADDING, UNKWORD
import logging
logger = logging.getLogger(__name__)
to_cpu = chainer.cuda.to_cpu
import os.path
import six
version = chainer.__version__
def my_cudnn(cudnn_flag):
    """Configure chainer's global cuDNN policy from a boolean flag.

    On chainer >= 2 the cuDNN behaviour is a global config value:
    'always' enables it, 'never' disables it. On chainer 1.x nothing is
    done (it used per-function flags instead of a global config).

    :param cudnn_flag: truthy to enable cuDNN, falsy to disable it.
    """
    # BUG FIX: the previous lexicographic comparison `version >= '2.0'`
    # misclassifies double-digit majors (e.g. '10.0' < '2.0' as strings).
    # Compare the numeric major version instead.
    try:
        major = int(version.split('.')[0])
    except ValueError:
        major = 0
    if major >= 2:
        if cudnn_flag:
            chainer.config.use_cudnn = 'always'
            # chainer.config.cudnn_deterministic = True
        else:
            chainer.config.use_cudnn = 'never'
def run(data_file, is_train=False, **args):
for k in six.iterkeys(args):
args[k] = deepcrf.util.str_to_unicode_python2(args[k])
is_test = not is_train
batchsize = args['batchsize']
model_name = args['model_name']
optimizer_name = args['optimizer']
save_dir = args['save_dir']
print(args)
def convert_multi_files(data_file):
if args.get('use_list_files', False):
with open(data_file) as f:
data_files = [filename.strip() for filename in f]
else:
data_files = [data_file]
return data_files
data_files = convert_multi_files(data_file)
# TODO: check save_dir exist
if not os.path.isdir(save_dir):
err_msg = 'There is no dir : {}\n'.format(save_dir)
err_msg += '##############################\n'
err_msg += '## Please followiing: \n'
err_msg += '## $ mkdir {}\n'.format(save_dir)
err_msg += '##############################\n'
raise ValueError(err_msg)
save_name = args['save_name']
save_name = os.path.join(save_dir, save_name)
xp = cuda.cupy if args['gpu'] >= 0 else np
efficient_gpu = False
if args['gpu'] >= 0:
cuda.get_device_from_id(args['gpu']).use()
xp.random.seed(1234)
efficient_gpu = args.get('efficient_gpu', False)
def to_gpu(x):
if args['gpu'] >= 0:
return chainer.cuda.to_gpu(x)
return x
# load files
dev_file = args['dev_file']
test_file = args['test_file']
delimiter = args['delimiter']
input_idx = list(map(int, args['input_idx'].split(',')))
output_idx = list(map(int, args['output_idx'].split(',')))
word_input_idx = input_idx[0] # NOTE: word_idx is first column!
additional_input_idx = input_idx[1:]
sentences_train = []
if is_train:
sentences_train = deepcrf.util.read_conll_file(filenames=data_files, delimiter=delimiter)
if len(sentences_train) == 0:
s = str(len(sentences_train))
err_msg = 'Invalid training sizes: {} sentences. '.format(s)
raise ValueError(err_msg)
else:
# Predict
if len(input_idx) == 1:
# raw text format
sentences_train = deepcrf.util.read_raw_file(filenames=data_files, delimiter=u' ')
else:
# conll format
sentences_train = deepcrf.util.read_conll_file(
filenames=data_files, delimiter=delimiter)
# sentences_train = sentences_train[:100]
sentences_dev = []
sentences_test = []
if dev_file:
dev_file = convert_multi_files(dev_file)
sentences_dev = deepcrf.util.read_conll_file(dev_file, delimiter=delimiter)
if test_file:
test_file = convert_multi_files(test_file)
sentences_test = deepcrf.util.read_conll_file(test_file, delimiter=delimiter)
# Additional setup
vocab_adds = []
for ad_feat_id in additional_input_idx:
sentences_additional_train = [[feat_obj[ad_feat_id] for feat_obj in sentence]
for sentence in sentences_train]
vocab_add = deepcrf.util.build_vocab(sentences_additional_train)
vocab_adds.append(vocab_add)
save_vocab = save_name + '.vocab'
save_vocab_char = save_name + '.vocab_char'
save_tags_vocab = save_name + '.vocab_tag'
save_train_config = save_name + '.train_config'
# TODO: check unknown pos tags
# TODO: compute unk words
if is_train:
sentences_words_train = [[w_obj[word_input_idx] for w_obj in sentence]
for sentence in sentences_train]
vocab = deepcrf.util.build_vocab(sentences_words_train)
vocab_char = deepcrf.util.build_vocab(deepcrf.util.flatten(sentences_words_train))
vocab_tags = deepcrf.util.build_tag_vocab(sentences_train)
elif is_test:
vocab = deepcrf.util.load_vocab(save_vocab)
vocab_char = deepcrf.util.load_vocab(save_vocab_char)
vocab_tags = deepcrf.util.load_vocab(save_tags_vocab)
vocab_adds = []
for i, idx in enumerate(additional_input_idx):
save_additional_vocab = save_name + '.vocab_additional_' + str(i)
vocab_add = deepcrf.util.load_vocab(save_additional_vocab)
vocab_adds.append(vocab_add)
if args.get('word_emb_file', False):
# set Pre-trained embeddings
# emb_file = './emb/glove.6B.100d.txt'
emb_file = args['word_emb_file']
word_emb_vocab_type = args.get('word_emb_vocab_type')
def assert_word_emb_shape(shape1, shape2):
err_msg = '''Pre-trained embedding size is not equal to `--n_word_emb` ({} != {})'''
if shape1 != shape2:
err_msg = err_msg.format(str(shape1), str(shape2))
raise ValueError(err_msg)
def assert_no_emb(word_vecs):
err_msg = '''There is no-embeddings! Please check your file `--word_emb_file`'''
if word_vecs.shape[0] == 0:
raise ValueError(err_msg)
if word_emb_vocab_type == 'replace_all':
# replace all vocab by Pre-trained embeddings
word_vecs, vocab_glove = deepcrf.util.load_glove_embedding_include_vocab(emb_file)
vocab = vocab_glove
elif word_emb_vocab_type == 'replace_only':
word_ids, word_vecs = deepcrf.util.load_glove_embedding(emb_file, vocab)
assert_no_emb(word_vecs)
elif word_emb_vocab_type == 'additional':
word_vecs, vocab_glove = deepcrf.util.load_glove_embedding_include_vocab(emb_file)
additional_vecs = []
for word, word_idx in sorted(six.iteritems(vocab_glove), key=lambda x: x[1]):
if word not in vocab:
vocab[word] = len(vocab)
additional_vecs.append(word_vecs[word_idx])
additional_vecs = np.array(additional_vecs, dtype=np.float32)
if args.get('vocab_file', False):
vocab_file = args['vocab_file']
vocab = deepcrf.util.load_vocab(vocab_file)
if args.get('vocab_char_file', False):
vocab_char_file = args['vocab_char_file']
vocab_char = deepcrf.util.load_vocab(vocab_char_file)
vocab_tags_inv = dict((v, k) for k, v in six.iteritems(vocab_tags))
PAD_IDX = vocab[PADDING]
UNK_IDX = vocab[UNKWORD]
CHAR_PAD_IDX = vocab_char[PADDING]
CHAR_UNK_IDX = vocab_char[UNKWORD]
tmp_xp = xp
if efficient_gpu:
tmp_xp = np # use CPU (numpy)
def parse_to_word_ids(sentences, word_input_idx, vocab):
return deepcrf.util.parse_to_word_ids(sentences, xp=tmp_xp, vocab=vocab,
UNK_IDX=UNK_IDX, idx=word_input_idx)
def parse_to_char_ids(sentences):
return deepcrf.util.parse_to_char_ids(sentences, xp=tmp_xp, vocab_char=vocab_char,
UNK_IDX=CHAR_UNK_IDX, idx=word_input_idx)
def parse_to_tag_ids(sentences):
return deepcrf.util.parse_to_tag_ids(sentences, xp=tmp_xp, vocab=vocab_tags,
UNK_IDX=-1, idx=-1)
x_train = parse_to_word_ids(sentences_train, word_input_idx, vocab)
x_char_train = parse_to_char_ids(sentences_train)
y_train = parse_to_tag_ids(sentences_train)
x_train_additionals = [parse_to_word_ids(sentences_train, ad_feat_id, vocab_adds[i])
for i, ad_feat_id in enumerate(additional_input_idx)]
x_dev = parse_to_word_ids(sentences_dev, word_input_idx, vocab)
x_char_dev = parse_to_char_ids(sentences_dev)
y_dev = parse_to_tag_ids(sentences_dev)
x_dev_additionals = [parse_to_word_ids(sentences_dev, ad_feat_id, vocab_adds[i])
for i, ad_feat_id in enumerate(additional_input_idx)]
y_dev_cpu = [[w[-1] for w in sentence]
for sentence in sentences_dev]
# tag_names = []
tag_names = list(set([tag[2:] if len(tag) >= 2 else tag[0]
for tag in six.iterkeys(vocab_tags)]))
x_test = parse_to_word_ids(sentences_test, word_input_idx, vocab)
x_char_test = parse_to_char_ids(sentences_test)
y_test = parse_to_tag_ids(sentences_test)
x_test_additionals = [parse_to_word_ids(sentences_test, ad_feat_id, vocab_adds[i])
for i, ad_feat_id in enumerate(additional_input_idx)]
cnt_train_unk = sum([tmp_xp.sum(d == UNK_IDX) for d in x_train])
cnt_train_word = sum([d.size for d in x_train])
unk_train_unk_rate = float(cnt_train_unk) / cnt_train_word
cnt_dev_unk = sum([tmp_xp.sum(d == UNK_IDX) for d in x_dev])
cnt_dev_word = sum([d.size for d in x_dev])
unk_dev_unk_rate = float(cnt_dev_unk) / max(cnt_dev_word, 1)
logging.info('train:' + str(len(x_train)))
logging.info('dev :' + str(len(x_dev)))
logging.info('test :' + str(len(x_test)))
logging.info('vocab :' + str(len(vocab)))
logging.info('vocab_tags:' + str(len(vocab_tags)))
logging.info('unk count (train):' + str(cnt_train_unk))
logging.info('unk rate (train):' + str(unk_train_unk_rate))
logging.info('cnt all words (train):' + str(cnt_train_word))
logging.info('unk count (dev):' + str(cnt_dev_unk))
logging.info('unk rate (dev):' + str(unk_dev_unk_rate))
logging.info('cnt all words (dev):' + str(cnt_dev_word))
# show model config
logging.info('######################')
logging.info('## Model Config')
logging.info('model_name:' + str(model_name))
logging.info('batchsize:' + str(batchsize))
logging.info('optimizer:' + str(optimizer_name))
# Save model config
logging.info('######################')
logging.info('## Model Save Config')
logging.info('save_dir :' + str(save_dir))
# save vocab
logging.info('save_vocab :' + save_vocab)
logging.info('save_vocab_char :' + save_vocab_char)
logging.info('save_tags_vocab :' + save_tags_vocab)
logging.info('save_train_config :' + save_train_config)
init_emb = None
if is_train:
deepcrf.util.write_vocab(save_vocab, vocab)
deepcrf.util.write_vocab(save_vocab_char, vocab_char)
deepcrf.util.write_vocab(save_tags_vocab, vocab_tags)
deepcrf.util.write_vocab(save_train_config, args)
for i, vocab_add in enumerate(vocab_adds):
save_additional_vocab = save_name + '.vocab_additional_' + str(i)
deepcrf.util.write_vocab(save_additional_vocab, vocab_add)
n_vocab_add = [len(_vadd) for _vadd in vocab_adds]
net = BiLSTM_CNN_CRF(n_vocab=len(vocab), n_char_vocab=len(vocab_char),
emb_dim=args['n_word_emb'],
hidden_dim=args['n_hidden'],
n_layers=args['n_layer'], init_emb=init_emb,
char_input_dim=args['n_char_emb'],
char_hidden_dim=args['n_char_hidden'],
n_label=len(vocab_tags),
n_add_feature_dim=args['n_add_feature_emb'],
n_add_feature=len(n_vocab_add),
n_vocab_add=n_vocab_add,
use_cudnn=args['use_cudnn'])
my_cudnn(args['use_cudnn'])
if args.get('word_emb_file', False):
if word_emb_vocab_type == 'replace_all':
# replace all vocab by Pre-trained embeddings
assert_word_emb_shape(word_vecs.shape[1], net.word_embed.W.shape[1])
net.word_embed.W.data = word_vecs[:]
elif word_emb_vocab_type == 'replace_only':
assert_no_emb(word_vecs)
assert_word_emb_shape(word_vecs.shape[1], net.word_embed.W.shape[1])
net.word_embed.W.data[word_ids] = word_vecs[:]
elif word_emb_vocab_type == 'additional':
assert_word_emb_shape(word_vecs.shape[1], net.word_embed.W.shape[1])
v_size = additional_vecs.shape[0]
net.word_embed.W.data[-v_size:] = additional_vecs[:]
if args.get('return_model', False):
return net
if args['gpu'] >= 0:
net.to_gpu()
init_alpha = args['init_lr']
if optimizer_name == 'adam':
opt = optimizers.Adam(alpha=init_alpha, beta1=0.9, beta2=0.9)
elif optimizer_name == 'adadelta':
opt = optimizers.AdaDelta()
if optimizer_name == 'sgd_mom':
opt = optimizers.MomentumSGD(lr=init_alpha, momentum=0.9)
if optimizer_name == 'sgd':
opt = optimizers.SGD(lr=init_alpha)
opt.setup(net)
opt.add_hook(chainer.optimizer.GradientClipping(5.0))
def eval_loop(x_data, x_char_data, y_data, x_train_additionals=[]):
# dev or test
net.set_train(train=False)
iteration_list = range(0, len(x_data), batchsize)
# perm = np.random.permutation(len(x_data))
sum_loss = 0.0
predict_lists = []
for i_index, index in enumerate(iteration_list):
x = x_data[index:index + batchsize]
x_char = x_char_data[index:index + batchsize]
target_y = y_data[index:index + batchsize]
if efficient_gpu:
x = [to_gpu(_) for _ in x]
x_char = [[to_gpu(_) for _ in words] for words in x_char]
target_y = [to_gpu(_) for _ in target_y]
x_additional = []
if len(x_train_additionals):
x_additional = [[to_gpu(_) for _ in x_ad[index:index + batchsize]]
for x_ad in x_train_additionals]
output = net(x_data=x, x_char_data=x_char, x_additional=x_additional)
predict, loss = net.predict(output, target_y)
sum_loss += loss.data
predict_lists.extend(predict)
_, predict_tags = zip(*predict_lists)
predicted_results = []
for predict in predict_tags:
predicted = [vocab_tags_inv[tag_idx] for tag_idx in to_cpu(predict)]
predicted_results.append(predicted)
return predict_lists, sum_loss, predicted_results
if args['model_filename']:
model_filename = args['model_filename']
serializers.load_hdf5(model_filename, net)
if is_test:
# predict
# model_filename = args['model_filename']
# model_filename = os.path.join(save_dir, model_filename)
# serializers.load_hdf5(model_filename, net)
vocab_tags_inv = dict([(v, k) for k, v in six.iteritems(vocab_tags)])
x_predict = x_train
x_char_predict = x_char_train
x_additionals = x_train_additionals
y_predict = y_train
if dev_file:
predict_dev, loss_dev, predict_dev_tags = eval_loop(
x_dev, x_char_dev, y_dev, x_dev_additionals)
gold_predict_pairs = [y_dev_cpu, predict_dev_tags]
result, phrase_info = deepcrf.util.conll_eval(
gold_predict_pairs, flag=False, tag_class=tag_names)
all_result = result['All_Result']
print('all_result: {}'.format(all_result))
predict_pairs, _, _tmp = eval_loop(x_predict, x_char_predict, y_predict, x_additionals)
_, predict_tags = zip(*predict_pairs)
predicted_output = args['predicted_output']
predicted_results = []
for predict in predict_tags:
predicted = [vocab_tags_inv[tag_idx] for tag_idx in to_cpu(predict)]
predicted_results.append(predicted)
with open(predicted_output, 'w') as f:
for predicted in predicted_results:
for tag in predicted:
f.write(tag + '\n')
f.write('\n')
return False
logging.info('start training...')
tmax = args['max_iter']
t = 0.0
prev_dev_accuracy = 0.0
prev_dev_f = 0.0
for epoch in six.moves.xrange(args['max_iter']):
# train
logging.info('epoch:' + str(epoch))
logging.info(' [train]')
net.set_train(train=True)
iteration_list = range(0, len(x_train), batchsize)
perm = np.random.permutation(len(x_train))
sum_loss = 0.0
predict_train = []
for i_index, index in enumerate(iteration_list):
data = [(x_train[i], x_char_train[i], y_train[i])
for i in perm[index:index + batchsize]]
x, x_char, | |
# -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
""" properties used by Document object """
import decimal
import datetime
import re
import time
try:
    # BUG FIX: the ABC aliases in the top-level `collections` module were
    # removed in Python 3.10, which made this import fail and silently
    # disabled SetProperty support. Prefer collections.abc (Python >= 3.3)
    # and fall back to the old location for ancient interpreters.
    try:
        from collections.abc import MutableSet, Iterable
    except ImportError:
        from collections import MutableSet, Iterable

    def is_iterable(c):
        """Return True if *c* can be iterated over."""
        return isinstance(c, Iterable)

    support_setproperty = True
except ImportError:
    support_setproperty = False
from couchdbkit.exceptions import BadValueError
# Public names exported by this module. SetProperty/LazySet are appended
# only when the collections ABCs were importable (support_setproperty).
__all__ = ['ALLOWED_PROPERTY_TYPES', 'Property', 'StringProperty',
        'IntegerProperty', 'DecimalProperty', 'BooleanProperty',
        'FloatProperty', 'DateTimeProperty', 'DateProperty',
        'TimeProperty', 'DictProperty', 'StringDictProperty',
        'ListProperty', 'StringListProperty',
        'dict_to_json', 'list_to_json',
        'value_to_json', 'MAP_TYPES_PROPERTIES', 'value_to_python',
        'dict_to_python', 'list_to_python', 'convert_property',
        'value_to_property', 'LazyDict', 'LazyList']

if support_setproperty:
    __all__ += ['SetProperty', 'LazySet']
# Python types that may be stored directly in a document property.
# Duplicate entries left over from the Python 2 -> 3 conversion
# (unicode/long collapsed into str/int) have been removed; membership is
# unchanged since this is a set.
ALLOWED_PROPERTY_TYPES = set([
    str,
    bool,
    int,
    float,
    datetime.datetime,
    datetime.date,
    datetime.time,
    decimal.Decimal,
    dict,
    list,
    set,
    type(None)
])

# Loose ISO-8601 matchers: `\D?` tolerates any single separator character.
# Raw strings avoid invalid-escape-sequence warnings on modern Python;
# the pattern text itself is unchanged.
re_date = re.compile(r'^(\d{4})\D?(0[1-9]|1[0-2])\D?([12]\d|0[1-9]|3[01])$')
re_time = re.compile(r'^([01]\d|2[0-3])\D?([0-5]\d)\D?([0-5]\d)?\D?(\d{3})?$')
re_datetime = re.compile(r'^(\d{4})\D?(0[1-9]|1[0-2])\D?([12]\d|0[1-9]|3[01])(\D?([01]\d|2[0-3])\D?([0-5]\d)\D?([0-5]\d)?\D?(\d{3})?([zZ]|([\+-])([01]\d|2[0-3])\D?([0-5]\d)?)?)?$')
re_decimal = re.compile(r'^(\d+)\.(\d+)$')
class Property(object):
    """ Property base which all other properties
    inherit."""

    # Class-wide counter recording the declaration order of properties
    # on a document class.
    creation_counter = 0

    def __init__(self, verbose_name=None, name=None,
            default=None, required=False, validators=None,
            choices=None):
        """ Default constructor for a property.

        :param verbose_name: str, verbose name of field, could
            be use for description
        :param name: str, name of field
        :param default: default value (may be a callable producing it)
        :param required: True if field is required, default is False
        :param validators: list of callable or callable, field validators
            function that are executed when document is saved.
        :param choices: optional list/dict/tuple restricting allowed values
        """
        self.verbose_name = verbose_name
        self.name = name
        self.default = default
        self.required = required
        self.validators = validators
        self.choices = choices
        self.creation_counter = Property.creation_counter
        Property.creation_counter += 1

    def __property_config__(self, document_class, property_name):
        # Called by the document metaclass: bind the property to its
        # owning class, defaulting the name to the attribute name.
        self.document_class = document_class
        if self.name is None:
            self.name = property_name

    def __property_init__(self, document_instance, value):
        """ method used to set value of the property when
        we create the document. Don't check required. """
        if value is not None:
            value = self.to_json(self.validate(value, required=False))
        document_instance._doc[self.name] = value

    def __get__(self, document_instance, document_class):
        # Descriptor access on the class itself returns the property.
        if document_instance is None:
            return self
        value = document_instance._doc.get(self.name)
        if value is not None:
            value = self._to_python(value)
        return value

    def __set__(self, document_instance, value):
        value = self.validate(value, required=False)
        document_instance._doc[self.name] = self._to_json(value)

    def __delete__(self, document_instance):
        # Deleting a property value is deliberately a no-op.
        pass

    def default_value(self):
        """ return default value """
        default = self.default
        if callable(default):
            default = default()
        return default

    def validate(self, value, required=True):
        """ validate value

        Raises BadValueError when a required value is missing, when the
        value is not one of `choices`, or when a validator rejects it.
        """
        if required and self.empty(value):
            if self.required:
                raise BadValueError("Property %s is required." % self.name)
        else:
            if self.choices and value is not None:
                # NOTE(review): if self.choices is neither list, dict nor
                # tuple, choice_list stays unbound and the membership test
                # raises NameError; kept as-is to preserve behavior.
                if isinstance(self.choices, list): choice_list = self.choices
                if isinstance(self.choices, dict): choice_list = list(self.choices.keys())
                if isinstance(self.choices, tuple): choice_list = [key for (key, name) in self.choices]
                if value not in choice_list:
                    raise BadValueError('Property %s is %r; must be one of %r' % (
                        self.name, value, choice_list))
            if self.validators:
                if isinstance(self.validators, (list, tuple)):
                    for validator in self.validators:
                        if callable(validator):
                            validator(value)
                elif callable(self.validators):
                    self.validators(value)
        return value

    def empty(self, value):
        """ test if value is empty """
        # 0 and False are real values; empty strings/containers and None
        # count as empty.
        return (not value and value != 0) or value is None

    def _to_python(self, value):
        # FIX: compare to None with `is` instead of `==` so a custom
        # __eq__ on the stored value can never be invoked here.
        if value is None:
            return value
        return self.to_python(value)

    def _to_json(self, value):
        # FIX: identity comparison with None (see _to_python).
        if value is None:
            return value
        return self.to_json(value)

    def to_python(self, value):
        """ convert to python type """
        return str(value)

    def to_json(self, value):
        """ convert to json, Converted value is saved in couchdb. """
        return self.to_python(value)

    data_type = None
class StringProperty(Property):
    """Property for text values.

    *Value type*: unicode
    """

    data_type = str

    # Conversion back from JSON is a plain str() call.
    to_python = str

    def validate(self, value, required=True):
        """Accept None or str; reject anything else."""
        value = super(StringProperty, self).validate(value,
                required=required)
        if value is None or isinstance(value, str):
            return value
        raise BadValueError(
            'Property %s must be unicode or str instance, not a %s' % (self.name, type(value).__name__))
class IntegerProperty(Property):
    """ Integer property. map to int

    *Value type*: int
    """
    to_python = int

    def empty(self, value):
        # 0 is a valid integer value, so only None counts as empty.
        return value is None

    def validate(self, value, required=True):
        """Accept None or int; reject anything else.

        NOTE(review): bool is a subclass of int, so booleans pass this
        check — kept as-is to avoid changing caller-visible behavior.
        """
        value = super(IntegerProperty, self).validate(value,
                required=required)
        if value is None:
            return value
        if value is not None and not isinstance(value, int):
            # BUG FIX: the message previously used
            # type(self.data_type).__name__, which is always 'type';
            # self.data_type.__name__ gives the intended 'int'.
            raise BadValueError(
                'Property %s must be %s or long instance, not a %s'
                % (self.name, self.data_type.__name__,
                    type(value).__name__))
        return value

    data_type = int

LongProperty = IntegerProperty
class FloatProperty(Property):
    """Property mapping to the python ``float`` type.

    *Value type*: float
    """

    to_python = float
    data_type = float

    def validate(self, value, required=True):
        """Accept None or float; ints are not coerced."""
        checked = super(FloatProperty, self).validate(value,
                required=required)
        if checked is None:
            return checked
        if isinstance(checked, float):
            return checked
        raise BadValueError(
            'Property %s must be float instance, not a %s'
            % (self.name, type(checked).__name__))

Number = FloatProperty
class BooleanProperty(Property):
    """Property mapping to the python ``bool`` type.

    *ValueType*: bool
    """

    to_python = bool
    data_type = bool

    def empty(self, value):
        """False is a real value; only None counts as empty."""
        return value is None

    def validate(self, value, required=True):
        """Accept None or bool; reject anything else."""
        value = super(BooleanProperty, self).validate(value,
                required=required)
        if value is not None and not isinstance(value, bool):
            raise BadValueError(
                'Property %s must be bool instance, not a %s'
                % (self.name, type(value).__name__))
        return value
class DecimalProperty(Property):
    """Property mapping to ``decimal.Decimal``.

    *ValueType*: decimal.Decimal
    """

    data_type = decimal.Decimal

    def to_python(self, value):
        """Rebuild the Decimal from its stored string form."""
        return self.data_type(value)

    def to_json(self, value):
        """Persist the decimal as a string to avoid float rounding."""
        return str(value)
class DateTimeProperty(Property):
    """DateTime property mapping to ``datetime.datetime``.

    Values are stored in the document as second-precision ISO strings
    suffixed with 'Z' and parsed back into naive datetimes.

    *ValueType*: datetime.datetime
    """

    data_type = datetime.datetime

    def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False,
            **kwds):
        super(DateTimeProperty, self).__init__(verbose_name, **kwds)
        # auto_now refreshes the stamp on every serialization;
        # auto_now_add only influences the default value.
        self.auto_now = auto_now
        self.auto_now_add = auto_now_add

    def validate(self, value, required=True):
        """Reject truthy values that are not datetime instances."""
        value = super(DateTimeProperty, self).validate(value, required=required)
        if value is None:
            return value
        if value and not isinstance(value, self.data_type):
            raise BadValueError('Property %s must be a %s, current is %s' %
                (self.name, self.data_type.__name__, type(value).__name__))
        return value

    def default_value(self):
        """Use the current time when auto_now/auto_now_add is set."""
        if self.auto_now or self.auto_now_add:
            return self.now()
        return Property.default_value(self)

    def to_python(self, value):
        """Parse an ISO date/time string into a naive datetime."""
        if not isinstance(value, str):
            return value
        # Drop microseconds and any timezone suffix before parsing.
        trimmed = value.split('.', 1)[0][:19]
        try:
            return datetime.datetime.strptime(trimmed, '%Y-%m-%dT%H:%M:%S')
        except ValueError as e:
            raise ValueError('Invalid ISO date/time %r [%s]' %
                    (trimmed, str(e)))

    def to_json(self, value):
        """Serialize to second-precision ISO format with a 'Z' suffix."""
        stamp = self.now() if self.auto_now else value
        if stamp is None:
            return stamp
        return stamp.replace(microsecond=0).isoformat() + 'Z'

    @staticmethod
    def now():
        return datetime.datetime.utcnow()
class DateProperty(DateTimeProperty):
    """Date-only variant of DateTimeProperty, mapping to ``datetime.date``.

    *ValueType*: datetime.date
    """

    data_type = datetime.date

    @staticmethod
    def now():
        return datetime.datetime.now().date()

    def to_python(self, value):
        """Parse a 'YYYY-MM-DD' string back into a date object."""
        if not isinstance(value, str):
            return value
        try:
            parsed = datetime.date(*time.strptime(value, '%Y-%m-%d')[:3])
        except ValueError as e:
            raise ValueError('Invalid ISO date %r [%s]' % (value,
                    str(e)))
        return parsed

    def to_json(self, value):
        """Serialize the date in ISO format (None passes through)."""
        return None if value is None else value.isoformat()
class TimeProperty(DateTimeProperty):
    """ Time property, like DateTime property but only
    for time. Map to datetime.time object

    *ValueType*: datetime.time
    """

    data_type = datetime.time

    @staticmethod
    def now():
        # BUG FIX: this @staticmethod erroneously declared a `self`
        # parameter, so the zero-argument self.now() call made by
        # DateTimeProperty.default_value() raised TypeError whenever
        # auto_now/auto_now_add was used.
        return datetime.datetime.now().time()

    def to_python(self, value):
        """ Parse an 'HH:MM:SS[.ffffff]' string into a datetime.time. """
        if isinstance(value, str):
            try:
                value = value.split('.', 1)[0] # strip out microseconds
                value = datetime.time(*time.strptime(value, '%H:%M:%S')[3:6])
            except ValueError as e:
                raise ValueError('Invalid ISO time %r [%s]' % (value,
                    str(e)))
        return value

    def to_json(self, value):
        """ Serialize to a second-precision ISO time string. """
        if value is None:
            return value
        return value.replace(microsecond=0).isoformat()
class DictProperty(Property):
    """ A property that stores a dict of things"""

    data_type = dict

    def __init__(self, verbose_name=None, default=None,
            required=False, **kwds):
        """
        :args verbose_name: Optional verbose name.
        :args default: Optional default value; if omitted, an empty dict
            is used.
        :args**kwds: Optional additional keyword arguments, passed to base
            class.
        """
        Property.__init__(self, verbose_name,
                default=({} if default is None else default),
                required=required, **kwds)

    def validate(self, value, required=True):
        """Check the value is a dict whose items are storable types."""
        value = super(DictProperty, self).validate(value, required=required)
        if not value:
            return value
        if not isinstance(value, dict):
            raise BadValueError('Property %s must be a dict' % self.name)
        return self.validate_dict_contents(value)

    def validate_dict_contents(self, value):
        """Delegate item validation, re-raising with a property message."""
        try:
            return validate_dict_content(value)
        except BadValueError:
            raise BadValueError(
                'Items of %s dict must all be in %s' %
                    (self.name, ALLOWED_PROPERTY_TYPES))

    def default_value(self):
        """Return a shallow copy of the default dict.

        The copy prevents all fields built from the same static default
        from sharing one mutable instance.
        """
        base = super(DictProperty, self).default_value()
        return dict(base) if base is not None else {}

    def to_python(self, value):
        """Wrap the stored mapping in a lazily-converting dict."""
        return LazyDict(value)

    def to_json(self, value):
        """Convert the mapping to its JSON-compatible form."""
        return value_to_json(value)
class StringDictProperty(DictProperty):
    """Dict property whose items must all be strings."""

    def to_python(self, value):
        """Wrap as a lazy dict constrained to string items."""
        return LazyDict(value, item_type=str)

    def validate_dict_contents(self, value):
        """Validate every item as a string, with a property message."""
        try:
            return validate_dict_content(value, str)
        except BadValueError:
            raise BadValueError(
                'Items of %s dict must all be in %s' %
                (self.name, str))
class ListProperty(Property):
"""A property that stores a list of things.
"""
def __init__(self, verbose_name=None, default=None,
required=False, item_type=None, **kwds):
"""Construct ListProperty.
:args verbose_name: Optional verbose name.
:args default: Optional default | |
# tensorflow/contrib/bayesflow/python/ops/csiszar_divergence_impl.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Csiszar f-Divergence and helpers.
@@amari_alpha
@@arithmetic_geometric
@@chi_square
@@dual_csiszar_function
@@jeffreys
@@jensen_shannon
@@kl_forward
@@kl_reverse
@@log1p_abs
@@modified_gan
@@monte_carlo_csiszar_f_divergence
@@pearson
@@squared_hellinger
@@symmetrized_csiszar_function
@@total_variation
@@triangular
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.bayesflow.python.ops import monte_carlo_impl as monte_carlo
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import distribution
def amari_alpha(logu, alpha=1., self_normalized=False, name=None):
  """The Amari-alpha Csiszar-function in log-space.

  A Csiszar-function is a member of ``F = { f:R_+ to R : f convex }``.

  When `self_normalized = True`, the Amari-alpha Csiszar-function is:

  ```none
  f(u) = { -log(u) + (u - 1),                                      alpha = 0
         { u log(u) - (u - 1),                                     alpha = 1
         { [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise
  ```

  When `self_normalized = False` the `(u - 1)` terms are dropped.

  Warning: when `alpha != 0` and/or `self_normalized = True` this function
  performs non-log-space arithmetic and may be numerically unstable for
  `|logu| >> 0`.

  For more information, see:
    A. Cichocki and S. Amari. "Families of Alpha-Beta-and Gamma-Divergences:
    Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6,
    pp. 1532-1568, 2010.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    alpha: Floating-type Python scalar. (See Mathematical Details for
      meaning.)
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    amari_alpha_of_u: Floating-type `Tensor` of the Csiszar-function
      evaluated at `u = exp(logu)`.

  Raises:
    TypeError: if `alpha` is `None` or a `Tensor`.
    TypeError: if `self_normalized` is `None` or a `Tensor`.
  """
  with ops.name_scope(name, "amari_alpha", [logu]):
    # Both flags must be static Python values: they select the formula.
    if alpha is None or contrib_framework.is_tensor(alpha):
      raise TypeError("`alpha` cannot be `None` or `Tensor` type.")
    if self_normalized is None or contrib_framework.is_tensor(self_normalized):
      raise TypeError("`self_normalized` cannot be `None` or `Tensor` type.")

    logu = ops.convert_to_tensor(logu, name="logu")

    # Base term (the self_normalized=False form).
    if alpha == 0.:
      base = -logu
    elif alpha == 1.:
      base = math_ops.exp(logu) * logu
    else:
      base = math_ops.expm1(alpha * logu) / (alpha * (alpha - 1.))

    if not self_normalized:
      return base

    # Fold in the `-alpha (u - 1) / (alpha (alpha - 1))` normalization,
    # built lazily so the extra ops only exist when requested.
    if alpha == 0.:
      return base + math_ops.expm1(logu)
    elif alpha == 1.:
      return base - math_ops.expm1(logu)
    else:
      return base - math_ops.expm1(logu) / (alpha - 1.)
def kl_reverse(logu, self_normalized=False, name=None):
  """The reverse Kullback-Leibler Csiszar-function in log-space.

  A Csiszar-function is a member of ``F = { f:R_+ to R : f convex }``.

  When `self_normalized = True`, the KL-reverse Csiszar-function is
  `f(u) = -log(u) + (u - 1)`; when `self_normalized = False` the
  `(u - 1)` term is omitted.

  As an f-Divergence this Csiszar-function implies `D_f[p, q] = KL[q, p]`.
  The KL is "reverse" because in maximum likelihood we think of minimizing
  `q` as in `KL[p, q]`.

  Warning: when `self_normalized = True` this function performs
  non-log-space arithmetic and may be numerically unstable for
  `|logu| >> 0`.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    kl_reverse_of_u: Floating-type `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.

  Raises:
    TypeError: if `self_normalized` is `None` or a `Tensor`.
  """
  with ops.name_scope(name, "kl_reverse", [logu]):
    # KL-reverse is exactly the Amari-alpha family at alpha = 0.
    return amari_alpha(logu, alpha=0., self_normalized=self_normalized)
def kl_forward(logu, self_normalized=False, name=None):
  """The forward Kullback-Leibler Csiszar-function in log-space.

  A Csiszar-function is a member of ``F = { f:R_+ to R : f convex }``.

  When `self_normalized = True`, the KL-forward Csiszar-function is
  `f(u) = u log(u) - (u - 1)`; when `self_normalized = False` the
  `(u - 1)` term is omitted.

  As an f-Divergence this Csiszar-function implies `D_f[p, q] = KL[p, q]`.
  The KL is "forward" because in maximum likelihood we think of minimizing
  `q` as in `KL[p, q]`.

  Warning: this function performs non-log-space arithmetic and may be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    kl_forward_of_u: Floating-type `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.

  Raises:
    TypeError: if `self_normalized` is `None` or a `Tensor`.
  """
  with ops.name_scope(name, "kl_forward", [logu]):
    # KL-forward is exactly the Amari-alpha family at alpha = 1.
    return amari_alpha(logu, alpha=1., self_normalized=self_normalized)
def jensen_shannon(logu, self_normalized=False, name=None):
  """The Jensen-Shannon Csiszar-function in log-space.

  A Csiszar-function is a member of ``F = { f:R_+ to R : f convex }``.

  When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:

  ```none
  f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
  ```

  When `self_normalized = False` the `(u + 1) log(2)` term is omitted.

  As an f-Divergence this Csiszar-function implies:

  ```none
  D_f[p, q] = KL[p, m] + KL[q, m]
  m(x) = 0.5 p(x) + 0.5 q(x)
  ```

  In a sense this divergence is the "reverse" of the Arithmetic-Geometric
  f-Divergence. It is symmetric, i.e., `D_f[p, q] = D_f[q, p]`.

  Warning: this function performs non-log-space arithmetic and may be
  numerically unstable for `|logu| >> 0`.

  For more information, see:
    J. Lin. "Divergence measures based on the Shannon entropy." IEEE Trans.
    Inf. Th., 37, 145-151, 1991.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    jensen_shannon_of_u: Floating-type `Tensor` of the Csiszar-function
      evaluated at `u = exp(logu)`.
  """
  with ops.name_scope(name, "jensen_shannon", [logu]):
    logu = ops.convert_to_tensor(logu, name="logu")
    # softplus(logu) = log(1 + u), computed stably in log-space.
    softplus_logu = nn_ops.softplus(logu)
    if self_normalized:
      softplus_logu -= np.log(2).astype(logu.dtype.as_numpy_dtype)
    return (math_ops.exp(logu) * logu
            - (1. + math_ops.exp(logu)) * softplus_logu)
def arithmetic_geometric(logu, self_normalized=False, name=None):
  """The Arithmetic-Geometric Csiszar-function in log-space.

  A Csiszar-function is a member of ``F = { f:R_+ to R : f convex }``.

  When `self_normalized = True` the Arithmetic-Geometric Csiszar-function is:

  ```none
  f(u) = (1 + u) log( (1 + u) / sqrt(u) ) - (1 + u) log(2)
  ```

  When `self_normalized = False` the `(1 + u) log(2)` term is omitted.

  As an f-Divergence this Csiszar-function implies:

  ```none
  D_f[p, q] = KL[m, p] + KL[m, q]
  m(x) = 0.5 p(x) + 0.5 q(x)
  ```

  In a sense this divergence is the "reverse" of the Jensen-Shannon
  f-Divergence. It is symmetric, i.e., `D_f[p, q] = D_f[q, p]`.

  Warning: this function performs non-log-space arithmetic and may be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    arithmetic_geometric_of_u: Floating-type `Tensor` of the Csiszar-function
      evaluated at `u = exp(logu)`.
  """
  with ops.name_scope(name, "arithmetic_geometric", [logu]):
    logu = ops.convert_to_tensor(logu, name="logu")
    # log((1 + u) / sqrt(u)) computed stably via softplus(logu) - logu/2.
    log_ratio = nn_ops.softplus(logu) - 0.5 * logu
    if self_normalized:
      log_ratio -= np.log(2.).astype(logu.dtype.as_numpy_dtype)
    return (1. + math_ops.exp(logu)) * log_ratio
def total_variation(logu, name=None):
"""The Total Variation Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Total-Variation Csiszar-function is:
```none
f(u) = 0.5 |u - 1|
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: Floating-type `Tensor` representing | |
6032 19728844 6032
19729379 6032 19730381 6032
19757045 9423 19758205 9423
19815622 17846 19816907 17846
19843858 6017 19845085 6017
19870553 22754 19871717 22754
19904035 97 19904908 97
19968539 14483 19969543 14483
19968539 14483 19969603 14483
19968539 14483 19969645 14483
19993679 56 19994657 56
20006661 56 20007898 56
20013155 56 20014000 56
20019637 56 20020817 56
20022947 56 20023878 56
20057171 15187 20058275 15187
20057171 15187 20058383 15187
20084950 2268 20085898 2268
20084950 2268 20085972 2268
20112852 4421 20113920 4421
20130153 349 20131513 349
20139846 349 20141137 349
20143710 349 20144681 349
20166756 6793 20167519 6793
20166756 6793 20167681 6793
20173596 6793 20174599 6793
20175117 6793 20176472 6793
20182562 6793 20183628 6793
20186973 3325 20188183 3325
20188678 3325 20189734 3325
20188678 3325 20189788 3325
20193436 3325 20194408 3325
20218417 289 20219571 289
20240937 289 20241856 289
20253028 8340 20254287 8340
20254965 8340 20255838 8340
20267696 8340 20268839 8340
20310687 17723 20311908 17723
20317604 17723 20318736 17723
20324108 2095 20325452 2095
20350870 2095 20351910 2095
20360908 2095 20362156 2095
20372473 8364 20373948 8364
20405048 13217 20405904 13217
20421325 13217 20422375 13217
20449955 21851 20450830 21851
20469261 6099 20470359 6099
20476064 6099 20477489 6099
20478017 6099 20478858 6099
20485700 6099 20486944 6099
20504994 7654 20506449 7654
20528535 6647 20529678 6647
20541964 6647 20543035 6647
20555373 6647 20556623 6647
20560718 6647 20561976 6647
20572359 23436 20573401 23436
20586314 23436 20587337 23436
20596327 8294 20597278 8294
20597877 8294 20598761 8294
20599328 8294 20600296 8294
20602563 8294 20603399 8294
20605649 8294 20606568 8294
20615214 8294 20616186 8294
20619877 319 20621184 319
20641344 319 20642584 319
20646397 319 20647599 319
20656097 2087 20657017 2087
20665738 2087 20666932 2087
20719387 20175 20720193 20175
20738746 2137 20739910 2137
20743515 2137 20744665 2137
20753269 2137 20754217 2137
20761213 12588 20762414 12588
20766711 12588 20767729 12588
20768420 12588 20769787 12588
20801518 7578 20802907 7578
20803336 7578 20804141 7578
20817663 16870 20818709 16870
20838210 16870 20839300 16870
20841443 16870 20842625 16870
20844818 4980 20846064 4980
20854542 4980 20855238 4980
20854542 4980 20855344 4980
20857539 4980 20858720 4980
20906206 7657 20907247 7657
20941935 3351 20943358 3351
20945404 3351 20946423 3351
20958534 29054 20959860 29054
20971926 29054 20973098 29054
21001207 2444 21002064 2444
21009246 6679 21010252 6679
21045316 6760 21046518 6760
21051963 6760 21053163 6760
21062137 6760 21063183 6760
21063697 6760 21064785 6760
21065307 6760 21066239 6760
21097551 6160 21098784 6160
21116091 4407 21117066 4407
21129384 258 21130581 258
21132798 258 21133722 258
21136007 258 21137127 258
21141217 258 21142254 258
21154372 258 21155358 258
21155989 258 21157257 258
21177801 264 21179011 264
21198926 158 21200213 158
21218313 158 21219226 158
21229654 158 21230596 158
21244033 13192 21244976 13192
21244033 13192 21245006 13192
21268058 8386 21269237 8386
21275080 8386 21275977 8386
21292979 2392 21294058 2392
21296410 2392 21297529 2392
21301529 2392 21302499 2392
21337150 9326 21338299 9326
21354082 7709 21355289 7709
21385572 18282 21386491 18282
21391579 18282 21392497 18282
21396228 18282 21397215 18282
21397739 18282 21398975 18282
21397739 18282 21399097 18282
21399625 18282 21400671 18282
21399625 18282 21400701 18282
21424226 5142 21425474 5142
21439251 5142 21440456 5142
21448519 5142 21449457 5142
21461689 38 21463063 38
21463628 38 21464499 38
21466707 38 21467803 38
21481917 38 21483013 38
21504678 207 21505636 207
21511505 207 21512673 207
21545657 11230 21546587 11230
21575904 8369 21577113 8369
21577710 8369 21579081 8369
21596438 13370 21597242 13370
21596438 13370 21597488 13370
21614425 13370 21615545 13370
21617463 13370 21618380 13370
21620691 3379 21621820 3379
21630946 3379 21632094 3379
21642314 3379 21643269 3379
21645807 3379 21646877 3379
21649157 3379 21650187 3379
21653952 6065 21655134 6065
21655845 6065 21657252 6065
21677037 6065 21678357 6065
21678854 6065 21679850 6065
21698874 15046 21699819 15046
21698874 15046 21700085 15046
21716432 15046 21717347 15046
21723380 15046 21724487 15046
21740480 397 21741405 397
21761613 77 21762681 77
21763379 77 21764538 77
21771800 77 21773014 77
21785458 6699 21786509 6699
21791898 6699 21793122 6699
21809031 6699 21810363 6699
21812496 6699 21813742 6699
21812496 6699 21813880 6699
21814321 6699 21815366 6699
21821262 6619 21822441 6619
21824588 6619 21825826 6619
21841692 6619 21842958 6619
21861079 11826 21862345 11826
21879171 11826 21880187 11826
21890875 11826 21892046 11826
21909837 3254 21910833 3254
21926546 3254 21927338 3254
21963345 9530 21964411 9530
21972720 9530 21973692 9530
21974238 9530 21975237 9530
21975600 9530 21976689 9530
21996068 2494 21996978 2494
22008392 2494 22009450 2494
22011597 2494 22012774 2494
22021852 7688 22022799 7688
22037923 7688 22039151 7688
22058225 140 22058968 140
22059582 140 22060368 140
22064271 140 22065553 140
22086903 2498 22088464 2498
22105422 2498 22106563 2498
22105422 2498 22106669 2498
22118154 15174 22119333 15174
22137248 15174 22138326 15174
22140299 2292 22141212 2292
22159543 2292 22160683 2292
22165840 2292 22166810 2292
22169031 5944 22170328 5944
22170769 5944 22171717 5944
22184740 5944 22186039 5944
22212399 8417 22213645 8417
22215803 8417 22216839 8417
22225567 8417 22226563 8417
22227035 8417 22227989 8417
22261306 19531 22262264 19531
22264645 19531 22265605 19531
22313504 12595 22314618 12595
22330954 12595 22332171 12595
22343797 5090 22344990 5090
22353654 5090 22354625 5090
22361971 5090 22362869 5090
22366247 3324 22367395 3324
22381004 3324 22382045 3324
22385932 3324 22387233 3324
22391027 3324 22391877 3324
22417278 8379 22418411 8379
22419126 8379 22420240 8379
22430856 8379 22432082 8379
22438730 8461 22439709 8461
22440144 8461 22441010 8461
22471465 8461 22472340 8461
22487340 6088 22488247 6088
22498354 6088 22499224 6088
22509499 8301 22510496 8301
22532227 9331 22533266 9331
22568162 2459 22569129 2459
22583118 2459 22583950 2459
22592881 2459 22594113 2459
22647129 20067 22648229 20067
22650234 20067 22651402 20067
22661890 20067 22662874 20067
22661890 20067 22662944 20067
22665305 20067 22666677 20067
22708126 3365 22709203 3365
22708126 3365 22709377 3365
22711522 3365 22712488 3365
22726189 3365 22727515 3365
22728121 8296 22729183 8296
22740993 8296 22742104 8296
22750447 8296 22751522 8296
22752114 8296 22753137 8296
22773493 8268 22774768 8268
22816234 21262 22817465 21262
22854538 4330 22855657 4330
22871571 4330 22872860 4330
22922947 5162 22923952 5162
22927379 5162 22928624 5162
22965101 4418 22966247 4418
22966749 4418 22967929 4418
22979971 4418 22981154 4418
22996313 2111 22997411 2111
22998048 2111 22999155 2111
23015718 2111 23016933 2111
23015718 2111 23016953 2111
23029095 22670 23030210 22670
23063015 175 23064142 175
23077607 175 23078645 175
23080881 175 23082167 175
23108875 4429 23109826 4429
23110348 4429 23111373 4429
23124828 4333 23126152 4333
23143996 4333 23144972 4333
23148595 7706 23149883 7706
23181339 7706 23182793 7706
23190359 7530 23191545 7530
23198573 7530 23199666 7530
23228734 7798 23229777 7798
23242075 7798 23243235 7798
23245378 7798 23246656 7798
23274406 2351 23275473 2351
23279330 2351 23280302 2351
23305289 12567 23306409 12567
23308567 12567 23309504 12567
23309978 12567 23310987 12567
23330578 10026 23331652 10026
23367720 4394 23368846 4394
23380716 4394 23381709 4394
23391943 11331 23392882 11331
23400870 11331 23402080 11331
23440648 9292 23441711 9292
23460774 6043 23461703 6043
23472004 6043 23473049 6043
23475280 6043 23476352 6043
23491303 197 23492442 197
23498982 197 23499997 197
23502239 197 23503315 197
23518158 2072 23519111 2072
23518158 2072 23519175 2072
23518158 2072 23519219 2072
23539205 2072 23540035 2072
23540418 6112 23541756 6112
23540418 6112 23541832 6112
23549746 6112 23551125 6112
23551549 6112 23552548 6112
23561008 6112 23561936 6112
23602644 67 23603515 67
23610373 5007 23611734 5007
23612324 5007 23613515 5007
23614085 5007 23615453 5007
23618960 5007 23620295 5007
23618960 5007 23620419 5007
23622383 5007 23623460 5007
23625502 5007 23626404 5007
23636627 5007 23637645 5007
23636627 5007 23637709 5007
23646485 5007 23647401 5007
23646485 5007 23647411 5007
23650988 20993 23652165 20993
23679636 20993 23680432 20993
23679636 20993 23680558 20993
23736579 6721 23737538 6721
23745285 6721 23746192 6721
23754803 6831 23756045 6831
23763310 6831 23764493 6831
23770131 6831 23771214 6831
23771865 6831 23772791 6831
23783076 5974 23784311 5974
23796250 5974 23797340 5974
23807292 5974 23808213 5974
23825037 7761 23826288 7761
23826722 7761 23827936 7761
23887627 9383 23888832 9383
23930185 3277 23931468 3277
23936867 3277 23937798 3277
23942648 3277 23943725 3277
23955100 3277 23956385 3277
23961855 | |
#! python3
"""
@created: 2020-10-11 11:10:00
@author: pyGuru
Moonlight PDF Reader
-------------------------------------------------------------------------------
Dependencies:
PyMuPDF v1.17.7+
PyMuPDF can be installed by : pip install pymupdf
-------------------------------------------------------------------------------
Description :
Moonlight PDF Reader is a python tkinter based pdf reader with many of the
advanced pdf features
"""
import os
import re
import tkinter as tk
import tkinter.simpledialog
from tkinter import PhotoImage
from tkinter import filedialog
from tkinter import messagebox
from PIL import ImageGrab
import MoonlightMiner
from CustomWidgets import CustomButton, RecentButton, CustomLabel, CustomFrame
class PDFReader(tk.Frame):
def __init__(self, master=None):
    """Build the main application frame and register global key bindings.

    :param master: The toplevel Tk window hosting this frame.
    """
    super().__init__(master=master)
    self.master = master
    self.pack()

    # Window chrome and sizing.
    self.bgcolor = 'gray18'
    self.master['bg'] = self.bgcolor
    self.width = self.master.winfo_screenwidth()
    self.height = self.master.winfo_screenheight()

    # Sidebar mode flags: the Home tab is shown first.
    self.isHome = True
    self.isAbout = False

    # Initialize state, then build every UI region.
    self._attributes()
    self.reader_frame()
    self.options_frame()
    self.right_frame()
    self.meta_frame()

    self.isFullscreen = False

    # Global keyboard shortcuts, all registered on the toplevel window.
    key_bindings = (
        ('<F11>', self.toggleFullScreen),
        ('<Escape>', self.quitFullScreen),
        ('<Left>', self.prev_page),
        ('<Up>', self.prev_page),
        ('<Right>', self.next_page),
        ('<Down>', self.next_page),
        ('<Control-plus>', self.zoom_in),
        ('<Control-minus>', self.zoom_out),
        ('<Return>', self.search_page),
        ('<Control-Key-o>', self.open_file),
    )
    for sequence, handler in key_bindings:
        self.master.bind(sequence, handler)
def _attributes(self):
    """Initialize document, paging and tool state to their defaults."""
    # Document handle and metadata (filled in when a PDF is opened).
    self.miner = None
    self.name = self.author = self.creator = self.producer = ''
    self.isEncrypted = False
    self.size = 0
    self.numPages = 0
    self.last_accessed = ''

    # Tk variables consumed by the tool dialogs (split/rotate ranges).
    self.from_ = tk.IntVar()
    self.to_ = tk.IntVar()
    self.rotate_all = tk.IntVar()

    # Rendering / paging state.
    self.current_page = 0
    self.pagesize = None
    self.pagewidth = 0
    self.pageheight = 0
    self.zoom = 1

    # Frame highlight styling shared by several frames.
    self.hthick = 0.5
    self.hcolor = 'gray15'

    # File bookkeeping.
    self.filepath = None
    self.fileisOpen = False
    self.recently_opened = []
    self.other_filepath = None
    self.custom_function = False

    # Rectangle coordinates for placing a watermark.
    self.x1, self.x2, self.y1, self.y2 = (
        tk.IntVar(), tk.IntVar(), tk.IntVar(), tk.IntVar())
# Frames
def reader_frame(self):
    """Build the central reading area.

    When a document is open this creates three sub-areas inside
    ``self.reader``: a top "ribbon" (title, page navigation, page search
    and zoom controls), a table-of-contents listbox on the left, and a
    scrollable canvas (``self.output``) where pages are rendered.
    When no document is open, a placeholder home image is shown instead.

    NOTE(review): ``up_icon``, ``down_icon``, ``search_icon``,
    ``zoom_in_icon``, ``zoom_out_icon`` and ``moonlight`` are referenced
    as module-level globals; presumably they are PhotoImages created
    elsewhere in the module -- confirm they exist before this runs.
    """
    self.reader = tk.Frame(self, bg=self.bgcolor, highlightthickness=self.hthick,
                           highlightcolor=self.hcolor)
    self.reader.configure(width=self.width-350, height=self.height)
    self.reader.grid_propagate(0)  # keep the frame at its configured size
    self.reader.grid(row=0, column=0, rowspan=3)
    if self.fileisOpen:
        # ribbon: title + navigation + search + zoom controls
        self.ribbon = CustomFrame(self.reader, bg=self.bgcolor)
        self.ribbon.configure(width=self.width-350, height=55)
        self.ribbon.grid(row=0, column=0, columnspan=2, padx=(0,5), sticky='W')
        self.title = CustomLabel(self.ribbon, width=40, text=self.name[:-4])  # strip extension
        self.title.grid(row=0, column=0, sticky='W', pady=(10,0), padx=2)
        self.up = tk.Button(self.ribbon, image=up_icon, bg=self.bgcolor,
                            relief=tk.FLAT, borderwidth=0, command=self.prev_page)
        self.up.grid(row=0, column=1, pady=(5,0), padx=(50,0))
        self.down = tk.Button(self.ribbon, image=down_icon, bg=self.bgcolor,
                              relief=tk.FLAT, borderwidth=0, command=self.next_page)
        self.down.grid(row=0, column=2, pady=(5,0), padx=(5,0))
        # Page-number entry plus search button (also bound to <Return>).
        self.temp_page = tk.StringVar()
        self.page_search = tk.Entry(self.ribbon, width=5)
        self.page_search['textvariable'] = self.temp_page
        self.page_search.grid(row=0, column=3, pady=(5,0), padx=(25,0))
        self.search = tk.Button(self.ribbon, image=search_icon, bg=self.bgcolor,
                                relief=tk.FLAT, borderwidth=0, command=self.search_page)
        self.search.grid(row=0, column=4, pady=(5,0), padx=(15,2))
        # "current / total" indicator; pages are 0-based internally.
        self.pagetext = f'{self.current_page + 1} / {self.numPages + 1}'
        self.pagelabel = CustomLabel(self.ribbon, text=self.pagetext, anchor='c', width=10)
        self.pagelabel.grid(row=0, column=5, padx=5, pady=(5,0))
        self.zoomin = tk.Button(self.ribbon, image=zoom_in_icon, bg=self.bgcolor,
                                relief=tk.FLAT, borderwidth=0, command=self.zoom_in)
        self.zoomin.grid(row=0, column=6, pady=(5,0), padx=(25,0))
        self.zoomout = tk.Button(self.ribbon, image=zoom_out_icon, bg=self.bgcolor,
                                 relief=tk.FLAT, borderwidth=0, command=self.zoom_out)
        self.zoomout.grid(row=0, column=7, pady=(5,0), padx=(5,0))
        self.zoomtext = f'{self.zoom * 100} %'
        self.zoomlabel = CustomLabel(self.ribbon, text=self.zoomtext, anchor='c', width=10)
        self.zoomlabel.grid(row=0, column=8, padx=5, pady=(5,0))
        # contents: table-of-contents listbox with its own scrollbar
        self.contents = CustomFrame(self.reader, bg=self.bgcolor)
        self.contents.configure(width=250, height=self.height-85)
        self.contents.grid(row=1, column=0)
        self.tablelabel = CustomLabel(self.contents, width=25, text='Table of Content', anchor='c')
        self.tablelabel.grid(row=0, column=0, padx=4, pady=10)
        self.content_scroll = tk.Scrollbar(self.contents, orient=tk.VERTICAL)
        self.content_scroll.grid(row=1, column=1, sticky='ns')
        self.content_list = tk.Listbox(self.contents, selectmode=tk.SINGLE,
                                       selectbackground='sky blue', bg=self.bgcolor, fg='white',
                                       yscrollcommand=self.content_scroll.set, font=('Times', 10))
        self.content_list.configure(height=38, width=36)
        self.enumerate_content()
        # Jump to the selected entry; <Enter>/<Leave> suspend the root's
        # arrow-key paging while the pointer is over the list.
        self.content_list.bind('<Double-1>', self.get_page_content)
        self.content_list.bind('<Return>', self.get_page_content)
        self.content_list.bind('<Enter>', self.root_unbind)
        self.content_list.bind('<Leave>', self.root_bind)
        self.content_list.grid(row=1, column=0, padx=2, sticky='W')
        self.content_scroll.config(command=self.content_list.yview)
        # display: scrollable canvas where rendered pages are drawn
        self.display = CustomFrame(self.reader, bg=self.bgcolor, borderwidth=0)
        self.display.configure(width=self.width-100-350, height=self.height-85)
        self.display.grid(row=1, column=1, padx=(0,5))
        # NOTE(review): scrollx is VERTICAL and scrolly HORIZONTAL -- the
        # names are swapped relative to convention, but the wiring below
        # is internally consistent.
        self.scrollx = tk.Scrollbar(self.display, orient=tk.VERTICAL)
        self.scrollx.grid(row=0, column=1, sticky='ns')
        self.scrolly = tk.Scrollbar(self.display, orient=tk.HORIZONTAL)
        self.scrolly.grid(row=1, column=0, sticky='we')
        self.output = tk.Canvas(self.display, bg=self.bgcolor)
        self.output.configure(width=740, height=self.height-110,
                              yscrollcommand=self.scrollx.set,
                              xscrollcommand=self.scrolly.set)
        self.output.grid(row=0, column=0)
        self.scrollx.configure(command=self.output.yview)
        self.scrolly.configure(command=self.output.xview)
    else:
        # Placeholder "home" screen: a read-only Text widget holding the logo.
        self.home_image = tk.Text(self.reader, bg=self.bgcolor, width=126, height=46)
        self.home_image.grid(row=0, column=0)
        self.home_image.insert(tk.END, '\n\n\n\n\n\n\t\t ')
        self.home_image.image_create(tk.END, image = moonlight)
        self.home_image.configure(state='disabled')
def options_frame(self):
    """Build the Home / Tools / About navigation bar (top-right)."""
    self.options = CustomFrame(self, bg=self.bgcolor)
    self.options.configure(width=350, height=55)
    self.options.grid(row=0, column=1, pady=(6,0))

    # (attribute name, caption, click handler, horizontal padding)
    button_specs = (
        ('homebutton', 'Home', self.toggle_home, (10, 20)),
        ('toolsbutton', 'Tools', self.toggle_tools, (5, 20)),
        ('aboutbutton', 'About', self.toggle_about, (5, 20)),
    )
    for column, (attr, caption, handler, padx) in enumerate(button_specs):
        button = CustomButton(self.options, text=caption, width=10,
                              command=handler)
        button.grid(row=0, column=column, sticky='W', padx=padx, pady=(1,1))
        setattr(self, attr, button)
def right_frame(self):
    """Build the right sidebar whose content depends on the active tab.

    Four mutually exclusive modes, selected by instance flags:
    Home (``self.isHome``) -- open/save/close/quit buttons;
    About (``self.isAbout``) -- read-only text loaded from files/about.txt;
    a custom tool frame (``self.custom_function``) -- left empty here, the
    tool-specific ``*_frame`` methods fill it in after calling this;
    otherwise the Tools grid of PDF operations.
    Buttons that need an open document are disabled when no file is open.
    """
    # frame for all custom functions on the right
    self.rightSidebar = CustomFrame(self, bg=self.bgcolor)
    self.rightSidebar.configure(width=350, height=self.height-400)
    self.rightSidebar.grid(row=1, column=1)
    # check if home option is selected, its by-default
    if self.isHome:
        self.openbutton = CustomButton(self.rightSidebar, text='Open', command=self.open_file)
        self.openbutton.grid(row=0, column=0, sticky='W', pady=(20,0), padx=10)
        self.savetext = CustomButton(self.rightSidebar, text='Save as Text', command=self.get_text)
        self.savetext.grid(row=1, column=0, sticky='W', pady=(10,0), padx=10)
        self.closebutton = CustomButton(self.rightSidebar, text='Close File', command=self.close_file)
        self.closebutton.grid(row=2, column=0, sticky='W', pady=(10,0), padx=10)
        self.quitbutton = CustomButton(self.rightSidebar, text='Quit App', command=self.quit)
        self.quitbutton.grid(row=3, column=0, sticky='W', pady=(10,0), padx=10)
        # Save/close only make sense with a document open.
        if not self.fileisOpen:
            self.savetext.configure(state='disabled')
            # self.snapshot.configure(state='disabled')
            self.closebutton.configure(state='disabled')
    # check if about option is selected, gives info about Moonlight
    elif self.isAbout:
        self.aboutbox = tk.Text(self.rightSidebar, bg=self.bgcolor, width=40, height=20,
                                relief=tk.FLAT, borderwidth=0, fg='white smoke',
                                font=('TkDefaultFont', 12))
        with open('files/about.txt', 'r') as file:
            data = file.read()
        self.aboutbox.insert(tk.END, data)
        self.aboutbox.configure(state='disabled')  # read-only
        self.aboutbox.grid(row=0, column=0, sticky='W', pady=(10,0), padx=10)
    # check if some custom function is clicked under tools functions
    elif self.custom_function:
        pass
    # tools option is selected under which lies variety of custom function
    else:
        self.extract = CustomButton(self.rightSidebar, text='Extract Page',
                                    command=self.extract_page)
        self.extract.grid(row=0, column=0, sticky='W', pady=(10,0), padx=10)
        self.extract_image = CustomButton(self.rightSidebar, text='Extract Images',
                                          command=self.extract_images)
        self.extract_image.grid(row=0, column=1, sticky='W', pady=(10,0), padx=10)
        self.rotate = CustomButton(self.rightSidebar, text='Rotate Page',
                                   command=self.rotate_page_frame)
        self.rotate.grid(row=1, column=0, sticky='W', pady=(10,0), padx=10)
        self.export = CustomButton(self.rightSidebar, text='Export PDF',
                                   command=self.export_pdf_frame)
        self.export.grid(row=1, column=1, sticky='W', pady=(15,0), padx=10)
        self.encrypt = CustomButton(self.rightSidebar, text='Encrypt PDF',
                                    command=self.encrypt_pdf)
        self.encrypt.grid(row=2, column=0, sticky='W', pady=(15,0), padx=10)
        self.decrypt = CustomButton(self.rightSidebar, text='Decrypt PDF',
                                    command=self.decrypt_pdf)
        self.decrypt.grid(row=2, column=1, sticky='W', pady=(15,0), padx=10)
        self.split = CustomButton(self.rightSidebar, text='Split PDF',
                                  command=self.split_pdf_frame)
        self.split.grid(row=3, column=0, sticky='W', pady=(15,0), padx=10)
        self.merge = CustomButton(self.rightSidebar, text='Merge PDF',
                                  command=self.merge_pdf_frame)
        self.merge.grid(row=3, column=1, sticky='W', pady=(15,0), padx=10)
        self.watermark = CustomButton(self.rightSidebar, text='watermark PDF',
                                      command=self.watermark_pdf_frame)
        self.watermark.grid(row=4, column=0, sticky='W', pady=(15,0), padx=10)
        # Every tool needs an open document.
        if not self.fileisOpen:
            self.extract.configure(state='disabled')
            self.extract_image.configure(state='disabled')
            self.rotate.configure(state='disabled')
            self.export.configure(state='disabled')
            self.encrypt.configure(state='disabled')
            self.decrypt.configure(state='disabled')
            self.split.configure(state='disabled')
            self.merge.configure(state='disabled')
            self.watermark.configure(state='disabled')
        # Encrypt/decrypt are mutually exclusive with the file's state.
        if self.isEncrypted:
            self.encrypt.configure(state='disabled')
        else:
            self.decrypt.configure(state='disabled')
def meta_frame(self):
    """Build the lower-right panel.

    Shows the recently-opened file list when no document is open,
    otherwise the current document's metadata (name, size, page count,
    author/creator/producer when present, page dimensions).
    """
    # Frame for pdf metadata and recents
    self.metadata = CustomFrame(self, bg=self.bgcolor)
    self.metadata.configure(width=350, height=350, highlightthickness=self.hthick,
                            highlightcolor=self.hcolor)
    self.metadata.grid(row=2, column=1)
    if not self.fileisOpen:
        self.recent = CustomLabel(self.metadata, text='Recent')
        self.recent.grid(row=0, column=0, sticky='W', pady=(10,5), padx=10)
        with open('files/recents.txt', 'r') as file:
            self.recently_opened = file.readlines()
        if not self.recently_opened:
            noFileIsOpen = CustomLabel(self.metadata, text='No file is opened recently')
            noFileIsOpen.grid(row=1, column=0, sticky='W', pady=(20,0), padx=10)
        else:
            # Keep at most the five most recent entries; each line is
            # semicolon-separated: "path;size;timestamp".
            self.recently_opened = self.recently_opened[:5]
            for row, entry in enumerate(self.recently_opened, start=1):
                data = entry.split(';')
                filepath = data[0]
                name = os.path.basename(filepath)[:-4]  # strip ".pdf"
                size = data[1]
                text = f"\n {name[:25]:<30}, {size}\n {data[2]}"
                # Default-arg binding so each button keeps its own path.
                btn = RecentButton(self.metadata, text=text,
                                   command=lambda temppath=filepath: self.open_recent(temppath))
                btn.grid(row=row, column=0, pady=(8,0), padx=10)
    else:
        self.nameLabel = CustomLabel(self.metadata, text=self.name)
        self.nameLabel.grid(row=0, column=0, sticky='W', pady=(10,0), padx=10)
        self.sizeLabel = CustomLabel(self.metadata, text='Size : ' + self.size)
        self.sizeLabel.grid(row=1, column=0, sticky='W', pady=(10,0), padx=10)
        self.pagecount = CustomLabel(self.metadata, text='Page Count : ' + str(self.numPages + 1))
        self.pagecount.grid(row=2, column=0, sticky='W', pady=(10,0), padx=10)
        if self.author:
            self.authorLabel = CustomLabel(self.metadata, text='Author : ' + self.author)
            # BUG FIX: the original gridded self.sizeLabel here a second
            # time, so the author label was created but never displayed.
            self.authorLabel.grid(row=3, column=0, sticky='W', pady=(10,0), padx=10)
        if self.creator:
            self.creatorLabel = CustomLabel(self.metadata, text='Creator : ' + self.creator)
            self.creatorLabel.grid(row=4, column=0, sticky='W', pady=(10,0), padx=10)
        if self.producer:
            self.producerLabel = CustomLabel(self.metadata, text='Producer : ' + self.producer)
            self.producerLabel.grid(row=5, column=0, sticky='W', pady=(10,0), padx=10)
        self.widthlabel = CustomLabel(self.metadata, text=f'Page Width : {self.pagewidth:.0f} px')
        self.widthlabel.grid(row=6, column=0, sticky='W', pady=(10,0), padx=10)
        self.heightlabel = CustomLabel(self.metadata, text=f'Page Height : {self.pageheight:.0f} px')
        self.heightlabel.grid(row=7, column=0, sticky='W', pady=(10,0), padx=10)
# custom function frames start from here
def rotate_page_frame(self):
# rotate the pdf page frame
self.rightSidebar.destroy()
self.custom_function = True
self.right_frame()
label = CustomLabel(self.rightSidebar, text='Page rotate', anchor='c')
label.grid(row=0, column=0, pady=15, padx=25, columnspan=3)
anglelabel = CustomLabel(self.rightSidebar, text='Select angle to rotate', anchor='w')
anglelabel.grid(row=1, column=0, pady=(15,0), padx=25, columnspan=3)
ANGLES = [("+90", 90), ("180", 180), ("-90", 270)]
self.angle = tk.IntVar()
self.angle.set(90)
self.radios = [self.create_radios(angle) for angle in ANGLES]
c = 0
for radio in self.radios:
radio.grid(row=2, column=c, pady=20)
c += 1
checkbutton = tk.Checkbutton(self.rightSidebar, text='Rotate all pages',
variable=self.rotate_all)
checkbutton.grid(row=3, column=0, columnspan=2, padx=10)
rotate = tk.Button(self.rightSidebar, bg=self.bgcolor, text='rotate', width=15,
command=self.rotate_page, fg='white', cursor='hand2')
rotate.grid(row=4, column=0, pady=20, columnspan=2)
back = tk.Button(self.rightSidebar, bg=self.bgcolor, image=back_icon,
command=self.go_to_tools)
back.grid(row=4, column=2, pady=20)
    def export_pdf_frame(self):
        """Rebuild the right sidebar with PNG/HTML/XML export buttons."""
        self.rightSidebar.destroy()
        self.custom_function = True
        self.right_frame()
        label = CustomLabel(self.rightSidebar, text='Export PDF', anchor='c')
        label.grid(row=0, column=0, pady=15, padx=10, columnspan=2)
        back = tk.Button(self.rightSidebar, bg=self.bgcolor, image=back_icon,
                         command=self.go_to_tools)
        back.grid(row=0, column=2, pady=20)
        # PNG export only renders the page being viewed right now.
        png = CustomButton(self.rightSidebar, text='Export to PNG', anchor='c',
                           command=lambda : self.miner.get_image(self.current_page))
        png.grid(row=1, column=0, pady=(15,0), padx=10, columnspan=3)
        html = CustomButton(self.rightSidebar, text='Export to HTML',anchor='c',
                            command=self.get_html)
        html.grid(row=2, column=0, pady=(15,0), padx=10, columnspan=3)
        xml = CustomButton(self.rightSidebar, text='Export to XML', anchor='c',
                           command=self.get_xml)
        xml.grid(row=3, column=0, pady=(15,0), padx=10, columnspan=3)
    def split_pdf_frame(self):
        """Rebuild the right sidebar with from/to page entries and a Split button."""
        self.rightSidebar.destroy()
        self.custom_function = True
        self.right_frame()
        label = CustomLabel(self.rightSidebar, text='Split PDF', anchor='c')
        label.grid(row=0, column=0, pady=15, padx=25, columnspan=3)
        fromlabel = CustomLabel(self.rightSidebar, text='From page : ', anchor='w', width=10)
        fromlabel.grid(row=1, column=0, pady=15, padx=10)
        # Entries are bound to self.from_ / self.to_ so split_pdf can read them.
        fromentry = tk.Entry(self.rightSidebar, width=6, bg=self.bgcolor, fg='white')
        fromentry['textvariable'] = self.from_
        fromentry.grid(row=1, column=1, columnspan=2)
        tolabel = CustomLabel(self.rightSidebar, text='To page : ', anchor='w', width=10)
        tolabel.grid(row=2, column=0, pady=15, padx=10)
        toentry = tk.Entry(self.rightSidebar, width=6, bg=self.bgcolor, fg='white')
        toentry['textvariable'] = self.to_
        toentry.grid(row=2, column=1, columnspan=2)
        split = tk.Button(self.rightSidebar, bg=self.bgcolor, text='Split', width=15,
                          command=self.split_pdf, fg='white', cursor='hand2')
        split.grid(row=3, column=0, pady=20, columnspan=2)
        back = tk.Button(self.rightSidebar, bg=self.bgcolor, image=back_icon,
                         command=self.go_to_tools)
        back.grid(row=3, column=2, pady=20)
    def merge_pdf_frame(self):
        """Rebuild the right sidebar with controls to merge another PDF into this one."""
        self.rightSidebar.destroy()
        self.custom_function = True
        self.right_frame()
        label = CustomLabel(self.rightSidebar, text='Merge PDF', anchor='c')
        label.grid(row=0, column=0, pady=15, padx=25, columnspan=3)
        # filelabel shows the chosen second PDF; choose_file updates its text.
        self.filelabel = CustomLabel(self.rightSidebar, text='Select File',anchor='w',
                                     wraplength=200, height=3, width=20, bg=self.bgcolor)
        self.filelabel.grid(row=1, column=0, pady=15, padx=10, columnspan=2)
        choosefile = tk.Button(self.rightSidebar, image=clip_icon, command=self.choose_file)
        choosefile.grid(row=1, column=2, pady=20)
        infolabel = CustomLabel(self.rightSidebar, text='It will append this pdf in the end of above pdf',
                                anchor='w', width=35)
        infolabel.grid(row=2, column=0, pady=15, padx=10, columnspan=3)
        fromlabel = CustomLabel(self.rightSidebar, text='From page : ', anchor='w', width=10)
        fromlabel.grid(row=3, column=0, pady=15, padx=10)
        # Page-range entries bound to self.from_ / self.to_ for merge_pdf.
        fromentry = tk.Entry(self.rightSidebar, width=6, bg=self.bgcolor, fg='white')
        fromentry['textvariable'] = self.from_
        fromentry.grid(row=3, column=1, columnspan=2)
        tolabel = CustomLabel(self.rightSidebar, text='To page : ', anchor='w', width=10)
        tolabel.grid(row=4, column=0, pady=15, padx=10)
        toentry = tk.Entry(self.rightSidebar, width=6, bg=self.bgcolor, fg='white')
        toentry['textvariable'] = self.to_
        toentry.grid(row=4, column=1, columnspan=2)
        merge = tk.Button(self.rightSidebar, bg=self.bgcolor, text='Merge', width=15,
                          command=self.merge_pdf, fg='white', cursor='hand2')
        merge.grid(row=5, column=0, pady=20, columnspan=2)
        back = tk.Button(self.rightSidebar, bg=self.bgcolor, image=back_icon,
                         command=self.go_to_tools)
        back.grid(row=5, column=2, pady=20)
    def watermark_pdf_frame(self):
        """Rebuild the right sidebar with controls to stamp an image onto the PDF.

        The (x1, y1)/(x2, y2) entries give the rectangle the watermark image
        is placed into; coordinates are read back by watermark_pdf.
        """
        self.rightSidebar.destroy()
        self.custom_function = True
        self.right_frame()
        label = CustomLabel(self.rightSidebar, text='Watermark PDF', anchor='c')
        label.grid(row=0, column=0, pady=15, padx=25, columnspan=4)
        # filelabel shows the chosen image; choose_image_file updates its text.
        self.filelabel = CustomLabel(self.rightSidebar, text='Select File',anchor='w',
                                     wraplength=200, height=3, width=20, bg=self.bgcolor)
        self.filelabel.grid(row=1, column=0, pady=15, padx=10, columnspan=3)
        choosefile = tk.Button(self.rightSidebar, image=clip_icon,
                               command=self.choose_image_file)
        choosefile.grid(row=1, column=3, pady=20)
        x1label = CustomLabel(self.rightSidebar, text='x1 : ', anchor='w', width=5)
        x1label.grid(row=2, column=0, pady=15, sticky='W', padx=(25,2))
        x1entry = tk.Entry(self.rightSidebar, width=5, bg=self.bgcolor, fg='white')
        x1entry['textvariable'] = self.x1
        x1entry.grid(row=2, column=1, columnspan=2, sticky='W')
        y1label = CustomLabel(self.rightSidebar, text='y1 : ', anchor='w', width=5)
        y1label.grid(row=2, column=2, pady=15, sticky='W')
        y1entry = tk.Entry(self.rightSidebar, width=5, bg=self.bgcolor, fg='white')
        y1entry['textvariable'] = self.y1
        y1entry.grid(row=2, column=3, columnspan=2, sticky='W')
        x2label = CustomLabel(self.rightSidebar, text='x2 : ', anchor='w', width=5)
        x2label.grid(row=3, column=0, pady=15, sticky='W', padx=(25,2))
        x2entry = tk.Entry(self.rightSidebar, width=5, bg=self.bgcolor, fg='white')
        x2entry['textvariable'] = self.x2
        x2entry.grid(row=3, column=1, columnspan=2, sticky='W')
        y2label = CustomLabel(self.rightSidebar, text='y2 : ', anchor='w', width=5)
        y2label.grid(row=3, column=2, pady=15, sticky='W')
        y2entry = tk.Entry(self.rightSidebar, width=5, bg=self.bgcolor, fg='white')
        y2entry['textvariable'] = self.y2
        y2entry.grid(row=3, column=3, columnspan=2, sticky='W')
        watermark = tk.Button(self.rightSidebar, bg=self.bgcolor, text='Watermark', width=15,
                              command=self.watermark_pdf, fg='white', cursor='hand2')
        watermark.grid(row=4, column=0, pady=20, columnspan=2, padx=(25,2))
        back = tk.Button(self.rightSidebar, bg=self.bgcolor, image=back_icon,
                         command=self.go_to_tools)
        back.grid(row=4, column=3, pady=20)
# Frame Switches --------------------------------------------------------------------
def toggleFullScreen(self, event):
# toggle to fullscreen
if self.isFullscreen == False:
self.master.attributes('-fullscreen', True)
self.isFullscreen = True
def quitFullScreen(self, event):
# quit the fullscreen window
if self.isFullscreen == True:
self.master.attributes('-fullscreen', False)
self.isFullscreen = False
    def create_radios(self, option):
        """Create one angle radio button from an (label, value) pair.

        The button is bound to self.angle; caller is responsible for gridding it.
        """
        text, value = option
        radio = tk.Radiobutton(self.rightSidebar, text=text, value=value, variable=self.angle,
                               font=('Arial', 11))
        return radio
    def choose_file(self):
        """Ask the user for a second PDF (for merging) and show its basename."""
        self.other_filepath = filedialog.askopenfilename(initialdir=cwd, filetypes=(("PDF","*.pdf"),))
        # Empty string means the dialog was cancelled; leave the label alone.
        if self.other_filepath:
            self.filelabel['text'] = os.path.basename(self.other_filepath)
def choose_image_file(self):
# choose a image for watermarking the pdf
self.other_filepath = filedialog.askopenfilename(initialdir=cwd,
filetypes=(("PNG","*.png"),("JPG", '.jpg')))
if self.other_filepath:
self.filelabel['text'] = os.path.basename(self.other_filepath)
def choose_saveasname(self, text):
# choose a new filename for saving | |
import os
import re
import io
import hashlib
import mutagen
import zipfile
import datetime
from dynamic_preferences.registries import global_preferences_registry
from django.db import models, transaction
from django.db.utils import IntegrityError
from django.utils import timezone
from django.db.models import Q
from PIL import Image
# Create your models here.
# TODO: I actually don't think I like those "on_delete=models.CASCADE"
# parameters on ForeignKeys. I'm guessing that in all circumstances I'd
# prefer that we error out, rather than silently deleting data we might
# not mean to. I've squashed a few bugs related to that kind of thing
# already, and more similar bugs might be lurking still.
class SongHelper(object):
    """
    A little class, little more than a glorified dict, which is used
    to store some interim information while adding/updating/cleaning
    tracks.  The class helps in these ways:
      1. Helps with associations of "Various" albums
      2. Takes care of "unattached" tracks which aren't in an album
      3. Takes care of stripping the artist prefix
    """
    def __init__(self, artist_full, group, conductor, composer, album, song_obj):
        # Direct vars
        self.song_obj = song_obj
        self.album = album
        # Split every artist-ish tag into (prefix, name) and keep a normalized
        # form of each name for case/format-insensitive matching later.
        (self.artist_prefix, self.artist_name) = Artist.extract_prefix(artist_full)
        self.norm_artist_name = App.norm_name(self.artist_name)
        (self.group_prefix, self.group_name) = Artist.extract_prefix(group)
        self.norm_group_name = App.norm_name(self.group_name)
        (self.conductor_prefix, self.conductor_name) = Artist.extract_prefix(conductor)
        self.norm_conductor_name = App.norm_name(self.conductor_name)
        (self.composer_prefix, self.composer_name) = Artist.extract_prefix(composer)
        self.norm_composer_name = App.norm_name(self.composer_name)
        self.base_dir = os.path.dirname(song_obj.filename)
        # Information which may be overwritten later (e.g. when the album is
        # detected to be a "Various" album).
        self.album_artist = self.artist_name
        self.norm_album_artist = self.norm_artist_name
        # If we have no defined album, make one up!
        # This... might not be the right place to do this?  Still, it's
        # convenient here, so whatever.
        if self.album == '':
            self.album = Album.miscellaneous_format_str % (artist_full)
            self.miscellaneous_album = True
        else:
            self.miscellaneous_album = False
        # Create our normalized album name here, in case we'd started with it
        # blank.
        self.norm_album = App.norm_name(self.album)
        # Also set a "live" boolean.  Will mostly just be used for frontend
        # filtering.
        if App.livere.match(self.album):
            self.live_album = True
        else:
            self.live_album = False
    def set_album_artist(self, artist):
        """
        Sets the album artist for this helper (and also the normalized version
        of the artist)
        """
        self.album_artist = artist
        self.norm_album_artist = App.norm_name(artist)
class Artist(models.Model):
    """
    A single artist (also used for groups, conductors, and composers).
    ``normname`` is a normalized form of ``name`` used for matching; any
    leading article ("The", etc.) is stored separately in ``prefix``.
    """
    name = models.CharField(
        max_length=255,
        unique=True,
    )
    name.verbose_name = 'Artist'
    # Normalized name, auto-populated in save() via App.norm_name().
    normname = models.CharField(
        max_length=255,
        unique=True,
    )
    # Leading article stripped from the name (may be blank).
    prefix = models.CharField(
        max_length=32,
        blank=True,
    )
    # True for the synthetic "Various" artist used on multi-artist albums.
    various = models.BooleanField(default=False)
    class Meta:
        ordering = ['name']
    def __str__(self):
        """
        Returns a string representation of ourselves (prefix + name, when a
        prefix is present).
        """
        if self.prefix and self.prefix != '':
            return '%s %s' % (self.prefix, self.name)
        else:
            return self.name
    def save(self, *args, **kwargs):
        """
        Custom handler for save() which populates our normname field
        automatically.
        """
        self.normname = App.norm_name(self.name)
        super(Artist, self).save(*args, **kwargs)
    def __lt__(self, other):
        """
        For sorting -- compares on the normalized name.
        """
        return self.normname < other.normname
    # TODO: dynamic prefixes via the admin interface?
    # TODO: prefix exceptions ("The The")
    @staticmethod
    def extract_prefix(name):
        """
        Extracts a prefix from the given name, if one exists.  Returns
        a tuple of ``(prefix, name)``, where ``prefix`` may be an empty string.
        """
        if name == '':
            return ('', '')
        # NOTE(review): assumes App.prefixre matches any nonempty string
        # (match is not checked for None) -- confirm the regex is total.
        match = App.prefixre.match(name)
        if match.group(2):
            return (match.group(2), match.group(3))
        else:
            return ('', name)
class Album(models.Model):
    """
    An album belonging to a single Artist.  "Miscellaneous" albums collect
    an artist's non-album tracks; album-art metadata is denormalized onto
    this model so art queries avoid JOINs.
    """
    # Format string used to synthesize a name for non-album track buckets.
    miscellaneous_format_str = '(Non-Album Tracks: %s)'
    artist = models.ForeignKey(Artist, on_delete=models.CASCADE)
    artist.verbose_name = 'Artist'
    name = models.CharField(
        max_length=255,
    )
    name.verbose_name = 'Album Title'
    # Normalized name, auto-populated in save() via App.norm_name().
    normname = models.CharField(
        max_length=255,
    )
    year = models.IntegerField(
        default=0,
    )
    year.verbose_name = 'Year'
    # True for the synthetic "(Non-Album Tracks: ...)" bucket albums.
    miscellaneous = models.BooleanField(default=False)
    live = models.BooleanField(default=False)
    time_added = models.DateTimeField(default=timezone.now)
    time_added.verbose_name = 'Added to Database'
    # This is a bit of denormalization but lets us query for albums
    # with art without having to bring in possibly-expensive JOINs.
    # Also lets us just pass through original files from the filesystem
    # rather than unnecessarily bloating our database.
    art_filename = models.CharField(max_length=4096, null=True, blank=True, default=None)
    art_mtime = models.IntegerField(null=True, blank=True, default=0)
    art_ext = models.CharField(max_length=4, null=True, blank=True, default=None)
    art_mime = models.CharField(max_length=64, null=True, blank=True, default=None)
    class Meta:
        unique_together = ('artist', 'name')
        ordering = ['artist', 'name']
def __str__(self):
"""
Returns a string representation of ourselves
"""
if self.miscellaneous:
return Album.miscellaneous_format_str % (self.artist)
else:
return self.name
    def save(self, *args, **kwargs):
        """
        Custom handler for save() which populates our normname field
        automatically, and normalizes blank album-art fields back to NULL.
        """
        # First compute our normname
        self.normname = App.norm_name(self.name)
        # This bit is unnecessary, really, but causes us to be consistent
        # with the type of data being put into the DB.  Our app's App.add()
        # and App.update() procedures will leave album art fields as NULL/None
        # when not found.  However, when edited in the admin area, currently
        # Django will insert a blank string into these fields if they're blank,
        # which would change them from NULL.  I'm a fan of consistency, so
        # let's just set them to null explicitly for now.  It looks like this
        # has been fixed in github, though?  Doesn't seem to be in Django 1.10
        # yet.  https://code.djangoproject.com/ticket/4136
        if self.art_filename == '':
            self.art_filename = None
        if self.art_ext == '':
            self.art_ext = None
        if self.art_mime == '':
            self.art_mime = None
        # Now continue with the save.
        super(Album, self).save(*args, **kwargs)
def get_songs_ordered(self):
"""
Returns all tracks in our album, ordered. A convenience function
for inclusion in templates, basically.
"""
return self.song_set.all().order_by('tracknum')
def get_songs_jplayer_streamable_ordered(self):
"""
Returns all tracks in our album which are capable of being streamed
via jPlayer, ordered. A convenience function for inclusion in
templates, basically.
"""
return self.song_set.all().filter(~Q(filetype=Song.OPUS)).order_by('tracknum')
def get_secondary_artists_list(self):
"""
Returns a list of all artists contained in songs in this
album, including groups, conductors, and composers. Since
these are inherent to Songs, not Albums, the best we can
really do is just loop through 'em.
"""
artists = {}
for song in self.song_set.all():
if (song.group and song.group.normname != self.artist.normname and
song.group not in artists):
artists[song.group] = True
if (song.conductor and song.conductor.normname != self.artist.normname and
song.conductor not in artists):
artists[song.conductor] = True
if (song.composer and song.composer.normname != self.artist.normname and
song.composer not in artists):
artists[song.composer] = True
return sorted(artists.keys())
def get_secondary_artists_tuple(self):
"""
Returns a tuple containing lists of artists who are in our
"secondary" artist fields of group, conductor, and composer.
Since these are inherent to Songs, not Albums, the best we
can really do is just loop through 'em.
Included in the tuple are a set of booleans detailing if
there are tracks without group/conductor/composer tags.
The tuple order is: (groups, have_empty_group,
conductors, have_empty_conductor,
composers, have_empty_composer)
"""
groups = {}
conductors = {}
composers = {}
have_empty_group = False
have_empty_conductor = False
have_empty_composer = False
for song in self.song_set.all():
if song.group:
if song.group not in groups:
groups[song.group] = True
else:
have_empty_group = True
if song.conductor:
if song.conductor not in conductors:
conductors[song.conductor] = True
else:
have_empty_conductor = True
if song.composer:
if song.composer not in composers:
composers[song.composer] = True
else:
have_empty_composer = True
return (sorted(groups.keys()), have_empty_group,
sorted(conductors.keys()), have_empty_conductor,
sorted(composers.keys()), have_empty_composer)
def get_album_image(self):
"""
Find our album art filename. Most of the heavy lifting here
is done in some static methods from App. Will return None
if no album art was found.
"""
if self.miscellaneous:
return None
if self.song_set.count() > 0:
song = self.song_set.all()[0]
base_dir = song.full_base_dir()
return App.get_directory_cover_image(base_dir)
else:
return None
def import_album_image_from_filename(self, filename, short_filename):
"""
Imports the given filename as our new album art. Will yield
a list of tuples in the format (loglevel, text) like the
static App.add() and App.update() functions.
Note that like those other funcs, since we're yielding things as we go,
something needs to loop through our output in order for things to
actually happen here.
Note too that despite some ``return False`` statements in here, because
we're a generator, those return values are pretty meaningless and
can't really be accessed.
``filename`` is the full path to the image, whereas ``short_filename``
is what will get stored in the DB
"""
if self.miscellaneous:
return False
valid_extension = False
for ext in App.cover_extensions:
if filename[-len(ext):] == ext:
valid_extension = True
break
if not valid_extension:
yield (App.STATUS_ERROR, 'Invalid extension for image %s' % (filename))
return False
if os.access(filename, os.R_OK):
try:
imagedata = None
with Image.open(filename) as im:
if im.format in App.image_format_to_mime:
(mime, ext) = App.image_format_to_mime[im.format]
stat_data = os.stat(filename)
self.art_filename = short_filename
self.art_mtime = int(stat_data.st_mtime)
self.art_ext = ext
self.art_mime = mime
self.save()
| |
#!/usr/bin/env python
from __future__ import print_function
import logging
import json
from pprint import pformat
from collections import defaultdict
from apiclient.errors import HttpError
from errors import BadConfigError
from copy import deepcopy
import time
MAX_ACTIONS_PER_BATCH = 950
ITERATION_LIMIT = 100
class Event(dict):
    """
    A wrapper around events.

    NOTE: Python 2 code -- ordering relies on __cmp__ and the cmp() builtin,
    neither of which exists in Python 3.
    """
    # These are properties that are true for two events that are considered
    # identical across domains.
    props = [
        "id",
        "status",
        "start",
        "end",
        "summary",
        "description",
        "location",
        "colorId",
        "reminders",
        "transparency",
        "visibility",
    ]
    # Properties needing special (order-insensitive) comparison handling.
    special_props = [
        "attendees",
    ]
    def __init__(self, args, **kwargs):
        # dirty marks local modifications that still need pushing upstream.
        self.dirty = False
        super(Event, self).__init__(args, **kwargs)
    def active(self):
        # An event is "active" unless Google marked it cancelled.
        return self['status'] != 'cancelled'
    def __cmp__(self, obj):
        """
        Compare two events.  If there's no meaningful difference, they're
        'identical' (even if they were updated at different times).  Otherwise,
        the most recently updated copy wins.
        """
        if not obj or not isinstance(obj, Event):
            return 1
        elif self.ehash() == obj.ehash():
            return 0
        else:
            # Log which property differed (debug aid only), then fall back to
            # the 'updated' timestamps to decide which copy is newer.
            for p in self.props:
                if p in self and p in obj:
                    if self[p] != obj[p]:
                        logging.debug("!!!!!==== %s %s %s", p, self[p], obj[p])
            return cmp(self['updated'], obj['updated'])
    def __setitem__(self, k, v):
        # Any mutation marks the event as needing a push.
        self.dirty = True
        dict.__setitem__(self, k, v)
    def ehash(self):
        # Stable hash over the comparable subset of properties; attendees are
        # reduced to a sorted email list so ordering doesn't matter.
        d = {}
        for p in self.props:
            d[p] = self.get(p, None)
        for p in self.special_props:
            if p in self:
                d[p] = sorted([a['email'] for a in self[p]])
        return hash(json.dumps(d))
class Calendar:
    """
    Represents a single Google Calendar and the domain service required to
    edit it.

    NOTE: Python 2 code (dict.iteritems, logging.warn).  Local event state
    lives in self.events keyed by event id; writes can be batched via
    begin_batch()/commit_batch().
    """
    def __init__(self, config, domains=None, service=None):
        self.domain_id = config['domain']
        self.url = config['url']
        self.name = self.url
        self.sync_token = ""
        self.events = {}
        self.batch = None
        self.batch_count = 0
        self.read_only = False
        self.calendar_metadata = None
        # Accumulated milliseconds to sleep before the next batch commit.
        self.ratelimit = 0
        if 'read_only' in config:
            self.read_only = config['read_only']
        logging.info("Creating new calendar at %s with url %s" % (self.
            domain_id, self.url))
        if domains is not None:
            if not self.domain_id in domains:
                raise BadConfigError(
                    "Domain %s referenced in calendar config not defined." %
                    self.domain_id)
            self.domain = domains[self.domain_id]
            self.service = self.domain.get_service()
        # An explicitly passed service overrides the domain-derived one.
        if service:
            self.service = service
        # Perform self-checking
        try:
            calendars = self.domain.get_calendars()
        except HttpError as e:
            logging.critical("Error while trying to load calendar %s: %s",
                             self.url, repr(e))
            raise e
        for c in calendars.get('items', []):
            if c['id'] == self.url:
                self.calendar_metadata = c
                break
        if not self.calendar_metadata:
            raise BadConfigError("Couldn't find calendar %s in domain %s!" % (
                self.url, self.domain_id))
        # Now that we have metadata, we can use a name instead of a URL
        self.name = "%s [%s]" % (self.calendar_metadata['summary'], self.
            domain_id)
        if (self.calendar_metadata['accessRole'] not in self.valid_access_roles()):
            logging.critical(
                "Permission '%s' on calendar %s is too restrictive! Needed %s",
                self.calendar_metadata['accessRole'], self.url, self.
                valid_access_roles())
            raise RuntimeError
    def valid_access_roles(self):
        """
        The set of access roles required.  Read-only calendars additionally
        accept the "reader" role.
        """
        if self.read_only:
            return ["owner", "writer", "reader"]
        return ["owner", "writer"]
    def active_events(self):
        # Subset of self.events whose status isn't 'cancelled'.
        return {k:v for k,v in self.events.iteritems() if v.active()}
    def update_events_from_result(self, result, exception=None):
        """
        Given an Events resource result, update our local events.
        Also used as a batch callback, hence the optional exception arg
        (which is re-raised after logging).
        """
        if exception is not None:
            logging.warn("Callback indicated failure -- exception: %s",
                         exception)
            logging.debug(pformat(result))
            logging.debug(pformat(exception))
            raise exception
            # return 0
        updated = 0
        for event in result.get("items", []):
            id = event['id']
            new_event = Event(event)
            old_event = self.events.get(id, None)
            if new_event != old_event: # see Event.__cmp__; not that simple!
                # Don't count updates to events we already know are cancelled.
                if not (old_event and not old_event.active()):
                    updated += 1
                self.events[id] = Event(event)
        if updated:
            logging.info("Updated %d events" % updated)
        return updated
    def update_events(self):
        """
        Get events from Google and update our local events using
        update_events_from_result.
        Uses syncToken to optimize result retrieval.
        """
        request = self.service.events().list(calendarId=self.url,
                                             syncToken=self.sync_token,
                                             showDeleted=True)
        updated = 0
        while request is not None:
            result = request.execute()
            updated += self.update_events_from_result(result)
            request = self.service.events().list_next(request, result)
        # Remember the sync token from the final page for the next poll.
        self.sync_token = result.get("nextSyncToken", "")
        # logging.info("Got %d events. syncToken is now %s" % (updated,
        # self.sync_token))
        return updated
    def begin_batch(self):
        """
        Start a new batch of actions.  If a batch is already open it is
        committed first.
        """
        logging.debug("Calendar %s starting new batch" % self.url)
        if self.batch:
            logging.warn(
                "begin_batch called with active batch! Trying to commit")
            self.commit_batch()
        self.batch = self.service.new_batch_http_request()
    def commit_batch(self):
        """
        Execute the currently active batch.  Sleeps for the accumulated
        ratelimit delay first; empty batches are discarded without an
        HTTP request.
        """
        if not self.batch:
            logging.warn("commit_batch called but no batch was started!")
            return
        if self.batch_count:
            # Only commit a batch when necessary, to save on HTTP requests.
            logging.debug("Calendar %s committing batch of %d" % (self.url,
                          self.batch_count))
            if self.ratelimit:
                time.sleep(self.ratelimit / 1000.0)
                self.ratelimit = 0
            result = self.batch.execute()
            #self.update_events_from_result(result)
            # logging.debug(pformat(result))
        self.batch = None
        self.batch_count = 0
    def _action_to_batch(self, action):
        """
        Add an action to the currently active batch. If the batch contains more
        than MAX_ACTIONS_PER_BATCH actions, commit it and start a new one.
        """
        if self.batch:
            # Results (or failures) flow back through update_events_from_result.
            self.batch.add(action, callback=lambda request_id, response,
                           exception: self.update_events_from_result(response,
                           exception=exception))
            self.batch_count += 1
            # 2ms of pre-commit delay per queued action.
            self.ratelimit += 2
            if self.batch_count > MAX_ACTIONS_PER_BATCH:
                self.commit_batch()
                self.begin_batch()
        else:
            logging.critical(
                "Tried to add a batch action but no batch was active!")
            raise RuntimeError
    def sync_event(self, event):
        """
        If `event` doesn't exist, create it with `add_event`; otherwise,
        if `event` is newer than our version, patch it with our version.
        Idempotent if `event` is already the latest version.
        """
        eid = event['id']
        if eid in self.events:
            my_event = self.events[eid]
            if (my_event['status'] == 'cancelled' and event['status'] ==
                    'cancelled'):
                # NOTE(review): this only rebinds the local name before
                # returning -- possibly meant self.events[eid] = event?
                my_event = event
                return None
            if my_event < event:
                # logging.debug(pformat(self.events[eid]))
                # logging.debug(pformat(event))
                return self.update_event(eid, event)
        else:
            self.events[eid] = event
            if event['status'] == 'cancelled':
                # If the event is both new to us and cancelled, there's no need
                # to add it (and Google doesn't let us do so for resources
                # anyway) so just add it to our event set and return.
                return None
            return self.add_event(event)
            # return self.import_event(event)
    def _process_action(self, action):
        """
        If we're running in batch mode, add the action to a batch.
        Otherwise, execute the action immediately and update.
        """
        if self.batch:
            return self._action_to_batch(action)
        else:
            result = action.execute()
            # logging.debug(pformat(result))
            self.update_events_from_result(result)
            return result
    def add_event(self, event):
        """
        Add an event, then update our events with the result.
        No-op (with debug log) on read-only calendars.
        """
        if self.read_only:
            logging.debug("RO: %s +> %s" % (event['id'], self.name))
            return None
        action = self.service.events().insert(calendarId=self.url, body=event)
        return self._process_action(action)
    def patch_event(self, event_id, new_event):
        """
        Unconditionally patch the event referenced by `event_id` with the
        data in `event`.  No-op on read-only calendars.
        """
        if self.read_only:
            logging.debug("RO: %s => %s" % (new_event['id'], self.name))
            return None
        action = self.service.events().patch(calendarId=self.url,
                                             eventId=event_id,
                                             body=new_event)
        return self._process_action(action)
    def update_event(self, event_id, new_event):
        """
        Unconditionally update the event referenced by `event_id` with the
        data in `event`.  No-op on read-only calendars.
        """
        if self.read_only:
            logging.debug("RO: %s ~> %s" % (new_event['id'], self.name))
            return None
        # new_event['sequence'] += 1
        action = self.service.events().update(calendarId=self.url,
                                              eventId=event_id,
                                              body=new_event)
        return self._process_action(action)
    def push_events(self, batch=False):
        """
        If we have local modifications to events, push them
        to the server.  Returns the number of events pushed.
        """
        if batch: self.begin_batch()
        updates = 0
        for eid, e in self.events.iteritems():
            if e.dirty:
                logging.debug("Pushing dirty event %s", eid)
                self.update_event(eid, e)
                e.dirty = False
                updates += 1
                # if updates > MAX_ACTIONS_PER_BATCH:
                # if batch:
                # self.commit_batch()
                # self.begin_batch()
        if batch: self.commit_batch()
        return updates
class SyncedCalendar:
"""
A collection of Calendars to be synced.
"""
def __init__(self, name, config, domains=None):
self.name = name
self.calendars = []
for cal_config in config['calendars']:
cal = Calendar(cal_config, domains=domains)
self.calendars.append(cal)
self.event_set = set()
    def sync_event(self, id):
        """
        Find the most up-to-date version of a given event, and sync changes
        that need to be made.  Returns the number of calendars that actually
        issued a change for this event.
        """
        events = [c.events[id] for c in self.calendars if id in c.events]
        if not [e for e in events if e.active()]:
            # All events cancelled. We don't care.
            return 0
        elif [e for e in events if not e.active()]:
            # One or more events cancelled. All events should be cancelled.
            for e in events:
                e['status'] = 'cancelled'
        event = max(events)  # See __cmp__ in Event for how this is determined.
        # Keep the Google 'sequence' number strictly increasing so every copy
        # accepts the update.
        sequence = max([e['sequence'] for e in events])
        if sequence > event['sequence']:
            # you get an update! you get an update! everyone gets an update!
            event['sequence'] = sequence + 1
            logging.debug("increasing SN of %.5s to %d", id, event['sequence'])
        return sum([c.sync_event(event) is not None for c in self.calendars])
    def print_debug_events(self):
        """Dump a per-calendar table of sequence/summary/updated for each event."""
        for e in self.event_set:
            print(("%.5s" % e), end=' ')
            for c in self.calendars:
                if e in c.events:
                    print(" %-4d %20.20s %25.25s" % (c.events[e]['sequence'],
                          c.events[e]['summary'], c.events[e]['updated']),
                          end=' ')
                else:
                    # Blank column of the same width keeps rows aligned.
                    print(" " * 32, end=' ')
            print()
def sync(self):
"""
Update calendar info, getting the latest events. Then, for each event,
add | |
<filename>tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/export/op_descriptor.py
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nndct_shared.base import NNDCT_CONSTANT, NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from .code_template import CodeTemplate
class OpDescriptor(object):
@staticmethod
def input(ctx, node, output_str):
return "{} = args[{}]".format(output_str, int(node.name.split('_')[-1]))
@staticmethod
def rsub(ctx, node, output_str):
other = node.node_config('other')
if isinstance(other, Tensor):
other = ctx.get_output_tensor_name(other)
return "{output} = {other} - {input}".format(
output=output_str,
other=other,
input=ctx._to_list_str(ctx._get_module_input(node)))
  @staticmethod
  def strided_slice(ctx, node, output_str):
    """Emit a Python slice expression `<out> = input[s0:e0:st0, s1:e1:st1, ...]`
    from the node's per-dimension start/end/step configs.  Each bound is
    rendered via ctx.infer_attr_value (so it may be a literal or a symbol)."""
    starts = node.node_config('start')
    ends = node.node_config('end')
    steps = node.node_config('step')
    break_symbol = ':'
    symbols = ""
    start_symbol = []
    end_symbol = []
    step_symbol = []
    # First render every bound to its string form...
    for i in range(len(starts)):
      start_symbol.append(ctx.infer_attr_value(starts[i]))
      end_symbol.append(ctx.infer_attr_value(ends[i]))
      step_symbol.append(ctx.infer_attr_value(steps[i]))
    # ...then join per-dimension "start:end:step" pieces with commas.
    for i in range(len(starts)):
      slice_symbol = break_symbol.join([start_symbol[i], end_symbol[i], step_symbol[i]])
      if i > 0:
        symbols += "," + slice_symbol
      else:
        symbols = slice_symbol
    input_str = ctx.infer_attr_value(node.node_config('input'))
    return "{output} = {input_tensor}[{symbols}]".format(
        output=output_str,
        input_tensor=input_str,
        symbols=symbols)
@staticmethod
def slice_tensor_inplace_copy(ctx, node, output_str):
slice_tensor, input = ctx._get_module_input(node)
dim = node.node_config('dim')
index = node.node_config('index')
symbols = str(index)
for i in range(dim):
symbols = ','.join([':', symbols])
return "{slice_tensor}[{symbols}] = {input_tensor}".format(
slice_tensor=slice_tensor, symbols=symbols, input_tensor=input)
  @staticmethod
  def _sequence(ctx, node, output_str):
    """Emit a sequence constructor call: `<out> = <op>([a, b, ...])`.
    NOTE: rewrites Tensor entries of the op's 'input' config list in place
    (replaced by their generated names)."""
    inputs = node.op.get_config('input')
    for idx, ip in enumerate(inputs):
      if isinstance(ip, Tensor):
        inputs[idx] = ctx.get_output_tensor_name(ip)
    return "{output} = {op_name}([{inputs}])".format(
        output=output_str,
        op_name=node.op.type,
        inputs=ctx._to_list_str(inputs))
  @staticmethod
  def list(ctx, node, output_str):
    # List construction is just the generic sequence emitter.
    return OpDescriptor._sequence(ctx, node, output_str)
  @staticmethod
  def index(ctx, node, output_str):
    """Emit advanced indexing: `<out> = input[i0, i1, ...]`, where each index
    is either a Tensor (rendered by name) or None (rendered as ':')."""
    indices = ""
    for i, index in enumerate(node.node_config('index')):
      if isinstance(index, Tensor):
        symbol = ctx.get_output_tensor_name(index)
      elif index is None:
        symbol = ":"
      # NOTE(review): any other index type reuses the previous `symbol`
      # (NameError if it's the first entry) -- confirm only Tensor/None occur.
      if i > 0:
        indices += "," + symbol
      else:
        indices = symbol
    input = node.node_config('input')
    input_tensor = ctx.get_output_tensor_name(input)
    return "{output} = {input_tensor}[{symbols}]".format(
        output=output_str, input_tensor=input_tensor, symbols=indices)
@staticmethod
def strided_slice_inplace_copy(ctx, node, output_str):
destination = node.node_config('destination')
source = node.node_config('source')
starts = node.node_config('start')
ends = node.node_config('end')
steps = node.node_config('step')
break_symbol = ':'
symbols = ""
start_symbol = []
end_symbol = []
step_symbol = []
for i in range(len(starts)):
start_symbol.append(ctx.infer_attr_value(starts[i]))
end_symbol.append(ctx.infer_attr_value(ends[i]))
step_symbol.append(ctx.infer_attr_value(steps[i]))
for i in range(len(starts)):
if starts[i] == ends[i]:
slice_symbol = start_symbol[i]
else:
slice_symbol = break_symbol.join([start_symbol[i], end_symbol[i], step_symbol[i]])
if i > 0:
symbols += "," + slice_symbol
else:
symbols = slice_symbol
destination_str = ctx.infer_attr_value(destination)
source_str = ctx.infer_attr_value(source)
return "{output}[{symbols}] = {input_tensor}".format(
output=destination_str,
input_tensor=source_str, symbols=symbols)
@staticmethod
def index_put_inplace(ctx, node, output_str):
# destination, _, source = ctx._get_module_input(node)
destination = node.node_config('input')
source = node.node_config('values')
indices = node.node_config('indices')
indices_symbol = ''
sep_symbol = ','
break_symbol = ':'
for i, index in enumerate(indices):
index = break_symbol if index is None else ctx.get_output_tensor_name(index)
if i > 0:
indices_symbol += sep_symbol + index
else:
indices_symbol = index
destination_str = ctx.infer_attr_value(destination)
source_str = ctx.infer_attr_value(source)
ctx.set_name_alias_for_output(output_str, destination_str)
return "{output}[{symbols}] = {input_tensor}".format(
output=destination_str,
input_tensor=source_str, symbols=indices_symbol)
#@staticmethod
#def loop(ctx, node, output_str):
# loop_pattern = None
# if node.node_config("is_while_loop"):
# raise NotImplementedError()
# else:
# loop_pattern = CodeTemplate("""$loop_outputs = $loop_vars
# for $iter_var in range(0, $max_trip_count):
# $block_inputs = $loop_outputs
# $body
# $loop_outputs = $body_ret
# """)
# loop_outputs = output_str
# loop_vars = node.node_config("initial_loop_vars")
# assert len(loop_vars) == len(ctx._get_module_output(node))
#
# def loop_var_to_str(var):
# if isinstance(var, list):
# start_str = '['
# end_str = ']'
# var_lst = []
# for ele in var:
# var_lst.append(loop_var_to_str(ele))
# return start_str + ",".join(var_lst) + end_str
# else:
# return ctx.get_output_tensor_name(var)
#
# loop_vars_str = ",".join([loop_var_to_str(var) for var in loop_vars])
#
# body_str = ""
# block_inputs_idx = 0
# iter_var_str = ''
# block_inputs = []
# max_trip_count = node.node_config("max_trip_count")
# if isinstance(max_trip_count, Tensor):
# max_trip_count = ctx.get_output_tensor_name(max_trip_count)
#
# for inner_node in node.blocks[0].nodes:
# if inner_node.op.type == NNDCT_OP.INPUT:
# output_str = ctx._to_list_str(ctx._get_module_output(inner_node))
# if block_inputs_idx == 0:
# iter_var_str = output_str
# else:
# if isinstance(ctx._get_module_output(inner_node), list) and len(ctx._get_module_output(inner_node)) > 1:
# output_str = f"({output_str})"
# block_inputs.append(output_str)
# block_inputs_idx += 1
# else:
# forward_str, output_str = ctx._get_forward_str(inner_node)
# body_str += forward_str + '\n'
#
# block_inputs_str = ",".join(block_inputs)
#
# def get_ret_val_str(ret_val):
# if isinstance(ret_val, list):
# ret_val_str = ""
# head_str = "["
# tail_str = "]"
# for val in ret_val:
# ret_val_str += get_ret_val_str(val) + ","
# return head_str + ret_val_str + tail_str
# elif isinstance(ret_val, Tensor):
# return ctx.get_output_tensor_name(ret_val)
#
# body_ret_str = ",".join([get_ret_val_str(ret_val) for ret_val in node.blocks[0].return_struct[1:]])
#
# return loop_pattern.substitute(loop_outputs=loop_outputs,
# loop_vars=loop_vars_str,
# iter_var=iter_var_str,
# max_trip_count=max_trip_count,
# block_inputs=block_inputs_str,
# body=body_str,
# body_ret=body_ret_str)
  @staticmethod
  def loop(ctx, node, output_str):
    """Emit Python source for a Loop node.

    Handles both loop flavours: a condition-driven ``while`` loop and a
    counted ``for`` loop over ``range(start, max_trip_count + start)``.
    The body is generated from the node's first (and only) block.
    """
    loop_outputs = output_str
    loop_vars = node.node_config("initial_loop_vars")
    # A single loop-carried variable is unwrapped so it is not rendered as a tuple.
    loop_vars_str = ctx.infer_attr_value(loop_vars[0] if len(loop_vars) == 1 else loop_vars)
    assert len(loop_vars) == len(ctx._get_module_output(node))
    init_condition_str = ctx.infer_attr_value(node.node_config("initial_condition"))
    body_str = ""
    block_inputs_idx = 0
    iter_var_str = ''
    block_inputs = []
    iter_start_str = str(0)
    max_trip_count = node.node_config("max_trip_count")
    max_trip_count_str = ctx.infer_attr_value(max_trip_count)
    for inner_node in node.blocks[0].nodes:
      if inner_node.op.type == NNDCT_OP.INPUT:
        # First block input is the iteration counter; the rest are the
        # loop-carried variables re-bound at the top of each iteration.
        output_str = ctx._to_list_str(ctx._get_module_output(inner_node))
        if block_inputs_idx == 0:
          iter_var_str = output_str
        else:
          if isinstance(ctx._get_module_output(inner_node), list) and len(ctx._get_module_output(inner_node)) > 1:
            # Multi-output inputs are unpacked from a parenthesised tuple.
            output_str = f"({output_str})"
          block_inputs.append(output_str)
        block_inputs_idx += 1
      elif inner_node.op.type == NNDCT_OP.DERIVE_LOOP_INDEX:
        # A derived loop index overrides the counter name and its start value.
        iter_start_str = str(inner_node.node_config("start"))
        output_str = ctx._to_list_str(ctx._get_module_output(inner_node))
        iter_var_str = output_str
      else:
        # Ordinary body node: append its generated statement to the loop body.
        forward_str, output_str = ctx._get_forward_str(inner_node)
        body_str += forward_str + '\n'
    block_inputs_str = ",".join(block_inputs)
    # return_struct[0] is the continuation condition; the remaining entries
    # are the values carried over to the next iteration.
    body_ret_str = ",".join([ctx.infer_attr_value(ret_val) for ret_val in node.blocks[0].return_struct[1:]])
    iter_end_str = "+".join([max_trip_count_str, iter_start_str])
    iter_conditon_str = ctx.infer_attr_value(node.blocks[0].return_struct[0])
    loop_pattern = None
    if node.node_config("is_while_loop"):
      loop_pattern = CodeTemplate("""\
$loop_outputs = $loop_vars
condition = $initial_condition
while condition:
  $block_inputs = $loop_outputs
  $body
  $loop_outputs = $body_ret
  condition = $iter_condition
""")
      return loop_pattern.substitute(loop_outputs=loop_outputs,
                                     loop_vars=loop_vars_str,
                                     initial_condition=init_condition_str,
                                     block_inputs=block_inputs_str,
                                     body = body_str,
                                     body_ret = body_ret_str,
                                     iter_condition=iter_conditon_str)
    else:
      loop_pattern = CodeTemplate("""\
$loop_outputs = $loop_vars
for $iter_var in range($iter_start, $iter_end):
  $block_inputs = $loop_outputs
  $body
  $loop_outputs = $body_ret
""")
      return loop_pattern.substitute(loop_outputs=loop_outputs,
                                     loop_vars=loop_vars_str,
                                     iter_var=iter_var_str,
                                     iter_start=iter_start_str,
                                     iter_end=iter_end_str,
                                     block_inputs=block_inputs_str,
                                     body=body_str,
                                     body_ret=body_ret_str)
@staticmethod
def list_add(ctx, node, output_str):
inputs = node.node_config("input")
others = node.node_config("other")
input_str = ""
if isinstance(inputs, list):
input_str += "["
for inp in inputs:
input_str += ctx.get_output_tensor_name(inp)
input_str += "]"
else:
input_str += ctx.get_output_tensor_name(inputs)
others_str = ""
if isinstance(others, list):
others_str += "["
for other in others:
others_str += ctx.get_output_tensor_name(other)
others_str += "]"
else:
others_str += ctx.get_output_tensor_name(others)
return f"{output_str} = {input_str} + {others_str}"
@staticmethod
def floor_div(ctx, node, output_str):
inputs = node.node_config("input")
others = node.node_config("other")
return f"{output_str} = {ctx.get_output_tensor_name(inputs)} // {ctx.get_output_tensor_name(others)}"
@staticmethod
def sequence_unpack(ctx, node, output_str):
if len(node.out_tensors) == 1:
return f"{output_str}, = {ctx._to_list_str(ctx._get_module_input(node))}"
else:
return f"{output_str} = {ctx._to_list_str(ctx._get_module_input(node))}"
@staticmethod
def slice(ctx, node, output_str):
start = node.node_config('start')
end = node.node_config('end')
step = node.node_config('step')
dim = node.node_config('dim')
break_symbol = ':'
symbols = ""
starts = []
ends = []
steps = []
for i in range(dim + 1):
if i != dim:
starts.append(str(0))
ends.append(str(NNDCT_CONSTANT.INT_MAX))
steps.append(str(1))
else:
starts.append(ctx.infer_attr_value(start))
ends.append(ctx.infer_attr_value(end))
steps.append(ctx.infer_attr_value(step))
for i in range(dim + 1):
slice_symbol = break_symbol.join([starts[i], ends[i], steps[i]])
if i > 0:
symbols += "," + slice_symbol
else:
symbols = slice_symbol
input_str = ctx.infer_attr_value(node.node_config("input"))
return "{output} = {input_tensor}[{symbols}]".format(
output=output_str,
input_tensor=input_str,
symbols=symbols)
@staticmethod
def length(ctx, node, output_str):
return "{output} = len({input})".format(output=output_str, input=ctx._to_list_str(ctx._get_module_input(node)))
@staticmethod
def If(ctx, node, output_str):
if_pattern = CodeTemplate("""\
if ($condition):
$block_0_body
$if_out = $ret_0
else:
$block_1_body
$if_out = $ret_1
""")
if_out_str = output_str
condition_str = ctx.infer_attr_value(node.node_config("condition"))
assert len(node.blocks) == 2
blocks = [""] * 2
block_ret = [""] * 2
for i, block in enumerate(node.blocks):
for inner_node in block.nodes:
forward_str, output_str = ctx._get_forward_str(inner_node)
blocks[i] += forward_str + '\n'
block_ret[i] = ",".join([ctx.infer_attr_value(ret_val) for ret_val | |
return (
X.groupby(group_colnames, as_index=False)
.apply(
lambda d: pd.DataFrame(
self.estimators_.get(d.name, self.fallback_).predict(
d[self.value_colnames_]
),
index=d.index,
)
)
.values.squeeze()
)
except AttributeError:
# Handle new groups
culprits = set(X[self.group_colnames_].agg(func=tuple, axis=1)) - set(
self.estimators_.keys()
)
if self.shrinkage is not None and self.use_global_model:
# Remove the global group from the culprits because the user did not specify
culprits = {culprit[1:] for culprit in culprits}
raise ValueError(
f"found a group(s) {culprits} in `.predict` that was not in `.fit`"
)
    def __predict_shrinkage_groups(self, X):
        """Make predictions for all shrinkage groups.

        Predicts X once per hierarchy level with the corresponding fitted
        estimators and blends the per-level predictions with the fitted
        shrinkage factors of each row's group.

        :param X: pandas DataFrame containing the fitted group and value columns.
        :return: pandas Series, one blended prediction per row of X.
        """
        # DataFrame with predictions for each hierarchy level, per row.
        # Missing-group errors are thrown here.
        hierarchical_predictions = pd.concat(
            [
                pd.Series(self.__predict_group(X, level_columns))
                for level_columns in self.group_colnames_hierarchical_
            ],
            axis=1,
        )
        # Series whose values are the tuples of hierarchical grouping.
        prediction_groups = X[self.group_colnames_].agg(func=tuple, axis=1)
        # Series of arrays: one shrinkage-factor vector per row's group.
        shrinkage_factors = prediction_groups.map(self.shrinkage_factors_)
        # Convert the Series of arrays to a DataFrame (rows align with X).
        shrinkage_factors = pd.DataFrame.from_dict(shrinkage_factors.to_dict()).T
        # Weighted sum across hierarchy levels -> one prediction per row.
        return (hierarchical_predictions * shrinkage_factors).sum(axis=1)
    def predict(self, X):
        """
        Predict on new data.

        Validates the input, checks that the model is fitted, and dispatches
        to plain per-group prediction or shrinkage-blended prediction
        depending on whether `shrinkage` was configured.

        :param X: array-like, shape=(n_columns, n_samples,) training data.
        :return: array, shape=(n_samples,) the predicted data
        """
        # Coerce X into a DataFrame carrying the expected group/value columns.
        X = self.__prepare_input_data(X)
        self.__validate(X)
        # Raises NotFittedError unless `.fit()` populated these attributes.
        check_is_fitted(
            self,
            [
                "estimators_",
                "groups_",
                "group_colnames_",
                "value_colnames_",
                "fallback_",
            ],
        )
        if self.shrinkage is None:
            return self.__predict_group(X, group_colnames=self.group_colnames_)
        else:
            return self.__predict_shrinkage_groups(X)
class OutlierRemover(TrainOnlyTransformerMixin, BaseEstimator):
    """Meta-transformer that drops outlier rows, but only at train time.

    When transforming training data, every sample the wrapped detector
    labels ``-1`` is removed; at inference time data passes through
    untouched (behavior supplied by ``TrainOnlyTransformerMixin``).

    :param outlier_detector: must implement `fit` and `predict` methods
    :param refit: If True, fits the estimator during pipeline.fit().
    """

    def __init__(self, outlier_detector, refit=True):
        self.outlier_detector = outlier_detector
        self.refit = refit
        self.estimator_ = None

    def fit(self, X, y=None):
        """Clone the detector and, when ``refit`` is set, fit it on X, y."""
        self.estimator_ = clone(self.outlier_detector)
        if not self.refit:
            return self
        super().fit(X, y)
        self.estimator_.fit(X, y)
        return self

    def transform_train(self, X):
        """Return X restricted to the rows the detector does not flag as -1."""
        check_is_fitted(self, "estimator_")
        flags = self.estimator_.predict(X)
        check_array(flags, estimator=self.outlier_detector, ensure_2d=False)
        return X[flags != -1]
class DecayEstimator(BaseEstimator):
    """
    Morphs an estimator such that the training weights can be
    adapted to ensure that points that are far away have less weight.

    Note that it is up to the user to sort the dataset appropriately
    (oldest samples first). This meta estimator only works for estimators
    whose `.fit()` accepts a `sample_weight` argument.

    Exponential decay is used: w_{t-1} = decay * w_{t}, so sample i
    (0-based, n samples total) receives weight decay ** (n - i).

    :param model: the wrapped scikit-learn style estimator
    :param decay: per-step multiplicative decay factor
    :param decay_func: kept for API compatibility; only exponential decay
        is implemented
    """

    def __init__(self, model, decay: float = 0.999, decay_func="exponential"):
        self.model = model
        self.decay = decay
        # NOTE(review): stored under `func`, not `decay_func`; this breaks
        # sklearn get_params()/clone round-tripping of the `decay_func`
        # parameter — confirm before renaming, as callers may read `.func`.
        self.func = decay_func

    def _is_classifier(self):
        # Inspect the direct bases of the wrapped model's class by name.
        return any(
            ["ClassifierMixin" in p.__name__ for p in type(self.model).__bases__]
        )

    def fit(self, X, y):
        """
        Fit the inner model on X, y using decayed sample weights.

        :param X: array-like, shape=(n_columns, n_samples,) training data.
        :param y: array-like, shape=(n_samples,) training data.
        :return: Returns an instance of self.
        :raises TypeError: if the inner model's ``fit`` does not accept
            ``sample_weight`` (or if fitting raises any other TypeError).
        """
        X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
        # Geometric weights, largest for the most recent (last) sample.
        self.weights_ = np.cumprod(np.ones(X.shape[0]) * self.decay)[::-1]
        self.estimator_ = clone(self.model)
        try:
            self.estimator_.fit(X, y, sample_weight=self.weights_)
        except TypeError as e:
            if "sample_weight" in str(e):
                raise TypeError(
                    f"Model {type(self.model).__name__}.fit() does not have 'sample_weight'"
                ) from e
            # Bug fix: unrelated TypeErrors used to be silently swallowed,
            # leaving a half-fitted estimator behind. Re-raise them.
            raise
        if self._is_classifier():
            self.classes_ = self.estimator_.classes_
        return self

    def predict(self, X):
        """
        Predict new data.

        :param X: array-like, shape=(n_columns, n_samples,) training data.
        :return: array, shape=(n_samples,) the predicted data
        """
        if self._is_classifier():
            check_is_fitted(self, ["classes_"])
        check_is_fitted(self, ["weights_", "estimator_"])
        return self.estimator_.predict(X)

    def score(self, X, y):
        """Delegate scoring to the fitted inner estimator."""
        return self.estimator_.score(X, y)
class Thresholder(BaseEstimator, ClassifierMixin):
    """
    Takes a two class estimator and moves the threshold. This way you might
    design the algorithm to only accept a certain class if the probability
    for it is larger than, say, 90% instead of 50%.

    :param model: a scikit-learn compatible binary classifier with ``predict_proba``
    :param threshold: probability of the positive class (``classes_[1]``)
        required in order to predict it
    """

    def __init__(self, model, threshold: float):
        self.model = model
        self.threshold = threshold

    def fit(self, X, y):
        """
        Fit the data.

        :param X: array-like, shape=(n_columns, n_samples,) training data.
        :param y: array-like, shape=(n_samples,) training data.
        :return: Returns an instance of self.
        :raises ValueError: if the model lacks ``predict_proba`` or the
            target has more than two classes.
        """
        X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
        self.estimator_ = clone(self.model)
        if not isinstance(self.estimator_, ProbabilisticClassifier):
            # Typo fix: "classifcation" -> "classification".
            raise ValueError(
                "The Thresholder meta model only works on classification models with .predict_proba."
            )
        self.estimator_.fit(X, y)
        self.classes_ = self.estimator_.classes_
        if len(self.classes_) != 2:
            raise ValueError(
                "The Thresholder meta model only works on models with two classes."
            )
        return self

    def predict(self, X):
        """
        Predict new data.

        :param X: array-like, shape=(n_columns, n_samples,) training data.
        :return: array, shape=(n_samples,) the predicted data
        """
        check_is_fitted(self, ["classes_", "estimator_"])
        # Positive class only when its probability clears the threshold.
        predicate = self.estimator_.predict_proba(X)[:, 1] > self.threshold
        return np.where(predicate, self.classes_[1], self.classes_[0])

    def predict_proba(self, X):
        """Return the inner estimator's unmodified class probabilities."""
        check_is_fitted(self, ["classes_", "estimator_"])
        return self.estimator_.predict_proba(X)

    def score(self, X, y):
        """Delegate scoring to the fitted inner estimator."""
        return self.estimator_.score(X, y)
class ConfusionBalancer(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
    r"""
    The ConfusionBalancer attempts to give its child estimator a more balanced
    output by learning from the confusion matrix during training. The idea is that
    the confusion matrix calculates P(C_i | M_i) where C_i is the actual class and
    M_i is the class that the underlying model gives. We use these probabilities to
    attempt a more balanced prediction by averaging the correction from the confusion
    matrix with the original probabilities.

    .. math::

        p(\text{class_j}) = \alpha p(\text{model}_j) + (1-\alpha) p(\text{class_j} | \text{model}_j) p(\text{model}_j)

    (Docstring fix: this is now a raw string — previously ``\t`` and ``\a``
    in the LaTeX were interpreted as tab/bell escape characters.)

    :param estimator: a scikit learn compatible classification model that has predict_proba
    :param alpha: a hyperparameter between 0 and 1, determines how much to apply smoothing
    :param cfm_smooth: a smoothing parameter for the confusion matrices to ensure zeros don't exist
    """

    def __init__(self, estimator, alpha: float = 0.5, cfm_smooth=0):
        self.estimator = estimator
        self.alpha = alpha
        self.cfm_smooth = cfm_smooth

    def fit(self, X, y):
        """
        Fit the data.

        :param X: array-like, shape=(n_columns, n_samples,) training data.
        :param y: array-like, shape=(n_samples,) training data.
        :return: Returns an instance of self.
        :raises ValueError: if the estimator lacks ``predict_proba``.
        """
        X, y = check_X_y(X, y, estimator=self.estimator, dtype=FLOAT_DTYPES)
        if not isinstance(self.estimator, ProbabilisticClassifier):
            # Typo fix: "classifcation" -> "classification".
            raise ValueError(
                "The ConfusionBalancer meta model only works on classification models with .predict_proba."
            )
        # NOTE(review): fits the estimator passed in, without clone() —
        # mutates the caller's object; confirm this is intended.
        self.estimator.fit(X, y)
        self.classes_ = unique_labels(y)
        # Row-normalised, transposed confusion matrix: cfm_[i, j] estimates
        # P(actual class j | predicted class i); cfm_smooth avoids zero rows.
        cfm = confusion_matrix(y, self.estimator.predict(X)).T + self.cfm_smooth
        self.cfm_ = cfm / cfm.sum(axis=1).reshape(-1, 1)
        return self

    def predict_proba(self, X):
        """
        Predict new data, with probabilities

        :param X: array-like, shape=(n_columns, n_samples,) training data.
        :return: array, shape=(n_samples, n_classes) the predicted data
        """
        # Robustness fix: raise NotFittedError instead of AttributeError
        # when called before fit().
        check_is_fitted(self, ["cfm_", "classes_"])
        X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
        preds = self.estimator.predict_proba(X)
        # NOTE(review): here alpha weights the confusion-matrix correction,
        # while the class docstring attaches alpha to the raw probabilities;
        # confirm which weighting is intended.
        return (1 - self.alpha) * preds + self.alpha * preds @ self.cfm_

    def predict(self, X):
        """
        Predict new data.

        :param X: array-like, shape=(n_columns, n_samples,) training data.
        :return: array, shape=(n_samples,) the predicted data
        """
        check_is_fitted(self, ["cfm_", "classes_"])
        X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
        return self.classes_[self.predict_proba(X).argmax(axis=1)]
class SubjectiveClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""
Corrects predictions of the inner classifier by taking into account a (subjective) prior distribution of the
classes.
This can be useful when there is a difference in class distribution between the training data set and
the real world. Using the confusion matrix of the inner classifier and the prior, the posterior probability for a
class, given the prediction of the inner classifier, can be computed. The background for this posterior estimation
    is given in `this article <https://lucdemortier.github.io/articles/16/PerformanceMetrics>`_.
Based on the `evidence` attribute, this meta estimator's predictions are based on simple weighing of the inner
estimator's `predict_proba()` results, the posterior probabilities based on the confusion matrix, or a combination
of the two approaches.
:param estimator: An sklearn-compatible classifier estimator
:param prior: A dict of class->frequency representing the prior (a.k.a. subjective real-world) class
distribution. The class frequencies should sum to 1.
:param evidence: A string indicating which evidence should be used to correct the inner estimator's predictions.
Should be one of 'predict_proba', 'confusion_matrix', or 'both' (default). If `predict_proba`, the inner estimator's
`predict_proba()` results are multiplied by the prior distribution. In case of `confusion_matrix`, the inner
estimator's discrete predictions are converted to posterior probabilities using the prior and the inner estimator's
    confusion matrix (obtained from the train data used in `fit()`). In case of `both` (default), the inner
estimator's `predict_proba()` results are multiplied by the posterior probabilities.
"""
    def __init__(self, estimator, prior, evidence="both"):
        # Parameters are stored as-is (sklearn convention); validation is
        # presumably deferred to fit() — confirm against the full class.
        self.estimator = estimator
        self.prior = prior
        self.evidence = evidence
def _likelihood(self, predicted_class, given_class, cfm):
return cfm[given_class, predicted_class] / cfm[given_class, :].sum()
def _evidence(self, predicted_class, cfm):
return sum(
[
self._likelihood(predicted_class, given_class, cfm)
* self.prior[self.classes_[given_class]]
for given_class in range(cfm.shape[0])
]
)
def _posterior(self, y, y_hat, cfm):
y_hat_evidence = self._evidence(y_hat, cfm)
return (
(
self._likelihood(y_hat, y, cfm)
* self.prior[self.classes_[y]]
/ y_hat_evidence
)
if y_hat_evidence > 0
else self.prior[y] # in case confusion matrix has all-zero column for y_hat
)
def fit(self, X, y):
"""
Fits the inner estimator based on the data.
Raises a `ValueError` | |
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
reset_metrics: If `True`, the metrics returned will be only for this
batch. If `False`, the metrics will be statefully accumulated across
batches.
# Returns
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
class_weight=class_weight)
if self._uses_dynamic_learning_phase():
ins = x + y + sample_weights + [1]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if reset_metrics:
self.reset_metrics()
return unpack_singleton(outputs)
def test_on_batch(self, x, y, sample_weight=None, reset_metrics=True):
"""Test the model on a single batch of samples.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
reset_metrics: If `True`, the metrics returned will be only for this
batch. If `False`, the metrics will be statefully accumulated across
batches.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight)
if self._uses_dynamic_learning_phase():
ins = x + y + sample_weights + [0]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if reset_metrics:
self.reset_metrics()
return unpack_singleton(outputs)
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
# Arguments
x: Input samples, as a Numpy array.
# Returns
Numpy array(s) of predictions.
"""
x, _, _ = self._standardize_user_data(x)
if self._uses_dynamic_learning_phase():
ins = x + [0]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
return unpack_singleton(outputs)
@interfaces.legacy_generator_methods_support
def fit_generator(self, generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
validation_freq=1,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""Trains the model on data generated batch-by-batch by a Python generator
(or an instance of `Sequence`).
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
The use of `keras.utils.Sequence` guarantees the ordering
and guarantees the single use of every input per epoch when
using `use_multiprocessing=True`.
# Arguments
generator: A generator or an instance of `Sequence`
(`keras.utils.Sequence`) object in order to avoid
duplicate data when using multiprocessing.
The output of the generator must be either
- a tuple `(inputs, targets)`
- a tuple `(inputs, targets, sample_weights)`.
This tuple (a single output of the generator) makes a single
batch. Therefore, all arrays in this tuple must have the same
length (equal to the size of this batch). Different batches may
have different sizes. For example, the last batch of the epoch
is commonly smaller than the others, if the size of the dataset
is not divisible by the batch size.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Integer.
Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to `ceil(num_samples / batch_size)`
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire data provided,
as defined by `steps_per_epoch`.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: Integer. 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/callbacks).
validation_data: This can be either
- a generator or a `Sequence` object for the validation data
- tuple `(x_val, y_val)`
- tuple `(x_val, y_val, val_sample_weights)`
on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `validation_data` generator before stopping
at the end of every epoch. It should typically
be equal to the number of samples of your
validation dataset divided by the batch size.
Optional for `Sequence`: if unspecified, will use
the `len(validation_data)` as a number of steps.
validation_freq: Only relevant if validation data is provided. Integer
or `collections.Container` instance (e.g. list, tuple, etc.). If an
integer, specifies how many training epochs to run before a new
validation run is performed, e.g. `validation_freq=2` runs
validation every 2 epochs. If a Container, specifies the epochs on
which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
validation at the end of the 1st, 2nd, and 10th epochs.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only). This can be useful to tell the model to
"pay more attention" to samples
from an under-represented class.
max_queue_size: Integer. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation
relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
shuffle: Boolean. Whether to shuffle the order of the batches at
the beginning of each epoch. Only used with instances
of `Sequence` (`keras.utils.Sequence`).
Has no effect when `steps_per_epoch` is not `None`.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
# Returns
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
# Raises
ValueError: In case the generator yields data in an invalid format.
# Example
```python
def generate_arrays_from_file(path):
while True:
with open(path) as f:
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
"""
return training_generator.fit_generator(
self, generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
validation_freq=validation_freq,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
@interfaces.legacy_generator_methods_support
def evaluate_generator(self, generator,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
# Arguments
generator: Generator yielding tuples | |
<gh_stars>1-10
# Copyright 2021 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: <NAME>
import json
import os
import os.path
import shutil
import urllib.parse
from typing import Optional, List, Dict, Tuple
from urllib.parse import urlparse
import google.cloud.bigquery as bigquery
import pandas as pd
import pendulum
from airflow.exceptions import AirflowException
from airflow.models.variable import Variable
from airflow.operators.bash import BashOperator
from airflow.secrets.environment_variables import EnvironmentVariablesBackend
from airflow.sensors.external_task import ExternalTaskSensor
from academic_observatory_workflows.clearbit import clearbit_download_logo
from observatory.platform.utils.airflow_utils import get_airflow_connection_password, AirflowVars
from observatory.platform.utils.gc_utils import (
select_table_shard_dates,
bigquery_sharded_table_id,
download_blobs_from_cloud_storage,
)
from observatory.platform.utils.url_utils import get_url_domain_suffix
from observatory.platform.utils.workflow_utils import make_release_date
from observatory.platform.workflows.snapshot_telescope import SnapshotRelease
from observatory.platform.workflows.workflow import Workflow
# The minimum number of outputs before including an entity in the analysis
INCLUSION_THRESHOLD = 1000
# The query that pulls data to be included in the dashboards
QUERY = """
SELECT
agg.id,
agg.name,
agg.time_period as year,
(SELECT * from grid.links LIMIT 1) AS url,
(SELECT * from grid.types LIMIT 1) AS type,
DATE(agg.time_period, 12, 31) as date,
agg.total_outputs as n_outputs,
agg.access_types.oa.total_outputs AS n_outputs_oa,
agg.access_types.gold.total_outputs AS n_outputs_gold,
agg.access_types.green.total_outputs AS n_outputs_green,
agg.access_types.hybrid.total_outputs AS n_outputs_hybrid,
agg.access_types.bronze.total_outputs AS n_outputs_bronze
FROM
`{project_id}.{agg_dataset_id}.{agg_table_id}` as agg
LEFT OUTER JOIN `{project_id}.{grid_dataset_id}.{grid_table_id}` as grid ON agg.id = grid.id
WHERE agg.time_period <= EXTRACT(YEAR FROM CURRENT_DATE())
ORDER BY year DESC, name ASC
"""
# Overrides for country names
NAME_OVERRIDES = {
"Bolivia (Plurinational State of)": "Bolivia",
"Bosnia and Herzegovina": "Bosnia",
"Brunei Darussalam": "Brunei",
"Congo": "Congo Republic",
"Congo, Democratic Republic of the": "DR Congo",
"Iran (Islamic Republic of)": "Iran",
"Korea (Democratic People's Republic of)": "North Korea",
"Korea, Republic of": "South Korea",
"Lao People's Democratic Republic": "Laos",
"Micronesia (Federated States of)": "Micronesia",
"Moldova, Republic of": "Moldova",
"Palestine, State of": "Palestine",
"Saint Kitts and Nevis": "St Kitts & Nevis",
"Saint Lucia": "St Lucia",
"Saint Vincent and the Grenadines": "St Vincent",
"Sint Maarten (Dutch part)": "Sint Maarten",
"Svalbard and <NAME>": "Svalbard & <NAME>en",
"Syrian Arab Republic": "Syria",
"Taiwan, Province of China": "Taiwan",
"Tanzania, United Republic of": "Tanzania",
"Trinidad and Tobago": "Trinidad & Tobago",
"United Kingdom of Great Britain and Northern Ireland": "United Kingdom",
"United States of America": "United States",
"Venezuela (Bolivarian Republic of)": "Venezuela",
"Viet Nam": "Vietnam",
"Virgin Islands (British)": "Virgin Islands",
"Antigua and Barbuda": "Antigua & Barbuda",
"Russian Federation": "Russia",
}
def bq_query_to_gcs(*, query: str, project_id: str, destination_uri: str, location: str = "us") -> bool:
    """Run a BigQuery query and save the results on Google Cloud Storage.

    :param query: the query string.
    :param project_id: the Google Cloud project id.
    :param destination_uri: the Google Cloud Storage destination uri.
    :param location: the BigQuery dataset location.
    :return: whether both the query job and the extract job finished successfully.
    """
    # Bind the client to the given project so that the query's anonymous
    # destination table is created in the expected project (previously the
    # client used the environment's default project, which could differ
    # from project_id).
    client = bigquery.Client(project=project_id)

    # Run query and wait for completion
    query_job: bigquery.QueryJob = client.query(query, location=location)
    query_job.result()

    # Create and run extraction job.
    # Derive the source table id from the job's actual destination rather
    # than from project_id, so the id is correct in every configuration.
    destination = query_job.destination
    source_table_id = f"{destination.project}.{destination.dataset_id}.{destination.table_id}"
    extract_job_config = bigquery.ExtractJobConfig()
    extract_job_config.destination_format = bigquery.DestinationFormat.CSV
    extract_job: bigquery.ExtractJob = client.extract_table(
        source_table_id, destination_uri, job_config=extract_job_config, location=location
    )
    extract_job.result()

    return query_job.state == "DONE" and extract_job.state == "DONE"
def clean_url(url: str) -> str:
    """Reduce a URL to its scheme and network location.

    :param url: the url.
    :return: the cleaned url, i.e. ``scheme://host/`` with path and query removed.
    """
    parts = urlparse(url)
    return "{}://{}/".format(parts.scheme, parts.netloc)
class OaWebRelease(SnapshotRelease):
    """Release that builds the data files (index tables, per-entity summaries
    and timeseries) consumed by the open-access web dashboards."""

    # Field suffixes for which percentage columns (p_<key>) are derived from
    # the corresponding count columns (n_<key>).
    PERCENTAGE_FIELD_KEYS = ["outputs_oa", "outputs_gold", "outputs_green", "outputs_hybrid", "outputs_bronze"]
def __init__(
self,
*,
dag_id: str,
project_id: str,
release_date: pendulum.DateTime,
change_chart_years: int = 10,
agg_dataset_id: str = "observatory",
grid_dataset_id: str = "digital_science",
):
"""Create an OaWebRelease instance.
:param dag_id: the dag id.
:param project_id: the Google Cloud project id.
:param release_date: the release date.
:param change_chart_years: the number of years to include in the change charts.
:param agg_dataset_id: the dataset to use for aggregation.
:param grid_dataset_id: the GRID dataset id.
"""
super().__init__(dag_id=dag_id, release_date=release_date)
self.project_id = project_id
self.change_chart_years = change_chart_years
self.agg_dataset_id = agg_dataset_id
self.grid_dataset_id = grid_dataset_id
    @property
    def build_path(self):
        # Directory under the release's transform folder where the web assets
        # (data files and logos) are assembled.
        return os.path.join(self.transform_folder, "build")
def load_csv(self, category: str) -> pd.DataFrame:
"""Load the CSV file for a given category.
:param category: the category, i.e. country or institution.
:return: the Pandas Dataframe.
"""
# Load CSV
csv_path = os.path.join(self.download_folder, f"{category}.csv")
df = pd.read_csv(csv_path)
df["date"] = pd.to_datetime(df["date"])
df.fillna("", inplace=True)
return df
    def make_index(self, df: pd.DataFrame, category: str):
        """Make the data for the index tables.

        :param df: Pandas dataframe with all data points.
        :param category: the category, i.e. country or institution.
        :return: the aggregated, filtered and ranked index table dataframe.
        """
        # Aggregate: collapse the per-year rows into one row per entity.
        # NOTE(review): 'index' is not a parameter of DataFrameGroupBy.agg —
        # this looks like it was meant to be groupby([...], as_index=False);
        # verify against the pandas version pinned by this project.
        df_index_table = df.groupby(["id"]).agg(
            {
                "id": "first",
                "name": "first",
                "url": "first",
                "type": "first",
                "n_outputs": "sum",
                "n_outputs_oa": "sum",
                "n_outputs_gold": "sum",
                "n_outputs_green": "sum",
                "n_outputs_hybrid": "sum",
                "n_outputs_bronze": "sum",
            },
            index=False,
        )

        # Exclude entities with small samples
        df_index_table = df_index_table[df_index_table["n_outputs"] >= INCLUSION_THRESHOLD]

        # Add percentage (p_*) columns to the dataframe
        self.update_df_with_percentages(df_index_table, self.PERCENTAGE_FIELD_KEYS)

        # Sort from highest oa percentage to lowest
        df_index_table.sort_values(by=["p_outputs_oa"], ascending=False, inplace=True)

        # Add ranks: 1 = highest open-access percentage
        df_index_table["rank"] = list(range(1, len(df_index_table) + 1))

        # Add category
        df_index_table["category"] = category

        # Name overrides: replace long official names with friendly ones
        df_index_table["name"] = df_index_table["name"].apply(
            lambda name: NAME_OVERRIDES[name] if name in NAME_OVERRIDES else name
        )

        # Clean URLs.
        # NOTE(review): load_csv() fills NaNs with "", so pd.isnull(u) can
        # never be True here and empty strings flow straight through —
        # confirm whether "" should also be excluded.
        df_index_table["friendly_url"] = df_index_table["url"].apply(
            lambda u: get_url_domain_suffix(u) if not pd.isnull(u) else u
        )

        # For countries, replace the url with a Wikipedia link built from the name
        if category == "country":
            df_index_table["url"] = df_index_table["name"].apply(
                lambda name: f"https://en.wikipedia.org/wiki/{urllib.parse.quote(name)}"
            )

        return df_index_table
def update_df_with_percentages(self, df: pd.DataFrame, keys: List[str]):
"""Calculate percentages for fields in a Pandas dataframe.
:param df: the Pandas dataframe.
:param keys: they keys to calculate percentages for.
:return: None.
"""
for key in keys:
df[f"p_{key}"] = round(df[f"n_{key}"] / df["n_outputs"] * 100, 0)
def update_index_with_logos(self, df_index_table: pd.DataFrame, category: str, size=32, fmt="jpg"):
"""Update the index with logos, downloading logos if the don't exist.
:param df_index_table: the index table Pandas dataframe.
:param category: the category, i.e. country or institution.
:param size: the image size.
:param fmt: the image format.
:return: None.
"""
# Make logos
if category == "country":
df_index_table["logo"] = df_index_table["id"].apply(
lambda country_code: f"/logos/{category}/{country_code}.svg"
)
elif category == "institution":
base_path = os.path.join(self.build_path, "logos", category)
logo_path_unknown = f"/unknown.svg"
os.makedirs(base_path, exist_ok=True)
logos = []
for i, row in df_index_table.iterrows():
grid_id = row["id"]
url = clean_url(row["url"])
logo_path = logo_path_unknown
if not pd.isnull(url):
file_path = os.path.join(base_path, f"{grid_id}.{fmt}")
if not os.path.isfile(file_path):
clearbit_download_logo(company_url=url, file_path=file_path, size=size, fmt=fmt)
if os.path.isfile(file_path):
logo_path = f"/logos/{category}/{grid_id}.{fmt}"
logos.append(logo_path)
df_index_table["logo"] = logos
def save_index(self, df_index_table: pd.DataFrame, category: str):
"""Save the index table.
:param df_index_table: the index table Pandas Dataframe.
:param category: the category, i.e. country or institution.
:return: None.
"""
# Save subset
base_path = os.path.join(self.build_path, "data", category)
os.makedirs(base_path, exist_ok=True)
summary_path = os.path.join(base_path, "summary.json")
columns = [
"id",
"rank",
"name",
"category",
"logo",
"p_outputs_oa",
"p_outputs_gold",
"p_outputs_green",
"n_outputs",
"n_outputs_oa",
]
df_subset = df_index_table[columns]
df_subset.to_json(summary_path, orient="records")
def save_entity_details(self, df_index_table: pd.DataFrame, category: str):
"""Save the summary data for each entity, saving the result for each entity as a JSON file.
:param df_index_table: a Pandas dataframe.
:param category: the category, i.e. country or institution.
:return: None.
"""
base_path = os.path.join(self.build_path, "data", category)
os.makedirs(base_path, exist_ok=True)
df_index_table["category"] = category
records = df_index_table.to_dict("records")
for row in records:
entity_id = row["id"]
output_path = os.path.join(base_path, f"{entity_id}_summary.json")
with open(output_path, mode="w") as f:
json.dump(row, f, separators=(",", ":"))
    def make_timeseries(self, df: pd.DataFrame) -> List[Tuple[str, pd.DataFrame]]:
        """Make timeseries data for each entity, returning them.

        :param df: the Pandas dataframe containing all data points for a particular category.
        :return: a list of (entity id, timeseries dataframe) tuples, one per
            entity whose total output count meets INCLUSION_THRESHOLD.
        """
        results = []
        ts_groups = df.groupby(["id"])
        for entity_id, df_group in ts_groups:
            # Exclude entities with a small number of total outputs
            total_outputs = df_group["n_outputs"].sum()
            if total_outputs >= INCLUSION_THRESHOLD:
                # NOTE(review): df_group is a slice of df, so adding the p_*
                # columns in place may trigger pandas' SettingWithCopyWarning —
                # confirm whether operating on a copy was intended.
                self.update_df_with_percentages(df_group, self.PERCENTAGE_FIELD_KEYS)
                df_group = df_group.sort_values(by=["year"])
                # Drop any 'Unnamed' columns left over from CSV round-trips
                df_group = df_group.loc[:, ~df_group.columns.str.contains("^Unnamed")]
                # Keep only the columns the timeseries charts consume
                df_ts: pd.DataFrame = df_group[
                    [
                        "year",
                        "n_outputs",
                        "n_outputs_oa",
                        "p_outputs_oa",
                        "p_outputs_gold",
                        "p_outputs_green",
                        "p_outputs_hybrid",
                        "p_outputs_bronze",
                    ]
                ]
                results.append((entity_id, df_ts))
        return results
def save_timeseries(self, timeseries: List[Tuple[str, pd.DataFrame]], category: str):
"""Save the timeseries data.
:param timeseries: the timeseries data for each entity. A list of tuples, with (entity id, entity dataframe).
:param category: the category, i.e. country or institution.
:return: None.
"""
base_path = os.path.join(self.build_path, "data", category)
os.makedirs(base_path, exist_ok=True)
for entity_id, df_ts in timeseries:
ts_path = os.path.join(base_path, f"{entity_id}_ts.json")
df_ts.to_json(ts_path, orient="records")
def make_auto_complete(self, df_index_table: pd.DataFrame, category: str):
"""Build the autocomplete data.
:param df_index_table: index table Pandas | |
# Register the previously-built reaction and print its mass balance
# (an empty dict printed by check_mass_balance() means balanced).
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))

# Inorganic diphosphatase: h2o_SFOc + ppi_SFOc -> h_SFOc + 2.0 pi_SFOc
reaction = Reaction('SFO_PPA')
reaction.name = 'Inorganic diphosphatase'
reaction.subsystem = 'AMP Conversion'
reaction.lower_bound = 0.  # This is the default
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({h2o_SFOc: -1.0,
                          ppi_SFOc: -1.0,
                          pi_SFOc: 2.0,
                          h_SFOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))

# Phosphate exchange with the shared pool: pi_e ->
# NOTE(review): the SFO_Abnd coefficient appears to scale the shared
# extracellular metabolite by the guild's relative abundance — confirm
# against the community-model convention used elsewhere in this script.
pi_SFOe = Metabolite('pi_SFOe', formula='HO4P', name='Phosphate', compartment='SFOe', charge=-2)
reaction = Reaction('SFO_EX_pi')
reaction.name = 'Phosphate Transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000.  # Reversible
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({pi_e: SFO_Abnd,
                          pi_SFOe: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))

# Phosphate uptake from the SFO periplasm/external compartment to cytoplasm
reaction = Reaction('SFO_pit')
reaction.name = 'Phosphate Transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000.  # Reversible
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({pi_SFOe: -1.0,
                          pi_SFOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Biomass Reaction
# Lumped biomass equation: negative coefficients are precursors consumed,
# positive coefficients are by-products released per unit of biomass formed.
BIOMASS_SFO = Metabolite('Biomass_SFO', formula='', name='Biomass_SFO', compartment='e', charge=0)
reaction = Reaction('SFO_BIOMASS')
reaction.name = 'Biomass_SFO'
reaction.subsystem = 'Biomass'
reaction.lower_bound = 0.  # This is the default
reaction.upper_bound = 1000.  # This is the default
# ATP_BIOMASS_SFO mirrors the 36 ATP consumed here so the guild's ATP
# bookkeeping reactions can report biomass-associated ATP separately.
reaction.add_metabolites({akg_SFOc: -1.17,
                          oaa_SFOc: -2.06,
                          g6p_SFOc: -0.26,
                          g3p_SFOc: -1.58,
                          _3pg_SFOc: -1.31,
                          pyr_SFOc: -4.33,
                          pep_SFOc: -0.92,
                          accoa_SFOc: -3.06,
                          e4p_SFOc: -0.40,
                          r5p_SFOc: -0.35,
                          fum_SFOc: 0.37,
                          ac_SFOc: 0.43,
                          for_SFOc: 0.29,
                          atp_SFOc: -36.0,
                          nadph_SFOc: -19.39,
                          nadh_SFOc: 1.10,
                          nh4_SFOc: -8.62,
                          h_SFOc: 10.13,
                          adp_SFOc: 34.6,
                          pi_SFOc: 31.88,
                          ppi_SFOc: 4.74,
                          amp_SFOc: 1.4,
                          co2_SFOc: 3.54,
                          h2o_SFOc: -7.57,
                          coa_SFOc: 3.06,
                          nad_SFOc: -1.10,
                          nadp_SFOc: 19.39,
                          so4_SFOc: -0.21,
                          BIOMASS_SFO: 1,
                          ATP_BIOMASS_SFO: -36.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))

# Export guild biomass into the community biomass pool, scaled by abundance
reaction = Reaction('SFO_EX_BIOMASS')
reaction.name = 'Biomass Exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = 0.  # This is the default
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({BIOMASS_SFO: -1.0,
                          BIOMASS_COMM_e: SFO_Abnd})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# SFO ATP accounting
# Each reaction below drains one of the guild-local ATP tally
# pseudo-metabolites into the corresponding community-level pool,
# scaled by the guild's relative abundance.
reaction = Reaction('SFO_ATP_SLP')
reaction.name = 'ATP produced via substrate-level phosphorylation'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000.  # Reversible
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({ATP_SLP_SFO: -1.0,
                          ATP_SLP: SFO_Abnd})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))

reaction = Reaction('SFO_ATP_HYDRO')
reaction.name = 'ATP (excess) consumed via hydrolysis'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000.  # Reversible
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({ATP_HYDR_SFO: -1.0,
                          ATP_HYDR: SFO_Abnd})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))

reaction = Reaction('SFO_ATP_IMF')
reaction.name = 'ATP produced via ion motive force '
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000.  # Reversible
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({ATP_IMF_SFO: -1.0,
                          ATP_IMF: SFO_Abnd})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))

reaction = Reaction('SFO_ATP_TRANS')
reaction.name = 'ATP consumed for transport'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000.  # Reversible
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({ATP_TRANS_SFO: -1.0,
                          ATP_TRANS: SFO_Abnd})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))

reaction = Reaction('SFO_ATP_BIOMASS')
reaction.name = 'ATP consumed via biomass equation'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000.  # Reversible
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({ATP_BIOMASS_SFO: -1.0,
                          ATP_BIOMASS: SFO_Abnd})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))

#Summarize Model Reactions and Metabolites
print("Reactions: " + str(len(model.reactions)))
print("Metabolites: " + str(len(model.metabolites)))
print("Genes: " + str(len(model.genes)))
##SFO Transport Energy
# Adds constraints tying each export flux to the ATP cost of moving the
# product against its transmembrane concentration gradient. A constraint is
# only added when the computed ATP cost is positive.
if TransEnergetics == True:
    ##Acetate Transport Energy
    deltaG_trans_grad_Acetate = R*(T+273.15)*(math.log(S_Acetate/C_in_Acetate))
    ATP_trans_Acetate = -1*(deltaG_trans_grad_Acetate + deltaG_pH)/ deltaG_ATP_Hydrolysis
    if ATP_trans_Acetate > 0:
        Constraint_trans_Acetate = model.problem.Constraint(model.reactions.SFO_Acetate_Transport_ATP.flux_expression - ATP_trans_Acetate * model.reactions.SFO_Acetate_export.flux_expression, lb=0, ub=0)
        model.add_cons_vars(Constraint_trans_Acetate)

    ##Lactate Transport Energy
    deltaG_trans_grad_Lactate = R*(T+273.15)*(math.log(S_Lactate/C_in_Lactate))
    ATP_trans_Lactate = -1*(deltaG_trans_grad_Lactate + deltaG_pH)/ deltaG_ATP_Hydrolysis
    if ATP_trans_Lactate > 0:
        Constraint_trans_Lactate = model.problem.Constraint(model.reactions.SFO_Lactate_Transport_ATP.flux_expression - ATP_trans_Lactate* model.reactions.SFO_Lactate_export.flux_expression, lb=0, ub=0)
        model.add_cons_vars(Constraint_trans_Lactate)

    ##Proton Transport Energy
    # BUG FIX: a concentration derived from pH is 10**(-pH). The previous
    # code computed 10*math.exp(-pH), i.e. 10 * e**(-pH), which is a
    # different quantity and skewed the proton gradient term.
    S_H = 10**(-pH_out)
    C_in_H = 10**(-pH_in)
    deltaG_trans_grad_Proton = R*(T+273.15)*(math.log(S_H/C_in_H))
    ATP_trans_Proton = -1*(deltaG_trans_grad_Proton + deltaG_Sai)/ deltaG_ATP_Hydrolysis
    if ATP_trans_Proton > 0:
        Constraint_trans_Proton = model.problem.Constraint(model.reactions.SFO_Proton_Transport_ATP.flux_expression - ATP_trans_Proton* model.reactions.SFO_H_export.flux_expression, lb=0, ub=0)
        model.add_cons_vars(Constraint_trans_Proton)

    ##Ethanol Transport Energy
    deltaG_trans_grad_Ethanol = R*(T+273.15)*(math.log(S_Ethanol/C_in_Ethanol))
    ATP_trans_Ethanol = -1*(deltaG_trans_grad_Ethanol)/ deltaG_ATP_Hydrolysis
    if ATP_trans_Ethanol > 0:
        Constraint_trans_Ethanol = model.problem.Constraint(model.reactions.SFO_Ethanol_Transport_ATP.flux_expression - ATP_trans_Ethanol* model.reactions.SFO_Ethanol_export.flux_expression, lb=0, ub=0)
        model.add_cons_vars(Constraint_trans_Ethanol)
# The HSF sub-model is only added when the guild has a non-zero relative
# abundance in the community.
if HSF_Rel_Abnd > 0:
    ###Hydrogenic sugar fermenting organisms (HSFs)
    # Pseudo-metabolites that tally ATP produced/consumed by the HSF guild,
    # mirroring the accounting metabolites used for the other guilds.
    ATP_SLP_HSF = Metabolite('ATP_SLP_HSF', formula='', name='', compartment='HSFe', charge=0)
    ATP_IMF_HSF = Metabolite('ATP_IMF_HSF', formula='', name='', compartment='HSFe', charge=0)
    ATP_BIOMASS_HSF = Metabolite('ATP_BIOMASS_HSF', formula='', name='', compartment='HSFe', charge=0)
    ATP_HYDR_HSF = Metabolite('ATP_HYDR_HSF', formula='', name='', compartment='HSFe', charge=0)
    ATP_TRANS_HSF = Metabolite('ATP_TRANS_HSF', formula='', name='', compartment='HSFe', charge=0)

    #Xylan Degradation
    # xyl4 is the xylotetraose unit: 4 xylose - 3 H2O = C20H34O17
    xyl4_HSFc = Metabolite('xyl4_HSFc',formula='C20H34O17',name='xyl4_HSFc',compartment='HSFc', charge=0)
    xyl__D_HSFc = Metabolite('xyl__D_HSFc', formula='C5H10O5', name='xylose-D', compartment='HSFc', charge=0)
    h2o_HSFc = Metabolite('h2o_HSFc', formula='H2O', name='H2O', compartment='HSFc', charge=0)
    xyl__D_HSFe = Metabolite('xyl__D_HSFe', formula='C5H10O5', name='xylose-D', compartment='HSFe', charge=0)
    xyl4_HSFe = Metabolite('xyl4_HSFe', formula='C20H34O17', name='xyl4', compartment='HSFe', charge=0)

    #xyl4_HSFe <-> xyl4_e
    # The HSF_Abnd coefficient scales the shared pool metabolite by the
    # guild's relative abundance.
    reaction = Reaction('HSF_EX_xyl4')
    reaction.name = 'HSF xyl4 Exchange'
    reaction.subsystem = 'Exchange'
    reaction.lower_bound = -1000.  # Reversible
    reaction.upper_bound = 1000.  # This is the default
    reaction.add_metabolites({xyl4_e: HSF_Abnd,
                              xyl4_HSFe: -1.0})
    model.add_reactions([reaction])
    print(reaction.name + ": " + str(reaction.check_mass_balance()))

    #xyl4_HSFe <-> xyl4_HSFc
    reaction = Reaction('HSF_xyl4t')
    reaction.name = 'Xylan Transport'
    reaction.subsystem = 'Complex Carbohydrate Degradation'
    reaction.lower_bound = 0.  # This is the default
    reaction.upper_bound = 1000.  # This is the default
    reaction.add_metabolites({xyl4_HSFe: -1.0,
                              xyl4_HSFc: 1.0})
    model.add_reactions([reaction])
    print(reaction.name + ": " + str(reaction.check_mass_balance()))

    #xyl4_HSFc + 3.0 h2o_HSFc <-> 4.0 xyl__D_HSFc
    reaction = Reaction('HSF_C5Hyd')
    reaction.name = 'Xylan Hydrolysis'
    reaction.subsystem = 'Complex Carbohydrate Degradation'
    reaction.lower_bound = 0.  # This is the default
    reaction.upper_bound = 1000.  # This is the default
    reaction.add_metabolites({xyl4_HSFc: -1.0,
                              h2o_HSFc: -3.0,
                              xyl__D_HSFc: 4.0})
    model.add_reactions([reaction])
    print(reaction.name + ": " + str(reaction.check_mass_balance()))

    #Glucan Degradation
    # NOTE(review): for 4 glucose - 3 H2O the tetramer formula should be
    # C24H42O21; 'C24H22O21' looks like a typo (42 -> 22) and will show up
    # as a hydrogen imbalance in the check_mass_balance() prints — confirm
    # against the formula used for glc4_e elsewhere before changing either.
    glc4_HSFc = Metabolite('glc4_HSFc',formula='C24H22O21',name='glucan',compartment='HSFc', charge=0)
    glc__D_HSFc = Metabolite('glc__D_HSFc', formula='C6H12O6',name='D-Glucose',compartment='HSFc', charge=0)
    glc4_HSFe = Metabolite('glc4_HSFe',formula='C24H22O21',name='glucan',compartment='HSFe', charge=0)
    glc__D_HSFe = Metabolite('glc__D_HSFe', formula='C6H12O6',name='D-Glucose',compartment='HSFe', charge=0)

    #glc4_HSFe <-> glc4_e
    reaction = Reaction('HSF_EX_glc4')
    reaction.name = 'HSF glc4 exchange'
    reaction.subsystem = 'Complex Carbohydrate Degradation'
    reaction.lower_bound = -1000.  # Reversible
    reaction.upper_bound = 1000.  # This is the default
    reaction.add_metabolites({glc4_e: HSF_Abnd,
                              glc4_HSFe: -1.0})
    model.add_reactions([reaction])
    print(reaction.name + ": " + str(reaction.check_mass_balance()))

    #glc4_HSFe <-> glc4_HSFc
    reaction = Reaction('HSF_glc4t')
    reaction.name = 'Glucan Transport'
    reaction.subsystem = 'Complex Carbohydrate Degradation'
    reaction.lower_bound = 0.  # This is the default
    reaction.upper_bound = 1000.  # This is the default
    reaction.add_metabolites({glc4_HSFe: -1.0,
                              glc4_HSFc: 1.0})
    model.add_reactions([reaction])
    print(reaction.name + ": " + str(reaction.check_mass_balance()))

    #glc4_HSFc + 3.0 h2o_HSFc <-> 4.0 glc__D_HSFc
    # NOTE(review): the reaction consumes 3 waters (matching the xylan
    # hydrolysis pattern), not 1 as an earlier comment suggested.
    # NOTE(review): this re-definition of h2o_HSFc creates a second
    # Metabolite object with the same id as the one defined in the xylan
    # section — confirm cobra merges them as intended when adding reactions.
    h2o_HSFc = Metabolite('h2o_HSFc', formula='H2O', name='H2O', compartment='HSFc', charge=0)
    reaction = Reaction('HSF_C6Hyd')
    reaction.name = '<NAME>'  # NOTE(review): placeholder string, presumably 'Glucan Hydrolysis'
    reaction.subsystem = 'Complex Carbohydrate Degradation'
    reaction.lower_bound = 0.  # This is the default
    reaction.upper_bound = 1000.  # This is the default
    reaction.add_metabolites({glc4_HSFc: -1.0,
                              h2o_HSFc: -3.0,
                              glc__D_HSFc: 4.0})
    model.add_reactions([reaction])
    print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Pentose Utilization
#xyl__D_HSFc <-> xyl__D_e
reaction = Reaction('HSF_EX_xyl__D')
reaction.name = 'HSF xyl__D exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 0. # This is the default
reaction.add_metabolites({xyl__D_e: HSF_Abnd,
xyl__D_HSFc: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#xyl__D_HSFe <-> xyl__D_HSFc
reaction = Reaction('HSF_XYLt')
reaction.name = 'D xylose transport'
reaction.subsystem = 'Pentose Utilization'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({xyl__D_HSFe: -1.0,
xyl__D_HSFc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#atp_HSFc + h2o_HSFc + xyl__D_HSFe <-> adp_HSFc + h_HSFc + pi_HSFc + xyl__D_HSFc
atp_HSFc = Metabolite('atp_HSFc', formula='C10H12N5O13P3', name='ATP', compartment='HSFc', charge=-4)
adp_HSFc = Metabolite('adp_HSFc', formula='C10H12N5O10P2', name='ADP', compartment='HSFc', charge=-3)
h_HSFc = Metabolite('h_HSFc', formula='H', name='H+', compartment='HSFc', charge=1)
pi_HSFc = Metabolite('pi_HSFc', formula='HO4P', name='xylose-D', compartment='HSFc', charge=-2)
reaction = Reaction('HSF_XYLabc')
reaction.name = 'D-xylose transport via ABC system'
reaction.subsystem = 'Pentose Utilization'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_HSFc: -1.0,
h2o_HSFc: -1.0,
xyl__D_HSFe: -1.0,
adp_HSFc: 1.0,
h_HSFc: 1.0,
pi_HSFc: 1.0,
xyl__D_HSFc: 1.0,
ATP_SLP_HSF: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#xyl__D_HSFc <-> xylu__D_HSFc
xylu__D_HSFc = Metabolite('xylu__D_HSFc', formula='C5H10O5', name='D-xylulose', compartment='HSFc', charge=0)
reaction = Reaction('HSF_XYLI1')
reaction.name = 'Xylose isomerase'
reaction.subsystem = 'Pentose Utilization'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({xyl__D_HSFc: -1.0,
xylu__D_HSFc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#atp_HSFc + xylu__D_HSFc <-> adp_HSFc + h_HSFc + xu5p__D_HSFc
xu5p__D_HSFc = Metabolite('xu5p__D_HSFc', formula='C5H9O8P', name='D-Xylulose 5-phosphate', compartment='HSFc', charge=-2)
reaction = Reaction('HSF_XYLK')
reaction.name = 'Xylulokinase'
reaction.subsystem = 'Pentose Utilization'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_HSFc: -1.0,
xylu__D_HSFc: -1.0,
adp_HSFc: 1.0,
h_HSFc: 1.0,
xu5p__D_HSFc: 1.0,
ATP_SLP_HSF: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
###Phosphoketolase
#pi_HSFc + xu5p__D_HSFc <-> actp_HSFc + g3p_HSFc + h2o_HSFc
actp_HSFc = | |
value
logger.debug(f'| FTDI device, port {port}: Set read latency timer')
self.device.ctrl_transfer(bmRequestType = usb.util.build_request_type(direction = usb.util.CTRL_OUT, type = usb.util.CTRL_TYPE_VENDOR, recipient = usb.util.CTRL_RECIPIENT_DEVICE),
bRequest = CONTROL_REQUESTS['set_latency_timer'],
wValue = 1,
wIndex = self.ports[port])
# TODO(?): To ensure that the device driver will not issue IN requests if the buffer is unable to accept data, add a call to FT_SetFlowControl prior to entering MPSSE mode
# Set MPSSE bit mode (vendor-specific command)
logger.debug(f'| FTDI device, port {port}: Set MPSSE bit mode')
self.device.ctrl_transfer(bmRequestType = usb.util.build_request_type(direction = usb.util.CTRL_OUT, type = usb.util.CTRL_TYPE_VENDOR, recipient = usb.util.CTRL_RECIPIENT_DEVICE),
bRequest = CONTROL_REQUESTS['set_bit_mode'],
wValue = BIT_MODES['mpsse'],
wIndex = self.ports[port])
# Check MPSSE synchronization
# Send invalid COMMANDS 0xAA and 0xAB to the MPSSE to check if it responds correctly
logger.debug(f'| FTDI device, port {port}: Check MPSSE synchronization')
for command in [COMMANDS['invalid_command_0'], COMMANDS['invalid_command_1']]:
self.write(port = port, data = [command])
data_r = self.read(port = port, length = 2)
if data_r != [RESPONSES['invalid_command'], command]:
logger.error(f'FAIL: Wrong response to an invalid command 0x{command:02X} from FTDI device, port {port}: [{", ".join([f"0x{item:02X}" for item in data_r])}]')
raise FTDIError
# Disable master clock x5 divider
logger.debug(f'| FTDI device, port {port}: Disable master clock x5 divider')
self.write(port = port, data = [COMMANDS['disable_x5_clock_divider']])
# self.read(port = port, length = 0) # This read is used to indirectly check that command was accepted by MPSEE as it should return just two modem status bytes and nothing else
# Configure pins as inputs
# Every I/O pin (AD[7:0], AC[7:0], BD[7:0], BC[7:0]) has an internal 75 kOhm pull-up to VCCIO
# Just in case, wright 1 to all I/O pins output latches to match the default pin state when it's configured as input (pulled-up internally to VCCIO)
# Configure all I/O pins as inputs
logger.debug(f'| FTDI device, port {port}: Configure I/O pins as inputs')
self.write(port = port, data = [COMMANDS['set_pins_d'], 0b11111111, 0b00000000, COMMANDS['set_pins_c'], 0b11111111, 0b00000000])
# self.read(port = port, length = 0) # This read is used to indirectly check that command was accepted by MPSEE as it should return just two modem status bytes and nothing else
# TEST: Read pins
# logger.debug(f'| FTDI device, port {port}: Read pins')
# value = self.write(port = port, data = [COMMANDS['get_pins_d'], COMMANDS['get_pins_c'], COMMANDS['send_immediate']])
# self.read(port = port, length = 2)
logger.debug('OK')
return self
    def __exit__(self, exc_type, exc_value, traceback):
        """Finalize the FTDI device: reset the USB port, release the claimed
        interfaces, reattach the kernel driver and dispose of PyUSB resources.

        Always returns False so any in-flight exception propagates.
        """
        logger.debug('Finalize FTDI device')
        # If some strings (e.g. iManufacturer, iProduct) in FTDI configuration EEPROM have changed it invalidates the USB device descriptor (self.device) as it essentially disconnects the USB device
        # OS shall free the device resources (including claiming) when this happens
        # This might be fixed in future PyUSB release (github.com/pyusb/pyusb/issues/64) but for now the usb.core.USBError exception shall be caught here
        try:
            # Reset the USB port device is connected to (generic USB command)
            logger.debug('| USB: Reset port device is connected to')
            self.device.reset()
        except usb.core.USBError as exception:
            # (2, 'Entity not found') means the device has already vanished
            # (e.g. after an EEPROM string change) — nothing left to clean up
            if exception.args == (2, 'Entity not found'):
                pass
            else:
                raise
        else:
            # Device is still present: hand the interfaces back to the kernel
            # Release interfaces
            for configuration in self.device:  # Iterate over device configurations
                for interface in configuration:  # Iterate over configuration interfaces
                    logger.debug(f'| USB: Release configuration {configuration.bConfigurationValue}, interface {interface.bInterfaceNumber}')
                    usb.util.release_interface(device = self.device, interface = interface.bInterfaceNumber)
            # Reattach kernel driver
            for configuration in self.device:  # iterate over device configurations
                for interface in configuration:  # iterate over configuration interfaces
                    logger.debug(f'| OS: Attach kernel driver to configuration {configuration.bConfigurationValue}, interface {interface.bInterfaceNumber}')
                    self.device.attach_kernel_driver(interface = interface.bInterfaceNumber)
        finally:
            # Free all resources allocated by the device object, whether or
            # not the reset/cleanup above succeeded
            logger.debug('| Dispose resources')
            usb.util.dispose_resources(device = self.device)
        logger.debug('OK')
        return False
#---------------------------------------------------------------------------
# EEPROM
def eeprom_write(self, address, data):
'''Write external uWire configuration EEPROM
Parameter:
'address' : (int) Start word address
'data' : (list) Words to write
Return:
None
'''
logger.debug(f'FTDI device, EEPROM, write, address: 0x{address:02X}, length: {len(data):d} words')
logger.debug(f'[{", ".join([f"0x{item:04X}" for item in data])}]')
if data == []:
logger.critical(f'FAIL: zero words were requested to be written to EEPROM')
raise FTDICritical
for current_address in range(address, len(data)):
# This command writes a 16-bit word at a time
self.device.ctrl_transfer(bmRequestType = usb.util.build_request_type(direction = usb.util.CTRL_OUT, type = usb.util.CTRL_TYPE_VENDOR, recipient = usb.util.CTRL_RECIPIENT_DEVICE),
bRequest = CONTROL_REQUESTS['eeprom_write'],
wValue = data[current_address],
wIndex = current_address)
logger.debug('OK')
return None
def eeprom_read(self, address, length):
'''Read external uWire configuration EEPROM
Parameter:
'address' : (int) Start word address
'length' : (int) Number of words to read
Return:
(list) Read words
'''
logger.debug(f'FTDI device, EEPROM, read, address: 0x{address:02X}, length: {length:d} words')
data = []
for current_address in range(address, length):
# This command reads a 16-bit word at a time
data_r = self.device.ctrl_transfer(bmRequestType = usb.util.build_request_type(direction = usb.util.CTRL_IN, type = usb.util.CTRL_TYPE_VENDOR, recipient = usb.util.CTRL_RECIPIENT_DEVICE),
bRequest = CONTROL_REQUESTS['eeprom_read'],
wValue = 0,
wIndex = current_address,
data_or_wLength = 2)
data += [data_r[1] * 256 + data_r[0]]
logger.debug('OK')
logger.debug(f'[{", ".join([f"0x{item:04X}" for item in data])}]')
return data
def eeprom_erase(self):
'''Erase external uWire configuration EEPROM
Parameter:
None
Return:
None
'''
logger.debug('FTDI device, EEPROM, erase')
self.device.ctrl_transfer(bmRequestType = usb.util.build_request_type(direction = usb.util.CTRL_OUT, type = usb.util.CTRL_TYPE_VENDOR, recipient = usb.util.CTRL_RECIPIENT_DEVICE),
bRequest = CONTROL_REQUESTS['eeprom_erase'],
wValue = 0,
wIndex = 0)
logger.debug('OK')
return None
def eeprom_program(self, address, data):
'''Program external uWire configuration EEPROM
Parameter:
'address' : (int) Start word address
'data' : (list) Words to write
Return:
None
'''
logger.debug('FTDI device, EEPROM, program')
self.eeprom_write(address = address, data = data)
data_r = self.eeprom_read(address = address, length = len(data))
if data != data_r:
logger.error('FAIL: write and read data are different')
raise FTDIError
logger.debug('OK')
return None
#---------------------------------------------------------------------------
# USB
def write(self, port, data):
'''Write to FTDI device over USB
Parameter:
'port' : (str) Port name ('A', 'B')
'data' : (list) list of bytes to write
Return:
None
'''
# Any amount of data may be sent to OUT endpoint as USB subsystem handles this automatically: device sends NAK on the OUT endpoint when its buffer gets full and the host computer reschedules the data delivery
# Therefore there is no need to split write data in chunks here
# logger.debug(f'Write {len(data)} byte(s) to FTDI device, port {port}: [{", ".join([f"0x{item:02X}" for item in data])}]')
if data == []:
logger.critical(f'FAIL: zero bytes were requested to be written')
raise FTDICritical
length = self.device.write(endpoint = ENDPOINTS_OUT[port], data = data)
if length != len(data):
logger.error(f'FAIL: {len(data)} bytes were requested to be written, {length} bytes were actually written')
raise FTDIError
# length = 0
# for attempt in range(10):
# length += self.device.write(endpoint = ENDPOINTS_OUT[port], data = data[length:])
# logger.debug(f| 'Attempt {attempt}: {length} out of {len(data)} bytes(s)')
# if length == len(data):
# break
# else:
# logger.error(f'FAIL: {len(data)} bytes were requested to be written, {length} bytes were actually written')
# raise FTDIError
# logger.debug(f| 'Write {len(data)} byte(s) in chunks to FTDI device, port {port}')
# Split long data list into chunks of maximum TX buffer
# for offset in range(0, len(data), self.tx_buffer_size):
# chunk = data[offset : offset + min(self.tx_buffer_size, len(data) - offset)]
# logger.debug(f'| Write {len(chunk)} byte(s) to FTDI device, port {port}: [{", ".join([f"0x{item:02X}" for item in chunk])}]')
# length = self.device.write(endpoint = ENDPOINTS_OUT[port], data = chunk)
# if length != len(chunk):
# logger.error(f'FAIL: {len(chunk)} bytes were requested to be written, {length} bytes were actually written')
# raise FTDIError
# logger.debug('OK')
return None
def read(self, port, length):
'''Read from FTDI device over USB
Parameter:
'port' : (str) Port name ('A', 'B')
'length' : (int) number of bytes to read
Return:
(list) list of read bytes
'''
# Host computer BUFFERS data from the IN endpoint until either its size reaches the requested limit or a timeout occurs
# logger.debug(f'Read {length} byte(s) from FTDI device, port {port}')
data = []
# Read out data with timeout
for attempt in range(100):
length_r = length - len(data)
length_r += MODEM_STATUS_LENGTH * (length_r // (USB_PACKET_SIZE - MODEM_STATUS_LENGTH) + 1) # Reserve space for modem status bytes in each USB packet
data_r = list(self.device.read(endpoint = ENDPOINTS_IN[port], size_or_buffer = length_r))
for packet, offset in enumerate(range(0, len(data_r), USB_PACKET_SIZE)):
chunk = data_r[offset : offset + min(USB_PACKET_SIZE, len(data_r) - offset)]
modem_status = chunk[0:2]
data += chunk[2:] # Skip modem status bytes that FTDI device returns on each USB | |
pd.DataFrame(pctls, index=[0])
def parse_hist(rtypes, exps, clients=[], dt='client-end-to-end'):
    """Load and merge per-client latency histograms for each experiment.

    Parameters:
        rtypes  : list of request-type labels expected in the histogram files
        exps    : list of experiment directory names under ``exp_base_folder``
        clients : client ids; each contributes a ``traces_hist`` file
                  (read-only here, so the mutable default is harmless)
        dt      : data-type key under which results are stored

    Returns:
        Nested dict ``hists[rtype|'all'][exp][dt]`` of 1-row percentile
        DataFrames (see ``compute_pctls``), each with added
        'p99_slowdown'/'p99.9_slowdown' columns.
    """
    if not clients:
        print('No clients given')
        return {}
    hists = {t: {exp: {dt: None} for exp in exps} for t in rtypes}
    hists['all'] = {exp: {dt: None} for exp in exps}
    for exp in exps:
        # Workload name is the 3rd '_'-separated field of the experiment
        # label — assumes the '<policy>_<load>_<workload>...' convention.
        wl = exp.split('_')[2].split('.')[0]
        tdfs = {t: [] for t in rtypes}
        for clt in clients:
            exp_folder = os.path.join(exp_base_folder, exp, 'client'+str(clt), '')
            filename = os.path.join(exp_folder, 'traces_hist')
            if not Path(filename).exists():
                print(f'(unknown) does not exist')
                continue
            with open(filename, 'r') as f:
                lines = f.readlines()
            # File alternates header line / values line; first token of the
            # values line is the request type.
            for (header,values) in zip(lines[::2], lines[1::2]):
                t = values.split()[0]
                if t == 'UNKNOWN':
                    continue
                tdfs[t].append(pd.DataFrame(
                    {k:v for (k,v) in zip(header.split()[1:], values.split()[1:])},
                    index=[0]
                ))
        if len(clients) > len(tdfs[rtypes[0]]):
            print(f'[{exp}] Missing {len(clients) - len(tdfs[rtypes[0]])} client histogram(s)')
        # Compute percentiles for each request type.
        typed_hists = {}
        for t in rtypes:
            typed_hists[t] = merge_hists(pd.concat(tdfs[t]).fillna(0).astype('uint64'))
            hists[t][exp][dt] = compute_pctls(typed_hists[t])
            hists[t][exp][dt]['p99_slowdown'] = hists[t][exp][dt]['p99'] / workloads[wl][t]['MEAN']
            hists[t][exp][dt]['p99.9_slowdown'] = hists[t][exp][dt]['p99.9'] / workloads[wl][t]['MEAN']
        # Merge the per-type histograms into an overall histogram.
        hists['all'][exp][dt] = compute_pctls(
            merge_hists(pd.concat(typed_hists.values()).fillna(0).astype('uint64'))
        )
        # Build a slowdown histogram per request type: each latency bucket is
        # rescaled by the type's mean service time.
        slowdown_hists = []
        for t in rtypes:
            h = typed_hists[t]
            m = workloads[wl][t]['MEAN']
            base_cols = {
                'MIN': h['MIN'] / m,
                'MAX': h['MAX'] / m,
                'TOTAL': h['TOTAL'] / m,
                'COUNT': h['COUNT'],
            }
            buckets = list(set(h.columns) - set(['MIN', 'MAX', 'COUNT', 'TOTAL']))
            cols = {}
            for bucket in buckets:
                # Several latency buckets can collapse into the same slowdown
                # bucket after rescaling; accumulate their counts.
                col = str(int(int(bucket) / m))
                if col in cols:
                    cols[col] += h[bucket].values[0]
                else:
                    cols[col] = h[bucket].values[0]
            slowdown_hists.append(pd.DataFrame({**base_cols, **cols}))
        merged_slowdown_hist = merge_hists(pd.concat(slowdown_hists).fillna(0).astype('uint64'))
        slowdown_pctls = compute_pctls(merged_slowdown_hist)
        hists['all'][exp][dt]['p99_slowdown'] = slowdown_pctls['p99']
        hists['all'][exp][dt]['p99.9_slowdown'] = slowdown_pctls['p99.9']
    return hists
def parse_rates(exps, clients=None):
    """Aggregate offered/achieved request rates across clients per experiment.

    Parameters:
        exps    : list of experiment directory names under ``exp_base_folder``
        clients : client ids; each contributes a ``traces_rates`` TSV file

    Returns:
        dict mapping experiment name to a 1-row DataFrame with summed
        'OFFERED' and 'ACHIEVED' columns. Experiments with no readable
        client file are skipped; empty dict when no clients are given.
    """
    # `clients=None` replaces the old mutable-default `clients=[]` idiom.
    if not clients:
        print('No clients given')
        return {}
    rates = {}
    for exp in exps:
        dfs = []
        for clt in clients:
            exp_folder = os.path.join(exp_base_folder, exp, 'client'+str(clt), '')
            filename = os.path.join(exp_folder, 'traces_rates')
            if not Path(filename).exists():
                print(f'(unknown) does not exist')
                continue
            dfs.append(pd.read_csv(filename, delimiter='\t', engine='c'))
        if not dfs:
            # Previously pd.concat([]) raised ValueError when every client
            # file was missing; skip such experiments instead.
            print(f'[{exp}] No rate files found for any client, skipping')
            continue
        df = pd.concat(dfs)
        rates[exp] = pd.DataFrame({'OFFERED': [df.OFFERED.sum()], 'ACHIEVED': [df.ACHIEVED.sum()]})
    return rates
def prepare_pctl_data(rtypes, exps=[], exp_file=None, app="REST", dt='client-end-to-end', reset_cache=False, remove_drops=False, full_sample=False, verbose=False, **kwargs):
    """Build overall and per-request-type percentile DataFrames for a set
    of experiments, memoising the result in the module-level ``cache``
    keyed by ``exp_file``.

    Parameters:
        rtypes      : request-type labels
        exps        : experiment labels (ignored when ``exp_file`` is given)
        exp_file    : file listing experiment labels; also the cache key
        app         : unused here — TODO confirm whether callers rely on it
        dt          : data-type key ('client-end-to-end')
        reset_cache : bypass and overwrite the cache entry
        remove_drops: replace percentiles of runs that dropped requests
                      with a 1e9 sentinel
        full_sample : load full traces instead of pre-binned histograms
        **kwargs    : forwarded to parse_rates/parse_hist/prepare_traces
                      (e.g. ``clients``)

    Returns:
        (df, typed_df) — overall and per-type DataFrames — or None when no
        experiment labels are available (NOTE(review): callers that unpack
        the result will raise on that branch).
    """
    if (not reset_cache) and exp_file in cache:
        return cache[exp_file]['all'], cache[exp_file]['typed']
    if exp_file is not None:
        exps = read_exp_names_from_file(exp_file)
    if not exps:
        print('No experiment labels given')
        return
    rates_df = parse_rates(exps, **kwargs)
    if (full_sample):
        # Slow path: load the complete client-side trace samples.
        t0 = time.time()
        dfs = {t: prepare_traces(exps, [dt], req_type=t, client_only=True, **kwargs) for t in rtypes}
        dfs['all'] = prepare_traces(exps, [dt], client_only=True, **kwargs)
        t1 = time.time()
        print('loaded {} traces in {} seconds'.format(len(exps), t1-t0))
    else:
        # Fast path: use the pre-binned histograms.
        t0 = time.time()
        dfs = parse_hist(rtypes, exps, **kwargs)
        print(f'[{exp_file}] Parsed histograms in {time.time()-t0:.6f} seconds')
    if verbose:
        print(dfs)
    t0 = time.time()
    rows = []
    typed_rows = []
    for exp in exps:
        # Experiment label fields, split on '_' and '.' — assumes the
        # '<policy>_<load>_<workload>....<run>..._<n_resa>' convention;
        # confirm against the experiment generator.
        pol = policies[exp.split('_')[0]]
        load = float(exp.split('_')[1])
        workload = exp.split('_')[2].split('.')[0]
        n_resa = int(exp.split('_')[-1].split('.')[0])
        run_number = int(exp.split('.')[2])
        rate_df = rates_df[exp]
        rate_data = [rate_df.OFFERED[0], rate_df.ACHIEVED[0]]
        '''
        if remove_drops and sum(rate_df.ACHIEVED < rate_df.OFFERED * .999) == 1:
            print(f'Exp {exp} dropped requests (achieved={rate_df.ACHIEVED.values}, offered={rate_df.OFFERED.values}) passing.')
            continue
        '''
        if full_sample:
            # NOTE(review): this branch appends 12 values per row, but the
            # DataFrames below declare 14 columns (no run_number/reserved)
            # — likely broken; confirm before relying on full_sample=True.
            for i, t in enumerate(rtypes):
                if not (exp not in dfs[t] or dt not in dfs[t][exp] or dfs[t][exp][dt].empty):
                    df = dfs[t][exp][dt]
                    df.VALUE /= 1000  # mutates the loaded trace in place
                    df['slowdown'] = df.VALUE / workloads[workload][t]['MEAN']
                    data = [
                        pol, load, t,
                        int(df.VALUE.mean()), int(df.VALUE.median()), int(df.VALUE.quantile(q=.99)),
                        int(df.VALUE.quantile(q=.999)), int(df.VALUE.quantile(q=.9999)),
                        int(df.slowdown.quantile(q=.99)), int(df.slowdown.quantile(q=.999))
                    ]
                    typed_rows.append(data + rate_data)
            if not (exp not in dfs['all'] or dt not in dfs['all'][exp] or dfs['all'][exp][dt].empty):
                df = dfs['all'][exp][dt]
                df.VALUE /= 1000
                df['slowdown'] = df.apply(lambda x: x.VALUE / workloads[workload][x.REQ_TYPE]['MEAN'], axis = 1)
                data = [
                    pol, load, 'UNKNOWN',
                    int(df.VALUE.mean()), int(df.VALUE.median()), int(df.VALUE.quantile(q=.99)),
                    int(df.VALUE.quantile(q=.999)), int(df.VALUE.quantile(q=.9999)),
                    int(df.slowdown.quantile(q=.99)), int(df.slowdown.quantile(q=.999))
                ]
                rows.append(data + rate_data)
        else:
            # Histogram path: each dfs[...][exp][dt] is a 1-row DataFrame,
            # hence the [0] indexing on every value below.
            for i, t in enumerate(rtypes):
                if not (exp not in dfs[t] or dt not in dfs[t][exp] or dfs[t][exp][dt].empty):
                    df = dfs[t][exp][dt]
                    # When filtering drops, runs whose achieved rate fell
                    # below the offered rate get a 1e9 sentinel percentile.
                    if remove_drops and sum(rate_df.ACHIEVED < rate_df.OFFERED * .999) == 1:
                        p999_slowdown = 1e9
                        p999 = 1e9
                    else:
                        p999_slowdown = df['p99.9_slowdown'][0]
                        p999 = df['p99.9'][0]
                    if remove_drops and sum(rate_df.ACHIEVED < rate_df.OFFERED * .99) == 1:
                        p99_slowdown = 1e9
                        p99 = 1e9
                    else:
                        p99_slowdown = df['p99_slowdown'][0]
                        p99 = df['p99'][0]
                    data = [
                        pol, load, t, run_number,
                        df['MEAN'][0], df['MEDIAN'][0], p99, p999, df['p99.99'][0],
                        p99_slowdown, p999_slowdown
                    ]
                    typed_rows.append(data + rate_data + [n_resa])
            if not (exp not in dfs['all'] or dt not in dfs['all'][exp] or dfs['all'][exp][dt].empty):
                df = dfs['all'][exp][dt]
                if remove_drops and sum(rate_df.ACHIEVED < rate_df.OFFERED * .999) == 1:
                    p999_slowdown = 1e9
                    p999 = 1e9
                else:
                    p999_slowdown = df['p99.9_slowdown'][0]
                    p999 = df['p99.9'][0]
                if remove_drops and sum(rate_df.ACHIEVED < rate_df.OFFERED * .99) == 1:
                    p99_slowdown = 1e9
                    p99 = 1e9
                else:
                    p99_slowdown = df['p99_slowdown'][0]
                    p99 = df['p99'][0]
                data = [
                    pol, load, 'UNKNOWN', run_number,
                    df['MEAN'][0], df['MEDIAN'][0], p99, p999, df['p99.99'][0],
                    p99_slowdown, p999_slowdown
                ]
                rows.append(data + rate_data + [n_resa])
    t1 = time.time()
    print(f'[{exp_file}] Prepared df rows in {t1-t0:.6f} seconds')
    t0 = time.time()
    types = {
        'policy': 'object', 'load': 'float', 'type': 'object', 'run_number': 'int', 'mean': 'int64',
        'median': 'int64', 'p99': 'int64', 'p99_slowdown': 'int64', 'p99.9': 'int64',
        'p99.99': 'int64', 'p99.9_slowdown': 'int64', 'offered': 'int64', 'achieved': 'int64',
        'reserved': 'int64'
    }
    df = pd.DataFrame(
        rows,
        columns=[
            'policy', 'load', 'type', 'run_number', 'mean', 'median', 'p99', 'p99.9', 'p99.99',
            'p99_slowdown', 'p99.9_slowdown', 'offered', 'achieved', 'reserved'
        ]
    ).dropna().astype(dtype=types)
    typed_df = pd.DataFrame(
        typed_rows,
        columns=[
            'policy', 'load', 'type', 'run_number', 'mean', 'median', 'p99', 'p99.9', 'p99.99',
            'p99_slowdown', 'p99.9_slowdown', 'offered', 'achieved', 'reserved'
        ]
    ).dropna().astype(dtype=types)
    t1 = time.time()
    print(f'[{exp_file}] Created df in {t1-t0:.6f} seconds')
    # If we want to get Krps and us rather than rps and ns
    df.achieved /= 1000
    df.offered /= 1000
    typed_df.achieved /= 1000
    typed_df.offered /= 1000
    cache[exp_file] = {}
    cache[exp_file]['all'] = df
    cache[exp_file]['typed'] = typed_df
    return df, typed_df
def gen_wl_dsc(workload, req_names=None, total_cores=14):
    """Render a one-line-per-request-type description of a workload.

    Parameters:
        workload    : dict with parallel lists under keys 'rtype',
                      'mean_ns' and 'ratios'
        req_names   : optional mapping from request type to display name
        total_cores : core budget used for the CPU-demand split
                      (previously hard-coded to 14)

    Returns:
        str of newline-separated "<name>: <mean> us, <ratio>%" lines,
        with no trailing newline.
    """
    # The schedule file itself should have a dict rather than lists.
    wl_dict = {}
    for i, rtype in enumerate(workload['rtype']):
        wl_dict[rtype] = {
            'mean_ns': workload['mean_ns'][i],
            'ratio': workload['ratios'][i],
            # Display name defaults to the raw request type label.
            'name': req_names[rtype] if req_names is not None else rtype,
        }
    # Iterate from shortest to longest mean service time (assumes 1 or 2
    # request types, as the original did).
    rtypes = list(workload['rtype'])
    if len(rtypes) > 1 and wl_dict[rtypes[0]]['mean_ns'] > wl_dict[rtypes[1]]['mean_ns']:
        req_iter = [rtypes[1], rtypes[0]]
    else:
        req_iter = rtypes
    # Weighted mean service time over the request mix (overall CPU demand).
    mean_ns = sum(wl_dict[r]['mean_ns'] * wl_dict[r]['ratio'] for r in req_iter)
    # Split the core budget proportionally to each type's CPU demand; kept
    # because the (optional) extended display lines below reference it.
    n_resas = 0
    for rtype in req_iter:
        demand = (wl_dict[rtype]['mean_ns'] * wl_dict[rtype]['ratio'] / mean_ns) * total_cores
        demand = round(demand) or 1  # every type gets at least one core
        demand = min(total_cores - n_resas, demand)
        n_resas += demand
        wl_dict[rtype]['demand'] = demand
        wl_dict[rtype]['stealable'] = total_cores - n_resas
    # Bug fix: the original guard `if i < len(workload['rtype'])` was always
    # true (i goes up to len-1), so every line ended with '\n', leaving a
    # stray trailing newline. join() emits separators only between lines.
    return '\n'.join(
        f"{wl_dict[rtype]['name']}: {wl_dict[rtype]['mean_ns']/1000} us, {wl_dict[rtype]['ratio'] * 100:.1f}%"
        for rtype in workload['rtype']
    )
# --- Global plotting / display configuration for the analysis notebook ---
# NOTE(review): 'normal' is a font style, not a family name — matplotlib may
# warn and fall back to its default family; confirm the intended font.
fd = {'family': 'normal', 'weight': 'bold', 'size': 9}
matplotlib.rc('font', **fd)
matplotlib.rcParams['lines.markersize'] = 3
pd.set_option('display.max_rows', 500)  # show large DataFrames without truncation
pd.options.display.float_format = '{:.9f}'.format  # high-precision float display
linestyles= [
('dotted', (0, (1, 1))),
('dashed', (0, (5, 5))),
('dashdotted', (0, (3, 5, 1, | |
not in set(cached_war.attacks))
else:
new_attacks = war.attacks
for attack in new_attacks:
await callback(attack, war)
return _ValidateEvent.shortcut_register(wrapped, tags, custom_class, retry_interval, WarEvents.event_type)
@_ValidateEvent
class ClientEvents:
    """Class that defines all valid client/misc events."""

    event_type = "client"

    def __getattr__(self, item):
        # Any attribute access produces a zero-argument decorator factory.
        # The resulting decorator tags the callback with the accessed event
        # name so EventsClient.event() can register it as a client event.
        def factory():
            def decorator(callback):
                callback.is_client_event = True
                callback.event_name = item
                return callback
            return decorator
        return factory
class EventsClient(Client):
# pylint: disable=missing-docstring
__doc__ = Client.__doc__
def __init__(self, **options):
    # Event-dispatching variant of Client: _setup() spawns background
    # updater tasks that poll the API and fire registered callbacks.
    super().__init__(**options)
    self._setup()
    self._in_maintenance_event = asyncio.Event()
    self._in_maintenance_event.set()  # only block when maintenance is on
    self._keys_ready = asyncio.Event()
    # Per-category retry intervals; overridden when a registered event
    # supplies its own retry_interval (see event()).
    self.clan_retry_interval = 0
    self.player_retry_interval = 0
    self.war_retry_interval = 0
    # Model classes used for API payloads; event decorators may override
    # these via their custom_class argument (see event()).
    self.clan_cls = Clan
    self.player_cls = Player
    self.war_cls = ClanWar
    # Iteration counters for the updater loops (e.g. dispatched with
    # "war_loop_start" in _war_updater).
    self.clan_loops_run = 0
    self.player_loops_run = 0
    self.war_loops_run = 0
    self.is_cwl_active = options.pop("cwl_active", True)
    self.check_cwl_prep = options.pop("check_cwl_prep", False)
    self._locks = {}
def _setup(self):
    # Start one background task per update category; the done-callback
    # recreates any task that dies with an exception
    # (see _task_callback_check).
    self._updater_tasks = {
        "clan": self.loop.create_task(self._clan_updater()),
        "player": self.loop.create_task(self._player_updater()),
        "war": self.loop.create_task(self._war_updater()),
        "maintenance": self.loop.create_task(self._maintenance_poller()),
        "season": self.loop.create_task(self._end_of_season_poller())
    }
    for task in self._updater_tasks.values():
        task.add_done_callback(self._task_callback_check)
    # Tags subscribed for updates, and registered listeners per category.
    # "client" listeners are grouped by event name, the others are lists.
    self._clan_updates = set()
    self._player_updates = set()
    self._war_updates = set()
    self._listeners = {"clan": [], "player": [], "war": [], "client": {}}
    # Caches of the most recently fetched objects, keyed by tag
    # (see _get_cached_* / _update_*).
    self._clans = {}
    self._players = {}
    self._wars = {}
def add_clan_updates(self, *tags):
    """Subscribe the given clan tags to clan update events.

    Parameters
    ----------
    \\*tags : str
        The clan tags to add. Unpack an iterable with \\*, e.g.
        ``client.add_clan_updates(*tags)``.
    """
    for candidate in tags:
        if isinstance(candidate, str):
            # Normalise the tag before storing it.
            self._clan_updates.add(correct_tag(candidate))
        else:
            raise TypeError("clan tag must be of type str not {0!r}".format(candidate))
def remove_clan_updates(self, *tags):
    """Unsubscribe the given clan tags from clan update events.

    Tags that were never registered are silently ignored.

    Parameters
    ----------
    \\*tags : str
        The clan tags to remove. Unpack an iterable with \\*, e.g.
        ``client.remove_clan_updates(*tags)``.
    """
    for candidate in tags:
        if not isinstance(candidate, str):
            raise TypeError("clan tag must be of type str not {0!r}".format(candidate))
        # set.discard is a no-op for unknown tags, matching the previous
        # try/except KeyError behaviour.
        self._clan_updates.discard(correct_tag(candidate))
def add_player_updates(self, *tags):
    r"""Subscribe the given player tags to player update events.

    Parameters
    ----------
    \*tags : str
        The player tags to add. Unpack an iterable with \*, e.g.
        ``client.add_player_updates(*tags)``.
    """
    for candidate in tags:
        if isinstance(candidate, str):
            # Normalise the tag before storing it.
            self._player_updates.add(correct_tag(candidate))
        else:
            raise TypeError("player tag must be of type str not {0!r}".format(candidate))
def remove_player_updates(self, *tags):
    r"""Unsubscribe the given player tags from player update events.

    Tags that were never registered are silently ignored.

    Parameters
    ----------
    \*tags : str
        The player tags to remove. Unpack an iterable with \*, e.g.
        ``client.remove_player_updates(*tags)``.
    """
    for candidate in tags:
        if not isinstance(candidate, str):
            raise TypeError("player tag must be of type str not {0!r}".format(candidate))
        # set.discard is a no-op for unknown tags, matching the previous
        # try/except KeyError behaviour.
        self._player_updates.discard(correct_tag(candidate))
def add_war_updates(self, *tags):
    r"""Subscribe the given clan tags to war update events.

    Parameters
    ----------
    \*tags : str
        The clan tags that should receive war events. Unpack an iterable
        with \*, e.g. ``client.add_war_updates(*tags)``.
    """
    for candidate in tags:
        if isinstance(candidate, str):
            # Normalise the tag before storing it.
            self._war_updates.add(correct_tag(candidate))
        else:
            raise TypeError("clan war tags must be of type str not {0!r}".format(candidate))
def remove_war_updates(self, *tags):
    r"""Unsubscribe the given clan tags from war update events.

    Tags that were never registered are silently ignored.

    Parameters
    ----------
    \*tags : str
        The clan tags that should stop receiving war events. Unpack an
        iterable with \*, e.g. ``client.remove_war_updates(*tags)``.
    """
    for candidate in tags:
        if not isinstance(candidate, str):
            raise TypeError("clan war tags must be of type str not {0!r}".format(candidate))
        # set.discard is a no-op for unknown tags, matching the previous
        # try/except KeyError behaviour.
        self._war_updates.discard(correct_tag(candidate))
def _get_cached_clan(self, clan_tag):
    # dict.get returns None for unseen tags — identical to the previous
    # try/except KeyError lookup.
    return self._clans.get(clan_tag)
def _update_clan(self, clan):
    # Replace the cached snapshot for this clan's tag.
    self._clans[clan.tag] = clan
def _get_cached_player(self, player_tag):
    # dict.get returns None for unseen tags — identical to the previous
    # try/except KeyError lookup.
    return self._players.get(player_tag)
def _update_player(self, player):
    # Replace the cached snapshot for this player's tag.
    self._players[player.tag] = player
def _get_cached_war(self, key):
    # dict.get returns None for unseen keys — identical to the previous
    # try/except KeyError lookup.
    return self._wars.get(key)
def _update_war(self, key, war):
    # Replace the cached war snapshot under the given cache key.
    self._wars[key] = war
def event(self, function):
    """A decorator or regular function that registers an event.

    The function **may be** a coroutine.

    Parameters
    ----------
    function : function
        The function to be registered (not needed if used with a decorator)

    Example
    --------

    .. code-block:: python3

        import coc

        client = coc.login(...)

        @client.event
        @coc.ClanEvents.description_change()
        async def player_donated_troops(old_player, new_player):
            print('{} has donated troops!'.format(new_player))

    .. code-block:: python3

        import coc

        client = coc.login(...)

        @client.event
        @coc.ClientEvents.maintenance_start()
        async def maintenance_has_started():
            print('maintenance has started!')

    .. note::

        The order of decorators is important - the ``@client.event`` one
        must lay **above** the event decorator itself.

    Returns
    --------
    function : The function registered
    """
    # Client events (marked by ClientEvents.__getattr__) are grouped per
    # event name rather than per tag.
    if getattr(function, "is_client_event", False):
        try:
            self._listeners["client"][function.event_name].append(function)
        except KeyError:
            self._listeners["client"][function.event_name] = [function]
        return function
    if not getattr(function, "is_event_listener", None):
        raise ValueError("no events found to register to this callback")
    # One Event per runner attached by the clan/player/war decorators.
    events = [Event.from_decorator(function, runner) for runner in function.event_runners]
    retry_interval = getattr(function, "event_retry_interval")
    cls = getattr(function, "event_cls")
    tags = getattr(function, "event_tags")
    event_type = events[0].type
    self._listeners[event_type].extend(events)
    # Apply per-category overrides and subscribe the decorator's tags.
    if event_type == "clan":
        self.clan_cls = cls or self.clan_cls
        self.clan_retry_interval = retry_interval or self.clan_retry_interval
        self.add_clan_updates(*tags)
    elif event_type == "player":
        self.player_cls = cls or self.player_cls
        self.player_retry_interval = retry_interval or self.player_retry_interval
        self.add_player_updates(*tags)
    elif event_type == "war":
        if function.event_name == "members":
            self.check_cwl_prep = True  # we need to check cwl clans in prep for this one.
        self.war_cls = cls or self.war_cls
        self.war_retry_interval = retry_interval or self.war_retry_interval
        self.add_war_updates(*tags)
    LOG.info("Successfully registered %s event", function)
    return function
def add_events(self, *events):
    r"""Register several event listeners in one call.

    Equivalent to calling :meth:`EventsClient.event` on each function.

    Parameters
    -----------
    \*\events: :class:`function`
        The event listener functions to add.
    """
    for listener in events:
        self.event(listener)
def remove_events(self, *events):
    r"""Deregister several event listeners in one call.

    Parameters
    -----------
    \*\events: :class:`function`
        The event listener functions to remove.
    """
    for listener in events:
        for runner in listener.event_runners:
            ev = Event.from_decorator(listener, runner)
            self._listeners[ev.type].remove(ev)
def run_forever(self):
    """A blocking call which runs the loop and script.

    This is useful if you have no other clients to deal
    with and just wish to run the script and receive updates indefinitely.

    Roughly equivalent to:

    .. code-block:: python3

        try:
            client.loop.run_forever()
        except KeyboardInterrupt:
            client.close()
        finally:
            client.loop.close()
    """
    # NOTE(review): unlike the sketch above, the loop itself is never
    # closed here — close() only cancels tasks and closes the client.
    try:
        self.loop.run_forever()
    except KeyboardInterrupt:
        self.close()
def close(self):
    """Closes the client and cancels all running tasks.

    Cancels every unfinished task on the client's loop before delegating
    to the base ``Client.close``.
    """
    # Bug fix: asyncio.Task.all_tasks() was deprecated in 3.7 and removed
    # in Python 3.9; the module-level asyncio.all_tasks() (3.7+) is the
    # supported replacement. The explicit loop argument is required here
    # because close() runs outside the event loop.
    tasks = {t for t in asyncio.all_tasks(loop=self.loop) if not t.done()}
    for task in tasks:
        task.cancel()
    super().close()
def dispatch(self, event_name: str, *args, **kwargs):
    # pylint: disable=broad-except
    # Fire every registered client listener for `event_name`, scheduling
    # each as a task so listeners run concurrently.
    registered = self._listeners["client"].get(event_name)
    if registered is None:
        # Even with no listener registered, "event_error" is surfaced so
        # exceptions from updater tasks are never silently lost.
        if event_name == "event_error":
            LOG.exception("Ignoring exception in event task.")
            print("Ignoring exception in event task.")
            traceback.print_exc()
    else:
        for event in registered:
            try:
                # Failures while scheduling are logged, never raised.
                asyncio.ensure_future(event(*args, **kwargs))
            except (BaseException, Exception):
                LOG.exception("Ignoring exception in %s.", event_name)
def _task_callback_check(self, result):
    # Done-callback attached to every updater task in _setup(): logs
    # cancellations, and if a task died with an unhandled exception,
    # reports it and schedules a fresh replacement task in its place.
    if not result.done():
        return
    if result.cancelled():
        LOG.info("Task %s was cancelled", str(result))
        return
    exception = result.exception()
    if not exception:
        return
    LOG.exception("Task raised an exception that was unhandled. Restarting the task.", exc_info=exception)
    lookup = {
        "clan": self._clan_updater,
        "player": self._player_updater,
        "war": self._war_updater,
        "maintenance": self._maintenance_poller,
        "season": self._end_of_season_poller
    }
    # Identify which updater this task belonged to and restart that one,
    # re-attaching this callback so the replacement is also supervised.
    for name, value in self._updater_tasks.items():
        if value != result:
            continue
        self._updater_tasks[name] = self.loop.create_task(lookup[name]())
        self._updater_tasks[name].add_done_callback(self._task_callback_check)
async def _end_of_season_poller(self):
    # Sleep until the next season boundary, dispatch "new_season_start",
    # then repeat. On unexpected errors, report via "event_error" and
    # restart itself.
    try:
        while self.loop.is_running():
            end_of_season = get_season_end()
            # Assumes get_season_end() returns a naive UTC datetime,
            # matching utcnow() — TODO confirm.
            now = datetime.utcnow()
            await asyncio.sleep((end_of_season - now).total_seconds())
            self.dispatch("new_season_start")
    except asyncio.CancelledError:
        pass
    except (Exception, BaseException) as exception:
        self.dispatch("event_error", exception)
        return await self._end_of_season_poller()
async def _maintenance_poller(self):
    # pylint: disable=broad-except, protected-access
    # Detect API maintenance windows by polling a fixed player endpoint.
    # While maintenance is on, _in_maintenance_event is cleared so the
    # updater loops (which wait on it) pause.
    maintenance_start = None
    try:
        while self.loop.is_running():
            try:
                # Fixed, well-known tag used purely as a liveness probe.
                player = await self.get_player("#JY9J2Y99")
                await asyncio.sleep(player._response_retry + 1)
            except Maintenance:
                # Dispatch "maintenance_start" only on the first failure.
                if maintenance_start is None:
                    self._in_maintenance_event.clear()
                    maintenance_start = datetime.utcnow()
                    self.dispatch("maintenance_start")
                await asyncio.sleep(15)
            except Exception:
                # Transient error: back off and retry the probe.
                await asyncio.sleep(DEFAULT_SLEEP)
            else:
                # Probe succeeded: if we were in maintenance, signal its end.
                if maintenance_start is not None:
                    self._in_maintenance_event.set()
                    self.dispatch("maintenance_completion", maintenance_start)
                    maintenance_start = None
    except asyncio.CancelledError:
        pass
    except (Exception, BaseException) as exception:
        self.dispatch("event_error", exception)
        return await self._maintenance_poller()
async def _war_updater(self):
# pylint: disable=broad-except
try:
while self.loop.is_running():
await asyncio.sleep(DEFAULT_SLEEP)
await self._in_maintenance_event.wait() # don't run if we're hitting maintenance errors.
self.dispatch("war_loop_start", self.war_loops_run)
if self.is_cwl_active and self.check_cwl_prep:
options = (WarRound.current_war, WarRound.current_preparation)
else:
options = (WarRound.current_war, )
tasks = [
self.loop.create_task(self._run_war_update(tag, option))
for tag in self._war_updates for |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.